dd                115 arch/arm/mach-omap2/clkt2xxx_dpllcore.c 	const struct dpll_data *dd;
dd                134 arch/arm/mach-omap2/clkt2xxx_dpllcore.c 		dd = clk->dpll_data;
dd                135 arch/arm/mach-omap2/clkt2xxx_dpllcore.c 		if (!dd)
dd                139 arch/arm/mach-omap2/clkt2xxx_dpllcore.c 			omap_clk_ll_ops.clk_readl(&dd->mult_div1_reg);
dd                140 arch/arm/mach-omap2/clkt2xxx_dpllcore.c 		tmpset.cm_clksel1_pll &= ~(dd->mult_mask |
dd                141 arch/arm/mach-omap2/clkt2xxx_dpllcore.c 					   dd->div1_mask);
dd                154 arch/arm/mach-omap2/clkt2xxx_dpllcore.c 		tmpset.cm_clksel1_pll |= (div << __ffs(dd->mult_mask));
dd                155 arch/arm/mach-omap2/clkt2xxx_dpllcore.c 		tmpset.cm_clksel1_pll |= (mult << __ffs(dd->div1_mask));
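
The clkt2xxx_dpllcore.c hits above are the standard read-modify-write idiom for packed register fields: clear the field through its mask, then shift the new value up by the mask's lowest set bit (__ffs). A minimal standalone sketch of that idiom, with made-up register and mask values and __builtin_ctz standing in for the kernel's __ffs():

	#include <stdint.h>
	#include <stdio.h>

	/* insert 'val' into the register field selected by a non-zero 'mask' */
	static uint32_t field_insert(uint32_t reg, uint32_t mask, uint32_t val)
	{
		reg &= ~mask;                               /* clear the old field */
		reg |= (val << __builtin_ctz(mask)) & mask; /* shift new value in  */
		return reg;
	}

	int main(void)
	{
		uint32_t reg = 0xDEAD0000;
		/* hypothetical mult field in bits 12..19, div field in bits 8..11 */
		reg = field_insert(reg, 0x000FF000, 0x2D);
		reg = field_insert(reg, 0x00000F00, 0x3);
		printf("0x%08X\n", reg);	/* 0xDEA2D300 */
		return 0;
	}
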
dd                343 arch/arm/vfp/vfp.h u32 vfp_double_normaliseround(int dd, struct vfp_double *vd, u32 fpscr, u32 exceptions, const char *func);
dd                373 arch/arm/vfp/vfp.h 	u32 (* const fn)(int dd, int dn, int dm, u32 fpscr);
dd                 70 arch/arm/vfp/vfpdouble.c u32 vfp_double_normaliseround(int dd, struct vfp_double *vd, u32 fpscr, u32 exceptions, const char *func)
dd                196 arch/arm/vfp/vfpdouble.c 			 dd, d, exceptions);
dd                197 arch/arm/vfp/vfpdouble.c 		vfp_put_double(d, dd);
dd                250 arch/arm/vfp/vfpdouble.c static u32 vfp_double_fabs(int dd, int unused, int dm, u32 fpscr)
dd                252 arch/arm/vfp/vfpdouble.c 	vfp_put_double(vfp_double_packed_abs(vfp_get_double(dm)), dd);
dd                256 arch/arm/vfp/vfpdouble.c static u32 vfp_double_fcpy(int dd, int unused, int dm, u32 fpscr)
dd                258 arch/arm/vfp/vfpdouble.c 	vfp_put_double(vfp_get_double(dm), dd);
dd                262 arch/arm/vfp/vfpdouble.c static u32 vfp_double_fneg(int dd, int unused, int dm, u32 fpscr)
dd                264 arch/arm/vfp/vfpdouble.c 	vfp_put_double(vfp_double_packed_negate(vfp_get_double(dm)), dd);
dd                268 arch/arm/vfp/vfpdouble.c static u32 vfp_double_fsqrt(int dd, int unused, int dm, u32 fpscr)
dd                289 arch/arm/vfp/vfpdouble.c 		vfp_put_double(vfp_double_pack(vdp), dd);
dd                349 arch/arm/vfp/vfpdouble.c 	return vfp_double_normaliseround(dd, &vdd, fpscr, 0, "fsqrt");
dd                358 arch/arm/vfp/vfpdouble.c static u32 vfp_compare(int dd, int signal_on_qnan, int dm, u32 fpscr)
dd                373 arch/arm/vfp/vfpdouble.c 	d = vfp_get_double(dd);
dd                419 arch/arm/vfp/vfpdouble.c static u32 vfp_double_fcmp(int dd, int unused, int dm, u32 fpscr)
dd                421 arch/arm/vfp/vfpdouble.c 	return vfp_compare(dd, 0, dm, fpscr);
dd                424 arch/arm/vfp/vfpdouble.c static u32 vfp_double_fcmpe(int dd, int unused, int dm, u32 fpscr)
dd                426 arch/arm/vfp/vfpdouble.c 	return vfp_compare(dd, 1, dm, fpscr);
dd                429 arch/arm/vfp/vfpdouble.c static u32 vfp_double_fcmpz(int dd, int unused, int dm, u32 fpscr)
dd                431 arch/arm/vfp/vfpdouble.c 	return vfp_compare(dd, 0, VFP_REG_ZERO, fpscr);
dd                434 arch/arm/vfp/vfpdouble.c static u32 vfp_double_fcmpez(int dd, int unused, int dm, u32 fpscr)
dd                436 arch/arm/vfp/vfpdouble.c 	return vfp_compare(dd, 1, VFP_REG_ZERO, fpscr);
dd                482 arch/arm/vfp/vfpdouble.c static u32 vfp_double_fuito(int dd, int unused, int dm, u32 fpscr)
dd                491 arch/arm/vfp/vfpdouble.c 	return vfp_double_normaliseround(dd, &vdm, fpscr, 0, "fuito");
dd                494 arch/arm/vfp/vfpdouble.c static u32 vfp_double_fsito(int dd, int unused, int dm, u32 fpscr)
dd                503 arch/arm/vfp/vfpdouble.c 	return vfp_double_normaliseround(dd, &vdm, fpscr, 0, "fsito");
dd                655 arch/arm/vfp/vfpdouble.c static u32 vfp_double_ftosiz(int dd, int unused, int dm, u32 fpscr)
dd                657 arch/arm/vfp/vfpdouble.c 	return vfp_double_ftosi(dd, unused, dm, FPSCR_ROUND_TOZERO);
dd                851 arch/arm/vfp/vfpdouble.c vfp_double_multiply_accumulate(int dd, int dn, int dm, u32 fpscr, u32 negate, char *func)
dd                868 arch/arm/vfp/vfpdouble.c 	vfp_double_unpack(&vdn, vfp_get_double(dd));
dd                876 arch/arm/vfp/vfpdouble.c 	return vfp_double_normaliseround(dd, &vdd, fpscr, exceptions, func);
dd                886 arch/arm/vfp/vfpdouble.c static u32 vfp_double_fmac(int dd, int dn, int dm, u32 fpscr)
dd                888 arch/arm/vfp/vfpdouble.c 	return vfp_double_multiply_accumulate(dd, dn, dm, fpscr, 0, "fmac");
dd                894 arch/arm/vfp/vfpdouble.c static u32 vfp_double_fnmac(int dd, int dn, int dm, u32 fpscr)
dd                896 arch/arm/vfp/vfpdouble.c 	return vfp_double_multiply_accumulate(dd, dn, dm, fpscr, NEG_MULTIPLY, "fnmac");
dd                902 arch/arm/vfp/vfpdouble.c static u32 vfp_double_fmsc(int dd, int dn, int dm, u32 fpscr)
dd                904 arch/arm/vfp/vfpdouble.c 	return vfp_double_multiply_accumulate(dd, dn, dm, fpscr, NEG_SUBTRACT, "fmsc");
dd                910 arch/arm/vfp/vfpdouble.c static u32 vfp_double_fnmsc(int dd, int dn, int dm, u32 fpscr)
dd                912 arch/arm/vfp/vfpdouble.c 	return vfp_double_multiply_accumulate(dd, dn, dm, fpscr, NEG_SUBTRACT | NEG_MULTIPLY, "fnmsc");
dd                918 arch/arm/vfp/vfpdouble.c static u32 vfp_double_fmul(int dd, int dn, int dm, u32 fpscr)
dd                932 arch/arm/vfp/vfpdouble.c 	return vfp_double_normaliseround(dd, &vdd, fpscr, exceptions, "fmul");
dd                938 arch/arm/vfp/vfpdouble.c static u32 vfp_double_fnmul(int dd, int dn, int dm, u32 fpscr)
dd                954 arch/arm/vfp/vfpdouble.c 	return vfp_double_normaliseround(dd, &vdd, fpscr, exceptions, "fnmul");
dd                960 arch/arm/vfp/vfpdouble.c static u32 vfp_double_fadd(int dd, int dn, int dm, u32 fpscr)
dd                975 arch/arm/vfp/vfpdouble.c 	return vfp_double_normaliseround(dd, &vdd, fpscr, exceptions, "fadd");
dd                981 arch/arm/vfp/vfpdouble.c static u32 vfp_double_fsub(int dd, int dn, int dm, u32 fpscr)
dd               1001 arch/arm/vfp/vfpdouble.c 	return vfp_double_normaliseround(dd, &vdd, fpscr, exceptions, "fsub");
dd               1007 arch/arm/vfp/vfpdouble.c static u32 vfp_double_fdiv(int dd, int dn, int dm, u32 fpscr)
dd               1083 arch/arm/vfp/vfpdouble.c 	return vfp_double_normaliseround(dd, &vdd, fpscr, 0, "fdiv");
dd               1088 arch/arm/vfp/vfpdouble.c 	vfp_put_double(vfp_double_pack(&vdd), dd);
dd               1108 arch/arm/vfp/vfpdouble.c 	vfp_put_double(vfp_double_pack(&vfp_double_default_qnan), dd);
dd                482 arch/arm/vfp/vfpsingle.c static u32 vfp_single_fcvtd(int dd, int unused, s32 m, u32 fpscr)
dd                518 arch/arm/vfp/vfpsingle.c 	return vfp_double_normaliseround(dd, &vdd, fpscr, exceptions, "fcvtd");
dd                521 arch/arm/vfp/vfpsingle.c 	vfp_put_double(vfp_double_pack(&vdd), dd);
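
The vfp.h entry `u32 (* const fn)(int dd, int dn, int dm, u32 fpscr)` gives the uniform handler shape behind the vfpdouble.c functions above: destination and operand register indices plus the current FPSCR in, accumulated exception flags out, dispatched through a const table. A standalone sketch of that table-dispatch pattern (handler bodies and the opcode indexing are invented for illustration):

	#include <stdint.h>
	#include <stdio.h>

	typedef uint32_t u32;

	static u32 op_fabs(int dd, int dn, int dm, u32 fpscr)
	{
		(void)dn; (void)fpscr;
		printf("fabs d%d, d%d\n", dd, dm);
		return 0;	/* no exceptions raised */
	}

	static u32 op_fneg(int dd, int dn, int dm, u32 fpscr)
	{
		(void)dn; (void)fpscr;
		printf("fneg d%d, d%d\n", dd, dm);
		return 0;
	}

	static const struct {
		u32 (* const fn)(int dd, int dn, int dm, u32 fpscr);
	} dops[] = {
		{ op_fabs },
		{ op_fneg },
	};

	int main(void)
	{
		unsigned opcode = 1;	/* pretend the decoder picked fneg */
		u32 exceptions = dops[opcode].fn(0, 0, 7, 0);
		return (int)exceptions;
	}
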
dd                115 arch/ia64/include/asm/processor.h 	__u64 dd : 1;
dd                 45 arch/microblaze/include/asm/cacheflush.h 	void (*dd)(void); /* disable */
dd                 68 arch/microblaze/include/asm/cacheflush.h #define disable_dcache()				mbc->dd();
dd                520 arch/microblaze/kernel/cpu/cache.c 	.dd = __disable_dcache_msr,
dd                536 arch/microblaze/kernel/cpu/cache.c 	.dd = __disable_dcache_nomsr,
dd                552 arch/microblaze/kernel/cpu/cache.c 	.dd = __disable_dcache_msr,
dd                567 arch/microblaze/kernel/cpu/cache.c 	.dd = __disable_dcache_nomsr,
dd                583 arch/microblaze/kernel/cpu/cache.c 	.dd = __disable_dcache_msr,
dd                598 arch/microblaze/kernel/cpu/cache.c 	.dd = __disable_dcache_nomsr,
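
The microblaze cacheflush entries pick between `_msr` and `_nomsr` implementations once, at CPU probe time, by filling an ops structure; callers such as disable_dcache() then go through `mbc->dd()` with no feature test on the hot path. A minimal standalone sketch of the pattern (names illustrative, not the microblaze ones):

	#include <stdio.h>

	struct cache_ops {
		void (*dd)(void);	/* disable data cache */
	};

	static void disable_dcache_msr(void)   { puts("disable via MSR bits"); }
	static void disable_dcache_nomsr(void) { puts("disable via control word"); }

	static const struct cache_ops ops_msr   = { .dd = disable_dcache_msr };
	static const struct cache_ops ops_nomsr = { .dd = disable_dcache_nomsr };

	static const struct cache_ops *mbc;

	#define disable_dcache()	mbc->dd()

	int main(void)
	{
		int cpu_has_msr = 1;	/* would come from a feature probe */

		mbc = cpu_has_msr ? &ops_msr : &ops_nomsr;
		disable_dcache();
		return 0;
	}
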
dd               1203 arch/mips/cavium-octeon/octeon-irq.c 	struct octeon_irq_ciu_domain_data *dd = d->host_data;
dd               1208 arch/mips/cavium-octeon/octeon-irq.c 	if (ciu >= dd->num_sum || bit > 63)
dd               1227 arch/mips/cavium-octeon/octeon-irq.c 	struct octeon_irq_ciu_domain_data *dd = d->host_data;
dd               1229 arch/mips/cavium-octeon/octeon-irq.c 	if (line >= dd->num_sum || octeon_irq_ciu_to_irq[line][bit] != 0)
dd               1453 arch/mips/cavium-octeon/octeon-irq.c 	struct octeon_irq_ciu_domain_data *dd;
dd               1455 arch/mips/cavium-octeon/octeon-irq.c 	dd = kzalloc(sizeof(*dd), GFP_KERNEL);
dd               1456 arch/mips/cavium-octeon/octeon-irq.c 	if (!dd)
dd               1467 arch/mips/cavium-octeon/octeon-irq.c 		dd->num_sum = 3;
dd               1471 arch/mips/cavium-octeon/octeon-irq.c 		dd->num_sum = 2;
dd               1497 arch/mips/cavium-octeon/octeon-irq.c 		ciu_node, &octeon_irq_domain_ciu_ops, dd);
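
The octeon-irq hits allocate one `octeon_irq_ciu_domain_data` per IRQ domain, stash it as the domain's host_data, and recover it in the callbacks to bounds-check incoming (ciu, bit) pairs against num_sum. A standalone sketch of that carry-context-through-void* pattern (the domain struct is a stand-in; num_sum of 2 vs 3 mirrors the chip-variant split in the listing):

	#include <stdio.h>
	#include <stdlib.h>

	struct domain {			/* stand-in for struct irq_domain */
		void *host_data;
	};

	struct ciu_domain_data {
		unsigned num_sum;	/* number of summary registers */
	};

	/* reject out-of-range (ciu, bit) pairs, as the real xlate does */
	static int ciu_xlate(struct domain *d, unsigned ciu, unsigned bit)
	{
		struct ciu_domain_data *dd = d->host_data;

		if (ciu >= dd->num_sum || bit > 63)
			return -1;
		return (int)(ciu * 64 + bit);
	}

	int main(void)
	{
		struct ciu_domain_data *dd = calloc(1, sizeof(*dd));
		struct domain dom;

		if (!dd)
			return 1;
		dd->num_sum = 2;
		dom.host_data = dd;

		printf("hwirq=%d\n", ciu_xlate(&dom, 1, 40));	/* ok */
		printf("hwirq=%d\n", ciu_xlate(&dom, 2, 0));	/* rejected */
		free(dd);
		return 0;
	}
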
dd                154 arch/x86/kernel/cpu/centaur.c 	u32  aa, bb, cc, dd;
dd                238 arch/x86/kernel/cpu/centaur.c 			cpuid(0x80000005, &aa, &bb, &cc, &dd);
dd                240 arch/x86/kernel/cpu/centaur.c 			c->x86_cache_size = (cc>>24)+(dd>>24);
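
The centaur.c lines query CPUID leaf 0x80000005, where ECX[31:24] and EDX[31:24] report the L1 data and instruction cache sizes in KB, and sum the two top bytes. A runnable userspace equivalent using GCC's <cpuid.h> (x86 only):

	#include <stdio.h>
	#include <cpuid.h>

	int main(void)
	{
		unsigned int aa, bb, cc, dd;

		if (!__get_cpuid(0x80000005, &aa, &bb, &cc, &dd)) {
			puts("leaf 0x80000005 not supported");
			return 1;
		}
		/* ECX[31:24] = L1D size in KB, EDX[31:24] = L1I size in KB */
		printf("L1 cache: %u KB\n", (cc >> 24) + (dd >> 24));
		return 0;
	}
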
dd                 68 block/mq-deadline.c deadline_rb_root(struct deadline_data *dd, struct request *rq)
dd                 70 block/mq-deadline.c 	return &dd->sort_list[rq_data_dir(rq)];
dd                 88 block/mq-deadline.c deadline_add_rq_rb(struct deadline_data *dd, struct request *rq)
dd                 90 block/mq-deadline.c 	struct rb_root *root = deadline_rb_root(dd, rq);
dd                 96 block/mq-deadline.c deadline_del_rq_rb(struct deadline_data *dd, struct request *rq)
dd                100 block/mq-deadline.c 	if (dd->next_rq[data_dir] == rq)
dd                101 block/mq-deadline.c 		dd->next_rq[data_dir] = deadline_latter_request(rq);
dd                103 block/mq-deadline.c 	elv_rb_del(deadline_rb_root(dd, rq), rq);
dd                111 block/mq-deadline.c 	struct deadline_data *dd = q->elevator->elevator_data;
dd                119 block/mq-deadline.c 		deadline_del_rq_rb(dd, rq);
dd                129 block/mq-deadline.c 	struct deadline_data *dd = q->elevator->elevator_data;
dd                135 block/mq-deadline.c 		elv_rb_del(deadline_rb_root(dd, req), req);
dd                136 block/mq-deadline.c 		deadline_add_rq_rb(dd, req);
dd                165 block/mq-deadline.c deadline_move_request(struct deadline_data *dd, struct request *rq)
dd                169 block/mq-deadline.c 	dd->next_rq[READ] = NULL;
dd                170 block/mq-deadline.c 	dd->next_rq[WRITE] = NULL;
dd                171 block/mq-deadline.c 	dd->next_rq[data_dir] = deadline_latter_request(rq);
dd                183 block/mq-deadline.c static inline int deadline_check_fifo(struct deadline_data *dd, int ddir)
dd                185 block/mq-deadline.c 	struct request *rq = rq_entry_fifo(dd->fifo_list[ddir].next);
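
deadline_check_fifo's body is not shown above, but its test is the kernel's usual wrap-safe jiffies comparison: the oldest request in the FIFO has expired once jiffies reaches rq->fifo_time. A standalone sketch, modelling jiffies as a plain counter and time_after_eq() by its signed-difference definition:

	#include <stdio.h>

	struct request {
		unsigned long fifo_time;	/* absolute expiry, in ticks */
	};

	/* wrap-safe "a >= b" on a free-running counter, like time_after_eq() */
	static int time_after_eq(unsigned long a, unsigned long b)
	{
		return (long)(a - b) >= 0;
	}

	static int deadline_check_fifo(const struct request *oldest,
				       unsigned long jiffies)
	{
		return time_after_eq(jiffies, oldest->fifo_time);
	}

	int main(void)
	{
		struct request rq = { .fifo_time = 1000 };

		printf("%d\n", deadline_check_fifo(&rq, 999));	/* 0: not yet */
		printf("%d\n", deadline_check_fifo(&rq, 1000));	/* 1: expired */
		return 0;
	}
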
dd                201 block/mq-deadline.c deadline_fifo_request(struct deadline_data *dd, int data_dir)
dd                209 block/mq-deadline.c 	if (list_empty(&dd->fifo_list[data_dir]))
dd                212 block/mq-deadline.c 	rq = rq_entry_fifo(dd->fifo_list[data_dir].next);
dd                220 block/mq-deadline.c 	spin_lock_irqsave(&dd->zone_lock, flags);
dd                221 block/mq-deadline.c 	list_for_each_entry(rq, &dd->fifo_list[WRITE], queuelist) {
dd                227 block/mq-deadline.c 	spin_unlock_irqrestore(&dd->zone_lock, flags);
dd                237 block/mq-deadline.c deadline_next_request(struct deadline_data *dd, int data_dir)
dd                245 block/mq-deadline.c 	rq = dd->next_rq[data_dir];
dd                256 block/mq-deadline.c 	spin_lock_irqsave(&dd->zone_lock, flags);
dd                262 block/mq-deadline.c 	spin_unlock_irqrestore(&dd->zone_lock, flags);
dd                271 block/mq-deadline.c static struct request *__dd_dispatch_request(struct deadline_data *dd)
dd                277 block/mq-deadline.c 	if (!list_empty(&dd->dispatch)) {
dd                278 block/mq-deadline.c 		rq = list_first_entry(&dd->dispatch, struct request, queuelist);
dd                283 block/mq-deadline.c 	reads = !list_empty(&dd->fifo_list[READ]);
dd                284 block/mq-deadline.c 	writes = !list_empty(&dd->fifo_list[WRITE]);
dd                289 block/mq-deadline.c 	rq = deadline_next_request(dd, WRITE);
dd                291 block/mq-deadline.c 		rq = deadline_next_request(dd, READ);
dd                293 block/mq-deadline.c 	if (rq && dd->batching < dd->fifo_batch)
dd                303 block/mq-deadline.c 		BUG_ON(RB_EMPTY_ROOT(&dd->sort_list[READ]));
dd                305 block/mq-deadline.c 		if (deadline_fifo_request(dd, WRITE) &&
dd                306 block/mq-deadline.c 		    (dd->starved++ >= dd->writes_starved))
dd                320 block/mq-deadline.c 		BUG_ON(RB_EMPTY_ROOT(&dd->sort_list[WRITE]));
dd                322 block/mq-deadline.c 		dd->starved = 0;
dd                335 block/mq-deadline.c 	next_rq = deadline_next_request(dd, data_dir);
dd                336 block/mq-deadline.c 	if (deadline_check_fifo(dd, data_dir) || !next_rq) {
dd                342 block/mq-deadline.c 		rq = deadline_fifo_request(dd, data_dir);
dd                358 block/mq-deadline.c 	dd->batching = 0;
dd                364 block/mq-deadline.c 	dd->batching++;
dd                365 block/mq-deadline.c 	deadline_move_request(dd, rq);
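
The __dd_dispatch_request lines sketch mq-deadline's policy: drain the immediate dispatch list first, keep extending the current batch while `batching < fifo_batch`, prefer reads, and let writes preempt only after `writes_starved` consecutive read selections. A loose standalone model of just that decision (state reduced to counters; this is not the driver's code):

	#include <stdio.h>

	enum dir { READ, WRITE };

	struct dd {
		int have[2];			/* pending requests per direction */
		int batching, fifo_batch;
		int starved, writes_starved;
		enum dir last;
	};

	static enum dir pick_dir(struct dd *dd)
	{
		/* keep extending the current batch while quota remains */
		if (dd->have[dd->last] && dd->batching < dd->fifo_batch) {
			dd->batching++;
			return dd->last;
		}
		/* reads win unless writes have been starved too long */
		if (dd->have[READ] &&
		    !(dd->have[WRITE] && dd->starved++ >= dd->writes_starved)) {
			dd->last = READ;
		} else {
			dd->starved = 0;
			dd->last = WRITE;
		}
		dd->batching = 1;
		return dd->last;
	}

	int main(void)
	{
		struct dd dd = { .have = { 5, 5 },
				 .fifo_batch = 2, .writes_starved = 2 };

		for (int i = 0; i < 8; i++)
			printf("%c", pick_dir(&dd) == READ ? 'R' : 'W');
		printf("\n");	/* RRRRRRWW: writes break in after starving */
		return 0;
	}
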
dd                383 block/mq-deadline.c 	struct deadline_data *dd = hctx->queue->elevator->elevator_data;
dd                386 block/mq-deadline.c 	spin_lock(&dd->lock);
dd                387 block/mq-deadline.c 	rq = __dd_dispatch_request(dd);
dd                388 block/mq-deadline.c 	spin_unlock(&dd->lock);
dd                395 block/mq-deadline.c 	struct deadline_data *dd = e->elevator_data;
dd                397 block/mq-deadline.c 	BUG_ON(!list_empty(&dd->fifo_list[READ]));
dd                398 block/mq-deadline.c 	BUG_ON(!list_empty(&dd->fifo_list[WRITE]));
dd                400 block/mq-deadline.c 	kfree(dd);
dd                408 block/mq-deadline.c 	struct deadline_data *dd;
dd                415 block/mq-deadline.c 	dd = kzalloc_node(sizeof(*dd), GFP_KERNEL, q->node);
dd                416 block/mq-deadline.c 	if (!dd) {
dd                420 block/mq-deadline.c 	eq->elevator_data = dd;
dd                422 block/mq-deadline.c 	INIT_LIST_HEAD(&dd->fifo_list[READ]);
dd                423 block/mq-deadline.c 	INIT_LIST_HEAD(&dd->fifo_list[WRITE]);
dd                424 block/mq-deadline.c 	dd->sort_list[READ] = RB_ROOT;
dd                425 block/mq-deadline.c 	dd->sort_list[WRITE] = RB_ROOT;
dd                426 block/mq-deadline.c 	dd->fifo_expire[READ] = read_expire;
dd                427 block/mq-deadline.c 	dd->fifo_expire[WRITE] = write_expire;
dd                428 block/mq-deadline.c 	dd->writes_starved = writes_starved;
dd                429 block/mq-deadline.c 	dd->front_merges = 1;
dd                430 block/mq-deadline.c 	dd->fifo_batch = fifo_batch;
dd                431 block/mq-deadline.c 	spin_lock_init(&dd->lock);
dd                432 block/mq-deadline.c 	spin_lock_init(&dd->zone_lock);
dd                433 block/mq-deadline.c 	INIT_LIST_HEAD(&dd->dispatch);
dd                442 block/mq-deadline.c 	struct deadline_data *dd = q->elevator->elevator_data;
dd                446 block/mq-deadline.c 	if (!dd->front_merges)
dd                449 block/mq-deadline.c 	__rq = elv_rb_find(&dd->sort_list[bio_data_dir(bio)], sector);
dd                466 block/mq-deadline.c 	struct deadline_data *dd = q->elevator->elevator_data;
dd                470 block/mq-deadline.c 	spin_lock(&dd->lock);
dd                472 block/mq-deadline.c 	spin_unlock(&dd->lock);
dd                487 block/mq-deadline.c 	struct deadline_data *dd = q->elevator->elevator_data;
dd                503 block/mq-deadline.c 			list_add(&rq->queuelist, &dd->dispatch);
dd                505 block/mq-deadline.c 			list_add_tail(&rq->queuelist, &dd->dispatch);
dd                507 block/mq-deadline.c 		deadline_add_rq_rb(dd, rq);
dd                518 block/mq-deadline.c 		rq->fifo_time = jiffies + dd->fifo_expire[data_dir];
dd                519 block/mq-deadline.c 		list_add_tail(&rq->queuelist, &dd->fifo_list[data_dir]);
dd                527 block/mq-deadline.c 	struct deadline_data *dd = q->elevator->elevator_data;
dd                529 block/mq-deadline.c 	spin_lock(&dd->lock);
dd                537 block/mq-deadline.c 	spin_unlock(&dd->lock);
dd                567 block/mq-deadline.c 		struct deadline_data *dd = q->elevator->elevator_data;
dd                570 block/mq-deadline.c 		spin_lock_irqsave(&dd->zone_lock, flags);
dd                572 block/mq-deadline.c 		if (!list_empty(&dd->fifo_list[WRITE]))
dd                574 block/mq-deadline.c 		spin_unlock_irqrestore(&dd->zone_lock, flags);
dd                580 block/mq-deadline.c 	struct deadline_data *dd = hctx->queue->elevator->elevator_data;
dd                582 block/mq-deadline.c 	return !list_empty_careful(&dd->dispatch) ||
dd                583 block/mq-deadline.c 		!list_empty_careful(&dd->fifo_list[0]) ||
dd                584 block/mq-deadline.c 		!list_empty_careful(&dd->fifo_list[1]);
dd                607 block/mq-deadline.c 	struct deadline_data *dd = e->elevator_data;			\
dd                613 block/mq-deadline.c SHOW_FUNCTION(deadline_read_expire_show, dd->fifo_expire[READ], 1);
dd                614 block/mq-deadline.c SHOW_FUNCTION(deadline_write_expire_show, dd->fifo_expire[WRITE], 1);
dd                615 block/mq-deadline.c SHOW_FUNCTION(deadline_writes_starved_show, dd->writes_starved, 0);
dd                616 block/mq-deadline.c SHOW_FUNCTION(deadline_front_merges_show, dd->front_merges, 0);
dd                617 block/mq-deadline.c SHOW_FUNCTION(deadline_fifo_batch_show, dd->fifo_batch, 0);
dd                623 block/mq-deadline.c 	struct deadline_data *dd = e->elevator_data;			\
dd                636 block/mq-deadline.c STORE_FUNCTION(deadline_read_expire_store, &dd->fifo_expire[READ], 0, INT_MAX, 1);
dd                637 block/mq-deadline.c STORE_FUNCTION(deadline_write_expire_store, &dd->fifo_expire[WRITE], 0, INT_MAX, 1);
dd                638 block/mq-deadline.c STORE_FUNCTION(deadline_writes_starved_store, &dd->writes_starved, INT_MIN, INT_MAX, 0);
dd                639 block/mq-deadline.c STORE_FUNCTION(deadline_front_merges_store, &dd->front_merges, 0, 1, 0);
dd                640 block/mq-deadline.c STORE_FUNCTION(deadline_fifo_batch_store, &dd->fifo_batch, 0, INT_MAX, 0);
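
The SHOW_FUNCTION/STORE_FUNCTION hits instantiate one sysfs accessor per tunable from a single macro body; the `dd->...` argument is the expression the generated function reads or writes, and the trailing 0/1 selects time-unit conversion (jiffies to milliseconds) in the real macros. A standalone sketch of the stamping pattern (simplified: no unit conversion, plain int tunables):

	#include <stdio.h>

	struct deadline_data {
		int fifo_expire[2];
		int writes_starved;
	};

	/* stamp out one "show" accessor per tunable, as the macros above do */
	#define SHOW_FUNCTION(__FUNC, __EXPR)					\
	static int __FUNC(const struct deadline_data *dd, char *page, int n)	\
	{									\
		return snprintf(page, n, "%d\n", (__EXPR));			\
	}

	SHOW_FUNCTION(read_expire_show,    dd->fifo_expire[0])
	SHOW_FUNCTION(write_expire_show,   dd->fifo_expire[1])
	SHOW_FUNCTION(writes_starved_show, dd->writes_starved)

	int main(void)
	{
		struct deadline_data dd = { { 500, 5000 }, 2 };
		char buf[16];

		read_expire_show(&dd, buf, sizeof(buf));
		fputs(buf, stdout);
		write_expire_show(&dd, buf, sizeof(buf));
		fputs(buf, stdout);
		writes_starved_show(&dd, buf, sizeof(buf));
		fputs(buf, stdout);
		return 0;
	}
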
dd                659 block/mq-deadline.c 	__acquires(&dd->lock)						\
dd                662 block/mq-deadline.c 	struct deadline_data *dd = q->elevator->elevator_data;		\
dd                664 block/mq-deadline.c 	spin_lock(&dd->lock);						\
dd                665 block/mq-deadline.c 	return seq_list_start(&dd->fifo_list[ddir], *pos);		\
dd                672 block/mq-deadline.c 	struct deadline_data *dd = q->elevator->elevator_data;		\
dd                674 block/mq-deadline.c 	return seq_list_next(v, &dd->fifo_list[ddir], pos);		\
dd                678 block/mq-deadline.c 	__releases(&dd->lock)						\
dd                681 block/mq-deadline.c 	struct deadline_data *dd = q->elevator->elevator_data;		\
dd                683 block/mq-deadline.c 	spin_unlock(&dd->lock);						\
dd                697 block/mq-deadline.c 	struct deadline_data *dd = q->elevator->elevator_data;		\
dd                698 block/mq-deadline.c 	struct request *rq = dd->next_rq[ddir];				\
dd                711 block/mq-deadline.c 	struct deadline_data *dd = q->elevator->elevator_data;
dd                713 block/mq-deadline.c 	seq_printf(m, "%u\n", dd->batching);
dd                720 block/mq-deadline.c 	struct deadline_data *dd = q->elevator->elevator_data;
dd                722 block/mq-deadline.c 	seq_printf(m, "%u\n", dd->starved);
dd                727 block/mq-deadline.c 	__acquires(&dd->lock)
dd                730 block/mq-deadline.c 	struct deadline_data *dd = q->elevator->elevator_data;
dd                732 block/mq-deadline.c 	spin_lock(&dd->lock);
dd                733 block/mq-deadline.c 	return seq_list_start(&dd->dispatch, *pos);
dd                739 block/mq-deadline.c 	struct deadline_data *dd = q->elevator->elevator_data;
dd                741 block/mq-deadline.c 	return seq_list_next(v, &dd->dispatch, pos);
dd                745 block/mq-deadline.c 	__releases(&dd->lock)
dd                748 block/mq-deadline.c 	struct deadline_data *dd = q->elevator->elevator_data;
dd                750 block/mq-deadline.c 	spin_unlock(&dd->lock);
dd                 47 crypto/rmd128.c 	u32 aa, bb, cc, dd, aaa, bbb, ccc, ddd;
dd                 53 crypto/rmd128.c 	dd = state[3];
dd                 62 crypto/rmd128.c 	ROUND(aa, bb, cc, dd, F1, K1, in[0],  11);
dd                 63 crypto/rmd128.c 	ROUND(dd, aa, bb, cc, F1, K1, in[1],  14);
dd                 64 crypto/rmd128.c 	ROUND(cc, dd, aa, bb, F1, K1, in[2],  15);
dd                 65 crypto/rmd128.c 	ROUND(bb, cc, dd, aa, F1, K1, in[3],  12);
dd                 66 crypto/rmd128.c 	ROUND(aa, bb, cc, dd, F1, K1, in[4],   5);
dd                 67 crypto/rmd128.c 	ROUND(dd, aa, bb, cc, F1, K1, in[5],   8);
dd                 68 crypto/rmd128.c 	ROUND(cc, dd, aa, bb, F1, K1, in[6],   7);
dd                 69 crypto/rmd128.c 	ROUND(bb, cc, dd, aa, F1, K1, in[7],   9);
dd                 70 crypto/rmd128.c 	ROUND(aa, bb, cc, dd, F1, K1, in[8],  11);
dd                 71 crypto/rmd128.c 	ROUND(dd, aa, bb, cc, F1, K1, in[9],  13);
dd                 72 crypto/rmd128.c 	ROUND(cc, dd, aa, bb, F1, K1, in[10], 14);
dd                 73 crypto/rmd128.c 	ROUND(bb, cc, dd, aa, F1, K1, in[11], 15);
dd                 74 crypto/rmd128.c 	ROUND(aa, bb, cc, dd, F1, K1, in[12],  6);
dd                 75 crypto/rmd128.c 	ROUND(dd, aa, bb, cc, F1, K1, in[13],  7);
dd                 76 crypto/rmd128.c 	ROUND(cc, dd, aa, bb, F1, K1, in[14],  9);
dd                 77 crypto/rmd128.c 	ROUND(bb, cc, dd, aa, F1, K1, in[15],  8);
dd                 80 crypto/rmd128.c 	ROUND(aa, bb, cc, dd, F2, K2, in[7],   7);
dd                 81 crypto/rmd128.c 	ROUND(dd, aa, bb, cc, F2, K2, in[4],   6);
dd                 82 crypto/rmd128.c 	ROUND(cc, dd, aa, bb, F2, K2, in[13],  8);
dd                 83 crypto/rmd128.c 	ROUND(bb, cc, dd, aa, F2, K2, in[1],  13);
dd                 84 crypto/rmd128.c 	ROUND(aa, bb, cc, dd, F2, K2, in[10], 11);
dd                 85 crypto/rmd128.c 	ROUND(dd, aa, bb, cc, F2, K2, in[6],   9);
dd                 86 crypto/rmd128.c 	ROUND(cc, dd, aa, bb, F2, K2, in[15],  7);
dd                 87 crypto/rmd128.c 	ROUND(bb, cc, dd, aa, F2, K2, in[3],  15);
dd                 88 crypto/rmd128.c 	ROUND(aa, bb, cc, dd, F2, K2, in[12],  7);
dd                 89 crypto/rmd128.c 	ROUND(dd, aa, bb, cc, F2, K2, in[0],  12);
dd                 90 crypto/rmd128.c 	ROUND(cc, dd, aa, bb, F2, K2, in[9],  15);
dd                 91 crypto/rmd128.c 	ROUND(bb, cc, dd, aa, F2, K2, in[5],   9);
dd                 92 crypto/rmd128.c 	ROUND(aa, bb, cc, dd, F2, K2, in[2],  11);
dd                 93 crypto/rmd128.c 	ROUND(dd, aa, bb, cc, F2, K2, in[14],  7);
dd                 94 crypto/rmd128.c 	ROUND(cc, dd, aa, bb, F2, K2, in[11], 13);
dd                 95 crypto/rmd128.c 	ROUND(bb, cc, dd, aa, F2, K2, in[8],  12);
dd                 98 crypto/rmd128.c 	ROUND(aa, bb, cc, dd, F3, K3, in[3],  11);
dd                 99 crypto/rmd128.c 	ROUND(dd, aa, bb, cc, F3, K3, in[10], 13);
dd                100 crypto/rmd128.c 	ROUND(cc, dd, aa, bb, F3, K3, in[14],  6);
dd                101 crypto/rmd128.c 	ROUND(bb, cc, dd, aa, F3, K3, in[4],   7);
dd                102 crypto/rmd128.c 	ROUND(aa, bb, cc, dd, F3, K3, in[9],  14);
dd                103 crypto/rmd128.c 	ROUND(dd, aa, bb, cc, F3, K3, in[15],  9);
dd                104 crypto/rmd128.c 	ROUND(cc, dd, aa, bb, F3, K3, in[8],  13);
dd                105 crypto/rmd128.c 	ROUND(bb, cc, dd, aa, F3, K3, in[1],  15);
dd                106 crypto/rmd128.c 	ROUND(aa, bb, cc, dd, F3, K3, in[2],  14);
dd                107 crypto/rmd128.c 	ROUND(dd, aa, bb, cc, F3, K3, in[7],   8);
dd                108 crypto/rmd128.c 	ROUND(cc, dd, aa, bb, F3, K3, in[0],  13);
dd                109 crypto/rmd128.c 	ROUND(bb, cc, dd, aa, F3, K3, in[6],   6);
dd                110 crypto/rmd128.c 	ROUND(aa, bb, cc, dd, F3, K3, in[13],  5);
dd                111 crypto/rmd128.c 	ROUND(dd, aa, bb, cc, F3, K3, in[11], 12);
dd                112 crypto/rmd128.c 	ROUND(cc, dd, aa, bb, F3, K3, in[5],   7);
dd                113 crypto/rmd128.c 	ROUND(bb, cc, dd, aa, F3, K3, in[12],  5);
dd                116 crypto/rmd128.c 	ROUND(aa, bb, cc, dd, F4, K4, in[1],  11);
dd                117 crypto/rmd128.c 	ROUND(dd, aa, bb, cc, F4, K4, in[9],  12);
dd                118 crypto/rmd128.c 	ROUND(cc, dd, aa, bb, F4, K4, in[11], 14);
dd                119 crypto/rmd128.c 	ROUND(bb, cc, dd, aa, F4, K4, in[10], 15);
dd                120 crypto/rmd128.c 	ROUND(aa, bb, cc, dd, F4, K4, in[0],  14);
dd                121 crypto/rmd128.c 	ROUND(dd, aa, bb, cc, F4, K4, in[8],  15);
dd                122 crypto/rmd128.c 	ROUND(cc, dd, aa, bb, F4, K4, in[12],  9);
dd                123 crypto/rmd128.c 	ROUND(bb, cc, dd, aa, F4, K4, in[4],   8);
dd                124 crypto/rmd128.c 	ROUND(aa, bb, cc, dd, F4, K4, in[13],  9);
dd                125 crypto/rmd128.c 	ROUND(dd, aa, bb, cc, F4, K4, in[3],  14);
dd                126 crypto/rmd128.c 	ROUND(cc, dd, aa, bb, F4, K4, in[7],   5);
dd                127 crypto/rmd128.c 	ROUND(bb, cc, dd, aa, F4, K4, in[15],  6);
dd                128 crypto/rmd128.c 	ROUND(aa, bb, cc, dd, F4, K4, in[14],  8);
dd                129 crypto/rmd128.c 	ROUND(dd, aa, bb, cc, F4, K4, in[5],   6);
dd                130 crypto/rmd128.c 	ROUND(cc, dd, aa, bb, F4, K4, in[6],   5);
dd                131 crypto/rmd128.c 	ROUND(bb, cc, dd, aa, F4, K4, in[2],  12);
dd                207 crypto/rmd128.c 	state[1] = state[2] + dd + aaa;
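
Each rmd128.c ROUND line performs one RIPEMD-128 step: mix three of the four state words through the round's boolean function, add a message word and the round constant, then rotate left. A standalone sketch of one round-1 step (per the RIPEMD-128 spec, F1 is x^y^z and K1 is 0; the kernel additionally byte-swaps the input with le32_to_cpup(), omitted here):

	#include <stdint.h>
	#include <stdio.h>

	static uint32_t rol32(uint32_t w, unsigned s)	/* valid for s in 1..31 */
	{
		return (w << s) | (w >> (32 - s));
	}

	#define F1(x, y, z)	((x) ^ (y) ^ (z))
	#define K1		0x00000000U

	/* one step: a += f(b,c,d) + x + k; a = rol(a, s) */
	#define ROUND(a, b, c, d, f, k, x, s) do {	\
		(a) += f((b), (c), (d)) + (x) + (k);	\
		(a) = rol32((a), (s));			\
	} while (0)

	int main(void)
	{
		uint32_t aa = 0x67452301, bb = 0xefcdab89,
			 cc = 0x98badcfe, dd = 0x10325476;	/* RIPEMD IV */
		uint32_t in0 = 0x80;	/* first word of a padded empty message */

		ROUND(aa, bb, cc, dd, F1, K1, in0, 11);
		printf("aa = 0x%08x\n", aa);
		return 0;
	}
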
dd                 51 crypto/rmd160.c 	u32 aa, bb, cc, dd, ee, aaa, bbb, ccc, ddd, eee;
dd                 57 crypto/rmd160.c 	dd = state[3];
dd                 68 crypto/rmd160.c 	ROUND(aa, bb, cc, dd, ee, F1, K1, in[0],  11);
dd                 69 crypto/rmd160.c 	ROUND(ee, aa, bb, cc, dd, F1, K1, in[1],  14);
dd                 70 crypto/rmd160.c 	ROUND(dd, ee, aa, bb, cc, F1, K1, in[2],  15);
dd                 71 crypto/rmd160.c 	ROUND(cc, dd, ee, aa, bb, F1, K1, in[3],  12);
dd                 72 crypto/rmd160.c 	ROUND(bb, cc, dd, ee, aa, F1, K1, in[4],   5);
dd                 73 crypto/rmd160.c 	ROUND(aa, bb, cc, dd, ee, F1, K1, in[5],   8);
dd                 74 crypto/rmd160.c 	ROUND(ee, aa, bb, cc, dd, F1, K1, in[6],   7);
dd                 75 crypto/rmd160.c 	ROUND(dd, ee, aa, bb, cc, F1, K1, in[7],   9);
dd                 76 crypto/rmd160.c 	ROUND(cc, dd, ee, aa, bb, F1, K1, in[8],  11);
dd                 77 crypto/rmd160.c 	ROUND(bb, cc, dd, ee, aa, F1, K1, in[9],  13);
dd                 78 crypto/rmd160.c 	ROUND(aa, bb, cc, dd, ee, F1, K1, in[10], 14);
dd                 79 crypto/rmd160.c 	ROUND(ee, aa, bb, cc, dd, F1, K1, in[11], 15);
dd                 80 crypto/rmd160.c 	ROUND(dd, ee, aa, bb, cc, F1, K1, in[12],  6);
dd                 81 crypto/rmd160.c 	ROUND(cc, dd, ee, aa, bb, F1, K1, in[13],  7);
dd                 82 crypto/rmd160.c 	ROUND(bb, cc, dd, ee, aa, F1, K1, in[14],  9);
dd                 83 crypto/rmd160.c 	ROUND(aa, bb, cc, dd, ee, F1, K1, in[15],  8);
dd                 86 crypto/rmd160.c 	ROUND(ee, aa, bb, cc, dd, F2, K2, in[7],   7);
dd                 87 crypto/rmd160.c 	ROUND(dd, ee, aa, bb, cc, F2, K2, in[4],   6);
dd                 88 crypto/rmd160.c 	ROUND(cc, dd, ee, aa, bb, F2, K2, in[13],  8);
dd                 89 crypto/rmd160.c 	ROUND(bb, cc, dd, ee, aa, F2, K2, in[1],  13);
dd                 90 crypto/rmd160.c 	ROUND(aa, bb, cc, dd, ee, F2, K2, in[10], 11);
dd                 91 crypto/rmd160.c 	ROUND(ee, aa, bb, cc, dd, F2, K2, in[6],   9);
dd                 92 crypto/rmd160.c 	ROUND(dd, ee, aa, bb, cc, F2, K2, in[15],  7);
dd                 93 crypto/rmd160.c 	ROUND(cc, dd, ee, aa, bb, F2, K2, in[3],  15);
dd                 94 crypto/rmd160.c 	ROUND(bb, cc, dd, ee, aa, F2, K2, in[12],  7);
dd                 95 crypto/rmd160.c 	ROUND(aa, bb, cc, dd, ee, F2, K2, in[0],  12);
dd                 96 crypto/rmd160.c 	ROUND(ee, aa, bb, cc, dd, F2, K2, in[9],  15);
dd                 97 crypto/rmd160.c 	ROUND(dd, ee, aa, bb, cc, F2, K2, in[5],   9);
dd                 98 crypto/rmd160.c 	ROUND(cc, dd, ee, aa, bb, F2, K2, in[2],  11);
dd                 99 crypto/rmd160.c 	ROUND(bb, cc, dd, ee, aa, F2, K2, in[14],  7);
dd                100 crypto/rmd160.c 	ROUND(aa, bb, cc, dd, ee, F2, K2, in[11], 13);
dd                101 crypto/rmd160.c 	ROUND(ee, aa, bb, cc, dd, F2, K2, in[8],  12);
dd                104 crypto/rmd160.c 	ROUND(dd, ee, aa, bb, cc, F3, K3, in[3],  11);
dd                105 crypto/rmd160.c 	ROUND(cc, dd, ee, aa, bb, F3, K3, in[10], 13);
dd                106 crypto/rmd160.c 	ROUND(bb, cc, dd, ee, aa, F3, K3, in[14],  6);
dd                107 crypto/rmd160.c 	ROUND(aa, bb, cc, dd, ee, F3, K3, in[4],   7);
dd                108 crypto/rmd160.c 	ROUND(ee, aa, bb, cc, dd, F3, K3, in[9],  14);
dd                109 crypto/rmd160.c 	ROUND(dd, ee, aa, bb, cc, F3, K3, in[15],  9);
dd                110 crypto/rmd160.c 	ROUND(cc, dd, ee, aa, bb, F3, K3, in[8],  13);
dd                111 crypto/rmd160.c 	ROUND(bb, cc, dd, ee, aa, F3, K3, in[1],  15);
dd                112 crypto/rmd160.c 	ROUND(aa, bb, cc, dd, ee, F3, K3, in[2],  14);
dd                113 crypto/rmd160.c 	ROUND(ee, aa, bb, cc, dd, F3, K3, in[7],   8);
dd                114 crypto/rmd160.c 	ROUND(dd, ee, aa, bb, cc, F3, K3, in[0],  13);
dd                115 crypto/rmd160.c 	ROUND(cc, dd, ee, aa, bb, F3, K3, in[6],   6);
dd                116 crypto/rmd160.c 	ROUND(bb, cc, dd, ee, aa, F3, K3, in[13],  5);
dd                117 crypto/rmd160.c 	ROUND(aa, bb, cc, dd, ee, F3, K3, in[11], 12);
dd                118 crypto/rmd160.c 	ROUND(ee, aa, bb, cc, dd, F3, K3, in[5],   7);
dd                119 crypto/rmd160.c 	ROUND(dd, ee, aa, bb, cc, F3, K3, in[12],  5);
dd                122 crypto/rmd160.c 	ROUND(cc, dd, ee, aa, bb, F4, K4, in[1],  11);
dd                123 crypto/rmd160.c 	ROUND(bb, cc, dd, ee, aa, F4, K4, in[9],  12);
dd                124 crypto/rmd160.c 	ROUND(aa, bb, cc, dd, ee, F4, K4, in[11], 14);
dd                125 crypto/rmd160.c 	ROUND(ee, aa, bb, cc, dd, F4, K4, in[10], 15);
dd                126 crypto/rmd160.c 	ROUND(dd, ee, aa, bb, cc, F4, K4, in[0],  14);
dd                127 crypto/rmd160.c 	ROUND(cc, dd, ee, aa, bb, F4, K4, in[8],  15);
dd                128 crypto/rmd160.c 	ROUND(bb, cc, dd, ee, aa, F4, K4, in[12],  9);
dd                129 crypto/rmd160.c 	ROUND(aa, bb, cc, dd, ee, F4, K4, in[4],   8);
dd                130 crypto/rmd160.c 	ROUND(ee, aa, bb, cc, dd, F4, K4, in[13],  9);
dd                131 crypto/rmd160.c 	ROUND(dd, ee, aa, bb, cc, F4, K4, in[3],  14);
dd                132 crypto/rmd160.c 	ROUND(cc, dd, ee, aa, bb, F4, K4, in[7],   5);
dd                133 crypto/rmd160.c 	ROUND(bb, cc, dd, ee, aa, F4, K4, in[15],  6);
dd                134 crypto/rmd160.c 	ROUND(aa, bb, cc, dd, ee, F4, K4, in[14],  8);
dd                135 crypto/rmd160.c 	ROUND(ee, aa, bb, cc, dd, F4, K4, in[5],   6);
dd                136 crypto/rmd160.c 	ROUND(dd, ee, aa, bb, cc, F4, K4, in[6],   5);
dd                137 crypto/rmd160.c 	ROUND(cc, dd, ee, aa, bb, F4, K4, in[2],  12);
dd                140 crypto/rmd160.c 	ROUND(bb, cc, dd, ee, aa, F5, K5, in[4],   9);
dd                141 crypto/rmd160.c 	ROUND(aa, bb, cc, dd, ee, F5, K5, in[0],  15);
dd                142 crypto/rmd160.c 	ROUND(ee, aa, bb, cc, dd, F5, K5, in[5],   5);
dd                143 crypto/rmd160.c 	ROUND(dd, ee, aa, bb, cc, F5, K5, in[9],  11);
dd                144 crypto/rmd160.c 	ROUND(cc, dd, ee, aa, bb, F5, K5, in[7],   6);
dd                145 crypto/rmd160.c 	ROUND(bb, cc, dd, ee, aa, F5, K5, in[12],  8);
dd                146 crypto/rmd160.c 	ROUND(aa, bb, cc, dd, ee, F5, K5, in[2],  13);
dd                147 crypto/rmd160.c 	ROUND(ee, aa, bb, cc, dd, F5, K5, in[10], 12);
dd                148 crypto/rmd160.c 	ROUND(dd, ee, aa, bb, cc, F5, K5, in[14],  5);
dd                149 crypto/rmd160.c 	ROUND(cc, dd, ee, aa, bb, F5, K5, in[1],  12);
dd                150 crypto/rmd160.c 	ROUND(bb, cc, dd, ee, aa, F5, K5, in[3],  13);
dd                151 crypto/rmd160.c 	ROUND(aa, bb, cc, dd, ee, F5, K5, in[8],  14);
dd                152 crypto/rmd160.c 	ROUND(ee, aa, bb, cc, dd, F5, K5, in[11], 11);
dd                153 crypto/rmd160.c 	ROUND(dd, ee, aa, bb, cc, F5, K5, in[6],   8);
dd                154 crypto/rmd160.c 	ROUND(cc, dd, ee, aa, bb, F5, K5, in[15],  5);
dd                155 crypto/rmd160.c 	ROUND(bb, cc, dd, ee, aa, F5, K5, in[13],  6);
dd                249 crypto/rmd160.c 	state[1] = state[2] + dd + eee;
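
The rmd160.c rounds differ from the 128-bit variant in two ways visible above: a fifth state word `e` is added after the rotation, and `c` is rotated left by 10 every step. The same sketch adapted to that five-word step (same assumptions as the rmd128 example):

	#include <stdint.h>
	#include <stdio.h>

	static uint32_t rol32(uint32_t w, unsigned s)	/* valid for s in 1..31 */
	{
		return (w << s) | (w >> (32 - s));
	}

	#define F1(x, y, z)	((x) ^ (y) ^ (z))
	#define K1		0x00000000U

	/* one step: a += f(b,c,d) + x + k; a = rol(a, s) + e; c = rol(c, 10) */
	#define ROUND(a, b, c, d, e, f, k, x, s) do {	\
		(a) += f((b), (c), (d)) + (x) + (k);	\
		(a) = rol32((a), (s)) + (e);		\
		(c) = rol32((c), 10);			\
	} while (0)

	int main(void)
	{
		uint32_t aa = 0x67452301, bb = 0xefcdab89, cc = 0x98badcfe,
			 dd = 0x10325476, ee = 0xc3d2e1f0;	/* RIPEMD-160 IV */

		ROUND(aa, bb, cc, dd, ee, F1, K1, 0x80, 11);
		printf("aa = 0x%08x cc = 0x%08x\n", aa, cc);
		return 0;
	}
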
dd                 47 crypto/rmd256.c 	u32 aa, bb, cc, dd, aaa, bbb, ccc, ddd;
dd                 53 crypto/rmd256.c 	dd = state[3];
dd                 62 crypto/rmd256.c 	ROUND(aa, bb, cc, dd, F1, K1, in[0],  11);
dd                 63 crypto/rmd256.c 	ROUND(dd, aa, bb, cc, F1, K1, in[1],  14);
dd                 64 crypto/rmd256.c 	ROUND(cc, dd, aa, bb, F1, K1, in[2],  15);
dd                 65 crypto/rmd256.c 	ROUND(bb, cc, dd, aa, F1, K1, in[3],  12);
dd                 66 crypto/rmd256.c 	ROUND(aa, bb, cc, dd, F1, K1, in[4],   5);
dd                 67 crypto/rmd256.c 	ROUND(dd, aa, bb, cc, F1, K1, in[5],   8);
dd                 68 crypto/rmd256.c 	ROUND(cc, dd, aa, bb, F1, K1, in[6],   7);
dd                 69 crypto/rmd256.c 	ROUND(bb, cc, dd, aa, F1, K1, in[7],   9);
dd                 70 crypto/rmd256.c 	ROUND(aa, bb, cc, dd, F1, K1, in[8],  11);
dd                 71 crypto/rmd256.c 	ROUND(dd, aa, bb, cc, F1, K1, in[9],  13);
dd                 72 crypto/rmd256.c 	ROUND(cc, dd, aa, bb, F1, K1, in[10], 14);
dd                 73 crypto/rmd256.c 	ROUND(bb, cc, dd, aa, F1, K1, in[11], 15);
dd                 74 crypto/rmd256.c 	ROUND(aa, bb, cc, dd, F1, K1, in[12],  6);
dd                 75 crypto/rmd256.c 	ROUND(dd, aa, bb, cc, F1, K1, in[13],  7);
dd                 76 crypto/rmd256.c 	ROUND(cc, dd, aa, bb, F1, K1, in[14],  9);
dd                 77 crypto/rmd256.c 	ROUND(bb, cc, dd, aa, F1, K1, in[15],  8);
dd                101 crypto/rmd256.c 	ROUND(aa, bb, cc, dd, F2, K2, in[7],   7);
dd                102 crypto/rmd256.c 	ROUND(dd, aa, bb, cc, F2, K2, in[4],   6);
dd                103 crypto/rmd256.c 	ROUND(cc, dd, aa, bb, F2, K2, in[13],  8);
dd                104 crypto/rmd256.c 	ROUND(bb, cc, dd, aa, F2, K2, in[1],  13);
dd                105 crypto/rmd256.c 	ROUND(aa, bb, cc, dd, F2, K2, in[10], 11);
dd                106 crypto/rmd256.c 	ROUND(dd, aa, bb, cc, F2, K2, in[6],   9);
dd                107 crypto/rmd256.c 	ROUND(cc, dd, aa, bb, F2, K2, in[15],  7);
dd                108 crypto/rmd256.c 	ROUND(bb, cc, dd, aa, F2, K2, in[3],  15);
dd                109 crypto/rmd256.c 	ROUND(aa, bb, cc, dd, F2, K2, in[12],  7);
dd                110 crypto/rmd256.c 	ROUND(dd, aa, bb, cc, F2, K2, in[0],  12);
dd                111 crypto/rmd256.c 	ROUND(cc, dd, aa, bb, F2, K2, in[9],  15);
dd                112 crypto/rmd256.c 	ROUND(bb, cc, dd, aa, F2, K2, in[5],   9);
dd                113 crypto/rmd256.c 	ROUND(aa, bb, cc, dd, F2, K2, in[2],  11);
dd                114 crypto/rmd256.c 	ROUND(dd, aa, bb, cc, F2, K2, in[14],  7);
dd                115 crypto/rmd256.c 	ROUND(cc, dd, aa, bb, F2, K2, in[11], 13);
dd                116 crypto/rmd256.c 	ROUND(bb, cc, dd, aa, F2, K2, in[8],  12);
dd                140 crypto/rmd256.c 	ROUND(aa, bb, cc, dd, F3, K3, in[3],  11);
dd                141 crypto/rmd256.c 	ROUND(dd, aa, bb, cc, F3, K3, in[10], 13);
dd                142 crypto/rmd256.c 	ROUND(cc, dd, aa, bb, F3, K3, in[14],  6);
dd                143 crypto/rmd256.c 	ROUND(bb, cc, dd, aa, F3, K3, in[4],   7);
dd                144 crypto/rmd256.c 	ROUND(aa, bb, cc, dd, F3, K3, in[9],  14);
dd                145 crypto/rmd256.c 	ROUND(dd, aa, bb, cc, F3, K3, in[15],  9);
dd                146 crypto/rmd256.c 	ROUND(cc, dd, aa, bb, F3, K3, in[8],  13);
dd                147 crypto/rmd256.c 	ROUND(bb, cc, dd, aa, F3, K3, in[1],  15);
dd                148 crypto/rmd256.c 	ROUND(aa, bb, cc, dd, F3, K3, in[2],  14);
dd                149 crypto/rmd256.c 	ROUND(dd, aa, bb, cc, F3, K3, in[7],   8);
dd                150 crypto/rmd256.c 	ROUND(cc, dd, aa, bb, F3, K3, in[0],  13);
dd                151 crypto/rmd256.c 	ROUND(bb, cc, dd, aa, F3, K3, in[6],   6);
dd                152 crypto/rmd256.c 	ROUND(aa, bb, cc, dd, F3, K3, in[13],  5);
dd                153 crypto/rmd256.c 	ROUND(dd, aa, bb, cc, F3, K3, in[11], 12);
dd                154 crypto/rmd256.c 	ROUND(cc, dd, aa, bb, F3, K3, in[5],   7);
dd                155 crypto/rmd256.c 	ROUND(bb, cc, dd, aa, F3, K3, in[12],  5);
dd                179 crypto/rmd256.c 	ROUND(aa, bb, cc, dd, F4, K4, in[1],  11);
dd                180 crypto/rmd256.c 	ROUND(dd, aa, bb, cc, F4, K4, in[9],  12);
dd                181 crypto/rmd256.c 	ROUND(cc, dd, aa, bb, F4, K4, in[11], 14);
dd                182 crypto/rmd256.c 	ROUND(bb, cc, dd, aa, F4, K4, in[10], 15);
dd                183 crypto/rmd256.c 	ROUND(aa, bb, cc, dd, F4, K4, in[0],  14);
dd                184 crypto/rmd256.c 	ROUND(dd, aa, bb, cc, F4, K4, in[8],  15);
dd                185 crypto/rmd256.c 	ROUND(cc, dd, aa, bb, F4, K4, in[12],  9);
dd                186 crypto/rmd256.c 	ROUND(bb, cc, dd, aa, F4, K4, in[4],   8);
dd                187 crypto/rmd256.c 	ROUND(aa, bb, cc, dd, F4, K4, in[13],  9);
dd                188 crypto/rmd256.c 	ROUND(dd, aa, bb, cc, F4, K4, in[3],  14);
dd                189 crypto/rmd256.c 	ROUND(cc, dd, aa, bb, F4, K4, in[7],   5);
dd                190 crypto/rmd256.c 	ROUND(bb, cc, dd, aa, F4, K4, in[15],  6);
dd                191 crypto/rmd256.c 	ROUND(aa, bb, cc, dd, F4, K4, in[14],  8);
dd                192 crypto/rmd256.c 	ROUND(dd, aa, bb, cc, F4, K4, in[5],   6);
dd                193 crypto/rmd256.c 	ROUND(cc, dd, aa, bb, F4, K4, in[6],   5);
dd                194 crypto/rmd256.c 	ROUND(bb, cc, dd, aa, F4, K4, in[2],  12);
dd                215 crypto/rmd256.c 	swap(dd, ddd);
dd                221 crypto/rmd256.c 	state[3] += dd;
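
rmd256 (and rmd320 below) run two RIPEMD lines in parallel and, after each group of 16 rounds, exchange one register between the lines; that is what the `swap(dd, ddd)` calls do. The kernel's swap() is a small type-generic macro, roughly:

	/* roughly the kernel's swap() helper (GCC typeof extension) */
	#define swap(a, b) \
		do { typeof(a) __tmp = (a); (a) = (b); (b) = __tmp; } while (0)

So after `swap(dd, ddd)` the primary line continues with the secondary line's register and vice versa, which is the cross-coupling that distinguishes RIPEMD-256/320 from simply running RIPEMD-128/160 twice.
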
dd                 51 crypto/rmd320.c 	u32 aa, bb, cc, dd, ee, aaa, bbb, ccc, ddd, eee;
dd                 57 crypto/rmd320.c 	dd = state[3];
dd                 68 crypto/rmd320.c 	ROUND(aa, bb, cc, dd, ee, F1, K1, in[0],  11);
dd                 69 crypto/rmd320.c 	ROUND(ee, aa, bb, cc, dd, F1, K1, in[1],  14);
dd                 70 crypto/rmd320.c 	ROUND(dd, ee, aa, bb, cc, F1, K1, in[2],  15);
dd                 71 crypto/rmd320.c 	ROUND(cc, dd, ee, aa, bb, F1, K1, in[3],  12);
dd                 72 crypto/rmd320.c 	ROUND(bb, cc, dd, ee, aa, F1, K1, in[4],   5);
dd                 73 crypto/rmd320.c 	ROUND(aa, bb, cc, dd, ee, F1, K1, in[5],   8);
dd                 74 crypto/rmd320.c 	ROUND(ee, aa, bb, cc, dd, F1, K1, in[6],   7);
dd                 75 crypto/rmd320.c 	ROUND(dd, ee, aa, bb, cc, F1, K1, in[7],   9);
dd                 76 crypto/rmd320.c 	ROUND(cc, dd, ee, aa, bb, F1, K1, in[8],  11);
dd                 77 crypto/rmd320.c 	ROUND(bb, cc, dd, ee, aa, F1, K1, in[9],  13);
dd                 78 crypto/rmd320.c 	ROUND(aa, bb, cc, dd, ee, F1, K1, in[10], 14);
dd                 79 crypto/rmd320.c 	ROUND(ee, aa, bb, cc, dd, F1, K1, in[11], 15);
dd                 80 crypto/rmd320.c 	ROUND(dd, ee, aa, bb, cc, F1, K1, in[12],  6);
dd                 81 crypto/rmd320.c 	ROUND(cc, dd, ee, aa, bb, F1, K1, in[13],  7);
dd                 82 crypto/rmd320.c 	ROUND(bb, cc, dd, ee, aa, F1, K1, in[14],  9);
dd                 83 crypto/rmd320.c 	ROUND(aa, bb, cc, dd, ee, F1, K1, in[15],  8);
dd                107 crypto/rmd320.c 	ROUND(ee, aa, bb, cc, dd, F2, K2, in[7],   7);
dd                108 crypto/rmd320.c 	ROUND(dd, ee, aa, bb, cc, F2, K2, in[4],   6);
dd                109 crypto/rmd320.c 	ROUND(cc, dd, ee, aa, bb, F2, K2, in[13],  8);
dd                110 crypto/rmd320.c 	ROUND(bb, cc, dd, ee, aa, F2, K2, in[1],  13);
dd                111 crypto/rmd320.c 	ROUND(aa, bb, cc, dd, ee, F2, K2, in[10], 11);
dd                112 crypto/rmd320.c 	ROUND(ee, aa, bb, cc, dd, F2, K2, in[6],   9);
dd                113 crypto/rmd320.c 	ROUND(dd, ee, aa, bb, cc, F2, K2, in[15],  7);
dd                114 crypto/rmd320.c 	ROUND(cc, dd, ee, aa, bb, F2, K2, in[3],  15);
dd                115 crypto/rmd320.c 	ROUND(bb, cc, dd, ee, aa, F2, K2, in[12],  7);
dd                116 crypto/rmd320.c 	ROUND(aa, bb, cc, dd, ee, F2, K2, in[0],  12);
dd                117 crypto/rmd320.c 	ROUND(ee, aa, bb, cc, dd, F2, K2, in[9],  15);
dd                118 crypto/rmd320.c 	ROUND(dd, ee, aa, bb, cc, F2, K2, in[5],   9);
dd                119 crypto/rmd320.c 	ROUND(cc, dd, ee, aa, bb, F2, K2, in[2],  11);
dd                120 crypto/rmd320.c 	ROUND(bb, cc, dd, ee, aa, F2, K2, in[14],  7);
dd                121 crypto/rmd320.c 	ROUND(aa, bb, cc, dd, ee, F2, K2, in[11], 13);
dd                122 crypto/rmd320.c 	ROUND(ee, aa, bb, cc, dd, F2, K2, in[8],  12);
dd                146 crypto/rmd320.c 	ROUND(dd, ee, aa, bb, cc, F3, K3, in[3],  11);
dd                147 crypto/rmd320.c 	ROUND(cc, dd, ee, aa, bb, F3, K3, in[10], 13);
dd                148 crypto/rmd320.c 	ROUND(bb, cc, dd, ee, aa, F3, K3, in[14],  6);
dd                149 crypto/rmd320.c 	ROUND(aa, bb, cc, dd, ee, F3, K3, in[4],   7);
dd                150 crypto/rmd320.c 	ROUND(ee, aa, bb, cc, dd, F3, K3, in[9],  14);
dd                151 crypto/rmd320.c 	ROUND(dd, ee, aa, bb, cc, F3, K3, in[15],  9);
dd                152 crypto/rmd320.c 	ROUND(cc, dd, ee, aa, bb, F3, K3, in[8],  13);
dd                153 crypto/rmd320.c 	ROUND(bb, cc, dd, ee, aa, F3, K3, in[1],  15);
dd                154 crypto/rmd320.c 	ROUND(aa, bb, cc, dd, ee, F3, K3, in[2],  14);
dd                155 crypto/rmd320.c 	ROUND(ee, aa, bb, cc, dd, F3, K3, in[7],   8);
dd                156 crypto/rmd320.c 	ROUND(dd, ee, aa, bb, cc, F3, K3, in[0],  13);
dd                157 crypto/rmd320.c 	ROUND(cc, dd, ee, aa, bb, F3, K3, in[6],   6);
dd                158 crypto/rmd320.c 	ROUND(bb, cc, dd, ee, aa, F3, K3, in[13],  5);
dd                159 crypto/rmd320.c 	ROUND(aa, bb, cc, dd, ee, F3, K3, in[11], 12);
dd                160 crypto/rmd320.c 	ROUND(ee, aa, bb, cc, dd, F3, K3, in[5],   7);
dd                161 crypto/rmd320.c 	ROUND(dd, ee, aa, bb, cc, F3, K3, in[12],  5);
dd                185 crypto/rmd320.c 	ROUND(cc, dd, ee, aa, bb, F4, K4, in[1],  11);
dd                186 crypto/rmd320.c 	ROUND(bb, cc, dd, ee, aa, F4, K4, in[9],  12);
dd                187 crypto/rmd320.c 	ROUND(aa, bb, cc, dd, ee, F4, K4, in[11], 14);
dd                188 crypto/rmd320.c 	ROUND(ee, aa, bb, cc, dd, F4, K4, in[10], 15);
dd                189 crypto/rmd320.c 	ROUND(dd, ee, aa, bb, cc, F4, K4, in[0],  14);
dd                190 crypto/rmd320.c 	ROUND(cc, dd, ee, aa, bb, F4, K4, in[8],  15);
dd                191 crypto/rmd320.c 	ROUND(bb, cc, dd, ee, aa, F4, K4, in[12],  9);
dd                192 crypto/rmd320.c 	ROUND(aa, bb, cc, dd, ee, F4, K4, in[4],   8);
dd                193 crypto/rmd320.c 	ROUND(ee, aa, bb, cc, dd, F4, K4, in[13],  9);
dd                194 crypto/rmd320.c 	ROUND(dd, ee, aa, bb, cc, F4, K4, in[3],  14);
dd                195 crypto/rmd320.c 	ROUND(cc, dd, ee, aa, bb, F4, K4, in[7],   5);
dd                196 crypto/rmd320.c 	ROUND(bb, cc, dd, ee, aa, F4, K4, in[15],  6);
dd                197 crypto/rmd320.c 	ROUND(aa, bb, cc, dd, ee, F4, K4, in[14],  8);
dd                198 crypto/rmd320.c 	ROUND(ee, aa, bb, cc, dd, F4, K4, in[5],   6);
dd                199 crypto/rmd320.c 	ROUND(dd, ee, aa, bb, cc, F4, K4, in[6],   5);
dd                200 crypto/rmd320.c 	ROUND(cc, dd, ee, aa, bb, F4, K4, in[2],  12);
dd                221 crypto/rmd320.c 	swap(dd, ddd);
dd                224 crypto/rmd320.c 	ROUND(bb, cc, dd, ee, aa, F5, K5, in[4],   9);
dd                225 crypto/rmd320.c 	ROUND(aa, bb, cc, dd, ee, F5, K5, in[0],  15);
dd                226 crypto/rmd320.c 	ROUND(ee, aa, bb, cc, dd, F5, K5, in[5],   5);
dd                227 crypto/rmd320.c 	ROUND(dd, ee, aa, bb, cc, F5, K5, in[9],  11);
dd                228 crypto/rmd320.c 	ROUND(cc, dd, ee, aa, bb, F5, K5, in[7],   6);
dd                229 crypto/rmd320.c 	ROUND(bb, cc, dd, ee, aa, F5, K5, in[12],  8);
dd                230 crypto/rmd320.c 	ROUND(aa, bb, cc, dd, ee, F5, K5, in[2],  13);
dd                231 crypto/rmd320.c 	ROUND(ee, aa, bb, cc, dd, F5, K5, in[10], 12);
dd                232 crypto/rmd320.c 	ROUND(dd, ee, aa, bb, cc, F5, K5, in[14],  5);
dd                233 crypto/rmd320.c 	ROUND(cc, dd, ee, aa, bb, F5, K5, in[1],  12);
dd                234 crypto/rmd320.c 	ROUND(bb, cc, dd, ee, aa, F5, K5, in[3],  13);
dd                235 crypto/rmd320.c 	ROUND(aa, bb, cc, dd, ee, F5, K5, in[8],  14);
dd                236 crypto/rmd320.c 	ROUND(ee, aa, bb, cc, dd, F5, K5, in[11], 11);
dd                237 crypto/rmd320.c 	ROUND(dd, ee, aa, bb, cc, F5, K5, in[6],   8);
dd                238 crypto/rmd320.c 	ROUND(cc, dd, ee, aa, bb, F5, K5, in[15],  5);
dd                239 crypto/rmd320.c 	ROUND(bb, cc, dd, ee, aa, F5, K5, in[13],  6);
dd                266 crypto/rmd320.c 	state[3] += dd;
dd                428 drivers/acpi/acpi_tad.c 	struct acpi_tad_driver_data *dd = dev_get_drvdata(dev);
dd                430 drivers/acpi/acpi_tad.c 	return sprintf(buf, "0x%02X\n", dd->capabilities);
dd                560 drivers/acpi/acpi_tad.c 	struct acpi_tad_driver_data *dd = dev_get_drvdata(dev);
dd                566 drivers/acpi/acpi_tad.c 	if (dd->capabilities & ACPI_TAD_DC_WAKE)
dd                573 drivers/acpi/acpi_tad.c 	if (dd->capabilities & ACPI_TAD_DC_WAKE) {
dd                587 drivers/acpi/acpi_tad.c 	struct acpi_tad_driver_data *dd;
dd                612 drivers/acpi/acpi_tad.c 	dd = devm_kzalloc(dev, sizeof(*dd), GFP_KERNEL);
dd                613 drivers/acpi/acpi_tad.c 	if (!dd)
dd                616 drivers/acpi/acpi_tad.c 	dd->capabilities = caps;
dd                617 drivers/acpi/acpi_tad.c 	dev_set_drvdata(dev, dd);
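
The acpi_tad.c hits show the standard drvdata pairing: probe allocates the per-device state with devm_kzalloc() (freed automatically on unbind) and publishes it with dev_set_drvdata(); each sysfs show callback fetches it back with dev_get_drvdata(). Condensed from the lines above (kernel-style sketch, not standalone; error paths and the capabilities query reduced to a `caps` value obtained earlier):

	static int tad_probe(struct platform_device *pdev)
	{
		struct device *dev = &pdev->dev;
		struct acpi_tad_driver_data *dd;

		dd = devm_kzalloc(dev, sizeof(*dd), GFP_KERNEL);
		if (!dd)
			return -ENOMEM;

		dd->capabilities = caps;	/* caps evaluated earlier in probe */
		dev_set_drvdata(dev, dd);
		return 0;
	}

	static ssize_t caps_show(struct device *dev,
				 struct device_attribute *attr, char *buf)
	{
		struct acpi_tad_driver_data *dd = dev_get_drvdata(dev);

		return sprintf(buf, "0x%02X\n", dd->capabilities);
	}
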
dd                 76 drivers/acpi/dock.c 	struct dock_dependent_device *dd;
dd                 78 drivers/acpi/dock.c 	dd = kzalloc(sizeof(*dd), GFP_KERNEL);
dd                 79 drivers/acpi/dock.c 	if (!dd)
dd                 82 drivers/acpi/dock.c 	dd->adev = adev;
dd                 83 drivers/acpi/dock.c 	INIT_LIST_HEAD(&dd->list);
dd                 84 drivers/acpi/dock.c 	list_add_tail(&dd->list, &ds->dependent_devices);
dd                 89 drivers/acpi/dock.c static void dock_hotplug_event(struct dock_dependent_device *dd, u32 event,
dd                 92 drivers/acpi/dock.c 	struct acpi_device *adev = dd->adev;
dd                154 drivers/acpi/dock.c 	struct dock_dependent_device *dd;
dd                156 drivers/acpi/dock.c 	list_for_each_entry(dd, &ds->dependent_devices, list)
dd                157 drivers/acpi/dock.c 		if (adev == dd->adev)
dd                158 drivers/acpi/dock.c 			return dd;
dd                228 drivers/acpi/dock.c 	struct dock_dependent_device *dd;
dd                235 drivers/acpi/dock.c 	list_for_each_entry_reverse(dd, &ds->dependent_devices, list)
dd                236 drivers/acpi/dock.c 		dock_hotplug_event(dd, ACPI_NOTIFY_EJECT_REQUEST, false);
dd                238 drivers/acpi/dock.c 	list_for_each_entry_reverse(dd, &ds->dependent_devices, list)
dd                239 drivers/acpi/dock.c 		acpi_bus_trim(dd->adev);
dd                254 drivers/acpi/dock.c 	struct dock_dependent_device *dd;
dd                257 drivers/acpi/dock.c 	list_for_each_entry(dd, &ds->dependent_devices, list)
dd                258 drivers/acpi/dock.c 		dock_hotplug_event(dd, event, DOCK_CALL_FIXUP);
dd                261 drivers/acpi/dock.c 	list_for_each_entry(dd, &ds->dependent_devices, list)
dd                262 drivers/acpi/dock.c 		dock_hotplug_event(dd, event, DOCK_CALL_HANDLER);
dd                270 drivers/acpi/dock.c 	list_for_each_entry(dd, &ds->dependent_devices, list) {
dd                271 drivers/acpi/dock.c 		struct acpi_device *adev = dd->adev;
dd                286 drivers/acpi/dock.c 	struct dock_dependent_device *dd;
dd                300 drivers/acpi/dock.c 	list_for_each_entry(dd, &ds->dependent_devices, list)
dd                301 drivers/acpi/dock.c 		dock_hotplug_event(dd, event, DOCK_CALL_UEVENT);
dd                591 drivers/acpi/dock.c 	struct platform_device *dd;
dd                600 drivers/acpi/dock.c 	dd = platform_device_register_full(&pdevinfo);
dd                601 drivers/acpi/dock.c 	if (IS_ERR(dd))
dd                604 drivers/acpi/dock.c 	dock_station = dd->dev.platform_data;
dd                607 drivers/acpi/dock.c 	dock_station->dock_device = dd;
dd                614 drivers/acpi/dock.c 	dev_set_uevent_suppress(&dd->dev, 0);
dd                623 drivers/acpi/dock.c 	ret = sysfs_create_group(&dd->dev.kobj, &dock_attribute_group);
dd                640 drivers/acpi/dock.c 	sysfs_remove_group(&dd->dev.kobj, &dock_attribute_group);
dd                643 drivers/acpi/dock.c 	platform_device_unregister(dd);
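
The dock.c entries follow one intrusive-list pattern throughout: each dependent device is kzalloc'ed, linked onto the station's list with list_add_tail(), found again with a linear list_for_each_entry() walk, and torn down in reverse order on ejection. Condensed from the lines above (kernel-style sketch, not standalone):

	static int add_dock_dependent_device(struct dock_station *ds,
					     struct acpi_device *adev)
	{
		struct dock_dependent_device *dd;

		dd = kzalloc(sizeof(*dd), GFP_KERNEL);
		if (!dd)
			return -ENOMEM;

		dd->adev = adev;
		INIT_LIST_HEAD(&dd->list);
		list_add_tail(&dd->list, &ds->dependent_devices);
		return 0;
	}

	/* ejection walks the list in reverse so devices leave in LIFO order */
	static void hot_remove_dock_devices(struct dock_station *ds)
	{
		struct dock_dependent_device *dd;

		list_for_each_entry_reverse(dd, &ds->dependent_devices, list)
			acpi_bus_trim(dd->adev);
	}
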
dd                307 drivers/block/aoe/aoedev.c 	struct aoedev *d, **dd;
dd                373 drivers/block/aoe/aoedev.c 	for (dd = &devlist, d = *dd; d; d = *dd) {
dd                378 drivers/block/aoe/aoedev.c 			*dd = d->next;
dd                381 drivers/block/aoe/aoedev.c 			dd = &d->next;
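
The aoedev.c loop is the classic pointer-to-pointer deletion idiom for singly linked lists: `dd` always addresses the link that points at the current node, so removing the head needs no special case; unlinking is just `*dd = d->next`. A runnable standalone version:

	#include <stdio.h>
	#include <stdlib.h>

	struct aoedev {
		int doomed;
		struct aoedev *next;
	};

	static struct aoedev *devlist;

	static void flush_doomed(void)
	{
		struct aoedev *d, **dd;

		for (dd = &devlist, d = *dd; d; d = *dd) {
			if (d->doomed) {
				*dd = d->next;	/* unlink, head included */
				free(d);
			} else {
				dd = &d->next;	/* keep: advance the cursor */
			}
		}
	}

	static void push(int doomed)
	{
		struct aoedev *d = malloc(sizeof(*d));

		if (!d)
			exit(1);
		d->doomed = doomed;
		d->next = devlist;
		devlist = d;
	}

	int main(void)
	{
		push(1); push(0); push(1);
		flush_doomed();
		for (struct aoedev *d = devlist; d; d = d->next)
			printf("kept node, doomed=%d\n", d->doomed);
		return 0;
	}
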
dd               1798 drivers/block/drbd/drbd_nl.c 	enum determine_dev_size dd;
dd               2089 drivers/block/drbd/drbd_nl.c 	dd = drbd_determine_dev_size(device, 0, NULL);
dd               2090 drivers/block/drbd/drbd_nl.c 	if (dd <= DS_ERROR) {
dd               2093 drivers/block/drbd/drbd_nl.c 	} else if (dd == DS_GREW)
dd               2844 drivers/block/drbd/drbd_nl.c 	enum determine_dev_size dd;
dd               2940 drivers/block/drbd/drbd_nl.c 	dd = drbd_determine_dev_size(device, ddsf, change_al_layout ? &rs : NULL);
dd               2943 drivers/block/drbd/drbd_nl.c 	if (dd == DS_ERROR) {
dd               2946 drivers/block/drbd/drbd_nl.c 	} else if (dd == DS_ERROR_SPACE_MD) {
dd               2949 drivers/block/drbd/drbd_nl.c 	} else if (dd == DS_ERROR_SHRINK) {
dd               2955 drivers/block/drbd/drbd_nl.c 		if (dd == DS_GREW)
dd               4109 drivers/block/drbd/drbd_receiver.c 	enum determine_dev_size dd = DS_UNCHANGED;
dd               4194 drivers/block/drbd/drbd_receiver.c 		dd = drbd_determine_dev_size(device, ddsf, NULL);
dd               4196 drivers/block/drbd/drbd_receiver.c 		if (dd == DS_ERROR)
dd               4261 drivers/block/drbd/drbd_receiver.c 		    (dd == DS_GREW && device->state.conn == C_CONNECTED)) {
dd                114 drivers/block/mtip32xx/mtip32xx.c static int mtip_block_initialize(struct driver_data *dd);
dd                142 drivers/block/mtip32xx/mtip32xx.c 	struct driver_data *dd = pci_get_drvdata(pdev);
dd                144 drivers/block/mtip32xx/mtip32xx.c 	if (dd->sr)
dd                150 drivers/block/mtip32xx/mtip32xx.c 		dd->sr = true;
dd                151 drivers/block/mtip32xx/mtip32xx.c 		if (dd->queue)
dd                152 drivers/block/mtip32xx/mtip32xx.c 			blk_queue_flag_set(QUEUE_FLAG_DEAD, dd->queue);
dd                154 drivers/block/mtip32xx/mtip32xx.c 			dev_warn(&dd->pdev->dev,
dd                162 drivers/block/mtip32xx/mtip32xx.c static struct mtip_cmd *mtip_cmd_from_tag(struct driver_data *dd,
dd                165 drivers/block/mtip32xx/mtip32xx.c 	struct blk_mq_hw_ctx *hctx = dd->queue->queue_hw_ctx[0];
dd                179 drivers/block/mtip32xx/mtip32xx.c static int mtip_hba_reset(struct driver_data *dd)
dd                184 drivers/block/mtip32xx/mtip32xx.c 	writel(HOST_RESET, dd->mmio + HOST_CTL);
dd                187 drivers/block/mtip32xx/mtip32xx.c 	readl(dd->mmio + HOST_CTL);
dd                196 drivers/block/mtip32xx/mtip32xx.c 		if (test_bit(MTIP_DDF_REMOVE_PENDING_BIT, &dd->dd_flag))
dd                199 drivers/block/mtip32xx/mtip32xx.c 	} while ((readl(dd->mmio + HOST_CTL) & HOST_RESET)
dd                202 drivers/block/mtip32xx/mtip32xx.c 	if (readl(dd->mmio + HOST_CTL) & HOST_RESET)
dd                338 drivers/block/mtip32xx/mtip32xx.c 	if (readl(port->dd->mmio + HOST_CAP) & HOST_CAP_64) {
dd                354 drivers/block/mtip32xx/mtip32xx.c 	for (i = 0; i < port->dd->slot_groups; i++)
dd                361 drivers/block/mtip32xx/mtip32xx.c 	writel(readl(port->dd->mmio + HOST_IRQ_STAT),
dd                362 drivers/block/mtip32xx/mtip32xx.c 					port->dd->mmio + HOST_IRQ_STAT);
dd                389 drivers/block/mtip32xx/mtip32xx.c 	if (test_bit(MTIP_DDF_REMOVE_PENDING_BIT, &port->dd->dd_flag))
dd                397 drivers/block/mtip32xx/mtip32xx.c 		dev_warn(&port->dd->pdev->dev,
dd                400 drivers/block/mtip32xx/mtip32xx.c 		if (mtip_hba_reset(port->dd))
dd                401 drivers/block/mtip32xx/mtip32xx.c 			dev_err(&port->dd->pdev->dev,
dd                408 drivers/block/mtip32xx/mtip32xx.c 	dev_warn(&port->dd->pdev->dev, "Issuing COM reset\n");
dd                420 drivers/block/mtip32xx/mtip32xx.c 	if (test_bit(MTIP_DDF_REMOVE_PENDING_BIT, &port->dd->dd_flag))
dd                434 drivers/block/mtip32xx/mtip32xx.c 	if (test_bit(MTIP_DDF_REMOVE_PENDING_BIT, &port->dd->dd_flag))
dd                438 drivers/block/mtip32xx/mtip32xx.c 		dev_warn(&port->dd->pdev->dev,
dd                446 drivers/block/mtip32xx/mtip32xx.c static int mtip_device_reset(struct driver_data *dd)
dd                450 drivers/block/mtip32xx/mtip32xx.c 	if (mtip_check_surprise_removal(dd->pdev))
dd                453 drivers/block/mtip32xx/mtip32xx.c 	if (mtip_hba_reset(dd) < 0)
dd                457 drivers/block/mtip32xx/mtip32xx.c 	mtip_init_port(dd->port);
dd                458 drivers/block/mtip32xx/mtip32xx.c 	mtip_start_port(dd->port);
dd                461 drivers/block/mtip32xx/mtip32xx.c 	writel(readl(dd->mmio + HOST_CTL) | HOST_IRQ_EN,
dd                462 drivers/block/mtip32xx/mtip32xx.c 					dd->mmio + HOST_CTL);
dd                469 drivers/block/mtip32xx/mtip32xx.c static void print_tags(struct driver_data *dd,
dd                481 drivers/block/mtip32xx/mtip32xx.c 	dev_warn(&dd->pdev->dev,
dd                506 drivers/block/mtip32xx/mtip32xx.c static void mtip_handle_tfe(struct driver_data *dd)
dd                519 drivers/block/mtip32xx/mtip32xx.c 	dev_warn(&dd->pdev->dev, "Taskfile error\n");
dd                521 drivers/block/mtip32xx/mtip32xx.c 	port = dd->port;
dd                524 drivers/block/mtip32xx/mtip32xx.c 		cmd = mtip_cmd_from_tag(dd, MTIP_TAG_INTERNAL);
dd                534 drivers/block/mtip32xx/mtip32xx.c 	for (group = 0; group < dd->slot_groups; group++) {
dd                537 drivers/block/mtip32xx/mtip32xx.c 		dev_warn(&dd->pdev->dev, "g=%u, comp=%x\n", group, completed);
dd                552 drivers/block/mtip32xx/mtip32xx.c 			cmd = mtip_cmd_from_tag(dd, tag);
dd                559 drivers/block/mtip32xx/mtip32xx.c 	print_tags(dd, "completed (TFE)", tagaccum, cmd_cnt);
dd                566 drivers/block/mtip32xx/mtip32xx.c 	rv = mtip_read_log_page(dd->port, ATA_LOG_SATA_NCQ,
dd                567 drivers/block/mtip32xx/mtip32xx.c 				dd->port->log_buf,
dd                568 drivers/block/mtip32xx/mtip32xx.c 				dd->port->log_buf_dma, 1);
dd                570 drivers/block/mtip32xx/mtip32xx.c 		dev_warn(&dd->pdev->dev,
dd                574 drivers/block/mtip32xx/mtip32xx.c 		buf = (unsigned char *)dd->port->log_buf;
dd                576 drivers/block/mtip32xx/mtip32xx.c 			dev_info(&dd->pdev->dev,
dd                578 drivers/block/mtip32xx/mtip32xx.c 			set_bit(MTIP_DDF_WRITE_PROTECT_BIT, &dd->dd_flag);
dd                583 drivers/block/mtip32xx/mtip32xx.c 			dev_info(&dd->pdev->dev,
dd                585 drivers/block/mtip32xx/mtip32xx.c 			set_bit(MTIP_DDF_OVER_TEMP_BIT, &dd->dd_flag);
dd                590 drivers/block/mtip32xx/mtip32xx.c 			set_bit(MTIP_DDF_REBUILD_FAILED_BIT, &dd->dd_flag);
dd                591 drivers/block/mtip32xx/mtip32xx.c 			dev_info(&dd->pdev->dev,
dd                602 drivers/block/mtip32xx/mtip32xx.c 	for (group = 0; group < dd->slot_groups; group++) {
dd                606 drivers/block/mtip32xx/mtip32xx.c 			cmd = mtip_cmd_from_tag(dd, tag);
dd                618 drivers/block/mtip32xx/mtip32xx.c 					dev_warn(&dd->pdev->dev,
dd                645 drivers/block/mtip32xx/mtip32xx.c 			dev_warn(&port->dd->pdev->dev,
dd                651 drivers/block/mtip32xx/mtip32xx.c 	print_tags(dd, "reissued (TFE)", tagaccum, cmd_cnt);
dd                660 drivers/block/mtip32xx/mtip32xx.c 	struct driver_data *dd = port->dd;
dd                680 drivers/block/mtip32xx/mtip32xx.c 			command = mtip_cmd_from_tag(dd, tag);
dd                687 drivers/block/mtip32xx/mtip32xx.c 	if (atomic_dec_return(&dd->irq_workers_active) == 0)
dd                688 drivers/block/mtip32xx/mtip32xx.c 		writel(0xffffffff, dd->mmio + HOST_IRQ_STAT);
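
The IRQ workers decrement irq_workers_active as they drain their slot groups, and only the last one acknowledges the aggregated interrupt status; a sketch of that last-one-out handshake, with the counter and mmio base passed in for illustration:

#include <linux/atomic.h>
#include <linux/io.h>

/* Run at the end of each IRQ worker; the final worker acks HOST_IRQ_STAT. */
static void irq_worker_done(atomic_t *workers_active, void __iomem *mmio)
{
	if (atomic_dec_return(workers_active) == 0)
		writel(0xffffffff, mmio + HOST_IRQ_STAT);
}
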
dd                694 drivers/block/mtip32xx/mtip32xx.c static inline void mtip_process_legacy(struct driver_data *dd, u32 port_stat)
dd                696 drivers/block/mtip32xx/mtip32xx.c 	struct mtip_port *port = dd->port;
dd                697 drivers/block/mtip32xx/mtip32xx.c 	struct mtip_cmd *cmd = mtip_cmd_from_tag(dd, MTIP_TAG_INTERNAL);
dd                711 drivers/block/mtip32xx/mtip32xx.c static inline void mtip_process_errors(struct driver_data *dd, u32 port_stat)
dd                714 drivers/block/mtip32xx/mtip32xx.c 		dev_warn(&dd->pdev->dev,
dd                716 drivers/block/mtip32xx/mtip32xx.c 		writel((1 << 26), dd->port->mmio + PORT_SCR_ERR);
dd                720 drivers/block/mtip32xx/mtip32xx.c 		dev_warn(&dd->pdev->dev,
dd                722 drivers/block/mtip32xx/mtip32xx.c 		writel((1 << 16), dd->port->mmio + PORT_SCR_ERR);
dd                726 drivers/block/mtip32xx/mtip32xx.c 		dev_warn(&dd->pdev->dev,
dd                729 drivers/block/mtip32xx/mtip32xx.c 		if (mtip_check_surprise_removal(dd->pdev))
dd                733 drivers/block/mtip32xx/mtip32xx.c 		set_bit(MTIP_PF_EH_ACTIVE_BIT, &dd->port->flags);
dd                734 drivers/block/mtip32xx/mtip32xx.c 		wake_up_interruptible(&dd->port->svc_wait);
dd                740 drivers/block/mtip32xx/mtip32xx.c 	struct driver_data *dd = (struct driver_data *) data;
dd                741 drivers/block/mtip32xx/mtip32xx.c 	struct mtip_port *port = dd->port;
dd                747 drivers/block/mtip32xx/mtip32xx.c 	hba_stat = readl(dd->mmio + HOST_IRQ_STAT);
dd                754 drivers/block/mtip32xx/mtip32xx.c 			mtip_check_surprise_removal(dd->pdev);
dd                762 drivers/block/mtip32xx/mtip32xx.c 			WARN_ON_ONCE(atomic_read(&dd->irq_workers_active) != 0);
dd                767 drivers/block/mtip32xx/mtip32xx.c 				twork = &dd->work[i];
dd                773 drivers/block/mtip32xx/mtip32xx.c 			atomic_set(&dd->irq_workers_active, workers);
dd                776 drivers/block/mtip32xx/mtip32xx.c 					twork = &dd->work[i];
dd                780 drivers/block/mtip32xx/mtip32xx.c 							dd->isr_workq,
dd                784 drivers/block/mtip32xx/mtip32xx.c 				if (likely(dd->work[0].completed))
dd                786 drivers/block/mtip32xx/mtip32xx.c 							dd->work[0].completed);
dd                798 drivers/block/mtip32xx/mtip32xx.c 			if (unlikely(mtip_check_surprise_removal(dd->pdev))) {
dd                803 drivers/block/mtip32xx/mtip32xx.c 							&dd->dd_flag))
dd                806 drivers/block/mtip32xx/mtip32xx.c 			mtip_process_errors(dd, port_stat & PORT_IRQ_ERR);
dd                810 drivers/block/mtip32xx/mtip32xx.c 			mtip_process_legacy(dd, port_stat & PORT_IRQ_LEGACY);
dd                815 drivers/block/mtip32xx/mtip32xx.c 		writel(hba_stat, dd->mmio + HOST_IRQ_STAT);
dd                832 drivers/block/mtip32xx/mtip32xx.c 	struct driver_data *dd = instance;
dd                834 drivers/block/mtip32xx/mtip32xx.c 	return mtip_handle_irq(dd);
dd                863 drivers/block/mtip32xx/mtip32xx.c 		clear_bit(MTIP_DDF_SEC_LOCK_BIT, &port->dd->dd_flag);
dd                864 drivers/block/mtip32xx/mtip32xx.c 		clear_bit(MTIP_DDF_REBUILD_FAILED_BIT, &port->dd->dd_flag);
dd                884 drivers/block/mtip32xx/mtip32xx.c 	for (n = 1; n < port->dd->slot_groups; n++)
dd                905 drivers/block/mtip32xx/mtip32xx.c 	blk_mq_quiesce_queue(port->dd->queue);
dd                917 drivers/block/mtip32xx/mtip32xx.c 		if (mtip_check_surprise_removal(port->dd->pdev))
dd                925 drivers/block/mtip32xx/mtip32xx.c 	blk_mq_unquiesce_queue(port->dd->queue);
dd                928 drivers/block/mtip32xx/mtip32xx.c 	blk_mq_unquiesce_queue(port->dd->queue);
dd                966 drivers/block/mtip32xx/mtip32xx.c 	struct driver_data *dd = port->dd;
dd                978 drivers/block/mtip32xx/mtip32xx.c 		dev_err(&dd->pdev->dev, "SG buffer is not 8 byte aligned\n");
dd                982 drivers/block/mtip32xx/mtip32xx.c 	if (mtip_check_surprise_removal(dd->pdev))
dd                985 drivers/block/mtip32xx/mtip32xx.c 	rq = blk_mq_alloc_request(dd->queue, REQ_OP_DRV_IN, BLK_MQ_REQ_RESERVED);
dd               1001 drivers/block/mtip32xx/mtip32xx.c 			dev_warn(&dd->pdev->dev, "Failed to quiesce IO\n");
dd               1020 drivers/block/mtip32xx/mtip32xx.c 		dev_err(&dd->pdev->dev, "Internal command [%02X] failed %d\n",
dd               1024 drivers/block/mtip32xx/mtip32xx.c 		if (mtip_check_surprise_removal(dd->pdev) ||
dd               1026 drivers/block/mtip32xx/mtip32xx.c 					&dd->dd_flag)) {
dd               1027 drivers/block/mtip32xx/mtip32xx.c 			dev_err(&dd->pdev->dev,
dd               1033 drivers/block/mtip32xx/mtip32xx.c 		mtip_device_reset(dd); /* recover from timeout issue */
dd               1041 drivers/block/mtip32xx/mtip32xx.c 		if (!test_bit(MTIP_DDF_REMOVE_PENDING_BIT, &dd->dd_flag)) {
dd               1042 drivers/block/mtip32xx/mtip32xx.c 			mtip_device_reset(dd);
dd               1079 drivers/block/mtip32xx/mtip32xx.c static void mtip_set_timeout(struct driver_data *dd,
dd               1090 drivers/block/mtip32xx/mtip32xx.c 			*timeout = ((*(dd->port->identify + 90) * 2) * 60000);
dd               1092 drivers/block/mtip32xx/mtip32xx.c 			*timeout = ((*(dd->port->identify + 89) * 2) * 60000);
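
mtip_set_timeout() scales IDENTIFY words 89/90, which ATA defines in two-minute units (word 90 for enhanced erase), into milliseconds; a sketch assuming identify points at the 256-word IDENTIFY buffer:

#include <linux/types.h>

/* IDENTIFY words 89/90 give erase time in 2-minute units; convert to ms. */
static unsigned int erase_timeout_ms(const u16 *identify, bool enhanced)
{
	return identify[enhanced ? 90 : 89] * 2 * 60000;
}
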
dd               1134 drivers/block/mtip32xx/mtip32xx.c 	if (test_bit(MTIP_DDF_REMOVE_PENDING_BIT, &port->dd->dd_flag))
dd               1181 drivers/block/mtip32xx/mtip32xx.c 		set_bit(MTIP_DDF_SEC_LOCK_BIT, &port->dd->dd_flag);
dd               1183 drivers/block/mtip32xx/mtip32xx.c 		clear_bit(MTIP_DDF_SEC_LOCK_BIT, &port->dd->dd_flag);
dd               1224 drivers/block/mtip32xx/mtip32xx.c 	mtip_set_timeout(port->dd, &fis, &timeout, 0);
dd               1237 drivers/block/mtip32xx/mtip32xx.c 		dev_warn(&port->dd->pdev->dev,
dd               1336 drivers/block/mtip32xx/mtip32xx.c 		dev_warn(&port->dd->pdev->dev, "IDENTIFY DATA not valid\n");
dd               1340 drivers/block/mtip32xx/mtip32xx.c 		dev_warn(&port->dd->pdev->dev, "SMART not supported\n");
dd               1344 drivers/block/mtip32xx/mtip32xx.c 		dev_warn(&port->dd->pdev->dev, "SMART not enabled\n");
dd               1351 drivers/block/mtip32xx/mtip32xx.c 		dev_warn(&port->dd->pdev->dev, "Failed to get SMART data\n");
dd               1363 drivers/block/mtip32xx/mtip32xx.c 		dev_warn(&port->dd->pdev->dev,
dd               1381 drivers/block/mtip32xx/mtip32xx.c static bool mtip_hw_get_capacity(struct driver_data *dd, sector_t *sectors)
dd               1383 drivers/block/mtip32xx/mtip32xx.c 	struct mtip_port *port = dd->port;
dd               1412 drivers/block/mtip32xx/mtip32xx.c 	dev_info(&port->dd->pdev->dev,
dd               1416 drivers/block/mtip32xx/mtip32xx.c 	dev_info(&port->dd->pdev->dev,
dd               1420 drivers/block/mtip32xx/mtip32xx.c 	dev_info(&port->dd->pdev->dev, "Model: %s\n", cbuf);
dd               1422 drivers/block/mtip32xx/mtip32xx.c 	dev_info(&port->dd->pdev->dev, "Security: %04x %s\n",
dd               1426 drivers/block/mtip32xx/mtip32xx.c 	if (mtip_hw_get_capacity(port->dd, &sectors))
dd               1427 drivers/block/mtip32xx/mtip32xx.c 		dev_info(&port->dd->pdev->dev,
dd               1432 drivers/block/mtip32xx/mtip32xx.c 	pci_read_config_word(port->dd->pdev, PCI_REVISION_ID, &revid);
dd               1444 drivers/block/mtip32xx/mtip32xx.c 	dev_info(&port->dd->pdev->dev,
dd               1457 drivers/block/mtip32xx/mtip32xx.c static inline void fill_command_sg(struct driver_data *dd,
dd               1471 drivers/block/mtip32xx/mtip32xx.c 			dev_err(&dd->pdev->dev,
dd               1505 drivers/block/mtip32xx/mtip32xx.c 	mtip_set_timeout(port->dd, &fis, &to, 0);
dd               1570 drivers/block/mtip32xx/mtip32xx.c 		buf = dma_alloc_coherent(&port->dd->pdev->dev,
dd               1575 drivers/block/mtip32xx/mtip32xx.c 			dev_err(&port->dd->pdev->dev,
dd               1595 drivers/block/mtip32xx/mtip32xx.c 	mtip_set_timeout(port->dd, &fis, &to, 0);
dd               1647 drivers/block/mtip32xx/mtip32xx.c 		dma_free_coherent(&port->dd->pdev->dev,
dd               1703 drivers/block/mtip32xx/mtip32xx.c static int exec_drive_taskfile(struct driver_data *dd,
dd               1737 drivers/block/mtip32xx/mtip32xx.c 		outbuf_dma = dma_map_single(&dd->pdev->dev, outbuf,
dd               1739 drivers/block/mtip32xx/mtip32xx.c 		if (dma_mapping_error(&dd->pdev->dev, outbuf_dma)) {
dd               1753 drivers/block/mtip32xx/mtip32xx.c 		inbuf_dma = dma_map_single(&dd->pdev->dev, inbuf,
dd               1755 drivers/block/mtip32xx/mtip32xx.c 		if (dma_mapping_error(&dd->pdev->dev, inbuf_dma)) {
dd               1766 drivers/block/mtip32xx/mtip32xx.c 		reply = (dd->port->rxfis + RX_FIS_PIO_SETUP);
dd               1769 drivers/block/mtip32xx/mtip32xx.c 		reply = (dd->port->rxfis + RX_FIS_PIO_SETUP);
dd               1772 drivers/block/mtip32xx/mtip32xx.c 		reply = (dd->port->rxfis + RX_FIS_D2H_REG);
dd               1814 drivers/block/mtip32xx/mtip32xx.c 				dev_warn(&dd->pdev->dev,
dd               1842 drivers/block/mtip32xx/mtip32xx.c 	mtip_set_timeout(dd, &fis, &timeout, erasemode);
dd               1851 drivers/block/mtip32xx/mtip32xx.c 	if (mtip_exec_internal_command(dd->port,
dd               1862 drivers/block/mtip32xx/mtip32xx.c 	task_file_data = readl(dd->port->mmio+PORT_TFDATA);
dd               1865 drivers/block/mtip32xx/mtip32xx.c 		reply = dd->port->rxfis + RX_FIS_PIO_SETUP;
dd               1868 drivers/block/mtip32xx/mtip32xx.c 		reply = dd->port->rxfis + RX_FIS_D2H_REG;
dd               1874 drivers/block/mtip32xx/mtip32xx.c 		dma_unmap_single(&dd->pdev->dev, inbuf_dma, taskin,
dd               1877 drivers/block/mtip32xx/mtip32xx.c 		dma_unmap_single(&dd->pdev->dev, outbuf_dma, taskout,
dd               1925 drivers/block/mtip32xx/mtip32xx.c 		dma_unmap_single(&dd->pdev->dev, inbuf_dma, taskin,
dd               1928 drivers/block/mtip32xx/mtip32xx.c 		dma_unmap_single(&dd->pdev->dev, outbuf_dma, taskout,
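
The taskfile path maps variable-sized buffers with the streaming DMA API and must check every mapping before use, then unmap on both the success and error paths; a condensed sketch of that map/check/unmap discipline, with dev standing in for &dd->pdev->dev:

#include <linux/dma-mapping.h>

/* Map a buffer for host-to-device DMA, use it, and always unmap it. */
static int dma_roundtrip(struct device *dev, void *buf, size_t len)
{
	dma_addr_t handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);

	if (dma_mapping_error(dev, handle))
		return -ENOMEM;
	/* ... issue the taskfile using 'handle' ... */
	dma_unmap_single(dev, handle, len, DMA_TO_DEVICE);
	return 0;
}
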
dd               1953 drivers/block/mtip32xx/mtip32xx.c static int mtip_hw_ioctl(struct driver_data *dd, unsigned int cmd,
dd               1959 drivers/block/mtip32xx/mtip32xx.c 		if (copy_to_user((void __user *)arg, dd->port->identify,
dd               1975 drivers/block/mtip32xx/mtip32xx.c 		if (exec_drive_command(dd->port,
dd               1999 drivers/block/mtip32xx/mtip32xx.c 		if (exec_drive_task(dd->port, drive_command))
dd               2020 drivers/block/mtip32xx/mtip32xx.c 		ret = exec_drive_taskfile(dd, (void __user *) arg,
dd               2056 drivers/block/mtip32xx/mtip32xx.c static void mtip_hw_submit_io(struct driver_data *dd, struct request *rq,
dd               2061 drivers/block/mtip32xx/mtip32xx.c 		dd->port->command_list + sizeof(struct mtip_cmd_hdr) * rq->tag;
dd               2063 drivers/block/mtip32xx/mtip32xx.c 	struct mtip_port *port = dd->port;
dd               2071 drivers/block/mtip32xx/mtip32xx.c 	nents = dma_map_sg(&dd->pdev->dev, command->sg, nents, dma_dir);
dd               2105 drivers/block/mtip32xx/mtip32xx.c 	fill_command_sg(dd, command, nents);
dd               2112 drivers/block/mtip32xx/mtip32xx.c 	if (test_bit(MTIP_PF_HOST_CAP_64, &dd->port->flags))
dd               2147 drivers/block/mtip32xx/mtip32xx.c 	struct driver_data *dd = dev_to_disk(dev)->private_data;
dd               2150 drivers/block/mtip32xx/mtip32xx.c 	if (test_bit(MTIP_DDF_OVER_TEMP_BIT, &dd->dd_flag))
dd               2152 drivers/block/mtip32xx/mtip32xx.c 	else if (test_bit(MTIP_DDF_WRITE_PROTECT_BIT, &dd->dd_flag))
dd               2167 drivers/block/mtip32xx/mtip32xx.c 	struct driver_data *dd, *tmp;
dd               2174 drivers/block/mtip32xx/mtip32xx.c 	list_for_each_entry_safe(dd, tmp, &online_list, online_list) {
dd               2175 drivers/block/mtip32xx/mtip32xx.c 		if (dd->pdev) {
dd               2176 drivers/block/mtip32xx/mtip32xx.c 			if (dd->port &&
dd               2177 drivers/block/mtip32xx/mtip32xx.c 			    dd->port->identify &&
dd               2178 drivers/block/mtip32xx/mtip32xx.c 			    dd->port->identify_valid) {
dd               2180 drivers/block/mtip32xx/mtip32xx.c 					(char *) (dd->port->identify + 10), 21);
dd               2181 drivers/block/mtip32xx/mtip32xx.c 				status = *(dd->port->identify + 141);
dd               2187 drivers/block/mtip32xx/mtip32xx.c 			if (dd->port &&
dd               2188 drivers/block/mtip32xx/mtip32xx.c 			    test_bit(MTIP_PF_REBUILD_BIT, &dd->port->flags)) {
dd               2191 drivers/block/mtip32xx/mtip32xx.c 					dev_name(&dd->pdev->dev),
dd               2197 drivers/block/mtip32xx/mtip32xx.c 					dev_name(&dd->pdev->dev),
dd               2204 drivers/block/mtip32xx/mtip32xx.c 	list_for_each_entry_safe(dd, tmp, &removing_list, remove_list) {
dd               2205 drivers/block/mtip32xx/mtip32xx.c 		if (dd->pdev) {
dd               2206 drivers/block/mtip32xx/mtip32xx.c 			if (dd->port &&
dd               2207 drivers/block/mtip32xx/mtip32xx.c 			    dd->port->identify &&
dd               2208 drivers/block/mtip32xx/mtip32xx.c 			    dd->port->identify_valid) {
dd               2210 drivers/block/mtip32xx/mtip32xx.c 					(char *) (dd->port->identify+10), 21);
dd               2211 drivers/block/mtip32xx/mtip32xx.c 				status = *(dd->port->identify + 141);
dd               2217 drivers/block/mtip32xx/mtip32xx.c 			if (dd->port &&
dd               2218 drivers/block/mtip32xx/mtip32xx.c 			    test_bit(MTIP_PF_REBUILD_BIT, &dd->port->flags)) {
dd               2221 drivers/block/mtip32xx/mtip32xx.c 					dev_name(&dd->pdev->dev),
dd               2227 drivers/block/mtip32xx/mtip32xx.c 					dev_name(&dd->pdev->dev),
dd               2240 drivers/block/mtip32xx/mtip32xx.c 	struct driver_data *dd =  (struct driver_data *)f->private_data;
dd               2250 drivers/block/mtip32xx/mtip32xx.c 		dev_err(&dd->pdev->dev,
dd               2269 drivers/block/mtip32xx/mtip32xx.c 	struct driver_data *dd =  (struct driver_data *)f->private_data;
dd               2280 drivers/block/mtip32xx/mtip32xx.c 		dev_err(&dd->pdev->dev,
dd               2287 drivers/block/mtip32xx/mtip32xx.c 	for (n = dd->slot_groups-1; n >= 0; n--)
dd               2289 drivers/block/mtip32xx/mtip32xx.c 					 readl(dd->port->s_active[n]));
dd               2294 drivers/block/mtip32xx/mtip32xx.c 	for (n = dd->slot_groups-1; n >= 0; n--)
dd               2296 drivers/block/mtip32xx/mtip32xx.c 					readl(dd->port->cmd_issue[n]));
dd               2301 drivers/block/mtip32xx/mtip32xx.c 	for (n = dd->slot_groups-1; n >= 0; n--)
dd               2303 drivers/block/mtip32xx/mtip32xx.c 				readl(dd->port->completed[n]));
dd               2307 drivers/block/mtip32xx/mtip32xx.c 				readl(dd->port->mmio + PORT_IRQ_STAT));
dd               2309 drivers/block/mtip32xx/mtip32xx.c 				readl(dd->mmio + HOST_IRQ_STAT));
dd               2314 drivers/block/mtip32xx/mtip32xx.c 	for (n = dd->slot_groups-1; n >= 0; n--) {
dd               2317 drivers/block/mtip32xx/mtip32xx.c 				dd->port->cmds_to_issue[n/2] >> (32*(n&1));
dd               2319 drivers/block/mtip32xx/mtip32xx.c 			group_allocated = dd->port->cmds_to_issue[n];
dd               2336 drivers/block/mtip32xx/mtip32xx.c 	struct driver_data *dd =  (struct driver_data *)f->private_data;
dd               2346 drivers/block/mtip32xx/mtip32xx.c 		dev_err(&dd->pdev->dev,
dd               2352 drivers/block/mtip32xx/mtip32xx.c 							dd->port->flags);
dd               2354 drivers/block/mtip32xx/mtip32xx.c 							dd->dd_flag);
dd               2396 drivers/block/mtip32xx/mtip32xx.c static int mtip_hw_sysfs_init(struct driver_data *dd, struct kobject *kobj)
dd               2398 drivers/block/mtip32xx/mtip32xx.c 	if (!kobj || !dd)
dd               2402 drivers/block/mtip32xx/mtip32xx.c 		dev_warn(&dd->pdev->dev,
dd               2417 drivers/block/mtip32xx/mtip32xx.c static int mtip_hw_sysfs_exit(struct driver_data *dd, struct kobject *kobj)
dd               2419 drivers/block/mtip32xx/mtip32xx.c 	if (!kobj || !dd)
dd               2427 drivers/block/mtip32xx/mtip32xx.c static int mtip_hw_debugfs_init(struct driver_data *dd)
dd               2432 drivers/block/mtip32xx/mtip32xx.c 	dd->dfs_node = debugfs_create_dir(dd->disk->disk_name, dfs_parent);
dd               2433 drivers/block/mtip32xx/mtip32xx.c 	if (IS_ERR_OR_NULL(dd->dfs_node)) {
dd               2434 drivers/block/mtip32xx/mtip32xx.c 		dev_warn(&dd->pdev->dev,
dd               2436 drivers/block/mtip32xx/mtip32xx.c 						dd->disk->disk_name);
dd               2437 drivers/block/mtip32xx/mtip32xx.c 		dd->dfs_node = NULL;
dd               2441 drivers/block/mtip32xx/mtip32xx.c 	debugfs_create_file("flags", 0444, dd->dfs_node, dd, &mtip_flags_fops);
dd               2442 drivers/block/mtip32xx/mtip32xx.c 	debugfs_create_file("registers", 0444, dd->dfs_node, dd,
dd               2448 drivers/block/mtip32xx/mtip32xx.c static void mtip_hw_debugfs_exit(struct driver_data *dd)
dd               2450 drivers/block/mtip32xx/mtip32xx.c 	debugfs_remove_recursive(dd->dfs_node);
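
The debugfs hookup is the stock directory-plus-files pattern, and the driver deliberately treats failures as non-fatal; a sketch reusing the flags fops name from the listing (the registers fops name is an assumption, since that line is truncated here):

#include <linux/debugfs.h>

/* Create <parent>/<name> with two read-only files; debugfs errors are
 * tolerated, and debugfs_create_file() copes with an IS_ERR dir. */
static struct dentry *dfs_setup(const char *name, struct dentry *parent,
				void *data)
{
	struct dentry *dir = debugfs_create_dir(name, parent);

	debugfs_create_file("flags", 0444, dir, data, &mtip_flags_fops);
	debugfs_create_file("registers", 0444, dir, data, &mtip_regs_fops);
	return dir;
}
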
dd               2461 drivers/block/mtip32xx/mtip32xx.c static inline void hba_setup(struct driver_data *dd)
dd               2464 drivers/block/mtip32xx/mtip32xx.c 	hwdata = readl(dd->mmio + HOST_HSORG);
dd               2470 drivers/block/mtip32xx/mtip32xx.c 		dd->mmio + HOST_HSORG);
dd               2473 drivers/block/mtip32xx/mtip32xx.c static int mtip_device_unaligned_constrained(struct driver_data *dd)
dd               2475 drivers/block/mtip32xx/mtip32xx.c 	return (dd->pdev->device == P420M_DEVICE_ID ? 1 : 0);
dd               2488 drivers/block/mtip32xx/mtip32xx.c static void mtip_detect_product(struct driver_data *dd)
dd               2500 drivers/block/mtip32xx/mtip32xx.c 	hwdata = readl(dd->mmio + HOST_HSORG);
dd               2502 drivers/block/mtip32xx/mtip32xx.c 	dd->product_type = MTIP_PRODUCT_UNKNOWN;
dd               2503 drivers/block/mtip32xx/mtip32xx.c 	dd->slot_groups = 1;
dd               2506 drivers/block/mtip32xx/mtip32xx.c 		dd->product_type = MTIP_PRODUCT_ASICFPGA;
dd               2509 drivers/block/mtip32xx/mtip32xx.c 		dev_info(&dd->pdev->dev,
dd               2517 drivers/block/mtip32xx/mtip32xx.c 			dev_warn(&dd->pdev->dev,
dd               2522 drivers/block/mtip32xx/mtip32xx.c 		dd->slot_groups = slotgroups;
dd               2526 drivers/block/mtip32xx/mtip32xx.c 	dev_warn(&dd->pdev->dev, "Unrecognized product id\n");
dd               2538 drivers/block/mtip32xx/mtip32xx.c static int mtip_ftl_rebuild_poll(struct driver_data *dd)
dd               2542 drivers/block/mtip32xx/mtip32xx.c 	dev_warn(&dd->pdev->dev,
dd               2550 drivers/block/mtip32xx/mtip32xx.c 				&dd->dd_flag)))
dd               2552 drivers/block/mtip32xx/mtip32xx.c 		if (mtip_check_surprise_removal(dd->pdev))
dd               2555 drivers/block/mtip32xx/mtip32xx.c 		if (mtip_get_identify(dd->port, NULL) < 0)
dd               2558 drivers/block/mtip32xx/mtip32xx.c 		if (*(dd->port->identify + MTIP_FTL_REBUILD_OFFSET) ==
dd               2563 drivers/block/mtip32xx/mtip32xx.c 				dev_warn(&dd->pdev->dev,
dd               2569 drivers/block/mtip32xx/mtip32xx.c 			dev_warn(&dd->pdev->dev,
dd               2572 drivers/block/mtip32xx/mtip32xx.c 			mtip_block_initialize(dd);
dd               2578 drivers/block/mtip32xx/mtip32xx.c 	dev_err(&dd->pdev->dev,
dd               2587 drivers/block/mtip32xx/mtip32xx.c 	struct driver_data *dd = rq->q->queuedata;
dd               2590 drivers/block/mtip32xx/mtip32xx.c 	dma_unmap_sg(&dd->pdev->dev, cmd->sg, cmd->scatter_ents,
dd               2594 drivers/block/mtip32xx/mtip32xx.c 		atomic_inc(&dd->port->cmd_slot_unal);
dd               2602 drivers/block/mtip32xx/mtip32xx.c 	struct driver_data *dd = data;
dd               2606 drivers/block/mtip32xx/mtip32xx.c 	clear_bit(req->tag, dd->port->cmds_to_issue);
dd               2614 drivers/block/mtip32xx/mtip32xx.c 	struct driver_data *dd = data;
dd               2616 drivers/block/mtip32xx/mtip32xx.c 	set_bit(req->tag, dd->port->cmds_to_issue);
dd               2632 drivers/block/mtip32xx/mtip32xx.c 	struct driver_data *dd = (struct driver_data *)data;
dd               2634 drivers/block/mtip32xx/mtip32xx.c 	unsigned int num_cmd_slots = dd->slot_groups * 32;
dd               2635 drivers/block/mtip32xx/mtip32xx.c 	struct mtip_port *port = dd->port;
dd               2655 drivers/block/mtip32xx/mtip32xx.c 				&dd->dd_flag)))
dd               2663 drivers/block/mtip32xx/mtip32xx.c 			mtip_handle_tfe(dd);
dd               2675 drivers/block/mtip32xx/mtip32xx.c 			} while (atomic_read(&dd->irq_workers_active) != 0 &&
dd               2678 drivers/block/mtip32xx/mtip32xx.c 			if (atomic_read(&dd->irq_workers_active) != 0)
dd               2679 drivers/block/mtip32xx/mtip32xx.c 				dev_warn(&dd->pdev->dev,
dd               2682 drivers/block/mtip32xx/mtip32xx.c 			blk_mq_quiesce_queue(dd->queue);
dd               2684 drivers/block/mtip32xx/mtip32xx.c 			blk_mq_tagset_busy_iter(&dd->tags, mtip_queue_cmd, dd);
dd               2686 drivers/block/mtip32xx/mtip32xx.c 			set_bit(MTIP_PF_ISSUE_CMDS_BIT, &dd->port->flags);
dd               2688 drivers/block/mtip32xx/mtip32xx.c 			if (mtip_device_reset(dd))
dd               2689 drivers/block/mtip32xx/mtip32xx.c 				blk_mq_tagset_busy_iter(&dd->tags,
dd               2690 drivers/block/mtip32xx/mtip32xx.c 							mtip_abort_cmd, dd);
dd               2692 drivers/block/mtip32xx/mtip32xx.c 			clear_bit(MTIP_PF_TO_ACTIVE_BIT, &dd->port->flags);
dd               2694 drivers/block/mtip32xx/mtip32xx.c 			blk_mq_unquiesce_queue(dd->queue);
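
Put together, the service thread's timeout recovery quiesces the queue, marks in-flight tags for reissue, resets the device, and aborts the tags only if the reset itself fails; a condensed sketch of that sequence, using the driver-internal callbacks and flag bits named in the listing:

#include <linux/blk-mq.h>

/* Requeue live commands around a device reset; abort them if reset fails. */
static void recover_timeouts(struct driver_data *dd)
{
	blk_mq_quiesce_queue(dd->queue);
	blk_mq_tagset_busy_iter(&dd->tags, mtip_queue_cmd, dd);
	set_bit(MTIP_PF_ISSUE_CMDS_BIT, &dd->port->flags);
	if (mtip_device_reset(dd))
		blk_mq_tagset_busy_iter(&dd->tags, mtip_abort_cmd, dd);
	clear_bit(MTIP_PF_TO_ACTIVE_BIT, &dd->port->flags);
	blk_mq_unquiesce_queue(dd->queue);
}
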
dd               2729 drivers/block/mtip32xx/mtip32xx.c 			if (mtip_ftl_rebuild_poll(dd) == 0)
dd               2746 drivers/block/mtip32xx/mtip32xx.c static void mtip_dma_free(struct driver_data *dd)
dd               2748 drivers/block/mtip32xx/mtip32xx.c 	struct mtip_port *port = dd->port;
dd               2751 drivers/block/mtip32xx/mtip32xx.c 		dma_free_coherent(&dd->pdev->dev, BLOCK_DMA_ALLOC_SZ,
dd               2755 drivers/block/mtip32xx/mtip32xx.c 		dma_free_coherent(&dd->pdev->dev, AHCI_CMD_TBL_SZ,
dd               2768 drivers/block/mtip32xx/mtip32xx.c static int mtip_dma_alloc(struct driver_data *dd)
dd               2770 drivers/block/mtip32xx/mtip32xx.c 	struct mtip_port *port = dd->port;
dd               2774 drivers/block/mtip32xx/mtip32xx.c 		dma_alloc_coherent(&dd->pdev->dev, BLOCK_DMA_ALLOC_SZ,
dd               2781 drivers/block/mtip32xx/mtip32xx.c 		dma_alloc_coherent(&dd->pdev->dev, AHCI_CMD_TBL_SZ,
dd               2784 drivers/block/mtip32xx/mtip32xx.c 		dma_free_coherent(&dd->pdev->dev, BLOCK_DMA_ALLOC_SZ,
dd               2804 drivers/block/mtip32xx/mtip32xx.c static int mtip_hw_get_identify(struct driver_data *dd)
dd               2810 drivers/block/mtip32xx/mtip32xx.c 	if (mtip_get_identify(dd->port, NULL) < 0)
dd               2813 drivers/block/mtip32xx/mtip32xx.c 	if (*(dd->port->identify + MTIP_FTL_REBUILD_OFFSET) ==
dd               2815 drivers/block/mtip32xx/mtip32xx.c 		set_bit(MTIP_PF_REBUILD_BIT, &dd->port->flags);
dd               2818 drivers/block/mtip32xx/mtip32xx.c 	mtip_dump_identify(dd->port);
dd               2821 drivers/block/mtip32xx/mtip32xx.c 	rv = mtip_read_log_page(dd->port, ATA_LOG_SATA_NCQ,
dd               2822 drivers/block/mtip32xx/mtip32xx.c 				dd->port->log_buf,
dd               2823 drivers/block/mtip32xx/mtip32xx.c 				dd->port->log_buf_dma, 1);
dd               2825 drivers/block/mtip32xx/mtip32xx.c 		dev_warn(&dd->pdev->dev,
dd               2829 drivers/block/mtip32xx/mtip32xx.c 		buf = (unsigned char *)dd->port->log_buf;
dd               2831 drivers/block/mtip32xx/mtip32xx.c 			dev_info(&dd->pdev->dev,
dd               2833 drivers/block/mtip32xx/mtip32xx.c 			set_bit(MTIP_DDF_WRITE_PROTECT_BIT, &dd->dd_flag);
dd               2836 drivers/block/mtip32xx/mtip32xx.c 			dev_info(&dd->pdev->dev,
dd               2838 drivers/block/mtip32xx/mtip32xx.c 			set_bit(MTIP_DDF_OVER_TEMP_BIT, &dd->dd_flag);
dd               2841 drivers/block/mtip32xx/mtip32xx.c 			dev_info(&dd->pdev->dev,
dd               2843 drivers/block/mtip32xx/mtip32xx.c 			set_bit(MTIP_DDF_REBUILD_FAILED_BIT, &dd->dd_flag);
dd               2849 drivers/block/mtip32xx/mtip32xx.c 	if (mtip_get_smart_attr(dd->port, 242, &attr242))
dd               2850 drivers/block/mtip32xx/mtip32xx.c 		dev_warn(&dd->pdev->dev,
dd               2853 drivers/block/mtip32xx/mtip32xx.c 		dev_info(&dd->pdev->dev,
dd               2868 drivers/block/mtip32xx/mtip32xx.c static int mtip_hw_init(struct driver_data *dd)
dd               2874 drivers/block/mtip32xx/mtip32xx.c 	dd->mmio = pcim_iomap_table(dd->pdev)[MTIP_ABAR];
dd               2876 drivers/block/mtip32xx/mtip32xx.c 	mtip_detect_product(dd);
dd               2877 drivers/block/mtip32xx/mtip32xx.c 	if (dd->product_type == MTIP_PRODUCT_UNKNOWN) {
dd               2882 drivers/block/mtip32xx/mtip32xx.c 	hba_setup(dd);
dd               2884 drivers/block/mtip32xx/mtip32xx.c 	dd->port = kzalloc_node(sizeof(struct mtip_port), GFP_KERNEL,
dd               2885 drivers/block/mtip32xx/mtip32xx.c 				dd->numa_node);
dd               2886 drivers/block/mtip32xx/mtip32xx.c 	if (!dd->port) {
dd               2887 drivers/block/mtip32xx/mtip32xx.c 		dev_err(&dd->pdev->dev,
dd               2894 drivers/block/mtip32xx/mtip32xx.c 		dd->work[i].port = dd->port;
dd               2897 drivers/block/mtip32xx/mtip32xx.c 	if (mtip_device_unaligned_constrained(dd))
dd               2898 drivers/block/mtip32xx/mtip32xx.c 		dd->unal_qdepth = MTIP_MAX_UNALIGNED_SLOTS;
dd               2900 drivers/block/mtip32xx/mtip32xx.c 		dd->unal_qdepth = 0;
dd               2902 drivers/block/mtip32xx/mtip32xx.c 	atomic_set(&dd->port->cmd_slot_unal, dd->unal_qdepth);
dd               2906 drivers/block/mtip32xx/mtip32xx.c 		spin_lock_init(&dd->port->cmd_issue_lock[i]);
dd               2909 drivers/block/mtip32xx/mtip32xx.c 	dd->port->mmio	= dd->mmio + PORT_OFFSET;
dd               2910 drivers/block/mtip32xx/mtip32xx.c 	dd->port->dd	= dd;
dd               2913 drivers/block/mtip32xx/mtip32xx.c 	rv = mtip_dma_alloc(dd);
dd               2918 drivers/block/mtip32xx/mtip32xx.c 	for (i = 0; i < dd->slot_groups; i++) {
dd               2919 drivers/block/mtip32xx/mtip32xx.c 		dd->port->s_active[i] =
dd               2920 drivers/block/mtip32xx/mtip32xx.c 			dd->port->mmio + i*0x80 + PORT_SCR_ACT;
dd               2921 drivers/block/mtip32xx/mtip32xx.c 		dd->port->cmd_issue[i] =
dd               2922 drivers/block/mtip32xx/mtip32xx.c 			dd->port->mmio + i*0x80 + PORT_COMMAND_ISSUE;
dd               2923 drivers/block/mtip32xx/mtip32xx.c 		dd->port->completed[i] =
dd               2924 drivers/block/mtip32xx/mtip32xx.c 			dd->port->mmio + i*0x80 + PORT_SDBV;
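
Each group of 32 command slots has its own SACT, CI and SDBV registers at 0x80-byte strides from the port base; a sketch of the per-group pointer setup, with stride and offsets as listed:

/* Registers for slot group i sit at the port base plus i * 0x80. */
for (i = 0; i < dd->slot_groups; i++) {
	dd->port->s_active[i]  = dd->port->mmio + i * 0x80 + PORT_SCR_ACT;
	dd->port->cmd_issue[i] = dd->port->mmio + i * 0x80 + PORT_COMMAND_ISSUE;
	dd->port->completed[i] = dd->port->mmio + i * 0x80 + PORT_SDBV;
}
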
dd               2929 drivers/block/mtip32xx/mtip32xx.c 	while (((readl(dd->port->mmio + PORT_SCR_STAT) & 0x0F) != 0x03) &&
dd               2933 drivers/block/mtip32xx/mtip32xx.c 	if (unlikely(mtip_check_surprise_removal(dd->pdev))) {
dd               2935 drivers/block/mtip32xx/mtip32xx.c 		dev_warn(&dd->pdev->dev,
dd               2941 drivers/block/mtip32xx/mtip32xx.c 	if (unlikely(test_bit(MTIP_DDF_REMOVE_PENDING_BIT, &dd->dd_flag))) {
dd               2943 drivers/block/mtip32xx/mtip32xx.c 		dev_warn(&dd->pdev->dev,
dd               2951 drivers/block/mtip32xx/mtip32xx.c 	if (!(readl(dd->mmio + HOST_CAP) & HOST_CAP_NZDMA)) {
dd               2952 drivers/block/mtip32xx/mtip32xx.c 		if (mtip_hba_reset(dd) < 0) {
dd               2953 drivers/block/mtip32xx/mtip32xx.c 			dev_err(&dd->pdev->dev,
dd               2960 drivers/block/mtip32xx/mtip32xx.c 		writel(readl(dd->mmio + HOST_IRQ_STAT),
dd               2961 drivers/block/mtip32xx/mtip32xx.c 			dd->mmio + HOST_IRQ_STAT);
dd               2964 drivers/block/mtip32xx/mtip32xx.c 	mtip_init_port(dd->port);
dd               2965 drivers/block/mtip32xx/mtip32xx.c 	mtip_start_port(dd->port);
dd               2968 drivers/block/mtip32xx/mtip32xx.c 	rv = request_irq(dd->pdev->irq, mtip_irq_handler, IRQF_SHARED,
dd               2969 drivers/block/mtip32xx/mtip32xx.c 			 dev_driver_string(&dd->pdev->dev), dd);
dd               2971 drivers/block/mtip32xx/mtip32xx.c 		dev_err(&dd->pdev->dev,
dd               2972 drivers/block/mtip32xx/mtip32xx.c 			"Unable to allocate IRQ %d\n", dd->pdev->irq);
dd               2975 drivers/block/mtip32xx/mtip32xx.c 	irq_set_affinity_hint(dd->pdev->irq, get_cpu_mask(dd->isr_binding));
dd               2978 drivers/block/mtip32xx/mtip32xx.c 	writel(readl(dd->mmio + HOST_CTL) | HOST_IRQ_EN,
dd               2979 drivers/block/mtip32xx/mtip32xx.c 					dd->mmio + HOST_CTL);
dd               2981 drivers/block/mtip32xx/mtip32xx.c 	init_waitqueue_head(&dd->port->svc_wait);
dd               2983 drivers/block/mtip32xx/mtip32xx.c 	if (test_bit(MTIP_DDF_REMOVE_PENDING_BIT, &dd->dd_flag)) {
dd               2992 drivers/block/mtip32xx/mtip32xx.c 	writel(readl(dd->mmio + HOST_CTL) & ~HOST_IRQ_EN,
dd               2993 drivers/block/mtip32xx/mtip32xx.c 			dd->mmio + HOST_CTL);
dd               2996 drivers/block/mtip32xx/mtip32xx.c 	irq_set_affinity_hint(dd->pdev->irq, NULL);
dd               2997 drivers/block/mtip32xx/mtip32xx.c 	free_irq(dd->pdev->irq, dd);
dd               3000 drivers/block/mtip32xx/mtip32xx.c 	mtip_deinit_port(dd->port);
dd               3001 drivers/block/mtip32xx/mtip32xx.c 	mtip_dma_free(dd);
dd               3005 drivers/block/mtip32xx/mtip32xx.c 	kfree(dd->port);
dd               3010 drivers/block/mtip32xx/mtip32xx.c static int mtip_standby_drive(struct driver_data *dd)
dd               3014 drivers/block/mtip32xx/mtip32xx.c 	if (dd->sr || !dd->port)
dd               3020 drivers/block/mtip32xx/mtip32xx.c 	if (!test_bit(MTIP_PF_REBUILD_BIT, &dd->port->flags) &&
dd               3021 drivers/block/mtip32xx/mtip32xx.c 	    !test_bit(MTIP_DDF_REBUILD_FAILED_BIT, &dd->dd_flag) &&
dd               3022 drivers/block/mtip32xx/mtip32xx.c 	    !test_bit(MTIP_DDF_SEC_LOCK_BIT, &dd->dd_flag)) {
dd               3023 drivers/block/mtip32xx/mtip32xx.c 		rv = mtip_standby_immediate(dd->port);
dd               3025 drivers/block/mtip32xx/mtip32xx.c 			dev_warn(&dd->pdev->dev,
dd               3039 drivers/block/mtip32xx/mtip32xx.c static int mtip_hw_exit(struct driver_data *dd)
dd               3041 drivers/block/mtip32xx/mtip32xx.c 	if (!dd->sr) {
dd               3043 drivers/block/mtip32xx/mtip32xx.c 		mtip_deinit_port(dd->port);
dd               3046 drivers/block/mtip32xx/mtip32xx.c 		writel(readl(dd->mmio + HOST_CTL) & ~HOST_IRQ_EN,
dd               3047 drivers/block/mtip32xx/mtip32xx.c 				dd->mmio + HOST_CTL);
dd               3051 drivers/block/mtip32xx/mtip32xx.c 	irq_set_affinity_hint(dd->pdev->irq, NULL);
dd               3052 drivers/block/mtip32xx/mtip32xx.c 	free_irq(dd->pdev->irq, dd);
dd               3056 drivers/block/mtip32xx/mtip32xx.c 	mtip_dma_free(dd);
dd               3059 drivers/block/mtip32xx/mtip32xx.c 	kfree(dd->port);
dd               3060 drivers/block/mtip32xx/mtip32xx.c 	dd->port = NULL;
dd               3076 drivers/block/mtip32xx/mtip32xx.c static int mtip_hw_shutdown(struct driver_data *dd)
dd               3082 drivers/block/mtip32xx/mtip32xx.c 	mtip_standby_drive(dd);
dd               3099 drivers/block/mtip32xx/mtip32xx.c static int mtip_hw_suspend(struct driver_data *dd)
dd               3105 drivers/block/mtip32xx/mtip32xx.c 	if (mtip_standby_drive(dd) != 0) {
dd               3106 drivers/block/mtip32xx/mtip32xx.c 		dev_err(&dd->pdev->dev,
dd               3112 drivers/block/mtip32xx/mtip32xx.c 	writel(readl(dd->mmio + HOST_CTL) & ~HOST_IRQ_EN,
dd               3113 drivers/block/mtip32xx/mtip32xx.c 			dd->mmio + HOST_CTL);
dd               3114 drivers/block/mtip32xx/mtip32xx.c 	mtip_deinit_port(dd->port);
dd               3131 drivers/block/mtip32xx/mtip32xx.c static int mtip_hw_resume(struct driver_data *dd)
dd               3134 drivers/block/mtip32xx/mtip32xx.c 	hba_setup(dd);
dd               3137 drivers/block/mtip32xx/mtip32xx.c 	if (mtip_hba_reset(dd) != 0) {
dd               3138 drivers/block/mtip32xx/mtip32xx.c 		dev_err(&dd->pdev->dev,
dd               3147 drivers/block/mtip32xx/mtip32xx.c 	mtip_init_port(dd->port);
dd               3148 drivers/block/mtip32xx/mtip32xx.c 	mtip_start_port(dd->port);
dd               3151 drivers/block/mtip32xx/mtip32xx.c 	writel(readl(dd->mmio + HOST_CTL) | HOST_IRQ_EN,
dd               3152 drivers/block/mtip32xx/mtip32xx.c 			dd->mmio + HOST_CTL);
dd               3206 drivers/block/mtip32xx/mtip32xx.c 	struct driver_data *dd = dev->bd_disk->private_data;
dd               3211 drivers/block/mtip32xx/mtip32xx.c 	if (!dd)
dd               3214 drivers/block/mtip32xx/mtip32xx.c 	if (unlikely(test_bit(MTIP_DDF_REMOVE_PENDING_BIT, &dd->dd_flag)))
dd               3221 drivers/block/mtip32xx/mtip32xx.c 		return mtip_hw_ioctl(dd, cmd, arg);
dd               3244 drivers/block/mtip32xx/mtip32xx.c 	struct driver_data *dd = dev->bd_disk->private_data;
dd               3249 drivers/block/mtip32xx/mtip32xx.c 	if (!dd)
dd               3252 drivers/block/mtip32xx/mtip32xx.c 	if (unlikely(test_bit(MTIP_DDF_REMOVE_PENDING_BIT, &dd->dd_flag)))
dd               3281 drivers/block/mtip32xx/mtip32xx.c 		ret = exec_drive_taskfile(dd, (void __user *) arg,
dd               3298 drivers/block/mtip32xx/mtip32xx.c 		return mtip_hw_ioctl(dd, cmd, arg);
dd               3324 drivers/block/mtip32xx/mtip32xx.c 	struct driver_data *dd = dev->bd_disk->private_data;
dd               3327 drivers/block/mtip32xx/mtip32xx.c 	if (!dd)
dd               3330 drivers/block/mtip32xx/mtip32xx.c 	if (!(mtip_hw_get_capacity(dd, &capacity))) {
dd               3331 drivers/block/mtip32xx/mtip32xx.c 		dev_warn(&dd->pdev->dev,
dd               3345 drivers/block/mtip32xx/mtip32xx.c 	struct driver_data *dd;
dd               3348 drivers/block/mtip32xx/mtip32xx.c 		dd = (struct driver_data *) dev->bd_disk->private_data;
dd               3350 drivers/block/mtip32xx/mtip32xx.c 		if (dd) {
dd               3352 drivers/block/mtip32xx/mtip32xx.c 							&dd->dd_flag)) {
dd               3382 drivers/block/mtip32xx/mtip32xx.c static inline bool is_se_active(struct driver_data *dd)
dd               3384 drivers/block/mtip32xx/mtip32xx.c 	if (unlikely(test_bit(MTIP_PF_SE_ACTIVE_BIT, &dd->port->flags))) {
dd               3385 drivers/block/mtip32xx/mtip32xx.c 		if (dd->port->ic_pause_timer) {
dd               3386 drivers/block/mtip32xx/mtip32xx.c 			unsigned long to = dd->port->ic_pause_timer +
dd               3390 drivers/block/mtip32xx/mtip32xx.c 							&dd->port->flags);
dd               3391 drivers/block/mtip32xx/mtip32xx.c 				clear_bit(MTIP_DDF_SEC_LOCK_BIT, &dd->dd_flag);
dd               3392 drivers/block/mtip32xx/mtip32xx.c 				dd->port->ic_pause_timer = 0;
dd               3393 drivers/block/mtip32xx/mtip32xx.c 				wake_up_interruptible(&dd->port->svc_wait);
dd               3402 drivers/block/mtip32xx/mtip32xx.c static inline bool is_stopped(struct driver_data *dd, struct request *rq)
dd               3404 drivers/block/mtip32xx/mtip32xx.c 	if (likely(!(dd->dd_flag & MTIP_DDF_STOP_IO)))
dd               3407 drivers/block/mtip32xx/mtip32xx.c 	if (test_bit(MTIP_DDF_REMOVE_PENDING_BIT, &dd->dd_flag))
dd               3409 drivers/block/mtip32xx/mtip32xx.c 	if (test_bit(MTIP_DDF_OVER_TEMP_BIT, &dd->dd_flag))
dd               3411 drivers/block/mtip32xx/mtip32xx.c 	if (test_bit(MTIP_DDF_WRITE_PROTECT_BIT, &dd->dd_flag) &&
dd               3414 drivers/block/mtip32xx/mtip32xx.c 	if (test_bit(MTIP_DDF_SEC_LOCK_BIT, &dd->dd_flag))
dd               3416 drivers/block/mtip32xx/mtip32xx.c 	if (test_bit(MTIP_DDF_REBUILD_FAILED_BIT, &dd->dd_flag))
dd               3425 drivers/block/mtip32xx/mtip32xx.c 	struct driver_data *dd = hctx->queue->queuedata;
dd               3428 drivers/block/mtip32xx/mtip32xx.c 	if (rq_data_dir(rq) == READ || !dd->unal_qdepth)
dd               3440 drivers/block/mtip32xx/mtip32xx.c 	if (cmd->unaligned && atomic_dec_if_positive(&dd->port->cmd_slot_unal) >= 0)
dd               3449 drivers/block/mtip32xx/mtip32xx.c 	struct driver_data *dd = hctx->queue->queuedata;
dd               3453 drivers/block/mtip32xx/mtip32xx.c 		dd->port->command_list + sizeof(struct mtip_cmd_hdr) * rq->tag;
dd               3456 drivers/block/mtip32xx/mtip32xx.c 	if (mtip_commands_active(dd->port))
dd               3460 drivers/block/mtip32xx/mtip32xx.c 	if (test_bit(MTIP_PF_HOST_CAP_64, &dd->port->flags))
dd               3479 drivers/block/mtip32xx/mtip32xx.c 	mtip_issue_non_ncq_command(dd->port, rq->tag);
dd               3486 drivers/block/mtip32xx/mtip32xx.c 	struct driver_data *dd = hctx->queue->queuedata;
dd               3496 drivers/block/mtip32xx/mtip32xx.c 	if (is_se_active(dd) || is_stopped(dd, rq))
dd               3501 drivers/block/mtip32xx/mtip32xx.c 	mtip_hw_submit_io(dd, rq, cmd, hctx);
dd               3508 drivers/block/mtip32xx/mtip32xx.c 	struct driver_data *dd = set->driver_data;
dd               3514 drivers/block/mtip32xx/mtip32xx.c 	dma_free_coherent(&dd->pdev->dev, CMD_DMA_ALLOC_SZ, cmd->command,
dd               3521 drivers/block/mtip32xx/mtip32xx.c 	struct driver_data *dd = set->driver_data;
dd               3524 drivers/block/mtip32xx/mtip32xx.c 	cmd->command = dma_alloc_coherent(&dd->pdev->dev, CMD_DMA_ALLOC_SZ,
dd               3536 drivers/block/mtip32xx/mtip32xx.c 	struct driver_data *dd = req->q->queuedata;
dd               3546 drivers/block/mtip32xx/mtip32xx.c 	if (test_bit(req->tag, dd->port->cmds_to_issue))
dd               3549 drivers/block/mtip32xx/mtip32xx.c 	if (test_and_set_bit(MTIP_PF_TO_ACTIVE_BIT, &dd->port->flags))
dd               3552 drivers/block/mtip32xx/mtip32xx.c 	wake_up_interruptible(&dd->port->svc_wait);
dd               3576 drivers/block/mtip32xx/mtip32xx.c static int mtip_block_initialize(struct driver_data *dd)
dd               3583 drivers/block/mtip32xx/mtip32xx.c 	if (dd->disk)
dd               3586 drivers/block/mtip32xx/mtip32xx.c 	if (mtip_hw_init(dd)) {
dd               3591 drivers/block/mtip32xx/mtip32xx.c 	dd->disk = alloc_disk_node(MTIP_MAX_MINORS, dd->numa_node);
dd               3592 drivers/block/mtip32xx/mtip32xx.c 	if (dd->disk  == NULL) {
dd               3593 drivers/block/mtip32xx/mtip32xx.c 		dev_err(&dd->pdev->dev,
dd               3606 drivers/block/mtip32xx/mtip32xx.c 				dd->disk->disk_name,
dd               3611 drivers/block/mtip32xx/mtip32xx.c 	dd->disk->major		= dd->major;
dd               3612 drivers/block/mtip32xx/mtip32xx.c 	dd->disk->first_minor	= index * MTIP_MAX_MINORS;
dd               3613 drivers/block/mtip32xx/mtip32xx.c 	dd->disk->minors 	= MTIP_MAX_MINORS;
dd               3614 drivers/block/mtip32xx/mtip32xx.c 	dd->disk->fops		= &mtip_block_ops;
dd               3615 drivers/block/mtip32xx/mtip32xx.c 	dd->disk->private_data	= dd;
dd               3616 drivers/block/mtip32xx/mtip32xx.c 	dd->index		= index;
dd               3618 drivers/block/mtip32xx/mtip32xx.c 	mtip_hw_debugfs_init(dd);
dd               3620 drivers/block/mtip32xx/mtip32xx.c 	memset(&dd->tags, 0, sizeof(dd->tags));
dd               3621 drivers/block/mtip32xx/mtip32xx.c 	dd->tags.ops = &mtip_mq_ops;
dd               3622 drivers/block/mtip32xx/mtip32xx.c 	dd->tags.nr_hw_queues = 1;
dd               3623 drivers/block/mtip32xx/mtip32xx.c 	dd->tags.queue_depth = MTIP_MAX_COMMAND_SLOTS;
dd               3624 drivers/block/mtip32xx/mtip32xx.c 	dd->tags.reserved_tags = 1;
dd               3625 drivers/block/mtip32xx/mtip32xx.c 	dd->tags.cmd_size = sizeof(struct mtip_cmd);
dd               3626 drivers/block/mtip32xx/mtip32xx.c 	dd->tags.numa_node = dd->numa_node;
dd               3627 drivers/block/mtip32xx/mtip32xx.c 	dd->tags.flags = BLK_MQ_F_SHOULD_MERGE;
dd               3628 drivers/block/mtip32xx/mtip32xx.c 	dd->tags.driver_data = dd;
dd               3629 drivers/block/mtip32xx/mtip32xx.c 	dd->tags.timeout = MTIP_NCQ_CMD_TIMEOUT_MS;
dd               3631 drivers/block/mtip32xx/mtip32xx.c 	rv = blk_mq_alloc_tag_set(&dd->tags);
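
The fields filled in above are exactly what blk_mq_alloc_tag_set() consumes: one hardware queue, one tag reserved for the internal command, and per-command driver data; gathered into a single helper for illustration:

#include <linux/blk-mq.h>
#include <linux/string.h>

static int mtip_init_tags(struct driver_data *dd)
{
	memset(&dd->tags, 0, sizeof(dd->tags));
	dd->tags.ops           = &mtip_mq_ops;
	dd->tags.nr_hw_queues  = 1;
	dd->tags.queue_depth   = MTIP_MAX_COMMAND_SLOTS;
	dd->tags.reserved_tags = 1;		/* internal command slot */
	dd->tags.cmd_size      = sizeof(struct mtip_cmd);
	dd->tags.numa_node     = dd->numa_node;
	dd->tags.flags         = BLK_MQ_F_SHOULD_MERGE;
	dd->tags.driver_data   = dd;
	dd->tags.timeout       = MTIP_NCQ_CMD_TIMEOUT_MS;

	return blk_mq_alloc_tag_set(&dd->tags);
}
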
dd               3633 drivers/block/mtip32xx/mtip32xx.c 		dev_err(&dd->pdev->dev,
dd               3639 drivers/block/mtip32xx/mtip32xx.c 	dd->queue = blk_mq_init_queue(&dd->tags);
dd               3640 drivers/block/mtip32xx/mtip32xx.c 	if (IS_ERR(dd->queue)) {
dd               3641 drivers/block/mtip32xx/mtip32xx.c 		dev_err(&dd->pdev->dev,
dd               3647 drivers/block/mtip32xx/mtip32xx.c 	dd->disk->queue		= dd->queue;
dd               3648 drivers/block/mtip32xx/mtip32xx.c 	dd->queue->queuedata	= dd;
dd               3652 drivers/block/mtip32xx/mtip32xx.c 	wait_for_rebuild = mtip_hw_get_identify(dd);
dd               3654 drivers/block/mtip32xx/mtip32xx.c 		dev_err(&dd->pdev->dev,
dd               3668 drivers/block/mtip32xx/mtip32xx.c 	blk_queue_flag_set(QUEUE_FLAG_NONROT, dd->queue);
dd               3669 drivers/block/mtip32xx/mtip32xx.c 	blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, dd->queue);
dd               3670 drivers/block/mtip32xx/mtip32xx.c 	blk_queue_max_segments(dd->queue, MTIP_MAX_SG);
dd               3671 drivers/block/mtip32xx/mtip32xx.c 	blk_queue_physical_block_size(dd->queue, 4096);
dd               3672 drivers/block/mtip32xx/mtip32xx.c 	blk_queue_max_hw_sectors(dd->queue, 0xffff);
dd               3673 drivers/block/mtip32xx/mtip32xx.c 	blk_queue_max_segment_size(dd->queue, 0x400000);
dd               3674 drivers/block/mtip32xx/mtip32xx.c 	dma_set_max_seg_size(&dd->pdev->dev, 0x400000);
dd               3675 drivers/block/mtip32xx/mtip32xx.c 	blk_queue_io_min(dd->queue, 4096);
dd               3678 drivers/block/mtip32xx/mtip32xx.c 	if (!(mtip_hw_get_capacity(dd, &capacity))) {
dd               3679 drivers/block/mtip32xx/mtip32xx.c 		dev_warn(&dd->pdev->dev,
dd               3684 drivers/block/mtip32xx/mtip32xx.c 	set_capacity(dd->disk, capacity);
dd               3687 drivers/block/mtip32xx/mtip32xx.c 	device_add_disk(&dd->pdev->dev, dd->disk, NULL);
dd               3689 drivers/block/mtip32xx/mtip32xx.c 	dd->bdev = bdget_disk(dd->disk, 0);
dd               3694 drivers/block/mtip32xx/mtip32xx.c 	kobj = kobject_get(&disk_to_dev(dd->disk)->kobj);
dd               3696 drivers/block/mtip32xx/mtip32xx.c 		mtip_hw_sysfs_init(dd, kobj);
dd               3700 drivers/block/mtip32xx/mtip32xx.c 	if (dd->mtip_svc_handler) {
dd               3701 drivers/block/mtip32xx/mtip32xx.c 		set_bit(MTIP_DDF_INIT_DONE_BIT, &dd->dd_flag);
dd               3706 drivers/block/mtip32xx/mtip32xx.c 	dd->mtip_svc_handler = kthread_create_on_node(mtip_service_thread,
dd               3707 drivers/block/mtip32xx/mtip32xx.c 						dd, dd->numa_node,
dd               3710 drivers/block/mtip32xx/mtip32xx.c 	if (IS_ERR(dd->mtip_svc_handler)) {
dd               3711 drivers/block/mtip32xx/mtip32xx.c 		dev_err(&dd->pdev->dev, "service thread failed to start\n");
dd               3712 drivers/block/mtip32xx/mtip32xx.c 		dd->mtip_svc_handler = NULL;
dd               3716 drivers/block/mtip32xx/mtip32xx.c 	wake_up_process(dd->mtip_svc_handler);
dd               3723 drivers/block/mtip32xx/mtip32xx.c 	bdput(dd->bdev);
dd               3724 drivers/block/mtip32xx/mtip32xx.c 	dd->bdev = NULL;
dd               3727 drivers/block/mtip32xx/mtip32xx.c 	del_gendisk(dd->disk);
dd               3731 drivers/block/mtip32xx/mtip32xx.c 	blk_cleanup_queue(dd->queue);
dd               3733 drivers/block/mtip32xx/mtip32xx.c 	blk_mq_free_tag_set(&dd->tags);
dd               3735 drivers/block/mtip32xx/mtip32xx.c 	mtip_hw_debugfs_exit(dd);
dd               3740 drivers/block/mtip32xx/mtip32xx.c 	put_disk(dd->disk);
dd               3743 drivers/block/mtip32xx/mtip32xx.c 	mtip_hw_exit(dd); /* De-initialize the protocol layer. */
dd               3768 drivers/block/mtip32xx/mtip32xx.c static int mtip_block_remove(struct driver_data *dd)
dd               3772 drivers/block/mtip32xx/mtip32xx.c 	mtip_hw_debugfs_exit(dd);
dd               3774 drivers/block/mtip32xx/mtip32xx.c 	if (dd->mtip_svc_handler) {
dd               3775 drivers/block/mtip32xx/mtip32xx.c 		set_bit(MTIP_PF_SVC_THD_STOP_BIT, &dd->port->flags);
dd               3776 drivers/block/mtip32xx/mtip32xx.c 		wake_up_interruptible(&dd->port->svc_wait);
dd               3777 drivers/block/mtip32xx/mtip32xx.c 		kthread_stop(dd->mtip_svc_handler);
dd               3781 drivers/block/mtip32xx/mtip32xx.c 	if (test_bit(MTIP_DDF_INIT_DONE_BIT, &dd->dd_flag)) {
dd               3782 drivers/block/mtip32xx/mtip32xx.c 		kobj = kobject_get(&disk_to_dev(dd->disk)->kobj);
dd               3784 drivers/block/mtip32xx/mtip32xx.c 			mtip_hw_sysfs_exit(dd, kobj);
dd               3789 drivers/block/mtip32xx/mtip32xx.c 	if (!dd->sr) {
dd               3794 drivers/block/mtip32xx/mtip32xx.c 		if (!mtip_quiesce_io(dd->port, MTIP_QUIESCE_IO_TIMEOUT_MS))
dd               3795 drivers/block/mtip32xx/mtip32xx.c 			mtip_standby_drive(dd);
dd               3798 drivers/block/mtip32xx/mtip32xx.c 		dev_info(&dd->pdev->dev, "device %s surprise removal\n",
dd               3799 drivers/block/mtip32xx/mtip32xx.c 						dd->disk->disk_name);
dd               3801 drivers/block/mtip32xx/mtip32xx.c 	blk_freeze_queue_start(dd->queue);
dd               3802 drivers/block/mtip32xx/mtip32xx.c 	blk_mq_quiesce_queue(dd->queue);
dd               3803 drivers/block/mtip32xx/mtip32xx.c 	blk_mq_tagset_busy_iter(&dd->tags, mtip_no_dev_cleanup, dd);
dd               3804 drivers/block/mtip32xx/mtip32xx.c 	blk_mq_unquiesce_queue(dd->queue);
dd               3810 drivers/block/mtip32xx/mtip32xx.c 	if (dd->bdev) {
dd               3811 drivers/block/mtip32xx/mtip32xx.c 		bdput(dd->bdev);
dd               3812 drivers/block/mtip32xx/mtip32xx.c 		dd->bdev = NULL;
dd               3814 drivers/block/mtip32xx/mtip32xx.c 	if (dd->disk) {
dd               3815 drivers/block/mtip32xx/mtip32xx.c 		if (test_bit(MTIP_DDF_INIT_DONE_BIT, &dd->dd_flag))
dd               3816 drivers/block/mtip32xx/mtip32xx.c 			del_gendisk(dd->disk);
dd               3817 drivers/block/mtip32xx/mtip32xx.c 		if (dd->disk->queue) {
dd               3818 drivers/block/mtip32xx/mtip32xx.c 			blk_cleanup_queue(dd->queue);
dd               3819 drivers/block/mtip32xx/mtip32xx.c 			blk_mq_free_tag_set(&dd->tags);
dd               3820 drivers/block/mtip32xx/mtip32xx.c 			dd->queue = NULL;
dd               3822 drivers/block/mtip32xx/mtip32xx.c 		put_disk(dd->disk);
dd               3824 drivers/block/mtip32xx/mtip32xx.c 	dd->disk  = NULL;
dd               3826 drivers/block/mtip32xx/mtip32xx.c 	ida_free(&rssd_index_ida, dd->index);
dd               3829 drivers/block/mtip32xx/mtip32xx.c 	mtip_hw_exit(dd);
dd               3846 drivers/block/mtip32xx/mtip32xx.c static int mtip_block_shutdown(struct driver_data *dd)
dd               3848 drivers/block/mtip32xx/mtip32xx.c 	mtip_hw_shutdown(dd);
dd               3851 drivers/block/mtip32xx/mtip32xx.c 	if (dd->disk) {
dd               3852 drivers/block/mtip32xx/mtip32xx.c 		dev_info(&dd->pdev->dev,
dd               3853 drivers/block/mtip32xx/mtip32xx.c 			"Shutting down %s ...\n", dd->disk->disk_name);
dd               3855 drivers/block/mtip32xx/mtip32xx.c 		if (test_bit(MTIP_DDF_INIT_DONE_BIT, &dd->dd_flag))
dd               3856 drivers/block/mtip32xx/mtip32xx.c 			del_gendisk(dd->disk);
dd               3857 drivers/block/mtip32xx/mtip32xx.c 		if (dd->disk->queue) {
dd               3858 drivers/block/mtip32xx/mtip32xx.c 			blk_cleanup_queue(dd->queue);
dd               3859 drivers/block/mtip32xx/mtip32xx.c 			blk_mq_free_tag_set(&dd->tags);
dd               3861 drivers/block/mtip32xx/mtip32xx.c 		put_disk(dd->disk);
dd               3862 drivers/block/mtip32xx/mtip32xx.c 		dd->disk  = NULL;
dd               3863 drivers/block/mtip32xx/mtip32xx.c 		dd->queue = NULL;
dd               3866 drivers/block/mtip32xx/mtip32xx.c 	ida_free(&rssd_index_ida, dd->index);
dd               3870 drivers/block/mtip32xx/mtip32xx.c static int mtip_block_suspend(struct driver_data *dd)
dd               3872 drivers/block/mtip32xx/mtip32xx.c 	dev_info(&dd->pdev->dev,
dd               3873 drivers/block/mtip32xx/mtip32xx.c 		"Suspending %s ...\n", dd->disk->disk_name);
dd               3874 drivers/block/mtip32xx/mtip32xx.c 	mtip_hw_suspend(dd);
dd               3878 drivers/block/mtip32xx/mtip32xx.c static int mtip_block_resume(struct driver_data *dd)
dd               3880 drivers/block/mtip32xx/mtip32xx.c 	dev_info(&dd->pdev->dev, "Resuming %s ...\n",
dd               3881 drivers/block/mtip32xx/mtip32xx.c 		dd->disk->disk_name);
dd               3882 drivers/block/mtip32xx/mtip32xx.c 	mtip_hw_resume(dd);
dd               3936 drivers/block/mtip32xx/mtip32xx.c static void mtip_disable_link_opts(struct driver_data *dd, struct pci_dev *pdev)
dd               3948 drivers/block/mtip32xx/mtip32xx.c 			dev_info(&dd->pdev->dev,
dd               3960 drivers/block/mtip32xx/mtip32xx.c static void mtip_fix_ero_nosnoop(struct driver_data *dd, struct pci_dev *pdev)
dd               3969 drivers/block/mtip32xx/mtip32xx.c 			mtip_disable_link_opts(dd, pdev->bus->self);
dd               3980 drivers/block/mtip32xx/mtip32xx.c 				mtip_disable_link_opts(dd,
dd               4000 drivers/block/mtip32xx/mtip32xx.c 	struct driver_data *dd = NULL;
dd               4020 drivers/block/mtip32xx/mtip32xx.c 	dd = kzalloc_node(sizeof(struct driver_data), GFP_KERNEL, my_node);
dd               4021 drivers/block/mtip32xx/mtip32xx.c 	if (dd == NULL) {
dd               4028 drivers/block/mtip32xx/mtip32xx.c 	pci_set_drvdata(pdev, dd);
dd               4050 drivers/block/mtip32xx/mtip32xx.c 	dd->major	= mtip_major;
dd               4051 drivers/block/mtip32xx/mtip32xx.c 	dd->instance	= instance;
dd               4052 drivers/block/mtip32xx/mtip32xx.c 	dd->pdev	= pdev;
dd               4053 drivers/block/mtip32xx/mtip32xx.c 	dd->numa_node	= my_node;
dd               4055 drivers/block/mtip32xx/mtip32xx.c 	INIT_LIST_HEAD(&dd->online_list);
dd               4056 drivers/block/mtip32xx/mtip32xx.c 	INIT_LIST_HEAD(&dd->remove_list);
dd               4058 drivers/block/mtip32xx/mtip32xx.c 	memset(dd->workq_name, 0, 32);
dd               4059 drivers/block/mtip32xx/mtip32xx.c 	snprintf(dd->workq_name, 31, "mtipq%d", dd->instance);
dd               4061 drivers/block/mtip32xx/mtip32xx.c 	dd->isr_workq = create_workqueue(dd->workq_name);
dd               4062 drivers/block/mtip32xx/mtip32xx.c 	if (!dd->isr_workq) {
dd               4063 drivers/block/mtip32xx/mtip32xx.c 		dev_warn(&pdev->dev, "Can't create wq %d\n", dd->instance);
dd               4070 drivers/block/mtip32xx/mtip32xx.c 	node_mask = cpumask_of_node(dd->numa_node);
dd               4079 drivers/block/mtip32xx/mtip32xx.c 			dd->numa_node,
dd               4081 drivers/block/mtip32xx/mtip32xx.c 			nr_cpus_node(dd->numa_node),
dd               4086 drivers/block/mtip32xx/mtip32xx.c 	dd->isr_binding = get_least_used_cpu_on_node(dd->numa_node);
dd               4088 drivers/block/mtip32xx/mtip32xx.c 		cpu_to_node(dd->isr_binding), dd->isr_binding);
dd               4091 drivers/block/mtip32xx/mtip32xx.c 	dd->work[0].cpu_binding = dd->isr_binding;
dd               4092 drivers/block/mtip32xx/mtip32xx.c 	dd->work[1].cpu_binding = get_least_used_cpu_on_node(dd->numa_node);
dd               4093 drivers/block/mtip32xx/mtip32xx.c 	dd->work[2].cpu_binding = get_least_used_cpu_on_node(dd->numa_node);
dd               4094 drivers/block/mtip32xx/mtip32xx.c 	dd->work[3].cpu_binding = dd->work[0].cpu_binding;
dd               4095 drivers/block/mtip32xx/mtip32xx.c 	dd->work[4].cpu_binding = dd->work[1].cpu_binding;
dd               4096 drivers/block/mtip32xx/mtip32xx.c 	dd->work[5].cpu_binding = dd->work[2].cpu_binding;
dd               4097 drivers/block/mtip32xx/mtip32xx.c 	dd->work[6].cpu_binding = dd->work[2].cpu_binding;
dd               4098 drivers/block/mtip32xx/mtip32xx.c 	dd->work[7].cpu_binding = dd->work[1].cpu_binding;
dd               4104 drivers/block/mtip32xx/mtip32xx.c 			if (dd->work[i].cpu_binding == cpu) {
dd               4113 drivers/block/mtip32xx/mtip32xx.c 	INIT_WORK(&dd->work[0].work, mtip_workq_sdbf0);
dd               4114 drivers/block/mtip32xx/mtip32xx.c 	INIT_WORK(&dd->work[1].work, mtip_workq_sdbf1);
dd               4115 drivers/block/mtip32xx/mtip32xx.c 	INIT_WORK(&dd->work[2].work, mtip_workq_sdbf2);
dd               4116 drivers/block/mtip32xx/mtip32xx.c 	INIT_WORK(&dd->work[3].work, mtip_workq_sdbf3);
dd               4117 drivers/block/mtip32xx/mtip32xx.c 	INIT_WORK(&dd->work[4].work, mtip_workq_sdbf4);
dd               4118 drivers/block/mtip32xx/mtip32xx.c 	INIT_WORK(&dd->work[5].work, mtip_workq_sdbf5);
dd               4119 drivers/block/mtip32xx/mtip32xx.c 	INIT_WORK(&dd->work[6].work, mtip_workq_sdbf6);
dd               4120 drivers/block/mtip32xx/mtip32xx.c 	INIT_WORK(&dd->work[7].work, mtip_workq_sdbf7);
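
The eight sdbf workers are spread across CPUs on the device's NUMA node and then initialized one per slot group; a sketch that collapses the eight INIT_WORK calls into a table (the handler names are from the listing; the array shape matching the eight slot groups is an assumption):

#include <linux/workqueue.h>

/* One worker per slot-group status register (8 groups on this hardware). */
static work_func_t sdbf_handlers[] = {
	mtip_workq_sdbf0, mtip_workq_sdbf1, mtip_workq_sdbf2, mtip_workq_sdbf3,
	mtip_workq_sdbf4, mtip_workq_sdbf5, mtip_workq_sdbf6, mtip_workq_sdbf7,
};

for (i = 0; i < ARRAY_SIZE(sdbf_handlers); i++)
	INIT_WORK(&dd->work[i].work, sdbf_handlers[i]);
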
dd               4130 drivers/block/mtip32xx/mtip32xx.c 	mtip_fix_ero_nosnoop(dd, pdev);
dd               4133 drivers/block/mtip32xx/mtip32xx.c 	rv = mtip_block_initialize(dd);
dd               4146 drivers/block/mtip32xx/mtip32xx.c 		set_bit(MTIP_DDF_INIT_DONE_BIT, &dd->dd_flag);
dd               4152 drivers/block/mtip32xx/mtip32xx.c 	list_add(&dd->online_list, &online_list);
dd               4161 drivers/block/mtip32xx/mtip32xx.c 	if (dd->isr_workq) {
dd               4162 drivers/block/mtip32xx/mtip32xx.c 		flush_workqueue(dd->isr_workq);
dd               4163 drivers/block/mtip32xx/mtip32xx.c 		destroy_workqueue(dd->isr_workq);
dd               4164 drivers/block/mtip32xx/mtip32xx.c 		drop_cpu(dd->work[0].cpu_binding);
dd               4165 drivers/block/mtip32xx/mtip32xx.c 		drop_cpu(dd->work[1].cpu_binding);
dd               4166 drivers/block/mtip32xx/mtip32xx.c 		drop_cpu(dd->work[2].cpu_binding);
dd               4172 drivers/block/mtip32xx/mtip32xx.c 	kfree(dd);
dd               4188 drivers/block/mtip32xx/mtip32xx.c 	struct driver_data *dd = pci_get_drvdata(pdev);
dd               4191 drivers/block/mtip32xx/mtip32xx.c 	set_bit(MTIP_DDF_REMOVAL_BIT, &dd->dd_flag);
dd               4194 drivers/block/mtip32xx/mtip32xx.c 	list_del_init(&dd->online_list);
dd               4195 drivers/block/mtip32xx/mtip32xx.c 	list_add(&dd->remove_list, &removing_list);
dd               4199 drivers/block/mtip32xx/mtip32xx.c 	synchronize_irq(dd->pdev->irq);
dd               4205 drivers/block/mtip32xx/mtip32xx.c 	} while (atomic_read(&dd->irq_workers_active) != 0 &&
dd               4208 drivers/block/mtip32xx/mtip32xx.c 	if (!dd->sr)
dd               4209 drivers/block/mtip32xx/mtip32xx.c 		fsync_bdev(dd->bdev);
dd               4211 drivers/block/mtip32xx/mtip32xx.c 	if (atomic_read(&dd->irq_workers_active) != 0) {
dd               4212 drivers/block/mtip32xx/mtip32xx.c 		dev_warn(&dd->pdev->dev,
dd               4216 drivers/block/mtip32xx/mtip32xx.c 	blk_set_queue_dying(dd->queue);
dd               4217 drivers/block/mtip32xx/mtip32xx.c 	set_bit(MTIP_DDF_REMOVE_PENDING_BIT, &dd->dd_flag);
dd               4220 drivers/block/mtip32xx/mtip32xx.c 	mtip_block_remove(dd);
dd               4222 drivers/block/mtip32xx/mtip32xx.c 	if (dd->isr_workq) {
dd               4223 drivers/block/mtip32xx/mtip32xx.c 		flush_workqueue(dd->isr_workq);
dd               4224 drivers/block/mtip32xx/mtip32xx.c 		destroy_workqueue(dd->isr_workq);
dd               4225 drivers/block/mtip32xx/mtip32xx.c 		drop_cpu(dd->work[0].cpu_binding);
dd               4226 drivers/block/mtip32xx/mtip32xx.c 		drop_cpu(dd->work[1].cpu_binding);
dd               4227 drivers/block/mtip32xx/mtip32xx.c 		drop_cpu(dd->work[2].cpu_binding);
dd               4233 drivers/block/mtip32xx/mtip32xx.c 	list_del_init(&dd->remove_list);
dd               4236 drivers/block/mtip32xx/mtip32xx.c 	kfree(dd);
dd               4252 drivers/block/mtip32xx/mtip32xx.c 	struct driver_data *dd = pci_get_drvdata(pdev);
dd               4254 drivers/block/mtip32xx/mtip32xx.c 	if (!dd) {
dd               4260 drivers/block/mtip32xx/mtip32xx.c 	set_bit(MTIP_DDF_RESUME_BIT, &dd->dd_flag);
dd               4263 drivers/block/mtip32xx/mtip32xx.c 	rv = mtip_block_suspend(dd);
dd               4293 drivers/block/mtip32xx/mtip32xx.c 	struct driver_data *dd;
dd               4295 drivers/block/mtip32xx/mtip32xx.c 	dd = pci_get_drvdata(pdev);
dd               4296 drivers/block/mtip32xx/mtip32xx.c 	if (!dd) {
dd               4321 drivers/block/mtip32xx/mtip32xx.c 	rv = mtip_block_resume(dd);
dd               4326 drivers/block/mtip32xx/mtip32xx.c 	clear_bit(MTIP_DDF_RESUME_BIT, &dd->dd_flag);
dd               4339 drivers/block/mtip32xx/mtip32xx.c 	struct driver_data *dd = pci_get_drvdata(pdev);
dd               4340 drivers/block/mtip32xx/mtip32xx.c 	if (dd)
dd               4341 drivers/block/mtip32xx/mtip32xx.c 		mtip_block_shutdown(dd);
dd                325 drivers/block/mtip32xx/mtip32xx.h 	struct driver_data *dd;
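The mtip32xx hits above (mtip32xx.c:4188-4236) trace the PCI remove path: set MTIP_DDF_REMOVAL_BIT, synchronize the IRQ, poll until irq_workers_active drains, then flush and destroy the ISR workqueue before kfree(dd). Below is a minimal ISO-C sketch of that drain-then-teardown ordering, with the worker count as a C11 atomic and the sleep between polls elided; all names are illustrative, not the driver's.

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdio.h>

    static atomic_int irq_workers_active;

    /* Poll until the in-flight worker count drains or the poll budget
     * is spent; mirrors the remove-path loop (check, sleep, re-check).
     * The msleep() between polls is elided to keep this ISO C. */
    static bool drain_workers(int polls)
    {
        while (polls-- > 0 && atomic_load(&irq_workers_active) != 0)
            atomic_fetch_sub(&irq_workers_active, 1); /* demo: a worker exits */
        return atomic_load(&irq_workers_active) == 0;
    }

    int main(void)
    {
        atomic_store(&irq_workers_active, 3);
        printf("drained: %s\n", drain_workers(25) ? "yes" : "no");
        return 0;
    }

Only after the drain succeeds (or times out with a warning, as at mtip32xx.c:4211-4212) is it safe to tear down the workqueue and free dd.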
dd                642 drivers/clk/berlin/bg2.c 		const struct berlin2_div_data *dd = &bg2_divs[n];
dd                645 drivers/clk/berlin/bg2.c 		for (k = 0; k < dd->num_parents; k++)
dd                646 drivers/clk/berlin/bg2.c 			parent_names[k] = clk_names[dd->parent_ids[k]];
dd                648 drivers/clk/berlin/bg2.c 		hws[CLKID_SYS + n] = berlin2_div_register(&dd->map, gbase,
dd                649 drivers/clk/berlin/bg2.c 				dd->name, dd->div_flags, parent_names,
dd                650 drivers/clk/berlin/bg2.c 				dd->num_parents, dd->flags, &lock);
dd                335 drivers/clk/berlin/bg2q.c 		const struct berlin2_div_data *dd = &bg2q_divs[n];
dd                338 drivers/clk/berlin/bg2q.c 		for (k = 0; k < dd->num_parents; k++)
dd                339 drivers/clk/berlin/bg2q.c 			parent_names[k] = clk_names[dd->parent_ids[k]];
dd                341 drivers/clk/berlin/bg2q.c 		hws[CLKID_SYS + n] = berlin2_div_register(&dd->map, gbase,
dd                342 drivers/clk/berlin/bg2q.c 				dd->name, dd->div_flags, parent_names,
dd                343 drivers/clk/berlin/bg2q.c 				dd->num_parents, dd->flags, &lock);
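bg2.c and bg2q.c walk identical table-driven loops: for each berlin2_div_data entry, resolve parent_ids into parent_names, then hand the map to berlin2_div_register(). A small standalone sketch of that resolve-then-register pattern follows; the struct layout and clock names are placeholders, not the Berlin driver's tables.

    #include <stdio.h>

    #define MAX_PARENTS 4

    struct div_data {
        const char *name;
        int parent_ids[MAX_PARENTS];
        int num_parents;
    };

    static const char *clk_names[] = { "osc", "pll0", "pll1", "pll2" };

    static const struct div_data divs[] = {
        { "sys", { 1, 2 },    2 },
        { "cpu", { 0, 1, 3 }, 3 },
    };

    int main(void)
    {
        const char *parent_names[MAX_PARENTS];

        for (size_t n = 0; n < sizeof(divs) / sizeof(divs[0]); n++) {
            const struct div_data *dd = &divs[n];

            /* Resolve parent ids into names before registration,
             * as the bg2/bg2q loops do for berlin2_div_register(). */
            for (int k = 0; k < dd->num_parents; k++)
                parent_names[k] = clk_names[dd->parent_ids[k]];

            printf("register %s (%d parents, first %s)\n",
                   dd->name, dd->num_parents, parent_names[0]);
        }
        return 0;
    }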
dd                 69 drivers/clk/ti/clkt_dpll.c 	struct dpll_data *dd;
dd                 73 drivers/clk/ti/clkt_dpll.c 	dd = clk->dpll_data;
dd                 78 drivers/clk/ti/clkt_dpll.c 	if (dd->flags & DPLL_J_TYPE) {
dd                 94 drivers/clk/ti/clkt_dpll.c 		dd->max_divider = n;
dd                 99 drivers/clk/ti/clkt_dpll.c 		dd->min_divider = n;
dd                207 drivers/clk/ti/clkt_dpll.c 	struct dpll_data *dd;
dd                209 drivers/clk/ti/clkt_dpll.c 	dd = clk->dpll_data;
dd                210 drivers/clk/ti/clkt_dpll.c 	if (!dd)
dd                213 drivers/clk/ti/clkt_dpll.c 	v = ti_clk_ll_ops->clk_readl(&dd->control_reg);
dd                214 drivers/clk/ti/clkt_dpll.c 	v &= dd->enable_mask;
dd                215 drivers/clk/ti/clkt_dpll.c 	v >>= __ffs(dd->enable_mask);
dd                242 drivers/clk/ti/clkt_dpll.c 	struct dpll_data *dd;
dd                244 drivers/clk/ti/clkt_dpll.c 	dd = clk->dpll_data;
dd                245 drivers/clk/ti/clkt_dpll.c 	if (!dd)
dd                249 drivers/clk/ti/clkt_dpll.c 	v = ti_clk_ll_ops->clk_readl(&dd->control_reg);
dd                250 drivers/clk/ti/clkt_dpll.c 	v &= dd->enable_mask;
dd                251 drivers/clk/ti/clkt_dpll.c 	v >>= __ffs(dd->enable_mask);
dd                254 drivers/clk/ti/clkt_dpll.c 		return clk_hw_get_rate(dd->clk_bypass);
dd                256 drivers/clk/ti/clkt_dpll.c 	v = ti_clk_ll_ops->clk_readl(&dd->mult_div1_reg);
dd                257 drivers/clk/ti/clkt_dpll.c 	dpll_mult = v & dd->mult_mask;
dd                258 drivers/clk/ti/clkt_dpll.c 	dpll_mult >>= __ffs(dd->mult_mask);
dd                259 drivers/clk/ti/clkt_dpll.c 	dpll_div = v & dd->div1_mask;
dd                260 drivers/clk/ti/clkt_dpll.c 	dpll_div >>= __ffs(dd->div1_mask);
dd                262 drivers/clk/ti/clkt_dpll.c 	dpll_clk = (u64)clk_hw_get_rate(dd->clk_ref) * dpll_mult;
dd                290 drivers/clk/ti/clkt_dpll.c 	struct dpll_data *dd;
dd                299 drivers/clk/ti/clkt_dpll.c 	dd = clk->dpll_data;
dd                301 drivers/clk/ti/clkt_dpll.c 	if (dd->max_rate && target_rate > dd->max_rate)
dd                302 drivers/clk/ti/clkt_dpll.c 		target_rate = dd->max_rate;
dd                304 drivers/clk/ti/clkt_dpll.c 	ref_rate = clk_hw_get_rate(dd->clk_ref);
dd                310 drivers/clk/ti/clkt_dpll.c 	scaled_max_m = dd->max_multiplier * DPLL_SCALE_FACTOR;
dd                312 drivers/clk/ti/clkt_dpll.c 	dd->last_rounded_rate = 0;
dd                314 drivers/clk/ti/clkt_dpll.c 	for (n = dd->min_divider; n <= dd->max_divider; n++) {
dd                365 drivers/clk/ti/clkt_dpll.c 	dd->last_rounded_m = min_delta_m;
dd                366 drivers/clk/ti/clkt_dpll.c 	dd->last_rounded_n = min_delta_n;
dd                367 drivers/clk/ti/clkt_dpll.c 	dd->last_rounded_rate = target_rate - prev_min_delta;
dd                369 drivers/clk/ti/clkt_dpll.c 	return dd->last_rounded_rate;
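The clkt_dpll.c hits outline the round_rate path: clamp the target to dd->max_rate, then scan n from min_divider to max_divider looking for the multiplier that lands closest to the target, caching the winners in last_rounded_m/n/rate. A compact sketch of that M/N search under the usual rate = ref * M / N model; the limits in main() are made up.

    #include <stdint.h>
    #include <stdio.h>

    /* Find (m, n) minimizing |target - ref*m/n|, as the TI DPLL
     * round-rate loop does over n = min_div..max_div. */
    static uint64_t dpll_round(uint64_t ref, uint64_t target,
                               unsigned max_m, unsigned min_n, unsigned max_n,
                               unsigned *best_m, unsigned *best_n)
    {
        uint64_t best = 0, best_delta = UINT64_MAX;

        for (unsigned n = min_n; n <= max_n; n++) {
            /* Nearest integer multiplier for this divider. */
            unsigned m = (unsigned)((target * n + ref / 2) / ref);

            if (m == 0 || m > max_m)
                continue;

            uint64_t rate  = ref * m / n;
            uint64_t delta = rate > target ? rate - target : target - rate;

            if (delta < best_delta) {
                best_delta = delta;
                best = rate;
                *best_m = m;
                *best_n = n;
            }
        }
        return best;
    }

    int main(void)
    {
        unsigned m, n;
        uint64_t r = dpll_round(26000000, 1200000000, 4095, 1, 127, &m, &n);

        printf("rate=%llu m=%u n=%u\n", (unsigned long long)r, m, n);
        return 0;
    }

Rounding m to the nearest integer for each n keeps the scan O(max_n) rather than testing every (m, n) pair.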
dd                166 drivers/clk/ti/dpll.c 	struct dpll_data *dd = clk_hw->dpll_data;
dd                180 drivers/clk/ti/dpll.c 	dd->clk_ref = __clk_get_hw(clk);
dd                193 drivers/clk/ti/dpll.c 	dd->clk_bypass = __clk_get_hw(clk);
dd                292 drivers/clk/ti/dpll.c 	struct dpll_data *dd = NULL;
dd                295 drivers/clk/ti/dpll.c 	dd = kmemdup(ddt, sizeof(*dd), GFP_KERNEL);
dd                298 drivers/clk/ti/dpll.c 	if (!dd || !clk_hw || !init)
dd                301 drivers/clk/ti/dpll.c 	clk_hw->dpll_data = dd;
dd                322 drivers/clk/ti/dpll.c 	if (ti_clk_get_reg_addr(node, 0, &dd->control_reg))
dd                330 drivers/clk/ti/dpll.c 	if (!dd->idlest_mask) {
dd                331 drivers/clk/ti/dpll.c 		if (ti_clk_get_reg_addr(node, 1, &dd->mult_div1_reg))
dd                338 drivers/clk/ti/dpll.c 		if (ti_clk_get_reg_addr(node, 1, &dd->idlest_reg))
dd                341 drivers/clk/ti/dpll.c 		if (ti_clk_get_reg_addr(node, 2, &dd->mult_div1_reg))
dd                345 drivers/clk/ti/dpll.c 	if (dd->autoidle_mask) {
dd                346 drivers/clk/ti/dpll.c 		if (ti_clk_get_reg_addr(node, 3, &dd->autoidle_reg))
dd                360 drivers/clk/ti/dpll.c 		dd->modes = dpll_mode;
dd                366 drivers/clk/ti/dpll.c 	kfree(dd);
dd                394 drivers/clk/ti/dpll.c 	const struct dpll_data dd = {
dd                410 drivers/clk/ti/dpll.c 		of_ti_dpll_setup(node, &omap3_dpll5_ck_ops, &dd);
dd                412 drivers/clk/ti/dpll.c 		of_ti_dpll_setup(node, &omap3_dpll_ck_ops, &dd);
dd                419 drivers/clk/ti/dpll.c 	const struct dpll_data dd = {
dd                431 drivers/clk/ti/dpll.c 	of_ti_dpll_setup(node, &omap3_dpll_core_ck_ops, &dd);
dd                438 drivers/clk/ti/dpll.c 	const struct dpll_data dd = {
dd                451 drivers/clk/ti/dpll.c 	of_ti_dpll_setup(node, &omap3_dpll_per_ck_ops, &dd);
dd                458 drivers/clk/ti/dpll.c 	const struct dpll_data dd = {
dd                473 drivers/clk/ti/dpll.c 	of_ti_dpll_setup(node, &omap3_dpll_per_ck_ops, &dd);
dd                481 drivers/clk/ti/dpll.c 	const struct dpll_data dd = {
dd                493 drivers/clk/ti/dpll.c 	of_ti_dpll_setup(node, &dpll_ck_ops, &dd);
dd                500 drivers/clk/ti/dpll.c 	const struct dpll_data dd = {
dd                514 drivers/clk/ti/dpll.c 	of_ti_dpll_setup(node, &dpll_ck_ops, &dd);
dd                521 drivers/clk/ti/dpll.c 	const struct dpll_data dd = {
dd                533 drivers/clk/ti/dpll.c 	of_ti_dpll_setup(node, &dpll_core_ck_ops, &dd);
dd                542 drivers/clk/ti/dpll.c 	const struct dpll_data dd = {
dd                556 drivers/clk/ti/dpll.c 	of_ti_dpll_setup(node, &dpll_m4xen_ck_ops, &dd);
dd                563 drivers/clk/ti/dpll.c 	const struct dpll_data dd = {
dd                577 drivers/clk/ti/dpll.c 	of_ti_dpll_setup(node, &dpll_m4xen_ck_ops, &dd);
dd                585 drivers/clk/ti/dpll.c 	const struct dpll_data dd = {
dd                597 drivers/clk/ti/dpll.c 	of_ti_dpll_setup(node, &dpll_no_gate_ck_ops, &dd);
dd                604 drivers/clk/ti/dpll.c 	const struct dpll_data dd = {
dd                617 drivers/clk/ti/dpll.c 	of_ti_dpll_setup(node, &dpll_ck_ops, &dd);
dd                624 drivers/clk/ti/dpll.c 	const struct dpll_data dd = {
dd                637 drivers/clk/ti/dpll.c 	of_ti_dpll_setup(node, &dpll_no_gate_ck_ops, &dd);
dd                645 drivers/clk/ti/dpll.c 	const struct dpll_data dd = {
dd                657 drivers/clk/ti/dpll.c 	of_ti_dpll_setup(node, &dpll_ck_ops, &dd);
dd                663 drivers/clk/ti/dpll.c 	const struct dpll_data dd = {
dd                675 drivers/clk/ti/dpll.c 	of_ti_dpll_setup(node, &dpll_core_ck_ops, &dd);
dd                682 drivers/clk/ti/dpll.c 	const struct dpll_data dd = {
dd                690 drivers/clk/ti/dpll.c 	of_ti_dpll_setup(node, &omap2_dpll_core_ck_ops, &dd);
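of_ti_dpll_setup() (dpll.c:292-366) duplicates a per-SoC template with kmemdup(), resolves the control/idlest/mult_div1/autoidle register addresses from DT in order, and unwinds through a single cleanup label on any failure; the many const dpll_data initializers that follow it in the listing are the templates it copies. A plain-C sketch of the dup-template-then-cleanup idiom, with memdup() standing in for kmemdup():

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    struct dpll_data { unsigned mult_mask, div1_mask, enable_mask; };

    /* Userspace stand-in for kmemdup(). */
    static void *memdup(const void *src, size_t len)
    {
        void *p = malloc(len);

        if (p)
            memcpy(p, src, len);
        return p;
    }

    static int dpll_setup(const struct dpll_data *ddt)
    {
        struct dpll_data *dd = memdup(ddt, sizeof(*dd));

        if (!dd)
            goto cleanup;

        /* ... resolve register addresses; any failure jumps below ... */
        printf("registered, mult_mask=0x%x\n", dd->mult_mask);
        return 0;           /* success: dd now owned by the clock */

    cleanup:
        free(dd);           /* free(NULL) is a no-op, like kfree(NULL) */
        return -1;
    }

    int main(void)
    {
        const struct dpll_data tmpl = { 0x7ff00, 0x7f, 0x7 };

        return dpll_setup(&tmpl);
    }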
dd                 49 drivers/clk/ti/dpll3xxx.c 	const struct dpll_data *dd;
dd                 52 drivers/clk/ti/dpll3xxx.c 	dd = clk->dpll_data;
dd                 54 drivers/clk/ti/dpll3xxx.c 	v = ti_clk_ll_ops->clk_readl(&dd->control_reg);
dd                 55 drivers/clk/ti/dpll3xxx.c 	v &= ~dd->enable_mask;
dd                 56 drivers/clk/ti/dpll3xxx.c 	v |= clken_bits << __ffs(dd->enable_mask);
dd                 57 drivers/clk/ti/dpll3xxx.c 	ti_clk_ll_ops->clk_writel(v, &dd->control_reg);
dd                 63 drivers/clk/ti/dpll3xxx.c 	const struct dpll_data *dd;
dd                 68 drivers/clk/ti/dpll3xxx.c 	dd = clk->dpll_data;
dd                 71 drivers/clk/ti/dpll3xxx.c 	state <<= __ffs(dd->idlest_mask);
dd                 73 drivers/clk/ti/dpll3xxx.c 	while (((ti_clk_ll_ops->clk_readl(&dd->idlest_reg) & dd->idlest_mask)
dd                140 drivers/clk/ti/dpll3xxx.c 	const struct dpll_data *dd;
dd                147 drivers/clk/ti/dpll3xxx.c 	dd = clk->dpll_data;
dd                148 drivers/clk/ti/dpll3xxx.c 	state <<= __ffs(dd->idlest_mask);
dd                151 drivers/clk/ti/dpll3xxx.c 	if ((ti_clk_ll_ops->clk_readl(&dd->idlest_reg) & dd->idlest_mask) ==
dd                304 drivers/clk/ti/dpll3xxx.c 	struct dpll_data *dd = clk->dpll_data;
dd                317 drivers/clk/ti/dpll3xxx.c 		v = ti_clk_ll_ops->clk_readl(&dd->control_reg);
dd                318 drivers/clk/ti/dpll3xxx.c 		v &= ~dd->freqsel_mask;
dd                319 drivers/clk/ti/dpll3xxx.c 		v |= freqsel << __ffs(dd->freqsel_mask);
dd                320 drivers/clk/ti/dpll3xxx.c 		ti_clk_ll_ops->clk_writel(v, &dd->control_reg);
dd                324 drivers/clk/ti/dpll3xxx.c 	v = ti_clk_ll_ops->clk_readl(&dd->mult_div1_reg);
dd                327 drivers/clk/ti/dpll3xxx.c 	if (dd->dcc_mask) {
dd                328 drivers/clk/ti/dpll3xxx.c 		if (dd->last_rounded_rate >= dd->dcc_rate)
dd                329 drivers/clk/ti/dpll3xxx.c 			v |= dd->dcc_mask; /* Enable DCC */
dd                331 drivers/clk/ti/dpll3xxx.c 			v &= ~dd->dcc_mask; /* Disable DCC */
dd                334 drivers/clk/ti/dpll3xxx.c 	v &= ~(dd->mult_mask | dd->div1_mask);
dd                335 drivers/clk/ti/dpll3xxx.c 	v |= dd->last_rounded_m << __ffs(dd->mult_mask);
dd                336 drivers/clk/ti/dpll3xxx.c 	v |= (dd->last_rounded_n - 1) << __ffs(dd->div1_mask);
dd                339 drivers/clk/ti/dpll3xxx.c 	if (dd->dco_mask) {
dd                340 drivers/clk/ti/dpll3xxx.c 		_lookup_dco(clk, &dco, dd->last_rounded_m, dd->last_rounded_n);
dd                341 drivers/clk/ti/dpll3xxx.c 		v &= ~(dd->dco_mask);
dd                342 drivers/clk/ti/dpll3xxx.c 		v |= dco << __ffs(dd->dco_mask);
dd                344 drivers/clk/ti/dpll3xxx.c 	if (dd->sddiv_mask) {
dd                345 drivers/clk/ti/dpll3xxx.c 		_lookup_sddiv(clk, &sd_div, dd->last_rounded_m,
dd                346 drivers/clk/ti/dpll3xxx.c 			      dd->last_rounded_n);
dd                347 drivers/clk/ti/dpll3xxx.c 		v &= ~(dd->sddiv_mask);
dd                348 drivers/clk/ti/dpll3xxx.c 		v |= sd_div << __ffs(dd->sddiv_mask);
dd                370 drivers/clk/ti/dpll3xxx.c 	ti_clk_ll_ops->clk_writel(v, &dd->mult_div1_reg);
dd                373 drivers/clk/ti/dpll3xxx.c 	if (dd->m4xen_mask || dd->lpmode_mask) {
dd                374 drivers/clk/ti/dpll3xxx.c 		v = ti_clk_ll_ops->clk_readl(&dd->control_reg);
dd                376 drivers/clk/ti/dpll3xxx.c 		if (dd->m4xen_mask) {
dd                377 drivers/clk/ti/dpll3xxx.c 			if (dd->last_rounded_m4xen)
dd                378 drivers/clk/ti/dpll3xxx.c 				v |= dd->m4xen_mask;
dd                380 drivers/clk/ti/dpll3xxx.c 				v &= ~dd->m4xen_mask;
dd                383 drivers/clk/ti/dpll3xxx.c 		if (dd->lpmode_mask) {
dd                384 drivers/clk/ti/dpll3xxx.c 			if (dd->last_rounded_lpmode)
dd                385 drivers/clk/ti/dpll3xxx.c 				v |= dd->lpmode_mask;
dd                387 drivers/clk/ti/dpll3xxx.c 				v &= ~dd->lpmode_mask;
dd                390 drivers/clk/ti/dpll3xxx.c 		ti_clk_ll_ops->clk_writel(v, &dd->control_reg);
dd                440 drivers/clk/ti/dpll3xxx.c 	struct dpll_data *dd;
dd                443 drivers/clk/ti/dpll3xxx.c 	dd = clk->dpll_data;
dd                444 drivers/clk/ti/dpll3xxx.c 	if (!dd)
dd                460 drivers/clk/ti/dpll3xxx.c 	if (clk_hw_get_rate(hw) == clk_hw_get_rate(dd->clk_bypass)) {
dd                461 drivers/clk/ti/dpll3xxx.c 		WARN_ON(parent != dd->clk_bypass);
dd                464 drivers/clk/ti/dpll3xxx.c 		WARN_ON(parent != dd->clk_ref);
dd                503 drivers/clk/ti/dpll3xxx.c 	struct dpll_data *dd;
dd                508 drivers/clk/ti/dpll3xxx.c 	dd = clk->dpll_data;
dd                509 drivers/clk/ti/dpll3xxx.c 	if (!dd)
dd                512 drivers/clk/ti/dpll3xxx.c 	if (clk_hw_get_rate(dd->clk_bypass) == req->rate &&
dd                513 drivers/clk/ti/dpll3xxx.c 	    (dd->modes & (1 << DPLL_LOW_POWER_BYPASS))) {
dd                514 drivers/clk/ti/dpll3xxx.c 		req->best_parent_hw = dd->clk_bypass;
dd                518 drivers/clk/ti/dpll3xxx.c 		req->best_parent_hw = dd->clk_ref;
dd                565 drivers/clk/ti/dpll3xxx.c 	struct dpll_data *dd;
dd                572 drivers/clk/ti/dpll3xxx.c 	dd = clk->dpll_data;
dd                573 drivers/clk/ti/dpll3xxx.c 	if (!dd)
dd                576 drivers/clk/ti/dpll3xxx.c 	if (clk_hw_get_parent(hw) != dd->clk_ref)
dd                579 drivers/clk/ti/dpll3xxx.c 	if (dd->last_rounded_rate == 0)
dd                584 drivers/clk/ti/dpll3xxx.c 		freqsel = _omap3_dpll_compute_freqsel(clk, dd->last_rounded_n);
dd                644 drivers/clk/ti/dpll3xxx.c 	const struct dpll_data *dd;
dd                650 drivers/clk/ti/dpll3xxx.c 	dd = clk->dpll_data;
dd                652 drivers/clk/ti/dpll3xxx.c 	if (!dd->autoidle_mask)
dd                655 drivers/clk/ti/dpll3xxx.c 	v = ti_clk_ll_ops->clk_readl(&dd->autoidle_reg);
dd                656 drivers/clk/ti/dpll3xxx.c 	v &= dd->autoidle_mask;
dd                657 drivers/clk/ti/dpll3xxx.c 	v >>= __ffs(dd->autoidle_mask);
dd                673 drivers/clk/ti/dpll3xxx.c 	const struct dpll_data *dd;
dd                679 drivers/clk/ti/dpll3xxx.c 	dd = clk->dpll_data;
dd                681 drivers/clk/ti/dpll3xxx.c 	if (!dd->autoidle_mask)
dd                689 drivers/clk/ti/dpll3xxx.c 	v = ti_clk_ll_ops->clk_readl(&dd->autoidle_reg);
dd                690 drivers/clk/ti/dpll3xxx.c 	v &= ~dd->autoidle_mask;
dd                691 drivers/clk/ti/dpll3xxx.c 	v |= DPLL_AUTOIDLE_LOW_POWER_STOP << __ffs(dd->autoidle_mask);
dd                692 drivers/clk/ti/dpll3xxx.c 	ti_clk_ll_ops->clk_writel(v, &dd->autoidle_reg);
dd                703 drivers/clk/ti/dpll3xxx.c 	const struct dpll_data *dd;
dd                709 drivers/clk/ti/dpll3xxx.c 	dd = clk->dpll_data;
dd                711 drivers/clk/ti/dpll3xxx.c 	if (!dd->autoidle_mask)
dd                714 drivers/clk/ti/dpll3xxx.c 	v = ti_clk_ll_ops->clk_readl(&dd->autoidle_reg);
dd                715 drivers/clk/ti/dpll3xxx.c 	v &= ~dd->autoidle_mask;
dd                716 drivers/clk/ti/dpll3xxx.c 	v |= DPLL_AUTOIDLE_DISABLE << __ffs(dd->autoidle_mask);
dd                717 drivers/clk/ti/dpll3xxx.c 	ti_clk_ll_ops->clk_writel(v, &dd->autoidle_reg);
dd                756 drivers/clk/ti/dpll3xxx.c 	const struct dpll_data *dd;
dd                769 drivers/clk/ti/dpll3xxx.c 	dd = pclk->dpll_data;
dd                771 drivers/clk/ti/dpll3xxx.c 	WARN_ON(!dd->enable_mask);
dd                773 drivers/clk/ti/dpll3xxx.c 	v = ti_clk_ll_ops->clk_readl(&dd->control_reg) & dd->enable_mask;
dd                774 drivers/clk/ti/dpll3xxx.c 	v >>= __ffs(dd->enable_mask);
dd                775 drivers/clk/ti/dpll3xxx.c 	if ((v != OMAP3XXX_EN_DPLL_LOCKED) || (dd->flags & DPLL_J_TYPE))
dd                792 drivers/clk/ti/dpll3xxx.c 	struct dpll_data *dd;
dd                795 drivers/clk/ti/dpll3xxx.c 	dd = clk->dpll_data;
dd                797 drivers/clk/ti/dpll3xxx.c 	v = ti_clk_ll_ops->clk_readl(&dd->control_reg);
dd                798 drivers/clk/ti/dpll3xxx.c 	clk->context = (v & dd->enable_mask) >> __ffs(dd->enable_mask);
dd                801 drivers/clk/ti/dpll3xxx.c 		v = ti_clk_ll_ops->clk_readl(&dd->mult_div1_reg);
dd                802 drivers/clk/ti/dpll3xxx.c 		dd->last_rounded_m = (v & dd->mult_mask) >>
dd                803 drivers/clk/ti/dpll3xxx.c 						__ffs(dd->mult_mask);
dd                804 drivers/clk/ti/dpll3xxx.c 		dd->last_rounded_n = ((v & dd->div1_mask) >>
dd                805 drivers/clk/ti/dpll3xxx.c 						__ffs(dd->div1_mask)) + 1;
dd                821 drivers/clk/ti/dpll3xxx.c 	const struct dpll_data *dd;
dd                824 drivers/clk/ti/dpll3xxx.c 	dd = clk->dpll_data;
dd                830 drivers/clk/ti/dpll3xxx.c 		v = ti_clk_ll_ops->clk_readl(&dd->mult_div1_reg);
dd                831 drivers/clk/ti/dpll3xxx.c 		v &= ~(dd->mult_mask | dd->div1_mask);
dd                832 drivers/clk/ti/dpll3xxx.c 		v |= dd->last_rounded_m << __ffs(dd->mult_mask);
dd                833 drivers/clk/ti/dpll3xxx.c 		v |= (dd->last_rounded_n - 1) << __ffs(dd->div1_mask);
dd                834 drivers/clk/ti/dpll3xxx.c 		ti_clk_ll_ops->clk_writel(v, &dd->mult_div1_reg);
dd                853 drivers/clk/ti/dpll3xxx.c 	struct dpll_data *dd;
dd                856 drivers/clk/ti/dpll3xxx.c 	dd = clk->dpll_data;
dd                858 drivers/clk/ti/dpll3xxx.c 	v = ti_clk_ll_ops->clk_readl(&dd->control_reg);
dd                859 drivers/clk/ti/dpll3xxx.c 	clk->context = (v & dd->enable_mask) >> __ffs(dd->enable_mask);
dd                862 drivers/clk/ti/dpll3xxx.c 		v = ti_clk_ll_ops->clk_readl(&dd->mult_div1_reg);
dd                863 drivers/clk/ti/dpll3xxx.c 		dd->last_rounded_m = (v & dd->mult_mask) >>
dd                864 drivers/clk/ti/dpll3xxx.c 						__ffs(dd->mult_mask);
dd                865 drivers/clk/ti/dpll3xxx.c 		dd->last_rounded_n = ((v & dd->div1_mask) >>
dd                866 drivers/clk/ti/dpll3xxx.c 						__ffs(dd->div1_mask)) + 1;
dd                882 drivers/clk/ti/dpll3xxx.c 	const struct dpll_data *dd;
dd                885 drivers/clk/ti/dpll3xxx.c 	dd = clk->dpll_data;
dd                887 drivers/clk/ti/dpll3xxx.c 	ctrl = ti_clk_ll_ops->clk_readl(&dd->control_reg);
dd                888 drivers/clk/ti/dpll3xxx.c 	mult_div1 = ti_clk_ll_ops->clk_readl(&dd->mult_div1_reg);
dd                890 drivers/clk/ti/dpll3xxx.c 	if (clk->context == ((ctrl & dd->enable_mask) >>
dd                891 drivers/clk/ti/dpll3xxx.c 			     __ffs(dd->enable_mask)) &&
dd                892 drivers/clk/ti/dpll3xxx.c 	    dd->last_rounded_m == ((mult_div1 & dd->mult_mask) >>
dd                893 drivers/clk/ti/dpll3xxx.c 				   __ffs(dd->mult_mask)) &&
dd                894 drivers/clk/ti/dpll3xxx.c 	    dd->last_rounded_n == ((mult_div1 & dd->div1_mask) >>
dd                895 drivers/clk/ti/dpll3xxx.c 				   __ffs(dd->div1_mask)) + 1) {
dd                987 drivers/clk/ti/dpll3xxx.c 	struct dpll_data *dd;
dd               1001 drivers/clk/ti/dpll3xxx.c 	dd = clk->dpll_data;
dd               1002 drivers/clk/ti/dpll3xxx.c 	dd->last_rounded_m = d->m;
dd               1003 drivers/clk/ti/dpll3xxx.c 	dd->last_rounded_n = d->n;
dd               1004 drivers/clk/ti/dpll3xxx.c 	dd->last_rounded_rate = div_u64((u64)parent_rate * d->m, d->n);
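Nearly every dpll3xxx.c access above has the same shape: read the register, isolate a field with its mask, shift by __ffs(mask), and write the modified value back. Standalone helpers for that mask/shift idiom, using the gcc/clang __builtin_ctz() in place of the kernel's __ffs() (mask value below is a placeholder):

    #include <stdint.h>
    #include <stdio.h>

    /* Position of the lowest set bit, like the kernel's __ffs().
     * __builtin_ctz() is a gcc/clang builtin; mask must be nonzero. */
    static unsigned field_shift(uint32_t mask) { return __builtin_ctz(mask); }

    static uint32_t field_get(uint32_t reg, uint32_t mask)
    {
        return (reg & mask) >> field_shift(mask);
    }

    static uint32_t field_set(uint32_t reg, uint32_t mask, uint32_t val)
    {
        return (reg & ~mask) | (val << field_shift(mask));
    }

    int main(void)
    {
        uint32_t ctrl = 0;
        uint32_t enable_mask = 0x70;   /* hypothetical field, bits [6:4] */

        ctrl = field_set(ctrl, enable_mask, 0x7);
        printf("ctrl=0x%x en=%u\n", ctrl, field_get(ctrl, enable_mask));
        return 0;
    }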
dd                 90 drivers/clk/ti/dpll44xx.c static void omap4_dpll_lpmode_recalc(struct dpll_data *dd)
dd                 94 drivers/clk/ti/dpll44xx.c 	fint = clk_hw_get_rate(dd->clk_ref) / (dd->last_rounded_n + 1);
dd                 95 drivers/clk/ti/dpll44xx.c 	fout = fint * dd->last_rounded_m;
dd                 98 drivers/clk/ti/dpll44xx.c 		dd->last_rounded_lpmode = 1;
dd                100 drivers/clk/ti/dpll44xx.c 		dd->last_rounded_lpmode = 0;
dd                118 drivers/clk/ti/dpll44xx.c 	struct dpll_data *dd;
dd                123 drivers/clk/ti/dpll44xx.c 	dd = clk->dpll_data;
dd                128 drivers/clk/ti/dpll44xx.c 	v = ti_clk_ll_ops->clk_readl(&dd->control_reg);
dd                152 drivers/clk/ti/dpll44xx.c 	struct dpll_data *dd;
dd                158 drivers/clk/ti/dpll44xx.c 	dd = clk->dpll_data;
dd                160 drivers/clk/ti/dpll44xx.c 	dd->last_rounded_m4xen = 0;
dd                180 drivers/clk/ti/dpll44xx.c 	dd->last_rounded_rate *= OMAP4430_REGM4XEN_MULT;
dd                181 drivers/clk/ti/dpll44xx.c 	dd->last_rounded_m4xen = 1;
dd                184 drivers/clk/ti/dpll44xx.c 	omap4_dpll_lpmode_recalc(dd);
dd                186 drivers/clk/ti/dpll44xx.c 	return dd->last_rounded_rate;
dd                203 drivers/clk/ti/dpll44xx.c 	struct dpll_data *dd;
dd                208 drivers/clk/ti/dpll44xx.c 	dd = clk->dpll_data;
dd                209 drivers/clk/ti/dpll44xx.c 	if (!dd)
dd                212 drivers/clk/ti/dpll44xx.c 	if (clk_hw_get_rate(dd->clk_bypass) == req->rate &&
dd                213 drivers/clk/ti/dpll44xx.c 	    (dd->modes & (1 << DPLL_LOW_POWER_BYPASS))) {
dd                214 drivers/clk/ti/dpll44xx.c 		req->best_parent_hw = dd->clk_bypass;
dd                218 drivers/clk/ti/dpll44xx.c 		req->best_parent_hw = dd->clk_ref;
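omap4_dpll_lpmode_recalc() derives fint = ref / (n + 1) and fout = fint * m, then sets last_rounded_lpmode when the result sits under the part's low-power thresholds. The exact cutoffs are not visible in this listing; the sketch below uses placeholder values.

    #include <stdint.h>
    #include <stdio.h>

    /* Placeholder cutoffs; the real OMAP4 limits are not shown above. */
    #define FINT_LP_MAX   1000000u
    #define FOUT_LP_MAX 100000000u

    static int dpll_lpmode(uint32_t ref, uint32_t m, uint32_t n)
    {
        uint32_t fint = ref / (n + 1);   /* same derivation as the driver */
        uint32_t fout = fint * m;

        return fint < FINT_LP_MAX && fout < FOUT_LP_MAX;
    }

    int main(void)
    {
        printf("lpmode=%d\n", dpll_lpmode(19200000, 40, 31));
        return 0;
    }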
dd                104 drivers/crypto/atmel-aes.c 	struct atmel_aes_dev	*dd;
dd                344 drivers/crypto/atmel-aes.c static inline u32 atmel_aes_read(struct atmel_aes_dev *dd, u32 offset)
dd                346 drivers/crypto/atmel-aes.c 	u32 value = readl_relaxed(dd->io_base + offset);
dd                349 drivers/crypto/atmel-aes.c 	if (dd->flags & AES_FLAGS_DUMP_REG) {
dd                352 drivers/crypto/atmel-aes.c 		dev_vdbg(dd->dev, "read 0x%08x from %s\n", value,
dd                360 drivers/crypto/atmel-aes.c static inline void atmel_aes_write(struct atmel_aes_dev *dd,
dd                364 drivers/crypto/atmel-aes.c 	if (dd->flags & AES_FLAGS_DUMP_REG) {
dd                367 drivers/crypto/atmel-aes.c 		dev_vdbg(dd->dev, "write 0x%08x into %s\n", value,
dd                372 drivers/crypto/atmel-aes.c 	writel_relaxed(value, dd->io_base + offset);
dd                375 drivers/crypto/atmel-aes.c static void atmel_aes_read_n(struct atmel_aes_dev *dd, u32 offset,
dd                379 drivers/crypto/atmel-aes.c 		*value = atmel_aes_read(dd, offset);
dd                382 drivers/crypto/atmel-aes.c static void atmel_aes_write_n(struct atmel_aes_dev *dd, u32 offset,
dd                386 drivers/crypto/atmel-aes.c 		atmel_aes_write(dd, offset, *value);
dd                389 drivers/crypto/atmel-aes.c static inline void atmel_aes_read_block(struct atmel_aes_dev *dd, u32 offset,
dd                392 drivers/crypto/atmel-aes.c 	atmel_aes_read_n(dd, offset, value, SIZE_IN_WORDS(AES_BLOCK_SIZE));
dd                395 drivers/crypto/atmel-aes.c static inline void atmel_aes_write_block(struct atmel_aes_dev *dd, u32 offset,
dd                398 drivers/crypto/atmel-aes.c 	atmel_aes_write_n(dd, offset, value, SIZE_IN_WORDS(AES_BLOCK_SIZE));
dd                401 drivers/crypto/atmel-aes.c static inline int atmel_aes_wait_for_data_ready(struct atmel_aes_dev *dd,
dd                404 drivers/crypto/atmel-aes.c 	u32 isr = atmel_aes_read(dd, AES_ISR);
dd                407 drivers/crypto/atmel-aes.c 		return resume(dd);
dd                409 drivers/crypto/atmel-aes.c 	dd->resume = resume;
dd                410 drivers/crypto/atmel-aes.c 	atmel_aes_write(dd, AES_IER, AES_INT_DATARDY);
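atmel_aes_wait_for_data_ready() is the driver's continuation hook: if AES_ISR already shows DATARDY it calls resume() synchronously, otherwise it parks the callback in dd->resume and enables the DATARDY interrupt so the tasklet can run it later. The same fast-path-or-park shape in plain C, with the interrupt modeled by a manual poke(); all names here are hypothetical.

    #include <stdio.h>

    struct dev {
        int ready;                     /* stands in for AES_ISR & DATARDY */
        int (*resume)(struct dev *);
    };

    static int on_data(struct dev *d)
    {
        printf("data ready, continuing\n");
        return 0;
    }

    /* Fast path: already ready -> call the continuation now.
     * Slow path: park it; the "irq" (here: poke()) will run it. */
    static int wait_for_ready(struct dev *d, int (*resume)(struct dev *))
    {
        if (d->ready)
            return resume(d);
        d->resume = resume;
        return -1;                     /* -EINPROGRESS in the driver */
    }

    static void poke(struct dev *d)    /* models the DATARDY interrupt */
    {
        d->ready = 1;
        if (d->resume)
            d->resume(d);
    }

    int main(void)
    {
        struct dev d = { 0, NULL };

        if (wait_for_ready(&d, on_data) < 0)
            poke(&d);
        return 0;
    }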
dd                426 drivers/crypto/atmel-aes.c 	if (!ctx->dd) {
dd                431 drivers/crypto/atmel-aes.c 		ctx->dd = aes_dd;
dd                433 drivers/crypto/atmel-aes.c 		aes_dd = ctx->dd;
dd                441 drivers/crypto/atmel-aes.c static int atmel_aes_hw_init(struct atmel_aes_dev *dd)
dd                445 drivers/crypto/atmel-aes.c 	err = clk_enable(dd->iclk);
dd                449 drivers/crypto/atmel-aes.c 	atmel_aes_write(dd, AES_CR, AES_CR_SWRST);
dd                450 drivers/crypto/atmel-aes.c 	atmel_aes_write(dd, AES_MR, 0xE << AES_MR_CKEY_OFFSET);
dd                455 drivers/crypto/atmel-aes.c static inline unsigned int atmel_aes_get_version(struct atmel_aes_dev *dd)
dd                457 drivers/crypto/atmel-aes.c 	return atmel_aes_read(dd, AES_HW_VERSION) & 0x00000fff;
dd                460 drivers/crypto/atmel-aes.c static int atmel_aes_hw_version_init(struct atmel_aes_dev *dd)
dd                464 drivers/crypto/atmel-aes.c 	err = atmel_aes_hw_init(dd);
dd                468 drivers/crypto/atmel-aes.c 	dd->hw_version = atmel_aes_get_version(dd);
dd                470 drivers/crypto/atmel-aes.c 	dev_info(dd->dev, "version: 0x%x\n", dd->hw_version);
dd                472 drivers/crypto/atmel-aes.c 	clk_disable(dd->iclk);
dd                476 drivers/crypto/atmel-aes.c static inline void atmel_aes_set_mode(struct atmel_aes_dev *dd,
dd                480 drivers/crypto/atmel-aes.c 	dd->flags = (dd->flags & AES_FLAGS_PERSISTENT) | rctx->mode;
dd                483 drivers/crypto/atmel-aes.c static inline bool atmel_aes_is_encrypt(const struct atmel_aes_dev *dd)
dd                485 drivers/crypto/atmel-aes.c 	return (dd->flags & AES_FLAGS_ENCRYPT);
dd                489 drivers/crypto/atmel-aes.c static void atmel_aes_authenc_complete(struct atmel_aes_dev *dd, int err);
dd                492 drivers/crypto/atmel-aes.c static void atmel_aes_set_iv_as_last_ciphertext_block(struct atmel_aes_dev *dd)
dd                494 drivers/crypto/atmel-aes.c 	struct ablkcipher_request *req = ablkcipher_request_cast(dd->areq);
dd                515 drivers/crypto/atmel-aes.c static inline int atmel_aes_complete(struct atmel_aes_dev *dd, int err)
dd                518 drivers/crypto/atmel-aes.c 	if (dd->ctx->is_aead)
dd                519 drivers/crypto/atmel-aes.c 		atmel_aes_authenc_complete(dd, err);
dd                522 drivers/crypto/atmel-aes.c 	clk_disable(dd->iclk);
dd                523 drivers/crypto/atmel-aes.c 	dd->flags &= ~AES_FLAGS_BUSY;
dd                525 drivers/crypto/atmel-aes.c 	if (!dd->ctx->is_aead)
dd                526 drivers/crypto/atmel-aes.c 		atmel_aes_set_iv_as_last_ciphertext_block(dd);
dd                528 drivers/crypto/atmel-aes.c 	if (dd->is_async)
dd                529 drivers/crypto/atmel-aes.c 		dd->areq->complete(dd->areq, err);
dd                531 drivers/crypto/atmel-aes.c 	tasklet_schedule(&dd->queue_task);
dd                536 drivers/crypto/atmel-aes.c static void atmel_aes_write_ctrl_key(struct atmel_aes_dev *dd, bool use_dma,
dd                549 drivers/crypto/atmel-aes.c 	valmr |= dd->flags & AES_FLAGS_MODE_MASK;
dd                553 drivers/crypto/atmel-aes.c 		if (dd->caps.has_dualbuff)
dd                559 drivers/crypto/atmel-aes.c 	atmel_aes_write(dd, AES_MR, valmr);
dd                561 drivers/crypto/atmel-aes.c 	atmel_aes_write_n(dd, AES_KEYWR(0), key, SIZE_IN_WORDS(keylen));
dd                564 drivers/crypto/atmel-aes.c 		atmel_aes_write_block(dd, AES_IVR(0), iv);
dd                567 drivers/crypto/atmel-aes.c static inline void atmel_aes_write_ctrl(struct atmel_aes_dev *dd, bool use_dma,
dd                571 drivers/crypto/atmel-aes.c 	atmel_aes_write_ctrl_key(dd, use_dma, iv,
dd                572 drivers/crypto/atmel-aes.c 				 dd->ctx->key, dd->ctx->keylen);
dd                577 drivers/crypto/atmel-aes.c static int atmel_aes_cpu_transfer(struct atmel_aes_dev *dd)
dd                583 drivers/crypto/atmel-aes.c 		atmel_aes_read_block(dd, AES_ODATAR(0), dd->data);
dd                584 drivers/crypto/atmel-aes.c 		dd->data += 4;
dd                585 drivers/crypto/atmel-aes.c 		dd->datalen -= AES_BLOCK_SIZE;
dd                587 drivers/crypto/atmel-aes.c 		if (dd->datalen < AES_BLOCK_SIZE)
dd                590 drivers/crypto/atmel-aes.c 		atmel_aes_write_block(dd, AES_IDATAR(0), dd->data);
dd                592 drivers/crypto/atmel-aes.c 		isr = atmel_aes_read(dd, AES_ISR);
dd                594 drivers/crypto/atmel-aes.c 			dd->resume = atmel_aes_cpu_transfer;
dd                595 drivers/crypto/atmel-aes.c 			atmel_aes_write(dd, AES_IER, AES_INT_DATARDY);
dd                600 drivers/crypto/atmel-aes.c 	if (!sg_copy_from_buffer(dd->real_dst, sg_nents(dd->real_dst),
dd                601 drivers/crypto/atmel-aes.c 				 dd->buf, dd->total))
dd                605 drivers/crypto/atmel-aes.c 		return atmel_aes_complete(dd, err);
dd                607 drivers/crypto/atmel-aes.c 	return dd->cpu_transfer_complete(dd);
dd                610 drivers/crypto/atmel-aes.c static int atmel_aes_cpu_start(struct atmel_aes_dev *dd,
dd                621 drivers/crypto/atmel-aes.c 	sg_copy_to_buffer(src, sg_nents(src), dd->buf, len);
dd                623 drivers/crypto/atmel-aes.c 	dd->total = len;
dd                624 drivers/crypto/atmel-aes.c 	dd->real_dst = dst;
dd                625 drivers/crypto/atmel-aes.c 	dd->cpu_transfer_complete = resume;
dd                626 drivers/crypto/atmel-aes.c 	dd->datalen = len + padlen;
dd                627 drivers/crypto/atmel-aes.c 	dd->data = (u32 *)dd->buf;
dd                628 drivers/crypto/atmel-aes.c 	atmel_aes_write_block(dd, AES_IDATAR(0), dd->data);
dd                629 drivers/crypto/atmel-aes.c 	return atmel_aes_wait_for_data_ready(dd, atmel_aes_cpu_transfer);
dd                637 drivers/crypto/atmel-aes.c static bool atmel_aes_check_aligned(struct atmel_aes_dev *dd,
dd                644 drivers/crypto/atmel-aes.c 	if (!IS_ALIGNED(len, dd->ctx->block_size))
dd                652 drivers/crypto/atmel-aes.c 			if (!IS_ALIGNED(len, dd->ctx->block_size))
dd                661 drivers/crypto/atmel-aes.c 		if (!IS_ALIGNED(sg->length, dd->ctx->block_size))
dd                687 drivers/crypto/atmel-aes.c static int atmel_aes_map(struct atmel_aes_dev *dd,
dd                695 drivers/crypto/atmel-aes.c 	dd->total = len;
dd                696 drivers/crypto/atmel-aes.c 	dd->src.sg = src;
dd                697 drivers/crypto/atmel-aes.c 	dd->dst.sg = dst;
dd                698 drivers/crypto/atmel-aes.c 	dd->real_dst = dst;
dd                700 drivers/crypto/atmel-aes.c 	src_aligned = atmel_aes_check_aligned(dd, src, len, &dd->src);
dd                704 drivers/crypto/atmel-aes.c 		dst_aligned = atmel_aes_check_aligned(dd, dst, len, &dd->dst);
dd                706 drivers/crypto/atmel-aes.c 		padlen = atmel_aes_padlen(len, dd->ctx->block_size);
dd                708 drivers/crypto/atmel-aes.c 		if (dd->buflen < len + padlen)
dd                712 drivers/crypto/atmel-aes.c 			sg_copy_to_buffer(src, sg_nents(src), dd->buf, len);
dd                713 drivers/crypto/atmel-aes.c 			dd->src.sg = &dd->aligned_sg;
dd                714 drivers/crypto/atmel-aes.c 			dd->src.nents = 1;
dd                715 drivers/crypto/atmel-aes.c 			dd->src.remainder = 0;
dd                719 drivers/crypto/atmel-aes.c 			dd->dst.sg = &dd->aligned_sg;
dd                720 drivers/crypto/atmel-aes.c 			dd->dst.nents = 1;
dd                721 drivers/crypto/atmel-aes.c 			dd->dst.remainder = 0;
dd                724 drivers/crypto/atmel-aes.c 		sg_init_table(&dd->aligned_sg, 1);
dd                725 drivers/crypto/atmel-aes.c 		sg_set_buf(&dd->aligned_sg, dd->buf, len + padlen);
dd                728 drivers/crypto/atmel-aes.c 	if (dd->src.sg == dd->dst.sg) {
dd                729 drivers/crypto/atmel-aes.c 		dd->src.sg_len = dma_map_sg(dd->dev, dd->src.sg, dd->src.nents,
dd                731 drivers/crypto/atmel-aes.c 		dd->dst.sg_len = dd->src.sg_len;
dd                732 drivers/crypto/atmel-aes.c 		if (!dd->src.sg_len)
dd                735 drivers/crypto/atmel-aes.c 		dd->src.sg_len = dma_map_sg(dd->dev, dd->src.sg, dd->src.nents,
dd                737 drivers/crypto/atmel-aes.c 		if (!dd->src.sg_len)
dd                740 drivers/crypto/atmel-aes.c 		dd->dst.sg_len = dma_map_sg(dd->dev, dd->dst.sg, dd->dst.nents,
dd                742 drivers/crypto/atmel-aes.c 		if (!dd->dst.sg_len) {
dd                743 drivers/crypto/atmel-aes.c 			dma_unmap_sg(dd->dev, dd->src.sg, dd->src.nents,
dd                752 drivers/crypto/atmel-aes.c static void atmel_aes_unmap(struct atmel_aes_dev *dd)
dd                754 drivers/crypto/atmel-aes.c 	if (dd->src.sg == dd->dst.sg) {
dd                755 drivers/crypto/atmel-aes.c 		dma_unmap_sg(dd->dev, dd->src.sg, dd->src.nents,
dd                758 drivers/crypto/atmel-aes.c 		if (dd->src.sg != &dd->aligned_sg)
dd                759 drivers/crypto/atmel-aes.c 			atmel_aes_restore_sg(&dd->src);
dd                761 drivers/crypto/atmel-aes.c 		dma_unmap_sg(dd->dev, dd->dst.sg, dd->dst.nents,
dd                764 drivers/crypto/atmel-aes.c 		if (dd->dst.sg != &dd->aligned_sg)
dd                765 drivers/crypto/atmel-aes.c 			atmel_aes_restore_sg(&dd->dst);
dd                767 drivers/crypto/atmel-aes.c 		dma_unmap_sg(dd->dev, dd->src.sg, dd->src.nents,
dd                770 drivers/crypto/atmel-aes.c 		if (dd->src.sg != &dd->aligned_sg)
dd                771 drivers/crypto/atmel-aes.c 			atmel_aes_restore_sg(&dd->src);
dd                774 drivers/crypto/atmel-aes.c 	if (dd->dst.sg == &dd->aligned_sg)
dd                775 drivers/crypto/atmel-aes.c 		sg_copy_from_buffer(dd->real_dst, sg_nents(dd->real_dst),
dd                776 drivers/crypto/atmel-aes.c 				    dd->buf, dd->total);
dd                779 drivers/crypto/atmel-aes.c static int atmel_aes_dma_transfer_start(struct atmel_aes_dev *dd,
dd                799 drivers/crypto/atmel-aes.c 		dma = &dd->src;
dd                801 drivers/crypto/atmel-aes.c 		config.dst_addr = dd->phys_base + AES_IDATAR(0);
dd                805 drivers/crypto/atmel-aes.c 		dma = &dd->dst;
dd                807 drivers/crypto/atmel-aes.c 		config.src_addr = dd->phys_base + AES_ODATAR(0);
dd                824 drivers/crypto/atmel-aes.c 	desc->callback_param = dd;
dd                831 drivers/crypto/atmel-aes.c static void atmel_aes_dma_transfer_stop(struct atmel_aes_dev *dd,
dd                838 drivers/crypto/atmel-aes.c 		dma = &dd->src;
dd                842 drivers/crypto/atmel-aes.c 		dma = &dd->dst;
dd                852 drivers/crypto/atmel-aes.c static int atmel_aes_dma_start(struct atmel_aes_dev *dd,
dd                862 drivers/crypto/atmel-aes.c 	switch (dd->ctx->block_size) {
dd                881 drivers/crypto/atmel-aes.c 		maxburst = dd->caps.max_burst_size;
dd                889 drivers/crypto/atmel-aes.c 	err = atmel_aes_map(dd, src, dst, len);
dd                893 drivers/crypto/atmel-aes.c 	dd->resume = resume;
dd                896 drivers/crypto/atmel-aes.c 	err = atmel_aes_dma_transfer_start(dd, addr_width, DMA_DEV_TO_MEM,
dd                902 drivers/crypto/atmel-aes.c 	err = atmel_aes_dma_transfer_start(dd, addr_width, DMA_MEM_TO_DEV,
dd                910 drivers/crypto/atmel-aes.c 	atmel_aes_dma_transfer_stop(dd, DMA_DEV_TO_MEM);
dd                912 drivers/crypto/atmel-aes.c 	atmel_aes_unmap(dd);
dd                914 drivers/crypto/atmel-aes.c 	return atmel_aes_complete(dd, err);
dd                917 drivers/crypto/atmel-aes.c static void atmel_aes_dma_stop(struct atmel_aes_dev *dd)
dd                919 drivers/crypto/atmel-aes.c 	atmel_aes_dma_transfer_stop(dd, DMA_MEM_TO_DEV);
dd                920 drivers/crypto/atmel-aes.c 	atmel_aes_dma_transfer_stop(dd, DMA_DEV_TO_MEM);
dd                921 drivers/crypto/atmel-aes.c 	atmel_aes_unmap(dd);
dd                926 drivers/crypto/atmel-aes.c 	struct atmel_aes_dev *dd = data;
dd                928 drivers/crypto/atmel-aes.c 	atmel_aes_dma_stop(dd);
dd                929 drivers/crypto/atmel-aes.c 	dd->is_async = true;
dd                930 drivers/crypto/atmel-aes.c 	(void)dd->resume(dd);
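The map/unmap hits (atmel-aes.c:637-776) show the bounce-buffer strategy: when the source or destination scatterlist is not block-aligned, data is staged through dd->buf via dd->aligned_sg, padded out to a whole number of blocks, and copied back to the real destination after the DMA completes. A minimal sketch of the align-or-bounce decision; pad_to_block() mirrors the driver's padding, and the buffer handling is simplified.

    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    #define BLOCK 16                      /* AES_BLOCK_SIZE */

    /* Round len up to a whole number of blocks, like atmel_aes_padlen(). */
    static size_t pad_to_block(size_t len)
    {
        return (len + BLOCK - 1) / BLOCK * BLOCK;
    }

    int main(void)
    {
        uint8_t src[23];                  /* 23 bytes: not block-aligned */
        size_t len = sizeof(src);

        memset(src, 0xab, len);

        if (len % BLOCK == 0) {
            puts("aligned: DMA straight from the caller's buffer");
            return 0;
        }

        /* Unaligned: stage through a padded bounce buffer, as the
         * driver does with dd->buf / dd->aligned_sg, then copy back
         * after the transfer finishes. */
        size_t plen = pad_to_block(len);
        uint8_t *bounce = calloc(1, plen);

        if (!bounce)
            return 1;
        memcpy(bounce, src, len);
        printf("bounced %zu bytes as a %zu-byte aligned buffer\n", len, plen);
        free(bounce);
        return 0;
    }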
dd                933 drivers/crypto/atmel-aes.c static int atmel_aes_handle_queue(struct atmel_aes_dev *dd,
dd                942 drivers/crypto/atmel-aes.c 	spin_lock_irqsave(&dd->lock, flags);
dd                944 drivers/crypto/atmel-aes.c 		ret = crypto_enqueue_request(&dd->queue, new_areq);
dd                945 drivers/crypto/atmel-aes.c 	if (dd->flags & AES_FLAGS_BUSY) {
dd                946 drivers/crypto/atmel-aes.c 		spin_unlock_irqrestore(&dd->lock, flags);
dd                949 drivers/crypto/atmel-aes.c 	backlog = crypto_get_backlog(&dd->queue);
dd                950 drivers/crypto/atmel-aes.c 	areq = crypto_dequeue_request(&dd->queue);
dd                952 drivers/crypto/atmel-aes.c 		dd->flags |= AES_FLAGS_BUSY;
dd                953 drivers/crypto/atmel-aes.c 	spin_unlock_irqrestore(&dd->lock, flags);
dd                963 drivers/crypto/atmel-aes.c 	dd->areq = areq;
dd                964 drivers/crypto/atmel-aes.c 	dd->ctx = ctx;
dd                966 drivers/crypto/atmel-aes.c 	dd->is_async = start_async;
dd                969 drivers/crypto/atmel-aes.c 	err = ctx->start(dd);
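atmel_aes_handle_queue() enqueues under dd->lock, returns early if AES_FLAGS_BUSY is set, and otherwise dequeues one request, marks the engine busy, and calls ctx->start(); completion re-runs the queue from the tasklet. A single-threaded sketch of that busy-flag gate (locking is omitted because this demo has one thread):

    #include <stdio.h>

    #define QLEN 4

    struct req { int id; };

    static struct req *queue[QLEN];
    static int head, tail, busy;

    static int enqueue(struct req *r)
    {
        if ((tail + 1) % QLEN == head)
            return -1;                 /* queue full */
        queue[tail] = r;
        tail = (tail + 1) % QLEN;
        return 0;
    }

    /* One pass of handle_queue: optionally enqueue, then start the next
     * request only if the engine is idle (the BUSY gate in the driver). */
    static void handle_queue(struct req *new_req)
    {
        if (new_req)
            enqueue(new_req);
        if (busy || head == tail)
            return;

        struct req *r = queue[head];

        head = (head + 1) % QLEN;
        busy = 1;
        printf("starting req %d\n", r->id);
    }

    static void complete(void)         /* driver: tasklet re-runs the queue */
    {
        busy = 0;
        handle_queue(NULL);
    }

    int main(void)
    {
        struct req a = { 1 }, b = { 2 };

        handle_queue(&a);   /* starts 1 */
        handle_queue(&b);   /* engine busy: 2 is only queued */
        complete();         /* 1 done -> starts 2 */
        complete();
        return 0;
    }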
dd                976 drivers/crypto/atmel-aes.c static int atmel_aes_transfer_complete(struct atmel_aes_dev *dd)
dd                978 drivers/crypto/atmel-aes.c 	return atmel_aes_complete(dd, 0);
dd                981 drivers/crypto/atmel-aes.c static int atmel_aes_start(struct atmel_aes_dev *dd)
dd                983 drivers/crypto/atmel-aes.c 	struct ablkcipher_request *req = ablkcipher_request_cast(dd->areq);
dd                986 drivers/crypto/atmel-aes.c 			dd->ctx->block_size != AES_BLOCK_SIZE);
dd                989 drivers/crypto/atmel-aes.c 	atmel_aes_set_mode(dd, rctx);
dd                991 drivers/crypto/atmel-aes.c 	err = atmel_aes_hw_init(dd);
dd                993 drivers/crypto/atmel-aes.c 		return atmel_aes_complete(dd, err);
dd                995 drivers/crypto/atmel-aes.c 	atmel_aes_write_ctrl(dd, use_dma, req->info);
dd                997 drivers/crypto/atmel-aes.c 		return atmel_aes_dma_start(dd, req->src, req->dst, req->nbytes,
dd               1000 drivers/crypto/atmel-aes.c 	return atmel_aes_cpu_start(dd, req->src, req->dst, req->nbytes,
dd               1010 drivers/crypto/atmel-aes.c static int atmel_aes_ctr_transfer(struct atmel_aes_dev *dd)
dd               1012 drivers/crypto/atmel-aes.c 	struct atmel_aes_ctr_ctx *ctx = atmel_aes_ctr_ctx_cast(dd->ctx);
dd               1013 drivers/crypto/atmel-aes.c 	struct ablkcipher_request *req = ablkcipher_request_cast(dd->areq);
dd               1021 drivers/crypto/atmel-aes.c 	ctx->offset += dd->total;
dd               1023 drivers/crypto/atmel-aes.c 		return atmel_aes_transfer_complete(dd);
dd               1048 drivers/crypto/atmel-aes.c 	atmel_aes_write_ctrl(dd, use_dma, ctx->iv);
dd               1059 drivers/crypto/atmel-aes.c 		return atmel_aes_dma_start(dd, src, dst, datalen,
dd               1062 drivers/crypto/atmel-aes.c 	return atmel_aes_cpu_start(dd, src, dst, datalen,
dd               1066 drivers/crypto/atmel-aes.c static int atmel_aes_ctr_start(struct atmel_aes_dev *dd)
dd               1068 drivers/crypto/atmel-aes.c 	struct atmel_aes_ctr_ctx *ctx = atmel_aes_ctr_ctx_cast(dd->ctx);
dd               1069 drivers/crypto/atmel-aes.c 	struct ablkcipher_request *req = ablkcipher_request_cast(dd->areq);
dd               1073 drivers/crypto/atmel-aes.c 	atmel_aes_set_mode(dd, rctx);
dd               1075 drivers/crypto/atmel-aes.c 	err = atmel_aes_hw_init(dd);
dd               1077 drivers/crypto/atmel-aes.c 		return atmel_aes_complete(dd, err);
dd               1081 drivers/crypto/atmel-aes.c 	dd->total = 0;
dd               1082 drivers/crypto/atmel-aes.c 	return atmel_aes_ctr_transfer(dd);
dd               1090 drivers/crypto/atmel-aes.c 	struct atmel_aes_dev *dd;
dd               1115 drivers/crypto/atmel-aes.c 	dd = atmel_aes_find_dev(ctx);
dd               1116 drivers/crypto/atmel-aes.c 	if (!dd)
dd               1131 drivers/crypto/atmel-aes.c 	return atmel_aes_handle_queue(dd, &req->base);
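The CTR path processes a request in chunks: atmel_aes_ctr_transfer() advances ctx->offset by dd->total each round and re-programs the IV before the next transfer. The listing does not show the counter arithmetic itself; the sketch below is the standard big-endian 128-bit counter advance a driver needs between partial CTR transfers.

    #include <stdint.h>
    #include <stdio.h>

    /* Advance a big-endian 128-bit CTR block by 'inc' block counts,
     * byte-by-byte from the least significant end with carry. */
    static void ctr_add(uint8_t iv[16], uint64_t inc)
    {
        for (int i = 15; i >= 0; i--) {
            uint64_t v = (uint64_t)iv[i] + (inc & 0xff);

            iv[i] = (uint8_t)v;
            inc = (inc >> 8) + (v >> 8);   /* propagate the carry */
            if (!inc)
                break;
        }
    }

    int main(void)
    {
        uint8_t iv[16] = { 0 };

        iv[15] = 0xff;
        ctr_add(iv, 1);
        printf("%02x %02x\n", iv[14], iv[15]);   /* prints: 01 00 */
        return 0;
    }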
dd               1448 drivers/crypto/atmel-aes.c static int atmel_aes_gcm_ghash(struct atmel_aes_dev *dd,
dd               1452 drivers/crypto/atmel-aes.c static int atmel_aes_gcm_ghash_init(struct atmel_aes_dev *dd);
dd               1453 drivers/crypto/atmel-aes.c static int atmel_aes_gcm_ghash_finalize(struct atmel_aes_dev *dd);
dd               1455 drivers/crypto/atmel-aes.c static int atmel_aes_gcm_start(struct atmel_aes_dev *dd);
dd               1456 drivers/crypto/atmel-aes.c static int atmel_aes_gcm_process(struct atmel_aes_dev *dd);
dd               1457 drivers/crypto/atmel-aes.c static int atmel_aes_gcm_length(struct atmel_aes_dev *dd);
dd               1458 drivers/crypto/atmel-aes.c static int atmel_aes_gcm_data(struct atmel_aes_dev *dd);
dd               1459 drivers/crypto/atmel-aes.c static int atmel_aes_gcm_tag_init(struct atmel_aes_dev *dd);
dd               1460 drivers/crypto/atmel-aes.c static int atmel_aes_gcm_tag(struct atmel_aes_dev *dd);
dd               1461 drivers/crypto/atmel-aes.c static int atmel_aes_gcm_finalize(struct atmel_aes_dev *dd);
dd               1469 drivers/crypto/atmel-aes.c static int atmel_aes_gcm_ghash(struct atmel_aes_dev *dd,
dd               1474 drivers/crypto/atmel-aes.c 	struct atmel_aes_gcm_ctx *ctx = atmel_aes_gcm_ctx_cast(dd->ctx);
dd               1476 drivers/crypto/atmel-aes.c 	dd->data = (u32 *)data;
dd               1477 drivers/crypto/atmel-aes.c 	dd->datalen = datalen;
dd               1482 drivers/crypto/atmel-aes.c 	atmel_aes_write_ctrl(dd, false, NULL);
dd               1483 drivers/crypto/atmel-aes.c 	return atmel_aes_wait_for_data_ready(dd, atmel_aes_gcm_ghash_init);
dd               1486 drivers/crypto/atmel-aes.c static int atmel_aes_gcm_ghash_init(struct atmel_aes_dev *dd)
dd               1488 drivers/crypto/atmel-aes.c 	struct atmel_aes_gcm_ctx *ctx = atmel_aes_gcm_ctx_cast(dd->ctx);
dd               1491 drivers/crypto/atmel-aes.c 	atmel_aes_write(dd, AES_AADLENR, dd->total);
dd               1492 drivers/crypto/atmel-aes.c 	atmel_aes_write(dd, AES_CLENR, 0);
dd               1496 drivers/crypto/atmel-aes.c 		atmel_aes_write_block(dd, AES_GHASHR(0), ctx->ghash_in);
dd               1498 drivers/crypto/atmel-aes.c 	return atmel_aes_gcm_ghash_finalize(dd);
dd               1501 drivers/crypto/atmel-aes.c static int atmel_aes_gcm_ghash_finalize(struct atmel_aes_dev *dd)
dd               1503 drivers/crypto/atmel-aes.c 	struct atmel_aes_gcm_ctx *ctx = atmel_aes_gcm_ctx_cast(dd->ctx);
dd               1507 drivers/crypto/atmel-aes.c 	while (dd->datalen > 0) {
dd               1508 drivers/crypto/atmel-aes.c 		atmel_aes_write_block(dd, AES_IDATAR(0), dd->data);
dd               1509 drivers/crypto/atmel-aes.c 		dd->data += 4;
dd               1510 drivers/crypto/atmel-aes.c 		dd->datalen -= AES_BLOCK_SIZE;
dd               1512 drivers/crypto/atmel-aes.c 		isr = atmel_aes_read(dd, AES_ISR);
dd               1514 drivers/crypto/atmel-aes.c 			dd->resume = atmel_aes_gcm_ghash_finalize;
dd               1515 drivers/crypto/atmel-aes.c 			atmel_aes_write(dd, AES_IER, AES_INT_DATARDY);
dd               1521 drivers/crypto/atmel-aes.c 	atmel_aes_read_block(dd, AES_GHASHR(0), ctx->ghash_out);
dd               1523 drivers/crypto/atmel-aes.c 	return ctx->ghash_resume(dd);
dd               1527 drivers/crypto/atmel-aes.c static int atmel_aes_gcm_start(struct atmel_aes_dev *dd)
dd               1529 drivers/crypto/atmel-aes.c 	struct atmel_aes_gcm_ctx *ctx = atmel_aes_gcm_ctx_cast(dd->ctx);
dd               1530 drivers/crypto/atmel-aes.c 	struct aead_request *req = aead_request_cast(dd->areq);
dd               1536 drivers/crypto/atmel-aes.c 	u8 *data = dd->buf;
dd               1539 drivers/crypto/atmel-aes.c 	atmel_aes_set_mode(dd, rctx);
dd               1541 drivers/crypto/atmel-aes.c 	err = atmel_aes_hw_init(dd);
dd               1543 drivers/crypto/atmel-aes.c 		return atmel_aes_complete(dd, err);
dd               1548 drivers/crypto/atmel-aes.c 		return atmel_aes_gcm_process(dd);
dd               1553 drivers/crypto/atmel-aes.c 	if (datalen > dd->buflen)
dd               1554 drivers/crypto/atmel-aes.c 		return atmel_aes_complete(dd, -EINVAL);
dd               1560 drivers/crypto/atmel-aes.c 	return atmel_aes_gcm_ghash(dd, (const u32 *)data, datalen,
dd               1564 drivers/crypto/atmel-aes.c static int atmel_aes_gcm_process(struct atmel_aes_dev *dd)
dd               1566 drivers/crypto/atmel-aes.c 	struct atmel_aes_gcm_ctx *ctx = atmel_aes_gcm_ctx_cast(dd->ctx);
dd               1567 drivers/crypto/atmel-aes.c 	struct aead_request *req = aead_request_cast(dd->areq);
dd               1569 drivers/crypto/atmel-aes.c 	bool enc = atmel_aes_is_encrypt(dd);
dd               1581 drivers/crypto/atmel-aes.c 		dd->flags |= AES_FLAGS_GTAGEN;
dd               1583 drivers/crypto/atmel-aes.c 	atmel_aes_write_ctrl(dd, false, NULL);
dd               1584 drivers/crypto/atmel-aes.c 	return atmel_aes_wait_for_data_ready(dd, atmel_aes_gcm_length);
dd               1587 drivers/crypto/atmel-aes.c static int atmel_aes_gcm_length(struct atmel_aes_dev *dd)
dd               1589 drivers/crypto/atmel-aes.c 	struct atmel_aes_gcm_ctx *ctx = atmel_aes_gcm_ctx_cast(dd->ctx);
dd               1590 drivers/crypto/atmel-aes.c 	struct aead_request *req = aead_request_cast(dd->areq);
dd               1597 drivers/crypto/atmel-aes.c 	atmel_aes_write_block(dd, AES_IVR(0), j0);
dd               1601 drivers/crypto/atmel-aes.c 	atmel_aes_write(dd, AES_AADLENR, req->assoclen);
dd               1602 drivers/crypto/atmel-aes.c 	atmel_aes_write(dd, AES_CLENR, ctx->textlen);
dd               1606 drivers/crypto/atmel-aes.c 		dd->datalen = 0;
dd               1607 drivers/crypto/atmel-aes.c 		return atmel_aes_gcm_data(dd);
dd               1612 drivers/crypto/atmel-aes.c 	if (unlikely(req->assoclen + padlen > dd->buflen))
dd               1613 drivers/crypto/atmel-aes.c 		return atmel_aes_complete(dd, -EINVAL);
dd               1614 drivers/crypto/atmel-aes.c 	sg_copy_to_buffer(req->src, sg_nents(req->src), dd->buf, req->assoclen);
dd               1617 drivers/crypto/atmel-aes.c 	dd->data = (u32 *)dd->buf;
dd               1618 drivers/crypto/atmel-aes.c 	dd->datalen = req->assoclen + padlen;
dd               1619 drivers/crypto/atmel-aes.c 	return atmel_aes_gcm_data(dd);
dd               1622 drivers/crypto/atmel-aes.c static int atmel_aes_gcm_data(struct atmel_aes_dev *dd)
dd               1624 drivers/crypto/atmel-aes.c 	struct atmel_aes_gcm_ctx *ctx = atmel_aes_gcm_ctx_cast(dd->ctx);
dd               1625 drivers/crypto/atmel-aes.c 	struct aead_request *req = aead_request_cast(dd->areq);
dd               1631 drivers/crypto/atmel-aes.c 	while (dd->datalen > 0) {
dd               1632 drivers/crypto/atmel-aes.c 		atmel_aes_write_block(dd, AES_IDATAR(0), dd->data);
dd               1633 drivers/crypto/atmel-aes.c 		dd->data += 4;
dd               1634 drivers/crypto/atmel-aes.c 		dd->datalen -= AES_BLOCK_SIZE;
dd               1636 drivers/crypto/atmel-aes.c 		isr = atmel_aes_read(dd, AES_ISR);
dd               1638 drivers/crypto/atmel-aes.c 			dd->resume = atmel_aes_gcm_data;
dd               1639 drivers/crypto/atmel-aes.c 			atmel_aes_write(dd, AES_IER, AES_INT_DATARDY);
dd               1646 drivers/crypto/atmel-aes.c 		return atmel_aes_gcm_tag_init(dd);
dd               1655 drivers/crypto/atmel-aes.c 		mr = atmel_aes_read(dd, AES_MR);
dd               1658 drivers/crypto/atmel-aes.c 		if (dd->caps.has_dualbuff)
dd               1660 drivers/crypto/atmel-aes.c 		atmel_aes_write(dd, AES_MR, mr);
dd               1662 drivers/crypto/atmel-aes.c 		return atmel_aes_dma_start(dd, src, dst, ctx->textlen,
dd               1666 drivers/crypto/atmel-aes.c 	return atmel_aes_cpu_start(dd, src, dst, ctx->textlen,
dd               1670 drivers/crypto/atmel-aes.c static int atmel_aes_gcm_tag_init(struct atmel_aes_dev *dd)
dd               1672 drivers/crypto/atmel-aes.c 	struct atmel_aes_gcm_ctx *ctx = atmel_aes_gcm_ctx_cast(dd->ctx);
dd               1673 drivers/crypto/atmel-aes.c 	struct aead_request *req = aead_request_cast(dd->areq);
dd               1674 drivers/crypto/atmel-aes.c 	u64 *data = dd->buf;
dd               1676 drivers/crypto/atmel-aes.c 	if (likely(dd->flags & AES_FLAGS_GTAGEN)) {
dd               1677 drivers/crypto/atmel-aes.c 		if (!(atmel_aes_read(dd, AES_ISR) & AES_INT_TAGRDY)) {
dd               1678 drivers/crypto/atmel-aes.c 			dd->resume = atmel_aes_gcm_tag_init;
dd               1679 drivers/crypto/atmel-aes.c 			atmel_aes_write(dd, AES_IER, AES_INT_TAGRDY);
dd               1683 drivers/crypto/atmel-aes.c 		return atmel_aes_gcm_finalize(dd);
dd               1687 drivers/crypto/atmel-aes.c 	atmel_aes_read_block(dd, AES_GHASHR(0), ctx->ghash);
dd               1692 drivers/crypto/atmel-aes.c 	return atmel_aes_gcm_ghash(dd, (const u32 *)data, AES_BLOCK_SIZE,
dd               1696 drivers/crypto/atmel-aes.c static int atmel_aes_gcm_tag(struct atmel_aes_dev *dd)
dd               1698 drivers/crypto/atmel-aes.c 	struct atmel_aes_gcm_ctx *ctx = atmel_aes_gcm_ctx_cast(dd->ctx);
dd               1705 drivers/crypto/atmel-aes.c 	flags = dd->flags;
dd               1706 drivers/crypto/atmel-aes.c 	dd->flags &= ~(AES_FLAGS_OPMODE_MASK | AES_FLAGS_GTAGEN);
dd               1707 drivers/crypto/atmel-aes.c 	dd->flags |= AES_FLAGS_CTR;
dd               1708 drivers/crypto/atmel-aes.c 	atmel_aes_write_ctrl(dd, false, ctx->j0);
dd               1709 drivers/crypto/atmel-aes.c 	dd->flags = flags;
dd               1711 drivers/crypto/atmel-aes.c 	atmel_aes_write_block(dd, AES_IDATAR(0), ctx->ghash);
dd               1712 drivers/crypto/atmel-aes.c 	return atmel_aes_wait_for_data_ready(dd, atmel_aes_gcm_finalize);
dd               1715 drivers/crypto/atmel-aes.c static int atmel_aes_gcm_finalize(struct atmel_aes_dev *dd)
dd               1717 drivers/crypto/atmel-aes.c 	struct atmel_aes_gcm_ctx *ctx = atmel_aes_gcm_ctx_cast(dd->ctx);
dd               1718 drivers/crypto/atmel-aes.c 	struct aead_request *req = aead_request_cast(dd->areq);
dd               1720 drivers/crypto/atmel-aes.c 	bool enc = atmel_aes_is_encrypt(dd);
dd               1725 drivers/crypto/atmel-aes.c 	if (likely(dd->flags & AES_FLAGS_GTAGEN))
dd               1726 drivers/crypto/atmel-aes.c 		atmel_aes_read_block(dd, AES_TAGR(0), ctx->tag);
dd               1728 drivers/crypto/atmel-aes.c 		atmel_aes_read_block(dd, AES_ODATAR(0), ctx->tag);
dd               1740 drivers/crypto/atmel-aes.c 	return atmel_aes_complete(dd, err);
dd               1748 drivers/crypto/atmel-aes.c 	struct atmel_aes_dev *dd;
dd               1754 drivers/crypto/atmel-aes.c 	dd = atmel_aes_find_dev(ctx);
dd               1755 drivers/crypto/atmel-aes.c 	if (!dd)
dd               1761 drivers/crypto/atmel-aes.c 	return atmel_aes_handle_queue(dd, &req->base);
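The GCM walk above covers GHASH priming, the J0/length programming (AES_AADLENR, AES_CLENR), feeding AAD and text blocks, and the tag path; atmel_aes_gcm_tag_init() builds a final len(A) || len(C) block in dd->buf when the hardware tag engine is bypassed. A sketch of that GHASH length block, with both lengths in bits as big-endian 64-bit words per the GCM specification:

    #include <stdint.h>
    #include <stdio.h>

    /* Build the final GHASH block: len(A) || len(C), both in BITS,
     * each as a big-endian 64-bit value. */
    static void gcm_len_block(uint8_t out[16], uint64_t aad_bytes,
                              uint64_t text_bytes)
    {
        uint64_t abits = aad_bytes * 8, cbits = text_bytes * 8;

        for (int i = 0; i < 8; i++) {
            out[7 - i]  = (uint8_t)(abits >> (8 * i));
            out[15 - i] = (uint8_t)(cbits >> (8 * i));
        }
    }

    int main(void)
    {
        uint8_t blk[16];

        gcm_len_block(blk, 20, 64);    /* 20 AAD bytes, 64 text bytes */
        for (int i = 0; i < 16; i++)
            printf("%02x", blk[i]);
        putchar('\n');
        return 0;
    }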
dd               1852 drivers/crypto/atmel-aes.c static int atmel_aes_xts_process_data(struct atmel_aes_dev *dd);
dd               1854 drivers/crypto/atmel-aes.c static int atmel_aes_xts_start(struct atmel_aes_dev *dd)
dd               1856 drivers/crypto/atmel-aes.c 	struct atmel_aes_xts_ctx *ctx = atmel_aes_xts_ctx_cast(dd->ctx);
dd               1857 drivers/crypto/atmel-aes.c 	struct ablkcipher_request *req = ablkcipher_request_cast(dd->areq);
dd               1862 drivers/crypto/atmel-aes.c 	atmel_aes_set_mode(dd, rctx);
dd               1864 drivers/crypto/atmel-aes.c 	err = atmel_aes_hw_init(dd);
dd               1866 drivers/crypto/atmel-aes.c 		return atmel_aes_complete(dd, err);
dd               1869 drivers/crypto/atmel-aes.c 	flags = dd->flags;
dd               1870 drivers/crypto/atmel-aes.c 	dd->flags &= ~AES_FLAGS_MODE_MASK;
dd               1871 drivers/crypto/atmel-aes.c 	dd->flags |= (AES_FLAGS_ECB | AES_FLAGS_ENCRYPT);
dd               1872 drivers/crypto/atmel-aes.c 	atmel_aes_write_ctrl_key(dd, false, NULL,
dd               1874 drivers/crypto/atmel-aes.c 	dd->flags = flags;
dd               1876 drivers/crypto/atmel-aes.c 	atmel_aes_write_block(dd, AES_IDATAR(0), req->info);
dd               1877 drivers/crypto/atmel-aes.c 	return atmel_aes_wait_for_data_ready(dd, atmel_aes_xts_process_data);
dd               1880 drivers/crypto/atmel-aes.c static int atmel_aes_xts_process_data(struct atmel_aes_dev *dd)
dd               1882 drivers/crypto/atmel-aes.c 	struct ablkcipher_request *req = ablkcipher_request_cast(dd->areq);
dd               1890 drivers/crypto/atmel-aes.c 	atmel_aes_read_block(dd, AES_ODATAR(0), tweak);
dd               1904 drivers/crypto/atmel-aes.c 	atmel_aes_write_ctrl(dd, use_dma, NULL);
dd               1905 drivers/crypto/atmel-aes.c 	atmel_aes_write_block(dd, AES_TWR(0), tweak);
dd               1906 drivers/crypto/atmel-aes.c 	atmel_aes_write_block(dd, AES_ALPHAR(0), one);
dd               1908 drivers/crypto/atmel-aes.c 		return atmel_aes_dma_start(dd, req->src, req->dst, req->nbytes,
dd               1911 drivers/crypto/atmel-aes.c 	return atmel_aes_cpu_start(dd, req->src, req->dst, req->nbytes,
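atmel_aes_xts_start() first encrypts req->info with the second (tweak) key, then hands the result to AES_TWR with AES_ALPHAR set to one so the hardware steps the tweak per block. Done in software, that per-block step is a GF(2^128) multiply by alpha; a sketch in the little-endian byte convention XTS uses:

    #include <stdint.h>
    #include <stdio.h>

    /* Multiply an XTS tweak by alpha (x) in GF(2^128): shift left by
     * one bit and fold the carry with the 0x87 feedback polynomial. */
    static void xts_mul_alpha(uint8_t t[16])
    {
        uint8_t carry = 0;

        for (int i = 0; i < 16; i++) {
            uint8_t c = t[i] >> 7;

            t[i] = (uint8_t)((t[i] << 1) | carry);
            carry = c;
        }
        if (carry)
            t[0] ^= 0x87;
    }

    int main(void)
    {
        uint8_t t[16] = { 0x80, [15] = 0x80 };

        xts_mul_alpha(t);
        printf("t[0]=%02x t[15]=%02x\n", t[0], t[15]);
        return 0;
    }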
dd               1976 drivers/crypto/atmel-aes.c static int atmel_aes_authenc_start(struct atmel_aes_dev *dd);
dd               1977 drivers/crypto/atmel-aes.c static int atmel_aes_authenc_init(struct atmel_aes_dev *dd, int err,
dd               1979 drivers/crypto/atmel-aes.c static int atmel_aes_authenc_transfer(struct atmel_aes_dev *dd, int err,
dd               1981 drivers/crypto/atmel-aes.c static int atmel_aes_authenc_digest(struct atmel_aes_dev *dd);
dd               1982 drivers/crypto/atmel-aes.c static int atmel_aes_authenc_final(struct atmel_aes_dev *dd, int err,
dd               1985 drivers/crypto/atmel-aes.c static void atmel_aes_authenc_complete(struct atmel_aes_dev *dd, int err)
dd               1987 drivers/crypto/atmel-aes.c 	struct aead_request *req = aead_request_cast(dd->areq);
dd               1990 drivers/crypto/atmel-aes.c 	if (err && (dd->flags & AES_FLAGS_OWN_SHA))
dd               1992 drivers/crypto/atmel-aes.c 	dd->flags &= ~AES_FLAGS_OWN_SHA;
dd               1995 drivers/crypto/atmel-aes.c static int atmel_aes_authenc_start(struct atmel_aes_dev *dd)
dd               1997 drivers/crypto/atmel-aes.c 	struct aead_request *req = aead_request_cast(dd->areq);
dd               2003 drivers/crypto/atmel-aes.c 	atmel_aes_set_mode(dd, &rctx->base);
dd               2005 drivers/crypto/atmel-aes.c 	err = atmel_aes_hw_init(dd);
dd               2007 drivers/crypto/atmel-aes.c 		return atmel_aes_complete(dd, err);
dd               2010 drivers/crypto/atmel-aes.c 					  atmel_aes_authenc_init, dd);
dd               2013 drivers/crypto/atmel-aes.c static int atmel_aes_authenc_init(struct atmel_aes_dev *dd, int err,
dd               2016 drivers/crypto/atmel-aes.c 	struct aead_request *req = aead_request_cast(dd->areq);
dd               2020 drivers/crypto/atmel-aes.c 		dd->is_async = true;
dd               2022 drivers/crypto/atmel-aes.c 		return atmel_aes_complete(dd, err);
dd               2025 drivers/crypto/atmel-aes.c 	dd->flags |= AES_FLAGS_OWN_SHA;
dd               2031 drivers/crypto/atmel-aes.c 				      atmel_aes_authenc_transfer, dd);
dd               2034 drivers/crypto/atmel-aes.c static int atmel_aes_authenc_transfer(struct atmel_aes_dev *dd, int err,
dd               2037 drivers/crypto/atmel-aes.c 	struct aead_request *req = aead_request_cast(dd->areq);
dd               2039 drivers/crypto/atmel-aes.c 	bool enc = atmel_aes_is_encrypt(dd);
dd               2045 drivers/crypto/atmel-aes.c 		dd->is_async = true;
dd               2047 drivers/crypto/atmel-aes.c 		return atmel_aes_complete(dd, err);
dd               2066 drivers/crypto/atmel-aes.c 	atmel_aes_write_ctrl(dd, true, iv);
dd               2070 drivers/crypto/atmel-aes.c 	atmel_aes_write(dd, AES_EMR, emr);
dd               2073 drivers/crypto/atmel-aes.c 	return atmel_aes_dma_start(dd, src, dst, rctx->textlen,
dd               2077 drivers/crypto/atmel-aes.c static int atmel_aes_authenc_digest(struct atmel_aes_dev *dd)
dd               2079 drivers/crypto/atmel-aes.c 	struct aead_request *req = aead_request_cast(dd->areq);
dd               2083 drivers/crypto/atmel-aes.c 	dd->flags &= ~AES_FLAGS_OWN_SHA;
dd               2086 drivers/crypto/atmel-aes.c 				       atmel_aes_authenc_final, dd);
dd               2089 drivers/crypto/atmel-aes.c static int atmel_aes_authenc_final(struct atmel_aes_dev *dd, int err,
dd               2092 drivers/crypto/atmel-aes.c 	struct aead_request *req = aead_request_cast(dd->areq);
dd               2095 drivers/crypto/atmel-aes.c 	bool enc = atmel_aes_is_encrypt(dd);
dd               2100 drivers/crypto/atmel-aes.c 		dd->is_async = true;
dd               2115 drivers/crypto/atmel-aes.c 	return atmel_aes_complete(dd, err);
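The authenc flow hands the request to both the AES and SHA engines (AES_FLAGS_OWN_SHA tracks ownership of the hash unit), digests, and on decrypt verifies the transmitted tag in atmel_aes_authenc_final(), typically via a constant-time compare such as the kernel's crypto_memneq(). A standalone constant-time tag check:

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    /* Constant-time comparison: accumulate XOR differences over the
     * whole length so timing does not leak the first mismatch position.
     * Returns nonzero if the tags differ. */
    static int tag_neq(const uint8_t *a, const uint8_t *b, size_t len)
    {
        uint8_t diff = 0;

        for (size_t i = 0; i < len; i++)
            diff |= a[i] ^ b[i];
        return diff != 0;
    }

    int main(void)
    {
        uint8_t want[16], got[16];

        memset(want, 0x5a, sizeof(want));
        memcpy(got, want, sizeof(got));
        printf("verify: %s\n", tag_neq(want, got, 16) ? "-EBADMSG" : "ok");
        return 0;
    }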
dd               2213 drivers/crypto/atmel-aes.c 	struct atmel_aes_dev *dd;
dd               2232 drivers/crypto/atmel-aes.c 	dd = atmel_aes_find_dev(ctx);
dd               2233 drivers/crypto/atmel-aes.c 	if (!dd)
dd               2236 drivers/crypto/atmel-aes.c 	return atmel_aes_handle_queue(dd, &req->base);
dd               2355 drivers/crypto/atmel-aes.c static int atmel_aes_buff_init(struct atmel_aes_dev *dd)
dd               2357 drivers/crypto/atmel-aes.c 	dd->buf = (void *)__get_free_pages(GFP_KERNEL, ATMEL_AES_BUFFER_ORDER);
dd               2358 drivers/crypto/atmel-aes.c 	dd->buflen = ATMEL_AES_BUFFER_SIZE;
dd               2359 drivers/crypto/atmel-aes.c 	dd->buflen &= ~(AES_BLOCK_SIZE - 1);
dd               2361 drivers/crypto/atmel-aes.c 	if (!dd->buf) {
dd               2362 drivers/crypto/atmel-aes.c 		dev_err(dd->dev, "unable to alloc pages.\n");
dd               2369 drivers/crypto/atmel-aes.c static void atmel_aes_buff_cleanup(struct atmel_aes_dev *dd)
dd               2371 drivers/crypto/atmel-aes.c 	free_page((unsigned long)dd->buf);
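The atmel_aes_buff_init occurrences above (atmel-aes.c:2357-2362) round the DMA bounce buffer down to a whole number of cipher blocks. A minimal sketch of the round-down idiom, assuming only that AES_BLOCK_SIZE (16) is a power of two:

	/* Round the buffer length down to a multiple of the block size.
	 * Works because AES_BLOCK_SIZE is a power of two: 100 & ~15 == 96. */
	size_t buflen = ATMEL_AES_BUFFER_SIZE;

	buflen &= ~(AES_BLOCK_SIZE - 1);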
dd               2386 drivers/crypto/atmel-aes.c static int atmel_aes_dma_init(struct atmel_aes_dev *dd,
dd               2397 drivers/crypto/atmel-aes.c 	dd->src.chan = dma_request_slave_channel_compat(mask, atmel_aes_filter,
dd               2398 drivers/crypto/atmel-aes.c 							slave, dd->dev, "tx");
dd               2399 drivers/crypto/atmel-aes.c 	if (!dd->src.chan)
dd               2403 drivers/crypto/atmel-aes.c 	dd->dst.chan = dma_request_slave_channel_compat(mask, atmel_aes_filter,
dd               2404 drivers/crypto/atmel-aes.c 							slave, dd->dev, "rx");
dd               2405 drivers/crypto/atmel-aes.c 	if (!dd->dst.chan)
dd               2411 drivers/crypto/atmel-aes.c 	dma_release_channel(dd->src.chan);
dd               2413 drivers/crypto/atmel-aes.c 	dev_warn(dd->dev, "no DMA channel available\n");
dd               2417 drivers/crypto/atmel-aes.c static void atmel_aes_dma_cleanup(struct atmel_aes_dev *dd)
dd               2419 drivers/crypto/atmel-aes.c 	dma_release_channel(dd->dst.chan);
dd               2420 drivers/crypto/atmel-aes.c 	dma_release_channel(dd->src.chan);
dd               2425 drivers/crypto/atmel-aes.c 	struct atmel_aes_dev *dd = (struct atmel_aes_dev *)data;
dd               2427 drivers/crypto/atmel-aes.c 	atmel_aes_handle_queue(dd, NULL);
dd               2432 drivers/crypto/atmel-aes.c 	struct atmel_aes_dev *dd = (struct atmel_aes_dev *)data;
dd               2434 drivers/crypto/atmel-aes.c 	dd->is_async = true;
dd               2435 drivers/crypto/atmel-aes.c 	(void)dd->resume(dd);
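The queue_task/done_task occurrences above (atmel-aes.c:2425-2435) are the tail of the continuation idiom all three drivers share: the interrupt path only schedules the tasklet, and the tasklet re-enters whatever step was parked in dd->resume. A self-contained sketch of that idiom; my_dev, my_start and my_final are illustrative names, not driver symbols:

#include <linux/errno.h>

struct my_dev {
	bool is_async;
	int (*resume)(struct my_dev *dd);	/* next step of the state machine */
};

static int my_final(struct my_dev *dd)
{
	/* complete the request; dd->is_async says a completion callback is due */
	return 0;
}

static int my_start(struct my_dev *dd)
{
	dd->resume = my_final;		/* park the continuation */
	/* unmask the IRQ and let the hardware run */
	return -EINPROGRESS;
}

static void my_done_tasklet(unsigned long data)
{
	struct my_dev *dd = (struct my_dev *)data;

	dd->is_async = true;		/* we are off the original call path now */
	(void)dd->resume(dd);
}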
dd               2456 drivers/crypto/atmel-aes.c static void atmel_aes_unregister_algs(struct atmel_aes_dev *dd)
dd               2461 drivers/crypto/atmel-aes.c 	if (dd->caps.has_authenc)
dd               2466 drivers/crypto/atmel-aes.c 	if (dd->caps.has_xts)
dd               2469 drivers/crypto/atmel-aes.c 	if (dd->caps.has_gcm)
dd               2472 drivers/crypto/atmel-aes.c 	if (dd->caps.has_cfb64)
dd               2479 drivers/crypto/atmel-aes.c static int atmel_aes_register_algs(struct atmel_aes_dev *dd)
dd               2489 drivers/crypto/atmel-aes.c 	if (dd->caps.has_cfb64) {
dd               2495 drivers/crypto/atmel-aes.c 	if (dd->caps.has_gcm) {
dd               2501 drivers/crypto/atmel-aes.c 	if (dd->caps.has_xts) {
dd               2508 drivers/crypto/atmel-aes.c 	if (dd->caps.has_authenc) {
dd               2539 drivers/crypto/atmel-aes.c static void atmel_aes_get_cap(struct atmel_aes_dev *dd)
dd               2541 drivers/crypto/atmel-aes.c 	dd->caps.has_dualbuff = 0;
dd               2542 drivers/crypto/atmel-aes.c 	dd->caps.has_cfb64 = 0;
dd               2543 drivers/crypto/atmel-aes.c 	dd->caps.has_gcm = 0;
dd               2544 drivers/crypto/atmel-aes.c 	dd->caps.has_xts = 0;
dd               2545 drivers/crypto/atmel-aes.c 	dd->caps.has_authenc = 0;
dd               2546 drivers/crypto/atmel-aes.c 	dd->caps.max_burst_size = 1;
dd               2549 drivers/crypto/atmel-aes.c 	switch (dd->hw_version & 0xff0) {
dd               2551 drivers/crypto/atmel-aes.c 		dd->caps.has_dualbuff = 1;
dd               2552 drivers/crypto/atmel-aes.c 		dd->caps.has_cfb64 = 1;
dd               2553 drivers/crypto/atmel-aes.c 		dd->caps.has_gcm = 1;
dd               2554 drivers/crypto/atmel-aes.c 		dd->caps.has_xts = 1;
dd               2555 drivers/crypto/atmel-aes.c 		dd->caps.has_authenc = 1;
dd               2556 drivers/crypto/atmel-aes.c 		dd->caps.max_burst_size = 4;
dd               2559 drivers/crypto/atmel-aes.c 		dd->caps.has_dualbuff = 1;
dd               2560 drivers/crypto/atmel-aes.c 		dd->caps.has_cfb64 = 1;
dd               2561 drivers/crypto/atmel-aes.c 		dd->caps.has_gcm = 1;
dd               2562 drivers/crypto/atmel-aes.c 		dd->caps.max_burst_size = 4;
dd               2565 drivers/crypto/atmel-aes.c 		dd->caps.has_dualbuff = 1;
dd               2566 drivers/crypto/atmel-aes.c 		dd->caps.has_cfb64 = 1;
dd               2567 drivers/crypto/atmel-aes.c 		dd->caps.max_burst_size = 4;
dd               2572 drivers/crypto/atmel-aes.c 		dev_warn(dd->dev,
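atmel_aes_get_cap (2539-2572) first clears every capability, then switches on dd->hw_version with the low patch-level nibble masked off, so known revisions only enable features and unknown ones keep the safe defaults and hit the dev_warn above. A hedged sketch of the shape; the revision codes and feature set below are illustrative, not the driver's real table:

struct example_caps {
	unsigned int has_gcm:1;
	unsigned int has_xts:1;
	unsigned int max_burst_size;
};

static void example_get_cap(u32 hw_version, struct example_caps *caps)
{
	/* safe defaults first: assume the oldest, most limited IP */
	caps->has_gcm = 0;
	caps->has_xts = 0;
	caps->max_burst_size = 1;

	switch (hw_version & 0xff0) {	/* ignore the patch nibble */
	case 0x200:			/* hypothetical newer revision */
		caps->has_gcm = 1;
		caps->has_xts = 1;
		caps->max_burst_size = 4;
		break;
	case 0x100:			/* hypothetical older revision */
		caps->max_burst_size = 4;
		break;
	default:			/* unknown IP: keep defaults, warn */
		break;
	}
}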
dd                 39 drivers/crypto/atmel-authenc.h 			       struct atmel_aes_dev *dd);
dd                 44 drivers/crypto/atmel-authenc.h 			   struct atmel_aes_dev *dd);
dd                 48 drivers/crypto/atmel-authenc.h 			    struct atmel_aes_dev *dd);
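The three prototypes above are the whole contract of atmel-authenc.h: for authenc(hmac(shaX),...(aes)) requests the AES driver drives the SHA device through these entry points, passing its own struct atmel_aes_dev plus a completion callback, and the SHA side bounces control back via authctx->cb(authctx->aes_dev, err, dd->is_async) (see atmel-sha.c:2244 and 2459). A hedged sketch of that callback contract; the typedef and function names here are illustrative, not the header's real ones:

/* Completion callback the AES driver registers with the SHA driver. */
typedef int (*example_authenc_fn)(struct atmel_aes_dev *aes_dev, int err,
				  bool is_async);

/* SHA side: finish one hash phase, then hand control back to AES. */
static int example_phase_done(struct atmel_sha_dev *dd,
			      example_authenc_fn cb,
			      struct atmel_aes_dev *aes_dev)
{
	/* ... read out digest registers, clean up ... */
	return cb(aes_dev, 0, dd->is_async);
}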
dd                 88 drivers/crypto/atmel-sha.c 	struct atmel_sha_dev	*dd;
dd                112 drivers/crypto/atmel-sha.c 	struct atmel_sha_dev	*dd;
dd                252 drivers/crypto/atmel-sha.c static inline u32 atmel_sha_read(struct atmel_sha_dev *dd, u32 offset)
dd                254 drivers/crypto/atmel-sha.c 	u32 value = readl_relaxed(dd->io_base + offset);
dd                257 drivers/crypto/atmel-sha.c 	if (dd->flags & SHA_FLAGS_DUMP_REG) {
dd                260 drivers/crypto/atmel-sha.c 		dev_vdbg(dd->dev, "read 0x%08x from %s\n", value,
dd                268 drivers/crypto/atmel-sha.c static inline void atmel_sha_write(struct atmel_sha_dev *dd,
dd                272 drivers/crypto/atmel-sha.c 	if (dd->flags & SHA_FLAGS_DUMP_REG) {
dd                275 drivers/crypto/atmel-sha.c 		dev_vdbg(dd->dev, "write 0x%08x into %s\n", value,
dd                280 drivers/crypto/atmel-sha.c 	writel_relaxed(value, dd->io_base + offset);
dd                283 drivers/crypto/atmel-sha.c static inline int atmel_sha_complete(struct atmel_sha_dev *dd, int err)
dd                285 drivers/crypto/atmel-sha.c 	struct ahash_request *req = dd->req;
dd                287 drivers/crypto/atmel-sha.c 	dd->flags &= ~(SHA_FLAGS_BUSY | SHA_FLAGS_FINAL | SHA_FLAGS_CPU |
dd                291 drivers/crypto/atmel-sha.c 	clk_disable(dd->iclk);
dd                293 drivers/crypto/atmel-sha.c 	if ((dd->is_async || dd->force_complete) && req->base.complete)
dd                297 drivers/crypto/atmel-sha.c 	tasklet_schedule(&dd->queue_task);
dd                406 drivers/crypto/atmel-sha.c 	struct atmel_sha_dev *dd = NULL;
dd                410 drivers/crypto/atmel-sha.c 	if (!tctx->dd) {
dd                412 drivers/crypto/atmel-sha.c 			dd = tmp;
dd                415 drivers/crypto/atmel-sha.c 		tctx->dd = dd;
dd                417 drivers/crypto/atmel-sha.c 		dd = tctx->dd;
dd                422 drivers/crypto/atmel-sha.c 	return dd;
dd                430 drivers/crypto/atmel-sha.c 	struct atmel_sha_dev *dd = atmel_sha_find_dev(tctx);
dd                432 drivers/crypto/atmel-sha.c 	ctx->dd = dd;
dd                436 drivers/crypto/atmel-sha.c 	dev_dbg(dd->dev, "init: digest size: %d\n",
dd                473 drivers/crypto/atmel-sha.c static void atmel_sha_write_ctrl(struct atmel_sha_dev *dd, int dma)
dd                475 drivers/crypto/atmel-sha.c 	struct atmel_sha_reqctx *ctx = ahash_request_ctx(dd->req);
dd                480 drivers/crypto/atmel-sha.c 		if (!dd->caps.has_dma)
dd                481 drivers/crypto/atmel-sha.c 			atmel_sha_write(dd, SHA_IER, SHA_INT_TXBUFE);
dd                483 drivers/crypto/atmel-sha.c 		if (dd->caps.has_dualbuff)
dd                486 drivers/crypto/atmel-sha.c 		atmel_sha_write(dd, SHA_IER, SHA_INT_DATARDY);
dd                521 drivers/crypto/atmel-sha.c 		atmel_sha_write(dd, SHA_CR, SHA_CR_FIRST);
dd                522 drivers/crypto/atmel-sha.c 	} else if (dd->caps.has_uihv && (ctx->flags & SHA_FLAGS_RESTORE)) {
dd                532 drivers/crypto/atmel-sha.c 		atmel_sha_write(dd, SHA_CR, SHA_CR_WUIHV);
dd                534 drivers/crypto/atmel-sha.c 			atmel_sha_write(dd, SHA_REG_DIN(i), hash[i]);
dd                535 drivers/crypto/atmel-sha.c 		atmel_sha_write(dd, SHA_CR, SHA_CR_FIRST);
dd                545 drivers/crypto/atmel-sha.c 	atmel_sha_write(dd, SHA_MR, valmr);
dd                548 drivers/crypto/atmel-sha.c static inline int atmel_sha_wait_for_data_ready(struct atmel_sha_dev *dd,
dd                551 drivers/crypto/atmel-sha.c 	u32 isr = atmel_sha_read(dd, SHA_ISR);
dd                554 drivers/crypto/atmel-sha.c 		return resume(dd);
dd                556 drivers/crypto/atmel-sha.c 	dd->resume = resume;
dd                557 drivers/crypto/atmel-sha.c 	atmel_sha_write(dd, SHA_IER, SHA_INT_DATARDY);
dd                561 drivers/crypto/atmel-sha.c static int atmel_sha_xmit_cpu(struct atmel_sha_dev *dd, const u8 *buf,
dd                564 drivers/crypto/atmel-sha.c 	struct atmel_sha_reqctx *ctx = ahash_request_ctx(dd->req);
dd                568 drivers/crypto/atmel-sha.c 	dev_dbg(dd->dev, "xmit_cpu: digcnt: 0x%llx 0x%llx, length: %zd, final: %d\n",
dd                571 drivers/crypto/atmel-sha.c 	atmel_sha_write_ctrl(dd, 0);
dd                579 drivers/crypto/atmel-sha.c 		dd->flags |= SHA_FLAGS_FINAL; /* catch last interrupt */
dd                583 drivers/crypto/atmel-sha.c 	dd->flags |= SHA_FLAGS_CPU;
dd                586 drivers/crypto/atmel-sha.c 		atmel_sha_write(dd, SHA_REG_DIN(count), buffer[count]);
dd                591 drivers/crypto/atmel-sha.c static int atmel_sha_xmit_pdc(struct atmel_sha_dev *dd, dma_addr_t dma_addr1,
dd                594 drivers/crypto/atmel-sha.c 	struct atmel_sha_reqctx *ctx = ahash_request_ctx(dd->req);
dd                597 drivers/crypto/atmel-sha.c 	dev_dbg(dd->dev, "xmit_pdc: digcnt: 0x%llx 0x%llx, length: %zd, final: %d\n",
dd                601 drivers/crypto/atmel-sha.c 	atmel_sha_write(dd, SHA_PTCR, SHA_PTCR_TXTDIS);
dd                602 drivers/crypto/atmel-sha.c 	atmel_sha_write(dd, SHA_TPR, dma_addr1);
dd                603 drivers/crypto/atmel-sha.c 	atmel_sha_write(dd, SHA_TCR, len32);
dd                606 drivers/crypto/atmel-sha.c 	atmel_sha_write(dd, SHA_TNPR, dma_addr2);
dd                607 drivers/crypto/atmel-sha.c 	atmel_sha_write(dd, SHA_TNCR, len32);
dd                609 drivers/crypto/atmel-sha.c 	atmel_sha_write_ctrl(dd, 1);
dd                617 drivers/crypto/atmel-sha.c 		dd->flags |= SHA_FLAGS_FINAL; /* catch last interrupt */
dd                619 drivers/crypto/atmel-sha.c 	dd->flags |=  SHA_FLAGS_DMA_ACTIVE;
dd                622 drivers/crypto/atmel-sha.c 	atmel_sha_write(dd, SHA_PTCR, SHA_PTCR_TXTEN);
dd                629 drivers/crypto/atmel-sha.c 	struct atmel_sha_dev *dd = data;
dd                631 drivers/crypto/atmel-sha.c 	dd->is_async = true;
dd                634 drivers/crypto/atmel-sha.c 	atmel_sha_write(dd, SHA_IER, SHA_INT_DATARDY);
dd                637 drivers/crypto/atmel-sha.c static int atmel_sha_xmit_dma(struct atmel_sha_dev *dd, dma_addr_t dma_addr1,
dd                640 drivers/crypto/atmel-sha.c 	struct atmel_sha_reqctx *ctx = ahash_request_ctx(dd->req);
dd                644 drivers/crypto/atmel-sha.c 	dev_dbg(dd->dev, "xmit_dma: digcnt: 0x%llx 0x%llx, length: %zd, final: %d\n",
dd                647 drivers/crypto/atmel-sha.c 	dd->dma_lch_in.dma_conf.src_maxburst = 16;
dd                648 drivers/crypto/atmel-sha.c 	dd->dma_lch_in.dma_conf.dst_maxburst = 16;
dd                650 drivers/crypto/atmel-sha.c 	dmaengine_slave_config(dd->dma_lch_in.chan, &dd->dma_lch_in.dma_conf);
dd                658 drivers/crypto/atmel-sha.c 		in_desc = dmaengine_prep_slave_sg(dd->dma_lch_in.chan, sg, 2,
dd                664 drivers/crypto/atmel-sha.c 		in_desc = dmaengine_prep_slave_sg(dd->dma_lch_in.chan, sg, 1,
dd                668 drivers/crypto/atmel-sha.c 		return atmel_sha_complete(dd, -EINVAL);
dd                671 drivers/crypto/atmel-sha.c 	in_desc->callback_param = dd;
dd                673 drivers/crypto/atmel-sha.c 	atmel_sha_write_ctrl(dd, 1);
dd                681 drivers/crypto/atmel-sha.c 		dd->flags |= SHA_FLAGS_FINAL; /* catch last interrupt */
dd                683 drivers/crypto/atmel-sha.c 	dd->flags |=  SHA_FLAGS_DMA_ACTIVE;
dd                687 drivers/crypto/atmel-sha.c 	dma_async_issue_pending(dd->dma_lch_in.chan);
dd                692 drivers/crypto/atmel-sha.c static int atmel_sha_xmit_start(struct atmel_sha_dev *dd, dma_addr_t dma_addr1,
dd                695 drivers/crypto/atmel-sha.c 	if (dd->caps.has_dma)
dd                696 drivers/crypto/atmel-sha.c 		return atmel_sha_xmit_dma(dd, dma_addr1, length1,
dd                699 drivers/crypto/atmel-sha.c 		return atmel_sha_xmit_pdc(dd, dma_addr1, length1,
dd                703 drivers/crypto/atmel-sha.c static int atmel_sha_update_cpu(struct atmel_sha_dev *dd)
dd                705 drivers/crypto/atmel-sha.c 	struct atmel_sha_reqctx *ctx = ahash_request_ctx(dd->req);
dd                713 drivers/crypto/atmel-sha.c 	return atmel_sha_xmit_cpu(dd, ctx->buffer, bufcnt, 1);
dd                716 drivers/crypto/atmel-sha.c static int atmel_sha_xmit_dma_map(struct atmel_sha_dev *dd,
dd                720 drivers/crypto/atmel-sha.c 	ctx->dma_addr = dma_map_single(dd->dev, ctx->buffer,
dd                722 drivers/crypto/atmel-sha.c 	if (dma_mapping_error(dd->dev, ctx->dma_addr)) {
dd                723 drivers/crypto/atmel-sha.c 		dev_err(dd->dev, "dma %zu bytes error\n", ctx->buflen +
dd                725 drivers/crypto/atmel-sha.c 		return atmel_sha_complete(dd, -EINVAL);
dd                731 drivers/crypto/atmel-sha.c 	return atmel_sha_xmit_start(dd, ctx->dma_addr, length, 0, 0, final);
dd                734 drivers/crypto/atmel-sha.c static int atmel_sha_update_dma_slow(struct atmel_sha_dev *dd)
dd                736 drivers/crypto/atmel-sha.c 	struct atmel_sha_reqctx *ctx = ahash_request_ctx(dd->req);
dd                744 drivers/crypto/atmel-sha.c 	dev_dbg(dd->dev, "slow: bufcnt: %zu, digcnt: 0x%llx 0x%llx, final: %d\n",
dd                753 drivers/crypto/atmel-sha.c 		return atmel_sha_xmit_dma_map(dd, ctx, count, final);
dd                759 drivers/crypto/atmel-sha.c static int atmel_sha_update_dma_start(struct atmel_sha_dev *dd)
dd                761 drivers/crypto/atmel-sha.c 	struct atmel_sha_reqctx *ctx = ahash_request_ctx(dd->req);
dd                770 drivers/crypto/atmel-sha.c 		return atmel_sha_update_dma_slow(dd);
dd                772 drivers/crypto/atmel-sha.c 	dev_dbg(dd->dev, "fast: digcnt: 0x%llx 0x%llx, bufcnt: %zd, total: %u\n",
dd                778 drivers/crypto/atmel-sha.c 		return atmel_sha_update_dma_slow(dd);
dd                782 drivers/crypto/atmel-sha.c 		return atmel_sha_update_dma_slow(dd);
dd                811 drivers/crypto/atmel-sha.c 		ctx->dma_addr = dma_map_single(dd->dev, ctx->buffer,
dd                813 drivers/crypto/atmel-sha.c 		if (dma_mapping_error(dd->dev, ctx->dma_addr)) {
dd                814 drivers/crypto/atmel-sha.c 			dev_err(dd->dev, "dma %zu bytes error\n",
dd                816 drivers/crypto/atmel-sha.c 			return atmel_sha_complete(dd, -EINVAL);
dd                823 drivers/crypto/atmel-sha.c 			return atmel_sha_xmit_start(dd, ctx->dma_addr, count, 0,
dd                827 drivers/crypto/atmel-sha.c 			if (!dma_map_sg(dd->dev, ctx->sg, 1,
dd                829 drivers/crypto/atmel-sha.c 					dev_err(dd->dev, "dma_map_sg  error\n");
dd                830 drivers/crypto/atmel-sha.c 					return atmel_sha_complete(dd, -EINVAL);
dd                837 drivers/crypto/atmel-sha.c 			return atmel_sha_xmit_start(dd, sg_dma_address(ctx->sg),
dd                842 drivers/crypto/atmel-sha.c 	if (!dma_map_sg(dd->dev, ctx->sg, 1, DMA_TO_DEVICE)) {
dd                843 drivers/crypto/atmel-sha.c 		dev_err(dd->dev, "dma_map_sg  error\n");
dd                844 drivers/crypto/atmel-sha.c 		return atmel_sha_complete(dd, -EINVAL);
dd                850 drivers/crypto/atmel-sha.c 	return atmel_sha_xmit_start(dd, sg_dma_address(ctx->sg), length, 0,
dd                854 drivers/crypto/atmel-sha.c static int atmel_sha_update_dma_stop(struct atmel_sha_dev *dd)
dd                856 drivers/crypto/atmel-sha.c 	struct atmel_sha_reqctx *ctx = ahash_request_ctx(dd->req);
dd                859 drivers/crypto/atmel-sha.c 		dma_unmap_sg(dd->dev, ctx->sg, 1, DMA_TO_DEVICE);
dd                866 drivers/crypto/atmel-sha.c 			dma_unmap_single(dd->dev, ctx->dma_addr,
dd                870 drivers/crypto/atmel-sha.c 		dma_unmap_single(dd->dev, ctx->dma_addr, ctx->buflen +
dd                877 drivers/crypto/atmel-sha.c static int atmel_sha_update_req(struct atmel_sha_dev *dd)
dd                879 drivers/crypto/atmel-sha.c 	struct ahash_request *req = dd->req;
dd                883 drivers/crypto/atmel-sha.c 	dev_dbg(dd->dev, "update_req: total: %u, digcnt: 0x%llx 0x%llx\n",
dd                887 drivers/crypto/atmel-sha.c 		err = atmel_sha_update_cpu(dd);
dd                889 drivers/crypto/atmel-sha.c 		err = atmel_sha_update_dma_start(dd);
dd                892 drivers/crypto/atmel-sha.c 	dev_dbg(dd->dev, "update: err: %d, digcnt: 0x%llx 0x%llx\n",
dd                898 drivers/crypto/atmel-sha.c static int atmel_sha_final_req(struct atmel_sha_dev *dd)
dd                900 drivers/crypto/atmel-sha.c 	struct ahash_request *req = dd->req;
dd                909 drivers/crypto/atmel-sha.c 		err = atmel_sha_xmit_dma_map(dd, ctx, count, 1);
dd                916 drivers/crypto/atmel-sha.c 		err = atmel_sha_xmit_cpu(dd, ctx->buffer, count, 1);
dd                919 drivers/crypto/atmel-sha.c 	dev_dbg(dd->dev, "final_req: err: %d\n", err);
dd                951 drivers/crypto/atmel-sha.c 		hash[i] = atmel_sha_read(ctx->dd, SHA_REG_DIGEST(i));
dd                989 drivers/crypto/atmel-sha.c 	struct atmel_sha_dev *dd = ctx->dd;
dd                994 drivers/crypto/atmel-sha.c 	dev_dbg(dd->dev, "digcnt: 0x%llx 0x%llx, bufcnt: %zd\n", ctx->digcnt[1],
dd               1003 drivers/crypto/atmel-sha.c 	struct atmel_sha_dev *dd = ctx->dd;
dd               1007 drivers/crypto/atmel-sha.c 		if (SHA_FLAGS_FINAL & dd->flags)
dd               1014 drivers/crypto/atmel-sha.c 	(void)atmel_sha_complete(dd, err);
dd               1017 drivers/crypto/atmel-sha.c static int atmel_sha_hw_init(struct atmel_sha_dev *dd)
dd               1021 drivers/crypto/atmel-sha.c 	err = clk_enable(dd->iclk);
dd               1025 drivers/crypto/atmel-sha.c 	if (!(SHA_FLAGS_INIT & dd->flags)) {
dd               1026 drivers/crypto/atmel-sha.c 		atmel_sha_write(dd, SHA_CR, SHA_CR_SWRST);
dd               1027 drivers/crypto/atmel-sha.c 		dd->flags |= SHA_FLAGS_INIT;
dd               1028 drivers/crypto/atmel-sha.c 		dd->err = 0;
dd               1034 drivers/crypto/atmel-sha.c static inline unsigned int atmel_sha_get_version(struct atmel_sha_dev *dd)
dd               1036 drivers/crypto/atmel-sha.c 	return atmel_sha_read(dd, SHA_HW_VERSION) & 0x00000fff;
dd               1039 drivers/crypto/atmel-sha.c static void atmel_sha_hw_version_init(struct atmel_sha_dev *dd)
dd               1041 drivers/crypto/atmel-sha.c 	atmel_sha_hw_init(dd);
dd               1043 drivers/crypto/atmel-sha.c 	dd->hw_version = atmel_sha_get_version(dd);
dd               1045 drivers/crypto/atmel-sha.c 	dev_info(dd->dev,
dd               1046 drivers/crypto/atmel-sha.c 			"version: 0x%x\n", dd->hw_version);
dd               1048 drivers/crypto/atmel-sha.c 	clk_disable(dd->iclk);
dd               1051 drivers/crypto/atmel-sha.c static int atmel_sha_handle_queue(struct atmel_sha_dev *dd,
dd               1060 drivers/crypto/atmel-sha.c 	spin_lock_irqsave(&dd->lock, flags);
dd               1062 drivers/crypto/atmel-sha.c 		ret = ahash_enqueue_request(&dd->queue, req);
dd               1064 drivers/crypto/atmel-sha.c 	if (SHA_FLAGS_BUSY & dd->flags) {
dd               1065 drivers/crypto/atmel-sha.c 		spin_unlock_irqrestore(&dd->lock, flags);
dd               1069 drivers/crypto/atmel-sha.c 	backlog = crypto_get_backlog(&dd->queue);
dd               1070 drivers/crypto/atmel-sha.c 	async_req = crypto_dequeue_request(&dd->queue);
dd               1072 drivers/crypto/atmel-sha.c 		dd->flags |= SHA_FLAGS_BUSY;
dd               1074 drivers/crypto/atmel-sha.c 	spin_unlock_irqrestore(&dd->lock, flags);
dd               1084 drivers/crypto/atmel-sha.c 	dd->req = ahash_request_cast(async_req);
dd               1085 drivers/crypto/atmel-sha.c 	start_async = (dd->req != req);
dd               1086 drivers/crypto/atmel-sha.c 	dd->is_async = start_async;
dd               1087 drivers/crypto/atmel-sha.c 	dd->force_complete = false;
dd               1090 drivers/crypto/atmel-sha.c 	err = ctx->start(dd);
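atmel_sha_handle_queue (1051-1090) is the stock crypto-queue dispatcher. With the control flow between the occurrences above filled back in, the locking pattern reads roughly as below (a reconstruction from the listed lines; backlog notification and the start_async bookkeeping are trimmed):

	spin_lock_irqsave(&dd->lock, flags);
	if (req)
		ret = ahash_enqueue_request(&dd->queue, req);

	if (SHA_FLAGS_BUSY & dd->flags) {
		/* an in-flight request will drain the queue for us */
		spin_unlock_irqrestore(&dd->lock, flags);
		return ret;
	}

	backlog = crypto_get_backlog(&dd->queue);
	async_req = crypto_dequeue_request(&dd->queue);
	if (async_req)
		dd->flags |= SHA_FLAGS_BUSY;
	spin_unlock_irqrestore(&dd->lock, flags);

	if (!async_req)
		return ret;

	dd->req = ahash_request_cast(async_req);
	err = ctx->start(dd);	/* e.g. atmel_sha_start or atmel_sha_hmac_start */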
dd               1094 drivers/crypto/atmel-sha.c static int atmel_sha_done(struct atmel_sha_dev *dd);
dd               1096 drivers/crypto/atmel-sha.c static int atmel_sha_start(struct atmel_sha_dev *dd)
dd               1098 drivers/crypto/atmel-sha.c 	struct ahash_request *req = dd->req;
dd               1102 drivers/crypto/atmel-sha.c 	dev_dbg(dd->dev, "handling new req, op: %lu, nbytes: %d\n",
dd               1105 drivers/crypto/atmel-sha.c 	err = atmel_sha_hw_init(dd);
dd               1107 drivers/crypto/atmel-sha.c 		return atmel_sha_complete(dd, err);
dd               1128 drivers/crypto/atmel-sha.c 	dd->resume = atmel_sha_done;
dd               1130 drivers/crypto/atmel-sha.c 		err = atmel_sha_update_req(dd);
dd               1133 drivers/crypto/atmel-sha.c 			err = atmel_sha_final_req(dd);
dd               1135 drivers/crypto/atmel-sha.c 		err = atmel_sha_final_req(dd);
dd               1142 drivers/crypto/atmel-sha.c 	dev_dbg(dd->dev, "exit, err: %d\n", err);
dd               1151 drivers/crypto/atmel-sha.c 	struct atmel_sha_dev *dd = tctx->dd;
dd               1155 drivers/crypto/atmel-sha.c 	return atmel_sha_handle_queue(dd, req);
dd               1380 drivers/crypto/atmel-sha.c 	struct atmel_sha_dev *dd = (struct atmel_sha_dev *)data;
dd               1382 drivers/crypto/atmel-sha.c 	atmel_sha_handle_queue(dd, NULL);
dd               1385 drivers/crypto/atmel-sha.c static int atmel_sha_done(struct atmel_sha_dev *dd)
dd               1389 drivers/crypto/atmel-sha.c 	if (SHA_FLAGS_CPU & dd->flags) {
dd               1390 drivers/crypto/atmel-sha.c 		if (SHA_FLAGS_OUTPUT_READY & dd->flags) {
dd               1391 drivers/crypto/atmel-sha.c 			dd->flags &= ~SHA_FLAGS_OUTPUT_READY;
dd               1394 drivers/crypto/atmel-sha.c 	} else if (SHA_FLAGS_DMA_READY & dd->flags) {
dd               1395 drivers/crypto/atmel-sha.c 		if (SHA_FLAGS_DMA_ACTIVE & dd->flags) {
dd               1396 drivers/crypto/atmel-sha.c 			dd->flags &= ~SHA_FLAGS_DMA_ACTIVE;
dd               1397 drivers/crypto/atmel-sha.c 			atmel_sha_update_dma_stop(dd);
dd               1398 drivers/crypto/atmel-sha.c 			if (dd->err) {
dd               1399 drivers/crypto/atmel-sha.c 				err = dd->err;
dd               1403 drivers/crypto/atmel-sha.c 		if (SHA_FLAGS_OUTPUT_READY & dd->flags) {
dd               1405 drivers/crypto/atmel-sha.c 			dd->flags &= ~(SHA_FLAGS_DMA_READY |
dd               1407 drivers/crypto/atmel-sha.c 			err = atmel_sha_update_dma_start(dd);
dd               1416 drivers/crypto/atmel-sha.c 	atmel_sha_finish_req(dd->req, err);
dd               1423 drivers/crypto/atmel-sha.c 	struct atmel_sha_dev *dd = (struct atmel_sha_dev *)data;
dd               1425 drivers/crypto/atmel-sha.c 	dd->is_async = true;
dd               1426 drivers/crypto/atmel-sha.c 	(void)dd->resume(dd);
dd               1454 drivers/crypto/atmel-sha.c static bool atmel_sha_dma_check_aligned(struct atmel_sha_dev *dd,
dd               1458 drivers/crypto/atmel-sha.c 	struct atmel_sha_dma *dma = &dd->dma_lch_in;
dd               1459 drivers/crypto/atmel-sha.c 	struct ahash_request *req = dd->req;
dd               1491 drivers/crypto/atmel-sha.c 	struct atmel_sha_dev *dd = data;
dd               1492 drivers/crypto/atmel-sha.c 	struct atmel_sha_dma *dma = &dd->dma_lch_in;
dd               1497 drivers/crypto/atmel-sha.c 	dma_unmap_sg(dd->dev, dma->sg, dma->nents, DMA_TO_DEVICE);
dd               1504 drivers/crypto/atmel-sha.c 	dd->is_async = true;
dd               1505 drivers/crypto/atmel-sha.c 	(void)atmel_sha_wait_for_data_ready(dd, dd->resume);
dd               1508 drivers/crypto/atmel-sha.c static int atmel_sha_dma_start(struct atmel_sha_dev *dd,
dd               1513 drivers/crypto/atmel-sha.c 	struct atmel_sha_dma *dma = &dd->dma_lch_in;
dd               1521 drivers/crypto/atmel-sha.c 	dd->resume = resume;
dd               1528 drivers/crypto/atmel-sha.c 	sg_len = dma_map_sg(dd->dev, dma->sg, dma->nents, DMA_TO_DEVICE);
dd               1548 drivers/crypto/atmel-sha.c 	desc->callback_param = dd;
dd               1559 drivers/crypto/atmel-sha.c 	dma_unmap_sg(dd->dev, dma->sg, dma->nents, DMA_TO_DEVICE);
dd               1561 drivers/crypto/atmel-sha.c 	return atmel_sha_complete(dd, err);
dd               1567 drivers/crypto/atmel-sha.c static int atmel_sha_cpu_transfer(struct atmel_sha_dev *dd)
dd               1569 drivers/crypto/atmel-sha.c 	struct ahash_request *req = dd->req;
dd               1580 drivers/crypto/atmel-sha.c 			atmel_sha_write(dd, SHA_REG_DIN(din), words[i]);
dd               1601 drivers/crypto/atmel-sha.c 		isr = atmel_sha_read(dd, SHA_ISR);
dd               1604 drivers/crypto/atmel-sha.c 			dd->resume = atmel_sha_cpu_transfer;
dd               1605 drivers/crypto/atmel-sha.c 			atmel_sha_write(dd, SHA_IER, SHA_INT_DATARDY);
dd               1611 drivers/crypto/atmel-sha.c 		return dd->cpu_transfer_complete(dd);
dd               1613 drivers/crypto/atmel-sha.c 	return atmel_sha_wait_for_data_ready(dd, dd->cpu_transfer_complete);
dd               1616 drivers/crypto/atmel-sha.c static int atmel_sha_cpu_start(struct atmel_sha_dev *dd,
dd               1623 drivers/crypto/atmel-sha.c 	struct ahash_request *req = dd->req;
dd               1627 drivers/crypto/atmel-sha.c 		return resume(dd);
dd               1646 drivers/crypto/atmel-sha.c 	dd->cpu_transfer_complete = resume;
dd               1647 drivers/crypto/atmel-sha.c 	return atmel_sha_cpu_transfer(dd);
dd               1650 drivers/crypto/atmel-sha.c static int atmel_sha_cpu_hash(struct atmel_sha_dev *dd,
dd               1655 drivers/crypto/atmel-sha.c 	struct ahash_request *req = dd->req;
dd               1661 drivers/crypto/atmel-sha.c 		return atmel_sha_complete(dd, -EINVAL);
dd               1664 drivers/crypto/atmel-sha.c 	atmel_sha_write(dd, SHA_MR, mr);
dd               1665 drivers/crypto/atmel-sha.c 	atmel_sha_write(dd, SHA_MSR, msglen);
dd               1666 drivers/crypto/atmel-sha.c 	atmel_sha_write(dd, SHA_BCR, msglen);
dd               1667 drivers/crypto/atmel-sha.c 	atmel_sha_write(dd, SHA_CR, SHA_CR_FIRST);
dd               1669 drivers/crypto/atmel-sha.c 	sg_init_one(&dd->tmp, data, datalen);
dd               1670 drivers/crypto/atmel-sha.c 	return atmel_sha_cpu_start(dd, &dd->tmp, datalen, false, true, resume);
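atmel_sha_cpu_hash (1650-1670) shows the fixed-length hash setup: program the mode register, write the message size into both SHA_MSR and SHA_BCR, assert SHA_CR_FIRST, then feed data by CPU writes into the DIN window (the loop at line 1580). Condensed, one block transfer looks like this sketch, assuming a 64-byte block size:

	atmel_sha_write(dd, SHA_MR, mr);
	atmel_sha_write(dd, SHA_MSR, msglen);		/* total message size */
	atmel_sha_write(dd, SHA_BCR, msglen);		/* byte count to process */
	atmel_sha_write(dd, SHA_CR, SHA_CR_FIRST);	/* begin a new digest */

	/* feed one 64-byte block by PIO, then wait for SHA_INT_DATARDY */
	for (i = 0; i < 64 / sizeof(u32); i++)
		atmel_sha_write(dd, SHA_REG_DIN(i), words[i]);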
dd               1736 drivers/crypto/atmel-sha.c static int atmel_sha_hmac_setup(struct atmel_sha_dev *dd,
dd               1738 drivers/crypto/atmel-sha.c static int atmel_sha_hmac_prehash_key(struct atmel_sha_dev *dd,
dd               1740 drivers/crypto/atmel-sha.c static int atmel_sha_hmac_prehash_key_done(struct atmel_sha_dev *dd);
dd               1741 drivers/crypto/atmel-sha.c static int atmel_sha_hmac_compute_ipad_hash(struct atmel_sha_dev *dd);
dd               1742 drivers/crypto/atmel-sha.c static int atmel_sha_hmac_compute_opad_hash(struct atmel_sha_dev *dd);
dd               1743 drivers/crypto/atmel-sha.c static int atmel_sha_hmac_setup_done(struct atmel_sha_dev *dd);
dd               1745 drivers/crypto/atmel-sha.c static int atmel_sha_hmac_init_done(struct atmel_sha_dev *dd);
dd               1746 drivers/crypto/atmel-sha.c static int atmel_sha_hmac_final(struct atmel_sha_dev *dd);
dd               1747 drivers/crypto/atmel-sha.c static int atmel_sha_hmac_final_done(struct atmel_sha_dev *dd);
dd               1748 drivers/crypto/atmel-sha.c static int atmel_sha_hmac_digest2(struct atmel_sha_dev *dd);
dd               1750 drivers/crypto/atmel-sha.c static int atmel_sha_hmac_setup(struct atmel_sha_dev *dd,
dd               1753 drivers/crypto/atmel-sha.c 	struct ahash_request *req = dd->req;
dd               1789 drivers/crypto/atmel-sha.c 		return atmel_sha_complete(dd, -EINVAL);
dd               1794 drivers/crypto/atmel-sha.c 		return resume(dd);
dd               1798 drivers/crypto/atmel-sha.c 		return atmel_sha_hmac_prehash_key(dd, key, keylen);
dd               1803 drivers/crypto/atmel-sha.c 	return atmel_sha_hmac_compute_ipad_hash(dd);
dd               1806 drivers/crypto/atmel-sha.c static int atmel_sha_hmac_prehash_key(struct atmel_sha_dev *dd,
dd               1809 drivers/crypto/atmel-sha.c 	return atmel_sha_cpu_hash(dd, key, keylen, true,
dd               1813 drivers/crypto/atmel-sha.c static int atmel_sha_hmac_prehash_key_done(struct atmel_sha_dev *dd)
dd               1815 drivers/crypto/atmel-sha.c 	struct ahash_request *req = dd->req;
dd               1825 drivers/crypto/atmel-sha.c 		hmac->ipad[i] = atmel_sha_read(dd, SHA_REG_DIGEST(i));
dd               1827 drivers/crypto/atmel-sha.c 	return atmel_sha_hmac_compute_ipad_hash(dd);
dd               1830 drivers/crypto/atmel-sha.c static int atmel_sha_hmac_compute_ipad_hash(struct atmel_sha_dev *dd)
dd               1832 drivers/crypto/atmel-sha.c 	struct ahash_request *req = dd->req;
dd               1845 drivers/crypto/atmel-sha.c 	return atmel_sha_cpu_hash(dd, hmac->ipad, bs, false,
dd               1849 drivers/crypto/atmel-sha.c static int atmel_sha_hmac_compute_opad_hash(struct atmel_sha_dev *dd)
dd               1851 drivers/crypto/atmel-sha.c 	struct ahash_request *req = dd->req;
dd               1860 drivers/crypto/atmel-sha.c 		hmac->ipad[i] = atmel_sha_read(dd, SHA_REG_DIGEST(i));
dd               1861 drivers/crypto/atmel-sha.c 	return atmel_sha_cpu_hash(dd, hmac->opad, bs, false,
dd               1865 drivers/crypto/atmel-sha.c static int atmel_sha_hmac_setup_done(struct atmel_sha_dev *dd)
dd               1867 drivers/crypto/atmel-sha.c 	struct ahash_request *req = dd->req;
dd               1875 drivers/crypto/atmel-sha.c 		hmac->opad[i] = atmel_sha_read(dd, SHA_REG_DIGEST(i));
dd               1877 drivers/crypto/atmel-sha.c 	return hmac->resume(dd);
dd               1880 drivers/crypto/atmel-sha.c static int atmel_sha_hmac_start(struct atmel_sha_dev *dd)
dd               1882 drivers/crypto/atmel-sha.c 	struct ahash_request *req = dd->req;
dd               1886 drivers/crypto/atmel-sha.c 	err = atmel_sha_hw_init(dd);
dd               1888 drivers/crypto/atmel-sha.c 		return atmel_sha_complete(dd, err);
dd               1892 drivers/crypto/atmel-sha.c 		err = atmel_sha_hmac_setup(dd, atmel_sha_hmac_init_done);
dd               1896 drivers/crypto/atmel-sha.c 		dd->resume = atmel_sha_done;
dd               1897 drivers/crypto/atmel-sha.c 		err = atmel_sha_update_req(dd);
dd               1901 drivers/crypto/atmel-sha.c 		dd->resume = atmel_sha_hmac_final;
dd               1902 drivers/crypto/atmel-sha.c 		err = atmel_sha_final_req(dd);
dd               1906 drivers/crypto/atmel-sha.c 		err = atmel_sha_hmac_setup(dd, atmel_sha_hmac_digest2);
dd               1910 drivers/crypto/atmel-sha.c 		return atmel_sha_complete(dd, -EINVAL);
dd               1935 drivers/crypto/atmel-sha.c static int atmel_sha_hmac_init_done(struct atmel_sha_dev *dd)
dd               1937 drivers/crypto/atmel-sha.c 	struct ahash_request *req = dd->req;
dd               1949 drivers/crypto/atmel-sha.c 	return atmel_sha_complete(dd, 0);
dd               1952 drivers/crypto/atmel-sha.c static int atmel_sha_hmac_final(struct atmel_sha_dev *dd)
dd               1954 drivers/crypto/atmel-sha.c 	struct ahash_request *req = dd->req;
dd               1968 drivers/crypto/atmel-sha.c 		digest[i] = atmel_sha_read(dd, SHA_REG_DIGEST(i));
dd               1971 drivers/crypto/atmel-sha.c 	atmel_sha_write(dd, SHA_CR, SHA_CR_WUIHV);
dd               1974 drivers/crypto/atmel-sha.c 		atmel_sha_write(dd, SHA_REG_DIN(i), hmac->opad[i]);
dd               1978 drivers/crypto/atmel-sha.c 	atmel_sha_write(dd, SHA_MR, mr);
dd               1979 drivers/crypto/atmel-sha.c 	atmel_sha_write(dd, SHA_MSR, bs + ds);
dd               1980 drivers/crypto/atmel-sha.c 	atmel_sha_write(dd, SHA_BCR, ds);
dd               1981 drivers/crypto/atmel-sha.c 	atmel_sha_write(dd, SHA_CR, SHA_CR_FIRST);
dd               1983 drivers/crypto/atmel-sha.c 	sg_init_one(&dd->tmp, digest, ds);
dd               1984 drivers/crypto/atmel-sha.c 	return atmel_sha_cpu_start(dd, &dd->tmp, ds, false, true,
dd               1988 drivers/crypto/atmel-sha.c static int atmel_sha_hmac_final_done(struct atmel_sha_dev *dd)
dd               1995 drivers/crypto/atmel-sha.c 	atmel_sha_copy_hash(dd->req);
dd               1996 drivers/crypto/atmel-sha.c 	atmel_sha_copy_ready_hash(dd->req);
dd               1997 drivers/crypto/atmel-sha.c 	return atmel_sha_complete(dd, 0);
dd               2011 drivers/crypto/atmel-sha.c static int atmel_sha_hmac_digest2(struct atmel_sha_dev *dd)
dd               2013 drivers/crypto/atmel-sha.c 	struct ahash_request *req = dd->req;
dd               2024 drivers/crypto/atmel-sha.c 		return atmel_sha_complete(dd, -EINVAL); // TODO:
dd               2028 drivers/crypto/atmel-sha.c 	    atmel_sha_dma_check_aligned(dd, req->src, req->nbytes))
dd               2032 drivers/crypto/atmel-sha.c 	atmel_sha_write(dd, SHA_CR, SHA_CR_WUIHV);
dd               2034 drivers/crypto/atmel-sha.c 		atmel_sha_write(dd, SHA_REG_DIN(i), hmac->ipad[i]);
dd               2036 drivers/crypto/atmel-sha.c 	atmel_sha_write(dd, SHA_CR, SHA_CR_WUIEHV);
dd               2038 drivers/crypto/atmel-sha.c 		atmel_sha_write(dd, SHA_REG_DIN(i), hmac->opad[i]);
dd               2047 drivers/crypto/atmel-sha.c 	atmel_sha_write(dd, SHA_MR, mr);
dd               2049 drivers/crypto/atmel-sha.c 	atmel_sha_write(dd, SHA_MSR, req->nbytes);
dd               2050 drivers/crypto/atmel-sha.c 	atmel_sha_write(dd, SHA_BCR, req->nbytes);
dd               2052 drivers/crypto/atmel-sha.c 	atmel_sha_write(dd, SHA_CR, SHA_CR_FIRST);
dd               2056 drivers/crypto/atmel-sha.c 		return atmel_sha_dma_start(dd, req->src, req->nbytes,
dd               2059 drivers/crypto/atmel-sha.c 	return atmel_sha_cpu_start(dd, req->src, req->nbytes, false, true,
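The writes at 2032-2038 are the HMAC fast path: instead of rehashing the key pads on every request, the precomputed inner and outer pad digests are loaded back as initial hash values after selecting them with SHA_CR_WUIHV and SHA_CR_WUIEHV. With the elided loop bounds filled in (ds, the digest size in bytes, is an assumption here), the reload is:

	atmel_sha_write(dd, SHA_CR, SHA_CR_WUIHV);	/* select inner IHV */
	for (i = 0; i < ds / sizeof(u32); i++)
		atmel_sha_write(dd, SHA_REG_DIN(i), hmac->ipad[i]);

	atmel_sha_write(dd, SHA_CR, SHA_CR_WUIEHV);	/* select outer IHV */
	for (i = 0; i < ds / sizeof(u32); i++)
		atmel_sha_write(dd, SHA_REG_DIN(i), hmac->opad[i]);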
dd               2213 drivers/crypto/atmel-sha.c static int atmel_sha_authenc_init2(struct atmel_sha_dev *dd);
dd               2214 drivers/crypto/atmel-sha.c static int atmel_sha_authenc_init_done(struct atmel_sha_dev *dd);
dd               2215 drivers/crypto/atmel-sha.c static int atmel_sha_authenc_final_done(struct atmel_sha_dev *dd);
dd               2244 drivers/crypto/atmel-sha.c 	authctx->cb(authctx->aes_dev, err, authctx->base.dd->is_async);
dd               2247 drivers/crypto/atmel-sha.c static int atmel_sha_authenc_start(struct atmel_sha_dev *dd)
dd               2249 drivers/crypto/atmel-sha.c 	struct ahash_request *req = dd->req;
dd               2257 drivers/crypto/atmel-sha.c 	dd->force_complete = true;
dd               2259 drivers/crypto/atmel-sha.c 	err = atmel_sha_hw_init(dd);
dd               2260 drivers/crypto/atmel-sha.c 	return authctx->cb(authctx->aes_dev, err, dd->is_async);
dd               2267 drivers/crypto/atmel-sha.c 	dummy.dd = NULL;
dd               2369 drivers/crypto/atmel-sha.c 	struct atmel_sha_dev *dd;
dd               2375 drivers/crypto/atmel-sha.c 	dd = atmel_sha_find_dev(tctx);
dd               2376 drivers/crypto/atmel-sha.c 	if (!dd)
dd               2380 drivers/crypto/atmel-sha.c 	ctx->dd = dd;
dd               2387 drivers/crypto/atmel-sha.c 	return atmel_sha_handle_queue(dd, req);
dd               2401 drivers/crypto/atmel-sha.c 	struct atmel_sha_dev *dd = ctx->dd;
dd               2404 drivers/crypto/atmel-sha.c 		return atmel_sha_complete(dd, -EINVAL);
dd               2413 drivers/crypto/atmel-sha.c 	return atmel_sha_hmac_setup(dd, atmel_sha_authenc_init2);
dd               2417 drivers/crypto/atmel-sha.c static int atmel_sha_authenc_init2(struct atmel_sha_dev *dd)
dd               2419 drivers/crypto/atmel-sha.c 	struct ahash_request *req = dd->req;
dd               2428 drivers/crypto/atmel-sha.c 	atmel_sha_write(dd, SHA_CR, SHA_CR_WUIHV);
dd               2430 drivers/crypto/atmel-sha.c 		atmel_sha_write(dd, SHA_REG_DIN(i), hmac->ipad[i]);
dd               2432 drivers/crypto/atmel-sha.c 	atmel_sha_write(dd, SHA_CR, SHA_CR_WUIEHV);
dd               2434 drivers/crypto/atmel-sha.c 		atmel_sha_write(dd, SHA_REG_DIN(i), hmac->opad[i]);
dd               2440 drivers/crypto/atmel-sha.c 	atmel_sha_write(dd, SHA_MR, mr);
dd               2443 drivers/crypto/atmel-sha.c 	atmel_sha_write(dd, SHA_MSR, msg_size);
dd               2444 drivers/crypto/atmel-sha.c 	atmel_sha_write(dd, SHA_BCR, msg_size);
dd               2446 drivers/crypto/atmel-sha.c 	atmel_sha_write(dd, SHA_CR, SHA_CR_FIRST);
dd               2449 drivers/crypto/atmel-sha.c 	return atmel_sha_cpu_start(dd, authctx->assoc, authctx->assoclen,
dd               2454 drivers/crypto/atmel-sha.c static int atmel_sha_authenc_init_done(struct atmel_sha_dev *dd)
dd               2456 drivers/crypto/atmel-sha.c 	struct ahash_request *req = dd->req;
dd               2459 drivers/crypto/atmel-sha.c 	return authctx->cb(authctx->aes_dev, 0, dd->is_async);
dd               2469 drivers/crypto/atmel-sha.c 	struct atmel_sha_dev *dd = ctx->dd;
dd               2493 drivers/crypto/atmel-sha.c 		return atmel_sha_complete(dd, -EINVAL);
dd               2501 drivers/crypto/atmel-sha.c 	return atmel_sha_wait_for_data_ready(dd,
dd               2506 drivers/crypto/atmel-sha.c static int atmel_sha_authenc_final_done(struct atmel_sha_dev *dd)
dd               2508 drivers/crypto/atmel-sha.c 	struct ahash_request *req = dd->req;
dd               2513 drivers/crypto/atmel-sha.c 		authctx->digest[i] = atmel_sha_read(dd, SHA_REG_DIGEST(i));
dd               2515 drivers/crypto/atmel-sha.c 	return atmel_sha_complete(dd, 0);
dd               2522 drivers/crypto/atmel-sha.c 	struct atmel_sha_dev *dd = ctx->dd;
dd               2525 drivers/crypto/atmel-sha.c 	dd->is_async = false;
dd               2526 drivers/crypto/atmel-sha.c 	dd->force_complete = false;
dd               2527 drivers/crypto/atmel-sha.c 	(void)atmel_sha_complete(dd, 0);
dd               2534 drivers/crypto/atmel-sha.c static void atmel_sha_unregister_algs(struct atmel_sha_dev *dd)
dd               2538 drivers/crypto/atmel-sha.c 	if (dd->caps.has_hmac)
dd               2545 drivers/crypto/atmel-sha.c 	if (dd->caps.has_sha224)
dd               2548 drivers/crypto/atmel-sha.c 	if (dd->caps.has_sha_384_512) {
dd               2554 drivers/crypto/atmel-sha.c static int atmel_sha_register_algs(struct atmel_sha_dev *dd)
dd               2564 drivers/crypto/atmel-sha.c 	if (dd->caps.has_sha224) {
dd               2570 drivers/crypto/atmel-sha.c 	if (dd->caps.has_sha_384_512) {
dd               2578 drivers/crypto/atmel-sha.c 	if (dd->caps.has_hmac) {
dd               2618 drivers/crypto/atmel-sha.c static int atmel_sha_dma_init(struct atmel_sha_dev *dd,
dd               2627 drivers/crypto/atmel-sha.c 	dd->dma_lch_in.chan = dma_request_slave_channel_compat(mask_in,
dd               2628 drivers/crypto/atmel-sha.c 			atmel_sha_filter, &pdata->dma_slave->rxdata, dd->dev, "tx");
dd               2629 drivers/crypto/atmel-sha.c 	if (!dd->dma_lch_in.chan) {
dd               2630 drivers/crypto/atmel-sha.c 		dev_warn(dd->dev, "no DMA channel available\n");
dd               2634 drivers/crypto/atmel-sha.c 	dd->dma_lch_in.dma_conf.direction = DMA_MEM_TO_DEV;
dd               2635 drivers/crypto/atmel-sha.c 	dd->dma_lch_in.dma_conf.dst_addr = dd->phys_base +
dd               2637 drivers/crypto/atmel-sha.c 	dd->dma_lch_in.dma_conf.src_maxburst = 1;
dd               2638 drivers/crypto/atmel-sha.c 	dd->dma_lch_in.dma_conf.src_addr_width =
dd               2640 drivers/crypto/atmel-sha.c 	dd->dma_lch_in.dma_conf.dst_maxburst = 1;
dd               2641 drivers/crypto/atmel-sha.c 	dd->dma_lch_in.dma_conf.dst_addr_width =
dd               2643 drivers/crypto/atmel-sha.c 	dd->dma_lch_in.dma_conf.device_fc = false;
dd               2648 drivers/crypto/atmel-sha.c static void atmel_sha_dma_cleanup(struct atmel_sha_dev *dd)
dd               2650 drivers/crypto/atmel-sha.c 	dma_release_channel(dd->dma_lch_in.chan);
dd               2653 drivers/crypto/atmel-sha.c static void atmel_sha_get_cap(struct atmel_sha_dev *dd)
dd               2656 drivers/crypto/atmel-sha.c 	dd->caps.has_dma = 0;
dd               2657 drivers/crypto/atmel-sha.c 	dd->caps.has_dualbuff = 0;
dd               2658 drivers/crypto/atmel-sha.c 	dd->caps.has_sha224 = 0;
dd               2659 drivers/crypto/atmel-sha.c 	dd->caps.has_sha_384_512 = 0;
dd               2660 drivers/crypto/atmel-sha.c 	dd->caps.has_uihv = 0;
dd               2661 drivers/crypto/atmel-sha.c 	dd->caps.has_hmac = 0;
dd               2664 drivers/crypto/atmel-sha.c 	switch (dd->hw_version & 0xff0) {
dd               2666 drivers/crypto/atmel-sha.c 		dd->caps.has_dma = 1;
dd               2667 drivers/crypto/atmel-sha.c 		dd->caps.has_dualbuff = 1;
dd               2668 drivers/crypto/atmel-sha.c 		dd->caps.has_sha224 = 1;
dd               2669 drivers/crypto/atmel-sha.c 		dd->caps.has_sha_384_512 = 1;
dd               2670 drivers/crypto/atmel-sha.c 		dd->caps.has_uihv = 1;
dd               2671 drivers/crypto/atmel-sha.c 		dd->caps.has_hmac = 1;
dd               2674 drivers/crypto/atmel-sha.c 		dd->caps.has_dma = 1;
dd               2675 drivers/crypto/atmel-sha.c 		dd->caps.has_dualbuff = 1;
dd               2676 drivers/crypto/atmel-sha.c 		dd->caps.has_sha224 = 1;
dd               2677 drivers/crypto/atmel-sha.c 		dd->caps.has_sha_384_512 = 1;
dd               2678 drivers/crypto/atmel-sha.c 		dd->caps.has_uihv = 1;
dd               2681 drivers/crypto/atmel-sha.c 		dd->caps.has_dma = 1;
dd               2682 drivers/crypto/atmel-sha.c 		dd->caps.has_dualbuff = 1;
dd               2683 drivers/crypto/atmel-sha.c 		dd->caps.has_sha224 = 1;
dd               2684 drivers/crypto/atmel-sha.c 		dd->caps.has_sha_384_512 = 1;
dd               2687 drivers/crypto/atmel-sha.c 		dd->caps.has_dma = 1;
dd               2688 drivers/crypto/atmel-sha.c 		dd->caps.has_dualbuff = 1;
dd               2689 drivers/crypto/atmel-sha.c 		dd->caps.has_sha224 = 1;
dd               2694 drivers/crypto/atmel-sha.c 		dev_warn(dd->dev,
dd                 72 drivers/crypto/atmel-tdes.c 	struct atmel_tdes_dev *dd;
dd                178 drivers/crypto/atmel-tdes.c static inline u32 atmel_tdes_read(struct atmel_tdes_dev *dd, u32 offset)
dd                180 drivers/crypto/atmel-tdes.c 	return readl_relaxed(dd->io_base + offset);
dd                183 drivers/crypto/atmel-tdes.c static inline void atmel_tdes_write(struct atmel_tdes_dev *dd,
dd                186 drivers/crypto/atmel-tdes.c 	writel_relaxed(value, dd->io_base + offset);
dd                189 drivers/crypto/atmel-tdes.c static void atmel_tdes_write_n(struct atmel_tdes_dev *dd, u32 offset,
dd                193 drivers/crypto/atmel-tdes.c 		atmel_tdes_write(dd, offset, *value);
dd                202 drivers/crypto/atmel-tdes.c 	if (!ctx->dd) {
dd                207 drivers/crypto/atmel-tdes.c 		ctx->dd = tdes_dd;
dd                209 drivers/crypto/atmel-tdes.c 		tdes_dd = ctx->dd;
dd                216 drivers/crypto/atmel-tdes.c static int atmel_tdes_hw_init(struct atmel_tdes_dev *dd)
dd                220 drivers/crypto/atmel-tdes.c 	err = clk_prepare_enable(dd->iclk);
dd                224 drivers/crypto/atmel-tdes.c 	if (!(dd->flags & TDES_FLAGS_INIT)) {
dd                225 drivers/crypto/atmel-tdes.c 		atmel_tdes_write(dd, TDES_CR, TDES_CR_SWRST);
dd                226 drivers/crypto/atmel-tdes.c 		dd->flags |= TDES_FLAGS_INIT;
dd                227 drivers/crypto/atmel-tdes.c 		dd->err = 0;
dd                233 drivers/crypto/atmel-tdes.c static inline unsigned int atmel_tdes_get_version(struct atmel_tdes_dev *dd)
dd                235 drivers/crypto/atmel-tdes.c 	return atmel_tdes_read(dd, TDES_HW_VERSION) & 0x00000fff;
dd                238 drivers/crypto/atmel-tdes.c static void atmel_tdes_hw_version_init(struct atmel_tdes_dev *dd)
dd                240 drivers/crypto/atmel-tdes.c 	atmel_tdes_hw_init(dd);
dd                242 drivers/crypto/atmel-tdes.c 	dd->hw_version = atmel_tdes_get_version(dd);
dd                244 drivers/crypto/atmel-tdes.c 	dev_info(dd->dev,
dd                245 drivers/crypto/atmel-tdes.c 			"version: 0x%x\n", dd->hw_version);
dd                247 drivers/crypto/atmel-tdes.c 	clk_disable_unprepare(dd->iclk);
dd                252 drivers/crypto/atmel-tdes.c 	struct atmel_tdes_dev *dd = data;
dd                255 drivers/crypto/atmel-tdes.c 	tasklet_schedule(&dd->done_task);
dd                258 drivers/crypto/atmel-tdes.c static int atmel_tdes_write_ctrl(struct atmel_tdes_dev *dd)
dd                263 drivers/crypto/atmel-tdes.c 	err = atmel_tdes_hw_init(dd);
dd                268 drivers/crypto/atmel-tdes.c 	if (!dd->caps.has_dma)
dd                269 drivers/crypto/atmel-tdes.c 		atmel_tdes_write(dd, TDES_PTCR,
dd                273 drivers/crypto/atmel-tdes.c 	if (dd->ctx->keylen > (DES_KEY_SIZE << 1)) {
dd                276 drivers/crypto/atmel-tdes.c 	} else if (dd->ctx->keylen > DES_KEY_SIZE) {
dd                283 drivers/crypto/atmel-tdes.c 	if (dd->flags & TDES_FLAGS_CBC) {
dd                285 drivers/crypto/atmel-tdes.c 	} else if (dd->flags & TDES_FLAGS_CFB) {
dd                288 drivers/crypto/atmel-tdes.c 		if (dd->flags & TDES_FLAGS_CFB8)
dd                290 drivers/crypto/atmel-tdes.c 		else if (dd->flags & TDES_FLAGS_CFB16)
dd                292 drivers/crypto/atmel-tdes.c 		else if (dd->flags & TDES_FLAGS_CFB32)
dd                294 drivers/crypto/atmel-tdes.c 		else if (dd->flags & TDES_FLAGS_CFB64)
dd                296 drivers/crypto/atmel-tdes.c 	} else if (dd->flags & TDES_FLAGS_OFB) {
dd                300 drivers/crypto/atmel-tdes.c 	if ((dd->flags & TDES_FLAGS_ENCRYPT) || (dd->flags & TDES_FLAGS_OFB))
dd                303 drivers/crypto/atmel-tdes.c 	atmel_tdes_write(dd, TDES_CR, valcr);
dd                304 drivers/crypto/atmel-tdes.c 	atmel_tdes_write(dd, TDES_MR, valmr);
dd                306 drivers/crypto/atmel-tdes.c 	atmel_tdes_write_n(dd, TDES_KEY1W1R, dd->ctx->key,
dd                307 drivers/crypto/atmel-tdes.c 						dd->ctx->keylen >> 2);
dd                309 drivers/crypto/atmel-tdes.c 	if (((dd->flags & TDES_FLAGS_CBC) || (dd->flags & TDES_FLAGS_CFB) ||
dd                310 drivers/crypto/atmel-tdes.c 		(dd->flags & TDES_FLAGS_OFB)) && dd->req->info) {
dd                311 drivers/crypto/atmel-tdes.c 		atmel_tdes_write_n(dd, TDES_IV1R, dd->req->info, 2);
dd                317 drivers/crypto/atmel-tdes.c static int atmel_tdes_crypt_pdc_stop(struct atmel_tdes_dev *dd)
dd                322 drivers/crypto/atmel-tdes.c 	atmel_tdes_write(dd, TDES_PTCR, TDES_PTCR_TXTDIS|TDES_PTCR_RXTDIS);
dd                324 drivers/crypto/atmel-tdes.c 	if (dd->flags & TDES_FLAGS_FAST) {
dd                325 drivers/crypto/atmel-tdes.c 		dma_unmap_sg(dd->dev, dd->out_sg, 1, DMA_FROM_DEVICE);
dd                326 drivers/crypto/atmel-tdes.c 		dma_unmap_sg(dd->dev, dd->in_sg, 1, DMA_TO_DEVICE);
dd                328 drivers/crypto/atmel-tdes.c 		dma_sync_single_for_device(dd->dev, dd->dma_addr_out,
dd                329 drivers/crypto/atmel-tdes.c 					   dd->dma_size, DMA_FROM_DEVICE);
dd                332 drivers/crypto/atmel-tdes.c 		count = atmel_tdes_sg_copy(&dd->out_sg, &dd->out_offset,
dd                333 drivers/crypto/atmel-tdes.c 				dd->buf_out, dd->buflen, dd->dma_size, 1);
dd                334 drivers/crypto/atmel-tdes.c 		if (count != dd->dma_size) {
dd                343 drivers/crypto/atmel-tdes.c static int atmel_tdes_buff_init(struct atmel_tdes_dev *dd)
dd                347 drivers/crypto/atmel-tdes.c 	dd->buf_in = (void *)__get_free_pages(GFP_KERNEL, 0);
dd                348 drivers/crypto/atmel-tdes.c 	dd->buf_out = (void *)__get_free_pages(GFP_KERNEL, 0);
dd                349 drivers/crypto/atmel-tdes.c 	dd->buflen = PAGE_SIZE;
dd                350 drivers/crypto/atmel-tdes.c 	dd->buflen &= ~(DES_BLOCK_SIZE - 1);
dd                352 drivers/crypto/atmel-tdes.c 	if (!dd->buf_in || !dd->buf_out) {
dd                353 drivers/crypto/atmel-tdes.c 		dev_err(dd->dev, "unable to alloc pages.\n");
dd                358 drivers/crypto/atmel-tdes.c 	dd->dma_addr_in = dma_map_single(dd->dev, dd->buf_in,
dd                359 drivers/crypto/atmel-tdes.c 					dd->buflen, DMA_TO_DEVICE);
dd                360 drivers/crypto/atmel-tdes.c 	if (dma_mapping_error(dd->dev, dd->dma_addr_in)) {
dd                361 drivers/crypto/atmel-tdes.c 		dev_err(dd->dev, "dma %zd bytes error\n", dd->buflen);
dd                366 drivers/crypto/atmel-tdes.c 	dd->dma_addr_out = dma_map_single(dd->dev, dd->buf_out,
dd                367 drivers/crypto/atmel-tdes.c 					dd->buflen, DMA_FROM_DEVICE);
dd                368 drivers/crypto/atmel-tdes.c 	if (dma_mapping_error(dd->dev, dd->dma_addr_out)) {
dd                369 drivers/crypto/atmel-tdes.c 		dev_err(dd->dev, "dma %zd bytes error\n", dd->buflen);
dd                377 drivers/crypto/atmel-tdes.c 	dma_unmap_single(dd->dev, dd->dma_addr_in, dd->buflen,
dd                381 drivers/crypto/atmel-tdes.c 	free_page((unsigned long)dd->buf_out);
dd                382 drivers/crypto/atmel-tdes.c 	free_page((unsigned long)dd->buf_in);
dd                388 drivers/crypto/atmel-tdes.c static void atmel_tdes_buff_cleanup(struct atmel_tdes_dev *dd)
dd                390 drivers/crypto/atmel-tdes.c 	dma_unmap_single(dd->dev, dd->dma_addr_out, dd->buflen,
dd                392 drivers/crypto/atmel-tdes.c 	dma_unmap_single(dd->dev, dd->dma_addr_in, dd->buflen,
dd                394 drivers/crypto/atmel-tdes.c 	free_page((unsigned long)dd->buf_out);
dd                395 drivers/crypto/atmel-tdes.c 	free_page((unsigned long)dd->buf_in);
dd                402 drivers/crypto/atmel-tdes.c 	struct atmel_tdes_dev *dd = ctx->dd;
dd                405 drivers/crypto/atmel-tdes.c 	dd->dma_size = length;
dd                407 drivers/crypto/atmel-tdes.c 	if (!(dd->flags & TDES_FLAGS_FAST)) {
dd                408 drivers/crypto/atmel-tdes.c 		dma_sync_single_for_device(dd->dev, dma_addr_in, length,
dd                412 drivers/crypto/atmel-tdes.c 	if ((dd->flags & TDES_FLAGS_CFB) && (dd->flags & TDES_FLAGS_CFB8))
dd                414 drivers/crypto/atmel-tdes.c 	else if ((dd->flags & TDES_FLAGS_CFB) && (dd->flags & TDES_FLAGS_CFB16))
dd                419 drivers/crypto/atmel-tdes.c 	atmel_tdes_write(dd, TDES_PTCR, TDES_PTCR_TXTDIS|TDES_PTCR_RXTDIS);
dd                420 drivers/crypto/atmel-tdes.c 	atmel_tdes_write(dd, TDES_TPR, dma_addr_in);
dd                421 drivers/crypto/atmel-tdes.c 	atmel_tdes_write(dd, TDES_TCR, len32);
dd                422 drivers/crypto/atmel-tdes.c 	atmel_tdes_write(dd, TDES_RPR, dma_addr_out);
dd                423 drivers/crypto/atmel-tdes.c 	atmel_tdes_write(dd, TDES_RCR, len32);
dd                426 drivers/crypto/atmel-tdes.c 	atmel_tdes_write(dd, TDES_IER, TDES_INT_ENDRX);
dd                429 drivers/crypto/atmel-tdes.c 	atmel_tdes_write(dd, TDES_PTCR, TDES_PTCR_TXTEN | TDES_PTCR_RXTEN);
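atmel_tdes_crypt_pdc (402-429) programs both PDC directions back to back. The register sequence at 419-429, annotated (comments are interpretation; len32 is the transfer count in peripheral-width units, as computed by the CFB8/CFB16 conditionals above):

	/* stop both PDC directions before reprogramming them */
	atmel_tdes_write(dd, TDES_PTCR, TDES_PTCR_TXTDIS | TDES_PTCR_RXTDIS);
	atmel_tdes_write(dd, TDES_TPR, dma_addr_in);	/* TX: data into TDES */
	atmel_tdes_write(dd, TDES_TCR, len32);
	atmel_tdes_write(dd, TDES_RPR, dma_addr_out);	/* RX: results out */
	atmel_tdes_write(dd, TDES_RCR, len32);

	atmel_tdes_write(dd, TDES_IER, TDES_INT_ENDRX);	/* IRQ when RX drains */

	/* start TX and RX together */
	atmel_tdes_write(dd, TDES_PTCR, TDES_PTCR_TXTEN | TDES_PTCR_RXTEN);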
dd                438 drivers/crypto/atmel-tdes.c 	struct atmel_tdes_dev *dd = ctx->dd;
dd                442 drivers/crypto/atmel-tdes.c 	dd->dma_size = length;
dd                444 drivers/crypto/atmel-tdes.c 	if (!(dd->flags & TDES_FLAGS_FAST)) {
dd                445 drivers/crypto/atmel-tdes.c 		dma_sync_single_for_device(dd->dev, dma_addr_in, length,
dd                449 drivers/crypto/atmel-tdes.c 	if (dd->flags & TDES_FLAGS_CFB8) {
dd                450 drivers/crypto/atmel-tdes.c 		dd->dma_lch_in.dma_conf.dst_addr_width =
dd                452 drivers/crypto/atmel-tdes.c 		dd->dma_lch_out.dma_conf.src_addr_width =
dd                454 drivers/crypto/atmel-tdes.c 	} else if (dd->flags & TDES_FLAGS_CFB16) {
dd                455 drivers/crypto/atmel-tdes.c 		dd->dma_lch_in.dma_conf.dst_addr_width =
dd                457 drivers/crypto/atmel-tdes.c 		dd->dma_lch_out.dma_conf.src_addr_width =
dd                460 drivers/crypto/atmel-tdes.c 		dd->dma_lch_in.dma_conf.dst_addr_width =
dd                462 drivers/crypto/atmel-tdes.c 		dd->dma_lch_out.dma_conf.src_addr_width =
dd                466 drivers/crypto/atmel-tdes.c 	dmaengine_slave_config(dd->dma_lch_in.chan, &dd->dma_lch_in.dma_conf);
dd                467 drivers/crypto/atmel-tdes.c 	dmaengine_slave_config(dd->dma_lch_out.chan, &dd->dma_lch_out.dma_conf);
dd                469 drivers/crypto/atmel-tdes.c 	dd->flags |= TDES_FLAGS_DMA;
dd                479 drivers/crypto/atmel-tdes.c 	in_desc = dmaengine_prep_slave_sg(dd->dma_lch_in.chan, &sg[0],
dd                485 drivers/crypto/atmel-tdes.c 	out_desc = dmaengine_prep_slave_sg(dd->dma_lch_out.chan, &sg[1],
dd                492 drivers/crypto/atmel-tdes.c 	out_desc->callback_param = dd;
dd                495 drivers/crypto/atmel-tdes.c 	dma_async_issue_pending(dd->dma_lch_out.chan);
dd                498 drivers/crypto/atmel-tdes.c 	dma_async_issue_pending(dd->dma_lch_in.chan);
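atmel_tdes_crypt_dma (438-498) drives two dmaengine channels at once: a MEM_TO_DEV channel feeding the cipher and a DEV_TO_MEM channel draining it, with the completion callback attached to the output descriptor since ciphertext lands last. A trimmed sketch of the submit sequence; sg[] is the local two-entry scatterlist built from the in/out DMA addresses, and the single-entry sg count, the descriptor flags, and the callback name are assumptions, not quotes:

	in_desc = dmaengine_prep_slave_sg(dd->dma_lch_in.chan, &sg[0], 1,
					  DMA_MEM_TO_DEV,
					  DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	out_desc = dmaengine_prep_slave_sg(dd->dma_lch_out.chan, &sg[1], 1,
					   DMA_DEV_TO_MEM,
					   DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!in_desc || !out_desc)
		return -EINVAL;

	out_desc->callback = atmel_tdes_dma_callback;	/* fires when output is done */
	out_desc->callback_param = dd;

	dmaengine_submit(out_desc);
	dma_async_issue_pending(dd->dma_lch_out.chan);
	dmaengine_submit(in_desc);
	dma_async_issue_pending(dd->dma_lch_in.chan);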
dd                503 drivers/crypto/atmel-tdes.c static int atmel_tdes_crypt_start(struct atmel_tdes_dev *dd)
dd                506 drivers/crypto/atmel-tdes.c 					crypto_ablkcipher_reqtfm(dd->req));
dd                511 drivers/crypto/atmel-tdes.c 	if ((!dd->in_offset) && (!dd->out_offset)) {
dd                513 drivers/crypto/atmel-tdes.c 		in = IS_ALIGNED((u32)dd->in_sg->offset, sizeof(u32)) &&
dd                514 drivers/crypto/atmel-tdes.c 			IS_ALIGNED(dd->in_sg->length, dd->ctx->block_size);
dd                515 drivers/crypto/atmel-tdes.c 		out = IS_ALIGNED((u32)dd->out_sg->offset, sizeof(u32)) &&
dd                516 drivers/crypto/atmel-tdes.c 			IS_ALIGNED(dd->out_sg->length, dd->ctx->block_size);
dd                519 drivers/crypto/atmel-tdes.c 		if (sg_dma_len(dd->in_sg) != sg_dma_len(dd->out_sg))
dd                525 drivers/crypto/atmel-tdes.c 		count = min_t(size_t, dd->total, sg_dma_len(dd->in_sg));
dd                526 drivers/crypto/atmel-tdes.c 		count = min_t(size_t, count, sg_dma_len(dd->out_sg));
dd                528 drivers/crypto/atmel-tdes.c 		err = dma_map_sg(dd->dev, dd->in_sg, 1, DMA_TO_DEVICE);
dd                530 drivers/crypto/atmel-tdes.c 			dev_err(dd->dev, "dma_map_sg() error\n");
dd                534 drivers/crypto/atmel-tdes.c 		err = dma_map_sg(dd->dev, dd->out_sg, 1,
dd                537 drivers/crypto/atmel-tdes.c 			dev_err(dd->dev, "dma_map_sg() error\n");
dd                538 drivers/crypto/atmel-tdes.c 			dma_unmap_sg(dd->dev, dd->in_sg, 1,
dd                543 drivers/crypto/atmel-tdes.c 		addr_in = sg_dma_address(dd->in_sg);
dd                544 drivers/crypto/atmel-tdes.c 		addr_out = sg_dma_address(dd->out_sg);
dd                546 drivers/crypto/atmel-tdes.c 		dd->flags |= TDES_FLAGS_FAST;
dd                550 drivers/crypto/atmel-tdes.c 		count = atmel_tdes_sg_copy(&dd->in_sg, &dd->in_offset,
dd                551 drivers/crypto/atmel-tdes.c 				dd->buf_in, dd->buflen, dd->total, 0);
dd                553 drivers/crypto/atmel-tdes.c 		addr_in = dd->dma_addr_in;
dd                554 drivers/crypto/atmel-tdes.c 		addr_out = dd->dma_addr_out;
dd                556 drivers/crypto/atmel-tdes.c 		dd->flags &= ~TDES_FLAGS_FAST;
dd                559 drivers/crypto/atmel-tdes.c 	dd->total -= count;
dd                561 drivers/crypto/atmel-tdes.c 	if (dd->caps.has_dma)
dd                566 drivers/crypto/atmel-tdes.c 	if (err && (dd->flags & TDES_FLAGS_FAST)) {
dd                567 drivers/crypto/atmel-tdes.c 		dma_unmap_sg(dd->dev, dd->in_sg, 1, DMA_TO_DEVICE);
dd                568 drivers/crypto/atmel-tdes.c 	dma_unmap_sg(dd->dev, dd->out_sg, 1, DMA_FROM_DEVICE);
dd                574 drivers/crypto/atmel-tdes.c static void atmel_tdes_finish_req(struct atmel_tdes_dev *dd, int err)
dd                576 drivers/crypto/atmel-tdes.c 	struct ablkcipher_request *req = dd->req;
dd                578 drivers/crypto/atmel-tdes.c 	clk_disable_unprepare(dd->iclk);
dd                580 drivers/crypto/atmel-tdes.c 	dd->flags &= ~TDES_FLAGS_BUSY;
dd                585 drivers/crypto/atmel-tdes.c static int atmel_tdes_handle_queue(struct atmel_tdes_dev *dd,
dd                594 drivers/crypto/atmel-tdes.c 	spin_lock_irqsave(&dd->lock, flags);
dd                596 drivers/crypto/atmel-tdes.c 		ret = ablkcipher_enqueue_request(&dd->queue, req);
dd                597 drivers/crypto/atmel-tdes.c 	if (dd->flags & TDES_FLAGS_BUSY) {
dd                598 drivers/crypto/atmel-tdes.c 		spin_unlock_irqrestore(&dd->lock, flags);
dd                601 drivers/crypto/atmel-tdes.c 	backlog = crypto_get_backlog(&dd->queue);
dd                602 drivers/crypto/atmel-tdes.c 	async_req = crypto_dequeue_request(&dd->queue);
dd                604 drivers/crypto/atmel-tdes.c 		dd->flags |= TDES_FLAGS_BUSY;
dd                605 drivers/crypto/atmel-tdes.c 	spin_unlock_irqrestore(&dd->lock, flags);
dd                616 drivers/crypto/atmel-tdes.c 	dd->req = req;
dd                617 drivers/crypto/atmel-tdes.c 	dd->total = req->nbytes;
dd                618 drivers/crypto/atmel-tdes.c 	dd->in_offset = 0;
dd                619 drivers/crypto/atmel-tdes.c 	dd->in_sg = req->src;
dd                620 drivers/crypto/atmel-tdes.c 	dd->out_offset = 0;
dd                621 drivers/crypto/atmel-tdes.c 	dd->out_sg = req->dst;
dd                626 drivers/crypto/atmel-tdes.c 	dd->flags = (dd->flags & ~TDES_FLAGS_MODE_MASK) | rctx->mode;
dd                627 drivers/crypto/atmel-tdes.c 	dd->ctx = ctx;
dd                628 drivers/crypto/atmel-tdes.c 	ctx->dd = dd;
dd                630 drivers/crypto/atmel-tdes.c 	err = atmel_tdes_write_ctrl(dd);
dd                632 drivers/crypto/atmel-tdes.c 		err = atmel_tdes_crypt_start(dd);
dd                635 drivers/crypto/atmel-tdes.c 		atmel_tdes_finish_req(dd, err);
dd                636 drivers/crypto/atmel-tdes.c 		tasklet_schedule(&dd->queue_task);
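
The atmel_tdes_handle_queue() entries (atmel-tdes.c:585-636) show the stock crypto-driver queue pattern: enqueue under a spinlock, bail out if the device is already flagged BUSY, otherwise dequeue one request, set BUSY, and drop the lock before touching hardware; the completion path clears BUSY and re-enters the handler. A user-space analogue, with a pthread mutex standing in for spin_lock_irqsave() and all names hypothetical:

        #include <pthread.h>
        #include <stdbool.h>
        #include <stdio.h>

        #define QLEN 8

        /* User-space analogue of the driver's device; names invented. */
        struct dev_stub {
                pthread_mutex_t lock;
                bool busy;
                int queue[QLEN];	/* stand-in for the request queue */
                int head, tail;
        };

        /* Mirrors atmel_tdes_handle_queue(): enqueue, bail if busy,
         * otherwise claim the device and pop one request. */
        static int handle_queue(struct dev_stub *dd, int req)
        {
                int next = -1;

                pthread_mutex_lock(&dd->lock);
                if (req >= 0 && (dd->tail + 1) % QLEN != dd->head) {
                        dd->queue[dd->tail] = req;
                        dd->tail = (dd->tail + 1) % QLEN;
                }
                if (dd->busy) {
                        pthread_mutex_unlock(&dd->lock);
                        return 0;	/* a later completion drains the queue */
                }
                if (dd->head != dd->tail) {
                        next = dd->queue[dd->head];
                        dd->head = (dd->head + 1) % QLEN;
                        dd->busy = true;
                }
                pthread_mutex_unlock(&dd->lock);

                if (next >= 0)
                        printf("start request %d with lock dropped\n", next);
                return 0;
        }

        static void finish(struct dev_stub *dd)
        {
                pthread_mutex_lock(&dd->lock);
                dd->busy = false;	/* ~TDES_FLAGS_BUSY */
                pthread_mutex_unlock(&dd->lock);
                handle_queue(dd, -1);	/* re-enter with req == NULL */
        }

        int main(void)
        {
                struct dev_stub dd = { .lock = PTHREAD_MUTEX_INITIALIZER };

                handle_queue(&dd, 1);	/* starts immediately */
                handle_queue(&dd, 2);	/* queued: device busy */
                finish(&dd);		/* drains request 2 */
                return 0;
        }
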
dd                642 drivers/crypto/atmel-tdes.c static int atmel_tdes_crypt_dma_stop(struct atmel_tdes_dev *dd)
dd                647 drivers/crypto/atmel-tdes.c 	if (dd->flags & TDES_FLAGS_DMA) {
dd                649 drivers/crypto/atmel-tdes.c 		if (dd->flags & TDES_FLAGS_FAST) {
dd                650 drivers/crypto/atmel-tdes.c 			dma_unmap_sg(dd->dev, dd->out_sg, 1, DMA_FROM_DEVICE);
dd                651 drivers/crypto/atmel-tdes.c 			dma_unmap_sg(dd->dev, dd->in_sg, 1, DMA_TO_DEVICE);
dd                653 drivers/crypto/atmel-tdes.c 			dma_sync_single_for_device(dd->dev, dd->dma_addr_out,
dd                654 drivers/crypto/atmel-tdes.c 				dd->dma_size, DMA_FROM_DEVICE);
dd                657 drivers/crypto/atmel-tdes.c 			count = atmel_tdes_sg_copy(&dd->out_sg, &dd->out_offset,
dd                658 drivers/crypto/atmel-tdes.c 				dd->buf_out, dd->buflen, dd->dma_size, 1);
dd                659 drivers/crypto/atmel-tdes.c 			if (count != dd->dma_size) {
dd                702 drivers/crypto/atmel-tdes.c 	return atmel_tdes_handle_queue(ctx->dd, req);
dd                717 drivers/crypto/atmel-tdes.c static int atmel_tdes_dma_init(struct atmel_tdes_dev *dd,
dd                726 drivers/crypto/atmel-tdes.c 	dd->dma_lch_in.chan = dma_request_slave_channel_compat(mask,
dd                727 drivers/crypto/atmel-tdes.c 			atmel_tdes_filter, &pdata->dma_slave->rxdata, dd->dev, "tx");
dd                728 drivers/crypto/atmel-tdes.c 	if (!dd->dma_lch_in.chan)
dd                731 drivers/crypto/atmel-tdes.c 	dd->dma_lch_in.dma_conf.direction = DMA_MEM_TO_DEV;
dd                732 drivers/crypto/atmel-tdes.c 	dd->dma_lch_in.dma_conf.dst_addr = dd->phys_base +
dd                734 drivers/crypto/atmel-tdes.c 	dd->dma_lch_in.dma_conf.src_maxburst = 1;
dd                735 drivers/crypto/atmel-tdes.c 	dd->dma_lch_in.dma_conf.src_addr_width =
dd                737 drivers/crypto/atmel-tdes.c 	dd->dma_lch_in.dma_conf.dst_maxburst = 1;
dd                738 drivers/crypto/atmel-tdes.c 	dd->dma_lch_in.dma_conf.dst_addr_width =
dd                740 drivers/crypto/atmel-tdes.c 	dd->dma_lch_in.dma_conf.device_fc = false;
dd                742 drivers/crypto/atmel-tdes.c 	dd->dma_lch_out.chan = dma_request_slave_channel_compat(mask,
dd                743 drivers/crypto/atmel-tdes.c 			atmel_tdes_filter, &pdata->dma_slave->txdata, dd->dev, "rx");
dd                744 drivers/crypto/atmel-tdes.c 	if (!dd->dma_lch_out.chan)
dd                747 drivers/crypto/atmel-tdes.c 	dd->dma_lch_out.dma_conf.direction = DMA_DEV_TO_MEM;
dd                748 drivers/crypto/atmel-tdes.c 	dd->dma_lch_out.dma_conf.src_addr = dd->phys_base +
dd                750 drivers/crypto/atmel-tdes.c 	dd->dma_lch_out.dma_conf.src_maxburst = 1;
dd                751 drivers/crypto/atmel-tdes.c 	dd->dma_lch_out.dma_conf.src_addr_width =
dd                753 drivers/crypto/atmel-tdes.c 	dd->dma_lch_out.dma_conf.dst_maxburst = 1;
dd                754 drivers/crypto/atmel-tdes.c 	dd->dma_lch_out.dma_conf.dst_addr_width =
dd                756 drivers/crypto/atmel-tdes.c 	dd->dma_lch_out.dma_conf.device_fc = false;
dd                761 drivers/crypto/atmel-tdes.c 	dma_release_channel(dd->dma_lch_in.chan);
dd                763 drivers/crypto/atmel-tdes.c 	dev_warn(dd->dev, "no DMA channel available\n");
dd                767 drivers/crypto/atmel-tdes.c static void atmel_tdes_dma_cleanup(struct atmel_tdes_dev *dd)
dd                769 drivers/crypto/atmel-tdes.c 	dma_release_channel(dd->dma_lch_in.chan);
dd                770 drivers/crypto/atmel-tdes.c 	dma_release_channel(dd->dma_lch_out.chan);
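
atmel_tdes_dma_init()/atmel_tdes_dma_cleanup() (atmel-tdes.c:717-770) acquire the RX channel first and, when the TX request fails, release the already-acquired RX channel on the error path before warning; cleanup releases both. A stand-alone sketch of that acquire-two, unwind-on-failure goto idiom; request_chan()/release_chan() are invented stand-ins for the dmaengine calls:

        #include <stdio.h>
        #include <stdlib.h>

        /* Invented stand-ins for dma_request_chan()/dma_release_channel(). */
        static void *request_chan(const char *name)
        {
                printf("request %s\n", name);
                return malloc(1);	/* NULL models a missing channel */
        }

        static void release_chan(void *ch, const char *name)
        {
                printf("release %s\n", name);
                free(ch);
        }

        static int dma_init(void **in, void **out)
        {
                *in = request_chan("in");
                if (!*in)
                        return -1;

                *out = request_chan("out");
                if (!*out)
                        goto err_dma_out;	/* unwind what already succeeded */

                return 0;

        err_dma_out:
                release_chan(*in, "in");
                fprintf(stderr, "no DMA channel available\n");
                return -1;
        }

        int main(void)
        {
                void *in, *out;

                if (dma_init(&in, &out) == 0) {
                        release_chan(out, "out");
                        release_chan(in, "in");
                }
                return 0;
        }
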
dd                880 drivers/crypto/atmel-tdes.c 	struct atmel_tdes_dev *dd;
dd                884 drivers/crypto/atmel-tdes.c 	dd = atmel_tdes_find_dev(ctx);
dd                885 drivers/crypto/atmel-tdes.c 	if (!dd)
dd               1094 drivers/crypto/atmel-tdes.c 	struct atmel_tdes_dev *dd = (struct atmel_tdes_dev *)data;
dd               1096 drivers/crypto/atmel-tdes.c 	atmel_tdes_handle_queue(dd, NULL);
dd               1101 drivers/crypto/atmel-tdes.c 	struct atmel_tdes_dev *dd = (struct atmel_tdes_dev *) data;
dd               1104 drivers/crypto/atmel-tdes.c 	if (!(dd->flags & TDES_FLAGS_DMA))
dd               1105 drivers/crypto/atmel-tdes.c 		err = atmel_tdes_crypt_pdc_stop(dd);
dd               1107 drivers/crypto/atmel-tdes.c 		err = atmel_tdes_crypt_dma_stop(dd);
dd               1109 drivers/crypto/atmel-tdes.c 	err = dd->err ? : err;
dd               1111 drivers/crypto/atmel-tdes.c 	if (dd->total && !err) {
dd               1112 drivers/crypto/atmel-tdes.c 		if (dd->flags & TDES_FLAGS_FAST) {
dd               1113 drivers/crypto/atmel-tdes.c 			dd->in_sg = sg_next(dd->in_sg);
dd               1114 drivers/crypto/atmel-tdes.c 			dd->out_sg = sg_next(dd->out_sg);
dd               1115 drivers/crypto/atmel-tdes.c 			if (!dd->in_sg || !dd->out_sg)
dd               1119 drivers/crypto/atmel-tdes.c 			err = atmel_tdes_crypt_start(dd);
dd               1124 drivers/crypto/atmel-tdes.c 	atmel_tdes_finish_req(dd, err);
dd               1125 drivers/crypto/atmel-tdes.c 	atmel_tdes_handle_queue(dd, NULL);
dd               1146 drivers/crypto/atmel-tdes.c static void atmel_tdes_unregister_algs(struct atmel_tdes_dev *dd)
dd               1154 drivers/crypto/atmel-tdes.c static int atmel_tdes_register_algs(struct atmel_tdes_dev *dd)
dd               1173 drivers/crypto/atmel-tdes.c static void atmel_tdes_get_cap(struct atmel_tdes_dev *dd)
dd               1176 drivers/crypto/atmel-tdes.c 	dd->caps.has_dma = 0;
dd               1177 drivers/crypto/atmel-tdes.c 	dd->caps.has_cfb_3keys = 0;
dd               1180 drivers/crypto/atmel-tdes.c 	switch (dd->hw_version & 0xf00) {
dd               1182 drivers/crypto/atmel-tdes.c 		dd->caps.has_dma = 1;
dd               1183 drivers/crypto/atmel-tdes.c 		dd->caps.has_cfb_3keys = 1;
dd               1188 drivers/crypto/atmel-tdes.c 		dev_warn(dd->dev,
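
atmel_tdes_get_cap() (atmel-tdes.c:1173-1188) zeroes every capability, then enables features keyed on the major nibble of the hardware version register, warning on revisions it does not know. A stand-alone rendering of that decode; the revision values in the switch are illustrative, not Atmel's actual revision map:

        #include <stdint.h>
        #include <stdio.h>

        struct caps_stub {
                unsigned int has_dma : 1;
                unsigned int has_cfb_3keys : 1;
        };

        /* Mirrors atmel_tdes_get_cap(): start from the lowest common
         * denominator, then switch on the major-revision field. */
        static int get_cap(uint32_t hw_version, struct caps_stub *caps)
        {
                caps->has_dma = 0;
                caps->has_cfb_3keys = 0;

                switch (hw_version & 0xf00) {
                case 0x700:		/* illustrative revision value */
                        caps->has_dma = 1;
                        caps->has_cfb_3keys = 1;
                        break;
                case 0x600:		/* base feature set only */
                        break;
                default:
                        fprintf(stderr, "unmanaged tdes version 0x%x\n",
                                (unsigned)hw_version);
                        return -1;
                }
                return 0;
        }

        int main(void)
        {
                struct caps_stub caps;

                if (get_cap(0x712, &caps) == 0)
                        printf("dma=%u cfb3=%u\n", caps.has_dma, caps.has_cfb_3keys);
                return 0;
        }
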
dd                 25 drivers/crypto/omap-aes-gcm.c static int omap_aes_gcm_handle_queue(struct omap_aes_dev *dd,
dd                 28 drivers/crypto/omap-aes-gcm.c static void omap_aes_gcm_finish_req(struct omap_aes_dev *dd, int ret)
dd                 30 drivers/crypto/omap-aes-gcm.c 	struct aead_request *req = dd->aead_req;
dd                 32 drivers/crypto/omap-aes-gcm.c 	dd->flags &= ~FLAGS_BUSY;
dd                 33 drivers/crypto/omap-aes-gcm.c 	dd->in_sg = NULL;
dd                 34 drivers/crypto/omap-aes-gcm.c 	dd->out_sg = NULL;
dd                 39 drivers/crypto/omap-aes-gcm.c static void omap_aes_gcm_done_task(struct omap_aes_dev *dd)
dd                 45 drivers/crypto/omap-aes-gcm.c 	alen = ALIGN(dd->assoc_len, AES_BLOCK_SIZE);
dd                 46 drivers/crypto/omap-aes-gcm.c 	clen = ALIGN(dd->total, AES_BLOCK_SIZE);
dd                 47 drivers/crypto/omap-aes-gcm.c 	rctx = aead_request_ctx(dd->aead_req);
dd                 49 drivers/crypto/omap-aes-gcm.c 	nsg = !!(dd->assoc_len && dd->total);
dd                 51 drivers/crypto/omap-aes-gcm.c 	dma_sync_sg_for_device(dd->dev, dd->out_sg, dd->out_sg_len,
dd                 53 drivers/crypto/omap-aes-gcm.c 	dma_unmap_sg(dd->dev, dd->in_sg, dd->in_sg_len, DMA_TO_DEVICE);
dd                 54 drivers/crypto/omap-aes-gcm.c 	dma_unmap_sg(dd->dev, dd->out_sg, dd->out_sg_len, DMA_FROM_DEVICE);
dd                 55 drivers/crypto/omap-aes-gcm.c 	omap_aes_crypt_dma_stop(dd);
dd                 57 drivers/crypto/omap-aes-gcm.c 	omap_crypto_cleanup(dd->out_sg, dd->orig_out,
dd                 58 drivers/crypto/omap-aes-gcm.c 			    dd->aead_req->assoclen, dd->total,
dd                 59 drivers/crypto/omap-aes-gcm.c 			    FLAGS_OUT_DATA_ST_SHIFT, dd->flags);
dd                 61 drivers/crypto/omap-aes-gcm.c 	if (dd->flags & FLAGS_ENCRYPT)
dd                 63 drivers/crypto/omap-aes-gcm.c 					 dd->aead_req->dst,
dd                 64 drivers/crypto/omap-aes-gcm.c 					 dd->total + dd->aead_req->assoclen,
dd                 65 drivers/crypto/omap-aes-gcm.c 					 dd->authsize, 1);
dd                 67 drivers/crypto/omap-aes-gcm.c 	omap_crypto_cleanup(&dd->in_sgl[0], NULL, 0, alen,
dd                 68 drivers/crypto/omap-aes-gcm.c 			    FLAGS_ASSOC_DATA_ST_SHIFT, dd->flags);
dd                 70 drivers/crypto/omap-aes-gcm.c 	omap_crypto_cleanup(&dd->in_sgl[nsg], NULL, 0, clen,
dd                 71 drivers/crypto/omap-aes-gcm.c 			    FLAGS_IN_DATA_ST_SHIFT, dd->flags);
dd                 73 drivers/crypto/omap-aes-gcm.c 	if (!(dd->flags & FLAGS_ENCRYPT)) {
dd                 75 drivers/crypto/omap-aes-gcm.c 		for (i = 0; i < dd->authsize; i++) {
dd                 77 drivers/crypto/omap-aes-gcm.c 				dev_err(dd->dev, "GCM decryption: Tag Message is wrong\n");
dd                 83 drivers/crypto/omap-aes-gcm.c 	omap_aes_gcm_finish_req(dd, ret);
dd                 84 drivers/crypto/omap-aes-gcm.c 	omap_aes_gcm_handle_queue(dd, NULL);
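
On decryption, omap_aes_gcm_done_task() (omap-aes-gcm.c:39-84) compares the hardware-computed tag against the tag carried in the request byte by byte and fails the request when any byte differs. A user-space sketch of the same check; it inspects every byte like the driver loop, accumulating differences with XOR so no early exit reveals which byte mismatched (EBADMSG_STUB is an invented stand-in for the kernel's -EBADMSG):

        #include <stddef.h>
        #include <stdint.h>
        #include <stdio.h>
        #include <string.h>

        #define EBADMSG_STUB 74	/* stand-in for the kernel's -EBADMSG */

        /* Compare a computed GCM tag against the expected one over
         * authsize bytes; any mismatch fails the whole request. */
        static int check_tag(const uint8_t *calc, const uint8_t *expected,
                             size_t authsize)
        {
                uint8_t diff = 0;
                size_t i;

                for (i = 0; i < authsize; i++)
                        diff |= calc[i] ^ expected[i];

                return diff ? -EBADMSG_STUB : 0;
        }

        int main(void)
        {
                uint8_t a[16], b[16];

                memset(a, 0xab, sizeof(a));
                memcpy(b, a, sizeof(b));
                printf("match: %d\n", check_tag(a, b, sizeof(a)));	/* 0 */
                b[3] ^= 1;
                printf("tamper: %d\n", check_tag(a, b, sizeof(a)));	/* -74 */
                return 0;
        }
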
dd                 87 drivers/crypto/omap-aes-gcm.c static int omap_aes_gcm_copy_buffers(struct omap_aes_dev *dd,
dd                100 drivers/crypto/omap-aes-gcm.c 	if (dd->flags & FLAGS_RFC4106_GCM)
dd                103 drivers/crypto/omap-aes-gcm.c 	if (!(dd->flags & FLAGS_ENCRYPT))
dd                111 drivers/crypto/omap-aes-gcm.c 	omap_aes_clear_copy_flags(dd);
dd                113 drivers/crypto/omap-aes-gcm.c 	sg_init_table(dd->in_sgl, nsg + 1);
dd                117 drivers/crypto/omap-aes-gcm.c 					   AES_BLOCK_SIZE, dd->in_sgl,
dd                122 drivers/crypto/omap-aes-gcm.c 					   &dd->flags);
dd                129 drivers/crypto/omap-aes-gcm.c 					   AES_BLOCK_SIZE, &dd->in_sgl[nsg],
dd                134 drivers/crypto/omap-aes-gcm.c 					   &dd->flags);
dd                137 drivers/crypto/omap-aes-gcm.c 	dd->in_sg = dd->in_sgl;
dd                138 drivers/crypto/omap-aes-gcm.c 	dd->total = cryptlen;
dd                139 drivers/crypto/omap-aes-gcm.c 	dd->assoc_len = assoclen;
dd                140 drivers/crypto/omap-aes-gcm.c 	dd->authsize = authlen;
dd                142 drivers/crypto/omap-aes-gcm.c 	dd->out_sg = req->dst;
dd                143 drivers/crypto/omap-aes-gcm.c 	dd->orig_out = req->dst;
dd                145 drivers/crypto/omap-aes-gcm.c 	dd->out_sg = scatterwalk_ffwd(sg_arr, req->dst, assoclen);
dd                148 drivers/crypto/omap-aes-gcm.c 	if (req->src == req->dst || dd->out_sg == sg_arr)
dd                151 drivers/crypto/omap-aes-gcm.c 	ret = omap_crypto_align_sg(&dd->out_sg, cryptlen,
dd                152 drivers/crypto/omap-aes-gcm.c 				   AES_BLOCK_SIZE, &dd->out_sgl,
dd                154 drivers/crypto/omap-aes-gcm.c 				   FLAGS_OUT_DATA_ST_SHIFT, &dd->flags);
dd                158 drivers/crypto/omap-aes-gcm.c 	dd->in_sg_len = sg_nents_for_len(dd->in_sg, alen + clen);
dd                159 drivers/crypto/omap-aes-gcm.c 	dd->out_sg_len = sg_nents_for_len(dd->out_sg, clen);
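
omap_aes_gcm_done_task() and omap_aes_gcm_copy_buffers() both round the associated-data and payload lengths up to whole AES blocks before sizing buffers and counting scatterlist entries (alen/clen at omap-aes-gcm.c:45-46 and 158-159). ALIGN() is the kernel's power-of-two round-up; a stand-alone rendering with illustrative lengths:

        #include <stddef.h>
        #include <stdio.h>

        #define AES_BLOCK_SIZE 16

        /* Power-of-two round-up, as the kernel's ALIGN() computes it. */
        #define ALIGN_UP(x, a)	(((x) + (a) - 1) & ~((size_t)(a) - 1))

        int main(void)
        {
                size_t assoclen = 20, cryptlen = 100;	/* illustrative */
                size_t alen = ALIGN_UP(assoclen, AES_BLOCK_SIZE);	/* 32 */
                size_t clen = ALIGN_UP(cryptlen, AES_BLOCK_SIZE);	/* 112 */

                /* alen + clen is what the input side is sized for. */
                printf("alen=%zu clen=%zu in=%zu\n", alen, clen, alen + clen);
                return 0;
        }
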
dd                224 drivers/crypto/omap-aes-gcm.c 	struct omap_aes_dev *dd = data;
dd                229 drivers/crypto/omap-aes-gcm.c 	if (!(dd->flags & FLAGS_ENCRYPT))
dd                230 drivers/crypto/omap-aes-gcm.c 		scatterwalk_map_and_copy(tag, dd->aead_req->src,
dd                231 drivers/crypto/omap-aes-gcm.c 					 dd->total + dd->aead_req->assoclen,
dd                232 drivers/crypto/omap-aes-gcm.c 					 dd->authsize, 0);
dd                234 drivers/crypto/omap-aes-gcm.c 	rctx = aead_request_ctx(dd->aead_req);
dd                237 drivers/crypto/omap-aes-gcm.c 		val = omap_aes_read(dd, AES_REG_TAG_N(dd, i));
dd                239 drivers/crypto/omap-aes-gcm.c 		if (!(dd->flags & FLAGS_ENCRYPT))
dd                243 drivers/crypto/omap-aes-gcm.c 	omap_aes_gcm_done_task(dd);
dd                246 drivers/crypto/omap-aes-gcm.c static int omap_aes_gcm_handle_queue(struct omap_aes_dev *dd,
dd                255 drivers/crypto/omap-aes-gcm.c 	spin_lock_irqsave(&dd->lock, flags);
dd                257 drivers/crypto/omap-aes-gcm.c 		ret = aead_enqueue_request(&dd->aead_queue, req);
dd                258 drivers/crypto/omap-aes-gcm.c 	if (dd->flags & FLAGS_BUSY) {
dd                259 drivers/crypto/omap-aes-gcm.c 		spin_unlock_irqrestore(&dd->lock, flags);
dd                263 drivers/crypto/omap-aes-gcm.c 	backlog = aead_get_backlog(&dd->aead_queue);
dd                264 drivers/crypto/omap-aes-gcm.c 	req = aead_dequeue_request(&dd->aead_queue);
dd                266 drivers/crypto/omap-aes-gcm.c 		dd->flags |= FLAGS_BUSY;
dd                267 drivers/crypto/omap-aes-gcm.c 	spin_unlock_irqrestore(&dd->lock, flags);
dd                278 drivers/crypto/omap-aes-gcm.c 	dd->ctx = ctx;
dd                279 drivers/crypto/omap-aes-gcm.c 	rctx->dd = dd;
dd                280 drivers/crypto/omap-aes-gcm.c 	dd->aead_req = req;
dd                283 drivers/crypto/omap-aes-gcm.c 	dd->flags = (dd->flags & ~FLAGS_MODE_MASK) | rctx->mode;
dd                285 drivers/crypto/omap-aes-gcm.c 	err = omap_aes_gcm_copy_buffers(dd, req);
dd                289 drivers/crypto/omap-aes-gcm.c 	err = omap_aes_write_ctrl(dd);
dd                291 drivers/crypto/omap-aes-gcm.c 		err = omap_aes_crypt_dma_start(dd);
dd                294 drivers/crypto/omap-aes-gcm.c 		omap_aes_gcm_finish_req(dd, err);
dd                295 drivers/crypto/omap-aes-gcm.c 		omap_aes_gcm_handle_queue(dd, NULL);
dd                306 drivers/crypto/omap-aes-gcm.c 	struct omap_aes_dev *dd;
dd                327 drivers/crypto/omap-aes-gcm.c 	dd = omap_aes_find_dev(rctx);
dd                328 drivers/crypto/omap-aes-gcm.c 	if (!dd)
dd                332 drivers/crypto/omap-aes-gcm.c 	return omap_aes_gcm_handle_queue(dd, req);
dd                 49 drivers/crypto/omap-aes.c #define omap_aes_read(dd, offset)				\
dd                 52 drivers/crypto/omap-aes.c 	_read_ret = __raw_readl(dd->io_base + offset);		\
dd                 58 drivers/crypto/omap-aes.c inline u32 omap_aes_read(struct omap_aes_dev *dd, u32 offset)
dd                 60 drivers/crypto/omap-aes.c 	return __raw_readl(dd->io_base + offset);
dd                 65 drivers/crypto/omap-aes.c #define omap_aes_write(dd, offset, value)				\
dd                 69 drivers/crypto/omap-aes.c 		__raw_writel(value, dd->io_base + offset);		\
dd                 72 drivers/crypto/omap-aes.c inline void omap_aes_write(struct omap_aes_dev *dd, u32 offset,
dd                 75 drivers/crypto/omap-aes.c 	__raw_writel(value, dd->io_base + offset);
dd                 79 drivers/crypto/omap-aes.c static inline void omap_aes_write_mask(struct omap_aes_dev *dd, u32 offset,
dd                 84 drivers/crypto/omap-aes.c 	val = omap_aes_read(dd, offset);
dd                 87 drivers/crypto/omap-aes.c 	omap_aes_write(dd, offset, val);
dd                 90 drivers/crypto/omap-aes.c static void omap_aes_write_n(struct omap_aes_dev *dd, u32 offset,
dd                 94 drivers/crypto/omap-aes.c 		omap_aes_write(dd, offset, *value);
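
omap-aes.c keeps two builds of its register accessors, a tracing macro form and plain __raw_readl()/__raw_writel() inlines, plus omap_aes_write_mask() for read-modify-write and omap_aes_write_n() for streaming an array into consecutive registers. A user-space model over a fake register file; the volatile array stands in for the ioremapped window and the offsets are arbitrary:

        #include <stdint.h>
        #include <stdio.h>

        static volatile uint32_t regs[64];	/* fake MMIO window */

        static uint32_t reg_read(uint32_t offset)
        {
                return regs[offset / 4];
        }

        static void reg_write(uint32_t offset, uint32_t value)
        {
                regs[offset / 4] = value;
        }

        /* Read-modify-write, as omap_aes_write_mask() does: only bits
         * inside 'mask' change; the caller keeps value within mask. */
        static void reg_write_mask(uint32_t offset, uint32_t value, uint32_t mask)
        {
                uint32_t val = reg_read(offset);

                val &= ~mask;
                val |= value;
                reg_write(offset, val);
        }

        /* Burst-write a buffer to consecutive registers, as
         * omap_aes_write_n() does for the key and IV words. */
        static void reg_write_n(uint32_t offset, const uint32_t *value, int count)
        {
                for (; count--; value++, offset += 4)
                        reg_write(offset, *value);
        }

        int main(void)
        {
                uint32_t iv[4] = { 1, 2, 3, 4 };

                reg_write(0x50, 0xffff0000);
                reg_write_mask(0x50, 0x0000000a, 0x000000ff);
                printf("ctrl=0x%08x\n", (unsigned)reg_read(0x50));	/* 0xffff000a */
                reg_write_n(0x20, iv, 4);
                printf("iv0=%u iv3=%u\n", (unsigned)reg_read(0x20),
                       (unsigned)reg_read(0x2c));
                return 0;
        }
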
dd                 97 drivers/crypto/omap-aes.c static int omap_aes_hw_init(struct omap_aes_dev *dd)
dd                101 drivers/crypto/omap-aes.c 	if (!(dd->flags & FLAGS_INIT)) {
dd                102 drivers/crypto/omap-aes.c 		dd->flags |= FLAGS_INIT;
dd                103 drivers/crypto/omap-aes.c 		dd->err = 0;
dd                106 drivers/crypto/omap-aes.c 	err = pm_runtime_get_sync(dd->dev);
dd                108 drivers/crypto/omap-aes.c 		dev_err(dd->dev, "failed to get sync: %d\n", err);
dd                115 drivers/crypto/omap-aes.c void omap_aes_clear_copy_flags(struct omap_aes_dev *dd)
dd                117 drivers/crypto/omap-aes.c 	dd->flags &= ~(OMAP_CRYPTO_COPY_MASK << FLAGS_IN_DATA_ST_SHIFT);
dd                118 drivers/crypto/omap-aes.c 	dd->flags &= ~(OMAP_CRYPTO_COPY_MASK << FLAGS_OUT_DATA_ST_SHIFT);
dd                119 drivers/crypto/omap-aes.c 	dd->flags &= ~(OMAP_CRYPTO_COPY_MASK << FLAGS_ASSOC_DATA_ST_SHIFT);
dd                122 drivers/crypto/omap-aes.c int omap_aes_write_ctrl(struct omap_aes_dev *dd)
dd                129 drivers/crypto/omap-aes.c 	err = omap_aes_hw_init(dd);
dd                133 drivers/crypto/omap-aes.c 	key32 = dd->ctx->keylen / sizeof(u32);
dd                136 drivers/crypto/omap-aes.c 	if (dd->flags & FLAGS_GCM)
dd                138 drivers/crypto/omap-aes.c 			omap_aes_write(dd, i, 0x0);
dd                141 drivers/crypto/omap-aes.c 		omap_aes_write(dd, AES_REG_KEY(dd, i),
dd                142 drivers/crypto/omap-aes.c 			__le32_to_cpu(dd->ctx->key[i]));
dd                145 drivers/crypto/omap-aes.c 	if ((dd->flags & (FLAGS_CBC | FLAGS_CTR)) && dd->req->info)
dd                146 drivers/crypto/omap-aes.c 		omap_aes_write_n(dd, AES_REG_IV(dd, 0), dd->req->info, 4);
dd                148 drivers/crypto/omap-aes.c 	if ((dd->flags & (FLAGS_GCM)) && dd->aead_req->iv) {
dd                149 drivers/crypto/omap-aes.c 		rctx = aead_request_ctx(dd->aead_req);
dd                150 drivers/crypto/omap-aes.c 		omap_aes_write_n(dd, AES_REG_IV(dd, 0), (u32 *)rctx->iv, 4);
dd                153 drivers/crypto/omap-aes.c 	val = FLD_VAL(((dd->ctx->keylen >> 3) - 1), 4, 3);
dd                154 drivers/crypto/omap-aes.c 	if (dd->flags & FLAGS_CBC)
dd                157 drivers/crypto/omap-aes.c 	if (dd->flags & (FLAGS_CTR | FLAGS_GCM))
dd                160 drivers/crypto/omap-aes.c 	if (dd->flags & FLAGS_GCM)
dd                163 drivers/crypto/omap-aes.c 	if (dd->flags & FLAGS_ENCRYPT)
dd                166 drivers/crypto/omap-aes.c 	omap_aes_write_mask(dd, AES_REG_CTRL(dd), val, AES_REG_CTRL_MASK);
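
omap_aes_write_ctrl() (omap-aes.c:122-166) loads the key and IV, then composes the CTRL word: the key-size field comes from FLD_VAL(((keylen >> 3) - 1), 4, 3), the CBC/CTR/GCM/encrypt bits are OR'd in, and the final write is masked so unrelated CTRL bits survive. A sketch of that field packing; FLD_MASK/FLD_VAL follow the usual omap-header definitions, and the mode bit positions below are illustrative:

        #include <stdint.h>
        #include <stdio.h>

        /* Bit-field helpers in the style of the omap crypto headers:
         * FLD_MASK(start, end) covers bits end..start inclusive. */
        #define FLD_MASK(start, end)	 (((1u << ((start) - (end) + 1)) - 1) << (end))
        #define FLD_VAL(val, start, end) (((val) << (end)) & FLD_MASK(start, end))

        #define CTRL_CBC	(1u << 5)	/* illustrative bit positions */
        #define CTRL_ENCRYPT	(1u << 2)

        int main(void)
        {
                unsigned int keylen = 32;	/* AES-256, in bytes */
                uint32_t val;

                /* (keylen >> 3) - 1 maps 16/24/32 bytes to 1/2/3,
                 * packed into the key-size field at bits 4..3. */
                val = FLD_VAL((keylen >> 3) - 1, 4, 3);
                val |= CTRL_CBC | CTRL_ENCRYPT;

                printf("ctrl=0x%08x\n", (unsigned)val);
                return 0;
        }
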
dd                171 drivers/crypto/omap-aes.c static void omap_aes_dma_trigger_omap2(struct omap_aes_dev *dd, int length)
dd                175 drivers/crypto/omap-aes.c 	val = dd->pdata->dma_start;
dd                177 drivers/crypto/omap-aes.c 	if (dd->dma_lch_out != NULL)
dd                178 drivers/crypto/omap-aes.c 		val |= dd->pdata->dma_enable_out;
dd                179 drivers/crypto/omap-aes.c 	if (dd->dma_lch_in != NULL)
dd                180 drivers/crypto/omap-aes.c 		val |= dd->pdata->dma_enable_in;
dd                182 drivers/crypto/omap-aes.c 	mask = dd->pdata->dma_enable_out | dd->pdata->dma_enable_in |
dd                183 drivers/crypto/omap-aes.c 	       dd->pdata->dma_start;
dd                185 drivers/crypto/omap-aes.c 	omap_aes_write_mask(dd, AES_REG_MASK(dd), val, mask);
dd                189 drivers/crypto/omap-aes.c static void omap_aes_dma_trigger_omap4(struct omap_aes_dev *dd, int length)
dd                191 drivers/crypto/omap-aes.c 	omap_aes_write(dd, AES_REG_LENGTH_N(0), length);
dd                192 drivers/crypto/omap-aes.c 	omap_aes_write(dd, AES_REG_LENGTH_N(1), 0);
dd                193 drivers/crypto/omap-aes.c 	if (dd->flags & FLAGS_GCM)
dd                194 drivers/crypto/omap-aes.c 		omap_aes_write(dd, AES_REG_A_LEN, dd->assoc_len);
dd                196 drivers/crypto/omap-aes.c 	omap_aes_dma_trigger_omap2(dd, length);
dd                199 drivers/crypto/omap-aes.c static void omap_aes_dma_stop(struct omap_aes_dev *dd)
dd                203 drivers/crypto/omap-aes.c 	mask = dd->pdata->dma_enable_out | dd->pdata->dma_enable_in |
dd                204 drivers/crypto/omap-aes.c 	       dd->pdata->dma_start;
dd                206 drivers/crypto/omap-aes.c 	omap_aes_write_mask(dd, AES_REG_MASK(dd), 0, mask);
dd                211 drivers/crypto/omap-aes.c 	struct omap_aes_dev *dd;
dd                214 drivers/crypto/omap-aes.c 	dd = list_first_entry(&dev_list, struct omap_aes_dev, list);
dd                215 drivers/crypto/omap-aes.c 	list_move_tail(&dd->list, &dev_list);
dd                216 drivers/crypto/omap-aes.c 	rctx->dd = dd;
dd                219 drivers/crypto/omap-aes.c 	return dd;
dd                224 drivers/crypto/omap-aes.c 	struct omap_aes_dev *dd = data;
dd                227 drivers/crypto/omap-aes.c 	tasklet_schedule(&dd->done_task);
dd                230 drivers/crypto/omap-aes.c static int omap_aes_dma_init(struct omap_aes_dev *dd)
dd                234 drivers/crypto/omap-aes.c 	dd->dma_lch_out = NULL;
dd                235 drivers/crypto/omap-aes.c 	dd->dma_lch_in = NULL;
dd                237 drivers/crypto/omap-aes.c 	dd->dma_lch_in = dma_request_chan(dd->dev, "rx");
dd                238 drivers/crypto/omap-aes.c 	if (IS_ERR(dd->dma_lch_in)) {
dd                239 drivers/crypto/omap-aes.c 		dev_err(dd->dev, "Unable to request in DMA channel\n");
dd                240 drivers/crypto/omap-aes.c 		return PTR_ERR(dd->dma_lch_in);
dd                243 drivers/crypto/omap-aes.c 	dd->dma_lch_out = dma_request_chan(dd->dev, "tx");
dd                244 drivers/crypto/omap-aes.c 	if (IS_ERR(dd->dma_lch_out)) {
dd                245 drivers/crypto/omap-aes.c 		dev_err(dd->dev, "Unable to request out DMA channel\n");
dd                246 drivers/crypto/omap-aes.c 		err = PTR_ERR(dd->dma_lch_out);
dd                253 drivers/crypto/omap-aes.c 	dma_release_channel(dd->dma_lch_in);
dd                258 drivers/crypto/omap-aes.c static void omap_aes_dma_cleanup(struct omap_aes_dev *dd)
dd                260 drivers/crypto/omap-aes.c 	if (dd->pio_only)
dd                263 drivers/crypto/omap-aes.c 	dma_release_channel(dd->dma_lch_out);
dd                264 drivers/crypto/omap-aes.c 	dma_release_channel(dd->dma_lch_in);
dd                267 drivers/crypto/omap-aes.c static int omap_aes_crypt_dma(struct omap_aes_dev *dd,
dd                276 drivers/crypto/omap-aes.c 	if (dd->pio_only) {
dd                277 drivers/crypto/omap-aes.c 		scatterwalk_start(&dd->in_walk, dd->in_sg);
dd                278 drivers/crypto/omap-aes.c 		scatterwalk_start(&dd->out_walk, dd->out_sg);
dd                282 drivers/crypto/omap-aes.c 		omap_aes_write(dd, AES_REG_IRQ_ENABLE(dd), 0x2);
dd                286 drivers/crypto/omap-aes.c 	dma_sync_sg_for_device(dd->dev, dd->in_sg, in_sg_len, DMA_TO_DEVICE);
dd                290 drivers/crypto/omap-aes.c 	cfg.src_addr = dd->phys_base + AES_REG_DATA_N(dd, 0);
dd                291 drivers/crypto/omap-aes.c 	cfg.dst_addr = dd->phys_base + AES_REG_DATA_N(dd, 0);
dd                298 drivers/crypto/omap-aes.c 	ret = dmaengine_slave_config(dd->dma_lch_in, &cfg);
dd                300 drivers/crypto/omap-aes.c 		dev_err(dd->dev, "can't configure IN dmaengine slave: %d\n",
dd                305 drivers/crypto/omap-aes.c 	tx_in = dmaengine_prep_slave_sg(dd->dma_lch_in, in_sg, in_sg_len,
dd                309 drivers/crypto/omap-aes.c 		dev_err(dd->dev, "IN prep_slave_sg() failed\n");
dd                314 drivers/crypto/omap-aes.c 	tx_in->callback_param = dd;
dd                317 drivers/crypto/omap-aes.c 	ret = dmaengine_slave_config(dd->dma_lch_out, &cfg);
dd                319 drivers/crypto/omap-aes.c 		dev_err(dd->dev, "can't configure OUT dmaengine slave: %d\n",
dd                324 drivers/crypto/omap-aes.c 	tx_out = dmaengine_prep_slave_sg(dd->dma_lch_out, out_sg, out_sg_len,
dd                328 drivers/crypto/omap-aes.c 		dev_err(dd->dev, "OUT prep_slave_sg() failed\n");
dd                332 drivers/crypto/omap-aes.c 	if (dd->flags & FLAGS_GCM)
dd                336 drivers/crypto/omap-aes.c 	tx_out->callback_param = dd;
dd                341 drivers/crypto/omap-aes.c 	dma_async_issue_pending(dd->dma_lch_in);
dd                342 drivers/crypto/omap-aes.c 	dma_async_issue_pending(dd->dma_lch_out);
dd                345 drivers/crypto/omap-aes.c 	dd->pdata->trigger(dd, dd->total);
dd                350 drivers/crypto/omap-aes.c int omap_aes_crypt_dma_start(struct omap_aes_dev *dd)
dd                354 drivers/crypto/omap-aes.c 	pr_debug("total: %d\n", dd->total);
dd                356 drivers/crypto/omap-aes.c 	if (!dd->pio_only) {
dd                357 drivers/crypto/omap-aes.c 		err = dma_map_sg(dd->dev, dd->in_sg, dd->in_sg_len,
dd                360 drivers/crypto/omap-aes.c 			dev_err(dd->dev, "dma_map_sg() error\n");
dd                364 drivers/crypto/omap-aes.c 		err = dma_map_sg(dd->dev, dd->out_sg, dd->out_sg_len,
dd                367 drivers/crypto/omap-aes.c 			dev_err(dd->dev, "dma_map_sg() error\n");
dd                372 drivers/crypto/omap-aes.c 	err = omap_aes_crypt_dma(dd, dd->in_sg, dd->out_sg, dd->in_sg_len,
dd                373 drivers/crypto/omap-aes.c 				 dd->out_sg_len);
dd                374 drivers/crypto/omap-aes.c 	if (err && !dd->pio_only) {
dd                375 drivers/crypto/omap-aes.c 		dma_unmap_sg(dd->dev, dd->in_sg, dd->in_sg_len, DMA_TO_DEVICE);
dd                376 drivers/crypto/omap-aes.c 		dma_unmap_sg(dd->dev, dd->out_sg, dd->out_sg_len,
dd                383 drivers/crypto/omap-aes.c static void omap_aes_finish_req(struct omap_aes_dev *dd, int err)
dd                385 drivers/crypto/omap-aes.c 	struct ablkcipher_request *req = dd->req;
dd                389 drivers/crypto/omap-aes.c 	crypto_finalize_ablkcipher_request(dd->engine, req, err);
dd                391 drivers/crypto/omap-aes.c 	pm_runtime_mark_last_busy(dd->dev);
dd                392 drivers/crypto/omap-aes.c 	pm_runtime_put_autosuspend(dd->dev);
dd                395 drivers/crypto/omap-aes.c int omap_aes_crypt_dma_stop(struct omap_aes_dev *dd)
dd                397 drivers/crypto/omap-aes.c 	pr_debug("total: %d\n", dd->total);
dd                399 drivers/crypto/omap-aes.c 	omap_aes_dma_stop(dd);
dd                405 drivers/crypto/omap-aes.c static int omap_aes_handle_queue(struct omap_aes_dev *dd,
dd                409 drivers/crypto/omap-aes.c 		return crypto_transfer_ablkcipher_request_to_engine(dd->engine, req);
dd                421 drivers/crypto/omap-aes.c 	struct omap_aes_dev *dd = rctx->dd;
dd                425 drivers/crypto/omap-aes.c 	if (!dd)
dd                429 drivers/crypto/omap-aes.c 	dd->req = req;
dd                430 drivers/crypto/omap-aes.c 	dd->total = req->nbytes;
dd                431 drivers/crypto/omap-aes.c 	dd->total_save = req->nbytes;
dd                432 drivers/crypto/omap-aes.c 	dd->in_sg = req->src;
dd                433 drivers/crypto/omap-aes.c 	dd->out_sg = req->dst;
dd                434 drivers/crypto/omap-aes.c 	dd->orig_out = req->dst;
dd                440 drivers/crypto/omap-aes.c 	ret = omap_crypto_align_sg(&dd->in_sg, dd->total, AES_BLOCK_SIZE,
dd                441 drivers/crypto/omap-aes.c 				   dd->in_sgl, flags,
dd                442 drivers/crypto/omap-aes.c 				   FLAGS_IN_DATA_ST_SHIFT, &dd->flags);
dd                446 drivers/crypto/omap-aes.c 	ret = omap_crypto_align_sg(&dd->out_sg, dd->total, AES_BLOCK_SIZE,
dd                447 drivers/crypto/omap-aes.c 				   &dd->out_sgl, 0,
dd                448 drivers/crypto/omap-aes.c 				   FLAGS_OUT_DATA_ST_SHIFT, &dd->flags);
dd                452 drivers/crypto/omap-aes.c 	dd->in_sg_len = sg_nents_for_len(dd->in_sg, dd->total);
dd                453 drivers/crypto/omap-aes.c 	if (dd->in_sg_len < 0)
dd                454 drivers/crypto/omap-aes.c 		return dd->in_sg_len;
dd                456 drivers/crypto/omap-aes.c 	dd->out_sg_len = sg_nents_for_len(dd->out_sg, dd->total);
dd                457 drivers/crypto/omap-aes.c 	if (dd->out_sg_len < 0)
dd                458 drivers/crypto/omap-aes.c 		return dd->out_sg_len;
dd                461 drivers/crypto/omap-aes.c 	dd->flags = (dd->flags & ~FLAGS_MODE_MASK) | rctx->mode;
dd                463 drivers/crypto/omap-aes.c 	dd->ctx = ctx;
dd                464 drivers/crypto/omap-aes.c 	rctx->dd = dd;
dd                466 drivers/crypto/omap-aes.c 	return omap_aes_write_ctrl(dd);
dd                474 drivers/crypto/omap-aes.c 	struct omap_aes_dev *dd = rctx->dd;
dd                476 drivers/crypto/omap-aes.c 	if (!dd)
dd                479 drivers/crypto/omap-aes.c 	return omap_aes_crypt_dma_start(dd);
dd                484 drivers/crypto/omap-aes.c 	struct omap_aes_dev *dd = (struct omap_aes_dev *)data;
dd                488 drivers/crypto/omap-aes.c 	if (!dd->pio_only) {
dd                489 drivers/crypto/omap-aes.c 		dma_sync_sg_for_device(dd->dev, dd->out_sg, dd->out_sg_len,
dd                491 drivers/crypto/omap-aes.c 		dma_unmap_sg(dd->dev, dd->in_sg, dd->in_sg_len, DMA_TO_DEVICE);
dd                492 drivers/crypto/omap-aes.c 		dma_unmap_sg(dd->dev, dd->out_sg, dd->out_sg_len,
dd                494 drivers/crypto/omap-aes.c 		omap_aes_crypt_dma_stop(dd);
dd                497 drivers/crypto/omap-aes.c 	omap_crypto_cleanup(dd->in_sgl, NULL, 0, dd->total_save,
dd                498 drivers/crypto/omap-aes.c 			    FLAGS_IN_DATA_ST_SHIFT, dd->flags);
dd                500 drivers/crypto/omap-aes.c 	omap_crypto_cleanup(&dd->out_sgl, dd->orig_out, 0, dd->total_save,
dd                501 drivers/crypto/omap-aes.c 			    FLAGS_OUT_DATA_ST_SHIFT, dd->flags);
dd                503 drivers/crypto/omap-aes.c 	omap_aes_finish_req(dd, 0);
dd                513 drivers/crypto/omap-aes.c 	struct omap_aes_dev *dd;
dd                537 drivers/crypto/omap-aes.c 	dd = omap_aes_find_dev(rctx);
dd                538 drivers/crypto/omap-aes.c 	if (!dd)
dd                543 drivers/crypto/omap-aes.c 	return omap_aes_handle_queue(dd, req);
dd                632 drivers/crypto/omap-aes.c 	struct omap_aes_dev *dd = NULL;
dd                638 drivers/crypto/omap-aes.c 	list_for_each_entry(dd, &dev_list, list) {
dd                643 drivers/crypto/omap-aes.c 	err = pm_runtime_get_sync(dd->dev);
dd                645 drivers/crypto/omap-aes.c 		dev_err(dd->dev, "%s: failed to get_sync(%d)\n",
dd                884 drivers/crypto/omap-aes.c 	struct omap_aes_dev *dd = dev_id;
dd                888 drivers/crypto/omap-aes.c 	status = omap_aes_read(dd, AES_REG_IRQ_STATUS(dd));
dd                890 drivers/crypto/omap-aes.c 		omap_aes_write(dd, AES_REG_IRQ_ENABLE(dd), 0x0);
dd                892 drivers/crypto/omap-aes.c 		BUG_ON(!dd->in_sg);
dd                894 drivers/crypto/omap-aes.c 		BUG_ON(_calc_walked(in) > dd->in_sg->length);
dd                896 drivers/crypto/omap-aes.c 		src = sg_virt(dd->in_sg) + _calc_walked(in);
dd                899 drivers/crypto/omap-aes.c 			omap_aes_write(dd, AES_REG_DATA_N(dd, i), *src);
dd                901 drivers/crypto/omap-aes.c 			scatterwalk_advance(&dd->in_walk, 4);
dd                902 drivers/crypto/omap-aes.c 			if (dd->in_sg->length == _calc_walked(in)) {
dd                903 drivers/crypto/omap-aes.c 				dd->in_sg = sg_next(dd->in_sg);
dd                904 drivers/crypto/omap-aes.c 				if (dd->in_sg) {
dd                905 drivers/crypto/omap-aes.c 					scatterwalk_start(&dd->in_walk,
dd                906 drivers/crypto/omap-aes.c 							  dd->in_sg);
dd                907 drivers/crypto/omap-aes.c 					src = sg_virt(dd->in_sg) +
dd                917 drivers/crypto/omap-aes.c 		omap_aes_write(dd, AES_REG_IRQ_STATUS(dd), status);
dd                920 drivers/crypto/omap-aes.c 		omap_aes_write(dd, AES_REG_IRQ_ENABLE(dd), 0x4);
dd                923 drivers/crypto/omap-aes.c 		omap_aes_write(dd, AES_REG_IRQ_ENABLE(dd), 0x0);
dd                925 drivers/crypto/omap-aes.c 		BUG_ON(!dd->out_sg);
dd                927 drivers/crypto/omap-aes.c 		BUG_ON(_calc_walked(out) > dd->out_sg->length);
dd                929 drivers/crypto/omap-aes.c 		dst = sg_virt(dd->out_sg) + _calc_walked(out);
dd                932 drivers/crypto/omap-aes.c 			*dst = omap_aes_read(dd, AES_REG_DATA_N(dd, i));
dd                933 drivers/crypto/omap-aes.c 			scatterwalk_advance(&dd->out_walk, 4);
dd                934 drivers/crypto/omap-aes.c 			if (dd->out_sg->length == _calc_walked(out)) {
dd                935 drivers/crypto/omap-aes.c 				dd->out_sg = sg_next(dd->out_sg);
dd                936 drivers/crypto/omap-aes.c 				if (dd->out_sg) {
dd                937 drivers/crypto/omap-aes.c 					scatterwalk_start(&dd->out_walk,
dd                938 drivers/crypto/omap-aes.c 							  dd->out_sg);
dd                939 drivers/crypto/omap-aes.c 					dst = sg_virt(dd->out_sg) +
dd                947 drivers/crypto/omap-aes.c 		dd->total -= min_t(size_t, AES_BLOCK_SIZE, dd->total);
dd                951 drivers/crypto/omap-aes.c 		omap_aes_write(dd, AES_REG_IRQ_STATUS(dd), status);
dd                953 drivers/crypto/omap-aes.c 		if (!dd->total)
dd                955 drivers/crypto/omap-aes.c 			tasklet_schedule(&dd->done_task);
dd                958 drivers/crypto/omap-aes.c 			omap_aes_write(dd, AES_REG_IRQ_ENABLE(dd), 0x2);
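
The omap-aes.c interrupt handler (884-958) is the PIO fallback: a data-in interrupt masks the IRQ, pushes four 32-bit words from the current scatterlist position into the DATA registers (hopping to the next sg entry as each one is exhausted), acks the status, and re-arms for data-out; the output side mirrors this and retires dd->total one AES block at a time until the done tasklet runs. A flattened user-space model of one block in, one block out, with the scatterlist walking omitted and all names hypothetical:

        #include <stdint.h>
        #include <stdio.h>
        #include <string.h>

        #define AES_BLOCK_WORDS 4

        static uint32_t data_reg[AES_BLOCK_WORDS];	/* fake DATA_0..DATA_3 */

        /* "Data in" half of the PIO IRQ: feed one block, word by word. */
        static void pio_push_block(const uint32_t *src)
        {
                int i;

                for (i = 0; i < AES_BLOCK_WORDS; i++)
                        data_reg[i] = src[i];	/* omap_aes_write(DATA_N(i)) */
        }

        /* "Data out" half: drain one block, shrink the remaining total
         * by min(total, AES_BLOCK_SIZE) as the handler does. */
        static size_t pio_pull_block(uint32_t *dst, size_t total)
        {
                int i;

                for (i = 0; i < AES_BLOCK_WORDS; i++)
                        dst[i] = data_reg[i];	/* omap_aes_read(DATA_N(i)) */

                return total > 16 ? total - 16 : 0;
        }

        int main(void)
        {
                uint32_t in[AES_BLOCK_WORDS] = { 0x11, 0x22, 0x33, 0x44 };
                uint32_t out[AES_BLOCK_WORDS];
                size_t total = 32;

                while (total) {
                        pio_push_block(in);
                        total = pio_pull_block(out, total);
                        printf("remaining %zu\n", total);
                }
                return out[0] == 0x11 ? 0 : 1;
        }
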
dd                981 drivers/crypto/omap-aes.c static int omap_aes_get_res_of(struct omap_aes_dev *dd,
dd                987 drivers/crypto/omap-aes.c 	dd->pdata = of_device_get_match_data(dev);
dd                988 drivers/crypto/omap-aes.c 	if (!dd->pdata) {
dd               1009 drivers/crypto/omap-aes.c static int omap_aes_get_res_of(struct omap_aes_dev *dd,
dd               1016 drivers/crypto/omap-aes.c static int omap_aes_get_res_pdev(struct omap_aes_dev *dd,
dd               1033 drivers/crypto/omap-aes.c 	dd->pdata = &omap_aes_pdata_omap2;
dd               1069 drivers/crypto/omap-aes.c 	struct omap_aes_dev *dd = dev_get_drvdata(dev);
dd               1071 drivers/crypto/omap-aes.c 	return sprintf(buf, "%d\n", dd->engine->queue.max_qlen);
dd               1078 drivers/crypto/omap-aes.c 	struct omap_aes_dev *dd;
dd               1096 drivers/crypto/omap-aes.c 	list_for_each_entry(dd, &dev_list, list) {
dd               1097 drivers/crypto/omap-aes.c 		spin_lock_irqsave(&dd->lock, flags);
dd               1098 drivers/crypto/omap-aes.c 		dd->engine->queue.max_qlen = value;
dd               1099 drivers/crypto/omap-aes.c 		dd->aead_queue.base.max_qlen = value;
dd               1100 drivers/crypto/omap-aes.c 		spin_unlock_irqrestore(&dd->lock, flags);
dd               1123 drivers/crypto/omap-aes.c 	struct omap_aes_dev *dd;
dd               1130 drivers/crypto/omap-aes.c 	dd = devm_kzalloc(dev, sizeof(struct omap_aes_dev), GFP_KERNEL);
dd               1131 drivers/crypto/omap-aes.c 	if (dd == NULL) {
dd               1135 drivers/crypto/omap-aes.c 	dd->dev = dev;
dd               1136 drivers/crypto/omap-aes.c 	platform_set_drvdata(pdev, dd);
dd               1138 drivers/crypto/omap-aes.c 	aead_init_queue(&dd->aead_queue, OMAP_AES_QUEUE_LENGTH);
dd               1140 drivers/crypto/omap-aes.c 	err = (dev->of_node) ? omap_aes_get_res_of(dd, dev, &res) :
dd               1141 drivers/crypto/omap-aes.c 			       omap_aes_get_res_pdev(dd, pdev, &res);
dd               1145 drivers/crypto/omap-aes.c 	dd->io_base = devm_ioremap_resource(dev, &res);
dd               1146 drivers/crypto/omap-aes.c 	if (IS_ERR(dd->io_base)) {
dd               1147 drivers/crypto/omap-aes.c 		err = PTR_ERR(dd->io_base);
dd               1150 drivers/crypto/omap-aes.c 	dd->phys_base = res.start;
dd               1163 drivers/crypto/omap-aes.c 	omap_aes_dma_stop(dd);
dd               1165 drivers/crypto/omap-aes.c 	reg = omap_aes_read(dd, AES_REG_REV(dd));
dd               1170 drivers/crypto/omap-aes.c 		 (reg & dd->pdata->major_mask) >> dd->pdata->major_shift,
dd               1171 drivers/crypto/omap-aes.c 		 (reg & dd->pdata->minor_mask) >> dd->pdata->minor_shift);
dd               1173 drivers/crypto/omap-aes.c 	tasklet_init(&dd->done_task, omap_aes_done_task, (unsigned long)dd);
dd               1175 drivers/crypto/omap-aes.c 	err = omap_aes_dma_init(dd);
dd               1178 drivers/crypto/omap-aes.c 	} else if (err && AES_REG_IRQ_STATUS(dd) && AES_REG_IRQ_ENABLE(dd)) {
dd               1179 drivers/crypto/omap-aes.c 		dd->pio_only = 1;
dd               1188 drivers/crypto/omap-aes.c 				dev_name(dev), dd);
dd               1195 drivers/crypto/omap-aes.c 	spin_lock_init(&dd->lock);
dd               1197 drivers/crypto/omap-aes.c 	INIT_LIST_HEAD(&dd->list);
dd               1199 drivers/crypto/omap-aes.c 	list_add_tail(&dd->list, &dev_list);
dd               1203 drivers/crypto/omap-aes.c 	dd->engine = crypto_engine_alloc_init(dev, 1);
dd               1204 drivers/crypto/omap-aes.c 	if (!dd->engine) {
dd               1209 drivers/crypto/omap-aes.c 	err = crypto_engine_start(dd->engine);
dd               1213 drivers/crypto/omap-aes.c 	for (i = 0; i < dd->pdata->algs_info_size; i++) {
dd               1214 drivers/crypto/omap-aes.c 		if (!dd->pdata->algs_info[i].registered) {
dd               1215 drivers/crypto/omap-aes.c 			for (j = 0; j < dd->pdata->algs_info[i].size; j++) {
dd               1216 drivers/crypto/omap-aes.c 				algp = &dd->pdata->algs_info[i].algs_list[j];
dd               1224 drivers/crypto/omap-aes.c 				dd->pdata->algs_info[i].registered++;
dd               1229 drivers/crypto/omap-aes.c 	if (dd->pdata->aead_algs_info &&
dd               1230 drivers/crypto/omap-aes.c 	    !dd->pdata->aead_algs_info->registered) {
dd               1231 drivers/crypto/omap-aes.c 		for (i = 0; i < dd->pdata->aead_algs_info->size; i++) {
dd               1232 drivers/crypto/omap-aes.c 			aalg = &dd->pdata->aead_algs_info->algs_list[i];
dd               1241 drivers/crypto/omap-aes.c 			dd->pdata->aead_algs_info->registered++;
dd               1253 drivers/crypto/omap-aes.c 	for (i = dd->pdata->aead_algs_info->registered - 1; i >= 0; i--) {
dd               1254 drivers/crypto/omap-aes.c 		aalg = &dd->pdata->aead_algs_info->algs_list[i];
dd               1258 drivers/crypto/omap-aes.c 	for (i = dd->pdata->algs_info_size - 1; i >= 0; i--)
dd               1259 drivers/crypto/omap-aes.c 		for (j = dd->pdata->algs_info[i].registered - 1; j >= 0; j--)
dd               1261 drivers/crypto/omap-aes.c 					&dd->pdata->algs_info[i].algs_list[j]);
dd               1264 drivers/crypto/omap-aes.c 	if (dd->engine)
dd               1265 drivers/crypto/omap-aes.c 		crypto_engine_exit(dd->engine);
dd               1267 drivers/crypto/omap-aes.c 	omap_aes_dma_cleanup(dd);
dd               1269 drivers/crypto/omap-aes.c 	tasklet_kill(&dd->done_task);
dd               1272 drivers/crypto/omap-aes.c 	dd = NULL;
dd               1280 drivers/crypto/omap-aes.c 	struct omap_aes_dev *dd = platform_get_drvdata(pdev);
dd               1284 drivers/crypto/omap-aes.c 	if (!dd)
dd               1288 drivers/crypto/omap-aes.c 	list_del(&dd->list);
dd               1291 drivers/crypto/omap-aes.c 	for (i = dd->pdata->algs_info_size - 1; i >= 0; i--)
dd               1292 drivers/crypto/omap-aes.c 		for (j = dd->pdata->algs_info[i].registered - 1; j >= 0; j--)
dd               1294 drivers/crypto/omap-aes.c 					&dd->pdata->algs_info[i].algs_list[j]);
dd               1296 drivers/crypto/omap-aes.c 	for (i = dd->pdata->aead_algs_info->size - 1; i >= 0; i--) {
dd               1297 drivers/crypto/omap-aes.c 		aalg = &dd->pdata->aead_algs_info->algs_list[i];
dd               1301 drivers/crypto/omap-aes.c 	crypto_engine_exit(dd->engine);
dd               1303 drivers/crypto/omap-aes.c 	tasklet_kill(&dd->done_task);
dd               1304 drivers/crypto/omap-aes.c 	omap_aes_dma_cleanup(dd);
dd               1305 drivers/crypto/omap-aes.c 	pm_runtime_disable(dd->dev);
dd               1306 drivers/crypto/omap-aes.c 	dd = NULL;
dd                 17 drivers/crypto/omap-aes.h #define _calc_walked(inout) (dd->inout##_walk.offset - dd->inout##_sg->offset)
dd                 26 drivers/crypto/omap-aes.h #define AES_REG_KEY(dd, x)		((dd)->pdata->key_ofs - \
dd                 28 drivers/crypto/omap-aes.h #define AES_REG_IV(dd, x)		((dd)->pdata->iv_ofs + ((x) * 0x04))
dd                 30 drivers/crypto/omap-aes.h #define AES_REG_CTRL(dd)		((dd)->pdata->ctrl_ofs)
dd                 50 drivers/crypto/omap-aes.h #define AES_REG_DATA_N(dd, x)		((dd)->pdata->data_ofs + ((x) * 0x04))
dd                 51 drivers/crypto/omap-aes.h #define AES_REG_TAG_N(dd, x)		(0x70 + ((x) * 0x04))
dd                 53 drivers/crypto/omap-aes.h #define AES_REG_REV(dd)			((dd)->pdata->rev_ofs)
dd                 55 drivers/crypto/omap-aes.h #define AES_REG_MASK(dd)		((dd)->pdata->mask_ofs)
dd                 65 drivers/crypto/omap-aes.h #define AES_REG_IRQ_STATUS(dd)         ((dd)->pdata->irq_status_ofs)
dd                 66 drivers/crypto/omap-aes.h #define AES_REG_IRQ_ENABLE(dd)         ((dd)->pdata->irq_enable_ofs)
dd                105 drivers/crypto/omap-aes.h 	struct omap_aes_dev *dd;
dd                131 drivers/crypto/omap-aes.h 	void		(*trigger)(struct omap_aes_dev *dd, int length);
dd                196 drivers/crypto/omap-aes.h u32 omap_aes_read(struct omap_aes_dev *dd, u32 offset);
dd                197 drivers/crypto/omap-aes.h void omap_aes_write(struct omap_aes_dev *dd, u32 offset, u32 value);
dd                207 drivers/crypto/omap-aes.h int omap_aes_write_ctrl(struct omap_aes_dev *dd);
dd                208 drivers/crypto/omap-aes.h int omap_aes_crypt_dma_start(struct omap_aes_dev *dd);
dd                209 drivers/crypto/omap-aes.h int omap_aes_crypt_dma_stop(struct omap_aes_dev *dd);
dd                211 drivers/crypto/omap-aes.h void omap_aes_clear_copy_flags(struct omap_aes_dev *dd);
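
Nearly every register macro in omap-aes.h resolves through dd->pdata, so one driver binary serves SoC generations whose AES blocks lay out their registers differently (AES_REG_KEY even indexes downward from key_ofs). A condensed sketch of that indirection; the two offset tables below are invented for illustration:

        #include <stdint.h>
        #include <stdio.h>

        /* Per-SoC register layout, as the omap drivers keep in pdata. */
        struct pdata_stub {
                uint32_t key_ofs;
                uint32_t iv_ofs;
                uint32_t ctrl_ofs;
                uint32_t data_ofs;
        };

        struct dev_stub {
                const struct pdata_stub *pdata;
        };

        /* Macro-style accessors resolved through pdata, so the same
         * code drives both layouts. Offsets are illustrative only. */
        #define REG_IV(dd, x)	  ((dd)->pdata->iv_ofs + ((x) * 0x04))
        #define REG_CTRL(dd)	  ((dd)->pdata->ctrl_ofs)
        #define REG_DATA_N(dd, x) ((dd)->pdata->data_ofs + ((x) * 0x04))

        static const struct pdata_stub pdata_omap2 = { 0x1c, 0x20, 0x30, 0x34 };
        static const struct pdata_stub pdata_omap4 = { 0x30, 0x40, 0x50, 0x60 };

        int main(void)
        {
                struct dev_stub a = { .pdata = &pdata_omap2 };
                struct dev_stub b = { .pdata = &pdata_omap4 };

                printf("omap2 CTRL 0x%02x, omap4 CTRL 0x%02x\n",
                       (unsigned)REG_CTRL(&a), (unsigned)REG_CTRL(&b));
                printf("omap2 IV1 0x%02x, omap4 DATA2 0x%02x\n",
                       (unsigned)REG_IV(&a, 1), (unsigned)REG_DATA_N(&b, 2));
                return 0;
        }
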
dd                 46 drivers/crypto/omap-des.c #define _calc_walked(inout) (dd->inout##_walk.offset - dd->inout##_sg->offset)
dd                 48 drivers/crypto/omap-des.c #define DES_REG_KEY(dd, x)		((dd)->pdata->key_ofs - \
dd                 51 drivers/crypto/omap-des.c #define DES_REG_IV(dd, x)		((dd)->pdata->iv_ofs + ((x) * 0x04))
dd                 53 drivers/crypto/omap-des.c #define DES_REG_CTRL(dd)		((dd)->pdata->ctrl_ofs)
dd                 60 drivers/crypto/omap-des.c #define DES_REG_DATA_N(dd, x)		((dd)->pdata->data_ofs + ((x) * 0x04))
dd                 62 drivers/crypto/omap-des.c #define DES_REG_REV(dd)			((dd)->pdata->rev_ofs)
dd                 64 drivers/crypto/omap-des.c #define DES_REG_MASK(dd)		((dd)->pdata->mask_ofs)
dd                 68 drivers/crypto/omap-des.c #define DES_REG_IRQ_STATUS(dd)         ((dd)->pdata->irq_status_ofs)
dd                 69 drivers/crypto/omap-des.c #define DES_REG_IRQ_ENABLE(dd)         ((dd)->pdata->irq_enable_ofs)
dd                 86 drivers/crypto/omap-des.c 	struct omap_des_dev *dd;
dd                110 drivers/crypto/omap-des.c 	void		(*trigger)(struct omap_des_dev *dd, int length);
dd                174 drivers/crypto/omap-des.c #define omap_des_read(dd, offset)                               \
dd                177 drivers/crypto/omap-des.c 	 _read_ret = __raw_readl(dd->io_base + offset);          \
dd                183 drivers/crypto/omap-des.c static inline u32 omap_des_read(struct omap_des_dev *dd, u32 offset)
dd                185 drivers/crypto/omap-des.c 	return __raw_readl(dd->io_base + offset);
dd                190 drivers/crypto/omap-des.c #define omap_des_write(dd, offset, value)                               \
dd                194 drivers/crypto/omap-des.c 		__raw_writel(value, dd->io_base + offset);              \
dd                197 drivers/crypto/omap-des.c static inline void omap_des_write(struct omap_des_dev *dd, u32 offset,
dd                200 drivers/crypto/omap-des.c 	__raw_writel(value, dd->io_base + offset);
dd                204 drivers/crypto/omap-des.c static inline void omap_des_write_mask(struct omap_des_dev *dd, u32 offset,
dd                209 drivers/crypto/omap-des.c 	val = omap_des_read(dd, offset);
dd                212 drivers/crypto/omap-des.c 	omap_des_write(dd, offset, val);
dd                215 drivers/crypto/omap-des.c static void omap_des_write_n(struct omap_des_dev *dd, u32 offset,
dd                219 drivers/crypto/omap-des.c 		omap_des_write(dd, offset, *value);
dd                222 drivers/crypto/omap-des.c static int omap_des_hw_init(struct omap_des_dev *dd)
dd                231 drivers/crypto/omap-des.c 	err = pm_runtime_get_sync(dd->dev);
dd                233 drivers/crypto/omap-des.c 		pm_runtime_put_noidle(dd->dev);
dd                234 drivers/crypto/omap-des.c 		dev_err(dd->dev, "%s: failed to get_sync(%d)\n", __func__, err);
dd                238 drivers/crypto/omap-des.c 	if (!(dd->flags & FLAGS_INIT)) {
dd                239 drivers/crypto/omap-des.c 		dd->flags |= FLAGS_INIT;
dd                240 drivers/crypto/omap-des.c 		dd->err = 0;
dd                246 drivers/crypto/omap-des.c static int omap_des_write_ctrl(struct omap_des_dev *dd)
dd                252 drivers/crypto/omap-des.c 	err = omap_des_hw_init(dd);
dd                256 drivers/crypto/omap-des.c 	key32 = dd->ctx->keylen / sizeof(u32);
dd                260 drivers/crypto/omap-des.c 		omap_des_write(dd, DES_REG_KEY(dd, i),
dd                261 drivers/crypto/omap-des.c 			       __le32_to_cpu(dd->ctx->key[i]));
dd                264 drivers/crypto/omap-des.c 	if ((dd->flags & FLAGS_CBC) && dd->req->info)
dd                265 drivers/crypto/omap-des.c 		omap_des_write_n(dd, DES_REG_IV(dd, 0), dd->req->info, 2);
dd                267 drivers/crypto/omap-des.c 	if (dd->flags & FLAGS_CBC)
dd                269 drivers/crypto/omap-des.c 	if (dd->flags & FLAGS_ENCRYPT)
dd                276 drivers/crypto/omap-des.c 	omap_des_write_mask(dd, DES_REG_CTRL(dd), val, mask);
dd                281 drivers/crypto/omap-des.c static void omap_des_dma_trigger_omap4(struct omap_des_dev *dd, int length)
dd                285 drivers/crypto/omap-des.c 	omap_des_write(dd, DES_REG_LENGTH_N(0), length);
dd                287 drivers/crypto/omap-des.c 	val = dd->pdata->dma_start;
dd                289 drivers/crypto/omap-des.c 	if (dd->dma_lch_out != NULL)
dd                290 drivers/crypto/omap-des.c 		val |= dd->pdata->dma_enable_out;
dd                291 drivers/crypto/omap-des.c 	if (dd->dma_lch_in != NULL)
dd                292 drivers/crypto/omap-des.c 		val |= dd->pdata->dma_enable_in;
dd                294 drivers/crypto/omap-des.c 	mask = dd->pdata->dma_enable_out | dd->pdata->dma_enable_in |
dd                295 drivers/crypto/omap-des.c 	       dd->pdata->dma_start;
dd                297 drivers/crypto/omap-des.c 	omap_des_write_mask(dd, DES_REG_MASK(dd), val, mask);
dd                300 drivers/crypto/omap-des.c static void omap_des_dma_stop(struct omap_des_dev *dd)
dd                304 drivers/crypto/omap-des.c 	mask = dd->pdata->dma_enable_out | dd->pdata->dma_enable_in |
dd                305 drivers/crypto/omap-des.c 	       dd->pdata->dma_start;
dd                307 drivers/crypto/omap-des.c 	omap_des_write_mask(dd, DES_REG_MASK(dd), 0, mask);
dd                312 drivers/crypto/omap-des.c 	struct omap_des_dev *dd = NULL, *tmp;
dd                315 drivers/crypto/omap-des.c 	if (!ctx->dd) {
dd                318 drivers/crypto/omap-des.c 			dd = tmp;
dd                321 drivers/crypto/omap-des.c 		ctx->dd = dd;
dd                324 drivers/crypto/omap-des.c 		dd = ctx->dd;
dd                328 drivers/crypto/omap-des.c 	return dd;
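
omap_des_find_dev() (omap-des.c:312-328) binds a context to a device lazily: the first call scans the global device list and caches the winner in ctx->dd, and later calls just return the cached pointer (the AES variant at omap-aes.c:211-219 instead rotates the list with list_move_tail() to spread load across devices). A compact sketch of the caching half; the list and context types are invented stand-ins:

        #include <stddef.h>
        #include <stdio.h>

        /* Invented stand-ins for the driver's device list and context. */
        struct dev_stub { int id; struct dev_stub *next; };
        struct ctx_stub { struct dev_stub *dd; };

        static struct dev_stub dev1 = { 1, NULL };
        static struct dev_stub *dev_list = &dev1;

        /* First call picks a device off the list and caches it in the
         * context; subsequent calls are plain pointer reads. */
        static struct dev_stub *find_dev(struct ctx_stub *ctx)
        {
                if (!ctx->dd)
                        ctx->dd = dev_list;	/* first match wins */
                return ctx->dd;
        }

        int main(void)
        {
                struct ctx_stub ctx = { NULL };

                printf("dev %d\n", find_dev(&ctx)->id);
                printf("dev %d (cached)\n", find_dev(&ctx)->id);
                return 0;
        }
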
dd                333 drivers/crypto/omap-des.c 	struct omap_des_dev *dd = data;
dd                336 drivers/crypto/omap-des.c 	tasklet_schedule(&dd->done_task);
dd                339 drivers/crypto/omap-des.c static int omap_des_dma_init(struct omap_des_dev *dd)
dd                343 drivers/crypto/omap-des.c 	dd->dma_lch_out = NULL;
dd                344 drivers/crypto/omap-des.c 	dd->dma_lch_in = NULL;
dd                346 drivers/crypto/omap-des.c 	dd->dma_lch_in = dma_request_chan(dd->dev, "rx");
dd                347 drivers/crypto/omap-des.c 	if (IS_ERR(dd->dma_lch_in)) {
dd                348 drivers/crypto/omap-des.c 		dev_err(dd->dev, "Unable to request in DMA channel\n");
dd                349 drivers/crypto/omap-des.c 		return PTR_ERR(dd->dma_lch_in);
dd                352 drivers/crypto/omap-des.c 	dd->dma_lch_out = dma_request_chan(dd->dev, "tx");
dd                353 drivers/crypto/omap-des.c 	if (IS_ERR(dd->dma_lch_out)) {
dd                354 drivers/crypto/omap-des.c 		dev_err(dd->dev, "Unable to request out DMA channel\n");
dd                355 drivers/crypto/omap-des.c 		err = PTR_ERR(dd->dma_lch_out);
dd                362 drivers/crypto/omap-des.c 	dma_release_channel(dd->dma_lch_in);
dd                367 drivers/crypto/omap-des.c static void omap_des_dma_cleanup(struct omap_des_dev *dd)
dd                369 drivers/crypto/omap-des.c 	if (dd->pio_only)
dd                372 drivers/crypto/omap-des.c 	dma_release_channel(dd->dma_lch_out);
dd                373 drivers/crypto/omap-des.c 	dma_release_channel(dd->dma_lch_in);
dd                381 drivers/crypto/omap-des.c 	struct omap_des_dev *dd = ctx->dd;
dd                386 drivers/crypto/omap-des.c 	if (dd->pio_only) {
dd                387 drivers/crypto/omap-des.c 		scatterwalk_start(&dd->in_walk, dd->in_sg);
dd                388 drivers/crypto/omap-des.c 		scatterwalk_start(&dd->out_walk, dd->out_sg);
dd                392 drivers/crypto/omap-des.c 		omap_des_write(dd, DES_REG_IRQ_ENABLE(dd), 0x2);
dd                396 drivers/crypto/omap-des.c 	dma_sync_sg_for_device(dd->dev, dd->in_sg, in_sg_len, DMA_TO_DEVICE);
dd                400 drivers/crypto/omap-des.c 	cfg.src_addr = dd->phys_base + DES_REG_DATA_N(dd, 0);
dd                401 drivers/crypto/omap-des.c 	cfg.dst_addr = dd->phys_base + DES_REG_DATA_N(dd, 0);
dd                408 drivers/crypto/omap-des.c 	ret = dmaengine_slave_config(dd->dma_lch_in, &cfg);
dd                410 drivers/crypto/omap-des.c 		dev_err(dd->dev, "can't configure IN dmaengine slave: %d\n",
dd                415 drivers/crypto/omap-des.c 	tx_in = dmaengine_prep_slave_sg(dd->dma_lch_in, in_sg, in_sg_len,
dd                419 drivers/crypto/omap-des.c 		dev_err(dd->dev, "IN prep_slave_sg() failed\n");
dd                424 drivers/crypto/omap-des.c 	tx_in->callback_param = dd;
dd                427 drivers/crypto/omap-des.c 	ret = dmaengine_slave_config(dd->dma_lch_out, &cfg);
dd                429 drivers/crypto/omap-des.c 		dev_err(dd->dev, "can't configure OUT dmaengine slave: %d\n",
dd                434 drivers/crypto/omap-des.c 	tx_out = dmaengine_prep_slave_sg(dd->dma_lch_out, out_sg, out_sg_len,
dd                438 drivers/crypto/omap-des.c 		dev_err(dd->dev, "OUT prep_slave_sg() failed\n");
dd                443 drivers/crypto/omap-des.c 	tx_out->callback_param = dd;
dd                448 drivers/crypto/omap-des.c 	dma_async_issue_pending(dd->dma_lch_in);
dd                449 drivers/crypto/omap-des.c 	dma_async_issue_pending(dd->dma_lch_out);
dd                452 drivers/crypto/omap-des.c 	dd->pdata->trigger(dd, dd->total);
dd                457 drivers/crypto/omap-des.c static int omap_des_crypt_dma_start(struct omap_des_dev *dd)
dd                460 drivers/crypto/omap-des.c 					crypto_ablkcipher_reqtfm(dd->req));
dd                463 drivers/crypto/omap-des.c 	pr_debug("total: %d\n", dd->total);
dd                465 drivers/crypto/omap-des.c 	if (!dd->pio_only) {
dd                466 drivers/crypto/omap-des.c 		err = dma_map_sg(dd->dev, dd->in_sg, dd->in_sg_len,
dd                469 drivers/crypto/omap-des.c 			dev_err(dd->dev, "dma_map_sg() error\n");
dd                473 drivers/crypto/omap-des.c 		err = dma_map_sg(dd->dev, dd->out_sg, dd->out_sg_len,
dd                476 drivers/crypto/omap-des.c 			dev_err(dd->dev, "dma_map_sg() error\n");
dd                481 drivers/crypto/omap-des.c 	err = omap_des_crypt_dma(tfm, dd->in_sg, dd->out_sg, dd->in_sg_len,
dd                482 drivers/crypto/omap-des.c 				 dd->out_sg_len);
dd                483 drivers/crypto/omap-des.c 	if (err && !dd->pio_only) {
dd                484 drivers/crypto/omap-des.c 		dma_unmap_sg(dd->dev, dd->in_sg, dd->in_sg_len, DMA_TO_DEVICE);
dd                485 drivers/crypto/omap-des.c 		dma_unmap_sg(dd->dev, dd->out_sg, dd->out_sg_len,
dd                492 drivers/crypto/omap-des.c static void omap_des_finish_req(struct omap_des_dev *dd, int err)
dd                494 drivers/crypto/omap-des.c 	struct ablkcipher_request *req = dd->req;
dd                498 drivers/crypto/omap-des.c 	crypto_finalize_ablkcipher_request(dd->engine, req, err);
dd                500 drivers/crypto/omap-des.c 	pm_runtime_mark_last_busy(dd->dev);
dd                501 drivers/crypto/omap-des.c 	pm_runtime_put_autosuspend(dd->dev);
dd                504 drivers/crypto/omap-des.c static int omap_des_crypt_dma_stop(struct omap_des_dev *dd)
dd                506 drivers/crypto/omap-des.c 	pr_debug("total: %d\n", dd->total);
dd                508 drivers/crypto/omap-des.c 	omap_des_dma_stop(dd);
dd                510 drivers/crypto/omap-des.c 	dmaengine_terminate_all(dd->dma_lch_in);
dd                511 drivers/crypto/omap-des.c 	dmaengine_terminate_all(dd->dma_lch_out);
dd                516 drivers/crypto/omap-des.c static int omap_des_handle_queue(struct omap_des_dev *dd,
dd                520 drivers/crypto/omap-des.c 		return crypto_transfer_ablkcipher_request_to_engine(dd->engine, req);
dd                531 drivers/crypto/omap-des.c 	struct omap_des_dev *dd = omap_des_find_dev(ctx);
dd                536 drivers/crypto/omap-des.c 	if (!dd)
dd                540 drivers/crypto/omap-des.c 	dd->req = req;
dd                541 drivers/crypto/omap-des.c 	dd->total = req->nbytes;
dd                542 drivers/crypto/omap-des.c 	dd->total_save = req->nbytes;
dd                543 drivers/crypto/omap-des.c 	dd->in_sg = req->src;
dd                544 drivers/crypto/omap-des.c 	dd->out_sg = req->dst;
dd                545 drivers/crypto/omap-des.c 	dd->orig_out = req->dst;
dd                551 drivers/crypto/omap-des.c 	ret = omap_crypto_align_sg(&dd->in_sg, dd->total, DES_BLOCK_SIZE,
dd                552 drivers/crypto/omap-des.c 				   &dd->in_sgl, flags,
dd                553 drivers/crypto/omap-des.c 				   FLAGS_IN_DATA_ST_SHIFT, &dd->flags);
dd                557 drivers/crypto/omap-des.c 	ret = omap_crypto_align_sg(&dd->out_sg, dd->total, DES_BLOCK_SIZE,
dd                558 drivers/crypto/omap-des.c 				   &dd->out_sgl, 0,
dd                559 drivers/crypto/omap-des.c 				   FLAGS_OUT_DATA_ST_SHIFT, &dd->flags);
dd                563 drivers/crypto/omap-des.c 	dd->in_sg_len = sg_nents_for_len(dd->in_sg, dd->total);
dd                564 drivers/crypto/omap-des.c 	if (dd->in_sg_len < 0)
dd                565 drivers/crypto/omap-des.c 		return dd->in_sg_len;
dd                567 drivers/crypto/omap-des.c 	dd->out_sg_len = sg_nents_for_len(dd->out_sg, dd->total);
dd                568 drivers/crypto/omap-des.c 	if (dd->out_sg_len < 0)
dd                569 drivers/crypto/omap-des.c 		return dd->out_sg_len;
dd                574 drivers/crypto/omap-des.c 	dd->flags = (dd->flags & ~FLAGS_MODE_MASK) | rctx->mode;
dd                576 drivers/crypto/omap-des.c 	dd->ctx = ctx;
dd                577 drivers/crypto/omap-des.c 	ctx->dd = dd;
dd                579 drivers/crypto/omap-des.c 	return omap_des_write_ctrl(dd);
dd                588 drivers/crypto/omap-des.c 	struct omap_des_dev *dd = omap_des_find_dev(ctx);
dd                590 drivers/crypto/omap-des.c 	if (!dd)
dd                593 drivers/crypto/omap-des.c 	return omap_des_crypt_dma_start(dd);
dd                598 drivers/crypto/omap-des.c 	struct omap_des_dev *dd = (struct omap_des_dev *)data;
dd                602 drivers/crypto/omap-des.c 	if (!dd->pio_only) {
dd                603 drivers/crypto/omap-des.c 		dma_sync_sg_for_device(dd->dev, dd->out_sg, dd->out_sg_len,
dd                605 drivers/crypto/omap-des.c 		dma_unmap_sg(dd->dev, dd->in_sg, dd->in_sg_len, DMA_TO_DEVICE);
dd                606 drivers/crypto/omap-des.c 		dma_unmap_sg(dd->dev, dd->out_sg, dd->out_sg_len,
dd                608 drivers/crypto/omap-des.c 		omap_des_crypt_dma_stop(dd);
dd                611 drivers/crypto/omap-des.c 	omap_crypto_cleanup(&dd->in_sgl, NULL, 0, dd->total_save,
dd                612 drivers/crypto/omap-des.c 			    FLAGS_IN_DATA_ST_SHIFT, dd->flags);
dd                614 drivers/crypto/omap-des.c 	omap_crypto_cleanup(&dd->out_sgl, dd->orig_out, 0, dd->total_save,
dd                615 drivers/crypto/omap-des.c 			    FLAGS_OUT_DATA_ST_SHIFT, dd->flags);
dd                617 drivers/crypto/omap-des.c 	omap_des_finish_req(dd, 0);
dd                627 drivers/crypto/omap-des.c 	struct omap_des_dev *dd;
dd                638 drivers/crypto/omap-des.c 	dd = omap_des_find_dev(ctx);
dd                639 drivers/crypto/omap-des.c 	if (!dd)
dd                644 drivers/crypto/omap-des.c 	return omap_des_handle_queue(dd, req);
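
The omap-des entries above trace the crypto-engine offload pattern: requests are handed to a crypto_engine, which serialises them on its kthread and calls back into the driver, and the driver reports completion back through the engine. A minimal sketch of that round trip, assuming the ablkcipher-era engine API shown in this listing (struct my_dev and both function names are hypothetical):

    #include <crypto/engine.h>
    #include <linux/crypto.h>

    struct my_dev {                         /* hypothetical device state */
            struct crypto_engine *engine;
    };

    /* Enqueue: the engine's kthread serialises requests for the hardware. */
    static int my_handle_queue(struct my_dev *dd, struct ablkcipher_request *req)
    {
            if (req)
                    return crypto_transfer_ablkcipher_request_to_engine(dd->engine,
                                                                        req);
            return 0;
    }

    /* Completion: the same shape as omap_des_finish_req() above. */
    static void my_finish_req(struct my_dev *dd, struct ablkcipher_request *req,
                              int err)
    {
            crypto_finalize_ablkcipher_request(dd->engine, req, err);
    }
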
dd                855 drivers/crypto/omap-des.c 	struct omap_des_dev *dd = dev_id;
dd                859 drivers/crypto/omap-des.c 	status = omap_des_read(dd, DES_REG_IRQ_STATUS(dd));
dd                861 drivers/crypto/omap-des.c 		omap_des_write(dd, DES_REG_IRQ_ENABLE(dd), 0x0);
dd                863 drivers/crypto/omap-des.c 		BUG_ON(!dd->in_sg);
dd                865 drivers/crypto/omap-des.c 		BUG_ON(_calc_walked(in) > dd->in_sg->length);
dd                867 drivers/crypto/omap-des.c 		src = sg_virt(dd->in_sg) + _calc_walked(in);
dd                870 drivers/crypto/omap-des.c 			omap_des_write(dd, DES_REG_DATA_N(dd, i), *src);
dd                872 drivers/crypto/omap-des.c 			scatterwalk_advance(&dd->in_walk, 4);
dd                873 drivers/crypto/omap-des.c 			if (dd->in_sg->length == _calc_walked(in)) {
dd                874 drivers/crypto/omap-des.c 				dd->in_sg = sg_next(dd->in_sg);
dd                875 drivers/crypto/omap-des.c 				if (dd->in_sg) {
dd                876 drivers/crypto/omap-des.c 					scatterwalk_start(&dd->in_walk,
dd                877 drivers/crypto/omap-des.c 							  dd->in_sg);
dd                878 drivers/crypto/omap-des.c 					src = sg_virt(dd->in_sg) +
dd                888 drivers/crypto/omap-des.c 		omap_des_write(dd, DES_REG_IRQ_STATUS(dd), status);
dd                891 drivers/crypto/omap-des.c 		omap_des_write(dd, DES_REG_IRQ_ENABLE(dd), 0x4);
dd                894 drivers/crypto/omap-des.c 		omap_des_write(dd, DES_REG_IRQ_ENABLE(dd), 0x0);
dd                896 drivers/crypto/omap-des.c 		BUG_ON(!dd->out_sg);
dd                898 drivers/crypto/omap-des.c 		BUG_ON(_calc_walked(out) > dd->out_sg->length);
dd                900 drivers/crypto/omap-des.c 		dst = sg_virt(dd->out_sg) + _calc_walked(out);
dd                903 drivers/crypto/omap-des.c 			*dst = omap_des_read(dd, DES_REG_DATA_N(dd, i));
dd                904 drivers/crypto/omap-des.c 			scatterwalk_advance(&dd->out_walk, 4);
dd                905 drivers/crypto/omap-des.c 			if (dd->out_sg->length == _calc_walked(out)) {
dd                906 drivers/crypto/omap-des.c 				dd->out_sg = sg_next(dd->out_sg);
dd                907 drivers/crypto/omap-des.c 				if (dd->out_sg) {
dd                908 drivers/crypto/omap-des.c 					scatterwalk_start(&dd->out_walk,
dd                909 drivers/crypto/omap-des.c 							  dd->out_sg);
dd                910 drivers/crypto/omap-des.c 					dst = sg_virt(dd->out_sg) +
dd                918 drivers/crypto/omap-des.c 		BUG_ON(dd->total < DES_BLOCK_SIZE);
dd                920 drivers/crypto/omap-des.c 		dd->total -= DES_BLOCK_SIZE;
dd                924 drivers/crypto/omap-des.c 		omap_des_write(dd, DES_REG_IRQ_STATUS(dd), status);
dd                926 drivers/crypto/omap-des.c 		if (!dd->total)
dd                928 drivers/crypto/omap-des.c 			tasklet_schedule(&dd->done_task);
dd                931 drivers/crypto/omap-des.c 			omap_des_write(dd, DES_REG_IRQ_ENABLE(dd), 0x2);
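
In PIO mode the IRQ handler above moves data a 32-bit word at a time, tracking its position with a scatter_walk and hopping to the next scatterlist entry once the current one is exhausted. A condensed sketch of the copy-in side, assuming walked_bytes() and write_word() as stand-ins for _calc_walked() and omap_des_write(), and a dd carrying in_sg/in_walk fields as in the listing:

    #include <crypto/scatterwalk.h>

    #define DES_BLOCK_WORDS 2       /* one 8-byte DES block = two u32 writes */

    static void my_pio_copy_in(struct my_dev *dd)
    {
            u32 *src = sg_virt(dd->in_sg) + walked_bytes(&dd->in_walk);
            int i;

            for (i = 0; i < DES_BLOCK_WORDS; i++) {
                    write_word(dd, i, *src);        /* DES_REG_DATA_N(dd, i) */
                    scatterwalk_advance(&dd->in_walk, 4);
                    if (dd->in_sg->length == walked_bytes(&dd->in_walk)) {
                            /* current entry consumed: move to the next sg */
                            dd->in_sg = sg_next(dd->in_sg);
                            if (dd->in_sg) {
                                    scatterwalk_start(&dd->in_walk, dd->in_sg);
                                    src = sg_virt(dd->in_sg);
                            }
                    } else {
                            src++;
                    }
            }
    }
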
dd                946 drivers/crypto/omap-des.c static int omap_des_get_of(struct omap_des_dev *dd,
dd                950 drivers/crypto/omap-des.c 	dd->pdata = of_device_get_match_data(&pdev->dev);
dd                951 drivers/crypto/omap-des.c 	if (!dd->pdata) {
dd                959 drivers/crypto/omap-des.c static int omap_des_get_of(struct omap_des_dev *dd,
dd                966 drivers/crypto/omap-des.c static int omap_des_get_pdev(struct omap_des_dev *dd,
dd                970 drivers/crypto/omap-des.c 	dd->pdata = pdev->dev.platform_data;
dd                978 drivers/crypto/omap-des.c 	struct omap_des_dev *dd;
dd                984 drivers/crypto/omap-des.c 	dd = devm_kzalloc(dev, sizeof(struct omap_des_dev), GFP_KERNEL);
dd                985 drivers/crypto/omap-des.c 	if (dd == NULL) {
dd                989 drivers/crypto/omap-des.c 	dd->dev = dev;
dd                990 drivers/crypto/omap-des.c 	platform_set_drvdata(pdev, dd);
dd                998 drivers/crypto/omap-des.c 	err = (dev->of_node) ? omap_des_get_of(dd, pdev) :
dd                999 drivers/crypto/omap-des.c 			       omap_des_get_pdev(dd, pdev);
dd               1003 drivers/crypto/omap-des.c 	dd->io_base = devm_ioremap_resource(dev, res);
dd               1004 drivers/crypto/omap-des.c 	if (IS_ERR(dd->io_base)) {
dd               1005 drivers/crypto/omap-des.c 		err = PTR_ERR(dd->io_base);
dd               1008 drivers/crypto/omap-des.c 	dd->phys_base = res->start;
dd               1017 drivers/crypto/omap-des.c 		dev_err(dd->dev, "%s: failed to get_sync(%d)\n", __func__, err);
dd               1021 drivers/crypto/omap-des.c 	omap_des_dma_stop(dd);
dd               1023 drivers/crypto/omap-des.c 	reg = omap_des_read(dd, DES_REG_REV(dd));
dd               1028 drivers/crypto/omap-des.c 		 (reg & dd->pdata->major_mask) >> dd->pdata->major_shift,
dd               1029 drivers/crypto/omap-des.c 		 (reg & dd->pdata->minor_mask) >> dd->pdata->minor_shift);
dd               1031 drivers/crypto/omap-des.c 	tasklet_init(&dd->done_task, omap_des_done_task, (unsigned long)dd);
dd               1033 drivers/crypto/omap-des.c 	err = omap_des_dma_init(dd);
dd               1036 drivers/crypto/omap-des.c 	} else if (err && DES_REG_IRQ_STATUS(dd) && DES_REG_IRQ_ENABLE(dd)) {
dd               1037 drivers/crypto/omap-des.c 		dd->pio_only = 1;
dd               1046 drivers/crypto/omap-des.c 				dev_name(dev), dd);
dd               1054 drivers/crypto/omap-des.c 	INIT_LIST_HEAD(&dd->list);
dd               1056 drivers/crypto/omap-des.c 	list_add_tail(&dd->list, &dev_list);
dd               1060 drivers/crypto/omap-des.c 	dd->engine = crypto_engine_alloc_init(dev, 1);
dd               1061 drivers/crypto/omap-des.c 	if (!dd->engine) {
dd               1066 drivers/crypto/omap-des.c 	err = crypto_engine_start(dd->engine);
dd               1070 drivers/crypto/omap-des.c 	for (i = 0; i < dd->pdata->algs_info_size; i++) {
dd               1071 drivers/crypto/omap-des.c 		for (j = 0; j < dd->pdata->algs_info[i].size; j++) {
dd               1072 drivers/crypto/omap-des.c 			algp = &dd->pdata->algs_info[i].algs_list[j];
dd               1080 drivers/crypto/omap-des.c 			dd->pdata->algs_info[i].registered++;
dd               1087 drivers/crypto/omap-des.c 	for (i = dd->pdata->algs_info_size - 1; i >= 0; i--)
dd               1088 drivers/crypto/omap-des.c 		for (j = dd->pdata->algs_info[i].registered - 1; j >= 0; j--)
dd               1090 drivers/crypto/omap-des.c 					&dd->pdata->algs_info[i].algs_list[j]);
dd               1093 drivers/crypto/omap-des.c 	if (dd->engine)
dd               1094 drivers/crypto/omap-des.c 		crypto_engine_exit(dd->engine);
dd               1096 drivers/crypto/omap-des.c 	omap_des_dma_cleanup(dd);
dd               1098 drivers/crypto/omap-des.c 	tasklet_kill(&dd->done_task);
dd               1102 drivers/crypto/omap-des.c 	dd = NULL;
dd               1110 drivers/crypto/omap-des.c 	struct omap_des_dev *dd = platform_get_drvdata(pdev);
dd               1113 drivers/crypto/omap-des.c 	if (!dd)
dd               1117 drivers/crypto/omap-des.c 	list_del(&dd->list);
dd               1120 drivers/crypto/omap-des.c 	for (i = dd->pdata->algs_info_size - 1; i >= 0; i--)
dd               1121 drivers/crypto/omap-des.c 		for (j = dd->pdata->algs_info[i].registered - 1; j >= 0; j--)
dd               1123 drivers/crypto/omap-des.c 					&dd->pdata->algs_info[i].algs_list[j]);
dd               1125 drivers/crypto/omap-des.c 	tasklet_kill(&dd->done_task);
dd               1126 drivers/crypto/omap-des.c 	omap_des_dma_cleanup(dd);
dd               1127 drivers/crypto/omap-des.c 	pm_runtime_disable(dd->dev);
dd               1128 drivers/crypto/omap-des.c 	dd = NULL;
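
The probe/remove entries above follow the stock devm platform-driver skeleton that omap-sham repeats further down: allocate the device struct, stash it as drvdata, map the MMIO window, record the physical base for DMA, then bring up runtime PM before touching registers. Condensed, with hypothetical names:

    #include <linux/platform_device.h>
    #include <linux/pm_runtime.h>
    #include <linux/io.h>

    struct my_dev {
            struct device *dev;
            void __iomem *io_base;
            phys_addr_t phys_base;
    };

    static int my_probe(struct platform_device *pdev)
    {
            struct device *dev = &pdev->dev;
            struct resource *res;
            struct my_dev *dd;

            dd = devm_kzalloc(dev, sizeof(*dd), GFP_KERNEL);
            if (!dd)
                    return -ENOMEM;

            dd->dev = dev;
            platform_set_drvdata(pdev, dd);         /* remove() fetches it back */

            res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
            dd->io_base = devm_ioremap_resource(dev, res);
            if (IS_ERR(dd->io_base))
                    return PTR_ERR(dd->io_base);
            dd->phys_base = res->start;             /* DMA needs the bus address */

            pm_runtime_enable(dev);
            return 0;
    }
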
dd                 46 drivers/crypto/omap-sham.c #define SHA_REG_IDIGEST(dd, x)		((dd)->pdata->idigest_ofs + ((x)*0x04))
dd                 47 drivers/crypto/omap-sham.c #define SHA_REG_DIN(dd, x)		((dd)->pdata->din_ofs + ((x) * 0x04))
dd                 48 drivers/crypto/omap-sham.c #define SHA_REG_DIGCNT(dd)		((dd)->pdata->digcnt_ofs)
dd                 50 drivers/crypto/omap-sham.c #define SHA_REG_ODIGEST(dd, x)		((dd)->pdata->odigest_ofs + (x * 0x04))
dd                 60 drivers/crypto/omap-sham.c #define SHA_REG_REV(dd)			((dd)->pdata->rev_ofs)
dd                 62 drivers/crypto/omap-sham.c #define SHA_REG_MASK(dd)		((dd)->pdata->mask_ofs)
dd                 68 drivers/crypto/omap-sham.c #define SHA_REG_SYSSTATUS(dd)		((dd)->pdata->sysstatus_ofs)
dd                 71 drivers/crypto/omap-sham.c #define SHA_REG_MODE(dd)		((dd)->pdata->mode_ofs)
dd                 85 drivers/crypto/omap-sham.c #define SHA_REG_LENGTH(dd)		((dd)->pdata->length_ofs)
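
The SHA_REG_*() macros above compute register addresses at run time from per-SoC offsets held in pdata; that indirection is what lets one driver serve OMAP2- and OMAP4-class SHA blocks whose register maps differ. The same pattern in miniature (names hypothetical):

    struct my_pdata {
            u32 idigest_ofs;        /* filled from of_match/platform data */
            u32 din_ofs;
    };

    struct my_dev {
            const struct my_pdata *pdata;
    };

    #define MY_REG_IDIGEST(dd, x)   ((dd)->pdata->idigest_ofs + ((x) * 0x04))
    #define MY_REG_DIN(dd, x)       ((dd)->pdata->din_ofs + ((x) * 0x04))
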
dd                142 drivers/crypto/omap-sham.c 	struct omap_sham_dev	*dd;
dd                168 drivers/crypto/omap-sham.c 	struct omap_sham_dev	*dd;
dd                193 drivers/crypto/omap-sham.c 	void		(*write_ctrl)(struct omap_sham_dev *dd, size_t length,
dd                195 drivers/crypto/omap-sham.c 	void		(*trigger)(struct omap_sham_dev *dd, size_t length);
dd                196 drivers/crypto/omap-sham.c 	int		(*poll_irq)(struct omap_sham_dev *dd);
dd                247 drivers/crypto/omap-sham.c static inline u32 omap_sham_read(struct omap_sham_dev *dd, u32 offset)
dd                249 drivers/crypto/omap-sham.c 	return __raw_readl(dd->io_base + offset);
dd                252 drivers/crypto/omap-sham.c static inline void omap_sham_write(struct omap_sham_dev *dd,
dd                255 drivers/crypto/omap-sham.c 	__raw_writel(value, dd->io_base + offset);
dd                258 drivers/crypto/omap-sham.c static inline void omap_sham_write_mask(struct omap_sham_dev *dd, u32 address,
dd                263 drivers/crypto/omap-sham.c 	val = omap_sham_read(dd, address);
dd                266 drivers/crypto/omap-sham.c 	omap_sham_write(dd, address, val);
dd                269 drivers/crypto/omap-sham.c static inline int omap_sham_wait(struct omap_sham_dev *dd, u32 offset, u32 bit)
dd                273 drivers/crypto/omap-sham.c 	while (!(omap_sham_read(dd, offset) & bit)) {
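
omap_sham_write_mask() and omap_sham_wait() above are the usual MMIO read-modify-write and bounded busy-wait helpers. A self-contained sketch; note the original assumes value is already confined to mask, while this version masks it defensively:

    #include <linux/io.h>
    #include <linux/errno.h>

    static inline void my_write_mask(void __iomem *base, u32 offset,
                                     u32 value, u32 mask)
    {
            u32 val = __raw_readl(base + offset);

            val &= ~mask;                   /* clear the field */
            val |= value & mask;            /* set the new bits */
            __raw_writel(val, base + offset);
    }

    static inline int my_wait(void __iomem *base, u32 offset, u32 bit)
    {
            unsigned long timeout = 100000; /* arbitrary poll budget */

            while (!(__raw_readl(base + offset) & bit))
                    if (!--timeout)
                            return -ETIMEDOUT;
            return 0;
    }
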
dd                284 drivers/crypto/omap-sham.c 	struct omap_sham_dev *dd = ctx->dd;
dd                288 drivers/crypto/omap-sham.c 	for (i = 0; i < dd->pdata->digest_size / sizeof(u32); i++) {
dd                290 drivers/crypto/omap-sham.c 			hash[i] = omap_sham_read(dd, SHA_REG_IDIGEST(dd, i));
dd                292 drivers/crypto/omap-sham.c 			omap_sham_write(dd, SHA_REG_IDIGEST(dd, i), hash[i]);
dd                299 drivers/crypto/omap-sham.c 	struct omap_sham_dev *dd = ctx->dd;
dd                303 drivers/crypto/omap-sham.c 		struct crypto_ahash *tfm = crypto_ahash_reqtfm(dd->req);
dd                308 drivers/crypto/omap-sham.c 		for (i = 0; i < dd->pdata->digest_size / sizeof(u32); i++) {
dd                310 drivers/crypto/omap-sham.c 				opad[i] = omap_sham_read(dd,
dd                311 drivers/crypto/omap-sham.c 						SHA_REG_ODIGEST(dd, i));
dd                313 drivers/crypto/omap-sham.c 				omap_sham_write(dd, SHA_REG_ODIGEST(dd, i),
dd                337 drivers/crypto/omap-sham.c 		if (test_bit(FLAGS_BE32_SHA1, &ctx->dd->flags))
dd                365 drivers/crypto/omap-sham.c static int omap_sham_hw_init(struct omap_sham_dev *dd)
dd                369 drivers/crypto/omap-sham.c 	err = pm_runtime_get_sync(dd->dev);
dd                371 drivers/crypto/omap-sham.c 		dev_err(dd->dev, "failed to get sync: %d\n", err);
dd                375 drivers/crypto/omap-sham.c 	if (!test_bit(FLAGS_INIT, &dd->flags)) {
dd                376 drivers/crypto/omap-sham.c 		set_bit(FLAGS_INIT, &dd->flags);
dd                377 drivers/crypto/omap-sham.c 		dd->err = 0;
dd                383 drivers/crypto/omap-sham.c static void omap_sham_write_ctrl_omap2(struct omap_sham_dev *dd, size_t length,
dd                386 drivers/crypto/omap-sham.c 	struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req);
dd                390 drivers/crypto/omap-sham.c 		omap_sham_write(dd, SHA_REG_DIGCNT(dd), ctx->digcnt);
dd                392 drivers/crypto/omap-sham.c 	omap_sham_write_mask(dd, SHA_REG_MASK(dd),
dd                409 drivers/crypto/omap-sham.c 	omap_sham_write_mask(dd, SHA_REG_CTRL, val, mask);
dd                412 drivers/crypto/omap-sham.c static void omap_sham_trigger_omap2(struct omap_sham_dev *dd, size_t length)
dd                416 drivers/crypto/omap-sham.c static int omap_sham_poll_irq_omap2(struct omap_sham_dev *dd)
dd                418 drivers/crypto/omap-sham.c 	return omap_sham_wait(dd, SHA_REG_CTRL, SHA_REG_CTRL_INPUT_READY);
dd                445 drivers/crypto/omap-sham.c static void omap_sham_write_n(struct omap_sham_dev *dd, u32 offset,
dd                449 drivers/crypto/omap-sham.c 		omap_sham_write(dd, offset, *value);
dd                452 drivers/crypto/omap-sham.c static void omap_sham_write_ctrl_omap4(struct omap_sham_dev *dd, size_t length,
dd                455 drivers/crypto/omap-sham.c 	struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req);
dd                465 drivers/crypto/omap-sham.c 		struct crypto_ahash *tfm = crypto_ahash_reqtfm(dd->req);
dd                476 drivers/crypto/omap-sham.c 			omap_sham_write_n(dd, SHA_REG_ODIGEST(dd, 0),
dd                478 drivers/crypto/omap-sham.c 			omap_sham_write_n(dd, SHA_REG_IDIGEST(dd, 0),
dd                495 drivers/crypto/omap-sham.c 	dev_dbg(dd->dev, "ctrl: %08x, flags: %08lx\n", val, ctx->flags);
dd                496 drivers/crypto/omap-sham.c 	omap_sham_write_mask(dd, SHA_REG_MODE(dd), val, mask);
dd                497 drivers/crypto/omap-sham.c 	omap_sham_write(dd, SHA_REG_IRQENA, SHA_REG_IRQENA_OUTPUT_RDY);
dd                498 drivers/crypto/omap-sham.c 	omap_sham_write_mask(dd, SHA_REG_MASK(dd),
dd                504 drivers/crypto/omap-sham.c static void omap_sham_trigger_omap4(struct omap_sham_dev *dd, size_t length)
dd                506 drivers/crypto/omap-sham.c 	omap_sham_write(dd, SHA_REG_LENGTH(dd), length);
dd                509 drivers/crypto/omap-sham.c static int omap_sham_poll_irq_omap4(struct omap_sham_dev *dd)
dd                511 drivers/crypto/omap-sham.c 	return omap_sham_wait(dd, SHA_REG_IRQSTATUS,
dd                515 drivers/crypto/omap-sham.c static int omap_sham_xmit_cpu(struct omap_sham_dev *dd, size_t length,
dd                518 drivers/crypto/omap-sham.c 	struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req);
dd                524 drivers/crypto/omap-sham.c 	dev_dbg(dd->dev, "xmit_cpu: digcnt: %d, length: %d, final: %d\n",
dd                527 drivers/crypto/omap-sham.c 	dd->pdata->write_ctrl(dd, length, final, 0);
dd                528 drivers/crypto/omap-sham.c 	dd->pdata->trigger(dd, length);
dd                535 drivers/crypto/omap-sham.c 		set_bit(FLAGS_FINAL, &dd->flags); /* catch last interrupt */
dd                537 drivers/crypto/omap-sham.c 	set_bit(FLAGS_CPU, &dd->flags);
dd                548 drivers/crypto/omap-sham.c 		if (dd->pdata->poll_irq(dd))
dd                562 drivers/crypto/omap-sham.c 			omap_sham_write(dd, SHA_REG_DIN(dd, count),
dd                576 drivers/crypto/omap-sham.c 	struct omap_sham_dev *dd = param;
dd                578 drivers/crypto/omap-sham.c 	set_bit(FLAGS_DMA_READY, &dd->flags);
dd                579 drivers/crypto/omap-sham.c 	tasklet_schedule(&dd->done_task);
dd                582 drivers/crypto/omap-sham.c static int omap_sham_xmit_dma(struct omap_sham_dev *dd, size_t length,
dd                585 drivers/crypto/omap-sham.c 	struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req);
dd                590 drivers/crypto/omap-sham.c 	dev_dbg(dd->dev, "xmit_dma: digcnt: %d, length: %d, final: %d\n",
dd                593 drivers/crypto/omap-sham.c 	if (!dma_map_sg(dd->dev, ctx->sg, ctx->sg_len, DMA_TO_DEVICE)) {
dd                594 drivers/crypto/omap-sham.c 		dev_err(dd->dev, "dma_map_sg error\n");
dd                600 drivers/crypto/omap-sham.c 	cfg.dst_addr = dd->phys_base + SHA_REG_DIN(dd, 0);
dd                604 drivers/crypto/omap-sham.c 	ret = dmaengine_slave_config(dd->dma_lch, &cfg);
dd                610 drivers/crypto/omap-sham.c 	tx = dmaengine_prep_slave_sg(dd->dma_lch, ctx->sg, ctx->sg_len,
dd                615 drivers/crypto/omap-sham.c 		dev_err(dd->dev, "prep_slave_sg failed\n");
dd                620 drivers/crypto/omap-sham.c 	tx->callback_param = dd;
dd                622 drivers/crypto/omap-sham.c 	dd->pdata->write_ctrl(dd, length, final, 1);
dd                628 drivers/crypto/omap-sham.c 		set_bit(FLAGS_FINAL, &dd->flags); /* catch last interrupt */
dd                630 drivers/crypto/omap-sham.c 	set_bit(FLAGS_DMA_ACTIVE, &dd->flags);
dd                633 drivers/crypto/omap-sham.c 	dma_async_issue_pending(dd->dma_lch);
dd                635 drivers/crypto/omap-sham.c 	dd->pdata->trigger(dd, length);
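
omap_sham_xmit_dma() above is the canonical dmaengine slave-TX sequence: configure the channel toward the data FIFO, prep a slave-sg descriptor, attach the completion callback, submit, then issue. A sketch under the same assumptions (MY_DIN_OFS, my_dma_done() and the my_dev fields are hypothetical):

    #include <linux/dmaengine.h>

    #define MY_DIN_OFS 0x80                 /* hypothetical FIFO offset */

    struct my_dev {
            phys_addr_t phys_base;
            struct dma_chan *dma_lch;
    };

    static void my_dma_done(void *param)
    {
            /* bottom half: set a DMA_READY flag, schedule the done tasklet */
    }

    static int my_xmit_dma(struct my_dev *dd, struct scatterlist *sg, int sg_len)
    {
            struct dma_slave_config cfg = {
                    .dst_addr       = dd->phys_base + MY_DIN_OFS,
                    .dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
                    .dst_maxburst   = 16,
            };
            struct dma_async_tx_descriptor *tx;
            int ret;

            ret = dmaengine_slave_config(dd->dma_lch, &cfg);
            if (ret)
                    return ret;

            tx = dmaengine_prep_slave_sg(dd->dma_lch, sg, sg_len, DMA_MEM_TO_DEV,
                                         DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
            if (!tx)
                    return -EINVAL;

            tx->callback = my_dma_done;
            tx->callback_param = dd;

            dmaengine_submit(tx);
            dma_async_issue_pending(dd->dma_lch);   /* transfer starts here */
            return -EINPROGRESS;
    }
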
dd                661 drivers/crypto/omap-sham.c 		sg_set_buf(tmp, ctx->dd->xmit_buf, ctx->bufcnt);
dd                690 drivers/crypto/omap-sham.c 	set_bit(FLAGS_SGS_ALLOCED, &ctx->dd->flags);
dd                715 drivers/crypto/omap-sham.c 		memcpy(buf, ctx->dd->xmit_buf, ctx->bufcnt);
dd                722 drivers/crypto/omap-sham.c 	set_bit(FLAGS_SGS_COPIED, &ctx->dd->flags);
dd                842 drivers/crypto/omap-sham.c 		memcpy(rctx->dd->xmit_buf, rctx->buffer, rctx->bufcnt);
dd                866 drivers/crypto/omap-sham.c 		sg_set_buf(rctx->sgl, rctx->dd->xmit_buf, rctx->bufcnt);
dd                876 drivers/crypto/omap-sham.c 		sg_set_buf(rctx->sgl, rctx->dd->xmit_buf, xmit_len);
dd                910 drivers/crypto/omap-sham.c static int omap_sham_update_dma_stop(struct omap_sham_dev *dd)
dd                912 drivers/crypto/omap-sham.c 	struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req);
dd                914 drivers/crypto/omap-sham.c 	dma_unmap_sg(dd->dev, ctx->sg, ctx->sg_len, DMA_TO_DEVICE);
dd                916 drivers/crypto/omap-sham.c 	clear_bit(FLAGS_DMA_ACTIVE, &dd->flags);
dd                926 drivers/crypto/omap-sham.c 	struct omap_sham_dev *dd = NULL, *tmp;
dd                930 drivers/crypto/omap-sham.c 	if (!tctx->dd) {
dd                932 drivers/crypto/omap-sham.c 			dd = tmp;
dd                935 drivers/crypto/omap-sham.c 		tctx->dd = dd;
dd                937 drivers/crypto/omap-sham.c 		dd = tctx->dd;
dd                941 drivers/crypto/omap-sham.c 	ctx->dd = dd;
dd                945 drivers/crypto/omap-sham.c 	dev_dbg(dd->dev, "init: digest size: %d\n",
dd                982 drivers/crypto/omap-sham.c 		if (!test_bit(FLAGS_AUTO_XOR, &dd->flags)) {
dd                996 drivers/crypto/omap-sham.c static int omap_sham_update_req(struct omap_sham_dev *dd)
dd                998 drivers/crypto/omap-sham.c 	struct ahash_request *req = dd->req;
dd               1003 drivers/crypto/omap-sham.c 	dev_dbg(dd->dev, "update_req: total: %u, digcnt: %d, finup: %d\n",
dd               1007 drivers/crypto/omap-sham.c 	    ctx->total < dd->fallback_sz)
dd               1011 drivers/crypto/omap-sham.c 		err = omap_sham_xmit_cpu(dd, ctx->total, final);
dd               1013 drivers/crypto/omap-sham.c 		err = omap_sham_xmit_dma(dd, ctx->total, final);
dd               1016 drivers/crypto/omap-sham.c 	dev_dbg(dd->dev, "update: err: %d, digcnt: %d\n", err, ctx->digcnt);
dd               1021 drivers/crypto/omap-sham.c static int omap_sham_final_req(struct omap_sham_dev *dd)
dd               1023 drivers/crypto/omap-sham.c 	struct ahash_request *req = dd->req;
dd               1027 drivers/crypto/omap-sham.c 	if ((ctx->total <= get_block_size(ctx)) || dd->polling_mode)
dd               1035 drivers/crypto/omap-sham.c 		err = omap_sham_xmit_dma(dd, ctx->total, 1);
dd               1037 drivers/crypto/omap-sham.c 		err = omap_sham_xmit_cpu(dd, ctx->total, 1);
dd               1041 drivers/crypto/omap-sham.c 	dev_dbg(dd->dev, "final_req: err: %d\n", err);
dd               1064 drivers/crypto/omap-sham.c 	struct omap_sham_dev *dd = ctx->dd;
dd               1070 drivers/crypto/omap-sham.c 				!test_bit(FLAGS_AUTO_XOR, &dd->flags))
dd               1074 drivers/crypto/omap-sham.c 	dev_dbg(dd->dev, "digcnt: %d, bufcnt: %d\n", ctx->digcnt, ctx->bufcnt);
dd               1082 drivers/crypto/omap-sham.c 	struct omap_sham_dev *dd = ctx->dd;
dd               1084 drivers/crypto/omap-sham.c 	if (test_bit(FLAGS_SGS_COPIED, &dd->flags))
dd               1088 drivers/crypto/omap-sham.c 	if (test_bit(FLAGS_SGS_ALLOCED, &dd->flags))
dd               1093 drivers/crypto/omap-sham.c 	dd->flags &= ~(BIT(FLAGS_SGS_ALLOCED) | BIT(FLAGS_SGS_COPIED));
dd               1096 drivers/crypto/omap-sham.c 		dd->pdata->copy_hash(req, 1);
dd               1097 drivers/crypto/omap-sham.c 		if (test_bit(FLAGS_FINAL, &dd->flags))
dd               1104 drivers/crypto/omap-sham.c 	dd->flags &= ~(BIT(FLAGS_BUSY) | BIT(FLAGS_FINAL) | BIT(FLAGS_CPU) |
dd               1107 drivers/crypto/omap-sham.c 	pm_runtime_mark_last_busy(dd->dev);
dd               1108 drivers/crypto/omap-sham.c 	pm_runtime_put_autosuspend(dd->dev);
dd               1114 drivers/crypto/omap-sham.c static int omap_sham_handle_queue(struct omap_sham_dev *dd,
dd               1123 drivers/crypto/omap-sham.c 	spin_lock_irqsave(&dd->lock, flags);
dd               1125 drivers/crypto/omap-sham.c 		ret = ahash_enqueue_request(&dd->queue, req);
dd               1126 drivers/crypto/omap-sham.c 	if (test_bit(FLAGS_BUSY, &dd->flags)) {
dd               1127 drivers/crypto/omap-sham.c 		spin_unlock_irqrestore(&dd->lock, flags);
dd               1130 drivers/crypto/omap-sham.c 	backlog = crypto_get_backlog(&dd->queue);
dd               1131 drivers/crypto/omap-sham.c 	async_req = crypto_dequeue_request(&dd->queue);
dd               1133 drivers/crypto/omap-sham.c 		set_bit(FLAGS_BUSY, &dd->flags);
dd               1134 drivers/crypto/omap-sham.c 	spin_unlock_irqrestore(&dd->lock, flags);
dd               1143 drivers/crypto/omap-sham.c 	dd->req = req;
dd               1150 drivers/crypto/omap-sham.c 	dev_dbg(dd->dev, "handling new req, op: %lu, nbytes: %d\n",
dd               1153 drivers/crypto/omap-sham.c 	err = omap_sham_hw_init(dd);
dd               1159 drivers/crypto/omap-sham.c 		dd->pdata->copy_hash(req, 0);
dd               1162 drivers/crypto/omap-sham.c 		err = omap_sham_update_req(dd);
dd               1165 drivers/crypto/omap-sham.c 			err = omap_sham_final_req(dd);
dd               1167 drivers/crypto/omap-sham.c 		err = omap_sham_final_req(dd);
dd               1170 drivers/crypto/omap-sham.c 	dev_dbg(dd->dev, "exit, err: %d\n", err);
dd               1191 drivers/crypto/omap-sham.c 	struct omap_sham_dev *dd = tctx->dd;
dd               1195 drivers/crypto/omap-sham.c 	return omap_sham_handle_queue(dd, req);
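
omap_sham_handle_queue() above shows the classic single-hardware dispatch discipline: enqueue under a spinlock, but dequeue only when the BUSY flag is clear, so exactly one request owns the engine at a time; the done path re-runs the function to pick up the next request. Skeleton (flag bit and struct fields assumed as in the listing):

    #include <linux/spinlock.h>
    #include <crypto/algapi.h>
    #include <crypto/internal/hash.h>

    #define FLAGS_BUSY 0

    struct my_dev {
            spinlock_t lock;
            unsigned long flags;
            struct crypto_queue queue;
    };

    static int my_handle_queue(struct my_dev *dd, struct ahash_request *req)
    {
            struct crypto_async_request *async_req, *backlog;
            unsigned long flags;
            int ret = 0;

            spin_lock_irqsave(&dd->lock, flags);
            if (req)
                    ret = ahash_enqueue_request(&dd->queue, req);
            if (test_bit(FLAGS_BUSY, &dd->flags)) {
                    /* hardware busy: leave the request queued */
                    spin_unlock_irqrestore(&dd->lock, flags);
                    return ret;
            }
            backlog = crypto_get_backlog(&dd->queue);
            async_req = crypto_dequeue_request(&dd->queue);
            if (async_req)
                    set_bit(FLAGS_BUSY, &dd->flags);
            spin_unlock_irqrestore(&dd->lock, flags);

            if (!async_req)
                    return ret;
            if (backlog)
                    backlog->complete(backlog, -EINPROGRESS);
            /* ... program the hardware for ahash_request_cast(async_req) ... */
            return ret;
    }
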
dd               1201 drivers/crypto/omap-sham.c 	struct omap_sham_dev *dd = ctx->dd;
dd               1213 drivers/crypto/omap-sham.c 	if (dd->polling_mode)
dd               1241 drivers/crypto/omap-sham.c 	    !test_bit(FLAGS_AUTO_XOR, &ctx->dd->flags))
dd               1265 drivers/crypto/omap-sham.c 	if (!ctx->digcnt && ctx->bufcnt < ctx->dd->fallback_sz)
dd               1305 drivers/crypto/omap-sham.c 	struct omap_sham_dev *dd = NULL, *tmp;
dd               1309 drivers/crypto/omap-sham.c 	if (!tctx->dd) {
dd               1311 drivers/crypto/omap-sham.c 			dd = tmp;
dd               1314 drivers/crypto/omap-sham.c 		tctx->dd = dd;
dd               1316 drivers/crypto/omap-sham.c 		dd = tctx->dd;
dd               1337 drivers/crypto/omap-sham.c 	if (!test_bit(FLAGS_AUTO_XOR, &dd->flags)) {
dd               1730 drivers/crypto/omap-sham.c 	struct omap_sham_dev *dd = (struct omap_sham_dev *)data;
dd               1733 drivers/crypto/omap-sham.c 	if (!test_bit(FLAGS_BUSY, &dd->flags)) {
dd               1734 drivers/crypto/omap-sham.c 		omap_sham_handle_queue(dd, NULL);
dd               1738 drivers/crypto/omap-sham.c 	if (test_bit(FLAGS_CPU, &dd->flags)) {
dd               1739 drivers/crypto/omap-sham.c 		if (test_and_clear_bit(FLAGS_OUTPUT_READY, &dd->flags))
dd               1741 drivers/crypto/omap-sham.c 	} else if (test_bit(FLAGS_DMA_READY, &dd->flags)) {
dd               1742 drivers/crypto/omap-sham.c 		if (test_and_clear_bit(FLAGS_DMA_ACTIVE, &dd->flags)) {
dd               1743 drivers/crypto/omap-sham.c 			omap_sham_update_dma_stop(dd);
dd               1744 drivers/crypto/omap-sham.c 			if (dd->err) {
dd               1745 drivers/crypto/omap-sham.c 				err = dd->err;
dd               1749 drivers/crypto/omap-sham.c 		if (test_and_clear_bit(FLAGS_OUTPUT_READY, &dd->flags)) {
dd               1751 drivers/crypto/omap-sham.c 			clear_bit(FLAGS_DMA_READY, &dd->flags);
dd               1759 drivers/crypto/omap-sham.c 	dev_dbg(dd->dev, "update done: err: %d\n", err);
dd               1761 drivers/crypto/omap-sham.c 	omap_sham_finish_req(dd->req, err);
dd               1764 drivers/crypto/omap-sham.c 	if (!test_bit(FLAGS_BUSY, &dd->flags))
dd               1765 drivers/crypto/omap-sham.c 		omap_sham_handle_queue(dd, NULL);
dd               1768 drivers/crypto/omap-sham.c static irqreturn_t omap_sham_irq_common(struct omap_sham_dev *dd)
dd               1770 drivers/crypto/omap-sham.c 	if (!test_bit(FLAGS_BUSY, &dd->flags)) {
dd               1771 drivers/crypto/omap-sham.c 		dev_warn(dd->dev, "Interrupt when no active requests.\n");
dd               1773 drivers/crypto/omap-sham.c 		set_bit(FLAGS_OUTPUT_READY, &dd->flags);
dd               1774 drivers/crypto/omap-sham.c 		tasklet_schedule(&dd->done_task);
dd               1782 drivers/crypto/omap-sham.c 	struct omap_sham_dev *dd = dev_id;
dd               1784 drivers/crypto/omap-sham.c 	if (unlikely(test_bit(FLAGS_FINAL, &dd->flags)))
dd               1786 drivers/crypto/omap-sham.c 		omap_sham_write_mask(dd, SHA_REG_CTRL, 0, SHA_REG_CTRL_LENGTH);
dd               1788 drivers/crypto/omap-sham.c 	omap_sham_write_mask(dd, SHA_REG_CTRL, SHA_REG_CTRL_OUTPUT_READY,
dd               1790 drivers/crypto/omap-sham.c 	omap_sham_read(dd, SHA_REG_CTRL);
dd               1792 drivers/crypto/omap-sham.c 	return omap_sham_irq_common(dd);
dd               1797 drivers/crypto/omap-sham.c 	struct omap_sham_dev *dd = dev_id;
dd               1799 drivers/crypto/omap-sham.c 	omap_sham_write_mask(dd, SHA_REG_MASK(dd), 0, SHA_REG_MASK_IT_EN);
dd               1801 drivers/crypto/omap-sham.c 	return omap_sham_irq_common(dd);
dd               1931 drivers/crypto/omap-sham.c static int omap_sham_get_res_of(struct omap_sham_dev *dd,
dd               1937 drivers/crypto/omap-sham.c 	dd->pdata = of_device_get_match_data(dev);
dd               1938 drivers/crypto/omap-sham.c 	if (!dd->pdata) {
dd               1951 drivers/crypto/omap-sham.c 	dd->irq = irq_of_parse_and_map(node, 0);
dd               1952 drivers/crypto/omap-sham.c 	if (!dd->irq) {
dd               1966 drivers/crypto/omap-sham.c static int omap_sham_get_res_of(struct omap_sham_dev *dd,
dd               1973 drivers/crypto/omap-sham.c static int omap_sham_get_res_pdev(struct omap_sham_dev *dd,
dd               1990 drivers/crypto/omap-sham.c 	dd->irq = platform_get_irq(pdev, 0);
dd               1991 drivers/crypto/omap-sham.c 	if (dd->irq < 0) {
dd               1992 drivers/crypto/omap-sham.c 		err = dd->irq;
dd               1997 drivers/crypto/omap-sham.c 	dd->pdata = &omap_sham_pdata_omap2;
dd               2006 drivers/crypto/omap-sham.c 	struct omap_sham_dev *dd = dev_get_drvdata(dev);
dd               2008 drivers/crypto/omap-sham.c 	return sprintf(buf, "%d\n", dd->fallback_sz);
dd               2014 drivers/crypto/omap-sham.c 	struct omap_sham_dev *dd = dev_get_drvdata(dev);
dd               2028 drivers/crypto/omap-sham.c 	dd->fallback_sz = value;
dd               2036 drivers/crypto/omap-sham.c 	struct omap_sham_dev *dd = dev_get_drvdata(dev);
dd               2038 drivers/crypto/omap-sham.c 	return sprintf(buf, "%d\n", dd->queue.max_qlen);
dd               2045 drivers/crypto/omap-sham.c 	struct omap_sham_dev *dd = dev_get_drvdata(dev);
dd               2062 drivers/crypto/omap-sham.c 	spin_lock_irqsave(&dd->lock, flags);
dd               2063 drivers/crypto/omap-sham.c 	dd->queue.max_qlen = value;
dd               2064 drivers/crypto/omap-sham.c 	spin_unlock_irqrestore(&dd->lock, flags);
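
The fallback_sz and queue-length entries above are runtime sysfs knobs. The show/store pattern they use, sketched with a hypothetical attribute (the real store applies stricter validation than the sign check shown here):

    #include <linux/device.h>
    #include <linux/kernel.h>

    struct my_dev {
            int fallback_sz;        /* below this size, use the software path */
    };

    static ssize_t fallback_show(struct device *dev,
                                 struct device_attribute *attr, char *buf)
    {
            struct my_dev *dd = dev_get_drvdata(dev);

            return sprintf(buf, "%d\n", dd->fallback_sz);
    }

    static ssize_t fallback_store(struct device *dev,
                                  struct device_attribute *attr,
                                  const char *buf, size_t size)
    {
            struct my_dev *dd = dev_get_drvdata(dev);
            long value;

            if (kstrtol(buf, 10, &value) || value < 0)
                    return -EINVAL;

            dd->fallback_sz = value;
            return size;            /* consumed the whole write */
    }
    static DEVICE_ATTR_RW(fallback);
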
dd               2084 drivers/crypto/omap-sham.c 	struct omap_sham_dev *dd;
dd               2091 drivers/crypto/omap-sham.c 	dd = devm_kzalloc(dev, sizeof(struct omap_sham_dev), GFP_KERNEL);
dd               2092 drivers/crypto/omap-sham.c 	if (dd == NULL) {
dd               2097 drivers/crypto/omap-sham.c 	dd->dev = dev;
dd               2098 drivers/crypto/omap-sham.c 	platform_set_drvdata(pdev, dd);
dd               2100 drivers/crypto/omap-sham.c 	INIT_LIST_HEAD(&dd->list);
dd               2101 drivers/crypto/omap-sham.c 	spin_lock_init(&dd->lock);
dd               2102 drivers/crypto/omap-sham.c 	tasklet_init(&dd->done_task, omap_sham_done_task, (unsigned long)dd);
dd               2103 drivers/crypto/omap-sham.c 	crypto_init_queue(&dd->queue, OMAP_SHAM_QUEUE_LENGTH);
dd               2105 drivers/crypto/omap-sham.c 	err = (dev->of_node) ? omap_sham_get_res_of(dd, dev, &res) :
dd               2106 drivers/crypto/omap-sham.c 			       omap_sham_get_res_pdev(dd, pdev, &res);
dd               2110 drivers/crypto/omap-sham.c 	dd->io_base = devm_ioremap_resource(dev, &res);
dd               2111 drivers/crypto/omap-sham.c 	if (IS_ERR(dd->io_base)) {
dd               2112 drivers/crypto/omap-sham.c 		err = PTR_ERR(dd->io_base);
dd               2115 drivers/crypto/omap-sham.c 	dd->phys_base = res.start;
dd               2117 drivers/crypto/omap-sham.c 	err = devm_request_irq(dev, dd->irq, dd->pdata->intr_hdlr,
dd               2118 drivers/crypto/omap-sham.c 			       IRQF_TRIGGER_NONE, dev_name(dev), dd);
dd               2121 drivers/crypto/omap-sham.c 			dd->irq, err);
dd               2128 drivers/crypto/omap-sham.c 	dd->dma_lch = dma_request_chan(dev, "rx");
dd               2129 drivers/crypto/omap-sham.c 	if (IS_ERR(dd->dma_lch)) {
dd               2130 drivers/crypto/omap-sham.c 		err = PTR_ERR(dd->dma_lch);
dd               2134 drivers/crypto/omap-sham.c 		dd->polling_mode = 1;
dd               2138 drivers/crypto/omap-sham.c 	dd->flags |= dd->pdata->flags;
dd               2143 drivers/crypto/omap-sham.c 	dd->fallback_sz = OMAP_SHA_DMA_THRESHOLD;
dd               2154 drivers/crypto/omap-sham.c 	rev = omap_sham_read(dd, SHA_REG_REV(dd));
dd               2158 drivers/crypto/omap-sham.c 		(rev & dd->pdata->major_mask) >> dd->pdata->major_shift,
dd               2159 drivers/crypto/omap-sham.c 		(rev & dd->pdata->minor_mask) >> dd->pdata->minor_shift);
dd               2162 drivers/crypto/omap-sham.c 	list_add_tail(&dd->list, &sham.dev_list);
dd               2165 drivers/crypto/omap-sham.c 	for (i = 0; i < dd->pdata->algs_info_size; i++) {
dd               2166 drivers/crypto/omap-sham.c 		for (j = 0; j < dd->pdata->algs_info[i].size; j++) {
dd               2169 drivers/crypto/omap-sham.c 			alg = &dd->pdata->algs_info[i].algs_list[j];
dd               2178 drivers/crypto/omap-sham.c 			dd->pdata->algs_info[i].registered++;
dd               2191 drivers/crypto/omap-sham.c 	for (i = dd->pdata->algs_info_size - 1; i >= 0; i--)
dd               2192 drivers/crypto/omap-sham.c 		for (j = dd->pdata->algs_info[i].registered - 1; j >= 0; j--)
dd               2194 drivers/crypto/omap-sham.c 					&dd->pdata->algs_info[i].algs_list[j]);
dd               2197 drivers/crypto/omap-sham.c 	if (!dd->polling_mode)
dd               2198 drivers/crypto/omap-sham.c 		dma_release_channel(dd->dma_lch);
dd               2207 drivers/crypto/omap-sham.c 	struct omap_sham_dev *dd;
dd               2210 drivers/crypto/omap-sham.c 	dd = platform_get_drvdata(pdev);
dd               2211 drivers/crypto/omap-sham.c 	if (!dd)
dd               2214 drivers/crypto/omap-sham.c 	list_del(&dd->list);
dd               2216 drivers/crypto/omap-sham.c 	for (i = dd->pdata->algs_info_size - 1; i >= 0; i--)
dd               2217 drivers/crypto/omap-sham.c 		for (j = dd->pdata->algs_info[i].registered - 1; j >= 0; j--)
dd               2219 drivers/crypto/omap-sham.c 					&dd->pdata->algs_info[i].algs_list[j]);
dd               2220 drivers/crypto/omap-sham.c 	tasklet_kill(&dd->done_task);
dd               2223 drivers/crypto/omap-sham.c 	if (!dd->polling_mode)
dd               2224 drivers/crypto/omap-sham.c 		dma_release_channel(dd->dma_lch);
dd                354 drivers/crypto/s5p-sss.c 	struct s5p_aes_dev	*dd;
dd                382 drivers/crypto/s5p-sss.c 	struct s5p_aes_dev	*dd;
dd                612 drivers/crypto/s5p-sss.c static inline u32 s5p_hash_read(struct s5p_aes_dev *dd, u32 offset)
dd                614 drivers/crypto/s5p-sss.c 	return __raw_readl(dd->io_hash_base + offset);
dd                617 drivers/crypto/s5p-sss.c static inline void s5p_hash_write(struct s5p_aes_dev *dd,
dd                620 drivers/crypto/s5p-sss.c 	__raw_writel(value, dd->io_hash_base + offset);
dd                789 drivers/crypto/s5p-sss.c 	struct s5p_aes_dev *dd = ctx->dd;
dd                794 drivers/crypto/s5p-sss.c 		hash[i] = s5p_hash_read(dd, SSS_REG_HASH_OUT(i));
dd                802 drivers/crypto/s5p-sss.c static void s5p_hash_write_ctx_iv(struct s5p_aes_dev *dd,
dd                809 drivers/crypto/s5p-sss.c 		s5p_hash_write(dd, SSS_REG_HASH_IV(i), hash[i]);
dd                820 drivers/crypto/s5p-sss.c 	s5p_hash_write_ctx_iv(ctx->dd, ctx);
dd                933 drivers/crypto/s5p-sss.c static void s5p_hash_write_ctrl(struct s5p_aes_dev *dd, size_t length,
dd                936 drivers/crypto/s5p-sss.c 	struct s5p_hash_reqctx *ctx = ahash_request_ctx(dd->hash_req);
dd                944 drivers/crypto/s5p-sss.c 		s5p_hash_write_ctx_iv(dd, ctx);
dd                966 drivers/crypto/s5p-sss.c 	s5p_hash_write(dd, SSS_REG_HASH_MSG_SIZE_LOW, low);
dd                967 drivers/crypto/s5p-sss.c 	s5p_hash_write(dd, SSS_REG_HASH_MSG_SIZE_HIGH, high);
dd                968 drivers/crypto/s5p-sss.c 	s5p_hash_write(dd, SSS_REG_HASH_PRE_MSG_SIZE_LOW, prelow);
dd                969 drivers/crypto/s5p-sss.c 	s5p_hash_write(dd, SSS_REG_HASH_PRE_MSG_SIZE_HIGH, prehigh);
dd                971 drivers/crypto/s5p-sss.c 	s5p_hash_write(dd, SSS_REG_HASH_CTRL_SWAP, swapflags);
dd                972 drivers/crypto/s5p-sss.c 	s5p_hash_write(dd, SSS_REG_HASH_CTRL, configflags);
dd                983 drivers/crypto/s5p-sss.c static int s5p_hash_xmit_dma(struct s5p_aes_dev *dd, size_t length,
dd                986 drivers/crypto/s5p-sss.c 	struct s5p_hash_reqctx *ctx = ahash_request_ctx(dd->hash_req);
dd                989 drivers/crypto/s5p-sss.c 	cnt = dma_map_sg(dd->dev, ctx->sg, ctx->sg_len, DMA_TO_DEVICE);
dd                991 drivers/crypto/s5p-sss.c 		dev_err(dd->dev, "dma_map_sg error\n");
dd                996 drivers/crypto/s5p-sss.c 	set_bit(HASH_FLAGS_DMA_ACTIVE, &dd->hash_flags);
dd                997 drivers/crypto/s5p-sss.c 	dd->hash_sg_iter = ctx->sg;
dd                998 drivers/crypto/s5p-sss.c 	dd->hash_sg_cnt = cnt;
dd                999 drivers/crypto/s5p-sss.c 	s5p_hash_write_ctrl(dd, length, final);
dd               1005 drivers/crypto/s5p-sss.c 		set_bit(HASH_FLAGS_FINAL, &dd->hash_flags);
dd               1007 drivers/crypto/s5p-sss.c 	s5p_set_dma_hashdata(dd, dd->hash_sg_iter); /* DMA starts */
dd               1035 drivers/crypto/s5p-sss.c 		dev_err(ctx->dd->dev, "alloc pages for unaligned case.\n");
dd               1041 drivers/crypto/s5p-sss.c 		memcpy(buf, ctx->dd->xmit_buf, ctx->bufcnt);
dd               1051 drivers/crypto/s5p-sss.c 	set_bit(HASH_FLAGS_SGS_COPIED, &ctx->dd->hash_flags);
dd               1093 drivers/crypto/s5p-sss.c 		sg_set_buf(tmp, ctx->dd->xmit_buf, ctx->bufcnt);
dd               1119 drivers/crypto/s5p-sss.c 	set_bit(HASH_FLAGS_SGS_ALLOCED, &ctx->dd->hash_flags);
dd               1192 drivers/crypto/s5p-sss.c 		sg_set_buf(ctx->sgl, ctx->dd->xmit_buf, ctx->bufcnt);
dd               1247 drivers/crypto/s5p-sss.c 		memcpy(ctx->dd->xmit_buf, ctx->buffer, ctx->bufcnt);
dd               1275 drivers/crypto/s5p-sss.c 			scatterwalk_map_and_copy(ctx->dd->xmit_buf, req->src,
dd               1280 drivers/crypto/s5p-sss.c 		sg_set_buf(ctx->sgl, ctx->dd->xmit_buf, xmit_len);
dd               1299 drivers/crypto/s5p-sss.c static void s5p_hash_update_dma_stop(struct s5p_aes_dev *dd)
dd               1301 drivers/crypto/s5p-sss.c 	const struct s5p_hash_reqctx *ctx = ahash_request_ctx(dd->hash_req);
dd               1303 drivers/crypto/s5p-sss.c 	dma_unmap_sg(dd->dev, ctx->sg, ctx->sg_len, DMA_TO_DEVICE);
dd               1304 drivers/crypto/s5p-sss.c 	clear_bit(HASH_FLAGS_DMA_ACTIVE, &dd->hash_flags);
dd               1314 drivers/crypto/s5p-sss.c 	struct s5p_aes_dev *dd = ctx->dd;
dd               1319 drivers/crypto/s5p-sss.c 	dev_dbg(dd->dev, "hash_finish digcnt: %lld\n", ctx->digcnt);
dd               1330 drivers/crypto/s5p-sss.c 	struct s5p_aes_dev *dd = ctx->dd;
dd               1333 drivers/crypto/s5p-sss.c 	if (test_bit(HASH_FLAGS_SGS_COPIED, &dd->hash_flags))
dd               1337 drivers/crypto/s5p-sss.c 	if (test_bit(HASH_FLAGS_SGS_ALLOCED, &dd->hash_flags))
dd               1341 drivers/crypto/s5p-sss.c 	dd->hash_flags &= ~(BIT(HASH_FLAGS_SGS_ALLOCED) |
dd               1346 drivers/crypto/s5p-sss.c 		if (test_bit(HASH_FLAGS_FINAL, &dd->hash_flags))
dd               1352 drivers/crypto/s5p-sss.c 	spin_lock_irqsave(&dd->hash_lock, flags);
dd               1353 drivers/crypto/s5p-sss.c 	dd->hash_flags &= ~(BIT(HASH_FLAGS_BUSY) | BIT(HASH_FLAGS_FINAL) |
dd               1356 drivers/crypto/s5p-sss.c 	spin_unlock_irqrestore(&dd->hash_lock, flags);
dd               1372 drivers/crypto/s5p-sss.c static int s5p_hash_handle_queue(struct s5p_aes_dev *dd,
dd               1381 drivers/crypto/s5p-sss.c 	spin_lock_irqsave(&dd->hash_lock, flags);
dd               1383 drivers/crypto/s5p-sss.c 		ret = ahash_enqueue_request(&dd->hash_queue, req);
dd               1385 drivers/crypto/s5p-sss.c 	if (test_bit(HASH_FLAGS_BUSY, &dd->hash_flags)) {
dd               1386 drivers/crypto/s5p-sss.c 		spin_unlock_irqrestore(&dd->hash_lock, flags);
dd               1390 drivers/crypto/s5p-sss.c 	backlog = crypto_get_backlog(&dd->hash_queue);
dd               1391 drivers/crypto/s5p-sss.c 	async_req = crypto_dequeue_request(&dd->hash_queue);
dd               1393 drivers/crypto/s5p-sss.c 		set_bit(HASH_FLAGS_BUSY, &dd->hash_flags);
dd               1395 drivers/crypto/s5p-sss.c 	spin_unlock_irqrestore(&dd->hash_lock, flags);
dd               1404 drivers/crypto/s5p-sss.c 	dd->hash_req = req;
dd               1411 drivers/crypto/s5p-sss.c 	dev_dbg(dd->dev, "handling new req, op_update: %u, nbytes: %d\n",
dd               1414 drivers/crypto/s5p-sss.c 	s5p_ahash_dma_init(dd, SSS_HASHIN_INDEPENDENT);
dd               1419 drivers/crypto/s5p-sss.c 		err = s5p_hash_xmit_dma(dd, ctx->total, ctx->finup);
dd               1422 drivers/crypto/s5p-sss.c 			err = s5p_hash_xmit_dma(dd, ctx->total, true);
dd               1424 drivers/crypto/s5p-sss.c 		err = s5p_hash_xmit_dma(dd, ctx->total, true);
dd               1448 drivers/crypto/s5p-sss.c 	struct s5p_aes_dev *dd = (struct s5p_aes_dev *)data;
dd               1450 drivers/crypto/s5p-sss.c 	if (!test_bit(HASH_FLAGS_BUSY, &dd->hash_flags)) {
dd               1451 drivers/crypto/s5p-sss.c 		s5p_hash_handle_queue(dd, NULL);
dd               1455 drivers/crypto/s5p-sss.c 	if (test_bit(HASH_FLAGS_DMA_READY, &dd->hash_flags)) {
dd               1457 drivers/crypto/s5p-sss.c 				       &dd->hash_flags)) {
dd               1458 drivers/crypto/s5p-sss.c 			s5p_hash_update_dma_stop(dd);
dd               1462 drivers/crypto/s5p-sss.c 				       &dd->hash_flags)) {
dd               1464 drivers/crypto/s5p-sss.c 			clear_bit(HASH_FLAGS_DMA_READY, &dd->hash_flags);
dd               1473 drivers/crypto/s5p-sss.c 	s5p_hash_finish_req(dd->hash_req, 0);
dd               1476 drivers/crypto/s5p-sss.c 	if (!test_bit(HASH_FLAGS_BUSY, &dd->hash_flags))
dd               1477 drivers/crypto/s5p-sss.c 		s5p_hash_handle_queue(dd, NULL);
dd               1494 drivers/crypto/s5p-sss.c 	return s5p_hash_handle_queue(tctx->dd, req);
dd               1630 drivers/crypto/s5p-sss.c 	ctx->dd = tctx->dd;
dd               1638 drivers/crypto/s5p-sss.c 	dev_dbg(tctx->dd->dev, "init: digest size: %d\n",
dd               1682 drivers/crypto/s5p-sss.c 	tctx->dd = s5p_dev;
dd               1752 drivers/crypto/s5p-sss.c 	ctx->dd = tctx->dd;
dd                840 drivers/dma/dma-jz4780.c 	struct dma_device *dd;
dd                914 drivers/dma/dma-jz4780.c 	dd = &jzdma->dma_device;
dd                916 drivers/dma/dma-jz4780.c 	dma_cap_set(DMA_MEMCPY, dd->cap_mask);
dd                917 drivers/dma/dma-jz4780.c 	dma_cap_set(DMA_SLAVE, dd->cap_mask);
dd                918 drivers/dma/dma-jz4780.c 	dma_cap_set(DMA_CYCLIC, dd->cap_mask);
dd                920 drivers/dma/dma-jz4780.c 	dd->dev = dev;
dd                921 drivers/dma/dma-jz4780.c 	dd->copy_align = DMAENGINE_ALIGN_4_BYTES;
dd                922 drivers/dma/dma-jz4780.c 	dd->device_alloc_chan_resources = jz4780_dma_alloc_chan_resources;
dd                923 drivers/dma/dma-jz4780.c 	dd->device_free_chan_resources = jz4780_dma_free_chan_resources;
dd                924 drivers/dma/dma-jz4780.c 	dd->device_prep_slave_sg = jz4780_dma_prep_slave_sg;
dd                925 drivers/dma/dma-jz4780.c 	dd->device_prep_dma_cyclic = jz4780_dma_prep_dma_cyclic;
dd                926 drivers/dma/dma-jz4780.c 	dd->device_prep_dma_memcpy = jz4780_dma_prep_dma_memcpy;
dd                927 drivers/dma/dma-jz4780.c 	dd->device_config = jz4780_dma_config;
dd                928 drivers/dma/dma-jz4780.c 	dd->device_terminate_all = jz4780_dma_terminate_all;
dd                929 drivers/dma/dma-jz4780.c 	dd->device_synchronize = jz4780_dma_synchronize;
dd                930 drivers/dma/dma-jz4780.c 	dd->device_tx_status = jz4780_dma_tx_status;
dd                931 drivers/dma/dma-jz4780.c 	dd->device_issue_pending = jz4780_dma_issue_pending;
dd                932 drivers/dma/dma-jz4780.c 	dd->src_addr_widths = JZ_DMA_BUSWIDTHS;
dd                933 drivers/dma/dma-jz4780.c 	dd->dst_addr_widths = JZ_DMA_BUSWIDTHS;
dd                934 drivers/dma/dma-jz4780.c 	dd->directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
dd                935 drivers/dma/dma-jz4780.c 	dd->residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
dd                948 drivers/dma/dma-jz4780.c 	INIT_LIST_HEAD(&dd->channels);
dd                954 drivers/dma/dma-jz4780.c 		vchan_init(&jzchan->vchan, dd);
dd                958 drivers/dma/dma-jz4780.c 	ret = dmaenginem_async_device_register(dd);
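
The jz4780 entries above, and the mtk-cqdma/mtk-hsdma/stm32 blocks that follow, all run the same dma_device registration recipe: set capability bits, fill the ops table and transfer constraints, init the channel list, then register. A condensed sketch (the my_* callbacks are hypothetical stand-ins for a driver's implementations):

    #include <linux/dmaengine.h>

    /* hypothetical channel callbacks, implemented elsewhere in the driver */
    extern int my_alloc_chan_resources(struct dma_chan *chan);
    extern void my_free_chan_resources(struct dma_chan *chan);
    extern struct dma_async_tx_descriptor *my_prep_slave_sg(struct dma_chan *chan,
                    struct scatterlist *sgl, unsigned int sg_len,
                    enum dma_transfer_direction dir, unsigned long flags,
                    void *context);
    extern enum dma_status my_tx_status(struct dma_chan *chan,
                    dma_cookie_t cookie, struct dma_tx_state *state);
    extern void my_issue_pending(struct dma_chan *chan);

    static int my_dma_register(struct device *dev, struct dma_device *dd)
    {
            dma_cap_set(DMA_SLAVE, dd->cap_mask);
            dma_cap_set(DMA_CYCLIC, dd->cap_mask);

            dd->dev = dev;
            dd->device_alloc_chan_resources = my_alloc_chan_resources;
            dd->device_free_chan_resources = my_free_chan_resources;
            dd->device_prep_slave_sg = my_prep_slave_sg;
            dd->device_tx_status = my_tx_status;
            dd->device_issue_pending = my_issue_pending;
            dd->src_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
            dd->dst_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
            dd->directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
            dd->residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;

            INIT_LIST_HEAD(&dd->channels);
            /* per-channel vchan_init(&chan->vchan, dd) would go here */

            return dmaenginem_async_device_register(dd);    /* managed variant */
    }
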
dd                290 drivers/dma/imx-sdma.c 	u32  dd;
dd                753 drivers/dma/mediatek/mtk-cqdma.c 	struct dma_device *dd;
dd                762 drivers/dma/mediatek/mtk-cqdma.c 	dd = &cqdma->ddev;
dd                771 drivers/dma/mediatek/mtk-cqdma.c 	dma_cap_set(DMA_MEMCPY, dd->cap_mask);
dd                773 drivers/dma/mediatek/mtk-cqdma.c 	dd->copy_align = MTK_CQDMA_ALIGN_SIZE;
dd                774 drivers/dma/mediatek/mtk-cqdma.c 	dd->device_alloc_chan_resources = mtk_cqdma_alloc_chan_resources;
dd                775 drivers/dma/mediatek/mtk-cqdma.c 	dd->device_free_chan_resources = mtk_cqdma_free_chan_resources;
dd                776 drivers/dma/mediatek/mtk-cqdma.c 	dd->device_tx_status = mtk_cqdma_tx_status;
dd                777 drivers/dma/mediatek/mtk-cqdma.c 	dd->device_issue_pending = mtk_cqdma_issue_pending;
dd                778 drivers/dma/mediatek/mtk-cqdma.c 	dd->device_prep_dma_memcpy = mtk_cqdma_prep_dma_memcpy;
dd                779 drivers/dma/mediatek/mtk-cqdma.c 	dd->device_terminate_all = mtk_cqdma_terminate_all;
dd                780 drivers/dma/mediatek/mtk-cqdma.c 	dd->src_addr_widths = MTK_CQDMA_DMA_BUSWIDTHS;
dd                781 drivers/dma/mediatek/mtk-cqdma.c 	dd->dst_addr_widths = MTK_CQDMA_DMA_BUSWIDTHS;
dd                782 drivers/dma/mediatek/mtk-cqdma.c 	dd->directions = BIT(DMA_MEM_TO_MEM);
dd                783 drivers/dma/mediatek/mtk-cqdma.c 	dd->residue_granularity = DMA_RESIDUE_GRANULARITY_SEGMENT;
dd                784 drivers/dma/mediatek/mtk-cqdma.c 	dd->dev = &pdev->dev;
dd                785 drivers/dma/mediatek/mtk-cqdma.c 	INIT_LIST_HEAD(&dd->channels);
dd                862 drivers/dma/mediatek/mtk-cqdma.c 		vchan_init(&vc->vc, dd);
dd                866 drivers/dma/mediatek/mtk-cqdma.c 	err = dma_async_device_register(dd);
dd                897 drivers/dma/mediatek/mtk-cqdma.c 	dma_async_device_unregister(dd);
dd                898 drivers/dma/mediatek/mtk-hsdma.c 	struct dma_device *dd;
dd                906 drivers/dma/mediatek/mtk-hsdma.c 	dd = &hsdma->ddev;
dd                937 drivers/dma/mediatek/mtk-hsdma.c 	dma_cap_set(DMA_MEMCPY, dd->cap_mask);
dd                939 drivers/dma/mediatek/mtk-hsdma.c 	dd->copy_align = MTK_HSDMA_ALIGN_SIZE;
dd                940 drivers/dma/mediatek/mtk-hsdma.c 	dd->device_alloc_chan_resources = mtk_hsdma_alloc_chan_resources;
dd                941 drivers/dma/mediatek/mtk-hsdma.c 	dd->device_free_chan_resources = mtk_hsdma_free_chan_resources;
dd                942 drivers/dma/mediatek/mtk-hsdma.c 	dd->device_tx_status = mtk_hsdma_tx_status;
dd                943 drivers/dma/mediatek/mtk-hsdma.c 	dd->device_issue_pending = mtk_hsdma_issue_pending;
dd                944 drivers/dma/mediatek/mtk-hsdma.c 	dd->device_prep_dma_memcpy = mtk_hsdma_prep_dma_memcpy;
dd                945 drivers/dma/mediatek/mtk-hsdma.c 	dd->device_terminate_all = mtk_hsdma_terminate_all;
dd                946 drivers/dma/mediatek/mtk-hsdma.c 	dd->src_addr_widths = MTK_HSDMA_DMA_BUSWIDTHS;
dd                947 drivers/dma/mediatek/mtk-hsdma.c 	dd->dst_addr_widths = MTK_HSDMA_DMA_BUSWIDTHS;
dd                948 drivers/dma/mediatek/mtk-hsdma.c 	dd->directions = BIT(DMA_MEM_TO_MEM);
dd                949 drivers/dma/mediatek/mtk-hsdma.c 	dd->residue_granularity = DMA_RESIDUE_GRANULARITY_SEGMENT;
dd                950 drivers/dma/mediatek/mtk-hsdma.c 	dd->dev = &pdev->dev;
dd                951 drivers/dma/mediatek/mtk-hsdma.c 	INIT_LIST_HEAD(&dd->channels);
dd                975 drivers/dma/mediatek/mtk-hsdma.c 		vchan_init(&vc->vc, dd);
dd                980 drivers/dma/mediatek/mtk-hsdma.c 	err = dma_async_device_register(dd);
dd               1010 drivers/dma/mediatek/mtk-hsdma.c 	dma_async_device_unregister(dd);
dd                278 drivers/dma/owl-dma.c static inline struct owl_dma *to_owl_dma(struct dma_device *dd)
dd                280 drivers/dma/owl-dma.c 	return container_of(dd, struct owl_dma, dma);
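
to_owl_dma() above is the standard container_of downcast: the dmaengine core only ever hands back the embedded struct dma_device, and the driver recovers its enclosing private structure from that pointer. In general form:

    #include <linux/kernel.h>
    #include <linux/dmaengine.h>

    struct my_dma {
            struct dma_device dma;  /* must be embedded, not a pointer */
            /* driver-private state follows */
    };

    static inline struct my_dma *to_my_dma(struct dma_device *dd)
    {
            return container_of(dd, struct my_dma, dma);
    }
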
dd               1275 drivers/dma/stm32-dma.c 	struct dma_device *dd;
dd               1290 drivers/dma/stm32-dma.c 	dd = &dmadev->ddev;
dd               1319 drivers/dma/stm32-dma.c 	dma_cap_set(DMA_SLAVE, dd->cap_mask);
dd               1320 drivers/dma/stm32-dma.c 	dma_cap_set(DMA_PRIVATE, dd->cap_mask);
dd               1321 drivers/dma/stm32-dma.c 	dma_cap_set(DMA_CYCLIC, dd->cap_mask);
dd               1322 drivers/dma/stm32-dma.c 	dd->device_alloc_chan_resources = stm32_dma_alloc_chan_resources;
dd               1323 drivers/dma/stm32-dma.c 	dd->device_free_chan_resources = stm32_dma_free_chan_resources;
dd               1324 drivers/dma/stm32-dma.c 	dd->device_tx_status = stm32_dma_tx_status;
dd               1325 drivers/dma/stm32-dma.c 	dd->device_issue_pending = stm32_dma_issue_pending;
dd               1326 drivers/dma/stm32-dma.c 	dd->device_prep_slave_sg = stm32_dma_prep_slave_sg;
dd               1327 drivers/dma/stm32-dma.c 	dd->device_prep_dma_cyclic = stm32_dma_prep_dma_cyclic;
dd               1328 drivers/dma/stm32-dma.c 	dd->device_config = stm32_dma_slave_config;
dd               1329 drivers/dma/stm32-dma.c 	dd->device_terminate_all = stm32_dma_terminate_all;
dd               1330 drivers/dma/stm32-dma.c 	dd->device_synchronize = stm32_dma_synchronize;
dd               1331 drivers/dma/stm32-dma.c 	dd->src_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) |
dd               1334 drivers/dma/stm32-dma.c 	dd->dst_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) |
dd               1337 drivers/dma/stm32-dma.c 	dd->directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
dd               1338 drivers/dma/stm32-dma.c 	dd->residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
dd               1339 drivers/dma/stm32-dma.c 	dd->max_burst = STM32_DMA_MAX_BURST;
dd               1340 drivers/dma/stm32-dma.c 	dd->dev = &pdev->dev;
dd               1341 drivers/dma/stm32-dma.c 	INIT_LIST_HEAD(&dd->channels);
dd               1344 drivers/dma/stm32-dma.c 		dma_cap_set(DMA_MEMCPY, dd->cap_mask);
dd               1345 drivers/dma/stm32-dma.c 		dd->device_prep_dma_memcpy = stm32_dma_prep_dma_memcpy;
dd               1346 drivers/dma/stm32-dma.c 		dd->directions |= BIT(DMA_MEM_TO_MEM);
dd               1353 drivers/dma/stm32-dma.c 		vchan_init(&chan->vchan, dd);
dd               1356 drivers/dma/stm32-dma.c 	ret = dma_async_device_register(dd);
dd               1398 drivers/dma/stm32-dma.c 	dma_async_device_unregister(dd);
dd               1532 drivers/dma/stm32-mdma.c 	struct dma_device *dd;
dd               1600 drivers/dma/stm32-mdma.c 	dd = &dmadev->ddev;
dd               1601 drivers/dma/stm32-mdma.c 	dma_cap_set(DMA_SLAVE, dd->cap_mask);
dd               1602 drivers/dma/stm32-mdma.c 	dma_cap_set(DMA_PRIVATE, dd->cap_mask);
dd               1603 drivers/dma/stm32-mdma.c 	dma_cap_set(DMA_CYCLIC, dd->cap_mask);
dd               1604 drivers/dma/stm32-mdma.c 	dma_cap_set(DMA_MEMCPY, dd->cap_mask);
dd               1605 drivers/dma/stm32-mdma.c 	dd->device_alloc_chan_resources = stm32_mdma_alloc_chan_resources;
dd               1606 drivers/dma/stm32-mdma.c 	dd->device_free_chan_resources = stm32_mdma_free_chan_resources;
dd               1607 drivers/dma/stm32-mdma.c 	dd->device_tx_status = stm32_mdma_tx_status;
dd               1608 drivers/dma/stm32-mdma.c 	dd->device_issue_pending = stm32_mdma_issue_pending;
dd               1609 drivers/dma/stm32-mdma.c 	dd->device_prep_slave_sg = stm32_mdma_prep_slave_sg;
dd               1610 drivers/dma/stm32-mdma.c 	dd->device_prep_dma_cyclic = stm32_mdma_prep_dma_cyclic;
dd               1611 drivers/dma/stm32-mdma.c 	dd->device_prep_dma_memcpy = stm32_mdma_prep_dma_memcpy;
dd               1612 drivers/dma/stm32-mdma.c 	dd->device_config = stm32_mdma_slave_config;
dd               1613 drivers/dma/stm32-mdma.c 	dd->device_pause = stm32_mdma_pause;
dd               1614 drivers/dma/stm32-mdma.c 	dd->device_resume = stm32_mdma_resume;
dd               1615 drivers/dma/stm32-mdma.c 	dd->device_terminate_all = stm32_mdma_terminate_all;
dd               1616 drivers/dma/stm32-mdma.c 	dd->device_synchronize = stm32_mdma_synchronize;
dd               1617 drivers/dma/stm32-mdma.c 	dd->src_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) |
dd               1621 drivers/dma/stm32-mdma.c 	dd->dst_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) |
dd               1625 drivers/dma/stm32-mdma.c 	dd->directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV) |
dd               1627 drivers/dma/stm32-mdma.c 	dd->residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
dd               1628 drivers/dma/stm32-mdma.c 	dd->max_burst = STM32_MDMA_MAX_BURST;
dd               1629 drivers/dma/stm32-mdma.c 	dd->dev = &pdev->dev;
dd               1630 drivers/dma/stm32-mdma.c 	INIT_LIST_HEAD(&dd->channels);
dd               1636 drivers/dma/stm32-mdma.c 		vchan_init(&chan->vchan, dd);
dd               1650 drivers/dma/stm32-mdma.c 	ret = dmaenginem_async_device_register(dd);
dd                368 drivers/edac/ie31200_edac.c static void __skl_populate_dimm_info(struct dimm_data *dd, u32 addr_decode,
dd                371 drivers/edac/ie31200_edac.c 	dd->size = (addr_decode >> (chan << 4)) & IE31200_MAD_DIMM_SIZE;
dd                372 drivers/edac/ie31200_edac.c 	dd->dual_rank = (addr_decode & (IE31200_MAD_DIMM_A_RANK_SKL << (chan << 4))) ? 1 : 0;
dd                373 drivers/edac/ie31200_edac.c 	dd->x16_width = ((addr_decode & (IE31200_MAD_DIMM_A_WIDTH_SKL << (chan << 4))) >>
dd                377 drivers/edac/ie31200_edac.c static void __populate_dimm_info(struct dimm_data *dd, u32 addr_decode,
dd                380 drivers/edac/ie31200_edac.c 	dd->size = (addr_decode >> (chan << 3)) & IE31200_MAD_DIMM_SIZE;
dd                381 drivers/edac/ie31200_edac.c 	dd->dual_rank = (addr_decode & (IE31200_MAD_DIMM_A_RANK << chan)) ? 1 : 0;
dd                382 drivers/edac/ie31200_edac.c 	dd->x16_width = (addr_decode & (IE31200_MAD_DIMM_A_WIDTH << chan)) ? 1 : 0;
dd                385 drivers/edac/ie31200_edac.c static void populate_dimm_info(struct dimm_data *dd, u32 addr_decode, int chan,
dd                389 drivers/edac/ie31200_edac.c 		__skl_populate_dimm_info(dd, addr_decode, chan);
dd                391 drivers/edac/ie31200_edac.c 		__populate_dimm_info(dd, addr_decode, chan);
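
The ie31200 decode above packs both channels' DIMM fields into one address-decode register: the Skylake layout gives each channel a 16-bit lane (hence chan << 4), the older layout an 8-bit lane (chan << 3). A small worked example of the lane arithmetic, with a hypothetical size mask standing in for IE31200_MAD_DIMM_SIZE:

    #include <stdint.h>
    #include <stdio.h>

    #define DIMM_SIZE_MASK 0xff     /* hypothetical stand-in */

    int main(void)
    {
            /* chan 1 size field = 0x10, chan 0 size field = 0x08 */
            uint32_t addr_decode = 0x00100008;
            int chan;

            for (chan = 0; chan < 2; chan++)
                    printf("chan %d size: 0x%02x\n", chan,
                           (addr_decode >> (chan << 4)) & DIMM_SIZE_MASK);
            return 0;       /* prints 0x08, then 0x10 */
    }
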
dd                361 drivers/infiniband/hw/hfi1/affinity.c static int _dev_comp_vect_cpu_get(struct hfi1_devdata *dd,
dd                368 drivers/infiniband/hw/hfi1/affinity.c 	struct cpu_mask_set *set = dd->comp_vect;
dd                405 drivers/infiniband/hw/hfi1/affinity.c static void _dev_comp_vect_cpu_put(struct hfi1_devdata *dd, int cpu)
dd                407 drivers/infiniband/hw/hfi1/affinity.c 	struct cpu_mask_set *set = dd->comp_vect;
dd                416 drivers/infiniband/hw/hfi1/affinity.c static void _dev_comp_vect_mappings_destroy(struct hfi1_devdata *dd)
dd                420 drivers/infiniband/hw/hfi1/affinity.c 	if (!dd->comp_vect_mappings)
dd                423 drivers/infiniband/hw/hfi1/affinity.c 	for (i = 0; i < dd->comp_vect_possible_cpus; i++) {
dd                424 drivers/infiniband/hw/hfi1/affinity.c 		cpu = dd->comp_vect_mappings[i];
dd                425 drivers/infiniband/hw/hfi1/affinity.c 		_dev_comp_vect_cpu_put(dd, cpu);
dd                426 drivers/infiniband/hw/hfi1/affinity.c 		dd->comp_vect_mappings[i] = -1;
dd                429 drivers/infiniband/hw/hfi1/affinity.c 			  rvt_get_ibdev_name(&(dd)->verbs_dev.rdi), cpu, i);
dd                432 drivers/infiniband/hw/hfi1/affinity.c 	kfree(dd->comp_vect_mappings);
dd                433 drivers/infiniband/hw/hfi1/affinity.c 	dd->comp_vect_mappings = NULL;
dd                440 drivers/infiniband/hw/hfi1/affinity.c static int _dev_comp_vect_mappings_create(struct hfi1_devdata *dd,
dd                458 drivers/infiniband/hw/hfi1/affinity.c 	dd->comp_vect_mappings = kcalloc(dd->comp_vect_possible_cpus,
dd                459 drivers/infiniband/hw/hfi1/affinity.c 					 sizeof(*dd->comp_vect_mappings),
dd                461 drivers/infiniband/hw/hfi1/affinity.c 	if (!dd->comp_vect_mappings) {
dd                465 drivers/infiniband/hw/hfi1/affinity.c 	for (i = 0; i < dd->comp_vect_possible_cpus; i++)
dd                466 drivers/infiniband/hw/hfi1/affinity.c 		dd->comp_vect_mappings[i] = -1;
dd                468 drivers/infiniband/hw/hfi1/affinity.c 	for (i = 0; i < dd->comp_vect_possible_cpus; i++) {
dd                469 drivers/infiniband/hw/hfi1/affinity.c 		cpu = _dev_comp_vect_cpu_get(dd, entry, non_intr_cpus,
dd                476 drivers/infiniband/hw/hfi1/affinity.c 		dd->comp_vect_mappings[i] = cpu;
dd                479 drivers/infiniband/hw/hfi1/affinity.c 			  rvt_get_ibdev_name(&(dd)->verbs_dev.rdi), i, cpu);
dd                489 drivers/infiniband/hw/hfi1/affinity.c 	_dev_comp_vect_mappings_destroy(dd);
dd                494 drivers/infiniband/hw/hfi1/affinity.c int hfi1_comp_vectors_set_up(struct hfi1_devdata *dd)
dd                500 drivers/infiniband/hw/hfi1/affinity.c 	entry = node_affinity_lookup(dd->node);
dd                505 drivers/infiniband/hw/hfi1/affinity.c 	ret = _dev_comp_vect_mappings_create(dd, entry);
dd                512 drivers/infiniband/hw/hfi1/affinity.c void hfi1_comp_vectors_clean_up(struct hfi1_devdata *dd)
dd                514 drivers/infiniband/hw/hfi1/affinity.c 	_dev_comp_vect_mappings_destroy(dd);
dd                520 drivers/infiniband/hw/hfi1/affinity.c 	struct hfi1_devdata *dd = dd_from_dev(verbs_dev);
dd                522 drivers/infiniband/hw/hfi1/affinity.c 	if (!dd->comp_vect_mappings)
dd                524 drivers/infiniband/hw/hfi1/affinity.c 	if (comp_vect >= dd->comp_vect_possible_cpus)
dd                527 drivers/infiniband/hw/hfi1/affinity.c 	return dd->comp_vect_mappings[comp_vect];
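
Note: the affinity.c entries show the lifecycle of dd->comp_vect_mappings: kcalloc a table, pre-fill every slot with -1 as the "unmapped" sentinel, populate it, unwind through the destroy path on any failure, and bounds-check lookups so a bad completion vector never indexes past the table. A small user-space model of that pattern follows; the names are illustrative, not the driver's.

#include <stdlib.h>

struct vect_map {
    int *cpu;   /* comp vector index -> cpu id, -1 = unmapped */
    int n;
};

static void vect_map_destroy(struct vect_map *m)
{
    free(m->cpu);
    m->cpu = NULL;
    m->n = 0;
}

static int vect_map_create(struct vect_map *m, int n, int (*pick_cpu)(int))
{
    int i;

    m->cpu = calloc(n, sizeof(*m->cpu));
    if (!m->cpu)
        return -1;
    m->n = n;
    for (i = 0; i < n; i++)
        m->cpu[i] = -1;              /* sentinel first, fill second */
    for (i = 0; i < n; i++) {
        int cpu = pick_cpu(i);

        if (cpu < 0) {
            vect_map_destroy(m);     /* roll back partial state */
            return -1;
        }
        m->cpu[i] = cpu;
    }
    return 0;
}

static int vect_map_lookup(const struct vect_map *m, int vect)
{
    if (!m->cpu || vect >= m->n)     /* mirrors the guards in the lookup */
        return -1;
    return m->cpu[vect];
}
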
dd                533 drivers/infiniband/hw/hfi1/affinity.c static int _dev_comp_vect_cpu_mask_init(struct hfi1_devdata *dd,
dd                540 drivers/infiniband/hw/hfi1/affinity.c 	struct cpumask *dev_comp_vect_mask = &dd->comp_vect->mask;
dd                552 drivers/infiniband/hw/hfi1/affinity.c 		dd_dev_warn(dd,
dd                557 drivers/infiniband/hw/hfi1/affinity.c 				       hfi1_per_node_cntr[dd->node];
dd                566 drivers/infiniband/hw/hfi1/affinity.c 		    hfi1_per_node_cntr[dd->node] != 0)
dd                570 drivers/infiniband/hw/hfi1/affinity.c 	dd->comp_vect_possible_cpus = possible_cpus_comp_vect;
dd                573 drivers/infiniband/hw/hfi1/affinity.c 	for (i = 0; i < dd->comp_vect_possible_cpus; i++) {
dd                584 drivers/infiniband/hw/hfi1/affinity.c 		  rvt_get_ibdev_name(&(dd)->verbs_dev.rdi),
dd                600 drivers/infiniband/hw/hfi1/affinity.c static void _dev_comp_vect_cpu_mask_clean_up(struct hfi1_devdata *dd,
dd                607 drivers/infiniband/hw/hfi1/affinity.c 	if (!dd->comp_vect_possible_cpus)
dd                610 drivers/infiniband/hw/hfi1/affinity.c 	for (i = 0; i < dd->comp_vect_possible_cpus; i++) {
dd                611 drivers/infiniband/hw/hfi1/affinity.c 		cpu = per_cpu_affinity_put_max(&dd->comp_vect->mask,
dd                615 drivers/infiniband/hw/hfi1/affinity.c 			cpumask_clear_cpu(cpu, &dd->comp_vect->mask);
dd                618 drivers/infiniband/hw/hfi1/affinity.c 	dd->comp_vect_possible_cpus = 0;
dd                632 drivers/infiniband/hw/hfi1/affinity.c int hfi1_dev_affinity_init(struct hfi1_devdata *dd)
dd                634 drivers/infiniband/hw/hfi1/affinity.c 	int node = pcibus_to_node(dd->pcidev->bus);
dd                645 drivers/infiniband/hw/hfi1/affinity.c 		dd_dev_err(dd, "Invalid PCI NUMA node. Performance may be affected\n");
dd                648 drivers/infiniband/hw/hfi1/affinity.c 	dd->node = node;
dd                650 drivers/infiniband/hw/hfi1/affinity.c 	local_mask = cpumask_of_node(dd->node);
dd                655 drivers/infiniband/hw/hfi1/affinity.c 	entry = node_affinity_lookup(dd->node);
dd                664 drivers/infiniband/hw/hfi1/affinity.c 			dd_dev_err(dd,
dd                703 drivers/infiniband/hw/hfi1/affinity.c 			     i < (dd->n_krcv_queues - 1) *
dd                704 drivers/infiniband/hw/hfi1/affinity.c 				  hfi1_per_node_cntr[dd->node];
dd                746 drivers/infiniband/hw/hfi1/affinity.c 	ret = _dev_comp_vect_cpu_mask_init(dd, entry, new_entry);
dd                764 drivers/infiniband/hw/hfi1/affinity.c void hfi1_dev_affinity_clean_up(struct hfi1_devdata *dd)
dd                768 drivers/infiniband/hw/hfi1/affinity.c 	if (dd->node < 0)
dd                772 drivers/infiniband/hw/hfi1/affinity.c 	entry = node_affinity_lookup(dd->node);
dd                780 drivers/infiniband/hw/hfi1/affinity.c 	_dev_comp_vect_cpu_mask_clean_up(dd, entry);
dd                783 drivers/infiniband/hw/hfi1/affinity.c 	dd->node = NUMA_NO_NODE;
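
Note: hfi1_dev_affinity_init() derives the device's NUMA node from its PCI bus, warns when the platform reports an invalid node, and the cleanup path resets dd->node to NUMA_NO_NODE so stale affinity state cannot leak into a reprobe. A sketch of that idiom; the fallback to node 0 is an assumption for this sketch, not necessarily the driver's choice.

#include <stdio.h>

#define NUMA_NO_NODE (-1)

struct dev {
    int node;
};

/* Hypothetical platform query standing in for pcibus_to_node(). */
static int platform_node_of_device(void)
{
    return -1;  /* pretend the platform gave us nothing useful */
}

static void dev_affinity_init(struct dev *d)
{
    int node = platform_node_of_device();

    if (node < 0) {
        fprintf(stderr,
                "Invalid PCI NUMA node. Performance may be affected\n");
        node = 0;  /* assumed fallback for this sketch */
    }
    d->node = node;
}

static void dev_affinity_clean_up(struct dev *d)
{
    d->node = NUMA_NO_NODE;  /* never leave a stale node behind */
}

int main(void)
{
    struct dev d;

    dev_affinity_init(&d);
    printf("node=%d\n", d.node);
    dev_affinity_clean_up(&d);
    return 0;
}
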
dd                794 drivers/infiniband/hw/hfi1/affinity.c 	struct hfi1_devdata *dd = sde->dd;
dd                803 drivers/infiniband/hw/hfi1/affinity.c 	entry = node_affinity_lookup(dd->node);
dd                811 drivers/infiniband/hw/hfi1/affinity.c 	dd_dev_dbg(dd, "IRQ: %u, type %s engine %u -> cpu: %d\n",
dd                823 drivers/infiniband/hw/hfi1/affinity.c 	for (i = 0; i < dd->msix_info.max_requested; i++) {
dd                826 drivers/infiniband/hw/hfi1/affinity.c 		other_msix = &dd->msix_info.msix_entries[i];
dd                885 drivers/infiniband/hw/hfi1/affinity.c static int get_irq_affinity(struct hfi1_devdata *dd,
dd                899 drivers/infiniband/hw/hfi1/affinity.c 	entry = node_affinity_lookup(dd->node);
dd                919 drivers/infiniband/hw/hfi1/affinity.c 		dd_dev_err(dd, "Invalid IRQ type %d\n", msix->type);
dd                935 drivers/infiniband/hw/hfi1/affinity.c 			dd_dev_err(dd, "Failure to obtain CPU for IRQ\n");
dd                943 drivers/infiniband/hw/hfi1/affinity.c 	dd_dev_info(dd, "IRQ: %u, type %s %s -> cpu: %d\n",
dd                956 drivers/infiniband/hw/hfi1/affinity.c int hfi1_get_irq_affinity(struct hfi1_devdata *dd, struct hfi1_msix_entry *msix)
dd                961 drivers/infiniband/hw/hfi1/affinity.c 	ret = get_irq_affinity(dd, msix);
dd                966 drivers/infiniband/hw/hfi1/affinity.c void hfi1_put_irq_affinity(struct hfi1_devdata *dd,
dd                974 drivers/infiniband/hw/hfi1/affinity.c 	entry = node_affinity_lookup(dd->node);
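
Note: get_irq_affinity() dispatches on the MSI-X entry type, rejects unknown types with "Invalid IRQ type", and reports "Failure to obtain CPU for IRQ" when no CPU can be picked. A toy model of that dispatch-plus-error-path shape; the enum values and picking policy are invented for illustration.

#include <stdio.h>

enum irq_type { IRQ_SDMA, IRQ_RCVCTXT, IRQ_GENERAL };

/* Hypothetical CPU picker; the driver pulls this from per-node masks. */
static int pick_cpu_for(enum irq_type type)
{
    return type == IRQ_GENERAL ? -1 : (int)type;  /* toy policy */
}

static int get_irq_affinity(enum irq_type type)
{
    int cpu;

    switch (type) {
    case IRQ_SDMA:
    case IRQ_RCVCTXT:
    case IRQ_GENERAL:
        break;
    default:
        fprintf(stderr, "Invalid IRQ type %d\n", type);
        return -1;
    }
    cpu = pick_cpu_for(type);
    if (cpu < 0) {
        fprintf(stderr, "Failure to obtain CPU for IRQ\n");
        return -1;
    }
    printf("IRQ: type %d -> cpu: %d\n", type, cpu);
    return 0;
}

int main(void)
{
    get_irq_affinity(IRQ_SDMA);     /* succeeds in this toy policy */
    get_irq_affinity(IRQ_GENERAL);  /* exercises the failure path  */
    return 0;
}
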
dd                 78 drivers/infiniband/hw/hfi1/affinity.h int hfi1_dev_affinity_init(struct hfi1_devdata *dd);
dd                 83 drivers/infiniband/hw/hfi1/affinity.h int hfi1_get_irq_affinity(struct hfi1_devdata *dd,
dd                 89 drivers/infiniband/hw/hfi1/affinity.h void hfi1_put_irq_affinity(struct hfi1_devdata *dd,
dd                123 drivers/infiniband/hw/hfi1/affinity.h void hfi1_dev_affinity_clean_up(struct hfi1_devdata *dd);
dd                125 drivers/infiniband/hw/hfi1/affinity.h int hfi1_comp_vectors_set_up(struct hfi1_devdata *dd);
dd                126 drivers/infiniband/hw/hfi1/affinity.h void hfi1_comp_vectors_clean_up(struct hfi1_devdata *dd);
dd                 23 drivers/infiniband/hw/hfi1/aspm.c static bool aspm_hw_l1_supported(struct hfi1_devdata *dd)
dd                 25 drivers/infiniband/hw/hfi1/aspm.c 	struct pci_dev *parent = dd->pcidev->bus->self;
dd                 35 drivers/infiniband/hw/hfi1/aspm.c 	pcie_capability_read_dword(dd->pcidev, PCI_EXP_LNKCAP, &dn);
dd                 42 drivers/infiniband/hw/hfi1/aspm.c 	return (!!dn || is_ax(dd)) && !!up;
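
Note: aspm_hw_l1_supported() reads PCI_EXP_LNKCAP from both ends of the link and only reports L1 usable when the upstream partner supports it and either the device does too or the A-step exception (is_ax) applies, per the return expression above. A model of that two-sided capability check; the bit position is assumed for the sketch.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define LNKCAP_ASPM_L1 (1u << 11)  /* assumed bit position for the sketch */

static bool l1_supported(uint32_t up_lnkcap, uint32_t dn_lnkcap, bool is_ax)
{
    bool up = up_lnkcap & LNKCAP_ASPM_L1;
    bool dn = dn_lnkcap & LNKCAP_ASPM_L1;

    /* Both sides must agree, except early (A-step) silicon, which is
     * trusted even if its own capability bit is clear. */
    return (dn || is_ax) && up;
}

int main(void)
{
    printf("%d\n", l1_supported(LNKCAP_ASPM_L1, 0, true));   /* 1 */
    printf("%d\n", l1_supported(0, LNKCAP_ASPM_L1, false));  /* 0 */
    return 0;
}
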
dd                 46 drivers/infiniband/hw/hfi1/aspm.c static void aspm_hw_set_l1_ent_latency(struct hfi1_devdata *dd)
dd                 51 drivers/infiniband/hw/hfi1/aspm.c 	pci_read_config_dword(dd->pcidev, PCIE_CFG_REG_PL3, &reg32);
dd                 54 drivers/infiniband/hw/hfi1/aspm.c 	pci_write_config_dword(dd->pcidev, PCIE_CFG_REG_PL3, reg32);
dd                 57 drivers/infiniband/hw/hfi1/aspm.c static void aspm_hw_enable_l1(struct hfi1_devdata *dd)
dd                 59 drivers/infiniband/hw/hfi1/aspm.c 	struct pci_dev *parent = dd->pcidev->bus->self;
dd                 72 drivers/infiniband/hw/hfi1/aspm.c 	pcie_capability_clear_and_set_word(dd->pcidev, PCI_EXP_LNKCTL,
dd                 77 drivers/infiniband/hw/hfi1/aspm.c void aspm_hw_disable_l1(struct hfi1_devdata *dd)
dd                 79 drivers/infiniband/hw/hfi1/aspm.c 	struct pci_dev *parent = dd->pcidev->bus->self;
dd                 82 drivers/infiniband/hw/hfi1/aspm.c 	pcie_capability_clear_and_set_word(dd->pcidev, PCI_EXP_LNKCTL,
dd                 89 drivers/infiniband/hw/hfi1/aspm.c static void aspm_enable(struct hfi1_devdata *dd)
dd                 91 drivers/infiniband/hw/hfi1/aspm.c 	if (dd->aspm_enabled || aspm_mode == ASPM_MODE_DISABLED ||
dd                 92 drivers/infiniband/hw/hfi1/aspm.c 	    !dd->aspm_supported)
dd                 95 drivers/infiniband/hw/hfi1/aspm.c 	aspm_hw_enable_l1(dd);
dd                 96 drivers/infiniband/hw/hfi1/aspm.c 	dd->aspm_enabled = true;
dd                101 drivers/infiniband/hw/hfi1/aspm.c static void aspm_disable(struct hfi1_devdata *dd)
dd                101 drivers/infiniband/hw/hfi1/aspm.c 	if (!dd->aspm_enabled || aspm_mode == ASPM_MODE_ENABLED)
dd                104 drivers/infiniband/hw/hfi1/aspm.c 	aspm_hw_disable_l1(dd);
dd                105 drivers/infiniband/hw/hfi1/aspm.c 	dd->aspm_enabled = false;
dd                108 drivers/infiniband/hw/hfi1/aspm.c static void aspm_disable_inc(struct hfi1_devdata *dd)
dd                112 drivers/infiniband/hw/hfi1/aspm.c 	spin_lock_irqsave(&dd->aspm_lock, flags);
dd                113 drivers/infiniband/hw/hfi1/aspm.c 	aspm_disable(dd);
dd                114 drivers/infiniband/hw/hfi1/aspm.c 	atomic_inc(&dd->aspm_disabled_cnt);
dd                115 drivers/infiniband/hw/hfi1/aspm.c 	spin_unlock_irqrestore(&dd->aspm_lock, flags);
dd                118 drivers/infiniband/hw/hfi1/aspm.c static void aspm_enable_dec(struct hfi1_devdata *dd)
dd                122 drivers/infiniband/hw/hfi1/aspm.c 	spin_lock_irqsave(&dd->aspm_lock, flags);
dd                123 drivers/infiniband/hw/hfi1/aspm.c 	if (atomic_dec_and_test(&dd->aspm_disabled_cnt))
dd                124 drivers/infiniband/hw/hfi1/aspm.c 		aspm_enable(dd);
dd                125 drivers/infiniband/hw/hfi1/aspm.c 	spin_unlock_irqrestore(&dd->aspm_lock, flags);
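
Note: aspm_disable_inc()/aspm_enable_dec() implement a classic refcount gate: each user bumps aspm_disabled_cnt under aspm_lock and forces L1 off; only when the last user drops the count back to zero is ASPM allowed on again. A user-space model of the same gate, with a pthread mutex standing in for the spinlock and illustrative names throughout.

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct pm_gate {
    pthread_mutex_t lock;
    int disabled_cnt;
    bool enabled;
};

static void hw_set_low_power(bool on)  /* stands in for the L1 CSR pokes */
{
    printf("low-power %s\n", on ? "on" : "off");
}

static void pm_disable_inc(struct pm_gate *g)
{
    pthread_mutex_lock(&g->lock);
    if (g->enabled) {                  /* first user turns it off */
        hw_set_low_power(false);
        g->enabled = false;
    }
    g->disabled_cnt++;
    pthread_mutex_unlock(&g->lock);
}

static void pm_enable_dec(struct pm_gate *g)
{
    pthread_mutex_lock(&g->lock);
    if (--g->disabled_cnt == 0 && !g->enabled) {
        hw_set_low_power(true);        /* last user turns it back on */
        g->enabled = true;
    }
    pthread_mutex_unlock(&g->lock);
}

int main(void)
{
    struct pm_gate g = { PTHREAD_MUTEX_INITIALIZER, 0, true };

    pm_disable_inc(&g);  /* low-power off  */
    pm_disable_inc(&g);  /* already off    */
    pm_enable_dec(&g);   /* still one user */
    pm_enable_dec(&g);   /* low-power on   */
    return 0;
}
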
dd                155 drivers/infiniband/hw/hfi1/aspm.c 		aspm_disable_inc(rcd->dd);
dd                176 drivers/infiniband/hw/hfi1/aspm.c 	aspm_enable_dec(rcd->dd);
dd                185 drivers/infiniband/hw/hfi1/aspm.c void aspm_disable_all(struct hfi1_devdata *dd)
dd                191 drivers/infiniband/hw/hfi1/aspm.c 	for (i = 0; i < dd->first_dyn_alloc_ctxt; i++) {
dd                192 drivers/infiniband/hw/hfi1/aspm.c 		rcd = hfi1_rcd_get_by_index(dd, i);
dd                202 drivers/infiniband/hw/hfi1/aspm.c 	aspm_disable(dd);
dd                203 drivers/infiniband/hw/hfi1/aspm.c 	atomic_set(&dd->aspm_disabled_cnt, 0);
dd                207 drivers/infiniband/hw/hfi1/aspm.c void aspm_enable_all(struct hfi1_devdata *dd)
dd                213 drivers/infiniband/hw/hfi1/aspm.c 	aspm_enable(dd);
dd                218 drivers/infiniband/hw/hfi1/aspm.c 	for (i = 0; i < dd->first_dyn_alloc_ctxt; i++) {
dd                219 drivers/infiniband/hw/hfi1/aspm.c 		rcd = hfi1_rcd_get_by_index(dd, i);
dd                234 drivers/infiniband/hw/hfi1/aspm.c 	rcd->aspm_intr_supported = rcd->dd->aspm_supported &&
dd                236 drivers/infiniband/hw/hfi1/aspm.c 		rcd->ctxt < rcd->dd->first_dyn_alloc_ctxt;
dd                239 drivers/infiniband/hw/hfi1/aspm.c void aspm_init(struct hfi1_devdata *dd)
dd                244 drivers/infiniband/hw/hfi1/aspm.c 	spin_lock_init(&dd->aspm_lock);
dd                245 drivers/infiniband/hw/hfi1/aspm.c 	dd->aspm_supported = aspm_hw_l1_supported(dd);
dd                247 drivers/infiniband/hw/hfi1/aspm.c 	for (i = 0; i < dd->first_dyn_alloc_ctxt; i++) {
dd                248 drivers/infiniband/hw/hfi1/aspm.c 		rcd = hfi1_rcd_get_by_index(dd, i);
dd                255 drivers/infiniband/hw/hfi1/aspm.c 	aspm_hw_set_l1_ent_latency(dd);
dd                256 drivers/infiniband/hw/hfi1/aspm.c 	dd->aspm_enabled = false;
dd                257 drivers/infiniband/hw/hfi1/aspm.c 	aspm_hw_disable_l1(dd);
dd                260 drivers/infiniband/hw/hfi1/aspm.c 	aspm_enable_all(dd);
dd                263 drivers/infiniband/hw/hfi1/aspm.c void aspm_exit(struct hfi1_devdata *dd)
dd                265 drivers/infiniband/hw/hfi1/aspm.c 	aspm_disable_all(dd);
dd                268 drivers/infiniband/hw/hfi1/aspm.c 	aspm_enable(dd);
dd                 60 drivers/infiniband/hw/hfi1/aspm.h void aspm_init(struct hfi1_devdata *dd);
dd                 61 drivers/infiniband/hw/hfi1/aspm.h void aspm_exit(struct hfi1_devdata *dd);
dd                 62 drivers/infiniband/hw/hfi1/aspm.h void aspm_hw_disable_l1(struct hfi1_devdata *dd);
dd                 64 drivers/infiniband/hw/hfi1/aspm.h void aspm_disable_all(struct hfi1_devdata *dd);
dd                 65 drivers/infiniband/hw/hfi1/aspm.h void aspm_enable_all(struct hfi1_devdata *dd);
dd                143 drivers/infiniband/hw/hfi1/chip.c #define emulator_rev(dd) ((dd)->irev >> 8)
dd                145 drivers/infiniband/hw/hfi1/chip.c #define is_emulator_p(dd) ((((dd)->irev) & 0xf) == 3)
dd                146 drivers/infiniband/hw/hfi1/chip.c #define is_emulator_s(dd) ((((dd)->irev) & 0xf) == 4)
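
Note: the chip.c macros above decode a packed revision word: the emulator revision lives in the high bits (irev >> 8) and the low nibble encodes the flavor (3 for the "p" check, 4 for the "s" check). The same packed-field decode as standalone C; the field layout is exactly what the macros state, the test value is mine.

#include <stdint.h>
#include <stdio.h>

#define emulator_rev(irev)  ((irev) >> 8)
#define is_emulator_p(irev) (((irev) & 0xf) == 3)
#define is_emulator_s(irev) (((irev) & 0xf) == 4)

int main(void)
{
    uint32_t irev = 0x2a04;  /* revision 0x2a, flavor nibble 4 */

    printf("rev=0x%x p=%d s=%d\n", emulator_rev(irev),
           is_emulator_p(irev), is_emulator_s(irev));
    return 0;
}
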
dd               1026 drivers/infiniband/hw/hfi1/chip.c static u32 chip_to_opa_lstate(struct hfi1_devdata *dd, u32 chip_lstate);
dd               1027 drivers/infiniband/hw/hfi1/chip.c static int set_physical_link_state(struct hfi1_devdata *dd, u64 state);
dd               1028 drivers/infiniband/hw/hfi1/chip.c static void read_vc_remote_phy(struct hfi1_devdata *dd, u8 *power_management,
dd               1030 drivers/infiniband/hw/hfi1/chip.c static void read_vc_remote_fabric(struct hfi1_devdata *dd, u8 *vau, u8 *z,
dd               1032 drivers/infiniband/hw/hfi1/chip.c static void read_vc_remote_link_width(struct hfi1_devdata *dd,
dd               1034 drivers/infiniband/hw/hfi1/chip.c static void read_vc_local_link_mode(struct hfi1_devdata *dd, u8 *misc_bits,
dd               1036 drivers/infiniband/hw/hfi1/chip.c static void read_remote_device_id(struct hfi1_devdata *dd, u16 *device_id,
dd               1038 drivers/infiniband/hw/hfi1/chip.c static void read_local_lni(struct hfi1_devdata *dd, u8 *enable_lane_rx);
dd               1039 drivers/infiniband/hw/hfi1/chip.c static int read_tx_settings(struct hfi1_devdata *dd, u8 *enable_lane_tx,
dd               1042 drivers/infiniband/hw/hfi1/chip.c static void handle_sdma_eng_err(struct hfi1_devdata *dd,
dd               1044 drivers/infiniband/hw/hfi1/chip.c static void handle_qsfp_int(struct hfi1_devdata *dd, u32 source, u64 reg);
dd               1045 drivers/infiniband/hw/hfi1/chip.c static void handle_dcc_err(struct hfi1_devdata *dd,
dd               1047 drivers/infiniband/hw/hfi1/chip.c static void handle_lcb_err(struct hfi1_devdata *dd,
dd               1049 drivers/infiniband/hw/hfi1/chip.c static void handle_8051_interrupt(struct hfi1_devdata *dd, u32 unused, u64 reg);
dd               1050 drivers/infiniband/hw/hfi1/chip.c static void handle_cce_err(struct hfi1_devdata *dd, u32 unused, u64 reg);
dd               1051 drivers/infiniband/hw/hfi1/chip.c static void handle_rxe_err(struct hfi1_devdata *dd, u32 unused, u64 reg);
dd               1052 drivers/infiniband/hw/hfi1/chip.c static void handle_misc_err(struct hfi1_devdata *dd, u32 unused, u64 reg);
dd               1053 drivers/infiniband/hw/hfi1/chip.c static void handle_pio_err(struct hfi1_devdata *dd, u32 unused, u64 reg);
dd               1054 drivers/infiniband/hw/hfi1/chip.c static void handle_sdma_err(struct hfi1_devdata *dd, u32 unused, u64 reg);
dd               1055 drivers/infiniband/hw/hfi1/chip.c static void handle_egress_err(struct hfi1_devdata *dd, u32 unused, u64 reg);
dd               1056 drivers/infiniband/hw/hfi1/chip.c static void handle_txe_err(struct hfi1_devdata *dd, u32 unused, u64 reg);
dd               1061 drivers/infiniband/hw/hfi1/chip.c static int do_8051_command(struct hfi1_devdata *dd, u32 type, u64 in_data,
dd               1063 drivers/infiniband/hw/hfi1/chip.c static int read_idle_sma(struct hfi1_devdata *dd, u64 *data);
dd               1064 drivers/infiniband/hw/hfi1/chip.c static int thermal_init(struct hfi1_devdata *dd);
dd               1077 drivers/infiniband/hw/hfi1/chip.c static void read_planned_down_reason_code(struct hfi1_devdata *dd, u8 *pdrrc);
dd               1078 drivers/infiniband/hw/hfi1/chip.c static void read_link_down_reason(struct hfi1_devdata *dd, u8 *ldr);
dd               1079 drivers/infiniband/hw/hfi1/chip.c static void handle_temp_err(struct hfi1_devdata *dd);
dd               1080 drivers/infiniband/hw/hfi1/chip.c static void dc_shutdown(struct hfi1_devdata *dd);
dd               1081 drivers/infiniband/hw/hfi1/chip.c static void dc_start(struct hfi1_devdata *dd);
dd               1082 drivers/infiniband/hw/hfi1/chip.c static int qos_rmt_entries(struct hfi1_devdata *dd, unsigned int *mp,
dd               1085 drivers/infiniband/hw/hfi1/chip.c static int wait_link_transfer_active(struct hfi1_devdata *dd, int wait_ms);
dd               1086 drivers/infiniband/hw/hfi1/chip.c static void clear_rsm_rule(struct hfi1_devdata *dd, u8 rule_index);
dd               1099 drivers/infiniband/hw/hfi1/chip.c 	void (*handler)(struct hfi1_devdata *dd, u32 source, u64 reg);
dd               1315 drivers/infiniband/hw/hfi1/chip.c 	const struct hfi1_devdata *dd,
dd               1318 drivers/infiniband/hw/hfi1/chip.c 	if (offset >= dd->base2_start)
dd               1319 drivers/infiniband/hw/hfi1/chip.c 		return dd->kregbase2 + (offset - dd->base2_start);
dd               1320 drivers/infiniband/hw/hfi1/chip.c 	return dd->kregbase1 + offset;
dd               1331 drivers/infiniband/hw/hfi1/chip.c u64 read_csr(const struct hfi1_devdata *dd, u32 offset)
dd               1333 drivers/infiniband/hw/hfi1/chip.c 	if (dd->flags & HFI1_PRESENT)
dd               1334 drivers/infiniband/hw/hfi1/chip.c 		return readq(hfi1_addr_from_offset(dd, offset));
dd               1344 drivers/infiniband/hw/hfi1/chip.c void write_csr(const struct hfi1_devdata *dd, u32 offset, u64 value)
dd               1346 drivers/infiniband/hw/hfi1/chip.c 	if (dd->flags & HFI1_PRESENT) {
dd               1347 drivers/infiniband/hw/hfi1/chip.c 		void __iomem *base = hfi1_addr_from_offset(dd, offset);
dd               1350 drivers/infiniband/hw/hfi1/chip.c 		if (WARN_ON(offset >= RCV_ARRAY && offset < dd->base2_start))
dd               1365 drivers/infiniband/hw/hfi1/chip.c 	const struct hfi1_devdata *dd,
dd               1368 drivers/infiniband/hw/hfi1/chip.c 	if (dd->flags & HFI1_PRESENT)
dd               1369 drivers/infiniband/hw/hfi1/chip.c 		return hfi1_addr_from_offset(dd, offset);
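
Note: read_csr()/write_csr() route a register offset through hfi1_addr_from_offset(): offsets at or above base2_start map into kregbase2, everything below into kregbase1, with an HFI1_PRESENT guard so accesses after the device is gone touch nothing (and a WARN_ON catching writes into the RCV_ARRAY window that must not use this path). A user-space model of the two-region dispatch; region sizes and the all-ones "absent" value are choices made for the sketch.

#include <stdint.h>
#include <stdio.h>

struct dev_map {
    uint8_t *base1;        /* first mapped window          */
    uint8_t *base2;        /* second mapped window         */
    uint32_t base2_start;  /* first offset served by base2 */
    int present;
};

static void *addr_from_offset(const struct dev_map *d, uint32_t off)
{
    if (off >= d->base2_start)
        return d->base2 + (off - d->base2_start);
    return d->base1 + off;
}

static uint64_t read_csr(const struct dev_map *d, uint32_t off)
{
    if (!d->present)  /* device gone: report all-ones, touch nothing */
        return ~0ull;
    return *(uint64_t *)addr_from_offset(d, off);
}

int main(void)
{
    static uint64_t lo[512], hi[512];
    struct dev_map d = { (uint8_t *)lo, (uint8_t *)hi, 4096, 1 };

    hi[1] = 0xabcd;  /* lives 8 bytes into the second window */
    printf("0x%llx\n", (unsigned long long)read_csr(&d, 4096 + 8));
    return 0;
}
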
dd               1373 drivers/infiniband/hw/hfi1/chip.c static inline u64 read_write_csr(const struct hfi1_devdata *dd, u32 csr,
dd               1379 drivers/infiniband/hw/hfi1/chip.c 		ret = read_csr(dd, csr);
dd               1381 drivers/infiniband/hw/hfi1/chip.c 		write_csr(dd, csr, value);
dd               1384 drivers/infiniband/hw/hfi1/chip.c 		dd_dev_err(dd, "Invalid cntr register access mode");
dd               1396 drivers/infiniband/hw/hfi1/chip.c 	struct hfi1_devdata *dd = context;
dd               1407 drivers/infiniband/hw/hfi1/chip.c 	return read_write_csr(dd, csr, mode, data);
dd               1413 drivers/infiniband/hw/hfi1/chip.c 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
dd               1415 drivers/infiniband/hw/hfi1/chip.c 	if (dd->per_sdma && idx < dd->num_sdma)
dd               1416 drivers/infiniband/hw/hfi1/chip.c 		return dd->per_sdma[idx].err_cnt;
dd               1423 drivers/infiniband/hw/hfi1/chip.c 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
dd               1425 drivers/infiniband/hw/hfi1/chip.c 	if (dd->per_sdma && idx < dd->num_sdma)
dd               1426 drivers/infiniband/hw/hfi1/chip.c 		return dd->per_sdma[idx].sdma_int_cnt;
dd               1433 drivers/infiniband/hw/hfi1/chip.c 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
dd               1435 drivers/infiniband/hw/hfi1/chip.c 	if (dd->per_sdma && idx < dd->num_sdma)
dd               1436 drivers/infiniband/hw/hfi1/chip.c 		return dd->per_sdma[idx].idle_int_cnt;
dd               1444 drivers/infiniband/hw/hfi1/chip.c 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
dd               1446 drivers/infiniband/hw/hfi1/chip.c 	if (dd->per_sdma && idx < dd->num_sdma)
dd               1447 drivers/infiniband/hw/hfi1/chip.c 		return dd->per_sdma[idx].progress_int_cnt;
dd               1454 drivers/infiniband/hw/hfi1/chip.c 	struct hfi1_devdata *dd = context;
dd               1468 drivers/infiniband/hw/hfi1/chip.c 	val = read_write_csr(dd, csr, mode, data);
dd               1475 drivers/infiniband/hw/hfi1/chip.c 	struct hfi1_devdata *dd = context;
dd               1482 drivers/infiniband/hw/hfi1/chip.c 		ret = read_lcb_csr(dd, csr, &data);
dd               1484 drivers/infiniband/hw/hfi1/chip.c 		ret = write_lcb_csr(dd, csr, data);
dd               1487 drivers/infiniband/hw/hfi1/chip.c 		dd_dev_err(dd, "Could not acquire LCB for counter 0x%x", csr);
dd               1503 drivers/infiniband/hw/hfi1/chip.c 	return read_write_csr(ppd->dd, entry->csr, mode, data);
dd               1521 drivers/infiniband/hw/hfi1/chip.c 	val = read_write_csr(ppd->dd, csr, mode, data);
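
Note: read_write_csr() is the common trampoline behind most counter entries that follow: one helper takes an access mode, reads the CSR for the read mode, writes it for the write mode, and logs "Invalid cntr register access mode" for anything else; per-port and per-engine wrappers just compute the CSR and delegate. A minimal sketch of the mode dispatch; the mode names are assumed and the CSR space is simulated.

#include <stdint.h>
#include <stdio.h>

enum { CNTR_MODE_R, CNTR_MODE_W };  /* assumed names for the sketch */

static uint64_t regs[16];           /* simulated CSR space */

static uint64_t read_write_csr(uint32_t csr, int mode, uint64_t value)
{
    uint64_t ret = 0;

    switch (mode) {
    case CNTR_MODE_R:
        ret = regs[csr];
        break;
    case CNTR_MODE_W:
        regs[csr] = value;
        ret = value;
        break;
    default:
        fprintf(stderr, "Invalid cntr register access mode\n");
    }
    return ret;
}

int main(void)
{
    read_write_csr(3, CNTR_MODE_W, 42);
    printf("%llu\n", (unsigned long long)read_write_csr(3, CNTR_MODE_R, 0));
    return 0;
}
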
dd               1526 drivers/infiniband/hw/hfi1/chip.c static inline u64 read_write_sw(struct hfi1_devdata *dd, u64 *cntr, int mode,
dd               1537 drivers/infiniband/hw/hfi1/chip.c 		dd_dev_err(dd, "Invalid cntr sw access mode");
dd               1553 drivers/infiniband/hw/hfi1/chip.c 	return read_write_sw(ppd->dd, &ppd->link_downed, mode, data);
dd               1563 drivers/infiniband/hw/hfi1/chip.c 	return read_write_sw(ppd->dd, &ppd->link_up, mode, data);
dd               1574 drivers/infiniband/hw/hfi1/chip.c 	return read_write_sw(ppd->dd, &ppd->unknown_frame_count, mode, data);
dd               1591 drivers/infiniband/hw/hfi1/chip.c 	return read_write_sw(ppd->dd, counter, mode, data);
dd               1603 drivers/infiniband/hw/hfi1/chip.c 	return read_write_sw(ppd->dd, &ppd->port_xmit_constraint_errors,
dd               1615 drivers/infiniband/hw/hfi1/chip.c 	return read_write_sw(ppd->dd, &ppd->port_rcv_constraint_errors,
dd               1629 drivers/infiniband/hw/hfi1/chip.c static u64 read_write_cpu(struct hfi1_devdata *dd, u64 *z_val,
dd               1645 drivers/infiniband/hw/hfi1/chip.c 			dd_dev_err(dd, "Per CPU cntrs can only be zeroed");
dd               1647 drivers/infiniband/hw/hfi1/chip.c 		dd_dev_err(dd, "Invalid cntr sw cpu access mode");
dd               1657 drivers/infiniband/hw/hfi1/chip.c 	struct hfi1_devdata *dd = context;
dd               1659 drivers/infiniband/hw/hfi1/chip.c 	return read_write_cpu(dd, &dd->z_int_counter, dd->int_counter, vl,
dd               1666 drivers/infiniband/hw/hfi1/chip.c 	struct hfi1_devdata *dd = context;
dd               1668 drivers/infiniband/hw/hfi1/chip.c 	return read_write_cpu(dd, &dd->z_rcv_limit, dd->rcv_limit, vl,
dd               1675 drivers/infiniband/hw/hfi1/chip.c 	struct hfi1_devdata *dd = context;
dd               1677 drivers/infiniband/hw/hfi1/chip.c 	return dd->verbs_dev.n_piowait;
dd               1683 drivers/infiniband/hw/hfi1/chip.c 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
dd               1685 drivers/infiniband/hw/hfi1/chip.c 	return dd->verbs_dev.n_piodrain;
dd               1691 drivers/infiniband/hw/hfi1/chip.c 	struct hfi1_devdata *dd = context;
dd               1693 drivers/infiniband/hw/hfi1/chip.c 	return dd->ctx0_seq_drop;
dd               1699 drivers/infiniband/hw/hfi1/chip.c 	struct hfi1_devdata *dd = context;
dd               1701 drivers/infiniband/hw/hfi1/chip.c 	return dd->verbs_dev.n_txwait;
dd               1707 drivers/infiniband/hw/hfi1/chip.c 	struct hfi1_devdata *dd = context;
dd               1709 drivers/infiniband/hw/hfi1/chip.c 	return dd->verbs_dev.n_kmem_wait;
dd               1715 drivers/infiniband/hw/hfi1/chip.c 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
dd               1717 drivers/infiniband/hw/hfi1/chip.c 	return read_write_cpu(dd, &dd->z_send_schedule, dd->send_schedule, vl,
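
Note: read_write_cpu() implements the zero-able per-CPU counter pattern: the hot path increments a per-CPU variable with no locking; a read sums all CPUs and subtracts a stored z_* baseline; "zeroing" just records the current sum as the new baseline (hence the message that per-CPU counters can only be zeroed, not set). A user-space model with a plain array standing in for the per-CPU storage.

#include <stdint.h>
#include <stdio.h>

#define NCPU 4

struct percpu_cntr {
    uint64_t val[NCPU];  /* per-CPU slots, written lock-free  */
    uint64_t z_val;      /* baseline recorded at last zeroing */
};

static void cntr_inc(struct percpu_cntr *c, int cpu)
{
    c->val[cpu]++;       /* hot path touches only its own slot */
}

static uint64_t cntr_read(const struct percpu_cntr *c)
{
    uint64_t sum = 0;
    int i;

    for (i = 0; i < NCPU; i++)
        sum += c->val[i];
    return sum - c->z_val;  /* logical value since last zeroing */
}

static void cntr_zero(struct percpu_cntr *c)
{
    uint64_t sum = 0;
    int i;

    for (i = 0; i < NCPU; i++)
        sum += c->val[i];
    c->z_val = sum;      /* don't clear slots; just move the baseline */
}

int main(void)
{
    struct percpu_cntr c = { {0}, 0 };

    cntr_inc(&c, 0);
    cntr_inc(&c, 2);
    printf("%llu\n", (unsigned long long)cntr_read(&c));  /* 2 */
    cntr_zero(&c);
    printf("%llu\n", (unsigned long long)cntr_read(&c));  /* 0 */
    return 0;
}
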
dd               1726 drivers/infiniband/hw/hfi1/chip.c 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
dd               1728 drivers/infiniband/hw/hfi1/chip.c 	return dd->misc_err_status_cnt[12];
dd               1735 drivers/infiniband/hw/hfi1/chip.c 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
dd               1737 drivers/infiniband/hw/hfi1/chip.c 	return dd->misc_err_status_cnt[11];
dd               1744 drivers/infiniband/hw/hfi1/chip.c 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
dd               1746 drivers/infiniband/hw/hfi1/chip.c 	return dd->misc_err_status_cnt[10];
dd               1753 drivers/infiniband/hw/hfi1/chip.c 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
dd               1755 drivers/infiniband/hw/hfi1/chip.c 	return dd->misc_err_status_cnt[9];
dd               1762 drivers/infiniband/hw/hfi1/chip.c 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
dd               1764 drivers/infiniband/hw/hfi1/chip.c 	return dd->misc_err_status_cnt[8];
dd               1771 drivers/infiniband/hw/hfi1/chip.c 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
dd               1773 drivers/infiniband/hw/hfi1/chip.c 	return dd->misc_err_status_cnt[7];
dd               1780 drivers/infiniband/hw/hfi1/chip.c 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
dd               1782 drivers/infiniband/hw/hfi1/chip.c 	return dd->misc_err_status_cnt[6];
dd               1789 drivers/infiniband/hw/hfi1/chip.c 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
dd               1791 drivers/infiniband/hw/hfi1/chip.c 	return dd->misc_err_status_cnt[5];
dd               1798 drivers/infiniband/hw/hfi1/chip.c 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
dd               1800 drivers/infiniband/hw/hfi1/chip.c 	return dd->misc_err_status_cnt[4];
dd               1807 drivers/infiniband/hw/hfi1/chip.c 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
dd               1809 drivers/infiniband/hw/hfi1/chip.c 	return dd->misc_err_status_cnt[3];
dd               1816 drivers/infiniband/hw/hfi1/chip.c 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
dd               1818 drivers/infiniband/hw/hfi1/chip.c 	return dd->misc_err_status_cnt[2];
dd               1825 drivers/infiniband/hw/hfi1/chip.c 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
dd               1827 drivers/infiniband/hw/hfi1/chip.c 	return dd->misc_err_status_cnt[1];
dd               1834 drivers/infiniband/hw/hfi1/chip.c 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
dd               1836 drivers/infiniband/hw/hfi1/chip.c 	return dd->misc_err_status_cnt[0];
dd               1847 drivers/infiniband/hw/hfi1/chip.c 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
dd               1849 drivers/infiniband/hw/hfi1/chip.c 	return dd->sw_cce_err_status_aggregate;
dd               1860 drivers/infiniband/hw/hfi1/chip.c 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
dd               1862 drivers/infiniband/hw/hfi1/chip.c 	return dd->cce_err_status_cnt[40];
dd               1869 drivers/infiniband/hw/hfi1/chip.c 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
dd               1871 drivers/infiniband/hw/hfi1/chip.c 	return dd->cce_err_status_cnt[39];
dd               1878 drivers/infiniband/hw/hfi1/chip.c 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
dd               1880 drivers/infiniband/hw/hfi1/chip.c 	return dd->cce_err_status_cnt[38];
dd               1887 drivers/infiniband/hw/hfi1/chip.c 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
dd               1889 drivers/infiniband/hw/hfi1/chip.c 	return dd->cce_err_status_cnt[37];
dd               1896 drivers/infiniband/hw/hfi1/chip.c 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
dd               1898 drivers/infiniband/hw/hfi1/chip.c 	return dd->cce_err_status_cnt[36];
dd               1905 drivers/infiniband/hw/hfi1/chip.c 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
dd               1907 drivers/infiniband/hw/hfi1/chip.c 	return dd->cce_err_status_cnt[35];
dd               1914 drivers/infiniband/hw/hfi1/chip.c 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
dd               1916 drivers/infiniband/hw/hfi1/chip.c 	return dd->cce_err_status_cnt[34];
dd               1923 drivers/infiniband/hw/hfi1/chip.c 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
dd               1925 drivers/infiniband/hw/hfi1/chip.c 	return dd->cce_err_status_cnt[33];
dd               1932 drivers/infiniband/hw/hfi1/chip.c 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
dd               1934 drivers/infiniband/hw/hfi1/chip.c 	return dd->cce_err_status_cnt[32];
dd               1940 drivers/infiniband/hw/hfi1/chip.c 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
dd               1942 drivers/infiniband/hw/hfi1/chip.c 	return dd->cce_err_status_cnt[31];
dd               1949 drivers/infiniband/hw/hfi1/chip.c 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
dd               1951 drivers/infiniband/hw/hfi1/chip.c 	return dd->cce_err_status_cnt[30];
dd               1958 drivers/infiniband/hw/hfi1/chip.c 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
dd               1960 drivers/infiniband/hw/hfi1/chip.c 	return dd->cce_err_status_cnt[29];
dd               1967 drivers/infiniband/hw/hfi1/chip.c 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
dd               1969 drivers/infiniband/hw/hfi1/chip.c 	return dd->cce_err_status_cnt[28];
dd               1976 drivers/infiniband/hw/hfi1/chip.c 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
dd               1978 drivers/infiniband/hw/hfi1/chip.c 	return dd->cce_err_status_cnt[27];
dd               1985 drivers/infiniband/hw/hfi1/chip.c 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
dd               1987 drivers/infiniband/hw/hfi1/chip.c 	return dd->cce_err_status_cnt[26];
dd               1994 drivers/infiniband/hw/hfi1/chip.c 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
dd               1996 drivers/infiniband/hw/hfi1/chip.c 	return dd->cce_err_status_cnt[25];
dd               2003 drivers/infiniband/hw/hfi1/chip.c 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
dd               2005 drivers/infiniband/hw/hfi1/chip.c 	return dd->cce_err_status_cnt[24];
dd               2012 drivers/infiniband/hw/hfi1/chip.c 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
dd               2014 drivers/infiniband/hw/hfi1/chip.c 	return dd->cce_err_status_cnt[23];
dd               2021 drivers/infiniband/hw/hfi1/chip.c 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
dd               2023 drivers/infiniband/hw/hfi1/chip.c 	return dd->cce_err_status_cnt[22];
dd               2030 drivers/infiniband/hw/hfi1/chip.c 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
dd               2032 drivers/infiniband/hw/hfi1/chip.c 	return dd->cce_err_status_cnt[21];
dd               2039 drivers/infiniband/hw/hfi1/chip.c 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
dd               2041 drivers/infiniband/hw/hfi1/chip.c 	return dd->cce_err_status_cnt[20];
dd               2048 drivers/infiniband/hw/hfi1/chip.c 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
dd               2050 drivers/infiniband/hw/hfi1/chip.c 	return dd->cce_err_status_cnt[19];
dd               2057 drivers/infiniband/hw/hfi1/chip.c 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
dd               2059 drivers/infiniband/hw/hfi1/chip.c 	return dd->cce_err_status_cnt[18];
dd               2066 drivers/infiniband/hw/hfi1/chip.c 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
dd               2068 drivers/infiniband/hw/hfi1/chip.c 	return dd->cce_err_status_cnt[17];
dd               2075 drivers/infiniband/hw/hfi1/chip.c 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
dd               2077 drivers/infiniband/hw/hfi1/chip.c 	return dd->cce_err_status_cnt[16];
dd               2084 drivers/infiniband/hw/hfi1/chip.c 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
dd               2086 drivers/infiniband/hw/hfi1/chip.c 	return dd->cce_err_status_cnt[15];
dd               2093 drivers/infiniband/hw/hfi1/chip.c 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
dd               2095 drivers/infiniband/hw/hfi1/chip.c 	return dd->cce_err_status_cnt[14];
dd               2102 drivers/infiniband/hw/hfi1/chip.c 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
dd               2104 drivers/infiniband/hw/hfi1/chip.c 	return dd->cce_err_status_cnt[13];
dd               2111 drivers/infiniband/hw/hfi1/chip.c 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
dd               2113 drivers/infiniband/hw/hfi1/chip.c 	return dd->cce_err_status_cnt[12];
dd               2120 drivers/infiniband/hw/hfi1/chip.c 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
dd               2122 drivers/infiniband/hw/hfi1/chip.c 	return dd->cce_err_status_cnt[11];
dd               2129 drivers/infiniband/hw/hfi1/chip.c 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
dd               2131 drivers/infiniband/hw/hfi1/chip.c 	return dd->cce_err_status_cnt[10];
dd               2138 drivers/infiniband/hw/hfi1/chip.c 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
dd               2140 drivers/infiniband/hw/hfi1/chip.c 	return dd->cce_err_status_cnt[9];
dd               2147 drivers/infiniband/hw/hfi1/chip.c 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
dd               2149 drivers/infiniband/hw/hfi1/chip.c 	return dd->cce_err_status_cnt[8];
dd               2156 drivers/infiniband/hw/hfi1/chip.c 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
dd               2158 drivers/infiniband/hw/hfi1/chip.c 	return dd->cce_err_status_cnt[7];
dd               2165 drivers/infiniband/hw/hfi1/chip.c 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
dd               2167 drivers/infiniband/hw/hfi1/chip.c 	return dd->cce_err_status_cnt[6];
dd               2174 drivers/infiniband/hw/hfi1/chip.c 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
dd               2176 drivers/infiniband/hw/hfi1/chip.c 	return dd->cce_err_status_cnt[5];
dd               2183 drivers/infiniband/hw/hfi1/chip.c 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
dd               2185 drivers/infiniband/hw/hfi1/chip.c 	return dd->cce_err_status_cnt[4];
dd               2192 drivers/infiniband/hw/hfi1/chip.c 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
dd               2194 drivers/infiniband/hw/hfi1/chip.c 	return dd->cce_err_status_cnt[3];
dd               2201 drivers/infiniband/hw/hfi1/chip.c 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
dd               2203 drivers/infiniband/hw/hfi1/chip.c 	return dd->cce_err_status_cnt[2];
dd               2210 drivers/infiniband/hw/hfi1/chip.c 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
dd               2212 drivers/infiniband/hw/hfi1/chip.c 	return dd->cce_err_status_cnt[1];
dd               2219 drivers/infiniband/hw/hfi1/chip.c 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
dd               2221 drivers/infiniband/hw/hfi1/chip.c 	return dd->cce_err_status_cnt[0];
dd               2232 drivers/infiniband/hw/hfi1/chip.c 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
dd               2234 drivers/infiniband/hw/hfi1/chip.c 	return dd->rcv_err_status_cnt[63];
dd               2241 drivers/infiniband/hw/hfi1/chip.c 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
dd               2243 drivers/infiniband/hw/hfi1/chip.c 	return dd->rcv_err_status_cnt[62];
dd               2250 drivers/infiniband/hw/hfi1/chip.c 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
dd               2252 drivers/infiniband/hw/hfi1/chip.c 	return dd->rcv_err_status_cnt[61];
dd               2259 drivers/infiniband/hw/hfi1/chip.c 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
dd               2261 drivers/infiniband/hw/hfi1/chip.c 	return dd->rcv_err_status_cnt[60];
dd               2268 drivers/infiniband/hw/hfi1/chip.c 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
dd               2270 drivers/infiniband/hw/hfi1/chip.c 	return dd->rcv_err_status_cnt[59];
dd               2277 drivers/infiniband/hw/hfi1/chip.c 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
dd               2279 drivers/infiniband/hw/hfi1/chip.c 	return dd->rcv_err_status_cnt[58];
dd               2286 drivers/infiniband/hw/hfi1/chip.c 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
dd               2288 drivers/infiniband/hw/hfi1/chip.c 	return dd->rcv_err_status_cnt[57];
dd               2295 drivers/infiniband/hw/hfi1/chip.c 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
dd               2297 drivers/infiniband/hw/hfi1/chip.c 	return dd->rcv_err_status_cnt[56];
dd               2304 drivers/infiniband/hw/hfi1/chip.c 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
dd               2306 drivers/infiniband/hw/hfi1/chip.c 	return dd->rcv_err_status_cnt[55];
dd               2313 drivers/infiniband/hw/hfi1/chip.c 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
dd               2315 drivers/infiniband/hw/hfi1/chip.c 	return dd->rcv_err_status_cnt[54];
dd               2322 drivers/infiniband/hw/hfi1/chip.c 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
dd               2324 drivers/infiniband/hw/hfi1/chip.c 	return dd->rcv_err_status_cnt[53];
dd               2331 drivers/infiniband/hw/hfi1/chip.c 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
dd               2333 drivers/infiniband/hw/hfi1/chip.c 	return dd->rcv_err_status_cnt[52];
dd               2340 drivers/infiniband/hw/hfi1/chip.c 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
dd               2342 drivers/infiniband/hw/hfi1/chip.c 	return dd->rcv_err_status_cnt[51];
dd               2349 drivers/infiniband/hw/hfi1/chip.c 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
dd               2351 drivers/infiniband/hw/hfi1/chip.c 	return dd->rcv_err_status_cnt[50];
dd               2358 drivers/infiniband/hw/hfi1/chip.c 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
dd               2360 drivers/infiniband/hw/hfi1/chip.c 	return dd->rcv_err_status_cnt[49];
dd               2367 drivers/infiniband/hw/hfi1/chip.c 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
dd               2369 drivers/infiniband/hw/hfi1/chip.c 	return dd->rcv_err_status_cnt[48];
dd               2376 drivers/infiniband/hw/hfi1/chip.c 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
dd               2378 drivers/infiniband/hw/hfi1/chip.c 	return dd->rcv_err_status_cnt[47];
dd               2385 drivers/infiniband/hw/hfi1/chip.c 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
dd               2387 drivers/infiniband/hw/hfi1/chip.c 	return dd->rcv_err_status_cnt[46];
dd               2394 drivers/infiniband/hw/hfi1/chip.c 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
dd               2396 drivers/infiniband/hw/hfi1/chip.c 	return dd->rcv_err_status_cnt[45];
dd               2403 drivers/infiniband/hw/hfi1/chip.c 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
dd               2405 drivers/infiniband/hw/hfi1/chip.c 	return dd->rcv_err_status_cnt[44];
dd               2412 drivers/infiniband/hw/hfi1/chip.c 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
dd               2414 drivers/infiniband/hw/hfi1/chip.c 	return dd->rcv_err_status_cnt[43];
dd               2421 drivers/infiniband/hw/hfi1/chip.c 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
dd               2423 drivers/infiniband/hw/hfi1/chip.c 	return dd->rcv_err_status_cnt[42];
dd               2430 drivers/infiniband/hw/hfi1/chip.c 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
dd               2432 drivers/infiniband/hw/hfi1/chip.c 	return dd->rcv_err_status_cnt[41];
dd               2439 drivers/infiniband/hw/hfi1/chip.c 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
dd               2441 drivers/infiniband/hw/hfi1/chip.c 	return dd->rcv_err_status_cnt[40];
dd               2448 drivers/infiniband/hw/hfi1/chip.c 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
dd               2450 drivers/infiniband/hw/hfi1/chip.c 	return dd->rcv_err_status_cnt[39];
dd               2457 drivers/infiniband/hw/hfi1/chip.c 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
dd               2459 drivers/infiniband/hw/hfi1/chip.c 	return dd->rcv_err_status_cnt[38];
dd               2466 drivers/infiniband/hw/hfi1/chip.c 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
dd               2468 drivers/infiniband/hw/hfi1/chip.c 	return dd->rcv_err_status_cnt[37];
dd               2475 drivers/infiniband/hw/hfi1/chip.c 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
dd               2477 drivers/infiniband/hw/hfi1/chip.c 	return dd->rcv_err_status_cnt[36];
dd               2484 drivers/infiniband/hw/hfi1/chip.c 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
dd               2486 drivers/infiniband/hw/hfi1/chip.c 	return dd->rcv_err_status_cnt[35];
dd               2493 drivers/infiniband/hw/hfi1/chip.c 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
dd               2495 drivers/infiniband/hw/hfi1/chip.c 	return dd->rcv_err_status_cnt[34];
dd               2502 drivers/infiniband/hw/hfi1/chip.c 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
dd               2504 drivers/infiniband/hw/hfi1/chip.c 	return dd->rcv_err_status_cnt[33];
dd               2511 drivers/infiniband/hw/hfi1/chip.c 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
dd               2513 drivers/infiniband/hw/hfi1/chip.c 	return dd->rcv_err_status_cnt[32];
dd               2520 drivers/infiniband/hw/hfi1/chip.c 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
dd               2522 drivers/infiniband/hw/hfi1/chip.c 	return dd->rcv_err_status_cnt[31];
dd               2529 drivers/infiniband/hw/hfi1/chip.c 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
dd               2531 drivers/infiniband/hw/hfi1/chip.c 	return dd->rcv_err_status_cnt[30];
dd               2538 drivers/infiniband/hw/hfi1/chip.c 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
dd               2540 drivers/infiniband/hw/hfi1/chip.c 	return dd->rcv_err_status_cnt[29];
dd               2547 drivers/infiniband/hw/hfi1/chip.c 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
dd               2549 drivers/infiniband/hw/hfi1/chip.c 	return dd->rcv_err_status_cnt[28];
dd               2556 drivers/infiniband/hw/hfi1/chip.c 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
dd               2558 drivers/infiniband/hw/hfi1/chip.c 	return dd->rcv_err_status_cnt[27];
dd               2565 drivers/infiniband/hw/hfi1/chip.c 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
dd               2567 drivers/infiniband/hw/hfi1/chip.c 	return dd->rcv_err_status_cnt[26];
dd               2574 drivers/infiniband/hw/hfi1/chip.c 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
dd               2576 drivers/infiniband/hw/hfi1/chip.c 	return dd->rcv_err_status_cnt[25];
dd               2583 drivers/infiniband/hw/hfi1/chip.c 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
dd               2585 drivers/infiniband/hw/hfi1/chip.c 	return dd->rcv_err_status_cnt[24];
dd               2592 drivers/infiniband/hw/hfi1/chip.c 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
dd               2594 drivers/infiniband/hw/hfi1/chip.c 	return dd->rcv_err_status_cnt[23];
dd               2601 drivers/infiniband/hw/hfi1/chip.c 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
dd               2603 drivers/infiniband/hw/hfi1/chip.c 	return dd->rcv_err_status_cnt[22];
dd               2610 drivers/infiniband/hw/hfi1/chip.c 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
dd               2612 drivers/infiniband/hw/hfi1/chip.c 	return dd->rcv_err_status_cnt[21];
dd               2619 drivers/infiniband/hw/hfi1/chip.c 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
dd               2621 drivers/infiniband/hw/hfi1/chip.c 	return dd->rcv_err_status_cnt[20];
dd               2628 drivers/infiniband/hw/hfi1/chip.c 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
dd               2630 drivers/infiniband/hw/hfi1/chip.c 	return dd->rcv_err_status_cnt[19];
dd               2637 drivers/infiniband/hw/hfi1/chip.c 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
dd               2639 drivers/infiniband/hw/hfi1/chip.c 	return dd->rcv_err_status_cnt[18];
dd               2646 drivers/infiniband/hw/hfi1/chip.c 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
dd               2648 drivers/infiniband/hw/hfi1/chip.c 	return dd->rcv_err_status_cnt[17];
dd               2655 drivers/infiniband/hw/hfi1/chip.c 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
dd               2657 drivers/infiniband/hw/hfi1/chip.c 	return dd->rcv_err_status_cnt[16];
dd               2664 drivers/infiniband/hw/hfi1/chip.c 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
dd               2666 drivers/infiniband/hw/hfi1/chip.c 	return dd->rcv_err_status_cnt[15];
dd               2673 drivers/infiniband/hw/hfi1/chip.c 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
dd               2675 drivers/infiniband/hw/hfi1/chip.c 	return dd->rcv_err_status_cnt[14];
dd               2682 drivers/infiniband/hw/hfi1/chip.c 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
dd               2684 drivers/infiniband/hw/hfi1/chip.c 	return dd->rcv_err_status_cnt[13];
dd               2691 drivers/infiniband/hw/hfi1/chip.c 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
dd               2693 drivers/infiniband/hw/hfi1/chip.c 	return dd->rcv_err_status_cnt[12];
dd               2700 drivers/infiniband/hw/hfi1/chip.c 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
dd               2702 drivers/infiniband/hw/hfi1/chip.c 	return dd->rcv_err_status_cnt[11];
dd               2709 drivers/infiniband/hw/hfi1/chip.c 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
dd               2711 drivers/infiniband/hw/hfi1/chip.c 	return dd->rcv_err_status_cnt[10];
dd               2718 drivers/infiniband/hw/hfi1/chip.c 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
dd               2720 drivers/infiniband/hw/hfi1/chip.c 	return dd->rcv_err_status_cnt[9];
dd               2727 drivers/infiniband/hw/hfi1/chip.c 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
dd               2729 drivers/infiniband/hw/hfi1/chip.c 	return dd->rcv_err_status_cnt[8];
dd               2736 drivers/infiniband/hw/hfi1/chip.c 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
dd               2738 drivers/infiniband/hw/hfi1/chip.c 	return dd->rcv_err_status_cnt[7];
dd               2745 drivers/infiniband/hw/hfi1/chip.c 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
dd               2747 drivers/infiniband/hw/hfi1/chip.c 	return dd->rcv_err_status_cnt[6];
dd               2754 drivers/infiniband/hw/hfi1/chip.c 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
dd               2756 drivers/infiniband/hw/hfi1/chip.c 	return dd->rcv_err_status_cnt[5];
dd               2763 drivers/infiniband/hw/hfi1/chip.c 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
dd               2765 drivers/infiniband/hw/hfi1/chip.c 	return dd->rcv_err_status_cnt[4];
dd               2772 drivers/infiniband/hw/hfi1/chip.c 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
dd               2774 drivers/infiniband/hw/hfi1/chip.c 	return dd->rcv_err_status_cnt[3];
dd               2781 drivers/infiniband/hw/hfi1/chip.c 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
dd               2783 drivers/infiniband/hw/hfi1/chip.c 	return dd->rcv_err_status_cnt[2];
dd               2790 drivers/infiniband/hw/hfi1/chip.c 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
dd               2792 drivers/infiniband/hw/hfi1/chip.c 	return dd->rcv_err_status_cnt[1];
dd               2799 drivers/infiniband/hw/hfi1/chip.c 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
dd               2801 drivers/infiniband/hw/hfi1/chip.c 	return dd->rcv_err_status_cnt[0];
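
Note: the long runs of access_*_err_cnt() helpers (CCE above, RcvErr ending here, SendPio/SendDma/SendEgress following) all share one shape: each hardware error-status bit gets its own software counter slot and a one-line accessor returning dd->..._err_status_cnt[i], so a generic counter table can call them through a uniform function-pointer signature. A sketch of generating that boilerplate with a macro instead of writing 64 copies by hand; the macro, entry names, and simplified signature are mine, not the driver's.

#include <stdint.h>
#include <stdio.h>

#define NUM_ERRS 64

struct devdata {
    uint64_t rcv_err_status_cnt[NUM_ERRS];
};

/* Stamp out one accessor per status bit; each matches the uniform
 * table signature (simplified here to take only the context). */
#define DEFINE_ERR_CNT(name, idx)                                 \
static uint64_t access_##name##_err_cnt(void *context)            \
{                                                                 \
    struct devdata *dd = context;                                 \
    return dd->rcv_err_status_cnt[idx];                           \
}

DEFINE_ERR_CNT(rx_csr_parity, 63)
DEFINE_ERR_CNT(rx_dma_csr_parity, 62)

struct cntr_entry {
    const char *name;
    uint64_t (*fn)(void *context);
};

static const struct cntr_entry table[] = {
    { "RxCsrParityErr",    access_rx_csr_parity_err_cnt },
    { "RxDmaCsrParityErr", access_rx_dma_csr_parity_err_cnt },
};

int main(void)
{
    struct devdata dd = { {0} };
    unsigned i;

    dd.rcv_err_status_cnt[63] = 5;
    for (i = 0; i < sizeof(table) / sizeof(table[0]); i++)
        printf("%s: %llu\n", table[i].name,
               (unsigned long long)table[i].fn(&dd));
    return 0;
}
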
dd               2812 drivers/infiniband/hw/hfi1/chip.c 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
dd               2814 drivers/infiniband/hw/hfi1/chip.c 	return dd->send_pio_err_status_cnt[35];
dd               2821 drivers/infiniband/hw/hfi1/chip.c 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
dd               2823 drivers/infiniband/hw/hfi1/chip.c 	return dd->send_pio_err_status_cnt[34];
dd               2830 drivers/infiniband/hw/hfi1/chip.c 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
dd               2832 drivers/infiniband/hw/hfi1/chip.c 	return dd->send_pio_err_status_cnt[33];
dd               2839 drivers/infiniband/hw/hfi1/chip.c 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
dd               2841 drivers/infiniband/hw/hfi1/chip.c 	return dd->send_pio_err_status_cnt[32];
dd               2848 drivers/infiniband/hw/hfi1/chip.c 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
dd               2850 drivers/infiniband/hw/hfi1/chip.c 	return dd->send_pio_err_status_cnt[31];
dd               2857 drivers/infiniband/hw/hfi1/chip.c 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
dd               2859 drivers/infiniband/hw/hfi1/chip.c 	return dd->send_pio_err_status_cnt[30];
dd               2866 drivers/infiniband/hw/hfi1/chip.c 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
dd               2868 drivers/infiniband/hw/hfi1/chip.c 	return dd->send_pio_err_status_cnt[29];
dd               2875 drivers/infiniband/hw/hfi1/chip.c 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
dd               2877 drivers/infiniband/hw/hfi1/chip.c 	return dd->send_pio_err_status_cnt[28];
dd               2884 drivers/infiniband/hw/hfi1/chip.c 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
dd               2886 drivers/infiniband/hw/hfi1/chip.c 	return dd->send_pio_err_status_cnt[27];
dd               2893 drivers/infiniband/hw/hfi1/chip.c 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
dd               2895 drivers/infiniband/hw/hfi1/chip.c 	return dd->send_pio_err_status_cnt[26];
dd               2902 drivers/infiniband/hw/hfi1/chip.c 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
dd               2904 drivers/infiniband/hw/hfi1/chip.c 	return dd->send_pio_err_status_cnt[25];
dd               2911 drivers/infiniband/hw/hfi1/chip.c 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
dd               2913 drivers/infiniband/hw/hfi1/chip.c 	return dd->send_pio_err_status_cnt[24];
dd               2920 drivers/infiniband/hw/hfi1/chip.c 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
dd               2922 drivers/infiniband/hw/hfi1/chip.c 	return dd->send_pio_err_status_cnt[23];
dd               2929 drivers/infiniband/hw/hfi1/chip.c 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
dd               2931 drivers/infiniband/hw/hfi1/chip.c 	return dd->send_pio_err_status_cnt[22];
dd               2938 drivers/infiniband/hw/hfi1/chip.c 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
dd               2940 drivers/infiniband/hw/hfi1/chip.c 	return dd->send_pio_err_status_cnt[21];
dd               2947 drivers/infiniband/hw/hfi1/chip.c 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
dd               2949 drivers/infiniband/hw/hfi1/chip.c 	return dd->send_pio_err_status_cnt[20];
dd               2956 drivers/infiniband/hw/hfi1/chip.c 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
dd               2958 drivers/infiniband/hw/hfi1/chip.c 	return dd->send_pio_err_status_cnt[19];
dd               2965 drivers/infiniband/hw/hfi1/chip.c 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
dd               2967 drivers/infiniband/hw/hfi1/chip.c 	return dd->send_pio_err_status_cnt[18];
dd               2974 drivers/infiniband/hw/hfi1/chip.c 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
dd               2976 drivers/infiniband/hw/hfi1/chip.c 	return dd->send_pio_err_status_cnt[17];
dd               2983 drivers/infiniband/hw/hfi1/chip.c 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
dd               2985 drivers/infiniband/hw/hfi1/chip.c 	return dd->send_pio_err_status_cnt[16];
dd               2992 drivers/infiniband/hw/hfi1/chip.c 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
dd               2994 drivers/infiniband/hw/hfi1/chip.c 	return dd->send_pio_err_status_cnt[15];
dd               3001 drivers/infiniband/hw/hfi1/chip.c 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
dd               3003 drivers/infiniband/hw/hfi1/chip.c 	return dd->send_pio_err_status_cnt[14];
dd               3010 drivers/infiniband/hw/hfi1/chip.c 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
dd               3012 drivers/infiniband/hw/hfi1/chip.c 	return dd->send_pio_err_status_cnt[13];
dd               3019 drivers/infiniband/hw/hfi1/chip.c 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
dd               3021 drivers/infiniband/hw/hfi1/chip.c 	return dd->send_pio_err_status_cnt[12];
dd               3028 drivers/infiniband/hw/hfi1/chip.c 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
dd               3030 drivers/infiniband/hw/hfi1/chip.c 	return dd->send_pio_err_status_cnt[11];
dd               3037 drivers/infiniband/hw/hfi1/chip.c 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
dd               3039 drivers/infiniband/hw/hfi1/chip.c 	return dd->send_pio_err_status_cnt[10];
dd               3046 drivers/infiniband/hw/hfi1/chip.c 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
dd               3048 drivers/infiniband/hw/hfi1/chip.c 	return dd->send_pio_err_status_cnt[9];
dd               3055 drivers/infiniband/hw/hfi1/chip.c 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
dd               3057 drivers/infiniband/hw/hfi1/chip.c 	return dd->send_pio_err_status_cnt[8];
dd               3064 drivers/infiniband/hw/hfi1/chip.c 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
dd               3066 drivers/infiniband/hw/hfi1/chip.c 	return dd->send_pio_err_status_cnt[7];
dd               3073 drivers/infiniband/hw/hfi1/chip.c 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
dd               3075 drivers/infiniband/hw/hfi1/chip.c 	return dd->send_pio_err_status_cnt[6];
dd               3082 drivers/infiniband/hw/hfi1/chip.c 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
dd               3084 drivers/infiniband/hw/hfi1/chip.c 	return dd->send_pio_err_status_cnt[5];
dd               3091 drivers/infiniband/hw/hfi1/chip.c 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
dd               3093 drivers/infiniband/hw/hfi1/chip.c 	return dd->send_pio_err_status_cnt[4];
dd               3100 drivers/infiniband/hw/hfi1/chip.c 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
dd               3102 drivers/infiniband/hw/hfi1/chip.c 	return dd->send_pio_err_status_cnt[3];
dd               3109 drivers/infiniband/hw/hfi1/chip.c 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
dd               3111 drivers/infiniband/hw/hfi1/chip.c 	return dd->send_pio_err_status_cnt[2];
dd               3118 drivers/infiniband/hw/hfi1/chip.c 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
dd               3120 drivers/infiniband/hw/hfi1/chip.c 	return dd->send_pio_err_status_cnt[1];
dd               3127 drivers/infiniband/hw/hfi1/chip.c 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
dd               3129 drivers/infiniband/hw/hfi1/chip.c 	return dd->send_pio_err_status_cnt[0];
dd               3140 drivers/infiniband/hw/hfi1/chip.c 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
dd               3142 drivers/infiniband/hw/hfi1/chip.c 	return dd->send_dma_err_status_cnt[3];
dd               3149 drivers/infiniband/hw/hfi1/chip.c 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
dd               3151 drivers/infiniband/hw/hfi1/chip.c 	return dd->send_dma_err_status_cnt[2];
dd               3158 drivers/infiniband/hw/hfi1/chip.c 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
dd               3160 drivers/infiniband/hw/hfi1/chip.c 	return dd->send_dma_err_status_cnt[1];
dd               3167 drivers/infiniband/hw/hfi1/chip.c 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
dd               3169 drivers/infiniband/hw/hfi1/chip.c 	return dd->send_dma_err_status_cnt[0];
dd               3180 drivers/infiniband/hw/hfi1/chip.c 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
dd               3182 drivers/infiniband/hw/hfi1/chip.c 	return dd->send_egress_err_status_cnt[63];
dd               3189 drivers/infiniband/hw/hfi1/chip.c 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
dd               3191 drivers/infiniband/hw/hfi1/chip.c 	return dd->send_egress_err_status_cnt[62];
dd               3198 drivers/infiniband/hw/hfi1/chip.c 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
dd               3200 drivers/infiniband/hw/hfi1/chip.c 	return dd->send_egress_err_status_cnt[61];
dd               3207 drivers/infiniband/hw/hfi1/chip.c 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
dd               3209 drivers/infiniband/hw/hfi1/chip.c 	return dd->send_egress_err_status_cnt[60];
dd               3216 drivers/infiniband/hw/hfi1/chip.c 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
dd               3218 drivers/infiniband/hw/hfi1/chip.c 	return dd->send_egress_err_status_cnt[59];
dd               3225 drivers/infiniband/hw/hfi1/chip.c 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
dd               3227 drivers/infiniband/hw/hfi1/chip.c 	return dd->send_egress_err_status_cnt[58];
dd               3234 drivers/infiniband/hw/hfi1/chip.c 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
dd               3236 drivers/infiniband/hw/hfi1/chip.c 	return dd->send_egress_err_status_cnt[57];
dd               3243 drivers/infiniband/hw/hfi1/chip.c 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
dd               3245 drivers/infiniband/hw/hfi1/chip.c 	return dd->send_egress_err_status_cnt[56];
dd               3252 drivers/infiniband/hw/hfi1/chip.c 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
dd               3254 drivers/infiniband/hw/hfi1/chip.c 	return dd->send_egress_err_status_cnt[55];
dd               3261 drivers/infiniband/hw/hfi1/chip.c 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
dd               3263 drivers/infiniband/hw/hfi1/chip.c 	return dd->send_egress_err_status_cnt[54];
dd               3270 drivers/infiniband/hw/hfi1/chip.c 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
dd               3272 drivers/infiniband/hw/hfi1/chip.c 	return dd->send_egress_err_status_cnt[53];
dd               3279 drivers/infiniband/hw/hfi1/chip.c 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
dd               3281 drivers/infiniband/hw/hfi1/chip.c 	return dd->send_egress_err_status_cnt[52];
dd               3288 drivers/infiniband/hw/hfi1/chip.c 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
dd               3290 drivers/infiniband/hw/hfi1/chip.c 	return dd->send_egress_err_status_cnt[51];
dd               3297 drivers/infiniband/hw/hfi1/chip.c 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
dd               3299 drivers/infiniband/hw/hfi1/chip.c 	return dd->send_egress_err_status_cnt[50];
dd               3306 drivers/infiniband/hw/hfi1/chip.c 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
dd               3308 drivers/infiniband/hw/hfi1/chip.c 	return dd->send_egress_err_status_cnt[49];
dd               3315 drivers/infiniband/hw/hfi1/chip.c 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
dd               3317 drivers/infiniband/hw/hfi1/chip.c 	return dd->send_egress_err_status_cnt[48];
dd               3324 drivers/infiniband/hw/hfi1/chip.c 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
dd               3326 drivers/infiniband/hw/hfi1/chip.c 	return dd->send_egress_err_status_cnt[47];
dd               3333 drivers/infiniband/hw/hfi1/chip.c 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
dd               3335 drivers/infiniband/hw/hfi1/chip.c 	return dd->send_egress_err_status_cnt[46];
dd               3342 drivers/infiniband/hw/hfi1/chip.c 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
dd               3344 drivers/infiniband/hw/hfi1/chip.c 	return dd->send_egress_err_status_cnt[45];
dd               3351 drivers/infiniband/hw/hfi1/chip.c 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
dd               3353 drivers/infiniband/hw/hfi1/chip.c 	return dd->send_egress_err_status_cnt[44];
dd               3360 drivers/infiniband/hw/hfi1/chip.c 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
dd               3362 drivers/infiniband/hw/hfi1/chip.c 	return dd->send_egress_err_status_cnt[43];
dd               3369 drivers/infiniband/hw/hfi1/chip.c 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
dd               3371 drivers/infiniband/hw/hfi1/chip.c 	return dd->send_egress_err_status_cnt[42];
dd               3378 drivers/infiniband/hw/hfi1/chip.c 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
dd               3380 drivers/infiniband/hw/hfi1/chip.c 	return dd->send_egress_err_status_cnt[41];
dd               3387 drivers/infiniband/hw/hfi1/chip.c 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
dd               3389 drivers/infiniband/hw/hfi1/chip.c 	return dd->send_egress_err_status_cnt[40];
dd               3396 drivers/infiniband/hw/hfi1/chip.c 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
dd               3398 drivers/infiniband/hw/hfi1/chip.c 	return dd->send_egress_err_status_cnt[39];
dd               3405 drivers/infiniband/hw/hfi1/chip.c 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
dd               3407 drivers/infiniband/hw/hfi1/chip.c 	return dd->send_egress_err_status_cnt[38];
dd               3414 drivers/infiniband/hw/hfi1/chip.c 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
dd               3416 drivers/infiniband/hw/hfi1/chip.c 	return dd->send_egress_err_status_cnt[37];
dd               3423 drivers/infiniband/hw/hfi1/chip.c 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
dd               3425 drivers/infiniband/hw/hfi1/chip.c 	return dd->send_egress_err_status_cnt[36];
dd               3432 drivers/infiniband/hw/hfi1/chip.c 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
dd               3434 drivers/infiniband/hw/hfi1/chip.c 	return dd->send_egress_err_status_cnt[35];
dd               3441 drivers/infiniband/hw/hfi1/chip.c 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
dd               3443 drivers/infiniband/hw/hfi1/chip.c 	return dd->send_egress_err_status_cnt[34];
dd               3450 drivers/infiniband/hw/hfi1/chip.c 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
dd               3452 drivers/infiniband/hw/hfi1/chip.c 	return dd->send_egress_err_status_cnt[33];
dd               3459 drivers/infiniband/hw/hfi1/chip.c 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
dd               3461 drivers/infiniband/hw/hfi1/chip.c 	return dd->send_egress_err_status_cnt[32];
dd               3468 drivers/infiniband/hw/hfi1/chip.c 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
dd               3470 drivers/infiniband/hw/hfi1/chip.c 	return dd->send_egress_err_status_cnt[31];
dd               3477 drivers/infiniband/hw/hfi1/chip.c 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
dd               3479 drivers/infiniband/hw/hfi1/chip.c 	return dd->send_egress_err_status_cnt[30];
dd               3486 drivers/infiniband/hw/hfi1/chip.c 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
dd               3488 drivers/infiniband/hw/hfi1/chip.c 	return dd->send_egress_err_status_cnt[29];
dd               3495 drivers/infiniband/hw/hfi1/chip.c 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
dd               3497 drivers/infiniband/hw/hfi1/chip.c 	return dd->send_egress_err_status_cnt[28];
dd               3504 drivers/infiniband/hw/hfi1/chip.c 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
dd               3506 drivers/infiniband/hw/hfi1/chip.c 	return dd->send_egress_err_status_cnt[27];
dd               3513 drivers/infiniband/hw/hfi1/chip.c 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
dd               3515 drivers/infiniband/hw/hfi1/chip.c 	return dd->send_egress_err_status_cnt[26];
dd               3522 drivers/infiniband/hw/hfi1/chip.c 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
dd               3524 drivers/infiniband/hw/hfi1/chip.c 	return dd->send_egress_err_status_cnt[25];
dd               3531 drivers/infiniband/hw/hfi1/chip.c 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
dd               3533 drivers/infiniband/hw/hfi1/chip.c 	return dd->send_egress_err_status_cnt[24];
dd               3540 drivers/infiniband/hw/hfi1/chip.c 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
dd               3542 drivers/infiniband/hw/hfi1/chip.c 	return dd->send_egress_err_status_cnt[23];
dd               3549 drivers/infiniband/hw/hfi1/chip.c 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
dd               3551 drivers/infiniband/hw/hfi1/chip.c 	return dd->send_egress_err_status_cnt[22];
dd               3558 drivers/infiniband/hw/hfi1/chip.c 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
dd               3560 drivers/infiniband/hw/hfi1/chip.c 	return dd->send_egress_err_status_cnt[21];
dd               3567 drivers/infiniband/hw/hfi1/chip.c 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
dd               3569 drivers/infiniband/hw/hfi1/chip.c 	return dd->send_egress_err_status_cnt[20];
dd               3576 drivers/infiniband/hw/hfi1/chip.c 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
dd               3578 drivers/infiniband/hw/hfi1/chip.c 	return dd->send_egress_err_status_cnt[19];
dd               3585 drivers/infiniband/hw/hfi1/chip.c 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
dd               3587 drivers/infiniband/hw/hfi1/chip.c 	return dd->send_egress_err_status_cnt[18];
dd               3594 drivers/infiniband/hw/hfi1/chip.c 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
dd               3596 drivers/infiniband/hw/hfi1/chip.c 	return dd->send_egress_err_status_cnt[17];
dd               3603 drivers/infiniband/hw/hfi1/chip.c 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
dd               3605 drivers/infiniband/hw/hfi1/chip.c 	return dd->send_egress_err_status_cnt[16];
dd               3612 drivers/infiniband/hw/hfi1/chip.c 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
dd               3614 drivers/infiniband/hw/hfi1/chip.c 	return dd->send_egress_err_status_cnt[15];
dd               3621 drivers/infiniband/hw/hfi1/chip.c 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
dd               3623 drivers/infiniband/hw/hfi1/chip.c 	return dd->send_egress_err_status_cnt[14];
dd               3630 drivers/infiniband/hw/hfi1/chip.c 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
dd               3632 drivers/infiniband/hw/hfi1/chip.c 	return dd->send_egress_err_status_cnt[13];
dd               3639 drivers/infiniband/hw/hfi1/chip.c 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
dd               3641 drivers/infiniband/hw/hfi1/chip.c 	return dd->send_egress_err_status_cnt[12];
dd               3648 drivers/infiniband/hw/hfi1/chip.c 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
dd               3650 drivers/infiniband/hw/hfi1/chip.c 	return dd->send_egress_err_status_cnt[11];
dd               3657 drivers/infiniband/hw/hfi1/chip.c 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
dd               3659 drivers/infiniband/hw/hfi1/chip.c 	return dd->send_egress_err_status_cnt[10];
dd               3666 drivers/infiniband/hw/hfi1/chip.c 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
dd               3668 drivers/infiniband/hw/hfi1/chip.c 	return dd->send_egress_err_status_cnt[9];
dd               3675 drivers/infiniband/hw/hfi1/chip.c 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
dd               3677 drivers/infiniband/hw/hfi1/chip.c 	return dd->send_egress_err_status_cnt[8];
dd               3684 drivers/infiniband/hw/hfi1/chip.c 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
dd               3686 drivers/infiniband/hw/hfi1/chip.c 	return dd->send_egress_err_status_cnt[7];
dd               3693 drivers/infiniband/hw/hfi1/chip.c 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
dd               3695 drivers/infiniband/hw/hfi1/chip.c 	return dd->send_egress_err_status_cnt[6];
dd               3702 drivers/infiniband/hw/hfi1/chip.c 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
dd               3704 drivers/infiniband/hw/hfi1/chip.c 	return dd->send_egress_err_status_cnt[5];
dd               3711 drivers/infiniband/hw/hfi1/chip.c 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
dd               3713 drivers/infiniband/hw/hfi1/chip.c 	return dd->send_egress_err_status_cnt[4];
dd               3720 drivers/infiniband/hw/hfi1/chip.c 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
dd               3722 drivers/infiniband/hw/hfi1/chip.c 	return dd->send_egress_err_status_cnt[3];
dd               3729 drivers/infiniband/hw/hfi1/chip.c 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
dd               3731 drivers/infiniband/hw/hfi1/chip.c 	return dd->send_egress_err_status_cnt[2];
dd               3738 drivers/infiniband/hw/hfi1/chip.c 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
dd               3740 drivers/infiniband/hw/hfi1/chip.c 	return dd->send_egress_err_status_cnt[1];
dd               3747 drivers/infiniband/hw/hfi1/chip.c 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
dd               3749 drivers/infiniband/hw/hfi1/chip.c 	return dd->send_egress_err_status_cnt[0];
dd               3760 drivers/infiniband/hw/hfi1/chip.c 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
dd               3762 drivers/infiniband/hw/hfi1/chip.c 	return dd->send_err_status_cnt[2];
dd               3769 drivers/infiniband/hw/hfi1/chip.c 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
dd               3771 drivers/infiniband/hw/hfi1/chip.c 	return dd->send_err_status_cnt[1];
dd               3778 drivers/infiniband/hw/hfi1/chip.c 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
dd               3780 drivers/infiniband/hw/hfi1/chip.c 	return dd->send_err_status_cnt[0];
dd               3791 drivers/infiniband/hw/hfi1/chip.c 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
dd               3793 drivers/infiniband/hw/hfi1/chip.c 	return dd->sw_ctxt_err_status_cnt[4];
dd               3800 drivers/infiniband/hw/hfi1/chip.c 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
dd               3802 drivers/infiniband/hw/hfi1/chip.c 	return dd->sw_ctxt_err_status_cnt[3];
dd               3809 drivers/infiniband/hw/hfi1/chip.c 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
dd               3811 drivers/infiniband/hw/hfi1/chip.c 	return dd->sw_ctxt_err_status_cnt[2];
dd               3818 drivers/infiniband/hw/hfi1/chip.c 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
dd               3820 drivers/infiniband/hw/hfi1/chip.c 	return dd->sw_ctxt_err_status_cnt[1];
dd               3827 drivers/infiniband/hw/hfi1/chip.c 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
dd               3829 drivers/infiniband/hw/hfi1/chip.c 	return dd->sw_ctxt_err_status_cnt[0];
dd               3840 drivers/infiniband/hw/hfi1/chip.c 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
dd               3842 drivers/infiniband/hw/hfi1/chip.c 	return dd->sw_send_dma_eng_err_status_cnt[23];
dd               3849 drivers/infiniband/hw/hfi1/chip.c 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
dd               3851 drivers/infiniband/hw/hfi1/chip.c 	return dd->sw_send_dma_eng_err_status_cnt[22];
dd               3858 drivers/infiniband/hw/hfi1/chip.c 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
dd               3860 drivers/infiniband/hw/hfi1/chip.c 	return dd->sw_send_dma_eng_err_status_cnt[21];
dd               3867 drivers/infiniband/hw/hfi1/chip.c 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
dd               3869 drivers/infiniband/hw/hfi1/chip.c 	return dd->sw_send_dma_eng_err_status_cnt[20];
dd               3876 drivers/infiniband/hw/hfi1/chip.c 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
dd               3878 drivers/infiniband/hw/hfi1/chip.c 	return dd->sw_send_dma_eng_err_status_cnt[19];
dd               3885 drivers/infiniband/hw/hfi1/chip.c 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
dd               3887 drivers/infiniband/hw/hfi1/chip.c 	return dd->sw_send_dma_eng_err_status_cnt[18];
dd               3894 drivers/infiniband/hw/hfi1/chip.c 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
dd               3896 drivers/infiniband/hw/hfi1/chip.c 	return dd->sw_send_dma_eng_err_status_cnt[17];
dd               3903 drivers/infiniband/hw/hfi1/chip.c 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
dd               3905 drivers/infiniband/hw/hfi1/chip.c 	return dd->sw_send_dma_eng_err_status_cnt[16];
dd               3912 drivers/infiniband/hw/hfi1/chip.c 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
dd               3914 drivers/infiniband/hw/hfi1/chip.c 	return dd->sw_send_dma_eng_err_status_cnt[15];
dd               3921 drivers/infiniband/hw/hfi1/chip.c 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
dd               3923 drivers/infiniband/hw/hfi1/chip.c 	return dd->sw_send_dma_eng_err_status_cnt[14];
dd               3930 drivers/infiniband/hw/hfi1/chip.c 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
dd               3932 drivers/infiniband/hw/hfi1/chip.c 	return dd->sw_send_dma_eng_err_status_cnt[13];
dd               3939 drivers/infiniband/hw/hfi1/chip.c 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
dd               3941 drivers/infiniband/hw/hfi1/chip.c 	return dd->sw_send_dma_eng_err_status_cnt[12];
dd               3948 drivers/infiniband/hw/hfi1/chip.c 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
dd               3950 drivers/infiniband/hw/hfi1/chip.c 	return dd->sw_send_dma_eng_err_status_cnt[11];
dd               3957 drivers/infiniband/hw/hfi1/chip.c 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
dd               3959 drivers/infiniband/hw/hfi1/chip.c 	return dd->sw_send_dma_eng_err_status_cnt[10];
dd               3966 drivers/infiniband/hw/hfi1/chip.c 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
dd               3968 drivers/infiniband/hw/hfi1/chip.c 	return dd->sw_send_dma_eng_err_status_cnt[9];
dd               3975 drivers/infiniband/hw/hfi1/chip.c 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
dd               3977 drivers/infiniband/hw/hfi1/chip.c 	return dd->sw_send_dma_eng_err_status_cnt[8];
dd               3984 drivers/infiniband/hw/hfi1/chip.c 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
dd               3986 drivers/infiniband/hw/hfi1/chip.c 	return dd->sw_send_dma_eng_err_status_cnt[7];
dd               3992 drivers/infiniband/hw/hfi1/chip.c 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
dd               3994 drivers/infiniband/hw/hfi1/chip.c 	return dd->sw_send_dma_eng_err_status_cnt[6];
dd               4001 drivers/infiniband/hw/hfi1/chip.c 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
dd               4003 drivers/infiniband/hw/hfi1/chip.c 	return dd->sw_send_dma_eng_err_status_cnt[5];
dd               4010 drivers/infiniband/hw/hfi1/chip.c 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
dd               4012 drivers/infiniband/hw/hfi1/chip.c 	return dd->sw_send_dma_eng_err_status_cnt[4];
dd               4019 drivers/infiniband/hw/hfi1/chip.c 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
dd               4021 drivers/infiniband/hw/hfi1/chip.c 	return dd->sw_send_dma_eng_err_status_cnt[3];
dd               4028 drivers/infiniband/hw/hfi1/chip.c 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
dd               4030 drivers/infiniband/hw/hfi1/chip.c 	return dd->sw_send_dma_eng_err_status_cnt[2];
dd               4037 drivers/infiniband/hw/hfi1/chip.c 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
dd               4039 drivers/infiniband/hw/hfi1/chip.c 	return dd->sw_send_dma_eng_err_status_cnt[1];
dd               4046 drivers/infiniband/hw/hfi1/chip.c 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
dd               4048 drivers/infiniband/hw/hfi1/chip.c 	return dd->sw_send_dma_eng_err_status_cnt[0];
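
The long run of excerpts above is the tail of a family of near-identical per-bit counter accessors: each one casts the opaque context pointer back to struct hfi1_devdata and returns a single slot of a per-error-bit u64 array (send_pio, send_dma, send_egress, send, sw_ctxt, sw_send_dma_eng). A minimal user-space sketch of that shared shape, with hypothetical devdata/cntr_entry types standing in for the driver's:

#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-ins for the driver's device data and counter entry. */
struct devdata {
	uint64_t send_pio_err_status_cnt[36];
};

struct cntr_entry {
	const char *name;
	int idx;			/* which status bit this entry tracks */
};

/* One generic accessor replaces the per-bit copies: cast the opaque
 * context back to the device data, index by the entry's bit number. */
static uint64_t access_send_pio_err_cnt(const struct cntr_entry *entry,
					void *context)
{
	struct devdata *dd = context;

	return dd->send_pio_err_status_cnt[entry->idx];
}

int main(void)
{
	struct devdata dd = { .send_pio_err_status_cnt = { [10] = 42 } };
	struct cntr_entry e = { .name = "pio_err_bit10", .idx = 10 };

	printf("%s = %llu\n", e.name,
	       (unsigned long long)access_send_pio_err_cnt(&e, &dd));
	return 0;
}

The driver spells out one named function per bit so its counter table can hold plain function pointers next to counter names; the sketch collapses them into a single entry-driven accessor to make the pattern visible.
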
dd               4055 drivers/infiniband/hw/hfi1/chip.c 	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
dd               4060 drivers/infiniband/hw/hfi1/chip.c 	val = read_write_csr(dd, csr, mode, data);
dd               4062 drivers/infiniband/hw/hfi1/chip.c 		val = val > CNTR_MAX - dd->sw_rcv_bypass_packet_errors ?
dd               4063 drivers/infiniband/hw/hfi1/chip.c 			CNTR_MAX : val + dd->sw_rcv_bypass_packet_errors;
dd               4065 drivers/infiniband/hw/hfi1/chip.c 		dd->sw_rcv_bypass_packet_errors = 0;
dd               4067 drivers/infiniband/hw/hfi1/chip.c 		dd_dev_err(dd, "Invalid cntr register access mode");
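
The counter access routine above folds a software-kept bypass packet-error count into the value read from the CSR, clamping at CNTR_MAX instead of wrapping, and zeroes the software count on a write. A standalone sketch of that overflow-safe addition (CNTR_MAX is assumed here to be the counter's all-ones limit):

#include <assert.h>
#include <stdint.h>

#define CNTR_MAX 0xFFFFFFFFFFFFFFFFull

/* Add 'extra' to 'val', clamping at CNTR_MAX instead of wrapping.
 * Comparing against CNTR_MAX - extra means the addition taken on the
 * other branch can never overflow. */
static uint64_t saturating_add(uint64_t val, uint64_t extra)
{
	return val > CNTR_MAX - extra ? CNTR_MAX : val + extra;
}

int main(void)
{
	assert(saturating_add(10, 5) == 15);
	assert(saturating_add(CNTR_MAX - 1, 5) == CNTR_MAX);
	assert(saturating_add(CNTR_MAX, 0) == CNTR_MAX);
	return 0;
}
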
dd               4078 drivers/infiniband/hw/hfi1/chip.c 	return read_write_cpu(ppd->dd, &ppd->ibport_data.rvp.z_ ##cntr,	      \
dd               4096 drivers/infiniband/hw/hfi1/chip.c 	return read_write_sw(ppd->dd, &ppd->ibport_data.rvp.n_ ##cntr,	      \
dd               5226 drivers/infiniband/hw/hfi1/chip.c int is_ax(struct hfi1_devdata *dd)
dd               5229 drivers/infiniband/hw/hfi1/chip.c 		dd->revision >> CCE_REVISION_CHIP_REV_MINOR_SHIFT
dd               5235 drivers/infiniband/hw/hfi1/chip.c int is_bx(struct hfi1_devdata *dd)
dd               5238 drivers/infiniband/hw/hfi1/chip.c 		dd->revision >> CCE_REVISION_CHIP_REV_MINOR_SHIFT
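
is_ax()/is_bx() above classify the chip stepping by pulling the minor revision field out of the CCE_REVISION value with a shift and mask. A self-contained sketch of that field extraction; the shift, mask, and the assumption that the A-step encodes 0x00 and the B-step 0x10 in the minor field are illustrative stand-ins, not the chip's real constants:

#include <stdint.h>
#include <stdio.h>

/* Illustrative field layout; the real shift/mask come from the chip's
 * CCE_REVISION register definition. */
#define CHIP_REV_MINOR_SHIFT	8
#define CHIP_REV_MINOR_MASK	0xFFull

static unsigned int chip_rev_minor(uint64_t revision)
{
	return (revision >> CHIP_REV_MINOR_SHIFT) & CHIP_REV_MINOR_MASK;
}

/* Assumed encoding for the sketch: A-step minor 0x00, B-step 0x10. */
static int is_ax(uint64_t revision) { return chip_rev_minor(revision) == 0x00; }
static int is_bx(uint64_t revision) { return chip_rev_minor(revision) == 0x10; }

int main(void)
{
	uint64_t rev = 0x10ull << CHIP_REV_MINOR_SHIFT;

	printf("is_ax=%d is_bx=%d\n", is_ax(rev), is_bx(rev));
	return 0;
}
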
dd               5250 drivers/infiniband/hw/hfi1/chip.c 	mask = read_csr(rcd->dd, CCE_INT_MASK + (8 * (is / 64)));
dd               5538 drivers/infiniband/hw/hfi1/chip.c static void handle_cce_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
dd               5547 drivers/infiniband/hw/hfi1/chip.c 	dd_dev_info(dd, "CCE Error: %s\n",
dd               5551 drivers/infiniband/hw/hfi1/chip.c 	    is_ax(dd) && (dd->icode != ICODE_FUNCTIONAL_SIMULATOR)) {
dd               5554 drivers/infiniband/hw/hfi1/chip.c 		start_freeze_handling(dd->pport, FREEZE_SELF);
dd               5559 drivers/infiniband/hw/hfi1/chip.c 			incr_cntr64(&dd->cce_err_status_cnt[i]);
dd               5561 drivers/infiniband/hw/hfi1/chip.c 			incr_cntr64(&dd->sw_cce_err_status_aggregate);
dd               5573 drivers/infiniband/hw/hfi1/chip.c 	struct hfi1_devdata *dd = from_timer(dd, t, rcverr_timer);
dd               5574 drivers/infiniband/hw/hfi1/chip.c 	struct hfi1_pportdata *ppd = dd->pport;
dd               5575 drivers/infiniband/hw/hfi1/chip.c 	u32 cur_ovfl_cnt = read_dev_cntr(dd, C_RCV_OVF, CNTR_INVALID_VL);
dd               5577 drivers/infiniband/hw/hfi1/chip.c 	if (dd->rcv_ovfl_cnt < cur_ovfl_cnt &&
dd               5579 drivers/infiniband/hw/hfi1/chip.c 		dd_dev_info(dd, "%s: PortErrorAction bounce\n", __func__);
dd               5585 drivers/infiniband/hw/hfi1/chip.c 	dd->rcv_ovfl_cnt = (u32)cur_ovfl_cnt;
dd               5587 drivers/infiniband/hw/hfi1/chip.c 	mod_timer(&dd->rcverr_timer, jiffies + HZ * RCVERR_CHECK_TIME);
dd               5590 drivers/infiniband/hw/hfi1/chip.c static int init_rcverr(struct hfi1_devdata *dd)
dd               5592 drivers/infiniband/hw/hfi1/chip.c 	timer_setup(&dd->rcverr_timer, update_rcverr_timer, 0);
dd               5594 drivers/infiniband/hw/hfi1/chip.c 	dd->rcv_ovfl_cnt = 0;
dd               5595 drivers/infiniband/hw/hfi1/chip.c 	return mod_timer(&dd->rcverr_timer, jiffies + HZ * RCVERR_CHECK_TIME);
dd               5598 drivers/infiniband/hw/hfi1/chip.c static void free_rcverr(struct hfi1_devdata *dd)
dd               5600 drivers/infiniband/hw/hfi1/chip.c 	if (dd->rcverr_timer.function)
dd               5601 drivers/infiniband/hw/hfi1/chip.c 		del_timer_sync(&dd->rcverr_timer);
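
update_rcverr_timer()/init_rcverr()/free_rcverr() above implement a periodic-poll pattern: every RCVERR_CHECK_TIME seconds the timer samples the receive-overflow counter, acts only when it has advanced since the previous sample, resyncs the snapshot, and re-arms itself; teardown uses del_timer_sync() so the handler cannot still be running afterwards. A user-space sketch of just the advanced-since-last-poll test:

#include <stdint.h>
#include <stdio.h>

/* Snapshot of the receive-overflow counter from the previous poll. */
struct rcverr_state {
	uint32_t last_ovfl_cnt;
};

/* Nonzero when the counter advanced since the previous poll, mirroring
 * the rcv_ovfl_cnt < cur_ovfl_cnt test in the timer; the snapshot is
 * resynced either way so one burst is reported once. */
static int overflowed_since_last_poll(struct rcverr_state *st,
				      uint32_t cur_ovfl_cnt)
{
	int bumped = st->last_ovfl_cnt < cur_ovfl_cnt;

	st->last_ovfl_cnt = cur_ovfl_cnt;
	return bumped;
}

int main(void)
{
	struct rcverr_state st = { 0 };

	printf("%d\n", overflowed_since_last_poll(&st, 0));	/* 0 */
	printf("%d\n", overflowed_since_last_poll(&st, 3));	/* 1 */
	printf("%d\n", overflowed_since_last_poll(&st, 3));	/* 0 */
	return 0;
}
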
dd               5604 drivers/infiniband/hw/hfi1/chip.c static void handle_rxe_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
dd               5609 drivers/infiniband/hw/hfi1/chip.c 	dd_dev_info(dd, "Receive Error: %s\n",
dd               5619 drivers/infiniband/hw/hfi1/chip.c 		if (is_ax(dd) && (reg & RXE_FREEZE_ABORT_MASK))
dd               5622 drivers/infiniband/hw/hfi1/chip.c 		start_freeze_handling(dd->pport, flags);
dd               5627 drivers/infiniband/hw/hfi1/chip.c 			incr_cntr64(&dd->rcv_err_status_cnt[i]);
dd               5631 drivers/infiniband/hw/hfi1/chip.c static void handle_misc_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
dd               5636 drivers/infiniband/hw/hfi1/chip.c 	dd_dev_info(dd, "Misc Error: %s",
dd               5640 drivers/infiniband/hw/hfi1/chip.c 			incr_cntr64(&dd->misc_err_status_cnt[i]);
dd               5644 drivers/infiniband/hw/hfi1/chip.c static void handle_pio_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
dd               5649 drivers/infiniband/hw/hfi1/chip.c 	dd_dev_info(dd, "PIO Error: %s\n",
dd               5653 drivers/infiniband/hw/hfi1/chip.c 		start_freeze_handling(dd->pport, 0);
dd               5657 drivers/infiniband/hw/hfi1/chip.c 			incr_cntr64(&dd->send_pio_err_status_cnt[i]);
dd               5661 drivers/infiniband/hw/hfi1/chip.c static void handle_sdma_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
dd               5666 drivers/infiniband/hw/hfi1/chip.c 	dd_dev_info(dd, "SDMA Error: %s\n",
dd               5670 drivers/infiniband/hw/hfi1/chip.c 		start_freeze_handling(dd->pport, 0);
dd               5674 drivers/infiniband/hw/hfi1/chip.c 			incr_cntr64(&dd->send_dma_err_status_cnt[i]);
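
handle_rxe_err(), handle_misc_err(), handle_pio_err() and handle_sdma_err() above all end the same way: after logging the decoded status string (and starting freeze handling where required), they walk the just-read status register bit by bit and bump one 64-bit counter per set bit. A minimal sketch of that scatter loop:

#include <stdint.h>
#include <stdio.h>

#define NUM_ERR_BITS 64

/* Scatter a just-read error-status register into per-bit counters:
 * the loop each handle_*_err() above ends with. */
static void count_status_bits(uint64_t reg, uint64_t cnt[NUM_ERR_BITS])
{
	for (int i = 0; i < NUM_ERR_BITS; i++)
		if (reg & (1ull << i))
			cnt[i]++;
}

int main(void)
{
	uint64_t cnt[NUM_ERR_BITS] = { 0 };

	count_status_bits((1ull << 3) | (1ull << 40), cnt);
	printf("bit3=%llu bit40=%llu\n",
	       (unsigned long long)cnt[3], (unsigned long long)cnt[40]);
	return 0;
}
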
dd               5683 drivers/infiniband/hw/hfi1/chip.c static void count_port_inactive(struct hfi1_devdata *dd)
dd               5685 drivers/infiniband/hw/hfi1/chip.c 	__count_port_discards(dd->pport);
dd               5697 drivers/infiniband/hw/hfi1/chip.c static void handle_send_egress_err_info(struct hfi1_devdata *dd,
dd               5700 drivers/infiniband/hw/hfi1/chip.c 	struct hfi1_pportdata *ppd = dd->pport;
dd               5701 drivers/infiniband/hw/hfi1/chip.c 	u64 src = read_csr(dd, SEND_EGRESS_ERR_SOURCE); /* read first */
dd               5702 drivers/infiniband/hw/hfi1/chip.c 	u64 info = read_csr(dd, SEND_EGRESS_ERR_INFO);
dd               5706 drivers/infiniband/hw/hfi1/chip.c 	write_csr(dd, SEND_EGRESS_ERR_INFO, info);
dd               5708 drivers/infiniband/hw/hfi1/chip.c 	dd_dev_info(dd,
dd               5784 drivers/infiniband/hw/hfi1/chip.c static int engine_to_vl(struct hfi1_devdata *dd, int engine)
dd               5794 drivers/infiniband/hw/hfi1/chip.c 	m = rcu_dereference(dd->sdma_map);
dd               5805 drivers/infiniband/hw/hfi1/chip.c static int sc_to_vl(struct hfi1_devdata *dd, int sw_index)
dd               5811 drivers/infiniband/hw/hfi1/chip.c 	sci = &dd->send_contexts[sw_index];
dd               5820 drivers/infiniband/hw/hfi1/chip.c 	if (dd->vld[15].sc == sc)
dd               5823 drivers/infiniband/hw/hfi1/chip.c 		if (dd->vld[i].sc == sc)
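
engine_to_vl() and sc_to_vl() above translate hardware identities back into a VL: the first via an RCU-protected sdma map, the second by checking the management VL15 context first and then scanning the per-VL table, with -1 meaning the context carries no VL traffic. A simplified user-space model of the sc_to_vl() lookup (table layout hypothetical):

#include <stdio.h>

#define NUM_VLS 8

struct send_context { int id; };

/* Simplified sc_to_vl(): management VL15 is checked first, then the
 * per-VL table is scanned; -1 means no VL owns this context. */
static int sc_to_vl(const struct send_context *vld[], int num_vls,
		    const struct send_context *vl15_sc,
		    const struct send_context *sc)
{
	if (!sc)
		return -1;
	if (sc == vl15_sc)
		return 15;
	for (int i = 0; i < num_vls; i++)
		if (vld[i] == sc)
			return i;
	return -1;
}

int main(void)
{
	struct send_context a = { 0 }, b = { 1 }, mgmt = { 15 };
	const struct send_context *vld[NUM_VLS] = { &a, &b };

	printf("%d %d %d\n",
	       sc_to_vl(vld, NUM_VLS, &mgmt, &b),	/* 1  */
	       sc_to_vl(vld, NUM_VLS, &mgmt, &mgmt),	/* 15 */
	       sc_to_vl(vld, NUM_VLS, &mgmt, NULL));	/* -1 */
	return 0;
}
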
dd               5829 drivers/infiniband/hw/hfi1/chip.c static void handle_egress_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
dd               5836 drivers/infiniband/hw/hfi1/chip.c 		start_freeze_handling(dd->pport, 0);
dd               5837 drivers/infiniband/hw/hfi1/chip.c 	else if (is_ax(dd) &&
dd               5839 drivers/infiniband/hw/hfi1/chip.c 		 (dd->icode != ICODE_FUNCTIONAL_SIMULATOR))
dd               5840 drivers/infiniband/hw/hfi1/chip.c 		start_freeze_handling(dd->pport, 0);
dd               5849 drivers/infiniband/hw/hfi1/chip.c 			count_port_inactive(dd);
dd               5852 drivers/infiniband/hw/hfi1/chip.c 			int vl = engine_to_vl(dd, disallowed_pkt_engine(shift));
dd               5854 drivers/infiniband/hw/hfi1/chip.c 			handle_send_egress_err_info(dd, vl);
dd               5863 drivers/infiniband/hw/hfi1/chip.c 		dd_dev_info(dd, "Egress Error: %s\n",
dd               5868 drivers/infiniband/hw/hfi1/chip.c 			incr_cntr64(&dd->send_egress_err_status_cnt[i]);
dd               5872 drivers/infiniband/hw/hfi1/chip.c static void handle_txe_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
dd               5877 drivers/infiniband/hw/hfi1/chip.c 	dd_dev_info(dd, "Send Error: %s\n",
dd               5882 drivers/infiniband/hw/hfi1/chip.c 			incr_cntr64(&dd->send_err_status_cnt[i]);
dd               5903 drivers/infiniband/hw/hfi1/chip.c static void interrupt_clear_down(struct hfi1_devdata *dd,
dd               5913 drivers/infiniband/hw/hfi1/chip.c 		reg = read_kctxt_csr(dd, context, eri->status);
dd               5916 drivers/infiniband/hw/hfi1/chip.c 		write_kctxt_csr(dd, context, eri->clear, reg);
dd               5918 drivers/infiniband/hw/hfi1/chip.c 			eri->handler(dd, context, reg);
dd               5923 drivers/infiniband/hw/hfi1/chip.c 			dd_dev_err(dd, "Repeating %s bits 0x%llx - masking\n",
dd               5929 drivers/infiniband/hw/hfi1/chip.c 			mask = read_kctxt_csr(dd, context, eri->mask);
dd               5931 drivers/infiniband/hw/hfi1/chip.c 			write_kctxt_csr(dd, context, eri->mask, mask);
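
interrupt_clear_down() above is the generic read/clear/handle loop shared by the error interrupt sources: read the status CSR, write the bits back to clear them, invoke the per-source handler, and, if the same bits keep re-asserting past a loop limit, clear them out of the enable mask so a stuck error source cannot livelock the interrupt path. A user-space model of that loop (the register file and its write-1-to-clear behavior are simulated):

#include <stdint.h>
#include <stdio.h>

/* Simulated register pair standing in for the kctxt status/mask CSRs. */
struct err_regs {
	uint64_t status;	/* pending, write-1-to-clear in the model */
	uint64_t mask;		/* which sources are still enabled        */
};

#define ERR_LOOP_LIMIT 5

/* Read, clear, handle in a loop; if the same bits keep re-asserting
 * past the limit, drop them from the enable mask and give up. */
static void interrupt_clear_down(struct err_regs *r,
				 void (*handler)(struct err_regs *r,
						 uint64_t reg))
{
	for (int loop = 0; ; loop++) {
		uint64_t reg = r->status;

		if (!reg)
			break;
		r->status &= ~reg;		/* write-1-to-clear */
		if (handler)
			handler(r, reg);
		if (loop > ERR_LOOP_LIMIT) {
			fprintf(stderr, "repeating bits 0x%llx - masking\n",
				(unsigned long long)reg);
			r->mask &= ~reg;	/* silence the stuck source */
			break;
		}
	}
}

/* A source that immediately re-asserts, to exercise the masking path. */
static void stuck_handler(struct err_regs *r, uint64_t reg)
{
	r->status |= reg;
}

int main(void)
{
	struct err_regs regs = { .status = 0x8, .mask = ~0ull };

	interrupt_clear_down(&regs, stuck_handler);
	printf("final mask 0x%llx\n", (unsigned long long)regs.mask);
	return 0;
}
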
dd               5940 drivers/infiniband/hw/hfi1/chip.c static void is_misc_err_int(struct hfi1_devdata *dd, unsigned int source)
dd               5945 drivers/infiniband/hw/hfi1/chip.c 		interrupt_clear_down(dd, 0, eri);
dd               5947 drivers/infiniband/hw/hfi1/chip.c 		dd_dev_err(dd, "Unexpected misc interrupt (%u) - reserved\n",
dd               5968 drivers/infiniband/hw/hfi1/chip.c static void is_sendctxt_err_int(struct hfi1_devdata *dd,
dd               5979 drivers/infiniband/hw/hfi1/chip.c 	sw_index = dd->hw_to_sw[hw_context];
dd               5980 drivers/infiniband/hw/hfi1/chip.c 	if (sw_index >= dd->num_send_contexts) {
dd               5981 drivers/infiniband/hw/hfi1/chip.c 		dd_dev_err(dd,
dd               5986 drivers/infiniband/hw/hfi1/chip.c 	sci = &dd->send_contexts[sw_index];
dd               5987 drivers/infiniband/hw/hfi1/chip.c 	spin_lock_irqsave(&dd->sc_lock, irq_flags);
dd               5990 drivers/infiniband/hw/hfi1/chip.c 		dd_dev_err(dd, "%s: context %u(%u): no sc?\n", __func__,
dd               5992 drivers/infiniband/hw/hfi1/chip.c 		spin_unlock_irqrestore(&dd->sc_lock, irq_flags);
dd               5999 drivers/infiniband/hw/hfi1/chip.c 	status = read_kctxt_csr(dd, hw_context, SEND_CTXT_ERR_STATUS);
dd               6001 drivers/infiniband/hw/hfi1/chip.c 	dd_dev_info(dd, "Send Context %u(%u) Error: %s\n", sw_index, hw_context,
dd               6006 drivers/infiniband/hw/hfi1/chip.c 		handle_send_egress_err_info(dd, sc_to_vl(dd, sw_index));
dd               6013 drivers/infiniband/hw/hfi1/chip.c 		queue_work(dd->pport->hfi1_wq, &sc->halt_work);
dd               6014 drivers/infiniband/hw/hfi1/chip.c 	spin_unlock_irqrestore(&dd->sc_lock, irq_flags);
dd               6023 drivers/infiniband/hw/hfi1/chip.c 			incr_cntr64(&dd->sw_ctxt_err_status_cnt[i]);
dd               6027 drivers/infiniband/hw/hfi1/chip.c static void handle_sdma_eng_err(struct hfi1_devdata *dd,
dd               6033 drivers/infiniband/hw/hfi1/chip.c 	sde = &dd->per_sdma[source];
dd               6035 drivers/infiniband/hw/hfi1/chip.c 	dd_dev_err(sde->dd, "CONFIG SDMA(%u) %s:%d %s()\n", sde->this_idx,
dd               6037 drivers/infiniband/hw/hfi1/chip.c 	dd_dev_err(sde->dd, "CONFIG SDMA(%u) source: %u status 0x%llx\n",
dd               6050 drivers/infiniband/hw/hfi1/chip.c 			incr_cntr64(&dd->sw_send_dma_eng_err_status_cnt[i]);
dd               6057 drivers/infiniband/hw/hfi1/chip.c static void is_sdma_eng_err_int(struct hfi1_devdata *dd, unsigned int source)
dd               6060 drivers/infiniband/hw/hfi1/chip.c 	struct sdma_engine *sde = &dd->per_sdma[source];
dd               6062 drivers/infiniband/hw/hfi1/chip.c 	dd_dev_err(dd, "CONFIG SDMA(%u) %s:%d %s()\n", sde->this_idx,
dd               6064 drivers/infiniband/hw/hfi1/chip.c 	dd_dev_err(dd, "CONFIG SDMA(%u) source: %u\n", sde->this_idx,
dd               6068 drivers/infiniband/hw/hfi1/chip.c 	interrupt_clear_down(dd, source, &sdma_eng_err);
dd               6074 drivers/infiniband/hw/hfi1/chip.c static void is_various_int(struct hfi1_devdata *dd, unsigned int source)
dd               6084 drivers/infiniband/hw/hfi1/chip.c 		handle_temp_err(dd);
dd               6086 drivers/infiniband/hw/hfi1/chip.c 		interrupt_clear_down(dd, 0, eri);
dd               6088 drivers/infiniband/hw/hfi1/chip.c 		dd_dev_info(dd,
dd               6093 drivers/infiniband/hw/hfi1/chip.c static void handle_qsfp_int(struct hfi1_devdata *dd, u32 src_ctx, u64 reg)
dd               6096 drivers/infiniband/hw/hfi1/chip.c 	struct hfi1_pportdata *ppd = dd->pport;
dd               6102 drivers/infiniband/hw/hfi1/chip.c 			dd_dev_info(dd, "%s: QSFP module removed\n",
dd               6122 drivers/infiniband/hw/hfi1/chip.c 			write_csr(dd, dd->hfi1_id ? ASIC_QSFP2_INVERT :
dd               6144 drivers/infiniband/hw/hfi1/chip.c 			dd_dev_info(dd, "%s: QSFP module inserted\n",
dd               6158 drivers/infiniband/hw/hfi1/chip.c 			write_csr(dd, dd->hfi1_id ? ASIC_QSFP2_INVERT :
dd               6167 drivers/infiniband/hw/hfi1/chip.c 		dd_dev_info(dd, "%s: Interrupt received from QSFP module\n",
dd               6179 drivers/infiniband/hw/hfi1/chip.c static int request_host_lcb_access(struct hfi1_devdata *dd)
dd               6183 drivers/infiniband/hw/hfi1/chip.c 	ret = do_8051_command(dd, HCMD_MISC,
dd               6187 drivers/infiniband/hw/hfi1/chip.c 		dd_dev_err(dd, "%s: command failed with error %d\n",
dd               6193 drivers/infiniband/hw/hfi1/chip.c static int request_8051_lcb_access(struct hfi1_devdata *dd)
dd               6197 drivers/infiniband/hw/hfi1/chip.c 	ret = do_8051_command(dd, HCMD_MISC,
dd               6201 drivers/infiniband/hw/hfi1/chip.c 		dd_dev_err(dd, "%s: command failed with error %d\n",
dd               6211 drivers/infiniband/hw/hfi1/chip.c static inline void set_host_lcb_access(struct hfi1_devdata *dd)
dd               6213 drivers/infiniband/hw/hfi1/chip.c 	write_csr(dd, DC_DC8051_CFG_CSR_ACCESS_SEL,
dd               6222 drivers/infiniband/hw/hfi1/chip.c static inline void set_8051_lcb_access(struct hfi1_devdata *dd)
dd               6224 drivers/infiniband/hw/hfi1/chip.c 	write_csr(dd, DC_DC8051_CFG_CSR_ACCESS_SEL,
dd               6238 drivers/infiniband/hw/hfi1/chip.c int acquire_lcb_access(struct hfi1_devdata *dd, int sleep_ok)
dd               6240 drivers/infiniband/hw/hfi1/chip.c 	struct hfi1_pportdata *ppd = dd->pport;
dd               6258 drivers/infiniband/hw/hfi1/chip.c 		dd_dev_info(dd, "%s: link state %s not up\n",
dd               6264 drivers/infiniband/hw/hfi1/chip.c 	if (dd->lcb_access_count == 0) {
dd               6265 drivers/infiniband/hw/hfi1/chip.c 		ret = request_host_lcb_access(dd);
dd               6267 drivers/infiniband/hw/hfi1/chip.c 			dd_dev_err(dd,
dd               6272 drivers/infiniband/hw/hfi1/chip.c 		set_host_lcb_access(dd);
dd               6274 drivers/infiniband/hw/hfi1/chip.c 	dd->lcb_access_count++;
dd               6288 drivers/infiniband/hw/hfi1/chip.c int release_lcb_access(struct hfi1_devdata *dd, int sleep_ok)
dd               6298 drivers/infiniband/hw/hfi1/chip.c 		mutex_lock(&dd->pport->hls_lock);
dd               6300 drivers/infiniband/hw/hfi1/chip.c 		while (!mutex_trylock(&dd->pport->hls_lock))
dd               6304 drivers/infiniband/hw/hfi1/chip.c 	if (dd->lcb_access_count == 0) {
dd               6305 drivers/infiniband/hw/hfi1/chip.c 		dd_dev_err(dd, "%s: LCB access count is zero.  Skipping.\n",
dd               6310 drivers/infiniband/hw/hfi1/chip.c 	if (dd->lcb_access_count == 1) {
dd               6311 drivers/infiniband/hw/hfi1/chip.c 		set_8051_lcb_access(dd);
dd               6312 drivers/infiniband/hw/hfi1/chip.c 		ret = request_8051_lcb_access(dd);
dd               6314 drivers/infiniband/hw/hfi1/chip.c 			dd_dev_err(dd,
dd               6318 drivers/infiniband/hw/hfi1/chip.c 			set_host_lcb_access(dd);
dd               6322 drivers/infiniband/hw/hfi1/chip.c 	dd->lcb_access_count--;
dd               6324 drivers/infiniband/hw/hfi1/chip.c 	mutex_unlock(&dd->pport->hls_lock);
dd               6337 drivers/infiniband/hw/hfi1/chip.c static void init_lcb_access(struct hfi1_devdata *dd)
dd               6339 drivers/infiniband/hw/hfi1/chip.c 	dd->lcb_access_count = 0;
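
acquire_lcb_access()/release_lcb_access() above share the LCB between the host and the 8051 firmware with a use count: ownership is switched only on the 0 -> 1 and 1 -> 0 transitions, so nested users ride on a single grant, and an unbalanced release is logged and skipped. A user-space sketch of the handoff (a pthread mutex stands in for hls_lock, and the grant functions for the 8051 command/CSR writes):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lcb_lock = PTHREAD_MUTEX_INITIALIZER;
static int lcb_access_count;

static void grant_to_host(void) { puts("LCB -> host"); }
static void grant_to_8051(void) { puts("LCB -> 8051"); }

static void acquire_lcb_access(void)
{
	pthread_mutex_lock(&lcb_lock);
	if (lcb_access_count == 0)
		grant_to_host();	/* first user switches ownership */
	lcb_access_count++;
	pthread_mutex_unlock(&lcb_lock);
}

static void release_lcb_access(void)
{
	pthread_mutex_lock(&lcb_lock);
	if (lcb_access_count == 0)
		fprintf(stderr, "unbalanced release, skipping\n");
	else if (--lcb_access_count == 0)
		grant_to_8051();	/* last user hands it back */
	pthread_mutex_unlock(&lcb_lock);
}

int main(void)
{
	acquire_lcb_access();
	acquire_lcb_access();	/* nested: no extra handoff */
	release_lcb_access();
	release_lcb_access();	/* back to the 8051 here */
	return 0;
}
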
dd               6345 drivers/infiniband/hw/hfi1/chip.c static void hreq_response(struct hfi1_devdata *dd, u8 return_code, u16 rsp_data)
dd               6347 drivers/infiniband/hw/hfi1/chip.c 	write_csr(dd, DC_DC8051_CFG_EXT_DEV_0,
dd               6359 drivers/infiniband/hw/hfi1/chip.c 	struct hfi1_devdata *dd = ppd->dd;
dd               6364 drivers/infiniband/hw/hfi1/chip.c 	reg = read_csr(dd, DC_DC8051_CFG_EXT_DEV_1);
dd               6369 drivers/infiniband/hw/hfi1/chip.c 	write_csr(dd, DC_DC8051_CFG_EXT_DEV_0, 0);
dd               6384 drivers/infiniband/hw/hfi1/chip.c 		dd_dev_info(dd, "8051 request: request 0x%x not supported\n",
dd               6386 drivers/infiniband/hw/hfi1/chip.c 		hreq_response(dd, HREQ_NOT_SUPPORTED, 0);
dd               6390 drivers/infiniband/hw/hfi1/chip.c 		write_csr(dd, DCC_CFG_RESET, LCB_RX_FPE_TX_FPE_INTO_RESET);
dd               6392 drivers/infiniband/hw/hfi1/chip.c 		(void)read_csr(dd, DCC_CFG_RESET);
dd               6396 drivers/infiniband/hw/hfi1/chip.c 		write_csr(dd, DCC_CFG_RESET, LCB_RX_FPE_TX_FPE_OUT_OF_RESET);
dd               6397 drivers/infiniband/hw/hfi1/chip.c 		hreq_response(dd, HREQ_SUCCESS, 0);
dd               6401 drivers/infiniband/hw/hfi1/chip.c 		hreq_response(dd, HREQ_SUCCESS, 0);
dd               6405 drivers/infiniband/hw/hfi1/chip.c 		hreq_response(dd, HREQ_SUCCESS, data);
dd               6408 drivers/infiniband/hw/hfi1/chip.c 		dd_dev_err(dd, "8051 request: unknown request 0x%x\n", type);
dd               6409 drivers/infiniband/hw/hfi1/chip.c 		hreq_response(dd, HREQ_NOT_SUPPORTED, 0);
dd               6417 drivers/infiniband/hw/hfi1/chip.c void set_up_vau(struct hfi1_devdata *dd, u8 vau)
dd               6419 drivers/infiniband/hw/hfi1/chip.c 	u64 reg = read_csr(dd, SEND_CM_GLOBAL_CREDIT);
dd               6424 drivers/infiniband/hw/hfi1/chip.c 	write_csr(dd, SEND_CM_GLOBAL_CREDIT, reg);
dd               6432 drivers/infiniband/hw/hfi1/chip.c void set_up_vl15(struct hfi1_devdata *dd, u16 vl15buf)
dd               6434 drivers/infiniband/hw/hfi1/chip.c 	u64 reg = read_csr(dd, SEND_CM_GLOBAL_CREDIT);
dd               6445 drivers/infiniband/hw/hfi1/chip.c 	write_csr(dd, SEND_CM_GLOBAL_CREDIT, reg);
dd               6447 drivers/infiniband/hw/hfi1/chip.c 	write_csr(dd, SEND_CM_CREDIT_VL15, (u64)vl15buf
dd               6455 drivers/infiniband/hw/hfi1/chip.c void reset_link_credits(struct hfi1_devdata *dd)
dd               6461 drivers/infiniband/hw/hfi1/chip.c 		write_csr(dd, SEND_CM_CREDIT_VL + (8 * i), 0);
dd               6462 drivers/infiniband/hw/hfi1/chip.c 	write_csr(dd, SEND_CM_CREDIT_VL15, 0);
dd               6463 drivers/infiniband/hw/hfi1/chip.c 	write_csr(dd, SEND_CM_GLOBAL_CREDIT, 0);
dd               6465 drivers/infiniband/hw/hfi1/chip.c 	pio_send_control(dd, PSC_CM_RESET);
dd               6467 drivers/infiniband/hw/hfi1/chip.c 	dd->vl15buf_cached = 0;
dd               6497 drivers/infiniband/hw/hfi1/chip.c static void lcb_shutdown(struct hfi1_devdata *dd, int abort)
dd               6502 drivers/infiniband/hw/hfi1/chip.c 	write_csr(dd, DC_LCB_CFG_RUN, 0);
dd               6504 drivers/infiniband/hw/hfi1/chip.c 	write_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET,
dd               6507 drivers/infiniband/hw/hfi1/chip.c 	dd->lcb_err_en = read_csr(dd, DC_LCB_ERR_EN);
dd               6508 drivers/infiniband/hw/hfi1/chip.c 	reg = read_csr(dd, DCC_CFG_RESET);
dd               6509 drivers/infiniband/hw/hfi1/chip.c 	write_csr(dd, DCC_CFG_RESET, reg |
dd               6511 drivers/infiniband/hw/hfi1/chip.c 	(void)read_csr(dd, DCC_CFG_RESET); /* make sure the write completed */
dd               6514 drivers/infiniband/hw/hfi1/chip.c 		write_csr(dd, DCC_CFG_RESET, reg);
dd               6515 drivers/infiniband/hw/hfi1/chip.c 		write_csr(dd, DC_LCB_ERR_EN, dd->lcb_err_en);
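
lcb_shutdown() above ends its CSR sequence with a bare (void)read_csr() of DCC_CFG_RESET: reading the register back forces the posted MMIO write out to the device before the code goes on to depend on the reset being in effect. A sketch of that write-then-flush idiom over a hypothetical mapped register window:

#include <stdint.h>

/* Hypothetical MMIO helpers over a mapped register window. */
static inline void write_csr(volatile uint64_t *base, unsigned long off,
			     uint64_t val)
{
	base[off / sizeof(uint64_t)] = val;
}

static inline uint64_t read_csr(volatile uint64_t *base, unsigned long off)
{
	return base[off / sizeof(uint64_t)];
}

#define RESET_OFF	0x10	/* illustrative register offset */

/* Set reset bits, then read the register back: the read cannot complete
 * until the posted write has reached the device, so everything after
 * this call may rely on the reset being in effect. */
static void enter_reset(volatile uint64_t *base, uint64_t reset_bits)
{
	uint64_t reg = read_csr(base, RESET_OFF);

	write_csr(base, RESET_OFF, reg | reset_bits);
	(void)read_csr(base, RESET_OFF);	/* flush the posted write */
}

int main(void)
{
	static uint64_t fake_regs[16];		/* stands in for the BAR */

	enter_reset(fake_regs, 0x3);
	return fake_regs[RESET_OFF / sizeof(uint64_t)] == 0x3 ? 0 : 1;
}
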
dd               6529 drivers/infiniband/hw/hfi1/chip.c static void _dc_shutdown(struct hfi1_devdata *dd)
dd               6531 drivers/infiniband/hw/hfi1/chip.c 	lockdep_assert_held(&dd->dc8051_lock);
dd               6533 drivers/infiniband/hw/hfi1/chip.c 	if (dd->dc_shutdown)
dd               6536 drivers/infiniband/hw/hfi1/chip.c 	dd->dc_shutdown = 1;
dd               6538 drivers/infiniband/hw/hfi1/chip.c 	lcb_shutdown(dd, 1);
dd               6544 drivers/infiniband/hw/hfi1/chip.c 	write_csr(dd, DC_DC8051_CFG_RST, 0x1);
dd               6547 drivers/infiniband/hw/hfi1/chip.c static void dc_shutdown(struct hfi1_devdata *dd)
dd               6549 drivers/infiniband/hw/hfi1/chip.c 	mutex_lock(&dd->dc8051_lock);
dd               6550 drivers/infiniband/hw/hfi1/chip.c 	_dc_shutdown(dd);
dd               6551 drivers/infiniband/hw/hfi1/chip.c 	mutex_unlock(&dd->dc8051_lock);
dd               6560 drivers/infiniband/hw/hfi1/chip.c static void _dc_start(struct hfi1_devdata *dd)
dd               6562 drivers/infiniband/hw/hfi1/chip.c 	lockdep_assert_held(&dd->dc8051_lock);
dd               6564 drivers/infiniband/hw/hfi1/chip.c 	if (!dd->dc_shutdown)
dd               6568 drivers/infiniband/hw/hfi1/chip.c 	write_csr(dd, DC_DC8051_CFG_RST, 0ull);
dd               6570 drivers/infiniband/hw/hfi1/chip.c 	if (wait_fm_ready(dd, TIMEOUT_8051_START))
dd               6571 drivers/infiniband/hw/hfi1/chip.c 		dd_dev_err(dd, "%s: timeout starting 8051 firmware\n",
dd               6575 drivers/infiniband/hw/hfi1/chip.c 	write_csr(dd, DCC_CFG_RESET, LCB_RX_FPE_TX_FPE_OUT_OF_RESET);
dd               6577 drivers/infiniband/hw/hfi1/chip.c 	write_csr(dd, DC_LCB_ERR_EN, dd->lcb_err_en);
dd               6578 drivers/infiniband/hw/hfi1/chip.c 	dd->dc_shutdown = 0;
dd               6581 drivers/infiniband/hw/hfi1/chip.c static void dc_start(struct hfi1_devdata *dd)
dd               6583 drivers/infiniband/hw/hfi1/chip.c 	mutex_lock(&dd->dc8051_lock);
dd               6584 drivers/infiniband/hw/hfi1/chip.c 	_dc_start(dd);
dd               6585 drivers/infiniband/hw/hfi1/chip.c 	mutex_unlock(&dd->dc8051_lock);
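
_dc_shutdown()/dc_shutdown() (and the matching _dc_start()/dc_start()) above use the common locked/unlocked split: the underscored body asserts via lockdep that dc8051_lock is held and is idempotent through the dc_shutdown flag, while the plain wrapper is the public entry point that takes the mutex itself. A user-space model of the shutdown side (pthread mutex in place of the kernel mutex, a puts() in place of the CSR writes):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t dc8051_lock = PTHREAD_MUTEX_INITIALIZER;
static int dc_is_shutdown;

/* Underscored body: caller must hold dc8051_lock (the kernel version
 * documents this with lockdep_assert_held()); idempotent via the flag. */
static void _dc_shutdown(void)
{
	if (dc_is_shutdown)
		return;
	dc_is_shutdown = 1;
	puts("8051 held in reset");	/* stands in for the CSR writes */
}

/* Public wrapper: takes the lock itself. */
static void dc_shutdown(void)
{
	pthread_mutex_lock(&dc8051_lock);
	_dc_shutdown();
	pthread_mutex_unlock(&dc8051_lock);
}

int main(void)
{
	dc_shutdown();
	dc_shutdown();	/* second call is a no-op */
	return 0;
}
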
dd               6591 drivers/infiniband/hw/hfi1/chip.c static void adjust_lcb_for_fpga_serdes(struct hfi1_devdata *dd)
dd               6596 drivers/infiniband/hw/hfi1/chip.c 	if (dd->icode != ICODE_FPGA_EMULATION)
dd               6606 drivers/infiniband/hw/hfi1/chip.c 	if (is_emulator_s(dd))
dd               6610 drivers/infiniband/hw/hfi1/chip.c 	version = emulator_rev(dd);
dd               6611 drivers/infiniband/hw/hfi1/chip.c 	if (!is_ax(dd))
dd               6655 drivers/infiniband/hw/hfi1/chip.c 		write_csr(dd, DC_LCB_CFG_LN_DCLK, 1ull);
dd               6666 drivers/infiniband/hw/hfi1/chip.c 	write_csr(dd, DC_LCB_CFG_RX_FIFOS_RADR, rx_radr);
dd               6668 drivers/infiniband/hw/hfi1/chip.c 	write_csr(dd, DC_LCB_CFG_IGNORE_LOST_RCLK,
dd               6670 drivers/infiniband/hw/hfi1/chip.c 	write_csr(dd, DC_LCB_CFG_TX_FIFOS_RADR, tx_radr);
dd               6682 drivers/infiniband/hw/hfi1/chip.c 	struct hfi1_devdata *dd = ppd->dd;
dd               6690 drivers/infiniband/hw/hfi1/chip.c 	ret = read_idle_sma(dd, &msg);
dd               6693 drivers/infiniband/hw/hfi1/chip.c 	dd_dev_info(dd, "%s: SMA message 0x%llx\n", __func__, msg);
dd               6721 drivers/infiniband/hw/hfi1/chip.c 					dd,
dd               6727 drivers/infiniband/hw/hfi1/chip.c 		dd_dev_err(dd,
dd               6734 drivers/infiniband/hw/hfi1/chip.c static void adjust_rcvctrl(struct hfi1_devdata *dd, u64 add, u64 clear)
dd               6739 drivers/infiniband/hw/hfi1/chip.c 	spin_lock_irqsave(&dd->rcvctrl_lock, flags);
dd               6740 drivers/infiniband/hw/hfi1/chip.c 	rcvctrl = read_csr(dd, RCV_CTRL);
dd               6743 drivers/infiniband/hw/hfi1/chip.c 	write_csr(dd, RCV_CTRL, rcvctrl);
dd               6744 drivers/infiniband/hw/hfi1/chip.c 	spin_unlock_irqrestore(&dd->rcvctrl_lock, flags);
dd               6747 drivers/infiniband/hw/hfi1/chip.c static inline void add_rcvctrl(struct hfi1_devdata *dd, u64 add)
dd               6749 drivers/infiniband/hw/hfi1/chip.c 	adjust_rcvctrl(dd, add, 0);
dd               6752 drivers/infiniband/hw/hfi1/chip.c static inline void clear_rcvctrl(struct hfi1_devdata *dd, u64 clear)
dd               6754 drivers/infiniband/hw/hfi1/chip.c 	adjust_rcvctrl(dd, 0, clear);
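
adjust_rcvctrl() above funnels every RCV_CTRL update through one read-modify-write under rcvctrl_lock, taking both an add mask and a clear mask, with add_rcvctrl()/clear_rcvctrl() as thin wrappers. A user-space model (a pthread mutex in place of the irqsave spinlock, a shadow variable in place of the CSR):

#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

static pthread_mutex_t rcvctrl_lock = PTHREAD_MUTEX_INITIALIZER;
static uint64_t rcv_ctrl_shadow;	/* stands in for the RCV_CTRL CSR */

/* Single locked read-modify-write for every RCV_CTRL update: bits in
 * 'add' are set, bits in 'clear' are removed, nothing else changes. */
static void adjust_rcvctrl(uint64_t add, uint64_t clear)
{
	pthread_mutex_lock(&rcvctrl_lock);
	rcv_ctrl_shadow = (rcv_ctrl_shadow | add) & ~clear;
	pthread_mutex_unlock(&rcvctrl_lock);
}

static void add_rcvctrl(uint64_t add)     { adjust_rcvctrl(add, 0); }
static void clear_rcvctrl(uint64_t clear) { adjust_rcvctrl(0, clear); }

int main(void)
{
	add_rcvctrl(0x5);
	clear_rcvctrl(0x1);
	printf("rcvctrl = 0x%llx\n", (unsigned long long)rcv_ctrl_shadow);
	return 0;
}
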
dd               6762 drivers/infiniband/hw/hfi1/chip.c 	struct hfi1_devdata *dd = ppd->dd;
dd               6768 drivers/infiniband/hw/hfi1/chip.c 		write_csr(dd, CCE_CTRL, CCE_CTRL_SPC_FREEZE_SMASK);
dd               6771 drivers/infiniband/hw/hfi1/chip.c 	dd->flags |= HFI1_FROZEN;
dd               6774 drivers/infiniband/hw/hfi1/chip.c 	sdma_freeze_notify(dd, !!(flags & FREEZE_LINK_DOWN));
dd               6779 drivers/infiniband/hw/hfi1/chip.c 	for (i = 0; i < dd->num_send_contexts; i++) {
dd               6780 drivers/infiniband/hw/hfi1/chip.c 		sc = dd->send_contexts[i].sc;
dd               6789 drivers/infiniband/hw/hfi1/chip.c 		dd_dev_err(dd,
dd               6804 drivers/infiniband/hw/hfi1/chip.c static void wait_for_freeze_status(struct hfi1_devdata *dd, int freeze)
dd               6811 drivers/infiniband/hw/hfi1/chip.c 		reg = read_csr(dd, CCE_STATUS);
dd               6823 drivers/infiniband/hw/hfi1/chip.c 			dd_dev_err(dd,
dd               6836 drivers/infiniband/hw/hfi1/chip.c static void rxe_freeze(struct hfi1_devdata *dd)
dd               6842 drivers/infiniband/hw/hfi1/chip.c 	clear_rcvctrl(dd, RCV_CTRL_RCV_PORT_ENABLE_SMASK);
dd               6845 drivers/infiniband/hw/hfi1/chip.c 	for (i = 0; i < dd->num_rcv_contexts; i++) {
dd               6846 drivers/infiniband/hw/hfi1/chip.c 		rcd = hfi1_rcd_get_by_index(dd, i);
dd               6847 drivers/infiniband/hw/hfi1/chip.c 		hfi1_rcvctrl(dd, HFI1_RCVCTRL_CTXT_DIS, rcd);
dd               6858 drivers/infiniband/hw/hfi1/chip.c static void rxe_kernel_unfreeze(struct hfi1_devdata *dd)
dd               6865 drivers/infiniband/hw/hfi1/chip.c 	for (i = 0; i < dd->num_rcv_contexts; i++) {
dd               6866 drivers/infiniband/hw/hfi1/chip.c 		rcd = hfi1_rcd_get_by_index(dd, i);
dd               6870 drivers/infiniband/hw/hfi1/chip.c 		    (i >= dd->first_dyn_alloc_ctxt && !rcd->is_vnic)) {
dd               6878 drivers/infiniband/hw/hfi1/chip.c 		hfi1_rcvctrl(dd, rcvmask, rcd);
dd               6883 drivers/infiniband/hw/hfi1/chip.c 	add_rcvctrl(dd, RCV_CTRL_RCV_PORT_ENABLE_SMASK);
dd               6895 drivers/infiniband/hw/hfi1/chip.c 	struct hfi1_devdata *dd = ppd->dd;
dd               6898 drivers/infiniband/hw/hfi1/chip.c 	wait_for_freeze_status(dd, 1);
dd               6903 drivers/infiniband/hw/hfi1/chip.c 	pio_freeze(dd);
dd               6906 drivers/infiniband/hw/hfi1/chip.c 	sdma_freeze(dd);
dd               6911 drivers/infiniband/hw/hfi1/chip.c 	rxe_freeze(dd);
dd               6917 drivers/infiniband/hw/hfi1/chip.c 	write_csr(dd, CCE_CTRL, CCE_CTRL_SPC_UNFREEZE_SMASK);
dd               6918 drivers/infiniband/hw/hfi1/chip.c 	wait_for_freeze_status(dd, 0);
dd               6920 drivers/infiniband/hw/hfi1/chip.c 	if (is_ax(dd)) {
dd               6921 drivers/infiniband/hw/hfi1/chip.c 		write_csr(dd, CCE_CTRL, CCE_CTRL_SPC_FREEZE_SMASK);
dd               6922 drivers/infiniband/hw/hfi1/chip.c 		wait_for_freeze_status(dd, 1);
dd               6923 drivers/infiniband/hw/hfi1/chip.c 		write_csr(dd, CCE_CTRL, CCE_CTRL_SPC_UNFREEZE_SMASK);
dd               6924 drivers/infiniband/hw/hfi1/chip.c 		wait_for_freeze_status(dd, 0);
dd               6928 drivers/infiniband/hw/hfi1/chip.c 	pio_kernel_unfreeze(dd);
dd               6931 drivers/infiniband/hw/hfi1/chip.c 	sdma_unfreeze(dd);
dd               6936 drivers/infiniband/hw/hfi1/chip.c 	rxe_kernel_unfreeze(dd);
dd               6951 drivers/infiniband/hw/hfi1/chip.c 	dd->flags &= ~HFI1_FROZEN;
dd               6952 drivers/infiniband/hw/hfi1/chip.c 	wake_up(&dd->event_queue);
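
The freeze handler above runs a fixed sequence: wait for the freeze to latch, quiesce PIO, SDMA, and RXE, unfreeze, repeat the freeze/unfreeze once more on A-step parts, then unfreeze the kernel contexts, clear HFI1_FROZEN, and wake waiters. wait_for_freeze_status() itself is a poll-until-match-or-timeout loop; a user-space sketch of that loop (bit position and timeout handling hypothetical):

#include <stdint.h>
#include <stdio.h>
#include <time.h>

#define FROZEN_BIT 0x1ull	/* illustrative bit position */

static long long now_ms(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return ts.tv_sec * 1000LL + ts.tv_nsec / 1000000;
}

/* Poll until the frozen bit matches the requested state or the
 * deadline passes; 0 on success, -1 (with a log) on timeout. */
static int wait_for_freeze_status(const volatile uint64_t *status,
				  int freeze, int timeout_ms)
{
	long long deadline = now_ms() + timeout_ms;

	for (;;) {
		int frozen = (*status & FROZEN_BIT) != 0;

		if (frozen == freeze)
			return 0;
		if (now_ms() > deadline) {
			fprintf(stderr, "timeout waiting to %sfreeze\n",
				freeze ? "" : "un");
			return -1;
		}
	}
}

int main(void)
{
	volatile uint64_t status = FROZEN_BIT;

	return wait_for_freeze_status(&status, 1, 10);
}
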
dd               6992 drivers/infiniband/hw/hfi1/chip.c 	struct hfi1_devdata *dd = ppd->dd;
dd               6997 drivers/infiniband/hw/hfi1/chip.c 	read_ltp_rtt(dd);
dd               7002 drivers/infiniband/hw/hfi1/chip.c 	clear_linkup_counters(dd);
dd               7014 drivers/infiniband/hw/hfi1/chip.c 	if (!(quick_linkup || dd->icode == ICODE_FUNCTIONAL_SIMULATOR))
dd               7015 drivers/infiniband/hw/hfi1/chip.c 		set_up_vl15(dd, dd->vl15buf_cached);
dd               7020 drivers/infiniband/hw/hfi1/chip.c 		dd_dev_err(dd,
dd               7139 drivers/infiniband/hw/hfi1/chip.c 		read_link_down_reason(ppd->dd, &link_down_reason);
dd               7143 drivers/infiniband/hw/hfi1/chip.c 			dd_dev_info(ppd->dd, "%sUnexpected link down\n",
dd               7151 drivers/infiniband/hw/hfi1/chip.c 			read_planned_down_reason_code(ppd->dd, &neigh_reason);
dd               7152 drivers/infiniband/hw/hfi1/chip.c 			dd_dev_info(ppd->dd,
dd               7158 drivers/infiniband/hw/hfi1/chip.c 			dd_dev_info(ppd->dd,
dd               7163 drivers/infiniband/hw/hfi1/chip.c 			dd_dev_info(ppd->dd, "%sUnknown reason 0x%x\n",
dd               7193 drivers/infiniband/hw/hfi1/chip.c 	clear_rcvctrl(ppd->dd, RCV_CTRL_RCV_PORT_ENABLE_SMASK);
dd               7200 drivers/infiniband/hw/hfi1/chip.c 		dc_shutdown(ppd->dd);
dd               7217 drivers/infiniband/hw/hfi1/chip.c 		dd_dev_info(ppd->dd, "%s: link not up (%s), nothing to do\n",
dd               7281 drivers/infiniband/hw/hfi1/chip.c 		hfi1_event_pkey_change(ppd->dd, ppd->port);
dd               7288 drivers/infiniband/hw/hfi1/chip.c static u16 link_width_to_bits(struct hfi1_devdata *dd, u16 width)
dd               7296 drivers/infiniband/hw/hfi1/chip.c 		if (dd->icode == ICODE_FUNCTIONAL_SIMULATOR || quick_linkup)
dd               7303 drivers/infiniband/hw/hfi1/chip.c 		dd_dev_info(dd, "%s: invalid width %d, using 4\n",
dd               7330 drivers/infiniband/hw/hfi1/chip.c static void get_link_widths(struct hfi1_devdata *dd, u16 *tx_width,
dd               7341 drivers/infiniband/hw/hfi1/chip.c 	read_tx_settings(dd, &enable_lane_tx, &tx_polarity_inversion,
dd               7343 drivers/infiniband/hw/hfi1/chip.c 	read_local_lni(dd, &enable_lane_rx);
dd               7354 drivers/infiniband/hw/hfi1/chip.c 	if ((dd->icode == ICODE_RTL_SILICON) &&
dd               7355 drivers/infiniband/hw/hfi1/chip.c 	    (dd->dc8051_ver < dc8051_ver(0, 19, 0))) {
dd               7359 drivers/infiniband/hw/hfi1/chip.c 			dd->pport[0].link_speed_active = OPA_LINK_SPEED_12_5G;
dd               7362 drivers/infiniband/hw/hfi1/chip.c 			dd_dev_err(dd,
dd               7367 drivers/infiniband/hw/hfi1/chip.c 			dd->pport[0].link_speed_active = OPA_LINK_SPEED_25G;
dd               7372 drivers/infiniband/hw/hfi1/chip.c 	dd_dev_info(dd,
dd               7375 drivers/infiniband/hw/hfi1/chip.c 	*tx_width = link_width_to_bits(dd, tx);
dd               7376 drivers/infiniband/hw/hfi1/chip.c 	*rx_width = link_width_to_bits(dd, rx);
dd               7392 drivers/infiniband/hw/hfi1/chip.c static void get_linkup_widths(struct hfi1_devdata *dd, u16 *tx_width,
dd               7399 drivers/infiniband/hw/hfi1/chip.c 	read_vc_local_link_mode(dd, &misc_bits, &local_flags, &widths);
dd               7403 drivers/infiniband/hw/hfi1/chip.c 	*tx_width = link_width_to_bits(dd, tx);
dd               7404 drivers/infiniband/hw/hfi1/chip.c 	*rx_width = link_width_to_bits(dd, rx);
dd               7407 drivers/infiniband/hw/hfi1/chip.c 	get_link_widths(dd, &active_tx, &active_rx);
dd               7423 drivers/infiniband/hw/hfi1/chip.c 	get_linkup_widths(ppd->dd, &tx_width, &rx_width);
dd               7445 drivers/infiniband/hw/hfi1/chip.c 	struct hfi1_devdata *dd = ppd->dd;
dd               7464 drivers/infiniband/hw/hfi1/chip.c 	lcb_shutdown(dd, 0);
dd               7465 drivers/infiniband/hw/hfi1/chip.c 	adjust_lcb_for_fpga_serdes(dd);
dd               7467 drivers/infiniband/hw/hfi1/chip.c 	read_vc_remote_phy(dd, &power_management, &continuous);
dd               7468 drivers/infiniband/hw/hfi1/chip.c 	read_vc_remote_fabric(dd, &vau, &z, &vcu, &vl15buf,
dd               7470 drivers/infiniband/hw/hfi1/chip.c 	read_vc_remote_link_width(dd, &remote_tx_rate, &link_widths);
dd               7471 drivers/infiniband/hw/hfi1/chip.c 	read_remote_device_id(dd, &device_id, &device_rev);
dd               7474 drivers/infiniband/hw/hfi1/chip.c 	get_link_widths(dd, &active_tx, &active_rx);
dd               7475 drivers/infiniband/hw/hfi1/chip.c 	dd_dev_info(dd,
dd               7478 drivers/infiniband/hw/hfi1/chip.c 	dd_dev_info(dd,
dd               7482 drivers/infiniband/hw/hfi1/chip.c 	dd_dev_info(dd, "Peer Link Width: tx rate 0x%x, widths 0x%x\n",
dd               7484 drivers/infiniband/hw/hfi1/chip.c 	dd_dev_info(dd, "Peer Device ID: 0x%04x, Revision 0x%02x\n",
dd               7497 drivers/infiniband/hw/hfi1/chip.c 	set_up_vau(dd, vau);
dd               7503 drivers/infiniband/hw/hfi1/chip.c 	set_up_vl15(dd, 0);
dd               7504 drivers/infiniband/hw/hfi1/chip.c 	dd->vl15buf_cached = vl15buf;
dd               7519 drivers/infiniband/hw/hfi1/chip.c 	dd_dev_info(dd, "Final LCB CRC mode: %d\n", (int)crc_val);
dd               7520 drivers/infiniband/hw/hfi1/chip.c 	write_csr(dd, DC_LCB_CFG_CRC_MODE,
dd               7524 drivers/infiniband/hw/hfi1/chip.c 	reg = read_csr(dd, SEND_CM_CTRL);
dd               7526 drivers/infiniband/hw/hfi1/chip.c 		write_csr(dd, SEND_CM_CTRL,
dd               7529 drivers/infiniband/hw/hfi1/chip.c 		write_csr(dd, SEND_CM_CTRL,
dd               7534 drivers/infiniband/hw/hfi1/chip.c 	if (dd->dc8051_ver < dc8051_ver(0, 20, 0)) {
dd               7554 drivers/infiniband/hw/hfi1/chip.c 		dd_dev_err(dd, "%s: unexpected remote tx rate %d, using 25Gb\n",
dd               7575 drivers/infiniband/hw/hfi1/chip.c 	assign_remote_cm_au_table(dd, vcu);
dd               7586 drivers/infiniband/hw/hfi1/chip.c 	if (is_ax(dd)) {			/* fixed in B0 */
dd               7587 drivers/infiniband/hw/hfi1/chip.c 		reg = read_csr(dd, DC_LCB_CFG_LINK_KILL_EN);
dd               7590 drivers/infiniband/hw/hfi1/chip.c 		write_csr(dd, DC_LCB_CFG_LINK_KILL_EN, reg);
dd               7594 drivers/infiniband/hw/hfi1/chip.c 	write_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET, 0);
dd               7597 drivers/infiniband/hw/hfi1/chip.c 	write_csr(dd, DC_LCB_ERR_EN, 0); /* mask LCB errors */
dd               7598 drivers/infiniband/hw/hfi1/chip.c 	set_8051_lcb_access(dd);
dd               7641 drivers/infiniband/hw/hfi1/chip.c 			dd_dev_err(ppd->dd,
dd               7651 drivers/infiniband/hw/hfi1/chip.c 		get_link_widths(ppd->dd, &tx, &rx);
dd               7659 drivers/infiniband/hw/hfi1/chip.c 		dd_dev_err(ppd->dd, "Link downgrade is really a link down, ignoring\n");
dd               7669 drivers/infiniband/hw/hfi1/chip.c 			dd_dev_err(ppd->dd,
dd               7671 drivers/infiniband/hw/hfi1/chip.c 			dd_dev_err(ppd->dd,
dd               7682 drivers/infiniband/hw/hfi1/chip.c 		dd_dev_err(ppd->dd,
dd               7684 drivers/infiniband/hw/hfi1/chip.c 		dd_dev_err(ppd->dd,
dd               7715 drivers/infiniband/hw/hfi1/chip.c 	dd_dev_info(ppd->dd, "8051: Link width downgrade\n");
dd               7750 drivers/infiniband/hw/hfi1/chip.c static void handle_8051_interrupt(struct hfi1_devdata *dd, u32 unused, u64 reg)
dd               7752 drivers/infiniband/hw/hfi1/chip.c 	struct hfi1_pportdata *ppd = dd->pport;
dd               7761 drivers/infiniband/hw/hfi1/chip.c 		info = read_csr(dd, DC_DC8051_DBG_ERR_INFO_SET_BY_8051);
dd               7781 drivers/infiniband/hw/hfi1/chip.c 				dd_dev_info(dd, "Link error: %s\n",
dd               7796 drivers/infiniband/hw/hfi1/chip.c 			dd_dev_err(dd, "8051 info error: %s\n",
dd               7821 drivers/infiniband/hw/hfi1/chip.c 			dd_dev_info(dd, "8051: Link up\n");
dd               7840 drivers/infiniband/hw/hfi1/chip.c 			dd_dev_info(dd, "8051: Link down%s\n", extra);
dd               7850 drivers/infiniband/hw/hfi1/chip.c 			dd_dev_info(dd, "8051 info host message: %s\n",
dd               7864 drivers/infiniband/hw/hfi1/chip.c 		dd_dev_err(dd, "Lost 8051 heartbeat\n");
dd               7865 drivers/infiniband/hw/hfi1/chip.c 		write_csr(dd, DC_DC8051_ERR_EN,
dd               7866 drivers/infiniband/hw/hfi1/chip.c 			  read_csr(dd, DC_DC8051_ERR_EN) &
dd               7873 drivers/infiniband/hw/hfi1/chip.c 		dd_dev_err(dd, "8051 error: %s\n",
dd               7886 drivers/infiniband/hw/hfi1/chip.c 			dd_dev_info(dd, "%s: not queuing link down. host_link_state %x, link_enabled %x\n",
dd               7891 drivers/infiniband/hw/hfi1/chip.c 				dd_dev_info(dd,
dd               7945 drivers/infiniband/hw/hfi1/chip.c static void handle_dcc_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
dd               7950 drivers/infiniband/hw/hfi1/chip.c 	struct hfi1_pportdata *ppd = dd->pport;
dd               7955 drivers/infiniband/hw/hfi1/chip.c 		if (!(dd->err_info_uncorrectable & OPA_EI_STATUS_SMASK)) {
dd               7956 drivers/infiniband/hw/hfi1/chip.c 			info = read_csr(dd, DCC_ERR_INFO_UNCORRECTABLE);
dd               7957 drivers/infiniband/hw/hfi1/chip.c 			dd->err_info_uncorrectable = info & OPA_EI_CODE_SMASK;
dd               7959 drivers/infiniband/hw/hfi1/chip.c 			dd->err_info_uncorrectable |= OPA_EI_STATUS_SMASK;
dd               7965 drivers/infiniband/hw/hfi1/chip.c 		struct hfi1_pportdata *ppd = dd->pport;
dd               7975 drivers/infiniband/hw/hfi1/chip.c 		info = read_csr(dd, DCC_ERR_INFO_FMCONFIG);
dd               7976 drivers/infiniband/hw/hfi1/chip.c 		if (!(dd->err_info_fmconfig & OPA_EI_STATUS_SMASK)) {
dd               7977 drivers/infiniband/hw/hfi1/chip.c 			dd->err_info_fmconfig = info & OPA_EI_CODE_SMASK;
dd               7979 drivers/infiniband/hw/hfi1/chip.c 			dd->err_info_fmconfig |= OPA_EI_STATUS_SMASK;
dd               8018 drivers/infiniband/hw/hfi1/chip.c 		dd_dev_info_ratelimited(dd, "DCC Error: fmconfig error: %s\n",
dd               8026 drivers/infiniband/hw/hfi1/chip.c 		info = read_csr(dd, DCC_ERR_INFO_PORTRCV);
dd               8027 drivers/infiniband/hw/hfi1/chip.c 		hdr0 = read_csr(dd, DCC_ERR_INFO_PORTRCV_HDR0);
dd               8028 drivers/infiniband/hw/hfi1/chip.c 		hdr1 = read_csr(dd, DCC_ERR_INFO_PORTRCV_HDR1);
dd               8029 drivers/infiniband/hw/hfi1/chip.c 		if (!(dd->err_info_rcvport.status_and_code &
dd               8031 drivers/infiniband/hw/hfi1/chip.c 			dd->err_info_rcvport.status_and_code =
dd               8034 drivers/infiniband/hw/hfi1/chip.c 			dd->err_info_rcvport.status_and_code |=
dd               8040 drivers/infiniband/hw/hfi1/chip.c 			dd->err_info_rcvport.packet_flit1 = hdr0;
dd               8041 drivers/infiniband/hw/hfi1/chip.c 			dd->err_info_rcvport.packet_flit2 = hdr1;
dd               8070 drivers/infiniband/hw/hfi1/chip.c 		dd_dev_info_ratelimited(dd, "DCC Error: PortRcv error: %s\n"
dd               8079 drivers/infiniband/hw/hfi1/chip.c 		dd_dev_info_ratelimited(dd, "8051 access to LCB blocked\n");
dd               8084 drivers/infiniband/hw/hfi1/chip.c 		dd_dev_info_ratelimited(dd, "host access to LCB blocked\n");
dd               8088 drivers/infiniband/hw/hfi1/chip.c 	if (unlikely(hfi1_dbg_fault_suppress_err(&dd->verbs_dev)))
dd               8093 drivers/infiniband/hw/hfi1/chip.c 		dd_dev_info_ratelimited(dd, "DCC Error: %s\n",
dd               8100 drivers/infiniband/hw/hfi1/chip.c 		dd_dev_info_ratelimited(dd, "%s: PortErrorAction bounce\n",
dd               8107 drivers/infiniband/hw/hfi1/chip.c static void handle_lcb_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
dd               8111 drivers/infiniband/hw/hfi1/chip.c 	dd_dev_info(dd, "LCB Error: %s\n",
dd               8118 drivers/infiniband/hw/hfi1/chip.c static void is_dc_int(struct hfi1_devdata *dd, unsigned int source)
dd               8123 drivers/infiniband/hw/hfi1/chip.c 		interrupt_clear_down(dd, 0, eri);
dd               8134 drivers/infiniband/hw/hfi1/chip.c 		dd_dev_err(dd, "Parity error in DC LBM block\n");
dd               8136 drivers/infiniband/hw/hfi1/chip.c 		dd_dev_err(dd, "Invalid DC interrupt %u\n", source);
dd               8143 drivers/infiniband/hw/hfi1/chip.c static void is_send_credit_int(struct hfi1_devdata *dd, unsigned int source)
dd               8145 drivers/infiniband/hw/hfi1/chip.c 	sc_group_release_update(dd, source);
dd               8157 drivers/infiniband/hw/hfi1/chip.c static void is_sdma_eng_int(struct hfi1_devdata *dd, unsigned int source)
dd               8165 drivers/infiniband/hw/hfi1/chip.c 	dd_dev_err(dd, "CONFIG SDMA(%u) %s:%d %s()\n", which,
dd               8167 drivers/infiniband/hw/hfi1/chip.c 	sdma_dumpstate(&dd->per_sdma[which]);
dd               8170 drivers/infiniband/hw/hfi1/chip.c 	if (likely(what < 3 && which < dd->num_sdma)) {
dd               8171 drivers/infiniband/hw/hfi1/chip.c 		sdma_engine_interrupt(&dd->per_sdma[which], 1ull << source);
dd               8174 drivers/infiniband/hw/hfi1/chip.c 		dd_dev_err(dd, "Invalid SDMA interrupt 0x%x\n", source);
dd               8188 drivers/infiniband/hw/hfi1/chip.c static void is_rcv_avail_int(struct hfi1_devdata *dd, unsigned int source)
dd               8193 drivers/infiniband/hw/hfi1/chip.c 	if (likely(source < dd->num_rcv_contexts)) {
dd               8194 drivers/infiniband/hw/hfi1/chip.c 		rcd = hfi1_rcd_get_by_index(dd, source);
dd               8206 drivers/infiniband/hw/hfi1/chip.c 	dd_dev_err(dd, "unexpected %s receive available context interrupt %u\n",
dd               8219 drivers/infiniband/hw/hfi1/chip.c static void is_rcv_urgent_int(struct hfi1_devdata *dd, unsigned int source)
dd               8224 drivers/infiniband/hw/hfi1/chip.c 	if (likely(source < dd->num_rcv_contexts)) {
dd               8225 drivers/infiniband/hw/hfi1/chip.c 		rcd = hfi1_rcd_get_by_index(dd, source);
dd               8237 drivers/infiniband/hw/hfi1/chip.c 	dd_dev_err(dd, "unexpected %s receive urgent context interrupt %u\n",
dd               8244 drivers/infiniband/hw/hfi1/chip.c static void is_reserved_int(struct hfi1_devdata *dd, unsigned int source)
dd               8248 drivers/infiniband/hw/hfi1/chip.c 	dd_dev_err(dd, "unexpected %s interrupt\n",
dd               8283 drivers/infiniband/hw/hfi1/chip.c static void is_interrupt(struct hfi1_devdata *dd, unsigned int source)
dd               8290 drivers/infiniband/hw/hfi1/chip.c 			trace_hfi1_interrupt(dd, entry, source);
dd               8291 drivers/infiniband/hw/hfi1/chip.c 			entry->is_int(dd, source - entry->start);
dd               8296 drivers/infiniband/hw/hfi1/chip.c 	dd_dev_err(dd, "invalid interrupt source %u\n", source);
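
The is_interrupt() lines indexed above are the hub of a table-driven dispatch: each table entry owns a contiguous [start, end) range of interrupt sources, and the matching handler is called with the source renumbered relative to its own range. A minimal user-space sketch of that pattern; the table contents, handler names, and struct layout here are illustrative stand-ins, not the driver's:

#include <stdio.h>

struct devdata { int unit; };

struct is_table {
	unsigned int start;		/* first source this entry owns */
	unsigned int end;		/* one past the last source     */
	const char *name;
	void (*is_int)(struct devdata *dd, unsigned int source);
};

static void is_dc_int(struct devdata *dd, unsigned int source)
{
	printf("unit %d: DC interrupt, relative source %u\n", dd->unit, source);
}

static void is_sdma_eng_int(struct devdata *dd, unsigned int source)
{
	printf("unit %d: SDMA engine interrupt, relative source %u\n",
	       dd->unit, source);
}

/* ranges are contiguous and sorted, mirroring the indexed driver table */
static const struct is_table table[] = {
	{ .start = 0, .end = 8,  .name = "DC",   .is_int = is_dc_int },
	{ .start = 8, .end = 24, .name = "SDMA", .is_int = is_sdma_eng_int },
};

static void is_interrupt(struct devdata *dd, unsigned int source)
{
	for (size_t i = 0; i < sizeof(table) / sizeof(table[0]); i++) {
		const struct is_table *entry = &table[i];

		if (source >= entry->start && source < entry->end) {
			/* handlers see a 0-based offset within their range */
			entry->is_int(dd, source - entry->start);
			return;
		}
	}
	fprintf(stderr, "invalid interrupt source %u\n", source);
}

int main(void)
{
	struct devdata dd = { .unit = 0 };

	is_interrupt(&dd, 3);	/* lands in the DC range   */
	is_interrupt(&dd, 10);	/* lands in the SDMA range */
	return 0;
}
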
dd               8310 drivers/infiniband/hw/hfi1/chip.c 	struct hfi1_devdata *dd = data;
dd               8316 drivers/infiniband/hw/hfi1/chip.c 	this_cpu_inc(*dd->int_counter);
dd               8320 drivers/infiniband/hw/hfi1/chip.c 		if (dd->gi_mask[i] == 0) {
dd               8324 drivers/infiniband/hw/hfi1/chip.c 		regs[i] = read_csr(dd, CCE_INT_STATUS + (8 * i)) &
dd               8325 drivers/infiniband/hw/hfi1/chip.c 				dd->gi_mask[i];
dd               8328 drivers/infiniband/hw/hfi1/chip.c 			write_csr(dd, CCE_INT_CLEAR + (8 * i), regs[i]);
dd               8334 drivers/infiniband/hw/hfi1/chip.c 		is_interrupt(dd, bit);
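
general_interrupt() above snapshots the status registers, masks them, acknowledges them, and only then walks the set bits. A toy model of that read/clear/dispatch loop, with write-1-to-clear semantics simulated on plain variables (the register count and mask values are invented):

#include <stdint.h>
#include <stdio.h>

#define NREGS 2

/* fake interrupt status registers with write-1-to-clear semantics */
static uint64_t int_status[NREGS];
static uint64_t gi_mask[NREGS] = { ~0ull, ~0ull };

static void handle_source(unsigned int bit)
{
	printf("dispatch source %u\n", bit);
}

static void general_interrupt(void)
{
	uint64_t regs[NREGS];
	int i;

	/* snapshot the enabled status and ack it in the same pass */
	for (i = 0; i < NREGS; i++) {
		regs[i] = int_status[i] & gi_mask[i];
		int_status[i] &= ~regs[i];	/* models write-1-to-clear */
	}

	/* walk every set bit across the snapshot */
	for (i = 0; i < NREGS; i++)
		for (unsigned int b = 0; b < 64; b++)
			if (regs[i] & (1ull << b))
				handle_source(i * 64 + b);
}

int main(void)
{
	int_status[0] = (1ull << 3) | (1ull << 40);
	int_status[1] = 1ull << 5;
	general_interrupt();
	return 0;
}
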
dd               8344 drivers/infiniband/hw/hfi1/chip.c 	struct hfi1_devdata *dd = sde->dd;
dd               8348 drivers/infiniband/hw/hfi1/chip.c 	dd_dev_err(dd, "CONFIG SDMA(%u) %s:%d %s()\n", sde->this_idx,
dd               8353 drivers/infiniband/hw/hfi1/chip.c 	this_cpu_inc(*dd->int_counter);
dd               8356 drivers/infiniband/hw/hfi1/chip.c 	status = read_csr(dd,
dd               8361 drivers/infiniband/hw/hfi1/chip.c 		write_csr(dd,
dd               8368 drivers/infiniband/hw/hfi1/chip.c 		dd_dev_info_ratelimited(dd, "SDMA engine %u interrupt, but no status bits set\n",
dd               8381 drivers/infiniband/hw/hfi1/chip.c 	struct hfi1_devdata *dd = rcd->dd;
dd               8384 drivers/infiniband/hw/hfi1/chip.c 	write_csr(dd, addr, rcd->imask);
dd               8386 drivers/infiniband/hw/hfi1/chip.c 	(void)read_csr(dd, addr);
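
The (void) read-back just above is the classic MMIO posted-write flush: a read on the same register path forces earlier writes to complete before execution continues. A sketch of the idiom over a fake register window; real code would go through ioremap()ed pointers and readq()/writeq(), which this only models:

#include <stdint.h>

/* fake MMIO window standing in for the chip's CSR space */
static volatile uint64_t mmio[16];

static void write_csr(unsigned int idx, uint64_t v) { mmio[idx] = v; }
static uint64_t read_csr(unsigned int idx)          { return mmio[idx]; }

static void arm_interrupt(unsigned int idx, uint64_t mask)
{
	write_csr(idx, mask);
	/* read back to flush the posted write before relying on it */
	(void)read_csr(idx);
}

int main(void)
{
	arm_interrupt(3, 1ull << 7);
	return 0;
}
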
dd               8392 drivers/infiniband/hw/hfi1/chip.c 	write_csr(rcd->dd, CCE_INT_FORCE + (8 * rcd->ireg), rcd->imask);
dd               8420 drivers/infiniband/hw/hfi1/chip.c 	tail = (u32)read_uctxt_csr(rcd->dd, rcd->ctxt, RCV_HDR_TAIL);
dd               8435 drivers/infiniband/hw/hfi1/chip.c 	struct hfi1_devdata *dd = rcd->dd;
dd               8439 drivers/infiniband/hw/hfi1/chip.c 	trace_hfi1_receive_interrupt(dd, rcd);
dd               8440 drivers/infiniband/hw/hfi1/chip.c 	this_cpu_inc(*dd->int_counter);
dd               8499 drivers/infiniband/hw/hfi1/chip.c u32 read_physical_state(struct hfi1_devdata *dd)
dd               8503 drivers/infiniband/hw/hfi1/chip.c 	reg = read_csr(dd, DC_DC8051_STS_CUR_STATE);
dd               8508 drivers/infiniband/hw/hfi1/chip.c u32 read_logical_state(struct hfi1_devdata *dd)
dd               8512 drivers/infiniband/hw/hfi1/chip.c 	reg = read_csr(dd, DCC_CFG_PORT_CONFIG);
dd               8517 drivers/infiniband/hw/hfi1/chip.c static void set_logical_state(struct hfi1_devdata *dd, u32 chip_lstate)
dd               8521 drivers/infiniband/hw/hfi1/chip.c 	reg = read_csr(dd, DCC_CFG_PORT_CONFIG);
dd               8525 drivers/infiniband/hw/hfi1/chip.c 	write_csr(dd, DCC_CFG_PORT_CONFIG, reg);
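
set_logical_state() above is the stock read-modify-write on a CSR field: read, clear the field through its mask, OR in the shifted new value, write back, leaving every other bit untouched. A self-contained sketch of the idiom against a plain variable standing in for the register; the mask and shift values are made up:

#include <stdint.h>
#include <stdio.h>

/* stand-in for a memory-mapped CSR; the driver uses read_csr()/write_csr() */
static uint64_t fake_port_config;

#define LINK_STATE_SHIFT 48		/* illustrative field placement */
#define LINK_STATE_MASK  (0xFULL << LINK_STATE_SHIFT)

static uint64_t read_csr(void)    { return fake_port_config; }
static void write_csr(uint64_t v) { fake_port_config = v; }

/* only the link-state field changes; all other bits survive */
static void set_logical_state(uint64_t new_state)
{
	uint64_t reg = read_csr();

	reg &= ~LINK_STATE_MASK;
	reg |= (new_state << LINK_STATE_SHIFT) & LINK_STATE_MASK;
	write_csr(reg);
}

int main(void)
{
	fake_port_config = 0x1234000000005678ULL;
	set_logical_state(0x5);
	printf("port config now 0x%016llx\n",
	       (unsigned long long)fake_port_config);
	return 0;
}
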
dd               8531 drivers/infiniband/hw/hfi1/chip.c static int read_lcb_via_8051(struct hfi1_devdata *dd, u32 addr, u64 *data)
dd               8536 drivers/infiniband/hw/hfi1/chip.c 	if (dd->icode == ICODE_FUNCTIONAL_SIMULATOR) {
dd               8537 drivers/infiniband/hw/hfi1/chip.c 		if (acquire_lcb_access(dd, 0) == 0) {
dd               8538 drivers/infiniband/hw/hfi1/chip.c 			*data = read_csr(dd, addr);
dd               8539 drivers/infiniband/hw/hfi1/chip.c 			release_lcb_access(dd, 0);
dd               8547 drivers/infiniband/hw/hfi1/chip.c 	ret = do_8051_command(dd, HCMD_READ_LCB_CSR, regno, data);
dd               8569 drivers/infiniband/hw/hfi1/chip.c static void update_lcb_cache(struct hfi1_devdata *dd)
dd               8576 drivers/infiniband/hw/hfi1/chip.c 		ret = read_lcb_csr(dd, lcb_cache[i].off, &val);
dd               8603 drivers/infiniband/hw/hfi1/chip.c int read_lcb_csr(struct hfi1_devdata *dd, u32 addr, u64 *data)
dd               8605 drivers/infiniband/hw/hfi1/chip.c 	struct hfi1_pportdata *ppd = dd->pport;
dd               8609 drivers/infiniband/hw/hfi1/chip.c 		return read_lcb_via_8051(dd, addr, data);
dd               8618 drivers/infiniband/hw/hfi1/chip.c 	*data = read_csr(dd, addr);
dd               8625 drivers/infiniband/hw/hfi1/chip.c static int write_lcb_via_8051(struct hfi1_devdata *dd, u32 addr, u64 data)
dd               8630 drivers/infiniband/hw/hfi1/chip.c 	if (dd->icode == ICODE_FUNCTIONAL_SIMULATOR ||
dd               8631 drivers/infiniband/hw/hfi1/chip.c 	    (dd->dc8051_ver < dc8051_ver(0, 20, 0))) {
dd               8632 drivers/infiniband/hw/hfi1/chip.c 		if (acquire_lcb_access(dd, 0) == 0) {
dd               8633 drivers/infiniband/hw/hfi1/chip.c 			write_csr(dd, addr, data);
dd               8634 drivers/infiniband/hw/hfi1/chip.c 			release_lcb_access(dd, 0);
dd               8642 drivers/infiniband/hw/hfi1/chip.c 	ret = do_8051_command(dd, HCMD_WRITE_LCB_CSR, regno, &data);
dd               8652 drivers/infiniband/hw/hfi1/chip.c int write_lcb_csr(struct hfi1_devdata *dd, u32 addr, u64 data)
dd               8654 drivers/infiniband/hw/hfi1/chip.c 	struct hfi1_pportdata *ppd = dd->pport;
dd               8658 drivers/infiniband/hw/hfi1/chip.c 		return write_lcb_via_8051(dd, addr, data);
dd               8663 drivers/infiniband/hw/hfi1/chip.c 	write_csr(dd, addr, data);
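
read_lcb_csr()/write_lcb_csr() above choose an access path at run time: direct CSR access when the host side may touch the LCB (or on the simulator and older firmware), otherwise a proxied access through the 8051 via do_8051_command(). A toy sketch of that routing decision, with invented flags and stub accessors:

#include <stdint.h>
#include <stdio.h>

struct devdata {
	int host_owns_lcb;	/* stand-in for the driver's access state  */
	int is_simulator;	/* stand-in for ICODE_FUNCTIONAL_SIMULATOR */
};

static int read_direct(uint32_t addr, uint64_t *data)
{
	*data = 0xabcdULL + addr;	/* fake register content */
	return 0;
}

static int read_via_firmware(uint32_t addr, uint64_t *data)
{
	/* the driver issues HCMD_READ_LCB_CSR through do_8051_command() */
	printf("proxy read of 0x%x through firmware\n", addr);
	*data = 0;
	return 0;
}

static int read_lcb_csr(struct devdata *dd, uint32_t addr, uint64_t *data)
{
	if (dd->host_owns_lcb || dd->is_simulator)
		return read_direct(addr, data);
	return read_via_firmware(addr, data);
}

int main(void)
{
	struct devdata dd = { .host_owns_lcb = 0, .is_simulator = 0 };
	uint64_t val;

	read_lcb_csr(&dd, 0x100, &val);	/* proxied */
	dd.host_owns_lcb = 1;
	read_lcb_csr(&dd, 0x100, &val);	/* direct  */
	printf("direct read returned 0x%llx\n", (unsigned long long)val);
	return 0;
}
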
dd               8672 drivers/infiniband/hw/hfi1/chip.c static int do_8051_command(struct hfi1_devdata *dd, u32 type, u64 in_data,
dd               8681 drivers/infiniband/hw/hfi1/chip.c 	mutex_lock(&dd->dc8051_lock);
dd               8684 drivers/infiniband/hw/hfi1/chip.c 	if (dd->dc_shutdown) {
dd               8699 drivers/infiniband/hw/hfi1/chip.c 	if (dd->dc8051_timed_out) {
dd               8700 drivers/infiniband/hw/hfi1/chip.c 		if (dd->dc8051_timed_out > 1) {
dd               8701 drivers/infiniband/hw/hfi1/chip.c 			dd_dev_err(dd,
dd               8707 drivers/infiniband/hw/hfi1/chip.c 		_dc_shutdown(dd);
dd               8708 drivers/infiniband/hw/hfi1/chip.c 		_dc_start(dd);
dd               8730 drivers/infiniband/hw/hfi1/chip.c 		reg = read_csr(dd, DC_DC8051_CFG_EXT_DEV_0);
dd               8736 drivers/infiniband/hw/hfi1/chip.c 		write_csr(dd, DC_DC8051_CFG_EXT_DEV_0, reg);
dd               8747 drivers/infiniband/hw/hfi1/chip.c 	write_csr(dd, DC_DC8051_CFG_HOST_CMD_0, reg);
dd               8749 drivers/infiniband/hw/hfi1/chip.c 	write_csr(dd, DC_DC8051_CFG_HOST_CMD_0, reg);
dd               8754 drivers/infiniband/hw/hfi1/chip.c 		reg = read_csr(dd, DC_DC8051_CFG_HOST_CMD_1);
dd               8759 drivers/infiniband/hw/hfi1/chip.c 			dd->dc8051_timed_out++;
dd               8760 drivers/infiniband/hw/hfi1/chip.c 			dd_dev_err(dd, "8051 host command %u timeout\n", type);
dd               8774 drivers/infiniband/hw/hfi1/chip.c 			*out_data |= (read_csr(dd, DC_DC8051_CFG_EXT_DEV_1)
dd               8782 drivers/infiniband/hw/hfi1/chip.c 	dd->dc8051_timed_out = 0;
dd               8786 drivers/infiniband/hw/hfi1/chip.c 	write_csr(dd, DC_DC8051_CFG_HOST_CMD_0, 0);
dd               8789 drivers/infiniband/hw/hfi1/chip.c 	mutex_unlock(&dd->dc8051_lock);
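
do_8051_command() serializes host-to-firmware commands under a mutex, writes the command register, polls for completion with a bounded wait, collects the result, and clears the command before unlocking. A compact user-space model of that handshake; the encoding, poll budget, and "firmware" are all stand-ins (build with -lpthread):

#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

/* toy device: a command register and a completion flag */
static uint64_t cmd_reg;
static volatile int cmd_done;

static pthread_mutex_t cmd_lock = PTHREAD_MUTEX_INITIALIZER;

/* pretend firmware: here it completes immediately */
static void fake_firmware_step(void) { cmd_done = 1; }

static int do_command(uint64_t type, uint64_t in_data, uint64_t *out_data)
{
	int ret = 0;

	/* one command at a time, like dc8051_lock in the indexed code */
	pthread_mutex_lock(&cmd_lock);

	cmd_done = 0;
	cmd_reg = (type << 56) | in_data;	/* illustrative encoding */

	/* poll for completion with a bounded wait */
	for (int i = 0; i < 1000; i++) {
		fake_firmware_step();
		if (cmd_done)
			break;
	}
	if (!cmd_done) {
		fprintf(stderr, "command %llu timeout\n",
			(unsigned long long)type);
		ret = -1;
		goto out;
	}

	if (out_data)
		*out_data = cmd_reg & 0xffffffffULL;	/* fake result field */

out:
	cmd_reg = 0;	/* clear so the next command is a fresh edge */
	pthread_mutex_unlock(&cmd_lock);
	return ret;
}

int main(void)
{
	uint64_t out;

	if (do_command(7, 0x42, &out) == 0)
		printf("command ok, out 0x%llx\n", (unsigned long long)out);
	return 0;
}
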
dd               8793 drivers/infiniband/hw/hfi1/chip.c static int set_physical_link_state(struct hfi1_devdata *dd, u64 state)
dd               8795 drivers/infiniband/hw/hfi1/chip.c 	return do_8051_command(dd, HCMD_CHANGE_PHY_STATE, state, NULL);
dd               8798 drivers/infiniband/hw/hfi1/chip.c int load_8051_config(struct hfi1_devdata *dd, u8 field_id,
dd               8807 drivers/infiniband/hw/hfi1/chip.c 	ret = do_8051_command(dd, HCMD_LOAD_CONFIG_DATA, data, NULL);
dd               8809 drivers/infiniband/hw/hfi1/chip.c 		dd_dev_err(dd,
dd               8821 drivers/infiniband/hw/hfi1/chip.c int read_8051_config(struct hfi1_devdata *dd, u8 field_id, u8 lane_id,
dd               8837 drivers/infiniband/hw/hfi1/chip.c 	ret = read_8051_data(dd, addr, 8, &big_data);
dd               8847 drivers/infiniband/hw/hfi1/chip.c 		dd_dev_err(dd, "%s: direct read failed, lane %d, field %d!\n",
dd               8854 drivers/infiniband/hw/hfi1/chip.c static int write_vc_local_phy(struct hfi1_devdata *dd, u8 power_management,
dd               8861 drivers/infiniband/hw/hfi1/chip.c 	return load_8051_config(dd, VERIFY_CAP_LOCAL_PHY,
dd               8865 drivers/infiniband/hw/hfi1/chip.c static int write_vc_local_fabric(struct hfi1_devdata *dd, u8 vau, u8 z, u8 vcu,
dd               8875 drivers/infiniband/hw/hfi1/chip.c 	return load_8051_config(dd, VERIFY_CAP_LOCAL_FABRIC,
dd               8879 drivers/infiniband/hw/hfi1/chip.c static void read_vc_local_link_mode(struct hfi1_devdata *dd, u8 *misc_bits,
dd               8884 drivers/infiniband/hw/hfi1/chip.c 	read_8051_config(dd, VERIFY_CAP_LOCAL_LINK_MODE, GENERAL_CONFIG,
dd               8891 drivers/infiniband/hw/hfi1/chip.c static int write_vc_local_link_mode(struct hfi1_devdata *dd,
dd               8901 drivers/infiniband/hw/hfi1/chip.c 	return load_8051_config(dd, VERIFY_CAP_LOCAL_LINK_MODE, GENERAL_CONFIG,
dd               8905 drivers/infiniband/hw/hfi1/chip.c static int write_local_device_id(struct hfi1_devdata *dd, u16 device_id,
dd               8912 drivers/infiniband/hw/hfi1/chip.c 	return load_8051_config(dd, LOCAL_DEVICE_ID, GENERAL_CONFIG, frame);
dd               8915 drivers/infiniband/hw/hfi1/chip.c static void read_remote_device_id(struct hfi1_devdata *dd, u16 *device_id,
dd               8920 drivers/infiniband/hw/hfi1/chip.c 	read_8051_config(dd, REMOTE_DEVICE_ID, GENERAL_CONFIG, &frame);
dd               8926 drivers/infiniband/hw/hfi1/chip.c int write_host_interface_version(struct hfi1_devdata *dd, u8 version)
dd               8932 drivers/infiniband/hw/hfi1/chip.c 	read_8051_config(dd, RESERVED_REGISTERS, GENERAL_CONFIG, &frame);
dd               8936 drivers/infiniband/hw/hfi1/chip.c 	return load_8051_config(dd, RESERVED_REGISTERS, GENERAL_CONFIG,
dd               8940 drivers/infiniband/hw/hfi1/chip.c void read_misc_status(struct hfi1_devdata *dd, u8 *ver_major, u8 *ver_minor,
dd               8945 drivers/infiniband/hw/hfi1/chip.c 	read_8051_config(dd, MISC_STATUS, GENERAL_CONFIG, &frame);
dd               8951 drivers/infiniband/hw/hfi1/chip.c 	read_8051_config(dd, VERSION_PATCH, GENERAL_CONFIG, &frame);
dd               8956 drivers/infiniband/hw/hfi1/chip.c static void read_vc_remote_phy(struct hfi1_devdata *dd, u8 *power_management,
dd               8961 drivers/infiniband/hw/hfi1/chip.c 	read_8051_config(dd, VERIFY_CAP_REMOTE_PHY, GENERAL_CONFIG, &frame);
dd               8968 drivers/infiniband/hw/hfi1/chip.c static void read_vc_remote_fabric(struct hfi1_devdata *dd, u8 *vau, u8 *z,
dd               8973 drivers/infiniband/hw/hfi1/chip.c 	read_8051_config(dd, VERIFY_CAP_REMOTE_FABRIC, GENERAL_CONFIG, &frame);
dd               8981 drivers/infiniband/hw/hfi1/chip.c static void read_vc_remote_link_width(struct hfi1_devdata *dd,
dd               8987 drivers/infiniband/hw/hfi1/chip.c 	read_8051_config(dd, VERIFY_CAP_REMOTE_LINK_WIDTH, GENERAL_CONFIG,
dd               8994 drivers/infiniband/hw/hfi1/chip.c static void read_local_lni(struct hfi1_devdata *dd, u8 *enable_lane_rx)
dd               8998 drivers/infiniband/hw/hfi1/chip.c 	read_8051_config(dd, LOCAL_LNI_INFO, GENERAL_CONFIG, &frame);
dd               9002 drivers/infiniband/hw/hfi1/chip.c static void read_last_local_state(struct hfi1_devdata *dd, u32 *lls)
dd               9004 drivers/infiniband/hw/hfi1/chip.c 	read_8051_config(dd, LAST_LOCAL_STATE_COMPLETE, GENERAL_CONFIG, lls);
dd               9007 drivers/infiniband/hw/hfi1/chip.c static void read_last_remote_state(struct hfi1_devdata *dd, u32 *lrs)
dd               9009 drivers/infiniband/hw/hfi1/chip.c 	read_8051_config(dd, LAST_REMOTE_STATE_COMPLETE, GENERAL_CONFIG, lrs);
dd               9012 drivers/infiniband/hw/hfi1/chip.c void hfi1_read_link_quality(struct hfi1_devdata *dd, u8 *link_quality)
dd               9018 drivers/infiniband/hw/hfi1/chip.c 	if (dd->pport->host_link_state & HLS_UP) {
dd               9019 drivers/infiniband/hw/hfi1/chip.c 		ret = read_8051_config(dd, LINK_QUALITY_INFO, GENERAL_CONFIG,
dd               9027 drivers/infiniband/hw/hfi1/chip.c static void read_planned_down_reason_code(struct hfi1_devdata *dd, u8 *pdrrc)
dd               9031 drivers/infiniband/hw/hfi1/chip.c 	read_8051_config(dd, LINK_QUALITY_INFO, GENERAL_CONFIG, &frame);
dd               9035 drivers/infiniband/hw/hfi1/chip.c static void read_link_down_reason(struct hfi1_devdata *dd, u8 *ldr)
dd               9039 drivers/infiniband/hw/hfi1/chip.c 	read_8051_config(dd, LINK_DOWN_REASON, GENERAL_CONFIG, &frame);
dd               9043 drivers/infiniband/hw/hfi1/chip.c static int read_tx_settings(struct hfi1_devdata *dd,
dd               9052 drivers/infiniband/hw/hfi1/chip.c 	ret = read_8051_config(dd, TX_SETTINGS, GENERAL_CONFIG, &frame);
dd               9063 drivers/infiniband/hw/hfi1/chip.c static int write_tx_settings(struct hfi1_devdata *dd,
dd               9076 drivers/infiniband/hw/hfi1/chip.c 	return load_8051_config(dd, TX_SETTINGS, GENERAL_CONFIG, frame);
dd               9084 drivers/infiniband/hw/hfi1/chip.c static int read_idle_message(struct hfi1_devdata *dd, u64 type, u64 *data_out)
dd               9088 drivers/infiniband/hw/hfi1/chip.c 	ret = do_8051_command(dd, HCMD_READ_LCB_IDLE_MSG, type, data_out);
dd               9090 drivers/infiniband/hw/hfi1/chip.c 		dd_dev_err(dd, "read idle message: type %d, err %d\n",
dd               9094 drivers/infiniband/hw/hfi1/chip.c 	dd_dev_info(dd, "%s: read idle message 0x%llx\n", __func__, *data_out);
dd               9106 drivers/infiniband/hw/hfi1/chip.c static int read_idle_sma(struct hfi1_devdata *dd, u64 *data)
dd               9108 drivers/infiniband/hw/hfi1/chip.c 	return read_idle_message(dd, (u64)IDLE_SMA << IDLE_MSG_TYPE_SHIFT,
dd               9117 drivers/infiniband/hw/hfi1/chip.c static int send_idle_message(struct hfi1_devdata *dd, u64 data)
dd               9121 drivers/infiniband/hw/hfi1/chip.c 	dd_dev_info(dd, "%s: sending idle message 0x%llx\n", __func__, data);
dd               9122 drivers/infiniband/hw/hfi1/chip.c 	ret = do_8051_command(dd, HCMD_SEND_LCB_IDLE_MSG, data, NULL);
dd               9124 drivers/infiniband/hw/hfi1/chip.c 		dd_dev_err(dd, "send idle message: data 0x%llx, err %d\n",
dd               9136 drivers/infiniband/hw/hfi1/chip.c int send_idle_sma(struct hfi1_devdata *dd, u64 message)
dd               9142 drivers/infiniband/hw/hfi1/chip.c 	return send_idle_message(dd, data);
dd               9151 drivers/infiniband/hw/hfi1/chip.c static int do_quick_linkup(struct hfi1_devdata *dd)
dd               9155 drivers/infiniband/hw/hfi1/chip.c 	lcb_shutdown(dd, 0);
dd               9160 drivers/infiniband/hw/hfi1/chip.c 		write_csr(dd, DC_LCB_CFG_LOOPBACK,
dd               9162 drivers/infiniband/hw/hfi1/chip.c 		write_csr(dd, DC_LCB_CFG_LANE_WIDTH, 0);
dd               9167 drivers/infiniband/hw/hfi1/chip.c 	write_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET, 0);
dd               9170 drivers/infiniband/hw/hfi1/chip.c 	if (loopback && dd->icode == ICODE_FUNCTIONAL_SIMULATOR) {
dd               9172 drivers/infiniband/hw/hfi1/chip.c 		write_csr(dd, DC_LCB_CFG_RUN,
dd               9175 drivers/infiniband/hw/hfi1/chip.c 		ret = wait_link_transfer_active(dd, 10);
dd               9179 drivers/infiniband/hw/hfi1/chip.c 		write_csr(dd, DC_LCB_CFG_ALLOW_LINK_UP,
dd               9191 drivers/infiniband/hw/hfi1/chip.c 		dd_dev_err(dd,
dd               9194 drivers/infiniband/hw/hfi1/chip.c 		dd_dev_err(dd, "Continuing with quick linkup\n");
dd               9197 drivers/infiniband/hw/hfi1/chip.c 	write_csr(dd, DC_LCB_ERR_EN, 0); /* mask LCB errors */
dd               9198 drivers/infiniband/hw/hfi1/chip.c 	set_8051_lcb_access(dd);
dd               9205 drivers/infiniband/hw/hfi1/chip.c 	ret = set_physical_link_state(dd, PLS_QUICK_LINKUP);
dd               9207 drivers/infiniband/hw/hfi1/chip.c 		dd_dev_err(dd,
dd               9211 drivers/infiniband/hw/hfi1/chip.c 		set_host_lcb_access(dd);
dd               9212 drivers/infiniband/hw/hfi1/chip.c 		write_csr(dd, DC_LCB_ERR_EN, ~0ull); /* watch LCB errors */
dd               9225 drivers/infiniband/hw/hfi1/chip.c static int init_loopback(struct hfi1_devdata *dd)
dd               9227 drivers/infiniband/hw/hfi1/chip.c 	dd_dev_info(dd, "Entering loopback mode\n");
dd               9230 drivers/infiniband/hw/hfi1/chip.c 	write_csr(dd, DC_DC8051_CFG_MODE,
dd               9231 drivers/infiniband/hw/hfi1/chip.c 		  (read_csr(dd, DC_DC8051_CFG_MODE) | DISABLE_SELF_GUID_CHECK));
dd               9239 drivers/infiniband/hw/hfi1/chip.c 	if ((dd->icode == ICODE_FUNCTIONAL_SIMULATOR) &&
dd               9258 drivers/infiniband/hw/hfi1/chip.c 		if (dd->icode == ICODE_FPGA_EMULATION) {
dd               9259 drivers/infiniband/hw/hfi1/chip.c 			dd_dev_err(dd,
dd               9270 drivers/infiniband/hw/hfi1/chip.c 	dd_dev_err(dd, "Invalid loopback mode %d\n", loopback);
dd               9305 drivers/infiniband/hw/hfi1/chip.c 	struct hfi1_devdata *dd = ppd->dd;
dd               9312 drivers/infiniband/hw/hfi1/chip.c 	fabric_serdes_reset(dd);
dd               9315 drivers/infiniband/hw/hfi1/chip.c 	ret = read_tx_settings(dd, &enable_lane_tx, &tx_polarity_inversion,
dd               9320 drivers/infiniband/hw/hfi1/chip.c 	if (dd->dc8051_ver < dc8051_ver(0, 20, 0)) {
dd               9336 drivers/infiniband/hw/hfi1/chip.c 	ret = write_tx_settings(dd, enable_lane_tx, tx_polarity_inversion,
dd               9341 drivers/infiniband/hw/hfi1/chip.c 	ret = write_host_interface_version(dd, HOST_INTERFACE_VERSION);
dd               9343 drivers/infiniband/hw/hfi1/chip.c 		dd_dev_err(dd,
dd               9352 drivers/infiniband/hw/hfi1/chip.c 	ret = write_vc_local_phy(dd,
dd               9359 drivers/infiniband/hw/hfi1/chip.c 	ret = write_vc_local_fabric(dd, dd->vau, 1, dd->vcu, dd->vl15_init,
dd               9376 drivers/infiniband/hw/hfi1/chip.c 	if (dd->dc8051_ver >= dc8051_ver(1, 25, 0))
dd               9379 drivers/infiniband/hw/hfi1/chip.c 	ret = write_vc_local_link_mode(dd, misc_bits, 0,
dd               9386 drivers/infiniband/hw/hfi1/chip.c 	ret = write_local_device_id(dd, dd->pcidev->device, dd->minrev);
dd               9391 drivers/infiniband/hw/hfi1/chip.c 	dd_dev_err(dd,
dd               9411 drivers/infiniband/hw/hfi1/chip.c 		dd_dev_info(ppd->dd,
dd               9429 drivers/infiniband/hw/hfi1/chip.c 	struct hfi1_devdata *dd = ppd->dd;
dd               9448 drivers/infiniband/hw/hfi1/chip.c 		mask = read_csr(dd, dd->hfi1_id ?
dd               9453 drivers/infiniband/hw/hfi1/chip.c 			dd_dev_info(dd, "%s: No IntN detected, reset complete\n",
dd               9463 drivers/infiniband/hw/hfi1/chip.c 	struct hfi1_devdata *dd = ppd->dd;
dd               9466 drivers/infiniband/hw/hfi1/chip.c 	mask = read_csr(dd, dd->hfi1_id ? ASIC_QSFP2_MASK : ASIC_QSFP1_MASK);
dd               9472 drivers/infiniband/hw/hfi1/chip.c 		write_csr(dd, dd->hfi1_id ? ASIC_QSFP2_CLEAR : ASIC_QSFP1_CLEAR,
dd               9478 drivers/infiniband/hw/hfi1/chip.c 	write_csr(dd, dd->hfi1_id ? ASIC_QSFP2_MASK : ASIC_QSFP1_MASK, mask);
dd               9483 drivers/infiniband/hw/hfi1/chip.c 	struct hfi1_devdata *dd = ppd->dd;
dd               9492 drivers/infiniband/hw/hfi1/chip.c 	qsfp_mask = read_csr(dd,
dd               9493 drivers/infiniband/hw/hfi1/chip.c 			     dd->hfi1_id ? ASIC_QSFP2_OUT : ASIC_QSFP1_OUT);
dd               9495 drivers/infiniband/hw/hfi1/chip.c 	write_csr(dd,
dd               9496 drivers/infiniband/hw/hfi1/chip.c 		  dd->hfi1_id ? ASIC_QSFP2_OUT : ASIC_QSFP1_OUT, qsfp_mask);
dd               9501 drivers/infiniband/hw/hfi1/chip.c 	write_csr(dd,
dd               9502 drivers/infiniband/hw/hfi1/chip.c 		  dd->hfi1_id ? ASIC_QSFP2_OUT : ASIC_QSFP1_OUT, qsfp_mask);
dd               9523 drivers/infiniband/hw/hfi1/chip.c 	struct hfi1_devdata *dd = ppd->dd;
dd               9527 drivers/infiniband/hw/hfi1/chip.c 		dd_dev_err(dd, "%s: QSFP cable temperature too high\n",
dd               9532 drivers/infiniband/hw/hfi1/chip.c 		dd_dev_err(dd, "%s: QSFP cable temperature too low\n",
dd               9543 drivers/infiniband/hw/hfi1/chip.c 		dd_dev_err(dd, "%s: QSFP supply voltage too high\n",
dd               9548 drivers/infiniband/hw/hfi1/chip.c 		dd_dev_err(dd, "%s: QSFP supply voltage too low\n",
dd               9555 drivers/infiniband/hw/hfi1/chip.c 		dd_dev_err(dd, "%s: Cable RX channel 1/2 power too high\n",
dd               9560 drivers/infiniband/hw/hfi1/chip.c 		dd_dev_err(dd, "%s: Cable RX channel 1/2 power too low\n",
dd               9565 drivers/infiniband/hw/hfi1/chip.c 		dd_dev_err(dd, "%s: Cable RX channel 3/4 power too high\n",
dd               9570 drivers/infiniband/hw/hfi1/chip.c 		dd_dev_err(dd, "%s: Cable RX channel 3/4 power too low\n",
dd               9575 drivers/infiniband/hw/hfi1/chip.c 		dd_dev_err(dd, "%s: Cable TX channel 1/2 bias too high\n",
dd               9580 drivers/infiniband/hw/hfi1/chip.c 		dd_dev_err(dd, "%s: Cable TX channel 1/2 bias too low\n",
dd               9585 drivers/infiniband/hw/hfi1/chip.c 		dd_dev_err(dd, "%s: Cable TX channel 3/4 bias too high\n",
dd               9590 drivers/infiniband/hw/hfi1/chip.c 		dd_dev_err(dd, "%s: Cable TX channel 3/4 bias too low\n",
dd               9595 drivers/infiniband/hw/hfi1/chip.c 		dd_dev_err(dd, "%s: Cable TX channel 1/2 power too high\n",
dd               9600 drivers/infiniband/hw/hfi1/chip.c 		dd_dev_err(dd, "%s: Cable TX channel 1/2 power too low\n",
dd               9605 drivers/infiniband/hw/hfi1/chip.c 		dd_dev_err(dd, "%s: Cable TX channel 3/4 power too high\n",
dd               9610 drivers/infiniband/hw/hfi1/chip.c 		dd_dev_err(dd, "%s: Cable TX channel 3/4 power too low\n",
dd               9624 drivers/infiniband/hw/hfi1/chip.c 	struct hfi1_devdata *dd;
dd               9628 drivers/infiniband/hw/hfi1/chip.c 	dd = ppd->dd;
dd               9635 drivers/infiniband/hw/hfi1/chip.c 		dd_dev_info(ppd->dd,
dd               9645 drivers/infiniband/hw/hfi1/chip.c 	dc_start(dd);
dd               9664 drivers/infiniband/hw/hfi1/chip.c 		if (one_qsfp_read(ppd, dd->hfi1_id, 6,
dd               9666 drivers/infiniband/hw/hfi1/chip.c 			dd_dev_info(dd,
dd               9682 drivers/infiniband/hw/hfi1/chip.c void init_qsfp_int(struct hfi1_devdata *dd)
dd               9684 drivers/infiniband/hw/hfi1/chip.c 	struct hfi1_pportdata *ppd = dd->pport;
dd               9689 drivers/infiniband/hw/hfi1/chip.c 	write_csr(dd, dd->hfi1_id ? ASIC_QSFP2_CLEAR : ASIC_QSFP1_CLEAR,
dd               9691 drivers/infiniband/hw/hfi1/chip.c 	write_csr(dd, dd->hfi1_id ? ASIC_QSFP2_MASK : ASIC_QSFP1_MASK,
dd               9699 drivers/infiniband/hw/hfi1/chip.c 	write_csr(dd,
dd               9700 drivers/infiniband/hw/hfi1/chip.c 		  dd->hfi1_id ? ASIC_QSFP2_INVERT : ASIC_QSFP1_INVERT,
dd               9704 drivers/infiniband/hw/hfi1/chip.c 	if (!dd->hfi1_id)
dd               9705 drivers/infiniband/hw/hfi1/chip.c 		set_intr_bits(dd, QSFP1_INT, QSFP1_INT, true);
dd               9707 drivers/infiniband/hw/hfi1/chip.c 		set_intr_bits(dd, QSFP2_INT, QSFP2_INT, true);
dd               9713 drivers/infiniband/hw/hfi1/chip.c static void init_lcb(struct hfi1_devdata *dd)
dd               9716 drivers/infiniband/hw/hfi1/chip.c 	if (dd->icode == ICODE_FUNCTIONAL_SIMULATOR)
dd               9722 drivers/infiniband/hw/hfi1/chip.c 	write_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET, 0x01);
dd               9723 drivers/infiniband/hw/hfi1/chip.c 	write_csr(dd, DC_LCB_CFG_LANE_WIDTH, 0x00);
dd               9724 drivers/infiniband/hw/hfi1/chip.c 	write_csr(dd, DC_LCB_CFG_REINIT_AS_SLAVE, 0x00);
dd               9725 drivers/infiniband/hw/hfi1/chip.c 	write_csr(dd, DC_LCB_CFG_CNT_FOR_SKIP_STALL, 0x110);
dd               9726 drivers/infiniband/hw/hfi1/chip.c 	write_csr(dd, DC_LCB_CFG_CLK_CNTR, 0x08);
dd               9727 drivers/infiniband/hw/hfi1/chip.c 	write_csr(dd, DC_LCB_CFG_LOOPBACK, 0x02);
dd               9728 drivers/infiniband/hw/hfi1/chip.c 	write_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET, 0x00);
dd               9748 drivers/infiniband/hw/hfi1/chip.c 	ret = one_qsfp_read(ppd, ppd->dd->hfi1_id, 2, &status, 1);
dd               9775 drivers/infiniband/hw/hfi1/chip.c 			dd_dev_err(ppd->dd, "QSFP not responding, giving up\n");
dd               9778 drivers/infiniband/hw/hfi1/chip.c 		dd_dev_info(ppd->dd,
dd               9803 drivers/infiniband/hw/hfi1/chip.c 	struct hfi1_devdata *dd = ppd->dd;
dd               9808 drivers/infiniband/hw/hfi1/chip.c 		add_rcvctrl(dd, RCV_CTRL_RCV_EXTENDED_PSN_ENABLE_SMASK);
dd               9812 drivers/infiniband/hw/hfi1/chip.c 		if (dd->base_guid)
dd               9813 drivers/infiniband/hw/hfi1/chip.c 			guid = dd->base_guid + ppd->port - 1;
dd               9821 drivers/infiniband/hw/hfi1/chip.c 	init_lcb(dd);
dd               9824 drivers/infiniband/hw/hfi1/chip.c 		ret = init_loopback(dd);
dd               9842 drivers/infiniband/hw/hfi1/chip.c 	struct hfi1_devdata *dd = ppd->dd;
dd               9865 drivers/infiniband/hw/hfi1/chip.c 	clear_rcvctrl(dd, RCV_CTRL_RCV_PORT_ENABLE_SMASK);
dd               9869 drivers/infiniband/hw/hfi1/chip.c static inline int init_cpu_counters(struct hfi1_devdata *dd)
dd               9874 drivers/infiniband/hw/hfi1/chip.c 	ppd = (struct hfi1_pportdata *)(dd + 1);
dd               9875 drivers/infiniband/hw/hfi1/chip.c 	for (i = 0; i < dd->num_pports; i++, ppd++) {
dd               9893 drivers/infiniband/hw/hfi1/chip.c void hfi1_put_tid(struct hfi1_devdata *dd, u32 index,
dd               9898 drivers/infiniband/hw/hfi1/chip.c 	if (!(dd->flags & HFI1_PRESENT))
dd               9905 drivers/infiniband/hw/hfi1/chip.c 		dd_dev_err(dd,
dd               9910 drivers/infiniband/hw/hfi1/chip.c 	trace_hfi1_put_tid(dd, index, type, pa, order);
dd               9917 drivers/infiniband/hw/hfi1/chip.c 	trace_hfi1_write_rcvarray(dd->rcvarray_wc + (index * 8), reg);
dd               9918 drivers/infiniband/hw/hfi1/chip.c 	writeq(reg, dd->rcvarray_wc + (index * 8));
dd               9933 drivers/infiniband/hw/hfi1/chip.c 	struct hfi1_devdata *dd = rcd->dd;
dd               9939 drivers/infiniband/hw/hfi1/chip.c 		hfi1_put_tid(dd, i, PT_INVALID, 0, 0);
dd               9943 drivers/infiniband/hw/hfi1/chip.c 		hfi1_put_tid(dd, i, PT_INVALID, 0, 0);
dd               9980 drivers/infiniband/hw/hfi1/chip.c 	struct hfi1_devdata *dd = ppd->dd;
dd               10027 drivers/infiniband/hw/hfi1/chip.c 				dd,
dd               10051 drivers/infiniband/hw/hfi1/chip.c u32 lrh_max_header_bytes(struct hfi1_devdata *dd)
dd               10063 drivers/infiniband/hw/hfi1/chip.c 	return (dd->rcd[0]->rcvhdrqentsize - 2/*PBC/RHF*/ + 1/*ICRC*/) << 2;
dd               10079 drivers/infiniband/hw/hfi1/chip.c 	struct hfi1_devdata *dd = ppd->dd;
dd               10080 drivers/infiniband/hw/hfi1/chip.c 	u32 max_hb = lrh_max_header_bytes(dd), dcmtu;
dd               10081 drivers/infiniband/hw/hfi1/chip.c 	u32 maxvlmtu = dd->vld[15].mtu;
dd               10082 drivers/infiniband/hw/hfi1/chip.c 	u64 len1 = 0, len2 = (((dd->vld[15].mtu + max_hb) >> 2)
dd               10089 drivers/infiniband/hw/hfi1/chip.c 		if (dd->vld[i].mtu > maxvlmtu)
dd               10090 drivers/infiniband/hw/hfi1/chip.c 			maxvlmtu = dd->vld[i].mtu;
dd               10092 drivers/infiniband/hw/hfi1/chip.c 			len1 |= (((dd->vld[i].mtu + max_hb) >> 2)
dd               10096 drivers/infiniband/hw/hfi1/chip.c 			len2 |= (((dd->vld[i].mtu + max_hb) >> 2)
dd               10100 drivers/infiniband/hw/hfi1/chip.c 	write_csr(dd, SEND_LEN_CHECK0, len1);
dd               10101 drivers/infiniband/hw/hfi1/chip.c 	write_csr(dd, SEND_LEN_CHECK1, len2);
dd               10105 drivers/infiniband/hw/hfi1/chip.c 		thres = min(sc_percent_to_threshold(dd->vld[i].sc, 50),
dd               10106 drivers/infiniband/hw/hfi1/chip.c 			    sc_mtu_to_threshold(dd->vld[i].sc,
dd               10107 drivers/infiniband/hw/hfi1/chip.c 						dd->vld[i].mtu,
dd               10108 drivers/infiniband/hw/hfi1/chip.c 						dd->rcd[0]->rcvhdrqentsize));
dd               10111 drivers/infiniband/hw/hfi1/chip.c 					pio_select_send_context_vl(dd, j, i),
dd               10114 drivers/infiniband/hw/hfi1/chip.c 	thres = min(sc_percent_to_threshold(dd->vld[15].sc, 50),
dd               10115 drivers/infiniband/hw/hfi1/chip.c 		    sc_mtu_to_threshold(dd->vld[15].sc,
dd               10116 drivers/infiniband/hw/hfi1/chip.c 					dd->vld[15].mtu,
dd               10117 drivers/infiniband/hw/hfi1/chip.c 					dd->rcd[0]->rcvhdrqentsize));
dd               10118 drivers/infiniband/hw/hfi1/chip.c 	sc_set_cr_threshold(dd->vld[15].sc, thres);
dd               10123 drivers/infiniband/hw/hfi1/chip.c 	len1 = read_csr(ppd->dd, DCC_CFG_PORT_CONFIG);
dd               10127 drivers/infiniband/hw/hfi1/chip.c 	write_csr(ppd->dd, DCC_CFG_PORT_CONFIG, len1);
dd               10134 drivers/infiniband/hw/hfi1/chip.c 	struct hfi1_devdata *dd = ppd->dd;
dd               10136 drivers/infiniband/hw/hfi1/chip.c 	u64 c1 = read_csr(ppd->dd, DCC_CFG_PORT_CONFIG1);
dd               10150 drivers/infiniband/hw/hfi1/chip.c 	write_csr(ppd->dd, DCC_CFG_PORT_CONFIG1, c1);
dd               10160 drivers/infiniband/hw/hfi1/chip.c 	for (i = 0; i < chip_send_contexts(dd); i++) {
dd               10163 drivers/infiniband/hw/hfi1/chip.c 		write_kctxt_csr(dd, i, SEND_CTXT_CHECK_SLID, sreg);
dd               10167 drivers/infiniband/hw/hfi1/chip.c 	sdma_update_lmc(dd, mask, lid);
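
The SEND_CTXT_CHECK_SLID programming above pairs a LID with a mask derived from the port's LMC so the hardware can accept any source LID in the 2^LMC window. The arithmetic is the standard InfiniBand LMC masking; a small sketch that performs the same check in software:

#include <stdint.h>
#include <stdio.h>

/* with LMC bits, a port answers to 2^lmc consecutive LIDs */
static uint32_t lmc_mask(unsigned int lmc)
{
	return ~((1u << lmc) - 1);
}

static int slid_matches(uint32_t slid, uint32_t base_lid, unsigned int lmc)
{
	uint32_t mask = lmc_mask(lmc);

	return (slid & mask) == (base_lid & mask);
}

int main(void)
{
	uint32_t base = 0x40;	/* illustrative base LID        */
	unsigned int lmc = 2;	/* LIDs 0x40..0x43 are "ours"   */

	printf("mask 0x%x\n", lmc_mask(lmc));
	printf("0x41 matches: %d\n", slid_matches(0x41, base, lmc));
	printf("0x44 matches: %d\n", slid_matches(0x44, base, lmc));
	return 0;
}
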
dd               10244 drivers/infiniband/hw/hfi1/chip.c 	struct hfi1_devdata *dd = ppd->dd;
dd               10263 drivers/infiniband/hw/hfi1/chip.c 	dd_dev_err(dd, "Last %s LNI state complete frame 0x%08x:\n",
dd               10265 drivers/infiniband/hw/hfi1/chip.c 	dd_dev_err(dd, "    last reported state: %s (0x%x)\n",
dd               10267 drivers/infiniband/hw/hfi1/chip.c 	dd_dev_err(dd, "    state successfully completed: %s\n",
dd               10269 drivers/infiniband/hw/hfi1/chip.c 	dd_dev_err(dd, "    fail reason 0x%x: %s\n",
dd               10271 drivers/infiniband/hw/hfi1/chip.c 	dd_dev_err(dd, "    passing lane mask: 0x%x", lanes);
dd               10284 drivers/infiniband/hw/hfi1/chip.c 	read_last_local_state(ppd->dd, &last_local_state);
dd               10285 drivers/infiniband/hw/hfi1/chip.c 	read_last_remote_state(ppd->dd, &last_remote_state);
dd               10300 drivers/infiniband/hw/hfi1/chip.c static int wait_link_transfer_active(struct hfi1_devdata *dd, int wait_ms)
dd               10308 drivers/infiniband/hw/hfi1/chip.c 		reg = read_csr(dd, DC_LCB_STS_LINK_TRANSFER_ACTIVE);
dd               10312 drivers/infiniband/hw/hfi1/chip.c 			dd_dev_err(dd,
dd               10324 drivers/infiniband/hw/hfi1/chip.c 	struct hfi1_devdata *dd = ppd->dd;
dd               10329 drivers/infiniband/hw/hfi1/chip.c 	write_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET, 1);
dd               10330 drivers/infiniband/hw/hfi1/chip.c 	write_csr(dd, DC_LCB_CFG_IGNORE_LOST_RCLK,
dd               10333 drivers/infiniband/hw/hfi1/chip.c 	write_csr(dd, DC_LCB_CFG_LANE_WIDTH, 0);
dd               10334 drivers/infiniband/hw/hfi1/chip.c 	write_csr(dd, DC_LCB_CFG_REINIT_AS_SLAVE, 0);
dd               10335 drivers/infiniband/hw/hfi1/chip.c 	write_csr(dd, DC_LCB_CFG_CNT_FOR_SKIP_STALL, 0x110);
dd               10336 drivers/infiniband/hw/hfi1/chip.c 	write_csr(dd, DC_LCB_CFG_LOOPBACK, 0x2);
dd               10338 drivers/infiniband/hw/hfi1/chip.c 	write_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET, 0);
dd               10339 drivers/infiniband/hw/hfi1/chip.c 	(void)read_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET);
dd               10341 drivers/infiniband/hw/hfi1/chip.c 	write_csr(dd, DC_LCB_CFG_ALLOW_LINK_UP, 1);
dd               10342 drivers/infiniband/hw/hfi1/chip.c 	write_csr(dd, DC_LCB_CFG_RUN, 1ull << DC_LCB_CFG_RUN_EN_SHIFT);
dd               10344 drivers/infiniband/hw/hfi1/chip.c 	wait_link_transfer_active(dd, 100);
dd               10349 drivers/infiniband/hw/hfi1/chip.c 	write_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET, 1);
dd               10350 drivers/infiniband/hw/hfi1/chip.c 	write_csr(dd, DC_LCB_CFG_ALLOW_LINK_UP, 0);
dd               10351 drivers/infiniband/hw/hfi1/chip.c 	write_csr(dd, DC_LCB_CFG_IGNORE_LOST_RCLK, 0);
dd               10353 drivers/infiniband/hw/hfi1/chip.c 	dd_dev_info(ppd->dd, "logical state forced to LINK_DOWN\n");
dd               10366 drivers/infiniband/hw/hfi1/chip.c 	struct hfi1_devdata *dd = ppd->dd;
dd               10371 drivers/infiniband/hw/hfi1/chip.c 	update_lcb_cache(dd);
dd               10377 drivers/infiniband/hw/hfi1/chip.c 	ret = set_physical_link_state(dd, (rem_reason << 8) | PLS_OFFLINE);
dd               10380 drivers/infiniband/hw/hfi1/chip.c 		dd_dev_err(dd,
dd               10400 drivers/infiniband/hw/hfi1/chip.c 		ret = acquire_chip_resource(dd, qsfp_resource(dd), QSFP_WAIT);
dd               10403 drivers/infiniband/hw/hfi1/chip.c 			release_chip_resource(dd, qsfp_resource(dd));
dd               10406 drivers/infiniband/hw/hfi1/chip.c 			dd_dev_err(dd,
dd               10425 drivers/infiniband/hw/hfi1/chip.c 	set_host_lcb_access(dd);
dd               10426 drivers/infiniband/hw/hfi1/chip.c 	write_csr(dd, DC_LCB_ERR_EN, ~0ull); /* watch LCB errors */
dd               10445 drivers/infiniband/hw/hfi1/chip.c 	ret = wait_fm_ready(dd, 7000);
dd               10447 drivers/infiniband/hw/hfi1/chip.c 		dd_dev_err(dd,
dd               10463 drivers/infiniband/hw/hfi1/chip.c 		handle_linkup_change(dd, 0);
dd               10554 drivers/infiniband/hw/hfi1/chip.c 		dd_dev_err(ppd->dd, "invalid host_link_state 0x%x\n",
dd               10578 drivers/infiniband/hw/hfi1/chip.c 		dd_dev_err(ppd->dd, "invalid host_link_state 0x%x\n",
dd               10611 drivers/infiniband/hw/hfi1/chip.c 		reg = read_csr(ppd->dd, SEND_CM_CREDIT_VL + (8 * i));
dd               10612 drivers/infiniband/hw/hfi1/chip.c 		if ((reg && !ppd->dd->vld[i].mtu) ||
dd               10613 drivers/infiniband/hw/hfi1/chip.c 		    (!reg && ppd->dd->vld[i].mtu))
dd               10630 drivers/infiniband/hw/hfi1/chip.c 	struct hfi1_devdata *dd = ppd->dd;
dd               10645 drivers/infiniband/hw/hfi1/chip.c 	dd_dev_info(dd, "%s: current %s, new %s %s%s\n", __func__,
dd               10669 drivers/infiniband/hw/hfi1/chip.c 		    (quick_linkup || dd->icode == ICODE_FUNCTIONAL_SIMULATOR)) {
dd               10689 drivers/infiniband/hw/hfi1/chip.c 			dd_dev_err(dd,
dd               10697 drivers/infiniband/hw/hfi1/chip.c 			dd_dev_err(dd,
dd               10709 drivers/infiniband/hw/hfi1/chip.c 		add_rcvctrl(dd, RCV_CTRL_RCV_PORT_ENABLE_SMASK);
dd               10711 drivers/infiniband/hw/hfi1/chip.c 		handle_linkup_change(dd, 1);
dd               10712 drivers/infiniband/hw/hfi1/chip.c 		pio_kernel_linkup(dd);
dd               10729 drivers/infiniband/hw/hfi1/chip.c 			dd_dev_err(dd,
dd               10736 drivers/infiniband/hw/hfi1/chip.c 		set_logical_state(dd, LSTATE_ARMED);
dd               10739 drivers/infiniband/hw/hfi1/chip.c 			dd_dev_err(dd,
dd               10751 drivers/infiniband/hw/hfi1/chip.c 		if (dd->icode == ICODE_FUNCTIONAL_SIMULATOR)
dd               10758 drivers/infiniband/hw/hfi1/chip.c 		set_logical_state(dd, LSTATE_ACTIVE);
dd               10761 drivers/infiniband/hw/hfi1/chip.c 			dd_dev_err(dd,
dd               10766 drivers/infiniband/hw/hfi1/chip.c 			sdma_all_running(dd);
dd               10771 drivers/infiniband/hw/hfi1/chip.c 			event.device = &dd->verbs_dev.rdi.ibdev;
dd               10779 drivers/infiniband/hw/hfi1/chip.c 		    dd->dc_shutdown)
dd               10780 drivers/infiniband/hw/hfi1/chip.c 			dc_start(dd);
dd               10782 drivers/infiniband/hw/hfi1/chip.c 		write_csr(dd, DCC_CFG_LED_CNTRL, 0);
dd               10798 drivers/infiniband/hw/hfi1/chip.c 		set_all_slowpath(ppd->dd);
dd               10807 drivers/infiniband/hw/hfi1/chip.c 			ret = do_quick_linkup(dd);
dd               10809 drivers/infiniband/hw/hfi1/chip.c 			ret1 = set_physical_link_state(dd, PLS_POLLING);
dd               10814 drivers/infiniband/hw/hfi1/chip.c 				dd_dev_err(dd,
dd               10853 drivers/infiniband/hw/hfi1/chip.c 		if (!dd->dc_shutdown) {
dd               10854 drivers/infiniband/hw/hfi1/chip.c 			ret1 = set_physical_link_state(dd, PLS_DISABLED);
dd               10856 drivers/infiniband/hw/hfi1/chip.c 				dd_dev_err(dd,
dd               10864 drivers/infiniband/hw/hfi1/chip.c 				dd_dev_err(dd,
dd               10869 drivers/infiniband/hw/hfi1/chip.c 			dc_shutdown(dd);
dd               10875 drivers/infiniband/hw/hfi1/chip.c 			dc_start(dd);
dd               10892 drivers/infiniband/hw/hfi1/chip.c 		ret1 = set_physical_link_state(dd, PLS_LINKUP);
dd               10894 drivers/infiniband/hw/hfi1/chip.c 			dd_dev_err(dd,
dd               10906 drivers/infiniband/hw/hfi1/chip.c 		dd_dev_info(dd, "%s: state 0x%x: not supported\n",
dd               10915 drivers/infiniband/hw/hfi1/chip.c 	dd_dev_err(dd, "%s: unexpected state transition from %s to %s\n",
dd               10946 drivers/infiniband/hw/hfi1/chip.c 		write_csr(ppd->dd, SEND_HIGH_PRIORITY_LIMIT, reg);
dd               11004 drivers/infiniband/hw/hfi1/chip.c 			dd_dev_info(ppd->dd,
dd               11079 drivers/infiniband/hw/hfi1/chip.c 	struct hfi1_devdata *dd = ppd->dd;
dd               11089 drivers/infiniband/hw/hfi1/chip.c 	drain = !is_ax(dd) && is_up;
dd               11098 drivers/infiniband/hw/hfi1/chip.c 		ret = stop_drain_data_vls(dd);
dd               11102 drivers/infiniband/hw/hfi1/chip.c 			dd,
dd               11118 drivers/infiniband/hw/hfi1/chip.c 		write_csr(dd, target + (i * 8), reg);
dd               11120 drivers/infiniband/hw/hfi1/chip.c 	pio_send_control(dd, PSC_GLOBAL_VLARB_ENABLE);
dd               11123 drivers/infiniband/hw/hfi1/chip.c 		open_fill_data_vls(dd); /* reopen all VLs */
dd               11134 drivers/infiniband/hw/hfi1/chip.c static void read_one_cm_vl(struct hfi1_devdata *dd, u32 csr,
dd               11137 drivers/infiniband/hw/hfi1/chip.c 	u64 reg = read_csr(dd, csr);
dd               11150 drivers/infiniband/hw/hfi1/chip.c static int get_buffer_control(struct hfi1_devdata *dd,
dd               11161 drivers/infiniband/hw/hfi1/chip.c 		read_one_cm_vl(dd, SEND_CM_CREDIT_VL + (8 * i), &bc->vl[i]);
dd               11164 drivers/infiniband/hw/hfi1/chip.c 	read_one_cm_vl(dd, SEND_CM_CREDIT_VL15, &bc->vl[15]);
dd               11166 drivers/infiniband/hw/hfi1/chip.c 	reg = read_csr(dd, SEND_CM_GLOBAL_CREDIT);
dd               11177 drivers/infiniband/hw/hfi1/chip.c static int get_sc2vlnt(struct hfi1_devdata *dd, struct sc2vlnt *dp)
dd               11183 drivers/infiniband/hw/hfi1/chip.c 	reg = read_csr(dd, DCC_CFG_SC_VL_TABLE_15_0);
dd               11191 drivers/infiniband/hw/hfi1/chip.c 	reg = read_csr(dd, DCC_CFG_SC_VL_TABLE_31_16);
dd               11201 drivers/infiniband/hw/hfi1/chip.c static void get_vlarb_preempt(struct hfi1_devdata *dd, u32 nelems,
dd               11212 drivers/infiniband/hw/hfi1/chip.c static void set_sc2vlnt(struct hfi1_devdata *dd, struct sc2vlnt *dp)
dd               11214 drivers/infiniband/hw/hfi1/chip.c 	write_csr(dd, DCC_CFG_SC_VL_TABLE_15_0,
dd               11232 drivers/infiniband/hw/hfi1/chip.c 	write_csr(dd, DCC_CFG_SC_VL_TABLE_31_16,
dd               11252 drivers/infiniband/hw/hfi1/chip.c static void nonzero_msg(struct hfi1_devdata *dd, int idx, const char *what,
dd               11256 drivers/infiniband/hw/hfi1/chip.c 		dd_dev_info(dd, "Invalid %s limit %d on VL %d, ignoring\n",
dd               11261 drivers/infiniband/hw/hfi1/chip.c static void set_global_shared(struct hfi1_devdata *dd, u16 limit)
dd               11265 drivers/infiniband/hw/hfi1/chip.c 	reg = read_csr(dd, SEND_CM_GLOBAL_CREDIT);
dd               11268 drivers/infiniband/hw/hfi1/chip.c 	write_csr(dd, SEND_CM_GLOBAL_CREDIT, reg);
dd               11272 drivers/infiniband/hw/hfi1/chip.c static void set_global_limit(struct hfi1_devdata *dd, u16 limit)
dd               11276 drivers/infiniband/hw/hfi1/chip.c 	reg = read_csr(dd, SEND_CM_GLOBAL_CREDIT);
dd               11279 drivers/infiniband/hw/hfi1/chip.c 	write_csr(dd, SEND_CM_GLOBAL_CREDIT, reg);
dd               11283 drivers/infiniband/hw/hfi1/chip.c static void set_vl_shared(struct hfi1_devdata *dd, int vl, u16 limit)
dd               11293 drivers/infiniband/hw/hfi1/chip.c 	reg = read_csr(dd, addr);
dd               11296 drivers/infiniband/hw/hfi1/chip.c 	write_csr(dd, addr, reg);
dd               11300 drivers/infiniband/hw/hfi1/chip.c static void set_vl_dedicated(struct hfi1_devdata *dd, int vl, u16 limit)
dd               11310 drivers/infiniband/hw/hfi1/chip.c 	reg = read_csr(dd, addr);
dd               11313 drivers/infiniband/hw/hfi1/chip.c 	write_csr(dd, addr, reg);
dd               11317 drivers/infiniband/hw/hfi1/chip.c static void wait_for_vl_status_clear(struct hfi1_devdata *dd, u64 mask,
dd               11325 drivers/infiniband/hw/hfi1/chip.c 		reg = read_csr(dd, SEND_CM_CREDIT_USED_STATUS) & mask;
dd               11334 drivers/infiniband/hw/hfi1/chip.c 	dd_dev_err(dd,
dd               11341 drivers/infiniband/hw/hfi1/chip.c 	dd_dev_err(dd,
dd               11372 drivers/infiniband/hw/hfi1/chip.c 	struct hfi1_devdata *dd = ppd->dd;
dd               11408 drivers/infiniband/hw/hfi1/chip.c 		nonzero_msg(dd, i, "dedicated",
dd               11410 drivers/infiniband/hw/hfi1/chip.c 		nonzero_msg(dd, i, "shared",
dd               11418 drivers/infiniband/hw/hfi1/chip.c 	get_buffer_control(dd, &cur_bc, &cur_total);
dd               11457 drivers/infiniband/hw/hfi1/chip.c 		set_global_limit(dd, new_total);
dd               11465 drivers/infiniband/hw/hfi1/chip.c 	    (is_ax(dd) && any_shared_limit_changing)) {
dd               11466 drivers/infiniband/hw/hfi1/chip.c 		set_global_shared(dd, 0);
dd               11476 drivers/infiniband/hw/hfi1/chip.c 			set_vl_shared(dd, i, 0);
dd               11481 drivers/infiniband/hw/hfi1/chip.c 	wait_for_vl_status_clear(dd, use_all_mask ? all_mask : changing_mask,
dd               11490 drivers/infiniband/hw/hfi1/chip.c 				set_vl_dedicated(dd, i,
dd               11498 drivers/infiniband/hw/hfi1/chip.c 		wait_for_vl_status_clear(dd, ld_mask, "dedicated");
dd               11507 drivers/infiniband/hw/hfi1/chip.c 				set_vl_dedicated(dd, i,
dd               11520 drivers/infiniband/hw/hfi1/chip.c 			set_vl_shared(dd, i, be16_to_cpu(new_bc->vl[i].shared));
dd               11526 drivers/infiniband/hw/hfi1/chip.c 		set_global_shared(dd,
dd               11531 drivers/infiniband/hw/hfi1/chip.c 		set_global_limit(dd, new_total);
dd               11543 drivers/infiniband/hw/hfi1/chip.c 		ret = sdma_map_init(dd, ppd->port - 1, vl_count ?
dd               11548 drivers/infiniband/hw/hfi1/chip.c 			ret = pio_map_init(dd, ppd->port - 1, vl_count ?
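
The set_buffer_control() sequence indexed above reprograms credit limits in two phases: shrink first, wait for credits in flight to drain below the new limits, then grow, so the aggregate never exceeds what the chip can hold. A schematic sketch of that ordering on plain arrays (VL count and limit values invented):

#include <stdio.h>

#define NVLS 4

static int cur[NVLS] = { 10, 20, 30, 40 };

static void wait_drained(void)
{
	/* the driver polls SEND_CM_CREDIT_USED_STATUS here; we just pretend */
	printf("  ...waiting for credits in flight to drain...\n");
}

static void apply(const int *new_limit)
{
	int i;

	/* phase 1: apply all decreases */
	for (i = 0; i < NVLS; i++)
		if (new_limit[i] < cur[i])
			cur[i] = new_limit[i];

	/* credits freed by the decreases must land before being handed out */
	wait_drained();

	/* phase 2: apply all increases */
	for (i = 0; i < NVLS; i++)
		if (new_limit[i] > cur[i])
			cur[i] = new_limit[i];
}

int main(void)
{
	int want[NVLS] = { 40, 5, 30, 25 };

	apply(want);
	for (int i = 0; i < NVLS; i++)
		printf("vl%d limit %d\n", i, cur[i]);
	return 0;
}
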
dd               11590 drivers/infiniband/hw/hfi1/chip.c 		size = get_buffer_control(ppd->dd, t, NULL);
dd               11593 drivers/infiniband/hw/hfi1/chip.c 		size = get_sc2vlnt(ppd->dd, t);
dd               11598 drivers/infiniband/hw/hfi1/chip.c 		get_vlarb_preempt(ppd->dd, OPA_MAX_VLS, t);
dd               11648 drivers/infiniband/hw/hfi1/chip.c 		set_sc2vlnt(ppd->dd, t);
dd               11661 drivers/infiniband/hw/hfi1/chip.c static int disable_data_vls(struct hfi1_devdata *dd)
dd               11663 drivers/infiniband/hw/hfi1/chip.c 	if (is_ax(dd))
dd               11666 drivers/infiniband/hw/hfi1/chip.c 	pio_send_control(dd, PSC_DATA_VL_DISABLE);
dd               11679 drivers/infiniband/hw/hfi1/chip.c int open_fill_data_vls(struct hfi1_devdata *dd)
dd               11681 drivers/infiniband/hw/hfi1/chip.c 	if (is_ax(dd))
dd               11684 drivers/infiniband/hw/hfi1/chip.c 	pio_send_control(dd, PSC_DATA_VL_ENABLE);
dd               11694 drivers/infiniband/hw/hfi1/chip.c static void drain_data_vls(struct hfi1_devdata *dd)
dd               11696 drivers/infiniband/hw/hfi1/chip.c 	sc_wait(dd);
dd               11697 drivers/infiniband/hw/hfi1/chip.c 	sdma_wait(dd);
dd               11698 drivers/infiniband/hw/hfi1/chip.c 	pause_for_credit_return(dd);
dd               11711 drivers/infiniband/hw/hfi1/chip.c int stop_drain_data_vls(struct hfi1_devdata *dd)
dd               11715 drivers/infiniband/hw/hfi1/chip.c 	ret = disable_data_vls(dd);
dd               11717 drivers/infiniband/hw/hfi1/chip.c 		drain_data_vls(dd);
dd               11726 drivers/infiniband/hw/hfi1/chip.c u32 ns_to_cclock(struct hfi1_devdata *dd, u32 ns)
dd               11730 drivers/infiniband/hw/hfi1/chip.c 	if (dd->icode == ICODE_FPGA_EMULATION)
dd               11743 drivers/infiniband/hw/hfi1/chip.c u32 cclock_to_ns(struct hfi1_devdata *dd, u32 cclocks)
dd               11747 drivers/infiniband/hw/hfi1/chip.c 	if (dd->icode == ICODE_FPGA_EMULATION)
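
ns_to_cclock()/cclock_to_ns() above convert between wall time and chip clock ticks, with a different tick period on FPGA emulation. A sketch of such helpers; the period constants here are illustrative, not the chip's real values:

#include <stdint.h>
#include <stdio.h>

/* illustrative cclock periods in picoseconds; the real values are
 * chip-specific and differ under emulation, as the indexed code shows */
#define ASIC_CCLOCK_PS 1242u
#define FPGA_CCLOCK_PS 30300u

static uint32_t cclock_ps(int is_fpga)
{
	return is_fpga ? FPGA_CCLOCK_PS : ASIC_CCLOCK_PS;
}

/* round up: a wait expressed in ns must cover at least that many cycles */
static uint32_t ns_to_cclock(int is_fpga, uint32_t ns)
{
	uint32_t ps = cclock_ps(is_fpga);

	return (uint32_t)(((uint64_t)ns * 1000u + ps - 1) / ps);
}

static uint32_t cclock_to_ns(int is_fpga, uint32_t cclocks)
{
	return (uint32_t)(((uint64_t)cclocks * cclock_ps(is_fpga)) / 1000u);
}

int main(void)
{
	printf("1000 ns = %u cclocks (ASIC)\n", ns_to_cclock(0, 1000));
	printf("805 cclocks = %u ns (ASIC)\n", cclock_to_ns(0, 805));
	return 0;
}
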
dd               11764 drivers/infiniband/hw/hfi1/chip.c 	struct hfi1_devdata *dd = rcd->dd;
dd               11789 drivers/infiniband/hw/hfi1/chip.c 		if (timeout >= dd->rcv_intr_timeout_csr) /* already at max? */
dd               11791 drivers/infiniband/hw/hfi1/chip.c 		timeout = min(timeout << 1, dd->rcv_intr_timeout_csr);
dd               11799 drivers/infiniband/hw/hfi1/chip.c 	write_kctxt_csr(dd, rcd->ctxt, RCV_AVAIL_TIME_OUT,
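
adjust_rcv_timeout() above grows the receive-interrupt coalescing timeout by doubling it up to a programmed maximum, trading latency for fewer interrupts on a quiet context. A minimal sketch of that capped exponential back-off:

#include <stdint.h>
#include <stdio.h>

#define MIN(a, b) ((a) < (b) ? (a) : (b))

struct ctxt {
	uint32_t timeout;	/* current coalescing timeout, in CSR units */
};

/* idle context: double the timeout (fewer interrupts), capped at max */
static void back_off(struct ctxt *c, uint32_t max)
{
	if (c->timeout >= max)
		return;		/* already at the cap */
	c->timeout = MIN(c->timeout << 1, max);
	/* the driver would now write the new value to RCV_AVAIL_TIME_OUT */
}

int main(void)
{
	struct ctxt c = { .timeout = 1 };
	const uint32_t max = 100;

	for (int i = 0; i < 9; i++) {
		back_off(&c, max);
		printf("timeout now %u\n", c.timeout);
	}
	return 0;
}
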
dd               11807 drivers/infiniband/hw/hfi1/chip.c 	struct hfi1_devdata *dd = rcd->dd;
dd               11820 drivers/infiniband/hw/hfi1/chip.c 		write_uctxt_csr(dd, ctxt, RCV_EGR_INDEX_HEAD, reg);
dd               11825 drivers/infiniband/hw/hfi1/chip.c 	write_uctxt_csr(dd, ctxt, RCV_HDR_HEAD, reg);
dd               11832 drivers/infiniband/hw/hfi1/chip.c 	head = (read_uctxt_csr(rcd->dd, rcd->ctxt, RCV_HDR_HEAD)
dd               11838 drivers/infiniband/hw/hfi1/chip.c 		tail = read_uctxt_csr(rcd->dd, rcd->ctxt, RCV_HDR_TAIL);
dd               11879 drivers/infiniband/hw/hfi1/chip.c void hfi1_rcvctrl(struct hfi1_devdata *dd, unsigned int op,
dd               11893 drivers/infiniband/hw/hfi1/chip.c 	rcvctrl = read_kctxt_csr(dd, ctxt, RCV_CTXT_CTRL);
dd               11898 drivers/infiniband/hw/hfi1/chip.c 		write_kctxt_csr(dd, ctxt, RCV_HDR_ADDR,
dd               11901 drivers/infiniband/hw/hfi1/chip.c 			write_kctxt_csr(dd, ctxt, RCV_HDR_TAIL_ADDR,
dd               11917 drivers/infiniband/hw/hfi1/chip.c 		rcd->rcvavail_timeout = dd->rcv_intr_timeout_csr;
dd               11929 drivers/infiniband/hw/hfi1/chip.c 		write_uctxt_csr(dd, ctxt, RCV_HDR_HEAD, 0);
dd               11933 drivers/infiniband/hw/hfi1/chip.c 		write_uctxt_csr(dd, ctxt, RCV_EGR_INDEX_HEAD, 0);
dd               11942 drivers/infiniband/hw/hfi1/chip.c 		write_kctxt_csr(dd, ctxt, RCV_EGR_CTRL, reg);
dd               11956 drivers/infiniband/hw/hfi1/chip.c 		write_kctxt_csr(dd, ctxt, RCV_TID_CTRL, reg);
dd               11958 drivers/infiniband/hw/hfi1/chip.c 			write_csr(dd, RCV_VL15, HFI1_CTRL_CTXT);
dd               11961 drivers/infiniband/hw/hfi1/chip.c 		write_csr(dd, RCV_VL15, 0);
dd               11967 drivers/infiniband/hw/hfi1/chip.c 		if (dd->rcvhdrtail_dummy_dma) {
dd               11968 drivers/infiniband/hw/hfi1/chip.c 			write_kctxt_csr(dd, ctxt, RCV_HDR_TAIL_ADDR,
dd               11969 drivers/infiniband/hw/hfi1/chip.c 					dd->rcvhdrtail_dummy_dma);
dd               11977 drivers/infiniband/hw/hfi1/chip.c 		set_intr_bits(dd, IS_RCVAVAIL_START + rcd->ctxt,
dd               11982 drivers/infiniband/hw/hfi1/chip.c 		set_intr_bits(dd, IS_RCVAVAIL_START + rcd->ctxt,
dd               12016 drivers/infiniband/hw/hfi1/chip.c 		set_intr_bits(dd, IS_RCVURGENT_START + rcd->ctxt,
dd               12019 drivers/infiniband/hw/hfi1/chip.c 		set_intr_bits(dd, IS_RCVURGENT_START + rcd->ctxt,
dd               12023 drivers/infiniband/hw/hfi1/chip.c 	write_kctxt_csr(dd, ctxt, RCV_CTXT_CTRL, rcvctrl);
dd               12028 drivers/infiniband/hw/hfi1/chip.c 		reg = read_kctxt_csr(dd, ctxt, RCV_CTXT_STATUS);
dd               12030 drivers/infiniband/hw/hfi1/chip.c 			dd_dev_info(dd, "ctxt %d status %lld (blocked)\n",
dd               12032 drivers/infiniband/hw/hfi1/chip.c 			read_uctxt_csr(dd, ctxt, RCV_HDR_HEAD);
dd               12033 drivers/infiniband/hw/hfi1/chip.c 			write_uctxt_csr(dd, ctxt, RCV_HDR_HEAD, 0x10);
dd               12034 drivers/infiniband/hw/hfi1/chip.c 			write_uctxt_csr(dd, ctxt, RCV_HDR_HEAD, 0x00);
dd               12035 drivers/infiniband/hw/hfi1/chip.c 			read_uctxt_csr(dd, ctxt, RCV_HDR_HEAD);
dd               12036 drivers/infiniband/hw/hfi1/chip.c 			reg = read_kctxt_csr(dd, ctxt, RCV_CTXT_STATUS);
dd               12037 drivers/infiniband/hw/hfi1/chip.c 			dd_dev_info(dd, "ctxt %d status %lld (%s blocked)\n",
dd               12048 drivers/infiniband/hw/hfi1/chip.c 		write_kctxt_csr(dd, ctxt, RCV_AVAIL_TIME_OUT,
dd               12054 drivers/infiniband/hw/hfi1/chip.c 		write_uctxt_csr(dd, ctxt, RCV_HDR_HEAD, reg);
dd               12063 drivers/infiniband/hw/hfi1/chip.c 		write_kctxt_csr(dd, ctxt, RCV_HDR_TAIL_ADDR,
dd               12064 drivers/infiniband/hw/hfi1/chip.c 				dd->rcvhdrtail_dummy_dma);
dd               12067 drivers/infiniband/hw/hfi1/chip.c u32 hfi1_read_cntrs(struct hfi1_devdata *dd, char **namep, u64 **cntrp)
dd               12073 drivers/infiniband/hw/hfi1/chip.c 		ret = dd->cntrnameslen;
dd               12074 drivers/infiniband/hw/hfi1/chip.c 		*namep = dd->cntrnames;
dd               12079 drivers/infiniband/hw/hfi1/chip.c 		ret = (dd->ndevcntrs) * sizeof(u64);
dd               12082 drivers/infiniband/hw/hfi1/chip.c 		*cntrp = dd->cntrs;
dd               12098 drivers/infiniband/hw/hfi1/chip.c 								  dd, j,
dd               12105 drivers/infiniband/hw/hfi1/chip.c 						dd->cntrs[entry->offset + j] =
dd               12111 drivers/infiniband/hw/hfi1/chip.c 					for (j = 0; j < chip_sdma_engines(dd);
dd               12114 drivers/infiniband/hw/hfi1/chip.c 						entry->rw_cntr(entry, dd, j,
dd               12119 drivers/infiniband/hw/hfi1/chip.c 						dd->cntrs[entry->offset + j] =
dd               12123 drivers/infiniband/hw/hfi1/chip.c 					val = entry->rw_cntr(entry, dd,
dd               12126 drivers/infiniband/hw/hfi1/chip.c 					dd->cntrs[entry->offset] = val;
dd               12144 drivers/infiniband/hw/hfi1/chip.c 		ret = ppd->dd->portcntrnameslen;
dd               12145 drivers/infiniband/hw/hfi1/chip.c 		*namep = ppd->dd->portcntrnames;
dd               12150 drivers/infiniband/hw/hfi1/chip.c 		ret = ppd->dd->nportcntrs * sizeof(u64);
dd               12187 drivers/infiniband/hw/hfi1/chip.c static void free_cntrs(struct hfi1_devdata *dd)
dd               12192 drivers/infiniband/hw/hfi1/chip.c 	if (dd->synth_stats_timer.function)
dd               12193 drivers/infiniband/hw/hfi1/chip.c 		del_timer_sync(&dd->synth_stats_timer);
dd               12194 drivers/infiniband/hw/hfi1/chip.c 	ppd = (struct hfi1_pportdata *)(dd + 1);
dd               12195 drivers/infiniband/hw/hfi1/chip.c 	for (i = 0; i < dd->num_pports; i++, ppd++) {
dd               12207 drivers/infiniband/hw/hfi1/chip.c 	kfree(dd->portcntrnames);
dd               12208 drivers/infiniband/hw/hfi1/chip.c 	dd->portcntrnames = NULL;
dd               12209 drivers/infiniband/hw/hfi1/chip.c 	kfree(dd->cntrs);
dd               12210 drivers/infiniband/hw/hfi1/chip.c 	dd->cntrs = NULL;
dd               12211 drivers/infiniband/hw/hfi1/chip.c 	kfree(dd->scntrs);
dd               12212 drivers/infiniband/hw/hfi1/chip.c 	dd->scntrs = NULL;
dd               12213 drivers/infiniband/hw/hfi1/chip.c 	kfree(dd->cntrnames);
dd               12214 drivers/infiniband/hw/hfi1/chip.c 	dd->cntrnames = NULL;
dd               12215 drivers/infiniband/hw/hfi1/chip.c 	if (dd->update_cntr_wq) {
dd               12216 drivers/infiniband/hw/hfi1/chip.c 		destroy_workqueue(dd->update_cntr_wq);
dd               12217 drivers/infiniband/hw/hfi1/chip.c 		dd->update_cntr_wq = NULL;
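free_cntrs() pairs every kfree() with a NULL assignment and guards the timer and workqueue teardown, so it is safe to call both from a failed init_cntrs() and from normal cleanup. A hedged userspace sketch of the free-and-NULL idiom (struct res is illustrative):

#include <stdlib.h>

struct res { void *a, *b; };

/* free-and-NULL makes teardown idempotent: calling it twice,
 * or on a half-initialized object, is harmless */
static void free_res(struct res *r)
{
	free(r->a); r->a = NULL;
	free(r->b); r->b = NULL;
}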
dd               12221 drivers/infiniband/hw/hfi1/chip.c static u64 read_dev_port_cntr(struct hfi1_devdata *dd, struct cntr_entry *entry,
dd               12228 drivers/infiniband/hw/hfi1/chip.c 		dd_dev_err(dd, "Counter %s not enabled", entry->name);
dd               12272 drivers/infiniband/hw/hfi1/chip.c static u64 write_dev_port_cntr(struct hfi1_devdata *dd,
dd               12279 drivers/infiniband/hw/hfi1/chip.c 		dd_dev_err(dd, "Counter %s not enabled", entry->name);
dd               12306 drivers/infiniband/hw/hfi1/chip.c u64 read_dev_cntr(struct hfi1_devdata *dd, int index, int vl)
dd               12312 drivers/infiniband/hw/hfi1/chip.c 	sval = dd->scntrs + entry->offset;
dd               12317 drivers/infiniband/hw/hfi1/chip.c 	return read_dev_port_cntr(dd, entry, sval, dd, vl);
dd               12320 drivers/infiniband/hw/hfi1/chip.c u64 write_dev_cntr(struct hfi1_devdata *dd, int index, int vl, u64 data)
dd               12326 drivers/infiniband/hw/hfi1/chip.c 	sval = dd->scntrs + entry->offset;
dd               12331 drivers/infiniband/hw/hfi1/chip.c 	return write_dev_port_cntr(dd, entry, sval, dd, vl, data);
dd               12345 drivers/infiniband/hw/hfi1/chip.c 	if ((index >= C_RCV_HDR_OVF_FIRST + ppd->dd->num_rcv_contexts) &&
dd               12351 drivers/infiniband/hw/hfi1/chip.c 	return read_dev_port_cntr(ppd->dd, entry, sval, ppd, vl);
dd               12365 drivers/infiniband/hw/hfi1/chip.c 	if ((index >= C_RCV_HDR_OVF_FIRST + ppd->dd->num_rcv_contexts) &&
dd               12371 drivers/infiniband/hw/hfi1/chip.c 	return write_dev_port_cntr(ppd->dd, entry, sval, ppd, vl, data);
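The sval pointer into dd->scntrs appears to be a per-counter software shadow that read_dev_port_cntr()/write_dev_port_cntr() use so narrow hardware counts can be widened into monotonic 64-bit values. A hedged sketch of the general wrap-handling idea, not the driver's exact arithmetic (widen32 is hypothetical):

#include <stdint.h>

/* widen a wrapping 32-bit hardware counter into a monotonic 64-bit total;
 * unsigned subtraction yields the correct delta across a single wrap */
static uint64_t widen32(uint64_t *total, uint32_t *last, uint32_t now)
{
	*total += (uint32_t)(now - *last);
	*last = now;
	return *total;
}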
dd               12383 drivers/infiniband/hw/hfi1/chip.c 	struct hfi1_devdata *dd = container_of(work, struct hfi1_devdata,
dd               12393 drivers/infiniband/hw/hfi1/chip.c 	cur_rx = entry->rw_cntr(entry, dd, CNTR_INVALID_VL, CNTR_MODE_R, 0);
dd               12396 drivers/infiniband/hw/hfi1/chip.c 	cur_tx = entry->rw_cntr(entry, dd, CNTR_INVALID_VL, CNTR_MODE_R, 0);
dd               12401 drivers/infiniband/hw/hfi1/chip.c 	    dd->unit, cur_tx, cur_rx, dd->last_tx, dd->last_rx);
dd               12403 drivers/infiniband/hw/hfi1/chip.c 	if ((cur_tx < dd->last_tx) || (cur_rx < dd->last_rx)) {
dd               12410 drivers/infiniband/hw/hfi1/chip.c 			  dd->unit);
dd               12412 drivers/infiniband/hw/hfi1/chip.c 		total_flits = (cur_tx - dd->last_tx) + (cur_rx - dd->last_rx);
dd               12414 drivers/infiniband/hw/hfi1/chip.c 			  "[%d] total flits 0x%llx limit 0x%llx\n", dd->unit,
dd               12418 drivers/infiniband/hw/hfi1/chip.c 				  dd->unit);
dd               12424 drivers/infiniband/hw/hfi1/chip.c 		hfi1_cdbg(CNTR, "[%d] Updating dd and ppd counters", dd->unit);
dd               12429 drivers/infiniband/hw/hfi1/chip.c 					read_dev_cntr(dd, i, vl);
dd               12431 drivers/infiniband/hw/hfi1/chip.c 				read_dev_cntr(dd, i, CNTR_INVALID_VL);
dd               12434 drivers/infiniband/hw/hfi1/chip.c 		ppd = (struct hfi1_pportdata *)(dd + 1);
dd               12435 drivers/infiniband/hw/hfi1/chip.c 		for (i = 0; i < dd->num_pports; i++, ppd++) {
dd               12454 drivers/infiniband/hw/hfi1/chip.c 		dd->last_tx = entry->rw_cntr(entry, dd, CNTR_INVALID_VL,
dd               12458 drivers/infiniband/hw/hfi1/chip.c 		dd->last_rx = entry->rw_cntr(entry, dd, CNTR_INVALID_VL,
dd               12462 drivers/infiniband/hw/hfi1/chip.c 			  dd->unit, dd->last_tx, dd->last_rx);
dd               12465 drivers/infiniband/hw/hfi1/chip.c 		hfi1_cdbg(CNTR, "[%d] No update necessary", dd->unit);
dd               12471 drivers/infiniband/hw/hfi1/chip.c 	struct hfi1_devdata *dd = from_timer(dd, t, synth_stats_timer);
dd               12473 drivers/infiniband/hw/hfi1/chip.c 	queue_work(dd->update_cntr_wq, &dd->update_cntr_work);
dd               12474 drivers/infiniband/hw/hfi1/chip.c 	mod_timer(&dd->synth_stats_timer, jiffies + HZ * SYNTH_CNT_TIME);
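update_synth_timer() runs in timer (softirq) context, so it only queues update_cntr_work on dd->update_cntr_wq and rearms itself; the CSR-heavy reads happen later in do_update_synth_timer() on the ordered workqueue. A minimal sketch of the same rearm-and-defer pattern (struct my_dev and PERIOD_SEC are illustrative stand-ins):

#include <linux/timer.h>
#include <linux/workqueue.h>

#define PERIOD_SEC 5	/* illustrative; plays the role of SYNTH_CNT_TIME */

struct my_dev {
	struct timer_list timer;
	struct workqueue_struct *wq;
	struct work_struct work;
};

/* timer callback: defer real work to process context, then rearm */
static void my_timer_fn(struct timer_list *t)
{
	struct my_dev *dev = from_timer(dev, t, timer);

	queue_work(dev->wq, &dev->work);	/* heavy reads run here later */
	mod_timer(&dev->timer, jiffies + HZ * PERIOD_SEC);
}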
dd               12478 drivers/infiniband/hw/hfi1/chip.c static int init_cntrs(struct hfi1_devdata *dd)
dd               12487 drivers/infiniband/hw/hfi1/chip.c 	u32 sdma_engines = chip_sdma_engines(dd);
dd               12490 drivers/infiniband/hw/hfi1/chip.c 	timer_setup(&dd->synth_stats_timer, update_synth_timer, 0);
dd               12497 drivers/infiniband/hw/hfi1/chip.c 	dd->ndevcntrs = 0;
dd               12507 drivers/infiniband/hw/hfi1/chip.c 			dev_cntrs[i].offset = dd->ndevcntrs;
dd               12516 drivers/infiniband/hw/hfi1/chip.c 				dd->ndevcntrs++;
dd               12519 drivers/infiniband/hw/hfi1/chip.c 			dev_cntrs[i].offset = dd->ndevcntrs;
dd               12528 drivers/infiniband/hw/hfi1/chip.c 				dd->ndevcntrs++;
dd               12536 drivers/infiniband/hw/hfi1/chip.c 			dev_cntrs[i].offset = dd->ndevcntrs;
dd               12537 drivers/infiniband/hw/hfi1/chip.c 			dd->ndevcntrs++;
dd               12542 drivers/infiniband/hw/hfi1/chip.c 	dd->cntrs = kcalloc(dd->ndevcntrs + num_driver_cntrs, sizeof(u64),
dd               12544 drivers/infiniband/hw/hfi1/chip.c 	if (!dd->cntrs)
dd               12547 drivers/infiniband/hw/hfi1/chip.c 	dd->scntrs = kcalloc(dd->ndevcntrs, sizeof(u64), GFP_KERNEL);
dd               12548 drivers/infiniband/hw/hfi1/chip.c 	if (!dd->scntrs)
dd               12552 drivers/infiniband/hw/hfi1/chip.c 	dd->cntrnameslen = sz;
dd               12553 drivers/infiniband/hw/hfi1/chip.c 	dd->cntrnames = kmalloc(sz, GFP_KERNEL);
dd               12554 drivers/infiniband/hw/hfi1/chip.c 	if (!dd->cntrnames)
dd               12558 drivers/infiniband/hw/hfi1/chip.c 	for (p = dd->cntrnames, i = 0; i < DEV_CNTR_LAST; i++) {
dd               12615 drivers/infiniband/hw/hfi1/chip.c 	rcv_ctxts = dd->num_rcv_contexts;
dd               12623 drivers/infiniband/hw/hfi1/chip.c 	dd->nportcntrs = 0;
dd               12631 drivers/infiniband/hw/hfi1/chip.c 			port_cntrs[i].offset = dd->nportcntrs;
dd               12640 drivers/infiniband/hw/hfi1/chip.c 				dd->nportcntrs++;
dd               12648 drivers/infiniband/hw/hfi1/chip.c 			port_cntrs[i].offset = dd->nportcntrs;
dd               12649 drivers/infiniband/hw/hfi1/chip.c 			dd->nportcntrs++;
dd               12654 drivers/infiniband/hw/hfi1/chip.c 	dd->portcntrnameslen = sz;
dd               12655 drivers/infiniband/hw/hfi1/chip.c 	dd->portcntrnames = kmalloc(sz, GFP_KERNEL);
dd               12656 drivers/infiniband/hw/hfi1/chip.c 	if (!dd->portcntrnames)
dd               12660 drivers/infiniband/hw/hfi1/chip.c 	for (p = dd->portcntrnames, i = 0; i < PORT_CNTR_LAST; i++) {
dd               12695 drivers/infiniband/hw/hfi1/chip.c 	ppd = (struct hfi1_pportdata *)(dd + 1);
dd               12696 drivers/infiniband/hw/hfi1/chip.c 	for (i = 0; i < dd->num_pports; i++, ppd++) {
dd               12697 drivers/infiniband/hw/hfi1/chip.c 		ppd->cntrs = kcalloc(dd->nportcntrs, sizeof(u64), GFP_KERNEL);
dd               12701 drivers/infiniband/hw/hfi1/chip.c 		ppd->scntrs = kcalloc(dd->nportcntrs, sizeof(u64), GFP_KERNEL);
dd               12707 drivers/infiniband/hw/hfi1/chip.c 	if (init_cpu_counters(dd))
dd               12710 drivers/infiniband/hw/hfi1/chip.c 	dd->update_cntr_wq = alloc_ordered_workqueue("hfi1_update_cntr_%d",
dd               12711 drivers/infiniband/hw/hfi1/chip.c 						     WQ_MEM_RECLAIM, dd->unit);
dd               12712 drivers/infiniband/hw/hfi1/chip.c 	if (!dd->update_cntr_wq)
dd               12715 drivers/infiniband/hw/hfi1/chip.c 	INIT_WORK(&dd->update_cntr_work, do_update_synth_timer);
dd               12717 drivers/infiniband/hw/hfi1/chip.c 	mod_timer(&dd->synth_stats_timer, jiffies + HZ * SYNTH_CNT_TIME);
dd               12720 drivers/infiniband/hw/hfi1/chip.c 	free_cntrs(dd);
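init_cntrs() sizes everything in a first pass over dev_cntrs (assigning offsets, accumulating dd->ndevcntrs and the name-buffer byte count) and fills the single kmalloc'd name buffer in a second pass, unwinding through free_cntrs() on any failure. A userspace sketch of the two-pass sizing idiom (build_name_table is hypothetical):

#include <stdlib.h>
#include <string.h>

/* pass 1: size everything; pass 2: fill a single allocation */
static char *build_name_table(const char *const *names, int n, size_t *outlen)
{
	size_t sz = 0;
	char *buf, *p;
	int i;

	for (i = 0; i < n; i++)
		sz += strlen(names[i]) + 1;	/* +1 for the '\n' separator */
	buf = malloc(sz);
	if (!buf)
		return NULL;
	for (p = buf, i = 0; i < n; i++) {
		size_t len = strlen(names[i]);

		memcpy(p, names[i], len);
		p += len;
		*p++ = '\n';			/* exactly fills sz bytes */
	}
	*outlen = sz;
	return buf;
}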
dd               12724 drivers/infiniband/hw/hfi1/chip.c static u32 chip_to_opa_lstate(struct hfi1_devdata *dd, u32 chip_lstate)
dd               12728 drivers/infiniband/hw/hfi1/chip.c 		dd_dev_err(dd,
dd               12743 drivers/infiniband/hw/hfi1/chip.c u32 chip_to_opa_pstate(struct hfi1_devdata *dd, u32 chip_pstate)
dd               12748 drivers/infiniband/hw/hfi1/chip.c 		dd_dev_err(dd, "Unexpected chip physical state of 0x%x\n",
dd               12840 drivers/infiniband/hw/hfi1/chip.c 	dd_dev_info(ppd->dd, "logical state changed to %s (0x%x)\n",
dd               12862 drivers/infiniband/hw/hfi1/chip.c 		new_state = chip_to_opa_lstate(ppd->dd,
dd               12863 drivers/infiniband/hw/hfi1/chip.c 					       read_logical_state(ppd->dd));
dd               12867 drivers/infiniband/hw/hfi1/chip.c 			dd_dev_err(ppd->dd,
dd               12880 drivers/infiniband/hw/hfi1/chip.c 	u32 ib_pstate = chip_to_opa_pstate(ppd->dd, state);
dd               12882 drivers/infiniband/hw/hfi1/chip.c 	dd_dev_info(ppd->dd,
dd               12893 drivers/infiniband/hw/hfi1/chip.c 	u32 read_state = read_physical_state(ppd->dd);
dd               12898 drivers/infiniband/hw/hfi1/chip.c 		dd_dev_err(ppd->dd,
dd               12921 drivers/infiniband/hw/hfi1/chip.c 		read_state = read_physical_state(ppd->dd);
dd               12925 drivers/infiniband/hw/hfi1/chip.c 			dd_dev_err(ppd->dd,
dd               12954 drivers/infiniband/hw/hfi1/chip.c 		read_state = read_physical_state(ppd->dd);
dd               12958 drivers/infiniband/hw/hfi1/chip.c 			dd_dev_err(ppd->dd,
dd               12987 drivers/infiniband/hw/hfi1/chip.c 		read_state = read_physical_state(ppd->dd);
dd               12991 drivers/infiniband/hw/hfi1/chip.c 			dd_dev_err(ppd->dd,
dd               13012 drivers/infiniband/hw/hfi1/chip.c 		struct hfi1_devdata *dd = sc->dd;
dd               13017 drivers/infiniband/hw/hfi1/chip.c 		reg = read_kctxt_csr(dd, sc->hw_context,
dd               13023 drivers/infiniband/hw/hfi1/chip.c 		write_kctxt_csr(dd, sc->hw_context,
dd               13028 drivers/infiniband/hw/hfi1/chip.c int hfi1_tempsense_rd(struct hfi1_devdata *dd, struct hfi1_temp *temp)
dd               13033 drivers/infiniband/hw/hfi1/chip.c 	if (dd->icode != ICODE_RTL_SILICON) {
dd               13035 drivers/infiniband/hw/hfi1/chip.c 			dd_dev_info(dd, "%s: tempsense not supported by HW\n",
dd               13039 drivers/infiniband/hw/hfi1/chip.c 	reg = read_csr(dd, ASIC_STS_THERM);
dd               13064 drivers/infiniband/hw/hfi1/chip.c static void read_mod_write(struct hfi1_devdata *dd, u16 src, u64 bits,
dd               13070 drivers/infiniband/hw/hfi1/chip.c 	spin_lock(&dd->irq_src_lock);
dd               13071 drivers/infiniband/hw/hfi1/chip.c 	reg = read_csr(dd, CCE_INT_MASK + (8 * idx));
dd               13076 drivers/infiniband/hw/hfi1/chip.c 	write_csr(dd, CCE_INT_MASK + (8 * idx), reg);
dd               13077 drivers/infiniband/hw/hfi1/chip.c 	spin_unlock(&dd->irq_src_lock);
dd               13089 drivers/infiniband/hw/hfi1/chip.c int set_intr_bits(struct hfi1_devdata *dd, u16 first, u16 last, bool set)
dd               13105 drivers/infiniband/hw/hfi1/chip.c 			read_mod_write(dd, src - 1, bits, set);
dd               13110 drivers/infiniband/hw/hfi1/chip.c 	read_mod_write(dd, last, bits, set);
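set_intr_bits() maps the inclusive source range [first, last] onto the bank of 64-bit CCE_INT_MASK registers, batching contiguous bits per register and letting read_mod_write() do the update under dd->irq_src_lock. The bit-to-word mapping, shown one bit at a time for clarity (set_range is hypothetical; the driver batches whole registers):

#include <stdbool.h>
#include <stdint.h>

/* set or clear interrupt-source bits [first, last] across an array
 * of 64-bit mask words */
static void set_range(uint64_t *mask, unsigned first, unsigned last, bool set)
{
	unsigned i;

	for (i = first; i <= last; i++) {
		if (set)
			mask[i / 64] |= 1ull << (i % 64);
		else
			mask[i / 64] &= ~(1ull << (i % 64));
	}
}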
dd               13118 drivers/infiniband/hw/hfi1/chip.c void clear_all_interrupts(struct hfi1_devdata *dd)
dd               13123 drivers/infiniband/hw/hfi1/chip.c 		write_csr(dd, CCE_INT_CLEAR + (8 * i), ~(u64)0);
dd               13125 drivers/infiniband/hw/hfi1/chip.c 	write_csr(dd, CCE_ERR_CLEAR, ~(u64)0);
dd               13126 drivers/infiniband/hw/hfi1/chip.c 	write_csr(dd, MISC_ERR_CLEAR, ~(u64)0);
dd               13127 drivers/infiniband/hw/hfi1/chip.c 	write_csr(dd, RCV_ERR_CLEAR, ~(u64)0);
dd               13128 drivers/infiniband/hw/hfi1/chip.c 	write_csr(dd, SEND_ERR_CLEAR, ~(u64)0);
dd               13129 drivers/infiniband/hw/hfi1/chip.c 	write_csr(dd, SEND_PIO_ERR_CLEAR, ~(u64)0);
dd               13130 drivers/infiniband/hw/hfi1/chip.c 	write_csr(dd, SEND_DMA_ERR_CLEAR, ~(u64)0);
dd               13131 drivers/infiniband/hw/hfi1/chip.c 	write_csr(dd, SEND_EGRESS_ERR_CLEAR, ~(u64)0);
dd               13132 drivers/infiniband/hw/hfi1/chip.c 	for (i = 0; i < chip_send_contexts(dd); i++)
dd               13133 drivers/infiniband/hw/hfi1/chip.c 		write_kctxt_csr(dd, i, SEND_CTXT_ERR_CLEAR, ~(u64)0);
dd               13134 drivers/infiniband/hw/hfi1/chip.c 	for (i = 0; i < chip_sdma_engines(dd); i++)
dd               13135 drivers/infiniband/hw/hfi1/chip.c 		write_kctxt_csr(dd, i, SEND_DMA_ENG_ERR_CLEAR, ~(u64)0);
dd               13137 drivers/infiniband/hw/hfi1/chip.c 	write_csr(dd, DCC_ERR_FLG_CLR, ~(u64)0);
dd               13138 drivers/infiniband/hw/hfi1/chip.c 	write_csr(dd, DC_LCB_ERR_CLR, ~(u64)0);
dd               13139 drivers/infiniband/hw/hfi1/chip.c 	write_csr(dd, DC_DC8051_ERR_CLR, ~(u64)0);
dd               13146 drivers/infiniband/hw/hfi1/chip.c void remap_intr(struct hfi1_devdata *dd, int isrc, int msix_intr)
dd               13155 drivers/infiniband/hw/hfi1/chip.c 		dd->gi_mask[m] &= ~((u64)1 << n);
dd               13157 drivers/infiniband/hw/hfi1/chip.c 		dd_dev_err(dd, "remap interrupt err\n");
dd               13164 drivers/infiniband/hw/hfi1/chip.c 	reg = read_csr(dd, CCE_INT_MAP + (8 * m));
dd               13167 drivers/infiniband/hw/hfi1/chip.c 	write_csr(dd, CCE_INT_MAP + (8 * m), reg);
dd               13170 drivers/infiniband/hw/hfi1/chip.c void remap_sdma_interrupts(struct hfi1_devdata *dd, int engine, int msix_intr)
dd               13179 drivers/infiniband/hw/hfi1/chip.c 	remap_intr(dd, IS_SDMA_START + engine, msix_intr);
dd               13180 drivers/infiniband/hw/hfi1/chip.c 	remap_intr(dd, IS_SDMA_PROGRESS_START + engine, msix_intr);
dd               13181 drivers/infiniband/hw/hfi1/chip.c 	remap_intr(dd, IS_SDMA_IDLE_START + engine, msix_intr);
dd               13188 drivers/infiniband/hw/hfi1/chip.c void reset_interrupts(struct hfi1_devdata *dd)
dd               13194 drivers/infiniband/hw/hfi1/chip.c 		dd->gi_mask[i] = ~(u64)0;
dd               13198 drivers/infiniband/hw/hfi1/chip.c 		write_csr(dd, CCE_INT_MAP + (8 * i), 0);
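remap_intr() treats each CCE_INT_MAP register as eight 8-bit slots holding MSI-X vector numbers: m = isrc / 8 picks the register and n = isrc % 8 the slot, which is why reset_interrupts() zeroes the map at 8-byte strides. A sketch of the field update (map_source is hypothetical; the 8-bit field width is inferred from the driver's layout):

#include <stdint.h>

/* each 64-bit map word packs eight 8-bit MSI-X vector numbers */
static void map_source(uint64_t *map, unsigned src, uint8_t vec)
{
	unsigned word = src / 8, slot = src % 8;

	map[word] &= ~(0xffull << (8 * slot));		/* clear old vector */
	map[word] |= (uint64_t)vec << (8 * slot);	/* install new one */
}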
dd               13206 drivers/infiniband/hw/hfi1/chip.c static int set_up_interrupts(struct hfi1_devdata *dd)
dd               13211 drivers/infiniband/hw/hfi1/chip.c 	set_intr_bits(dd, IS_FIRST_SOURCE, IS_LAST_SOURCE, false);
dd               13214 drivers/infiniband/hw/hfi1/chip.c 	clear_all_interrupts(dd);
dd               13217 drivers/infiniband/hw/hfi1/chip.c 	reset_interrupts(dd);
dd               13220 drivers/infiniband/hw/hfi1/chip.c 	ret = msix_initialize(dd);
dd               13224 drivers/infiniband/hw/hfi1/chip.c 	ret = msix_request_irqs(dd);
dd               13226 drivers/infiniband/hw/hfi1/chip.c 		msix_clean_up_interrupts(dd);
dd               13242 drivers/infiniband/hw/hfi1/chip.c static int set_up_context_variables(struct hfi1_devdata *dd)
dd               13252 drivers/infiniband/hw/hfi1/chip.c 	u32 send_contexts = chip_send_contexts(dd);
dd               13253 drivers/infiniband/hw/hfi1/chip.c 	u32 rcv_contexts = chip_rcv_contexts(dd);
dd               13276 drivers/infiniband/hw/hfi1/chip.c 		dd_dev_err(dd,
dd               13285 drivers/infiniband/hw/hfi1/chip.c 		dd_dev_err(dd, "No receive contexts available for VNIC\n");
dd               13303 drivers/infiniband/hw/hfi1/chip.c 		dd_dev_err(dd,
dd               13323 drivers/infiniband/hw/hfi1/chip.c 	rmt_count = qos_rmt_entries(dd, NULL, NULL) + (num_vnic_contexts * 2);
dd               13328 drivers/infiniband/hw/hfi1/chip.c 		dd_dev_err(dd,
dd               13339 drivers/infiniband/hw/hfi1/chip.c 	dd->num_rcv_contexts = total_contexts;
dd               13340 drivers/infiniband/hw/hfi1/chip.c 	dd->n_krcv_queues = num_kernel_contexts;
dd               13341 drivers/infiniband/hw/hfi1/chip.c 	dd->first_dyn_alloc_ctxt = num_kernel_contexts;
dd               13342 drivers/infiniband/hw/hfi1/chip.c 	dd->num_vnic_contexts = num_vnic_contexts;
dd               13343 drivers/infiniband/hw/hfi1/chip.c 	dd->num_user_contexts = n_usr_ctxts;
dd               13344 drivers/infiniband/hw/hfi1/chip.c 	dd->freectxts = n_usr_ctxts;
dd               13345 drivers/infiniband/hw/hfi1/chip.c 	dd_dev_info(dd,
dd               13348 drivers/infiniband/hw/hfi1/chip.c 		    (int)dd->num_rcv_contexts,
dd               13349 drivers/infiniband/hw/hfi1/chip.c 		    (int)dd->n_krcv_queues,
dd               13350 drivers/infiniband/hw/hfi1/chip.c 		    dd->num_vnic_contexts,
dd               13351 drivers/infiniband/hw/hfi1/chip.c 		    dd->num_user_contexts);
dd               13364 drivers/infiniband/hw/hfi1/chip.c 	dd->rcv_entries.group_size = RCV_INCREMENT;
dd               13365 drivers/infiniband/hw/hfi1/chip.c 	ngroups = chip_rcv_array_count(dd) / dd->rcv_entries.group_size;
dd               13366 drivers/infiniband/hw/hfi1/chip.c 	dd->rcv_entries.ngroups = ngroups / dd->num_rcv_contexts;
dd               13367 drivers/infiniband/hw/hfi1/chip.c 	dd->rcv_entries.nctxt_extra = ngroups -
dd               13368 drivers/infiniband/hw/hfi1/chip.c 		(dd->num_rcv_contexts * dd->rcv_entries.ngroups);
dd               13369 drivers/infiniband/hw/hfi1/chip.c 	dd_dev_info(dd, "RcvArray groups %u, ctxts extra %u\n",
dd               13370 drivers/infiniband/hw/hfi1/chip.c 		    dd->rcv_entries.ngroups,
dd               13371 drivers/infiniband/hw/hfi1/chip.c 		    dd->rcv_entries.nctxt_extra);
dd               13372 drivers/infiniband/hw/hfi1/chip.c 	if (dd->rcv_entries.ngroups * dd->rcv_entries.group_size >
dd               13374 drivers/infiniband/hw/hfi1/chip.c 		dd->rcv_entries.ngroups = (MAX_EAGER_ENTRIES * 2) /
dd               13375 drivers/infiniband/hw/hfi1/chip.c 			dd->rcv_entries.group_size;
dd               13376 drivers/infiniband/hw/hfi1/chip.c 		dd_dev_info(dd,
dd               13378 drivers/infiniband/hw/hfi1/chip.c 			    dd->rcv_entries.ngroups);
dd               13379 drivers/infiniband/hw/hfi1/chip.c 		dd->rcv_entries.nctxt_extra = 0;
dd               13384 drivers/infiniband/hw/hfi1/chip.c 	ret = init_sc_pools_and_sizes(dd);
dd               13386 drivers/infiniband/hw/hfi1/chip.c 		dd->num_send_contexts = ret;
dd               13388 drivers/infiniband/hw/hfi1/chip.c 			dd,
dd               13391 drivers/infiniband/hw/hfi1/chip.c 			dd->num_send_contexts,
dd               13392 drivers/infiniband/hw/hfi1/chip.c 			dd->sc_sizes[SC_KERNEL].count,
dd               13393 drivers/infiniband/hw/hfi1/chip.c 			dd->sc_sizes[SC_ACK].count,
dd               13394 drivers/infiniband/hw/hfi1/chip.c 			dd->sc_sizes[SC_USER].count,
dd               13395 drivers/infiniband/hw/hfi1/chip.c 			dd->sc_sizes[SC_VL15].count);
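The RcvArray split in set_up_context_variables() is plain integer division with a remainder: every receive context gets ngroups / num_rcv_contexts groups, and nctxt_extra counts the leftover groups that can be handed out one apiece. Worked example with illustrative (not chip-verified) numbers:

#include <stdio.h>

int main(void)
{
	/* illustrative values standing in for chip_rcv_array_count(),
	 * RCV_INCREMENT, and dd->num_rcv_contexts */
	unsigned rcv_array_count = 32768, group_size = 8, num_rcv_contexts = 160;
	unsigned ngroups  = rcv_array_count / group_size;		/* 4096 */
	unsigned per_ctxt = ngroups / num_rcv_contexts;			/* 25   */
	unsigned extra    = ngroups - per_ctxt * num_rcv_contexts;	/* 96   */

	printf("%u groups: %u per context, %u extra\n",
	       ngroups, per_ctxt, extra);
	return 0;
}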
dd               13409 drivers/infiniband/hw/hfi1/chip.c 	struct hfi1_devdata *dd = ppd->dd;
dd               13413 drivers/infiniband/hw/hfi1/chip.c 	dd_dev_info(dd, "Setting partition keys\n");
dd               13414 drivers/infiniband/hw/hfi1/chip.c 	for (i = 0; i < hfi1_get_npkeys(dd); i++) {
dd               13421 drivers/infiniband/hw/hfi1/chip.c 			write_csr(dd, RCV_PARTITION_KEY +
dd               13428 drivers/infiniband/hw/hfi1/chip.c 	add_rcvctrl(dd, RCV_CTRL_RCV_PARTITION_KEY_ENABLE_SMASK);
dd               13439 drivers/infiniband/hw/hfi1/chip.c static void write_uninitialized_csrs_and_memories(struct hfi1_devdata *dd)
dd               13445 drivers/infiniband/hw/hfi1/chip.c 		write_csr(dd, CCE_INT_MAP + (8 * i), 0);
dd               13448 drivers/infiniband/hw/hfi1/chip.c 	for (i = 0; i < chip_send_contexts(dd); i++)
dd               13449 drivers/infiniband/hw/hfi1/chip.c 		write_kctxt_csr(dd, i, SEND_CTXT_CREDIT_RETURN_ADDR, 0);
dd               13461 drivers/infiniband/hw/hfi1/chip.c 	for (i = 0; i < chip_rcv_contexts(dd); i++) {
dd               13462 drivers/infiniband/hw/hfi1/chip.c 		write_kctxt_csr(dd, i, RCV_HDR_ADDR, 0);
dd               13463 drivers/infiniband/hw/hfi1/chip.c 		write_kctxt_csr(dd, i, RCV_HDR_TAIL_ADDR, 0);
dd               13465 drivers/infiniband/hw/hfi1/chip.c 			write_uctxt_csr(dd, i, RCV_TID_FLOW_TABLE + (8 * j), 0);
dd               13469 drivers/infiniband/hw/hfi1/chip.c 	for (i = 0; i < chip_rcv_array_count(dd); i++)
dd               13470 drivers/infiniband/hw/hfi1/chip.c 		hfi1_put_tid(dd, i, PT_INVALID_FLUSH, 0, 0);
dd               13474 drivers/infiniband/hw/hfi1/chip.c 		write_csr(dd, RCV_QP_MAP_TABLE + (8 * i), 0);
dd               13480 drivers/infiniband/hw/hfi1/chip.c static void clear_cce_status(struct hfi1_devdata *dd, u64 status_bits,
dd               13487 drivers/infiniband/hw/hfi1/chip.c 	reg = read_csr(dd, CCE_STATUS);
dd               13492 drivers/infiniband/hw/hfi1/chip.c 	write_csr(dd, CCE_CTRL, ctrl_bits);
dd               13497 drivers/infiniband/hw/hfi1/chip.c 		reg = read_csr(dd, CCE_STATUS);
dd               13501 drivers/infiniband/hw/hfi1/chip.c 			dd_dev_err(dd,
dd               13511 drivers/infiniband/hw/hfi1/chip.c static void reset_cce_csrs(struct hfi1_devdata *dd)
dd               13519 drivers/infiniband/hw/hfi1/chip.c 	clear_cce_status(dd, ALL_FROZE, CCE_CTRL_SPC_UNFREEZE_SMASK);
dd               13520 drivers/infiniband/hw/hfi1/chip.c 	clear_cce_status(dd, ALL_TXE_PAUSE, CCE_CTRL_TXE_RESUME_SMASK);
dd               13521 drivers/infiniband/hw/hfi1/chip.c 	clear_cce_status(dd, ALL_RXE_PAUSE, CCE_CTRL_RXE_RESUME_SMASK);
dd               13523 drivers/infiniband/hw/hfi1/chip.c 		write_csr(dd, CCE_SCRATCH + (8 * i), 0);
dd               13525 drivers/infiniband/hw/hfi1/chip.c 	write_csr(dd, CCE_ERR_MASK, 0);
dd               13526 drivers/infiniband/hw/hfi1/chip.c 	write_csr(dd, CCE_ERR_CLEAR, ~0ull);
dd               13529 drivers/infiniband/hw/hfi1/chip.c 		write_csr(dd, CCE_COUNTER_ARRAY32 + (8 * i), 0);
dd               13530 drivers/infiniband/hw/hfi1/chip.c 	write_csr(dd, CCE_DC_CTRL, CCE_DC_CTRL_RESETCSR);
dd               13533 drivers/infiniband/hw/hfi1/chip.c 		write_csr(dd, CCE_MSIX_TABLE_LOWER + (8 * i), 0);
dd               13534 drivers/infiniband/hw/hfi1/chip.c 		write_csr(dd, CCE_MSIX_TABLE_UPPER + (8 * i),
dd               13539 drivers/infiniband/hw/hfi1/chip.c 		write_csr(dd, CCE_MSIX_INT_GRANTED, ~0ull);
dd               13540 drivers/infiniband/hw/hfi1/chip.c 		write_csr(dd, CCE_MSIX_VEC_CLR_WITHOUT_INT, ~0ull);
dd               13543 drivers/infiniband/hw/hfi1/chip.c 		write_csr(dd, CCE_INT_MAP, 0);
dd               13546 drivers/infiniband/hw/hfi1/chip.c 		write_csr(dd, CCE_INT_MASK + (8 * i), 0);
dd               13547 drivers/infiniband/hw/hfi1/chip.c 		write_csr(dd, CCE_INT_CLEAR + (8 * i), ~0ull);
dd               13552 drivers/infiniband/hw/hfi1/chip.c 		write_csr(dd, CCE_INT_COUNTER_ARRAY32 + (8 * i), 0);
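clear_cce_status() writes a CCE_CTRL resume/unfreeze bit and then polls CCE_STATUS until the corresponding pause/freeze bits drop, erroring out on timeout. A sketch of that wait loop as driver-internal code (read_csr() and CCE_STATUS are real driver symbols; wait_status_clear and the 100 ms bound are illustrative):

#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/jiffies.h>

static int wait_status_clear(struct hfi1_devdata *dd, u64 status_bits)
{
	unsigned long timeout = jiffies + msecs_to_jiffies(100);
	u64 reg;

	for (;;) {
		reg = read_csr(dd, CCE_STATUS);
		if (!(reg & status_bits))
			return 0;		/* bits dropped as requested */
		if (time_after(jiffies, timeout))
			return -ETIMEDOUT;	/* hardware never settled */
		udelay(1);
	}
}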
dd               13556 drivers/infiniband/hw/hfi1/chip.c static void reset_misc_csrs(struct hfi1_devdata *dd)
dd               13561 drivers/infiniband/hw/hfi1/chip.c 		write_csr(dd, MISC_CFG_RSA_R2 + (8 * i), 0);
dd               13562 drivers/infiniband/hw/hfi1/chip.c 		write_csr(dd, MISC_CFG_RSA_SIGNATURE + (8 * i), 0);
dd               13563 drivers/infiniband/hw/hfi1/chip.c 		write_csr(dd, MISC_CFG_RSA_MODULUS + (8 * i), 0);
dd               13570 drivers/infiniband/hw/hfi1/chip.c 	write_csr(dd, MISC_CFG_RSA_CMD, 1);
dd               13571 drivers/infiniband/hw/hfi1/chip.c 	write_csr(dd, MISC_CFG_RSA_MU, 0);
dd               13572 drivers/infiniband/hw/hfi1/chip.c 	write_csr(dd, MISC_CFG_FW_CTRL, 0);
dd               13578 drivers/infiniband/hw/hfi1/chip.c 	write_csr(dd, MISC_ERR_MASK, 0);
dd               13579 drivers/infiniband/hw/hfi1/chip.c 	write_csr(dd, MISC_ERR_CLEAR, ~0ull);
dd               13584 drivers/infiniband/hw/hfi1/chip.c static void reset_txe_csrs(struct hfi1_devdata *dd)
dd               13591 drivers/infiniband/hw/hfi1/chip.c 	write_csr(dd, SEND_CTRL, 0);
dd               13592 drivers/infiniband/hw/hfi1/chip.c 	__cm_reset(dd, 0);	/* reset CM internal state */
dd               13597 drivers/infiniband/hw/hfi1/chip.c 	write_csr(dd, SEND_HIGH_PRIORITY_LIMIT, 0);
dd               13598 drivers/infiniband/hw/hfi1/chip.c 	pio_reset_all(dd);	/* SEND_PIO_INIT_CTXT */
dd               13600 drivers/infiniband/hw/hfi1/chip.c 	write_csr(dd, SEND_PIO_ERR_MASK, 0);
dd               13601 drivers/infiniband/hw/hfi1/chip.c 	write_csr(dd, SEND_PIO_ERR_CLEAR, ~0ull);
dd               13604 drivers/infiniband/hw/hfi1/chip.c 	write_csr(dd, SEND_DMA_ERR_MASK, 0);
dd               13605 drivers/infiniband/hw/hfi1/chip.c 	write_csr(dd, SEND_DMA_ERR_CLEAR, ~0ull);
dd               13608 drivers/infiniband/hw/hfi1/chip.c 	write_csr(dd, SEND_EGRESS_ERR_MASK, 0);
dd               13609 drivers/infiniband/hw/hfi1/chip.c 	write_csr(dd, SEND_EGRESS_ERR_CLEAR, ~0ull);
dd               13611 drivers/infiniband/hw/hfi1/chip.c 	write_csr(dd, SEND_BTH_QP, 0);
dd               13612 drivers/infiniband/hw/hfi1/chip.c 	write_csr(dd, SEND_STATIC_RATE_CONTROL, 0);
dd               13613 drivers/infiniband/hw/hfi1/chip.c 	write_csr(dd, SEND_SC2VLT0, 0);
dd               13614 drivers/infiniband/hw/hfi1/chip.c 	write_csr(dd, SEND_SC2VLT1, 0);
dd               13615 drivers/infiniband/hw/hfi1/chip.c 	write_csr(dd, SEND_SC2VLT2, 0);
dd               13616 drivers/infiniband/hw/hfi1/chip.c 	write_csr(dd, SEND_SC2VLT3, 0);
dd               13617 drivers/infiniband/hw/hfi1/chip.c 	write_csr(dd, SEND_LEN_CHECK0, 0);
dd               13618 drivers/infiniband/hw/hfi1/chip.c 	write_csr(dd, SEND_LEN_CHECK1, 0);
dd               13620 drivers/infiniband/hw/hfi1/chip.c 	write_csr(dd, SEND_ERR_MASK, 0);
dd               13621 drivers/infiniband/hw/hfi1/chip.c 	write_csr(dd, SEND_ERR_CLEAR, ~0ull);
dd               13624 drivers/infiniband/hw/hfi1/chip.c 		write_csr(dd, SEND_LOW_PRIORITY_LIST + (8 * i), 0);
dd               13626 drivers/infiniband/hw/hfi1/chip.c 		write_csr(dd, SEND_HIGH_PRIORITY_LIST + (8 * i), 0);
dd               13627 drivers/infiniband/hw/hfi1/chip.c 	for (i = 0; i < chip_send_contexts(dd) / NUM_CONTEXTS_PER_SET; i++)
dd               13628 drivers/infiniband/hw/hfi1/chip.c 		write_csr(dd, SEND_CONTEXT_SET_CTRL + (8 * i), 0);
dd               13630 drivers/infiniband/hw/hfi1/chip.c 		write_csr(dd, SEND_COUNTER_ARRAY32 + (8 * i), 0);
dd               13632 drivers/infiniband/hw/hfi1/chip.c 		write_csr(dd, SEND_COUNTER_ARRAY64 + (8 * i), 0);
dd               13633 drivers/infiniband/hw/hfi1/chip.c 	write_csr(dd, SEND_CM_CTRL, SEND_CM_CTRL_RESETCSR);
dd               13634 drivers/infiniband/hw/hfi1/chip.c 	write_csr(dd, SEND_CM_GLOBAL_CREDIT, SEND_CM_GLOBAL_CREDIT_RESETCSR);
dd               13636 drivers/infiniband/hw/hfi1/chip.c 	write_csr(dd, SEND_CM_TIMER_CTRL, 0);
dd               13637 drivers/infiniband/hw/hfi1/chip.c 	write_csr(dd, SEND_CM_LOCAL_AU_TABLE0_TO3, 0);
dd               13638 drivers/infiniband/hw/hfi1/chip.c 	write_csr(dd, SEND_CM_LOCAL_AU_TABLE4_TO7, 0);
dd               13639 drivers/infiniband/hw/hfi1/chip.c 	write_csr(dd, SEND_CM_REMOTE_AU_TABLE0_TO3, 0);
dd               13640 drivers/infiniband/hw/hfi1/chip.c 	write_csr(dd, SEND_CM_REMOTE_AU_TABLE4_TO7, 0);
dd               13642 drivers/infiniband/hw/hfi1/chip.c 		write_csr(dd, SEND_CM_CREDIT_VL + (8 * i), 0);
dd               13643 drivers/infiniband/hw/hfi1/chip.c 	write_csr(dd, SEND_CM_CREDIT_VL15, 0);
dd               13648 drivers/infiniband/hw/hfi1/chip.c 	write_csr(dd, SEND_EGRESS_ERR_INFO, ~0ull);
dd               13655 drivers/infiniband/hw/hfi1/chip.c 	for (i = 0; i < chip_send_contexts(dd); i++) {
dd               13656 drivers/infiniband/hw/hfi1/chip.c 		write_kctxt_csr(dd, i, SEND_CTXT_CTRL, 0);
dd               13657 drivers/infiniband/hw/hfi1/chip.c 		write_kctxt_csr(dd, i, SEND_CTXT_CREDIT_CTRL, 0);
dd               13658 drivers/infiniband/hw/hfi1/chip.c 		write_kctxt_csr(dd, i, SEND_CTXT_CREDIT_RETURN_ADDR, 0);
dd               13659 drivers/infiniband/hw/hfi1/chip.c 		write_kctxt_csr(dd, i, SEND_CTXT_CREDIT_FORCE, 0);
dd               13660 drivers/infiniband/hw/hfi1/chip.c 		write_kctxt_csr(dd, i, SEND_CTXT_ERR_MASK, 0);
dd               13661 drivers/infiniband/hw/hfi1/chip.c 		write_kctxt_csr(dd, i, SEND_CTXT_ERR_CLEAR, ~0ull);
dd               13662 drivers/infiniband/hw/hfi1/chip.c 		write_kctxt_csr(dd, i, SEND_CTXT_CHECK_ENABLE, 0);
dd               13663 drivers/infiniband/hw/hfi1/chip.c 		write_kctxt_csr(dd, i, SEND_CTXT_CHECK_VL, 0);
dd               13664 drivers/infiniband/hw/hfi1/chip.c 		write_kctxt_csr(dd, i, SEND_CTXT_CHECK_JOB_KEY, 0);
dd               13665 drivers/infiniband/hw/hfi1/chip.c 		write_kctxt_csr(dd, i, SEND_CTXT_CHECK_PARTITION_KEY, 0);
dd               13666 drivers/infiniband/hw/hfi1/chip.c 		write_kctxt_csr(dd, i, SEND_CTXT_CHECK_SLID, 0);
dd               13667 drivers/infiniband/hw/hfi1/chip.c 		write_kctxt_csr(dd, i, SEND_CTXT_CHECK_OPCODE, 0);
dd               13673 drivers/infiniband/hw/hfi1/chip.c 	for (i = 0; i < chip_sdma_engines(dd); i++) {
dd               13674 drivers/infiniband/hw/hfi1/chip.c 		write_kctxt_csr(dd, i, SEND_DMA_CTRL, 0);
dd               13676 drivers/infiniband/hw/hfi1/chip.c 		write_kctxt_csr(dd, i, SEND_DMA_BASE_ADDR, 0);
dd               13677 drivers/infiniband/hw/hfi1/chip.c 		write_kctxt_csr(dd, i, SEND_DMA_LEN_GEN, 0);
dd               13678 drivers/infiniband/hw/hfi1/chip.c 		write_kctxt_csr(dd, i, SEND_DMA_TAIL, 0);
dd               13680 drivers/infiniband/hw/hfi1/chip.c 		write_kctxt_csr(dd, i, SEND_DMA_HEAD_ADDR, 0);
dd               13681 drivers/infiniband/hw/hfi1/chip.c 		write_kctxt_csr(dd, i, SEND_DMA_PRIORITY_THLD, 0);
dd               13683 drivers/infiniband/hw/hfi1/chip.c 		write_kctxt_csr(dd, i, SEND_DMA_RELOAD_CNT, 0);
dd               13684 drivers/infiniband/hw/hfi1/chip.c 		write_kctxt_csr(dd, i, SEND_DMA_DESC_CNT, 0);
dd               13687 drivers/infiniband/hw/hfi1/chip.c 		write_kctxt_csr(dd, i, SEND_DMA_ENG_ERR_MASK, 0);
dd               13688 drivers/infiniband/hw/hfi1/chip.c 		write_kctxt_csr(dd, i, SEND_DMA_ENG_ERR_CLEAR, ~0ull);
dd               13690 drivers/infiniband/hw/hfi1/chip.c 		write_kctxt_csr(dd, i, SEND_DMA_CHECK_ENABLE, 0);
dd               13691 drivers/infiniband/hw/hfi1/chip.c 		write_kctxt_csr(dd, i, SEND_DMA_CHECK_VL, 0);
dd               13692 drivers/infiniband/hw/hfi1/chip.c 		write_kctxt_csr(dd, i, SEND_DMA_CHECK_JOB_KEY, 0);
dd               13693 drivers/infiniband/hw/hfi1/chip.c 		write_kctxt_csr(dd, i, SEND_DMA_CHECK_PARTITION_KEY, 0);
dd               13694 drivers/infiniband/hw/hfi1/chip.c 		write_kctxt_csr(dd, i, SEND_DMA_CHECK_SLID, 0);
dd               13695 drivers/infiniband/hw/hfi1/chip.c 		write_kctxt_csr(dd, i, SEND_DMA_CHECK_OPCODE, 0);
dd               13696 drivers/infiniband/hw/hfi1/chip.c 		write_kctxt_csr(dd, i, SEND_DMA_MEMORY, 0);
dd               13704 drivers/infiniband/hw/hfi1/chip.c static void init_rbufs(struct hfi1_devdata *dd)
dd               13715 drivers/infiniband/hw/hfi1/chip.c 		reg = read_csr(dd, RCV_STATUS);
dd               13727 drivers/infiniband/hw/hfi1/chip.c 			dd_dev_err(dd,
dd               13736 drivers/infiniband/hw/hfi1/chip.c 	write_csr(dd, RCV_CTRL, RCV_CTRL_RX_RBUF_INIT_SMASK);
dd               13744 drivers/infiniband/hw/hfi1/chip.c 	read_csr(dd, RCV_CTRL);
dd               13751 drivers/infiniband/hw/hfi1/chip.c 		reg = read_csr(dd, RCV_STATUS);
dd               13757 drivers/infiniband/hw/hfi1/chip.c 			dd_dev_err(dd,
dd               13766 drivers/infiniband/hw/hfi1/chip.c static void reset_rxe_csrs(struct hfi1_devdata *dd)
dd               13773 drivers/infiniband/hw/hfi1/chip.c 	write_csr(dd, RCV_CTRL, 0);
dd               13774 drivers/infiniband/hw/hfi1/chip.c 	init_rbufs(dd);
dd               13779 drivers/infiniband/hw/hfi1/chip.c 	write_csr(dd, RCV_BTH_QP, 0);
dd               13780 drivers/infiniband/hw/hfi1/chip.c 	write_csr(dd, RCV_MULTICAST, 0);
dd               13781 drivers/infiniband/hw/hfi1/chip.c 	write_csr(dd, RCV_BYPASS, 0);
dd               13782 drivers/infiniband/hw/hfi1/chip.c 	write_csr(dd, RCV_VL15, 0);
dd               13784 drivers/infiniband/hw/hfi1/chip.c 	write_csr(dd, RCV_ERR_INFO,
dd               13787 drivers/infiniband/hw/hfi1/chip.c 	write_csr(dd, RCV_ERR_MASK, 0);
dd               13788 drivers/infiniband/hw/hfi1/chip.c 	write_csr(dd, RCV_ERR_CLEAR, ~0ull);
dd               13791 drivers/infiniband/hw/hfi1/chip.c 		write_csr(dd, RCV_QP_MAP_TABLE + (8 * i), 0);
dd               13793 drivers/infiniband/hw/hfi1/chip.c 		write_csr(dd, RCV_PARTITION_KEY + (8 * i), 0);
dd               13795 drivers/infiniband/hw/hfi1/chip.c 		write_csr(dd, RCV_COUNTER_ARRAY32 + (8 * i), 0);
dd               13797 drivers/infiniband/hw/hfi1/chip.c 		write_csr(dd, RCV_COUNTER_ARRAY64 + (8 * i), 0);
dd               13799 drivers/infiniband/hw/hfi1/chip.c 		clear_rsm_rule(dd, i);
dd               13801 drivers/infiniband/hw/hfi1/chip.c 		write_csr(dd, RCV_RSM_MAP_TABLE + (8 * i), 0);
dd               13806 drivers/infiniband/hw/hfi1/chip.c 	for (i = 0; i < chip_rcv_contexts(dd); i++) {
dd               13808 drivers/infiniband/hw/hfi1/chip.c 		write_kctxt_csr(dd, i, RCV_CTXT_CTRL, 0);
dd               13810 drivers/infiniband/hw/hfi1/chip.c 		write_kctxt_csr(dd, i, RCV_EGR_CTRL, 0);
dd               13811 drivers/infiniband/hw/hfi1/chip.c 		write_kctxt_csr(dd, i, RCV_TID_CTRL, 0);
dd               13812 drivers/infiniband/hw/hfi1/chip.c 		write_kctxt_csr(dd, i, RCV_KEY_CTRL, 0);
dd               13813 drivers/infiniband/hw/hfi1/chip.c 		write_kctxt_csr(dd, i, RCV_HDR_ADDR, 0);
dd               13814 drivers/infiniband/hw/hfi1/chip.c 		write_kctxt_csr(dd, i, RCV_HDR_CNT, 0);
dd               13815 drivers/infiniband/hw/hfi1/chip.c 		write_kctxt_csr(dd, i, RCV_HDR_ENT_SIZE, 0);
dd               13816 drivers/infiniband/hw/hfi1/chip.c 		write_kctxt_csr(dd, i, RCV_HDR_SIZE, 0);
dd               13817 drivers/infiniband/hw/hfi1/chip.c 		write_kctxt_csr(dd, i, RCV_HDR_TAIL_ADDR, 0);
dd               13818 drivers/infiniband/hw/hfi1/chip.c 		write_kctxt_csr(dd, i, RCV_AVAIL_TIME_OUT, 0);
dd               13819 drivers/infiniband/hw/hfi1/chip.c 		write_kctxt_csr(dd, i, RCV_HDR_OVFL_CNT, 0);
dd               13823 drivers/infiniband/hw/hfi1/chip.c 		write_uctxt_csr(dd, i, RCV_HDR_HEAD, 0);
dd               13825 drivers/infiniband/hw/hfi1/chip.c 		write_uctxt_csr(dd, i, RCV_EGR_INDEX_HEAD, 0);
dd               13828 drivers/infiniband/hw/hfi1/chip.c 			write_uctxt_csr(dd, i,
dd               13845 drivers/infiniband/hw/hfi1/chip.c static void init_sc2vl_tables(struct hfi1_devdata *dd)
dd               13851 drivers/infiniband/hw/hfi1/chip.c 	write_csr(dd, SEND_SC2VLT0, SC2VL_VAL(
dd               13857 drivers/infiniband/hw/hfi1/chip.c 	write_csr(dd, SEND_SC2VLT1, SC2VL_VAL(
dd               13863 drivers/infiniband/hw/hfi1/chip.c 	write_csr(dd, SEND_SC2VLT2, SC2VL_VAL(
dd               13869 drivers/infiniband/hw/hfi1/chip.c 	write_csr(dd, SEND_SC2VLT3, SC2VL_VAL(
dd               13877 drivers/infiniband/hw/hfi1/chip.c 	write_csr(dd, DCC_CFG_SC_VL_TABLE_15_0, DC_SC_VL_VAL(
dd               13881 drivers/infiniband/hw/hfi1/chip.c 	write_csr(dd, DCC_CFG_SC_VL_TABLE_31_16, DC_SC_VL_VAL(
dd               13889 drivers/infiniband/hw/hfi1/chip.c 			*((u8 *)(dd->sc2vl) + i) = (u8)i;
dd               13891 drivers/infiniband/hw/hfi1/chip.c 			*((u8 *)(dd->sc2vl) + i) = 0;
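The cached sc2vl fill mirrors the CSR programming above it: the first 32 service classes map 1:1 onto VLs and everything higher maps to VL0. Sketch (fill_sc2vl and the 64-entry size are assumptions; the driver sizes its loop from dd->sc2vl):

#include <stdint.h>

/* SC-to-VL cache: identity for SC0-31, VL0 for everything above */
static void fill_sc2vl(uint8_t sc2vl[64])
{
	int i;

	for (i = 0; i < 64; i++)
		sc2vl[i] = (i < 32) ? (uint8_t)i : 0;
}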
dd               13904 drivers/infiniband/hw/hfi1/chip.c static int init_chip(struct hfi1_devdata *dd)
dd               13921 drivers/infiniband/hw/hfi1/chip.c 	write_csr(dd, SEND_CTRL, 0);
dd               13922 drivers/infiniband/hw/hfi1/chip.c 	for (i = 0; i < chip_send_contexts(dd); i++)
dd               13923 drivers/infiniband/hw/hfi1/chip.c 		write_kctxt_csr(dd, i, SEND_CTXT_CTRL, 0);
dd               13924 drivers/infiniband/hw/hfi1/chip.c 	for (i = 0; i < chip_sdma_engines(dd); i++)
dd               13925 drivers/infiniband/hw/hfi1/chip.c 		write_kctxt_csr(dd, i, SEND_DMA_CTRL, 0);
dd               13927 drivers/infiniband/hw/hfi1/chip.c 	write_csr(dd, RCV_CTRL, 0);
dd               13928 drivers/infiniband/hw/hfi1/chip.c 	for (i = 0; i < chip_rcv_contexts(dd); i++)
dd               13929 drivers/infiniband/hw/hfi1/chip.c 		write_csr(dd, RCV_CTXT_CTRL, 0);
dd               13932 drivers/infiniband/hw/hfi1/chip.c 		write_csr(dd, CCE_INT_MASK + (8 * i), 0ull);
dd               13940 drivers/infiniband/hw/hfi1/chip.c 	write_csr(dd, CCE_DC_CTRL, CCE_DC_CTRL_DC_RESET_SMASK);
dd               13941 drivers/infiniband/hw/hfi1/chip.c 	(void)read_csr(dd, CCE_DC_CTRL);
dd               13949 drivers/infiniband/hw/hfi1/chip.c 		dd_dev_info(dd, "Resetting CSRs with FLR\n");
dd               13952 drivers/infiniband/hw/hfi1/chip.c 		pcie_flr(dd->pcidev);
dd               13955 drivers/infiniband/hw/hfi1/chip.c 		ret = restore_pci_variables(dd);
dd               13957 drivers/infiniband/hw/hfi1/chip.c 			dd_dev_err(dd, "%s: Could not restore PCI variables\n",
dd               13962 drivers/infiniband/hw/hfi1/chip.c 		if (is_ax(dd)) {
dd               13963 drivers/infiniband/hw/hfi1/chip.c 			dd_dev_info(dd, "Resetting CSRs with FLR\n");
dd               13964 drivers/infiniband/hw/hfi1/chip.c 			pcie_flr(dd->pcidev);
dd               13965 drivers/infiniband/hw/hfi1/chip.c 			ret = restore_pci_variables(dd);
dd               13967 drivers/infiniband/hw/hfi1/chip.c 				dd_dev_err(dd, "%s: Could not restore PCI variables\n",
dd               13973 drivers/infiniband/hw/hfi1/chip.c 		dd_dev_info(dd, "Resetting CSRs with writes\n");
dd               13974 drivers/infiniband/hw/hfi1/chip.c 		reset_cce_csrs(dd);
dd               13975 drivers/infiniband/hw/hfi1/chip.c 		reset_txe_csrs(dd);
dd               13976 drivers/infiniband/hw/hfi1/chip.c 		reset_rxe_csrs(dd);
dd               13977 drivers/infiniband/hw/hfi1/chip.c 		reset_misc_csrs(dd);
dd               13980 drivers/infiniband/hw/hfi1/chip.c 	write_csr(dd, CCE_DC_CTRL, 0);
dd               13983 drivers/infiniband/hw/hfi1/chip.c 	setextled(dd, 0);
dd               13995 drivers/infiniband/hw/hfi1/chip.c 	write_csr(dd, ASIC_QSFP1_OUT, 0x1f);
dd               13996 drivers/infiniband/hw/hfi1/chip.c 	write_csr(dd, ASIC_QSFP2_OUT, 0x1f);
dd               13997 drivers/infiniband/hw/hfi1/chip.c 	init_chip_resources(dd);
dd               14001 drivers/infiniband/hw/hfi1/chip.c static void init_early_variables(struct hfi1_devdata *dd)
dd               14006 drivers/infiniband/hw/hfi1/chip.c 	dd->vau = CM_VAU;
dd               14007 drivers/infiniband/hw/hfi1/chip.c 	dd->link_credits = CM_GLOBAL_CREDITS;
dd               14008 drivers/infiniband/hw/hfi1/chip.c 	if (is_ax(dd))
dd               14009 drivers/infiniband/hw/hfi1/chip.c 		dd->link_credits--;
dd               14010 drivers/infiniband/hw/hfi1/chip.c 	dd->vcu = cu_to_vcu(hfi1_cu);
dd               14012 drivers/infiniband/hw/hfi1/chip.c 	dd->vl15_init = (8 * (2048 + 128)) / vau_to_au(dd->vau);
dd               14013 drivers/infiniband/hw/hfi1/chip.c 	if (dd->vl15_init > dd->link_credits)
dd               14014 drivers/infiniband/hw/hfi1/chip.c 		dd->vl15_init = dd->link_credits;
dd               14016 drivers/infiniband/hw/hfi1/chip.c 	write_uninitialized_csrs_and_memories(dd);
dd               14019 drivers/infiniband/hw/hfi1/chip.c 		for (i = 0; i < dd->num_pports; i++) {
dd               14020 drivers/infiniband/hw/hfi1/chip.c 			struct hfi1_pportdata *ppd = &dd->pport[i];
dd               14024 drivers/infiniband/hw/hfi1/chip.c 	init_sc2vl_tables(dd);
dd               14027 drivers/infiniband/hw/hfi1/chip.c static void init_kdeth_qp(struct hfi1_devdata *dd)
dd               14032 drivers/infiniband/hw/hfi1/chip.c 		dd_dev_err(dd, "Invalid KDETH queue pair prefix, ignoring");
dd               14038 drivers/infiniband/hw/hfi1/chip.c 	write_csr(dd, SEND_BTH_QP,
dd               14042 drivers/infiniband/hw/hfi1/chip.c 	write_csr(dd, RCV_BTH_QP,
dd               14052 drivers/infiniband/hw/hfi1/chip.c u8 hfi1_get_qp_map(struct hfi1_devdata *dd, u8 idx)
dd               14054 drivers/infiniband/hw/hfi1/chip.c 	u64 reg = read_csr(dd, RCV_QP_MAP_TABLE + (idx / 8) * 8);
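hfi1_get_qp_map() addresses the 64-bit word holding slot idx ((idx / 8) * 8 bytes into RCV_QP_MAP_TABLE) and, judging by the u8 return type, shifts the wanted byte down. Equivalent extraction as a standalone helper (qp_map_entry is hypothetical):

#include <stdint.h>

/* pull entry idx from a table whose 64-bit words each pack
 * eight 8-bit receive-context numbers */
static uint8_t qp_map_entry(const uint64_t *table, uint8_t idx)
{
	return (uint8_t)(table[idx / 8] >> ((idx % 8) * 8));
}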
dd               14077 drivers/infiniband/hw/hfi1/chip.c static void init_qpmap_table(struct hfi1_devdata *dd,
dd               14092 drivers/infiniband/hw/hfi1/chip.c 			write_csr(dd, regno, reg);
dd               14098 drivers/infiniband/hw/hfi1/chip.c 	add_rcvctrl(dd, RCV_CTRL_RCV_QP_MAP_ENABLE_SMASK
dd               14126 drivers/infiniband/hw/hfi1/chip.c static struct rsm_map_table *alloc_rsm_map_table(struct hfi1_devdata *dd)
dd               14129 drivers/infiniband/hw/hfi1/chip.c 	u8 rxcontext = is_ax(dd) ? 0 : 0xff;  /* 0 is default if a0 ver. */
dd               14144 drivers/infiniband/hw/hfi1/chip.c static void complete_rsm_map_table(struct hfi1_devdata *dd,
dd               14152 drivers/infiniband/hw/hfi1/chip.c 			write_csr(dd, RCV_RSM_MAP_TABLE + (8 * i), rmt->map[i]);
dd               14155 drivers/infiniband/hw/hfi1/chip.c 		add_rcvctrl(dd, RCV_CTRL_RCV_RSM_ENABLE_SMASK);
dd               14162 drivers/infiniband/hw/hfi1/chip.c static void add_rsm_rule(struct hfi1_devdata *dd, u8 rule_index,
dd               14165 drivers/infiniband/hw/hfi1/chip.c 	write_csr(dd, RCV_RSM_CFG + (8 * rule_index),
dd               14169 drivers/infiniband/hw/hfi1/chip.c 	write_csr(dd, RCV_RSM_SELECT + (8 * rule_index),
dd               14176 drivers/infiniband/hw/hfi1/chip.c 	write_csr(dd, RCV_RSM_MATCH + (8 * rule_index),
dd               14186 drivers/infiniband/hw/hfi1/chip.c static void clear_rsm_rule(struct hfi1_devdata *dd, u8 rule_index)
dd               14188 drivers/infiniband/hw/hfi1/chip.c 	write_csr(dd, RCV_RSM_CFG + (8 * rule_index), 0);
dd               14189 drivers/infiniband/hw/hfi1/chip.c 	write_csr(dd, RCV_RSM_SELECT + (8 * rule_index), 0);
dd               14190 drivers/infiniband/hw/hfi1/chip.c 	write_csr(dd, RCV_RSM_MATCH + (8 * rule_index), 0);
dd               14194 drivers/infiniband/hw/hfi1/chip.c static int qos_rmt_entries(struct hfi1_devdata *dd, unsigned int *mp,
dd               14202 drivers/infiniband/hw/hfi1/chip.c 	if (dd->n_krcv_queues <= MIN_KERNEL_KCTXTS ||
dd               14251 drivers/infiniband/hw/hfi1/chip.c static void init_qos(struct hfi1_devdata *dd, struct rsm_map_table *rmt)
dd               14260 drivers/infiniband/hw/hfi1/chip.c 	rmt_entries = qos_rmt_entries(dd, &m, &n);
dd               14308 drivers/infiniband/hw/hfi1/chip.c 	add_rsm_rule(dd, RSM_INS_VERBS, &rrd);
dd               14313 drivers/infiniband/hw/hfi1/chip.c 	init_qpmap_table(dd, HFI1_CTRL_CTXT, HFI1_CTRL_CTXT);
dd               14314 drivers/infiniband/hw/hfi1/chip.c 	dd->qos_shift = n + 1;
dd               14317 drivers/infiniband/hw/hfi1/chip.c 	dd->qos_shift = 1;
dd               14318 drivers/infiniband/hw/hfi1/chip.c 	init_qpmap_table(dd, FIRST_KERNEL_KCTXT, dd->n_krcv_queues - 1);
dd               14321 drivers/infiniband/hw/hfi1/chip.c static void init_fecn_handling(struct hfi1_devdata *dd,
dd               14334 drivers/infiniband/hw/hfi1/chip.c 		start = dd->first_dyn_alloc_ctxt;
dd               14336 drivers/infiniband/hw/hfi1/chip.c 	total_cnt = dd->num_rcv_contexts - start;
dd               14340 drivers/infiniband/hw/hfi1/chip.c 		dd_dev_err(dd, "FECN handling disabled - too many contexts allocated\n");
dd               14356 drivers/infiniband/hw/hfi1/chip.c 	for (i = start, idx = rmt->used; i < dd->num_rcv_contexts;
dd               14390 drivers/infiniband/hw/hfi1/chip.c 	add_rsm_rule(dd, RSM_INS_FECN, &rrd);
dd               14396 drivers/infiniband/hw/hfi1/chip.c void hfi1_init_vnic_rsm(struct hfi1_devdata *dd)
dd               14404 drivers/infiniband/hw/hfi1/chip.c 	if (hfi1_vnic_is_rsm_full(dd, NUM_VNIC_MAP_ENTRIES)) {
dd               14405 drivers/infiniband/hw/hfi1/chip.c 		dd_dev_err(dd, "Vnic RSM disabled, rmt entries used = %d\n",
dd               14406 drivers/infiniband/hw/hfi1/chip.c 			   dd->vnic.rmt_start);
dd               14410 drivers/infiniband/hw/hfi1/chip.c 	dev_dbg(&(dd)->pcidev->dev, "Vnic rsm start = %d, end %d\n",
dd               14411 drivers/infiniband/hw/hfi1/chip.c 		dd->vnic.rmt_start,
dd               14412 drivers/infiniband/hw/hfi1/chip.c 		dd->vnic.rmt_start + NUM_VNIC_MAP_ENTRIES);
dd               14415 drivers/infiniband/hw/hfi1/chip.c 	regoff = RCV_RSM_MAP_TABLE + (dd->vnic.rmt_start / 8) * 8;
dd               14416 drivers/infiniband/hw/hfi1/chip.c 	reg = read_csr(dd, regoff);
dd               14419 drivers/infiniband/hw/hfi1/chip.c 		j = (dd->vnic.rmt_start + i) % 8;
dd               14421 drivers/infiniband/hw/hfi1/chip.c 		reg |= (u64)dd->vnic.ctxt[ctx_id++]->ctxt << (j * 8);
dd               14423 drivers/infiniband/hw/hfi1/chip.c 		ctx_id %= dd->vnic.num_ctxt;
dd               14426 drivers/infiniband/hw/hfi1/chip.c 			dev_dbg(&(dd)->pcidev->dev,
dd               14430 drivers/infiniband/hw/hfi1/chip.c 			write_csr(dd, regoff, reg);
dd               14433 drivers/infiniband/hw/hfi1/chip.c 				reg = read_csr(dd, regoff);
dd               14438 drivers/infiniband/hw/hfi1/chip.c 	rrd.offset = dd->vnic.rmt_start;
dd               14453 drivers/infiniband/hw/hfi1/chip.c 	add_rsm_rule(dd, RSM_INS_VNIC, &rrd);
dd               14456 drivers/infiniband/hw/hfi1/chip.c 	add_rcvctrl(dd, RCV_CTRL_RCV_RSM_ENABLE_SMASK);
dd               14459 drivers/infiniband/hw/hfi1/chip.c void hfi1_deinit_vnic_rsm(struct hfi1_devdata *dd)
dd               14461 drivers/infiniband/hw/hfi1/chip.c 	clear_rsm_rule(dd, RSM_INS_VNIC);
dd               14464 drivers/infiniband/hw/hfi1/chip.c 	if (dd->vnic.rmt_start == 0)
dd               14465 drivers/infiniband/hw/hfi1/chip.c 		clear_rcvctrl(dd, RCV_CTRL_RCV_RSM_ENABLE_SMASK);
dd               14468 drivers/infiniband/hw/hfi1/chip.c static int init_rxe(struct hfi1_devdata *dd)
dd               14474 drivers/infiniband/hw/hfi1/chip.c 	write_csr(dd, RCV_ERR_MASK, ~0ull);
dd               14476 drivers/infiniband/hw/hfi1/chip.c 	rmt = alloc_rsm_map_table(dd);
dd               14481 drivers/infiniband/hw/hfi1/chip.c 	init_qos(dd, rmt);
dd               14482 drivers/infiniband/hw/hfi1/chip.c 	init_fecn_handling(dd, rmt);
dd               14483 drivers/infiniband/hw/hfi1/chip.c 	complete_rsm_map_table(dd, rmt);
dd               14485 drivers/infiniband/hw/hfi1/chip.c 	dd->vnic.rmt_start = rmt->used;
dd               14501 drivers/infiniband/hw/hfi1/chip.c 	val = read_csr(dd, RCV_BYPASS);
dd               14505 drivers/infiniband/hw/hfi1/chip.c 	write_csr(dd, RCV_BYPASS, val);
dd               14509 drivers/infiniband/hw/hfi1/chip.c static void init_other(struct hfi1_devdata *dd)
dd               14512 drivers/infiniband/hw/hfi1/chip.c 	write_csr(dd, CCE_ERR_MASK, ~0ull);
dd               14514 drivers/infiniband/hw/hfi1/chip.c 	write_csr(dd, MISC_ERR_MASK, DRIVER_MISC_MASK);
dd               14516 drivers/infiniband/hw/hfi1/chip.c 	write_csr(dd, DCC_ERR_FLG_EN, ~0ull);
dd               14517 drivers/infiniband/hw/hfi1/chip.c 	write_csr(dd, DC_DC8051_ERR_EN, ~0ull);
dd               14528 drivers/infiniband/hw/hfi1/chip.c static void assign_cm_au_table(struct hfi1_devdata *dd, u32 cu,
dd               14531 drivers/infiniband/hw/hfi1/chip.c 	write_csr(dd, csr0to3,
dd               14538 drivers/infiniband/hw/hfi1/chip.c 	write_csr(dd, csr4to7,
dd               14549 drivers/infiniband/hw/hfi1/chip.c static void assign_local_cm_au_table(struct hfi1_devdata *dd, u8 vcu)
dd               14551 drivers/infiniband/hw/hfi1/chip.c 	assign_cm_au_table(dd, vcu_to_cu(vcu), SEND_CM_LOCAL_AU_TABLE0_TO3,
dd               14555 drivers/infiniband/hw/hfi1/chip.c void assign_remote_cm_au_table(struct hfi1_devdata *dd, u8 vcu)
dd               14557 drivers/infiniband/hw/hfi1/chip.c 	assign_cm_au_table(dd, vcu_to_cu(vcu), SEND_CM_REMOTE_AU_TABLE0_TO3,
dd               14561 drivers/infiniband/hw/hfi1/chip.c static void init_txe(struct hfi1_devdata *dd)
dd               14566 drivers/infiniband/hw/hfi1/chip.c 	write_csr(dd, SEND_PIO_ERR_MASK, ~0ull);
dd               14567 drivers/infiniband/hw/hfi1/chip.c 	write_csr(dd, SEND_DMA_ERR_MASK, ~0ull);
dd               14568 drivers/infiniband/hw/hfi1/chip.c 	write_csr(dd, SEND_ERR_MASK, ~0ull);
dd               14569 drivers/infiniband/hw/hfi1/chip.c 	write_csr(dd, SEND_EGRESS_ERR_MASK, ~0ull);
dd               14572 drivers/infiniband/hw/hfi1/chip.c 	for (i = 0; i < chip_send_contexts(dd); i++)
dd               14573 drivers/infiniband/hw/hfi1/chip.c 		write_kctxt_csr(dd, i, SEND_CTXT_ERR_MASK, ~0ull);
dd               14574 drivers/infiniband/hw/hfi1/chip.c 	for (i = 0; i < chip_sdma_engines(dd); i++)
dd               14575 drivers/infiniband/hw/hfi1/chip.c 		write_kctxt_csr(dd, i, SEND_DMA_ENG_ERR_MASK, ~0ull);
dd               14578 drivers/infiniband/hw/hfi1/chip.c 	assign_local_cm_au_table(dd, dd->vcu);
dd               14584 drivers/infiniband/hw/hfi1/chip.c 	if (dd->icode != ICODE_FUNCTIONAL_SIMULATOR)
dd               14585 drivers/infiniband/hw/hfi1/chip.c 		write_csr(dd, SEND_CM_TIMER_CTRL, HFI1_CREDIT_RETURN_RATE);
dd               14588 drivers/infiniband/hw/hfi1/chip.c int hfi1_set_ctxt_jkey(struct hfi1_devdata *dd, struct hfi1_ctxtdata *rcd,
dd               14604 drivers/infiniband/hw/hfi1/chip.c 	write_kctxt_csr(dd, hw_ctxt, SEND_CTXT_CHECK_JOB_KEY, reg);
dd               14608 drivers/infiniband/hw/hfi1/chip.c 	if (!is_ax(dd)) {
dd               14609 drivers/infiniband/hw/hfi1/chip.c 		reg = read_kctxt_csr(dd, hw_ctxt, SEND_CTXT_CHECK_ENABLE);
dd               14611 drivers/infiniband/hw/hfi1/chip.c 		write_kctxt_csr(dd, hw_ctxt, SEND_CTXT_CHECK_ENABLE, reg);
dd               14618 drivers/infiniband/hw/hfi1/chip.c 	write_kctxt_csr(dd, rcd->ctxt, RCV_KEY_CTRL, reg);
dd               14623 drivers/infiniband/hw/hfi1/chip.c int hfi1_clear_ctxt_jkey(struct hfi1_devdata *dd, struct hfi1_ctxtdata *rcd)
dd               14632 drivers/infiniband/hw/hfi1/chip.c 	write_kctxt_csr(dd, hw_ctxt, SEND_CTXT_CHECK_JOB_KEY, 0);
dd               14638 drivers/infiniband/hw/hfi1/chip.c 	if (!is_ax(dd)) {
dd               14639 drivers/infiniband/hw/hfi1/chip.c 		reg = read_kctxt_csr(dd, hw_ctxt, SEND_CTXT_CHECK_ENABLE);
dd               14641 drivers/infiniband/hw/hfi1/chip.c 		write_kctxt_csr(dd, hw_ctxt, SEND_CTXT_CHECK_ENABLE, reg);
dd               14644 drivers/infiniband/hw/hfi1/chip.c 	write_kctxt_csr(dd, rcd->ctxt, RCV_KEY_CTRL, 0);
dd               14649 drivers/infiniband/hw/hfi1/chip.c int hfi1_set_ctxt_pkey(struct hfi1_devdata *dd, struct hfi1_ctxtdata *rcd,
dd               14661 drivers/infiniband/hw/hfi1/chip.c 	write_kctxt_csr(dd, hw_ctxt, SEND_CTXT_CHECK_PARTITION_KEY, reg);
dd               14662 drivers/infiniband/hw/hfi1/chip.c 	reg = read_kctxt_csr(dd, hw_ctxt, SEND_CTXT_CHECK_ENABLE);
dd               14665 drivers/infiniband/hw/hfi1/chip.c 	write_kctxt_csr(dd, hw_ctxt, SEND_CTXT_CHECK_ENABLE, reg);
dd               14670 drivers/infiniband/hw/hfi1/chip.c int hfi1_clear_ctxt_pkey(struct hfi1_devdata *dd, struct hfi1_ctxtdata *ctxt)
dd               14679 drivers/infiniband/hw/hfi1/chip.c 	reg = read_kctxt_csr(dd, hw_ctxt, SEND_CTXT_CHECK_ENABLE);
dd               14681 drivers/infiniband/hw/hfi1/chip.c 	write_kctxt_csr(dd, hw_ctxt, SEND_CTXT_CHECK_ENABLE, reg);
dd               14682 drivers/infiniband/hw/hfi1/chip.c 	write_kctxt_csr(dd, hw_ctxt, SEND_CTXT_CHECK_PARTITION_KEY, 0);
dd               14691 drivers/infiniband/hw/hfi1/chip.c void hfi1_start_cleanup(struct hfi1_devdata *dd)
dd               14693 drivers/infiniband/hw/hfi1/chip.c 	aspm_exit(dd);
dd               14694 drivers/infiniband/hw/hfi1/chip.c 	free_cntrs(dd);
dd               14695 drivers/infiniband/hw/hfi1/chip.c 	free_rcverr(dd);
dd               14696 drivers/infiniband/hw/hfi1/chip.c 	finish_chip_resources(dd);
dd               14707 drivers/infiniband/hw/hfi1/chip.c static int init_asic_data(struct hfi1_devdata *dd)
dd               14715 drivers/infiniband/hw/hfi1/chip.c 	asic_data = kzalloc(sizeof(*dd->asic_data), GFP_KERNEL);
dd               14722 drivers/infiniband/hw/hfi1/chip.c 		if ((HFI_BASE_GUID(dd) == HFI_BASE_GUID(peer)) &&
dd               14723 drivers/infiniband/hw/hfi1/chip.c 		    dd->unit != peer->unit)
dd               14729 drivers/infiniband/hw/hfi1/chip.c 		dd->asic_data = peer->asic_data;
dd               14732 drivers/infiniband/hw/hfi1/chip.c 		dd->asic_data = asic_data;
dd               14733 drivers/infiniband/hw/hfi1/chip.c 		mutex_init(&dd->asic_data->asic_resource_mutex);
dd               14735 drivers/infiniband/hw/hfi1/chip.c 	dd->asic_data->dds[dd->hfi1_id] = dd; /* self back-pointer */
dd               14740 drivers/infiniband/hw/hfi1/chip.c 		ret = set_up_i2c(dd, dd->asic_data);
dd               14751 drivers/infiniband/hw/hfi1/chip.c static int obtain_boardname(struct hfi1_devdata *dd)
dd               14759 drivers/infiniband/hw/hfi1/chip.c 	ret = read_hfi1_efi_var(dd, "description", &size,
dd               14760 drivers/infiniband/hw/hfi1/chip.c 				(void **)&dd->boardname);
dd               14762 drivers/infiniband/hw/hfi1/chip.c 		dd_dev_info(dd, "Board description not found\n");
dd               14764 drivers/infiniband/hw/hfi1/chip.c 		dd->boardname = kstrdup(generic, GFP_KERNEL);
dd               14765 drivers/infiniband/hw/hfi1/chip.c 		if (!dd->boardname)
dd               14779 drivers/infiniband/hw/hfi1/chip.c static int check_int_registers(struct hfi1_devdata *dd)
dd               14786 drivers/infiniband/hw/hfi1/chip.c 	mask = read_csr(dd, CCE_INT_MASK);
dd               14787 drivers/infiniband/hw/hfi1/chip.c 	write_csr(dd, CCE_INT_MASK, 0ull);
dd               14788 drivers/infiniband/hw/hfi1/chip.c 	reg = read_csr(dd, CCE_INT_MASK);
dd               14793 drivers/infiniband/hw/hfi1/chip.c 	write_csr(dd, CCE_INT_CLEAR, all_bits);
dd               14794 drivers/infiniband/hw/hfi1/chip.c 	reg = read_csr(dd, CCE_INT_STATUS);
dd               14799 drivers/infiniband/hw/hfi1/chip.c 	write_csr(dd, CCE_INT_FORCE, all_bits);
dd               14800 drivers/infiniband/hw/hfi1/chip.c 	reg = read_csr(dd, CCE_INT_STATUS);
dd               14805 drivers/infiniband/hw/hfi1/chip.c 	write_csr(dd, CCE_INT_CLEAR, all_bits);
dd               14806 drivers/infiniband/hw/hfi1/chip.c 	write_csr(dd, CCE_INT_MASK, mask);
dd               14810 drivers/infiniband/hw/hfi1/chip.c 	write_csr(dd, CCE_INT_MASK, mask);
dd               14811 drivers/infiniband/hw/hfi1/chip.c 	dd_dev_err(dd, "Interrupt registers not properly mapped by VMM\n");
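check_int_registers() probes for a VMM that maps the BAR but drops writes: the mask must read back as written, a clear must empty CCE_INT_STATUS, and a force must latch it, with the original mask restored either way. A stripped-down write/readback probe in the same spirit (real driver accesses go through read_csr()/write_csr(), not a raw pointer; csr_write_works is hypothetical):

#include <stdbool.h>
#include <stdint.h>

/* returns false if a register write did not take, i.e. the mapping
 * is not really reaching the device */
static bool csr_write_works(volatile uint64_t *csr)
{
	uint64_t saved = *csr;
	bool ok;

	*csr = 0;
	ok = (*csr == 0);
	*csr = saved;		/* restore the original value */
	return ok;
}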
dd               14823 drivers/infiniband/hw/hfi1/chip.c int hfi1_init_dd(struct hfi1_devdata *dd)
dd               14825 drivers/infiniband/hw/hfi1/chip.c 	struct pci_dev *pdev = dd->pcidev;
dd               14836 drivers/infiniband/hw/hfi1/chip.c 	u32 sdma_engines = chip_sdma_engines(dd);
dd               14838 drivers/infiniband/hw/hfi1/chip.c 	ppd = dd->pport;
dd               14839 drivers/infiniband/hw/hfi1/chip.c 	for (i = 0; i < dd->num_pports; i++, ppd++) {
dd               14842 drivers/infiniband/hw/hfi1/chip.c 		hfi1_init_pportdata(pdev, ppd, dd, 0, 1);
dd               14858 drivers/infiniband/hw/hfi1/chip.c 			dd_dev_err(dd, "Invalid num_vls %u, using %u VLs\n",
dd               14866 drivers/infiniband/hw/hfi1/chip.c 			dd->vld[vl].mtu = hfi1_max_mtu;
dd               14867 drivers/infiniband/hw/hfi1/chip.c 		dd->vld[15].mtu = MAX_MAD_PACKET;
dd               14889 drivers/infiniband/hw/hfi1/chip.c 	ret = hfi1_pcie_ddinit(dd, pdev);
dd               14894 drivers/infiniband/hw/hfi1/chip.c 	ret = save_pci_variables(dd);
dd               14898 drivers/infiniband/hw/hfi1/chip.c 	dd->majrev = (dd->revision >> CCE_REVISION_CHIP_REV_MAJOR_SHIFT)
dd               14900 drivers/infiniband/hw/hfi1/chip.c 	dd->minrev = (dd->revision >> CCE_REVISION_CHIP_REV_MINOR_SHIFT)
dd               14909 drivers/infiniband/hw/hfi1/chip.c 		ret = check_int_registers(dd);
dd               14918 drivers/infiniband/hw/hfi1/chip.c 	reg = read_csr(dd, CCE_REVISION2);
dd               14919 drivers/infiniband/hw/hfi1/chip.c 	dd->hfi1_id = (reg >> CCE_REVISION2_HFI_ID_SHIFT)
dd               14922 drivers/infiniband/hw/hfi1/chip.c 	dd->icode = reg >> CCE_REVISION2_IMPL_CODE_SHIFT;
dd               14923 drivers/infiniband/hw/hfi1/chip.c 	dd->irev = reg >> CCE_REVISION2_IMPL_REVISION_SHIFT;
dd               14924 drivers/infiniband/hw/hfi1/chip.c 	dd_dev_info(dd, "Implementation: %s, revision 0x%x\n",
dd               14925 drivers/infiniband/hw/hfi1/chip.c 		    dd->icode < ARRAY_SIZE(inames) ?
dd               14926 drivers/infiniband/hw/hfi1/chip.c 		    inames[dd->icode] : "unknown", (int)dd->irev);
dd               14929 drivers/infiniband/hw/hfi1/chip.c 	dd->pport->link_speed_supported = OPA_LINK_SPEED_25G;
dd               14931 drivers/infiniband/hw/hfi1/chip.c 	dd->pport->link_speed_enabled = dd->pport->link_speed_supported;
dd               14933 drivers/infiniband/hw/hfi1/chip.c 	dd->pport->link_speed_active = OPA_LINK_SPEED_25G;
dd               14936 drivers/infiniband/hw/hfi1/chip.c 	ppd = dd->pport;
dd               14937 drivers/infiniband/hw/hfi1/chip.c 	if (dd->icode == ICODE_FPGA_EMULATION && is_emulator_p(dd)) {
dd               14946 drivers/infiniband/hw/hfi1/chip.c 		dd_dev_err(dd, "num_vls %u too large, using %u VLs\n",
dd               14961 drivers/infiniband/hw/hfi1/chip.c 	dd->rcv_intr_timeout_csr = ns_to_cclock(dd, rcv_intr_timeout) / 64;
dd               14962 drivers/infiniband/hw/hfi1/chip.c 	if (dd->rcv_intr_timeout_csr >
dd               14964 drivers/infiniband/hw/hfi1/chip.c 		dd->rcv_intr_timeout_csr =
dd               14966 drivers/infiniband/hw/hfi1/chip.c 	else if (dd->rcv_intr_timeout_csr == 0 && rcv_intr_timeout)
dd               14967 drivers/infiniband/hw/hfi1/chip.c 		dd->rcv_intr_timeout_csr = 1;
dd               14970 drivers/infiniband/hw/hfi1/chip.c 	read_guid(dd);
dd               14973 drivers/infiniband/hw/hfi1/chip.c 	ret = init_asic_data(dd);
dd               14978 drivers/infiniband/hw/hfi1/chip.c 	ret = init_chip(dd);
dd               14983 drivers/infiniband/hw/hfi1/chip.c 	ret = pcie_speeds(dd);
dd               14988 drivers/infiniband/hw/hfi1/chip.c 	ret = eprom_init(dd);
dd               14993 drivers/infiniband/hw/hfi1/chip.c 	get_platform_config(dd);
dd               14996 drivers/infiniband/hw/hfi1/chip.c 	ret = hfi1_firmware_init(dd);
dd               15012 drivers/infiniband/hw/hfi1/chip.c 	ret = do_pcie_gen3_transition(dd);
dd               15020 drivers/infiniband/hw/hfi1/chip.c 	tune_pcie_caps(dd);
dd               15023 drivers/infiniband/hw/hfi1/chip.c 	init_early_variables(dd);
dd               15025 drivers/infiniband/hw/hfi1/chip.c 	parse_platform_config(dd);
dd               15027 drivers/infiniband/hw/hfi1/chip.c 	ret = obtain_boardname(dd);
dd               15031 drivers/infiniband/hw/hfi1/chip.c 	snprintf(dd->boardversion, BOARD_VERS_MAX,
dd               15034 drivers/infiniband/hw/hfi1/chip.c 		 (u32)dd->majrev,
dd               15035 drivers/infiniband/hw/hfi1/chip.c 		 (u32)dd->minrev,
dd               15036 drivers/infiniband/hw/hfi1/chip.c 		 (dd->revision >> CCE_REVISION_SW_SHIFT)
dd               15039 drivers/infiniband/hw/hfi1/chip.c 	ret = set_up_context_variables(dd);
dd               15044 drivers/infiniband/hw/hfi1/chip.c 	ret = init_rxe(dd);
dd               15049 drivers/infiniband/hw/hfi1/chip.c 	init_txe(dd);
dd               15051 drivers/infiniband/hw/hfi1/chip.c 	init_other(dd);
dd               15053 drivers/infiniband/hw/hfi1/chip.c 	init_kdeth_qp(dd);
dd               15055 drivers/infiniband/hw/hfi1/chip.c 	ret = hfi1_dev_affinity_init(dd);
dd               15060 drivers/infiniband/hw/hfi1/chip.c 	ret = init_send_contexts(dd);
dd               15064 drivers/infiniband/hw/hfi1/chip.c 	ret = hfi1_create_kctxts(dd);
dd               15072 drivers/infiniband/hw/hfi1/chip.c 	aspm_init(dd);
dd               15074 drivers/infiniband/hw/hfi1/chip.c 	ret = init_pervl_scs(dd);
dd               15079 drivers/infiniband/hw/hfi1/chip.c 	for (i = 0; i < dd->num_pports; ++i) {
dd               15080 drivers/infiniband/hw/hfi1/chip.c 		ret = sdma_init(dd, i);
dd               15086 drivers/infiniband/hw/hfi1/chip.c 	ret = set_up_interrupts(dd);
dd               15090 drivers/infiniband/hw/hfi1/chip.c 	ret = hfi1_comp_vectors_set_up(dd);
dd               15095 drivers/infiniband/hw/hfi1/chip.c 	init_lcb_access(dd);
dd               15102 drivers/infiniband/hw/hfi1/chip.c 	snprintf(dd->serial, SERIAL_MAX, "0x%08llx\n",
dd               15103 drivers/infiniband/hw/hfi1/chip.c 		 (dd->base_guid & 0xFFFFFF) |
dd               15104 drivers/infiniband/hw/hfi1/chip.c 		     ((dd->base_guid >> 11) & 0xF000000));
dd               15106 drivers/infiniband/hw/hfi1/chip.c 	dd->oui1 = dd->base_guid >> 56 & 0xFF;
dd               15107 drivers/infiniband/hw/hfi1/chip.c 	dd->oui2 = dd->base_guid >> 48 & 0xFF;
dd               15108 drivers/infiniband/hw/hfi1/chip.c 	dd->oui3 = dd->base_guid >> 40 & 0xFF;
dd               15110 drivers/infiniband/hw/hfi1/chip.c 	ret = load_firmware(dd); /* asymmetric with dispose_firmware() */
dd               15114 drivers/infiniband/hw/hfi1/chip.c 	thermal_init(dd);
dd               15116 drivers/infiniband/hw/hfi1/chip.c 	ret = init_cntrs(dd);
dd               15120 drivers/infiniband/hw/hfi1/chip.c 	ret = init_rcverr(dd);
dd               15124 drivers/infiniband/hw/hfi1/chip.c 	init_completion(&dd->user_comp);
dd               15127 drivers/infiniband/hw/hfi1/chip.c 	atomic_set(&dd->user_refcount, 1);
dd               15132 drivers/infiniband/hw/hfi1/chip.c 	free_rcverr(dd);
dd               15134 drivers/infiniband/hw/hfi1/chip.c 	free_cntrs(dd);
dd               15136 drivers/infiniband/hw/hfi1/chip.c 	hfi1_comp_vectors_clean_up(dd);
dd               15137 drivers/infiniband/hw/hfi1/chip.c 	msix_clean_up_interrupts(dd);
dd               15139 drivers/infiniband/hw/hfi1/chip.c 	hfi1_pcie_ddcleanup(dd);
dd               15141 drivers/infiniband/hw/hfi1/chip.c 	hfi1_free_devdata(dd);
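
hfi1_init_dd() above ends in a ladder of cleanup calls (free_rcverr, free_cntrs, hfi1_comp_vectors_clean_up, msix_clean_up_interrupts, hfi1_pcie_ddcleanup, hfi1_free_devdata) reached through error labels, each undoing the steps completed before the failure, in reverse order. A self-contained sketch of that goto-unwind idiom, with the step names invented:

#include <stdio.h>

static int step(const char *name, int fail)
{
        printf("init %s\n", name);
        return fail ? -1 : 0;
}

static void undo(const char *name) { printf("undo %s\n", name); }

/* Each failure jumps to the label that unwinds everything done so far. */
static int init_all(void)
{
        int ret;

        ret = step("cntrs", 0);
        if (ret)
                goto bail;
        ret = step("rcverr", 0);
        if (ret)
                goto bail_cntrs;
        ret = step("contexts", 1);      /* simulate a failure */
        if (ret)
                goto bail_rcverr;
        return 0;

bail_rcverr:
        undo("rcverr");
bail_cntrs:
        undo("cntrs");
bail:
        return ret;
}

int main(void) { return init_all() ? 1 : 0; }
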
dd               15201 drivers/infiniband/hw/hfi1/chip.c 	dd_dev_err((dd),						\
dd               15215 drivers/infiniband/hw/hfi1/chip.c static int thermal_init(struct hfi1_devdata *dd)
dd               15219 drivers/infiniband/hw/hfi1/chip.c 	if (dd->icode != ICODE_RTL_SILICON ||
dd               15220 drivers/infiniband/hw/hfi1/chip.c 	    check_chip_resource(dd, CR_THERM_INIT, NULL))
dd               15223 drivers/infiniband/hw/hfi1/chip.c 	ret = acquire_chip_resource(dd, CR_SBUS, SBUS_TIMEOUT);
dd               15225 drivers/infiniband/hw/hfi1/chip.c 		THERM_FAILURE(dd, ret, "Acquire SBus");
dd               15229 drivers/infiniband/hw/hfi1/chip.c 	dd_dev_info(dd, "Initializing thermal sensor\n");
dd               15231 drivers/infiniband/hw/hfi1/chip.c 	write_csr(dd, ASIC_CFG_THERM_POLL_EN, 0x0);
dd               15235 drivers/infiniband/hw/hfi1/chip.c 	ret = sbus_request_slow(dd, SBUS_THERMAL, 0x0,
dd               15238 drivers/infiniband/hw/hfi1/chip.c 		THERM_FAILURE(dd, ret, "Bus Reset");
dd               15242 drivers/infiniband/hw/hfi1/chip.c 	ret = sbus_request_slow(dd, SBUS_THERMAL, 0x0,
dd               15245 drivers/infiniband/hw/hfi1/chip.c 		THERM_FAILURE(dd, ret, "Therm Block Reset");
dd               15249 drivers/infiniband/hw/hfi1/chip.c 	ret = sbus_request_slow(dd, SBUS_THERMAL, 0x1,
dd               15252 drivers/infiniband/hw/hfi1/chip.c 		THERM_FAILURE(dd, ret, "Write Clock Div");
dd               15256 drivers/infiniband/hw/hfi1/chip.c 	ret = sbus_request_slow(dd, SBUS_THERMAL, 0x3,
dd               15260 drivers/infiniband/hw/hfi1/chip.c 		THERM_FAILURE(dd, ret, "Write Mode Sel");
dd               15264 drivers/infiniband/hw/hfi1/chip.c 	ret = sbus_request_slow(dd, SBUS_THERMAL, 0x0,
dd               15267 drivers/infiniband/hw/hfi1/chip.c 		THERM_FAILURE(dd, ret, "Write Reset Deassert");
dd               15274 drivers/infiniband/hw/hfi1/chip.c 	write_csr(dd, ASIC_CFG_THERM_POLL_EN, 0x1);
dd               15277 drivers/infiniband/hw/hfi1/chip.c 	ret = acquire_chip_resource(dd, CR_THERM_INIT, 0);
dd               15279 drivers/infiniband/hw/hfi1/chip.c 		THERM_FAILURE(dd, ret, "Unable to set thermal init flag");
dd               15282 drivers/infiniband/hw/hfi1/chip.c 	release_chip_resource(dd, CR_SBUS);
dd               15286 drivers/infiniband/hw/hfi1/chip.c static void handle_temp_err(struct hfi1_devdata *dd)
dd               15288 drivers/infiniband/hw/hfi1/chip.c 	struct hfi1_pportdata *ppd = &dd->pport[0];
dd               15294 drivers/infiniband/hw/hfi1/chip.c 	dd_dev_emerg(dd,
dd               15296 drivers/infiniband/hw/hfi1/chip.c 	dd->flags |= HFI1_FORCED_FREEZE;
dd               15311 drivers/infiniband/hw/hfi1/chip.c 	set_physical_link_state(dd, (OPA_LINKDOWN_REASON_SMA_DISABLED << 8) |
dd               15317 drivers/infiniband/hw/hfi1/chip.c 	dc_shutdown(dd);
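
thermal_init() above brackets a fixed sequence of sbus_request_slow() writes between acquire_chip_resource(CR_SBUS) and release_chip_resource(), reporting each failed step through the THERM_FAILURE() macro (the dd_dev_err continuation visible at chip.c line 15201). A sketch of that acquire/program/release shape; the macro body and the SBus helper below are stand-ins, not the driver's definitions:

#include <stdio.h>

/* Hypothetical stand-in for THERM_FAILURE(): report and jump to cleanup. */
#define THERM_FAILURE(ret, what)                                    \
        do {                                                        \
                fprintf(stderr, "%s failed, ret %d\n", what, ret);  \
                goto done;                                          \
        } while (0)

static int sbus_write(const char *what, int fail)
{
        printf("sbus: %s\n", what);
        return fail ? -5 : 0;
}

static int thermal_init(void)
{
        int ret;

        /* acquire_chip_resource(CR_SBUS) would go here */
        ret = sbus_write("bus reset", 0);
        if (ret)
                THERM_FAILURE(ret, "Bus Reset");
        ret = sbus_write("therm block reset", 1);   /* simulate failure */
        if (ret)
                THERM_FAILURE(ret, "Therm Block Reset");
        ret = sbus_write("clock divider", 0);
        if (ret)
                THERM_FAILURE(ret, "Write Clock Div");
done:
        /* release_chip_resource(CR_SBUS) would go here, on all paths */
        return ret;
}

int main(void) { return thermal_init() ? 1 : 0; }
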
dd                615 drivers/infiniband/hw/hfi1/chip.h u64 read_csr(const struct hfi1_devdata *dd, u32 offset);
dd                616 drivers/infiniband/hw/hfi1/chip.h void write_csr(const struct hfi1_devdata *dd, u32 offset, u64 value);
dd                623 drivers/infiniband/hw/hfi1/chip.h static inline u64 read_kctxt_csr(const struct hfi1_devdata *dd, int ctxt,
dd                627 drivers/infiniband/hw/hfi1/chip.h 	return read_csr(dd, offset0 + (0x100 * ctxt));
dd                630 drivers/infiniband/hw/hfi1/chip.h static inline void write_kctxt_csr(struct hfi1_devdata *dd, int ctxt,
dd                634 drivers/infiniband/hw/hfi1/chip.h 	write_csr(dd, offset0 + (0x100 * ctxt), value);
dd                637 drivers/infiniband/hw/hfi1/chip.h int read_lcb_csr(struct hfi1_devdata *dd, u32 offset, u64 *data);
dd                638 drivers/infiniband/hw/hfi1/chip.h int write_lcb_csr(struct hfi1_devdata *dd, u32 offset, u64 data);
dd                641 drivers/infiniband/hw/hfi1/chip.h 	const struct hfi1_devdata *dd,
dd                645 drivers/infiniband/hw/hfi1/chip.h 	const struct hfi1_devdata *dd,
dd                649 drivers/infiniband/hw/hfi1/chip.h 	return get_csr_addr(dd, offset0 + (0x100 * ctxt));
dd                658 drivers/infiniband/hw/hfi1/chip.h static inline u64 read_uctxt_csr(const struct hfi1_devdata *dd, int ctxt,
dd                662 drivers/infiniband/hw/hfi1/chip.h 	return read_csr(dd, offset0 + (0x1000 * ctxt));
dd                665 drivers/infiniband/hw/hfi1/chip.h static inline void write_uctxt_csr(struct hfi1_devdata *dd, int ctxt,
dd                669 drivers/infiniband/hw/hfi1/chip.h 	write_csr(dd, offset0 + (0x1000 * ctxt), value);
dd                672 drivers/infiniband/hw/hfi1/chip.h static inline u32 chip_rcv_contexts(struct hfi1_devdata *dd)
dd                674 drivers/infiniband/hw/hfi1/chip.h 	return read_csr(dd, RCV_CONTEXTS);
dd                677 drivers/infiniband/hw/hfi1/chip.h static inline u32 chip_send_contexts(struct hfi1_devdata *dd)
dd                679 drivers/infiniband/hw/hfi1/chip.h 	return read_csr(dd, SEND_CONTEXTS);
dd                682 drivers/infiniband/hw/hfi1/chip.h static inline u32 chip_sdma_engines(struct hfi1_devdata *dd)
dd                684 drivers/infiniband/hw/hfi1/chip.h 	return read_csr(dd, SEND_DMA_ENGINES);
dd                687 drivers/infiniband/hw/hfi1/chip.h static inline u32 chip_pio_mem_size(struct hfi1_devdata *dd)
dd                689 drivers/infiniband/hw/hfi1/chip.h 	return read_csr(dd, SEND_PIO_MEM_SIZE);
dd                692 drivers/infiniband/hw/hfi1/chip.h static inline u32 chip_sdma_mem_size(struct hfi1_devdata *dd)
dd                694 drivers/infiniband/hw/hfi1/chip.h 	return read_csr(dd, SEND_DMA_MEM_SIZE);
dd                697 drivers/infiniband/hw/hfi1/chip.h static inline u32 chip_rcv_array_count(struct hfi1_devdata *dd)
dd                699 drivers/infiniband/hw/hfi1/chip.h 	return read_csr(dd, RCV_ARRAY_CNT);
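
The chip.h accessors above differ only in stride: kernel-context CSRs repeat every 0x100 bytes (read_kctxt_csr/write_kctxt_csr) while user-context CSRs repeat every 0x1000 bytes (read_uctxt_csr/write_uctxt_csr), so each helper just adds ctxt times the stride to a base register offset. The arithmetic in isolation, with an arbitrary base offset:

#include <stdint.h>
#include <stdio.h>

#define KCTXT_STRIDE 0x100   /* per-kernel-context CSR block */
#define UCTXT_STRIDE 0x1000  /* per-user-context CSR block   */

static uint32_t kctxt_offset(uint32_t offset0, int ctxt)
{
        return offset0 + KCTXT_STRIDE * ctxt;
}

static uint32_t uctxt_offset(uint32_t offset0, int ctxt)
{
        return offset0 + UCTXT_STRIDE * ctxt;
}

int main(void)
{
        /* e.g. the 3rd context's copy of a register at base 0x2000 */
        printf("kctxt: 0x%x\n", kctxt_offset(0x2000, 3)); /* 0x2300 */
        printf("uctxt: 0x%x\n", uctxt_offset(0x2000, 3)); /* 0x5000 */
        return 0;
}

Stride arithmetic keeps one accessor per register class instead of one table entry per context, which is why the headers can stay as thin inlines around read_csr()/write_csr().
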
dd                715 drivers/infiniband/hw/hfi1/chip.h void sbus_request(struct hfi1_devdata *dd,
dd                717 drivers/infiniband/hw/hfi1/chip.h int sbus_request_slow(struct hfi1_devdata *dd,
dd                719 drivers/infiniband/hw/hfi1/chip.h void set_sbus_fast_mode(struct hfi1_devdata *dd);
dd                720 drivers/infiniband/hw/hfi1/chip.h void clear_sbus_fast_mode(struct hfi1_devdata *dd);
dd                721 drivers/infiniband/hw/hfi1/chip.h int hfi1_firmware_init(struct hfi1_devdata *dd);
dd                722 drivers/infiniband/hw/hfi1/chip.h int load_pcie_firmware(struct hfi1_devdata *dd);
dd                723 drivers/infiniband/hw/hfi1/chip.h int load_firmware(struct hfi1_devdata *dd);
dd                725 drivers/infiniband/hw/hfi1/chip.h int acquire_hw_mutex(struct hfi1_devdata *dd);
dd                726 drivers/infiniband/hw/hfi1/chip.h void release_hw_mutex(struct hfi1_devdata *dd);
dd                750 drivers/infiniband/hw/hfi1/chip.h int acquire_chip_resource(struct hfi1_devdata *dd, u32 resource, u32 mswait);
dd                751 drivers/infiniband/hw/hfi1/chip.h void release_chip_resource(struct hfi1_devdata *dd, u32 resource);
dd                752 drivers/infiniband/hw/hfi1/chip.h bool check_chip_resource(struct hfi1_devdata *dd, u32 resource,
dd                754 drivers/infiniband/hw/hfi1/chip.h void init_chip_resources(struct hfi1_devdata *dd);
dd                755 drivers/infiniband/hw/hfi1/chip.h void finish_chip_resources(struct hfi1_devdata *dd);
dd                763 drivers/infiniband/hw/hfi1/chip.h void fabric_serdes_reset(struct hfi1_devdata *dd);
dd                764 drivers/infiniband/hw/hfi1/chip.h int read_8051_data(struct hfi1_devdata *dd, u32 addr, u32 len, u64 *result);
dd                767 drivers/infiniband/hw/hfi1/chip.h void read_misc_status(struct hfi1_devdata *dd, u8 *ver_major, u8 *ver_minor,
dd                769 drivers/infiniband/hw/hfi1/chip.h int write_host_interface_version(struct hfi1_devdata *dd, u8 version);
dd                770 drivers/infiniband/hw/hfi1/chip.h void read_guid(struct hfi1_devdata *dd);
dd                771 drivers/infiniband/hw/hfi1/chip.h int wait_fm_ready(struct hfi1_devdata *dd, u32 mstimeout);
dd                787 drivers/infiniband/hw/hfi1/chip.h int send_idle_sma(struct hfi1_devdata *dd, u64 message);
dd                792 drivers/infiniband/hw/hfi1/chip.h void set_intr_state(struct hfi1_devdata *dd, u32 enable);
dd                797 drivers/infiniband/hw/hfi1/chip.h int stop_drain_data_vls(struct hfi1_devdata *dd);
dd                798 drivers/infiniband/hw/hfi1/chip.h int open_fill_data_vls(struct hfi1_devdata *dd);
dd                799 drivers/infiniband/hw/hfi1/chip.h u32 ns_to_cclock(struct hfi1_devdata *dd, u32 ns);
dd                800 drivers/infiniband/hw/hfi1/chip.h u32 cclock_to_ns(struct hfi1_devdata *dd, u32 cclock);
dd                802 drivers/infiniband/hw/hfi1/chip.h void read_ltp_rtt(struct hfi1_devdata *dd);
dd                803 drivers/infiniband/hw/hfi1/chip.h void clear_linkup_counters(struct hfi1_devdata *dd);
dd                805 drivers/infiniband/hw/hfi1/chip.h int is_ax(struct hfi1_devdata *dd);
dd                806 drivers/infiniband/hw/hfi1/chip.h int is_bx(struct hfi1_devdata *dd);
dd                808 drivers/infiniband/hw/hfi1/chip.h u32 read_physical_state(struct hfi1_devdata *dd);
dd                809 drivers/infiniband/hw/hfi1/chip.h u32 chip_to_opa_pstate(struct hfi1_devdata *dd, u32 chip_pstate);
dd                815 drivers/infiniband/hw/hfi1/chip.h int acquire_lcb_access(struct hfi1_devdata *dd, int sleep_ok);
dd                816 drivers/infiniband/hw/hfi1/chip.h int release_lcb_access(struct hfi1_devdata *dd, int sleep_ok);
dd                827 drivers/infiniband/hw/hfi1/chip.h u64 read_dev_cntr(struct hfi1_devdata *dd, int index, int vl);
dd                828 drivers/infiniband/hw/hfi1/chip.h u64 write_dev_cntr(struct hfi1_devdata *dd, int index, int vl, u64 data);
dd                831 drivers/infiniband/hw/hfi1/chip.h u32 read_logical_state(struct hfi1_devdata *dd);
dd               1418 drivers/infiniband/hw/hfi1/chip.h void hfi1_start_cleanup(struct hfi1_devdata *dd);
dd               1421 drivers/infiniband/hw/hfi1/chip.h void hfi1_put_tid(struct hfi1_devdata *dd, u32 index,
dd               1424 drivers/infiniband/hw/hfi1/chip.h void hfi1_rcvctrl(struct hfi1_devdata *dd, unsigned int op,
dd               1426 drivers/infiniband/hw/hfi1/chip.h u32 hfi1_read_cntrs(struct hfi1_devdata *dd, char **namep, u64 **cntrp);
dd               1430 drivers/infiniband/hw/hfi1/chip.h int hfi1_set_ctxt_jkey(struct hfi1_devdata *dd, struct hfi1_ctxtdata *rcd,
dd               1432 drivers/infiniband/hw/hfi1/chip.h int hfi1_clear_ctxt_jkey(struct hfi1_devdata *dd, struct hfi1_ctxtdata *ctxt);
dd               1433 drivers/infiniband/hw/hfi1/chip.h int hfi1_set_ctxt_pkey(struct hfi1_devdata *dd, struct hfi1_ctxtdata *ctxt,
dd               1435 drivers/infiniband/hw/hfi1/chip.h int hfi1_clear_ctxt_pkey(struct hfi1_devdata *dd, struct hfi1_ctxtdata *ctxt);
dd               1436 drivers/infiniband/hw/hfi1/chip.h void hfi1_read_link_quality(struct hfi1_devdata *dd, u8 *link_quality);
dd               1437 drivers/infiniband/hw/hfi1/chip.h void hfi1_init_vnic_rsm(struct hfi1_devdata *dd);
dd               1438 drivers/infiniband/hw/hfi1/chip.h void hfi1_deinit_vnic_rsm(struct hfi1_devdata *dd);
dd               1445 drivers/infiniband/hw/hfi1/chip.h int set_intr_bits(struct hfi1_devdata *dd, u16 first, u16 last, bool set);
dd               1446 drivers/infiniband/hw/hfi1/chip.h void init_qsfp_int(struct hfi1_devdata *dd);
dd               1447 drivers/infiniband/hw/hfi1/chip.h void clear_all_interrupts(struct hfi1_devdata *dd);
dd               1448 drivers/infiniband/hw/hfi1/chip.h void remap_intr(struct hfi1_devdata *dd, int isrc, int msix_intr);
dd               1449 drivers/infiniband/hw/hfi1/chip.h void remap_sdma_interrupts(struct hfi1_devdata *dd, int engine, int msix_intr);
dd               1450 drivers/infiniband/hw/hfi1/chip.h void reset_interrupts(struct hfi1_devdata *dd);
dd               1451 drivers/infiniband/hw/hfi1/chip.h u8 hfi1_get_qp_map(struct hfi1_devdata *dd, u8 idx);
dd               1465 drivers/infiniband/hw/hfi1/chip.h 	void (*is_int)(struct hfi1_devdata *dd, unsigned int source);
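
The is_int function pointer at chip.h line 1465 points at a table-driven interrupt dispatch: entries pair a range of interrupt sources with a handler, and the general handler walks the table until the source falls inside an entry's range. A hedged sketch of that shape; the table layout and handler names below are assumptions for illustration, not the driver's definitions:

#include <stdio.h>

struct is_table {
        unsigned int start, end;              /* source range [start, end) */
        void (*is_int)(unsigned int source);  /* handler for that range */
        const char *name;
};

static void is_sdma_int(unsigned int s)      { printf("sdma source %u\n", s); }
static void is_rcv_avail_int(unsigned int s) { printf("rcv source %u\n", s); }

static const struct is_table table[] = {
        {  0, 16, is_sdma_int,      "SDma"     },
        { 16, 32, is_rcv_avail_int, "RcvAvail" },
};

static void general_interrupt(unsigned int source)
{
        for (unsigned int i = 0; i < sizeof(table) / sizeof(table[0]); i++)
                if (source >= table[i].start && source < table[i].end) {
                        /* hand off with the source made range-relative */
                        table[i].is_int(source - table[i].start);
                        return;
                }
        printf("spurious source %u\n", source);
}

int main(void) { general_interrupt(3); general_interrupt(20); return 0; }
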
dd                138 drivers/infiniband/hw/hfi1/debugfs.c 	struct hfi1_devdata *dd = dd_from_dev(ibd);
dd                141 drivers/infiniband/hw/hfi1/debugfs.c 	for (j = 0; j < dd->first_dyn_alloc_ctxt; j++) {
dd                142 drivers/infiniband/hw/hfi1/debugfs.c 		rcd = hfi1_rcd_get_by_index(dd, j);
dd                177 drivers/infiniband/hw/hfi1/debugfs.c 	struct hfi1_devdata *dd = dd_from_dev(ibd);
dd                181 drivers/infiniband/hw/hfi1/debugfs.c 			per_cpu_ptr(dd->tx_opstats, j);
dd                195 drivers/infiniband/hw/hfi1/debugfs.c 	struct hfi1_devdata *dd = dd_from_dev(ibd);
dd                199 drivers/infiniband/hw/hfi1/debugfs.c 	if (*pos >= dd->first_dyn_alloc_ctxt)
dd                207 drivers/infiniband/hw/hfi1/debugfs.c 	struct hfi1_devdata *dd = dd_from_dev(ibd);
dd                213 drivers/infiniband/hw/hfi1/debugfs.c 	if (*pos >= dd->first_dyn_alloc_ctxt)
dd                229 drivers/infiniband/hw/hfi1/debugfs.c 	struct hfi1_devdata *dd = dd_from_dev(ibd);
dd                240 drivers/infiniband/hw/hfi1/debugfs.c 	rcd = hfi1_rcd_get_by_index_safe(dd, i);
dd                325 drivers/infiniband/hw/hfi1/debugfs.c 	struct hfi1_devdata *dd;
dd                328 drivers/infiniband/hw/hfi1/debugfs.c 	dd = dd_from_dev(ibd);
dd                329 drivers/infiniband/hw/hfi1/debugfs.c 	if (!dd->per_sdma || *pos >= dd->num_sdma)
dd                337 drivers/infiniband/hw/hfi1/debugfs.c 	struct hfi1_devdata *dd = dd_from_dev(ibd);
dd                340 drivers/infiniband/hw/hfi1/debugfs.c 	if (!dd->per_sdma || *pos >= dd->num_sdma)
dd                352 drivers/infiniband/hw/hfi1/debugfs.c 	struct hfi1_devdata *dd = dd_from_dev(ibd);
dd                356 drivers/infiniband/hw/hfi1/debugfs.c 	sdma_seqfile_dump_sde(s, &dd->per_sdma[i]);
dd                367 drivers/infiniband/hw/hfi1/debugfs.c 	struct hfi1_devdata *dd;
dd                370 drivers/infiniband/hw/hfi1/debugfs.c 	dd = dd_from_dev(ibd);
dd                371 drivers/infiniband/hw/hfi1/debugfs.c 	if (!dd->rcd || *pos >= dd->n_krcv_queues)
dd                379 drivers/infiniband/hw/hfi1/debugfs.c 	struct hfi1_devdata *dd = dd_from_dev(ibd);
dd                382 drivers/infiniband/hw/hfi1/debugfs.c 	if (!dd->rcd || *pos >= dd->n_krcv_queues)
dd                394 drivers/infiniband/hw/hfi1/debugfs.c 	struct hfi1_devdata *dd = dd_from_dev(ibd);
dd                399 drivers/infiniband/hw/hfi1/debugfs.c 	rcd = hfi1_rcd_get_by_index_safe(dd, i);
dd                413 drivers/infiniband/hw/hfi1/debugfs.c 	struct hfi1_devdata *dd;
dd                416 drivers/infiniband/hw/hfi1/debugfs.c 	dd = dd_from_dev(ibd);
dd                417 drivers/infiniband/hw/hfi1/debugfs.c 	if (!dd->send_contexts || *pos >= dd->num_send_contexts)
dd                425 drivers/infiniband/hw/hfi1/debugfs.c 	struct hfi1_devdata *dd = dd_from_dev(ibd);
dd                428 drivers/infiniband/hw/hfi1/debugfs.c 	if (!dd->send_contexts || *pos >= dd->num_send_contexts)
dd                440 drivers/infiniband/hw/hfi1/debugfs.c 	struct hfi1_devdata *dd = dd_from_dev(ibd);
dd                446 drivers/infiniband/hw/hfi1/debugfs.c 	spin_lock_irqsave(&dd->sc_lock, flags);
dd                447 drivers/infiniband/hw/hfi1/debugfs.c 	sci = &dd->send_contexts[i];
dd                450 drivers/infiniband/hw/hfi1/debugfs.c 	spin_unlock_irqrestore(&dd->sc_lock, flags);
dd                464 drivers/infiniband/hw/hfi1/debugfs.c 	struct hfi1_devdata *dd;
dd                467 drivers/infiniband/hw/hfi1/debugfs.c 	dd = private2dd(file);
dd                468 drivers/infiniband/hw/hfi1/debugfs.c 	avail = hfi1_read_cntrs(dd, NULL, &counters);
dd                479 drivers/infiniband/hw/hfi1/debugfs.c 	struct hfi1_devdata *dd;
dd                482 drivers/infiniband/hw/hfi1/debugfs.c 	dd = private2dd(file);
dd                483 drivers/infiniband/hw/hfi1/debugfs.c 	avail = hfi1_read_cntrs(dd, &names, NULL);
dd                504 drivers/infiniband/hw/hfi1/debugfs.c 	struct hfi1_devdata *dd;
dd                507 drivers/infiniband/hw/hfi1/debugfs.c 	dd = private2dd(file);
dd                508 drivers/infiniband/hw/hfi1/debugfs.c 	avail = hfi1_read_portcntrs(dd->pport, &names, NULL);
dd                546 drivers/infiniband/hw/hfi1/debugfs.c 	struct hfi1_devdata *dd;
dd                555 drivers/infiniband/hw/hfi1/debugfs.c 	dd = ppd->dd;
dd                562 drivers/infiniband/hw/hfi1/debugfs.c 	scratch0 = read_csr(dd, ASIC_CFG_SCRATCH);
dd                575 drivers/infiniband/hw/hfi1/debugfs.c 		check_dyn_flag(scratch0, tmp, size, &used, dd->hfi1_id, i,
dd                577 drivers/infiniband/hw/hfi1/debugfs.c 		check_dyn_flag(scratch0, tmp, size, &used, dd->hfi1_id, i,
dd                579 drivers/infiniband/hw/hfi1/debugfs.c 		check_dyn_flag(scratch0, tmp, size, &used, dd->hfi1_id, i,
dd                581 drivers/infiniband/hw/hfi1/debugfs.c 		check_dyn_flag(scratch0, tmp, size, &used, dd->hfi1_id, i,
dd                595 drivers/infiniband/hw/hfi1/debugfs.c 	struct hfi1_devdata *dd;
dd                603 drivers/infiniband/hw/hfi1/debugfs.c 	dd = ppd->dd;
dd                616 drivers/infiniband/hw/hfi1/debugfs.c 	mutex_lock(&dd->asic_data->asic_resource_mutex);
dd                617 drivers/infiniband/hw/hfi1/debugfs.c 	acquire_hw_mutex(dd);
dd                619 drivers/infiniband/hw/hfi1/debugfs.c 	scratch0 = read_csr(dd, ASIC_CFG_SCRATCH);
dd                621 drivers/infiniband/hw/hfi1/debugfs.c 	write_csr(dd, ASIC_CFG_SCRATCH, scratch0);
dd                623 drivers/infiniband/hw/hfi1/debugfs.c 	(void)read_csr(dd, ASIC_CFG_SCRATCH);
dd                625 drivers/infiniband/hw/hfi1/debugfs.c 	release_hw_mutex(dd);
dd                626 drivers/infiniband/hw/hfi1/debugfs.c 	mutex_unlock(&dd->asic_data->asic_resource_mutex);
dd                664 drivers/infiniband/hw/hfi1/debugfs.c 		rval = read_8051_data(ppd->dd, start, end - start,
dd                681 drivers/infiniband/hw/hfi1/debugfs.c 	struct hfi1_devdata *dd = ppd->dd;
dd                702 drivers/infiniband/hw/hfi1/debugfs.c 		if (read_lcb_csr(dd, csr_off, (u64 *)&data))
dd                715 drivers/infiniband/hw/hfi1/debugfs.c 	struct hfi1_devdata *dd = ppd->dd;
dd                737 drivers/infiniband/hw/hfi1/debugfs.c 		if (write_lcb_csr(dd, csr_off, data))
dd                992 drivers/infiniband/hw/hfi1/debugfs.c 	ret = acquire_chip_resource(ppd->dd, i2c_target(target), 0);
dd               1015 drivers/infiniband/hw/hfi1/debugfs.c 	release_chip_resource(ppd->dd, i2c_target(target));
dd               1041 drivers/infiniband/hw/hfi1/debugfs.c 	ret = acquire_chip_resource(ppd->dd, i2c_target(target), 0);
dd               1064 drivers/infiniband/hw/hfi1/debugfs.c 	release_chip_resource(ppd->dd, i2c_target(target));
dd               1084 drivers/infiniband/hw/hfi1/debugfs.c static int exprom_wp_set(struct hfi1_devdata *dd, bool disable)
dd               1091 drivers/infiniband/hw/hfi1/debugfs.c 		dd_dev_info(dd, "Disable Expansion ROM Write Protection\n");
dd               1094 drivers/infiniband/hw/hfi1/debugfs.c 		dd_dev_info(dd, "Enable Expansion ROM Write Protection\n");
dd               1097 drivers/infiniband/hw/hfi1/debugfs.c 	write_csr(dd, ASIC_GPIO_OUT, gpio_val);
dd               1098 drivers/infiniband/hw/hfi1/debugfs.c 	write_csr(dd, ASIC_GPIO_OE, gpio_val);
dd               1121 drivers/infiniband/hw/hfi1/debugfs.c 		exprom_wp_set(ppd->dd, false);
dd               1123 drivers/infiniband/hw/hfi1/debugfs.c 		exprom_wp_set(ppd->dd, true);
dd               1145 drivers/infiniband/hw/hfi1/debugfs.c 		exprom_wp_set(ppd->dd, false);
dd               1225 drivers/infiniband/hw/hfi1/debugfs.c 	struct hfi1_devdata *dd = dd_from_dev(ibd);
dd               1229 drivers/infiniband/hw/hfi1/debugfs.c 	sdma_seqfile_dump_cpu_list(s, dd, (unsigned long)i);
dd               1241 drivers/infiniband/hw/hfi1/debugfs.c 	struct hfi1_devdata *dd = dd_from_dev(ibd);
dd               1244 drivers/infiniband/hw/hfi1/debugfs.c 	int unit = dd->unit;
dd               1271 drivers/infiniband/hw/hfi1/debugfs.c 		debugfs_create_file(cntr_ops[i].name, 0444, root, dd,
dd               1275 drivers/infiniband/hw/hfi1/debugfs.c 	for (ppd = dd->pport, j = 0; j < dd->num_pports; j++, ppd++)
dd               1379 drivers/infiniband/hw/hfi1/debugfs.c 	struct hfi1_devdata *dd;
dd               1383 drivers/infiniband/hw/hfi1/debugfs.c 	xa_for_each(&hfi1_dev_table, index, dd) {
dd               1384 drivers/infiniband/hw/hfi1/debugfs.c 		sps_ints += get_all_cpu_total(dd->int_counter);
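
The debugfs *_start/*_next fragments above all implement the seq_file iterator contract: _start returns a cursor while *pos is below the bound it checks (first_dyn_alloc_ctxt, num_sdma, n_krcv_queues, num_send_contexts), _next advances *pos and re-checks, and a NULL return ends iteration. A userspace analogue of that cursor protocol over a plain array:

#include <stddef.h>
#include <stdio.h>

static int items[] = { 10, 20, 30 };
#define NITEMS ((long long)(sizeof(items) / sizeof(items[0])))

/* seq_file-style contract: start/next return a cursor, or NULL to stop. */
static void *it_start(long long *pos)
{
        return (*pos >= NITEMS) ? NULL : &items[*pos];
}

static void *it_next(long long *pos)
{
        ++*pos;
        return it_start(pos);
}

static int it_show(void *v)
{
        printf("%d\n", *(int *)v);
        return 0;
}

int main(void)
{
        long long pos = 0;

        for (void *v = it_start(&pos); v; v = it_next(&pos))
                it_show(v);
        return 0;
}
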
dd                164 drivers/infiniband/hw/hfi1/driver.c 	struct hfi1_devdata *dd = container_of(ibdev,
dd                166 drivers/infiniband/hw/hfi1/driver.c 	return dd->pcidev;
dd                174 drivers/infiniband/hw/hfi1/driver.c 	struct hfi1_devdata *dd;
dd                180 drivers/infiniband/hw/hfi1/driver.c 	xa_for_each(&hfi1_dev_table, index, dd) {
dd                181 drivers/infiniband/hw/hfi1/driver.c 		if (!(dd->flags & HFI1_PRESENT) || !dd->kregbase1)
dd                183 drivers/infiniband/hw/hfi1/driver.c 		for (pidx = 0; pidx < dd->num_pports; ++pidx) {
dd                184 drivers/infiniband/hw/hfi1/driver.c 			ppd = dd->pport + pidx;
dd                257 drivers/infiniband/hw/hfi1/driver.c 	struct hfi1_devdata *dd = ppd->dd;
dd                258 drivers/infiniband/hw/hfi1/driver.c 	struct hfi1_ibdev *verbs_dev = &dd->verbs_dev;
dd                628 drivers/infiniband/hw/hfi1/driver.c 		struct rvt_dev_info *rdi = &rcd->dd->verbs_dev.rdi;
dd                719 drivers/infiniband/hw/hfi1/driver.c 		this_cpu_inc(*packet->rcd->dd->rcv_limit);
dd                737 drivers/infiniband/hw/hfi1/driver.c 	packet->rcd->dd->ctx0_seq_drop++;
dd                894 drivers/infiniband/hw/hfi1/driver.c static inline void set_nodma_rtail(struct hfi1_devdata *dd, u16 ctxt)
dd                904 drivers/infiniband/hw/hfi1/driver.c 	if (ctxt >= dd->first_dyn_alloc_ctxt) {
dd                905 drivers/infiniband/hw/hfi1/driver.c 		rcd = hfi1_rcd_get_by_index_safe(dd, ctxt);
dd                914 drivers/infiniband/hw/hfi1/driver.c 	for (i = HFI1_CTRL_CTXT + 1; i < dd->first_dyn_alloc_ctxt; i++) {
dd                915 drivers/infiniband/hw/hfi1/driver.c 		rcd = hfi1_rcd_get_by_index(dd, i);
dd                923 drivers/infiniband/hw/hfi1/driver.c static inline void set_dma_rtail(struct hfi1_devdata *dd, u16 ctxt)
dd                933 drivers/infiniband/hw/hfi1/driver.c 	if (ctxt >= dd->first_dyn_alloc_ctxt) {
dd                934 drivers/infiniband/hw/hfi1/driver.c 		rcd = hfi1_rcd_get_by_index_safe(dd, ctxt);
dd                943 drivers/infiniband/hw/hfi1/driver.c 	for (i = HFI1_CTRL_CTXT + 1; i < dd->first_dyn_alloc_ctxt; i++) {
dd                944 drivers/infiniband/hw/hfi1/driver.c 		rcd = hfi1_rcd_get_by_index(dd, i);
dd                952 drivers/infiniband/hw/hfi1/driver.c void set_all_slowpath(struct hfi1_devdata *dd)
dd                958 drivers/infiniband/hw/hfi1/driver.c 	for (i = HFI1_CTRL_CTXT + 1; i < dd->num_rcv_contexts; i++) {
dd                959 drivers/infiniband/hw/hfi1/driver.c 		rcd = hfi1_rcd_get_by_index(dd, i);
dd                962 drivers/infiniband/hw/hfi1/driver.c 		if (i < dd->first_dyn_alloc_ctxt || rcd->is_vnic)
dd                971 drivers/infiniband/hw/hfi1/driver.c 				      struct hfi1_devdata *dd)
dd                991 drivers/infiniband/hw/hfi1/driver.c 			dd_dev_info(dd,
dd               1012 drivers/infiniband/hw/hfi1/driver.c 	struct hfi1_devdata *dd = rcd->dd;
dd               1054 drivers/infiniband/hw/hfi1/driver.c 		if (unlikely(dd->do_drop &&
dd               1055 drivers/infiniband/hw/hfi1/driver.c 			     atomic_xchg(&dd->drop_packet, DROP_PACKET_OFF) ==
dd               1057 drivers/infiniband/hw/hfi1/driver.c 			dd->do_drop = 0;
dd               1073 drivers/infiniband/hw/hfi1/driver.c 			    set_armed_to_active(rcd, &packet, dd))
dd               1086 drivers/infiniband/hw/hfi1/driver.c 				dd_dev_info(dd, "Switching to NO_DMA_RTAIL\n");
dd               1087 drivers/infiniband/hw/hfi1/driver.c 				set_nodma_rtail(dd, rcd->ctxt);
dd               1107 drivers/infiniband/hw/hfi1/driver.c 				dd_dev_info(dd,
dd               1109 drivers/infiniband/hw/hfi1/driver.c 				set_dma_rtail(dd, rcd->ctxt);
dd               1150 drivers/infiniband/hw/hfi1/driver.c 	struct hfi1_devdata *dd = ppd->dd;
dd               1162 drivers/infiniband/hw/hfi1/driver.c 	for (i = HFI1_CTRL_CTXT; i < dd->first_dyn_alloc_ctxt; i++) {
dd               1163 drivers/infiniband/hw/hfi1/driver.c 		rcd = hfi1_rcd_get_by_index(dd, i);
dd               1214 drivers/infiniband/hw/hfi1/driver.c 	struct hfi1_devdata *dd = ppd->dd;
dd               1219 drivers/infiniband/hw/hfi1/driver.c 		if (ppd->ibmtu < dd->vld[i].mtu)
dd               1220 drivers/infiniband/hw/hfi1/driver.c 			ppd->ibmtu = dd->vld[i].mtu;
dd               1221 drivers/infiniband/hw/hfi1/driver.c 	ppd->ibmaxlen = ppd->ibmtu + lrh_max_header_bytes(ppd->dd);
dd               1229 drivers/infiniband/hw/hfi1/driver.c 	drain = !is_ax(dd) && is_up;
dd               1237 drivers/infiniband/hw/hfi1/driver.c 		ret = stop_drain_data_vls(dd);
dd               1240 drivers/infiniband/hw/hfi1/driver.c 		dd_dev_err(dd, "%s: cannot stop/drain VLs - refusing to change per-VL MTUs\n",
dd               1248 drivers/infiniband/hw/hfi1/driver.c 		open_fill_data_vls(dd); /* reopen all VLs */
dd               1258 drivers/infiniband/hw/hfi1/driver.c 	struct hfi1_devdata *dd = ppd->dd;
dd               1264 drivers/infiniband/hw/hfi1/driver.c 	dd_dev_info(dd, "port %u: got a lid: 0x%x\n", ppd->port, lid);
dd               1271 drivers/infiniband/hw/hfi1/driver.c 	struct hfi1_devdata *dd = ppd->dd;
dd               1287 drivers/infiniband/hw/hfi1/driver.c 	write_csr(dd, DCC_CFG_LED_CNTRL, 0);
dd               1293 drivers/infiniband/hw/hfi1/driver.c 	struct hfi1_devdata *dd = ppd->dd;
dd               1297 drivers/infiniband/hw/hfi1/driver.c 	if (!(dd->flags & HFI1_INITTED))
dd               1302 drivers/infiniband/hw/hfi1/driver.c 	setextled(dd, phase_idx);
dd               1321 drivers/infiniband/hw/hfi1/driver.c 	if (!(ppd->dd->flags & HFI1_INITTED))
dd               1357 drivers/infiniband/hw/hfi1/driver.c 	struct hfi1_devdata *dd = hfi1_lookup(unit);
dd               1361 drivers/infiniband/hw/hfi1/driver.c 	if (!dd) {
dd               1366 drivers/infiniband/hw/hfi1/driver.c 	dd_dev_info(dd, "Reset on unit %u requested\n", unit);
dd               1368 drivers/infiniband/hw/hfi1/driver.c 	if (!dd->kregbase1 || !(dd->flags & HFI1_PRESENT)) {
dd               1369 drivers/infiniband/hw/hfi1/driver.c 		dd_dev_info(dd,
dd               1378 drivers/infiniband/hw/hfi1/driver.c 	if (dd->rcd)
dd               1386 drivers/infiniband/hw/hfi1/driver.c 	for (pidx = 0; pidx < dd->num_pports; ++pidx) {
dd               1387 drivers/infiniband/hw/hfi1/driver.c 		ppd = dd->pport + pidx;
dd               1391 drivers/infiniband/hw/hfi1/driver.c 	if (dd->flags & HFI1_HAS_SEND_DMA)
dd               1392 drivers/infiniband/hw/hfi1/driver.c 		sdma_exit(dd);
dd               1394 drivers/infiniband/hw/hfi1/driver.c 	hfi1_reset_cpu_counters(dd);
dd               1396 drivers/infiniband/hw/hfi1/driver.c 	ret = hfi1_init(dd, 1);
dd               1399 drivers/infiniband/hw/hfi1/driver.c 		dd_dev_err(dd,
dd               1403 drivers/infiniband/hw/hfi1/driver.c 		dd_dev_info(dd, "Reinitialized unit %u after resetting\n",
dd               1584 drivers/infiniband/hw/hfi1/driver.c 	dd_dev_err(rcd->dd,
dd               1644 drivers/infiniband/hw/hfi1/driver.c 	struct hfi1_devdata *dd = packet->rcd->dd;
dd               1664 drivers/infiniband/hw/hfi1/driver.c 		dd_dev_err(dd,
dd               1666 drivers/infiniband/hw/hfi1/driver.c 		incr_cntr64(&dd->sw_rcv_bypass_packet_errors);
dd               1667 drivers/infiniband/hw/hfi1/driver.c 		if (!(dd->err_info_rcvport.status_and_code &
dd               1672 drivers/infiniband/hw/hfi1/driver.c 				dd->err_info_rcvport.packet_flit1 = flits[0];
dd               1673 drivers/infiniband/hw/hfi1/driver.c 				dd->err_info_rcvport.packet_flit2 =
dd               1677 drivers/infiniband/hw/hfi1/driver.c 			dd->err_info_rcvport.status_and_code |=
dd               1688 drivers/infiniband/hw/hfi1/driver.c 		 hfi1_dbg_fault_suppress_err(&packet->rcd->dd->verbs_dev) &&
dd               1697 drivers/infiniband/hw/hfi1/driver.c 		dd_dev_err(packet->rcd->dd,
dd               1741 drivers/infiniband/hw/hfi1/driver.c 	dd_dev_err(packet->rcd->dd, "Invalid packet type %d. Dropping\n",
dd               1755 drivers/infiniband/hw/hfi1/driver.c 		   read_uctxt_csr(rcd->dd, rcd->ctxt, RCV_HDR_HEAD) &
dd               1757 drivers/infiniband/hw/hfi1/driver.c 		   read_uctxt_csr(rcd->dd, rcd->ctxt, RCV_HDR_TAIL));
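
The driver.c receive-path fragments above switch between two empty-queue tests: a DMA'd tail pointer (the DMA_RTAIL mode toggled by set_dma_rtail()) or a rolling sequence number stamped into each receive header entry (NO_DMA_RTAIL, set_nodma_rtail()), where an entry is valid only while its sequence matches the consumer's expectation. A sketch of the sequence-number variant; the entry layout and the wrap constant are illustrative only:

#include <stdio.h>

#define QSIZE   8
#define SEQ_MAX 13      /* sequence wraps 1..SEQ_MAX; 0 means "stale" */

struct entry { int seq; int payload; };

static struct entry ring[QSIZE];

/* Next expected value of a 1-based wrapping sequence counter. */
static int next_seq(int seq)
{
        return (seq % SEQ_MAX) + 1;
}

int main(void)
{
        int head = 0, seq = 1;

        /* producer: stamp three entries with consecutive seq values */
        for (int i = 0; i < 3; i++)
                ring[i] = (struct entry){ .seq = i + 1, .payload = 100 + i };

        /* consumer: stop at the first entry whose seq does not match */
        while (ring[head].seq == seq) {
                printf("entry %d payload %d\n", head, ring[head].payload);
                seq = next_seq(seq);
                head = (head + 1) % QSIZE;
        }
        printf("queue empty at head %d (expected seq %d)\n", head, seq);
        return 0;
}
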
dd                151 drivers/infiniband/hw/hfi1/efivar.c int read_hfi1_efi_var(struct hfi1_devdata *dd, const char *kind,
dd                161 drivers/infiniband/hw/hfi1/efivar.c 		 pci_domain_nr(dd->pcidev->bus),
dd                162 drivers/infiniband/hw/hfi1/efivar.c 		 dd->pcidev->bus->number,
dd                163 drivers/infiniband/hw/hfi1/efivar.c 		 PCI_SLOT(dd->pcidev->devfn),
dd                164 drivers/infiniband/hw/hfi1/efivar.c 		 PCI_FUNC(dd->pcidev->devfn));
dd                 54 drivers/infiniband/hw/hfi1/efivar.h int read_hfi1_efi_var(struct hfi1_devdata *dd, const char *kind,
dd                 88 drivers/infiniband/hw/hfi1/eprom.c static void read_page(struct hfi1_devdata *dd, u32 offset, u32 *result)
dd                 92 drivers/infiniband/hw/hfi1/eprom.c 	write_csr(dd, ASIC_EEP_ADDR_CMD, CMD_READ_DATA(offset));
dd                 94 drivers/infiniband/hw/hfi1/eprom.c 		result[i] = (u32)read_csr(dd, ASIC_EEP_DATA);
dd                 95 drivers/infiniband/hw/hfi1/eprom.c 	write_csr(dd, ASIC_EEP_ADDR_CMD, CMD_NOP); /* close open page */
dd                101 drivers/infiniband/hw/hfi1/eprom.c static int read_length(struct hfi1_devdata *dd, u32 start, u32 len, void *dest)
dd                129 drivers/infiniband/hw/hfi1/eprom.c 		read_page(dd, read_start, buffer);
dd                150 drivers/infiniband/hw/hfi1/eprom.c 		read_page(dd, start, buffer);
dd                160 drivers/infiniband/hw/hfi1/eprom.c 		read_page(dd, start, buffer);
dd                170 drivers/infiniband/hw/hfi1/eprom.c int eprom_init(struct hfi1_devdata *dd)
dd                175 drivers/infiniband/hw/hfi1/eprom.c 	if (dd->pcidev->device != PCI_DEVICE_ID_INTEL0)
dd                182 drivers/infiniband/hw/hfi1/eprom.c 	ret = acquire_chip_resource(dd, CR_EPROM, EPROM_TIMEOUT);
dd                184 drivers/infiniband/hw/hfi1/eprom.c 		dd_dev_err(dd,
dd                193 drivers/infiniband/hw/hfi1/eprom.c 	write_csr(dd, ASIC_EEP_CTL_STAT, ASIC_EEP_CTL_STAT_EP_RESET_SMASK);
dd                195 drivers/infiniband/hw/hfi1/eprom.c 	write_csr(dd, ASIC_EEP_CTL_STAT,
dd                199 drivers/infiniband/hw/hfi1/eprom.c 	write_csr(dd, ASIC_EEP_ADDR_CMD, CMD_RELEASE_POWERDOWN_NOID);
dd                201 drivers/infiniband/hw/hfi1/eprom.c 	dd->eprom_available = true;
dd                202 drivers/infiniband/hw/hfi1/eprom.c 	release_chip_resource(dd, CR_EPROM);
dd                251 drivers/infiniband/hw/hfi1/eprom.c static int read_partition_platform_config(struct hfi1_devdata *dd, void **data,
dd                263 drivers/infiniband/hw/hfi1/eprom.c 	ret = read_length(dd, P1_START, P1_SIZE, buffer);
dd                293 drivers/infiniband/hw/hfi1/eprom.c static int read_segment_platform_config(struct hfi1_devdata *dd,
dd                334 drivers/infiniband/hw/hfi1/eprom.c 		ret = read_length(dd, SEG_SIZE - directory_size,
dd                358 drivers/infiniband/hw/hfi1/eprom.c 		dd_dev_err(dd, "Bad configuration file size 0x%x\n",
dd                366 drivers/infiniband/hw/hfi1/eprom.c 		dd_dev_err(dd,
dd                398 drivers/infiniband/hw/hfi1/eprom.c 				dd_dev_err(dd,
dd                424 drivers/infiniband/hw/hfi1/eprom.c 		ret = read_length(dd, seg_base + seg_offset, to_copy,
dd                462 drivers/infiniband/hw/hfi1/eprom.c int eprom_read_platform_config(struct hfi1_devdata *dd, void **data, u32 *size)
dd                467 drivers/infiniband/hw/hfi1/eprom.c 	if (!dd->eprom_available)
dd                470 drivers/infiniband/hw/hfi1/eprom.c 	ret = acquire_chip_resource(dd, CR_EPROM, EPROM_TIMEOUT);
dd                475 drivers/infiniband/hw/hfi1/eprom.c 	ret = read_length(dd, SEG_SIZE - EP_PAGE_SIZE, EP_PAGE_SIZE, directory);
dd                482 drivers/infiniband/hw/hfi1/eprom.c 		ret = read_segment_platform_config(dd, directory, data, size);
dd                485 drivers/infiniband/hw/hfi1/eprom.c 		ret = read_partition_platform_config(dd, data, size);
dd                489 drivers/infiniband/hw/hfi1/eprom.c 	release_chip_resource(dd, CR_EPROM);
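
read_length() in eprom.c above splits a byte-range read into an unaligned head, whole middle pages, and a partial tail, because read_page() can fetch only one aligned page per ASIC_EEP_ADDR_CMD command. A self-contained sketch of that split over a fake EPROM; the page size here is made up:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define EP_PAGE 16                      /* hypothetical page size, bytes */

static uint8_t eprom[64];               /* fake device contents */

/* Device can only read one whole, aligned page per command. */
static void read_page(uint32_t page_start, uint8_t *buf)
{
        memcpy(buf, &eprom[page_start], EP_PAGE);
}

static void read_length(uint32_t start, uint32_t len, uint8_t *dest)
{
        uint8_t page[EP_PAGE];

        while (len) {
                uint32_t base = start & ~(uint32_t)(EP_PAGE - 1);
                uint32_t off  = start - base;    /* head offset in page */
                uint32_t n    = EP_PAGE - off;   /* usable bytes in page */

                if (n > len)
                        n = len;                 /* partial tail */
                read_page(base, page);           /* bounce buffer */
                memcpy(dest, page + off, n);
                dest += n; start += n; len -= n;
        }
}

int main(void)
{
        uint8_t out[10];

        for (unsigned i = 0; i < sizeof(eprom); i++)
                eprom[i] = (uint8_t)i;
        read_length(13, sizeof(out), out);       /* spans a page boundary */
        for (unsigned i = 0; i < sizeof(out); i++)
                printf("%u ", out[i]);
        printf("\n");
        return 0;
}
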
dd                 50 drivers/infiniband/hw/hfi1/eprom.h int eprom_init(struct hfi1_devdata *dd);
dd                 51 drivers/infiniband/hw/hfi1/eprom.h int eprom_read_platform_config(struct hfi1_devdata *dd, void **buf_ret,
dd                 78 drivers/infiniband/hw/hfi1/exp_rcv.c 	struct hfi1_devdata *dd = rcd->dd;
dd                 84 drivers/infiniband/hw/hfi1/exp_rcv.c 	ngroups = rcd->expected_count / dd->rcv_entries.group_size;
dd                 93 drivers/infiniband/hw/hfi1/exp_rcv.c 		grp->size = dd->rcv_entries.group_size;
dd                 96 drivers/infiniband/hw/hfi1/exp_rcv.c 		tidbase += dd->rcv_entries.group_size;
dd                134 drivers/infiniband/hw/hfi1/exp_rcv.h static inline void rcv_array_wc_fill(struct hfi1_devdata *dd, u32 index)
dd                140 drivers/infiniband/hw/hfi1/exp_rcv.h 	if ((dd->flags & HFI1_PRESENT) && dd->rcvarray_wc) {
dd                141 drivers/infiniband/hw/hfi1/exp_rcv.h 		writeq(0, dd->rcvarray_wc + (index * 8));
dd                 91 drivers/infiniband/hw/hfi1/fault.c 	struct hfi1_devdata *dd = dd_from_dev(ibd);
dd                 94 drivers/infiniband/hw/hfi1/fault.c 	for (j = 0; j < dd->first_dyn_alloc_ctxt; j++) {
dd                 95 drivers/infiniband/hw/hfi1/fault.c 		rcd = hfi1_rcd_get_by_index(dd, j);
dd                104 drivers/infiniband/hw/hfi1/fault.c 			per_cpu_ptr(dd->tx_opstats, j);
dd                363 drivers/infiniband/hw/hfi1/fault.c 	struct hfi1_ibdev *ibd = &packet->rcd->dd->verbs_dev;
dd                101 drivers/infiniband/hw/hfi1/file_ops.c static int allocate_ctxt(struct hfi1_filedata *fd, struct hfi1_devdata *dd,
dd                189 drivers/infiniband/hw/hfi1/file_ops.c 	struct hfi1_devdata *dd = container_of(inode->i_cdev,
dd                193 drivers/infiniband/hw/hfi1/file_ops.c 	if (!((dd->flags & HFI1_PRESENT) && dd->kregbase1))
dd                196 drivers/infiniband/hw/hfi1/file_ops.c 	if (!atomic_inc_not_zero(&dd->user_refcount))
dd                211 drivers/infiniband/hw/hfi1/file_ops.c 	fd->dd = dd;
dd                212 drivers/infiniband/hw/hfi1/file_ops.c 	kobject_get(&fd->dd->kobj);
dd                218 drivers/infiniband/hw/hfi1/file_ops.c 	if (atomic_dec_and_test(&dd->user_refcount))
dd                219 drivers/infiniband/hw/hfi1/file_ops.c 		complete(&dd->user_comp);
dd                323 drivers/infiniband/hw/hfi1/file_ops.c 	trace_hfi1_sdma_request(fd->dd, fd->uctxt->ctxt, fd->subctxt, dim);
dd                354 drivers/infiniband/hw/hfi1/file_ops.c 	struct hfi1_devdata *dd;
dd                369 drivers/infiniband/hw/hfi1/file_ops.c 	dd = uctxt->dd;
dd                383 drivers/infiniband/hw/hfi1/file_ops.c 		memaddr = ((dd->physaddr + TXE_PIO_SEND) +
dd                409 drivers/infiniband/hw/hfi1/file_ops.c 		memvirt = dd->cr_base[uctxt->numa_id].va;
dd                412 drivers/infiniband/hw/hfi1/file_ops.c 			  (u64)dd->cr_base[uctxt->numa_id].va) & PAGE_MASK);
dd                438 drivers/infiniband/hw/hfi1/file_ops.c 			dd_dev_err(dd, "Eager buffer map size invalid (%lu != %lu)\n",
dd                475 drivers/infiniband/hw/hfi1/file_ops.c 			(dd->physaddr + RXE_PER_CONTEXT_USER)
dd                492 drivers/infiniband/hw/hfi1/file_ops.c 			(dd->events + uctxt_offset(uctxt)) & PAGE_MASK;
dd                506 drivers/infiniband/hw/hfi1/file_ops.c 		memaddr = kvirt_to_phys((void *)dd->status);
dd                641 drivers/infiniband/hw/hfi1/file_ops.c 	struct hfi1_devdata *dd = container_of(inode->i_cdev,
dd                674 drivers/infiniband/hw/hfi1/file_ops.c 	ev = dd->events + uctxt_offset(uctxt) + fdata->subctxt;
dd                677 drivers/infiniband/hw/hfi1/file_ops.c 	spin_lock_irqsave(&dd->uctxt_lock, flags);
dd                680 drivers/infiniband/hw/hfi1/file_ops.c 		spin_unlock_irqrestore(&dd->uctxt_lock, flags);
dd                683 drivers/infiniband/hw/hfi1/file_ops.c 	spin_unlock_irqrestore(&dd->uctxt_lock, flags);
dd                689 drivers/infiniband/hw/hfi1/file_ops.c 	hfi1_rcvctrl(dd, HFI1_RCVCTRL_CTXT_DIS |
dd                698 drivers/infiniband/hw/hfi1/file_ops.c 	hfi1_clear_ctxt_jkey(dd, uctxt);
dd                709 drivers/infiniband/hw/hfi1/file_ops.c 	hfi1_clear_ctxt_pkey(dd, uctxt);
dd                716 drivers/infiniband/hw/hfi1/file_ops.c 	kobject_put(&dd->kobj);
dd                718 drivers/infiniband/hw/hfi1/file_ops.c 	if (atomic_dec_and_test(&dd->user_refcount))
dd                719 drivers/infiniband/hw/hfi1/file_ops.c 		complete(&dd->user_comp);
dd                777 drivers/infiniband/hw/hfi1/file_ops.c 		spin_lock_irqsave(&fd->dd->uctxt_lock, flags);
dd                779 drivers/infiniband/hw/hfi1/file_ops.c 		spin_unlock_irqrestore(&fd->dd->uctxt_lock, flags);
dd                826 drivers/infiniband/hw/hfi1/file_ops.c 		ret = allocate_ctxt(fd, fd->dd, &uinfo, &uctxt);
dd                860 drivers/infiniband/hw/hfi1/file_ops.c 	struct hfi1_devdata *dd = fd->dd;
dd                880 drivers/infiniband/hw/hfi1/file_ops.c 	spin_lock_irqsave(&dd->uctxt_lock, flags);
dd                883 drivers/infiniband/hw/hfi1/file_ops.c 		spin_unlock_irqrestore(&dd->uctxt_lock, flags);
dd                890 drivers/infiniband/hw/hfi1/file_ops.c 		spin_unlock_irqrestore(&dd->uctxt_lock, flags);
dd                896 drivers/infiniband/hw/hfi1/file_ops.c 	spin_unlock_irqrestore(&dd->uctxt_lock, flags);
dd                922 drivers/infiniband/hw/hfi1/file_ops.c 	struct hfi1_devdata *dd = fd->dd;
dd                929 drivers/infiniband/hw/hfi1/file_ops.c 	for (i = dd->first_dyn_alloc_ctxt; i < dd->num_rcv_contexts; i++) {
dd                930 drivers/infiniband/hw/hfi1/file_ops.c 		uctxt = hfi1_rcd_get_by_index(dd, i);
dd                943 drivers/infiniband/hw/hfi1/file_ops.c static int allocate_ctxt(struct hfi1_filedata *fd, struct hfi1_devdata *dd,
dd                950 drivers/infiniband/hw/hfi1/file_ops.c 	if (dd->flags & HFI1_FROZEN) {
dd                961 drivers/infiniband/hw/hfi1/file_ops.c 	if (!dd->freectxts)
dd                968 drivers/infiniband/hw/hfi1/file_ops.c 	fd->rec_cpu_num = hfi1_get_proc_affinity(dd->node);
dd                973 drivers/infiniband/hw/hfi1/file_ops.c 	ret = hfi1_create_ctxtdata(dd->pport, numa, &uctxt);
dd                975 drivers/infiniband/hw/hfi1/file_ops.c 		dd_dev_err(dd, "user ctxtdata allocation failed\n");
dd                985 drivers/infiniband/hw/hfi1/file_ops.c 	uctxt->sc = sc_alloc(dd, SC_USER, uctxt->rcvhdrqentsize, dd->node);
dd               1019 drivers/infiniband/hw/hfi1/file_ops.c 	if (dd->freectxts-- == dd->num_user_contexts)
dd               1020 drivers/infiniband/hw/hfi1/file_ops.c 		aspm_disable_all(dd);
dd               1035 drivers/infiniband/hw/hfi1/file_ops.c 	if (++uctxt->dd->freectxts == uctxt->dd->num_user_contexts)
dd               1036 drivers/infiniband/hw/hfi1/file_ops.c 		aspm_enable_all(uctxt->dd);
dd               1109 drivers/infiniband/hw/hfi1/file_ops.c 	hfi1_set_ctxt_jkey(uctxt->dd, uctxt, uctxt->jkey);
dd               1136 drivers/infiniband/hw/hfi1/file_ops.c 	hfi1_rcvctrl(uctxt->dd, rcvctrl_ops, uctxt);
dd               1157 drivers/infiniband/hw/hfi1/file_ops.c 	cinfo.unit = uctxt->dd->unit;
dd               1161 drivers/infiniband/hw/hfi1/file_ops.c 				uctxt->dd->rcv_entries.group_size) +
dd               1174 drivers/infiniband/hw/hfi1/file_ops.c 	trace_hfi1_ctxt_info(uctxt->dd, uctxt->ctxt, fd->subctxt, &cinfo);
dd               1200 drivers/infiniband/hw/hfi1/file_ops.c 	struct hfi1_devdata *dd = uctxt->dd;
dd               1206 drivers/infiniband/hw/hfi1/file_ops.c 	ret = hfi1_create_rcvhdrq(dd, uctxt);
dd               1258 drivers/infiniband/hw/hfi1/file_ops.c 	struct hfi1_devdata *dd = uctxt->dd;
dd               1261 drivers/infiniband/hw/hfi1/file_ops.c 	trace_hfi1_uctxtdata(uctxt->dd, uctxt, fd->subctxt);
dd               1267 drivers/infiniband/hw/hfi1/file_ops.c 	binfo.hw_version = dd->revision;
dd               1278 drivers/infiniband/hw/hfi1/file_ops.c 		  (u64)dd->cr_base[uctxt->numa_id].va) % PAGE_SIZE;
dd               1303 drivers/infiniband/hw/hfi1/file_ops.c 				sizeof(*dd->events));
dd               1309 drivers/infiniband/hw/hfi1/file_ops.c 					       dd->status);
dd               1449 drivers/infiniband/hw/hfi1/file_ops.c 	struct hfi1_devdata *dd = uctxt->dd;
dd               1454 drivers/infiniband/hw/hfi1/file_ops.c 	spin_lock_irq(&dd->uctxt_lock);
dd               1462 drivers/infiniband/hw/hfi1/file_ops.c 	spin_unlock_irq(&dd->uctxt_lock);
dd               1472 drivers/infiniband/hw/hfi1/file_ops.c 	struct hfi1_devdata *dd = uctxt->dd;
dd               1477 drivers/infiniband/hw/hfi1/file_ops.c 	spin_lock_irq(&dd->uctxt_lock);
dd               1480 drivers/infiniband/hw/hfi1/file_ops.c 		hfi1_rcvctrl(dd, HFI1_RCVCTRL_INTRAVAIL_ENB, uctxt);
dd               1485 drivers/infiniband/hw/hfi1/file_ops.c 	spin_unlock_irq(&dd->uctxt_lock);
dd               1498 drivers/infiniband/hw/hfi1/file_ops.c 	struct hfi1_devdata *dd = ppd->dd;
dd               1501 drivers/infiniband/hw/hfi1/file_ops.c 	if (!dd->events)
dd               1504 drivers/infiniband/hw/hfi1/file_ops.c 	for (ctxt = dd->first_dyn_alloc_ctxt; ctxt < dd->num_rcv_contexts;
dd               1506 drivers/infiniband/hw/hfi1/file_ops.c 		uctxt = hfi1_rcd_get_by_index(dd, ctxt);
dd               1514 drivers/infiniband/hw/hfi1/file_ops.c 			evs = dd->events + uctxt_offset(uctxt);
dd               1538 drivers/infiniband/hw/hfi1/file_ops.c 	struct hfi1_devdata *dd = uctxt->dd;
dd               1564 drivers/infiniband/hw/hfi1/file_ops.c 	hfi1_rcvctrl(dd, rcvctrl_op, uctxt);
dd               1579 drivers/infiniband/hw/hfi1/file_ops.c 	struct hfi1_devdata *dd = uctxt->dd;
dd               1583 drivers/infiniband/hw/hfi1/file_ops.c 	if (!dd->events)
dd               1589 drivers/infiniband/hw/hfi1/file_ops.c 	evs = dd->events + uctxt_offset(uctxt) + subctxt;
dd               1603 drivers/infiniband/hw/hfi1/file_ops.c 	struct hfi1_devdata *dd = uctxt->dd;
dd               1617 drivers/infiniband/hw/hfi1/file_ops.c 			return hfi1_set_ctxt_pkey(dd, uctxt, pkey);
dd               1629 drivers/infiniband/hw/hfi1/file_ops.c 	struct hfi1_devdata *dd;
dd               1632 drivers/infiniband/hw/hfi1/file_ops.c 	if (!uctxt || !uctxt->dd || !uctxt->sc)
dd               1641 drivers/infiniband/hw/hfi1/file_ops.c 	dd = uctxt->dd;
dd               1660 drivers/infiniband/hw/hfi1/file_ops.c 			dd->event_queue,
dd               1661 drivers/infiniband/hw/hfi1/file_ops.c 			!(READ_ONCE(dd->flags) & HFI1_FROZEN),
dd               1663 drivers/infiniband/hw/hfi1/file_ops.c 		if (dd->flags & HFI1_FROZEN)
dd               1666 drivers/infiniband/hw/hfi1/file_ops.c 		if (dd->flags & HFI1_FORCED_FREEZE)
dd               1675 drivers/infiniband/hw/hfi1/file_ops.c 		hfi1_rcvctrl(dd, HFI1_RCVCTRL_CTXT_ENB, uctxt);
dd               1685 drivers/infiniband/hw/hfi1/file_ops.c static void user_remove(struct hfi1_devdata *dd)
dd               1688 drivers/infiniband/hw/hfi1/file_ops.c 	hfi1_cdev_cleanup(&dd->user_cdev, &dd->user_device);
dd               1691 drivers/infiniband/hw/hfi1/file_ops.c static int user_add(struct hfi1_devdata *dd)
dd               1696 drivers/infiniband/hw/hfi1/file_ops.c 	snprintf(name, sizeof(name), "%s_%d", class_name(), dd->unit);
dd               1697 drivers/infiniband/hw/hfi1/file_ops.c 	ret = hfi1_cdev_init(dd->unit, name, &hfi1_file_ops,
dd               1698 drivers/infiniband/hw/hfi1/file_ops.c 			     &dd->user_cdev, &dd->user_device,
dd               1699 drivers/infiniband/hw/hfi1/file_ops.c 			     true, &dd->kobj);
dd               1701 drivers/infiniband/hw/hfi1/file_ops.c 		user_remove(dd);
dd               1709 drivers/infiniband/hw/hfi1/file_ops.c int hfi1_device_create(struct hfi1_devdata *dd)
dd               1711 drivers/infiniband/hw/hfi1/file_ops.c 	return user_add(dd);
dd               1718 drivers/infiniband/hw/hfi1/file_ops.c void hfi1_device_remove(struct hfi1_devdata *dd)
dd               1720 drivers/infiniband/hw/hfi1/file_ops.c 	user_remove(dd);
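
file_ops.c guards the character device with a reference count: open takes a reference via atomic_inc_not_zero(&dd->user_refcount), release drops it and fires complete(&dd->user_comp) on the final put, and the count is seeded to 1 in hfi1_init_dd so teardown can drop its own reference and wait for the completion, guaranteeing the device data outlives every open fd. A userspace sketch of the lifecycle with C11 atomics, using a flag in place of the kernel completion:

#include <stdatomic.h>
#include <stdio.h>

static atomic_int user_refcount = 1;    /* 1 = the driver's own reference */
static atomic_bool all_users_gone;

static int user_open(void)
{
        int old = atomic_load(&user_refcount);

        /* inc_not_zero: refuse new users once teardown has hit zero */
        do {
                if (old == 0)
                        return -1;
        } while (!atomic_compare_exchange_weak(&user_refcount, &old, old + 1));
        return 0;
}

static void user_release(void)
{
        if (atomic_fetch_sub(&user_refcount, 1) == 1)
                atomic_store(&all_users_gone, 1);  /* complete(&user_comp) */
}

int main(void)
{
        if (user_open() == 0)
                user_release();         /* a user came and went */
        user_release();                 /* teardown drops its own ref */
        printf("all users gone: %d\n", (int)atomic_load(&all_users_gone));
        return 0;
}

Seeding the count to 1 rather than 0 is what makes inc_not_zero meaningful: once teardown performs the final decrement, opens can no longer resurrect the device.
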
dd                255 drivers/infiniband/hw/hfi1/firmware.c static int load_fabric_serdes_firmware(struct hfi1_devdata *dd,
dd                257 drivers/infiniband/hw/hfi1/firmware.c static void dump_fw_version(struct hfi1_devdata *dd);
dd                272 drivers/infiniband/hw/hfi1/firmware.c static int __read_8051_data(struct hfi1_devdata *dd, u32 addr, u64 *result)
dd                280 drivers/infiniband/hw/hfi1/firmware.c 	write_csr(dd, DC_DC8051_CFG_RAM_ACCESS_CTRL, reg);
dd                282 drivers/infiniband/hw/hfi1/firmware.c 	write_csr(dd, DC_DC8051_CFG_RAM_ACCESS_CTRL,
dd                287 drivers/infiniband/hw/hfi1/firmware.c 	while ((read_csr(dd, DC_DC8051_CFG_RAM_ACCESS_STATUS)
dd                292 drivers/infiniband/hw/hfi1/firmware.c 			dd_dev_err(dd, "timeout reading 8051 data\n");
dd                299 drivers/infiniband/hw/hfi1/firmware.c 	*result = read_csr(dd, DC_DC8051_CFG_RAM_ACCESS_RD_DATA);
dd                308 drivers/infiniband/hw/hfi1/firmware.c int read_8051_data(struct hfi1_devdata *dd, u32 addr, u32 len, u64 *result)
dd                314 drivers/infiniband/hw/hfi1/firmware.c 	spin_lock_irqsave(&dd->dc8051_memlock, flags);
dd                317 drivers/infiniband/hw/hfi1/firmware.c 	write_csr(dd, DC_DC8051_CFG_RAM_ACCESS_SETUP, 0);
dd                320 drivers/infiniband/hw/hfi1/firmware.c 		ret = __read_8051_data(dd, addr, result);
dd                326 drivers/infiniband/hw/hfi1/firmware.c 	write_csr(dd, DC_DC8051_CFG_RAM_ACCESS_CTRL, 0);
dd                328 drivers/infiniband/hw/hfi1/firmware.c 	spin_unlock_irqrestore(&dd->dc8051_memlock, flags);
dd                336 drivers/infiniband/hw/hfi1/firmware.c static int write_8051(struct hfi1_devdata *dd, int code, u32 start,
dd                349 drivers/infiniband/hw/hfi1/firmware.c 	write_csr(dd, DC_DC8051_CFG_RAM_ACCESS_SETUP, reg);
dd                354 drivers/infiniband/hw/hfi1/firmware.c 	write_csr(dd, DC_DC8051_CFG_RAM_ACCESS_CTRL, reg);
dd                368 drivers/infiniband/hw/hfi1/firmware.c 		write_csr(dd, DC_DC8051_CFG_RAM_ACCESS_WR_DATA, reg);
dd                372 drivers/infiniband/hw/hfi1/firmware.c 		while ((read_csr(dd, DC_DC8051_CFG_RAM_ACCESS_STATUS)
dd                377 drivers/infiniband/hw/hfi1/firmware.c 				dd_dev_err(dd, "timeout writing 8051 data\n");
dd                385 drivers/infiniband/hw/hfi1/firmware.c 	write_csr(dd, DC_DC8051_CFG_RAM_ACCESS_CTRL, 0);
dd                386 drivers/infiniband/hw/hfi1/firmware.c 	write_csr(dd, DC_DC8051_CFG_RAM_ACCESS_SETUP, 0);
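
__read_8051_data() and write_8051() above both spin on DC_DC8051_CFG_RAM_ACCESS_STATUS until a done bit sets or a deadline passes, then report "timeout reading/writing 8051 data". A generic poll-with-timeout sketch; the status register is a stub (it flips to done after one iteration so the loop terminates), and the done mask and timeout are invented:

#include <stdint.h>
#include <stdio.h>
#include <time.h>

/* Hypothetical status register; bit 0 is "access completed". */
static uint64_t status_csr;
static uint64_t read_status(void) { return status_csr; }

static int wait_done(long timeout_ms)
{
        struct timespec t0, t;

        clock_gettime(CLOCK_MONOTONIC, &t0);
        while (!(read_status() & 1)) {
                clock_gettime(CLOCK_MONOTONIC, &t);
                long ms = (t.tv_sec - t0.tv_sec) * 1000
                        + (t.tv_nsec - t0.tv_nsec) / 1000000;
                if (ms > timeout_ms) {
                        fprintf(stderr, "timeout waiting for 8051 data\n");
                        return -1;
                }
                status_csr = 1;         /* stub: pretend hardware finished */
        }
        return 0;
}

int main(void) { return wait_done(100) ? 1 : 0; }
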
dd                392 drivers/infiniband/hw/hfi1/firmware.c static int invalid_header(struct hfi1_devdata *dd, const char *what,
dd                398 drivers/infiniband/hw/hfi1/firmware.c 	dd_dev_err(dd,
dd                407 drivers/infiniband/hw/hfi1/firmware.c static int verify_css_header(struct hfi1_devdata *dd, struct css_header *css)
dd                410 drivers/infiniband/hw/hfi1/firmware.c 	if (invalid_header(dd, "module_type", css->module_type,
dd                412 drivers/infiniband/hw/hfi1/firmware.c 	    invalid_header(dd, "header_len", css->header_len,
dd                414 drivers/infiniband/hw/hfi1/firmware.c 	    invalid_header(dd, "header_version", css->header_version,
dd                416 drivers/infiniband/hw/hfi1/firmware.c 	    invalid_header(dd, "module_vendor", css->module_vendor,
dd                418 drivers/infiniband/hw/hfi1/firmware.c 	    invalid_header(dd, "key_size", css->key_size, KEY_SIZE / 4) ||
dd                419 drivers/infiniband/hw/hfi1/firmware.c 	    invalid_header(dd, "modulus_size", css->modulus_size,
dd                421 drivers/infiniband/hw/hfi1/firmware.c 	    invalid_header(dd, "exponent_size", css->exponent_size,
dd                431 drivers/infiniband/hw/hfi1/firmware.c static int payload_check(struct hfi1_devdata *dd, const char *name,
dd                436 drivers/infiniband/hw/hfi1/firmware.c 		dd_dev_err(dd,
dd                450 drivers/infiniband/hw/hfi1/firmware.c static int obtain_one_firmware(struct hfi1_devdata *dd, const char *name,
dd                458 drivers/infiniband/hw/hfi1/firmware.c 	ret = request_firmware(&fdet->fw, name, &dd->pcidev->dev);
dd                460 drivers/infiniband/hw/hfi1/firmware.c 		dd_dev_warn(dd, "cannot find firmware \"%s\", err %d\n",
dd                467 drivers/infiniband/hw/hfi1/firmware.c 		dd_dev_err(dd, "firmware \"%s\" is too small\n", name);
dd                504 drivers/infiniband/hw/hfi1/firmware.c 	ret = verify_css_header(dd, css);
dd                506 drivers/infiniband/hw/hfi1/firmware.c 		dd_dev_info(dd, "Invalid CSS header for \"%s\"\n", name);
dd                513 drivers/infiniband/hw/hfi1/firmware.c 		ret = payload_check(dd, name, fdet->fw->size,
dd                529 drivers/infiniband/hw/hfi1/firmware.c 			dd_dev_err(dd, "driver is unable to validate firmware without r2 and mu (not in firmware file)\n");
dd                538 drivers/infiniband/hw/hfi1/firmware.c 		ret = payload_check(dd, name, fdet->fw->size,
dd                553 drivers/infiniband/hw/hfi1/firmware.c 		dd_dev_err(dd,
dd                584 drivers/infiniband/hw/hfi1/firmware.c static void __obtain_firmware(struct hfi1_devdata *dd)
dd                600 drivers/infiniband/hw/hfi1/firmware.c 		dd_dev_warn(dd, "using alternate firmware names\n");
dd                630 drivers/infiniband/hw/hfi1/firmware.c 		err = obtain_one_firmware(dd, fw_sbus_name, &fw_sbus);
dd                636 drivers/infiniband/hw/hfi1/firmware.c 		err = obtain_one_firmware(dd, fw_pcie_serdes_name, &fw_pcie);
dd                642 drivers/infiniband/hw/hfi1/firmware.c 		err = obtain_one_firmware(dd, fw_fabric_serdes_name,
dd                649 drivers/infiniband/hw/hfi1/firmware.c 		err = obtain_one_firmware(dd, fw_8051_name, &fw_8051);
dd                657 drivers/infiniband/hw/hfi1/firmware.c 		if (fw_state == FW_EMPTY && dd->icode == ICODE_RTL_SILICON) {
dd                662 drivers/infiniband/hw/hfi1/firmware.c 		dd_dev_err(dd, "unable to obtain working firmware\n");
dd                668 drivers/infiniband/hw/hfi1/firmware.c 		    dd->icode != ICODE_FUNCTIONAL_SIMULATOR)
dd                684 drivers/infiniband/hw/hfi1/firmware.c static int obtain_firmware(struct hfi1_devdata *dd)
dd                699 drivers/infiniband/hw/hfi1/firmware.c 			dd_dev_err(dd, "Timeout waiting for firmware try");
dd                712 drivers/infiniband/hw/hfi1/firmware.c 		__obtain_firmware(dd);
dd                746 drivers/infiniband/hw/hfi1/firmware.c static int retry_firmware(struct hfi1_devdata *dd, int load_result)
dd                762 drivers/infiniband/hw/hfi1/firmware.c 		__obtain_firmware(dd);
dd                777 drivers/infiniband/hw/hfi1/firmware.c static void write_rsa_data(struct hfi1_devdata *dd, int what,
dd                788 drivers/infiniband/hw/hfi1/firmware.c 			write_csr(dd, what + (8 * i), *ptr);
dd                795 drivers/infiniband/hw/hfi1/firmware.c 			write_csr(dd, what + (8 * i), value);
dd                804 drivers/infiniband/hw/hfi1/firmware.c static void write_streamed_rsa_data(struct hfi1_devdata *dd, int what,
dd                811 drivers/infiniband/hw/hfi1/firmware.c 		write_csr(dd, what, *ptr);
dd                818 drivers/infiniband/hw/hfi1/firmware.c static int run_rsa(struct hfi1_devdata *dd, const char *who,
dd                827 drivers/infiniband/hw/hfi1/firmware.c 	write_rsa_data(dd, MISC_CFG_RSA_SIGNATURE, signature, KEY_SIZE);
dd                830 drivers/infiniband/hw/hfi1/firmware.c 	write_csr(dd, MISC_CFG_RSA_CMD, RSA_CMD_INIT);
dd                836 drivers/infiniband/hw/hfi1/firmware.c 	status = (read_csr(dd, MISC_CFG_FW_CTRL)
dd                840 drivers/infiniband/hw/hfi1/firmware.c 		dd_dev_err(dd, "%s security engine not idle - giving up\n",
dd                846 drivers/infiniband/hw/hfi1/firmware.c 	write_csr(dd, MISC_CFG_RSA_CMD, RSA_CMD_START);
dd                871 drivers/infiniband/hw/hfi1/firmware.c 		status = (read_csr(dd, MISC_CFG_FW_CTRL)
dd                877 drivers/infiniband/hw/hfi1/firmware.c 			dd_dev_err(dd, "%s firmware security bad idle state\n",
dd                897 drivers/infiniband/hw/hfi1/firmware.c 			dd_dev_err(dd, "%s firmware security time out\n", who);
dd                911 drivers/infiniband/hw/hfi1/firmware.c 	write_csr(dd, MISC_ERR_CLEAR,
dd                919 drivers/infiniband/hw/hfi1/firmware.c 	reg = read_csr(dd, MISC_ERR_STATUS);
dd                922 drivers/infiniband/hw/hfi1/firmware.c 			dd_dev_warn(dd, "%s firmware authorization failed\n",
dd                925 drivers/infiniband/hw/hfi1/firmware.c 			dd_dev_warn(dd, "%s firmware key mismatch\n", who);
dd                931 drivers/infiniband/hw/hfi1/firmware.c static void load_security_variables(struct hfi1_devdata *dd,
dd                935 drivers/infiniband/hw/hfi1/firmware.c 	write_rsa_data(dd, MISC_CFG_RSA_MODULUS, fdet->modulus, KEY_SIZE);
dd                937 drivers/infiniband/hw/hfi1/firmware.c 	write_rsa_data(dd, MISC_CFG_RSA_R2, fdet->r2, KEY_SIZE);
dd                939 drivers/infiniband/hw/hfi1/firmware.c 	write_rsa_data(dd, MISC_CFG_RSA_MU, fdet->mu, MU_SIZE);
dd                941 drivers/infiniband/hw/hfi1/firmware.c 	write_streamed_rsa_data(dd, MISC_CFG_SHA_PRELOAD,
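
Taken together, write_rsa_data(), write_streamed_rsa_data(), run_rsa(), and load_security_variables() form the authentication path used by every firmware load below. A minimal sketch of the 8051 ordering, reconstructed from the load_8051_firmware() entries further down; the final length argument to write_8051() is an assumption, and error handling is elided:

	/* kernel context assumed; mirrors the ordering shown in this index */
	load_security_variables(dd, fdet);	/* modulus, r2, mu, SHA preload */
	write_csr(dd, MISC_CFG_FW_CTRL, 0);	/* clear "loaded" before download */
	ret = write_8051(dd, 1 /* code */, 0, fdet->firmware_ptr,
			 fdet->firmware_len);	/* length arg assumed */
	write_csr(dd, MISC_CFG_FW_CTRL, MISC_CFG_FW_CTRL_FW_8051_LOADED_SMASK);
	ret = run_rsa(dd, "8051", fdet->signature);	/* verify signature */
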
dd                947 drivers/infiniband/hw/hfi1/firmware.c static inline u32 get_firmware_state(struct hfi1_devdata *dd)
dd                949 drivers/infiniband/hw/hfi1/firmware.c 	u64 reg = read_csr(dd, DC_DC8051_STS_CUR_STATE);
dd                959 drivers/infiniband/hw/hfi1/firmware.c int wait_fm_ready(struct hfi1_devdata *dd, u32 mstimeout)
dd                964 drivers/infiniband/hw/hfi1/firmware.c 	if (dd->icode == ICODE_FUNCTIONAL_SIMULATOR)
dd                969 drivers/infiniband/hw/hfi1/firmware.c 		if (get_firmware_state(dd) == 0xa0)	/* ready */
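
wait_fm_ready() above polls get_firmware_state() for the 8051 "ready" state (0xa0). A hedged sketch of such a poll loop; the ~2 ms interval and the -ETIMEDOUT return are assumptions, not taken from the file:

	static int poll_fm_ready(struct hfi1_devdata *dd, u32 mstimeout)
	{
		unsigned long timeout = jiffies + msecs_to_jiffies(mstimeout);

		while (get_firmware_state(dd) != 0xa0) {	/* 0xa0 == ready */
			if (time_after(jiffies, timeout))
				return -ETIMEDOUT;
			usleep_range(1900, 2100);	/* assumed poll interval */
		}
		return 0;
	}
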
dd                980 drivers/infiniband/hw/hfi1/firmware.c static int load_8051_firmware(struct hfi1_devdata *dd,
dd               1001 drivers/infiniband/hw/hfi1/firmware.c 	write_csr(dd, DC_DC8051_CFG_RST, reg);
dd               1013 drivers/infiniband/hw/hfi1/firmware.c 	write_csr(dd, DC_DC8051_CFG_RST, reg);
dd               1016 drivers/infiniband/hw/hfi1/firmware.c 	load_security_variables(dd, fdet);
dd               1021 drivers/infiniband/hw/hfi1/firmware.c 	write_csr(dd, MISC_CFG_FW_CTRL, 0);
dd               1024 drivers/infiniband/hw/hfi1/firmware.c 	ret = write_8051(dd, 1/*code*/, 0, fdet->firmware_ptr,
dd               1035 drivers/infiniband/hw/hfi1/firmware.c 	write_csr(dd, MISC_CFG_FW_CTRL, MISC_CFG_FW_CTRL_FW_8051_LOADED_SMASK);
dd               1038 drivers/infiniband/hw/hfi1/firmware.c 	ret = run_rsa(dd, "8051", fdet->signature);
dd               1043 drivers/infiniband/hw/hfi1/firmware.c 	write_csr(dd, DC_DC8051_CFG_RST, 0ull);
dd               1049 drivers/infiniband/hw/hfi1/firmware.c 	ret = wait_fm_ready(dd, TIMEOUT_8051_START);
dd               1051 drivers/infiniband/hw/hfi1/firmware.c 		dd_dev_err(dd, "8051 start timeout, current state 0x%x\n",
dd               1052 drivers/infiniband/hw/hfi1/firmware.c 			   get_firmware_state(dd));
dd               1056 drivers/infiniband/hw/hfi1/firmware.c 	read_misc_status(dd, &ver_major, &ver_minor, &ver_patch);
dd               1057 drivers/infiniband/hw/hfi1/firmware.c 	dd_dev_info(dd, "8051 firmware version %d.%d.%d\n",
dd               1059 drivers/infiniband/hw/hfi1/firmware.c 	dd->dc8051_ver = dc8051_ver(ver_major, ver_minor, ver_patch);
dd               1060 drivers/infiniband/hw/hfi1/firmware.c 	ret = write_host_interface_version(dd, HOST_INTERFACE_VERSION);
dd               1062 drivers/infiniband/hw/hfi1/firmware.c 		dd_dev_err(dd,
dd               1076 drivers/infiniband/hw/hfi1/firmware.c void sbus_request(struct hfi1_devdata *dd,
dd               1079 drivers/infiniband/hw/hfi1/firmware.c 	write_csr(dd, ASIC_CFG_SBUS_REQUEST,
dd               1092 drivers/infiniband/hw/hfi1/firmware.c static u32 sbus_read(struct hfi1_devdata *dd, u8 receiver_addr, u8 data_addr,
dd               1101 drivers/infiniband/hw/hfi1/firmware.c 	sbus_request(dd, receiver_addr, data_addr, READ_SBUS_RECEIVER, data_in);
dd               1105 drivers/infiniband/hw/hfi1/firmware.c 		reg = read_csr(dd, ASIC_STS_SBUS_RESULT);
dd               1118 drivers/infiniband/hw/hfi1/firmware.c 		dd_dev_err(dd, "%s: read failed, result code 0x%x\n", __func__,
dd               1133 drivers/infiniband/hw/hfi1/firmware.c static void turn_off_spicos(struct hfi1_devdata *dd, int flags)
dd               1136 drivers/infiniband/hw/hfi1/firmware.c 	if (!is_ax(dd))
dd               1139 drivers/infiniband/hw/hfi1/firmware.c 	dd_dev_info(dd, "Turning off spicos:%s%s\n",
dd               1143 drivers/infiniband/hw/hfi1/firmware.c 	write_csr(dd, MISC_CFG_FW_CTRL, ENABLE_SPICO_SMASK);
dd               1146 drivers/infiniband/hw/hfi1/firmware.c 		sbus_request(dd, SBUS_MASTER_BROADCAST, 0x01,
dd               1151 drivers/infiniband/hw/hfi1/firmware.c 		sbus_request(dd, fabric_serdes_broadcast[dd->hfi1_id],
dd               1153 drivers/infiniband/hw/hfi1/firmware.c 	write_csr(dd, MISC_CFG_FW_CTRL, 0);
dd               1170 drivers/infiniband/hw/hfi1/firmware.c void fabric_serdes_reset(struct hfi1_devdata *dd)
dd               1177 drivers/infiniband/hw/hfi1/firmware.c 	ret = acquire_chip_resource(dd, CR_SBUS, SBUS_TIMEOUT);
dd               1179 drivers/infiniband/hw/hfi1/firmware.c 		dd_dev_err(dd,
dd               1183 drivers/infiniband/hw/hfi1/firmware.c 	set_sbus_fast_mode(dd);
dd               1185 drivers/infiniband/hw/hfi1/firmware.c 	if (is_ax(dd)) {
dd               1187 drivers/infiniband/hw/hfi1/firmware.c 		u8 ra = fabric_serdes_broadcast[dd->hfi1_id];
dd               1190 drivers/infiniband/hw/hfi1/firmware.c 		sbus_request(dd, ra, 0x07, WRITE_SBUS_RECEIVER, 0x00000011);
dd               1194 drivers/infiniband/hw/hfi1/firmware.c 		sbus_request(dd, ra, 0x07, WRITE_SBUS_RECEIVER, 0x00000010);
dd               1196 drivers/infiniband/hw/hfi1/firmware.c 		sbus_request(dd, ra, 0x07, WRITE_SBUS_RECEIVER, 0x00000002);
dd               1198 drivers/infiniband/hw/hfi1/firmware.c 		turn_off_spicos(dd, SPICO_FABRIC);
dd               1206 drivers/infiniband/hw/hfi1/firmware.c 		(void)load_fabric_serdes_firmware(dd, &fw_fabric);
dd               1209 drivers/infiniband/hw/hfi1/firmware.c 	clear_sbus_fast_mode(dd);
dd               1210 drivers/infiniband/hw/hfi1/firmware.c 	release_chip_resource(dd, CR_SBUS);
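
fabric_serdes_reset() shows the canonical SBus session shape used throughout this file: take the CR_SBUS chip resource, switch to fast mode, issue receiver requests, then undo both in reverse order. Condensed below; the reading of the 0x07/0x00000011 write as a serdes reset assert is hedged, inferred from its placement in the A-step branch above:

	if (acquire_chip_resource(dd, CR_SBUS, SBUS_TIMEOUT))
		return;			/* could not own the SBus */
	set_sbus_fast_mode(dd);

	sbus_request(dd, ra, 0x07, WRITE_SBUS_RECEIVER, 0x00000011);
	/* ... further receiver writes ... */

	clear_sbus_fast_mode(dd);
	release_chip_resource(dd, CR_SBUS);
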
dd               1214 drivers/infiniband/hw/hfi1/firmware.c int sbus_request_slow(struct hfi1_devdata *dd,
dd               1220 drivers/infiniband/hw/hfi1/firmware.c 	clear_sbus_fast_mode(dd);
dd               1222 drivers/infiniband/hw/hfi1/firmware.c 	sbus_request(dd, receiver_addr, data_addr, command, data_in);
dd               1223 drivers/infiniband/hw/hfi1/firmware.c 	write_csr(dd, ASIC_CFG_SBUS_EXECUTE,
dd               1226 drivers/infiniband/hw/hfi1/firmware.c 	reg = read_csr(dd, ASIC_STS_SBUS_RESULT);
dd               1230 drivers/infiniband/hw/hfi1/firmware.c 			u64 counts = read_csr(dd, ASIC_STS_SBUS_COUNTERS);
dd               1243 drivers/infiniband/hw/hfi1/firmware.c 		reg = read_csr(dd, ASIC_STS_SBUS_RESULT);
dd               1246 drivers/infiniband/hw/hfi1/firmware.c 	write_csr(dd, ASIC_CFG_SBUS_EXECUTE, 0);
dd               1248 drivers/infiniband/hw/hfi1/firmware.c 	reg = read_csr(dd, ASIC_STS_SBUS_RESULT);
dd               1253 drivers/infiniband/hw/hfi1/firmware.c 		reg = read_csr(dd, ASIC_STS_SBUS_RESULT);
dd               1258 drivers/infiniband/hw/hfi1/firmware.c static int load_fabric_serdes_firmware(struct hfi1_devdata *dd,
dd               1262 drivers/infiniband/hw/hfi1/firmware.c 	const u8 ra = fabric_serdes_broadcast[dd->hfi1_id]; /* receiver addr */
dd               1264 drivers/infiniband/hw/hfi1/firmware.c 	dd_dev_info(dd, "Downloading fabric firmware\n");
dd               1267 drivers/infiniband/hw/hfi1/firmware.c 	load_security_variables(dd, fdet);
dd               1269 drivers/infiniband/hw/hfi1/firmware.c 	sbus_request(dd, ra, 0x07, WRITE_SBUS_RECEIVER, 0x00000011);
dd               1273 drivers/infiniband/hw/hfi1/firmware.c 	sbus_request(dd, ra, 0x07, WRITE_SBUS_RECEIVER, 0x00000010);
dd               1275 drivers/infiniband/hw/hfi1/firmware.c 	sbus_request(dd, ra, 0x00, WRITE_SBUS_RECEIVER, 0x40000000);
dd               1278 drivers/infiniband/hw/hfi1/firmware.c 		sbus_request(dd, ra, 0x0a, WRITE_SBUS_RECEIVER,
dd               1282 drivers/infiniband/hw/hfi1/firmware.c 	sbus_request(dd, ra, 0x00, WRITE_SBUS_RECEIVER, 0x00000000);
dd               1284 drivers/infiniband/hw/hfi1/firmware.c 	sbus_request(dd, ra, 0x0b, WRITE_SBUS_RECEIVER, 0x000c0000);
dd               1287 drivers/infiniband/hw/hfi1/firmware.c 	err = run_rsa(dd, "fabric serdes", fdet->signature);
dd               1292 drivers/infiniband/hw/hfi1/firmware.c 	sbus_request(dd, ra, 0x07, WRITE_SBUS_RECEIVER, 0x00000002);
dd               1294 drivers/infiniband/hw/hfi1/firmware.c 	sbus_request(dd, ra, 0x08, WRITE_SBUS_RECEIVER, 0x00000000);
dd               1299 drivers/infiniband/hw/hfi1/firmware.c static int load_sbus_firmware(struct hfi1_devdata *dd,
dd               1305 drivers/infiniband/hw/hfi1/firmware.c 	dd_dev_info(dd, "Downloading SBus firmware\n");
dd               1308 drivers/infiniband/hw/hfi1/firmware.c 	load_security_variables(dd, fdet);
dd               1310 drivers/infiniband/hw/hfi1/firmware.c 	sbus_request(dd, ra, 0x01, WRITE_SBUS_RECEIVER, 0x000000c0);
dd               1312 drivers/infiniband/hw/hfi1/firmware.c 	sbus_request(dd, ra, 0x01, WRITE_SBUS_RECEIVER, 0x00000240);
dd               1314 drivers/infiniband/hw/hfi1/firmware.c 	sbus_request(dd, ra, 0x03, WRITE_SBUS_RECEIVER, 0x80000000);
dd               1317 drivers/infiniband/hw/hfi1/firmware.c 		sbus_request(dd, ra, 0x14, WRITE_SBUS_RECEIVER,
dd               1321 drivers/infiniband/hw/hfi1/firmware.c 	sbus_request(dd, ra, 0x01, WRITE_SBUS_RECEIVER, 0x00000040);
dd               1323 drivers/infiniband/hw/hfi1/firmware.c 	sbus_request(dd, ra, 0x16, WRITE_SBUS_RECEIVER, 0x000c0000);
dd               1326 drivers/infiniband/hw/hfi1/firmware.c 	err = run_rsa(dd, "SBus", fdet->signature);
dd               1331 drivers/infiniband/hw/hfi1/firmware.c 	sbus_request(dd, ra, 0x01, WRITE_SBUS_RECEIVER, 0x00000140);
dd               1336 drivers/infiniband/hw/hfi1/firmware.c static int load_pcie_serdes_firmware(struct hfi1_devdata *dd,
dd               1342 drivers/infiniband/hw/hfi1/firmware.c 	dd_dev_info(dd, "Downloading PCIe firmware\n");
dd               1345 drivers/infiniband/hw/hfi1/firmware.c 	load_security_variables(dd, fdet);
dd               1347 drivers/infiniband/hw/hfi1/firmware.c 	sbus_request(dd, ra, 0x05, WRITE_SBUS_RECEIVER, 0x00000001);
dd               1349 drivers/infiniband/hw/hfi1/firmware.c 	sbus_request(dd, ra, 0x01, WRITE_SBUS_RECEIVER, 0x00000d40);
dd               1356 drivers/infiniband/hw/hfi1/firmware.c 		sbus_request(dd, ra, 0x04, WRITE_SBUS_RECEIVER,
dd               1360 drivers/infiniband/hw/hfi1/firmware.c 	sbus_request(dd, ra, 0x01, WRITE_SBUS_RECEIVER, 0x00000140);
dd               1362 drivers/infiniband/hw/hfi1/firmware.c 	sbus_request(dd, ra, 0x05, WRITE_SBUS_RECEIVER, 0x00000000);
dd               1368 drivers/infiniband/hw/hfi1/firmware.c 	return run_rsa(dd, "PCIe serdes", fdet->signature);
dd               1374 drivers/infiniband/hw/hfi1/firmware.c static void set_serdes_broadcast(struct hfi1_devdata *dd, u8 bg1, u8 bg2,
dd               1390 drivers/infiniband/hw/hfi1/firmware.c 		sbus_request(dd, addrs[count], 0xfd, WRITE_SBUS_RECEIVER,
dd               1395 drivers/infiniband/hw/hfi1/firmware.c int acquire_hw_mutex(struct hfi1_devdata *dd)
dd               1399 drivers/infiniband/hw/hfi1/firmware.c 	u8 mask = 1 << dd->hfi1_id;
dd               1400 drivers/infiniband/hw/hfi1/firmware.c 	u8 user = (u8)read_csr(dd, ASIC_CFG_MUTEX);
dd               1403 drivers/infiniband/hw/hfi1/firmware.c 		dd_dev_info(dd,
dd               1412 drivers/infiniband/hw/hfi1/firmware.c 		write_csr(dd, ASIC_CFG_MUTEX, mask);
dd               1413 drivers/infiniband/hw/hfi1/firmware.c 		user = (u8)read_csr(dd, ASIC_CFG_MUTEX);
dd               1422 drivers/infiniband/hw/hfi1/firmware.c 	dd_dev_err(dd,
dd               1428 drivers/infiniband/hw/hfi1/firmware.c 		write_csr(dd, ASIC_CFG_MUTEX, 0);
dd               1436 drivers/infiniband/hw/hfi1/firmware.c void release_hw_mutex(struct hfi1_devdata *dd)
dd               1438 drivers/infiniband/hw/hfi1/firmware.c 	u8 mask = 1 << dd->hfi1_id;
dd               1439 drivers/infiniband/hw/hfi1/firmware.c 	u8 user = (u8)read_csr(dd, ASIC_CFG_MUTEX);
dd               1442 drivers/infiniband/hw/hfi1/firmware.c 		dd_dev_warn(dd,
dd               1446 drivers/infiniband/hw/hfi1/firmware.c 		write_csr(dd, ASIC_CFG_MUTEX, 0);
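
acquire_hw_mutex()/release_hw_mutex() bracket every update of the shared resource bits in ASIC_CFG_SCRATCH. The guard pattern, as used by __acquire_chip_resource(), release_chip_resource(), and clear_chip_resources() below (the done label stands in for the callers' exit paths):

	mutex_lock(&dd->asic_data->asic_resource_mutex);
	if (acquire_hw_mutex(dd)) {
		fail_mutex_acquire_message(dd, __func__);
		goto done;
	}
	scratch0 = read_csr(dd, ASIC_CFG_SCRATCH);
	/* ... set or clear this HFI's resource bit in scratch0 ... */
	write_csr(dd, ASIC_CFG_SCRATCH, scratch0);
	(void)read_csr(dd, ASIC_CFG_SCRATCH);	/* force the write out */
	release_hw_mutex(dd);
done:
	mutex_unlock(&dd->asic_data->asic_resource_mutex);
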
dd               1455 drivers/infiniband/hw/hfi1/firmware.c static void fail_mutex_acquire_message(struct hfi1_devdata *dd,
dd               1458 drivers/infiniband/hw/hfi1/firmware.c 	dd_dev_err(dd,
dd               1468 drivers/infiniband/hw/hfi1/firmware.c static int __acquire_chip_resource(struct hfi1_devdata *dd, u32 resource)
dd               1475 drivers/infiniband/hw/hfi1/firmware.c 		if (dd->pcidev->device == PCI_DEVICE_ID_INTEL0 &&
dd               1484 drivers/infiniband/hw/hfi1/firmware.c 		my_bit = resource_mask(dd->hfi1_id, resource);
dd               1492 drivers/infiniband/hw/hfi1/firmware.c 	mutex_lock(&dd->asic_data->asic_resource_mutex);
dd               1494 drivers/infiniband/hw/hfi1/firmware.c 	ret = acquire_hw_mutex(dd);
dd               1496 drivers/infiniband/hw/hfi1/firmware.c 		fail_mutex_acquire_message(dd, __func__);
dd               1501 drivers/infiniband/hw/hfi1/firmware.c 	scratch0 = read_csr(dd, ASIC_CFG_SCRATCH);
dd               1505 drivers/infiniband/hw/hfi1/firmware.c 		write_csr(dd, ASIC_CFG_SCRATCH, scratch0 | my_bit);
dd               1507 drivers/infiniband/hw/hfi1/firmware.c 		(void)read_csr(dd, ASIC_CFG_SCRATCH);
dd               1510 drivers/infiniband/hw/hfi1/firmware.c 	release_hw_mutex(dd);
dd               1513 drivers/infiniband/hw/hfi1/firmware.c 	mutex_unlock(&dd->asic_data->asic_resource_mutex);
dd               1524 drivers/infiniband/hw/hfi1/firmware.c int acquire_chip_resource(struct hfi1_devdata *dd, u32 resource, u32 mswait)
dd               1531 drivers/infiniband/hw/hfi1/firmware.c 		ret = __acquire_chip_resource(dd, resource);
dd               1544 drivers/infiniband/hw/hfi1/firmware.c void release_chip_resource(struct hfi1_devdata *dd, u32 resource)
dd               1550 drivers/infiniband/hw/hfi1/firmware.c 		dd_dev_err(dd, "%s: invalid resource 0x%x\n", __func__,
dd               1554 drivers/infiniband/hw/hfi1/firmware.c 	bit = resource_mask(dd->hfi1_id, resource);
dd               1557 drivers/infiniband/hw/hfi1/firmware.c 	mutex_lock(&dd->asic_data->asic_resource_mutex);
dd               1559 drivers/infiniband/hw/hfi1/firmware.c 	if (acquire_hw_mutex(dd)) {
dd               1560 drivers/infiniband/hw/hfi1/firmware.c 		fail_mutex_acquire_message(dd, __func__);
dd               1564 drivers/infiniband/hw/hfi1/firmware.c 	scratch0 = read_csr(dd, ASIC_CFG_SCRATCH);
dd               1567 drivers/infiniband/hw/hfi1/firmware.c 		write_csr(dd, ASIC_CFG_SCRATCH, scratch0);
dd               1569 drivers/infiniband/hw/hfi1/firmware.c 		(void)read_csr(dd, ASIC_CFG_SCRATCH);
dd               1571 drivers/infiniband/hw/hfi1/firmware.c 		dd_dev_warn(dd, "%s: id %d, resource 0x%x: bit not set\n",
dd               1572 drivers/infiniband/hw/hfi1/firmware.c 			    __func__, dd->hfi1_id, resource);
dd               1575 drivers/infiniband/hw/hfi1/firmware.c 	release_hw_mutex(dd);
dd               1578 drivers/infiniband/hw/hfi1/firmware.c 	mutex_unlock(&dd->asic_data->asic_resource_mutex);
dd               1585 drivers/infiniband/hw/hfi1/firmware.c bool check_chip_resource(struct hfi1_devdata *dd, u32 resource,
dd               1591 drivers/infiniband/hw/hfi1/firmware.c 		bit = resource_mask(dd->hfi1_id, resource);
dd               1595 drivers/infiniband/hw/hfi1/firmware.c 	scratch0 = read_csr(dd, ASIC_CFG_SCRATCH);
dd               1598 drivers/infiniband/hw/hfi1/firmware.c 			dd_dev_warn(dd,
dd               1600 drivers/infiniband/hw/hfi1/firmware.c 				    func, dd->hfi1_id, resource);
dd               1606 drivers/infiniband/hw/hfi1/firmware.c static void clear_chip_resources(struct hfi1_devdata *dd, const char *func)
dd               1611 drivers/infiniband/hw/hfi1/firmware.c 	mutex_lock(&dd->asic_data->asic_resource_mutex);
dd               1613 drivers/infiniband/hw/hfi1/firmware.c 	if (acquire_hw_mutex(dd)) {
dd               1614 drivers/infiniband/hw/hfi1/firmware.c 		fail_mutex_acquire_message(dd, func);
dd               1619 drivers/infiniband/hw/hfi1/firmware.c 	scratch0 = read_csr(dd, ASIC_CFG_SCRATCH);
dd               1620 drivers/infiniband/hw/hfi1/firmware.c 	scratch0 &= ~resource_mask(dd->hfi1_id, CR_DYN_MASK);
dd               1621 drivers/infiniband/hw/hfi1/firmware.c 	write_csr(dd, ASIC_CFG_SCRATCH, scratch0);
dd               1623 drivers/infiniband/hw/hfi1/firmware.c 	(void)read_csr(dd, ASIC_CFG_SCRATCH);
dd               1625 drivers/infiniband/hw/hfi1/firmware.c 	release_hw_mutex(dd);
dd               1628 drivers/infiniband/hw/hfi1/firmware.c 	mutex_unlock(&dd->asic_data->asic_resource_mutex);
dd               1631 drivers/infiniband/hw/hfi1/firmware.c void init_chip_resources(struct hfi1_devdata *dd)
dd               1634 drivers/infiniband/hw/hfi1/firmware.c 	clear_chip_resources(dd, __func__);
dd               1637 drivers/infiniband/hw/hfi1/firmware.c void finish_chip_resources(struct hfi1_devdata *dd)
dd               1640 drivers/infiniband/hw/hfi1/firmware.c 	clear_chip_resources(dd, __func__);
dd               1643 drivers/infiniband/hw/hfi1/firmware.c void set_sbus_fast_mode(struct hfi1_devdata *dd)
dd               1645 drivers/infiniband/hw/hfi1/firmware.c 	write_csr(dd, ASIC_CFG_SBUS_EXECUTE,
dd               1649 drivers/infiniband/hw/hfi1/firmware.c void clear_sbus_fast_mode(struct hfi1_devdata *dd)
dd               1653 drivers/infiniband/hw/hfi1/firmware.c 	reg = read_csr(dd, ASIC_STS_SBUS_COUNTERS);
dd               1659 drivers/infiniband/hw/hfi1/firmware.c 		reg = read_csr(dd, ASIC_STS_SBUS_COUNTERS);
dd               1661 drivers/infiniband/hw/hfi1/firmware.c 	write_csr(dd, ASIC_CFG_SBUS_EXECUTE, 0);
dd               1664 drivers/infiniband/hw/hfi1/firmware.c int load_firmware(struct hfi1_devdata *dd)
dd               1669 drivers/infiniband/hw/hfi1/firmware.c 		ret = acquire_chip_resource(dd, CR_SBUS, SBUS_TIMEOUT);
dd               1673 drivers/infiniband/hw/hfi1/firmware.c 		set_sbus_fast_mode(dd);
dd               1675 drivers/infiniband/hw/hfi1/firmware.c 		set_serdes_broadcast(dd, all_fabric_serdes_broadcast,
dd               1676 drivers/infiniband/hw/hfi1/firmware.c 				     fabric_serdes_broadcast[dd->hfi1_id],
dd               1677 drivers/infiniband/hw/hfi1/firmware.c 				     fabric_serdes_addrs[dd->hfi1_id],
dd               1679 drivers/infiniband/hw/hfi1/firmware.c 		turn_off_spicos(dd, SPICO_FABRIC);
dd               1681 drivers/infiniband/hw/hfi1/firmware.c 			ret = load_fabric_serdes_firmware(dd, &fw_fabric);
dd               1682 drivers/infiniband/hw/hfi1/firmware.c 		} while (retry_firmware(dd, ret));
dd               1684 drivers/infiniband/hw/hfi1/firmware.c 		clear_sbus_fast_mode(dd);
dd               1685 drivers/infiniband/hw/hfi1/firmware.c 		release_chip_resource(dd, CR_SBUS);
dd               1692 drivers/infiniband/hw/hfi1/firmware.c 			ret = load_8051_firmware(dd, &fw_8051);
dd               1693 drivers/infiniband/hw/hfi1/firmware.c 		} while (retry_firmware(dd, ret));
dd               1698 drivers/infiniband/hw/hfi1/firmware.c 	dump_fw_version(dd);
dd               1702 drivers/infiniband/hw/hfi1/firmware.c int hfi1_firmware_init(struct hfi1_devdata *dd)
dd               1705 drivers/infiniband/hw/hfi1/firmware.c 	if (dd->icode != ICODE_RTL_SILICON) {
dd               1712 drivers/infiniband/hw/hfi1/firmware.c 	if (dd->icode == ICODE_FUNCTIONAL_SIMULATOR)
dd               1716 drivers/infiniband/hw/hfi1/firmware.c 		if (dd->icode == ICODE_RTL_SILICON)
dd               1728 drivers/infiniband/hw/hfi1/firmware.c 	return obtain_firmware(dd);
dd               1738 drivers/infiniband/hw/hfi1/firmware.c static int check_meta_version(struct hfi1_devdata *dd, u32 *system_table)
dd               1741 drivers/infiniband/hw/hfi1/firmware.c 	struct platform_config_cache *pcfgcache = &dd->pcfg_cache;
dd               1763 drivers/infiniband/hw/hfi1/firmware.c 			dd, "%s:Please update platform config\n", __func__);
dd               1769 drivers/infiniband/hw/hfi1/firmware.c int parse_platform_config(struct hfi1_devdata *dd)
dd               1771 drivers/infiniband/hw/hfi1/firmware.c 	struct platform_config_cache *pcfgcache = &dd->pcfg_cache;
dd               1772 drivers/infiniband/hw/hfi1/firmware.c 	struct hfi1_pportdata *ppd = dd->pport;
dd               1787 drivers/infiniband/hw/hfi1/firmware.c 	if (!dd->platform_config.data) {
dd               1788 drivers/infiniband/hw/hfi1/firmware.c 		dd_dev_err(dd, "%s: Missing config file\n", __func__);
dd               1791 drivers/infiniband/hw/hfi1/firmware.c 	ptr = (u32 *)dd->platform_config.data;
dd               1796 drivers/infiniband/hw/hfi1/firmware.c 		dd_dev_err(dd, "%s: Bad config file\n", __func__);
dd               1809 drivers/infiniband/hw/hfi1/firmware.c 		dd_dev_info(dd,
dd               1817 drivers/infiniband/hw/hfi1/firmware.c 	if (file_length > dd->platform_config.size) {
dd               1818 drivers/infiniband/hw/hfi1/firmware.c 		dd_dev_info(dd, "%s:File claims to be larger than read size\n",
dd               1821 drivers/infiniband/hw/hfi1/firmware.c 	} else if (file_length < dd->platform_config.size) {
dd               1822 drivers/infiniband/hw/hfi1/firmware.c 		dd_dev_info(dd,
dd               1833 drivers/infiniband/hw/hfi1/firmware.c 	while (ptr < (u32 *)(dd->platform_config.data + file_length)) {
dd               1837 drivers/infiniband/hw/hfi1/firmware.c 			dd_dev_err(dd, "%s: Failed validation at offset %ld\n",
dd               1839 drivers/infiniband/hw/hfi1/firmware.c 					      dd->platform_config.data));
dd               1862 drivers/infiniband/hw/hfi1/firmware.c 				ret = check_meta_version(dd, ptr);
dd               1881 drivers/infiniband/hw/hfi1/firmware.c 				dd_dev_err(dd,
dd               1885 drivers/infiniband/hw/hfi1/firmware.c 					    dd->platform_config.data));
dd               1905 drivers/infiniband/hw/hfi1/firmware.c 				dd_dev_err(dd,
dd               1909 drivers/infiniband/hw/hfi1/firmware.c 					    (u32 *)dd->platform_config.data));
dd               1924 drivers/infiniband/hw/hfi1/firmware.c 			dd_dev_err(dd, "%s: Failed CRC check at offset %ld\n",
dd               1926 drivers/infiniband/hw/hfi1/firmware.c 				   (u32 *)dd->platform_config.data));
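
parse_platform_config() above walks the cached file: reject a missing image, check a magic word, sanity-check the claimed length against what was actually read, then validate and CRC-check each table. A sketch of the opening checks only; the magic constant name and the exact return value are hypothetical:

	u32 *ptr;

	if (!dd->platform_config.data) {
		dd_dev_err(dd, "%s: Missing config file\n", __func__);
		return -EINVAL;		/* return value assumed */
	}
	ptr = (u32 *)dd->platform_config.data;
	if (*ptr != PLATFORM_CONFIG_MAGIC) {	/* hypothetical constant name */
		dd_dev_err(dd, "%s: Bad config file\n", __func__);
		return -EINVAL;
	}
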
dd               1941 drivers/infiniband/hw/hfi1/firmware.c 		struct hfi1_devdata *dd,
dd               1945 drivers/infiniband/hw/hfi1/firmware.c 	struct hfi1_pportdata *ppd = dd->pport;
dd               2018 drivers/infiniband/hw/hfi1/firmware.c static int get_platform_fw_field_metadata(struct hfi1_devdata *dd, int table,
dd               2022 drivers/infiniband/hw/hfi1/firmware.c 	struct platform_config_cache *pcfgcache = &dd->pcfg_cache;
dd               2045 drivers/infiniband/hw/hfi1/firmware.c 		dd_dev_info(dd, "%s: Unknown table\n", __func__);
dd               2079 drivers/infiniband/hw/hfi1/firmware.c int get_platform_config_field(struct hfi1_devdata *dd,
dd               2086 drivers/infiniband/hw/hfi1/firmware.c 	struct platform_config_cache *pcfgcache = &dd->pcfg_cache;
dd               2087 drivers/infiniband/hw/hfi1/firmware.c 	struct hfi1_pportdata *ppd = dd->pport;
dd               2098 drivers/infiniband/hw/hfi1/firmware.c 		get_integrated_platform_config_field(dd, table_type,
dd               2103 drivers/infiniband/hw/hfi1/firmware.c 	ret = get_platform_fw_field_metadata(dd, table_type, field_index,
dd               2136 drivers/infiniband/hw/hfi1/firmware.c 		src_ptr = dd->hfi1_id ?
dd               2156 drivers/infiniband/hw/hfi1/firmware.c 		dd_dev_info(dd, "%s: Unknown table\n", __func__);
dd               2176 drivers/infiniband/hw/hfi1/firmware.c int load_pcie_firmware(struct hfi1_devdata *dd)
dd               2181 drivers/infiniband/hw/hfi1/firmware.c 	set_sbus_fast_mode(dd);
dd               2184 drivers/infiniband/hw/hfi1/firmware.c 		turn_off_spicos(dd, SPICO_SBUS);
dd               2186 drivers/infiniband/hw/hfi1/firmware.c 			ret = load_sbus_firmware(dd, &fw_sbus);
dd               2187 drivers/infiniband/hw/hfi1/firmware.c 		} while (retry_firmware(dd, ret));
dd               2193 drivers/infiniband/hw/hfi1/firmware.c 		dd_dev_info(dd, "Setting PCIe SerDes broadcast\n");
dd               2194 drivers/infiniband/hw/hfi1/firmware.c 		set_serdes_broadcast(dd, all_pcie_serdes_broadcast,
dd               2195 drivers/infiniband/hw/hfi1/firmware.c 				     pcie_serdes_broadcast[dd->hfi1_id],
dd               2196 drivers/infiniband/hw/hfi1/firmware.c 				     pcie_serdes_addrs[dd->hfi1_id],
dd               2199 drivers/infiniband/hw/hfi1/firmware.c 			ret = load_pcie_serdes_firmware(dd, &fw_pcie);
dd               2200 drivers/infiniband/hw/hfi1/firmware.c 		} while (retry_firmware(dd, ret));
dd               2206 drivers/infiniband/hw/hfi1/firmware.c 	clear_sbus_fast_mode(dd);
dd               2214 drivers/infiniband/hw/hfi1/firmware.c void read_guid(struct hfi1_devdata *dd)
dd               2217 drivers/infiniband/hw/hfi1/firmware.c 	write_csr(dd, CCE_DC_CTRL, 0);
dd               2218 drivers/infiniband/hw/hfi1/firmware.c 	(void)read_csr(dd, CCE_DC_CTRL);
dd               2220 drivers/infiniband/hw/hfi1/firmware.c 	dd->base_guid = read_csr(dd, DC_DC8051_CFG_LOCAL_GUID);
dd               2221 drivers/infiniband/hw/hfi1/firmware.c 	dd_dev_info(dd, "GUID %llx",
dd               2222 drivers/infiniband/hw/hfi1/firmware.c 		    (unsigned long long)dd->base_guid);
dd               2226 drivers/infiniband/hw/hfi1/firmware.c static void dump_fw_version(struct hfi1_devdata *dd)
dd               2236 drivers/infiniband/hw/hfi1/firmware.c 	ret = acquire_chip_resource(dd, CR_SBUS, SBUS_TIMEOUT);
dd               2238 drivers/infiniband/hw/hfi1/firmware.c 		dd_dev_err(dd, "Unable to acquire SBus to read firmware versions\n");
dd               2243 drivers/infiniband/hw/hfi1/firmware.c 	set_sbus_fast_mode(dd);
dd               2246 drivers/infiniband/hw/hfi1/firmware.c 	sbus_request(dd, SBUS_MASTER_BROADCAST, 0x02, WRITE_SBUS_RECEIVER, 0);
dd               2247 drivers/infiniband/hw/hfi1/firmware.c 	sbus_request(dd, SBUS_MASTER_BROADCAST, 0x07, WRITE_SBUS_RECEIVER, 0x1);
dd               2250 drivers/infiniband/hw/hfi1/firmware.c 	sbus_vers = sbus_read(dd, SBUS_MASTER_BROADCAST, 0x08, 0x1);
dd               2251 drivers/infiniband/hw/hfi1/firmware.c 	dd_dev_info(dd, "SBus Master firmware version 0x%08x\n", sbus_vers);
dd               2257 drivers/infiniband/hw/hfi1/firmware.c 		rcv_addr = pcie_serdes_addrs[dd->hfi1_id][i];
dd               2258 drivers/infiniband/hw/hfi1/firmware.c 		sbus_request(dd, rcv_addr, 0x03, WRITE_SBUS_RECEIVER, 0);
dd               2261 drivers/infiniband/hw/hfi1/firmware.c 		pcie_vers[i] = sbus_read(dd, rcv_addr, 0x04, 0x0);
dd               2267 drivers/infiniband/hw/hfi1/firmware.c 		dd_dev_info(dd, "PCIe SerDes firmware version 0x%x\n",
dd               2270 drivers/infiniband/hw/hfi1/firmware.c 		dd_dev_warn(dd, "PCIe SerDes do not have the same firmware version\n");
dd               2272 drivers/infiniband/hw/hfi1/firmware.c 			dd_dev_info(dd,
dd               2282 drivers/infiniband/hw/hfi1/firmware.c 		rcv_addr = fabric_serdes_addrs[dd->hfi1_id][i];
dd               2283 drivers/infiniband/hw/hfi1/firmware.c 		sbus_request(dd, rcv_addr, 0x03, WRITE_SBUS_RECEIVER, 0);
dd               2286 drivers/infiniband/hw/hfi1/firmware.c 		fabric_vers[i] = sbus_read(dd, rcv_addr, 0x04, 0x0);
dd               2292 drivers/infiniband/hw/hfi1/firmware.c 		dd_dev_info(dd, "Fabric SerDes firmware version 0x%x\n",
dd               2295 drivers/infiniband/hw/hfi1/firmware.c 		dd_dev_warn(dd, "Fabric SerDes do not have the same firmware version\n");
dd               2297 drivers/infiniband/hw/hfi1/firmware.c 			dd_dev_info(dd,
dd               2303 drivers/infiniband/hw/hfi1/firmware.c 	clear_sbus_fast_mode(dd);
dd               2304 drivers/infiniband/hw/hfi1/firmware.c 	release_chip_resource(dd, CR_SBUS);
dd                217 drivers/infiniband/hw/hfi1/hfi.h 	struct hfi1_devdata *dd;
dd                772 drivers/infiniband/hw/hfi1/hfi.h 	struct hfi1_devdata *dd;
dd               1353 drivers/infiniband/hw/hfi1/hfi.h 	void (*pio_inline_send)(struct hfi1_devdata *dd, struct pio_buf *pbuf,
dd               1355 drivers/infiniband/hw/hfi1/hfi.h 	int (*process_vnic_dma_send)(struct hfi1_devdata *dd, u8 q_idx,
dd               1418 drivers/infiniband/hw/hfi1/hfi.h static inline bool hfi1_vnic_is_rsm_full(struct hfi1_devdata *dd, int spare)
dd               1420 drivers/infiniband/hw/hfi1/hfi.h 	return (dd->vnic.rmt_start + spare) > NUM_MAP_ENTRIES;
dd               1442 drivers/infiniband/hw/hfi1/hfi.h 	struct hfi1_devdata *dd;
dd               1469 drivers/infiniband/hw/hfi1/hfi.h 	return (uctxt->ctxt - uctxt->dd->first_dyn_alloc_ctxt) *
dd               1473 drivers/infiniband/hw/hfi1/hfi.h int hfi1_init(struct hfi1_devdata *dd, int reinit);
dd               1476 drivers/infiniband/hw/hfi1/hfi.h int hfi1_diag_add(struct hfi1_devdata *dd);
dd               1477 drivers/infiniband/hw/hfi1/hfi.h void hfi1_diag_remove(struct hfi1_devdata *dd);
dd               1478 drivers/infiniband/hw/hfi1/hfi.h void handle_linkup_change(struct hfi1_devdata *dd, u32 linkup);
dd               1482 drivers/infiniband/hw/hfi1/hfi.h int hfi1_create_rcvhdrq(struct hfi1_devdata *dd, struct hfi1_ctxtdata *rcd);
dd               1484 drivers/infiniband/hw/hfi1/hfi.h int hfi1_create_kctxts(struct hfi1_devdata *dd);
dd               1489 drivers/infiniband/hw/hfi1/hfi.h 			 struct hfi1_devdata *dd, u8 hw_pidx, u8 port);
dd               1490 drivers/infiniband/hw/hfi1/hfi.h void hfi1_free_ctxtdata(struct hfi1_devdata *dd, struct hfi1_ctxtdata *rcd);
dd               1493 drivers/infiniband/hw/hfi1/hfi.h struct hfi1_ctxtdata *hfi1_rcd_get_by_index_safe(struct hfi1_devdata *dd,
dd               1495 drivers/infiniband/hw/hfi1/hfi.h struct hfi1_ctxtdata *hfi1_rcd_get_by_index(struct hfi1_devdata *dd, u16 ctxt);
dd               1499 drivers/infiniband/hw/hfi1/hfi.h void set_all_slowpath(struct hfi1_devdata *dd);
dd               1632 drivers/infiniband/hw/hfi1/hfi.h static inline void pause_for_credit_return(struct hfi1_devdata *dd)
dd               1635 drivers/infiniband/hw/hfi1/hfi.h 	u32 usec = cclock_to_ns(dd, PACKET_EGRESS_TIMEOUT) / 1000;
dd               1645 drivers/infiniband/hw/hfi1/hfi.h static inline u8 sc_to_vlt(struct hfi1_devdata *dd, u8 sc5)
dd               1654 drivers/infiniband/hw/hfi1/hfi.h 		seq = read_seqbegin(&dd->sc2vl_lock);
dd               1655 drivers/infiniband/hw/hfi1/hfi.h 		rval = *(((u8 *)dd->sc2vl) + sc5);
dd               1656 drivers/infiniband/hw/hfi1/hfi.h 	} while (read_seqretry(&dd->sc2vl_lock, seq));
dd               1712 drivers/infiniband/hw/hfi1/hfi.h 	struct hfi1_devdata *dd = ppd->dd;
dd               1715 drivers/infiniband/hw/hfi1/hfi.h 	if (!(dd->err_info_rcv_constraint.status & OPA_EI_STATUS_SMASK)) {
dd               1716 drivers/infiniband/hw/hfi1/hfi.h 		dd->err_info_rcv_constraint.status |= OPA_EI_STATUS_SMASK;
dd               1717 drivers/infiniband/hw/hfi1/hfi.h 		dd->err_info_rcv_constraint.slid = slid;
dd               1718 drivers/infiniband/hw/hfi1/hfi.h 		dd->err_info_rcv_constraint.pkey = pkey;
dd               1789 drivers/infiniband/hw/hfi1/hfi.h u32 lrh_max_header_bytes(struct hfi1_devdata *dd);
dd               1808 drivers/infiniband/hw/hfi1/hfi.h void hfi1_disable_after_error(struct hfi1_devdata *dd);
dd               1815 drivers/infiniband/hw/hfi1/hfi.h void set_up_vau(struct hfi1_devdata *dd, u8 vau);
dd               1816 drivers/infiniband/hw/hfi1/hfi.h void set_up_vl15(struct hfi1_devdata *dd, u16 vl15buf);
dd               1817 drivers/infiniband/hw/hfi1/hfi.h void reset_link_credits(struct hfi1_devdata *dd);
dd               1818 drivers/infiniband/hw/hfi1/hfi.h void assign_remote_cm_au_table(struct hfi1_devdata *dd, u8 vcu);
dd               1824 drivers/infiniband/hw/hfi1/hfi.h 	return ppd->dd;
dd               1849 drivers/infiniband/hw/hfi1/hfi.h 	struct hfi1_devdata *dd = dd_from_ibdev(ibdev);
dd               1852 drivers/infiniband/hw/hfi1/hfi.h 	WARN_ON(pidx >= dd->num_pports);
dd               1853 drivers/infiniband/hw/hfi1/hfi.h 	return &dd->pport[pidx].ibport_data;
dd               1968 drivers/infiniband/hw/hfi1/hfi.h int hfi1_init_dd(struct hfi1_devdata *dd);
dd               1969 drivers/infiniband/hw/hfi1/hfi.h void hfi1_free_devdata(struct hfi1_devdata *dd);
dd               2016 drivers/infiniband/hw/hfi1/hfi.h bool hfi1_can_pin_pages(struct hfi1_devdata *dd, struct mm_struct *mm,
dd               2044 drivers/infiniband/hw/hfi1/hfi.h int hfi1_device_create(struct hfi1_devdata *dd);
dd               2045 drivers/infiniband/hw/hfi1/hfi.h void hfi1_device_remove(struct hfi1_devdata *dd);
dd               2049 drivers/infiniband/hw/hfi1/hfi.h int hfi1_verbs_register_sysfs(struct hfi1_devdata *dd);
dd               2050 drivers/infiniband/hw/hfi1/hfi.h void hfi1_verbs_unregister_sysfs(struct hfi1_devdata *dd);
dd               2054 drivers/infiniband/hw/hfi1/hfi.h int hfi1_pcie_init(struct hfi1_devdata *dd);
dd               2056 drivers/infiniband/hw/hfi1/hfi.h int hfi1_pcie_ddinit(struct hfi1_devdata *dd, struct pci_dev *pdev);
dd               2058 drivers/infiniband/hw/hfi1/hfi.h int pcie_speeds(struct hfi1_devdata *dd);
dd               2059 drivers/infiniband/hw/hfi1/hfi.h int restore_pci_variables(struct hfi1_devdata *dd);
dd               2060 drivers/infiniband/hw/hfi1/hfi.h int save_pci_variables(struct hfi1_devdata *dd);
dd               2061 drivers/infiniband/hw/hfi1/hfi.h int do_pcie_gen3_transition(struct hfi1_devdata *dd);
dd               2062 drivers/infiniband/hw/hfi1/hfi.h void tune_pcie_caps(struct hfi1_devdata *dd);
dd               2063 drivers/infiniband/hw/hfi1/hfi.h int parse_platform_config(struct hfi1_devdata *dd);
dd               2064 drivers/infiniband/hw/hfi1/hfi.h int get_platform_config_field(struct hfi1_devdata *dd,
dd               2122 drivers/infiniband/hw/hfi1/hfi.h static inline u64 hfi1_pkt_default_send_ctxt_mask(struct hfi1_devdata *dd,
dd               2161 drivers/infiniband/hw/hfi1/hfi.h 	if (!is_ax(dd))
dd               2167 drivers/infiniband/hw/hfi1/hfi.h static inline u64 hfi1_pkt_base_sdma_integrity(struct hfi1_devdata *dd)
dd               2196 drivers/infiniband/hw/hfi1/hfi.h 	if (!is_ax(dd))
dd               2203 drivers/infiniband/hw/hfi1/hfi.h #define dd_dev_emerg(dd, fmt, ...) \
dd               2204 drivers/infiniband/hw/hfi1/hfi.h 	dev_emerg(&(dd)->pcidev->dev, "%s: " fmt, \
dd               2205 drivers/infiniband/hw/hfi1/hfi.h 		  rvt_get_ibdev_name(&(dd)->verbs_dev.rdi), ##__VA_ARGS__)
dd               2207 drivers/infiniband/hw/hfi1/hfi.h #define dd_dev_err(dd, fmt, ...) \
dd               2208 drivers/infiniband/hw/hfi1/hfi.h 	dev_err(&(dd)->pcidev->dev, "%s: " fmt, \
dd               2209 drivers/infiniband/hw/hfi1/hfi.h 		rvt_get_ibdev_name(&(dd)->verbs_dev.rdi), ##__VA_ARGS__)
dd               2211 drivers/infiniband/hw/hfi1/hfi.h #define dd_dev_err_ratelimited(dd, fmt, ...) \
dd               2212 drivers/infiniband/hw/hfi1/hfi.h 	dev_err_ratelimited(&(dd)->pcidev->dev, "%s: " fmt, \
dd               2213 drivers/infiniband/hw/hfi1/hfi.h 			    rvt_get_ibdev_name(&(dd)->verbs_dev.rdi), \
dd               2216 drivers/infiniband/hw/hfi1/hfi.h #define dd_dev_warn(dd, fmt, ...) \
dd               2217 drivers/infiniband/hw/hfi1/hfi.h 	dev_warn(&(dd)->pcidev->dev, "%s: " fmt, \
dd               2218 drivers/infiniband/hw/hfi1/hfi.h 		 rvt_get_ibdev_name(&(dd)->verbs_dev.rdi), ##__VA_ARGS__)
dd               2220 drivers/infiniband/hw/hfi1/hfi.h #define dd_dev_warn_ratelimited(dd, fmt, ...) \
dd               2221 drivers/infiniband/hw/hfi1/hfi.h 	dev_warn_ratelimited(&(dd)->pcidev->dev, "%s: " fmt, \
dd               2222 drivers/infiniband/hw/hfi1/hfi.h 			     rvt_get_ibdev_name(&(dd)->verbs_dev.rdi), \
dd               2225 drivers/infiniband/hw/hfi1/hfi.h #define dd_dev_info(dd, fmt, ...) \
dd               2226 drivers/infiniband/hw/hfi1/hfi.h 	dev_info(&(dd)->pcidev->dev, "%s: " fmt, \
dd               2227 drivers/infiniband/hw/hfi1/hfi.h 		 rvt_get_ibdev_name(&(dd)->verbs_dev.rdi), ##__VA_ARGS__)
dd               2229 drivers/infiniband/hw/hfi1/hfi.h #define dd_dev_info_ratelimited(dd, fmt, ...) \
dd               2230 drivers/infiniband/hw/hfi1/hfi.h 	dev_info_ratelimited(&(dd)->pcidev->dev, "%s: " fmt, \
dd               2231 drivers/infiniband/hw/hfi1/hfi.h 			     rvt_get_ibdev_name(&(dd)->verbs_dev.rdi), \
dd               2234 drivers/infiniband/hw/hfi1/hfi.h #define dd_dev_dbg(dd, fmt, ...) \
dd               2235 drivers/infiniband/hw/hfi1/hfi.h 	dev_dbg(&(dd)->pcidev->dev, "%s: " fmt, \
dd               2236 drivers/infiniband/hw/hfi1/hfi.h 		rvt_get_ibdev_name(&(dd)->verbs_dev.rdi), ##__VA_ARGS__)
dd               2238 drivers/infiniband/hw/hfi1/hfi.h #define hfi1_dev_porterr(dd, port, fmt, ...) \
dd               2239 drivers/infiniband/hw/hfi1/hfi.h 	dev_err(&(dd)->pcidev->dev, "%s: port %u: " fmt, \
dd               2240 drivers/infiniband/hw/hfi1/hfi.h 		rvt_get_ibdev_name(&(dd)->verbs_dev.rdi), (port), ##__VA_ARGS__)
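
The dd_dev_* wrappers above prefix the corresponding dev_* output with the rvt ibdev name so every message identifies the HFI unit. Typical call sites; the first is literal from the load_8051_firmware() entries earlier in this index, the second is illustrative:

	dd_dev_info(dd, "8051 firmware version %d.%d.%d\n",
		    ver_major, ver_minor, ver_patch);
	dd_dev_err_ratelimited(dd, "unexpected CSR value 0x%llx\n", reg);
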
dd               2261 drivers/infiniband/hw/hfi1/hfi.h static inline void hfi1_reset_cpu_counters(struct hfi1_devdata *dd)
dd               2266 drivers/infiniband/hw/hfi1/hfi.h 	dd->z_int_counter = get_all_cpu_total(dd->int_counter);
dd               2267 drivers/infiniband/hw/hfi1/hfi.h 	dd->z_rcv_limit = get_all_cpu_total(dd->rcv_limit);
dd               2268 drivers/infiniband/hw/hfi1/hfi.h 	dd->z_send_schedule = get_all_cpu_total(dd->send_schedule);
dd               2270 drivers/infiniband/hw/hfi1/hfi.h 	ppd = (struct hfi1_pportdata *)(dd + 1);
dd               2271 drivers/infiniband/hw/hfi1/hfi.h 	for (i = 0; i < dd->num_pports; i++, ppd++) {
dd               2280 drivers/infiniband/hw/hfi1/hfi.h static inline void setextled(struct hfi1_devdata *dd, u32 on)
dd               2283 drivers/infiniband/hw/hfi1/hfi.h 		write_csr(dd, DCC_CFG_LED_CNTRL, 0x1F);
dd               2285 drivers/infiniband/hw/hfi1/hfi.h 		write_csr(dd, DCC_CFG_LED_CNTRL, 0x10);
dd               2295 drivers/infiniband/hw/hfi1/hfi.h static inline u32 qsfp_resource(struct hfi1_devdata *dd)
dd               2297 drivers/infiniband/hw/hfi1/hfi.h 	return i2c_target(dd->hfi1_id);
dd               2301 drivers/infiniband/hw/hfi1/hfi.h static inline bool is_integrated(struct hfi1_devdata *dd)
dd               2303 drivers/infiniband/hw/hfi1/hfi.h 	return dd->pcidev->device == PCI_DEVICE_ID_INTEL1;
dd               2306 drivers/infiniband/hw/hfi1/hfi.h int hfi1_tempsense_rd(struct hfi1_devdata *dd, struct hfi1_temp *temp);
dd               2308 drivers/infiniband/hw/hfi1/hfi.h #define DD_DEV_ENTRY(dd)       __string(dev, dev_name(&(dd)->pcidev->dev))
dd               2309 drivers/infiniband/hw/hfi1/hfi.h #define DD_DEV_ASSIGN(dd)      __assign_str(dev, dev_name(&(dd)->pcidev->dev))
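
DD_DEV_ENTRY()/DD_DEV_ASSIGN() are TRACE_EVENT helpers that capture dev_name() of the PCI device into a trace record. A hypothetical tracepoint built from them; the event name and printk format are illustrative, not from the driver:

	TRACE_EVENT(hfi1_example,
		    TP_PROTO(struct hfi1_devdata *dd),
		    TP_ARGS(dd),
		    TP_STRUCT__entry(DD_DEV_ENTRY(dd)),
		    TP_fast_assign(DD_DEV_ASSIGN(dd)),
		    TP_printk("[%s]", __get_str(dev))
	);
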
dd                129 drivers/infiniband/hw/hfi1/init.c static int hfi1_create_kctxt(struct hfi1_devdata *dd,
dd                138 drivers/infiniband/hw/hfi1/init.c 	ret = hfi1_create_ctxtdata(ppd, dd->node, &rcd);
dd                140 drivers/infiniband/hw/hfi1/init.c 		dd_dev_err(dd, "Kernel receive context allocation failed\n");
dd                159 drivers/infiniband/hw/hfi1/init.c 	rcd->sc = sc_alloc(dd, SC_ACK, rcd->rcvhdrqentsize, dd->node);
dd                161 drivers/infiniband/hw/hfi1/init.c 		dd_dev_err(dd, "Kernel send context allocation failed\n");
dd                172 drivers/infiniband/hw/hfi1/init.c int hfi1_create_kctxts(struct hfi1_devdata *dd)
dd                177 drivers/infiniband/hw/hfi1/init.c 	dd->rcd = kcalloc_node(dd->num_rcv_contexts, sizeof(*dd->rcd),
dd                178 drivers/infiniband/hw/hfi1/init.c 			       GFP_KERNEL, dd->node);
dd                179 drivers/infiniband/hw/hfi1/init.c 	if (!dd->rcd)
dd                182 drivers/infiniband/hw/hfi1/init.c 	for (i = 0; i < dd->first_dyn_alloc_ctxt; ++i) {
dd                183 drivers/infiniband/hw/hfi1/init.c 		ret = hfi1_create_kctxt(dd, dd->pport);
dd                190 drivers/infiniband/hw/hfi1/init.c 	for (i = 0; dd->rcd && i < dd->first_dyn_alloc_ctxt; ++i)
dd                191 drivers/infiniband/hw/hfi1/init.c 		hfi1_free_ctxt(dd->rcd[i]);
dd                194 drivers/infiniband/hw/hfi1/init.c 	kfree(dd->rcd);
dd                195 drivers/infiniband/hw/hfi1/init.c 	dd->rcd = NULL;
dd                218 drivers/infiniband/hw/hfi1/init.c 	spin_lock_irqsave(&rcd->dd->uctxt_lock, flags);
dd                219 drivers/infiniband/hw/hfi1/init.c 	rcd->dd->rcd[rcd->ctxt] = NULL;
dd                220 drivers/infiniband/hw/hfi1/init.c 	spin_unlock_irqrestore(&rcd->dd->uctxt_lock, flags);
dd                222 drivers/infiniband/hw/hfi1/init.c 	hfi1_free_ctxtdata(rcd->dd, rcd);
dd                265 drivers/infiniband/hw/hfi1/init.c static int allocate_rcd_index(struct hfi1_devdata *dd,
dd                271 drivers/infiniband/hw/hfi1/init.c 	spin_lock_irqsave(&dd->uctxt_lock, flags);
dd                272 drivers/infiniband/hw/hfi1/init.c 	for (ctxt = 0; ctxt < dd->num_rcv_contexts; ctxt++)
dd                273 drivers/infiniband/hw/hfi1/init.c 		if (!dd->rcd[ctxt])
dd                276 drivers/infiniband/hw/hfi1/init.c 	if (ctxt < dd->num_rcv_contexts) {
dd                278 drivers/infiniband/hw/hfi1/init.c 		dd->rcd[ctxt] = rcd;
dd                281 drivers/infiniband/hw/hfi1/init.c 	spin_unlock_irqrestore(&dd->uctxt_lock, flags);
dd                283 drivers/infiniband/hw/hfi1/init.c 	if (ctxt >= dd->num_rcv_contexts)
dd                303 drivers/infiniband/hw/hfi1/init.c struct hfi1_ctxtdata *hfi1_rcd_get_by_index_safe(struct hfi1_devdata *dd,
dd                306 drivers/infiniband/hw/hfi1/init.c 	if (ctxt < dd->num_rcv_contexts)
dd                307 drivers/infiniband/hw/hfi1/init.c 		return hfi1_rcd_get_by_index(dd, ctxt);
dd                324 drivers/infiniband/hw/hfi1/init.c struct hfi1_ctxtdata *hfi1_rcd_get_by_index(struct hfi1_devdata *dd, u16 ctxt)
dd                329 drivers/infiniband/hw/hfi1/init.c 	spin_lock_irqsave(&dd->uctxt_lock, flags);
dd                330 drivers/infiniband/hw/hfi1/init.c 	if (dd->rcd[ctxt]) {
dd                331 drivers/infiniband/hw/hfi1/init.c 		rcd = dd->rcd[ctxt];
dd                335 drivers/infiniband/hw/hfi1/init.c 	spin_unlock_irqrestore(&dd->uctxt_lock, flags);
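
hfi1_rcd_get_by_index() is the locked lookup used whenever the driver iterates its receive contexts (see init_after_reset() and shutdown_device() below). The iteration shape, with reference handling elided since the matching put side is not shown in this index:

	for (i = 0; i < dd->num_rcv_contexts; i++) {
		rcd = hfi1_rcd_get_by_index(dd, i);
		if (!rcd)
			continue;	/* slot empty or being torn down */
		/* ... operate on rcd ... */
	}
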
dd                347 drivers/infiniband/hw/hfi1/init.c 	struct hfi1_devdata *dd = ppd->dd;
dd                352 drivers/infiniband/hw/hfi1/init.c 	if (dd->rcv_entries.nctxt_extra >
dd                353 drivers/infiniband/hw/hfi1/init.c 	    dd->num_rcv_contexts - dd->first_dyn_alloc_ctxt)
dd                354 drivers/infiniband/hw/hfi1/init.c 		kctxt_ngroups = (dd->rcv_entries.nctxt_extra -
dd                355 drivers/infiniband/hw/hfi1/init.c 			 (dd->num_rcv_contexts - dd->first_dyn_alloc_ctxt));
dd                362 drivers/infiniband/hw/hfi1/init.c 		ret = allocate_rcd_index(dd, rcd, &ctxt);
dd                372 drivers/infiniband/hw/hfi1/init.c 		rcd->dd = dd;
dd                374 drivers/infiniband/hw/hfi1/init.c 		rcd->rcv_array_groups = dd->rcv_entries.ngroups;
dd                391 drivers/infiniband/hw/hfi1/init.c 		if (ctxt < dd->first_dyn_alloc_ctxt) {
dd                393 drivers/infiniband/hw/hfi1/init.c 				base = ctxt * (dd->rcv_entries.ngroups + 1);
dd                397 drivers/infiniband/hw/hfi1/init.c 					(ctxt * dd->rcv_entries.ngroups);
dd                400 drivers/infiniband/hw/hfi1/init.c 			u16 ct = ctxt - dd->first_dyn_alloc_ctxt;
dd                402 drivers/infiniband/hw/hfi1/init.c 			base = ((dd->n_krcv_queues * dd->rcv_entries.ngroups) +
dd                404 drivers/infiniband/hw/hfi1/init.c 			if (ct < dd->rcv_entries.nctxt_extra) {
dd                405 drivers/infiniband/hw/hfi1/init.c 				base += ct * (dd->rcv_entries.ngroups + 1);
dd                408 drivers/infiniband/hw/hfi1/init.c 				base += dd->rcv_entries.nctxt_extra +
dd                409 drivers/infiniband/hw/hfi1/init.c 					(ct * dd->rcv_entries.ngroups);
dd                412 drivers/infiniband/hw/hfi1/init.c 		rcd->eager_base = base * dd->rcv_entries.group_size;
dd                430 drivers/infiniband/hw/hfi1/init.c 			dd->rcv_entries.group_size;
dd                433 drivers/infiniband/hw/hfi1/init.c 						dd->rcv_entries.group_size);
dd                435 drivers/infiniband/hw/hfi1/init.c 			dd_dev_err(dd, "ctxt%u: requested too many RcvArray entries.\n",
dd                478 drivers/infiniband/hw/hfi1/init.c 		if (ctxt < dd->first_dyn_alloc_ctxt) {
dd                539 drivers/infiniband/hw/hfi1/init.c 	struct hfi1_devdata *dd = ppd->dd;
dd                586 drivers/infiniband/hw/hfi1/init.c 	write_csr(dd, SEND_STATIC_RATE_CONTROL, src);
dd                644 drivers/infiniband/hw/hfi1/init.c 			 struct hfi1_devdata *dd, u8 hw_pidx, u8 port)
dd                650 drivers/infiniband/hw/hfi1/init.c 	ppd->dd = dd;
dd                669 drivers/infiniband/hw/hfi1/init.c 		dd_dev_err(dd, "Faking data partition 0x8001 in idx %u\n",
dd                716 drivers/infiniband/hw/hfi1/init.c 	dd_dev_err(dd, "Congestion Control Agent disabled for port %d\n", port);
dd                723 drivers/infiniband/hw/hfi1/init.c static int loadtime_init(struct hfi1_devdata *dd)
dd                736 drivers/infiniband/hw/hfi1/init.c static int init_after_reset(struct hfi1_devdata *dd)
dd                745 drivers/infiniband/hw/hfi1/init.c 	for (i = 0; i < dd->num_rcv_contexts; i++) {
dd                746 drivers/infiniband/hw/hfi1/init.c 		rcd = hfi1_rcd_get_by_index(dd, i);
dd                747 drivers/infiniband/hw/hfi1/init.c 		hfi1_rcvctrl(dd, HFI1_RCVCTRL_CTXT_DIS |
dd                752 drivers/infiniband/hw/hfi1/init.c 	pio_send_control(dd, PSC_GLOBAL_DISABLE);
dd                753 drivers/infiniband/hw/hfi1/init.c 	for (i = 0; i < dd->num_send_contexts; i++)
dd                754 drivers/infiniband/hw/hfi1/init.c 		sc_disable(dd->send_contexts[i].sc);
dd                759 drivers/infiniband/hw/hfi1/init.c static void enable_chip(struct hfi1_devdata *dd)
dd                766 drivers/infiniband/hw/hfi1/init.c 	pio_send_control(dd, PSC_GLOBAL_ENABLE);
dd                772 drivers/infiniband/hw/hfi1/init.c 	for (i = 0; i < dd->first_dyn_alloc_ctxt; ++i) {
dd                773 drivers/infiniband/hw/hfi1/init.c 		rcd = hfi1_rcd_get_by_index(dd, i);
dd                787 drivers/infiniband/hw/hfi1/init.c 		hfi1_rcvctrl(dd, rcvmask, rcd);
dd                797 drivers/infiniband/hw/hfi1/init.c static int create_workqueues(struct hfi1_devdata *dd)
dd                802 drivers/infiniband/hw/hfi1/init.c 	for (pidx = 0; pidx < dd->num_pports; ++pidx) {
dd                803 drivers/infiniband/hw/hfi1/init.c 		ppd = dd->pport + pidx;
dd                811 drivers/infiniband/hw/hfi1/init.c 				    dd->unit, pidx);
dd                825 drivers/infiniband/hw/hfi1/init.c 				    dd->unit, pidx);
dd                833 drivers/infiniband/hw/hfi1/init.c 	for (pidx = 0; pidx < dd->num_pports; ++pidx) {
dd                834 drivers/infiniband/hw/hfi1/init.c 		ppd = dd->pport + pidx;
dd                853 drivers/infiniband/hw/hfi1/init.c static void enable_general_intr(struct hfi1_devdata *dd)
dd                855 drivers/infiniband/hw/hfi1/init.c 	set_intr_bits(dd, CCE_ERR_INT, MISC_ERR_INT, true);
dd                856 drivers/infiniband/hw/hfi1/init.c 	set_intr_bits(dd, PIO_ERR_INT, TXE_ERR_INT, true);
dd                857 drivers/infiniband/hw/hfi1/init.c 	set_intr_bits(dd, IS_SENDCTXT_ERR_START, IS_SENDCTXT_ERR_END, true);
dd                858 drivers/infiniband/hw/hfi1/init.c 	set_intr_bits(dd, PBC_INT, GPIO_ASSERT_INT, true);
dd                859 drivers/infiniband/hw/hfi1/init.c 	set_intr_bits(dd, TCRIT_INT, TCRIT_INT, true);
dd                860 drivers/infiniband/hw/hfi1/init.c 	set_intr_bits(dd, IS_DC_START, IS_DC_END, true);
dd                861 drivers/infiniband/hw/hfi1/init.c 	set_intr_bits(dd, IS_SENDCREDIT_START, IS_SENDCREDIT_END, true);
dd                879 drivers/infiniband/hw/hfi1/init.c int hfi1_init(struct hfi1_devdata *dd, int reinit)
dd                888 drivers/infiniband/hw/hfi1/init.c 	dd->process_pio_send = hfi1_verbs_send_pio;
dd                889 drivers/infiniband/hw/hfi1/init.c 	dd->process_dma_send = hfi1_verbs_send_dma;
dd                890 drivers/infiniband/hw/hfi1/init.c 	dd->pio_inline_send = pio_copy;
dd                891 drivers/infiniband/hw/hfi1/init.c 	dd->process_vnic_dma_send = hfi1_vnic_send_dma;
dd                893 drivers/infiniband/hw/hfi1/init.c 	if (is_ax(dd)) {
dd                894 drivers/infiniband/hw/hfi1/init.c 		atomic_set(&dd->drop_packet, DROP_PACKET_ON);
dd                895 drivers/infiniband/hw/hfi1/init.c 		dd->do_drop = 1;
dd                897 drivers/infiniband/hw/hfi1/init.c 		atomic_set(&dd->drop_packet, DROP_PACKET_OFF);
dd                898 drivers/infiniband/hw/hfi1/init.c 		dd->do_drop = 0;
dd                902 drivers/infiniband/hw/hfi1/init.c 	for (pidx = 0; pidx < dd->num_pports; ++pidx) {
dd                903 drivers/infiniband/hw/hfi1/init.c 		ppd = dd->pport + pidx;
dd                908 drivers/infiniband/hw/hfi1/init.c 		ret = init_after_reset(dd);
dd                910 drivers/infiniband/hw/hfi1/init.c 		ret = loadtime_init(dd);
dd                915 drivers/infiniband/hw/hfi1/init.c 	dd->rcvhdrtail_dummy_kvaddr = dma_alloc_coherent(&dd->pcidev->dev,
dd                917 drivers/infiniband/hw/hfi1/init.c 							 &dd->rcvhdrtail_dummy_dma,
dd                920 drivers/infiniband/hw/hfi1/init.c 	if (!dd->rcvhdrtail_dummy_kvaddr) {
dd                921 drivers/infiniband/hw/hfi1/init.c 		dd_dev_err(dd, "cannot allocate dummy tail memory\n");
dd                927 drivers/infiniband/hw/hfi1/init.c 	for (i = 0; dd->rcd && i < dd->first_dyn_alloc_ctxt; ++i) {
dd                934 drivers/infiniband/hw/hfi1/init.c 		rcd = hfi1_rcd_get_by_index(dd, i);
dd                940 drivers/infiniband/hw/hfi1/init.c 		lastfail = hfi1_create_rcvhdrq(dd, rcd);
dd                946 drivers/infiniband/hw/hfi1/init.c 			dd_dev_err(dd,
dd                955 drivers/infiniband/hw/hfi1/init.c 	len = PAGE_ALIGN(chip_rcv_contexts(dd) * HFI1_MAX_SHARED_CTXTS *
dd                956 drivers/infiniband/hw/hfi1/init.c 			 sizeof(*dd->events));
dd                957 drivers/infiniband/hw/hfi1/init.c 	dd->events = vmalloc_user(len);
dd                958 drivers/infiniband/hw/hfi1/init.c 	if (!dd->events)
dd                959 drivers/infiniband/hw/hfi1/init.c 		dd_dev_err(dd, "Failed to allocate user events page\n");
dd                964 drivers/infiniband/hw/hfi1/init.c 	dd->status = vmalloc_user(PAGE_SIZE);
dd                965 drivers/infiniband/hw/hfi1/init.c 	if (!dd->status)
dd                966 drivers/infiniband/hw/hfi1/init.c 		dd_dev_err(dd, "Failed to allocate dev status page\n");
dd                967 drivers/infiniband/hw/hfi1/init.c 	for (pidx = 0; pidx < dd->num_pports; ++pidx) {
dd                968 drivers/infiniband/hw/hfi1/init.c 		ppd = dd->pport + pidx;
dd                969 drivers/infiniband/hw/hfi1/init.c 		if (dd->status)
dd                971 drivers/infiniband/hw/hfi1/init.c 			ppd->statusp = &dd->status->port;
dd                977 drivers/infiniband/hw/hfi1/init.c 	enable_chip(dd);
dd                984 drivers/infiniband/hw/hfi1/init.c 	if (dd->status)
dd                985 drivers/infiniband/hw/hfi1/init.c 		dd->status->dev |= HFI1_STATUS_CHIP_PRESENT |
dd                989 drivers/infiniband/hw/hfi1/init.c 		enable_general_intr(dd);
dd                990 drivers/infiniband/hw/hfi1/init.c 		init_qsfp_int(dd);
dd                993 drivers/infiniband/hw/hfi1/init.c 		for (pidx = 0; pidx < dd->num_pports; ++pidx) {
dd                994 drivers/infiniband/hw/hfi1/init.c 			ppd = dd->pport + pidx;
dd               1002 drivers/infiniband/hw/hfi1/init.c 				dd_dev_info(dd,
dd               1031 drivers/infiniband/hw/hfi1/init.c static void stop_timers(struct hfi1_devdata *dd)
dd               1036 drivers/infiniband/hw/hfi1/init.c 	for (pidx = 0; pidx < dd->num_pports; ++pidx) {
dd               1037 drivers/infiniband/hw/hfi1/init.c 		ppd = dd->pport + pidx;
dd               1054 drivers/infiniband/hw/hfi1/init.c static void shutdown_device(struct hfi1_devdata *dd)
dd               1061 drivers/infiniband/hw/hfi1/init.c 	if (dd->flags & HFI1_SHUTDOWN)
dd               1063 drivers/infiniband/hw/hfi1/init.c 	dd->flags |= HFI1_SHUTDOWN;
dd               1065 drivers/infiniband/hw/hfi1/init.c 	for (pidx = 0; pidx < dd->num_pports; ++pidx) {
dd               1066 drivers/infiniband/hw/hfi1/init.c 		ppd = dd->pport + pidx;
dd               1073 drivers/infiniband/hw/hfi1/init.c 	dd->flags &= ~HFI1_INITTED;
dd               1076 drivers/infiniband/hw/hfi1/init.c 	set_intr_bits(dd, IS_FIRST_SOURCE, IS_LAST_SOURCE, false);
dd               1077 drivers/infiniband/hw/hfi1/init.c 	msix_clean_up_interrupts(dd);
dd               1079 drivers/infiniband/hw/hfi1/init.c 	for (pidx = 0; pidx < dd->num_pports; ++pidx) {
dd               1080 drivers/infiniband/hw/hfi1/init.c 		ppd = dd->pport + pidx;
dd               1081 drivers/infiniband/hw/hfi1/init.c 		for (i = 0; i < dd->num_rcv_contexts; i++) {
dd               1082 drivers/infiniband/hw/hfi1/init.c 			rcd = hfi1_rcd_get_by_index(dd, i);
dd               1083 drivers/infiniband/hw/hfi1/init.c 			hfi1_rcvctrl(dd, HFI1_RCVCTRL_TAILUPD_DIS |
dd               1094 drivers/infiniband/hw/hfi1/init.c 		for (i = 0; i < dd->num_send_contexts; i++)
dd               1095 drivers/infiniband/hw/hfi1/init.c 			sc_flush(dd->send_contexts[i].sc);
dd               1104 drivers/infiniband/hw/hfi1/init.c 	for (pidx = 0; pidx < dd->num_pports; ++pidx) {
dd               1105 drivers/infiniband/hw/hfi1/init.c 		ppd = dd->pport + pidx;
dd               1108 drivers/infiniband/hw/hfi1/init.c 		for (i = 0; i < dd->num_send_contexts; i++)
dd               1109 drivers/infiniband/hw/hfi1/init.c 			sc_disable(dd->send_contexts[i].sc);
dd               1111 drivers/infiniband/hw/hfi1/init.c 		pio_send_control(dd, PSC_GLOBAL_DISABLE);
dd               1130 drivers/infiniband/hw/hfi1/init.c 	sdma_exit(dd);
dd               1141 drivers/infiniband/hw/hfi1/init.c void hfi1_free_ctxtdata(struct hfi1_devdata *dd, struct hfi1_ctxtdata *rcd)
dd               1149 drivers/infiniband/hw/hfi1/init.c 		dma_free_coherent(&dd->pcidev->dev, rcvhdrq_size(rcd),
dd               1153 drivers/infiniband/hw/hfi1/init.c 			dma_free_coherent(&dd->pcidev->dev, PAGE_SIZE,
dd               1166 drivers/infiniband/hw/hfi1/init.c 			dma_free_coherent(&dd->pcidev->dev,
dd               1194 drivers/infiniband/hw/hfi1/init.c static struct hfi1_asic_data *release_asic_data(struct hfi1_devdata *dd)
dd               1199 drivers/infiniband/hw/hfi1/init.c 	if (!dd->asic_data)
dd               1201 drivers/infiniband/hw/hfi1/init.c 	dd->asic_data->dds[dd->hfi1_id] = NULL;
dd               1202 drivers/infiniband/hw/hfi1/init.c 	other = dd->hfi1_id ? 0 : 1;
dd               1203 drivers/infiniband/hw/hfi1/init.c 	ad = dd->asic_data;
dd               1204 drivers/infiniband/hw/hfi1/init.c 	dd->asic_data = NULL;
dd               1209 drivers/infiniband/hw/hfi1/init.c static void finalize_asic_data(struct hfi1_devdata *dd,
dd               1212 drivers/infiniband/hw/hfi1/init.c 	clean_up_i2c(dd, ad);
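
release_asic_data() (init.c:1194-1204) hands the per-ASIC state shared by the two HFIs on one chip back to exactly one caller: each device NULLs its own slot in ad->dds[], and only the device whose peer slot is already empty gets the pointer back for finalize_asic_data()/clean_up_i2c(). The return statement itself is not in the index, so the sketch below labels it an assumption:

    /* Minimal sketch of the last-one-out handoff; names are hypothetical. */
    struct shared_asic {
            void *slot[2];          /* one entry per device on the ASIC */
    };

    static struct shared_asic *release_shared(struct shared_asic *ad, int my_id)
    {
            int other = my_id ? 0 : 1;

            ad->slot[my_id] = NULL;
            /* Assumed: return the object only if the peer is gone too. */
            return ad->slot[other] ? NULL : ad;
    }
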
dd               1223 drivers/infiniband/hw/hfi1/init.c static void hfi1_clean_devdata(struct hfi1_devdata *dd)
dd               1229 drivers/infiniband/hw/hfi1/init.c 	__xa_erase(&hfi1_dev_table, dd->unit);
dd               1230 drivers/infiniband/hw/hfi1/init.c 	ad = release_asic_data(dd);
dd               1233 drivers/infiniband/hw/hfi1/init.c 	finalize_asic_data(dd, ad);
dd               1234 drivers/infiniband/hw/hfi1/init.c 	free_platform_config(dd);
dd               1236 drivers/infiniband/hw/hfi1/init.c 	free_percpu(dd->int_counter);
dd               1237 drivers/infiniband/hw/hfi1/init.c 	free_percpu(dd->rcv_limit);
dd               1238 drivers/infiniband/hw/hfi1/init.c 	free_percpu(dd->send_schedule);
dd               1239 drivers/infiniband/hw/hfi1/init.c 	free_percpu(dd->tx_opstats);
dd               1240 drivers/infiniband/hw/hfi1/init.c 	dd->int_counter   = NULL;
dd               1241 drivers/infiniband/hw/hfi1/init.c 	dd->rcv_limit     = NULL;
dd               1242 drivers/infiniband/hw/hfi1/init.c 	dd->send_schedule = NULL;
dd               1243 drivers/infiniband/hw/hfi1/init.c 	dd->tx_opstats    = NULL;
dd               1244 drivers/infiniband/hw/hfi1/init.c 	kfree(dd->comp_vect);
dd               1245 drivers/infiniband/hw/hfi1/init.c 	dd->comp_vect = NULL;
dd               1246 drivers/infiniband/hw/hfi1/init.c 	sdma_clean(dd, dd->num_sdma);
dd               1247 drivers/infiniband/hw/hfi1/init.c 	rvt_dealloc_device(&dd->verbs_dev.rdi);
dd               1252 drivers/infiniband/hw/hfi1/init.c 	struct hfi1_devdata *dd =
dd               1255 drivers/infiniband/hw/hfi1/init.c 	hfi1_clean_devdata(dd);
dd               1262 drivers/infiniband/hw/hfi1/init.c void hfi1_free_devdata(struct hfi1_devdata *dd)
dd               1264 drivers/infiniband/hw/hfi1/init.c 	kobject_put(&dd->kobj);
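
hfi1_free_devdata() (init.c:1262-1264) frees nothing directly: it drops the kobject reference, and the ktype release callback seen at init.c:1252-1255 runs hfi1_clean_devdata() once the last holder lets go. The generic shape of that refcounted-release idiom, as a self-contained sketch:

    #include <linux/kobject.h>
    #include <linux/slab.h>

    struct mydev {
            struct kobject kobj;
            /* ... device state ... */
    };

    static void mydev_release(struct kobject *kobj)
    {
            struct mydev *d = container_of(kobj, struct mydev, kobj);

            kfree(d);               /* real cleanup runs here, not at put() */
    }

    static struct kobj_type mydev_ktype = {
            .release = mydev_release,
    };

    /* Alloc side: kobject_init(&d->kobj, &mydev_ktype);  (cf. init.c:1352)
     * Free side:  kobject_put(&d->kobj) — release fires on the last put. */
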
dd               1279 drivers/infiniband/hw/hfi1/init.c 	struct hfi1_devdata *dd;
dd               1285 drivers/infiniband/hw/hfi1/init.c 	dd = (struct hfi1_devdata *)rvt_alloc_device(sizeof(*dd) + extra,
dd               1287 drivers/infiniband/hw/hfi1/init.c 	if (!dd)
dd               1289 drivers/infiniband/hw/hfi1/init.c 	dd->num_pports = nports;
dd               1290 drivers/infiniband/hw/hfi1/init.c 	dd->pport = (struct hfi1_pportdata *)(dd + 1);
dd               1291 drivers/infiniband/hw/hfi1/init.c 	dd->pcidev = pdev;
dd               1292 drivers/infiniband/hw/hfi1/init.c 	pci_set_drvdata(pdev, dd);
dd               1293 drivers/infiniband/hw/hfi1/init.c 	dd->node = NUMA_NO_NODE;
dd               1295 drivers/infiniband/hw/hfi1/init.c 	ret = xa_alloc_irq(&hfi1_dev_table, &dd->unit, dd, xa_limit_32b,
dd               1302 drivers/infiniband/hw/hfi1/init.c 	rvt_set_ibdev_name(&dd->verbs_dev.rdi, "%s_%d", class_name(), dd->unit);
dd               1308 drivers/infiniband/hw/hfi1/init.c 	spin_lock_init(&dd->sc_lock);
dd               1309 drivers/infiniband/hw/hfi1/init.c 	spin_lock_init(&dd->sendctrl_lock);
dd               1310 drivers/infiniband/hw/hfi1/init.c 	spin_lock_init(&dd->rcvctrl_lock);
dd               1311 drivers/infiniband/hw/hfi1/init.c 	spin_lock_init(&dd->uctxt_lock);
dd               1312 drivers/infiniband/hw/hfi1/init.c 	spin_lock_init(&dd->hfi1_diag_trans_lock);
dd               1313 drivers/infiniband/hw/hfi1/init.c 	spin_lock_init(&dd->sc_init_lock);
dd               1314 drivers/infiniband/hw/hfi1/init.c 	spin_lock_init(&dd->dc8051_memlock);
dd               1315 drivers/infiniband/hw/hfi1/init.c 	seqlock_init(&dd->sc2vl_lock);
dd               1316 drivers/infiniband/hw/hfi1/init.c 	spin_lock_init(&dd->sde_map_lock);
dd               1317 drivers/infiniband/hw/hfi1/init.c 	spin_lock_init(&dd->pio_map_lock);
dd               1318 drivers/infiniband/hw/hfi1/init.c 	mutex_init(&dd->dc8051_lock);
dd               1319 drivers/infiniband/hw/hfi1/init.c 	init_waitqueue_head(&dd->event_queue);
dd               1320 drivers/infiniband/hw/hfi1/init.c 	spin_lock_init(&dd->irq_src_lock);
dd               1322 drivers/infiniband/hw/hfi1/init.c 	dd->int_counter = alloc_percpu(u64);
dd               1323 drivers/infiniband/hw/hfi1/init.c 	if (!dd->int_counter) {
dd               1328 drivers/infiniband/hw/hfi1/init.c 	dd->rcv_limit = alloc_percpu(u64);
dd               1329 drivers/infiniband/hw/hfi1/init.c 	if (!dd->rcv_limit) {
dd               1334 drivers/infiniband/hw/hfi1/init.c 	dd->send_schedule = alloc_percpu(u64);
dd               1335 drivers/infiniband/hw/hfi1/init.c 	if (!dd->send_schedule) {
dd               1340 drivers/infiniband/hw/hfi1/init.c 	dd->tx_opstats = alloc_percpu(struct hfi1_opcode_stats_perctx);
dd               1341 drivers/infiniband/hw/hfi1/init.c 	if (!dd->tx_opstats) {
dd               1346 drivers/infiniband/hw/hfi1/init.c 	dd->comp_vect = kzalloc(sizeof(*dd->comp_vect), GFP_KERNEL);
dd               1347 drivers/infiniband/hw/hfi1/init.c 	if (!dd->comp_vect) {
dd               1352 drivers/infiniband/hw/hfi1/init.c 	kobject_init(&dd->kobj, &hfi1_devdata_type);
dd               1353 drivers/infiniband/hw/hfi1/init.c 	return dd;
dd               1356 drivers/infiniband/hw/hfi1/init.c 	hfi1_clean_devdata(dd);
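
hfi1_alloc_devdata() (init.c:1279-1356) allocates four per-CPU counters (int_counter, rcv_limit, send_schedule, tx_opstats) and unwinds through hfi1_clean_devdata() on any failure, which is why that function NULLs each pointer after free_percpu(). The per-CPU counter pattern itself, as a minimal sketch (hot paths increment lock-free; readers sum across CPUs):

    #include <linux/percpu.h>
    #include <linux/cpumask.h>
    #include <linux/errno.h>

    static u64 __percpu *counter;

    static int counter_init(void)
    {
            counter = alloc_percpu(u64);
            return counter ? 0 : -ENOMEM;
    }

    static void counter_hit(void)           /* e.g. from an IRQ handler */
    {
            this_cpu_inc(*counter);         /* no lock, no cacheline ping-pong */
    }

    static u64 counter_total(void)
    {
            u64 sum = 0;
            int cpu;

            for_each_possible_cpu(cpu)
                    sum += *per_cpu_ptr(counter, cpu);
            return sum;
    }

    static void counter_exit(void)
    {
            free_percpu(counter);
            counter = NULL;                 /* mirror hfi1_clean_devdata() */
    }
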
dd               1365 drivers/infiniband/hw/hfi1/init.c void hfi1_disable_after_error(struct hfi1_devdata *dd)
dd               1367 drivers/infiniband/hw/hfi1/init.c 	if (dd->flags & HFI1_INITTED) {
dd               1370 drivers/infiniband/hw/hfi1/init.c 		dd->flags &= ~HFI1_INITTED;
dd               1371 drivers/infiniband/hw/hfi1/init.c 		if (dd->pport)
dd               1372 drivers/infiniband/hw/hfi1/init.c 			for (pidx = 0; pidx < dd->num_pports; ++pidx) {
dd               1375 drivers/infiniband/hw/hfi1/init.c 				ppd = dd->pport + pidx;
dd               1376 drivers/infiniband/hw/hfi1/init.c 				if (dd->flags & HFI1_PRESENT)
dd               1389 drivers/infiniband/hw/hfi1/init.c 	if (dd->status)
dd               1390 drivers/infiniband/hw/hfi1/init.c 		dd->status->dev |= HFI1_STATUS_HWERROR;
dd               1531 drivers/infiniband/hw/hfi1/init.c static void cleanup_device_data(struct hfi1_devdata *dd)
dd               1537 drivers/infiniband/hw/hfi1/init.c 	for (pidx = 0; pidx < dd->num_pports; ++pidx) {
dd               1538 drivers/infiniband/hw/hfi1/init.c 		struct hfi1_pportdata *ppd = &dd->pport[pidx];
dd               1557 drivers/infiniband/hw/hfi1/init.c 	free_credit_return(dd);
dd               1559 drivers/infiniband/hw/hfi1/init.c 	if (dd->rcvhdrtail_dummy_kvaddr) {
dd               1560 drivers/infiniband/hw/hfi1/init.c 		dma_free_coherent(&dd->pcidev->dev, sizeof(u64),
dd               1561 drivers/infiniband/hw/hfi1/init.c 				  (void *)dd->rcvhdrtail_dummy_kvaddr,
dd               1562 drivers/infiniband/hw/hfi1/init.c 				  dd->rcvhdrtail_dummy_dma);
dd               1563 drivers/infiniband/hw/hfi1/init.c 		dd->rcvhdrtail_dummy_kvaddr = NULL;
dd               1570 drivers/infiniband/hw/hfi1/init.c 	for (ctxt = 0; dd->rcd && ctxt < dd->num_rcv_contexts; ctxt++) {
dd               1571 drivers/infiniband/hw/hfi1/init.c 		struct hfi1_ctxtdata *rcd = dd->rcd[ctxt];
dd               1579 drivers/infiniband/hw/hfi1/init.c 	kfree(dd->rcd);
dd               1580 drivers/infiniband/hw/hfi1/init.c 	dd->rcd = NULL;
dd               1582 drivers/infiniband/hw/hfi1/init.c 	free_pio_map(dd);
dd               1584 drivers/infiniband/hw/hfi1/init.c 	for (ctxt = 0; ctxt < dd->num_send_contexts; ctxt++)
dd               1585 drivers/infiniband/hw/hfi1/init.c 		sc_free(dd->send_contexts[ctxt].sc);
dd               1586 drivers/infiniband/hw/hfi1/init.c 	dd->num_send_contexts = 0;
dd               1587 drivers/infiniband/hw/hfi1/init.c 	kfree(dd->send_contexts);
dd               1588 drivers/infiniband/hw/hfi1/init.c 	dd->send_contexts = NULL;
dd               1589 drivers/infiniband/hw/hfi1/init.c 	kfree(dd->hw_to_sw);
dd               1590 drivers/infiniband/hw/hfi1/init.c 	dd->hw_to_sw = NULL;
dd               1591 drivers/infiniband/hw/hfi1/init.c 	kfree(dd->boardname);
dd               1592 drivers/infiniband/hw/hfi1/init.c 	vfree(dd->events);
dd               1593 drivers/infiniband/hw/hfi1/init.c 	vfree(dd->status);
dd               1600 drivers/infiniband/hw/hfi1/init.c static void postinit_cleanup(struct hfi1_devdata *dd)
dd               1602 drivers/infiniband/hw/hfi1/init.c 	hfi1_start_cleanup(dd);
dd               1603 drivers/infiniband/hw/hfi1/init.c 	hfi1_comp_vectors_clean_up(dd);
dd               1604 drivers/infiniband/hw/hfi1/init.c 	hfi1_dev_affinity_clean_up(dd);
dd               1606 drivers/infiniband/hw/hfi1/init.c 	hfi1_pcie_ddcleanup(dd);
dd               1607 drivers/infiniband/hw/hfi1/init.c 	hfi1_pcie_cleanup(dd->pcidev);
dd               1609 drivers/infiniband/hw/hfi1/init.c 	cleanup_device_data(dd);
dd               1611 drivers/infiniband/hw/hfi1/init.c 	hfi1_free_devdata(dd);
dd               1614 drivers/infiniband/hw/hfi1/init.c static int init_validate_rcvhdrcnt(struct hfi1_devdata *dd, uint thecnt)
dd               1617 drivers/infiniband/hw/hfi1/init.c 		dd_dev_err(dd, "Receive header queue count too small\n");
dd               1622 drivers/infiniband/hw/hfi1/init.c 		dd_dev_err(dd,
dd               1629 drivers/infiniband/hw/hfi1/init.c 		dd_dev_err(dd, "Receive header queue count %d must be divisible by %lu\n",
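
init_validate_rcvhdrcnt() (init.c:1614-1629) enforces three constraints on the receive header queue count: a floor, a ceiling, and divisibility by the hardware's header-queue increment. A generic sketch of that check (the actual bounds and increment are not in the index, so the parameters here are hypothetical):

    #include <linux/errno.h>
    #include <linux/types.h>

    /* Hypothetical bounds; the real limits live in init.c. */
    static int validate_cnt(u32 cnt, u32 min, u32 max, u32 step)
    {
            if (cnt < min || cnt > max || cnt % step)
                    return -EINVAL;
            return 0;
    }
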
dd               1640 drivers/infiniband/hw/hfi1/init.c 	struct hfi1_devdata *dd;
dd               1656 drivers/infiniband/hw/hfi1/init.c 	dd = hfi1_alloc_devdata(pdev, NUM_IB_PORTS *
dd               1658 drivers/infiniband/hw/hfi1/init.c 	if (IS_ERR(dd)) {
dd               1659 drivers/infiniband/hw/hfi1/init.c 		ret = PTR_ERR(dd);
dd               1664 drivers/infiniband/hw/hfi1/init.c 	ret = init_validate_rcvhdrcnt(dd, rcvhdrcnt);
dd               1670 drivers/infiniband/hw/hfi1/init.c 		dd_dev_err(dd, "Invalid HdrQ Entry size %u\n",
dd               1693 drivers/infiniband/hw/hfi1/init.c 		dd_dev_info(dd, "Eager buffer size %u\n",
dd               1696 drivers/infiniband/hw/hfi1/init.c 		dd_dev_err(dd, "Invalid Eager buffer size of 0\n");
dd               1704 drivers/infiniband/hw/hfi1/init.c 	ret = hfi1_pcie_init(dd);
dd               1712 drivers/infiniband/hw/hfi1/init.c 	ret = hfi1_init_dd(dd);
dd               1716 drivers/infiniband/hw/hfi1/init.c 	ret = create_workqueues(dd);
dd               1721 drivers/infiniband/hw/hfi1/init.c 	initfail = hfi1_init(dd, 0);
dd               1724 drivers/infiniband/hw/hfi1/init.c 	hfi1_vnic_setup(dd);
dd               1726 drivers/infiniband/hw/hfi1/init.c 	ret = hfi1_register_ib_device(dd);
dd               1735 drivers/infiniband/hw/hfi1/init.c 		dd->flags |= HFI1_INITTED;
dd               1737 drivers/infiniband/hw/hfi1/init.c 		hfi1_dbg_ibdev_init(&dd->verbs_dev);
dd               1740 drivers/infiniband/hw/hfi1/init.c 	j = hfi1_device_create(dd);
dd               1742 drivers/infiniband/hw/hfi1/init.c 		dd_dev_err(dd, "Failed to create /dev devices: %d\n", -j);
dd               1745 drivers/infiniband/hw/hfi1/init.c 		msix_clean_up_interrupts(dd);
dd               1746 drivers/infiniband/hw/hfi1/init.c 		stop_timers(dd);
dd               1748 drivers/infiniband/hw/hfi1/init.c 		for (pidx = 0; pidx < dd->num_pports; ++pidx) {
dd               1749 drivers/infiniband/hw/hfi1/init.c 			hfi1_quiet_serdes(dd->pport + pidx);
dd               1750 drivers/infiniband/hw/hfi1/init.c 			ppd = dd->pport + pidx;
dd               1761 drivers/infiniband/hw/hfi1/init.c 			hfi1_device_remove(dd);
dd               1763 drivers/infiniband/hw/hfi1/init.c 			hfi1_unregister_ib_device(dd);
dd               1764 drivers/infiniband/hw/hfi1/init.c 		hfi1_vnic_cleanup(dd);
dd               1765 drivers/infiniband/hw/hfi1/init.c 		postinit_cleanup(dd);
dd               1771 drivers/infiniband/hw/hfi1/init.c 	sdma_start(dd);
dd               1781 drivers/infiniband/hw/hfi1/init.c static void wait_for_clients(struct hfi1_devdata *dd)
dd               1787 drivers/infiniband/hw/hfi1/init.c 	if (atomic_dec_and_test(&dd->user_refcount))
dd               1788 drivers/infiniband/hw/hfi1/init.c 		complete(&dd->user_comp);
dd               1790 drivers/infiniband/hw/hfi1/init.c 	wait_for_completion(&dd->user_comp);
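
wait_for_clients() (init.c:1781-1790) is the classic refcount-plus-completion shutdown gate: the driver holds one reference in dd->user_refcount, drops it in remove, and sleeps until the last user context completes dd->user_comp. A self-contained sketch of the same shape:

    #include <linux/atomic.h>
    #include <linux/completion.h>

    static atomic_t users = ATOMIC_INIT(1);         /* driver's own ref */
    static DECLARE_COMPLETION(users_gone);

    static void user_put(void)                      /* every release path */
    {
            if (atomic_dec_and_test(&users))
                    complete(&users_gone);
    }

    /* remove(): drop the driver's reference, then block until the last
     * user has gone away — new opens must already be fenced off. */
    static void wait_for_users(void)
    {
            user_put();
            wait_for_completion(&users_gone);
    }
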
dd               1795 drivers/infiniband/hw/hfi1/init.c 	struct hfi1_devdata *dd = pci_get_drvdata(pdev);
dd               1798 drivers/infiniband/hw/hfi1/init.c 	hfi1_dbg_ibdev_exit(&dd->verbs_dev);
dd               1801 drivers/infiniband/hw/hfi1/init.c 	hfi1_device_remove(dd);
dd               1804 drivers/infiniband/hw/hfi1/init.c 	wait_for_clients(dd);
dd               1807 drivers/infiniband/hw/hfi1/init.c 	hfi1_unregister_ib_device(dd);
dd               1810 drivers/infiniband/hw/hfi1/init.c 	hfi1_vnic_cleanup(dd);
dd               1816 drivers/infiniband/hw/hfi1/init.c 	shutdown_device(dd);
dd               1818 drivers/infiniband/hw/hfi1/init.c 	stop_timers(dd);
dd               1823 drivers/infiniband/hw/hfi1/init.c 	postinit_cleanup(dd);
dd               1828 drivers/infiniband/hw/hfi1/init.c 	struct hfi1_devdata *dd = pci_get_drvdata(pdev);
dd               1830 drivers/infiniband/hw/hfi1/init.c 	shutdown_device(dd);
dd               1842 drivers/infiniband/hw/hfi1/init.c int hfi1_create_rcvhdrq(struct hfi1_devdata *dd, struct hfi1_ctxtdata *rcd)
dd               1852 drivers/infiniband/hw/hfi1/init.c 		if (rcd->ctxt < dd->first_dyn_alloc_ctxt || rcd->is_vnic)
dd               1856 drivers/infiniband/hw/hfi1/init.c 		rcd->rcvhdrq = dma_alloc_coherent(&dd->pcidev->dev, amt,
dd               1861 drivers/infiniband/hw/hfi1/init.c 			dd_dev_err(dd,
dd               1869 drivers/infiniband/hw/hfi1/init.c 			rcd->rcvhdrtail_kvaddr = dma_alloc_coherent(&dd->pcidev->dev,
dd               1886 drivers/infiniband/hw/hfi1/init.c 	write_kctxt_csr(dd, rcd->ctxt, RCV_HDR_CNT, reg);
dd               1890 drivers/infiniband/hw/hfi1/init.c 	write_kctxt_csr(dd, rcd->ctxt, RCV_HDR_ENT_SIZE, reg);
dd               1893 drivers/infiniband/hw/hfi1/init.c 	write_kctxt_csr(dd, rcd->ctxt, RCV_HDR_SIZE, reg);
dd               1899 drivers/infiniband/hw/hfi1/init.c 	write_kctxt_csr(dd, rcd->ctxt, RCV_HDR_TAIL_ADDR,
dd               1900 drivers/infiniband/hw/hfi1/init.c 			dd->rcvhdrtail_dummy_dma);
dd               1905 drivers/infiniband/hw/hfi1/init.c 	dd_dev_err(dd,
dd               1908 drivers/infiniband/hw/hfi1/init.c 	dma_free_coherent(&dd->pcidev->dev, amt, rcd->rcvhdrq,
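
hfi1_create_rcvhdrq() (init.c:1842-1908) allocates the receive header queue and the optional tail-update page with dma_alloc_coherent(), programs the context CSRs (RCV_HDR_CNT/ENT_SIZE/SIZE/TAIL_ADDR) with the returned bus addresses, and frees with dma_free_coherent() on the error path. The allocation pairing, reduced to a sketch:

    #include <linux/pci.h>
    #include <linux/dma-mapping.h>

    /* dma_alloc_coherent() returns a CPU pointer and fills *dma with the
     * bus address that the hardware CSRs are programmed with above. */
    static void *alloc_ring(struct pci_dev *pdev, size_t amt, dma_addr_t *dma)
    {
            return dma_alloc_coherent(&pdev->dev, amt, dma, GFP_KERNEL);
    }

    static void free_ring(struct pci_dev *pdev, size_t amt, void *cpu,
                          dma_addr_t dma)
    {
            dma_free_coherent(&pdev->dev, amt, cpu, dma);
    }
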
dd               1926 drivers/infiniband/hw/hfi1/init.c 	struct hfi1_devdata *dd = rcd->dd;
dd               1948 drivers/infiniband/hw/hfi1/init.c 	if (rcd->egrbufs.size < (round_mtu * dd->rcv_entries.group_size))
dd               1949 drivers/infiniband/hw/hfi1/init.c 		rcd->egrbufs.size = round_mtu * dd->rcv_entries.group_size;
dd               1968 drivers/infiniband/hw/hfi1/init.c 			dma_alloc_coherent(&dd->pcidev->dev,
dd               1994 drivers/infiniband/hw/hfi1/init.c 				dd_dev_err(dd, "ctxt%u: Failed to allocate eager buffers\n",
dd               2058 drivers/infiniband/hw/hfi1/init.c 	max_entries = rcd->rcv_array_groups * dd->rcv_entries.group_size;
dd               2059 drivers/infiniband/hw/hfi1/init.c 	egrtop = roundup(rcd->egrbufs.alloced, dd->rcv_entries.group_size);
dd               2078 drivers/infiniband/hw/hfi1/init.c 		hfi1_put_tid(dd, rcd->eager_base + idx, PT_EAGER,
dd               2089 drivers/infiniband/hw/hfi1/init.c 		dma_free_coherent(&dd->pcidev->dev,
dd                 61 drivers/infiniband/hw/hfi1/intr.c 	struct hfi1_devdata *dd = ppd->dd;
dd                 66 drivers/infiniband/hw/hfi1/intr.c 		read_8051_config(dd, REMOTE_LNI_INFO, GENERAL_CONFIG, &frame);
dd                 81 drivers/infiniband/hw/hfi1/intr.c 	struct hfi1_devdata *dd = ppd->dd;
dd                 85 drivers/infiniband/hw/hfi1/intr.c 		dd_dev_warn(dd, "%s pkey[2] already set to 0x%x, resetting it to 0x%x\n",
dd                 89 drivers/infiniband/hw/hfi1/intr.c 	hfi1_event_pkey_change(ppd->dd, ppd->port);
dd                126 drivers/infiniband/hw/hfi1/intr.c 	struct hfi1_devdata *dd = ppd->dd;
dd                133 drivers/infiniband/hw/hfi1/intr.c 	if (!(dd->flags & HFI1_INITTED))
dd                135 drivers/infiniband/hw/hfi1/intr.c 	event.device = &dd->verbs_dev.rdi.ibdev;
dd                152 drivers/infiniband/hw/hfi1/intr.c void handle_linkup_change(struct hfi1_devdata *dd, u32 linkup)
dd                154 drivers/infiniband/hw/hfi1/intr.c 	struct hfi1_pportdata *ppd = &dd->pport[0];
dd                175 drivers/infiniband/hw/hfi1/intr.c 		if (quick_linkup || dd->icode == ICODE_FUNCTIONAL_SIMULATOR) {
dd                176 drivers/infiniband/hw/hfi1/intr.c 			set_up_vau(dd, dd->vau);
dd                177 drivers/infiniband/hw/hfi1/intr.c 			set_up_vl15(dd, dd->vl15_init);
dd                178 drivers/infiniband/hw/hfi1/intr.c 			assign_remote_cm_au_table(dd, dd->vcu);
dd                182 drivers/infiniband/hw/hfi1/intr.c 			read_csr(dd, DC_DC8051_STS_REMOTE_GUID);
dd                184 drivers/infiniband/hw/hfi1/intr.c 			read_csr(dd, DC_DC8051_STS_REMOTE_NODE_TYPE) &
dd                187 drivers/infiniband/hw/hfi1/intr.c 			read_csr(dd, DC_DC8051_STS_REMOTE_PORT_NO) &
dd                190 drivers/infiniband/hw/hfi1/intr.c 			read_csr(dd, DC_DC8051_STS_REMOTE_FM_SECURITY) &
dd                192 drivers/infiniband/hw/hfi1/intr.c 		dd_dev_info(dd,
dd                223 drivers/infiniband/hw/hfi1/intr.c 		reset_link_credits(dd);
dd                246 drivers/infiniband/hw/hfi1/intr.c 	struct hfi1_devdata *dd = rcd->dd;
dd                249 drivers/infiniband/hw/hfi1/intr.c 	spin_lock_irqsave(&dd->uctxt_lock, flags);
dd                255 drivers/infiniband/hw/hfi1/intr.c 		hfi1_rcvctrl(dd, HFI1_RCVCTRL_INTRAVAIL_DIS, rcd);
dd                262 drivers/infiniband/hw/hfi1/intr.c 	spin_unlock_irqrestore(&dd->uctxt_lock, flags);
dd                111 drivers/infiniband/hw/hfi1/mad.c void hfi1_event_pkey_change(struct hfi1_devdata *dd, u8 port)
dd                116 drivers/infiniband/hw/hfi1/mad.c 	event.device = &dd->verbs_dev.rdi.ibdev;
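
hfi1_event_pkey_change() (mad.c:111-116) builds an ib_event against the rdmavt ib_device; the dispatch call itself is past the end of the indexed lines, so the sketch below assumes the standard ib_dispatch_event() finish:

    #include <rdma/ib_verbs.h>

    static void pkey_change_event(struct ib_device *ibdev, u8 port)
    {
            struct ib_event event = {
                    .device           = ibdev,
                    .element.port_num = port,
                    .event            = IB_EVENT_PKEY_CHANGE,
            };

            /* Assumed completion of the indexed function. */
            ib_dispatch_event(&event);
    }
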
dd                299 drivers/infiniband/hw/hfi1/mad.c 	struct hfi1_devdata *dd = dd_from_ppd(ppd);
dd                303 drivers/infiniband/hw/hfi1/mad.c 	attr.type = rdma_ah_find_type(&dd->verbs_dev.rdi.ibdev, port_num);
dd                522 drivers/infiniband/hw/hfi1/mad.c 	struct hfi1_devdata *dd = dd_from_dev(verbs_dev);
dd                523 drivers/infiniband/hw/hfi1/mad.c 	struct hfi1_ibport *ibp = &dd->pport[port_num - 1].ibport_data;
dd                608 drivers/infiniband/hw/hfi1/mad.c 	struct hfi1_devdata *dd = dd_from_ibdev(ibdev);
dd                614 drivers/infiniband/hw/hfi1/mad.c 	if (am || pidx >= dd->num_pports || ibdev->node_guid == 0 ||
dd                629 drivers/infiniband/hw/hfi1/mad.c 	ni->partition_cap = cpu_to_be16(hfi1_get_npkeys(dd));
dd                630 drivers/infiniband/hw/hfi1/mad.c 	ni->device_id = cpu_to_be16(dd->pcidev->device);
dd                631 drivers/infiniband/hw/hfi1/mad.c 	ni->revision = cpu_to_be32(dd->minrev);
dd                633 drivers/infiniband/hw/hfi1/mad.c 	ni->vendor_id[0] = dd->oui1;
dd                634 drivers/infiniband/hw/hfi1/mad.c 	ni->vendor_id[1] = dd->oui2;
dd                635 drivers/infiniband/hw/hfi1/mad.c 	ni->vendor_id[2] = dd->oui3;
dd                647 drivers/infiniband/hw/hfi1/mad.c 	struct hfi1_devdata *dd = dd_from_ibdev(ibdev);
dd                651 drivers/infiniband/hw/hfi1/mad.c 	if (smp->attr_mod || pidx >= dd->num_pports ||
dd                666 drivers/infiniband/hw/hfi1/mad.c 	nip->partition_cap = cpu_to_be16(hfi1_get_npkeys(dd));
dd                667 drivers/infiniband/hw/hfi1/mad.c 	nip->device_id = cpu_to_be16(dd->pcidev->device);
dd                668 drivers/infiniband/hw/hfi1/mad.c 	nip->revision = cpu_to_be32(dd->minrev);
dd                670 drivers/infiniband/hw/hfi1/mad.c 	nip->vendor_id[0] = dd->oui1;
dd                671 drivers/infiniband/hw/hfi1/mad.c 	nip->vendor_id[1] = dd->oui2;
dd                672 drivers/infiniband/hw/hfi1/mad.c 	nip->vendor_id[2] = dd->oui3;
dd                786 drivers/infiniband/hw/hfi1/mad.c void read_ltp_rtt(struct hfi1_devdata *dd)
dd                790 drivers/infiniband/hw/hfi1/mad.c 	if (read_lcb_csr(dd, DC_LCB_STS_ROUND_TRIP_LTP_CNT, &reg))
dd                791 drivers/infiniband/hw/hfi1/mad.c 		dd_dev_err(dd, "%s: unable to read LTP RTT\n", __func__);
dd                801 drivers/infiniband/hw/hfi1/mad.c 	struct hfi1_devdata *dd;
dd                819 drivers/infiniband/hw/hfi1/mad.c 	dd = dd_from_ibdev(ibdev);
dd                821 drivers/infiniband/hw/hfi1/mad.c 	ppd = dd->pport + (port - 1);
dd                825 drivers/infiniband/hw/hfi1/mad.c 	    ppd->vls_supported > ARRAY_SIZE(dd->vld)) {
dd                890 drivers/infiniband/hw/hfi1/mad.c 		mtu = mtu_to_enum(dd->vld[i].mtu, HFI1_DEFAULT_ACTIVE_MTU);
dd                897 drivers/infiniband/hw/hfi1/mad.c 	mtu = mtu_to_enum(dd->vld[15].mtu, 2048);
dd                962 drivers/infiniband/hw/hfi1/mad.c 	pi->overall_buffer_space = cpu_to_be16(dd->link_credits);
dd                976 drivers/infiniband/hw/hfi1/mad.c 	buffer_units  = (dd->vau) & OPA_PI_MASK_BUF_UNIT_BUF_ALLOC;
dd                977 drivers/infiniband/hw/hfi1/mad.c 	buffer_units |= (dd->vcu << 3) & OPA_PI_MASK_BUF_UNIT_CREDIT_ACK;
dd                980 drivers/infiniband/hw/hfi1/mad.c 	buffer_units |= (dd->vl15_init << 11) & OPA_PI_MASK_BUF_UNIT_VL15_INIT;
dd               1012 drivers/infiniband/hw/hfi1/mad.c static int get_pkeys(struct hfi1_devdata *dd, u8 port, u16 *pkeys)
dd               1014 drivers/infiniband/hw/hfi1/mad.c 	struct hfi1_pportdata *ppd = dd->pport + port - 1;
dd               1025 drivers/infiniband/hw/hfi1/mad.c 	struct hfi1_devdata *dd = dd_from_ibdev(ibdev);
dd               1032 drivers/infiniband/hw/hfi1/mad.c 	unsigned npkeys = hfi1_get_npkeys(dd);
dd               1065 drivers/infiniband/hw/hfi1/mad.c 		get_pkeys(dd, port, q);
dd               1243 drivers/infiniband/hw/hfi1/mad.c 	struct hfi1_devdata *dd = ppd->dd;
dd               1323 drivers/infiniband/hw/hfi1/mad.c 			send_idle_sma(dd, SMA_IDLE_ARM);
dd               1329 drivers/infiniband/hw/hfi1/mad.c 				send_idle_sma(dd, SMA_IDLE_ACTIVE);
dd               1357 drivers/infiniband/hw/hfi1/mad.c 	struct hfi1_devdata *dd;
dd               1398 drivers/infiniband/hw/hfi1/mad.c 	dd = dd_from_ibdev(ibdev);
dd               1400 drivers/infiniband/hw/hfi1/mad.c 	ppd = dd->pport + (port - 1);
dd               1527 drivers/infiniband/hw/hfi1/mad.c 	    ppd->vls_supported > ARRAY_SIZE(dd->vld)) {
dd               1545 drivers/infiniband/hw/hfi1/mad.c 		if (dd->vld[i].mtu != mtu) {
dd               1546 drivers/infiniband/hw/hfi1/mad.c 			dd_dev_info(dd,
dd               1548 drivers/infiniband/hw/hfi1/mad.c 				    i, dd->vld[i].mtu, mtu);
dd               1549 drivers/infiniband/hw/hfi1/mad.c 			dd->vld[i].mtu = mtu;
dd               1559 drivers/infiniband/hw/hfi1/mad.c 	if (dd->vld[15].mtu != mtu) {
dd               1560 drivers/infiniband/hw/hfi1/mad.c 		dd_dev_info(dd,
dd               1562 drivers/infiniband/hw/hfi1/mad.c 			    dd->vld[15].mtu, mtu);
dd               1563 drivers/infiniband/hw/hfi1/mad.c 		dd->vld[15].mtu = mtu;
dd               1670 drivers/infiniband/hw/hfi1/mad.c static int set_pkeys(struct hfi1_devdata *dd, u8 port, u16 *pkeys)
dd               1683 drivers/infiniband/hw/hfi1/mad.c 	ppd = dd->pport + (port - 1);
dd               1714 drivers/infiniband/hw/hfi1/mad.c 		hfi1_event_pkey_change(dd, port);
dd               1724 drivers/infiniband/hw/hfi1/mad.c 	struct hfi1_devdata *dd = dd_from_ibdev(ibdev);
dd               1731 drivers/infiniband/hw/hfi1/mad.c 	unsigned npkeys = hfi1_get_npkeys(dd);
dd               1762 drivers/infiniband/hw/hfi1/mad.c 	if (start_block == 0 && set_pkeys(dd, port, p) != 0) {
dd               1798 drivers/infiniband/hw/hfi1/mad.c static int set_sc2vlt_tables(struct hfi1_devdata *dd, void *data)
dd               1804 drivers/infiniband/hw/hfi1/mad.c 	write_csr(dd, SEND_SC2VLT0, *val++);
dd               1805 drivers/infiniband/hw/hfi1/mad.c 	write_csr(dd, SEND_SC2VLT1, *val++);
dd               1806 drivers/infiniband/hw/hfi1/mad.c 	write_csr(dd, SEND_SC2VLT2, *val++);
dd               1807 drivers/infiniband/hw/hfi1/mad.c 	write_csr(dd, SEND_SC2VLT3, *val++);
dd               1808 drivers/infiniband/hw/hfi1/mad.c 	write_seqlock_irq(&dd->sc2vl_lock);
dd               1809 drivers/infiniband/hw/hfi1/mad.c 	memcpy(dd->sc2vl, data, sizeof(dd->sc2vl));
dd               1810 drivers/infiniband/hw/hfi1/mad.c 	write_sequnlock_irq(&dd->sc2vl_lock);
dd               1814 drivers/infiniband/hw/hfi1/mad.c static int get_sc2vlt_tables(struct hfi1_devdata *dd, void *data)
dd               1818 drivers/infiniband/hw/hfi1/mad.c 	*val++ = read_csr(dd, SEND_SC2VLT0);
dd               1819 drivers/infiniband/hw/hfi1/mad.c 	*val++ = read_csr(dd, SEND_SC2VLT1);
dd               1820 drivers/infiniband/hw/hfi1/mad.c 	*val++ = read_csr(dd, SEND_SC2VLT2);
dd               1821 drivers/infiniband/hw/hfi1/mad.c 	*val++ = read_csr(dd, SEND_SC2VLT3);
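
set_sc2vlt_tables()/get_sc2vlt_tables() (mad.c:1798-1821) keep the SC-to-VLT CSRs and the cached dd->sc2vl copy coherent with the seqlock initialized at init.c:1315: writers take write_seqlock_irq() around the memcpy, and (an assumption — the reader side is not in this index) fast-path readers would use the standard retry loop. A self-contained sketch of both sides:

    #include <linux/seqlock.h>
    #include <linux/string.h>

    static DEFINE_SEQLOCK(map_lock);
    static u64 map[4];                      /* e.g. four SC2VLT words */

    static void map_update(const u64 *new_map)      /* rare writer */
    {
            write_seqlock_irq(&map_lock);
            memcpy(map, new_map, sizeof(map));
            write_sequnlock_irq(&map_lock);
    }

    static u64 map_read(int i)                      /* lock-free reader */
    {
            unsigned int seq;
            u64 v;

            do {
                    seq = read_seqbegin(&map_lock);
                    v = map[i];
            } while (read_seqretry(&map_lock, seq));
            return v;
    }
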
dd               1928 drivers/infiniband/hw/hfi1/mad.c 	struct hfi1_devdata *dd = dd_from_ibdev(ibdev);
dd               1937 drivers/infiniband/hw/hfi1/mad.c 	get_sc2vlt_tables(dd, vp);
dd               1951 drivers/infiniband/hw/hfi1/mad.c 	struct hfi1_devdata *dd = dd_from_ibdev(ibdev);
dd               1969 drivers/infiniband/hw/hfi1/mad.c 	ppd = dd->pport + (port - 1);
dd               1981 drivers/infiniband/hw/hfi1/mad.c 	set_sc2vlt_tables(dd, vp);
dd               1992 drivers/infiniband/hw/hfi1/mad.c 	struct hfi1_devdata *dd = dd_from_ibdev(ibdev);
dd               2002 drivers/infiniband/hw/hfi1/mad.c 	ppd = dd->pport + (port - 1);
dd               2017 drivers/infiniband/hw/hfi1/mad.c 	struct hfi1_devdata *dd = dd_from_ibdev(ibdev);
dd               2029 drivers/infiniband/hw/hfi1/mad.c 	ppd = dd->pport + (port - 1);
dd               2036 drivers/infiniband/hw/hfi1/mad.c 	ppd = dd->pport + (port - 1);
dd               2138 drivers/infiniband/hw/hfi1/mad.c 	struct hfi1_devdata *dd = dd_from_ibdev(ibdev);
dd               2143 drivers/infiniband/hw/hfi1/mad.c 	if (dd->pport->port_type != PORT_TYPE_QSFP ||
dd               2163 drivers/infiniband/hw/hfi1/mad.c 	ret = get_cable_info(dd, port, addr, len, data);
dd               2191 drivers/infiniband/hw/hfi1/mad.c 	struct hfi1_devdata *dd = dd_from_ibdev(ibdev);
dd               2201 drivers/infiniband/hw/hfi1/mad.c 	ppd = dd->pport + (port - 1);
dd               2203 drivers/infiniband/hw/hfi1/mad.c 	trace_bct_get(dd, p);
dd               2215 drivers/infiniband/hw/hfi1/mad.c 	struct hfi1_devdata *dd = dd_from_ibdev(ibdev);
dd               2223 drivers/infiniband/hw/hfi1/mad.c 	ppd = dd->pport + (port - 1);
dd               2224 drivers/infiniband/hw/hfi1/mad.c 	trace_bct_set(dd, p);
dd               2630 drivers/infiniband/hw/hfi1/mad.c 	if (!is_bx(ppd->dd)) {
dd               2730 drivers/infiniband/hw/hfi1/mad.c 	struct hfi1_devdata *dd = dd_from_ibdev(ibdev);
dd               2770 drivers/infiniband/hw/hfi1/mad.c 	hfi1_read_link_quality(dd, &rsp->link_quality_indicator);
dd               2773 drivers/infiniband/hw/hfi1/mad.c 	rsp->port_xmit_data = cpu_to_be64(read_dev_cntr(dd, C_DC_XMIT_FLITS,
dd               2775 drivers/infiniband/hw/hfi1/mad.c 	rsp->port_rcv_data = cpu_to_be64(read_dev_cntr(dd, C_DC_RCV_FLITS,
dd               2777 drivers/infiniband/hw/hfi1/mad.c 	rsp->port_xmit_pkts = cpu_to_be64(read_dev_cntr(dd, C_DC_XMIT_PKTS,
dd               2779 drivers/infiniband/hw/hfi1/mad.c 	rsp->port_rcv_pkts = cpu_to_be64(read_dev_cntr(dd, C_DC_RCV_PKTS,
dd               2782 drivers/infiniband/hw/hfi1/mad.c 		cpu_to_be64(read_dev_cntr(dd, C_DC_MC_XMIT_PKTS,
dd               2785 drivers/infiniband/hw/hfi1/mad.c 		cpu_to_be64(read_dev_cntr(dd, C_DC_MC_RCV_PKTS,
dd               2798 drivers/infiniband/hw/hfi1/mad.c 		cpu_to_be64(read_dev_cntr(dd, C_DC_RCV_FCN, CNTR_INVALID_VL));
dd               2800 drivers/infiniband/hw/hfi1/mad.c 		cpu_to_be64(read_dev_cntr(dd, C_DC_RCV_BCN, CNTR_INVALID_VL));
dd               2808 drivers/infiniband/hw/hfi1/mad.c 		cpu_to_be64(read_dev_cntr(dd, C_DC_RMT_PHY_ERR,
dd               2811 drivers/infiniband/hw/hfi1/mad.c 		cpu_to_be64(read_dev_cntr(dd, C_DC_RX_REPLAY,
dd               2813 drivers/infiniband/hw/hfi1/mad.c 	tmp = read_dev_cntr(dd, C_DC_SEQ_CRC_CNT, CNTR_INVALID_VL);
dd               2814 drivers/infiniband/hw/hfi1/mad.c 	tmp2 = tmp + read_dev_cntr(dd, C_DC_REINIT_FROM_PEER_CNT,
dd               2823 drivers/infiniband/hw/hfi1/mad.c 		cpu_to_be64(read_dev_cntr(dd, C_DC_RCV_ERR, CNTR_INVALID_VL));
dd               2825 drivers/infiniband/hw/hfi1/mad.c 		cpu_to_be64(read_dev_cntr(dd, C_RCV_OVF, CNTR_INVALID_VL));
dd               2827 drivers/infiniband/hw/hfi1/mad.c 		cpu_to_be64(read_dev_cntr(dd, C_DC_FM_CFG_ERR,
dd               2833 drivers/infiniband/hw/hfi1/mad.c 	tmp = read_dev_cntr(dd, C_DC_UNC_ERR, CNTR_INVALID_VL);
dd               2846 drivers/infiniband/hw/hfi1/mad.c 		tmp = read_dev_cntr(dd, C_DC_RX_FLIT_VL, idx_from_vl(vl));
dd               2850 drivers/infiniband/hw/hfi1/mad.c 			cpu_to_be64(read_dev_cntr(dd, C_DC_RX_PKT_VL,
dd               2870 drivers/infiniband/hw/hfi1/mad.c 			cpu_to_be64(read_dev_cntr(dd, C_DC_RCV_FCN_VL,
dd               2874 drivers/infiniband/hw/hfi1/mad.c 			cpu_to_be64(read_dev_cntr(dd, C_DC_RCV_BCN_VL,
dd               2895 drivers/infiniband/hw/hfi1/mad.c 	struct hfi1_devdata *dd = dd_from_ibdev(ibdev);
dd               2907 drivers/infiniband/hw/hfi1/mad.c 	error_counter_summary += read_dev_cntr(dd, C_DC_RMT_PHY_ERR,
dd               2910 drivers/infiniband/hw/hfi1/mad.c 	error_counter_summary += (read_dev_cntr(dd, C_DC_RX_REPLAY,
dd               2913 drivers/infiniband/hw/hfi1/mad.c 	tmp = read_dev_cntr(dd, C_DC_SEQ_CRC_CNT, CNTR_INVALID_VL);
dd               2914 drivers/infiniband/hw/hfi1/mad.c 	tmp += read_dev_cntr(dd, C_DC_REINIT_FROM_PEER_CNT, CNTR_INVALID_VL);
dd               2916 drivers/infiniband/hw/hfi1/mad.c 	error_counter_summary += read_dev_cntr(dd, C_DC_RCV_ERR,
dd               2918 drivers/infiniband/hw/hfi1/mad.c 	error_counter_summary += read_dev_cntr(dd, C_RCV_OVF, CNTR_INVALID_VL);
dd               2919 drivers/infiniband/hw/hfi1/mad.c 	error_counter_summary += read_dev_cntr(dd, C_DC_FM_CFG_ERR,
dd               2924 drivers/infiniband/hw/hfi1/mad.c 	tmp = read_dev_cntr(dd, C_DC_UNC_ERR, CNTR_INVALID_VL);
dd               2933 drivers/infiniband/hw/hfi1/mad.c 	if (!is_bx(ppd->dd)) {
dd               2957 drivers/infiniband/hw/hfi1/mad.c 	struct hfi1_devdata *dd = dd_from_ibdev(ibdev);
dd               2959 drivers/infiniband/hw/hfi1/mad.c 	rsp->port_xmit_data = cpu_to_be64(read_dev_cntr(dd, C_DC_XMIT_FLITS,
dd               2961 drivers/infiniband/hw/hfi1/mad.c 	rsp->port_rcv_data = cpu_to_be64(read_dev_cntr(dd, C_DC_RCV_FLITS,
dd               2963 drivers/infiniband/hw/hfi1/mad.c 	rsp->port_xmit_pkts = cpu_to_be64(read_dev_cntr(dd, C_DC_XMIT_PKTS,
dd               2965 drivers/infiniband/hw/hfi1/mad.c 	rsp->port_rcv_pkts = cpu_to_be64(read_dev_cntr(dd, C_DC_RCV_PKTS,
dd               2968 drivers/infiniband/hw/hfi1/mad.c 		cpu_to_be64(read_dev_cntr(dd, C_DC_MC_XMIT_PKTS,
dd               2971 drivers/infiniband/hw/hfi1/mad.c 		cpu_to_be64(read_dev_cntr(dd, C_DC_MC_RCV_PKTS,
dd               2981 drivers/infiniband/hw/hfi1/mad.c 	struct hfi1_devdata *dd = dd_from_ibdev(ibdev);
dd               3041 drivers/infiniband/hw/hfi1/mad.c 	hfi1_read_link_quality(dd, &lq);
dd               3056 drivers/infiniband/hw/hfi1/mad.c 		cpu_to_be64(read_dev_cntr(dd, C_DC_RCV_FCN, CNTR_INVALID_VL));
dd               3058 drivers/infiniband/hw/hfi1/mad.c 		cpu_to_be64(read_dev_cntr(dd, C_DC_RCV_BCN, CNTR_INVALID_VL));
dd               3078 drivers/infiniband/hw/hfi1/mad.c 			cpu_to_be64(read_dev_cntr(dd, C_DC_RX_FLIT_VL,
dd               3086 drivers/infiniband/hw/hfi1/mad.c 			cpu_to_be64(read_dev_cntr(dd, C_DC_RX_PKT_VL,
dd               3099 drivers/infiniband/hw/hfi1/mad.c 			cpu_to_be64(read_dev_cntr(dd, C_DC_RCV_FCN_VL,
dd               3102 drivers/infiniband/hw/hfi1/mad.c 			cpu_to_be64(read_dev_cntr(dd, C_DC_RCV_BCN_VL,
dd               3158 drivers/infiniband/hw/hfi1/mad.c 	struct hfi1_devdata *dd = dd_from_ibdev(ibdev);
dd               3162 drivers/infiniband/hw/hfi1/mad.c 	tmp = read_dev_cntr(dd, C_DC_SEQ_CRC_CNT, CNTR_INVALID_VL);
dd               3163 drivers/infiniband/hw/hfi1/mad.c 	tmp2 = tmp + read_dev_cntr(dd, C_DC_REINIT_FROM_PEER_CNT,
dd               3175 drivers/infiniband/hw/hfi1/mad.c 		cpu_to_be64(read_dev_cntr(dd, C_DC_RCV_ERR, CNTR_INVALID_VL));
dd               3177 drivers/infiniband/hw/hfi1/mad.c 		cpu_to_be64(read_dev_cntr(dd, C_DC_RMT_PHY_ERR,
dd               3190 drivers/infiniband/hw/hfi1/mad.c 		cpu_to_be64(read_dev_cntr(dd, C_DC_RX_REPLAY,
dd               3193 drivers/infiniband/hw/hfi1/mad.c 		cpu_to_be64(read_dev_cntr(dd, C_RCV_OVF, CNTR_INVALID_VL));
dd               3204 drivers/infiniband/hw/hfi1/mad.c 	struct hfi1_devdata *dd = dd_from_ibdev(ibdev);
dd               3258 drivers/infiniband/hw/hfi1/mad.c 		cpu_to_be64(read_dev_cntr(dd, C_DC_RMT_PHY_ERR,
dd               3261 drivers/infiniband/hw/hfi1/mad.c 		cpu_to_be64(read_dev_cntr(dd, C_DC_FM_CFG_ERR,
dd               3263 drivers/infiniband/hw/hfi1/mad.c 	tmp = read_dev_cntr(dd, C_DC_UNC_ERR, CNTR_INVALID_VL);
dd               3267 drivers/infiniband/hw/hfi1/mad.c 		cpu_to_be64(read_dev_cntr(dd, C_DC_RCV_ERR, CNTR_INVALID_VL));
dd               3378 drivers/infiniband/hw/hfi1/mad.c 	struct hfi1_devdata *dd = dd_from_ibdev(ibdev);
dd               3422 drivers/infiniband/hw/hfi1/mad.c 		dd->err_info_rcvport.status_and_code;
dd               3424 drivers/infiniband/hw/hfi1/mad.c 	       &dd->err_info_rcvport.packet_flit1, sizeof(u64));
dd               3426 drivers/infiniband/hw/hfi1/mad.c 	       &dd->err_info_rcvport.packet_flit2, sizeof(u64));
dd               3429 drivers/infiniband/hw/hfi1/mad.c 	reg = read_csr(dd, RCV_ERR_INFO);
dd               3445 drivers/infiniband/hw/hfi1/mad.c 		dd->err_info_xmit_constraint.status;
dd               3447 drivers/infiniband/hw/hfi1/mad.c 		cpu_to_be16(dd->err_info_xmit_constraint.pkey);
dd               3449 drivers/infiniband/hw/hfi1/mad.c 		cpu_to_be32(dd->err_info_xmit_constraint.slid);
dd               3452 drivers/infiniband/hw/hfi1/mad.c 		dd->err_info_rcv_constraint.status;
dd               3454 drivers/infiniband/hw/hfi1/mad.c 		cpu_to_be16(dd->err_info_rcv_constraint.pkey);
dd               3456 drivers/infiniband/hw/hfi1/mad.c 		cpu_to_be32(dd->err_info_rcv_constraint.slid);
dd               3459 drivers/infiniband/hw/hfi1/mad.c 	rsp->uncorrectable_ei.status_and_code = dd->err_info_uncorrectable;
dd               3462 drivers/infiniband/hw/hfi1/mad.c 	rsp->fm_config_ei.status_and_code = dd->err_info_fmconfig;
dd               3476 drivers/infiniband/hw/hfi1/mad.c 	struct hfi1_devdata *dd = dd_from_ibdev(ibdev);
dd               3496 drivers/infiniband/hw/hfi1/mad.c 		write_dev_cntr(dd, C_DC_XMIT_FLITS, CNTR_INVALID_VL, 0);
dd               3499 drivers/infiniband/hw/hfi1/mad.c 		write_dev_cntr(dd, C_DC_RCV_FLITS, CNTR_INVALID_VL, 0);
dd               3502 drivers/infiniband/hw/hfi1/mad.c 		write_dev_cntr(dd, C_DC_XMIT_PKTS, CNTR_INVALID_VL, 0);
dd               3505 drivers/infiniband/hw/hfi1/mad.c 		write_dev_cntr(dd, C_DC_RCV_PKTS, CNTR_INVALID_VL, 0);
dd               3508 drivers/infiniband/hw/hfi1/mad.c 		write_dev_cntr(dd, C_DC_MC_XMIT_PKTS, CNTR_INVALID_VL, 0);
dd               3511 drivers/infiniband/hw/hfi1/mad.c 		write_dev_cntr(dd, C_DC_MC_RCV_PKTS, CNTR_INVALID_VL, 0);
dd               3521 drivers/infiniband/hw/hfi1/mad.c 		write_dev_cntr(dd, C_DC_RCV_FCN, CNTR_INVALID_VL, 0);
dd               3524 drivers/infiniband/hw/hfi1/mad.c 		write_dev_cntr(dd, C_DC_RCV_BCN, CNTR_INVALID_VL, 0);
dd               3530 drivers/infiniband/hw/hfi1/mad.c 		write_dev_cntr(dd, C_DC_RCV_BBL, CNTR_INVALID_VL, 0);
dd               3548 drivers/infiniband/hw/hfi1/mad.c 		write_dev_cntr(dd, C_DC_RMT_PHY_ERR, CNTR_INVALID_VL, 0);
dd               3551 drivers/infiniband/hw/hfi1/mad.c 		write_dev_cntr(dd, C_DC_RX_REPLAY, CNTR_INVALID_VL, 0);
dd               3554 drivers/infiniband/hw/hfi1/mad.c 		write_dev_cntr(dd, C_DC_SEQ_CRC_CNT, CNTR_INVALID_VL, 0);
dd               3555 drivers/infiniband/hw/hfi1/mad.c 		write_dev_cntr(dd, C_DC_REINIT_FROM_PEER_CNT,
dd               3560 drivers/infiniband/hw/hfi1/mad.c 		write_dev_cntr(dd, C_DC_RCV_ERR, CNTR_INVALID_VL, 0);
dd               3563 drivers/infiniband/hw/hfi1/mad.c 		write_dev_cntr(dd, C_RCV_OVF, CNTR_INVALID_VL, 0);
dd               3564 drivers/infiniband/hw/hfi1/mad.c 		dd->rcv_ovfl_cnt = 0;
dd               3568 drivers/infiniband/hw/hfi1/mad.c 		write_dev_cntr(dd, C_DC_FM_CFG_ERR, CNTR_INVALID_VL, 0);
dd               3574 drivers/infiniband/hw/hfi1/mad.c 		write_dev_cntr(dd, C_DC_UNC_ERR, CNTR_INVALID_VL, 0);
dd               3581 drivers/infiniband/hw/hfi1/mad.c 			write_dev_cntr(dd, C_DC_RX_FLIT_VL, idx_from_vl(vl), 0);
dd               3587 drivers/infiniband/hw/hfi1/mad.c 			write_dev_cntr(dd, C_DC_RX_PKT_VL, idx_from_vl(vl), 0);
dd               3597 drivers/infiniband/hw/hfi1/mad.c 			write_dev_cntr(dd, C_DC_RCV_FCN_VL, idx_from_vl(vl), 0);
dd               3600 drivers/infiniband/hw/hfi1/mad.c 			write_dev_cntr(dd, C_DC_RCV_BCN_VL, idx_from_vl(vl), 0);
dd               3606 drivers/infiniband/hw/hfi1/mad.c 			write_dev_cntr(dd, C_DC_RCV_BBL_VL, idx_from_vl(vl), 0);
dd               3628 drivers/infiniband/hw/hfi1/mad.c 	struct hfi1_devdata *dd = dd_from_ibdev(ibdev);
dd               3666 drivers/infiniband/hw/hfi1/mad.c 		dd->err_info_rcvport.status_and_code &= ~OPA_EI_STATUS_SMASK;
dd               3674 drivers/infiniband/hw/hfi1/mad.c 		write_csr(dd, RCV_ERR_INFO,
dd               3678 drivers/infiniband/hw/hfi1/mad.c 		dd->err_info_xmit_constraint.status &= ~OPA_EI_STATUS_SMASK;
dd               3681 drivers/infiniband/hw/hfi1/mad.c 		dd->err_info_rcv_constraint.status &= ~OPA_EI_STATUS_SMASK;
dd               3686 drivers/infiniband/hw/hfi1/mad.c 		dd->err_info_uncorrectable &= ~OPA_EI_STATUS_SMASK;
dd               3691 drivers/infiniband/hw/hfi1/mad.c 		dd->err_info_fmconfig &= ~OPA_EI_STATUS_SMASK;
dd               4043 drivers/infiniband/hw/hfi1/mad.c 	struct hfi1_devdata *dd = dd_from_ibdev(ibdev);
dd               4044 drivers/infiniband/hw/hfi1/mad.c 	struct hfi1_pportdata *ppd = dd->pport;
dd               4073 drivers/infiniband/hw/hfi1/mad.c 	struct hfi1_devdata *dd = dd_from_ibdev(ibdev);
dd               4084 drivers/infiniband/hw/hfi1/mad.c 		hfi1_start_led_override(dd->pport, 2000, 1500);
dd               4086 drivers/infiniband/hw/hfi1/mad.c 		shutdown_led_override(dd->pport);
dd               4364 drivers/infiniband/hw/hfi1/mad.c void clear_linkup_counters(struct hfi1_devdata *dd)
dd               4367 drivers/infiniband/hw/hfi1/mad.c 	write_dev_cntr(dd, C_DC_RCV_ERR, CNTR_INVALID_VL, 0);
dd               4368 drivers/infiniband/hw/hfi1/mad.c 	dd->err_info_rcvport.status_and_code &= ~OPA_EI_STATUS_SMASK;
dd               4370 drivers/infiniband/hw/hfi1/mad.c 	write_dev_cntr(dd, C_DC_SEQ_CRC_CNT, CNTR_INVALID_VL, 0);
dd               4371 drivers/infiniband/hw/hfi1/mad.c 	write_dev_cntr(dd, C_DC_REINIT_FROM_PEER_CNT, CNTR_INVALID_VL, 0);
dd               4373 drivers/infiniband/hw/hfi1/mad.c 	write_dev_cntr(dd, C_DC_RX_REPLAY, CNTR_INVALID_VL, 0);
dd               4375 drivers/infiniband/hw/hfi1/mad.c 	write_dev_cntr(dd, C_RCV_OVF, CNTR_INVALID_VL, 0);
dd               4376 drivers/infiniband/hw/hfi1/mad.c 	dd->rcv_ovfl_cnt = 0;
dd               4377 drivers/infiniband/hw/hfi1/mad.c 	dd->err_info_xmit_constraint.status &= ~OPA_EI_STATUS_SMASK;
dd                439 drivers/infiniband/hw/hfi1/mad.h void hfi1_event_pkey_change(struct hfi1_devdata *dd, u8 port);
dd                 58 drivers/infiniband/hw/hfi1/msix.c int msix_initialize(struct hfi1_devdata *dd)
dd                 72 drivers/infiniband/hw/hfi1/msix.c 	total = 1 + dd->num_sdma + dd->n_krcv_queues + dd->num_vnic_contexts;
dd                 77 drivers/infiniband/hw/hfi1/msix.c 	ret = pci_alloc_irq_vectors(dd->pcidev, total, total, PCI_IRQ_MSIX);
dd                 79 drivers/infiniband/hw/hfi1/msix.c 		dd_dev_err(dd, "pci_alloc_irq_vectors() failed: %d\n", ret);
dd                 83 drivers/infiniband/hw/hfi1/msix.c 	entries = kcalloc(total, sizeof(*dd->msix_info.msix_entries),
dd                 86 drivers/infiniband/hw/hfi1/msix.c 		pci_free_irq_vectors(dd->pcidev);
dd                 90 drivers/infiniband/hw/hfi1/msix.c 	dd->msix_info.msix_entries = entries;
dd                 91 drivers/infiniband/hw/hfi1/msix.c 	spin_lock_init(&dd->msix_info.msix_lock);
dd                 92 drivers/infiniband/hw/hfi1/msix.c 	bitmap_zero(dd->msix_info.in_use_msix, total);
dd                 93 drivers/infiniband/hw/hfi1/msix.c 	dd->msix_info.max_requested = total;
dd                 94 drivers/infiniband/hw/hfi1/msix.c 	dd_dev_info(dd, "%u MSI-X interrupts allocated\n", total);
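
msix_initialize() (msix.c:58-94) sizes the MSI-X request as one general vector plus one per SDMA engine, kernel receive queue, and VNIC context, and asks for exactly that count (min == max), so a partial allocation is treated as failure. The sizing logic as a sketch:

    #include <linux/pci.h>

    static int alloc_vectors(struct pci_dev *pdev, u32 nsdma, u32 nrcv,
                             u32 nvnic)
    {
            u32 total = 1 + nsdma + nrcv + nvnic;
            int ret;

            /* min == max: all or nothing, no degraded interrupt mode. */
            ret = pci_alloc_irq_vectors(pdev, total, total, PCI_IRQ_MSIX);
            if (ret < 0)
                    return ret;
            return total;
    }
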
dd                116 drivers/infiniband/hw/hfi1/msix.c static int msix_request_irq(struct hfi1_devdata *dd, void *arg,
dd                128 drivers/infiniband/hw/hfi1/msix.c 	spin_lock(&dd->msix_info.msix_lock);
dd                129 drivers/infiniband/hw/hfi1/msix.c 	nr = find_first_zero_bit(dd->msix_info.in_use_msix,
dd                130 drivers/infiniband/hw/hfi1/msix.c 				 dd->msix_info.max_requested);
dd                131 drivers/infiniband/hw/hfi1/msix.c 	if (nr < dd->msix_info.max_requested)
dd                132 drivers/infiniband/hw/hfi1/msix.c 		__set_bit(nr, dd->msix_info.in_use_msix);
dd                133 drivers/infiniband/hw/hfi1/msix.c 	spin_unlock(&dd->msix_info.msix_lock);
dd                135 drivers/infiniband/hw/hfi1/msix.c 	if (nr == dd->msix_info.max_requested)
dd                143 drivers/infiniband/hw/hfi1/msix.c 			spin_lock(&dd->msix_info.msix_lock);
dd                144 drivers/infiniband/hw/hfi1/msix.c 			__clear_bit(nr, dd->msix_info.in_use_msix);
dd                145 drivers/infiniband/hw/hfi1/msix.c 			spin_unlock(&dd->msix_info.msix_lock);
dd                146 drivers/infiniband/hw/hfi1/msix.c 			dd_dev_err(dd, "Invalid index %lu for GENERAL IRQ\n",
dd                150 drivers/infiniband/hw/hfi1/msix.c 		snprintf(name, sizeof(name), DRIVER_NAME "_%d", dd->unit);
dd                155 drivers/infiniband/hw/hfi1/msix.c 			 dd->unit, idx);
dd                160 drivers/infiniband/hw/hfi1/msix.c 			 dd->unit, idx);
dd                169 drivers/infiniband/hw/hfi1/msix.c 	irq = pci_irq_vector(dd->pcidev, nr);
dd                170 drivers/infiniband/hw/hfi1/msix.c 	ret = pci_request_irq(dd->pcidev, nr, handler, thread, arg, name);
dd                172 drivers/infiniband/hw/hfi1/msix.c 		dd_dev_err(dd,
dd                175 drivers/infiniband/hw/hfi1/msix.c 		spin_lock(&dd->msix_info.msix_lock);
dd                176 drivers/infiniband/hw/hfi1/msix.c 		__clear_bit(nr, dd->msix_info.in_use_msix);
dd                177 drivers/infiniband/hw/hfi1/msix.c 		spin_unlock(&dd->msix_info.msix_lock);
dd                185 drivers/infiniband/hw/hfi1/msix.c 	me = &dd->msix_info.msix_entries[nr];
dd                191 drivers/infiniband/hw/hfi1/msix.c 	ret = hfi1_get_irq_affinity(dd, me);
dd                193 drivers/infiniband/hw/hfi1/msix.c 		dd_dev_err(dd, "unable to pin IRQ %d\n", ret);
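
msix_request_irq() (msix.c:116-193) allocates a vector slot from a bitmap under msix_lock, requests the IRQ with pci_request_irq(), and gives the bit back on any later failure. The slot allocator in isolation:

    #include <linux/bitmap.h>
    #include <linux/spinlock.h>
    #include <linux/errno.h>

    /* Mirror of the find/set/clear dance above; __set_bit is the
     * non-atomic variant because the spinlock is held. */
    static int claim_slot(unsigned long *in_use, spinlock_t *lock, u32 max)
    {
            u32 nr;

            spin_lock(lock);
            nr = find_first_zero_bit(in_use, max);
            if (nr < max)
                    __set_bit(nr, in_use);
            spin_unlock(lock);

            return nr == max ? -ENOSPC : nr;
    }
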
dd                207 drivers/infiniband/hw/hfi1/msix.c 	nr = msix_request_irq(rcd->dd, rcd, receive_context_interrupt,
dd                219 drivers/infiniband/hw/hfi1/msix.c 	remap_intr(rcd->dd, IS_RCVAVAIL_START + rcd->ctxt, nr);
dd                233 drivers/infiniband/hw/hfi1/msix.c 	nr = msix_request_irq(sde->dd, sde, sdma_interrupt, NULL,
dd                238 drivers/infiniband/hw/hfi1/msix.c 	remap_sdma_interrupts(sde->dd, sde->this_idx, nr);
dd                248 drivers/infiniband/hw/hfi1/msix.c static void enable_sdma_srcs(struct hfi1_devdata *dd, int i)
dd                250 drivers/infiniband/hw/hfi1/msix.c 	set_intr_bits(dd, IS_SDMA_START + i, IS_SDMA_START + i, true);
dd                251 drivers/infiniband/hw/hfi1/msix.c 	set_intr_bits(dd, IS_SDMA_PROGRESS_START + i,
dd                253 drivers/infiniband/hw/hfi1/msix.c 	set_intr_bits(dd, IS_SDMA_IDLE_START + i, IS_SDMA_IDLE_START + i, true);
dd                254 drivers/infiniband/hw/hfi1/msix.c 	set_intr_bits(dd, IS_SDMAENG_ERR_START + i, IS_SDMAENG_ERR_START + i,
dd                265 drivers/infiniband/hw/hfi1/msix.c int msix_request_irqs(struct hfi1_devdata *dd)
dd                270 drivers/infiniband/hw/hfi1/msix.c 	ret = msix_request_irq(dd, dd, general_interrupt, NULL, 0, IRQ_GENERAL);
dd                274 drivers/infiniband/hw/hfi1/msix.c 	for (i = 0; i < dd->num_sdma; i++) {
dd                275 drivers/infiniband/hw/hfi1/msix.c 		struct sdma_engine *sde = &dd->per_sdma[i];
dd                280 drivers/infiniband/hw/hfi1/msix.c 		enable_sdma_srcs(sde->dd, i);
dd                283 drivers/infiniband/hw/hfi1/msix.c 	for (i = 0; i < dd->n_krcv_queues; i++) {
dd                284 drivers/infiniband/hw/hfi1/msix.c 		struct hfi1_ctxtdata *rcd = hfi1_rcd_get_by_index_safe(dd, i);
dd                302 drivers/infiniband/hw/hfi1/msix.c void msix_free_irq(struct hfi1_devdata *dd, u8 msix_intr)
dd                306 drivers/infiniband/hw/hfi1/msix.c 	if (msix_intr >= dd->msix_info.max_requested)
dd                309 drivers/infiniband/hw/hfi1/msix.c 	me = &dd->msix_info.msix_entries[msix_intr];
dd                314 drivers/infiniband/hw/hfi1/msix.c 	hfi1_put_irq_affinity(dd, me);
dd                315 drivers/infiniband/hw/hfi1/msix.c 	pci_free_irq(dd->pcidev, msix_intr, me->arg);
dd                319 drivers/infiniband/hw/hfi1/msix.c 	spin_lock(&dd->msix_info.msix_lock);
dd                320 drivers/infiniband/hw/hfi1/msix.c 	__clear_bit(msix_intr, dd->msix_info.in_use_msix);
dd                321 drivers/infiniband/hw/hfi1/msix.c 	spin_unlock(&dd->msix_info.msix_lock);
dd                330 drivers/infiniband/hw/hfi1/msix.c void msix_clean_up_interrupts(struct hfi1_devdata *dd)
dd                333 drivers/infiniband/hw/hfi1/msix.c 	struct hfi1_msix_entry *me = dd->msix_info.msix_entries;
dd                336 drivers/infiniband/hw/hfi1/msix.c 	for (i = 0; i < dd->msix_info.max_requested; i++, me++)
dd                337 drivers/infiniband/hw/hfi1/msix.c 		msix_free_irq(dd, i);
dd                340 drivers/infiniband/hw/hfi1/msix.c 	kfree(dd->msix_info.msix_entries);
dd                341 drivers/infiniband/hw/hfi1/msix.c 	dd->msix_info.msix_entries = NULL;
dd                342 drivers/infiniband/hw/hfi1/msix.c 	dd->msix_info.max_requested = 0;
dd                344 drivers/infiniband/hw/hfi1/msix.c 	pci_free_irq_vectors(dd->pcidev);
dd                351 drivers/infiniband/hw/hfi1/msix.c void msix_vnic_synchronize_irq(struct hfi1_devdata *dd)
dd                355 drivers/infiniband/hw/hfi1/msix.c 	for (i = 0; i < dd->vnic.num_ctxt; i++) {
dd                356 drivers/infiniband/hw/hfi1/msix.c 		struct hfi1_ctxtdata *rcd = dd->vnic.ctxt[i];
dd                359 drivers/infiniband/hw/hfi1/msix.c 		me = &dd->msix_info.msix_entries[rcd->msix_intr];
dd                 54 drivers/infiniband/hw/hfi1/msix.h int msix_initialize(struct hfi1_devdata *dd);
dd                 55 drivers/infiniband/hw/hfi1/msix.h int msix_request_irqs(struct hfi1_devdata *dd);
dd                 56 drivers/infiniband/hw/hfi1/msix.h void msix_clean_up_interrupts(struct hfi1_devdata *dd);
dd                 59 drivers/infiniband/hw/hfi1/msix.h void msix_free_irq(struct hfi1_devdata *dd, u8 msix_intr);
dd                 62 drivers/infiniband/hw/hfi1/msix.h void msix_vnic_synchronize_irq(struct hfi1_devdata *dd);
dd                 66 drivers/infiniband/hw/hfi1/pcie.c int hfi1_pcie_init(struct hfi1_devdata *dd)
dd                 69 drivers/infiniband/hw/hfi1/pcie.c 	struct pci_dev *pdev = dd->pcidev;
dd                 85 drivers/infiniband/hw/hfi1/pcie.c 		dd_dev_err(dd, "pci enable failed: error %d\n", -ret);
dd                 91 drivers/infiniband/hw/hfi1/pcie.c 		dd_dev_err(dd, "pci_request_regions fails: err %d\n", -ret);
dd                104 drivers/infiniband/hw/hfi1/pcie.c 			dd_dev_err(dd, "Unable to set DMA mask: %d\n", ret);
dd                112 drivers/infiniband/hw/hfi1/pcie.c 		dd_dev_err(dd, "Unable to set DMA consistent mask: %d\n", ret);
dd                143 drivers/infiniband/hw/hfi1/pcie.c int hfi1_pcie_ddinit(struct hfi1_devdata *dd, struct pci_dev *pdev)
dd                160 drivers/infiniband/hw/hfi1/pcie.c 		dd_dev_err(dd, "chip PIO range does not match\n");
dd                164 drivers/infiniband/hw/hfi1/pcie.c 	dd->kregbase1 = ioremap_nocache(addr, RCV_ARRAY);
dd                165 drivers/infiniband/hw/hfi1/pcie.c 	if (!dd->kregbase1) {
dd                166 drivers/infiniband/hw/hfi1/pcie.c 		dd_dev_err(dd, "UC mapping of kregbase1 failed\n");
dd                169 drivers/infiniband/hw/hfi1/pcie.c 	dd_dev_info(dd, "UC base1: %p for %x\n", dd->kregbase1, RCV_ARRAY);
dd                172 drivers/infiniband/hw/hfi1/pcie.c 	dd->revision = readq(dd->kregbase1 + CCE_REVISION);
dd                173 drivers/infiniband/hw/hfi1/pcie.c 	if (dd->revision == ~(u64)0) {
dd                174 drivers/infiniband/hw/hfi1/pcie.c 		dd_dev_err(dd, "Cannot read chip CSRs\n");
dd                178 drivers/infiniband/hw/hfi1/pcie.c 	rcv_array_count = readq(dd->kregbase1 + RCV_ARRAY_CNT);
dd                179 drivers/infiniband/hw/hfi1/pcie.c 	dd_dev_info(dd, "RcvArray count: %u\n", rcv_array_count);
dd                180 drivers/infiniband/hw/hfi1/pcie.c 	dd->base2_start  = RCV_ARRAY + rcv_array_count * 8;
dd                182 drivers/infiniband/hw/hfi1/pcie.c 	dd->kregbase2 = ioremap_nocache(
dd                183 drivers/infiniband/hw/hfi1/pcie.c 		addr + dd->base2_start,
dd                184 drivers/infiniband/hw/hfi1/pcie.c 		TXE_PIO_SEND - dd->base2_start);
dd                185 drivers/infiniband/hw/hfi1/pcie.c 	if (!dd->kregbase2) {
dd                186 drivers/infiniband/hw/hfi1/pcie.c 		dd_dev_err(dd, "UC mapping of kregbase2 failed\n");
dd                189 drivers/infiniband/hw/hfi1/pcie.c 	dd_dev_info(dd, "UC base2: %p for %x\n", dd->kregbase2,
dd                190 drivers/infiniband/hw/hfi1/pcie.c 		    TXE_PIO_SEND - dd->base2_start);
dd                192 drivers/infiniband/hw/hfi1/pcie.c 	dd->piobase = ioremap_wc(addr + TXE_PIO_SEND, TXE_PIO_SIZE);
dd                193 drivers/infiniband/hw/hfi1/pcie.c 	if (!dd->piobase) {
dd                194 drivers/infiniband/hw/hfi1/pcie.c 		dd_dev_err(dd, "WC mapping of send buffers failed\n");
dd                197 drivers/infiniband/hw/hfi1/pcie.c 	dd_dev_info(dd, "WC piobase: %p for %x\n", dd->piobase, TXE_PIO_SIZE);
dd                199 drivers/infiniband/hw/hfi1/pcie.c 	dd->physaddr = addr;        /* used for io_remap, etc. */
dd                205 drivers/infiniband/hw/hfi1/pcie.c 	dd->rcvarray_wc = ioremap_wc(addr + RCV_ARRAY,
dd                207 drivers/infiniband/hw/hfi1/pcie.c 	if (!dd->rcvarray_wc) {
dd                208 drivers/infiniband/hw/hfi1/pcie.c 		dd_dev_err(dd, "WC mapping of receive array failed\n");
dd                211 drivers/infiniband/hw/hfi1/pcie.c 	dd_dev_info(dd, "WC RcvArray: %p for %x\n",
dd                212 drivers/infiniband/hw/hfi1/pcie.c 		    dd->rcvarray_wc, rcv_array_count * 8);
dd                214 drivers/infiniband/hw/hfi1/pcie.c 	dd->flags |= HFI1_PRESENT;	/* chip.c CSR routines now work */
dd                218 drivers/infiniband/hw/hfi1/pcie.c 	hfi1_pcie_ddcleanup(dd);
dd                227 drivers/infiniband/hw/hfi1/pcie.c void hfi1_pcie_ddcleanup(struct hfi1_devdata *dd)
dd                229 drivers/infiniband/hw/hfi1/pcie.c 	dd->flags &= ~HFI1_PRESENT;
dd                230 drivers/infiniband/hw/hfi1/pcie.c 	if (dd->kregbase1)
dd                231 drivers/infiniband/hw/hfi1/pcie.c 		iounmap(dd->kregbase1);
dd                232 drivers/infiniband/hw/hfi1/pcie.c 	dd->kregbase1 = NULL;
dd                233 drivers/infiniband/hw/hfi1/pcie.c 	if (dd->kregbase2)
dd                234 drivers/infiniband/hw/hfi1/pcie.c 		iounmap(dd->kregbase2);
dd                235 drivers/infiniband/hw/hfi1/pcie.c 	dd->kregbase2 = NULL;
dd                236 drivers/infiniband/hw/hfi1/pcie.c 	if (dd->rcvarray_wc)
dd                237 drivers/infiniband/hw/hfi1/pcie.c 		iounmap(dd->rcvarray_wc);
dd                238 drivers/infiniband/hw/hfi1/pcie.c 	dd->rcvarray_wc = NULL;
dd                239 drivers/infiniband/hw/hfi1/pcie.c 	if (dd->piobase)
dd                240 drivers/infiniband/hw/hfi1/pcie.c 		iounmap(dd->piobase);
dd                241 drivers/infiniband/hw/hfi1/pcie.c 	dd->piobase = NULL;
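
hfi1_pcie_ddinit()/hfi1_pcie_ddcleanup() (pcie.c:143-241) map the chip in pieces — uncached kregbase1/kregbase2 for CSRs, write-combining piobase and rcvarray_wc for the send buffers and receive array — and the cleanup NULL-checks and re-NULLs each pointer so it is safe from both the ddinit error path and normal teardown. The idempotent unmap helper, as a sketch:

    #include <linux/io.h>

    /* Safe to call twice: the pointer is cleared after the unmap. */
    static void unmap_one(void __iomem **p)
    {
            if (*p)
                    iounmap(*p);
            *p = NULL;
    }
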
dd                271 drivers/infiniband/hw/hfi1/pcie.c static void update_lbus_info(struct hfi1_devdata *dd)
dd                276 drivers/infiniband/hw/hfi1/pcie.c 	ret = pcie_capability_read_word(dd->pcidev, PCI_EXP_LNKSTA, &linkstat);
dd                278 drivers/infiniband/hw/hfi1/pcie.c 		dd_dev_err(dd, "Unable to read from PCI config\n");
dd                282 drivers/infiniband/hw/hfi1/pcie.c 	dd->lbus_width = extract_width(linkstat);
dd                283 drivers/infiniband/hw/hfi1/pcie.c 	dd->lbus_speed = extract_speed(linkstat);
dd                284 drivers/infiniband/hw/hfi1/pcie.c 	snprintf(dd->lbus_info, sizeof(dd->lbus_info),
dd                285 drivers/infiniband/hw/hfi1/pcie.c 		 "PCIe,%uMHz,x%u", dd->lbus_speed, dd->lbus_width);
dd                292 drivers/infiniband/hw/hfi1/pcie.c int pcie_speeds(struct hfi1_devdata *dd)
dd                295 drivers/infiniband/hw/hfi1/pcie.c 	struct pci_dev *parent = dd->pcidev->bus->self;
dd                298 drivers/infiniband/hw/hfi1/pcie.c 	if (!pci_is_pcie(dd->pcidev)) {
dd                299 drivers/infiniband/hw/hfi1/pcie.c 		dd_dev_err(dd, "Can't find PCI Express capability!\n");
dd                304 drivers/infiniband/hw/hfi1/pcie.c 	dd->link_gen3_capable = 1;
dd                306 drivers/infiniband/hw/hfi1/pcie.c 	ret = pcie_capability_read_dword(dd->pcidev, PCI_EXP_LNKCAP, &linkcap);
dd                308 drivers/infiniband/hw/hfi1/pcie.c 		dd_dev_err(dd, "Unable to read from PCI config\n");
dd                313 drivers/infiniband/hw/hfi1/pcie.c 		dd_dev_info(dd,
dd                316 drivers/infiniband/hw/hfi1/pcie.c 		dd->link_gen3_capable = 0;
dd                323 drivers/infiniband/hw/hfi1/pcie.c 	    (dd->pcidev->bus->max_bus_speed == PCIE_SPEED_2_5GT ||
dd                324 drivers/infiniband/hw/hfi1/pcie.c 	     dd->pcidev->bus->max_bus_speed == PCIE_SPEED_5_0GT)) {
dd                325 drivers/infiniband/hw/hfi1/pcie.c 		dd_dev_info(dd, "Parent PCIe bridge does not support Gen3\n");
dd                326 drivers/infiniband/hw/hfi1/pcie.c 		dd->link_gen3_capable = 0;
dd                330 drivers/infiniband/hw/hfi1/pcie.c 	update_lbus_info(dd);
dd                332 drivers/infiniband/hw/hfi1/pcie.c 	dd_dev_info(dd, "%s\n", dd->lbus_info);
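
pcie_speeds() (pcie.c:292-332) gates Gen3 on three conditions: the device is PCIe at all, its own LNKCAP advertises 8.0 GT/s, and the parent bridge is not capped at 2.5 or 5.0 GT/s. A sketch of that predicate using the standard capability helpers:

    #include <linux/pci.h>

    static bool gen3_capable(struct pci_dev *pdev)
    {
            u32 linkcap;

            if (!pci_is_pcie(pdev))
                    return false;
            if (pcie_capability_read_dword(pdev, PCI_EXP_LNKCAP, &linkcap))
                    return false;
            if ((linkcap & PCI_EXP_LNKCAP_SLS) != PCI_EXP_LNKCAP_SLS_8_0GB)
                    return false;           /* device caps out below Gen3 */
            /* A parent bridge capped at Gen1/Gen2 blocks training too. */
            return !(pdev->bus->max_bus_speed == PCIE_SPEED_2_5GT ||
                     pdev->bus->max_bus_speed == PCIE_SPEED_5_0GT);
    }
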
dd                338 drivers/infiniband/hw/hfi1/pcie.c int restore_pci_variables(struct hfi1_devdata *dd)
dd                342 drivers/infiniband/hw/hfi1/pcie.c 	ret = pci_write_config_word(dd->pcidev, PCI_COMMAND, dd->pci_command);
dd                346 drivers/infiniband/hw/hfi1/pcie.c 	ret = pci_write_config_dword(dd->pcidev, PCI_BASE_ADDRESS_0,
dd                347 drivers/infiniband/hw/hfi1/pcie.c 				     dd->pcibar0);
dd                351 drivers/infiniband/hw/hfi1/pcie.c 	ret = pci_write_config_dword(dd->pcidev, PCI_BASE_ADDRESS_1,
dd                352 drivers/infiniband/hw/hfi1/pcie.c 				     dd->pcibar1);
dd                356 drivers/infiniband/hw/hfi1/pcie.c 	ret = pci_write_config_dword(dd->pcidev, PCI_ROM_ADDRESS, dd->pci_rom);
dd                360 drivers/infiniband/hw/hfi1/pcie.c 	ret = pcie_capability_write_word(dd->pcidev, PCI_EXP_DEVCTL,
dd                361 drivers/infiniband/hw/hfi1/pcie.c 					 dd->pcie_devctl);
dd                365 drivers/infiniband/hw/hfi1/pcie.c 	ret = pcie_capability_write_word(dd->pcidev, PCI_EXP_LNKCTL,
dd                366 drivers/infiniband/hw/hfi1/pcie.c 					 dd->pcie_lnkctl);
dd                370 drivers/infiniband/hw/hfi1/pcie.c 	ret = pcie_capability_write_word(dd->pcidev, PCI_EXP_DEVCTL2,
dd                371 drivers/infiniband/hw/hfi1/pcie.c 					 dd->pcie_devctl2);
dd                375 drivers/infiniband/hw/hfi1/pcie.c 	ret = pci_write_config_dword(dd->pcidev, PCI_CFG_MSIX0, dd->pci_msix0);
dd                379 drivers/infiniband/hw/hfi1/pcie.c 	if (pci_find_ext_capability(dd->pcidev, PCI_EXT_CAP_ID_TPH)) {
dd                380 drivers/infiniband/hw/hfi1/pcie.c 		ret = pci_write_config_dword(dd->pcidev, PCIE_CFG_TPH2,
dd                381 drivers/infiniband/hw/hfi1/pcie.c 					     dd->pci_tph2);
dd                388 drivers/infiniband/hw/hfi1/pcie.c 	dd_dev_err(dd, "Unable to write to PCI config\n");
dd                393 drivers/infiniband/hw/hfi1/pcie.c int save_pci_variables(struct hfi1_devdata *dd)
dd                397 drivers/infiniband/hw/hfi1/pcie.c 	ret = pci_read_config_dword(dd->pcidev, PCI_BASE_ADDRESS_0,
dd                398 drivers/infiniband/hw/hfi1/pcie.c 				    &dd->pcibar0);
dd                402 drivers/infiniband/hw/hfi1/pcie.c 	ret = pci_read_config_dword(dd->pcidev, PCI_BASE_ADDRESS_1,
dd                403 drivers/infiniband/hw/hfi1/pcie.c 				    &dd->pcibar1);
dd                407 drivers/infiniband/hw/hfi1/pcie.c 	ret = pci_read_config_dword(dd->pcidev, PCI_ROM_ADDRESS, &dd->pci_rom);
dd                411 drivers/infiniband/hw/hfi1/pcie.c 	ret = pci_read_config_word(dd->pcidev, PCI_COMMAND, &dd->pci_command);
dd                415 drivers/infiniband/hw/hfi1/pcie.c 	ret = pcie_capability_read_word(dd->pcidev, PCI_EXP_DEVCTL,
dd                416 drivers/infiniband/hw/hfi1/pcie.c 					&dd->pcie_devctl);
dd                420 drivers/infiniband/hw/hfi1/pcie.c 	ret = pcie_capability_read_word(dd->pcidev, PCI_EXP_LNKCTL,
dd                421 drivers/infiniband/hw/hfi1/pcie.c 					&dd->pcie_lnkctl);
dd                425 drivers/infiniband/hw/hfi1/pcie.c 	ret = pcie_capability_read_word(dd->pcidev, PCI_EXP_DEVCTL2,
dd                426 drivers/infiniband/hw/hfi1/pcie.c 					&dd->pcie_devctl2);
dd                430 drivers/infiniband/hw/hfi1/pcie.c 	ret = pci_read_config_dword(dd->pcidev, PCI_CFG_MSIX0, &dd->pci_msix0);
dd                434 drivers/infiniband/hw/hfi1/pcie.c 	if (pci_find_ext_capability(dd->pcidev, PCI_EXT_CAP_ID_TPH)) {
dd                435 drivers/infiniband/hw/hfi1/pcie.c 		ret = pci_read_config_dword(dd->pcidev, PCIE_CFG_TPH2,
dd                436 drivers/infiniband/hw/hfi1/pcie.c 					    &dd->pci_tph2);
dd                443 drivers/infiniband/hw/hfi1/pcie.c 	dd_dev_err(dd, "Unable to read from PCI config\n");
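
restore_pci_variables() and save_pci_variables() above are mirror images: every config-space accessor is checked, and all failures funnel into one error label with a single "Unable to read/write" message. A compilable sketch of that shape, with stub accessors standing in for the kernel's pci_read_config_* API (the offsets are the standard type-0 header ones):

/* Sketch of the mirrored save/restore pattern: check every accessor,
 * funnel all failures into one error label.  stub_read16/32 are
 * stand-ins, not the kernel API. */
#include <stdint.h>
#include <stdio.h>

struct saved_cfg {
	uint16_t command;
	uint32_t bar0, bar1, rom;
};

static int stub_read16(int reg, uint16_t *v) { (void)reg; *v = 0; return 0; }
static int stub_read32(int reg, uint32_t *v) { (void)reg; *v = 0; return 0; }

static int save_cfg(struct saved_cfg *s)
{
	int ret;

	ret = stub_read16(0x04, &s->command);  /* PCI_COMMAND */
	if (ret)
		goto error;
	ret = stub_read32(0x10, &s->bar0);     /* PCI_BASE_ADDRESS_0 */
	if (ret)
		goto error;
	ret = stub_read32(0x14, &s->bar1);     /* PCI_BASE_ADDRESS_1 */
	if (ret)
		goto error;
	ret = stub_read32(0x30, &s->rom);      /* PCI_ROM_ADDRESS */
	if (ret)
		goto error;
	return 0;

error:
	fprintf(stderr, "Unable to read from PCI config\n");
	return ret;
}

int main(void)
{
	struct saved_cfg s;

	return save_cfg(&s);
}
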
dd                460 drivers/infiniband/hw/hfi1/pcie.c void tune_pcie_caps(struct hfi1_devdata *dd)
dd                471 drivers/infiniband/hw/hfi1/pcie.c 	ret = pcie_capability_read_word(dd->pcidev, PCI_EXP_DEVCTL, &ectl);
dd                473 drivers/infiniband/hw/hfi1/pcie.c 		dd_dev_info(dd, "Enabling PCIe extended tags\n");
dd                475 drivers/infiniband/hw/hfi1/pcie.c 		ret = pcie_capability_write_word(dd->pcidev,
dd                478 drivers/infiniband/hw/hfi1/pcie.c 			dd_dev_info(dd, "Unable to write to PCI config\n");
dd                481 drivers/infiniband/hw/hfi1/pcie.c 	parent = dd->pcidev->bus->self;
dd                487 drivers/infiniband/hw/hfi1/pcie.c 		dd_dev_info(dd, "Parent not found\n");
dd                491 drivers/infiniband/hw/hfi1/pcie.c 		dd_dev_info(dd, "Parent not root\n");
dd                495 drivers/infiniband/hw/hfi1/pcie.c 		dd_dev_info(dd, "Parent is not PCI Express capable\n");
dd                498 drivers/infiniband/hw/hfi1/pcie.c 	if (!pci_is_pcie(dd->pcidev)) {
dd                499 drivers/infiniband/hw/hfi1/pcie.c 		dd_dev_info(dd, "PCI device is not PCI Express capable\n");
dd                505 drivers/infiniband/hw/hfi1/pcie.c 	ep_mpss = dd->pcidev->pcie_mpss;
dd                506 drivers/infiniband/hw/hfi1/pcie.c 	ep_mps = ffs(pcie_get_mps(dd->pcidev)) - 8;
dd                523 drivers/infiniband/hw/hfi1/pcie.c 		pcie_set_mps(dd->pcidev, 128 << ep_mps);
dd                537 drivers/infiniband/hw/hfi1/pcie.c 	ep_mrrs = pcie_get_readrq(dd->pcidev);
dd                545 drivers/infiniband/hw/hfi1/pcie.c 		pcie_set_readrq(dd->pcidev, ep_mrrs);
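
The `ffs(pcie_get_mps(dd->pcidev)) - 8` line above converts a max-payload-size byte count into its device-control encoding: MPS is stored as log2(bytes/128), so 128 bytes maps to code 0, 256 to code 1, and so on, and `128 << code` goes back the other way. A worked example using the POSIX ffs():

/* Worked example of the MPS arithmetic: ffs(bytes) - 8 recovers the
 * payload-size code, 128 << code recovers the byte count. */
#include <stdio.h>
#include <strings.h>

int main(void)
{
	int bytes_list[] = { 128, 256, 512, 1024 };

	for (int i = 0; i < 4; i++) {
		int bytes = bytes_list[i];
		int code = ffs(bytes) - 8;  /* 128 -> 0, 256 -> 1, ... */

		printf("mps %4d bytes -> code %d -> %d bytes\n",
		       bytes, code, 128 << code);
	}
	return 0;
}
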
dd                558 drivers/infiniband/hw/hfi1/pcie.c 	struct hfi1_devdata *dd = pci_get_drvdata(pdev);
dd                563 drivers/infiniband/hw/hfi1/pcie.c 		dd_dev_info(dd, "State Normal, ignoring\n");
dd                567 drivers/infiniband/hw/hfi1/pcie.c 		dd_dev_info(dd, "State Frozen, requesting reset\n");
dd                573 drivers/infiniband/hw/hfi1/pcie.c 		if (dd) {
dd                574 drivers/infiniband/hw/hfi1/pcie.c 			dd_dev_info(dd, "State Permanent Failure, disabling\n");
dd                576 drivers/infiniband/hw/hfi1/pcie.c 			dd->flags &= ~HFI1_PRESENT;
dd                577 drivers/infiniband/hw/hfi1/pcie.c 			hfi1_disable_after_error(dd);
dd                584 drivers/infiniband/hw/hfi1/pcie.c 		dd_dev_info(dd, "HFI1 PCI errors detected (state %d)\n",
dd                595 drivers/infiniband/hw/hfi1/pcie.c 	struct hfi1_devdata *dd = pci_get_drvdata(pdev);
dd                598 drivers/infiniband/hw/hfi1/pcie.c 	if (dd && dd->pport) {
dd                599 drivers/infiniband/hw/hfi1/pcie.c 		words = read_port_cntr(dd->pport, C_RX_WORDS, CNTR_INVALID_VL);
dd                602 drivers/infiniband/hw/hfi1/pcie.c 		dd_dev_info(dd,
dd                612 drivers/infiniband/hw/hfi1/pcie.c 	struct hfi1_devdata *dd = pci_get_drvdata(pdev);
dd                614 drivers/infiniband/hw/hfi1/pcie.c 	dd_dev_info(dd, "HFI1 slot_reset function called, ignored\n");
dd                621 drivers/infiniband/hw/hfi1/pcie.c 	struct hfi1_devdata *dd = pci_get_drvdata(pdev);
dd                623 drivers/infiniband/hw/hfi1/pcie.c 	dd_dev_info(dd, "HFI1 resume function called\n");
dd                629 drivers/infiniband/hw/hfi1/pcie.c 	hfi1_init(dd, 1); /* same as re-init after reset */
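
The pcie.c entries from lines 558-629 above are the driver's AER callbacks: error_detected() maps the channel state to a recovery verdict (normal is ignored, frozen requests a reset, permanent failure disables the device), mmio_enabled() samples a port counter, slot_reset() logs and ignores, and resume() re-runs init. A stand-alone model of the error_detected() dispatch; the enum names mirror the kernel's pci_channel_state_t and pci_ers_result_t values but this is not the driver's code:

/* Model of the AER error_detected() dispatch: channel state in,
 * recovery verdict out, per the log strings in the index above. */
#include <stdio.h>

enum channel_state { STATE_NORMAL, STATE_FROZEN, STATE_PERM_FAILURE };
enum ers_result { ERS_CAN_RECOVER, ERS_NEED_RESET, ERS_DISCONNECT };

static enum ers_result error_detected(enum channel_state state)
{
	switch (state) {
	case STATE_NORMAL:        /* "State Normal, ignoring" */
		return ERS_CAN_RECOVER;
	case STATE_FROZEN:        /* "State Frozen, requesting reset" */
		return ERS_NEED_RESET;
	case STATE_PERM_FAILURE:  /* "State Permanent Failure, disabling" */
	default:
		return ERS_DISCONNECT;
	}
}

int main(void)
{
	printf("frozen -> %d\n", error_detected(STATE_FROZEN));
	return 0;
}
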
dd                766 drivers/infiniband/hw/hfi1/pcie.c static int load_eq_table(struct hfi1_devdata *dd, const u8 eq[11][3], u8 fs,
dd                769 drivers/infiniband/hw/hfi1/pcie.c 	struct pci_dev *pdev = dd->pcidev;
dd                786 drivers/infiniband/hw/hfi1/pcie.c 		ret = pci_read_config_dword(dd->pcidev,
dd                789 drivers/infiniband/hw/hfi1/pcie.c 			dd_dev_err(dd, "Unable to read from PCI config\n");
dd                797 drivers/infiniband/hw/hfi1/pcie.c 				dd_dev_err(dd,
dd                799 drivers/infiniband/hw/hfi1/pcie.c 				dd_dev_err(dd, "         prec   attn   post\n");
dd                801 drivers/infiniband/hw/hfi1/pcie.c 			dd_dev_err(dd, "   p%02d:   %02x     %02x     %02x\n",
dd                804 drivers/infiniband/hw/hfi1/pcie.c 			dd_dev_err(dd, "            %02x     %02x     %02x\n",
dd                819 drivers/infiniband/hw/hfi1/pcie.c static void pcie_post_steps(struct hfi1_devdata *dd)
dd                823 drivers/infiniband/hw/hfi1/pcie.c 	set_sbus_fast_mode(dd);
dd                832 drivers/infiniband/hw/hfi1/pcie.c 		sbus_request(dd, pcie_pcs_addrs[dd->hfi1_id][i],
dd                836 drivers/infiniband/hw/hfi1/pcie.c 	clear_sbus_fast_mode(dd);
dd                845 drivers/infiniband/hw/hfi1/pcie.c static int trigger_sbr(struct hfi1_devdata *dd)
dd                847 drivers/infiniband/hw/hfi1/pcie.c 	struct pci_dev *dev = dd->pcidev;
dd                852 drivers/infiniband/hw/hfi1/pcie.c 		dd_dev_err(dd, "%s: no parent device\n", __func__);
dd                859 drivers/infiniband/hw/hfi1/pcie.c 			dd_dev_err(dd,
dd                876 drivers/infiniband/hw/hfi1/pcie.c static void write_gasket_interrupt(struct hfi1_devdata *dd, int index,
dd                879 drivers/infiniband/hw/hfi1/pcie.c 	write_csr(dd, ASIC_PCIE_SD_INTRPT_LIST + (index * 8),
dd                887 drivers/infiniband/hw/hfi1/pcie.c static void arm_gasket_logic(struct hfi1_devdata *dd)
dd                891 drivers/infiniband/hw/hfi1/pcie.c 	reg = (((u64)1 << dd->hfi1_id) <<
dd                893 drivers/infiniband/hw/hfi1/pcie.c 	      ((u64)pcie_serdes_broadcast[dd->hfi1_id] <<
dd                898 drivers/infiniband/hw/hfi1/pcie.c 	write_csr(dd, ASIC_PCIE_SD_HOST_CMD, reg);
dd                900 drivers/infiniband/hw/hfi1/pcie.c 	read_csr(dd, ASIC_PCIE_SD_HOST_CMD);
dd                922 drivers/infiniband/hw/hfi1/pcie.c static void write_xmt_margin(struct hfi1_devdata *dd, const char *fname)
dd                930 drivers/infiniband/hw/hfi1/pcie.c 	pcie_ctrl = read_csr(dd, CCE_PCIE_CTRL);
dd                940 drivers/infiniband/hw/hfi1/pcie.c 	if (dd->pcidev->device == PCI_DEVICE_ID_INTEL1) { /* integrated */
dd                954 drivers/infiniband/hw/hfi1/pcie.c 		if (is_ax(dd)) {
dd                973 drivers/infiniband/hw/hfi1/pcie.c 		write_csr(dd, CCE_PCIE_CTRL, pcie_ctrl);
dd                976 drivers/infiniband/hw/hfi1/pcie.c 	dd_dev_dbg(dd, "%s: program XMT margin, CcePcieCtrl 0x%llx\n",
dd                983 drivers/infiniband/hw/hfi1/pcie.c int do_pcie_gen3_transition(struct hfi1_devdata *dd)
dd                985 drivers/infiniband/hw/hfi1/pcie.c 	struct pci_dev *parent = dd->pcidev->bus->self;
dd               1005 drivers/infiniband/hw/hfi1/pcie.c 	if (dd->icode != ICODE_RTL_SILICON)
dd               1019 drivers/infiniband/hw/hfi1/pcie.c 		dd_dev_info(dd, "%s: Skipping PCIe transition\n", __func__);
dd               1024 drivers/infiniband/hw/hfi1/pcie.c 	if (dd->lbus_speed == target_speed) {
dd               1025 drivers/infiniband/hw/hfi1/pcie.c 		dd_dev_info(dd, "%s: PCIe already at gen%d, %s\n", __func__,
dd               1037 drivers/infiniband/hw/hfi1/pcie.c 		dd_dev_info(dd, "%s: No upstream, Can't do gen3 transition\n",
dd               1043 drivers/infiniband/hw/hfi1/pcie.c 	target_width = dd->lbus_width;
dd               1053 drivers/infiniband/hw/hfi1/pcie.c 	if (pcie_target == 3 && !dd->link_gen3_capable) {
dd               1054 drivers/infiniband/hw/hfi1/pcie.c 		dd_dev_err(dd, "The PCIe link is not Gen3 capable\n");
dd               1060 drivers/infiniband/hw/hfi1/pcie.c 	ret = acquire_chip_resource(dd, CR_SBUS, SBUS_TIMEOUT);
dd               1062 drivers/infiniband/hw/hfi1/pcie.c 		dd_dev_err(dd, "%s: unable to acquire SBus resource\n",
dd               1068 drivers/infiniband/hw/hfi1/pcie.c 	therm = read_csr(dd, ASIC_CFG_THERM_POLL_EN);
dd               1070 drivers/infiniband/hw/hfi1/pcie.c 		write_csr(dd, ASIC_CFG_THERM_POLL_EN, 0x0);
dd               1072 drivers/infiniband/hw/hfi1/pcie.c 		dd_dev_info(dd, "%s: Disabled therm polling\n",
dd               1081 drivers/infiniband/hw/hfi1/pcie.c 	dd_dev_info(dd, "%s: downloading firmware\n", __func__);
dd               1082 drivers/infiniband/hw/hfi1/pcie.c 	ret = load_pcie_firmware(dd);
dd               1090 drivers/infiniband/hw/hfi1/pcie.c 	dd_dev_info(dd, "%s: setting PCIe registers\n", __func__);
dd               1100 drivers/infiniband/hw/hfi1/pcie.c 	pci_write_config_dword(dd->pcidev, PCIE_CFG_SPCIE2, 0xffff);
dd               1111 drivers/infiniband/hw/hfi1/pcie.c 	pci_write_config_dword(dd->pcidev, PCIE_CFG_REG_PL2, reg32);
dd               1121 drivers/infiniband/hw/hfi1/pcie.c 	pci_write_config_dword(dd->pcidev, PCIE_CFG_REG_PL100, reg32);
dd               1131 drivers/infiniband/hw/hfi1/pcie.c 	if (dd->pcidev->device == PCI_DEVICE_ID_INTEL0) { /* discrete */
dd               1152 drivers/infiniband/hw/hfi1/pcie.c 	pci_write_config_dword(dd->pcidev, PCIE_CFG_REG_PL101,
dd               1157 drivers/infiniband/hw/hfi1/pcie.c 	ret = load_eq_table(dd, eq, fs, div);
dd               1169 drivers/infiniband/hw/hfi1/pcie.c 		dd_dev_err(dd, "%s: Invalid Eq Pset %u, setting to %d\n",
dd               1173 drivers/infiniband/hw/hfi1/pcie.c 	dd_dev_info(dd, "%s: using EQ Pset %u\n", __func__, pset);
dd               1174 drivers/infiniband/hw/hfi1/pcie.c 	pci_write_config_dword(dd->pcidev, PCIE_CFG_REG_PL106,
dd               1183 drivers/infiniband/hw/hfi1/pcie.c 	dd_dev_info(dd, "%s: doing pcie post steps\n", __func__);
dd               1184 drivers/infiniband/hw/hfi1/pcie.c 	pcie_post_steps(dd);
dd               1190 drivers/infiniband/hw/hfi1/pcie.c 	write_gasket_interrupt(dd, intnum++, 0x0006, 0x0050);
dd               1193 drivers/infiniband/hw/hfi1/pcie.c 	write_gasket_interrupt(dd, intnum++, 0x0026,
dd               1199 drivers/infiniband/hw/hfi1/pcie.c 	write_gasket_interrupt(dd, intnum++, 0x0026, 0x5202);
dd               1209 drivers/infiniband/hw/hfi1/pcie.c 		write_gasket_interrupt(dd, intnum++, 0x0026, 0x0200 | pcie_dc);
dd               1210 drivers/infiniband/hw/hfi1/pcie.c 		write_gasket_interrupt(dd, intnum++, 0x0026, 0x0100 | pcie_lf);
dd               1211 drivers/infiniband/hw/hfi1/pcie.c 		write_gasket_interrupt(dd, intnum++, 0x0026, 0x0000 | pcie_hf);
dd               1212 drivers/infiniband/hw/hfi1/pcie.c 		write_gasket_interrupt(dd, intnum++, 0x0026, 0x5500 | pcie_bw);
dd               1216 drivers/infiniband/hw/hfi1/pcie.c 	write_gasket_interrupt(dd, intnum++, 0x0000, 0x0000);
dd               1221 drivers/infiniband/hw/hfi1/pcie.c 	write_xmt_margin(dd, __func__);
dd               1227 drivers/infiniband/hw/hfi1/pcie.c 	dd_dev_info(dd, "%s: clearing ASPM\n", __func__);
dd               1228 drivers/infiniband/hw/hfi1/pcie.c 	aspm_hw_disable_l1(dd);
dd               1246 drivers/infiniband/hw/hfi1/pcie.c 	dd_dev_info(dd, "%s: setting parent target link speed\n", __func__);
dd               1249 drivers/infiniband/hw/hfi1/pcie.c 		dd_dev_err(dd, "Unable to read from PCI config\n");
dd               1254 drivers/infiniband/hw/hfi1/pcie.c 	dd_dev_info(dd, "%s: ..old link control2: 0x%x\n", __func__,
dd               1260 drivers/infiniband/hw/hfi1/pcie.c 		dd_dev_info(dd, "%s: ..new link control2: 0x%x\n", __func__,
dd               1265 drivers/infiniband/hw/hfi1/pcie.c 			dd_dev_err(dd, "Unable to write to PCI config\n");
dd               1270 drivers/infiniband/hw/hfi1/pcie.c 		dd_dev_info(dd, "%s: ..target speed is OK\n", __func__);
dd               1273 drivers/infiniband/hw/hfi1/pcie.c 	dd_dev_info(dd, "%s: setting target link speed\n", __func__);
dd               1274 drivers/infiniband/hw/hfi1/pcie.c 	ret = pcie_capability_read_word(dd->pcidev, PCI_EXP_LNKCTL2, &lnkctl2);
dd               1276 drivers/infiniband/hw/hfi1/pcie.c 		dd_dev_err(dd, "Unable to read from PCI config\n");
dd               1281 drivers/infiniband/hw/hfi1/pcie.c 	dd_dev_info(dd, "%s: ..old link control2: 0x%x\n", __func__,
dd               1285 drivers/infiniband/hw/hfi1/pcie.c 	dd_dev_info(dd, "%s: ..new link control2: 0x%x\n", __func__,
dd               1287 drivers/infiniband/hw/hfi1/pcie.c 	ret = pcie_capability_write_word(dd->pcidev, PCI_EXP_LNKCTL2, lnkctl2);
dd               1289 drivers/infiniband/hw/hfi1/pcie.c 		dd_dev_err(dd, "Unable to write to PCI config\n");
dd               1296 drivers/infiniband/hw/hfi1/pcie.c 	write_csr(dd, CCE_DC_CTRL, CCE_DC_CTRL_DC_RESET_SMASK);
dd               1297 drivers/infiniband/hw/hfi1/pcie.c 	(void)read_csr(dd, CCE_DC_CTRL); /* DC reset hold */
dd               1299 drivers/infiniband/hw/hfi1/pcie.c 	fw_ctrl = read_csr(dd, MISC_CFG_FW_CTRL);
dd               1301 drivers/infiniband/hw/hfi1/pcie.c 	dd_dev_info(dd, "%s: arming gasket logic\n", __func__);
dd               1302 drivers/infiniband/hw/hfi1/pcie.c 	arm_gasket_logic(dd);
dd               1317 drivers/infiniband/hw/hfi1/pcie.c 	dd_dev_info(dd, "%s: calling trigger_sbr\n", __func__);
dd               1318 drivers/infiniband/hw/hfi1/pcie.c 	ret = trigger_sbr(dd);
dd               1325 drivers/infiniband/hw/hfi1/pcie.c 	ret = pci_read_config_word(dd->pcidev, PCI_VENDOR_ID, &vendor);
dd               1327 drivers/infiniband/hw/hfi1/pcie.c 		dd_dev_info(dd,
dd               1334 drivers/infiniband/hw/hfi1/pcie.c 		dd_dev_info(dd, "%s: VendorID is all 1s after SBR\n", __func__);
dd               1341 drivers/infiniband/hw/hfi1/pcie.c 	dd_dev_info(dd, "%s: calling restore_pci_variables\n", __func__);
dd               1342 drivers/infiniband/hw/hfi1/pcie.c 	ret = restore_pci_variables(dd);
dd               1344 drivers/infiniband/hw/hfi1/pcie.c 		dd_dev_err(dd, "%s: Could not restore PCI variables\n",
dd               1351 drivers/infiniband/hw/hfi1/pcie.c 	write_csr(dd, MISC_CFG_FW_CTRL, fw_ctrl);
dd               1363 drivers/infiniband/hw/hfi1/pcie.c 	reg = read_csr(dd, ASIC_PCIE_SD_HOST_STATUS);
dd               1364 drivers/infiniband/hw/hfi1/pcie.c 	dd_dev_info(dd, "%s: gasket block status: 0x%llx\n", __func__, reg);
dd               1366 drivers/infiniband/hw/hfi1/pcie.c 		dd_dev_err(dd, "SBR failed - unable to read from device\n");
dd               1373 drivers/infiniband/hw/hfi1/pcie.c 	write_csr(dd, CCE_DC_CTRL, 0);
dd               1376 drivers/infiniband/hw/hfi1/pcie.c 	setextled(dd, 0);
dd               1379 drivers/infiniband/hw/hfi1/pcie.c 	ret = pci_read_config_dword(dd->pcidev, PCIE_CFG_SPCIE2, &reg32);
dd               1381 drivers/infiniband/hw/hfi1/pcie.c 		dd_dev_err(dd, "Unable to read from PCI config\n");
dd               1386 drivers/infiniband/hw/hfi1/pcie.c 	dd_dev_info(dd, "%s: per-lane errors: 0x%x\n", __func__, reg32);
dd               1391 drivers/infiniband/hw/hfi1/pcie.c 	if ((status & (1 << dd->hfi1_id)) == 0) {
dd               1392 drivers/infiniband/hw/hfi1/pcie.c 		dd_dev_err(dd,
dd               1394 drivers/infiniband/hw/hfi1/pcie.c 			   __func__, status, 1 << dd->hfi1_id);
dd               1403 drivers/infiniband/hw/hfi1/pcie.c 		dd_dev_err(dd, "%s: gasket error %d\n", __func__, err);
dd               1409 drivers/infiniband/hw/hfi1/pcie.c 	update_lbus_info(dd);
dd               1410 drivers/infiniband/hw/hfi1/pcie.c 	dd_dev_info(dd, "%s: new speed and width: %s\n", __func__,
dd               1411 drivers/infiniband/hw/hfi1/pcie.c 		    dd->lbus_info);
dd               1413 drivers/infiniband/hw/hfi1/pcie.c 	if (dd->lbus_speed != target_speed ||
dd               1414 drivers/infiniband/hw/hfi1/pcie.c 	    dd->lbus_width < target_width) { /* not target */
dd               1417 drivers/infiniband/hw/hfi1/pcie.c 		dd_dev_err(dd, "PCIe link speed or width did not match target%s\n",
dd               1429 drivers/infiniband/hw/hfi1/pcie.c 		write_csr(dd, ASIC_CFG_THERM_POLL_EN, 0x1);
dd               1431 drivers/infiniband/hw/hfi1/pcie.c 		dd_dev_info(dd, "%s: Re-enable therm polling\n",
dd               1434 drivers/infiniband/hw/hfi1/pcie.c 	release_chip_resource(dd, CR_SBUS);
dd               1438 drivers/infiniband/hw/hfi1/pcie.c 		dd_dev_err(dd, "Proceeding at current PCIe speed\n");
dd               1442 drivers/infiniband/hw/hfi1/pcie.c 	dd_dev_info(dd, "%s: done\n", __func__);
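
do_pcie_gen3_transition() above rewrites Link Control 2 twice (parent bridge, then endpoint) before triggering the secondary bus reset; the "..old/..new link control2" messages bracket a read-modify-write that clears the 4-bit target-link-speed field and inserts the Gen3 code. A small model of that step, assuming the standard LNKCTL2 layout:

/* Model of the LNKCTL2 read-modify-write: clear target link speed
 * (bits 3:0), set the Gen3 speed code.  Layout per the PCIe spec;
 * this is a sketch, not the driver. */
#include <stdint.h>
#include <stdio.h>

#define LNKCTL2_TLS_MASK  0x000f  /* target link speed, bits 3:0 */
#define LNKCTL2_TLS_8_0GT 0x0003  /* Gen3 speed code */

static uint16_t set_target_gen3(uint16_t lnkctl2)
{
	return (uint16_t)((lnkctl2 & ~LNKCTL2_TLS_MASK) | LNKCTL2_TLS_8_0GT);
}

int main(void)
{
	uint16_t old = 0x0002;  /* currently targeting Gen2 */

	printf("old link control2: 0x%x, new link control2: 0x%x\n",
	       old, set_target_gen3(old));
	return 0;
}
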
dd                 63 drivers/infiniband/hw/hfi1/pio.c void __cm_reset(struct hfi1_devdata *dd, u64 sendctrl)
dd                 65 drivers/infiniband/hw/hfi1/pio.c 	write_csr(dd, SEND_CTRL, sendctrl | SEND_CTRL_CM_RESET_SMASK);
dd                 68 drivers/infiniband/hw/hfi1/pio.c 		sendctrl = read_csr(dd, SEND_CTRL);
dd                 75 drivers/infiniband/hw/hfi1/pio.c void pio_send_control(struct hfi1_devdata *dd, int op)
dd                 83 drivers/infiniband/hw/hfi1/pio.c 	spin_lock_irqsave(&dd->sendctrl_lock, flags);
dd                 85 drivers/infiniband/hw/hfi1/pio.c 	reg = read_csr(dd, SEND_CTRL);
dd                 92 drivers/infiniband/hw/hfi1/pio.c 		for (i = 0; i < ARRAY_SIZE(dd->vld); i++)
dd                 93 drivers/infiniband/hw/hfi1/pio.c 			if (!dd->vld[i].mtu)
dd                110 drivers/infiniband/hw/hfi1/pio.c 		__cm_reset(dd, reg);
dd                118 drivers/infiniband/hw/hfi1/pio.c 		dd_dev_err(dd, "%s: invalid control %d\n", __func__, op);
dd                123 drivers/infiniband/hw/hfi1/pio.c 		write_csr(dd, SEND_CTRL, reg);
dd                125 drivers/infiniband/hw/hfi1/pio.c 			(void)read_csr(dd, SEND_CTRL); /* flush write */
dd                128 drivers/infiniband/hw/hfi1/pio.c 	spin_unlock_irqrestore(&dd->sendctrl_lock, flags);
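
pio_send_control() above is a locked read-modify-write of SEND_CTRL: take sendctrl_lock, read the CSR once, mutate it per the requested op, reject unknown ops with an error, write back only when needed, and flush with a read-back. A compilable sketch of that shape, with a mutex standing in for spin_lock_irqsave() and a plain variable for the CSR:

/* Sketch of the pio_send_control() shape: locked RMW of a control
 * register with an invalid-op error path and a flushing read-back. */
#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

static pthread_mutex_t sendctrl_lock = PTHREAD_MUTEX_INITIALIZER;
static uint64_t send_ctrl_csr;

enum send_op { OP_ENABLE, OP_DISABLE };

static void send_control(int op)
{
	pthread_mutex_lock(&sendctrl_lock);
	uint64_t reg = send_ctrl_csr;   /* read_csr(dd, SEND_CTRL) */
	int write = 1;

	switch (op) {
	case OP_ENABLE:  reg |=  1ULL; break;
	case OP_DISABLE: reg &= ~1ULL; break;
	default:
		fprintf(stderr, "invalid control %d\n", op);
		write = 0;
	}
	if (write) {
		send_ctrl_csr = reg;    /* write_csr(dd, SEND_CTRL, reg) */
		(void)send_ctrl_csr;    /* read back to flush the write */
	}
	pthread_mutex_unlock(&sendctrl_lock);
}

int main(void)
{
	send_control(OP_ENABLE);
	return 0;
}
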
dd                223 drivers/infiniband/hw/hfi1/pio.c int init_sc_pools_and_sizes(struct hfi1_devdata *dd)
dd                226 drivers/infiniband/hw/hfi1/pio.c 	int total_blocks = (chip_pio_mem_size(dd) / PIO_BLOCK_SIZE) - 1;
dd                275 drivers/infiniband/hw/hfi1/pio.c 				dd,
dd                288 drivers/infiniband/hw/hfi1/pio.c 			dd,
dd                296 drivers/infiniband/hw/hfi1/pio.c 			dd,
dd                305 drivers/infiniband/hw/hfi1/pio.c 			dd,
dd                331 drivers/infiniband/hw/hfi1/pio.c 			count = dd->n_krcv_queues;
dd                335 drivers/infiniband/hw/hfi1/pio.c 			count = dd->num_rcv_contexts - dd->n_krcv_queues;
dd                338 drivers/infiniband/hw/hfi1/pio.c 				dd,
dd                343 drivers/infiniband/hw/hfi1/pio.c 		if (total_contexts + count > chip_send_contexts(dd))
dd                344 drivers/infiniband/hw/hfi1/pio.c 			count = chip_send_contexts(dd) - total_contexts;
dd                361 drivers/infiniband/hw/hfi1/pio.c 				dd,
dd                367 drivers/infiniband/hw/hfi1/pio.c 		dd->sc_sizes[i].count = count;
dd                368 drivers/infiniband/hw/hfi1/pio.c 		dd->sc_sizes[i].size = size;
dd                372 drivers/infiniband/hw/hfi1/pio.c 			dd,
dd                382 drivers/infiniband/hw/hfi1/pio.c 			dd,
dd                399 drivers/infiniband/hw/hfi1/pio.c 				dd,
dd                408 drivers/infiniband/hw/hfi1/pio.c 					dd,
dd                420 drivers/infiniband/hw/hfi1/pio.c 		if (dd->sc_sizes[i].size < 0) {
dd                421 drivers/infiniband/hw/hfi1/pio.c 			unsigned pool = wildcard_to_pool(dd->sc_sizes[i].size);
dd                424 drivers/infiniband/hw/hfi1/pio.c 			dd->sc_sizes[i].size = mem_pool_info[pool].size;
dd                428 drivers/infiniband/hw/hfi1/pio.c 		if (dd->sc_sizes[i].size > PIO_MAX_BLOCKS)
dd                429 drivers/infiniband/hw/hfi1/pio.c 			dd->sc_sizes[i].size = PIO_MAX_BLOCKS;
dd                432 drivers/infiniband/hw/hfi1/pio.c 		used_blocks += dd->sc_sizes[i].size * dd->sc_sizes[i].count;
dd                436 drivers/infiniband/hw/hfi1/pio.c 		dd_dev_info(dd, "unused send context blocks: %d\n", extra);
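
init_sc_pools_and_sizes() above budgets PIO blocks: the total comes from chip_pio_mem_size()/PIO_BLOCK_SIZE minus one, pool definitions carve that total up, wildcard context sizes resolve to a pool's per-context share, and the remainder is reported as unused blocks. A toy version of the arithmetic with made-up numbers (the real pool percentages and context counts come from module parameters and the chip):

/* Toy version of the pool accounting: percent-of-memory pool, equal
 * per-context share, leftover reported.  Numbers are illustrative. */
#include <stdio.h>

int main(void)
{
	int total_blocks = 2048 - 1;  /* PIO memory / PIO_BLOCK_SIZE, minus 1 */
	int pool_percent = 50;        /* a pool declared as 50% of memory */
	int pool_count = 16;          /* contexts drawing from this pool */

	int pool_blocks = total_blocks * pool_percent / 100;
	int per_ctx = pool_blocks / pool_count;  /* wildcard size resolves here */

	printf("pool %d blocks, %d per context, %d unused in pool\n",
	       pool_blocks, per_ctx, pool_blocks - per_ctx * pool_count);
	return 0;
}
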
dd                441 drivers/infiniband/hw/hfi1/pio.c int init_send_contexts(struct hfi1_devdata *dd)
dd                446 drivers/infiniband/hw/hfi1/pio.c 	ret = init_credit_return(dd);
dd                450 drivers/infiniband/hw/hfi1/pio.c 	dd->hw_to_sw = kmalloc_array(TXE_NUM_CONTEXTS, sizeof(u8),
dd                452 drivers/infiniband/hw/hfi1/pio.c 	dd->send_contexts = kcalloc(dd->num_send_contexts,
dd                455 drivers/infiniband/hw/hfi1/pio.c 	if (!dd->send_contexts || !dd->hw_to_sw) {
dd                456 drivers/infiniband/hw/hfi1/pio.c 		kfree(dd->hw_to_sw);
dd                457 drivers/infiniband/hw/hfi1/pio.c 		kfree(dd->send_contexts);
dd                458 drivers/infiniband/hw/hfi1/pio.c 		free_credit_return(dd);
dd                464 drivers/infiniband/hw/hfi1/pio.c 		dd->hw_to_sw[i] = INVALID_SCI;
dd                473 drivers/infiniband/hw/hfi1/pio.c 		struct sc_config_sizes *scs = &dd->sc_sizes[i];
dd                477 drivers/infiniband/hw/hfi1/pio.c 						&dd->send_contexts[context];
dd                495 drivers/infiniband/hw/hfi1/pio.c static int sc_hw_alloc(struct hfi1_devdata *dd, int type, u32 *sw_index,
dd                502 drivers/infiniband/hw/hfi1/pio.c 	for (index = 0, sci = &dd->send_contexts[0];
dd                503 drivers/infiniband/hw/hfi1/pio.c 			index < dd->num_send_contexts; index++, sci++) {
dd                507 drivers/infiniband/hw/hfi1/pio.c 			context = chip_send_contexts(dd) - index - 1;
dd                508 drivers/infiniband/hw/hfi1/pio.c 			dd->hw_to_sw[context] = index;
dd                514 drivers/infiniband/hw/hfi1/pio.c 	dd_dev_err(dd, "Unable to locate a free type %d send context\n", type);
dd                523 drivers/infiniband/hw/hfi1/pio.c static void sc_hw_free(struct hfi1_devdata *dd, u32 sw_index, u32 hw_context)
dd                527 drivers/infiniband/hw/hfi1/pio.c 	sci = &dd->send_contexts[sw_index];
dd                529 drivers/infiniband/hw/hfi1/pio.c 		dd_dev_err(dd, "%s: sw_index %u not allocated? hw_context %u\n",
dd                533 drivers/infiniband/hw/hfi1/pio.c 	dd->hw_to_sw[hw_context] = INVALID_SCI;
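
sc_hw_alloc()/sc_hw_free() above keep a two-way mapping between software and hardware send-context indices: software slots are scanned from the bottom, the hardware context (in the branch shown, `chip_send_contexts(dd) - index - 1`) is taken from the top of the range, and hw_to_sw[] holds the reverse map with INVALID_SCI marking free slots. A self-contained model:

/* Model of the sw<->hw context mapping: bottom-up sw scan, top-down hw
 * assignment, hw_to_sw[] as the reverse map. */
#include <stdio.h>

#define NUM_CONTEXTS 8
#define INVALID_SCI  0xff

static unsigned char hw_to_sw[NUM_CONTEXTS];
static unsigned char allocated[NUM_CONTEXTS];  /* sw-indexed */

static int hw_alloc(unsigned int *sw_index, unsigned int *hw_context)
{
	for (unsigned int index = 0; index < NUM_CONTEXTS; index++) {
		if (!allocated[index]) {
			unsigned int context = NUM_CONTEXTS - index - 1;

			allocated[index] = 1;
			hw_to_sw[context] = (unsigned char)index;
			*sw_index = index;
			*hw_context = context;
			return 0;
		}
	}
	fprintf(stderr, "Unable to locate a free send context\n");
	return -1;
}

int main(void)
{
	unsigned int sw, hw;

	for (int i = 0; i < NUM_CONTEXTS; i++)
		hw_to_sw[i] = INVALID_SCI;
	if (!hw_alloc(&sw, &hw))
		printf("sw %u <-> hw %u\n", sw, hw);
	return 0;
}
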
dd                566 drivers/infiniband/hw/hfi1/pio.c 	sc->hw_free = &sc->dd->cr_base[sc->node].va[gc].cr[index];
dd                568 drivers/infiniband/hw/hfi1/pio.c 	       &((struct credit_return *)sc->dd->cr_base[sc->node].dma)[gc];
dd                645 drivers/infiniband/hw/hfi1/pio.c 		write_kctxt_csr(sc->dd, sc->hw_context,
dd                665 drivers/infiniband/hw/hfi1/pio.c 	struct hfi1_devdata *dd = sc->dd;
dd                669 drivers/infiniband/hw/hfi1/pio.c 	write_kctxt_csr(dd, hw_context,
dd                671 drivers/infiniband/hw/hfi1/pio.c 			hfi1_pkt_default_send_ctxt_mask(dd, type));
dd                696 drivers/infiniband/hw/hfi1/pio.c struct send_context *sc_alloc(struct hfi1_devdata *dd, int type,
dd                711 drivers/infiniband/hw/hfi1/pio.c 	if (dd->flags & HFI1_FROZEN)
dd                721 drivers/infiniband/hw/hfi1/pio.c 		dd_dev_err(dd,
dd                727 drivers/infiniband/hw/hfi1/pio.c 	spin_lock_irqsave(&dd->sc_lock, flags);
dd                728 drivers/infiniband/hw/hfi1/pio.c 	ret = sc_hw_alloc(dd, type, &sw_index, &hw_context);
dd                730 drivers/infiniband/hw/hfi1/pio.c 		spin_unlock_irqrestore(&dd->sc_lock, flags);
dd                736 drivers/infiniband/hw/hfi1/pio.c 	sci = &dd->send_contexts[sw_index];
dd                739 drivers/infiniband/hw/hfi1/pio.c 	sc->dd = dd;
dd                762 drivers/infiniband/hw/hfi1/pio.c 	sc->base_addr = dd->piobase + ((hw_context & PIO_ADDR_CONTEXT_MASK)
dd                770 drivers/infiniband/hw/hfi1/pio.c 	write_kctxt_csr(dd, hw_context, SC(CTRL), reg);
dd                775 drivers/infiniband/hw/hfi1/pio.c 	write_kctxt_csr(dd, hw_context, SC(ERR_MASK), (u64)-1);
dd                778 drivers/infiniband/hw/hfi1/pio.c 	write_kctxt_csr(dd, hw_context, SC(CHECK_PARTITION_KEY),
dd                793 drivers/infiniband/hw/hfi1/pio.c 	write_kctxt_csr(dd, hw_context, SC(CHECK_OPCODE),
dd                799 drivers/infiniband/hw/hfi1/pio.c 	write_kctxt_csr(dd, hw_context, SC(CREDIT_RETURN_ADDR), reg);
dd                831 drivers/infiniband/hw/hfi1/pio.c 	write_kctxt_csr(dd, hw_context, SC(CREDIT_CTRL), reg);
dd                836 drivers/infiniband/hw/hfi1/pio.c 		write_kctxt_csr(dd, hw_context, SC(CHECK_VL), reg);
dd                839 drivers/infiniband/hw/hfi1/pio.c 	spin_unlock_irqrestore(&dd->sc_lock, flags);
dd                880 drivers/infiniband/hw/hfi1/pio.c 	struct hfi1_devdata *dd;
dd                889 drivers/infiniband/hw/hfi1/pio.c 	dd = sc->dd;
dd                891 drivers/infiniband/hw/hfi1/pio.c 		dd_dev_err(dd, "piowait list not empty!\n");
dd                897 drivers/infiniband/hw/hfi1/pio.c 	spin_lock_irqsave(&dd->sc_lock, flags);
dd                898 drivers/infiniband/hw/hfi1/pio.c 	dd->send_contexts[sw_index].sc = NULL;
dd                901 drivers/infiniband/hw/hfi1/pio.c 	write_kctxt_csr(dd, hw_context, SC(CTRL), 0);
dd                902 drivers/infiniband/hw/hfi1/pio.c 	write_kctxt_csr(dd, hw_context, SC(CHECK_ENABLE), 0);
dd                903 drivers/infiniband/hw/hfi1/pio.c 	write_kctxt_csr(dd, hw_context, SC(ERR_MASK), 0);
dd                904 drivers/infiniband/hw/hfi1/pio.c 	write_kctxt_csr(dd, hw_context, SC(CHECK_PARTITION_KEY), 0);
dd                905 drivers/infiniband/hw/hfi1/pio.c 	write_kctxt_csr(dd, hw_context, SC(CHECK_OPCODE), 0);
dd                906 drivers/infiniband/hw/hfi1/pio.c 	write_kctxt_csr(dd, hw_context, SC(CREDIT_RETURN_ADDR), 0);
dd                907 drivers/infiniband/hw/hfi1/pio.c 	write_kctxt_csr(dd, hw_context, SC(CREDIT_CTRL), 0);
dd                910 drivers/infiniband/hw/hfi1/pio.c 	sc_hw_free(dd, sw_index, hw_context);
dd                911 drivers/infiniband/hw/hfi1/pio.c 	spin_unlock_irqrestore(&dd->sc_lock, flags);
dd                929 drivers/infiniband/hw/hfi1/pio.c 	reg = read_kctxt_csr(sc->dd, sc->hw_context, SC(CTRL));
dd                933 drivers/infiniband/hw/hfi1/pio.c 	write_kctxt_csr(sc->dd, sc->hw_context, SC(CTRL), reg);
dd                989 drivers/infiniband/hw/hfi1/pio.c static bool is_sc_halted(struct hfi1_devdata *dd, u32 hw_context)
dd                991 drivers/infiniband/hw/hfi1/pio.c 	return !!(read_kctxt_csr(dd, hw_context, SC(STATUS)) &
dd               1011 drivers/infiniband/hw/hfi1/pio.c 	struct hfi1_devdata *dd = sc->dd;
dd               1018 drivers/infiniband/hw/hfi1/pio.c 		reg = read_csr(dd, sc->hw_context * 8 +
dd               1022 drivers/infiniband/hw/hfi1/pio.c 		    is_sc_halted(dd, sc->hw_context) || egress_halted(reg))
dd               1032 drivers/infiniband/hw/hfi1/pio.c 			dd_dev_err(dd,
dd               1036 drivers/infiniband/hw/hfi1/pio.c 			queue_work(dd->pport->link_wq,
dd               1037 drivers/infiniband/hw/hfi1/pio.c 				   &dd->pport->link_bounce_work);
dd               1046 drivers/infiniband/hw/hfi1/pio.c 		pause_for_credit_return(dd);
dd               1049 drivers/infiniband/hw/hfi1/pio.c void sc_wait(struct hfi1_devdata *dd)
dd               1053 drivers/infiniband/hw/hfi1/pio.c 	for (i = 0; i < dd->num_send_contexts; i++) {
dd               1054 drivers/infiniband/hw/hfi1/pio.c 		struct send_context *sc = dd->send_contexts[i].sc;
dd               1073 drivers/infiniband/hw/hfi1/pio.c 	struct hfi1_devdata *dd = sc->dd;
dd               1082 drivers/infiniband/hw/hfi1/pio.c 	dd_dev_info(dd, "restarting send context %u(%u)\n", sc->sw_index,
dd               1093 drivers/infiniband/hw/hfi1/pio.c 		reg = read_kctxt_csr(dd, sc->hw_context, SC(STATUS));
dd               1097 drivers/infiniband/hw/hfi1/pio.c 			dd_dev_err(dd, "%s: context %u(%u) not halting, skipping\n",
dd               1123 drivers/infiniband/hw/hfi1/pio.c 				dd_dev_err(dd,
dd               1158 drivers/infiniband/hw/hfi1/pio.c void pio_freeze(struct hfi1_devdata *dd)
dd               1163 drivers/infiniband/hw/hfi1/pio.c 	for (i = 0; i < dd->num_send_contexts; i++) {
dd               1164 drivers/infiniband/hw/hfi1/pio.c 		sc = dd->send_contexts[i].sc;
dd               1185 drivers/infiniband/hw/hfi1/pio.c void pio_kernel_unfreeze(struct hfi1_devdata *dd)
dd               1190 drivers/infiniband/hw/hfi1/pio.c 	for (i = 0; i < dd->num_send_contexts; i++) {
dd               1191 drivers/infiniband/hw/hfi1/pio.c 		sc = dd->send_contexts[i].sc;
dd               1213 drivers/infiniband/hw/hfi1/pio.c void pio_kernel_linkup(struct hfi1_devdata *dd)
dd               1218 drivers/infiniband/hw/hfi1/pio.c 	for (i = 0; i < dd->num_send_contexts; i++) {
dd               1219 drivers/infiniband/hw/hfi1/pio.c 		sc = dd->send_contexts[i].sc;
dd               1233 drivers/infiniband/hw/hfi1/pio.c static int pio_init_wait_progress(struct hfi1_devdata *dd)
dd               1239 drivers/infiniband/hw/hfi1/pio.c 	max = (dd->icode == ICODE_FPGA_EMULATION) ? 120 : 5;
dd               1241 drivers/infiniband/hw/hfi1/pio.c 		reg = read_csr(dd, SEND_PIO_INIT_CTXT);
dd               1257 drivers/infiniband/hw/hfi1/pio.c void pio_reset_all(struct hfi1_devdata *dd)
dd               1262 drivers/infiniband/hw/hfi1/pio.c 	ret = pio_init_wait_progress(dd);
dd               1266 drivers/infiniband/hw/hfi1/pio.c 		write_csr(dd, SEND_PIO_ERR_CLEAR,
dd               1271 drivers/infiniband/hw/hfi1/pio.c 	write_csr(dd, SEND_PIO_INIT_CTXT,
dd               1274 drivers/infiniband/hw/hfi1/pio.c 	ret = pio_init_wait_progress(dd);
dd               1276 drivers/infiniband/hw/hfi1/pio.c 		dd_dev_err(dd,
dd               1286 drivers/infiniband/hw/hfi1/pio.c 	struct hfi1_devdata *dd;
dd               1292 drivers/infiniband/hw/hfi1/pio.c 	dd = sc->dd;
dd               1302 drivers/infiniband/hw/hfi1/pio.c 	sc_ctrl = read_kctxt_csr(dd, sc->hw_context, SC(CTRL));
dd               1325 drivers/infiniband/hw/hfi1/pio.c 	reg = read_kctxt_csr(dd, sc->hw_context, SC(ERR_STATUS));
dd               1327 drivers/infiniband/hw/hfi1/pio.c 		write_kctxt_csr(dd, sc->hw_context, SC(ERR_CLEAR), reg);
dd               1333 drivers/infiniband/hw/hfi1/pio.c 	spin_lock(&dd->sc_init_lock);
dd               1344 drivers/infiniband/hw/hfi1/pio.c 	write_csr(dd, SEND_PIO_INIT_CTXT, pio);
dd               1350 drivers/infiniband/hw/hfi1/pio.c 	ret = pio_init_wait_progress(dd);
dd               1351 drivers/infiniband/hw/hfi1/pio.c 	spin_unlock(&dd->sc_init_lock);
dd               1353 drivers/infiniband/hw/hfi1/pio.c 		dd_dev_err(dd,
dd               1363 drivers/infiniband/hw/hfi1/pio.c 	write_kctxt_csr(dd, sc->hw_context, SC(CTRL), sc_ctrl);
dd               1368 drivers/infiniband/hw/hfi1/pio.c 	read_kctxt_csr(dd, sc->hw_context, SC(CTRL));
dd               1384 drivers/infiniband/hw/hfi1/pio.c 	write_kctxt_csr(sc->dd, sc->hw_context, SC(CREDIT_FORCE),
dd               1390 drivers/infiniband/hw/hfi1/pio.c 	read_kctxt_csr(sc->dd, sc->hw_context, SC(CREDIT_FORCE));
dd               1392 drivers/infiniband/hw/hfi1/pio.c 	write_kctxt_csr(sc->dd, sc->hw_context, SC(CREDIT_FORCE), 0);
dd               1410 drivers/infiniband/hw/hfi1/pio.c 	dd_dev_info(sc->dd, "%s: context %u(%u) - not implemented\n",
dd               1558 drivers/infiniband/hw/hfi1/pio.c 		write_kctxt_csr(sc->dd, sc->hw_context,
dd               1580 drivers/infiniband/hw/hfi1/pio.c 		write_kctxt_csr(sc->dd, sc->hw_context,
dd               1611 drivers/infiniband/hw/hfi1/pio.c 	struct hfi1_devdata *dd = sc->dd;
dd               1619 drivers/infiniband/hw/hfi1/pio.c 	if (dd->send_contexts[sc->sw_index].type != SC_KERNEL &&
dd               1620 drivers/infiniband/hw/hfi1/pio.c 	    dd->send_contexts[sc->sw_index].type != SC_VL15)
dd               1758 drivers/infiniband/hw/hfi1/pio.c void sc_group_release_update(struct hfi1_devdata *dd, u32 hw_context)
dd               1764 drivers/infiniband/hw/hfi1/pio.c 	spin_lock(&dd->sc_lock);
dd               1765 drivers/infiniband/hw/hfi1/pio.c 	sw_index = dd->hw_to_sw[hw_context];
dd               1766 drivers/infiniband/hw/hfi1/pio.c 	if (unlikely(sw_index >= dd->num_send_contexts)) {
dd               1767 drivers/infiniband/hw/hfi1/pio.c 		dd_dev_err(dd, "%s: invalid hw (%u) to sw (%u) mapping\n",
dd               1771 drivers/infiniband/hw/hfi1/pio.c 	sc = dd->send_contexts[sw_index].sc;
dd               1778 drivers/infiniband/hw/hfi1/pio.c 		sw_index = dd->hw_to_sw[gc];
dd               1779 drivers/infiniband/hw/hfi1/pio.c 		if (unlikely(sw_index >= dd->num_send_contexts)) {
dd               1780 drivers/infiniband/hw/hfi1/pio.c 			dd_dev_err(dd,
dd               1785 drivers/infiniband/hw/hfi1/pio.c 		sc_release_update(dd->send_contexts[sw_index].sc);
dd               1788 drivers/infiniband/hw/hfi1/pio.c 	spin_unlock(&dd->sc_lock);
dd               1800 drivers/infiniband/hw/hfi1/pio.c struct send_context *pio_select_send_context_vl(struct hfi1_devdata *dd,
dd               1818 drivers/infiniband/hw/hfi1/pio.c 	m = rcu_dereference(dd->pio_map);
dd               1821 drivers/infiniband/hw/hfi1/pio.c 		return dd->vld[0].sc;
dd               1828 drivers/infiniband/hw/hfi1/pio.c 	rval = !rval ? dd->vld[0].sc : rval;
dd               1840 drivers/infiniband/hw/hfi1/pio.c struct send_context *pio_select_send_context_sc(struct hfi1_devdata *dd,
dd               1843 drivers/infiniband/hw/hfi1/pio.c 	u8 vl = sc_to_vlt(dd, sc5);
dd               1845 drivers/infiniband/hw/hfi1/pio.c 	return pio_select_send_context_vl(dd, selector, vl);
dd               1873 drivers/infiniband/hw/hfi1/pio.c static void set_threshold(struct hfi1_devdata *dd, int scontext, int i)
dd               1877 drivers/infiniband/hw/hfi1/pio.c 	thres = min(sc_percent_to_threshold(dd->kernel_send_context[scontext],
dd               1879 drivers/infiniband/hw/hfi1/pio.c 		    sc_mtu_to_threshold(dd->kernel_send_context[scontext],
dd               1880 drivers/infiniband/hw/hfi1/pio.c 					dd->vld[i].mtu,
dd               1881 drivers/infiniband/hw/hfi1/pio.c 					dd->rcd[0]->rcvhdrqentsize));
dd               1882 drivers/infiniband/hw/hfi1/pio.c 	sc_set_cr_threshold(dd->kernel_send_context[scontext], thres);
dd               1913 drivers/infiniband/hw/hfi1/pio.c int pio_map_init(struct hfi1_devdata *dd, u8 port, u8 num_vls, u8 *vl_scontexts)
dd               1923 drivers/infiniband/hw/hfi1/pio.c 		for (i = 0; i < dd->num_send_contexts; i++)
dd               1924 drivers/infiniband/hw/hfi1/pio.c 			if (dd->send_contexts[i].type == SC_KERNEL)
dd               1965 drivers/infiniband/hw/hfi1/pio.c 				if (dd->kernel_send_context[scontext]) {
dd               1967 drivers/infiniband/hw/hfi1/pio.c 					dd->kernel_send_context[scontext];
dd               1968 drivers/infiniband/hw/hfi1/pio.c 					set_threshold(dd, scontext, i);
dd               1982 drivers/infiniband/hw/hfi1/pio.c 	spin_lock_irq(&dd->pio_map_lock);
dd               1983 drivers/infiniband/hw/hfi1/pio.c 	oldmap = rcu_dereference_protected(dd->pio_map,
dd               1984 drivers/infiniband/hw/hfi1/pio.c 					   lockdep_is_held(&dd->pio_map_lock));
dd               1987 drivers/infiniband/hw/hfi1/pio.c 	rcu_assign_pointer(dd->pio_map, newmap);
dd               1989 drivers/infiniband/hw/hfi1/pio.c 	spin_unlock_irq(&dd->pio_map_lock);
dd               2000 drivers/infiniband/hw/hfi1/pio.c void free_pio_map(struct hfi1_devdata *dd)
dd               2003 drivers/infiniband/hw/hfi1/pio.c 	if (rcu_access_pointer(dd->pio_map)) {
dd               2004 drivers/infiniband/hw/hfi1/pio.c 		spin_lock_irq(&dd->pio_map_lock);
dd               2005 drivers/infiniband/hw/hfi1/pio.c 		pio_map_free(rcu_access_pointer(dd->pio_map));
dd               2006 drivers/infiniband/hw/hfi1/pio.c 		RCU_INIT_POINTER(dd->pio_map, NULL);
dd               2007 drivers/infiniband/hw/hfi1/pio.c 		spin_unlock_irq(&dd->pio_map_lock);
dd               2010 drivers/infiniband/hw/hfi1/pio.c 	kfree(dd->kernel_send_context);
dd               2011 drivers/infiniband/hw/hfi1/pio.c 	dd->kernel_send_context = NULL;
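
pio_map_init() and free_pio_map() above manage dd->pio_map with the usual RCU discipline: pio_select_send_context_vl() dereferences the map locklessly, while writers build a new map, publish it under pio_map_lock with rcu_assign_pointer(), and retire the old one. A user-space approximation with C11 atomics in place of RCU; unlike real RCU it frees the old map immediately instead of waiting out a grace period, which the comment flags:

/* Approximation of the publish/retire pattern: lock-free reader load,
 * locked writer swap.  Real RCU defers the free past a grace period. */
#include <pthread.h>
#include <stdatomic.h>
#include <stdlib.h>

struct pio_map { int nctxts; };

static _Atomic(struct pio_map *) pio_map;
static pthread_mutex_t pio_map_lock = PTHREAD_MUTEX_INITIALIZER;

static struct pio_map *reader_pick(void)
{
	/* rcu_dereference() stand-in: an acquire load, no lock held */
	return atomic_load_explicit(&pio_map, memory_order_acquire);
}

static void writer_install(struct pio_map *newmap)
{
	pthread_mutex_lock(&pio_map_lock);
	struct pio_map *old = atomic_exchange_explicit(&pio_map, newmap,
						       memory_order_acq_rel);
	pthread_mutex_unlock(&pio_map_lock);
	free(old);  /* kernel code would defer this past a grace period */
}

int main(void)
{
	struct pio_map *m = malloc(sizeof(*m));

	if (!m)
		return 1;
	m->nctxts = 4;
	writer_install(m);
	return reader_pick() ? 0 : 1;
}
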
dd               2014 drivers/infiniband/hw/hfi1/pio.c int init_pervl_scs(struct hfi1_devdata *dd)
dd               2020 drivers/infiniband/hw/hfi1/pio.c 	struct hfi1_pportdata *ppd = dd->pport;
dd               2022 drivers/infiniband/hw/hfi1/pio.c 	dd->vld[15].sc = sc_alloc(dd, SC_VL15,
dd               2023 drivers/infiniband/hw/hfi1/pio.c 				  dd->rcd[0]->rcvhdrqentsize, dd->node);
dd               2024 drivers/infiniband/hw/hfi1/pio.c 	if (!dd->vld[15].sc)
dd               2027 drivers/infiniband/hw/hfi1/pio.c 	hfi1_init_ctxt(dd->vld[15].sc);
dd               2028 drivers/infiniband/hw/hfi1/pio.c 	dd->vld[15].mtu = enum_to_mtu(OPA_MTU_2048);
dd               2030 drivers/infiniband/hw/hfi1/pio.c 	dd->kernel_send_context = kcalloc_node(dd->num_send_contexts,
dd               2032 drivers/infiniband/hw/hfi1/pio.c 					       GFP_KERNEL, dd->node);
dd               2033 drivers/infiniband/hw/hfi1/pio.c 	if (!dd->kernel_send_context)
dd               2036 drivers/infiniband/hw/hfi1/pio.c 	dd->kernel_send_context[0] = dd->vld[15].sc;
dd               2046 drivers/infiniband/hw/hfi1/pio.c 		dd->vld[i].sc = sc_alloc(dd, SC_KERNEL,
dd               2047 drivers/infiniband/hw/hfi1/pio.c 					 dd->rcd[0]->rcvhdrqentsize, dd->node);
dd               2048 drivers/infiniband/hw/hfi1/pio.c 		if (!dd->vld[i].sc)
dd               2050 drivers/infiniband/hw/hfi1/pio.c 		dd->kernel_send_context[i + 1] = dd->vld[i].sc;
dd               2051 drivers/infiniband/hw/hfi1/pio.c 		hfi1_init_ctxt(dd->vld[i].sc);
dd               2053 drivers/infiniband/hw/hfi1/pio.c 		dd->vld[i].mtu = hfi1_max_mtu;
dd               2056 drivers/infiniband/hw/hfi1/pio.c 		dd->kernel_send_context[i + 1] =
dd               2057 drivers/infiniband/hw/hfi1/pio.c 		sc_alloc(dd, SC_KERNEL, dd->rcd[0]->rcvhdrqentsize, dd->node);
dd               2058 drivers/infiniband/hw/hfi1/pio.c 		if (!dd->kernel_send_context[i + 1])
dd               2060 drivers/infiniband/hw/hfi1/pio.c 		hfi1_init_ctxt(dd->kernel_send_context[i + 1]);
dd               2063 drivers/infiniband/hw/hfi1/pio.c 	sc_enable(dd->vld[15].sc);
dd               2064 drivers/infiniband/hw/hfi1/pio.c 	ctxt = dd->vld[15].sc->hw_context;
dd               2066 drivers/infiniband/hw/hfi1/pio.c 	write_kctxt_csr(dd, ctxt, SC(CHECK_VL), mask);
dd               2067 drivers/infiniband/hw/hfi1/pio.c 	dd_dev_info(dd,
dd               2069 drivers/infiniband/hw/hfi1/pio.c 		    dd->vld[15].sc->sw_index, ctxt);
dd               2072 drivers/infiniband/hw/hfi1/pio.c 		sc_enable(dd->vld[i].sc);
dd               2073 drivers/infiniband/hw/hfi1/pio.c 		ctxt = dd->vld[i].sc->hw_context;
dd               2075 drivers/infiniband/hw/hfi1/pio.c 		write_kctxt_csr(dd, ctxt, SC(CHECK_VL), mask);
dd               2078 drivers/infiniband/hw/hfi1/pio.c 		sc_enable(dd->kernel_send_context[i + 1]);
dd               2079 drivers/infiniband/hw/hfi1/pio.c 		ctxt = dd->kernel_send_context[i + 1]->hw_context;
dd               2081 drivers/infiniband/hw/hfi1/pio.c 		write_kctxt_csr(dd, ctxt, SC(CHECK_VL), mask);
dd               2084 drivers/infiniband/hw/hfi1/pio.c 	if (pio_map_init(dd, ppd->port - 1, num_vls, NULL))
dd               2090 drivers/infiniband/hw/hfi1/pio.c 		sc_free(dd->vld[i].sc);
dd               2091 drivers/infiniband/hw/hfi1/pio.c 		dd->vld[i].sc = NULL;
dd               2095 drivers/infiniband/hw/hfi1/pio.c 		sc_free(dd->kernel_send_context[i + 1]);
dd               2097 drivers/infiniband/hw/hfi1/pio.c 	kfree(dd->kernel_send_context);
dd               2098 drivers/infiniband/hw/hfi1/pio.c 	dd->kernel_send_context = NULL;
dd               2101 drivers/infiniband/hw/hfi1/pio.c 	sc_free(dd->vld[15].sc);
dd               2105 drivers/infiniband/hw/hfi1/pio.c int init_credit_return(struct hfi1_devdata *dd)
dd               2110 drivers/infiniband/hw/hfi1/pio.c 	dd->cr_base = kcalloc(
dd               2114 drivers/infiniband/hw/hfi1/pio.c 	if (!dd->cr_base) {
dd               2121 drivers/infiniband/hw/hfi1/pio.c 		set_dev_node(&dd->pcidev->dev, i);
dd               2122 drivers/infiniband/hw/hfi1/pio.c 		dd->cr_base[i].va = dma_alloc_coherent(&dd->pcidev->dev,
dd               2124 drivers/infiniband/hw/hfi1/pio.c 						       &dd->cr_base[i].dma,
dd               2126 drivers/infiniband/hw/hfi1/pio.c 		if (!dd->cr_base[i].va) {
dd               2127 drivers/infiniband/hw/hfi1/pio.c 			set_dev_node(&dd->pcidev->dev, dd->node);
dd               2128 drivers/infiniband/hw/hfi1/pio.c 			dd_dev_err(dd,
dd               2135 drivers/infiniband/hw/hfi1/pio.c 	set_dev_node(&dd->pcidev->dev, dd->node);
dd               2142 drivers/infiniband/hw/hfi1/pio.c void free_credit_return(struct hfi1_devdata *dd)
dd               2146 drivers/infiniband/hw/hfi1/pio.c 	if (!dd->cr_base)
dd               2149 drivers/infiniband/hw/hfi1/pio.c 		if (dd->cr_base[i].va) {
dd               2150 drivers/infiniband/hw/hfi1/pio.c 			dma_free_coherent(&dd->pcidev->dev,
dd               2153 drivers/infiniband/hw/hfi1/pio.c 					  dd->cr_base[i].va,
dd               2154 drivers/infiniband/hw/hfi1/pio.c 					  dd->cr_base[i].dma);
dd               2157 drivers/infiniband/hw/hfi1/pio.c 	kfree(dd->cr_base);
dd               2158 drivers/infiniband/hw/hfi1/pio.c 	dd->cr_base = NULL;
dd               2177 drivers/infiniband/hw/hfi1/pio.c 	reg = read_kctxt_csr(sc->dd, sc->hw_context, SC(CREDIT_STATUS));
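
init_credit_return()/free_credit_return() above allocate one credit-return area per NUMA node by pointing the device at each node before its dma_alloc_coherent() call and restoring the device's home node afterwards, on both the success and failure paths. A sketch with stand-ins for the allocator and set_dev_node():

/* Sketch of the per-NUMA-node allocation loop: retarget the node per
 * iteration, restore the home node on every exit path.  calloc() and
 * set_dev_node() stand in for the kernel APIs. */
#include <stdio.h>
#include <stdlib.h>

#define NUM_NODES 2
static int dev_node;
static void set_dev_node(int node) { dev_node = node; }

int main(void)
{
	void *cr_base[NUM_NODES] = { 0 };
	int home_node = 0;

	for (int i = 0; i < NUM_NODES; i++) {
		set_dev_node(i);                 /* allocate near node i */
		cr_base[i] = calloc(1, 4096);    /* dma_alloc_coherent stand-in */
		if (!cr_base[i]) {
			set_dev_node(home_node); /* restore before bailing */
			fprintf(stderr, "node %d alloc failed\n", i);
			return 1;
		}
	}
	set_dev_node(home_node);                 /* restore the home node */

	for (int i = 0; i < NUM_NODES; i++)
		free(cr_base[i]);
	return 0;
}
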
dd                100 drivers/infiniband/hw/hfi1/pio.h 	struct hfi1_devdata *dd;		/* device */
dd                269 drivers/infiniband/hw/hfi1/pio.h int pio_map_init(struct hfi1_devdata *dd, u8 port, u8 num_vls,
dd                271 drivers/infiniband/hw/hfi1/pio.h void free_pio_map(struct hfi1_devdata *dd);
dd                272 drivers/infiniband/hw/hfi1/pio.h struct send_context *pio_select_send_context_vl(struct hfi1_devdata *dd,
dd                274 drivers/infiniband/hw/hfi1/pio.h struct send_context *pio_select_send_context_sc(struct hfi1_devdata *dd,
dd                278 drivers/infiniband/hw/hfi1/pio.h int init_credit_return(struct hfi1_devdata *dd);
dd                279 drivers/infiniband/hw/hfi1/pio.h void free_credit_return(struct hfi1_devdata *dd);
dd                280 drivers/infiniband/hw/hfi1/pio.h int init_sc_pools_and_sizes(struct hfi1_devdata *dd);
dd                281 drivers/infiniband/hw/hfi1/pio.h int init_send_contexts(struct hfi1_devdata *dd);
dd                282 drivers/infiniband/hw/hfi1/pio.h int init_credit_return(struct hfi1_devdata *dd);
dd                283 drivers/infiniband/hw/hfi1/pio.h int init_pervl_scs(struct hfi1_devdata *dd);
dd                284 drivers/infiniband/hw/hfi1/pio.h struct send_context *sc_alloc(struct hfi1_devdata *dd, int type,
dd                298 drivers/infiniband/hw/hfi1/pio.h void sc_group_release_update(struct hfi1_devdata *dd, u32 hw_context);
dd                305 drivers/infiniband/hw/hfi1/pio.h void sc_wait(struct hfi1_devdata *dd);
dd                309 drivers/infiniband/hw/hfi1/pio.h void pio_reset_all(struct hfi1_devdata *dd);
dd                310 drivers/infiniband/hw/hfi1/pio.h void pio_freeze(struct hfi1_devdata *dd);
dd                311 drivers/infiniband/hw/hfi1/pio.h void pio_kernel_unfreeze(struct hfi1_devdata *dd);
dd                312 drivers/infiniband/hw/hfi1/pio.h void pio_kernel_linkup(struct hfi1_devdata *dd);
dd                323 drivers/infiniband/hw/hfi1/pio.h void __cm_reset(struct hfi1_devdata *dd, u64 sendctrl);
dd                324 drivers/infiniband/hw/hfi1/pio.h void pio_send_control(struct hfi1_devdata *dd, int op);
dd                327 drivers/infiniband/hw/hfi1/pio.h void pio_copy(struct hfi1_devdata *dd, struct pio_buf *pbuf, u64 pbc,
dd                 71 drivers/infiniband/hw/hfi1/pio_copy.c void pio_copy(struct hfi1_devdata *dd, struct pio_buf *pbuf, u64 pbc,
dd                 56 drivers/infiniband/hw/hfi1/platform.c static int validate_scratch_checksum(struct hfi1_devdata *dd)
dd                 61 drivers/infiniband/hw/hfi1/platform.c 	temp_scratch = read_csr(dd, ASIC_CFG_SCRATCH);
dd                 66 drivers/infiniband/hw/hfi1/platform.c 		dd_dev_err(dd, "%s: Config bitmap uninitialized\n", __func__);
dd                 67 drivers/infiniband/hw/hfi1/platform.c 		dd_dev_err(dd,
dd                 80 drivers/infiniband/hw/hfi1/platform.c 		temp_scratch = read_csr(dd, ASIC_CFG_SCRATCH + (8 * i));
dd                 90 drivers/infiniband/hw/hfi1/platform.c 	temp_scratch = read_csr(dd, ASIC_CFG_SCRATCH);
dd                 97 drivers/infiniband/hw/hfi1/platform.c 	dd_dev_err(dd, "%s: Configuration bitmap corrupted\n", __func__);
dd                101 drivers/infiniband/hw/hfi1/platform.c static void save_platform_config_fields(struct hfi1_devdata *dd)
dd                103 drivers/infiniband/hw/hfi1/platform.c 	struct hfi1_pportdata *ppd = dd->pport;
dd                106 drivers/infiniband/hw/hfi1/platform.c 	temp_scratch = read_csr(dd, ASIC_CFG_SCRATCH_1);
dd                109 drivers/infiniband/hw/hfi1/platform.c 		    (dd->hfi1_id ? PORT1_PORT_TYPE_SMASK :
dd                112 drivers/infiniband/hw/hfi1/platform.c 			 (dd->hfi1_id ? PORT1_PORT_TYPE_SHIFT :
dd                116 drivers/infiniband/hw/hfi1/platform.c 		    (dd->hfi1_id ? PORT1_LOCAL_ATTEN_SMASK :
dd                119 drivers/infiniband/hw/hfi1/platform.c 			   (dd->hfi1_id ? PORT1_LOCAL_ATTEN_SHIFT :
dd                123 drivers/infiniband/hw/hfi1/platform.c 		    (dd->hfi1_id ? PORT1_REMOTE_ATTEN_SMASK :
dd                126 drivers/infiniband/hw/hfi1/platform.c 			    (dd->hfi1_id ? PORT1_REMOTE_ATTEN_SHIFT :
dd                130 drivers/infiniband/hw/hfi1/platform.c 		    (dd->hfi1_id ? PORT1_DEFAULT_ATTEN_SMASK :
dd                133 drivers/infiniband/hw/hfi1/platform.c 			     (dd->hfi1_id ? PORT1_DEFAULT_ATTEN_SHIFT :
dd                136 drivers/infiniband/hw/hfi1/platform.c 	temp_scratch = read_csr(dd, dd->hfi1_id ? ASIC_CFG_SCRATCH_3 :
dd                149 drivers/infiniband/hw/hfi1/platform.c void get_platform_config(struct hfi1_devdata *dd)
dd                156 drivers/infiniband/hw/hfi1/platform.c 	if (is_integrated(dd)) {
dd                157 drivers/infiniband/hw/hfi1/platform.c 		if (validate_scratch_checksum(dd)) {
dd                158 drivers/infiniband/hw/hfi1/platform.c 			save_platform_config_fields(dd);
dd                162 drivers/infiniband/hw/hfi1/platform.c 		ret = eprom_read_platform_config(dd,
dd                167 drivers/infiniband/hw/hfi1/platform.c 			dd->platform_config.data = temp_platform_config;
dd                168 drivers/infiniband/hw/hfi1/platform.c 			dd->platform_config.size = esize;
dd                172 drivers/infiniband/hw/hfi1/platform.c 	dd_dev_err(dd,
dd                178 drivers/infiniband/hw/hfi1/platform.c 			       &dd->pcidev->dev);
dd                180 drivers/infiniband/hw/hfi1/platform.c 		dd_dev_err(dd,
dd                191 drivers/infiniband/hw/hfi1/platform.c 	dd->platform_config.data = kmemdup(platform_config_file->data,
dd                194 drivers/infiniband/hw/hfi1/platform.c 	dd->platform_config.size = platform_config_file->size;
dd                198 drivers/infiniband/hw/hfi1/platform.c void free_platform_config(struct hfi1_devdata *dd)
dd                201 drivers/infiniband/hw/hfi1/platform.c 	kfree(dd->platform_config.data);
dd                202 drivers/infiniband/hw/hfi1/platform.c 	dd->platform_config.data = NULL;
dd                210 drivers/infiniband/hw/hfi1/platform.c 	ret = get_platform_config_field(ppd->dd, PLATFORM_CONFIG_PORT_TABLE, 0,
dd                225 drivers/infiniband/hw/hfi1/platform.c 	ret = qsfp_write(ppd, ppd->dd->hfi1_id, QSFP_TX_CTRL_BYTE_OFFS,
dd                242 drivers/infiniband/hw/hfi1/platform.c 		ppd->dd, PLATFORM_CONFIG_SYSTEM_TABLE, 0,
dd                256 drivers/infiniband/hw/hfi1/platform.c 			ppd->dd,
dd                282 drivers/infiniband/hw/hfi1/platform.c 			ppd->dd,
dd                304 drivers/infiniband/hw/hfi1/platform.c 		ret = qsfp_write(ppd, ppd->dd->hfi1_id,
dd                312 drivers/infiniband/hw/hfi1/platform.c 			ret = qsfp_write(ppd, ppd->dd->hfi1_id,
dd                347 drivers/infiniband/hw/hfi1/platform.c 		ppd->dd, PLATFORM_CONFIG_RX_PRESET_TABLE,
dd                353 drivers/infiniband/hw/hfi1/platform.c 			ppd->dd,
dd                359 drivers/infiniband/hw/hfi1/platform.c 		ppd->dd, PLATFORM_CONFIG_RX_PRESET_TABLE,
dd                398 drivers/infiniband/hw/hfi1/platform.c 		ppd->dd,
dd                404 drivers/infiniband/hw/hfi1/platform.c 			ppd->dd,
dd                410 drivers/infiniband/hw/hfi1/platform.c 		ppd->dd,
dd                437 drivers/infiniband/hw/hfi1/platform.c 	qsfp_write(ppd, ppd->dd->hfi1_id, QSFP_CDR_CTRL_BYTE_OFFS,
dd                451 drivers/infiniband/hw/hfi1/platform.c 	qsfp_write(ppd, ppd->dd->hfi1_id, (256 * 3) + 241, &tx_eq, 1);
dd                464 drivers/infiniband/hw/hfi1/platform.c 		ppd->dd, PLATFORM_CONFIG_TX_PRESET_TABLE,
dd                469 drivers/infiniband/hw/hfi1/platform.c 			ppd->dd,
dd                475 drivers/infiniband/hw/hfi1/platform.c 			ppd->dd, PLATFORM_CONFIG_TX_PRESET_TABLE,
dd                481 drivers/infiniband/hw/hfi1/platform.c 			ppd->dd,
dd                486 drivers/infiniband/hw/hfi1/platform.c 			ppd->dd,
dd                494 drivers/infiniband/hw/hfi1/platform.c 	qsfp_write(ppd, ppd->dd->hfi1_id, (256 * 3) + 234, &tx_eq, 1);
dd                495 drivers/infiniband/hw/hfi1/platform.c 	qsfp_write(ppd, ppd->dd->hfi1_id, (256 * 3) + 235, &tx_eq, 1);
dd                506 drivers/infiniband/hw/hfi1/platform.c 			ppd->dd, PLATFORM_CONFIG_RX_PRESET_TABLE,
dd                512 drivers/infiniband/hw/hfi1/platform.c 			ppd->dd,
dd                518 drivers/infiniband/hw/hfi1/platform.c 		ppd->dd, PLATFORM_CONFIG_RX_PRESET_TABLE,
dd                524 drivers/infiniband/hw/hfi1/platform.c 			ppd->dd,
dd                529 drivers/infiniband/hw/hfi1/platform.c 			ppd->dd,
dd                538 drivers/infiniband/hw/hfi1/platform.c 	qsfp_write(ppd, ppd->dd->hfi1_id, (256 * 3) + 236, &rx_eq, 1);
dd                539 drivers/infiniband/hw/hfi1/platform.c 	qsfp_write(ppd, ppd->dd->hfi1_id, (256 * 3) + 237, &rx_eq, 1);
dd                549 drivers/infiniband/hw/hfi1/platform.c 		dd_dev_info(ppd->dd,
dd                571 drivers/infiniband/hw/hfi1/platform.c 		dd_dev_info(ppd->dd,
dd                577 drivers/infiniband/hw/hfi1/platform.c 		dd_dev_info(ppd->dd,
dd                583 drivers/infiniband/hw/hfi1/platform.c 	get_platform_config_field(ppd->dd,
dd                590 drivers/infiniband/hw/hfi1/platform.c 		dd_dev_info(ppd->dd,
dd                595 drivers/infiniband/hw/hfi1/platform.c 	get_platform_config_field(ppd->dd,
dd                601 drivers/infiniband/hw/hfi1/platform.c 	dd_dev_info(ppd->dd,
dd                619 drivers/infiniband/hw/hfi1/platform.c 		dd_dev_info(ppd->dd, "No supported RX AMP, not applying\n");
dd                623 drivers/infiniband/hw/hfi1/platform.c 	dd_dev_info(ppd->dd,
dd                627 drivers/infiniband/hw/hfi1/platform.c 	qsfp_write(ppd, ppd->dd->hfi1_id, (256 * 3) + 238, &rx_amp, 1);
dd                628 drivers/infiniband/hw/hfi1/platform.c 	qsfp_write(ppd, ppd->dd->hfi1_id, (256 * 3) + 239, &rx_amp, 1);
dd                640 drivers/infiniband/hw/hfi1/platform.c 		ret = load_8051_config(ppd->dd, field_id, i, config_data);
dd                643 drivers/infiniband/hw/hfi1/platform.c 				ppd->dd,
dd                692 drivers/infiniband/hw/hfi1/platform.c 	read_8051_config(ppd->dd, LINK_TUNING_PARAMETERS, GENERAL_CONFIG,
dd                696 drivers/infiniband/hw/hfi1/platform.c 	ret = load_8051_config(ppd->dd, LINK_TUNING_PARAMETERS, GENERAL_CONFIG,
dd                699 drivers/infiniband/hw/hfi1/platform.c 		dd_dev_err(ppd->dd, "%s: Failed to set tuning method\n",
dd                714 drivers/infiniband/hw/hfi1/platform.c 		ret = read_8051_config(ppd->dd, DC_HOST_COMM_SETTINGS,
dd                719 drivers/infiniband/hw/hfi1/platform.c 		ret = load_8051_config(ppd->dd, DC_HOST_COMM_SETTINGS,
dd                722 drivers/infiniband/hw/hfi1/platform.c 			dd_dev_err(ppd->dd,
dd                729 drivers/infiniband/hw/hfi1/platform.c 			dd_dev_err(ppd->dd, "%s: Invalid Tx preset index\n",
dd                736 drivers/infiniband/hw/hfi1/platform.c 		ppd->dd, PLATFORM_CONFIG_TX_PRESET_TABLE, tx_preset_index,
dd                741 drivers/infiniband/hw/hfi1/platform.c 		ppd->dd, PLATFORM_CONFIG_TX_PRESET_TABLE,
dd                746 drivers/infiniband/hw/hfi1/platform.c 		ppd->dd, PLATFORM_CONFIG_TX_PRESET_TABLE,
dd                808 drivers/infiniband/hw/hfi1/platform.c 			ppd->dd,
dd                818 drivers/infiniband/hw/hfi1/platform.c 			ppd->dd,
dd                829 drivers/infiniband/hw/hfi1/platform.c 		ppd->dd, PLATFORM_CONFIG_PORT_TABLE, 0,
dd                838 drivers/infiniband/hw/hfi1/platform.c 			ppd->dd, PLATFORM_CONFIG_PORT_TABLE, 0,
dd                842 drivers/infiniband/hw/hfi1/platform.c 			ppd->dd, PLATFORM_CONFIG_PORT_TABLE, 0,
dd                868 drivers/infiniband/hw/hfi1/platform.c 			ppd->dd,
dd                884 drivers/infiniband/hw/hfi1/platform.c 				ppd->dd,
dd                893 drivers/infiniband/hw/hfi1/platform.c 			ppd->dd, PLATFORM_CONFIG_PORT_TABLE, 0,
dd                915 drivers/infiniband/hw/hfi1/platform.c 		dd_dev_warn(ppd->dd, "%s: Unknown/unsupported cable\n",
dd                935 drivers/infiniband/hw/hfi1/platform.c 	struct hfi1_devdata *dd = ppd->dd;
dd                948 drivers/infiniband/hw/hfi1/platform.c 	    ppd->dd->icode == ICODE_FUNCTIONAL_SIMULATOR) {
dd                952 drivers/infiniband/hw/hfi1/platform.c 			ret = acquire_chip_resource(ppd->dd,
dd                953 drivers/infiniband/hw/hfi1/platform.c 						    qsfp_resource(ppd->dd),
dd                956 drivers/infiniband/hw/hfi1/platform.c 				dd_dev_err(ppd->dd, "%s: hfi%d: cannot lock i2c chain\n",
dd                957 drivers/infiniband/hw/hfi1/platform.c 					   __func__, (int)ppd->dd->hfi1_id);
dd                962 drivers/infiniband/hw/hfi1/platform.c 			release_chip_resource(ppd->dd, qsfp_resource(ppd->dd));
dd                972 drivers/infiniband/hw/hfi1/platform.c 		dd_dev_warn(dd, "%s: Port disconnected, disabling port\n",
dd                978 drivers/infiniband/hw/hfi1/platform.c 			ppd->dd, PLATFORM_CONFIG_PORT_TABLE, 0,
dd                982 drivers/infiniband/hw/hfi1/platform.c 			ppd->dd, PLATFORM_CONFIG_PORT_TABLE, 0,
dd                996 drivers/infiniband/hw/hfi1/platform.c 				ppd->dd, PLATFORM_CONFIG_PORT_TABLE, 0,
dd               1001 drivers/infiniband/hw/hfi1/platform.c 				ppd->dd, PLATFORM_CONFIG_PORT_TABLE, 0,
dd               1016 drivers/infiniband/hw/hfi1/platform.c 			ret = acquire_chip_resource(ppd->dd,
dd               1017 drivers/infiniband/hw/hfi1/platform.c 						    qsfp_resource(ppd->dd),
dd               1020 drivers/infiniband/hw/hfi1/platform.c 				dd_dev_err(ppd->dd, "%s: hfi%d: cannot lock i2c chain\n",
dd               1021 drivers/infiniband/hw/hfi1/platform.c 					   __func__, (int)ppd->dd->hfi1_id);
dd               1041 drivers/infiniband/hw/hfi1/platform.c 				dd_dev_err(dd,
dd               1046 drivers/infiniband/hw/hfi1/platform.c 			release_chip_resource(ppd->dd, qsfp_resource(ppd->dd));
dd               1057 drivers/infiniband/hw/hfi1/platform.c 		dd_dev_warn(ppd->dd, "%s: Unknown port type\n", __func__);
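
The platform.c entries above all funnel QSFP hardware access through one guard: resolve the QSFP resource for this dd, acquire it (logging and bailing out if the i2c chain cannot be locked), do the work, and release it on every exit path. A minimal stub-typed sketch of that shape — the type and function bodies here are illustrative stand-ins, not the driver's definitions:

#include <stdio.h>

struct hfi1_devdata { int hfi1_id; };		/* stand-in */
enum { QSFP_WAIT = 1 };

static unsigned qsfp_resource(struct hfi1_devdata *dd)
{
	return dd->hfi1_id ? 2 : 1;		/* QSFP2 vs QSFP1 chain */
}

static int acquire_chip_resource(struct hfi1_devdata *dd, unsigned r, int wait)
{
	(void)dd; (void)r; (void)wait;
	return 0;				/* stub: lock always granted */
}

static void release_chip_resource(struct hfi1_devdata *dd, unsigned r)
{
	(void)dd; (void)r;
}

static int guarded_qsfp_access(struct hfi1_devdata *dd)
{
	unsigned resource = qsfp_resource(dd);
	int ret = acquire_chip_resource(dd, resource, QSFP_WAIT);

	if (ret) {
		fprintf(stderr, "hfi%d: cannot lock i2c chain\n", dd->hfi1_id);
		return ret;
	}
	/* ... QSFP reads/writes happen only while the resource is held ... */
	release_chip_resource(dd, resource);
	return 0;
}
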
dd                406 drivers/infiniband/hw/hfi1/platform.h void get_platform_config(struct hfi1_devdata *dd);
dd                407 drivers/infiniband/hw/hfi1/platform.h void free_platform_config(struct hfi1_devdata *dd);
dd                221 drivers/infiniband/hw/hfi1/qp.c 	struct hfi1_devdata *dd = dd_from_dev(dev);
dd                230 drivers/infiniband/hw/hfi1/qp.c 		    dd->flags & HFI1_HAS_SEND_DMA)
dd                243 drivers/infiniband/hw/hfi1/qp.c 		    dd->flags & HFI1_HAS_SEND_DMA)
dd                324 drivers/infiniband/hw/hfi1/qp.c 	struct hfi1_devdata *dd;
dd                345 drivers/infiniband/hw/hfi1/qp.c 		dd = dd_from_ppd(ppd);
dd                346 drivers/infiniband/hw/hfi1/qp.c 		if (wqe->length > dd->vld[15].mtu)
dd                384 drivers/infiniband/hw/hfi1/qp.c 	struct hfi1_devdata *dd = dd_from_ibdev(qp->ibqp.device);
dd                389 drivers/infiniband/hw/hfi1/qp.c 			       cpumask_first(cpumask_of_node(dd->node)));
dd                593 drivers/infiniband/hw/hfi1/qp.c 	struct hfi1_devdata *dd = dd_from_ibdev(qp->ibqp.device);
dd                596 drivers/infiniband/hw/hfi1/qp.c 	if (!(dd->flags & HFI1_HAS_SEND_DMA))
dd                604 drivers/infiniband/hw/hfi1/qp.c 	sde = sdma_select_engine_sc(dd, qp->ibqp.qp_num >> dd->qos_shift, sc5);
dd                618 drivers/infiniband/hw/hfi1/qp.c 	struct hfi1_devdata *dd = dd_from_ibdev(qp->ibqp.device);
dd                623 drivers/infiniband/hw/hfi1/qp.c 		return dd->vld[15].sc;
dd                628 drivers/infiniband/hw/hfi1/qp.c 	return pio_select_send_context_sc(dd, qp->ibqp.qp_num >> dd->qos_shift,
dd                764 drivers/infiniband/hw/hfi1/qp.c 	struct hfi1_devdata *dd = container_of(verbs_dev,
dd                770 drivers/infiniband/hw/hfi1/qp.c 	for (n = 0; n < dd->num_pports; n++) {
dd                771 drivers/infiniband/hw/hfi1/qp.c 		struct hfi1_ibport *ibp = &dd->pport[n].ibport_data;
dd                857 drivers/infiniband/hw/hfi1/qp.c 	struct hfi1_devdata *dd = container_of(verbs_dev,
dd                863 drivers/infiniband/hw/hfi1/qp.c 	ibp = &dd->pport[qp->port_num - 1].ibport_data;
dd                865 drivers/infiniband/hw/hfi1/qp.c 	vl = sc_to_vlt(dd, sc);
dd                869 drivers/infiniband/hw/hfi1/qp.c 		mtu = min_t(u32, mtu, dd->vld[vl].mtu);
dd                880 drivers/infiniband/hw/hfi1/qp.c 	struct hfi1_devdata *dd = container_of(verbs_dev,
dd                887 drivers/infiniband/hw/hfi1/qp.c 	if (mtu > dd->pport[pidx].ibmtu)
dd                888 drivers/infiniband/hw/hfi1/qp.c 		return mtu_to_enum(dd->pport[pidx].ibmtu, IB_MTU_2048);
dd                974 drivers/infiniband/hw/hfi1/qp.c 	struct hfi1_ibdev *dev = &ppd->dd->verbs_dev;
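
The qp.c entries show how a send engine is chosen per QP: the selector is the QP number shifted down by dd->qos_shift, SDMA is used only when dd->flags carries HFI1_HAS_SEND_DMA, and VL15 traffic bypasses selection for its reserved context (dd->vld[15].sc). A reduced sketch of that decision with hypothetical stand-in types; the final modulo stands in for the real sc-to-vl map lookup:

#include <stdint.h>
#include <stdbool.h>

#define HFI1_HAS_SEND_DMA  0x1	/* illustrative flag value */

struct hfi1_devdata {
	uint64_t flags;
	unsigned qos_shift;
	unsigned num_sdma;
};

/* QPs sharing QoS bits spread evenly because the low bits are shifted away. */
static int pick_send_engine(struct hfi1_devdata *dd, uint32_t qp_num, bool is_vl15)
{
	uint32_t selector;

	if (is_vl15)
		return -1;			/* VL15 uses its reserved context */
	if (!(dd->flags & HFI1_HAS_SEND_DMA))
		return -1;			/* fall back to PIO */
	selector = qp_num >> dd->qos_shift;
	return selector % dd->num_sdma;		/* stand-in for the map lookup */
}
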
dd                 69 drivers/infiniband/hw/hfi1/qsfp.c 	struct hfi1_devdata *dd = bus->controlling_dd;
dd                 74 drivers/infiniband/hw/hfi1/qsfp.c 	reg = read_csr(dd, target_oe);
dd                 85 drivers/infiniband/hw/hfi1/qsfp.c 	write_csr(dd, target_oe, reg);
dd                 87 drivers/infiniband/hw/hfi1/qsfp.c 	(void)read_csr(dd, target_oe);
dd                 93 drivers/infiniband/hw/hfi1/qsfp.c 	struct hfi1_devdata *dd = bus->controlling_dd;
dd                 98 drivers/infiniband/hw/hfi1/qsfp.c 	reg = read_csr(dd, target_oe);
dd                109 drivers/infiniband/hw/hfi1/qsfp.c 	write_csr(dd, target_oe, reg);
dd                111 drivers/infiniband/hw/hfi1/qsfp.c 	(void)read_csr(dd, target_oe);
dd                146 drivers/infiniband/hw/hfi1/qsfp.c static struct hfi1_i2c_bus *init_i2c_bus(struct hfi1_devdata *dd,
dd                156 drivers/infiniband/hw/hfi1/qsfp.c 	bus->controlling_dd = dd;
dd                169 drivers/infiniband/hw/hfi1/qsfp.c 	bus->adapter.dev.parent = &dd->pcidev->dev;
dd                175 drivers/infiniband/hw/hfi1/qsfp.c 		dd_dev_info(dd, "%s: unable to add i2c bus %d, err %d\n",
dd                188 drivers/infiniband/hw/hfi1/qsfp.c int set_up_i2c(struct hfi1_devdata *dd, struct hfi1_asic_data *ad)
dd                190 drivers/infiniband/hw/hfi1/qsfp.c 	ad->i2c_bus0 = init_i2c_bus(dd, ad, 0);
dd                191 drivers/infiniband/hw/hfi1/qsfp.c 	ad->i2c_bus1 = init_i2c_bus(dd, ad, 1);
dd                205 drivers/infiniband/hw/hfi1/qsfp.c void clean_up_i2c(struct hfi1_devdata *dd, struct hfi1_asic_data *ad)
dd                215 drivers/infiniband/hw/hfi1/qsfp.c static int i2c_bus_write(struct hfi1_devdata *dd, struct hfi1_i2c_bus *i2c,
dd                253 drivers/infiniband/hw/hfi1/qsfp.c 	i2c->controlling_dd = dd;
dd                256 drivers/infiniband/hw/hfi1/qsfp.c 		dd_dev_err(dd, "%s: bus %d, i2c slave 0x%x, offset 0x%x, len 0x%x; write failed, ret %d\n",
dd                263 drivers/infiniband/hw/hfi1/qsfp.c static int i2c_bus_read(struct hfi1_devdata *dd, struct hfi1_i2c_bus *bus,
dd                301 drivers/infiniband/hw/hfi1/qsfp.c 	bus->controlling_dd = dd;
dd                304 drivers/infiniband/hw/hfi1/qsfp.c 		dd_dev_err(dd, "%s: bus %d, i2c slave 0x%x, offset 0x%x, len 0x%x; read failed, ret %d\n",
dd                319 drivers/infiniband/hw/hfi1/qsfp.c 	struct hfi1_devdata *dd = ppd->dd;
dd                324 drivers/infiniband/hw/hfi1/qsfp.c 	bus = target ? dd->asic_data->i2c_bus1 : dd->asic_data->i2c_bus0;
dd                327 drivers/infiniband/hw/hfi1/qsfp.c 	return i2c_bus_write(dd, bus, slave_addr, offset, offset_size, bp, len);
dd                340 drivers/infiniband/hw/hfi1/qsfp.c 	if (!check_chip_resource(ppd->dd, i2c_target(target), __func__))
dd                358 drivers/infiniband/hw/hfi1/qsfp.c 	struct hfi1_devdata *dd = ppd->dd;
dd                363 drivers/infiniband/hw/hfi1/qsfp.c 	bus = target ? dd->asic_data->i2c_bus1 : dd->asic_data->i2c_bus0;
dd                366 drivers/infiniband/hw/hfi1/qsfp.c 	return i2c_bus_read(dd, bus, slave_addr, offset, offset_size, bp, len);
dd                379 drivers/infiniband/hw/hfi1/qsfp.c 	if (!check_chip_resource(ppd->dd, i2c_target(target), __func__))
dd                406 drivers/infiniband/hw/hfi1/qsfp.c 	if (!check_chip_resource(ppd->dd, i2c_target(target), __func__))
dd                421 drivers/infiniband/hw/hfi1/qsfp.c 			hfi1_dev_porterr(ppd->dd, ppd->port,
dd                456 drivers/infiniband/hw/hfi1/qsfp.c 	struct hfi1_devdata *dd = ppd->dd;
dd                457 drivers/infiniband/hw/hfi1/qsfp.c 	u32 resource = qsfp_resource(dd);
dd                460 drivers/infiniband/hw/hfi1/qsfp.c 	ret = acquire_chip_resource(dd, resource, QSFP_WAIT);
dd                464 drivers/infiniband/hw/hfi1/qsfp.c 	release_chip_resource(dd, resource);
dd                486 drivers/infiniband/hw/hfi1/qsfp.c 	if (!check_chip_resource(ppd->dd, i2c_target(target), __func__))
dd                500 drivers/infiniband/hw/hfi1/qsfp.c 			hfi1_dev_porterr(ppd->dd, ppd->port,
dd                533 drivers/infiniband/hw/hfi1/qsfp.c 	struct hfi1_devdata *dd = ppd->dd;
dd                534 drivers/infiniband/hw/hfi1/qsfp.c 	u32 resource = qsfp_resource(dd);
dd                537 drivers/infiniband/hw/hfi1/qsfp.c 	ret = acquire_chip_resource(dd, resource, QSFP_WAIT);
dd                541 drivers/infiniband/hw/hfi1/qsfp.c 	release_chip_resource(dd, resource);
dd                560 drivers/infiniband/hw/hfi1/qsfp.c 	u32 target = ppd->dd->hfi1_id;
dd                578 drivers/infiniband/hw/hfi1/qsfp.c 		dd_dev_info(ppd->dd,
dd                591 drivers/infiniband/hw/hfi1/qsfp.c 				dd_dev_info(ppd->dd, "%s failed\n", __func__);
dd                596 drivers/infiniband/hw/hfi1/qsfp.c 				dd_dev_info(ppd->dd, "%s failed\n", __func__);
dd                601 drivers/infiniband/hw/hfi1/qsfp.c 				dd_dev_info(ppd->dd, "%s failed\n", __func__);
dd                608 drivers/infiniband/hw/hfi1/qsfp.c 				dd_dev_info(ppd->dd, "%s failed\n", __func__);
dd                613 drivers/infiniband/hw/hfi1/qsfp.c 				dd_dev_info(ppd->dd, "%s failed\n", __func__);
dd                620 drivers/infiniband/hw/hfi1/qsfp.c 				dd_dev_info(ppd->dd, "%s failed\n", __func__);
dd                625 drivers/infiniband/hw/hfi1/qsfp.c 				dd_dev_info(ppd->dd, "%s failed\n", __func__);
dd                632 drivers/infiniband/hw/hfi1/qsfp.c 				dd_dev_info(ppd->dd, "%s failed\n", __func__);
dd                685 drivers/infiniband/hw/hfi1/qsfp.c 	struct hfi1_devdata *dd = ppd->dd;
dd                688 drivers/infiniband/hw/hfi1/qsfp.c 	reg = read_csr(dd, dd->hfi1_id ? ASIC_QSFP2_IN : ASIC_QSFP1_IN);
dd                707 drivers/infiniband/hw/hfi1/qsfp.c int get_cable_info(struct hfi1_devdata *dd, u32 port_num, u32 addr, u32 len,
dd                714 drivers/infiniband/hw/hfi1/qsfp.c 	if (port_num > dd->num_pports || port_num < 1) {
dd                715 drivers/infiniband/hw/hfi1/qsfp.c 		dd_dev_info(dd, "%s: Invalid port number %d\n",
dd                721 drivers/infiniband/hw/hfi1/qsfp.c 	ppd = dd->pport + (port_num - 1);
dd                766 drivers/infiniband/hw/hfi1/qsfp.c 		ret = one_qsfp_read(ppd, dd->hfi1_id, addr, data + offset, len);
dd                229 drivers/infiniband/hw/hfi1/qsfp.h int get_cable_info(struct hfi1_devdata *dd, u32 port_num, u32 addr,
dd                245 drivers/infiniband/hw/hfi1/qsfp.h int set_up_i2c(struct hfi1_devdata *dd, struct hfi1_asic_data *ad);
dd                246 drivers/infiniband/hw/hfi1/qsfp.h void clean_up_i2c(struct hfi1_devdata *dd, struct hfi1_asic_data *ad);
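
The qsfp.c entries show the two-bus layout: each ASIC carries i2c_bus0 and i2c_bus1, the target selects between them, and the bus records which dd is currently driving it (controlling_dd) before each transfer so the CSR accessors address the right device. A stub-typed sketch of that dispatch, mirroring the 'bus = target ? ...bus1 : ...bus0' lines above:

struct hfi1_devdata;				/* opaque here, pointer use only */

struct hfi1_i2c_bus {
	struct hfi1_devdata *controlling_dd;	/* who drives the bus right now */
	int num;
};

struct hfi1_asic_data {
	struct hfi1_i2c_bus *i2c_bus0;
	struct hfi1_i2c_bus *i2c_bus1;
};

static struct hfi1_i2c_bus *select_bus(struct hfi1_asic_data *ad,
				       struct hfi1_devdata *dd, int target)
{
	struct hfi1_i2c_bus *bus = target ? ad->i2c_bus1 : ad->i2c_bus0;

	bus->controlling_dd = dd;		/* stamp before the transfer */
	return bus;
}
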
dd               1427 drivers/infiniband/hw/hfi1/rc.c 			 sc_to_vlt(ppd->dd, sc5), plen);
dd               1443 drivers/infiniband/hw/hfi1/rc.c 	ppd->dd->pio_inline_send(ppd->dd, pbuf, pbc,
dd               1896 drivers/infiniband/hw/hfi1/rc.c 		if (ppd->dd->flags & HFI1_HAS_SEND_DMA) {
dd                498 drivers/infiniband/hw/hfi1/ruc.c 			this_cpu_inc(*ps->ppd->dd->send_schedule);
dd                504 drivers/infiniband/hw/hfi1/ruc.c 		this_cpu_inc(*ps->ppd->dd->send_schedule);
dd                589 drivers/infiniband/hw/hfi1/ruc.c 			cpumask_first(cpumask_of_node(ps.ppd->dd->node));
dd                291 drivers/infiniband/hw/hfi1/sdma.c 	write_kctxt_csr(sde->dd, sde->this_idx, offset0, value);
dd                298 drivers/infiniband/hw/hfi1/sdma.c 	return read_kctxt_csr(sde->dd, sde->this_idx, offset0);
dd                309 drivers/infiniband/hw/hfi1/sdma.c 	struct hfi1_devdata *dd = sde->dd;
dd                316 drivers/infiniband/hw/hfi1/sdma.c 		reg = read_csr(dd, off + SEND_EGRESS_SEND_DMA_STATUS);
dd                327 drivers/infiniband/hw/hfi1/sdma.c 			dd_dev_err(dd, "%s: engine %u timeout waiting for packets to egress, remaining count %u, bouncing link\n",
dd                329 drivers/infiniband/hw/hfi1/sdma.c 			queue_work(dd->pport->link_wq,
dd                330 drivers/infiniband/hw/hfi1/sdma.c 				   &dd->pport->link_bounce_work);
dd                341 drivers/infiniband/hw/hfi1/sdma.c void sdma_wait(struct hfi1_devdata *dd)
dd                345 drivers/infiniband/hw/hfi1/sdma.c 	for (i = 0; i < dd->num_sdma; i++) {
dd                346 drivers/infiniband/hw/hfi1/sdma.c 		struct sdma_engine *sde = &dd->per_sdma[i];
dd                356 drivers/infiniband/hw/hfi1/sdma.c 	if (!(sde->dd->flags & HFI1_HAS_SDMA_TIMEOUT))
dd                375 drivers/infiniband/hw/hfi1/sdma.c 		dd_dev_err(sde->dd, "expected %llu got %llu\n",
dd                379 drivers/infiniband/hw/hfi1/sdma.c 	__sdma_txclean(sde->dd, tx);
dd                474 drivers/infiniband/hw/hfi1/sdma.c 			dd_dev_err(sde->dd,
dd                491 drivers/infiniband/hw/hfi1/sdma.c 	if (!is_bx(sde->dd) && HFI1_CAP_IS_KSET(SDMA_AHG)) {
dd                493 drivers/infiniband/hw/hfi1/sdma.c 		struct hfi1_devdata *dd = sde->dd;
dd                495 drivers/infiniband/hw/hfi1/sdma.c 		for (index = 0; index < dd->num_sdma; index++) {
dd                496 drivers/infiniband/hw/hfi1/sdma.c 			struct sdma_engine *curr_sdma = &dd->per_sdma[index];
dd                502 drivers/infiniband/hw/hfi1/sdma.c 		dd_dev_err(sde->dd,
dd                514 drivers/infiniband/hw/hfi1/sdma.c 	dd_dev_err(sde->dd, "SDE progress check event\n");
dd                515 drivers/infiniband/hw/hfi1/sdma.c 	for (index = 0; index < sde->dd->num_sdma; index++) {
dd                516 drivers/infiniband/hw/hfi1/sdma.c 		struct sdma_engine *curr_sde = &sde->dd->per_sdma[index];
dd                555 drivers/infiniband/hw/hfi1/sdma.c 		dd_dev_err(sde->dd, "CONFIG SDMA(%u) %s:%d %s()\n",
dd                662 drivers/infiniband/hw/hfi1/sdma.c 	atomic_set(&sde->dd->sdma_unfreeze_count, -1);
dd                663 drivers/infiniband/hw/hfi1/sdma.c 	wake_up_interruptible(&sde->dd->sdma_unfreeze_wq);
dd                751 drivers/infiniband/hw/hfi1/sdma.c 	struct hfi1_devdata *dd = sde->dd;
dd                759 drivers/infiniband/hw/hfi1/sdma.c 	m = rcu_dereference(dd->sdma_map);
dd                781 drivers/infiniband/hw/hfi1/sdma.c 	struct hfi1_devdata *dd,
dd                799 drivers/infiniband/hw/hfi1/sdma.c 	m = rcu_dereference(dd->sdma_map);
dd                802 drivers/infiniband/hw/hfi1/sdma.c 		return &dd->per_sdma[0];
dd                809 drivers/infiniband/hw/hfi1/sdma.c 	rval =  !rval ? &dd->per_sdma[0] : rval;
dd                810 drivers/infiniband/hw/hfi1/sdma.c 	trace_hfi1_sdma_engine_select(dd, selector, vl, rval->this_idx);
dd                824 drivers/infiniband/hw/hfi1/sdma.c 	struct hfi1_devdata *dd,
dd                828 drivers/infiniband/hw/hfi1/sdma.c 	u8 vl = sc_to_vlt(dd, sc5);
dd                830 drivers/infiniband/hw/hfi1/sdma.c 	return sdma_select_engine_vl(dd, selector, vl);
dd                868 drivers/infiniband/hw/hfi1/sdma.c struct sdma_engine *sdma_select_user_engine(struct hfi1_devdata *dd,
dd                884 drivers/infiniband/hw/hfi1/sdma.c 	rht_node = rhashtable_lookup_fast(dd->sdma_rht, &cpu_id,
dd                898 drivers/infiniband/hw/hfi1/sdma.c 	return sdma_select_engine_vl(dd, selector, vl);
dd                936 drivers/infiniband/hw/hfi1/sdma.c 	struct hfi1_devdata *dd = sde->dd;
dd                960 drivers/infiniband/hw/hfi1/sdma.c 		dd_dev_warn(sde->dd, "Invalid CPU mask\n");
dd                977 drivers/infiniband/hw/hfi1/sdma.c 		rht_node = rhashtable_lookup_fast(dd->sdma_rht, &cpu,
dd                997 drivers/infiniband/hw/hfi1/sdma.c 			ret = rhashtable_insert_fast(dd->sdma_rht,
dd               1003 drivers/infiniband/hw/hfi1/sdma.c 				dd_dev_err(sde->dd, "Failed to set process to sde affinity for cpu %lu\n",
dd               1040 drivers/infiniband/hw/hfi1/sdma.c 		rht_node = rhashtable_lookup_fast(dd->sdma_rht, &cpu,
dd               1064 drivers/infiniband/hw/hfi1/sdma.c 				ret = rhashtable_remove_fast(dd->sdma_rht,
dd               1117 drivers/infiniband/hw/hfi1/sdma.c 				struct hfi1_devdata *dd,
dd               1123 drivers/infiniband/hw/hfi1/sdma.c 	rht_node = rhashtable_lookup_fast(dd->sdma_rht, &cpuid,
dd               1200 drivers/infiniband/hw/hfi1/sdma.c int sdma_map_init(struct hfi1_devdata *dd, u8 port, u8 num_vls, u8 *vl_engines)
dd               1208 drivers/infiniband/hw/hfi1/sdma.c 	if (!(dd->flags & HFI1_HAS_SEND_DMA))
dd               1213 drivers/infiniband/hw/hfi1/sdma.c 		sde_per_vl = dd->num_sdma / num_vls;
dd               1215 drivers/infiniband/hw/hfi1/sdma.c 		extra = dd->num_sdma % num_vls;
dd               1253 drivers/infiniband/hw/hfi1/sdma.c 					&dd->per_sdma[engine];
dd               1268 drivers/infiniband/hw/hfi1/sdma.c 	spin_lock_irq(&dd->sde_map_lock);
dd               1269 drivers/infiniband/hw/hfi1/sdma.c 	oldmap = rcu_dereference_protected(dd->sdma_map,
dd               1270 drivers/infiniband/hw/hfi1/sdma.c 					   lockdep_is_held(&dd->sde_map_lock));
dd               1273 drivers/infiniband/hw/hfi1/sdma.c 	rcu_assign_pointer(dd->sdma_map, newmap);
dd               1275 drivers/infiniband/hw/hfi1/sdma.c 	spin_unlock_irq(&dd->sde_map_lock);
dd               1294 drivers/infiniband/hw/hfi1/sdma.c void sdma_clean(struct hfi1_devdata *dd, size_t num_engines)
dd               1299 drivers/infiniband/hw/hfi1/sdma.c 	if (dd->sdma_pad_dma) {
dd               1300 drivers/infiniband/hw/hfi1/sdma.c 		dma_free_coherent(&dd->pcidev->dev, SDMA_PAD,
dd               1301 drivers/infiniband/hw/hfi1/sdma.c 				  (void *)dd->sdma_pad_dma,
dd               1302 drivers/infiniband/hw/hfi1/sdma.c 				  dd->sdma_pad_phys);
dd               1303 drivers/infiniband/hw/hfi1/sdma.c 		dd->sdma_pad_dma = NULL;
dd               1304 drivers/infiniband/hw/hfi1/sdma.c 		dd->sdma_pad_phys = 0;
dd               1306 drivers/infiniband/hw/hfi1/sdma.c 	if (dd->sdma_heads_dma) {
dd               1307 drivers/infiniband/hw/hfi1/sdma.c 		dma_free_coherent(&dd->pcidev->dev, dd->sdma_heads_size,
dd               1308 drivers/infiniband/hw/hfi1/sdma.c 				  (void *)dd->sdma_heads_dma,
dd               1309 drivers/infiniband/hw/hfi1/sdma.c 				  dd->sdma_heads_phys);
dd               1310 drivers/infiniband/hw/hfi1/sdma.c 		dd->sdma_heads_dma = NULL;
dd               1311 drivers/infiniband/hw/hfi1/sdma.c 		dd->sdma_heads_phys = 0;
dd               1313 drivers/infiniband/hw/hfi1/sdma.c 	for (i = 0; dd->per_sdma && i < num_engines; ++i) {
dd               1314 drivers/infiniband/hw/hfi1/sdma.c 		sde = &dd->per_sdma[i];
dd               1321 drivers/infiniband/hw/hfi1/sdma.c 				&dd->pcidev->dev,
dd               1332 drivers/infiniband/hw/hfi1/sdma.c 	spin_lock_irq(&dd->sde_map_lock);
dd               1333 drivers/infiniband/hw/hfi1/sdma.c 	sdma_map_free(rcu_access_pointer(dd->sdma_map));
dd               1334 drivers/infiniband/hw/hfi1/sdma.c 	RCU_INIT_POINTER(dd->sdma_map, NULL);
dd               1335 drivers/infiniband/hw/hfi1/sdma.c 	spin_unlock_irq(&dd->sde_map_lock);
dd               1337 drivers/infiniband/hw/hfi1/sdma.c 	kfree(dd->per_sdma);
dd               1338 drivers/infiniband/hw/hfi1/sdma.c 	dd->per_sdma = NULL;
dd               1340 drivers/infiniband/hw/hfi1/sdma.c 	if (dd->sdma_rht) {
dd               1341 drivers/infiniband/hw/hfi1/sdma.c 		rhashtable_free_and_destroy(dd->sdma_rht, sdma_rht_free, NULL);
dd               1342 drivers/infiniband/hw/hfi1/sdma.c 		kfree(dd->sdma_rht);
dd               1343 drivers/infiniband/hw/hfi1/sdma.c 		dd->sdma_rht = NULL;
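
The sdma_map_init()/sdma_clean() lines above pair with the lockless readers earlier in the file as classic RCU: readers rcu_dereference() dd->sdma_map with no lock, while the writer serializes on dd->sde_map_lock, publishes the new map with rcu_assign_pointer(), and frees the old one only after a grace period. A user-space approximation using C11 release/acquire in place of RCU — an analogy only, since this sketch has no grace period and so deliberately leaks the old map:

#include <stdatomic.h>
#include <stdlib.h>

struct sdma_map { unsigned nengines; /* ... per-vl tables ... */ };

static _Atomic(struct sdma_map *) sdma_map;

/* Reader: one acquire load, then use the snapshot without locks. */
static unsigned reader_num_engines(void)
{
	struct sdma_map *m = atomic_load_explicit(&sdma_map, memory_order_acquire);

	return m ? m->nengines : 0;
}

/* Writer: build a complete new map, then publish in one release store.
 * The kernel frees the old map after synchronize_rcu(); omitted here. */
static void writer_install(unsigned nengines)
{
	struct sdma_map *newmap = malloc(sizeof(*newmap));

	if (!newmap)
		return;
	newmap->nengines = nengines;
	atomic_store_explicit(&sdma_map, newmap, memory_order_release);
}
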
dd               1358 drivers/infiniband/hw/hfi1/sdma.c int sdma_init(struct hfi1_devdata *dd, u8 port)
dd               1365 drivers/infiniband/hw/hfi1/sdma.c 	struct hfi1_pportdata *ppd = dd->pport + port;
dd               1368 drivers/infiniband/hw/hfi1/sdma.c 	size_t num_engines = chip_sdma_engines(dd);
dd               1377 drivers/infiniband/hw/hfi1/sdma.c 	    mod_num_sdma <= chip_sdma_engines(dd) &&
dd               1382 drivers/infiniband/hw/hfi1/sdma.c 	dd_dev_info(dd, "SDMA mod_num_sdma: %u\n", mod_num_sdma);
dd               1383 drivers/infiniband/hw/hfi1/sdma.c 	dd_dev_info(dd, "SDMA chip_sdma_engines: %u\n", chip_sdma_engines(dd));
dd               1384 drivers/infiniband/hw/hfi1/sdma.c 	dd_dev_info(dd, "SDMA chip_sdma_mem_size: %u\n",
dd               1385 drivers/infiniband/hw/hfi1/sdma.c 		    chip_sdma_mem_size(dd));
dd               1388 drivers/infiniband/hw/hfi1/sdma.c 		chip_sdma_mem_size(dd) / (num_engines * SDMA_BLOCK_SIZE);
dd               1391 drivers/infiniband/hw/hfi1/sdma.c 	init_waitqueue_head(&dd->sdma_unfreeze_wq);
dd               1392 drivers/infiniband/hw/hfi1/sdma.c 	atomic_set(&dd->sdma_unfreeze_count, 0);
dd               1395 drivers/infiniband/hw/hfi1/sdma.c 	dd_dev_info(dd, "SDMA engines %zu descq_cnt %u\n",
dd               1399 drivers/infiniband/hw/hfi1/sdma.c 	dd->per_sdma = kcalloc_node(num_engines, sizeof(*dd->per_sdma),
dd               1400 drivers/infiniband/hw/hfi1/sdma.c 				    GFP_KERNEL, dd->node);
dd               1401 drivers/infiniband/hw/hfi1/sdma.c 	if (!dd->per_sdma)
dd               1404 drivers/infiniband/hw/hfi1/sdma.c 	idle_cnt = ns_to_cclock(dd, idle_cnt);
dd               1406 drivers/infiniband/hw/hfi1/sdma.c 		dd->default_desc1 =
dd               1409 drivers/infiniband/hw/hfi1/sdma.c 		dd->default_desc1 =
dd               1417 drivers/infiniband/hw/hfi1/sdma.c 		sde = &dd->per_sdma[this_idx];
dd               1418 drivers/infiniband/hw/hfi1/sdma.c 		sde->dd = dd;
dd               1455 drivers/infiniband/hw/hfi1/sdma.c 			get_kctxt_csr_addr(dd, this_idx, SD(TAIL));
dd               1470 drivers/infiniband/hw/hfi1/sdma.c 		sde->descq = dma_alloc_coherent(&dd->pcidev->dev,
dd               1478 drivers/infiniband/hw/hfi1/sdma.c 				      GFP_KERNEL, dd->node);
dd               1483 drivers/infiniband/hw/hfi1/sdma.c 	dd->sdma_heads_size = L1_CACHE_BYTES * num_engines;
dd               1485 drivers/infiniband/hw/hfi1/sdma.c 	dd->sdma_heads_dma = dma_alloc_coherent(&dd->pcidev->dev,
dd               1486 drivers/infiniband/hw/hfi1/sdma.c 						dd->sdma_heads_size,
dd               1487 drivers/infiniband/hw/hfi1/sdma.c 						&dd->sdma_heads_phys,
dd               1489 drivers/infiniband/hw/hfi1/sdma.c 	if (!dd->sdma_heads_dma) {
dd               1490 drivers/infiniband/hw/hfi1/sdma.c 		dd_dev_err(dd, "failed to allocate SendDMA head memory\n");
dd               1495 drivers/infiniband/hw/hfi1/sdma.c 	dd->sdma_pad_dma = dma_alloc_coherent(&dd->pcidev->dev, SDMA_PAD,
dd               1496 drivers/infiniband/hw/hfi1/sdma.c 					      &dd->sdma_pad_phys, GFP_KERNEL);
dd               1497 drivers/infiniband/hw/hfi1/sdma.c 	if (!dd->sdma_pad_dma) {
dd               1498 drivers/infiniband/hw/hfi1/sdma.c 		dd_dev_err(dd, "failed to allocate SendDMA pad memory\n");
dd               1503 drivers/infiniband/hw/hfi1/sdma.c 	curr_head = (void *)dd->sdma_heads_dma;
dd               1507 drivers/infiniband/hw/hfi1/sdma.c 		sde = &dd->per_sdma[this_idx];
dd               1512 drivers/infiniband/hw/hfi1/sdma.c 			      (unsigned long)dd->sdma_heads_dma;
dd               1513 drivers/infiniband/hw/hfi1/sdma.c 		sde->head_phys = dd->sdma_heads_phys + phys_offset;
dd               1516 drivers/infiniband/hw/hfi1/sdma.c 	dd->flags |= HFI1_HAS_SEND_DMA;
dd               1517 drivers/infiniband/hw/hfi1/sdma.c 	dd->flags |= idle_cnt ? HFI1_HAS_SDMA_TIMEOUT : 0;
dd               1518 drivers/infiniband/hw/hfi1/sdma.c 	dd->num_sdma = num_engines;
dd               1519 drivers/infiniband/hw/hfi1/sdma.c 	ret = sdma_map_init(dd, port, ppd->vls_operational, NULL);
dd               1535 drivers/infiniband/hw/hfi1/sdma.c 	dd->sdma_rht = tmp_sdma_rht;
dd               1537 drivers/infiniband/hw/hfi1/sdma.c 	dd_dev_info(dd, "SDMA num_sdma: %u\n", dd->num_sdma);
dd               1541 drivers/infiniband/hw/hfi1/sdma.c 	sdma_clean(dd, num_engines);
dd               1551 drivers/infiniband/hw/hfi1/sdma.c void sdma_all_running(struct hfi1_devdata *dd)
dd               1557 drivers/infiniband/hw/hfi1/sdma.c 	for (i = 0; i < dd->num_sdma; ++i) {
dd               1558 drivers/infiniband/hw/hfi1/sdma.c 		sde = &dd->per_sdma[i];
dd               1569 drivers/infiniband/hw/hfi1/sdma.c void sdma_all_idle(struct hfi1_devdata *dd)
dd               1575 drivers/infiniband/hw/hfi1/sdma.c 	for (i = 0; i < dd->num_sdma; ++i) {
dd               1576 drivers/infiniband/hw/hfi1/sdma.c 		sde = &dd->per_sdma[i];
dd               1589 drivers/infiniband/hw/hfi1/sdma.c void sdma_start(struct hfi1_devdata *dd)
dd               1595 drivers/infiniband/hw/hfi1/sdma.c 	for (i = 0; i < dd->num_sdma; ++i) {
dd               1596 drivers/infiniband/hw/hfi1/sdma.c 		sde = &dd->per_sdma[i];
dd               1605 drivers/infiniband/hw/hfi1/sdma.c void sdma_exit(struct hfi1_devdata *dd)
dd               1610 drivers/infiniband/hw/hfi1/sdma.c 	for (this_idx = 0; dd->per_sdma && this_idx < dd->num_sdma;
dd               1612 drivers/infiniband/hw/hfi1/sdma.c 		sde = &dd->per_sdma[this_idx];
dd               1614 drivers/infiniband/hw/hfi1/sdma.c 			dd_dev_err(dd, "sde %u: dmawait list not empty!\n",
dd               1633 drivers/infiniband/hw/hfi1/sdma.c 	struct hfi1_devdata *dd,
dd               1639 drivers/infiniband/hw/hfi1/sdma.c 			&dd->pcidev->dev,
dd               1646 drivers/infiniband/hw/hfi1/sdma.c 			&dd->pcidev->dev,
dd               1676 drivers/infiniband/hw/hfi1/sdma.c 	struct hfi1_devdata *dd,
dd               1685 drivers/infiniband/hw/hfi1/sdma.c 		sdma_unmap_desc(dd, &tx->descp[0]);
dd               1690 drivers/infiniband/hw/hfi1/sdma.c 			sdma_unmap_desc(dd, &tx->descp[i]);
dd               1704 drivers/infiniband/hw/hfi1/sdma.c 	struct hfi1_devdata *dd = sde->dd;
dd               1709 drivers/infiniband/hw/hfi1/sdma.c 	dd_dev_err(sde->dd, "CONFIG SDMA(%u) %s:%d %s()\n",
dd               1715 drivers/infiniband/hw/hfi1/sdma.c 					(dd->flags & HFI1_HAS_SDMA_TIMEOUT);
dd               1743 drivers/infiniband/hw/hfi1/sdma.c 			dd_dev_err(dd, "SDMA(%u) bad head (%s) hwhd=%hu swhd=%hu swtl=%hu cnt=%hu\n",
dd               1772 drivers/infiniband/hw/hfi1/sdma.c 	dd_dev_err(sde->dd, "CONFIG SDMA(%u) %s:%d %s()\n", sde->this_idx,
dd               1774 drivers/infiniband/hw/hfi1/sdma.c 	dd_dev_err(sde->dd, "avail: %u\n", avail);
dd               1920 drivers/infiniband/hw/hfi1/sdma.c 	dd_dev_err(sde->dd, "CONFIG SDMA(%u) error status 0x%llx state %s\n",
dd               1930 drivers/infiniband/hw/hfi1/sdma.c 		dd_dev_err(sde->dd,
dd               1948 drivers/infiniband/hw/hfi1/sdma.c 	dd_dev_err(sde->dd, "CONFIG SDMA(%u) senddmactrl E=%d I=%d H=%d C=%d\n",
dd               1993 drivers/infiniband/hw/hfi1/sdma.c 	dd_dev_err(sde->dd, "CONFIG SDMA(%u) %s:%d %s()\n",
dd               2025 drivers/infiniband/hw/hfi1/sdma.c 	dd_dev_err(sde->dd, "CONFIG SDMA(%u) %s:%d %s()\n",
dd               2045 drivers/infiniband/hw/hfi1/sdma.c 	struct hfi1_devdata *dd = sde->dd;
dd               2048 drivers/infiniband/hw/hfi1/sdma.c 		      hfi1_pkt_base_sdma_integrity(dd));
dd               2058 drivers/infiniband/hw/hfi1/sdma.c 	struct hfi1_devdata *dd = sde->dd;
dd               2060 drivers/infiniband/hw/hfi1/sdma.c 	dd_dev_err(dd, "CONFIG SDMA(%u) %s:%d %s()\n",
dd               2086 drivers/infiniband/hw/hfi1/sdma.c 		csr = read_csr(sde->dd, reg); \
dd               2087 drivers/infiniband/hw/hfi1/sdma.c 		dd_dev_err(sde->dd, "%36s     0x%016llx\n", #reg, csr); \
dd               2092 drivers/infiniband/hw/hfi1/sdma.c 		dd_dev_err(sde->dd, "%36s[%02u] 0x%016llx\n", \
dd               2097 drivers/infiniband/hw/hfi1/sdma.c 		csr = read_csr(sde->dd, reg + (8 * i)); \
dd               2098 drivers/infiniband/hw/hfi1/sdma.c 		dd_dev_err(sde->dd, "%33s_%02u     0x%016llx\n", \
dd               2156 drivers/infiniband/hw/hfi1/sdma.c 	dd_dev_err(sde->dd,
dd               2179 drivers/infiniband/hw/hfi1/sdma.c 		dd_dev_err(sde->dd,
dd               2182 drivers/infiniband/hw/hfi1/sdma.c 		dd_dev_err(sde->dd,
dd               2186 drivers/infiniband/hw/hfi1/sdma.c 			dd_dev_err(sde->dd,
dd               2568 drivers/infiniband/hw/hfi1/sdma.c 	dd_dev_err(sde->dd, "CONFIG SDMA(%u) [%s] %s\n", sde->this_idx,
dd               2732 drivers/infiniband/hw/hfi1/sdma.c 			atomic_dec(&sde->dd->sdma_unfreeze_count);
dd               2733 drivers/infiniband/hw/hfi1/sdma.c 			wake_up_interruptible(&sde->dd->sdma_unfreeze_wq);
dd               2962 drivers/infiniband/hw/hfi1/sdma.c 			atomic_dec(&sde->dd->sdma_unfreeze_count);
dd               2963 drivers/infiniband/hw/hfi1/sdma.c 			wake_up_interruptible(&sde->dd->sdma_unfreeze_wq);
dd               3027 drivers/infiniband/hw/hfi1/sdma.c 			atomic_dec(&sde->dd->sdma_unfreeze_count);
dd               3028 drivers/infiniband/hw/hfi1/sdma.c 			wake_up_interruptible(&sde->dd->sdma_unfreeze_wq);
dd               3056 drivers/infiniband/hw/hfi1/sdma.c static int _extend_sdma_tx_descs(struct hfi1_devdata *dd, struct sdma_txreq *tx)
dd               3093 drivers/infiniband/hw/hfi1/sdma.c 	__sdma_txclean(dd, tx);
dd               3113 drivers/infiniband/hw/hfi1/sdma.c int ext_coal_sdma_tx_descs(struct hfi1_devdata *dd, struct sdma_txreq *tx,
dd               3120 drivers/infiniband/hw/hfi1/sdma.c 	rval = _extend_sdma_tx_descs(dd, tx);
dd               3122 drivers/infiniband/hw/hfi1/sdma.c 		__sdma_txclean(dd, tx);
dd               3129 drivers/infiniband/hw/hfi1/sdma.c 			__sdma_txclean(dd, tx);
dd               3137 drivers/infiniband/hw/hfi1/sdma.c 			__sdma_txclean(dd, tx);
dd               3161 drivers/infiniband/hw/hfi1/sdma.c 		addr = dma_map_single(&dd->pcidev->dev,
dd               3166 drivers/infiniband/hw/hfi1/sdma.c 		if (unlikely(dma_mapping_error(&dd->pcidev->dev, addr))) {
dd               3167 drivers/infiniband/hw/hfi1/sdma.c 			__sdma_txclean(dd, tx);
dd               3173 drivers/infiniband/hw/hfi1/sdma.c 		return _sdma_txadd_daddr(dd, SDMA_MAP_SINGLE, tx,
dd               3181 drivers/infiniband/hw/hfi1/sdma.c void sdma_update_lmc(struct hfi1_devdata *dd, u64 mask, u32 lid)
dd               3192 drivers/infiniband/hw/hfi1/sdma.c 	for (i = 0; i < dd->num_sdma; i++) {
dd               3195 drivers/infiniband/hw/hfi1/sdma.c 		sde = &dd->per_sdma[i];
dd               3201 drivers/infiniband/hw/hfi1/sdma.c int _pad_sdma_tx_descs(struct hfi1_devdata *dd, struct sdma_txreq *tx)
dd               3207 drivers/infiniband/hw/hfi1/sdma.c 		rval = _extend_sdma_tx_descs(dd, tx);
dd               3209 drivers/infiniband/hw/hfi1/sdma.c 			__sdma_txclean(dd, tx);
dd               3217 drivers/infiniband/hw/hfi1/sdma.c 		dd->sdma_pad_phys,
dd               3219 drivers/infiniband/hw/hfi1/sdma.c 	_sdma_close_tx(dd, tx);
dd               3339 drivers/infiniband/hw/hfi1/sdma.c void sdma_freeze_notify(struct hfi1_devdata *dd, int link_down)
dd               3346 drivers/infiniband/hw/hfi1/sdma.c 	atomic_set(&dd->sdma_unfreeze_count, dd->num_sdma);
dd               3349 drivers/infiniband/hw/hfi1/sdma.c 	for (i = 0; i < dd->num_sdma; i++)
dd               3350 drivers/infiniband/hw/hfi1/sdma.c 		sdma_process_event(&dd->per_sdma[i], event);
dd               3359 drivers/infiniband/hw/hfi1/sdma.c void sdma_freeze(struct hfi1_devdata *dd)
dd               3368 drivers/infiniband/hw/hfi1/sdma.c 	ret = wait_event_interruptible(dd->sdma_unfreeze_wq,
dd               3369 drivers/infiniband/hw/hfi1/sdma.c 				       atomic_read(&dd->sdma_unfreeze_count) <=
dd               3372 drivers/infiniband/hw/hfi1/sdma.c 	if (ret || atomic_read(&dd->sdma_unfreeze_count) < 0)
dd               3376 drivers/infiniband/hw/hfi1/sdma.c 	atomic_set(&dd->sdma_unfreeze_count, dd->num_sdma);
dd               3379 drivers/infiniband/hw/hfi1/sdma.c 	for (i = 0; i < dd->num_sdma; i++)
dd               3380 drivers/infiniband/hw/hfi1/sdma.c 		sdma_process_event(&dd->per_sdma[i], sdma_event_e81_hw_frozen);
dd               3387 drivers/infiniband/hw/hfi1/sdma.c 	(void)wait_event_interruptible(dd->sdma_unfreeze_wq,
dd               3388 drivers/infiniband/hw/hfi1/sdma.c 				atomic_read(&dd->sdma_unfreeze_count) <= 0);
dd               3400 drivers/infiniband/hw/hfi1/sdma.c void sdma_unfreeze(struct hfi1_devdata *dd)
dd               3405 drivers/infiniband/hw/hfi1/sdma.c 	for (i = 0; i < dd->num_sdma; i++)
dd               3406 drivers/infiniband/hw/hfi1/sdma.c 		sdma_process_event(&dd->per_sdma[i],
dd               3420 drivers/infiniband/hw/hfi1/sdma.c 	write_csr(sde->dd,
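
The freeze entries above form a countdown barrier: sdma_freeze_notify() primes dd->sdma_unfreeze_count to dd->num_sdma and broadcasts an event to every engine; each engine acknowledges by decrementing the counter and waking dd->sdma_unfreeze_wq, and sdma_freeze() sleeps until the count reaches zero, repeating the handshake for the second freeze phase. A compact standalone model of the counting half (the kernel sleeps on a wait queue where this sketch spins):

#include <stdatomic.h>

struct freeze_barrier {
	atomic_int remaining;	/* models dd->sdma_unfreeze_count */
};

static void barrier_arm(struct freeze_barrier *b, int num_engines)
{
	atomic_store(&b->remaining, num_engines);
	/* kernel: sdma_process_event(...) broadcast to each engine here */
}

/* Called from each engine once it has reached the frozen state. */
static void barrier_ack(struct freeze_barrier *b)
{
	atomic_fetch_sub(&b->remaining, 1);
	/* kernel: wake_up_interruptible(&dd->sdma_unfreeze_wq) */
}

static void barrier_wait(struct freeze_barrier *b)
{
	while (atomic_load(&b->remaining) > 0)
		;	/* busy-wait stands in for wait_event_interruptible() */
}
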
dd                313 drivers/infiniband/hw/hfi1/sdma.h 	struct hfi1_devdata *dd;
dd                412 drivers/infiniband/hw/hfi1/sdma.h int sdma_init(struct hfi1_devdata *dd, u8 port);
dd                413 drivers/infiniband/hw/hfi1/sdma.h void sdma_start(struct hfi1_devdata *dd);
dd                414 drivers/infiniband/hw/hfi1/sdma.h void sdma_exit(struct hfi1_devdata *dd);
dd                415 drivers/infiniband/hw/hfi1/sdma.h void sdma_clean(struct hfi1_devdata *dd, size_t num_engines);
dd                416 drivers/infiniband/hw/hfi1/sdma.h void sdma_all_running(struct hfi1_devdata *dd);
dd                417 drivers/infiniband/hw/hfi1/sdma.h void sdma_all_idle(struct hfi1_devdata *dd);
dd                418 drivers/infiniband/hw/hfi1/sdma.h void sdma_freeze_notify(struct hfi1_devdata *dd, int go_idle);
dd                419 drivers/infiniband/hw/hfi1/sdma.h void sdma_freeze(struct hfi1_devdata *dd);
dd                420 drivers/infiniband/hw/hfi1/sdma.h void sdma_unfreeze(struct hfi1_devdata *dd);
dd                421 drivers/infiniband/hw/hfi1/sdma.h void sdma_wait(struct hfi1_devdata *dd);
dd                659 drivers/infiniband/hw/hfi1/sdma.h int ext_coal_sdma_tx_descs(struct hfi1_devdata *dd, struct sdma_txreq *tx,
dd                665 drivers/infiniband/hw/hfi1/sdma.h static inline void sdma_txclean(struct hfi1_devdata *dd, struct sdma_txreq *tx)
dd                668 drivers/infiniband/hw/hfi1/sdma.h 		__sdma_txclean(dd, tx);
dd                672 drivers/infiniband/hw/hfi1/sdma.h static inline void _sdma_close_tx(struct hfi1_devdata *dd,
dd                678 drivers/infiniband/hw/hfi1/sdma.h 		dd->default_desc1;
dd                686 drivers/infiniband/hw/hfi1/sdma.h 	struct hfi1_devdata *dd,
dd                703 drivers/infiniband/hw/hfi1/sdma.h 			rval = _pad_sdma_tx_descs(dd, tx);
dd                707 drivers/infiniband/hw/hfi1/sdma.h 			_sdma_close_tx(dd, tx);
dd                731 drivers/infiniband/hw/hfi1/sdma.h 	struct hfi1_devdata *dd,
dd                741 drivers/infiniband/hw/hfi1/sdma.h 		rval = ext_coal_sdma_tx_descs(dd, tx, SDMA_MAP_PAGE,
dd                748 drivers/infiniband/hw/hfi1/sdma.h 		       &dd->pcidev->dev,
dd                754 drivers/infiniband/hw/hfi1/sdma.h 	if (unlikely(dma_mapping_error(&dd->pcidev->dev, addr))) {
dd                755 drivers/infiniband/hw/hfi1/sdma.h 		__sdma_txclean(dd, tx);
dd                760 drivers/infiniband/hw/hfi1/sdma.h 			dd, SDMA_MAP_PAGE, tx, addr, len);
dd                780 drivers/infiniband/hw/hfi1/sdma.h 	struct hfi1_devdata *dd,
dd                788 drivers/infiniband/hw/hfi1/sdma.h 		rval = ext_coal_sdma_tx_descs(dd, tx, SDMA_MAP_NONE,
dd                794 drivers/infiniband/hw/hfi1/sdma.h 	return _sdma_txadd_daddr(dd, SDMA_MAP_NONE, tx, addr, len);
dd                814 drivers/infiniband/hw/hfi1/sdma.h 	struct hfi1_devdata *dd,
dd                823 drivers/infiniband/hw/hfi1/sdma.h 		rval = ext_coal_sdma_tx_descs(dd, tx, SDMA_MAP_SINGLE,
dd                830 drivers/infiniband/hw/hfi1/sdma.h 		       &dd->pcidev->dev,
dd                835 drivers/infiniband/hw/hfi1/sdma.h 	if (unlikely(dma_mapping_error(&dd->pcidev->dev, addr))) {
dd                836 drivers/infiniband/hw/hfi1/sdma.h 		__sdma_txclean(dd, tx);
dd                841 drivers/infiniband/hw/hfi1/sdma.h 			dd, SDMA_MAP_SINGLE, tx, addr, len);
dd                923 drivers/infiniband/hw/hfi1/sdma.h 	struct hfi1_pportdata *ppd = sde->dd->pport;
dd               1031 drivers/infiniband/hw/hfi1/sdma.h 	struct hfi1_devdata *dd,
dd               1055 drivers/infiniband/hw/hfi1/sdma.h 	struct hfi1_devdata *dd,
dd               1060 drivers/infiniband/hw/hfi1/sdma.h 	struct hfi1_devdata *dd,
dd               1064 drivers/infiniband/hw/hfi1/sdma.h struct sdma_engine *sdma_select_user_engine(struct hfi1_devdata *dd,
dd               1071 drivers/infiniband/hw/hfi1/sdma.h void sdma_seqfile_dump_cpu_list(struct seq_file *s, struct hfi1_devdata *dd,
dd               1091 drivers/infiniband/hw/hfi1/sdma.h void sdma_update_lmc(struct hfi1_devdata *dd, u64 mask, u32 lid);
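
The inline txadd helpers in sdma.h share one discipline: map the buffer, test the result with dma_mapping_error(), and on failure unwind the whole descriptor list with __sdma_txclean() before returning -ENOSPC, so only a verified DMA address ever reaches _sdma_txadd_daddr(). A reduced sketch of that map-check-unwind shape with stand-in mapping calls:

#include <errno.h>
#include <stdint.h>
#include <stddef.h>

typedef uint64_t dma_addr_t;
#define BAD_DMA_ADDR ((dma_addr_t)~0ULL)	/* stand-in error cookie */

struct sdma_txreq { int num_desc; /* ... descriptor array ... */ };

static dma_addr_t stub_map(void *buf, size_t len) { (void)buf; (void)len; return 0x1000; }
static int stub_mapping_error(dma_addr_t a) { return a == BAD_DMA_ADDR; }
static void stub_txclean(struct sdma_txreq *tx) { tx->num_desc = 0; }

static int txadd_buffer(struct sdma_txreq *tx, void *buf, size_t len)
{
	dma_addr_t addr = stub_map(buf, len);

	if (stub_mapping_error(addr)) {
		stub_txclean(tx);	/* unwind everything added so far */
		return -ENOSPC;
	}
	tx->num_desc++;			/* stand-in for _sdma_txadd_daddr() */
	return 0;
}
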
dd                297 drivers/infiniband/hw/hfi1/sysfs.c 	struct hfi1_devdata *dd = ppd->dd;
dd                299 drivers/infiniband/hw/hfi1/sysfs.c 	return sprintf(buf, "%u\n", *((u8 *)dd->sc2vl + sattr->sc));
dd                476 drivers/infiniband/hw/hfi1/sysfs.c 	struct hfi1_devdata *dd = ppd->dd;
dd                478 drivers/infiniband/hw/hfi1/sysfs.c 	return sprintf(buf, "%u\n", dd->vld[vlattr->vl].mtu);
dd                512 drivers/infiniband/hw/hfi1/sysfs.c 	struct hfi1_devdata *dd = dd_from_dev(dev);
dd                515 drivers/infiniband/hw/hfi1/sysfs.c 	if (!dd->boardname)
dd                518 drivers/infiniband/hw/hfi1/sysfs.c 		ret = scnprintf(buf, PAGE_SIZE, "%s\n", dd->boardname);
dd                528 drivers/infiniband/hw/hfi1/sysfs.c 	struct hfi1_devdata *dd = dd_from_dev(dev);
dd                531 drivers/infiniband/hw/hfi1/sysfs.c 	return scnprintf(buf, PAGE_SIZE, "%s", dd->boardversion);
dd                540 drivers/infiniband/hw/hfi1/sysfs.c 	struct hfi1_devdata *dd = dd_from_dev(dev);
dd                549 drivers/infiniband/hw/hfi1/sysfs.c 			 min(dd->num_user_contexts,
dd                550 drivers/infiniband/hw/hfi1/sysfs.c 			     (u32)dd->sc_sizes[SC_USER].count));
dd                559 drivers/infiniband/hw/hfi1/sysfs.c 	struct hfi1_devdata *dd = dd_from_dev(dev);
dd                562 drivers/infiniband/hw/hfi1/sysfs.c 	return scnprintf(buf, PAGE_SIZE, "%u\n", dd->freectxts);
dd                571 drivers/infiniband/hw/hfi1/sysfs.c 	struct hfi1_devdata *dd = dd_from_dev(dev);
dd                573 drivers/infiniband/hw/hfi1/sysfs.c 	return scnprintf(buf, PAGE_SIZE, "%s", dd->serial);
dd                583 drivers/infiniband/hw/hfi1/sysfs.c 	struct hfi1_devdata *dd = dd_from_dev(dev);
dd                586 drivers/infiniband/hw/hfi1/sysfs.c 	if (count < 5 || memcmp(buf, "reset", 5) || !dd->diag_client) {
dd                591 drivers/infiniband/hw/hfi1/sysfs.c 	ret = hfi1_reset_device(dd->unit);
dd                613 drivers/infiniband/hw/hfi1/sysfs.c 	struct hfi1_devdata *dd = dd_from_dev(dev);
dd                617 drivers/infiniband/hw/hfi1/sysfs.c 	ret = hfi1_tempsense_rd(dd, &temp);
dd                660 drivers/infiniband/hw/hfi1/sysfs.c 	struct hfi1_devdata *dd = dd_from_ibdev(ibdev);
dd                663 drivers/infiniband/hw/hfi1/sysfs.c 	if (!port_num || port_num > dd->num_pports) {
dd                664 drivers/infiniband/hw/hfi1/sysfs.c 		dd_dev_err(dd,
dd                669 drivers/infiniband/hw/hfi1/sysfs.c 	ppd = &dd->pport[port_num - 1];
dd                674 drivers/infiniband/hw/hfi1/sysfs.c 		dd_dev_err(dd,
dd                688 drivers/infiniband/hw/hfi1/sysfs.c 		dd_dev_err(dd,
dd                698 drivers/infiniband/hw/hfi1/sysfs.c 		dd_dev_err(dd,
dd                708 drivers/infiniband/hw/hfi1/sysfs.c 		dd_dev_err(dd,
dd                718 drivers/infiniband/hw/hfi1/sysfs.c 		dd_dev_err(dd,
dd                726 drivers/infiniband/hw/hfi1/sysfs.c 		dd_dev_err(dd,
dd                732 drivers/infiniband/hw/hfi1/sysfs.c 	dd_dev_info(dd,
dd                836 drivers/infiniband/hw/hfi1/sysfs.c int hfi1_verbs_register_sysfs(struct hfi1_devdata *dd)
dd                838 drivers/infiniband/hw/hfi1/sysfs.c 	struct ib_device *dev = &dd->verbs_dev.rdi.ibdev;
dd                842 drivers/infiniband/hw/hfi1/sysfs.c 	for (i = 0; i < dd->num_sdma; i++) {
dd                843 drivers/infiniband/hw/hfi1/sysfs.c 		ret = kobject_init_and_add(&dd->per_sdma[i].kobj,
dd                850 drivers/infiniband/hw/hfi1/sysfs.c 			ret = sysfs_create_file(&dd->per_sdma[i].kobj,
dd                865 drivers/infiniband/hw/hfi1/sysfs.c 		kobject_put(&dd->per_sdma[i].kobj);
dd                873 drivers/infiniband/hw/hfi1/sysfs.c void hfi1_verbs_unregister_sysfs(struct hfi1_devdata *dd)
dd                879 drivers/infiniband/hw/hfi1/sysfs.c 	for (i = 0; i < dd->num_sdma; i++)
dd                880 drivers/infiniband/hw/hfi1/sysfs.c 		kobject_put(&dd->per_sdma[i].kobj);
dd                882 drivers/infiniband/hw/hfi1/sysfs.c 	for (i = 0; i < dd->num_pports; i++) {
dd                883 drivers/infiniband/hw/hfi1/sysfs.c 		ppd = &dd->pport[i];
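
hfi1_verbs_register_sysfs() above registers one kobject per SDMA engine and, on any failure, unwinds by kobject_put()ing everything registered so far; hfi1_verbs_unregister_sysfs() runs the same loop unconditionally. The general register-or-unwind loop, sketched with a hypothetical register/unregister pair:

static int stub_register(int i)   { (void)i; return 0; }	/* 0 on success */
static void stub_unregister(int i) { (void)i; }

static int register_all(int n)
{
	int i, ret;

	for (i = 0; i < n; i++) {
		ret = stub_register(i);
		if (ret)
			goto bail;
	}
	return 0;
bail:
	/* unwind only the entries that actually registered: [0, i) */
	while (--i >= 0)
		stub_unregister(i);
	return ret;
}
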
dd                127 drivers/infiniband/hw/hfi1/tid_rdma.c static u32 read_r_next_psn(struct hfi1_devdata *dd, u8 ctxt, u8 fidx);
dd                306 drivers/infiniband/hw/hfi1/tid_rdma.c 	hfi1_set_ctxt_jkey(rcd->dd, rcd, rcd->jkey);
dd                325 drivers/infiniband/hw/hfi1/tid_rdma.c 	struct hfi1_devdata *dd = container_of(verbs_dev,
dd                333 drivers/infiniband/hw/hfi1/tid_rdma.c 		ctxt = hfi1_get_qp_map(dd, qp->ibqp.qp_num >> dd->qos_shift);
dd                334 drivers/infiniband/hw/hfi1/tid_rdma.c 	return dd->rcd[ctxt];
dd                368 drivers/infiniband/hw/hfi1/tid_rdma.c 		struct hfi1_devdata *dd = qpriv->rcd->dd;
dd                372 drivers/infiniband/hw/hfi1/tid_rdma.c 					    GFP_KERNEL, dd->node);
dd                380 drivers/infiniband/hw/hfi1/tid_rdma.c 					    dd->node);
dd                392 drivers/infiniband/hw/hfi1/tid_rdma.c 					    dd->node);
dd                571 drivers/infiniband/hw/hfi1/tid_rdma.c 		rcd->dd->verbs_dev.n_tidwait++;
dd                611 drivers/infiniband/hw/hfi1/tid_rdma.c 	struct hfi1_devdata *dd;
dd                620 drivers/infiniband/hw/hfi1/tid_rdma.c 	dd = dd_from_ibdev(qp->ibqp.device);
dd                624 drivers/infiniband/hw/hfi1/tid_rdma.c 			     cpumask_first(cpumask_of_node(dd->node)),
dd                745 drivers/infiniband/hw/hfi1/tid_rdma.c 	write_uctxt_csr(rcd->dd, rcd->ctxt,
dd               1117 drivers/infiniband/hw/hfi1/tid_rdma.c 	struct hfi1_devdata *dd;
dd               1121 drivers/infiniband/hw/hfi1/tid_rdma.c 	dd = flow->req->rcd->dd;
dd               1125 drivers/infiniband/hw/hfi1/tid_rdma.c 			dma_unmap_page(&dd->pcidev->dev,
dd               1137 drivers/infiniband/hw/hfi1/tid_rdma.c 	struct hfi1_devdata *dd = flow->req->rcd->dd;
dd               1143 drivers/infiniband/hw/hfi1/tid_rdma.c 			pset->addr = dma_map_page(&dd->pcidev->dev,
dd               1149 drivers/infiniband/hw/hfi1/tid_rdma.c 			if (dma_mapping_error(&dd->pcidev->dev, pset->addr)) {
dd               1206 drivers/infiniband/hw/hfi1/tid_rdma.c 		dd_dev_err(rcd->dd,
dd               1233 drivers/infiniband/hw/hfi1/tid_rdma.c 	struct hfi1_devdata *dd = rcd->dd;
dd               1239 drivers/infiniband/hw/hfi1/tid_rdma.c 	ngroups = flow->npagesets / dd->rcv_entries.group_size;
dd               1297 drivers/infiniband/hw/hfi1/tid_rdma.c 	struct hfi1_devdata *dd = rcd->dd;
dd               1309 drivers/infiniband/hw/hfi1/tid_rdma.c 			rcv_array_wc_fill(dd, rcventry);
dd               1314 drivers/infiniband/hw/hfi1/tid_rdma.c 			hfi1_put_tid(dd, rcventry, PT_EXPECTED,
dd               1317 drivers/infiniband/hw/hfi1/tid_rdma.c 			hfi1_put_tid(dd, rcventry, PT_INVALID, 0, 0);
dd               1364 drivers/infiniband/hw/hfi1/tid_rdma.c 	struct hfi1_devdata *dd = rcd->dd;
dd               1374 drivers/infiniband/hw/hfi1/tid_rdma.c 			rcv_array_wc_fill(dd, rcventry);
dd               1378 drivers/infiniband/hw/hfi1/tid_rdma.c 		hfi1_put_tid(dd, rcventry, PT_INVALID, 0, 0);
dd               1393 drivers/infiniband/hw/hfi1/tid_rdma.c 		struct hfi1_devdata *dd = rcd->dd;
dd               1395 drivers/infiniband/hw/hfi1/tid_rdma.c 		dd_dev_err(dd, "unexpected odd free cnt %u map 0x%x used %u",
dd               1669 drivers/infiniband/hw/hfi1/tid_rdma.c 	struct hfi1_devdata *dd = context;
dd               1671 drivers/infiniband/hw/hfi1/tid_rdma.c 	return dd->verbs_dev.n_tidwait;
dd               2654 drivers/infiniband/hw/hfi1/tid_rdma.c 	struct hfi1_devdata *dd = ppd->dd;
dd               2791 drivers/infiniband/hw/hfi1/tid_rdma.c 				last_psn = read_r_next_psn(dd, rcd->ctxt,
dd               2842 drivers/infiniband/hw/hfi1/tid_rdma.c 	struct hfi1_devdata *dd = ppd->dd;
dd               2843 drivers/infiniband/hw/hfi1/tid_rdma.c 	struct rvt_dev_info *rdi = &dd->verbs_dev.rdi;
dd               2868 drivers/infiniband/hw/hfi1/tid_rdma.c 	trace_input_ibhdr(rcd->dd, packet, !!(rhf_dc_info(packet->rhf)));
dd               2952 drivers/infiniband/hw/hfi1/tid_rdma.c 					read_r_next_psn(dd, rcd->ctxt,
dd               5363 drivers/infiniband/hw/hfi1/tid_rdma.c 		cpumask_first(cpumask_of_node(ps.ppd->dd->node));
dd               5409 drivers/infiniband/hw/hfi1/tid_rdma.c 	struct hfi1_devdata *dd = dd_from_ibdev(qp->ibqp.device);
dd               5414 drivers/infiniband/hw/hfi1/tid_rdma.c 				   cpumask_first(cpumask_of_node(dd->node)));
dd               5473 drivers/infiniband/hw/hfi1/tid_rdma.c static u32 read_r_next_psn(struct hfi1_devdata *dd, u8 ctxt, u8 fidx)
dd               5481 drivers/infiniband/hw/hfi1/tid_rdma.c 	reg = read_uctxt_csr(dd, ctxt, RCV_TID_FLOW_TABLE + (8 * fidx));
dd               5511 drivers/infiniband/hw/hfi1/tid_rdma.c 		struct hfi1_devdata *dd = rcd->dd;
dd               5514 drivers/infiniband/hw/hfi1/tid_rdma.c 			read_r_next_psn(dd, rcd->ctxt, flow->idx);
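
read_r_next_psn() above indexes the per-context RCV_TID_FLOW_TABLE by flow (8 bytes per entry, hence the 8 * fidx) and extracts the next-PSN field from the 64-bit CSR value. The indexing-plus-field-extract idiom, with a made-up shift and mask since the real bit layout is not visible in this listing:

#include <stdint.h>

#define FLOW_ENTRY_STRIDE	8		/* 8 * fidx, as in the read above */
#define NEXT_PSN_SHIFT		0		/* illustrative only */
#define NEXT_PSN_MASK		0xffffffULL	/* illustrative 24-bit PSN */

static uint64_t stub_read_csr(uint64_t base, unsigned off) { (void)base; return off; }

static uint32_t read_next_psn(uint64_t flow_table_base, unsigned fidx)
{
	uint64_t reg = stub_read_csr(flow_table_base,
				     fidx * FLOW_ENTRY_STRIDE);

	return (uint32_t)((reg >> NEXT_PSN_SHIFT) & NEXT_PSN_MASK);
}
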
dd                 62 drivers/infiniband/hw/hfi1/trace_ctxts.h 	    TP_PROTO(struct hfi1_devdata *dd, struct hfi1_ctxtdata *uctxt,
dd                 64 drivers/infiniband/hw/hfi1/trace_ctxts.h 	    TP_ARGS(dd, uctxt, subctxt),
dd                 65 drivers/infiniband/hw/hfi1/trace_ctxts.h 	    TP_STRUCT__entry(DD_DEV_ENTRY(dd)
dd                 77 drivers/infiniband/hw/hfi1/trace_ctxts.h 	    TP_fast_assign(DD_DEV_ASSIGN(dd);
dd                107 drivers/infiniband/hw/hfi1/trace_ctxts.h 	    TP_PROTO(struct hfi1_devdata *dd, unsigned int ctxt,
dd                110 drivers/infiniband/hw/hfi1/trace_ctxts.h 	    TP_ARGS(dd, ctxt, subctxt, cinfo),
dd                111 drivers/infiniband/hw/hfi1/trace_ctxts.h 	    TP_STRUCT__entry(DD_DEV_ENTRY(dd)
dd                120 drivers/infiniband/hw/hfi1/trace_ctxts.h 	    TP_fast_assign(DD_DEV_ASSIGN(dd);
dd                156 drivers/infiniband/hw/hfi1/trace_ibhdrs.h 		    TP_PROTO(struct hfi1_devdata *dd,
dd                159 drivers/infiniband/hw/hfi1/trace_ibhdrs.h 		    TP_ARGS(dd, packet, sc5),
dd                161 drivers/infiniband/hw/hfi1/trace_ibhdrs.h 			DD_DEV_ENTRY(dd)
dd                193 drivers/infiniband/hw/hfi1/trace_ibhdrs.h 			DD_DEV_ASSIGN(dd);
dd                312 drivers/infiniband/hw/hfi1/trace_ibhdrs.h 	     TP_PROTO(struct hfi1_devdata *dd,
dd                314 drivers/infiniband/hw/hfi1/trace_ibhdrs.h 	     TP_ARGS(dd, packet, sc5));
dd                317 drivers/infiniband/hw/hfi1/trace_ibhdrs.h 		    TP_PROTO(struct hfi1_devdata *dd,
dd                319 drivers/infiniband/hw/hfi1/trace_ibhdrs.h 		    TP_ARGS(dd, opah, sc5),
dd                321 drivers/infiniband/hw/hfi1/trace_ibhdrs.h 			DD_DEV_ENTRY(dd)
dd                354 drivers/infiniband/hw/hfi1/trace_ibhdrs.h 			DD_DEV_ASSIGN(dd);
dd                475 drivers/infiniband/hw/hfi1/trace_ibhdrs.h 	     TP_PROTO(struct hfi1_devdata *dd,
dd                477 drivers/infiniband/hw/hfi1/trace_ibhdrs.h 	     TP_ARGS(dd, opah, sc5));
dd                480 drivers/infiniband/hw/hfi1/trace_ibhdrs.h 	     TP_PROTO(struct hfi1_devdata *dd,
dd                482 drivers/infiniband/hw/hfi1/trace_ibhdrs.h 	     TP_ARGS(dd, opah, sc5));
dd                485 drivers/infiniband/hw/hfi1/trace_ibhdrs.h 	     TP_PROTO(struct hfi1_devdata *dd,
dd                487 drivers/infiniband/hw/hfi1/trace_ibhdrs.h 	     TP_ARGS(dd, opah, sc5));
dd                 59 drivers/infiniband/hw/hfi1/trace_misc.h 	    TP_PROTO(struct hfi1_devdata *dd, const struct is_table *is_entry,
dd                 61 drivers/infiniband/hw/hfi1/trace_misc.h 	    TP_ARGS(dd, is_entry, src),
dd                 62 drivers/infiniband/hw/hfi1/trace_misc.h 	    TP_STRUCT__entry(DD_DEV_ENTRY(dd)
dd                 66 drivers/infiniband/hw/hfi1/trace_misc.h 	    TP_fast_assign(DD_DEV_ASSIGN(dd)
dd                114 drivers/infiniband/hw/hfi1/trace_misc.h 	    TP_STRUCT__entry(DD_DEV_ENTRY(packet->rcd->ppd->dd)
dd                122 drivers/infiniband/hw/hfi1/trace_misc.h 	     TP_fast_assign(DD_DEV_ASSIGN(packet->rcd->ppd->dd);
dd                 68 drivers/infiniband/hw/hfi1/trace_rx.h 	    TP_STRUCT__entry(DD_DEV_ENTRY(packet->rcd->dd)
dd                 77 drivers/infiniband/hw/hfi1/trace_rx.h 	     TP_fast_assign(DD_DEV_ASSIGN(packet->rcd->dd);
dd                100 drivers/infiniband/hw/hfi1/trace_rx.h 	    TP_PROTO(struct hfi1_devdata *dd, struct hfi1_ctxtdata *rcd),
dd                101 drivers/infiniband/hw/hfi1/trace_rx.h 	    TP_ARGS(dd, rcd),
dd                102 drivers/infiniband/hw/hfi1/trace_rx.h 	    TP_STRUCT__entry(DD_DEV_ENTRY(dd)
dd                107 drivers/infiniband/hw/hfi1/trace_rx.h 	    TP_fast_assign(DD_DEV_ASSIGN(dd);
dd                136 drivers/infiniband/hw/hfi1/trace_tid.h 	TP_PROTO(struct hfi1_devdata *dd,
dd                138 drivers/infiniband/hw/hfi1/trace_tid.h 	TP_ARGS(dd, index, type, pa, order),
dd                140 drivers/infiniband/hw/hfi1/trace_tid.h 		DD_DEV_ENTRY(dd)
dd                147 drivers/infiniband/hw/hfi1/trace_tid.h 		DD_DEV_ASSIGN(dd);
dd                 67 drivers/infiniband/hw/hfi1/trace_tx.h 	    TP_STRUCT__entry(DD_DEV_ENTRY(sc->dd)
dd                 72 drivers/infiniband/hw/hfi1/trace_tx.h 	    TP_fast_assign(DD_DEV_ASSIGN(sc->dd);
dd                 88 drivers/infiniband/hw/hfi1/trace_tx.h 	    TP_STRUCT__entry(DD_DEV_ENTRY(sc->dd)
dd                 94 drivers/infiniband/hw/hfi1/trace_tx.h 	    TP_fast_assign(DD_DEV_ASSIGN(sc->dd);
dd                156 drivers/infiniband/hw/hfi1/trace_tx.h 		     TP_STRUCT__entry(DD_DEV_ENTRY(sde->dd)
dd                163 drivers/infiniband/hw/hfi1/trace_tx.h 		     TP_fast_assign(DD_DEV_ASSIGN(sde->dd);
dd                189 drivers/infiniband/hw/hfi1/trace_tx.h 	    TP_PROTO(struct hfi1_devdata *dd, u32 sel, u8 vl, u8 idx),
dd                190 drivers/infiniband/hw/hfi1/trace_tx.h 	    TP_ARGS(dd, sel, vl, idx),
dd                191 drivers/infiniband/hw/hfi1/trace_tx.h 	    TP_STRUCT__entry(DD_DEV_ENTRY(dd)
dd                196 drivers/infiniband/hw/hfi1/trace_tx.h 	    TP_fast_assign(DD_DEV_ASSIGN(dd);
dd                210 drivers/infiniband/hw/hfi1/trace_tx.h 	    TP_PROTO(struct hfi1_devdata *dd, u16 ctxt, u16 subctxt),
dd                211 drivers/infiniband/hw/hfi1/trace_tx.h 	    TP_ARGS(dd, ctxt, subctxt),
dd                212 drivers/infiniband/hw/hfi1/trace_tx.h 	    TP_STRUCT__entry(DD_DEV_ENTRY(dd)
dd                216 drivers/infiniband/hw/hfi1/trace_tx.h 	    TP_fast_assign(DD_DEV_ASSIGN(dd);
dd                228 drivers/infiniband/hw/hfi1/trace_tx.h 	    TP_PROTO(struct hfi1_devdata *dd, u16 ctxt, u16 subctxt,
dd                230 drivers/infiniband/hw/hfi1/trace_tx.h 	    TP_ARGS(dd, ctxt, subctxt, comp_idx),
dd                231 drivers/infiniband/hw/hfi1/trace_tx.h 	    TP_STRUCT__entry(DD_DEV_ENTRY(dd)
dd                236 drivers/infiniband/hw/hfi1/trace_tx.h 	    TP_fast_assign(DD_DEV_ASSIGN(dd);
dd                251 drivers/infiniband/hw/hfi1/trace_tx.h 	TP_PROTO(struct hfi1_devdata *dd, u16 ctxt, u16 subctxt, u16 comp_idx,
dd                253 drivers/infiniband/hw/hfi1/trace_tx.h 	TP_ARGS(dd, ctxt, subctxt, comp_idx, value),
dd                254 drivers/infiniband/hw/hfi1/trace_tx.h 	TP_STRUCT__entry(DD_DEV_ENTRY(dd)
dd                260 drivers/infiniband/hw/hfi1/trace_tx.h 	TP_fast_assign(DD_DEV_ASSIGN(dd);
dd                276 drivers/infiniband/hw/hfi1/trace_tx.h 	     TP_PROTO(struct hfi1_devdata *dd, u16 ctxt, u16 subctxt,
dd                278 drivers/infiniband/hw/hfi1/trace_tx.h 	     TP_ARGS(dd, ctxt, subctxt, comp_idx, tidoffset));
dd                281 drivers/infiniband/hw/hfi1/trace_tx.h 	     TP_PROTO(struct hfi1_devdata *dd, u16 ctxt, u16 subctxt,
dd                283 drivers/infiniband/hw/hfi1/trace_tx.h 	     TP_ARGS(dd, ctxt, subctxt, comp_idx, data_len));
dd                286 drivers/infiniband/hw/hfi1/trace_tx.h 	     TP_PROTO(struct hfi1_devdata *dd, u16 ctxt, u16 subctxt,
dd                288 drivers/infiniband/hw/hfi1/trace_tx.h 	     TP_ARGS(dd, ctxt, subctxt, comp_idx, data_len));
dd                291 drivers/infiniband/hw/hfi1/trace_tx.h 	    TP_PROTO(struct hfi1_devdata *dd, u16 ctxt, u16 subctxt,
dd                293 drivers/infiniband/hw/hfi1/trace_tx.h 	    TP_ARGS(dd, ctxt, subctxt, comp_idx, tidoffset, units, shift),
dd                294 drivers/infiniband/hw/hfi1/trace_tx.h 	    TP_STRUCT__entry(DD_DEV_ENTRY(dd)
dd                302 drivers/infiniband/hw/hfi1/trace_tx.h 	    TP_fast_assign(DD_DEV_ASSIGN(dd);
dd                322 drivers/infiniband/hw/hfi1/trace_tx.h 	    TP_PROTO(struct hfi1_devdata *dd, u16 ctxt, u16 subctxt,
dd                324 drivers/infiniband/hw/hfi1/trace_tx.h 	    TP_ARGS(dd, ctxt, subctxt, dim),
dd                325 drivers/infiniband/hw/hfi1/trace_tx.h 	    TP_STRUCT__entry(DD_DEV_ENTRY(dd)
dd                330 drivers/infiniband/hw/hfi1/trace_tx.h 	    TP_fast_assign(DD_DEV_ASSIGN(dd);
dd                346 drivers/infiniband/hw/hfi1/trace_tx.h 		    TP_STRUCT__entry(DD_DEV_ENTRY(sde->dd)
dd                350 drivers/infiniband/hw/hfi1/trace_tx.h 		    TP_fast_assign(DD_DEV_ASSIGN(sde->dd);
dd                374 drivers/infiniband/hw/hfi1/trace_tx.h 		    TP_STRUCT__entry(DD_DEV_ENTRY(sde->dd)
dd                378 drivers/infiniband/hw/hfi1/trace_tx.h 		    TP_fast_assign(DD_DEV_ASSIGN(sde->dd);
dd                405 drivers/infiniband/hw/hfi1/trace_tx.h 	    TP_STRUCT__entry(DD_DEV_ENTRY(sde->dd)
dd                414 drivers/infiniband/hw/hfi1/trace_tx.h 	    TP_fast_assign(DD_DEV_ASSIGN(sde->dd);
dd                442 drivers/infiniband/hw/hfi1/trace_tx.h 	    TP_STRUCT__entry(DD_DEV_ENTRY(sde->dd)
dd                450 drivers/infiniband/hw/hfi1/trace_tx.h 	    TP_fast_assign(DD_DEV_ASSIGN(sde->dd);
dd                474 drivers/infiniband/hw/hfi1/trace_tx.h 		    TP_STRUCT__entry(DD_DEV_ENTRY(sde->dd)
dd                478 drivers/infiniband/hw/hfi1/trace_tx.h 		    TP_fast_assign(DD_DEV_ASSIGN(sde->dd);
dd                506 drivers/infiniband/hw/hfi1/trace_tx.h 	    TP_PROTO(struct hfi1_devdata *dd, u16 ctxt, u8 subctxt, u16 req,
dd                508 drivers/infiniband/hw/hfi1/trace_tx.h 	    TP_ARGS(dd, ctxt, subctxt, req, hdr, tidval),
dd                510 drivers/infiniband/hw/hfi1/trace_tx.h 		    DD_DEV_ENTRY(dd)
dd                538 drivers/infiniband/hw/hfi1/trace_tx.h 		    DD_DEV_ASSIGN(dd);
dd                588 drivers/infiniband/hw/hfi1/trace_tx.h 	    TP_PROTO(struct hfi1_devdata *dd, u16 ctxt, u8 subctxt, u16 *i),
dd                589 drivers/infiniband/hw/hfi1/trace_tx.h 	    TP_ARGS(dd, ctxt, subctxt, i),
dd                591 drivers/infiniband/hw/hfi1/trace_tx.h 		    DD_DEV_ENTRY(dd);
dd                601 drivers/infiniband/hw/hfi1/trace_tx.h 		    DD_DEV_ASSIGN(dd);
dd                631 drivers/infiniband/hw/hfi1/trace_tx.h 	    TP_PROTO(struct hfi1_devdata *dd, u16 ctxt, u8 subctxt, u16 idx,
dd                633 drivers/infiniband/hw/hfi1/trace_tx.h 	    TP_ARGS(dd, ctxt, subctxt, idx, state, code),
dd                635 drivers/infiniband/hw/hfi1/trace_tx.h 	    DD_DEV_ENTRY(dd)
dd                643 drivers/infiniband/hw/hfi1/trace_tx.h 	    DD_DEV_ASSIGN(dd);
dd                660 drivers/infiniband/hw/hfi1/trace_tx.h 	    TP_PROTO(struct hfi1_devdata *dd, u16 ctxt, u8 subctxt, u16 req,
dd                662 drivers/infiniband/hw/hfi1/trace_tx.h 	    TP_ARGS(dd, ctxt, subctxt, req, sde, ahgidx, ahg, len, tidval),
dd                664 drivers/infiniband/hw/hfi1/trace_tx.h 	    DD_DEV_ENTRY(dd)
dd                675 drivers/infiniband/hw/hfi1/trace_tx.h 	    DD_DEV_ASSIGN(dd);
dd                704 drivers/infiniband/hw/hfi1/trace_tx.h 	    TP_STRUCT__entry(DD_DEV_ENTRY(sde->dd)
dd                708 drivers/infiniband/hw/hfi1/trace_tx.h 	    TP_fast_assign(DD_DEV_ASSIGN(sde->dd);
dd                728 drivers/infiniband/hw/hfi1/trace_tx.h 		    TP_PROTO(struct hfi1_devdata *dd,
dd                730 drivers/infiniband/hw/hfi1/trace_tx.h 		    TP_ARGS(dd, bc),
dd                731 drivers/infiniband/hw/hfi1/trace_tx.h 		    TP_STRUCT__entry(DD_DEV_ENTRY(dd)
dd                734 drivers/infiniband/hw/hfi1/trace_tx.h 		    TP_fast_assign(DD_DEV_ASSIGN(dd);
dd                771 drivers/infiniband/hw/hfi1/trace_tx.h 	     TP_PROTO(struct hfi1_devdata *dd, struct buffer_control *bc),
dd                772 drivers/infiniband/hw/hfi1/trace_tx.h 	     TP_ARGS(dd, bc));
dd                775 drivers/infiniband/hw/hfi1/trace_tx.h 	     TP_PROTO(struct hfi1_devdata *dd, struct buffer_control *bc),
dd                776 drivers/infiniband/hw/hfi1/trace_tx.h 	     TP_ARGS(dd, bc));
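
Every tracepoint above opens its record with DD_DEV_ENTRY(dd) / DD_DEV_ASSIGN(dd), which stores the device's identity in the event so records from multiple HFIs stay distinguishable. Stripped of the TRACE_EVENT machinery, the idea reduces to prefixing each record with a device tag, roughly as below (types and the tag format are stand-ins):

#include <stdio.h>

struct hfi1_devdata { int unit; };	/* stand-in */

struct trace_record {
	char dev[16];			/* what DD_DEV_ENTRY reserves */
	unsigned ctxt, subctxt;
};

static void record_fill(struct trace_record *r, struct hfi1_devdata *dd,
			unsigned ctxt, unsigned subctxt)
{
	/* what DD_DEV_ASSIGN does: stamp the originating device */
	snprintf(r->dev, sizeof(r->dev), "hfi1_%d", dd->unit);
	r->ctxt = ctxt;
	r->subctxt = subctxt;
}
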
dd                422 drivers/infiniband/hw/hfi1/ud.c 			dd_dev_warn(ppd->dd, "Bad sgid_index. sgid_index: %d\n",
dd                682 drivers/infiniband/hw/hfi1/ud.c 	vl = sc_to_vlt(ppd->dd, sc5);
dd                687 drivers/infiniband/hw/hfi1/ud.c 			trace_pio_output_ibhdr(ppd->dd, &hdr, sc5);
dd                688 drivers/infiniband/hw/hfi1/ud.c 			ppd->dd->pio_inline_send(ppd->dd, pbuf, pbc,
dd                737 drivers/infiniband/hw/hfi1/ud.c 	vl = sc_to_vlt(ppd->dd, sc5);
dd                742 drivers/infiniband/hw/hfi1/ud.c 			trace_pio_output_ibhdr(ppd->dd, &hdr, sc5);
dd                743 drivers/infiniband/hw/hfi1/ud.c 			ppd->dd->pio_inline_send(ppd->dd, pbuf, pbc,
dd               1041 drivers/infiniband/hw/hfi1/ud.c 				struct hfi1_devdata *dd = ppd->dd;
dd               1043 drivers/infiniband/hw/hfi1/ud.c 				dd_dev_err(dd, "QP type %d mgmt_pkey_idx < 0 and packet not dropped???\n",
dd                 90 drivers/infiniband/hw/hfi1/user_exp_rcv.c 	struct hfi1_devdata *dd = uctxt->dd;
dd                115 drivers/infiniband/hw/hfi1/user_exp_rcv.c 					   dd->pport->hfi1_wq,
dd                118 drivers/infiniband/hw/hfi1/user_exp_rcv.c 			dd_dev_info(dd,
dd                199 drivers/infiniband/hw/hfi1/user_exp_rcv.c 	struct hfi1_devdata *dd = fd->uctxt->dd;
dd                202 drivers/infiniband/hw/hfi1/user_exp_rcv.c 		pci_unmap_single(dd->pcidev, node->dma_addr,
dd                221 drivers/infiniband/hw/hfi1/user_exp_rcv.c 	struct hfi1_devdata *dd = fd->uctxt->dd;
dd                229 drivers/infiniband/hw/hfi1/user_exp_rcv.c 		dd_dev_err(dd, "Expected buffer too big\n");
dd                236 drivers/infiniband/hw/hfi1/user_exp_rcv.c 		dd_dev_err(dd, "Fail vaddr %p, %u pages, !access_ok\n",
dd                250 drivers/infiniband/hw/hfi1/user_exp_rcv.c 	if (!hfi1_can_pin_pages(dd, fd->mm, fd->tid_n_pinned, npages)) {
dd                320 drivers/infiniband/hw/hfi1/user_exp_rcv.c 	struct hfi1_devdata *dd = uctxt->dd;
dd                367 drivers/infiniband/hw/hfi1/user_exp_rcv.c 	ngroups = pageset_count / dd->rcv_entries.group_size;
dd                390 drivers/infiniband/hw/hfi1/user_exp_rcv.c 				       pageidx, dd->rcv_entries.group_size,
dd                545 drivers/infiniband/hw/hfi1/user_exp_rcv.c 	unsigned long *ev = uctxt->dd->events +
dd                687 drivers/infiniband/hw/hfi1/user_exp_rcv.c 	struct hfi1_devdata *dd = uctxt->dd;
dd                702 drivers/infiniband/hw/hfi1/user_exp_rcv.c 		rcv_array_wc_fill(dd, grp->base + idx);
dd                717 drivers/infiniband/hw/hfi1/user_exp_rcv.c 			rcv_array_wc_fill(dd, grp->base + useidx);
dd                743 drivers/infiniband/hw/hfi1/user_exp_rcv.c 		rcv_array_wc_fill(dd, grp->base + useidx);
dd                756 drivers/infiniband/hw/hfi1/user_exp_rcv.c 	struct hfi1_devdata *dd = uctxt->dd;
dd                769 drivers/infiniband/hw/hfi1/user_exp_rcv.c 	phys = pci_map_single(dd->pcidev,
dd                772 drivers/infiniband/hw/hfi1/user_exp_rcv.c 	if (dma_mapping_error(&dd->pcidev->dev, phys)) {
dd                773 drivers/infiniband/hw/hfi1/user_exp_rcv.c 		dd_dev_err(dd, "Failed to DMA map Exp Rcv pages 0x%llx\n",
dd                797 drivers/infiniband/hw/hfi1/user_exp_rcv.c 		pci_unmap_single(dd->pcidev, phys, npages * PAGE_SIZE,
dd                802 drivers/infiniband/hw/hfi1/user_exp_rcv.c 	hfi1_put_tid(dd, rcventry, PT_EXPECTED, phys, ilog2(npages) + 1);
dd                812 drivers/infiniband/hw/hfi1/user_exp_rcv.c 	struct hfi1_devdata *dd = uctxt->dd;
dd                818 drivers/infiniband/hw/hfi1/user_exp_rcv.c 		dd_dev_err(dd, "Invalid RcvArray entry (%u) index for ctxt %u\n",
dd                846 drivers/infiniband/hw/hfi1/user_exp_rcv.c 	struct hfi1_devdata *dd = uctxt->dd;
dd                856 drivers/infiniband/hw/hfi1/user_exp_rcv.c 	hfi1_put_tid(dd, node->rcventry, PT_INVALID_FLUSH, 0, 0);
dd                941 drivers/infiniband/hw/hfi1/user_exp_rcv.c 			ev = uctxt->dd->events +
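
The user_exp_rcv.c fragments above show the expected-receive DMA mapping: map the pinned page run with the legacy pci_map_single() API, bail on dma_mapping_error(), and pci_unmap_single() on teardown. A condensed sketch of the map step, assuming the error path returns -EFAULT:

static int example_map_exp_rcv(struct hfi1_devdata *dd, struct page **pages,
			       unsigned int npages, dma_addr_t *phys)
{
	/* Map the whole pinned run in one call, as the fragments do. */
	*phys = pci_map_single(dd->pcidev, __va(page_to_phys(pages[0])),
			       npages * PAGE_SIZE, PCI_DMA_FROMDEVICE);
	if (dma_mapping_error(&dd->pcidev->dev, *phys)) {
		dd_dev_err(dd, "Failed to DMA map Exp Rcv pages 0x%llx\n",
			   (unsigned long long)*phys);
		return -EFAULT;	/* assumed error code */
	}
	return 0;
}

Teardown is the mirror image seen in the unmap lines above: pci_unmap_single(dd->pcidev, phys, npages * PAGE_SIZE, PCI_DMA_FROMDEVICE).
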
dd                 71 drivers/infiniband/hw/hfi1/user_pages.c bool hfi1_can_pin_pages(struct hfi1_devdata *dd, struct mm_struct *mm,
dd                 77 drivers/infiniband/hw/hfi1/user_pages.c 			dd->num_rcv_contexts - dd->first_dyn_alloc_ctxt;
dd                169 drivers/infiniband/hw/hfi1/user_sdma.c 	struct hfi1_devdata *dd;
dd                179 drivers/infiniband/hw/hfi1/user_sdma.c 	dd = uctxt->dd;
dd                184 drivers/infiniband/hw/hfi1/user_sdma.c 	pq->dd = dd;
dd                209 drivers/infiniband/hw/hfi1/user_sdma.c 	snprintf(buf, 64, "txreq-kmem-cache-%u-%u-%u", dd->unit, uctxt->ctxt,
dd                217 drivers/infiniband/hw/hfi1/user_sdma.c 		dd_dev_err(dd, "[%u] Failed to allocate TxReq cache\n",
dd                233 drivers/infiniband/hw/hfi1/user_sdma.c 	ret = hfi1_mmu_rb_register(pq, pq->mm, &sdma_rb_ops, dd->pport->hfi1_wq,
dd                236 drivers/infiniband/hw/hfi1/user_sdma.c 		dd_dev_err(dd, "Failed to register with MMU %d", ret);
dd                281 drivers/infiniband/hw/hfi1/user_sdma.c 	trace_hfi1_sdma_user_free_queues(uctxt->dd, uctxt->ctxt, fd->subctxt);
dd                351 drivers/infiniband/hw/hfi1/user_sdma.c 	struct hfi1_devdata *dd = pq->dd;
dd                366 drivers/infiniband/hw/hfi1/user_sdma.c 		   dd->unit, uctxt->ctxt, fd->subctxt,
dd                373 drivers/infiniband/hw/hfi1/user_sdma.c 			  dd->unit, uctxt->ctxt, fd->subctxt, ret);
dd                377 drivers/infiniband/hw/hfi1/user_sdma.c 	trace_hfi1_sdma_user_reqinfo(dd, uctxt->ctxt, fd->subctxt,
dd                382 drivers/infiniband/hw/hfi1/user_sdma.c 			  dd->unit, uctxt->ctxt, fd->subctxt, info.comp_idx);
dd                393 drivers/infiniband/hw/hfi1/user_sdma.c 			  dd->unit, uctxt->ctxt, fd->subctxt, info.comp_idx,
dd                401 drivers/infiniband/hw/hfi1/user_sdma.c 			  dd->unit, uctxt->ctxt, fd->subctxt, info.comp_idx);
dd                408 drivers/infiniband/hw/hfi1/user_sdma.c 			  dd->unit, uctxt->ctxt, fd->subctxt,
dd                415 drivers/infiniband/hw/hfi1/user_sdma.c 	trace_hfi1_sdma_user_process_request(dd, uctxt->ctxt, fd->subctxt,
dd                483 drivers/infiniband/hw/hfi1/user_sdma.c 	if (vl >= dd->pport->vls_operational ||
dd                484 drivers/infiniband/hw/hfi1/user_sdma.c 	    vl != sc_to_vlt(dd, sc)) {
dd                493 drivers/infiniband/hw/hfi1/user_sdma.c 	if (egress_pkey_check(dd->pport, slid, pkey, sc, PKEY_CHECK_INVALID)) {
dd                517 drivers/infiniband/hw/hfi1/user_sdma.c 	trace_hfi1_sdma_user_initial_tidoffset(dd, uctxt->ctxt, fd->subctxt,
dd                535 drivers/infiniband/hw/hfi1/user_sdma.c 	trace_hfi1_sdma_user_data_length(dd, uctxt->ctxt, fd->subctxt,
dd                579 drivers/infiniband/hw/hfi1/user_sdma.c 	req->sde = sdma_select_user_engine(dd, selector, vl);
dd                679 drivers/infiniband/hw/hfi1/user_sdma.c 	trace_hfi1_sdma_user_compute_length(req->pq->dd,
dd                730 drivers/infiniband/hw/hfi1/user_sdma.c 	ret = sdma_txadd_kvaddr(pq->dd, &tx->txreq, &tx->hdr, sizeof(tx->hdr));
dd                732 drivers/infiniband/hw/hfi1/user_sdma.c 		sdma_txclean(pq->dd, &tx->txreq);
dd                756 drivers/infiniband/hw/hfi1/user_sdma.c 	ret = sdma_txadd_page(pq->dd, &tx->txreq, iovec->pages[pageidx],
dd                951 drivers/infiniband/hw/hfi1/user_sdma.c 	sdma_txclean(pq->dd, &tx->txreq);
dd                983 drivers/infiniband/hw/hfi1/user_sdma.c 	if (!hfi1_can_pin_pages(pq->dd, pq->mm,
dd               1256 drivers/infiniband/hw/hfi1/user_sdma.c 			pq->dd, pq->ctxt, pq->subctxt, req->info.comp_idx,
dd               1265 drivers/infiniband/hw/hfi1/user_sdma.c 	trace_hfi1_sdma_user_header(pq->dd, pq->ctxt, pq->subctxt,
dd               1267 drivers/infiniband/hw/hfi1/user_sdma.c 	return sdma_txadd_kvaddr(pq->dd, &tx->txreq, hdr, sizeof(*hdr));
dd               1375 drivers/infiniband/hw/hfi1/user_sdma.c 	trace_hfi1_sdma_user_header_ahg(pq->dd, pq->ctxt, pq->subctxt,
dd               1449 drivers/infiniband/hw/hfi1/user_sdma.c 			sdma_txclean(req->pq->dd, t);
dd               1482 drivers/infiniband/hw/hfi1/user_sdma.c 	trace_hfi1_sdma_user_completion(pq->dd, pq->ctxt, pq->subctxt,
dd                116 drivers/infiniband/hw/hfi1/user_sdma.h 	hfi1_cdbg(SDMA, "[%u:%u:%u:%u] " fmt, (req)->pq->dd->unit, \
dd                126 drivers/infiniband/hw/hfi1/user_sdma.h 	struct hfi1_devdata *dd;
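
The user_sdma.c fragments show each context getting its own txreq slab cache, named after unit, context, and subcontext. A sketch of that creation; object size, alignment, flags, and the error unwind are assumptions:

	char buf[64];

	snprintf(buf, sizeof(buf), "txreq-kmem-cache-%u-%u-%u",
		 dd->unit, uctxt->ctxt, fd->subctxt);
	pq->txreq_cache = kmem_cache_create(buf,
					    sizeof(struct user_sdma_txreq),
					    L1_CACHE_BYTES,
					    SLAB_HWCACHE_ALIGN, NULL);
	if (!pq->txreq_cache) {
		dd_dev_err(dd, "[%u] Failed to allocate TxReq cache\n",
			   uctxt->ctxt);
		return -ENOMEM;	/* the real code unwinds more pq state */
	}
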
dd                375 drivers/infiniband/hw/hfi1/verbs.c 	struct rvt_dev_info *rdi = &ppd->dd->verbs_dev.rdi;
dd                391 drivers/infiniband/hw/hfi1/verbs.c 	trace_input_ibhdr(rcd->dd, packet, !!(rhf_dc_info(packet->rhf)));
dd                429 drivers/infiniband/hw/hfi1/verbs.c 	struct rvt_dev_info *rdi = &ppd->dd->verbs_dev.rdi;
dd                445 drivers/infiniband/hw/hfi1/verbs.c 	trace_input_ibhdr(rcd->dd, packet, !!(rhf_dc_info(packet->rhf)));
dd                501 drivers/infiniband/hw/hfi1/verbs.c 	struct rvt_dev_info *rdi = &ppd->dd->verbs_dev.rdi;
dd                580 drivers/infiniband/hw/hfi1/verbs.c 	trace_input_ibhdr(rcd->dd, packet, !!(rhf_dc_info(packet->rhf)));
dd                588 drivers/infiniband/hw/hfi1/verbs.c 	trace_input_ibhdr(rcd->dd, packet, false);
dd                711 drivers/infiniband/hw/hfi1/verbs.c 			sde->dd,
dd                742 drivers/infiniband/hw/hfi1/verbs.c 	struct hfi1_devdata *dd = dd_from_ibdev(qp->ibqp.device);
dd                743 drivers/infiniband/hw/hfi1/verbs.c 	struct hfi1_opcode_stats_perctx *s = get_cpu_ptr(dd->tx_opstats);
dd                794 drivers/infiniband/hw/hfi1/verbs.c 			sde->dd,
dd                822 drivers/infiniband/hw/hfi1/verbs.c 		ret = sdma_txadd_daddr(sde->dd, &tx->txreq,
dd                823 drivers/infiniband/hw/hfi1/verbs.c 				       sde->dd->sdma_pad_phys, extra_bytes);
dd                928 drivers/infiniband/hw/hfi1/verbs.c 	struct hfi1_devdata *dd = sc->dd;
dd                944 drivers/infiniband/hw/hfi1/verbs.c 			struct hfi1_ibdev *dev = &dd->verbs_dev;
dd               1075 drivers/infiniband/hw/hfi1/verbs.c 		pio_copy(ppd->dd, pbuf, pbc, hdr, hdrwords);
dd               1091 drivers/infiniband/hw/hfi1/verbs.c 			seg_pio_copy_mid(pbuf, ppd->dd->sdma_pad_dma,
dd               1160 drivers/infiniband/hw/hfi1/verbs.c 	struct hfi1_devdata *dd;
dd               1196 drivers/infiniband/hw/hfi1/verbs.c 		dd = ppd->dd;
dd               1197 drivers/infiniband/hw/hfi1/verbs.c 		if (!(dd->err_info_xmit_constraint.status &
dd               1199 drivers/infiniband/hw/hfi1/verbs.c 			dd->err_info_xmit_constraint.status |=
dd               1201 drivers/infiniband/hw/hfi1/verbs.c 			dd->err_info_xmit_constraint.slid = slid;
dd               1202 drivers/infiniband/hw/hfi1/verbs.c 			dd->err_info_xmit_constraint.pkey = pkey;
dd               1217 drivers/infiniband/hw/hfi1/verbs.c 	struct hfi1_devdata *dd = dd_from_ibdev(qp->ibqp.device);
dd               1221 drivers/infiniband/hw/hfi1/verbs.c 	if (unlikely(!(dd->flags & HFI1_HAS_SEND_DMA)))
dd               1222 drivers/infiniband/hw/hfi1/verbs.c 		return dd->process_pio_send;
dd               1225 drivers/infiniband/hw/hfi1/verbs.c 		return dd->process_pio_send;
dd               1238 drivers/infiniband/hw/hfi1/verbs.c 			return dd->process_pio_send;
dd               1243 drivers/infiniband/hw/hfi1/verbs.c 	return dd->process_dma_send;
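
The get-send-routine fragments just above reduce to a two-way choice: PIO is the fallback whenever send DMA is unavailable or unwanted, DMA otherwise. A simplified sketch; wants_pio stands in for the original's QP-type and packet-size checks:

	if (unlikely(!(dd->flags & HFI1_HAS_SEND_DMA)))
		return dd->process_pio_send;	/* no SDMA on this device */
	if (wants_pio)
		return dd->process_pio_send;	/* stand-in for the original checks */
	return dd->process_dma_send;
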
dd               1256 drivers/infiniband/hw/hfi1/verbs.c 	struct hfi1_devdata *dd = dd_from_ibdev(qp->ibqp.device);
dd               1295 drivers/infiniband/hw/hfi1/verbs.c 	ret = egress_pkey_check(dd->pport, slid, pkey,
dd               1306 drivers/infiniband/hw/hfi1/verbs.c 		if (sr == dd->process_pio_send) {
dd               1317 drivers/infiniband/hw/hfi1/verbs.c 	if (sr == dd->process_dma_send && iowait_pio_pending(&priv->s_iowait))
dd               1329 drivers/infiniband/hw/hfi1/verbs.c static void hfi1_fill_device_attr(struct hfi1_devdata *dd)
dd               1331 drivers/infiniband/hw/hfi1/verbs.c 	struct rvt_dev_info *rdi = &dd->verbs_dev.rdi;
dd               1332 drivers/infiniband/hw/hfi1/verbs.c 	u32 ver = dd->dc8051_ver;
dd               1347 drivers/infiniband/hw/hfi1/verbs.c 	rdi->dparms.props.vendor_id = dd->oui1 << 16 | dd->oui2 << 8 | dd->oui3;
dd               1348 drivers/infiniband/hw/hfi1/verbs.c 	rdi->dparms.props.vendor_part_id = dd->pcidev->device;
dd               1349 drivers/infiniband/hw/hfi1/verbs.c 	rdi->dparms.props.hw_ver = dd->minrev;
dd               1371 drivers/infiniband/hw/hfi1/verbs.c 	rdi->dparms.props.max_pkeys = hfi1_get_npkeys(dd);
dd               1414 drivers/infiniband/hw/hfi1/verbs.c 	struct hfi1_devdata *dd = dd_from_dev(verbs_dev);
dd               1415 drivers/infiniband/hw/hfi1/verbs.c 	struct hfi1_pportdata *ppd = &dd->pport[port_num - 1];
dd               1450 drivers/infiniband/hw/hfi1/verbs.c 	struct hfi1_devdata *dd = dd_from_ibdev(device);
dd               1463 drivers/infiniband/hw/hfi1/verbs.c 		for (i = 0; i < dd->num_pports; i++) {
dd               1464 drivers/infiniband/hw/hfi1/verbs.c 			struct hfi1_ibport *ibp = &dd->pport[i].ibport_data;
dd               1473 drivers/infiniband/hw/hfi1/verbs.c 		for (i = 0; i < dd->num_pports; i++) {
dd               1474 drivers/infiniband/hw/hfi1/verbs.c 			struct hfi1_ibport *ibp = &dd->pport[i].ibport_data;
dd               1489 drivers/infiniband/hw/hfi1/verbs.c 	struct hfi1_devdata *dd = dd_from_dev(verbs_dev);
dd               1490 drivers/infiniband/hw/hfi1/verbs.c 	struct hfi1_pportdata *ppd = &dd->pport[port_num - 1];
dd               1525 drivers/infiniband/hw/hfi1/verbs.c 	struct hfi1_devdata *dd;
dd               1536 drivers/infiniband/hw/hfi1/verbs.c 	dd = dd_from_ppd(ppd);
dd               1544 drivers/infiniband/hw/hfi1/verbs.c 	if (sc_to_vlt(dd, sc5) > num_vls && sc_to_vlt(dd, sc5) != 0xf)
dd               1555 drivers/infiniband/hw/hfi1/verbs.c 	struct hfi1_devdata *dd;
dd               1569 drivers/infiniband/hw/hfi1/verbs.c 	dd = dd_from_ppd(ppd);
dd               1570 drivers/infiniband/hw/hfi1/verbs.c 	ah->vl = sc_to_vlt(dd, sc5);
dd               1572 drivers/infiniband/hw/hfi1/verbs.c 		ah->log_pmtu = ilog2(dd->vld[ah->vl].mtu);
dd               1579 drivers/infiniband/hw/hfi1/verbs.c unsigned hfi1_get_npkeys(struct hfi1_devdata *dd)
dd               1581 drivers/infiniband/hw/hfi1/verbs.c 	return ARRAY_SIZE(dd->pport[0].pkeys);
dd               1701 drivers/infiniband/hw/hfi1/verbs.c 		struct hfi1_devdata *dd = dd_from_ibdev(ibdev);
dd               1703 drivers/infiniband/hw/hfi1/verbs.c 		err = init_cntr_names(dd->cntrnames,
dd               1704 drivers/infiniband/hw/hfi1/verbs.c 				      dd->cntrnameslen,
dd               1717 drivers/infiniband/hw/hfi1/verbs.c 		err = init_cntr_names(dd->portcntrnames,
dd               1718 drivers/infiniband/hw/hfi1/verbs.c 				      dd->portcntrnameslen,
dd               1747 drivers/infiniband/hw/hfi1/verbs.c 	struct hfi1_devdata *dd;
dd               1751 drivers/infiniband/hw/hfi1/verbs.c 	xa_for_each(&hfi1_dev_table, index, dd) {
dd               1752 drivers/infiniband/hw/hfi1/verbs.c 		sps_ints += get_all_cpu_total(dd->int_counter);
dd               1803 drivers/infiniband/hw/hfi1/verbs.c int hfi1_register_ib_device(struct hfi1_devdata *dd)
dd               1805 drivers/infiniband/hw/hfi1/verbs.c 	struct hfi1_ibdev *dev = &dd->verbs_dev;
dd               1807 drivers/infiniband/hw/hfi1/verbs.c 	struct hfi1_pportdata *ppd = dd->pport;
dd               1812 drivers/infiniband/hw/hfi1/verbs.c 	for (i = 0; i < dd->num_pports; i++)
dd               1838 drivers/infiniband/hw/hfi1/verbs.c 	ibdev->phys_port_cnt = dd->num_pports;
dd               1839 drivers/infiniband/hw/hfi1/verbs.c 	ibdev->dev.parent = &dd->pcidev->dev;
dd               1849 drivers/infiniband/hw/hfi1/verbs.c 	dd->verbs_dev.rdi.driver_f.get_pci_dev = get_pci_dev;
dd               1850 drivers/infiniband/hw/hfi1/verbs.c 	dd->verbs_dev.rdi.driver_f.check_ah = hfi1_check_ah;
dd               1851 drivers/infiniband/hw/hfi1/verbs.c 	dd->verbs_dev.rdi.driver_f.notify_new_ah = hfi1_notify_new_ah;
dd               1852 drivers/infiniband/hw/hfi1/verbs.c 	dd->verbs_dev.rdi.driver_f.get_guid_be = hfi1_get_guid_be;
dd               1853 drivers/infiniband/hw/hfi1/verbs.c 	dd->verbs_dev.rdi.driver_f.query_port_state = query_port;
dd               1854 drivers/infiniband/hw/hfi1/verbs.c 	dd->verbs_dev.rdi.driver_f.shut_down_port = shut_down_port;
dd               1855 drivers/infiniband/hw/hfi1/verbs.c 	dd->verbs_dev.rdi.driver_f.cap_mask_chg = hfi1_cap_mask_chg;
dd               1859 drivers/infiniband/hw/hfi1/verbs.c 	hfi1_fill_device_attr(dd);
dd               1862 drivers/infiniband/hw/hfi1/verbs.c 	dd->verbs_dev.rdi.dparms.qp_table_size = hfi1_qp_table_size;
dd               1863 drivers/infiniband/hw/hfi1/verbs.c 	dd->verbs_dev.rdi.dparms.qpn_start = 0;
dd               1864 drivers/infiniband/hw/hfi1/verbs.c 	dd->verbs_dev.rdi.dparms.qpn_inc = 1;
dd               1865 drivers/infiniband/hw/hfi1/verbs.c 	dd->verbs_dev.rdi.dparms.qos_shift = dd->qos_shift;
dd               1866 drivers/infiniband/hw/hfi1/verbs.c 	dd->verbs_dev.rdi.dparms.qpn_res_start = kdeth_qp << 16;
dd               1867 drivers/infiniband/hw/hfi1/verbs.c 	dd->verbs_dev.rdi.dparms.qpn_res_end =
dd               1868 drivers/infiniband/hw/hfi1/verbs.c 	dd->verbs_dev.rdi.dparms.qpn_res_start + 65535;
dd               1869 drivers/infiniband/hw/hfi1/verbs.c 	dd->verbs_dev.rdi.dparms.max_rdma_atomic = HFI1_MAX_RDMA_ATOMIC;
dd               1870 drivers/infiniband/hw/hfi1/verbs.c 	dd->verbs_dev.rdi.dparms.psn_mask = PSN_MASK;
dd               1871 drivers/infiniband/hw/hfi1/verbs.c 	dd->verbs_dev.rdi.dparms.psn_shift = PSN_SHIFT;
dd               1872 drivers/infiniband/hw/hfi1/verbs.c 	dd->verbs_dev.rdi.dparms.psn_modify_mask = PSN_MODIFY_MASK;
dd               1873 drivers/infiniband/hw/hfi1/verbs.c 	dd->verbs_dev.rdi.dparms.core_cap_flags = RDMA_CORE_PORT_INTEL_OPA |
dd               1875 drivers/infiniband/hw/hfi1/verbs.c 	dd->verbs_dev.rdi.dparms.max_mad_size = OPA_MGMT_MAD_SIZE;
dd               1877 drivers/infiniband/hw/hfi1/verbs.c 	dd->verbs_dev.rdi.driver_f.qp_priv_alloc = qp_priv_alloc;
dd               1878 drivers/infiniband/hw/hfi1/verbs.c 	dd->verbs_dev.rdi.driver_f.qp_priv_init = hfi1_qp_priv_init;
dd               1879 drivers/infiniband/hw/hfi1/verbs.c 	dd->verbs_dev.rdi.driver_f.qp_priv_free = qp_priv_free;
dd               1880 drivers/infiniband/hw/hfi1/verbs.c 	dd->verbs_dev.rdi.driver_f.free_all_qps = free_all_qps;
dd               1881 drivers/infiniband/hw/hfi1/verbs.c 	dd->verbs_dev.rdi.driver_f.notify_qp_reset = notify_qp_reset;
dd               1882 drivers/infiniband/hw/hfi1/verbs.c 	dd->verbs_dev.rdi.driver_f.do_send = hfi1_do_send_from_rvt;
dd               1883 drivers/infiniband/hw/hfi1/verbs.c 	dd->verbs_dev.rdi.driver_f.schedule_send = hfi1_schedule_send;
dd               1884 drivers/infiniband/hw/hfi1/verbs.c 	dd->verbs_dev.rdi.driver_f.schedule_send_no_lock = _hfi1_schedule_send;
dd               1885 drivers/infiniband/hw/hfi1/verbs.c 	dd->verbs_dev.rdi.driver_f.get_pmtu_from_attr = get_pmtu_from_attr;
dd               1886 drivers/infiniband/hw/hfi1/verbs.c 	dd->verbs_dev.rdi.driver_f.notify_error_qp = notify_error_qp;
dd               1887 drivers/infiniband/hw/hfi1/verbs.c 	dd->verbs_dev.rdi.driver_f.flush_qp_waiters = flush_qp_waiters;
dd               1888 drivers/infiniband/hw/hfi1/verbs.c 	dd->verbs_dev.rdi.driver_f.stop_send_queue = stop_send_queue;
dd               1889 drivers/infiniband/hw/hfi1/verbs.c 	dd->verbs_dev.rdi.driver_f.quiesce_qp = quiesce_qp;
dd               1890 drivers/infiniband/hw/hfi1/verbs.c 	dd->verbs_dev.rdi.driver_f.notify_error_qp = notify_error_qp;
dd               1891 drivers/infiniband/hw/hfi1/verbs.c 	dd->verbs_dev.rdi.driver_f.mtu_from_qp = mtu_from_qp;
dd               1892 drivers/infiniband/hw/hfi1/verbs.c 	dd->verbs_dev.rdi.driver_f.mtu_to_path_mtu = mtu_to_path_mtu;
dd               1893 drivers/infiniband/hw/hfi1/verbs.c 	dd->verbs_dev.rdi.driver_f.check_modify_qp = hfi1_check_modify_qp;
dd               1894 drivers/infiniband/hw/hfi1/verbs.c 	dd->verbs_dev.rdi.driver_f.modify_qp = hfi1_modify_qp;
dd               1895 drivers/infiniband/hw/hfi1/verbs.c 	dd->verbs_dev.rdi.driver_f.notify_restart_rc = hfi1_restart_rc;
dd               1896 drivers/infiniband/hw/hfi1/verbs.c 	dd->verbs_dev.rdi.driver_f.setup_wqe = hfi1_setup_wqe;
dd               1897 drivers/infiniband/hw/hfi1/verbs.c 	dd->verbs_dev.rdi.driver_f.comp_vect_cpu_lookup =
dd               1901 drivers/infiniband/hw/hfi1/verbs.c 	dd->verbs_dev.rdi.ibdev.num_comp_vectors = dd->comp_vect_possible_cpus;
dd               1902 drivers/infiniband/hw/hfi1/verbs.c 	dd->verbs_dev.rdi.dparms.node = dd->node;
dd               1905 drivers/infiniband/hw/hfi1/verbs.c 	dd->verbs_dev.rdi.flags = 0; /* Let rdmavt handle it all */
dd               1906 drivers/infiniband/hw/hfi1/verbs.c 	dd->verbs_dev.rdi.dparms.lkey_table_size = hfi1_lkey_table_size;
dd               1907 drivers/infiniband/hw/hfi1/verbs.c 	dd->verbs_dev.rdi.dparms.nports = dd->num_pports;
dd               1908 drivers/infiniband/hw/hfi1/verbs.c 	dd->verbs_dev.rdi.dparms.npkeys = hfi1_get_npkeys(dd);
dd               1909 drivers/infiniband/hw/hfi1/verbs.c 	dd->verbs_dev.rdi.dparms.sge_copy_mode = sge_copy_mode;
dd               1910 drivers/infiniband/hw/hfi1/verbs.c 	dd->verbs_dev.rdi.dparms.wss_threshold = wss_threshold;
dd               1911 drivers/infiniband/hw/hfi1/verbs.c 	dd->verbs_dev.rdi.dparms.wss_clean_period = wss_clean_period;
dd               1912 drivers/infiniband/hw/hfi1/verbs.c 	dd->verbs_dev.rdi.dparms.reserved_operations = 1;
dd               1913 drivers/infiniband/hw/hfi1/verbs.c 	dd->verbs_dev.rdi.dparms.extra_rdma_atomic = HFI1_TID_RDMA_WRITE_CNT;
dd               1916 drivers/infiniband/hw/hfi1/verbs.c 	dd->verbs_dev.rdi.post_parms = hfi1_post_parms;
dd               1919 drivers/infiniband/hw/hfi1/verbs.c 	dd->verbs_dev.rdi.wc_opcode = ib_hfi1_wc_opcode;
dd               1921 drivers/infiniband/hw/hfi1/verbs.c 	ppd = dd->pport;
dd               1922 drivers/infiniband/hw/hfi1/verbs.c 	for (i = 0; i < dd->num_pports; i++, ppd++)
dd               1923 drivers/infiniband/hw/hfi1/verbs.c 		rvt_init_port(&dd->verbs_dev.rdi,
dd               1928 drivers/infiniband/hw/hfi1/verbs.c 	rdma_set_device_sysfs_group(&dd->verbs_dev.rdi.ibdev,
dd               1931 drivers/infiniband/hw/hfi1/verbs.c 	ret = rvt_register_device(&dd->verbs_dev.rdi);
dd               1935 drivers/infiniband/hw/hfi1/verbs.c 	ret = hfi1_verbs_register_sysfs(dd);
dd               1942 drivers/infiniband/hw/hfi1/verbs.c 	rvt_unregister_device(&dd->verbs_dev.rdi);
dd               1945 drivers/infiniband/hw/hfi1/verbs.c 	dd_dev_err(dd, "cannot register verbs: %d!\n", -ret);
dd               1949 drivers/infiniband/hw/hfi1/verbs.c void hfi1_unregister_ib_device(struct hfi1_devdata *dd)
dd               1951 drivers/infiniband/hw/hfi1/verbs.c 	struct hfi1_ibdev *dev = &dd->verbs_dev;
dd               1953 drivers/infiniband/hw/hfi1/verbs.c 	hfi1_verbs_unregister_sysfs(dd);
dd               1955 drivers/infiniband/hw/hfi1/verbs.c 	rvt_unregister_device(&dd->verbs_dev.rdi);
dd               1958 drivers/infiniband/hw/hfi1/verbs.c 		dd_dev_err(dd, "txwait list not empty!\n");
dd               1960 drivers/infiniband/hw/hfi1/verbs.c 		dd_dev_err(dd, "memwait list not empty!\n");
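
The tail of hfi1_register_ib_device() as the verbs.c fragments show it: fill the rdmavt dparms and driver_f table, register with rdmavt, then sysfs, and unwind in reverse on failure. A condensed sketch with the bulk of the assignments elided; label names are assumptions:

	ret = rvt_register_device(&dd->verbs_dev.rdi);
	if (ret)
		goto err_verbs_txreq;

	ret = hfi1_verbs_register_sysfs(dd);
	if (ret)
		goto err_class;

	return ret;

err_class:
	rvt_unregister_device(&dd->verbs_dev.rdi);
err_verbs_txreq:
	dd_dev_err(dd, "cannot register verbs: %d!\n", -ret);
	return ret;
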
dd                125 drivers/infiniband/hw/hfi1/verbs_txreq.c 	struct hfi1_devdata *dd = dd_from_dev(dev);
dd                127 drivers/infiniband/hw/hfi1/verbs_txreq.c 	snprintf(buf, sizeof(buf), "hfi1_%u_vtxreq_cache", dd->unit);
dd                 84 drivers/infiniband/hw/hfi1/vnic.h 	struct hfi1_devdata *dd;
dd                124 drivers/infiniband/hw/hfi1/vnic.h 	struct hfi1_devdata *dd;
dd                148 drivers/infiniband/hw/hfi1/vnic.h void hfi1_vnic_setup(struct hfi1_devdata *dd);
dd                149 drivers/infiniband/hw/hfi1/vnic.h void hfi1_vnic_cleanup(struct hfi1_devdata *dd);
dd                150 drivers/infiniband/hw/hfi1/vnic.h int hfi1_vnic_txreq_init(struct hfi1_devdata *dd);
dd                151 drivers/infiniband/hw/hfi1/vnic.h void hfi1_vnic_txreq_deinit(struct hfi1_devdata *dd);
dd                165 drivers/infiniband/hw/hfi1/vnic.h int hfi1_vnic_send_dma(struct hfi1_devdata *dd, u8 q_idx,
dd                 65 drivers/infiniband/hw/hfi1/vnic_main.c static int setup_vnic_ctxt(struct hfi1_devdata *dd, struct hfi1_ctxtdata *uctxt)
dd                 73 drivers/infiniband/hw/hfi1/vnic_main.c 	ret = hfi1_create_rcvhdrq(dd, uctxt);
dd                 96 drivers/infiniband/hw/hfi1/vnic_main.c 	hfi1_rcvctrl(uctxt->dd, rcvctrl_ops, uctxt);
dd                101 drivers/infiniband/hw/hfi1/vnic_main.c static int allocate_vnic_ctxt(struct hfi1_devdata *dd,
dd                107 drivers/infiniband/hw/hfi1/vnic_main.c 	if (dd->flags & HFI1_FROZEN)
dd                110 drivers/infiniband/hw/hfi1/vnic_main.c 	ret = hfi1_create_ctxtdata(dd->pport, dd->node, &uctxt);
dd                112 drivers/infiniband/hw/hfi1/vnic_main.c 		dd_dev_err(dd, "Unable to create ctxtdata, failing open\n");
dd                126 drivers/infiniband/hw/hfi1/vnic_main.c 	dd_dev_dbg(dd, "created vnic context %d\n", uctxt->ctxt);
dd                132 drivers/infiniband/hw/hfi1/vnic_main.c static void deallocate_vnic_ctxt(struct hfi1_devdata *dd,
dd                135 drivers/infiniband/hw/hfi1/vnic_main.c 	dd_dev_dbg(dd, "closing vnic context %d\n", uctxt->ctxt);
dd                142 drivers/infiniband/hw/hfi1/vnic_main.c 	hfi1_rcvctrl(dd, HFI1_RCVCTRL_CTXT_DIS |
dd                151 drivers/infiniband/hw/hfi1/vnic_main.c 		msix_free_irq(dd, uctxt->msix_intr);
dd                156 drivers/infiniband/hw/hfi1/vnic_main.c 	hfi1_clear_ctxt_pkey(dd, uctxt);
dd                163 drivers/infiniband/hw/hfi1/vnic_main.c void hfi1_vnic_setup(struct hfi1_devdata *dd)
dd                165 drivers/infiniband/hw/hfi1/vnic_main.c 	xa_init(&dd->vnic.vesws);
dd                168 drivers/infiniband/hw/hfi1/vnic_main.c void hfi1_vnic_cleanup(struct hfi1_devdata *dd)
dd                170 drivers/infiniband/hw/hfi1/vnic_main.c 	WARN_ON(!xa_empty(&dd->vnic.vesws));
dd                365 drivers/infiniband/hw/hfi1/vnic_main.c 	struct hfi1_devdata *dd = vinfo->dd;
dd                401 drivers/infiniband/hw/hfi1/vnic_main.c 	err = dd->process_vnic_dma_send(dd, q_idx, vinfo, skb, pbc, pad_len);
dd                433 drivers/infiniband/hw/hfi1/vnic_main.c 	sde = sdma_select_engine_vl(vinfo->dd, mdata->entropy, mdata->vl);
dd                526 drivers/infiniband/hw/hfi1/vnic_main.c 	struct hfi1_devdata *dd = packet->rcd->dd;
dd                536 drivers/infiniband/hw/hfi1/vnic_main.c 		vinfo = xa_load(&dd->vnic.vesws, vesw_id);
dd                546 drivers/infiniband/hw/hfi1/vnic_main.c 			vinfo_tmp = xa_find(&dd->vnic.vesws, &index, ULONG_MAX,
dd                557 drivers/infiniband/hw/hfi1/vnic_main.c 		dd_dev_warn(dd, "vnic rcv err: l4 %d vesw id %d ctx %d\n",
dd                593 drivers/infiniband/hw/hfi1/vnic_main.c 	struct hfi1_devdata *dd = vinfo->dd;
dd                601 drivers/infiniband/hw/hfi1/vnic_main.c 	rc = xa_insert(&dd->vnic.vesws, vinfo->vesw_id, vinfo, GFP_KERNEL);
dd                621 drivers/infiniband/hw/hfi1/vnic_main.c 	struct hfi1_devdata *dd = vinfo->dd;
dd                627 drivers/infiniband/hw/hfi1/vnic_main.c 	xa_erase(&dd->vnic.vesws, vinfo->vesw_id);
dd                630 drivers/infiniband/hw/hfi1/vnic_main.c 	msix_vnic_synchronize_irq(dd);
dd                663 drivers/infiniband/hw/hfi1/vnic_main.c static int hfi1_vnic_allot_ctxt(struct hfi1_devdata *dd,
dd                668 drivers/infiniband/hw/hfi1/vnic_main.c 	rc = allocate_vnic_ctxt(dd, vnic_ctxt);
dd                670 drivers/infiniband/hw/hfi1/vnic_main.c 		dd_dev_err(dd, "vnic ctxt alloc failed %d\n", rc);
dd                674 drivers/infiniband/hw/hfi1/vnic_main.c 	rc = setup_vnic_ctxt(dd, *vnic_ctxt);
dd                676 drivers/infiniband/hw/hfi1/vnic_main.c 		dd_dev_err(dd, "vnic ctxt setup failed %d\n", rc);
dd                677 drivers/infiniband/hw/hfi1/vnic_main.c 		deallocate_vnic_ctxt(dd, *vnic_ctxt);
dd                686 drivers/infiniband/hw/hfi1/vnic_main.c 	struct hfi1_devdata *dd = vinfo->dd;
dd                690 drivers/infiniband/hw/hfi1/vnic_main.c 	if (!dd->vnic.num_vports) {
dd                691 drivers/infiniband/hw/hfi1/vnic_main.c 		rc = hfi1_vnic_txreq_init(dd);
dd                696 drivers/infiniband/hw/hfi1/vnic_main.c 	for (i = dd->vnic.num_ctxt; i < vinfo->num_rx_q; i++) {
dd                697 drivers/infiniband/hw/hfi1/vnic_main.c 		rc = hfi1_vnic_allot_ctxt(dd, &dd->vnic.ctxt[i]);
dd                700 drivers/infiniband/hw/hfi1/vnic_main.c 		hfi1_rcd_get(dd->vnic.ctxt[i]);
dd                701 drivers/infiniband/hw/hfi1/vnic_main.c 		dd->vnic.ctxt[i]->vnic_q_idx = i;
dd                710 drivers/infiniband/hw/hfi1/vnic_main.c 		while (i-- > dd->vnic.num_ctxt) {
dd                711 drivers/infiniband/hw/hfi1/vnic_main.c 			deallocate_vnic_ctxt(dd, dd->vnic.ctxt[i]);
dd                712 drivers/infiniband/hw/hfi1/vnic_main.c 			hfi1_rcd_put(dd->vnic.ctxt[i]);
dd                713 drivers/infiniband/hw/hfi1/vnic_main.c 			dd->vnic.ctxt[i] = NULL;
dd                718 drivers/infiniband/hw/hfi1/vnic_main.c 	if (dd->vnic.num_ctxt != i) {
dd                719 drivers/infiniband/hw/hfi1/vnic_main.c 		dd->vnic.num_ctxt = i;
dd                720 drivers/infiniband/hw/hfi1/vnic_main.c 		hfi1_init_vnic_rsm(dd);
dd                723 drivers/infiniband/hw/hfi1/vnic_main.c 	dd->vnic.num_vports++;
dd                726 drivers/infiniband/hw/hfi1/vnic_main.c 	if (!dd->vnic.num_vports)
dd                727 drivers/infiniband/hw/hfi1/vnic_main.c 		hfi1_vnic_txreq_deinit(dd);
dd                735 drivers/infiniband/hw/hfi1/vnic_main.c 	struct hfi1_devdata *dd = vinfo->dd;
dd                739 drivers/infiniband/hw/hfi1/vnic_main.c 	if (--dd->vnic.num_vports == 0) {
dd                740 drivers/infiniband/hw/hfi1/vnic_main.c 		for (i = 0; i < dd->vnic.num_ctxt; i++) {
dd                741 drivers/infiniband/hw/hfi1/vnic_main.c 			deallocate_vnic_ctxt(dd, dd->vnic.ctxt[i]);
dd                742 drivers/infiniband/hw/hfi1/vnic_main.c 			hfi1_rcd_put(dd->vnic.ctxt[i]);
dd                743 drivers/infiniband/hw/hfi1/vnic_main.c 			dd->vnic.ctxt[i] = NULL;
dd                745 drivers/infiniband/hw/hfi1/vnic_main.c 		hfi1_deinit_vnic_rsm(dd);
dd                746 drivers/infiniband/hw/hfi1/vnic_main.c 		dd->vnic.num_ctxt = 0;
dd                747 drivers/infiniband/hw/hfi1/vnic_main.c 		hfi1_vnic_txreq_deinit(dd);
dd                801 drivers/infiniband/hw/hfi1/vnic_main.c 	struct hfi1_devdata *dd = dd_from_ibdev(device);
dd                807 drivers/infiniband/hw/hfi1/vnic_main.c 	if (!dd->num_vnic_contexts)
dd                810 drivers/infiniband/hw/hfi1/vnic_main.c 	if (!port_num || (port_num > dd->num_pports))
dd                818 drivers/infiniband/hw/hfi1/vnic_main.c 				  dd->num_sdma, dd->num_vnic_contexts);
dd                824 drivers/infiniband/hw/hfi1/vnic_main.c 	vinfo->dd = dd;
dd                825 drivers/infiniband/hw/hfi1/vnic_main.c 	vinfo->num_tx_q = dd->num_sdma;
dd                826 drivers/infiniband/hw/hfi1/vnic_main.c 	vinfo->num_rx_q = dd->num_vnic_contexts;
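
The vnic_main.c lines trace one xarray, dd->vnic.vesws, through its whole life. A minimal sketch of that flow, using only the calls visible above:

	xa_init(&dd->vnic.vesws);				/* hfi1_vnic_setup() */
	rc = xa_insert(&dd->vnic.vesws, vinfo->vesw_id,
		       vinfo, GFP_KERNEL);			/* vport up */
	vinfo = xa_load(&dd->vnic.vesws, vesw_id);		/* rx fast path */
	xa_erase(&dd->vnic.vesws, vinfo->vesw_id);		/* vport down */
	WARN_ON(!xa_empty(&dd->vnic.vesws));			/* hfi1_vnic_cleanup() */
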
dd                 86 drivers/infiniband/hw/hfi1/vnic_sdma.c 	sdma_txclean(vnic_sdma->dd, txreq);
dd                 88 drivers/infiniband/hw/hfi1/vnic_sdma.c 	kmem_cache_free(vnic_sdma->dd->vnic.txreq_cache, tx);
dd                 97 drivers/infiniband/hw/hfi1/vnic_sdma.c 		sde->dd,
dd                108 drivers/infiniband/hw/hfi1/vnic_sdma.c 		ret = sdma_txadd_page(sde->dd,
dd                118 drivers/infiniband/hw/hfi1/vnic_sdma.c 		ret = sdma_txadd_kvaddr(sde->dd, &tx->txreq,
dd                148 drivers/infiniband/hw/hfi1/vnic_sdma.c 		sde->dd,
dd                167 drivers/infiniband/hw/hfi1/vnic_sdma.c int hfi1_vnic_send_dma(struct hfi1_devdata *dd, u8 q_idx,
dd                182 drivers/infiniband/hw/hfi1/vnic_sdma.c 	tx = kmem_cache_alloc(dd->vnic.txreq_cache, GFP_ATOMIC);
dd                209 drivers/infiniband/hw/hfi1/vnic_sdma.c 	sdma_txclean(dd, &tx->txreq);
dd                210 drivers/infiniband/hw/hfi1/vnic_sdma.c 	kmem_cache_free(dd->vnic.txreq_cache, tx);
dd                287 drivers/infiniband/hw/hfi1/vnic_sdma.c 		vnic_sdma->sde = &vinfo->dd->per_sdma[i];
dd                288 drivers/infiniband/hw/hfi1/vnic_sdma.c 		vnic_sdma->dd = vinfo->dd;
dd                305 drivers/infiniband/hw/hfi1/vnic_sdma.c int hfi1_vnic_txreq_init(struct hfi1_devdata *dd)
dd                309 drivers/infiniband/hw/hfi1/vnic_sdma.c 	snprintf(buf, sizeof(buf), "hfi1_%u_vnic_txreq_cache", dd->unit);
dd                310 drivers/infiniband/hw/hfi1/vnic_sdma.c 	dd->vnic.txreq_cache = kmem_cache_create(buf,
dd                314 drivers/infiniband/hw/hfi1/vnic_sdma.c 	if (!dd->vnic.txreq_cache)
dd                319 drivers/infiniband/hw/hfi1/vnic_sdma.c void hfi1_vnic_txreq_deinit(struct hfi1_devdata *dd)
dd                321 drivers/infiniband/hw/hfi1/vnic_sdma.c 	kmem_cache_destroy(dd->vnic.txreq_cache);
dd                322 drivers/infiniband/hw/hfi1/vnic_sdma.c 	dd->vnic.txreq_cache = NULL;
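
The vnic_sdma.c fragments show the full lifecycle of the vnic txreq cache: created per device at init, allocated from atomic context in the send path, destroyed and NULLed on deinit. Sketch only; size, alignment, and flags passed to kmem_cache_create() are assumptions:

	snprintf(buf, sizeof(buf), "hfi1_%u_vnic_txreq_cache", dd->unit);
	dd->vnic.txreq_cache = kmem_cache_create(buf, sizeof(*tx),
						 0, 0, NULL);
	if (!dd->vnic.txreq_cache)
		return -ENOMEM;

	tx = kmem_cache_alloc(dd->vnic.txreq_cache, GFP_ATOMIC); /* send path */
	kmem_cache_free(dd->vnic.txreq_cache, tx);

	kmem_cache_destroy(dd->vnic.txreq_cache);	/* hfi1_vnic_txreq_deinit() */
	dd->vnic.txreq_cache = NULL;
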
dd                203 drivers/infiniband/hw/qib/qib.h 	struct qib_devdata *dd;
dd                522 drivers/infiniband/hw/qib/qib.h 	struct qib_devdata *dd;
dd                691 drivers/infiniband/hw/qib/qib.h typedef int (*diag_hook) (struct qib_devdata *dd,
dd                701 drivers/infiniband/hw/qib/qib.h extern int qib_register_observer(struct qib_devdata *dd,
dd                794 drivers/infiniband/hw/qib/qib.h 	int (*f_gpio_mod)(struct qib_devdata *dd, u32 out, u32 dir,
dd                797 drivers/infiniband/hw/qib/qib.h 	int (*f_eeprom_wen)(struct qib_devdata *dd, int wen);
dd               1116 drivers/infiniband/hw/qib/qib.h int init_chip_wc_pat(struct qib_devdata *dd, u32);
dd               1117 drivers/infiniband/hw/qib/qib.h int qib_enable_wc(struct qib_devdata *dd);
dd               1118 drivers/infiniband/hw/qib/qib.h void qib_disable_wc(struct qib_devdata *dd);
dd               1134 drivers/infiniband/hw/qib/qib.h int qib_decode_err(struct qib_devdata *dd, char *buf, size_t blen, u64 err);
dd               1155 drivers/infiniband/hw/qib/qib.h int qib_create_ctxts(struct qib_devdata *dd);
dd               1185 drivers/infiniband/hw/qib/qib.h 	return ppd->dd;
dd               1205 drivers/infiniband/hw/qib/qib.h 	struct qib_devdata *dd = dd_from_ibdev(ibdev);
dd               1208 drivers/infiniband/hw/qib/qib.h 	WARN_ON(pidx >= dd->num_pports);
dd               1209 drivers/infiniband/hw/qib/qib.h 	return &dd->pport[pidx].ibport_data;
dd               1261 drivers/infiniband/hw/qib/qib.h void qib_free_data(struct qib_ctxtdata *dd);
dd               1275 drivers/infiniband/hw/qib/qib.h int qib_twsi_reset(struct qib_devdata *dd);
dd               1276 drivers/infiniband/hw/qib/qib.h int qib_twsi_blk_rd(struct qib_devdata *dd, int dev, int addr, void *buffer,
dd               1278 drivers/infiniband/hw/qib/qib.h int qib_twsi_blk_wr(struct qib_devdata *dd, int dev, int addr,
dd               1299 drivers/infiniband/hw/qib/qib.h void qib_user_sdma_send_desc(struct qib_pportdata *dd,
dd               1304 drivers/infiniband/hw/qib/qib.h int qib_sdma_make_progress(struct qib_pportdata *dd);
dd               1369 drivers/infiniband/hw/qib/qib.h 	const struct qib_devdata *dd = rcd->dd;
dd               1372 drivers/infiniband/hw/qib/qib.h 	if (dd->flags & QIB_NODMA_RTAIL) {
dd               1377 drivers/infiniband/hw/qib/qib.h 			rcd->head + dd->rhf_offset;
dd               1414 drivers/infiniband/hw/qib/qib.h int qib_pcie_params(struct qib_devdata *dd, u32 minw, u32 *nent);
dd               1415 drivers/infiniband/hw/qib/qib.h void qib_free_irq(struct qib_devdata *dd);
dd               1416 drivers/infiniband/hw/qib/qib.h int qib_reinit_intr(struct qib_devdata *dd);
dd               1484 drivers/infiniband/hw/qib/qib.h #define qib_dev_err(dd, fmt, ...) \
dd               1485 drivers/infiniband/hw/qib/qib.h 	dev_err(&(dd)->pcidev->dev, "%s: " fmt, \
dd               1486 drivers/infiniband/hw/qib/qib.h 		rvt_get_ibdev_name(&(dd)->verbs_dev.rdi), ##__VA_ARGS__)
dd               1488 drivers/infiniband/hw/qib/qib.h #define qib_dev_warn(dd, fmt, ...) \
dd               1489 drivers/infiniband/hw/qib/qib.h 	dev_warn(&(dd)->pcidev->dev, "%s: " fmt, \
dd               1490 drivers/infiniband/hw/qib/qib.h 		 rvt_get_ibdev_name(&(dd)->verbs_dev.rdi), ##__VA_ARGS__)
dd               1492 drivers/infiniband/hw/qib/qib.h #define qib_dev_porterr(dd, port, fmt, ...) \
dd               1493 drivers/infiniband/hw/qib/qib.h 	dev_err(&(dd)->pcidev->dev, "%s: IB%u:%u " fmt, \
dd               1494 drivers/infiniband/hw/qib/qib.h 		rvt_get_ibdev_name(&(dd)->verbs_dev.rdi), (dd)->unit, (port), \
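
The three qib_dev_* macros above wrap dev_err()/dev_warn() on the PCI device and prefix the rdmavt device name (plus unit and port for the porterr variant). Usage is a one-liner; the first message appears in qib_eeprom.c below, the other two are illustrative:

	qib_dev_err(dd, "Failed reading GUID from eeprom\n");
	qib_dev_warn(dd, "EEPROM serial looks stale\n");	/* hypothetical message */
	qib_dev_porterr(dd, ppd->port, "link state change\n");	/* hypothetical message */
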
dd                 77 drivers/infiniband/hw/qib/qib_7220.h 	struct qib_devdata *dd;
dd                110 drivers/infiniband/hw/qib/qib_7220.h int qib_sd7220_presets(struct qib_devdata *dd);
dd                111 drivers/infiniband/hw/qib/qib_7220.h int qib_sd7220_init(struct qib_devdata *dd);
dd                120 drivers/infiniband/hw/qib/qib_7220.h static inline u32 qib_read_kreg32(const struct qib_devdata *dd,
dd                123 drivers/infiniband/hw/qib/qib_7220.h 	if (!dd->kregbase || !(dd->flags & QIB_PRESENT))
dd                125 drivers/infiniband/hw/qib/qib_7220.h 	return readl((u32 __iomem *)&dd->kregbase[regno]);
dd                128 drivers/infiniband/hw/qib/qib_7220.h static inline u64 qib_read_kreg64(const struct qib_devdata *dd,
dd                131 drivers/infiniband/hw/qib/qib_7220.h 	if (!dd->kregbase || !(dd->flags & QIB_PRESENT))
dd                134 drivers/infiniband/hw/qib/qib_7220.h 	return readq(&dd->kregbase[regno]);
dd                137 drivers/infiniband/hw/qib/qib_7220.h static inline void qib_write_kreg(const struct qib_devdata *dd,
dd                140 drivers/infiniband/hw/qib/qib_7220.h 	if (dd->kregbase)
dd                141 drivers/infiniband/hw/qib/qib_7220.h 		writeq(value, &dd->kregbase[regno]);
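
A hypothetical helper built on the qib_7220.h accessors above, showing a read-modify-write that preserves their guard semantics; it assumes an absent or unmapped chip reads back as all-ones, as qib_read_kreg32()'s fallback return suggests:

static inline void qib_set_kreg_bits(const struct qib_devdata *dd,
				     const u16 regno, u64 bits)
{
	u64 val = qib_read_kreg64(dd, regno);

	if (val != ~0ULL)	/* chip present and kregbase mapped */
		qib_write_kreg(dd, regno, val | bits);
}
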
dd                100 drivers/infiniband/hw/qib/qib_debugfs.c 	struct qib_devdata *dd = dd_from_dev(ibd);
dd                102 drivers/infiniband/hw/qib/qib_debugfs.c 	for (j = 0; j < dd->first_user_ctxt; j++) {
dd                103 drivers/infiniband/hw/qib/qib_debugfs.c 		if (!dd->rcd[j])
dd                105 drivers/infiniband/hw/qib/qib_debugfs.c 		n_packets += dd->rcd[j]->opstats->stats[i].n_packets;
dd                106 drivers/infiniband/hw/qib/qib_debugfs.c 		n_bytes += dd->rcd[j]->opstats->stats[i].n_bytes;
dd                122 drivers/infiniband/hw/qib/qib_debugfs.c 	struct qib_devdata *dd = dd_from_dev(ibd);
dd                126 drivers/infiniband/hw/qib/qib_debugfs.c 	if (*pos >= dd->first_user_ctxt)
dd                134 drivers/infiniband/hw/qib/qib_debugfs.c 	struct qib_devdata *dd = dd_from_dev(ibd);
dd                140 drivers/infiniband/hw/qib/qib_debugfs.c 	if (*pos >= dd->first_user_ctxt)
dd                156 drivers/infiniband/hw/qib/qib_debugfs.c 	struct qib_devdata *dd = dd_from_dev(ibd);
dd                166 drivers/infiniband/hw/qib/qib_debugfs.c 	if (!dd->rcd[i])
dd                169 drivers/infiniband/hw/qib/qib_debugfs.c 	for (j = 0; j < ARRAY_SIZE(dd->rcd[i]->opstats->stats); j++)
dd                170 drivers/infiniband/hw/qib/qib_debugfs.c 		n_packets += dd->rcd[i]->opstats->stats[j].n_packets;
dd                 69 drivers/infiniband/hw/qib/qib_diag.c 	struct qib_devdata *dd;
dd                 78 drivers/infiniband/hw/qib/qib_diag.c static struct qib_diag_client *get_client(struct qib_devdata *dd)
dd                 92 drivers/infiniband/hw/qib/qib_diag.c 		dc->dd = dd;
dd                104 drivers/infiniband/hw/qib/qib_diag.c 	struct qib_devdata *dd = dc->dd;
dd                108 drivers/infiniband/hw/qib/qib_diag.c 	if (dc == dd->diag_client) {
dd                109 drivers/infiniband/hw/qib/qib_diag.c 		dd->diag_client = dc->next;
dd                112 drivers/infiniband/hw/qib/qib_diag.c 		tdc = dc->dd->diag_client;
dd                124 drivers/infiniband/hw/qib/qib_diag.c 		rdc->dd = NULL;
dd                160 drivers/infiniband/hw/qib/qib_diag.c int qib_diag_add(struct qib_devdata *dd)
dd                173 drivers/infiniband/hw/qib/qib_diag.c 	snprintf(name, sizeof(name), "ipath_diag%d", dd->unit);
dd                174 drivers/infiniband/hw/qib/qib_diag.c 	ret = qib_cdev_init(QIB_DIAG_MINOR_BASE + dd->unit, name,
dd                175 drivers/infiniband/hw/qib/qib_diag.c 			    &diag_file_ops, &dd->diag_cdev,
dd                176 drivers/infiniband/hw/qib/qib_diag.c 			    &dd->diag_device);
dd                181 drivers/infiniband/hw/qib/qib_diag.c static void qib_unregister_observers(struct qib_devdata *dd);
dd                183 drivers/infiniband/hw/qib/qib_diag.c void qib_diag_remove(struct qib_devdata *dd)
dd                190 drivers/infiniband/hw/qib/qib_diag.c 	qib_cdev_cleanup(&dd->diag_cdev, &dd->diag_device);
dd                196 drivers/infiniband/hw/qib/qib_diag.c 	while (dd->diag_client)
dd                197 drivers/infiniband/hw/qib/qib_diag.c 		return_client(dd->diag_client);
dd                206 drivers/infiniband/hw/qib/qib_diag.c 	qib_unregister_observers(dd);
dd                234 drivers/infiniband/hw/qib/qib_diag.c static u32 __iomem *qib_remap_ioaddr32(struct qib_devdata *dd, u32 offset,
dd                239 drivers/infiniband/hw/qib/qib_diag.c 	u32 __iomem *krb32 = (u32 __iomem *)dd->kregbase;
dd                245 drivers/infiniband/hw/qib/qib_diag.c 	kreglen = (dd->kregend - dd->kregbase) * sizeof(u64);
dd                257 drivers/infiniband/hw/qib/qib_diag.c 	if (dd->userbase) {
dd                259 drivers/infiniband/hw/qib/qib_diag.c 		u32 ulim = (dd->cfgctxts * dd->ureg_align) + dd->uregbase;
dd                261 drivers/infiniband/hw/qib/qib_diag.c 		if (!dd->piovl15base)
dd                262 drivers/infiniband/hw/qib/qib_diag.c 			snd_lim = dd->uregbase;
dd                263 drivers/infiniband/hw/qib/qib_diag.c 		krb32 = (u32 __iomem *)dd->userbase;
dd                264 drivers/infiniband/hw/qib/qib_diag.c 		if (offset >= dd->uregbase && offset < ulim) {
dd                265 drivers/infiniband/hw/qib/qib_diag.c 			map = krb32 + (offset - dd->uregbase) / sizeof(u32);
dd                281 drivers/infiniband/hw/qib/qib_diag.c 	snd_bottom = dd->pio2k_bufbase;
dd                283 drivers/infiniband/hw/qib/qib_diag.c 		u32 tot2k = dd->piobcnt2k * ALIGN(dd->piosize2k, dd->palign);
dd                290 drivers/infiniband/hw/qib/qib_diag.c 	tot4k = dd->piobcnt4k * dd->align4k;
dd                291 drivers/infiniband/hw/qib/qib_diag.c 	offs4k = dd->piobufbase >> 32;
dd                292 drivers/infiniband/hw/qib/qib_diag.c 	if (dd->piobcnt4k) {
dd                297 drivers/infiniband/hw/qib/qib_diag.c 			if (!dd->userbase || dd->piovl15base)
dd                307 drivers/infiniband/hw/qib/qib_diag.c 		map = (u32 __iomem *)dd->piobase + (offset / sizeof(u32));
dd                311 drivers/infiniband/hw/qib/qib_diag.c 	if (!map && offs4k && dd->piovl15base) {
dd                312 drivers/infiniband/hw/qib/qib_diag.c 		snd_lim = offs4k + tot4k + 2 * dd->align4k;
dd                314 drivers/infiniband/hw/qib/qib_diag.c 			map = (u32 __iomem *)dd->piovl15base +
dd                339 drivers/infiniband/hw/qib/qib_diag.c static int qib_read_umem64(struct qib_devdata *dd, void __user *uaddr,
dd                347 drivers/infiniband/hw/qib/qib_diag.c 	reg_addr = (const u64 __iomem *)qib_remap_ioaddr32(dd, regoffs, &limit);
dd                348 drivers/infiniband/hw/qib/qib_diag.c 	if (reg_addr == NULL || limit == 0 || !(dd->flags & QIB_PRESENT)) {
dd                383 drivers/infiniband/hw/qib/qib_diag.c static int qib_write_umem64(struct qib_devdata *dd, u32 regoffs,
dd                391 drivers/infiniband/hw/qib/qib_diag.c 	reg_addr = (u64 __iomem *)qib_remap_ioaddr32(dd, regoffs, &limit);
dd                392 drivers/infiniband/hw/qib/qib_diag.c 	if (reg_addr == NULL || limit == 0 || !(dd->flags & QIB_PRESENT)) {
dd                428 drivers/infiniband/hw/qib/qib_diag.c static int qib_read_umem32(struct qib_devdata *dd, void __user *uaddr,
dd                436 drivers/infiniband/hw/qib/qib_diag.c 	reg_addr = qib_remap_ioaddr32(dd, regoffs, &limit);
dd                437 drivers/infiniband/hw/qib/qib_diag.c 	if (reg_addr == NULL || limit == 0 || !(dd->flags & QIB_PRESENT)) {
dd                474 drivers/infiniband/hw/qib/qib_diag.c static int qib_write_umem32(struct qib_devdata *dd, u32 regoffs,
dd                482 drivers/infiniband/hw/qib/qib_diag.c 	reg_addr = qib_remap_ioaddr32(dd, regoffs, &limit);
dd                483 drivers/infiniband/hw/qib/qib_diag.c 	if (reg_addr == NULL || limit == 0 || !(dd->flags & QIB_PRESENT)) {
dd                511 drivers/infiniband/hw/qib/qib_diag.c 	struct qib_devdata *dd;
dd                517 drivers/infiniband/hw/qib/qib_diag.c 	dd = qib_lookup(unit);
dd                519 drivers/infiniband/hw/qib/qib_diag.c 	if (dd == NULL || !(dd->flags & QIB_PRESENT) ||
dd                520 drivers/infiniband/hw/qib/qib_diag.c 	    !dd->kregbase) {
dd                525 drivers/infiniband/hw/qib/qib_diag.c 	dc = get_client(dd);
dd                530 drivers/infiniband/hw/qib/qib_diag.c 	dc->next = dd->diag_client;
dd                531 drivers/infiniband/hw/qib/qib_diag.c 	dd->diag_client = dc;
dd                555 drivers/infiniband/hw/qib/qib_diag.c 	struct qib_devdata *dd;
dd                568 drivers/infiniband/hw/qib/qib_diag.c 	dd = qib_lookup(dp.unit);
dd                569 drivers/infiniband/hw/qib/qib_diag.c 	if (!dd || !(dd->flags & QIB_PRESENT) || !dd->kregbase) {
dd                573 drivers/infiniband/hw/qib/qib_diag.c 	if (!(dd->flags & QIB_INITTED)) {
dd                580 drivers/infiniband/hw/qib/qib_diag.c 		qib_dev_err(dd, "Invalid version %u for diagpkt_write\n",
dd                590 drivers/infiniband/hw/qib/qib_diag.c 	if (!dp.port || dp.port > dd->num_pports) {
dd                594 drivers/infiniband/hw/qib/qib_diag.c 	ppd = &dd->pport[dp.port - 1];
dd                628 drivers/infiniband/hw/qib/qib_diag.c 	piobuf = dd->f_getsendbuf(ppd, dp.pbc_wd, &pbufn);
dd                634 drivers/infiniband/hw/qib/qib_diag.c 	dd->f_sendctrl(dd->pport, QIB_SENDCTRL_DISARM_BUF(pbufn));
dd                637 drivers/infiniband/hw/qib/qib_diag.c 	dd->f_txchk_change(dd, pbufn, 1, TXCHK_CHG_TYPE_DIS1, NULL);
dd                645 drivers/infiniband/hw/qib/qib_diag.c 	if (dd->flags & QIB_PIO_FLUSH_WC) {
dd                653 drivers/infiniband/hw/qib/qib_diag.c 	if (dd->flags & QIB_USE_SPCL_TRIG) {
dd                654 drivers/infiniband/hw/qib/qib_diag.c 		u32 spcl_off = (pbufn >= dd->piobcnt2k) ? 2047 : 1023;
dd                666 drivers/infiniband/hw/qib/qib_diag.c 	qib_sendbuf_done(dd, pbufn);
dd                667 drivers/infiniband/hw/qib/qib_diag.c 	dd->f_txchk_change(dd, pbufn, 1, TXCHK_CHG_TYPE_ENAB1, NULL);
dd                694 drivers/infiniband/hw/qib/qib_diag.c int qib_register_observer(struct qib_devdata *dd,
dd                700 drivers/infiniband/hw/qib/qib_diag.c 	if (!dd || !op)
dd                706 drivers/infiniband/hw/qib/qib_diag.c 	spin_lock_irqsave(&dd->qib_diag_trans_lock, flags);
dd                708 drivers/infiniband/hw/qib/qib_diag.c 	olp->next = dd->diag_observer_list;
dd                709 drivers/infiniband/hw/qib/qib_diag.c 	dd->diag_observer_list = olp;
dd                710 drivers/infiniband/hw/qib/qib_diag.c 	spin_unlock_irqrestore(&dd->qib_diag_trans_lock, flags);
dd                716 drivers/infiniband/hw/qib/qib_diag.c static void qib_unregister_observers(struct qib_devdata *dd)
dd                721 drivers/infiniband/hw/qib/qib_diag.c 	spin_lock_irqsave(&dd->qib_diag_trans_lock, flags);
dd                722 drivers/infiniband/hw/qib/qib_diag.c 	olp = dd->diag_observer_list;
dd                725 drivers/infiniband/hw/qib/qib_diag.c 		dd->diag_observer_list = olp->next;
dd                726 drivers/infiniband/hw/qib/qib_diag.c 		spin_unlock_irqrestore(&dd->qib_diag_trans_lock, flags);
dd                729 drivers/infiniband/hw/qib/qib_diag.c 		spin_lock_irqsave(&dd->qib_diag_trans_lock, flags);
dd                730 drivers/infiniband/hw/qib/qib_diag.c 		olp = dd->diag_observer_list;
dd                732 drivers/infiniband/hw/qib/qib_diag.c 	spin_unlock_irqrestore(&dd->qib_diag_trans_lock, flags);
dd                740 drivers/infiniband/hw/qib/qib_diag.c static const struct diag_observer *diag_get_observer(struct qib_devdata *dd,
dd                746 drivers/infiniband/hw/qib/qib_diag.c 	olp = dd->diag_observer_list;
dd                763 drivers/infiniband/hw/qib/qib_diag.c 	struct qib_devdata *dd = dc->dd;
dd                786 drivers/infiniband/hw/qib/qib_diag.c 		spin_lock_irqsave(&dd->qib_diag_trans_lock, flags);
dd                792 drivers/infiniband/hw/qib/qib_diag.c 		op = diag_get_observer(dd, *off);
dd                796 drivers/infiniband/hw/qib/qib_diag.c 			ret = op->hook(dd, op, offset, &data64, 0, use_32);
dd                802 drivers/infiniband/hw/qib/qib_diag.c 		spin_unlock_irqrestore(&dd->qib_diag_trans_lock, flags);
dd                809 drivers/infiniband/hw/qib/qib_diag.c 				ret = qib_read_umem32(dd, data, (u32) *off,
dd                812 drivers/infiniband/hw/qib/qib_diag.c 				ret = qib_read_umem64(dd, data, (u32) *off,
dd                837 drivers/infiniband/hw/qib/qib_diag.c 	struct qib_devdata *dd = dc->dd;
dd                876 drivers/infiniband/hw/qib/qib_diag.c 			spin_lock_irqsave(&dd->qib_diag_trans_lock, flags);
dd                877 drivers/infiniband/hw/qib/qib_diag.c 			op = diag_get_observer(dd, *off);
dd                879 drivers/infiniband/hw/qib/qib_diag.c 				ret = op->hook(dd, op, offset, &data64, ~0Ull,
dd                881 drivers/infiniband/hw/qib/qib_diag.c 			spin_unlock_irqrestore(&dd->qib_diag_trans_lock, flags);
dd                890 drivers/infiniband/hw/qib/qib_diag.c 				ret = qib_write_umem32(dd, (u32) *off, data,
dd                893 drivers/infiniband/hw/qib/qib_diag.c 				ret = qib_write_umem64(dd, (u32) *off, data,
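
The qib_diag.c observer fragments show a head-inserted singly linked list protected by qib_diag_trans_lock and consulted via diag_get_observer() before any raw register access. Registering one looks roughly like this; the hook's trailing parameters are inferred from the op->hook() call sites above, and the bottom/top field names and address window are assumptions:

static int example_hook(struct qib_devdata *dd,
			const struct diag_observer *op,
			u32 offs, u64 *data, u64 mask, int only_32)
{
	/* mask is 0 for the read probe, ~0 for a write, per the call sites. */
	return 0;	/* return value propagates to the diag read/write path */
}

static const struct diag_observer example_observer = {
	.hook = example_hook,
	.bottom = 0x1000,	/* hypothetical register window */
	.top = 0x100f,
};

	ret = qib_register_observer(dd, &example_observer);
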
dd                 85 drivers/infiniband/hw/qib/qib_driver.c 	struct qib_devdata *dd = container_of(ibdev,
dd                 87 drivers/infiniband/hw/qib/qib_driver.c 	return dd->pcidev;
dd                 95 drivers/infiniband/hw/qib/qib_driver.c 	struct qib_devdata *dd;
dd                101 drivers/infiniband/hw/qib/qib_driver.c 	xa_for_each(&qib_dev_table, index, dd) {
dd                102 drivers/infiniband/hw/qib/qib_driver.c 		if (!(dd->flags & QIB_PRESENT) || !dd->kregbase)
dd                104 drivers/infiniband/hw/qib/qib_driver.c 		for (pidx = 0; pidx < dd->num_pports; ++pidx) {
dd                105 drivers/infiniband/hw/qib/qib_driver.c 			ppd = dd->pport + pidx;
dd                125 drivers/infiniband/hw/qib/qib_driver.c 	struct qib_devdata *dd;
dd                131 drivers/infiniband/hw/qib/qib_driver.c 	xa_for_each(&qib_dev_table, index, dd) {
dd                133 drivers/infiniband/hw/qib/qib_driver.c 		if ((dd->flags & QIB_PRESENT) && dd->kregbase)
dd                135 drivers/infiniband/hw/qib/qib_driver.c 		for (pidx = 0; pidx < dd->num_pports; ++pidx) {
dd                136 drivers/infiniband/hw/qib/qib_driver.c 			ppd = dd->pport + pidx;
dd                196 drivers/infiniband/hw/qib/qib_driver.c 	struct qib_devdata *dd = ppd->dd;
dd                201 drivers/infiniband/hw/qib/qib_driver.c 		dd->f_set_ib_cfg(ppd, QIB_IB_CFG_LSTATE,
dd                208 drivers/infiniband/hw/qib/qib_driver.c 		dd->f_set_ib_cfg(ppd, QIB_IB_CFG_LSTATE,
dd                215 drivers/infiniband/hw/qib/qib_driver.c 		dd->f_set_ib_cfg(ppd, QIB_IB_CFG_LSTATE,
dd                222 drivers/infiniband/hw/qib/qib_driver.c 		dd->f_set_ib_cfg(ppd, QIB_IB_CFG_LSTATE,
dd                246 drivers/infiniband/hw/qib/qib_driver.c 		dd->f_set_ib_cfg(ppd, QIB_IB_CFG_LSTATE,
dd                260 drivers/infiniband/hw/qib/qib_driver.c 		dd->f_set_ib_cfg(ppd, QIB_IB_CFG_LSTATE,
dd                284 drivers/infiniband/hw/qib/qib_driver.c 	return rcd->rcvegrbuf[chunk] + (idx << rcd->dd->rcvegrbufsize_shift);
dd                304 drivers/infiniband/hw/qib/qib_driver.c 		struct qib_devdata *dd = ppd->dd;
dd                305 drivers/infiniband/hw/qib/qib_driver.c 		struct rvt_dev_info *rdi = &dd->verbs_dev.rdi;
dd                442 drivers/infiniband/hw/qib/qib_driver.c 	struct qib_devdata *dd = rcd->dd;
dd                446 drivers/infiniband/hw/qib/qib_driver.c 	const u32 rsize = dd->rcvhdrentsize;        /* words */
dd                447 drivers/infiniband/hw/qib/qib_driver.c 	const u32 maxcnt = dd->rcvhdrcnt * rsize;   /* words */
dd                456 drivers/infiniband/hw/qib/qib_driver.c 	rhf_addr = (__le32 *) rcd->rcvhdrq + l + dd->rhf_offset;
dd                457 drivers/infiniband/hw/qib/qib_driver.c 	if (dd->flags & QIB_NODMA_RTAIL) {
dd                471 drivers/infiniband/hw/qib/qib_driver.c 		hdr = dd->f_get_msgheader(dd, rhf_addr);
dd                477 drivers/infiniband/hw/qib/qib_driver.c 		if ((dd->flags & QIB_NODMA_RTAIL) ?
dd                498 drivers/infiniband/hw/qib/qib_driver.c 		    tlen > (dd->rcvhdrentsize - 2 + 1 -
dd                524 drivers/infiniband/hw/qib/qib_driver.c 		rhf_addr = (__le32 *) rcd->rcvhdrq + l + dd->rhf_offset;
dd                525 drivers/infiniband/hw/qib/qib_driver.c 		if (dd->flags & QIB_NODMA_RTAIL) {
dd                542 drivers/infiniband/hw/qib/qib_driver.c 			dd->f_update_usrhead(rcd, lval, updegr, etail, i);
dd                581 drivers/infiniband/hw/qib/qib_driver.c 	lval = (u64)rcd->head | dd->rhdrhead_intr_off;
dd                582 drivers/infiniband/hw/qib/qib_driver.c 	dd->f_update_usrhead(rcd, lval, updegr, etail, i);
dd                629 drivers/infiniband/hw/qib/qib_driver.c 	ppd->dd->f_set_ib_cfg(ppd, QIB_IB_CFG_MTU, 0);
dd                639 drivers/infiniband/hw/qib/qib_driver.c 	struct qib_devdata *dd = ppd->dd;
dd                644 drivers/infiniband/hw/qib/qib_driver.c 	dd->f_set_ib_cfg(ppd, QIB_IB_CFG_LIDLMC,
dd                647 drivers/infiniband/hw/qib/qib_driver.c 	qib_devinfo(dd->pcidev, "IB%u:%u got a lid: 0x%x\n",
dd                648 drivers/infiniband/hw/qib/qib_driver.c 		    dd->unit, ppd->port, lid);
dd                669 drivers/infiniband/hw/qib/qib_driver.c 	struct qib_devdata *dd = ppd->dd;
dd                673 drivers/infiniband/hw/qib/qib_driver.c 	if (!(dd->flags & QIB_INITTED))
dd                680 drivers/infiniband/hw/qib/qib_driver.c 	dd->f_setextled(ppd, 1);
dd                691 drivers/infiniband/hw/qib/qib_driver.c 	struct qib_devdata *dd = ppd->dd;
dd                694 drivers/infiniband/hw/qib/qib_driver.c 	if (!(dd->flags & QIB_INITTED))
dd                741 drivers/infiniband/hw/qib/qib_driver.c 	struct qib_devdata *dd = qib_lookup(unit);
dd                746 drivers/infiniband/hw/qib/qib_driver.c 	if (!dd) {
dd                751 drivers/infiniband/hw/qib/qib_driver.c 	qib_devinfo(dd->pcidev, "Reset on unit %u requested\n", unit);
dd                753 drivers/infiniband/hw/qib/qib_driver.c 	if (!dd->kregbase || !(dd->flags & QIB_PRESENT)) {
dd                754 drivers/infiniband/hw/qib/qib_driver.c 		qib_devinfo(dd->pcidev,
dd                761 drivers/infiniband/hw/qib/qib_driver.c 	spin_lock_irqsave(&dd->uctxt_lock, flags);
dd                762 drivers/infiniband/hw/qib/qib_driver.c 	if (dd->rcd)
dd                763 drivers/infiniband/hw/qib/qib_driver.c 		for (i = dd->first_user_ctxt; i < dd->cfgctxts; i++) {
dd                764 drivers/infiniband/hw/qib/qib_driver.c 			if (!dd->rcd[i] || !dd->rcd[i]->cnt)
dd                766 drivers/infiniband/hw/qib/qib_driver.c 			spin_unlock_irqrestore(&dd->uctxt_lock, flags);
dd                770 drivers/infiniband/hw/qib/qib_driver.c 	spin_unlock_irqrestore(&dd->uctxt_lock, flags);
dd                772 drivers/infiniband/hw/qib/qib_driver.c 	for (pidx = 0; pidx < dd->num_pports; ++pidx) {
dd                773 drivers/infiniband/hw/qib/qib_driver.c 		ppd = dd->pport + pidx;
dd                782 drivers/infiniband/hw/qib/qib_driver.c 		dd->f_setextled(ppd, 0);
dd                783 drivers/infiniband/hw/qib/qib_driver.c 		if (dd->flags & QIB_HAS_SEND_DMA)
dd                787 drivers/infiniband/hw/qib/qib_driver.c 	ret = dd->f_reset(dd);
dd                789 drivers/infiniband/hw/qib/qib_driver.c 		ret = qib_init(dd, 1);
dd                793 drivers/infiniband/hw/qib/qib_driver.c 		qib_dev_err(dd,
dd                797 drivers/infiniband/hw/qib/qib_driver.c 		qib_devinfo(dd->pcidev,
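
Condensed from the qib_driver.c reset fragments: a unit may only be reset while no user context is open, checked under uctxt_lock. The direct -EBUSY return is an assumption; the original routes through its error path before quiescing ports and calling f_reset()/qib_init():

	spin_lock_irqsave(&dd->uctxt_lock, flags);
	if (dd->rcd)
		for (i = dd->first_user_ctxt; i < dd->cfgctxts; i++) {
			if (!dd->rcd[i] || !dd->rcd[i]->cnt)
				continue;
			spin_unlock_irqrestore(&dd->uctxt_lock, flags);
			return -EBUSY;	/* a user context is still in use */
		}
	spin_unlock_irqrestore(&dd->uctxt_lock, flags);
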
dd                 53 drivers/infiniband/hw/qib/qib_eeprom.c int qib_eeprom_read(struct qib_devdata *dd, u8 eeprom_offset,
dd                 58 drivers/infiniband/hw/qib/qib_eeprom.c 	ret = mutex_lock_interruptible(&dd->eep_lock);
dd                 60 drivers/infiniband/hw/qib/qib_eeprom.c 		ret = qib_twsi_reset(dd);
dd                 62 drivers/infiniband/hw/qib/qib_eeprom.c 			qib_dev_err(dd, "EEPROM Reset for read failed\n");
dd                 64 drivers/infiniband/hw/qib/qib_eeprom.c 			ret = qib_twsi_blk_rd(dd, dd->twsi_eeprom_dev,
dd                 66 drivers/infiniband/hw/qib/qib_eeprom.c 		mutex_unlock(&dd->eep_lock);
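
The qib_eeprom_read() fragments above assemble into a complete locking pattern: take eep_lock interruptibly, reset the TWSI bus, then do the block read. Reconstructed sketch, directly from those lines:

	ret = mutex_lock_interruptible(&dd->eep_lock);
	if (!ret) {
		ret = qib_twsi_reset(dd);
		if (ret)
			qib_dev_err(dd, "EEPROM Reset for read failed\n");
		else
			ret = qib_twsi_blk_rd(dd, dd->twsi_eeprom_dev,
					      eeprom_offset, buff, len);
		mutex_unlock(&dd->eep_lock);
	}
	return ret;
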
dd                 77 drivers/infiniband/hw/qib/qib_eeprom.c static int eeprom_write_with_enable(struct qib_devdata *dd, u8 offset,
dd                 82 drivers/infiniband/hw/qib/qib_eeprom.c 	pwen = dd->f_eeprom_wen(dd, 1);
dd                 83 drivers/infiniband/hw/qib/qib_eeprom.c 	ret = qib_twsi_reset(dd);
dd                 85 drivers/infiniband/hw/qib/qib_eeprom.c 		qib_dev_err(dd, "EEPROM Reset for write failed\n");
dd                 87 drivers/infiniband/hw/qib/qib_eeprom.c 		ret = qib_twsi_blk_wr(dd, dd->twsi_eeprom_dev,
dd                 89 drivers/infiniband/hw/qib/qib_eeprom.c 	dd->f_eeprom_wen(dd, pwen);
dd                100 drivers/infiniband/hw/qib/qib_eeprom.c int qib_eeprom_write(struct qib_devdata *dd, u8 eeprom_offset,
dd                105 drivers/infiniband/hw/qib/qib_eeprom.c 	ret = mutex_lock_interruptible(&dd->eep_lock);
dd                107 drivers/infiniband/hw/qib/qib_eeprom.c 		ret = eeprom_write_with_enable(dd, eeprom_offset, buff, len);
dd                108 drivers/infiniband/hw/qib/qib_eeprom.c 		mutex_unlock(&dd->eep_lock);
dd                144 drivers/infiniband/hw/qib/qib_eeprom.c void qib_get_eeprom_info(struct qib_devdata *dd)
dd                151 drivers/infiniband/hw/qib/qib_eeprom.c 	int t = dd->unit;
dd                157 drivers/infiniband/hw/qib/qib_eeprom.c 		dd->base_guid = dd0->base_guid;
dd                158 drivers/infiniband/hw/qib/qib_eeprom.c 		bguid = (u8 *) &dd->base_guid;
dd                165 drivers/infiniband/hw/qib/qib_eeprom.c 					qib_dev_err(dd,
dd                167 drivers/infiniband/hw/qib/qib_eeprom.c 					dd->base_guid = 0;
dd                174 drivers/infiniband/hw/qib/qib_eeprom.c 		dd->nguid = 1;
dd                191 drivers/infiniband/hw/qib/qib_eeprom.c 	eep_stat = qib_eeprom_read(dd, 0, buf, len);
dd                194 drivers/infiniband/hw/qib/qib_eeprom.c 		qib_dev_err(dd, "Failed reading GUID from eeprom\n");
dd                201 drivers/infiniband/hw/qib/qib_eeprom.c 		qib_devinfo(dd->pcidev,
dd                208 drivers/infiniband/hw/qib/qib_eeprom.c 		qib_dev_err(dd,
dd                217 drivers/infiniband/hw/qib/qib_eeprom.c 		qib_devinfo(dd->pcidev,
dd                235 drivers/infiniband/hw/qib/qib_eeprom.c 	dd->base_guid = guid;
dd                236 drivers/infiniband/hw/qib/qib_eeprom.c 	dd->nguid = ifp->if_numguid;
dd                244 drivers/infiniband/hw/qib/qib_eeprom.c 		char *snp = dd->serial;
dd                254 drivers/infiniband/hw/qib/qib_eeprom.c 		len = sizeof(dd->serial) - len;
dd                259 drivers/infiniband/hw/qib/qib_eeprom.c 		memcpy(dd->serial, ifp->if_serial, sizeof(ifp->if_serial));
dd                262 drivers/infiniband/hw/qib/qib_eeprom.c 		qib_dev_err(dd,
dd                264 drivers/infiniband/hw/qib/qib_eeprom.c 			dd->serial, ifp->if_comment);
dd                104 drivers/infiniband/hw/qib/qib_file_ops.c 	struct qib_devdata *dd = rcd->dd;
dd                135 drivers/infiniband/hw/qib/qib_file_ops.c 	ret = dd->f_get_base_info(rcd, kinfo);
dd                139 drivers/infiniband/hw/qib/qib_file_ops.c 	kinfo->spi_rcvhdr_cnt = dd->rcvhdrcnt;
dd                140 drivers/infiniband/hw/qib/qib_file_ops.c 	kinfo->spi_rcvhdrent_size = dd->rcvhdrentsize;
dd                142 drivers/infiniband/hw/qib/qib_file_ops.c 	kinfo->spi_rcv_egrbufsize = dd->rcvegrbufsize;
dd                151 drivers/infiniband/hw/qib/qib_file_ops.c 	kinfo->spi_tidcnt = dd->rcvtidcnt / subctxt_cnt;
dd                153 drivers/infiniband/hw/qib/qib_file_ops.c 		kinfo->spi_tidcnt += dd->rcvtidcnt % subctxt_cnt;
dd                158 drivers/infiniband/hw/qib/qib_file_ops.c 	kinfo->spi_nctxts = dd->cfgctxts;
dd                160 drivers/infiniband/hw/qib/qib_file_ops.c 	kinfo->spi_unit = dd->unit;
dd                186 drivers/infiniband/hw/qib/qib_file_ops.c 	kinfo->spi_rhf_offset = dd->rhf_offset;
dd                188 drivers/infiniband/hw/qib/qib_file_ops.c 	kinfo->spi_pioavailaddr = (u64) dd->pioavailregs_phys;
dd                192 drivers/infiniband/hw/qib/qib_file_ops.c 		(char *) dd->pioavailregs_dma;
dd                193 drivers/infiniband/hw/qib/qib_file_ops.c 	kinfo->spi_uregbase = (u64) dd->uregbase + dd->ureg_align * rcd->ctxt;
dd                203 drivers/infiniband/hw/qib/qib_file_ops.c 			dd->palign *
dd                210 drivers/infiniband/hw/qib/qib_file_ops.c 			dd->palign * kinfo->spi_piocnt * slave;
dd                231 drivers/infiniband/hw/qib/qib_file_ops.c 	kinfo->spi_pioindex = (kinfo->spi_piobufbase - dd->pio2k_bufbase) /
dd                232 drivers/infiniband/hw/qib/qib_file_ops.c 		dd->palign;
dd                233 drivers/infiniband/hw/qib/qib_file_ops.c 	kinfo->spi_pioalign = dd->palign;
dd                240 drivers/infiniband/hw/qib/qib_file_ops.c 	kinfo->spi_piosize = dd->piosize2k - 2 * sizeof(u32);
dd                246 drivers/infiniband/hw/qib/qib_file_ops.c 	kinfo->spi_hw_version = dd->revision;
dd                290 drivers/infiniband/hw/qib/qib_file_ops.c 	struct qib_devdata *dd = rcd->dd;
dd                298 drivers/infiniband/hw/qib/qib_file_ops.c 	if (!dd->pageshadow) {
dd                308 drivers/infiniband/hw/qib/qib_file_ops.c 	ctxttid = rcd->ctxt * dd->rcvtidcnt;
dd                310 drivers/infiniband/hw/qib/qib_file_ops.c 		tidcnt = dd->rcvtidcnt;
dd                314 drivers/infiniband/hw/qib/qib_file_ops.c 		tidcnt = (dd->rcvtidcnt / rcd->subctxt_cnt) +
dd                315 drivers/infiniband/hw/qib/qib_file_ops.c 			 (dd->rcvtidcnt % rcd->subctxt_cnt);
dd                316 drivers/infiniband/hw/qib/qib_file_ops.c 		tidoff = dd->rcvtidcnt - tidcnt;
dd                320 drivers/infiniband/hw/qib/qib_file_ops.c 		tidcnt = dd->rcvtidcnt / rcd->subctxt_cnt;
dd                327 drivers/infiniband/hw/qib/qib_file_ops.c 		qib_devinfo(dd->pcidev,
dd                333 drivers/infiniband/hw/qib/qib_file_ops.c 	tidlist = (u16 *) &pagep[dd->rcvtidcnt];
dd                340 drivers/infiniband/hw/qib/qib_file_ops.c 	tidbase = (u64 __iomem *) (((char __iomem *) dd->kregbase) +
dd                341 drivers/infiniband/hw/qib/qib_file_ops.c 				   dd->rcvtidbase +
dd                361 drivers/infiniband/hw/qib/qib_file_ops.c 			dd->pcidev,
dd                372 drivers/infiniband/hw/qib/qib_file_ops.c 			if (!dd->pageshadow[ctxttid + tid])
dd                385 drivers/infiniband/hw/qib/qib_file_ops.c 		ret = qib_map_page(dd->pcidev, pagep[i], &daddr);
dd                391 drivers/infiniband/hw/qib/qib_file_ops.c 		dd->pageshadow[ctxttid + tid] = pagep[i];
dd                392 drivers/infiniband/hw/qib/qib_file_ops.c 		dd->physshadow[ctxttid + tid] = daddr;
dd                397 drivers/infiniband/hw/qib/qib_file_ops.c 		physaddr = dd->physshadow[ctxttid + tid];
dd                399 drivers/infiniband/hw/qib/qib_file_ops.c 		dd->f_put_tid(dd, &tidbase[tid],
dd                421 drivers/infiniband/hw/qib/qib_file_ops.c 			if (dd->pageshadow[ctxttid + tid]) {
dd                424 drivers/infiniband/hw/qib/qib_file_ops.c 				phys = dd->physshadow[ctxttid + tid];
dd                425 drivers/infiniband/hw/qib/qib_file_ops.c 				dd->physshadow[ctxttid + tid] = dd->tidinvalid;
dd                429 drivers/infiniband/hw/qib/qib_file_ops.c 				dd->f_put_tid(dd, &tidbase[tid],
dd                431 drivers/infiniband/hw/qib/qib_file_ops.c 					      dd->tidinvalid);
dd                432 drivers/infiniband/hw/qib/qib_file_ops.c 				pci_unmap_page(dd->pcidev, phys, PAGE_SIZE,
dd                434 drivers/infiniband/hw/qib/qib_file_ops.c 				dd->pageshadow[ctxttid + tid] = NULL;
dd                488 drivers/infiniband/hw/qib/qib_file_ops.c 	struct qib_devdata *dd = rcd->dd;
dd                492 drivers/infiniband/hw/qib/qib_file_ops.c 	if (!dd->pageshadow) {
dd                503 drivers/infiniband/hw/qib/qib_file_ops.c 	ctxttid = rcd->ctxt * dd->rcvtidcnt;
dd                505 drivers/infiniband/hw/qib/qib_file_ops.c 		tidcnt = dd->rcvtidcnt;
dd                507 drivers/infiniband/hw/qib/qib_file_ops.c 		tidcnt = (dd->rcvtidcnt / rcd->subctxt_cnt) +
dd                508 drivers/infiniband/hw/qib/qib_file_ops.c 			 (dd->rcvtidcnt % rcd->subctxt_cnt);
dd                509 drivers/infiniband/hw/qib/qib_file_ops.c 		ctxttid += dd->rcvtidcnt - tidcnt;
dd                511 drivers/infiniband/hw/qib/qib_file_ops.c 		tidcnt = dd->rcvtidcnt / rcd->subctxt_cnt;
dd                514 drivers/infiniband/hw/qib/qib_file_ops.c 	tidbase = (u64 __iomem *) ((char __iomem *)(dd->kregbase) +
dd                515 drivers/infiniband/hw/qib/qib_file_ops.c 				   dd->rcvtidbase +
dd                534 drivers/infiniband/hw/qib/qib_file_ops.c 		if (dd->pageshadow[ctxttid + tid]) {
dd                538 drivers/infiniband/hw/qib/qib_file_ops.c 			p = dd->pageshadow[ctxttid + tid];
dd                539 drivers/infiniband/hw/qib/qib_file_ops.c 			dd->pageshadow[ctxttid + tid] = NULL;
dd                540 drivers/infiniband/hw/qib/qib_file_ops.c 			phys = dd->physshadow[ctxttid + tid];
dd                541 drivers/infiniband/hw/qib/qib_file_ops.c 			dd->physshadow[ctxttid + tid] = dd->tidinvalid;
dd                545 drivers/infiniband/hw/qib/qib_file_ops.c 			dd->f_put_tid(dd, &tidbase[tid],
dd                546 drivers/infiniband/hw/qib/qib_file_ops.c 				      RCVHQ_RCV_TYPE_EXPECTED, dd->tidinvalid);
dd                547 drivers/infiniband/hw/qib/qib_file_ops.c 			pci_unmap_page(dd->pcidev, phys, PAGE_SIZE,
dd                635 drivers/infiniband/hw/qib/qib_file_ops.c 			(void) ppd->dd->f_set_ib_cfg(ppd, QIB_IB_CFG_PKEYS, 0);
dd                655 drivers/infiniband/hw/qib/qib_file_ops.c 	struct qib_devdata *dd = rcd->dd;
dd                675 drivers/infiniband/hw/qib/qib_file_ops.c 	dd->f_rcvctrl(rcd->ppd, rcvctrl_op, rcd->ctxt);
dd                682 drivers/infiniband/hw/qib/qib_file_ops.c 			       struct qib_devdata *dd)
dd                704 drivers/infiniband/hw/qib/qib_file_ops.c 		(void) ppd->dd->f_set_ib_cfg(ppd, QIB_IB_CFG_PKEYS, 0);
dd                711 drivers/infiniband/hw/qib/qib_file_ops.c 	struct qib_devdata *dd = rcd->dd;
dd                716 drivers/infiniband/hw/qib/qib_file_ops.c 		qib_devinfo(dd->pcidev,
dd                729 drivers/infiniband/hw/qib/qib_file_ops.c 			qib_devinfo(dd->pcidev,
dd                743 drivers/infiniband/hw/qib/qib_file_ops.c 		qib_devinfo(dd->pcidev,
dd                750 drivers/infiniband/hw/qib/qib_file_ops.c static int mmap_ureg(struct vm_area_struct *vma, struct qib_devdata *dd,
dd                762 drivers/infiniband/hw/qib/qib_file_ops.c 	sz = dd->flags & QIB_HAS_HDRSUPP ? 2 * PAGE_SIZE : PAGE_SIZE;
dd                764 drivers/infiniband/hw/qib/qib_file_ops.c 		qib_devinfo(dd->pcidev,
dd                769 drivers/infiniband/hw/qib/qib_file_ops.c 		phys = dd->physaddr + ureg;
dd                782 drivers/infiniband/hw/qib/qib_file_ops.c 			struct qib_devdata *dd,
dd                795 drivers/infiniband/hw/qib/qib_file_ops.c 	if ((vma->vm_end - vma->vm_start) > (piocnt * dd->palign)) {
dd                796 drivers/infiniband/hw/qib/qib_file_ops.c 		qib_devinfo(dd->pcidev,
dd                803 drivers/infiniband/hw/qib/qib_file_ops.c 	phys = dd->physaddr + piobufs;
dd                817 drivers/infiniband/hw/qib/qib_file_ops.c 	if (!dd->wc_cookie)
dd                830 drivers/infiniband/hw/qib/qib_file_ops.c 	struct qib_devdata *dd = rcd->dd;
dd                839 drivers/infiniband/hw/qib/qib_file_ops.c 		qib_devinfo(dd->pcidev,
dd                848 drivers/infiniband/hw/qib/qib_file_ops.c 		qib_devinfo(dd->pcidev,
dd                896 drivers/infiniband/hw/qib/qib_file_ops.c 	struct qib_devdata *dd = rcd->dd;
dd                937 drivers/infiniband/hw/qib/qib_file_ops.c 			qib_devinfo(dd->pcidev,
dd                978 drivers/infiniband/hw/qib/qib_file_ops.c 	struct qib_devdata *dd;
dd                988 drivers/infiniband/hw/qib/qib_file_ops.c 	dd = rcd->dd;
dd               1022 drivers/infiniband/hw/qib/qib_file_ops.c 	ureg = dd->uregbase + dd->ureg_align * rcd->ctxt;
dd               1032 drivers/infiniband/hw/qib/qib_file_ops.c 			dd->palign * (rcd->piocnt - piocnt);
dd               1038 drivers/infiniband/hw/qib/qib_file_ops.c 		piobufs = rcd->piobufs + dd->palign * piocnt * slave;
dd               1042 drivers/infiniband/hw/qib/qib_file_ops.c 		ret = mmap_ureg(vma, dd, ureg);
dd               1044 drivers/infiniband/hw/qib/qib_file_ops.c 		ret = mmap_piobufs(vma, dd, rcd, piobufs, piocnt);
dd               1045 drivers/infiniband/hw/qib/qib_file_ops.c 	else if (pgaddr == dd->pioavailregs_phys)
dd               1048 drivers/infiniband/hw/qib/qib_file_ops.c 				   (void *) dd->pioavailregs_dma, 0,
dd               1073 drivers/infiniband/hw/qib/qib_file_ops.c 		qib_devinfo(dd->pcidev,
dd               1085 drivers/infiniband/hw/qib/qib_file_ops.c 	struct qib_devdata *dd = rcd->dd;
dd               1090 drivers/infiniband/hw/qib/qib_file_ops.c 	spin_lock_irq(&dd->uctxt_lock);
dd               1098 drivers/infiniband/hw/qib/qib_file_ops.c 	spin_unlock_irq(&dd->uctxt_lock);
dd               1107 drivers/infiniband/hw/qib/qib_file_ops.c 	struct qib_devdata *dd = rcd->dd;
dd               1112 drivers/infiniband/hw/qib/qib_file_ops.c 	spin_lock_irq(&dd->uctxt_lock);
dd               1113 drivers/infiniband/hw/qib/qib_file_ops.c 	if (dd->f_hdrqempty(rcd)) {
dd               1115 drivers/infiniband/hw/qib/qib_file_ops.c 		dd->f_rcvctrl(rcd->ppd, QIB_RCVCTRL_INTRAVAIL_ENB, rcd->ctxt);
dd               1119 drivers/infiniband/hw/qib/qib_file_ops.c 	spin_unlock_irq(&dd->uctxt_lock);
dd               1142 drivers/infiniband/hw/qib/qib_file_ops.c static void assign_ctxt_affinity(struct file *fp, struct qib_devdata *dd)
dd               1146 drivers/infiniband/hw/qib/qib_file_ops.c 	const struct cpumask *local_mask = cpumask_of_pcibus(dd->pcidev->bus);
dd               1176 drivers/infiniband/hw/qib/qib_file_ops.c 			qib_dev_err(dd,
dd               1215 drivers/infiniband/hw/qib/qib_file_ops.c static int init_subctxts(struct qib_devdata *dd,
dd               1234 drivers/infiniband/hw/qib/qib_file_ops.c 		qib_devinfo(dd->pcidev,
dd               1252 drivers/infiniband/hw/qib/qib_file_ops.c 	size = ALIGN(dd->rcvhdrcnt * dd->rcvhdrentsize *
dd               1288 drivers/infiniband/hw/qib/qib_file_ops.c 	struct qib_devdata *dd = ppd->dd;
dd               1294 drivers/infiniband/hw/qib/qib_file_ops.c 	assign_ctxt_affinity(fp, dd);
dd               1298 drivers/infiniband/hw/qib/qib_file_ops.c 		numa_node_id()) : dd->assigned_node_id;
dd               1307 drivers/infiniband/hw/qib/qib_file_ops.c 		ptmp = kmalloc(dd->rcvtidcnt * sizeof(u16) +
dd               1308 drivers/infiniband/hw/qib/qib_file_ops.c 			       dd->rcvtidcnt * sizeof(struct page **),
dd               1312 drivers/infiniband/hw/qib/qib_file_ops.c 		qib_dev_err(dd,
dd               1318 drivers/infiniband/hw/qib/qib_file_ops.c 	ret = init_subctxts(dd, rcd, uinfo);
dd               1323 drivers/infiniband/hw/qib/qib_file_ops.c 	init_waitqueue_head(&dd->rcd[ctxt]->wait);
dd               1327 drivers/infiniband/hw/qib/qib_file_ops.c 	dd->freectxts--;
dd               1335 drivers/infiniband/hw/qib/qib_file_ops.c 	dd->rcd[ctxt] = NULL;
dd               1344 drivers/infiniband/hw/qib/qib_file_ops.c 	struct qib_devdata *dd = ppd->dd;
dd               1346 drivers/infiniband/hw/qib/qib_file_ops.c 	return dd && (dd->flags & QIB_PRESENT) && dd->kregbase && ppd->lid &&
dd               1354 drivers/infiniband/hw/qib/qib_file_ops.c static int choose_port_ctxt(struct file *fp, struct qib_devdata *dd, u32 port,
dd               1361 drivers/infiniband/hw/qib/qib_file_ops.c 		if (!usable(dd->pport + port - 1)) {
dd               1365 drivers/infiniband/hw/qib/qib_file_ops.c 			ppd = dd->pport + port - 1;
dd               1367 drivers/infiniband/hw/qib/qib_file_ops.c 	for (ctxt = dd->first_user_ctxt; ctxt < dd->cfgctxts && dd->rcd[ctxt];
dd               1370 drivers/infiniband/hw/qib/qib_file_ops.c 	if (ctxt == dd->cfgctxts) {
dd               1375 drivers/infiniband/hw/qib/qib_file_ops.c 		u32 pidx = ctxt % dd->num_pports;
dd               1377 drivers/infiniband/hw/qib/qib_file_ops.c 		if (usable(dd->pport + pidx))
dd               1378 drivers/infiniband/hw/qib/qib_file_ops.c 			ppd = dd->pport + pidx;
dd               1380 drivers/infiniband/hw/qib/qib_file_ops.c 			for (pidx = 0; pidx < dd->num_pports && !ppd;
dd               1382 drivers/infiniband/hw/qib/qib_file_ops.c 				if (usable(dd->pport + pidx))
dd               1383 drivers/infiniband/hw/qib/qib_file_ops.c 					ppd = dd->pport + pidx;
dd               1394 drivers/infiniband/hw/qib/qib_file_ops.c 	struct qib_devdata *dd = qib_lookup(unit);
dd               1397 drivers/infiniband/hw/qib/qib_file_ops.c 	if (!dd || (uinfo->spu_port && uinfo->spu_port > dd->num_pports))
dd               1400 drivers/infiniband/hw/qib/qib_file_ops.c 		ret = choose_port_ctxt(fp, dd, uinfo->spu_port, uinfo);
dd               1427 drivers/infiniband/hw/qib/qib_file_ops.c 			struct qib_devdata *dd = qib_lookup(ndev);
dd               1430 drivers/infiniband/hw/qib/qib_file_ops.c 			if (!dd)
dd               1432 drivers/infiniband/hw/qib/qib_file_ops.c 			if (port && port <= dd->num_pports &&
dd               1433 drivers/infiniband/hw/qib/qib_file_ops.c 			    usable(dd->pport + port - 1))
dd               1436 drivers/infiniband/hw/qib/qib_file_ops.c 				for (i = 0; i < dd->num_pports; i++)
dd               1437 drivers/infiniband/hw/qib/qib_file_ops.c 					if (usable(dd->pport + i))
dd               1441 drivers/infiniband/hw/qib/qib_file_ops.c 			for (ctxt = dd->first_user_ctxt; ctxt < dd->cfgctxts;
dd               1443 drivers/infiniband/hw/qib/qib_file_ops.c 				if (dd->rcd[ctxt])
dd               1448 drivers/infiniband/hw/qib/qib_file_ops.c 				udd = dd;
dd               1458 drivers/infiniband/hw/qib/qib_file_ops.c 			struct qib_devdata *dd = qib_lookup(ndev);
dd               1460 drivers/infiniband/hw/qib/qib_file_ops.c 			if (dd) {
dd               1461 drivers/infiniband/hw/qib/qib_file_ops.c 				ret = choose_port_ctxt(fp, dd, port, uinfo);
dd               1484 drivers/infiniband/hw/qib/qib_file_ops.c 		struct qib_devdata *dd = qib_lookup(ndev);
dd               1487 drivers/infiniband/hw/qib/qib_file_ops.c 		if (!(dd && (dd->flags & QIB_PRESENT) && dd->kregbase))
dd               1489 drivers/infiniband/hw/qib/qib_file_ops.c 		for (i = dd->first_user_ctxt; i < dd->cfgctxts; i++) {
dd               1490 drivers/infiniband/hw/qib/qib_file_ops.c 			struct qib_ctxtdata *rcd = dd->rcd[i];
dd               1544 drivers/infiniband/hw/qib/qib_file_ops.c 		struct qib_devdata *dd = qib_lookup(ndev);
dd               1546 drivers/infiniband/hw/qib/qib_file_ops.c 		if (dd) {
dd               1547 drivers/infiniband/hw/qib/qib_file_ops.c 			if (pcibus_to_node(dd->pcidev->bus) < 0) {
dd               1552 drivers/infiniband/hw/qib/qib_file_ops.c 				pcibus_to_node(dd->pcidev->bus)) {
dd               1566 drivers/infiniband/hw/qib/qib_file_ops.c 	struct qib_devdata *dd = rcd->dd;
dd               1568 drivers/infiniband/hw/qib/qib_file_ops.c 	if (dd->flags & QIB_HAS_SEND_DMA) {
dd               1570 drivers/infiniband/hw/qib/qib_file_ops.c 		fd->pq = qib_user_sdma_queue_create(&dd->pcidev->dev,
dd               1571 drivers/infiniband/hw/qib/qib_file_ops.c 						    dd->unit,
dd               1616 drivers/infiniband/hw/qib/qib_file_ops.c 				assign_ctxt_affinity(fp, (ctxt_fp(fp))->dd);
dd               1654 drivers/infiniband/hw/qib/qib_file_ops.c 	struct qib_devdata *dd;
dd               1664 drivers/infiniband/hw/qib/qib_file_ops.c 	dd = rcd->dd;
dd               1667 drivers/infiniband/hw/qib/qib_file_ops.c 	uctxt = rcd->ctxt - dd->first_user_ctxt;
dd               1668 drivers/infiniband/hw/qib/qib_file_ops.c 	if (uctxt < dd->ctxts_extrabuf) {
dd               1669 drivers/infiniband/hw/qib/qib_file_ops.c 		rcd->piocnt = dd->pbufsctxt + 1;
dd               1672 drivers/infiniband/hw/qib/qib_file_ops.c 		rcd->piocnt = dd->pbufsctxt;
dd               1674 drivers/infiniband/hw/qib/qib_file_ops.c 			dd->ctxts_extrabuf;
dd               1683 drivers/infiniband/hw/qib/qib_file_ops.c 	if ((rcd->pio_base + rcd->piocnt) > dd->piobcnt2k) {
dd               1684 drivers/infiniband/hw/qib/qib_file_ops.c 		if (rcd->pio_base >= dd->piobcnt2k) {
dd               1685 drivers/infiniband/hw/qib/qib_file_ops.c 			qib_dev_err(dd,
dd               1687 drivers/infiniband/hw/qib/qib_file_ops.c 				    dd->unit, rcd->ctxt);
dd               1691 drivers/infiniband/hw/qib/qib_file_ops.c 		rcd->piocnt = dd->piobcnt2k - rcd->pio_base;
dd               1692 drivers/infiniband/hw/qib/qib_file_ops.c 		qib_dev_err(dd, "Ctxt%u: would use 4KB bufs, using %u\n",
dd               1696 drivers/infiniband/hw/qib/qib_file_ops.c 	rcd->piobufs = dd->pio2k_bufbase + rcd->pio_base * dd->palign;
dd               1697 drivers/infiniband/hw/qib/qib_file_ops.c 	qib_chg_pioavailkernel(dd, rcd->pio_base, rcd->piocnt,
dd               1709 drivers/infiniband/hw/qib/qib_file_ops.c 	dd->f_sendctrl(dd->pport, QIB_SENDCTRL_AVAIL_BLIP);
dd               1717 drivers/infiniband/hw/qib/qib_file_ops.c 	ret = qib_create_rcvhdrq(dd, rcd);
dd               1743 drivers/infiniband/hw/qib/qib_file_ops.c 	dd->f_rcvctrl(rcd->ppd, QIB_RCVCTRL_CTXT_ENB | QIB_RCVCTRL_TIDFLOW_ENB,
dd               1754 drivers/infiniband/hw/qib/qib_file_ops.c 	qib_chg_pioavailkernel(dd, rcd->pio_base, rcd->piocnt,
dd               1769 drivers/infiniband/hw/qib/qib_file_ops.c 	struct qib_devdata *dd = rcd->dd;
dd               1770 drivers/infiniband/hw/qib/qib_file_ops.c 	int ctxt_tidbase = rcd->ctxt * dd->rcvtidcnt;
dd               1771 drivers/infiniband/hw/qib/qib_file_ops.c 	int i, cnt = 0, maxtid = ctxt_tidbase + dd->rcvtidcnt;
dd               1774 drivers/infiniband/hw/qib/qib_file_ops.c 		struct page *p = dd->pageshadow[i];
dd               1780 drivers/infiniband/hw/qib/qib_file_ops.c 		phys = dd->physshadow[i];
dd               1781 drivers/infiniband/hw/qib/qib_file_ops.c 		dd->physshadow[i] = dd->tidinvalid;
dd               1782 drivers/infiniband/hw/qib/qib_file_ops.c 		dd->pageshadow[i] = NULL;
dd               1783 drivers/infiniband/hw/qib/qib_file_ops.c 		pci_unmap_page(dd->pcidev, phys, PAGE_SIZE,
dd               1794 drivers/infiniband/hw/qib/qib_file_ops.c 	struct qib_devdata *dd;
dd               1808 drivers/infiniband/hw/qib/qib_file_ops.c 	dd = rcd->dd;
dd               1835 drivers/infiniband/hw/qib/qib_file_ops.c 	spin_lock_irqsave(&dd->uctxt_lock, flags);
dd               1837 drivers/infiniband/hw/qib/qib_file_ops.c 	dd->rcd[ctxt] = NULL;
dd               1839 drivers/infiniband/hw/qib/qib_file_ops.c 	spin_unlock_irqrestore(&dd->uctxt_lock, flags);
dd               1851 drivers/infiniband/hw/qib/qib_file_ops.c 	if (dd->kregbase) {
dd               1853 drivers/infiniband/hw/qib/qib_file_ops.c 		dd->f_rcvctrl(rcd->ppd, QIB_RCVCTRL_CTXT_DIS |
dd               1857 drivers/infiniband/hw/qib/qib_file_ops.c 		qib_clean_part_key(rcd, dd);
dd               1858 drivers/infiniband/hw/qib/qib_file_ops.c 		qib_disarm_piobufs(dd, rcd->pio_base, rcd->piocnt);
dd               1859 drivers/infiniband/hw/qib/qib_file_ops.c 		qib_chg_pioavailkernel(dd, rcd->pio_base,
dd               1862 drivers/infiniband/hw/qib/qib_file_ops.c 		dd->f_clear_tids(dd, rcd);
dd               1864 drivers/infiniband/hw/qib/qib_file_ops.c 		if (dd->pageshadow)
dd               1867 drivers/infiniband/hw/qib/qib_file_ops.c 		dd->freectxts++;
dd               1871 drivers/infiniband/hw/qib/qib_file_ops.c 	qib_free_ctxtdata(dd, rcd); /* after releasing the mutex */
dd               1889 drivers/infiniband/hw/qib/qib_file_ops.c 	info.unit = rcd->dd->unit;
dd               1894 drivers/infiniband/hw/qib/qib_file_ops.c 	info.num_ctxts = rcd->dd->cfgctxts - rcd->dd->first_user_ctxt;
dd               1983 drivers/infiniband/hw/qib/qib_file_ops.c 	spin_lock_irqsave(&ppd->dd->uctxt_lock, flags);
dd               1984 drivers/infiniband/hw/qib/qib_file_ops.c 	for (ctxt = ppd->dd->first_user_ctxt; ctxt < ppd->dd->cfgctxts;
dd               1986 drivers/infiniband/hw/qib/qib_file_ops.c 		rcd = ppd->dd->rcd[ctxt];
dd               2002 drivers/infiniband/hw/qib/qib_file_ops.c 	spin_unlock_irqrestore(&ppd->dd->uctxt_lock, flags);
dd               2205 drivers/infiniband/hw/qib/qib_file_ops.c 		qib_force_pio_avail_update(rcd->dd);
dd               2213 drivers/infiniband/hw/qib/qib_file_ops.c 		rcd->dd->f_set_armlaunch(rcd->dd, cmd.cmd.armlaunch_ctrl);
dd               2352 drivers/infiniband/hw/qib/qib_file_ops.c static void qib_user_remove(struct qib_devdata *dd)
dd               2357 drivers/infiniband/hw/qib/qib_file_ops.c 	qib_cdev_cleanup(&dd->user_cdev, &dd->user_device);
dd               2360 drivers/infiniband/hw/qib/qib_file_ops.c static int qib_user_add(struct qib_devdata *dd)
dd               2372 drivers/infiniband/hw/qib/qib_file_ops.c 	snprintf(name, sizeof(name), "ipath%d", dd->unit);
dd               2373 drivers/infiniband/hw/qib/qib_file_ops.c 	ret = qib_cdev_init(dd->unit + 1, name, &qib_file_ops,
dd               2374 drivers/infiniband/hw/qib/qib_file_ops.c 			    &dd->user_cdev, &dd->user_device);
dd               2376 drivers/infiniband/hw/qib/qib_file_ops.c 		qib_user_remove(dd);
dd               2384 drivers/infiniband/hw/qib/qib_file_ops.c int qib_device_create(struct qib_devdata *dd)
dd               2388 drivers/infiniband/hw/qib/qib_file_ops.c 	r = qib_user_add(dd);
dd               2389 drivers/infiniband/hw/qib/qib_file_ops.c 	ret = qib_diag_add(dd);
dd               2399 drivers/infiniband/hw/qib/qib_file_ops.c void qib_device_remove(struct qib_devdata *dd)
dd               2401 drivers/infiniband/hw/qib/qib_file_ops.c 	qib_user_remove(dd);
dd               2402 drivers/infiniband/hw/qib/qib_file_ops.c 	qib_diag_remove(dd);
dd                151 drivers/infiniband/hw/qib/qib_fs.c 	struct qib_devdata *dd = private2dd(file);
dd                153 drivers/infiniband/hw/qib/qib_fs.c 	avail = dd->f_read_cntrs(dd, *ppos, NULL, &counters);
dd                163 drivers/infiniband/hw/qib/qib_fs.c 	struct qib_devdata *dd = private2dd(file);
dd                165 drivers/infiniband/hw/qib/qib_fs.c 	avail = dd->f_read_cntrs(dd, *ppos, &names, NULL);
dd                185 drivers/infiniband/hw/qib/qib_fs.c 	struct qib_devdata *dd = private2dd(file);
dd                187 drivers/infiniband/hw/qib/qib_fs.c 	avail = dd->f_read_portcntrs(dd, *ppos, 0, &names, NULL);
dd                197 drivers/infiniband/hw/qib/qib_fs.c 	struct qib_devdata *dd = private2dd(file);
dd                199 drivers/infiniband/hw/qib/qib_fs.c 	avail = dd->f_read_portcntrs(dd, *ppos, 0, NULL, &counters);
dd                209 drivers/infiniband/hw/qib/qib_fs.c 	struct qib_devdata *dd = private2dd(file);
dd                211 drivers/infiniband/hw/qib/qib_fs.c 	avail = dd->f_read_portcntrs(dd, *ppos, 1, NULL, &counters);
dd                227 drivers/infiniband/hw/qib/qib_fs.c 	struct qib_devdata *dd = private2dd(file);
dd                235 drivers/infiniband/hw/qib/qib_fs.c 	ret = qib_qsfp_dump(dd->pport, tmp, PAGE_SIZE);
dd                248 drivers/infiniband/hw/qib/qib_fs.c 	struct qib_devdata *dd = private2dd(file);
dd                252 drivers/infiniband/hw/qib/qib_fs.c 	if (dd->num_pports < 2)
dd                259 drivers/infiniband/hw/qib/qib_fs.c 	ret = qib_qsfp_dump(dd->pport + 1, tmp, PAGE_SIZE);
dd                274 drivers/infiniband/hw/qib/qib_fs.c 	struct qib_devdata *dd;
dd                300 drivers/infiniband/hw/qib/qib_fs.c 	dd = private2dd(file);
dd                301 drivers/infiniband/hw/qib/qib_fs.c 	if (qib_eeprom_read(dd, pos, tmp, count)) {
dd                302 drivers/infiniband/hw/qib/qib_fs.c 		qib_dev_err(dd, "failed to read from flash\n");
dd                325 drivers/infiniband/hw/qib/qib_fs.c 	struct qib_devdata *dd;
dd                339 drivers/infiniband/hw/qib/qib_fs.c 	dd = private2dd(file);
dd                340 drivers/infiniband/hw/qib/qib_fs.c 	if (qib_eeprom_write(dd, pos, tmp, count)) {
dd                342 drivers/infiniband/hw/qib/qib_fs.c 		qib_dev_err(dd, "failed to write to flash\n");
dd                360 drivers/infiniband/hw/qib/qib_fs.c static int add_cntr_files(struct super_block *sb, struct qib_devdata *dd)
dd                367 drivers/infiniband/hw/qib/qib_fs.c 	snprintf(unit, sizeof(unit), "%u", dd->unit);
dd                369 drivers/infiniband/hw/qib/qib_fs.c 			  &simple_dir_operations, dd);
dd                377 drivers/infiniband/hw/qib/qib_fs.c 			  &cntr_ops[0], dd);
dd                384 drivers/infiniband/hw/qib/qib_fs.c 			  &cntr_ops[1], dd);
dd                391 drivers/infiniband/hw/qib/qib_fs.c 			  &portcntr_ops[0], dd);
dd                397 drivers/infiniband/hw/qib/qib_fs.c 	for (i = 1; i <= dd->num_pports; i++) {
dd                403 drivers/infiniband/hw/qib/qib_fs.c 				  &portcntr_ops[i], dd);
dd                409 drivers/infiniband/hw/qib/qib_fs.c 		if (!(dd->flags & QIB_HAS_QSFP))
dd                413 drivers/infiniband/hw/qib/qib_fs.c 				  &qsfp_ops[i - 1], dd);
dd                422 drivers/infiniband/hw/qib/qib_fs.c 			  &flash_ops, dd);
dd                462 drivers/infiniband/hw/qib/qib_fs.c 			       struct qib_devdata *dd)
dd                470 drivers/infiniband/hw/qib/qib_fs.c 	snprintf(unit, sizeof(unit), "%u", dd->unit);
dd                483 drivers/infiniband/hw/qib/qib_fs.c 	for (i = 0; i < dd->num_pports; i++) {
dd                488 drivers/infiniband/hw/qib/qib_fs.c 		if (dd->flags & QIB_HAS_QSFP) {
dd                512 drivers/infiniband/hw/qib/qib_fs.c 	struct qib_devdata *dd;
dd                528 drivers/infiniband/hw/qib/qib_fs.c 	xa_for_each(&qib_dev_table, index, dd) {
dd                529 drivers/infiniband/hw/qib/qib_fs.c 		ret = add_cntr_files(sb, dd);
dd                562 drivers/infiniband/hw/qib/qib_fs.c int qibfs_add(struct qib_devdata *dd)
dd                577 drivers/infiniband/hw/qib/qib_fs.c 		ret = add_cntr_files(qib_super, dd);
dd                581 drivers/infiniband/hw/qib/qib_fs.c int qibfs_remove(struct qib_devdata *dd)
dd                586 drivers/infiniband/hw/qib/qib_fs.c 		ret = remove_device_files(qib_super, dd);
dd                306 drivers/infiniband/hw/qib/qib_iba6120.c static inline u32 qib_read_ureg32(const struct qib_devdata *dd,
dd                309 drivers/infiniband/hw/qib/qib_iba6120.c 	if (!dd->kregbase || !(dd->flags & QIB_PRESENT))
dd                312 drivers/infiniband/hw/qib/qib_iba6120.c 	if (dd->userbase)
dd                314 drivers/infiniband/hw/qib/qib_iba6120.c 			     ((char __iomem *)dd->userbase +
dd                315 drivers/infiniband/hw/qib/qib_iba6120.c 			      dd->ureg_align * ctxt));
dd                318 drivers/infiniband/hw/qib/qib_iba6120.c 			     (dd->uregbase +
dd                319 drivers/infiniband/hw/qib/qib_iba6120.c 			      (char __iomem *)dd->kregbase +
dd                320 drivers/infiniband/hw/qib/qib_iba6120.c 			      dd->ureg_align * ctxt));
dd                332 drivers/infiniband/hw/qib/qib_iba6120.c static inline void qib_write_ureg(const struct qib_devdata *dd,
dd                337 drivers/infiniband/hw/qib/qib_iba6120.c 	if (dd->userbase)
dd                339 drivers/infiniband/hw/qib/qib_iba6120.c 			((char __iomem *) dd->userbase +
dd                340 drivers/infiniband/hw/qib/qib_iba6120.c 			 dd->ureg_align * ctxt);
dd                343 drivers/infiniband/hw/qib/qib_iba6120.c 			(dd->uregbase +
dd                344 drivers/infiniband/hw/qib/qib_iba6120.c 			 (char __iomem *) dd->kregbase +
dd                345 drivers/infiniband/hw/qib/qib_iba6120.c 			 dd->ureg_align * ctxt);
dd                347 drivers/infiniband/hw/qib/qib_iba6120.c 	if (dd->kregbase && (dd->flags & QIB_PRESENT))
dd                351 drivers/infiniband/hw/qib/qib_iba6120.c static inline u32 qib_read_kreg32(const struct qib_devdata *dd,
dd                354 drivers/infiniband/hw/qib/qib_iba6120.c 	if (!dd->kregbase || !(dd->flags & QIB_PRESENT))
dd                356 drivers/infiniband/hw/qib/qib_iba6120.c 	return readl((u32 __iomem *)&dd->kregbase[regno]);
dd                359 drivers/infiniband/hw/qib/qib_iba6120.c static inline u64 qib_read_kreg64(const struct qib_devdata *dd,
dd                362 drivers/infiniband/hw/qib/qib_iba6120.c 	if (!dd->kregbase || !(dd->flags & QIB_PRESENT))
dd                365 drivers/infiniband/hw/qib/qib_iba6120.c 	return readq(&dd->kregbase[regno]);
dd                368 drivers/infiniband/hw/qib/qib_iba6120.c static inline void qib_write_kreg(const struct qib_devdata *dd,
dd                371 drivers/infiniband/hw/qib/qib_iba6120.c 	if (dd->kregbase && (dd->flags & QIB_PRESENT))
dd                372 drivers/infiniband/hw/qib/qib_iba6120.c 		writeq(value, &dd->kregbase[regno]);
dd                382 drivers/infiniband/hw/qib/qib_iba6120.c static inline void qib_write_kreg_ctxt(const struct qib_devdata *dd,
dd                386 drivers/infiniband/hw/qib/qib_iba6120.c 	qib_write_kreg(dd, regno + ctxt, value);
dd                389 drivers/infiniband/hw/qib/qib_iba6120.c static inline void write_6120_creg(const struct qib_devdata *dd,
dd                392 drivers/infiniband/hw/qib/qib_iba6120.c 	if (dd->cspec->cregbase && (dd->flags & QIB_PRESENT))
dd                393 drivers/infiniband/hw/qib/qib_iba6120.c 		writeq(value, &dd->cspec->cregbase[regno]);
dd                396 drivers/infiniband/hw/qib/qib_iba6120.c static inline u64 read_6120_creg(const struct qib_devdata *dd, u16 regno)
dd                398 drivers/infiniband/hw/qib/qib_iba6120.c 	if (!dd->cspec->cregbase || !(dd->flags & QIB_PRESENT))
dd                400 drivers/infiniband/hw/qib/qib_iba6120.c 	return readq(&dd->cspec->cregbase[regno]);
dd                403 drivers/infiniband/hw/qib/qib_iba6120.c static inline u32 read_6120_creg32(const struct qib_devdata *dd, u16 regno)
dd                405 drivers/infiniband/hw/qib/qib_iba6120.c 	if (!dd->cspec->cregbase || !(dd->flags & QIB_PRESENT))
dd                407 drivers/infiniband/hw/qib/qib_iba6120.c 	return readl(&dd->cspec->cregbase[regno]);
dd                670 drivers/infiniband/hw/qib/qib_iba6120.c static void qib_6120_txe_recover(struct qib_devdata *dd)
dd                673 drivers/infiniband/hw/qib/qib_iba6120.c 		qib_devinfo(dd->pcidev,
dd                678 drivers/infiniband/hw/qib/qib_iba6120.c static void qib_6120_set_intr_state(struct qib_devdata *dd, u32 enable)
dd                681 drivers/infiniband/hw/qib/qib_iba6120.c 		if (dd->flags & QIB_BADINTR)
dd                683 drivers/infiniband/hw/qib/qib_iba6120.c 		qib_write_kreg(dd, kr_intmask, ~0ULL);
dd                685 drivers/infiniband/hw/qib/qib_iba6120.c 		qib_write_kreg(dd, kr_intclear, 0ULL);
dd                687 drivers/infiniband/hw/qib/qib_iba6120.c 		qib_write_kreg(dd, kr_intmask, 0ULL);
dd                705 drivers/infiniband/hw/qib/qib_iba6120.c static void qib_6120_clear_freeze(struct qib_devdata *dd)
dd                708 drivers/infiniband/hw/qib/qib_iba6120.c 	qib_write_kreg(dd, kr_errmask, 0ULL);
dd                711 drivers/infiniband/hw/qib/qib_iba6120.c 	qib_6120_set_intr_state(dd, 0);
dd                713 drivers/infiniband/hw/qib/qib_iba6120.c 	qib_cancel_sends(dd->pport);
dd                716 drivers/infiniband/hw/qib/qib_iba6120.c 	qib_write_kreg(dd, kr_control, dd->control);
dd                717 drivers/infiniband/hw/qib/qib_iba6120.c 	qib_read_kreg32(dd, kr_scratch);
dd                720 drivers/infiniband/hw/qib/qib_iba6120.c 	qib_force_pio_avail_update(dd);
dd                728 drivers/infiniband/hw/qib/qib_iba6120.c 	qib_write_kreg(dd, kr_hwerrclear, 0ULL);
dd                729 drivers/infiniband/hw/qib/qib_iba6120.c 	qib_write_kreg(dd, kr_errclear, E_SPKT_ERRS_IGNORE);
dd                730 drivers/infiniband/hw/qib/qib_iba6120.c 	qib_write_kreg(dd, kr_errmask, dd->cspec->errormask);
dd                731 drivers/infiniband/hw/qib/qib_iba6120.c 	qib_6120_set_intr_state(dd, 1);
dd                745 drivers/infiniband/hw/qib/qib_iba6120.c static void qib_handle_6120_hwerrors(struct qib_devdata *dd, char *msg,
dd                753 drivers/infiniband/hw/qib/qib_iba6120.c 	hwerrs = qib_read_kreg64(dd, kr_hwerrstatus);
dd                757 drivers/infiniband/hw/qib/qib_iba6120.c 		qib_dev_err(dd,
dd                768 drivers/infiniband/hw/qib/qib_iba6120.c 	qib_write_kreg(dd, kr_hwerrclear,
dd                771 drivers/infiniband/hw/qib/qib_iba6120.c 	hwerrs &= dd->cspec->hwerrmask;
dd                778 drivers/infiniband/hw/qib/qib_iba6120.c 		qib_devinfo(dd->pcidev,
dd                783 drivers/infiniband/hw/qib/qib_iba6120.c 		qib_dev_err(dd,
dd                787 drivers/infiniband/hw/qib/qib_iba6120.c 	ctrl = qib_read_kreg32(dd, kr_control);
dd                788 drivers/infiniband/hw/qib/qib_iba6120.c 	if ((ctrl & QLOGIC_IB_C_FREEZEMODE) && !dd->diag_client) {
dd                798 drivers/infiniband/hw/qib/qib_iba6120.c 			qib_6120_txe_recover(dd);
dd                806 drivers/infiniband/hw/qib/qib_iba6120.c 			qib_6120_clear_freeze(dd);
dd                819 drivers/infiniband/hw/qib/qib_iba6120.c 		dd->cspec->hwerrmask &= ~HWE_MASK(PowerOnBISTFailed);
dd                820 drivers/infiniband/hw/qib/qib_iba6120.c 		qib_write_kreg(dd, kr_hwerrmask, dd->cspec->hwerrmask);
dd                826 drivers/infiniband/hw/qib/qib_iba6120.c 	bitsmsg = dd->cspec->bitsmsgbuf;
dd                832 drivers/infiniband/hw/qib/qib_iba6120.c 		snprintf(bitsmsg, sizeof(dd->cspec->bitsmsgbuf),
dd                839 drivers/infiniband/hw/qib/qib_iba6120.c 		snprintf(bitsmsg, sizeof(dd->cspec->bitsmsgbuf),
dd                844 drivers/infiniband/hw/qib/qib_iba6120.c 		dd->cspec->hwerrmask &= ~(hwerrs & _QIB_PLL_FAIL);
dd                845 drivers/infiniband/hw/qib/qib_iba6120.c 		qib_write_kreg(dd, kr_hwerrmask, dd->cspec->hwerrmask);
dd                853 drivers/infiniband/hw/qib/qib_iba6120.c 		dd->cspec->hwerrmask &= ~QLOGIC_IB_HWE_SERDESPLLFAILED;
dd                854 drivers/infiniband/hw/qib/qib_iba6120.c 		qib_write_kreg(dd, kr_hwerrmask, dd->cspec->hwerrmask);
dd                864 drivers/infiniband/hw/qib/qib_iba6120.c 		qib_dev_err(dd, "%s hardware error\n", msg);
dd                868 drivers/infiniband/hw/qib/qib_iba6120.c 	if (isfatal && !dd->diag_client) {
dd                869 drivers/infiniband/hw/qib/qib_iba6120.c 		qib_dev_err(dd,
dd                871 drivers/infiniband/hw/qib/qib_iba6120.c 			dd->serial);
dd                876 drivers/infiniband/hw/qib/qib_iba6120.c 		if (dd->freezemsg)
dd                877 drivers/infiniband/hw/qib/qib_iba6120.c 			snprintf(dd->freezemsg, dd->freezelen,
dd                879 drivers/infiniband/hw/qib/qib_iba6120.c 		qib_disable_after_error(dd);
dd                889 drivers/infiniband/hw/qib/qib_iba6120.c static int qib_decode_6120_err(struct qib_devdata *dd, char *buf, size_t blen,
dd                965 drivers/infiniband/hw/qib/qib_iba6120.c 	struct qib_devdata *dd = ppd->dd;
dd                971 drivers/infiniband/hw/qib/qib_iba6120.c 	sbuf[0] = qib_read_kreg64(dd, kr_sendbuffererror);
dd                972 drivers/infiniband/hw/qib/qib_iba6120.c 	sbuf[1] = qib_read_kreg64(dd, kr_sendbuffererror + 1);
dd                975 drivers/infiniband/hw/qib/qib_iba6120.c 		qib_disarm_piobufs_set(dd, sbuf,
dd                976 drivers/infiniband/hw/qib/qib_iba6120.c 				       dd->piobcnt2k + dd->piobcnt4k);
dd                979 drivers/infiniband/hw/qib/qib_iba6120.c static int chk_6120_linkrecovery(struct qib_devdata *dd, u64 ibcs)
dd                983 drivers/infiniband/hw/qib/qib_iba6120.c 	u32 linkrecov = read_6120_creg32(dd, cr_iblinkerrrecov);
dd                985 drivers/infiniband/hw/qib/qib_iba6120.c 	if (linkrecov != dd->cspec->lastlinkrecov) {
dd                987 drivers/infiniband/hw/qib/qib_iba6120.c 		dd->cspec->lastlinkrecov = 0;
dd                988 drivers/infiniband/hw/qib/qib_iba6120.c 		qib_set_linkstate(dd->pport, QIB_IB_LINKDOWN);
dd                992 drivers/infiniband/hw/qib/qib_iba6120.c 		dd->cspec->lastlinkrecov =
dd                993 drivers/infiniband/hw/qib/qib_iba6120.c 			read_6120_creg32(dd, cr_iblinkerrrecov);
dd                997 drivers/infiniband/hw/qib/qib_iba6120.c static void handle_6120_errors(struct qib_devdata *dd, u64 errs)
dd               1002 drivers/infiniband/hw/qib/qib_iba6120.c 	struct qib_pportdata *ppd = dd->pport;
dd               1006 drivers/infiniband/hw/qib/qib_iba6120.c 	errs &= dd->cspec->errormask;
dd               1007 drivers/infiniband/hw/qib/qib_iba6120.c 	msg = dd->cspec->emsgbuf;
dd               1011 drivers/infiniband/hw/qib/qib_iba6120.c 		qib_handle_6120_hwerrors(dd, msg, sizeof(dd->cspec->emsgbuf));
dd               1014 drivers/infiniband/hw/qib/qib_iba6120.c 		qib_dev_err(dd,
dd               1043 drivers/infiniband/hw/qib/qib_iba6120.c 	qib_write_kreg(dd, kr_errclear, errs);
dd               1055 drivers/infiniband/hw/qib/qib_iba6120.c 	qib_decode_6120_err(dd, msg, sizeof(dd->cspec->emsgbuf), errs & ~mask);
dd               1065 drivers/infiniband/hw/qib/qib_iba6120.c 		u64 ibcs = qib_read_kreg64(dd, kr_ibcstatus);
dd               1069 drivers/infiniband/hw/qib/qib_iba6120.c 		if (ibstate != IB_PORT_INIT && dd->cspec->lastlinkrecov)
dd               1070 drivers/infiniband/hw/qib/qib_iba6120.c 			handle = chk_6120_linkrecovery(dd, ibcs);
dd               1086 drivers/infiniband/hw/qib/qib_iba6120.c 		qib_dev_err(dd,
dd               1088 drivers/infiniband/hw/qib/qib_iba6120.c 		dd->flags &= ~QIB_INITTED;  /* needs re-init */
dd               1090 drivers/infiniband/hw/qib/qib_iba6120.c 		*dd->devstatusp |= QIB_STATUS_HWERROR;
dd               1091 drivers/infiniband/hw/qib/qib_iba6120.c 		*dd->pport->statusp &= ~QIB_STATUS_IB_CONF;
dd               1095 drivers/infiniband/hw/qib/qib_iba6120.c 		qib_dev_porterr(dd, ppd->port, "%s error\n", msg);
dd               1108 drivers/infiniband/hw/qib/qib_iba6120.c 		qib_handle_urcv(dd, ~0U);
dd               1128 drivers/infiniband/hw/qib/qib_iba6120.c static void qib_6120_init_hwerrors(struct qib_devdata *dd)
dd               1133 drivers/infiniband/hw/qib/qib_iba6120.c 	extsval = qib_read_kreg64(dd, kr_extstatus);
dd               1136 drivers/infiniband/hw/qib/qib_iba6120.c 		qib_dev_err(dd, "MemBIST did not complete!\n");
dd               1140 drivers/infiniband/hw/qib/qib_iba6120.c 	if (dd->minrev < 2) {
dd               1150 drivers/infiniband/hw/qib/qib_iba6120.c 	dd->cspec->hwerrmask = val;
dd               1152 drivers/infiniband/hw/qib/qib_iba6120.c 	qib_write_kreg(dd, kr_hwerrclear, ~HWE_MASK(PowerOnBISTFailed));
dd               1153 drivers/infiniband/hw/qib/qib_iba6120.c 	qib_write_kreg(dd, kr_hwerrmask, dd->cspec->hwerrmask);
dd               1156 drivers/infiniband/hw/qib/qib_iba6120.c 	qib_write_kreg(dd, kr_errclear, ~0ULL);
dd               1158 drivers/infiniband/hw/qib/qib_iba6120.c 	qib_write_kreg(dd, kr_errmask, ~0ULL);
dd               1159 drivers/infiniband/hw/qib/qib_iba6120.c 	dd->cspec->errormask = qib_read_kreg64(dd, kr_errmask);
dd               1161 drivers/infiniband/hw/qib/qib_iba6120.c 	qib_write_kreg(dd, kr_intclear, ~0ULL);
dd               1163 drivers/infiniband/hw/qib/qib_iba6120.c 	qib_write_kreg(dd, kr_rcvbthqp,
dd               1164 drivers/infiniband/hw/qib/qib_iba6120.c 		       dd->qpn_mask << (QIB_6120_RcvBTHQP_BTHQP_Mask_LSB - 1) |
dd               1174 drivers/infiniband/hw/qib/qib_iba6120.c static void qib_set_6120_armlaunch(struct qib_devdata *dd, u32 enable)
dd               1177 drivers/infiniband/hw/qib/qib_iba6120.c 		qib_write_kreg(dd, kr_errclear,
dd               1179 drivers/infiniband/hw/qib/qib_iba6120.c 		dd->cspec->errormask |= ERR_MASK(SendPioArmLaunchErr);
dd               1181 drivers/infiniband/hw/qib/qib_iba6120.c 		dd->cspec->errormask &= ~ERR_MASK(SendPioArmLaunchErr);
dd               1182 drivers/infiniband/hw/qib/qib_iba6120.c 	qib_write_kreg(dd, kr_errmask, dd->cspec->errormask);
dd               1194 drivers/infiniband/hw/qib/qib_iba6120.c 	struct qib_devdata *dd = ppd->dd;
dd               1219 drivers/infiniband/hw/qib/qib_iba6120.c 	qib_write_kreg(dd, kr_ibcctrl, dd->cspec->ibcctrl | mod_wd);
dd               1221 drivers/infiniband/hw/qib/qib_iba6120.c 	qib_write_kreg(dd, kr_scratch, 0);
dd               1230 drivers/infiniband/hw/qib/qib_iba6120.c 	struct qib_devdata *dd = ppd->dd;
dd               1234 drivers/infiniband/hw/qib/qib_iba6120.c 	dd->control &= ~QLOGIC_IB_C_LINKENABLE;
dd               1235 drivers/infiniband/hw/qib/qib_iba6120.c 	qib_write_kreg(dd, kr_control, 0ULL);
dd               1237 drivers/infiniband/hw/qib/qib_iba6120.c 	dd->cspec->ibdeltainprog = 1;
dd               1238 drivers/infiniband/hw/qib/qib_iba6120.c 	dd->cspec->ibsymsnap = read_6120_creg32(dd, cr_ibsymbolerr);
dd               1239 drivers/infiniband/hw/qib/qib_iba6120.c 	dd->cspec->iblnkerrsnap = read_6120_creg32(dd, cr_iblinkerrrecov);
dd               1250 drivers/infiniband/hw/qib/qib_iba6120.c 	dd->cspec->lli_thresh = 0xf;
dd               1251 drivers/infiniband/hw/qib/qib_iba6120.c 	ibc |= (u64) dd->cspec->lli_thresh << SYM_LSB(IBCCtrl, PhyerrThreshold);
dd               1261 drivers/infiniband/hw/qib/qib_iba6120.c 	dd->cspec->ibcctrl = ibc; /* without linkcmd or linkinitcmd! */
dd               1264 drivers/infiniband/hw/qib/qib_iba6120.c 	val = dd->cspec->ibcctrl | (QLOGIC_IB_IBCC_LINKINITCMD_DISABLE <<
dd               1266 drivers/infiniband/hw/qib/qib_iba6120.c 	qib_write_kreg(dd, kr_ibcctrl, val);
dd               1268 drivers/infiniband/hw/qib/qib_iba6120.c 	val = qib_read_kreg64(dd, kr_serdes_cfg0);
dd               1269 drivers/infiniband/hw/qib/qib_iba6120.c 	config1 = qib_read_kreg64(dd, kr_serdes_cfg1);
dd               1283 drivers/infiniband/hw/qib/qib_iba6120.c 	qib_write_kreg(dd, kr_serdes_cfg0, val);
dd               1285 drivers/infiniband/hw/qib/qib_iba6120.c 	qib_read_kreg64(dd, kr_scratch);
dd               1303 drivers/infiniband/hw/qib/qib_iba6120.c 	qib_write_kreg(dd, kr_serdes_cfg0, val);
dd               1305 drivers/infiniband/hw/qib/qib_iba6120.c 	(void) qib_read_kreg64(dd, kr_scratch);
dd               1315 drivers/infiniband/hw/qib/qib_iba6120.c 	qib_write_kreg(dd, kr_serdes_cfg0, val);
dd               1317 drivers/infiniband/hw/qib/qib_iba6120.c 	(void) qib_read_kreg64(dd, kr_scratch);
dd               1319 drivers/infiniband/hw/qib/qib_iba6120.c 	val = qib_read_kreg64(dd, kr_xgxs_cfg);
dd               1329 drivers/infiniband/hw/qib/qib_iba6120.c 		qib_write_kreg(dd, kr_xgxs_cfg, val);
dd               1331 drivers/infiniband/hw/qib/qib_iba6120.c 	val = qib_read_kreg64(dd, kr_serdes_cfg0);
dd               1339 drivers/infiniband/hw/qib/qib_iba6120.c 	qib_write_kreg(dd, kr_serdes_cfg1, config1);
dd               1342 drivers/infiniband/hw/qib/qib_iba6120.c 	ppd->guid = dd->base_guid;
dd               1349 drivers/infiniband/hw/qib/qib_iba6120.c 	hwstat = qib_read_kreg64(dd, kr_hwerrstatus);
dd               1352 drivers/infiniband/hw/qib/qib_iba6120.c 		qib_write_kreg(dd, kr_hwerrclear, hwstat);
dd               1353 drivers/infiniband/hw/qib/qib_iba6120.c 		qib_write_kreg(dd, kr_errclear, ERR_MASK(HardwareErr));
dd               1356 drivers/infiniband/hw/qib/qib_iba6120.c 	dd->control |= QLOGIC_IB_C_LINKENABLE;
dd               1357 drivers/infiniband/hw/qib/qib_iba6120.c 	dd->control &= ~QLOGIC_IB_C_FREEZEMODE;
dd               1358 drivers/infiniband/hw/qib/qib_iba6120.c 	qib_write_kreg(dd, kr_control, dd->control);
dd               1370 drivers/infiniband/hw/qib/qib_iba6120.c 	struct qib_devdata *dd = ppd->dd;
dd               1376 drivers/infiniband/hw/qib/qib_iba6120.c 	dd->control &= ~QLOGIC_IB_C_LINKENABLE;
dd               1377 drivers/infiniband/hw/qib/qib_iba6120.c 	qib_write_kreg(dd, kr_control,
dd               1378 drivers/infiniband/hw/qib/qib_iba6120.c 		       dd->control | QLOGIC_IB_C_FREEZEMODE);
dd               1380 drivers/infiniband/hw/qib/qib_iba6120.c 	if (dd->cspec->ibsymdelta || dd->cspec->iblnkerrdelta ||
dd               1381 drivers/infiniband/hw/qib/qib_iba6120.c 	    dd->cspec->ibdeltainprog) {
dd               1385 drivers/infiniband/hw/qib/qib_iba6120.c 		diagc = qib_read_kreg64(dd, kr_hwdiagctrl);
dd               1386 drivers/infiniband/hw/qib/qib_iba6120.c 		qib_write_kreg(dd, kr_hwdiagctrl,
dd               1389 drivers/infiniband/hw/qib/qib_iba6120.c 		if (dd->cspec->ibsymdelta || dd->cspec->ibdeltainprog) {
dd               1390 drivers/infiniband/hw/qib/qib_iba6120.c 			val = read_6120_creg32(dd, cr_ibsymbolerr);
dd               1391 drivers/infiniband/hw/qib/qib_iba6120.c 			if (dd->cspec->ibdeltainprog)
dd               1392 drivers/infiniband/hw/qib/qib_iba6120.c 				val -= val - dd->cspec->ibsymsnap;
dd               1393 drivers/infiniband/hw/qib/qib_iba6120.c 			val -= dd->cspec->ibsymdelta;
dd               1394 drivers/infiniband/hw/qib/qib_iba6120.c 			write_6120_creg(dd, cr_ibsymbolerr, val);
dd               1396 drivers/infiniband/hw/qib/qib_iba6120.c 		if (dd->cspec->iblnkerrdelta || dd->cspec->ibdeltainprog) {
dd               1397 drivers/infiniband/hw/qib/qib_iba6120.c 			val = read_6120_creg32(dd, cr_iblinkerrrecov);
dd               1398 drivers/infiniband/hw/qib/qib_iba6120.c 			if (dd->cspec->ibdeltainprog)
dd               1399 drivers/infiniband/hw/qib/qib_iba6120.c 				val -= val - dd->cspec->iblnkerrsnap;
dd               1400 drivers/infiniband/hw/qib/qib_iba6120.c 			val -= dd->cspec->iblnkerrdelta;
dd               1401 drivers/infiniband/hw/qib/qib_iba6120.c 			write_6120_creg(dd, cr_iblinkerrrecov, val);
dd               1405 drivers/infiniband/hw/qib/qib_iba6120.c 		qib_write_kreg(dd, kr_hwdiagctrl, diagc);
dd               1408 drivers/infiniband/hw/qib/qib_iba6120.c 	val = qib_read_kreg64(dd, kr_serdes_cfg0);
dd               1410 drivers/infiniband/hw/qib/qib_iba6120.c 	qib_write_kreg(dd, kr_serdes_cfg0, val);
dd               1440 drivers/infiniband/hw/qib/qib_iba6120.c 	struct qib_devdata *dd = ppd->dd;
dd               1446 drivers/infiniband/hw/qib/qib_iba6120.c 	if (dd->diag_client)
dd               1456 drivers/infiniband/hw/qib/qib_iba6120.c 		val = qib_read_kreg64(dd, kr_ibcstatus);
dd               1464 drivers/infiniband/hw/qib/qib_iba6120.c 	spin_lock_irqsave(&dd->cspec->gpio_lock, flags);
dd               1465 drivers/infiniband/hw/qib/qib_iba6120.c 	extctl = dd->cspec->extctrl & ~(SYM_MASK(EXTCtrl, LEDPriPortGreenOn) |
dd               1472 drivers/infiniband/hw/qib/qib_iba6120.c 	dd->cspec->extctrl = extctl;
dd               1473 drivers/infiniband/hw/qib/qib_iba6120.c 	qib_write_kreg(dd, kr_extctrl, extctl);
dd               1474 drivers/infiniband/hw/qib/qib_iba6120.c 	spin_unlock_irqrestore(&dd->cspec->gpio_lock, flags);
dd               1483 drivers/infiniband/hw/qib/qib_iba6120.c static void qib_6120_setup_cleanup(struct qib_devdata *dd)
dd               1485 drivers/infiniband/hw/qib/qib_iba6120.c 	qib_free_irq(dd);
dd               1486 drivers/infiniband/hw/qib/qib_iba6120.c 	kfree(dd->cspec->cntrs);
dd               1487 drivers/infiniband/hw/qib/qib_iba6120.c 	kfree(dd->cspec->portcntrs);
dd               1488 drivers/infiniband/hw/qib/qib_iba6120.c 	if (dd->cspec->dummy_hdrq) {
dd               1489 drivers/infiniband/hw/qib/qib_iba6120.c 		dma_free_coherent(&dd->pcidev->dev,
dd               1490 drivers/infiniband/hw/qib/qib_iba6120.c 				  ALIGN(dd->rcvhdrcnt *
dd               1491 drivers/infiniband/hw/qib/qib_iba6120.c 					dd->rcvhdrentsize *
dd               1493 drivers/infiniband/hw/qib/qib_iba6120.c 				  dd->cspec->dummy_hdrq,
dd               1494 drivers/infiniband/hw/qib/qib_iba6120.c 				  dd->cspec->dummy_hdrq_phys);
dd               1495 drivers/infiniband/hw/qib/qib_iba6120.c 		dd->cspec->dummy_hdrq = NULL;
dd               1499 drivers/infiniband/hw/qib/qib_iba6120.c static void qib_wantpiobuf_6120_intr(struct qib_devdata *dd, u32 needint)
dd               1503 drivers/infiniband/hw/qib/qib_iba6120.c 	spin_lock_irqsave(&dd->sendctrl_lock, flags);
dd               1505 drivers/infiniband/hw/qib/qib_iba6120.c 		dd->sendctrl |= SYM_MASK(SendCtrl, PIOIntBufAvail);
dd               1507 drivers/infiniband/hw/qib/qib_iba6120.c 		dd->sendctrl &= ~SYM_MASK(SendCtrl, PIOIntBufAvail);
dd               1508 drivers/infiniband/hw/qib/qib_iba6120.c 	qib_write_kreg(dd, kr_sendctrl, dd->sendctrl);
dd               1509 drivers/infiniband/hw/qib/qib_iba6120.c 	qib_write_kreg(dd, kr_scratch, 0ULL);
dd               1510 drivers/infiniband/hw/qib/qib_iba6120.c 	spin_unlock_irqrestore(&dd->sendctrl_lock, flags);
dd               1517 drivers/infiniband/hw/qib/qib_iba6120.c static noinline void unlikely_6120_intr(struct qib_devdata *dd, u64 istat)
dd               1520 drivers/infiniband/hw/qib/qib_iba6120.c 		qib_dev_err(dd, "interrupt with unknown interrupts %Lx set\n",
dd               1527 drivers/infiniband/hw/qib/qib_iba6120.c 		estat = qib_read_kreg64(dd, kr_errstatus);
dd               1529 drivers/infiniband/hw/qib/qib_iba6120.c 			qib_devinfo(dd->pcidev,
dd               1532 drivers/infiniband/hw/qib/qib_iba6120.c 		handle_6120_errors(dd, estat);
dd               1543 drivers/infiniband/hw/qib/qib_iba6120.c 		gpiostatus = qib_read_kreg32(dd, kr_gpio_status);
dd               1554 drivers/infiniband/hw/qib/qib_iba6120.c 				dd->cspec->rxfc_unsupvl_errs++;
dd               1556 drivers/infiniband/hw/qib/qib_iba6120.c 				dd->cspec->overrun_thresh_errs++;
dd               1558 drivers/infiniband/hw/qib/qib_iba6120.c 				dd->cspec->lli_errs++;
dd               1568 drivers/infiniband/hw/qib/qib_iba6120.c 			const u32 mask = qib_read_kreg32(dd, kr_gpio_mask);
dd               1577 drivers/infiniband/hw/qib/qib_iba6120.c 				dd->cspec->gpio_mask &= ~(gpiostatus & mask);
dd               1578 drivers/infiniband/hw/qib/qib_iba6120.c 				qib_write_kreg(dd, kr_gpio_mask,
dd               1579 drivers/infiniband/hw/qib/qib_iba6120.c 					       dd->cspec->gpio_mask);
dd               1583 drivers/infiniband/hw/qib/qib_iba6120.c 			qib_write_kreg(dd, kr_gpio_clear, (u64) to_clear);
dd               1589 drivers/infiniband/hw/qib/qib_iba6120.c 	struct qib_devdata *dd = data;
dd               1594 drivers/infiniband/hw/qib/qib_iba6120.c 	if ((dd->flags & (QIB_PRESENT | QIB_BADINTR)) != QIB_PRESENT) {
dd               1605 drivers/infiniband/hw/qib/qib_iba6120.c 	istat = qib_read_kreg32(dd, kr_intstatus);
dd               1612 drivers/infiniband/hw/qib/qib_iba6120.c 		qib_bad_intrstatus(dd);
dd               1618 drivers/infiniband/hw/qib/qib_iba6120.c 	this_cpu_inc(*dd->int_counter);
dd               1622 drivers/infiniband/hw/qib/qib_iba6120.c 		unlikely_6120_intr(dd, istat);
dd               1630 drivers/infiniband/hw/qib/qib_iba6120.c 	qib_write_kreg(dd, kr_intclear, istat);
dd               1643 drivers/infiniband/hw/qib/qib_iba6120.c 		for (i = 0; i < dd->first_user_ctxt; i++) {
dd               1646 drivers/infiniband/hw/qib/qib_iba6120.c 				crcs += qib_kreceive(dd->rcd[i],
dd               1647 drivers/infiniband/hw/qib/qib_iba6120.c 						     &dd->cspec->lli_counter,
dd               1653 drivers/infiniband/hw/qib/qib_iba6120.c 			u32 cntr = dd->cspec->lli_counter;
dd               1657 drivers/infiniband/hw/qib/qib_iba6120.c 				if (cntr > dd->cspec->lli_thresh) {
dd               1658 drivers/infiniband/hw/qib/qib_iba6120.c 					dd->cspec->lli_counter = 0;
dd               1659 drivers/infiniband/hw/qib/qib_iba6120.c 					dd->cspec->lli_errs++;
dd               1661 drivers/infiniband/hw/qib/qib_iba6120.c 					dd->cspec->lli_counter += cntr;
dd               1670 drivers/infiniband/hw/qib/qib_iba6120.c 			qib_handle_urcv(dd, ctxtrbits);
dd               1674 drivers/infiniband/hw/qib/qib_iba6120.c 	if ((istat & QLOGIC_IB_I_SPIOBUFAVAIL) && (dd->flags & QIB_INITTED))
dd               1675 drivers/infiniband/hw/qib/qib_iba6120.c 		qib_ib_piobufavail(dd);
dd               1687 drivers/infiniband/hw/qib/qib_iba6120.c static void qib_setup_6120_interrupt(struct qib_devdata *dd)
dd               1697 drivers/infiniband/hw/qib/qib_iba6120.c 	if (SYM_FIELD(dd->revision, Revision_R,
dd               1700 drivers/infiniband/hw/qib/qib_iba6120.c 		dd->cspec->gpio_mask |= GPIO_ERRINTR_MASK;
dd               1701 drivers/infiniband/hw/qib/qib_iba6120.c 		qib_write_kreg(dd, kr_gpio_mask, dd->cspec->gpio_mask);
dd               1704 drivers/infiniband/hw/qib/qib_iba6120.c 	ret = pci_request_irq(dd->pcidev, 0, qib_6120intr, NULL, dd,
dd               1707 drivers/infiniband/hw/qib/qib_iba6120.c 		qib_dev_err(dd,
dd               1709 drivers/infiniband/hw/qib/qib_iba6120.c 			    pci_irq_vector(dd->pcidev, 0), ret);
dd               1718 drivers/infiniband/hw/qib/qib_iba6120.c static void pe_boardname(struct qib_devdata *dd)
dd               1722 drivers/infiniband/hw/qib/qib_iba6120.c 	boardid = SYM_FIELD(dd->revision, Revision,
dd               1727 drivers/infiniband/hw/qib/qib_iba6120.c 		dd->boardname = "InfiniPath_QLE7140";
dd               1730 drivers/infiniband/hw/qib/qib_iba6120.c 		qib_dev_err(dd, "Unknown 6120 board with ID %u\n", boardid);
dd               1731 drivers/infiniband/hw/qib/qib_iba6120.c 		dd->boardname = "Unknown_InfiniPath_6120";
dd               1735 drivers/infiniband/hw/qib/qib_iba6120.c 	if (dd->majrev != 4 || !dd->minrev || dd->minrev > 2)
dd               1736 drivers/infiniband/hw/qib/qib_iba6120.c 		qib_dev_err(dd,
dd               1738 drivers/infiniband/hw/qib/qib_iba6120.c 			    dd->majrev, dd->minrev);
dd               1740 drivers/infiniband/hw/qib/qib_iba6120.c 	snprintf(dd->boardversion, sizeof(dd->boardversion),
dd               1742 drivers/infiniband/hw/qib/qib_iba6120.c 		 QIB_CHIP_VERS_MAJ, QIB_CHIP_VERS_MIN, dd->boardname,
dd               1743 drivers/infiniband/hw/qib/qib_iba6120.c 		 (unsigned int)SYM_FIELD(dd->revision, Revision_R, Arch),
dd               1744 drivers/infiniband/hw/qib/qib_iba6120.c 		 dd->majrev, dd->minrev,
dd               1745 drivers/infiniband/hw/qib/qib_iba6120.c 		 (unsigned int)SYM_FIELD(dd->revision, Revision_R, SW));
dd               1753 drivers/infiniband/hw/qib/qib_iba6120.c static int qib_6120_setup_reset(struct qib_devdata *dd)
dd               1761 drivers/infiniband/hw/qib/qib_iba6120.c 	qib_pcie_getcmd(dd, &cmdval, &int_line, &clinesz);
dd               1764 drivers/infiniband/hw/qib/qib_iba6120.c 	qib_dev_err(dd, "Resetting InfiniPath unit %u\n", dd->unit);
dd               1767 drivers/infiniband/hw/qib/qib_iba6120.c 	qib_6120_set_intr_state(dd, 0);
dd               1769 drivers/infiniband/hw/qib/qib_iba6120.c 	dd->cspec->ibdeltainprog = 0;
dd               1770 drivers/infiniband/hw/qib/qib_iba6120.c 	dd->cspec->ibsymdelta = 0;
dd               1771 drivers/infiniband/hw/qib/qib_iba6120.c 	dd->cspec->iblnkerrdelta = 0;
dd               1778 drivers/infiniband/hw/qib/qib_iba6120.c 	dd->flags &= ~(QIB_INITTED | QIB_PRESENT);
dd               1780 drivers/infiniband/hw/qib/qib_iba6120.c 	dd->z_int_counter = qib_int_counter(dd);
dd               1781 drivers/infiniband/hw/qib/qib_iba6120.c 	val = dd->control | QLOGIC_IB_C_RESET;
dd               1782 drivers/infiniband/hw/qib/qib_iba6120.c 	writeq(val, &dd->kregbase[kr_control]);
dd               1793 drivers/infiniband/hw/qib/qib_iba6120.c 		qib_pcie_reenable(dd, cmdval, int_line, clinesz);
dd               1799 drivers/infiniband/hw/qib/qib_iba6120.c 		val = readq(&dd->kregbase[kr_revision]);
dd               1800 drivers/infiniband/hw/qib/qib_iba6120.c 		if (val == dd->revision) {
dd               1801 drivers/infiniband/hw/qib/qib_iba6120.c 			dd->flags |= QIB_PRESENT; /* it's back */
dd               1802 drivers/infiniband/hw/qib/qib_iba6120.c 			ret = qib_reinit_intr(dd);
dd               1810 drivers/infiniband/hw/qib/qib_iba6120.c 		if (qib_pcie_params(dd, dd->lbus_width, NULL))
dd               1811 drivers/infiniband/hw/qib/qib_iba6120.c 			qib_dev_err(dd,
dd               1814 drivers/infiniband/hw/qib/qib_iba6120.c 		qib_6120_init_hwerrors(dd);
dd               1816 drivers/infiniband/hw/qib/qib_iba6120.c 		qib_write_kreg(dd, kr_gpio_mask, dd->cspec->gpio_mask);
dd               1818 drivers/infiniband/hw/qib/qib_iba6120.c 		qib_6120_init_hwerrors(dd);
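
The reset path above (qib_iba6120.c around lines 1753-1818) drops QIB_PRESENT before asserting QLOGIC_IB_C_RESET, then polls kr_revision; only when the register reads back the revision value saved before the reset is the chip marked present again and interrupt handling reinitialized. A minimal sketch of that poll follows; the retry bound and (elided) delay policy are assumptions, since the index only shows the lines containing dd:

    #include <stdint.h>

    /* Poll a revision register after reset: the device counts as alive
     * again only when the pre-reset value reads back exactly.  The
     * retry bound is illustrative, not the driver's actual timing. */
    static int wait_chip_back(volatile uint64_t *rev_reg,
                              uint64_t saved_rev, int max_tries)
    {
        int i;

        for (i = 0; i < max_tries; i++) {
            if (*rev_reg == saved_rev)
                return 0;   /* it's back */
            /* the real code sleeps between polls */
        }
        return -1;          /* never came back; caller treats as fatal */
    }
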
dd               1835 drivers/infiniband/hw/qib/qib_iba6120.c static void qib_6120_put_tid(struct qib_devdata *dd, u64 __iomem *tidptr,
dd               1843 drivers/infiniband/hw/qib/qib_iba6120.c 	if (!dd->kregbase)
dd               1846 drivers/infiniband/hw/qib/qib_iba6120.c 	if (pa != dd->tidinvalid) {
dd               1848 drivers/infiniband/hw/qib/qib_iba6120.c 			qib_dev_err(dd, "Physaddr %lx not 2KB aligned!\n",
dd               1854 drivers/infiniband/hw/qib/qib_iba6120.c 			qib_dev_err(dd,
dd               1861 drivers/infiniband/hw/qib/qib_iba6120.c 			pa |= dd->tidtemplate;
dd               1879 drivers/infiniband/hw/qib/qib_iba6120.c 	tidx = tidptr - dd->egrtidbase;
dd               1881 drivers/infiniband/hw/qib/qib_iba6120.c 	tidlockp = (type == RCVHQ_RCV_TYPE_EAGER && tidx < dd->rcvhdrcnt)
dd               1882 drivers/infiniband/hw/qib/qib_iba6120.c 		? &dd->cspec->kernel_tid_lock : &dd->cspec->user_tid_lock;
dd               1884 drivers/infiniband/hw/qib/qib_iba6120.c 	qib_write_kreg(dd, kr_scratch, 0xfeeddeaf);
dd               1886 drivers/infiniband/hw/qib/qib_iba6120.c 	qib_write_kreg(dd, kr_scratch, 0xdeadbeef);
dd               1902 drivers/infiniband/hw/qib/qib_iba6120.c static void qib_6120_put_tid_2(struct qib_devdata *dd, u64 __iomem *tidptr,
dd               1907 drivers/infiniband/hw/qib/qib_iba6120.c 	if (!dd->kregbase)
dd               1910 drivers/infiniband/hw/qib/qib_iba6120.c 	if (pa != dd->tidinvalid) {
dd               1912 drivers/infiniband/hw/qib/qib_iba6120.c 			qib_dev_err(dd, "Physaddr %lx not 2KB aligned!\n",
dd               1918 drivers/infiniband/hw/qib/qib_iba6120.c 			qib_dev_err(dd,
dd               1925 drivers/infiniband/hw/qib/qib_iba6120.c 			pa |= dd->tidtemplate;
dd               1943 drivers/infiniband/hw/qib/qib_iba6120.c static void qib_6120_clear_tids(struct qib_devdata *dd,
dd               1951 drivers/infiniband/hw/qib/qib_iba6120.c 	if (!dd->kregbase || !rcd)
dd               1956 drivers/infiniband/hw/qib/qib_iba6120.c 	tidinv = dd->tidinvalid;
dd               1958 drivers/infiniband/hw/qib/qib_iba6120.c 		((char __iomem *)(dd->kregbase) +
dd               1959 drivers/infiniband/hw/qib/qib_iba6120.c 		 dd->rcvtidbase +
dd               1960 drivers/infiniband/hw/qib/qib_iba6120.c 		 ctxt * dd->rcvtidcnt * sizeof(*tidbase));
dd               1962 drivers/infiniband/hw/qib/qib_iba6120.c 	for (i = 0; i < dd->rcvtidcnt; i++)
dd               1964 drivers/infiniband/hw/qib/qib_iba6120.c 		dd->f_put_tid(dd, &tidbase[i], RCVHQ_RCV_TYPE_EXPECTED,
dd               1968 drivers/infiniband/hw/qib/qib_iba6120.c 		((char __iomem *)(dd->kregbase) +
dd               1969 drivers/infiniband/hw/qib/qib_iba6120.c 		 dd->rcvegrbase +
dd               1974 drivers/infiniband/hw/qib/qib_iba6120.c 		dd->f_put_tid(dd, &tidbase[i], RCVHQ_RCV_TYPE_EAGER,
dd               1984 drivers/infiniband/hw/qib/qib_iba6120.c static void qib_6120_tidtemplate(struct qib_devdata *dd)
dd               1986 drivers/infiniband/hw/qib/qib_iba6120.c 	u32 egrsize = dd->rcvegrbufsize;
dd               1998 drivers/infiniband/hw/qib/qib_iba6120.c 		dd->tidtemplate = 1U << 29;
dd               2000 drivers/infiniband/hw/qib/qib_iba6120.c 		dd->tidtemplate = 2U << 29;
dd               2001 drivers/infiniband/hw/qib/qib_iba6120.c 	dd->tidinvalid = 0;
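
qib_6120_tidtemplate (lines 1984-2001 above) encodes the eager-buffer size class into the high bits of every TID entry, 1U << 29 for one class and 2U << 29 for the other, with 0 reserved as the invalid TID. The conditional between the two assignments is not captured by this index, so the 2048-byte test below is an assumption based on the usual 2K/4K buffer split; the function name is illustrative:

    #include <stdint.h>

    /* Hypothetical reconstruction: pack the eager buffer size class
     * into bits 30:29 of a TID template.  The exact predicate on
     * egrsize is assumed (2KB vs. larger). */
    static uint32_t tid_template(uint32_t egrsize)
    {
        if (egrsize == 2048)
            return 1U << 29;    /* size class 1: 2KB buffers */
        else
            return 2U << 29;    /* size class 2: larger buffers */
    }
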
dd               2030 drivers/infiniband/hw/qib/qib_iba6120.c qib_6120_get_msgheader(struct qib_devdata *dd, __le32 *rhf_addr)
dd               2036 drivers/infiniband/hw/qib/qib_iba6120.c static void qib_6120_config_ctxts(struct qib_devdata *dd)
dd               2038 drivers/infiniband/hw/qib/qib_iba6120.c 	dd->ctxtcnt = qib_read_kreg32(dd, kr_portcnt);
dd               2040 drivers/infiniband/hw/qib/qib_iba6120.c 		dd->first_user_ctxt = qib_n_krcv_queues * dd->num_pports;
dd               2041 drivers/infiniband/hw/qib/qib_iba6120.c 		if (dd->first_user_ctxt > dd->ctxtcnt)
dd               2042 drivers/infiniband/hw/qib/qib_iba6120.c 			dd->first_user_ctxt = dd->ctxtcnt;
dd               2043 drivers/infiniband/hw/qib/qib_iba6120.c 		dd->qpn_mask = dd->first_user_ctxt <= 2 ? 2 : 6;
dd               2045 drivers/infiniband/hw/qib/qib_iba6120.c 		dd->first_user_ctxt = dd->num_pports;
dd               2046 drivers/infiniband/hw/qib/qib_iba6120.c 	dd->n_krcv_queues = dd->first_user_ctxt;
dd               2053 drivers/infiniband/hw/qib/qib_iba6120.c 		qib_write_ureg(rcd->dd, ur_rcvegrindexhead, egrhd, rcd->ctxt);
dd               2054 drivers/infiniband/hw/qib/qib_iba6120.c 	qib_write_ureg(rcd->dd, ur_rcvhdrhead, hd, rcd->ctxt);
dd               2061 drivers/infiniband/hw/qib/qib_iba6120.c 	head = qib_read_ureg32(rcd->dd, ur_rcvhdrhead, rcd->ctxt);
dd               2065 drivers/infiniband/hw/qib/qib_iba6120.c 		tail = qib_read_ureg32(rcd->dd, ur_rcvhdrtail, rcd->ctxt);
dd               2074 drivers/infiniband/hw/qib/qib_iba6120.c static void alloc_dummy_hdrq(struct qib_devdata *dd)
dd               2076 drivers/infiniband/hw/qib/qib_iba6120.c 	dd->cspec->dummy_hdrq = dma_alloc_coherent(&dd->pcidev->dev,
dd               2077 drivers/infiniband/hw/qib/qib_iba6120.c 					dd->rcd[0]->rcvhdrq_size,
dd               2078 drivers/infiniband/hw/qib/qib_iba6120.c 					&dd->cspec->dummy_hdrq_phys,
dd               2080 drivers/infiniband/hw/qib/qib_iba6120.c 	if (!dd->cspec->dummy_hdrq) {
dd               2081 drivers/infiniband/hw/qib/qib_iba6120.c 		qib_devinfo(dd->pcidev, "Couldn't allocate dummy hdrq\n");
dd               2083 drivers/infiniband/hw/qib/qib_iba6120.c 		dd->cspec->dummy_hdrq_phys = 0UL;
dd               2097 drivers/infiniband/hw/qib/qib_iba6120.c 	struct qib_devdata *dd = ppd->dd;
dd               2101 drivers/infiniband/hw/qib/qib_iba6120.c 	spin_lock_irqsave(&dd->cspec->rcvmod_lock, flags);
dd               2104 drivers/infiniband/hw/qib/qib_iba6120.c 		dd->rcvctrl |= (1ULL << QLOGIC_IB_R_TAILUPD_SHIFT);
dd               2106 drivers/infiniband/hw/qib/qib_iba6120.c 		dd->rcvctrl &= ~(1ULL << QLOGIC_IB_R_TAILUPD_SHIFT);
dd               2108 drivers/infiniband/hw/qib/qib_iba6120.c 		dd->rcvctrl &= ~(1ULL << IBA6120_R_PKEY_DIS_SHIFT);
dd               2110 drivers/infiniband/hw/qib/qib_iba6120.c 		dd->rcvctrl |= (1ULL << IBA6120_R_PKEY_DIS_SHIFT);
dd               2112 drivers/infiniband/hw/qib/qib_iba6120.c 		mask = (1ULL << dd->ctxtcnt) - 1;
dd               2117 drivers/infiniband/hw/qib/qib_iba6120.c 		dd->rcvctrl |= (mask << SYM_LSB(RcvCtrl, PortEnable));
dd               2118 drivers/infiniband/hw/qib/qib_iba6120.c 		if (!(dd->flags & QIB_NODMA_RTAIL))
dd               2119 drivers/infiniband/hw/qib/qib_iba6120.c 			dd->rcvctrl |= 1ULL << QLOGIC_IB_R_TAILUPD_SHIFT;
dd               2121 drivers/infiniband/hw/qib/qib_iba6120.c 		qib_write_kreg_ctxt(dd, kr_rcvhdrtailaddr, ctxt,
dd               2122 drivers/infiniband/hw/qib/qib_iba6120.c 			dd->rcd[ctxt]->rcvhdrqtailaddr_phys);
dd               2123 drivers/infiniband/hw/qib/qib_iba6120.c 		qib_write_kreg_ctxt(dd, kr_rcvhdraddr, ctxt,
dd               2124 drivers/infiniband/hw/qib/qib_iba6120.c 			dd->rcd[ctxt]->rcvhdrq_phys);
dd               2126 drivers/infiniband/hw/qib/qib_iba6120.c 		if (ctxt == 0 && !dd->cspec->dummy_hdrq)
dd               2127 drivers/infiniband/hw/qib/qib_iba6120.c 			alloc_dummy_hdrq(dd);
dd               2130 drivers/infiniband/hw/qib/qib_iba6120.c 		dd->rcvctrl &= ~(mask << SYM_LSB(RcvCtrl, PortEnable));
dd               2132 drivers/infiniband/hw/qib/qib_iba6120.c 		dd->rcvctrl |= (mask << QLOGIC_IB_R_INTRAVAIL_SHIFT);
dd               2134 drivers/infiniband/hw/qib/qib_iba6120.c 		dd->rcvctrl &= ~(mask << QLOGIC_IB_R_INTRAVAIL_SHIFT);
dd               2135 drivers/infiniband/hw/qib/qib_iba6120.c 	qib_write_kreg(dd, kr_rcvctrl, dd->rcvctrl);
dd               2136 drivers/infiniband/hw/qib/qib_iba6120.c 	if ((op & QIB_RCVCTRL_INTRAVAIL_ENB) && dd->rhdrhead_intr_off) {
dd               2138 drivers/infiniband/hw/qib/qib_iba6120.c 		val = qib_read_ureg32(dd, ur_rcvhdrhead, ctxt) |
dd               2139 drivers/infiniband/hw/qib/qib_iba6120.c 			dd->rhdrhead_intr_off;
dd               2140 drivers/infiniband/hw/qib/qib_iba6120.c 		qib_write_ureg(dd, ur_rcvhdrhead, val, ctxt);
dd               2149 drivers/infiniband/hw/qib/qib_iba6120.c 		val = qib_read_ureg32(dd, ur_rcvegrindextail, ctxt);
dd               2150 drivers/infiniband/hw/qib/qib_iba6120.c 		qib_write_ureg(dd, ur_rcvegrindexhead, val, ctxt);
dd               2152 drivers/infiniband/hw/qib/qib_iba6120.c 		val = qib_read_ureg32(dd, ur_rcvhdrtail, ctxt);
dd               2153 drivers/infiniband/hw/qib/qib_iba6120.c 		dd->rcd[ctxt]->head = val;
dd               2155 drivers/infiniband/hw/qib/qib_iba6120.c 		if (ctxt < dd->first_user_ctxt)
dd               2156 drivers/infiniband/hw/qib/qib_iba6120.c 			val |= dd->rhdrhead_intr_off;
dd               2157 drivers/infiniband/hw/qib/qib_iba6120.c 		qib_write_ureg(dd, ur_rcvhdrhead, val, ctxt);
dd               2170 drivers/infiniband/hw/qib/qib_iba6120.c 			qib_write_kreg_ctxt(dd, kr_rcvhdrtailaddr, ctxt,
dd               2171 drivers/infiniband/hw/qib/qib_iba6120.c 					    dd->cspec->dummy_hdrq_phys);
dd               2172 drivers/infiniband/hw/qib/qib_iba6120.c 			qib_write_kreg_ctxt(dd, kr_rcvhdraddr, ctxt,
dd               2173 drivers/infiniband/hw/qib/qib_iba6120.c 					    dd->cspec->dummy_hdrq_phys);
dd               2177 drivers/infiniband/hw/qib/qib_iba6120.c 			for (i = 0; i < dd->cfgctxts; i++) {
dd               2178 drivers/infiniband/hw/qib/qib_iba6120.c 				qib_write_kreg_ctxt(dd, kr_rcvhdrtailaddr,
dd               2179 drivers/infiniband/hw/qib/qib_iba6120.c 					    i, dd->cspec->dummy_hdrq_phys);
dd               2180 drivers/infiniband/hw/qib/qib_iba6120.c 				qib_write_kreg_ctxt(dd, kr_rcvhdraddr,
dd               2181 drivers/infiniband/hw/qib/qib_iba6120.c 					    i, dd->cspec->dummy_hdrq_phys);
dd               2185 drivers/infiniband/hw/qib/qib_iba6120.c 	spin_unlock_irqrestore(&dd->cspec->rcvmod_lock, flags);
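
rcvctrl_6120_mod (lines 2097-2185 above) is a textbook shadow-register read-modify-write: dd->rcvctrl is a software copy of the receive-control register, every bit edit happens on that copy while rcvmod_lock is held, and one qib_write_kreg at line 2135 pushes the whole word to the chip. A compact userspace analogue of the pattern, with hypothetical names, might look like:

    #include <pthread.h>
    #include <stdint.h>

    struct dev {
        pthread_mutex_t lock;       /* plays the role of rcvmod_lock */
        uint64_t rcvctrl;           /* software shadow of the register */
        volatile uint64_t *kreg;    /* mapped chip register */
    };

    /* Edit bits in the shadow, then push the entire shadow in a single
     * write, exactly as the listing above does. */
    static void rcvctrl_mod(struct dev *dd, uint64_t set, uint64_t clear)
    {
        pthread_mutex_lock(&dd->lock);
        dd->rcvctrl |= set;
        dd->rcvctrl &= ~clear;
        *dd->kreg = dd->rcvctrl;
        pthread_mutex_unlock(&dd->lock);
    }
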
dd               2198 drivers/infiniband/hw/qib/qib_iba6120.c 	struct qib_devdata *dd = ppd->dd;
dd               2202 drivers/infiniband/hw/qib/qib_iba6120.c 	spin_lock_irqsave(&dd->sendctrl_lock, flags);
dd               2206 drivers/infiniband/hw/qib/qib_iba6120.c 		dd->sendctrl = 0;
dd               2208 drivers/infiniband/hw/qib/qib_iba6120.c 		dd->sendctrl &= ~SYM_MASK(SendCtrl, PIOEnable);
dd               2210 drivers/infiniband/hw/qib/qib_iba6120.c 		dd->sendctrl |= SYM_MASK(SendCtrl, PIOEnable);
dd               2212 drivers/infiniband/hw/qib/qib_iba6120.c 		dd->sendctrl &= ~SYM_MASK(SendCtrl, PIOBufAvailUpd);
dd               2214 drivers/infiniband/hw/qib/qib_iba6120.c 		dd->sendctrl |= SYM_MASK(SendCtrl, PIOBufAvailUpd);
dd               2219 drivers/infiniband/hw/qib/qib_iba6120.c 		tmp_dd_sendctrl = dd->sendctrl;
dd               2224 drivers/infiniband/hw/qib/qib_iba6120.c 		last = dd->piobcnt2k + dd->piobcnt4k;
dd               2229 drivers/infiniband/hw/qib/qib_iba6120.c 			qib_write_kreg(dd, kr_sendctrl, tmp_dd_sendctrl |
dd               2231 drivers/infiniband/hw/qib/qib_iba6120.c 			qib_write_kreg(dd, kr_scratch, 0);
dd               2235 drivers/infiniband/hw/qib/qib_iba6120.c 	tmp_dd_sendctrl = dd->sendctrl;
dd               2246 drivers/infiniband/hw/qib/qib_iba6120.c 	qib_write_kreg(dd, kr_sendctrl, tmp_dd_sendctrl);
dd               2247 drivers/infiniband/hw/qib/qib_iba6120.c 	qib_write_kreg(dd, kr_scratch, 0);
dd               2250 drivers/infiniband/hw/qib/qib_iba6120.c 		qib_write_kreg(dd, kr_sendctrl, dd->sendctrl);
dd               2251 drivers/infiniband/hw/qib/qib_iba6120.c 		qib_write_kreg(dd, kr_scratch, 0);
dd               2254 drivers/infiniband/hw/qib/qib_iba6120.c 	spin_unlock_irqrestore(&dd->sendctrl_lock, flags);
dd               2264 drivers/infiniband/hw/qib/qib_iba6120.c 		v = qib_read_kreg32(dd, kr_scratch);
dd               2265 drivers/infiniband/hw/qib/qib_iba6120.c 		qib_write_kreg(dd, kr_scratch, v);
dd               2266 drivers/infiniband/hw/qib/qib_iba6120.c 		v = qib_read_kreg32(dd, kr_scratch);
dd               2267 drivers/infiniband/hw/qib/qib_iba6120.c 		qib_write_kreg(dd, kr_scratch, v);
dd               2268 drivers/infiniband/hw/qib/qib_iba6120.c 		qib_read_kreg32(dd, kr_scratch);
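
Lines 2264-2268 above bounce kr_scratch: read, write the value back, and repeat, discarding the final read. A device-register read cannot complete until earlier posted writes have reached the device, so the sequence serves as an ordering/flush step before in-memory state (such as the pioavail copy) is trusted. A one-function sketch of the idea, with a hypothetical name and userspace-style volatile MMIO:

    #include <stdint.h>

    /* Flush posted MMIO writes by bouncing a harmless scratch register:
     * each read forces earlier writes to retire at the device, mirroring
     * the kr_scratch sequence in the listing above. */
    static void flush_posted_writes(volatile uint64_t *scratch)
    {
        uint64_t v;

        v = *scratch;       /* read retires prior posted writes */
        *scratch = v;       /* benign write-back */
        v = *scratch;
        *scratch = v;
        (void)*scratch;     /* final read, value discarded */
    }
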
dd               2280 drivers/infiniband/hw/qib/qib_iba6120.c 	struct qib_devdata *dd = ppd->dd;
dd               2321 drivers/infiniband/hw/qib/qib_iba6120.c 		qib_devinfo(ppd->dd->pcidev,
dd               2329 drivers/infiniband/hw/qib/qib_iba6120.c 		ret = dd->cspec->lli_errs;
dd               2331 drivers/infiniband/hw/qib/qib_iba6120.c 		ret = dd->cspec->overrun_thresh_errs;
dd               2336 drivers/infiniband/hw/qib/qib_iba6120.c 		for (i = 0; i < dd->first_user_ctxt; i++)
dd               2337 drivers/infiniband/hw/qib/qib_iba6120.c 			ret += read_6120_creg32(dd, cr_portovfl + i);
dd               2339 drivers/infiniband/hw/qib/qib_iba6120.c 		ret = dd->cspec->pma_sample_status;
dd               2349 drivers/infiniband/hw/qib/qib_iba6120.c 		ret = read_6120_creg(dd, creg);
dd               2351 drivers/infiniband/hw/qib/qib_iba6120.c 		ret = read_6120_creg32(dd, creg);
dd               2353 drivers/infiniband/hw/qib/qib_iba6120.c 		if (dd->cspec->ibdeltainprog)
dd               2354 drivers/infiniband/hw/qib/qib_iba6120.c 			ret -= ret - dd->cspec->ibsymsnap;
dd               2355 drivers/infiniband/hw/qib/qib_iba6120.c 		ret -= dd->cspec->ibsymdelta;
dd               2357 drivers/infiniband/hw/qib/qib_iba6120.c 		if (dd->cspec->ibdeltainprog)
dd               2358 drivers/infiniband/hw/qib/qib_iba6120.c 			ret -= ret - dd->cspec->iblnkerrsnap;
dd               2359 drivers/infiniband/hw/qib/qib_iba6120.c 		ret -= dd->cspec->iblnkerrdelta;
dd               2362 drivers/infiniband/hw/qib/qib_iba6120.c 		ret += dd->cspec->rxfc_unsupvl_errs;
dd               2475 drivers/infiniband/hw/qib/qib_iba6120.c static void init_6120_cntrnames(struct qib_devdata *dd)
dd               2480 drivers/infiniband/hw/qib/qib_iba6120.c 	for (i = 0, s = (char *)cntr6120names; s && j <= dd->cfgctxts;
dd               2489 drivers/infiniband/hw/qib/qib_iba6120.c 	dd->cspec->ncntrs = i;
dd               2492 drivers/infiniband/hw/qib/qib_iba6120.c 		dd->cspec->cntrnamelen = sizeof(cntr6120names) - 1;
dd               2494 drivers/infiniband/hw/qib/qib_iba6120.c 		dd->cspec->cntrnamelen = 1 + s - cntr6120names;
dd               2495 drivers/infiniband/hw/qib/qib_iba6120.c 	dd->cspec->cntrs = kmalloc_array(dd->cspec->ncntrs, sizeof(u64),
dd               2500 drivers/infiniband/hw/qib/qib_iba6120.c 	dd->cspec->nportcntrs = i - 1;
dd               2501 drivers/infiniband/hw/qib/qib_iba6120.c 	dd->cspec->portcntrnamelen = sizeof(portcntr6120names) - 1;
dd               2502 drivers/infiniband/hw/qib/qib_iba6120.c 	dd->cspec->portcntrs = kmalloc_array(dd->cspec->nportcntrs,
dd               2507 drivers/infiniband/hw/qib/qib_iba6120.c static u32 qib_read_6120cntrs(struct qib_devdata *dd, loff_t pos, char **namep,
dd               2513 drivers/infiniband/hw/qib/qib_iba6120.c 		ret = dd->cspec->cntrnamelen;
dd               2519 drivers/infiniband/hw/qib/qib_iba6120.c 		u64 *cntr = dd->cspec->cntrs;
dd               2522 drivers/infiniband/hw/qib/qib_iba6120.c 		ret = dd->cspec->ncntrs * sizeof(u64);
dd               2533 drivers/infiniband/hw/qib/qib_iba6120.c 		for (i = 0; i < dd->cspec->ncntrs; i++)
dd               2534 drivers/infiniband/hw/qib/qib_iba6120.c 			*cntr++ = read_6120_creg32(dd, cntr6120indices[i]);
dd               2540 drivers/infiniband/hw/qib/qib_iba6120.c static u32 qib_read_6120portcntrs(struct qib_devdata *dd, loff_t pos, u32 port,
dd               2546 drivers/infiniband/hw/qib/qib_iba6120.c 		ret = dd->cspec->portcntrnamelen;
dd               2552 drivers/infiniband/hw/qib/qib_iba6120.c 		u64 *cntr = dd->cspec->portcntrs;
dd               2553 drivers/infiniband/hw/qib/qib_iba6120.c 		struct qib_pportdata *ppd = &dd->pport[port];
dd               2556 drivers/infiniband/hw/qib/qib_iba6120.c 		ret = dd->cspec->nportcntrs * sizeof(u64);
dd               2563 drivers/infiniband/hw/qib/qib_iba6120.c 		for (i = 0; i < dd->cspec->nportcntrs; i++) {
dd               2569 drivers/infiniband/hw/qib/qib_iba6120.c 				*cntr++ = read_6120_creg32(dd,
dd               2577 drivers/infiniband/hw/qib/qib_iba6120.c static void qib_chk_6120_errormask(struct qib_devdata *dd)
dd               2584 drivers/infiniband/hw/qib/qib_iba6120.c 	if (!dd->cspec->errormask || !(dd->flags & QIB_INITTED))
dd               2587 drivers/infiniband/hw/qib/qib_iba6120.c 	errormask = qib_read_kreg64(dd, kr_errmask);
dd               2589 drivers/infiniband/hw/qib/qib_iba6120.c 	if (errormask == dd->cspec->errormask)
dd               2593 drivers/infiniband/hw/qib/qib_iba6120.c 	hwerrs = qib_read_kreg64(dd, kr_hwerrstatus);
dd               2594 drivers/infiniband/hw/qib/qib_iba6120.c 	ctrl = qib_read_kreg32(dd, kr_control);
dd               2596 drivers/infiniband/hw/qib/qib_iba6120.c 	qib_write_kreg(dd, kr_errmask,
dd               2597 drivers/infiniband/hw/qib/qib_iba6120.c 		dd->cspec->errormask);
dd               2599 drivers/infiniband/hw/qib/qib_iba6120.c 	if ((hwerrs & dd->cspec->hwerrmask) ||
dd               2601 drivers/infiniband/hw/qib/qib_iba6120.c 		qib_write_kreg(dd, kr_hwerrclear, 0ULL);
dd               2602 drivers/infiniband/hw/qib/qib_iba6120.c 		qib_write_kreg(dd, kr_errclear, 0ULL);
dd               2604 drivers/infiniband/hw/qib/qib_iba6120.c 		qib_write_kreg(dd, kr_intclear, 0ULL);
dd               2605 drivers/infiniband/hw/qib/qib_iba6120.c 		qib_devinfo(dd->pcidev,
dd               2607 drivers/infiniband/hw/qib/qib_iba6120.c 			 fixed, errormask, (unsigned long)dd->cspec->errormask,
dd               2622 drivers/infiniband/hw/qib/qib_iba6120.c 	struct qib_devdata *dd = from_timer(dd, t, stats_timer);
dd               2623 drivers/infiniband/hw/qib/qib_iba6120.c 	struct qib_pportdata *ppd = dd->pport;
dd               2631 drivers/infiniband/hw/qib/qib_iba6120.c 	if (!(dd->flags & QIB_INITTED) || dd->diag_client)
dd               2642 drivers/infiniband/hw/qib/qib_iba6120.c 	spin_lock_irqsave(&dd->eep_st_lock, flags);
dd               2643 drivers/infiniband/hw/qib/qib_iba6120.c 	traffic_wds -= dd->traffic_wds;
dd               2644 drivers/infiniband/hw/qib/qib_iba6120.c 	dd->traffic_wds += traffic_wds;
dd               2645 drivers/infiniband/hw/qib/qib_iba6120.c 	spin_unlock_irqrestore(&dd->eep_st_lock, flags);
dd               2647 drivers/infiniband/hw/qib/qib_iba6120.c 	qib_chk_6120_errormask(dd);
dd               2649 drivers/infiniband/hw/qib/qib_iba6120.c 	mod_timer(&dd->stats_timer, jiffies + HZ * ACTIVITY_TIMER);
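
In the faststats timer above, lines 2642-2645 convert a running hardware word count into a per-sample delta under eep_st_lock with two terse updates: traffic_wds -= dd->traffic_wds; dd->traffic_wds += traffic_wds. The arithmetic is easy to misread; an equivalent, more explicit form (hypothetical names) is:

    #include <stdint.h>

    /* Same arithmetic as the listing: after the call, *running_total
     * equals hw_total and the return value is the growth since the
     * previous sample. */
    static uint64_t sample_delta(uint64_t *running_total, uint64_t hw_total)
    {
        uint64_t delta = hw_total - *running_total;

        *running_total += delta;    /* i.e. *running_total = hw_total */
        return delta;
    }
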
dd               2653 drivers/infiniband/hw/qib/qib_iba6120.c static int qib_6120_nointr_fallback(struct qib_devdata *dd)
dd               2667 drivers/infiniband/hw/qib/qib_iba6120.c 	struct qib_devdata *dd = ppd->dd;
dd               2669 drivers/infiniband/hw/qib/qib_iba6120.c 	prev_val = qib_read_kreg64(dd, kr_xgxs_cfg);
dd               2672 drivers/infiniband/hw/qib/qib_iba6120.c 	qib_write_kreg(dd, kr_control,
dd               2673 drivers/infiniband/hw/qib/qib_iba6120.c 		       dd->control & ~QLOGIC_IB_C_LINKENABLE);
dd               2674 drivers/infiniband/hw/qib/qib_iba6120.c 	qib_write_kreg(dd, kr_xgxs_cfg, val);
dd               2675 drivers/infiniband/hw/qib/qib_iba6120.c 	qib_read_kreg32(dd, kr_scratch);
dd               2676 drivers/infiniband/hw/qib/qib_iba6120.c 	qib_write_kreg(dd, kr_xgxs_cfg, prev_val);
dd               2677 drivers/infiniband/hw/qib/qib_iba6120.c 	qib_write_kreg(dd, kr_control, dd->control);
dd               2714 drivers/infiniband/hw/qib/qib_iba6120.c 		ret = SYM_FIELD(ppd->dd->cspec->ibcctrl, IBCCtrl,
dd               2719 drivers/infiniband/hw/qib/qib_iba6120.c 		ret = SYM_FIELD(ppd->dd->cspec->ibcctrl, IBCCtrl,
dd               2725 drivers/infiniband/hw/qib/qib_iba6120.c 		ret = (ppd->dd->cspec->ibcctrl &
dd               2750 drivers/infiniband/hw/qib/qib_iba6120.c 	struct qib_devdata *dd = ppd->dd;
dd               2765 drivers/infiniband/hw/qib/qib_iba6120.c 		val64 = SYM_FIELD(dd->cspec->ibcctrl, IBCCtrl,
dd               2768 drivers/infiniband/hw/qib/qib_iba6120.c 			dd->cspec->ibcctrl &=
dd               2770 drivers/infiniband/hw/qib/qib_iba6120.c 			dd->cspec->ibcctrl |= (u64) val <<
dd               2772 drivers/infiniband/hw/qib/qib_iba6120.c 			qib_write_kreg(dd, kr_ibcctrl, dd->cspec->ibcctrl);
dd               2773 drivers/infiniband/hw/qib/qib_iba6120.c 			qib_write_kreg(dd, kr_scratch, 0);
dd               2778 drivers/infiniband/hw/qib/qib_iba6120.c 		val64 = SYM_FIELD(dd->cspec->ibcctrl, IBCCtrl,
dd               2781 drivers/infiniband/hw/qib/qib_iba6120.c 			dd->cspec->ibcctrl &=
dd               2783 drivers/infiniband/hw/qib/qib_iba6120.c 			dd->cspec->ibcctrl |= (u64) val <<
dd               2785 drivers/infiniband/hw/qib/qib_iba6120.c 			qib_write_kreg(dd, kr_ibcctrl, dd->cspec->ibcctrl);
dd               2786 drivers/infiniband/hw/qib/qib_iba6120.c 			qib_write_kreg(dd, kr_scratch, 0);
dd               2794 drivers/infiniband/hw/qib/qib_iba6120.c 		qib_write_kreg(dd, kr_partitionkey, val64);
dd               2800 drivers/infiniband/hw/qib/qib_iba6120.c 			dd->cspec->ibcctrl &=
dd               2803 drivers/infiniband/hw/qib/qib_iba6120.c 			dd->cspec->ibcctrl |=
dd               2805 drivers/infiniband/hw/qib/qib_iba6120.c 		qib_write_kreg(dd, kr_ibcctrl, dd->cspec->ibcctrl);
dd               2806 drivers/infiniband/hw/qib/qib_iba6120.c 		qib_write_kreg(dd, kr_scratch, 0);
dd               2818 drivers/infiniband/hw/qib/qib_iba6120.c 		dd->cspec->ibcctrl &= ~SYM_MASK(IBCCtrl, MaxPktLen);
dd               2819 drivers/infiniband/hw/qib/qib_iba6120.c 		dd->cspec->ibcctrl |= (u64)val <<
dd               2821 drivers/infiniband/hw/qib/qib_iba6120.c 		qib_write_kreg(dd, kr_ibcctrl, dd->cspec->ibcctrl);
dd               2822 drivers/infiniband/hw/qib/qib_iba6120.c 		qib_write_kreg(dd, kr_scratch, 0);
dd               2829 drivers/infiniband/hw/qib/qib_iba6120.c 			if (!dd->cspec->ibdeltainprog) {
dd               2830 drivers/infiniband/hw/qib/qib_iba6120.c 				dd->cspec->ibdeltainprog = 1;
dd               2831 drivers/infiniband/hw/qib/qib_iba6120.c 				dd->cspec->ibsymsnap =
dd               2832 drivers/infiniband/hw/qib/qib_iba6120.c 					read_6120_creg32(dd, cr_ibsymbolerr);
dd               2833 drivers/infiniband/hw/qib/qib_iba6120.c 				dd->cspec->iblnkerrsnap =
dd               2834 drivers/infiniband/hw/qib/qib_iba6120.c 					read_6120_creg32(dd, cr_iblinkerrrecov);
dd               2848 drivers/infiniband/hw/qib/qib_iba6120.c 			qib_dev_err(dd, "bad linkcmd req 0x%x\n", val >> 16);
dd               2870 drivers/infiniband/hw/qib/qib_iba6120.c 			qib_dev_err(dd, "bad linkinitcmd req 0x%x\n",
dd               2893 drivers/infiniband/hw/qib/qib_iba6120.c 		ppd->dd->cspec->ibcctrl |= SYM_MASK(IBCCtrl, Loopback);
dd               2894 drivers/infiniband/hw/qib/qib_iba6120.c 		qib_devinfo(ppd->dd->pcidev, "Enabling IB%u:%u IBC loopback\n",
dd               2895 drivers/infiniband/hw/qib/qib_iba6120.c 			 ppd->dd->unit, ppd->port);
dd               2897 drivers/infiniband/hw/qib/qib_iba6120.c 		ppd->dd->cspec->ibcctrl &= ~SYM_MASK(IBCCtrl, Loopback);
dd               2898 drivers/infiniband/hw/qib/qib_iba6120.c 		qib_devinfo(ppd->dd->pcidev,
dd               2900 drivers/infiniband/hw/qib/qib_iba6120.c 			ppd->dd->unit, ppd->port);
dd               2904 drivers/infiniband/hw/qib/qib_iba6120.c 		qib_write_kreg(ppd->dd, kr_ibcctrl, ppd->dd->cspec->ibcctrl);
dd               2905 drivers/infiniband/hw/qib/qib_iba6120.c 		qib_write_kreg(ppd->dd, kr_scratch, 0);
dd               2945 drivers/infiniband/hw/qib/qib_iba6120.c 	struct qib_chip_specific *cs = ppd->dd->cspec;
dd               3005 drivers/infiniband/hw/qib/qib_iba6120.c 		if (ppd->dd->cspec->ibdeltainprog) {
dd               3006 drivers/infiniband/hw/qib/qib_iba6120.c 			ppd->dd->cspec->ibdeltainprog = 0;
dd               3007 drivers/infiniband/hw/qib/qib_iba6120.c 			ppd->dd->cspec->ibsymdelta +=
dd               3008 drivers/infiniband/hw/qib/qib_iba6120.c 				read_6120_creg32(ppd->dd, cr_ibsymbolerr) -
dd               3009 drivers/infiniband/hw/qib/qib_iba6120.c 					ppd->dd->cspec->ibsymsnap;
dd               3010 drivers/infiniband/hw/qib/qib_iba6120.c 			ppd->dd->cspec->iblnkerrdelta +=
dd               3011 drivers/infiniband/hw/qib/qib_iba6120.c 				read_6120_creg32(ppd->dd, cr_iblinkerrrecov) -
dd               3012 drivers/infiniband/hw/qib/qib_iba6120.c 					ppd->dd->cspec->iblnkerrsnap;
dd               3016 drivers/infiniband/hw/qib/qib_iba6120.c 		ppd->dd->cspec->lli_counter = 0;
dd               3017 drivers/infiniband/hw/qib/qib_iba6120.c 		if (!ppd->dd->cspec->ibdeltainprog) {
dd               3018 drivers/infiniband/hw/qib/qib_iba6120.c 			ppd->dd->cspec->ibdeltainprog = 1;
dd               3019 drivers/infiniband/hw/qib/qib_iba6120.c 			ppd->dd->cspec->ibsymsnap =
dd               3020 drivers/infiniband/hw/qib/qib_iba6120.c 				read_6120_creg32(ppd->dd, cr_ibsymbolerr);
dd               3021 drivers/infiniband/hw/qib/qib_iba6120.c 			ppd->dd->cspec->iblnkerrsnap =
dd               3022 drivers/infiniband/hw/qib/qib_iba6120.c 				read_6120_creg32(ppd->dd, cr_iblinkerrrecov);
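
The ibdeltainprog/ibsymsnap lines scattered above fit one scheme: when the driver itself changes link state (set_ib_cfg, lines 2829-2834, and ib_updown, lines 3017-3022) it snapshots cr_ibsymbolerr and cr_iblinkerrrecov; when the transition completes (lines 3005-3012) the growth during the window is folded into a delta; and the counter read path (lines 2353-2359) reports the snapshot while in progress and always subtracts the accumulated delta, presumably so driver-initiated link bounces do not inflate error counts. A hypothetical distillation:

    #include <stdint.h>

    struct ctr_filter {
        int inprog;         /* driver-initiated transition under way */
        uint32_t snap;      /* counter value when it began */
        uint32_t delta;     /* total increments ever hidden */
    };

    static void bounce_begin(struct ctr_filter *f, uint32_t hw_now)
    {
        if (!f->inprog) {
            f->inprog = 1;
            f->snap = hw_now;
        }
    }

    static void bounce_end(struct ctr_filter *f, uint32_t hw_now)
    {
        if (f->inprog) {
            f->inprog = 0;
            f->delta += hw_now - f->snap;
        }
    }

    /* The read side, as at qib_iba6120.c:2353-2359: report the snapshot
     * while in progress, then hide everything ever accumulated. */
    static uint32_t reported(const struct ctr_filter *f, uint32_t hw_now)
    {
        uint32_t ret = hw_now;

        if (f->inprog)
            ret -= ret - f->snap;   /* i.e. ret = f->snap */
        return ret - f->delta;
    }
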
dd               3038 drivers/infiniband/hw/qib/qib_iba6120.c static int gpio_6120_mod(struct qib_devdata *dd, u32 out, u32 dir, u32 mask)
dd               3047 drivers/infiniband/hw/qib/qib_iba6120.c 		spin_lock_irqsave(&dd->cspec->gpio_lock, flags);
dd               3048 drivers/infiniband/hw/qib/qib_iba6120.c 		dd->cspec->extctrl &= ~((u64)mask << SYM_LSB(EXTCtrl, GPIOOe));
dd               3049 drivers/infiniband/hw/qib/qib_iba6120.c 		dd->cspec->extctrl |= ((u64) dir << SYM_LSB(EXTCtrl, GPIOOe));
dd               3050 drivers/infiniband/hw/qib/qib_iba6120.c 		new_out = (dd->cspec->gpio_out & ~mask) | out;
dd               3052 drivers/infiniband/hw/qib/qib_iba6120.c 		qib_write_kreg(dd, kr_extctrl, dd->cspec->extctrl);
dd               3053 drivers/infiniband/hw/qib/qib_iba6120.c 		qib_write_kreg(dd, kr_gpio_out, new_out);
dd               3054 drivers/infiniband/hw/qib/qib_iba6120.c 		dd->cspec->gpio_out = new_out;
dd               3055 drivers/infiniband/hw/qib/qib_iba6120.c 		spin_unlock_irqrestore(&dd->cspec->gpio_lock, flags);
dd               3065 drivers/infiniband/hw/qib/qib_iba6120.c 	read_val = qib_read_kreg64(dd, kr_extstatus);
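
gpio_6120_mod (lines 3038-3065 above) changes only the bits selected by mask: direction bits are folded into the extctrl shadow, the new output word is computed as (gpio_out & ~mask) | out, both shadows are pushed under gpio_lock, and the routine finishes by reading kr_extstatus to return current input levels. A sketch with hypothetical names; locking is elided for brevity:

    #include <stdint.h>

    struct gpio_shadow {
        uint64_t extctrl;   /* direction (output-enable) bits */
        uint32_t out;       /* last output word written */
    };

    /* Masked update: bits outside `mask` keep their cached value; the
     * return value samples the input-level register, as above. */
    static uint32_t gpio_mod(struct gpio_shadow *g,
                             volatile uint64_t *extctrl_reg,
                             volatile uint64_t *out_reg,
                             volatile uint64_t *status_reg,
                             uint32_t out, uint32_t dir,
                             uint32_t mask, unsigned oe_shift)
    {
        if (mask) {
            g->extctrl &= ~((uint64_t)mask << oe_shift);
            g->extctrl |=  ((uint64_t)dir  << oe_shift);
            g->out = (g->out & ~mask) | out;
            *extctrl_reg = g->extctrl;
            *out_reg = g->out;
        }
        return (uint32_t)*status_reg;   /* sampled input levels */
    }
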
dd               3074 drivers/infiniband/hw/qib/qib_iba6120.c static void get_6120_chip_params(struct qib_devdata *dd)
dd               3080 drivers/infiniband/hw/qib/qib_iba6120.c 	dd->uregbase = qib_read_kreg32(dd, kr_userregbase);
dd               3082 drivers/infiniband/hw/qib/qib_iba6120.c 	dd->rcvtidcnt = qib_read_kreg32(dd, kr_rcvtidcnt);
dd               3083 drivers/infiniband/hw/qib/qib_iba6120.c 	dd->rcvtidbase = qib_read_kreg32(dd, kr_rcvtidbase);
dd               3084 drivers/infiniband/hw/qib/qib_iba6120.c 	dd->rcvegrbase = qib_read_kreg32(dd, kr_rcvegrbase);
dd               3085 drivers/infiniband/hw/qib/qib_iba6120.c 	dd->palign = qib_read_kreg32(dd, kr_palign);
dd               3086 drivers/infiniband/hw/qib/qib_iba6120.c 	dd->piobufbase = qib_read_kreg64(dd, kr_sendpiobufbase);
dd               3087 drivers/infiniband/hw/qib/qib_iba6120.c 	dd->pio2k_bufbase = dd->piobufbase & 0xffffffff;
dd               3089 drivers/infiniband/hw/qib/qib_iba6120.c 	dd->rcvhdrcnt = qib_read_kreg32(dd, kr_rcvegrcnt);
dd               3091 drivers/infiniband/hw/qib/qib_iba6120.c 	val = qib_read_kreg64(dd, kr_sendpiosize);
dd               3092 drivers/infiniband/hw/qib/qib_iba6120.c 	dd->piosize2k = val & ~0U;
dd               3093 drivers/infiniband/hw/qib/qib_iba6120.c 	dd->piosize4k = val >> 32;
dd               3098 drivers/infiniband/hw/qib/qib_iba6120.c 	dd->pport->ibmtu = (u32)mtu;
dd               3100 drivers/infiniband/hw/qib/qib_iba6120.c 	val = qib_read_kreg64(dd, kr_sendpiobufcnt);
dd               3101 drivers/infiniband/hw/qib/qib_iba6120.c 	dd->piobcnt2k = val & ~0U;
dd               3102 drivers/infiniband/hw/qib/qib_iba6120.c 	dd->piobcnt4k = val >> 32;
dd               3103 drivers/infiniband/hw/qib/qib_iba6120.c 	dd->last_pio = dd->piobcnt4k + dd->piobcnt2k - 1;
dd               3105 drivers/infiniband/hw/qib/qib_iba6120.c 	dd->pio2kbase = (u32 __iomem *)
dd               3106 drivers/infiniband/hw/qib/qib_iba6120.c 		(((char __iomem *)dd->kregbase) + dd->pio2k_bufbase);
dd               3107 drivers/infiniband/hw/qib/qib_iba6120.c 	if (dd->piobcnt4k) {
dd               3108 drivers/infiniband/hw/qib/qib_iba6120.c 		dd->pio4kbase = (u32 __iomem *)
dd               3109 drivers/infiniband/hw/qib/qib_iba6120.c 			(((char __iomem *) dd->kregbase) +
dd               3110 drivers/infiniband/hw/qib/qib_iba6120.c 			 (dd->piobufbase >> 32));
dd               3116 drivers/infiniband/hw/qib/qib_iba6120.c 		dd->align4k = ALIGN(dd->piosize4k, dd->palign);
dd               3119 drivers/infiniband/hw/qib/qib_iba6120.c 	piobufs = dd->piobcnt4k + dd->piobcnt2k;
dd               3121 drivers/infiniband/hw/qib/qib_iba6120.c 	dd->pioavregs = ALIGN(piobufs, sizeof(u64) * BITS_PER_BYTE / 2) /
dd               3130 drivers/infiniband/hw/qib/qib_iba6120.c static void set_6120_baseaddrs(struct qib_devdata *dd)
dd               3134 drivers/infiniband/hw/qib/qib_iba6120.c 	cregbase = qib_read_kreg32(dd, kr_counterregbase);
dd               3135 drivers/infiniband/hw/qib/qib_iba6120.c 	dd->cspec->cregbase = (u64 __iomem *)
dd               3136 drivers/infiniband/hw/qib/qib_iba6120.c 		((char __iomem *) dd->kregbase + cregbase);
dd               3138 drivers/infiniband/hw/qib/qib_iba6120.c 	dd->egrtidbase = (u64 __iomem *)
dd               3139 drivers/infiniband/hw/qib/qib_iba6120.c 		((char __iomem *) dd->kregbase + dd->rcvegrbase);
dd               3147 drivers/infiniband/hw/qib/qib_iba6120.c static int qib_late_6120_initreg(struct qib_devdata *dd)
dd               3152 drivers/infiniband/hw/qib/qib_iba6120.c 	qib_write_kreg(dd, kr_rcvhdrentsize, dd->rcvhdrentsize);
dd               3153 drivers/infiniband/hw/qib/qib_iba6120.c 	qib_write_kreg(dd, kr_rcvhdrsize, dd->rcvhdrsize);
dd               3154 drivers/infiniband/hw/qib/qib_iba6120.c 	qib_write_kreg(dd, kr_rcvhdrcnt, dd->rcvhdrcnt);
dd               3155 drivers/infiniband/hw/qib/qib_iba6120.c 	qib_write_kreg(dd, kr_sendpioavailaddr, dd->pioavailregs_phys);
dd               3156 drivers/infiniband/hw/qib/qib_iba6120.c 	val = qib_read_kreg64(dd, kr_sendpioavailaddr);
dd               3157 drivers/infiniband/hw/qib/qib_iba6120.c 	if (val != dd->pioavailregs_phys) {
dd               3158 drivers/infiniband/hw/qib/qib_iba6120.c 		qib_dev_err(dd,
dd               3160 drivers/infiniband/hw/qib/qib_iba6120.c 			(unsigned long) dd->pioavailregs_phys,
dd               3167 drivers/infiniband/hw/qib/qib_iba6120.c static int init_6120_variables(struct qib_devdata *dd)
dd               3173 drivers/infiniband/hw/qib/qib_iba6120.c 	ppd = (struct qib_pportdata *)(dd + 1);
dd               3174 drivers/infiniband/hw/qib/qib_iba6120.c 	dd->pport = ppd;
dd               3175 drivers/infiniband/hw/qib/qib_iba6120.c 	dd->num_pports = 1;
dd               3177 drivers/infiniband/hw/qib/qib_iba6120.c 	dd->cspec = (struct qib_chip_specific *)(ppd + dd->num_pports);
dd               3178 drivers/infiniband/hw/qib/qib_iba6120.c 	dd->cspec->ppd = ppd;
dd               3181 drivers/infiniband/hw/qib/qib_iba6120.c 	spin_lock_init(&dd->cspec->kernel_tid_lock);
dd               3182 drivers/infiniband/hw/qib/qib_iba6120.c 	spin_lock_init(&dd->cspec->user_tid_lock);
dd               3183 drivers/infiniband/hw/qib/qib_iba6120.c 	spin_lock_init(&dd->cspec->rcvmod_lock);
dd               3184 drivers/infiniband/hw/qib/qib_iba6120.c 	spin_lock_init(&dd->cspec->gpio_lock);
dd               3187 drivers/infiniband/hw/qib/qib_iba6120.c 	dd->revision = readq(&dd->kregbase[kr_revision]);
dd               3189 drivers/infiniband/hw/qib/qib_iba6120.c 	if ((dd->revision & 0xffffffffU) == 0xffffffffU) {
dd               3190 drivers/infiniband/hw/qib/qib_iba6120.c 		qib_dev_err(dd,
dd               3195 drivers/infiniband/hw/qib/qib_iba6120.c 	dd->flags |= QIB_PRESENT;  /* now register routines work */
dd               3197 drivers/infiniband/hw/qib/qib_iba6120.c 	dd->majrev = (u8) SYM_FIELD(dd->revision, Revision_R,
dd               3199 drivers/infiniband/hw/qib/qib_iba6120.c 	dd->minrev = (u8) SYM_FIELD(dd->revision, Revision_R,
dd               3202 drivers/infiniband/hw/qib/qib_iba6120.c 	get_6120_chip_params(dd);
dd               3203 drivers/infiniband/hw/qib/qib_iba6120.c 	pe_boardname(dd); /* fill in boardname */
dd               3209 drivers/infiniband/hw/qib/qib_iba6120.c 	dd->gpio_sda_num = _QIB_GPIO_SDA_NUM;
dd               3210 drivers/infiniband/hw/qib/qib_iba6120.c 	dd->gpio_scl_num = _QIB_GPIO_SCL_NUM;
dd               3211 drivers/infiniband/hw/qib/qib_iba6120.c 	dd->twsi_eeprom_dev = QIB_TWSI_NO_DEV;
dd               3214 drivers/infiniband/hw/qib/qib_iba6120.c 		dd->flags |= QIB_PIO_FLUSH_WC;
dd               3216 drivers/infiniband/hw/qib/qib_iba6120.c 	ret = qib_init_pportdata(ppd, dd, 0, 1);
dd               3229 drivers/infiniband/hw/qib/qib_iba6120.c 	dd->rcvhdrentsize = QIB_RCVHDR_ENTSIZE;
dd               3230 drivers/infiniband/hw/qib/qib_iba6120.c 	dd->rcvhdrsize = QIB_DFLT_RCVHDRSIZE;
dd               3231 drivers/infiniband/hw/qib/qib_iba6120.c 	dd->rhf_offset = 0;
dd               3235 drivers/infiniband/hw/qib/qib_iba6120.c 	dd->rcvegrbufsize = ret != -1 ? max(ret, 2048) : QIB_DEFAULT_MTU;
dd               3236 drivers/infiniband/hw/qib/qib_iba6120.c 	dd->rcvegrbufsize_shift = ilog2(dd->rcvegrbufsize);
dd               3238 drivers/infiniband/hw/qib/qib_iba6120.c 	qib_6120_tidtemplate(dd);
dd               3245 drivers/infiniband/hw/qib/qib_iba6120.c 	dd->rhdrhead_intr_off = 1ULL << 32;
dd               3248 drivers/infiniband/hw/qib/qib_iba6120.c 	timer_setup(&dd->stats_timer, qib_get_6120_faststats, 0);
dd               3249 drivers/infiniband/hw/qib/qib_iba6120.c 	timer_setup(&dd->cspec->pma_timer, pma_6120_timer, 0);
dd               3251 drivers/infiniband/hw/qib/qib_iba6120.c 	dd->ureg_align = qib_read_kreg32(dd, kr_palign);
dd               3253 drivers/infiniband/hw/qib/qib_iba6120.c 	dd->piosize2kmax_dwords = dd->piosize2k >> 2;
dd               3254 drivers/infiniband/hw/qib/qib_iba6120.c 	qib_6120_config_ctxts(dd);
dd               3255 drivers/infiniband/hw/qib/qib_iba6120.c 	qib_set_ctxtcnt(dd);
dd               3257 drivers/infiniband/hw/qib/qib_iba6120.c 	ret = init_chip_wc_pat(dd, 0);
dd               3260 drivers/infiniband/hw/qib/qib_iba6120.c 	set_6120_baseaddrs(dd); /* set chip access pointers now */
dd               3268 drivers/infiniband/hw/qib/qib_iba6120.c 	ret = qib_create_ctxts(dd);
dd               3269 drivers/infiniband/hw/qib/qib_iba6120.c 	init_6120_cntrnames(dd);
dd               3272 drivers/infiniband/hw/qib/qib_iba6120.c 	sbufs = dd->piobcnt4k ?  dd->piobcnt4k : 16;
dd               3274 drivers/infiniband/hw/qib/qib_iba6120.c 	dd->lastctxt_piobuf = dd->piobcnt2k + dd->piobcnt4k - sbufs;
dd               3275 drivers/infiniband/hw/qib/qib_iba6120.c 	dd->pbufsctxt = dd->lastctxt_piobuf /
dd               3276 drivers/infiniband/hw/qib/qib_iba6120.c 		(dd->cfgctxts - dd->first_user_ctxt);
dd               3302 drivers/infiniband/hw/qib/qib_iba6120.c 	u32 lbuf = ppd->dd->piobcnt2k + ppd->dd->piobcnt4k - 1;
dd               3308 drivers/infiniband/hw/qib/qib_iba6120.c 	sendctrl_6120_mod(ppd->dd->pport, QIB_SENDCTRL_AVAIL_BLIP);
dd               3309 drivers/infiniband/hw/qib/qib_iba6120.c 	qib_read_kreg64(ppd->dd, kr_scratch); /* extra chip flush */
dd               3310 drivers/infiniband/hw/qib/qib_iba6120.c 	buf = qib_getsendbuf_range(ppd->dd, bnum, lbuf, lbuf);
dd               3316 drivers/infiniband/hw/qib/qib_iba6120.c 	ppd->dd->upd_pio_shadow  = 1; /* update our idea of what's busy */
dd               3317 drivers/infiniband/hw/qib/qib_iba6120.c 	qib_read_kreg64(ppd->dd, kr_scratch); /* extra chip flush */
dd               3318 drivers/infiniband/hw/qib/qib_iba6120.c 	buf = qib_getsendbuf_range(ppd->dd, bnum, lbuf, lbuf);
dd               3327 drivers/infiniband/hw/qib/qib_iba6120.c 	struct qib_devdata *dd = ppd->dd;
dd               3335 drivers/infiniband/hw/qib/qib_iba6120.c 		if ((plen + 1) > dd->piosize2kmax_dwords)
dd               3336 drivers/infiniband/hw/qib/qib_iba6120.c 			first = dd->piobcnt2k;
dd               3340 drivers/infiniband/hw/qib/qib_iba6120.c 		last = dd->piobcnt2k + dd->piobcnt4k - 1;
dd               3341 drivers/infiniband/hw/qib/qib_iba6120.c 		buf = qib_getsendbuf_range(dd, pbufnum, first, last);
dd               3383 drivers/infiniband/hw/qib/qib_iba6120.c static void qib_6120_initvl15_bufs(struct qib_devdata *dd)
dd               3389 drivers/infiniband/hw/qib/qib_iba6120.c 	rcd->rcvegrcnt = rcd->dd->rcvhdrcnt;
dd               3393 drivers/infiniband/hw/qib/qib_iba6120.c static void qib_6120_txchk_change(struct qib_devdata *dd, u32 start,
dd               3398 drivers/infiniband/hw/qib/qib_iba6120.c static void writescratch(struct qib_devdata *dd, u32 val)
dd               3400 drivers/infiniband/hw/qib/qib_iba6120.c 	(void) qib_write_kreg(dd, kr_scratch, val);
dd               3403 drivers/infiniband/hw/qib/qib_iba6120.c static int qib_6120_tempsense_rd(struct qib_devdata *dd, int regnum)
dd               3409 drivers/infiniband/hw/qib/qib_iba6120.c static int qib_6120_notify_dca(struct qib_devdata *dd, unsigned long event)
dd               3416 drivers/infiniband/hw/qib/qib_iba6120.c static int qib_6120_eeprom_wen(struct qib_devdata *dd, int wen)
dd               3435 drivers/infiniband/hw/qib/qib_iba6120.c 	struct qib_devdata *dd;
dd               3438 drivers/infiniband/hw/qib/qib_iba6120.c 	dd = qib_alloc_devdata(pdev, sizeof(struct qib_pportdata) +
dd               3440 drivers/infiniband/hw/qib/qib_iba6120.c 	if (IS_ERR(dd))
dd               3443 drivers/infiniband/hw/qib/qib_iba6120.c 	dd->f_bringup_serdes    = qib_6120_bringup_serdes;
dd               3444 drivers/infiniband/hw/qib/qib_iba6120.c 	dd->f_cleanup           = qib_6120_setup_cleanup;
dd               3445 drivers/infiniband/hw/qib/qib_iba6120.c 	dd->f_clear_tids        = qib_6120_clear_tids;
dd               3446 drivers/infiniband/hw/qib/qib_iba6120.c 	dd->f_free_irq          = qib_free_irq;
dd               3447 drivers/infiniband/hw/qib/qib_iba6120.c 	dd->f_get_base_info     = qib_6120_get_base_info;
dd               3448 drivers/infiniband/hw/qib/qib_iba6120.c 	dd->f_get_msgheader     = qib_6120_get_msgheader;
dd               3449 drivers/infiniband/hw/qib/qib_iba6120.c 	dd->f_getsendbuf        = qib_6120_getsendbuf;
dd               3450 drivers/infiniband/hw/qib/qib_iba6120.c 	dd->f_gpio_mod          = gpio_6120_mod;
dd               3451 drivers/infiniband/hw/qib/qib_iba6120.c 	dd->f_eeprom_wen	= qib_6120_eeprom_wen;
dd               3452 drivers/infiniband/hw/qib/qib_iba6120.c 	dd->f_hdrqempty         = qib_6120_hdrqempty;
dd               3453 drivers/infiniband/hw/qib/qib_iba6120.c 	dd->f_ib_updown         = qib_6120_ib_updown;
dd               3454 drivers/infiniband/hw/qib/qib_iba6120.c 	dd->f_init_ctxt         = qib_6120_init_ctxt;
dd               3455 drivers/infiniband/hw/qib/qib_iba6120.c 	dd->f_initvl15_bufs     = qib_6120_initvl15_bufs;
dd               3456 drivers/infiniband/hw/qib/qib_iba6120.c 	dd->f_intr_fallback     = qib_6120_nointr_fallback;
dd               3457 drivers/infiniband/hw/qib/qib_iba6120.c 	dd->f_late_initreg      = qib_late_6120_initreg;
dd               3458 drivers/infiniband/hw/qib/qib_iba6120.c 	dd->f_setpbc_control    = qib_6120_setpbc_control;
dd               3459 drivers/infiniband/hw/qib/qib_iba6120.c 	dd->f_portcntr          = qib_portcntr_6120;
dd               3460 drivers/infiniband/hw/qib/qib_iba6120.c 	dd->f_put_tid           = (dd->minrev >= 2) ?
dd               3463 drivers/infiniband/hw/qib/qib_iba6120.c 	dd->f_quiet_serdes      = qib_6120_quiet_serdes;
dd               3464 drivers/infiniband/hw/qib/qib_iba6120.c 	dd->f_rcvctrl           = rcvctrl_6120_mod;
dd               3465 drivers/infiniband/hw/qib/qib_iba6120.c 	dd->f_read_cntrs        = qib_read_6120cntrs;
dd               3466 drivers/infiniband/hw/qib/qib_iba6120.c 	dd->f_read_portcntrs    = qib_read_6120portcntrs;
dd               3467 drivers/infiniband/hw/qib/qib_iba6120.c 	dd->f_reset             = qib_6120_setup_reset;
dd               3468 drivers/infiniband/hw/qib/qib_iba6120.c 	dd->f_init_sdma_regs    = init_sdma_6120_regs;
dd               3469 drivers/infiniband/hw/qib/qib_iba6120.c 	dd->f_sdma_busy         = qib_sdma_6120_busy;
dd               3470 drivers/infiniband/hw/qib/qib_iba6120.c 	dd->f_sdma_gethead      = qib_sdma_6120_gethead;
dd               3471 drivers/infiniband/hw/qib/qib_iba6120.c 	dd->f_sdma_sendctrl     = qib_6120_sdma_sendctrl;
dd               3472 drivers/infiniband/hw/qib/qib_iba6120.c 	dd->f_sdma_set_desc_cnt = qib_sdma_set_6120_desc_cnt;
dd               3473 drivers/infiniband/hw/qib/qib_iba6120.c 	dd->f_sdma_update_tail  = qib_sdma_update_6120_tail;
dd               3474 drivers/infiniband/hw/qib/qib_iba6120.c 	dd->f_sendctrl          = sendctrl_6120_mod;
dd               3475 drivers/infiniband/hw/qib/qib_iba6120.c 	dd->f_set_armlaunch     = qib_set_6120_armlaunch;
dd               3476 drivers/infiniband/hw/qib/qib_iba6120.c 	dd->f_set_cntr_sample   = qib_set_cntr_6120_sample;
dd               3477 drivers/infiniband/hw/qib/qib_iba6120.c 	dd->f_iblink_state      = qib_6120_iblink_state;
dd               3478 drivers/infiniband/hw/qib/qib_iba6120.c 	dd->f_ibphys_portstate  = qib_6120_phys_portstate;
dd               3479 drivers/infiniband/hw/qib/qib_iba6120.c 	dd->f_get_ib_cfg        = qib_6120_get_ib_cfg;
dd               3480 drivers/infiniband/hw/qib/qib_iba6120.c 	dd->f_set_ib_cfg        = qib_6120_set_ib_cfg;
dd               3481 drivers/infiniband/hw/qib/qib_iba6120.c 	dd->f_set_ib_loopback   = qib_6120_set_loopback;
dd               3482 drivers/infiniband/hw/qib/qib_iba6120.c 	dd->f_set_intr_state    = qib_6120_set_intr_state;
dd               3483 drivers/infiniband/hw/qib/qib_iba6120.c 	dd->f_setextled         = qib_6120_setup_setextled;
dd               3484 drivers/infiniband/hw/qib/qib_iba6120.c 	dd->f_txchk_change      = qib_6120_txchk_change;
dd               3485 drivers/infiniband/hw/qib/qib_iba6120.c 	dd->f_update_usrhead    = qib_update_6120_usrhead;
dd               3486 drivers/infiniband/hw/qib/qib_iba6120.c 	dd->f_wantpiobuf_intr   = qib_wantpiobuf_6120_intr;
dd               3487 drivers/infiniband/hw/qib/qib_iba6120.c 	dd->f_xgxs_reset        = qib_6120_xgxs_reset;
dd               3488 drivers/infiniband/hw/qib/qib_iba6120.c 	dd->f_writescratch      = writescratch;
dd               3489 drivers/infiniband/hw/qib/qib_iba6120.c 	dd->f_tempsense_rd	= qib_6120_tempsense_rd;
dd               3491 drivers/infiniband/hw/qib/qib_iba6120.c 	dd->f_notify_dca = qib_6120_notify_dca;
dd               3500 drivers/infiniband/hw/qib/qib_iba6120.c 	ret = qib_pcie_ddinit(dd, pdev, ent);
dd               3505 drivers/infiniband/hw/qib/qib_iba6120.c 	ret = init_6120_variables(dd);
dd               3512 drivers/infiniband/hw/qib/qib_iba6120.c 	if (qib_pcie_params(dd, 8, NULL))
dd               3513 drivers/infiniband/hw/qib/qib_iba6120.c 		qib_dev_err(dd,
dd               3516 drivers/infiniband/hw/qib/qib_iba6120.c 	qib_write_kreg(dd, kr_hwdiagctrl, 0);
dd               3518 drivers/infiniband/hw/qib/qib_iba6120.c 	if (qib_read_kreg64(dd, kr_hwerrstatus) &
dd               3520 drivers/infiniband/hw/qib/qib_iba6120.c 		qib_write_kreg(dd, kr_hwerrclear,
dd               3524 drivers/infiniband/hw/qib/qib_iba6120.c 	qib_setup_6120_interrupt(dd);
dd               3526 drivers/infiniband/hw/qib/qib_iba6120.c 	qib_6120_init_hwerrors(dd);
dd               3531 drivers/infiniband/hw/qib/qib_iba6120.c 	qib_pcie_ddcleanup(dd);
dd               3533 drivers/infiniband/hw/qib/qib_iba6120.c 	qib_free_devdata(dd);
dd               3534 drivers/infiniband/hw/qib/qib_iba6120.c 	dd = ERR_PTR(ret);
dd               3536 drivers/infiniband/hw/qib/qib_iba6120.c 	return dd;
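
The probe body ending above (qib_iba6120.c:3435-3536) is where the chip layer plugs into the common qib core: one allocation carries devdata, pportdata, and the chip-specific struct, and every dd->f_* pointer is filled before common code can dispatch through it. Note lines 3460-3463: f_put_tid is chosen per silicon revision, apparently routing older parts to the serialized qib_6120_put_tid and rev2+ to qib_6120_put_tid_2. A generic sketch of that revision-keyed ops-table pattern; all names here are illustrative, not the driver's:

    #include <stdio.h>

    struct devdata;

    struct chip_ops {
        void (*put_tid)(struct devdata *dd, int idx);
    };

    struct devdata {
        int minrev;             /* silicon revision read from the chip */
        struct chip_ops f;
    };

    static void put_tid_locked(struct devdata *dd, int idx)
    {
        (void)dd;
        printf("rev <2: serialized TID write %d\n", idx);
    }

    static void put_tid_plain(struct devdata *dd, int idx)
    {
        (void)dd;
        printf("rev >=2: direct TID write %d\n", idx);
    }

    /* Probe-time wiring, as in the dd->f_* block above: the revision
     * gates which implementation the rest of the driver ever sees. */
    static void probe_wire_ops(struct devdata *dd)
    {
        dd->f.put_tid = (dd->minrev >= 2) ? put_tid_plain
                                          : put_tid_locked;
    }
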
dd                230 drivers/infiniband/hw/qib/qib_iba7220.c static inline u32 qib_read_ureg32(const struct qib_devdata *dd,
dd                233 drivers/infiniband/hw/qib/qib_iba7220.c 	if (!dd->kregbase || !(dd->flags & QIB_PRESENT))
dd                236 drivers/infiniband/hw/qib/qib_iba7220.c 	if (dd->userbase)
dd                238 drivers/infiniband/hw/qib/qib_iba7220.c 			     ((char __iomem *)dd->userbase +
dd                239 drivers/infiniband/hw/qib/qib_iba7220.c 			      dd->ureg_align * ctxt));
dd                242 drivers/infiniband/hw/qib/qib_iba7220.c 			     (dd->uregbase +
dd                243 drivers/infiniband/hw/qib/qib_iba7220.c 			      (char __iomem *)dd->kregbase +
dd                244 drivers/infiniband/hw/qib/qib_iba7220.c 			      dd->ureg_align * ctxt));
dd                256 drivers/infiniband/hw/qib/qib_iba7220.c static inline void qib_write_ureg(const struct qib_devdata *dd,
dd                261 drivers/infiniband/hw/qib/qib_iba7220.c 	if (dd->userbase)
dd                263 drivers/infiniband/hw/qib/qib_iba7220.c 			((char __iomem *) dd->userbase +
dd                264 drivers/infiniband/hw/qib/qib_iba7220.c 			 dd->ureg_align * ctxt);
dd                267 drivers/infiniband/hw/qib/qib_iba7220.c 			(dd->uregbase +
dd                268 drivers/infiniband/hw/qib/qib_iba7220.c 			 (char __iomem *) dd->kregbase +
dd                269 drivers/infiniband/hw/qib/qib_iba7220.c 			 dd->ureg_align * ctxt);
dd                271 drivers/infiniband/hw/qib/qib_iba7220.c 	if (dd->kregbase && (dd->flags & QIB_PRESENT))
dd                282 drivers/infiniband/hw/qib/qib_iba7220.c static inline void qib_write_kreg_ctxt(const struct qib_devdata *dd,
dd                286 drivers/infiniband/hw/qib/qib_iba7220.c 	qib_write_kreg(dd, regno + ctxt, value);
dd                289 drivers/infiniband/hw/qib/qib_iba7220.c static inline void write_7220_creg(const struct qib_devdata *dd,
dd                292 drivers/infiniband/hw/qib/qib_iba7220.c 	if (dd->cspec->cregbase && (dd->flags & QIB_PRESENT))
dd                293 drivers/infiniband/hw/qib/qib_iba7220.c 		writeq(value, &dd->cspec->cregbase[regno]);
dd                296 drivers/infiniband/hw/qib/qib_iba7220.c static inline u64 read_7220_creg(const struct qib_devdata *dd, u16 regno)
dd                298 drivers/infiniband/hw/qib/qib_iba7220.c 	if (!dd->cspec->cregbase || !(dd->flags & QIB_PRESENT))
dd                300 drivers/infiniband/hw/qib/qib_iba7220.c 	return readq(&dd->cspec->cregbase[regno]);
dd                303 drivers/infiniband/hw/qib/qib_iba7220.c static inline u32 read_7220_creg32(const struct qib_devdata *dd, u16 regno)
dd                305 drivers/infiniband/hw/qib/qib_iba7220.c 	if (!dd->cspec->cregbase || !(dd->flags & QIB_PRESENT))
dd                307 drivers/infiniband/hw/qib/qib_iba7220.c 	return readl(&dd->cspec->cregbase[regno]);
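
The 7220 register accessors above (qib_iba7220.c:230-307) share one defensive shape: every read bails out with 0, and every write is silently dropped, unless the mapping exists and QIB_PRESENT is set, so register traffic issued during reset or after a fatal error never dereferences a dead BAR. A generic sketch of the guarded reader, with hypothetical names:

    #include <stdint.h>

    #define DEV_PRESENT 0x1u

    struct dev {
        volatile uint64_t *regs;    /* NULL until mapped */
        unsigned int flags;
    };

    /* Guarded read, mirroring read_7220_creg(): an unmapped or absent
     * device yields 0 instead of a wild pointer dereference. */
    static uint64_t guarded_read(const struct dev *dd, unsigned int regno)
    {
        if (!dd->regs || !(dd->flags & DEV_PRESENT))
            return 0;
        return dd->regs[regno];
    }
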
dd                754 drivers/infiniband/hw/qib/qib_iba7220.c 	struct qib_devdata *dd = ppd->dd;
dd                761 drivers/infiniband/hw/qib/qib_iba7220.c 	sbuf[0] = qib_read_kreg64(dd, kr_sendbuffererror);
dd                762 drivers/infiniband/hw/qib/qib_iba7220.c 	sbuf[1] = qib_read_kreg64(dd, kr_sendbuffererror + 1);
dd                763 drivers/infiniband/hw/qib/qib_iba7220.c 	sbuf[2] = qib_read_kreg64(dd, kr_sendbuffererror + 2);
dd                766 drivers/infiniband/hw/qib/qib_iba7220.c 		qib_disarm_piobufs_set(dd, sbuf,
dd                767 drivers/infiniband/hw/qib/qib_iba7220.c 				       dd->piobcnt2k + dd->piobcnt4k);
dd                770 drivers/infiniband/hw/qib/qib_iba7220.c static void qib_7220_txe_recover(struct qib_devdata *dd)
dd                772 drivers/infiniband/hw/qib/qib_iba7220.c 	qib_devinfo(dd->pcidev, "Recovering from TXE PIO parity error\n");
dd                773 drivers/infiniband/hw/qib/qib_iba7220.c 	qib_disarm_7220_senderrbufs(dd->pport);
dd                781 drivers/infiniband/hw/qib/qib_iba7220.c 	struct qib_devdata *dd = ppd->dd;
dd                800 drivers/infiniband/hw/qib/qib_iba7220.c 	spin_lock(&dd->sendctrl_lock);
dd                802 drivers/infiniband/hw/qib/qib_iba7220.c 	dd->sendctrl |= set_sendctrl;
dd                803 drivers/infiniband/hw/qib/qib_iba7220.c 	dd->sendctrl &= ~clr_sendctrl;
dd                805 drivers/infiniband/hw/qib/qib_iba7220.c 	qib_write_kreg(dd, kr_sendctrl, dd->sendctrl);
dd                806 drivers/infiniband/hw/qib/qib_iba7220.c 	qib_write_kreg(dd, kr_scratch, 0);
dd                808 drivers/infiniband/hw/qib/qib_iba7220.c 	spin_unlock(&dd->sendctrl_lock);
dd                862 drivers/infiniband/hw/qib/qib_iba7220.c 	ppd->dd->upd_pio_shadow  = 1; /* update our idea of what's busy */
dd                872 drivers/infiniband/hw/qib/qib_iba7220.c 	qib_write_kreg(ppd->dd, kr_senddmalengen, ppd->sdma_descq_cnt);
dd                873 drivers/infiniband/hw/qib/qib_iba7220.c 	qib_write_kreg(ppd->dd, kr_senddmalengen,
dd                900 drivers/infiniband/hw/qib/qib_iba7220.c 	struct qib_devdata *dd = ppd->dd;
dd                905 drivers/infiniband/hw/qib/qib_iba7220.c 	msg = dd->cspec->sdmamsgbuf;
dd                907 drivers/infiniband/hw/qib/qib_iba7220.c 		sizeof(dd->cspec->sdmamsgbuf));
dd                913 drivers/infiniband/hw/qib/qib_iba7220.c 		sbuf[0] = qib_read_kreg64(dd, kr_sendbuffererror);
dd                914 drivers/infiniband/hw/qib/qib_iba7220.c 		sbuf[1] = qib_read_kreg64(dd, kr_sendbuffererror + 1);
dd                915 drivers/infiniband/hw/qib/qib_iba7220.c 		sbuf[2] = qib_read_kreg64(dd, kr_sendbuffererror + 2);
dd                917 drivers/infiniband/hw/qib/qib_iba7220.c 		qib_dev_err(ppd->dd,
dd                919 drivers/infiniband/hw/qib/qib_iba7220.c 			    ppd->dd->unit, ppd->port, sbuf[2], sbuf[1],
dd                924 drivers/infiniband/hw/qib/qib_iba7220.c 		qib_dev_err(dd, "IB%u:%u SDmaUnexpData\n", ppd->dd->unit,
dd                970 drivers/infiniband/hw/qib/qib_iba7220.c static int qib_decode_7220_err(struct qib_devdata *dd, char *buf, size_t blen,
dd               1038 drivers/infiniband/hw/qib/qib_iba7220.c 		qib_decode_7220_sdma_errs(dd->pport, err, buf, blen);
dd               1094 drivers/infiniband/hw/qib/qib_iba7220.c static void handle_7220_errors(struct qib_devdata *dd, u64 errs)
dd               1099 drivers/infiniband/hw/qib/qib_iba7220.c 	struct qib_pportdata *ppd = dd->pport;
dd               1103 drivers/infiniband/hw/qib/qib_iba7220.c 	errs &= dd->cspec->errormask;
dd               1104 drivers/infiniband/hw/qib/qib_iba7220.c 	msg = dd->cspec->emsgbuf;
dd               1108 drivers/infiniband/hw/qib/qib_iba7220.c 		qib_7220_handle_hwerrors(dd, msg, sizeof(dd->cspec->emsgbuf));
dd               1114 drivers/infiniband/hw/qib/qib_iba7220.c 		qib_dev_err(dd,
dd               1143 drivers/infiniband/hw/qib/qib_iba7220.c 	qib_write_kreg(dd, kr_errclear, errs);
dd               1158 drivers/infiniband/hw/qib/qib_iba7220.c 	qib_decode_7220_err(dd, msg, sizeof(dd->cspec->emsgbuf), errs & ~mask);
dd               1170 drivers/infiniband/hw/qib/qib_iba7220.c 		ibcs = qib_read_kreg64(dd, kr_ibcstatus);
dd               1195 drivers/infiniband/hw/qib/qib_iba7220.c 		qib_dev_err(dd,
dd               1197 drivers/infiniband/hw/qib/qib_iba7220.c 		dd->flags &= ~QIB_INITTED;  /* needs re-init */
dd               1199 drivers/infiniband/hw/qib/qib_iba7220.c 		*dd->devstatusp |= QIB_STATUS_HWERROR;
dd               1200 drivers/infiniband/hw/qib/qib_iba7220.c 		*dd->pport->statusp &= ~QIB_STATUS_IB_CONF;
dd               1204 drivers/infiniband/hw/qib/qib_iba7220.c 		qib_dev_porterr(dd, ppd->port, "%s error\n", msg);
dd               1217 drivers/infiniband/hw/qib/qib_iba7220.c 		qib_handle_urcv(dd, ~0U);
dd               1228 drivers/infiniband/hw/qib/qib_iba7220.c static void qib_7220_set_intr_state(struct qib_devdata *dd, u32 enable)
dd               1231 drivers/infiniband/hw/qib/qib_iba7220.c 		if (dd->flags & QIB_BADINTR)
dd               1233 drivers/infiniband/hw/qib/qib_iba7220.c 		qib_write_kreg(dd, kr_intmask, ~0ULL);
dd               1235 drivers/infiniband/hw/qib/qib_iba7220.c 		qib_write_kreg(dd, kr_intclear, 0ULL);
dd               1237 drivers/infiniband/hw/qib/qib_iba7220.c 		qib_write_kreg(dd, kr_intmask, 0ULL);
dd               1255 drivers/infiniband/hw/qib/qib_iba7220.c static void qib_7220_clear_freeze(struct qib_devdata *dd)
dd               1258 drivers/infiniband/hw/qib/qib_iba7220.c 	qib_write_kreg(dd, kr_errmask, 0ULL);
dd               1261 drivers/infiniband/hw/qib/qib_iba7220.c 	qib_7220_set_intr_state(dd, 0);
dd               1263 drivers/infiniband/hw/qib/qib_iba7220.c 	qib_cancel_sends(dd->pport);
dd               1266 drivers/infiniband/hw/qib/qib_iba7220.c 	qib_write_kreg(dd, kr_control, dd->control);
dd               1267 drivers/infiniband/hw/qib/qib_iba7220.c 	qib_read_kreg32(dd, kr_scratch);
dd               1270 drivers/infiniband/hw/qib/qib_iba7220.c 	qib_force_pio_avail_update(dd);
dd               1278 drivers/infiniband/hw/qib/qib_iba7220.c 	qib_write_kreg(dd, kr_hwerrclear, 0ULL);
dd               1279 drivers/infiniband/hw/qib/qib_iba7220.c 	qib_write_kreg(dd, kr_errclear, E_SPKT_ERRS_IGNORE);
dd               1280 drivers/infiniband/hw/qib/qib_iba7220.c 	qib_write_kreg(dd, kr_errmask, dd->cspec->errormask);
dd               1281 drivers/infiniband/hw/qib/qib_iba7220.c 	qib_7220_set_intr_state(dd, 1);
dd               1295 drivers/infiniband/hw/qib/qib_iba7220.c static void qib_7220_handle_hwerrors(struct qib_devdata *dd, char *msg,
dd               1303 drivers/infiniband/hw/qib/qib_iba7220.c 	hwerrs = qib_read_kreg64(dd, kr_hwerrstatus);
dd               1307 drivers/infiniband/hw/qib/qib_iba7220.c 		qib_dev_err(dd,
dd               1320 drivers/infiniband/hw/qib/qib_iba7220.c 	qib_write_kreg(dd, kr_hwerrclear,
dd               1323 drivers/infiniband/hw/qib/qib_iba7220.c 	hwerrs &= dd->cspec->hwerrmask;
dd               1327 drivers/infiniband/hw/qib/qib_iba7220.c 		qib_devinfo(dd->pcidev,
dd               1332 drivers/infiniband/hw/qib/qib_iba7220.c 		qib_dev_err(dd,
dd               1337 drivers/infiniband/hw/qib/qib_iba7220.c 		qib_sd7220_clr_ibpar(dd);
dd               1339 drivers/infiniband/hw/qib/qib_iba7220.c 	ctrl = qib_read_kreg32(dd, kr_control);
dd               1340 drivers/infiniband/hw/qib/qib_iba7220.c 	if ((ctrl & QLOGIC_IB_C_FREEZEMODE) && !dd->diag_client) {
dd               1347 drivers/infiniband/hw/qib/qib_iba7220.c 			qib_7220_txe_recover(dd);
dd               1354 drivers/infiniband/hw/qib/qib_iba7220.c 			qib_7220_clear_freeze(dd);
dd               1365 drivers/infiniband/hw/qib/qib_iba7220.c 		dd->cspec->hwerrmask &= ~HWE_MASK(PowerOnBISTFailed);
dd               1366 drivers/infiniband/hw/qib/qib_iba7220.c 		qib_write_kreg(dd, kr_hwerrmask, dd->cspec->hwerrmask);
dd               1372 drivers/infiniband/hw/qib/qib_iba7220.c 	bitsmsg = dd->cspec->bitsmsgbuf;
dd               1378 drivers/infiniband/hw/qib/qib_iba7220.c 		snprintf(bitsmsg, sizeof(dd->cspec->bitsmsgbuf),
dd               1388 drivers/infiniband/hw/qib/qib_iba7220.c 		snprintf(bitsmsg, sizeof(dd->cspec->bitsmsgbuf),
dd               1393 drivers/infiniband/hw/qib/qib_iba7220.c 		dd->cspec->hwerrmask &= ~(hwerrs & _QIB_PLL_FAIL);
dd               1394 drivers/infiniband/hw/qib/qib_iba7220.c 		qib_write_kreg(dd, kr_hwerrmask, dd->cspec->hwerrmask);
dd               1402 drivers/infiniband/hw/qib/qib_iba7220.c 		dd->cspec->hwerrmask &= ~QLOGIC_IB_HWE_SERDESPLLFAILED;
dd               1403 drivers/infiniband/hw/qib/qib_iba7220.c 		qib_write_kreg(dd, kr_hwerrmask, dd->cspec->hwerrmask);
dd               1406 drivers/infiniband/hw/qib/qib_iba7220.c 	qib_dev_err(dd, "%s hardware error\n", msg);
dd               1408 drivers/infiniband/hw/qib/qib_iba7220.c 	if (isfatal && !dd->diag_client) {
dd               1409 drivers/infiniband/hw/qib/qib_iba7220.c 		qib_dev_err(dd,
dd               1411 drivers/infiniband/hw/qib/qib_iba7220.c 			dd->serial);
dd               1416 drivers/infiniband/hw/qib/qib_iba7220.c 		if (dd->freezemsg)
dd               1417 drivers/infiniband/hw/qib/qib_iba7220.c 			snprintf(dd->freezemsg, dd->freezelen,
dd               1419 drivers/infiniband/hw/qib/qib_iba7220.c 		qib_disable_after_error(dd);
dd               1434 drivers/infiniband/hw/qib/qib_iba7220.c static void qib_7220_init_hwerrors(struct qib_devdata *dd)
dd               1439 drivers/infiniband/hw/qib/qib_iba7220.c 	extsval = qib_read_kreg64(dd, kr_extstatus);
dd               1443 drivers/infiniband/hw/qib/qib_iba7220.c 		qib_dev_err(dd, "MemBIST did not complete!\n");
dd               1445 drivers/infiniband/hw/qib/qib_iba7220.c 		qib_devinfo(dd->pcidev, "MemBIST is disabled.\n");
dd               1450 drivers/infiniband/hw/qib/qib_iba7220.c 	dd->cspec->hwerrmask = val;
dd               1452 drivers/infiniband/hw/qib/qib_iba7220.c 	qib_write_kreg(dd, kr_hwerrclear, ~HWE_MASK(PowerOnBISTFailed));
dd               1453 drivers/infiniband/hw/qib/qib_iba7220.c 	qib_write_kreg(dd, kr_hwerrmask, dd->cspec->hwerrmask);
dd               1456 drivers/infiniband/hw/qib/qib_iba7220.c 	qib_write_kreg(dd, kr_errclear, ~0ULL);
dd               1458 drivers/infiniband/hw/qib/qib_iba7220.c 	qib_write_kreg(dd, kr_errmask, ~0ULL);
dd               1459 drivers/infiniband/hw/qib/qib_iba7220.c 	dd->cspec->errormask = qib_read_kreg64(dd, kr_errmask);
dd               1461 drivers/infiniband/hw/qib/qib_iba7220.c 	qib_write_kreg(dd, kr_intclear, ~0ULL);
dd               1470 drivers/infiniband/hw/qib/qib_iba7220.c static void qib_set_7220_armlaunch(struct qib_devdata *dd, u32 enable)
dd               1473 drivers/infiniband/hw/qib/qib_iba7220.c 		qib_write_kreg(dd, kr_errclear, ERR_MASK(SendPioArmLaunchErr));
dd               1474 drivers/infiniband/hw/qib/qib_iba7220.c 		dd->cspec->errormask |= ERR_MASK(SendPioArmLaunchErr);
dd               1476 drivers/infiniband/hw/qib/qib_iba7220.c 		dd->cspec->errormask &= ~ERR_MASK(SendPioArmLaunchErr);
dd               1477 drivers/infiniband/hw/qib/qib_iba7220.c 	qib_write_kreg(dd, kr_errmask, dd->cspec->errormask);
dd               1489 drivers/infiniband/hw/qib/qib_iba7220.c 	struct qib_devdata *dd = ppd->dd;
dd               1514 drivers/infiniband/hw/qib/qib_iba7220.c 	qib_write_kreg(dd, kr_ibcctrl, ppd->cpspec->ibcctrl | mod_wd);
dd               1516 drivers/infiniband/hw/qib/qib_iba7220.c 	qib_write_kreg(dd, kr_scratch, 0);
dd               1532 drivers/infiniband/hw/qib/qib_iba7220.c 	struct qib_devdata *dd = ppd->dd;
dd               1537 drivers/infiniband/hw/qib/qib_iba7220.c 	dd->control &= ~QLOGIC_IB_C_LINKENABLE;
dd               1538 drivers/infiniband/hw/qib/qib_iba7220.c 	qib_write_kreg(dd, kr_control, 0ULL);
dd               1542 drivers/infiniband/hw/qib/qib_iba7220.c 		ppd->cpspec->ibsymsnap = read_7220_creg32(dd, cr_ibsymbolerr);
dd               1544 drivers/infiniband/hw/qib/qib_iba7220.c 			read_7220_creg32(dd, cr_iblinkerrrecov);
dd               1571 drivers/infiniband/hw/qib/qib_iba7220.c 	qib_write_kreg(dd, kr_ibcctrl, val);
dd               1575 drivers/infiniband/hw/qib/qib_iba7220.c 		ppd->cpspec->ibcddrctrl = qib_read_kreg64(dd, kr_ibcddrctrl);
dd               1604 drivers/infiniband/hw/qib/qib_iba7220.c 		qib_write_kreg(dd, kr_scratch, 0);
dd               1606 drivers/infiniband/hw/qib/qib_iba7220.c 	qib_write_kreg(dd, kr_ibcddrctrl, ppd->cpspec->ibcddrctrl);
dd               1607 drivers/infiniband/hw/qib/qib_iba7220.c 	qib_write_kreg(dd, kr_scratch, 0);
dd               1609 drivers/infiniband/hw/qib/qib_iba7220.c 	qib_write_kreg(dd, kr_ncmodectrl, 0Ull);
dd               1610 drivers/infiniband/hw/qib/qib_iba7220.c 	qib_write_kreg(dd, kr_scratch, 0);
dd               1612 drivers/infiniband/hw/qib/qib_iba7220.c 	ret = qib_sd7220_init(dd);
dd               1614 drivers/infiniband/hw/qib/qib_iba7220.c 	val = qib_read_kreg64(dd, kr_xgxs_cfg);
dd               1618 drivers/infiniband/hw/qib/qib_iba7220.c 		qib_write_kreg(dd, kr_xgxs_cfg, val);
dd               1619 drivers/infiniband/hw/qib/qib_iba7220.c 		qib_read_kreg32(dd, kr_scratch);
dd               1624 drivers/infiniband/hw/qib/qib_iba7220.c 		qib_write_kreg(dd, kr_xgxs_cfg, val);
dd               1628 drivers/infiniband/hw/qib/qib_iba7220.c 		ppd->guid = dd->base_guid;
dd               1631 drivers/infiniband/hw/qib/qib_iba7220.c 	qib_write_kreg(dd, kr_hrtbt_guid, guid);
dd               1633 drivers/infiniband/hw/qib/qib_iba7220.c 		dd->control |= QLOGIC_IB_C_LINKENABLE;
dd               1634 drivers/infiniband/hw/qib/qib_iba7220.c 		qib_write_kreg(dd, kr_control, dd->control);
dd               1637 drivers/infiniband/hw/qib/qib_iba7220.c 		qib_write_kreg(dd, kr_scratch, 0);
dd               1649 drivers/infiniband/hw/qib/qib_iba7220.c 	struct qib_devdata *dd = ppd->dd;
dd               1653 drivers/infiniband/hw/qib/qib_iba7220.c 	dd->control &= ~QLOGIC_IB_C_LINKENABLE;
dd               1654 drivers/infiniband/hw/qib/qib_iba7220.c 	qib_write_kreg(dd, kr_control,
dd               1655 drivers/infiniband/hw/qib/qib_iba7220.c 		       dd->control | QLOGIC_IB_C_FREEZEMODE);
dd               1666 drivers/infiniband/hw/qib/qib_iba7220.c 		diagc = qib_read_kreg64(dd, kr_hwdiagctrl);
dd               1667 drivers/infiniband/hw/qib/qib_iba7220.c 		qib_write_kreg(dd, kr_hwdiagctrl,
dd               1671 drivers/infiniband/hw/qib/qib_iba7220.c 			val = read_7220_creg32(dd, cr_ibsymbolerr);
dd               1675 drivers/infiniband/hw/qib/qib_iba7220.c 			write_7220_creg(dd, cr_ibsymbolerr, val);
dd               1678 drivers/infiniband/hw/qib/qib_iba7220.c 			val = read_7220_creg32(dd, cr_iblinkerrrecov);
dd               1682 drivers/infiniband/hw/qib/qib_iba7220.c 			write_7220_creg(dd, cr_iblinkerrrecov, val);
dd               1686 drivers/infiniband/hw/qib/qib_iba7220.c 		qib_write_kreg(dd, kr_hwdiagctrl, diagc);
dd               1696 drivers/infiniband/hw/qib/qib_iba7220.c 	shutdown_7220_relock_poll(ppd->dd);
dd               1697 drivers/infiniband/hw/qib/qib_iba7220.c 	val = qib_read_kreg64(ppd->dd, kr_xgxs_cfg);
dd               1699 drivers/infiniband/hw/qib/qib_iba7220.c 	qib_write_kreg(ppd->dd, kr_xgxs_cfg, val);
dd               1727 drivers/infiniband/hw/qib/qib_iba7220.c 	struct qib_devdata *dd = ppd->dd;
dd               1735 drivers/infiniband/hw/qib/qib_iba7220.c 	if (dd->diag_client)
dd               1744 drivers/infiniband/hw/qib/qib_iba7220.c 		val = qib_read_kreg64(dd, kr_ibcstatus);
dd               1752 drivers/infiniband/hw/qib/qib_iba7220.c 	spin_lock_irqsave(&dd->cspec->gpio_lock, flags);
dd               1753 drivers/infiniband/hw/qib/qib_iba7220.c 	extctl = dd->cspec->extctrl & ~(SYM_MASK(EXTCtrl, LEDPriPortGreenOn) |
dd               1767 drivers/infiniband/hw/qib/qib_iba7220.c 	dd->cspec->extctrl = extctl;
dd               1768 drivers/infiniband/hw/qib/qib_iba7220.c 	qib_write_kreg(dd, kr_extctrl, extctl);
dd               1769 drivers/infiniband/hw/qib/qib_iba7220.c 	spin_unlock_irqrestore(&dd->cspec->gpio_lock, flags);
dd               1772 drivers/infiniband/hw/qib/qib_iba7220.c 		qib_write_kreg(dd, kr_rcvpktledcnt, ledblink);
dd               1782 drivers/infiniband/hw/qib/qib_iba7220.c static void qib_setup_7220_cleanup(struct qib_devdata *dd)
dd               1784 drivers/infiniband/hw/qib/qib_iba7220.c 	qib_free_irq(dd);
dd               1785 drivers/infiniband/hw/qib/qib_iba7220.c 	kfree(dd->cspec->cntrs);
dd               1786 drivers/infiniband/hw/qib/qib_iba7220.c 	kfree(dd->cspec->portcntrs);
dd               1828 drivers/infiniband/hw/qib/qib_iba7220.c static void qib_wantpiobuf_7220_intr(struct qib_devdata *dd, u32 needint)
dd               1832 drivers/infiniband/hw/qib/qib_iba7220.c 	spin_lock_irqsave(&dd->sendctrl_lock, flags);
dd               1834 drivers/infiniband/hw/qib/qib_iba7220.c 		if (!(dd->sendctrl & SYM_MASK(SendCtrl, SendBufAvailUpd)))
dd               1841 drivers/infiniband/hw/qib/qib_iba7220.c 		qib_write_kreg(dd, kr_sendctrl, dd->sendctrl &
dd               1843 drivers/infiniband/hw/qib/qib_iba7220.c 		qib_write_kreg(dd, kr_scratch, 0ULL);
dd               1844 drivers/infiniband/hw/qib/qib_iba7220.c 		dd->sendctrl |= SYM_MASK(SendCtrl, SendIntBufAvail);
dd               1846 drivers/infiniband/hw/qib/qib_iba7220.c 		dd->sendctrl &= ~SYM_MASK(SendCtrl, SendIntBufAvail);
dd               1847 drivers/infiniband/hw/qib/qib_iba7220.c 	qib_write_kreg(dd, kr_sendctrl, dd->sendctrl);
dd               1848 drivers/infiniband/hw/qib/qib_iba7220.c 	qib_write_kreg(dd, kr_scratch, 0ULL);
dd               1850 drivers/infiniband/hw/qib/qib_iba7220.c 	spin_unlock_irqrestore(&dd->sendctrl_lock, flags);
dd               1857 drivers/infiniband/hw/qib/qib_iba7220.c static noinline void unlikely_7220_intr(struct qib_devdata *dd, u64 istat)
dd               1860 drivers/infiniband/hw/qib/qib_iba7220.c 		qib_dev_err(dd,
dd               1874 drivers/infiniband/hw/qib/qib_iba7220.c 		gpiostatus = qib_read_kreg32(dd, kr_gpio_status);
dd               1882 drivers/infiniband/hw/qib/qib_iba7220.c 		qib_write_kreg(dd, kr_gpio_clear, gpiostatus);
dd               1885 drivers/infiniband/hw/qib/qib_iba7220.c 			const u32 mask = qib_read_kreg32(dd, kr_gpio_mask);
dd               1899 drivers/infiniband/hw/qib/qib_iba7220.c 			dd->cspec->gpio_mask &= ~gpio_irq;
dd               1900 drivers/infiniband/hw/qib/qib_iba7220.c 			qib_write_kreg(dd, kr_gpio_mask, dd->cspec->gpio_mask);
dd               1908 drivers/infiniband/hw/qib/qib_iba7220.c 		estat = qib_read_kreg64(dd, kr_errstatus);
dd               1910 drivers/infiniband/hw/qib/qib_iba7220.c 			qib_devinfo(dd->pcidev,
dd               1914 drivers/infiniband/hw/qib/qib_iba7220.c 			handle_7220_errors(dd, estat);
dd               1920 drivers/infiniband/hw/qib/qib_iba7220.c 	struct qib_devdata *dd = data;
dd               1927 drivers/infiniband/hw/qib/qib_iba7220.c 	if ((dd->flags & (QIB_PRESENT | QIB_BADINTR)) != QIB_PRESENT) {
dd               1938 drivers/infiniband/hw/qib/qib_iba7220.c 	istat = qib_read_kreg64(dd, kr_intstatus);
dd               1945 drivers/infiniband/hw/qib/qib_iba7220.c 		qib_bad_intrstatus(dd);
dd               1951 drivers/infiniband/hw/qib/qib_iba7220.c 	this_cpu_inc(*dd->int_counter);
dd               1954 drivers/infiniband/hw/qib/qib_iba7220.c 		unlikely_7220_intr(dd, istat);
dd               1962 drivers/infiniband/hw/qib/qib_iba7220.c 	qib_write_kreg(dd, kr_intclear, istat);
dd               1975 drivers/infiniband/hw/qib/qib_iba7220.c 		for (i = 0; i < dd->first_user_ctxt; i++) {
dd               1978 drivers/infiniband/hw/qib/qib_iba7220.c 				qib_kreceive(dd->rcd[i], NULL, NULL);
dd               1986 drivers/infiniband/hw/qib/qib_iba7220.c 			qib_handle_urcv(dd, ctxtrbits);
dd               1992 drivers/infiniband/hw/qib/qib_iba7220.c 		sdma_7220_intr(dd->pport, istat);
dd               1994 drivers/infiniband/hw/qib/qib_iba7220.c 	if ((istat & QLOGIC_IB_I_SPIOBUFAVAIL) && (dd->flags & QIB_INITTED))
dd               1995 drivers/infiniband/hw/qib/qib_iba7220.c 		qib_ib_piobufavail(dd);
dd               2010 drivers/infiniband/hw/qib/qib_iba7220.c static void qib_setup_7220_interrupt(struct qib_devdata *dd)
dd               2014 drivers/infiniband/hw/qib/qib_iba7220.c 	ret = pci_request_irq(dd->pcidev, 0, qib_7220intr, NULL, dd,
dd               2017 drivers/infiniband/hw/qib/qib_iba7220.c 		qib_dev_err(dd, "Couldn't setup %s interrupt (irq=%d): %d\n",
dd               2018 drivers/infiniband/hw/qib/qib_iba7220.c 			    dd->pcidev->msi_enabled ?  "MSI" : "INTx",
dd               2019 drivers/infiniband/hw/qib/qib_iba7220.c 			    pci_irq_vector(dd->pcidev, 0), ret);
dd               2028 drivers/infiniband/hw/qib/qib_iba7220.c static void qib_7220_boardname(struct qib_devdata *dd)
dd               2032 drivers/infiniband/hw/qib/qib_iba7220.c 	boardid = SYM_FIELD(dd->revision, Revision,
dd               2037 drivers/infiniband/hw/qib/qib_iba7220.c 		dd->boardname = "InfiniPath_QLE7240";
dd               2040 drivers/infiniband/hw/qib/qib_iba7220.c 		dd->boardname = "InfiniPath_QLE7280";
dd               2043 drivers/infiniband/hw/qib/qib_iba7220.c 		qib_dev_err(dd, "Unknown 7220 board with ID %u\n", boardid);
dd               2044 drivers/infiniband/hw/qib/qib_iba7220.c 		dd->boardname = "Unknown_InfiniPath_7220";
dd               2048 drivers/infiniband/hw/qib/qib_iba7220.c 	if (dd->majrev != 5 || !dd->minrev || dd->minrev > 2)
dd               2049 drivers/infiniband/hw/qib/qib_iba7220.c 		qib_dev_err(dd,
dd               2051 drivers/infiniband/hw/qib/qib_iba7220.c 			    dd->majrev, dd->minrev);
dd               2053 drivers/infiniband/hw/qib/qib_iba7220.c 	snprintf(dd->boardversion, sizeof(dd->boardversion),
dd               2055 drivers/infiniband/hw/qib/qib_iba7220.c 		 QIB_CHIP_VERS_MAJ, QIB_CHIP_VERS_MIN, dd->boardname,
dd               2056 drivers/infiniband/hw/qib/qib_iba7220.c 		 (unsigned int)SYM_FIELD(dd->revision, Revision_R, Arch),
dd               2057 drivers/infiniband/hw/qib/qib_iba7220.c 		 dd->majrev, dd->minrev,
dd               2058 drivers/infiniband/hw/qib/qib_iba7220.c 		 (unsigned int)SYM_FIELD(dd->revision, Revision_R, SW));
dd               2065 drivers/infiniband/hw/qib/qib_iba7220.c static int qib_setup_7220_reset(struct qib_devdata *dd)
dd               2074 drivers/infiniband/hw/qib/qib_iba7220.c 	qib_pcie_getcmd(dd, &cmdval, &int_line, &clinesz);
dd               2077 drivers/infiniband/hw/qib/qib_iba7220.c 	qib_dev_err(dd, "Resetting InfiniPath unit %u\n", dd->unit);
dd               2080 drivers/infiniband/hw/qib/qib_iba7220.c 	qib_7220_set_intr_state(dd, 0);
dd               2082 drivers/infiniband/hw/qib/qib_iba7220.c 	dd->pport->cpspec->ibdeltainprog = 0;
dd               2083 drivers/infiniband/hw/qib/qib_iba7220.c 	dd->pport->cpspec->ibsymdelta = 0;
dd               2084 drivers/infiniband/hw/qib/qib_iba7220.c 	dd->pport->cpspec->iblnkerrdelta = 0;
dd               2091 drivers/infiniband/hw/qib/qib_iba7220.c 	dd->flags &= ~(QIB_INITTED | QIB_PRESENT);
dd               2093 drivers/infiniband/hw/qib/qib_iba7220.c 	dd->z_int_counter = qib_int_counter(dd);
dd               2094 drivers/infiniband/hw/qib/qib_iba7220.c 	val = dd->control | QLOGIC_IB_C_RESET;
dd               2095 drivers/infiniband/hw/qib/qib_iba7220.c 	writeq(val, &dd->kregbase[kr_control]);
dd               2106 drivers/infiniband/hw/qib/qib_iba7220.c 		qib_pcie_reenable(dd, cmdval, int_line, clinesz);
dd               2112 drivers/infiniband/hw/qib/qib_iba7220.c 		val = readq(&dd->kregbase[kr_revision]);
dd               2113 drivers/infiniband/hw/qib/qib_iba7220.c 		if (val == dd->revision) {
dd               2114 drivers/infiniband/hw/qib/qib_iba7220.c 			dd->flags |= QIB_PRESENT; /* it's back */
dd               2115 drivers/infiniband/hw/qib/qib_iba7220.c 			ret = qib_reinit_intr(dd);
dd               2123 drivers/infiniband/hw/qib/qib_iba7220.c 		if (qib_pcie_params(dd, dd->lbus_width, NULL))
dd               2124 drivers/infiniband/hw/qib/qib_iba7220.c 			qib_dev_err(dd,
dd               2128 drivers/infiniband/hw/qib/qib_iba7220.c 		qib_write_kreg(dd, kr_control, 0ULL);
dd               2131 drivers/infiniband/hw/qib/qib_iba7220.c 		qib_7220_init_hwerrors(dd);
dd               2134 drivers/infiniband/hw/qib/qib_iba7220.c 		if (dd->pport->cpspec->ibcddrctrl & IBA7220_IBC_IBTA_1_2_MASK)
dd               2135 drivers/infiniband/hw/qib/qib_iba7220.c 			dd->cspec->presets_needed = 1;
dd               2136 drivers/infiniband/hw/qib/qib_iba7220.c 		spin_lock_irqsave(&dd->pport->lflags_lock, flags);
dd               2137 drivers/infiniband/hw/qib/qib_iba7220.c 		dd->pport->lflags |= QIBL_IB_FORCE_NOTIFY;
dd               2138 drivers/infiniband/hw/qib/qib_iba7220.c 		dd->pport->lflags &= ~QIBL_IB_AUTONEG_FAILED;
dd               2139 drivers/infiniband/hw/qib/qib_iba7220.c 		spin_unlock_irqrestore(&dd->pport->lflags_lock, flags);
dd               2152 drivers/infiniband/hw/qib/qib_iba7220.c static void qib_7220_put_tid(struct qib_devdata *dd, u64 __iomem *tidptr,
dd               2155 drivers/infiniband/hw/qib/qib_iba7220.c 	if (pa != dd->tidinvalid) {
dd               2160 drivers/infiniband/hw/qib/qib_iba7220.c 			qib_dev_err(dd, "Physaddr %lx not 2KB aligned!\n",
dd               2165 drivers/infiniband/hw/qib/qib_iba7220.c 			qib_dev_err(dd,
dd               2172 drivers/infiniband/hw/qib/qib_iba7220.c 			chippa |= dd->tidtemplate;
dd               2190 drivers/infiniband/hw/qib/qib_iba7220.c static void qib_7220_clear_tids(struct qib_devdata *dd,
dd               2198 drivers/infiniband/hw/qib/qib_iba7220.c 	if (!dd->kregbase || !rcd)
dd               2203 drivers/infiniband/hw/qib/qib_iba7220.c 	tidinv = dd->tidinvalid;
dd               2205 drivers/infiniband/hw/qib/qib_iba7220.c 		((char __iomem *)(dd->kregbase) +
dd               2206 drivers/infiniband/hw/qib/qib_iba7220.c 		 dd->rcvtidbase +
dd               2207 drivers/infiniband/hw/qib/qib_iba7220.c 		 ctxt * dd->rcvtidcnt * sizeof(*tidbase));
dd               2209 drivers/infiniband/hw/qib/qib_iba7220.c 	for (i = 0; i < dd->rcvtidcnt; i++)
dd               2210 drivers/infiniband/hw/qib/qib_iba7220.c 		qib_7220_put_tid(dd, &tidbase[i], RCVHQ_RCV_TYPE_EXPECTED,
dd               2214 drivers/infiniband/hw/qib/qib_iba7220.c 		((char __iomem *)(dd->kregbase) +
dd               2215 drivers/infiniband/hw/qib/qib_iba7220.c 		 dd->rcvegrbase +
dd               2219 drivers/infiniband/hw/qib/qib_iba7220.c 		qib_7220_put_tid(dd, &tidbase[i], RCVHQ_RCV_TYPE_EAGER,
dd               2229 drivers/infiniband/hw/qib/qib_iba7220.c static void qib_7220_tidtemplate(struct qib_devdata *dd)
dd               2231 drivers/infiniband/hw/qib/qib_iba7220.c 	if (dd->rcvegrbufsize == 2048)
dd               2232 drivers/infiniband/hw/qib/qib_iba7220.c 		dd->tidtemplate = IBA7220_TID_SZ_2K;
dd               2233 drivers/infiniband/hw/qib/qib_iba7220.c 	else if (dd->rcvegrbufsize == 4096)
dd               2234 drivers/infiniband/hw/qib/qib_iba7220.c 		dd->tidtemplate = IBA7220_TID_SZ_4K;
dd               2235 drivers/infiniband/hw/qib/qib_iba7220.c 	dd->tidinvalid = 0;
dd               2252 drivers/infiniband/hw/qib/qib_iba7220.c 	if (rcd->dd->flags & QIB_USE_SPCL_TRIG)
dd               2259 drivers/infiniband/hw/qib/qib_iba7220.c qib_7220_get_msgheader(struct qib_devdata *dd, __le32 *rhf_addr)
dd               2264 drivers/infiniband/hw/qib/qib_iba7220.c 		(rhf_addr - dd->rhf_offset + offset);
dd               2267 drivers/infiniband/hw/qib/qib_iba7220.c static void qib_7220_config_ctxts(struct qib_devdata *dd)
dd               2272 drivers/infiniband/hw/qib/qib_iba7220.c 	nchipctxts = qib_read_kreg32(dd, kr_portcnt);
dd               2273 drivers/infiniband/hw/qib/qib_iba7220.c 	dd->cspec->numctxts = nchipctxts;
dd               2275 drivers/infiniband/hw/qib/qib_iba7220.c 		dd->qpn_mask = 0x3e;
dd               2276 drivers/infiniband/hw/qib/qib_iba7220.c 		dd->first_user_ctxt = qib_n_krcv_queues * dd->num_pports;
dd               2277 drivers/infiniband/hw/qib/qib_iba7220.c 		if (dd->first_user_ctxt > nchipctxts)
dd               2278 drivers/infiniband/hw/qib/qib_iba7220.c 			dd->first_user_ctxt = nchipctxts;
dd               2280 drivers/infiniband/hw/qib/qib_iba7220.c 		dd->first_user_ctxt = dd->num_pports;
dd               2281 drivers/infiniband/hw/qib/qib_iba7220.c 	dd->n_krcv_queues = dd->first_user_ctxt;
dd               2284 drivers/infiniband/hw/qib/qib_iba7220.c 		int nctxts = dd->first_user_ctxt + num_online_cpus();
dd               2287 drivers/infiniband/hw/qib/qib_iba7220.c 			dd->ctxtcnt = 5;
dd               2289 drivers/infiniband/hw/qib/qib_iba7220.c 			dd->ctxtcnt = 9;
dd               2291 drivers/infiniband/hw/qib/qib_iba7220.c 			dd->ctxtcnt = nchipctxts;
dd               2293 drivers/infiniband/hw/qib/qib_iba7220.c 		dd->ctxtcnt = qib_cfgctxts;
dd               2294 drivers/infiniband/hw/qib/qib_iba7220.c 	if (!dd->ctxtcnt) /* none of the above, set to max */
dd               2295 drivers/infiniband/hw/qib/qib_iba7220.c 		dd->ctxtcnt = nchipctxts;
dd               2302 drivers/infiniband/hw/qib/qib_iba7220.c 	spin_lock_irqsave(&dd->cspec->rcvmod_lock, flags);
dd               2303 drivers/infiniband/hw/qib/qib_iba7220.c 	if (dd->ctxtcnt > 9)
dd               2304 drivers/infiniband/hw/qib/qib_iba7220.c 		dd->rcvctrl |= 2ULL << IBA7220_R_CTXTCFG_SHIFT;
dd               2305 drivers/infiniband/hw/qib/qib_iba7220.c 	else if (dd->ctxtcnt > 5)
dd               2306 drivers/infiniband/hw/qib/qib_iba7220.c 		dd->rcvctrl |= 1ULL << IBA7220_R_CTXTCFG_SHIFT;
dd               2308 drivers/infiniband/hw/qib/qib_iba7220.c 	if (dd->qpn_mask)
dd               2309 drivers/infiniband/hw/qib/qib_iba7220.c 		dd->rcvctrl |= 1ULL << QIB_7220_RcvCtrl_RcvQPMapEnable_LSB;
dd               2310 drivers/infiniband/hw/qib/qib_iba7220.c 	qib_write_kreg(dd, kr_rcvctrl, dd->rcvctrl);
dd               2311 drivers/infiniband/hw/qib/qib_iba7220.c 	spin_unlock_irqrestore(&dd->cspec->rcvmod_lock, flags);
dd               2314 drivers/infiniband/hw/qib/qib_iba7220.c 	dd->cspec->rcvegrcnt = qib_read_kreg32(dd, kr_rcvegrcnt);
dd               2315 drivers/infiniband/hw/qib/qib_iba7220.c 	dd->rcvhdrcnt = max(dd->cspec->rcvegrcnt, IBA7220_KRCVEGRCNT);
dd               2351 drivers/infiniband/hw/qib/qib_iba7220.c 		ret = qib_read_kreg64(ppd->dd, kr_ibcddrstatus)
dd               2408 drivers/infiniband/hw/qib/qib_iba7220.c 	struct qib_devdata *dd = ppd->dd;
dd               2462 drivers/infiniband/hw/qib/qib_iba7220.c 			dd->cspec->presets_needed = 1;
dd               2508 drivers/infiniband/hw/qib/qib_iba7220.c 			qib_write_kreg(dd, kr_ibcctrl, ppd->cpspec->ibcctrl);
dd               2509 drivers/infiniband/hw/qib/qib_iba7220.c 			qib_write_kreg(dd, kr_scratch, 0);
dd               2521 drivers/infiniband/hw/qib/qib_iba7220.c 			qib_write_kreg(dd, kr_ibcctrl, ppd->cpspec->ibcctrl);
dd               2522 drivers/infiniband/hw/qib/qib_iba7220.c 			qib_write_kreg(dd, kr_scratch, 0);
dd               2530 drivers/infiniband/hw/qib/qib_iba7220.c 		qib_write_kreg(dd, kr_partitionkey, maskr);
dd               2541 drivers/infiniband/hw/qib/qib_iba7220.c 		qib_write_kreg(dd, kr_ibcctrl, ppd->cpspec->ibcctrl);
dd               2542 drivers/infiniband/hw/qib/qib_iba7220.c 		qib_write_kreg(dd, kr_scratch, 0);
dd               2556 drivers/infiniband/hw/qib/qib_iba7220.c 		qib_write_kreg(dd, kr_ibcctrl, ppd->cpspec->ibcctrl);
dd               2557 drivers/infiniband/hw/qib/qib_iba7220.c 		qib_write_kreg(dd, kr_scratch, 0);
dd               2568 drivers/infiniband/hw/qib/qib_iba7220.c 					read_7220_creg32(dd, cr_ibsymbolerr);
dd               2570 drivers/infiniband/hw/qib/qib_iba7220.c 					read_7220_creg32(dd, cr_iblinkerrrecov);
dd               2584 drivers/infiniband/hw/qib/qib_iba7220.c 			qib_dev_err(dd, "bad linkcmd req 0x%x\n", val >> 16);
dd               2615 drivers/infiniband/hw/qib/qib_iba7220.c 			qib_dev_err(dd, "bad linkinitcmd req 0x%x\n",
dd               2635 drivers/infiniband/hw/qib/qib_iba7220.c 			qib_write_kreg(dd, kr_ibcddrctrl,
dd               2637 drivers/infiniband/hw/qib/qib_iba7220.c 			qib_write_kreg(dd, kr_scratch, 0);
dd               2659 drivers/infiniband/hw/qib/qib_iba7220.c 	qib_write_kreg(dd, kr_ibcddrctrl, ppd->cpspec->ibcddrctrl);
dd               2660 drivers/infiniband/hw/qib/qib_iba7220.c 	qib_write_kreg(dd, kr_scratch, 0);
dd               2678 drivers/infiniband/hw/qib/qib_iba7220.c 		qib_devinfo(ppd->dd->pcidev, "Enabling IB%u:%u IBC loopback\n",
dd               2679 drivers/infiniband/hw/qib/qib_iba7220.c 			 ppd->dd->unit, ppd->port);
dd               2684 drivers/infiniband/hw/qib/qib_iba7220.c 		qib_devinfo(ppd->dd->pcidev,
dd               2686 drivers/infiniband/hw/qib/qib_iba7220.c 			ppd->dd->unit, ppd->port);
dd               2690 drivers/infiniband/hw/qib/qib_iba7220.c 		qib_write_kreg(ppd->dd, kr_ibcctrl, ppd->cpspec->ibcctrl);
dd               2694 drivers/infiniband/hw/qib/qib_iba7220.c 		qib_write_kreg(ppd->dd, kr_ibcddrctrl,
dd               2696 drivers/infiniband/hw/qib/qib_iba7220.c 		qib_write_kreg(ppd->dd, kr_scratch, 0);
dd               2705 drivers/infiniband/hw/qib/qib_iba7220.c 		qib_write_ureg(rcd->dd, ur_rcvegrindexhead, egrhd, rcd->ctxt);
dd               2706 drivers/infiniband/hw/qib/qib_iba7220.c 	qib_write_ureg(rcd->dd, ur_rcvhdrhead, hd, rcd->ctxt);
dd               2713 drivers/infiniband/hw/qib/qib_iba7220.c 	head = qib_read_ureg32(rcd->dd, ur_rcvhdrhead, rcd->ctxt);
dd               2717 drivers/infiniband/hw/qib/qib_iba7220.c 		tail = qib_read_ureg32(rcd->dd, ur_rcvhdrtail, rcd->ctxt);
dd               2731 drivers/infiniband/hw/qib/qib_iba7220.c 	struct qib_devdata *dd = ppd->dd;
dd               2735 drivers/infiniband/hw/qib/qib_iba7220.c 	spin_lock_irqsave(&dd->cspec->rcvmod_lock, flags);
dd               2737 drivers/infiniband/hw/qib/qib_iba7220.c 		dd->rcvctrl |= (1ULL << IBA7220_R_TAILUPD_SHIFT);
dd               2739 drivers/infiniband/hw/qib/qib_iba7220.c 		dd->rcvctrl &= ~(1ULL << IBA7220_R_TAILUPD_SHIFT);
dd               2741 drivers/infiniband/hw/qib/qib_iba7220.c 		dd->rcvctrl &= ~(1ULL << IBA7220_R_PKEY_DIS_SHIFT);
dd               2743 drivers/infiniband/hw/qib/qib_iba7220.c 		dd->rcvctrl |= (1ULL << IBA7220_R_PKEY_DIS_SHIFT);
dd               2745 drivers/infiniband/hw/qib/qib_iba7220.c 		mask = (1ULL << dd->ctxtcnt) - 1;
dd               2750 drivers/infiniband/hw/qib/qib_iba7220.c 		dd->rcvctrl |= (mask << SYM_LSB(RcvCtrl, PortEnable));
dd               2751 drivers/infiniband/hw/qib/qib_iba7220.c 		if (!(dd->flags & QIB_NODMA_RTAIL))
dd               2752 drivers/infiniband/hw/qib/qib_iba7220.c 			dd->rcvctrl |= 1ULL << IBA7220_R_TAILUPD_SHIFT;
dd               2754 drivers/infiniband/hw/qib/qib_iba7220.c 		qib_write_kreg_ctxt(dd, kr_rcvhdrtailaddr, ctxt,
dd               2755 drivers/infiniband/hw/qib/qib_iba7220.c 			dd->rcd[ctxt]->rcvhdrqtailaddr_phys);
dd               2756 drivers/infiniband/hw/qib/qib_iba7220.c 		qib_write_kreg_ctxt(dd, kr_rcvhdraddr, ctxt,
dd               2757 drivers/infiniband/hw/qib/qib_iba7220.c 			dd->rcd[ctxt]->rcvhdrq_phys);
dd               2758 drivers/infiniband/hw/qib/qib_iba7220.c 		dd->rcd[ctxt]->seq_cnt = 1;
dd               2761 drivers/infiniband/hw/qib/qib_iba7220.c 		dd->rcvctrl &= ~(mask << SYM_LSB(RcvCtrl, PortEnable));
dd               2763 drivers/infiniband/hw/qib/qib_iba7220.c 		dd->rcvctrl |= (mask << IBA7220_R_INTRAVAIL_SHIFT);
dd               2765 drivers/infiniband/hw/qib/qib_iba7220.c 		dd->rcvctrl &= ~(mask << IBA7220_R_INTRAVAIL_SHIFT);
dd               2766 drivers/infiniband/hw/qib/qib_iba7220.c 	qib_write_kreg(dd, kr_rcvctrl, dd->rcvctrl);
dd               2767 drivers/infiniband/hw/qib/qib_iba7220.c 	if ((op & QIB_RCVCTRL_INTRAVAIL_ENB) && dd->rhdrhead_intr_off) {
dd               2769 drivers/infiniband/hw/qib/qib_iba7220.c 		val = qib_read_ureg32(dd, ur_rcvhdrhead, ctxt) |
dd               2770 drivers/infiniband/hw/qib/qib_iba7220.c 			dd->rhdrhead_intr_off;
dd               2771 drivers/infiniband/hw/qib/qib_iba7220.c 		qib_write_ureg(dd, ur_rcvhdrhead, val, ctxt);
dd               2780 drivers/infiniband/hw/qib/qib_iba7220.c 		val = qib_read_ureg32(dd, ur_rcvegrindextail, ctxt);
dd               2781 drivers/infiniband/hw/qib/qib_iba7220.c 		qib_write_ureg(dd, ur_rcvegrindexhead, val, ctxt);
dd               2783 drivers/infiniband/hw/qib/qib_iba7220.c 		val = qib_read_ureg32(dd, ur_rcvhdrtail, ctxt);
dd               2784 drivers/infiniband/hw/qib/qib_iba7220.c 		dd->rcd[ctxt]->head = val;
dd               2786 drivers/infiniband/hw/qib/qib_iba7220.c 		if (ctxt < dd->first_user_ctxt)
dd               2787 drivers/infiniband/hw/qib/qib_iba7220.c 			val |= dd->rhdrhead_intr_off;
dd               2788 drivers/infiniband/hw/qib/qib_iba7220.c 		qib_write_ureg(dd, ur_rcvhdrhead, val, ctxt);
dd               2792 drivers/infiniband/hw/qib/qib_iba7220.c 			qib_write_kreg_ctxt(dd, kr_rcvhdrtailaddr, ctxt, 0);
dd               2793 drivers/infiniband/hw/qib/qib_iba7220.c 			qib_write_kreg_ctxt(dd, kr_rcvhdraddr, ctxt, 0);
dd               2797 drivers/infiniband/hw/qib/qib_iba7220.c 			for (i = 0; i < dd->cfgctxts; i++) {
dd               2798 drivers/infiniband/hw/qib/qib_iba7220.c 				qib_write_kreg_ctxt(dd, kr_rcvhdrtailaddr,
dd               2800 drivers/infiniband/hw/qib/qib_iba7220.c 				qib_write_kreg_ctxt(dd, kr_rcvhdraddr, i, 0);
dd               2804 drivers/infiniband/hw/qib/qib_iba7220.c 	spin_unlock_irqrestore(&dd->cspec->rcvmod_lock, flags);
dd               2817 drivers/infiniband/hw/qib/qib_iba7220.c 	struct qib_devdata *dd = ppd->dd;
dd               2821 drivers/infiniband/hw/qib/qib_iba7220.c 	spin_lock_irqsave(&dd->sendctrl_lock, flags);
dd               2825 drivers/infiniband/hw/qib/qib_iba7220.c 		dd->sendctrl = 0;
dd               2827 drivers/infiniband/hw/qib/qib_iba7220.c 		dd->sendctrl &= ~SYM_MASK(SendCtrl, SPioEnable);
dd               2829 drivers/infiniband/hw/qib/qib_iba7220.c 		dd->sendctrl |= SYM_MASK(SendCtrl, SPioEnable);
dd               2830 drivers/infiniband/hw/qib/qib_iba7220.c 		if (dd->flags & QIB_USE_SPCL_TRIG)
dd               2831 drivers/infiniband/hw/qib/qib_iba7220.c 			dd->sendctrl |= SYM_MASK(SendCtrl,
dd               2835 drivers/infiniband/hw/qib/qib_iba7220.c 		dd->sendctrl &= ~SYM_MASK(SendCtrl, SendBufAvailUpd);
dd               2837 drivers/infiniband/hw/qib/qib_iba7220.c 		dd->sendctrl |= SYM_MASK(SendCtrl, SendBufAvailUpd);
dd               2842 drivers/infiniband/hw/qib/qib_iba7220.c 		tmp_dd_sendctrl = dd->sendctrl;
dd               2847 drivers/infiniband/hw/qib/qib_iba7220.c 		last = dd->piobcnt2k + dd->piobcnt4k;
dd               2852 drivers/infiniband/hw/qib/qib_iba7220.c 			qib_write_kreg(dd, kr_sendctrl,
dd               2855 drivers/infiniband/hw/qib/qib_iba7220.c 			qib_write_kreg(dd, kr_scratch, 0);
dd               2859 drivers/infiniband/hw/qib/qib_iba7220.c 	tmp_dd_sendctrl = dd->sendctrl;
dd               2868 drivers/infiniband/hw/qib/qib_iba7220.c 	    (dd->sendctrl & SYM_MASK(SendCtrl, SendBufAvailUpd)))
dd               2871 drivers/infiniband/hw/qib/qib_iba7220.c 	qib_write_kreg(dd, kr_sendctrl, tmp_dd_sendctrl);
dd               2872 drivers/infiniband/hw/qib/qib_iba7220.c 	qib_write_kreg(dd, kr_scratch, 0);
dd               2875 drivers/infiniband/hw/qib/qib_iba7220.c 		qib_write_kreg(dd, kr_sendctrl, dd->sendctrl);
dd               2876 drivers/infiniband/hw/qib/qib_iba7220.c 		qib_write_kreg(dd, kr_scratch, 0);
dd               2879 drivers/infiniband/hw/qib/qib_iba7220.c 	spin_unlock_irqrestore(&dd->sendctrl_lock, flags);
dd               2889 drivers/infiniband/hw/qib/qib_iba7220.c 		v = qib_read_kreg32(dd, kr_scratch);
dd               2890 drivers/infiniband/hw/qib/qib_iba7220.c 		qib_write_kreg(dd, kr_scratch, v);
dd               2891 drivers/infiniband/hw/qib/qib_iba7220.c 		v = qib_read_kreg32(dd, kr_scratch);
dd               2892 drivers/infiniband/hw/qib/qib_iba7220.c 		qib_write_kreg(dd, kr_scratch, v);
dd               2893 drivers/infiniband/hw/qib/qib_iba7220.c 		qib_read_kreg32(dd, kr_scratch);
dd               2905 drivers/infiniband/hw/qib/qib_iba7220.c 	struct qib_devdata *dd = ppd->dd;
dd               2946 drivers/infiniband/hw/qib/qib_iba7220.c 		qib_devinfo(ppd->dd->pcidev,
dd               2956 drivers/infiniband/hw/qib/qib_iba7220.c 		for (i = 0; i < dd->first_user_ctxt; i++)
dd               2957 drivers/infiniband/hw/qib/qib_iba7220.c 			ret += read_7220_creg32(dd, cr_portovfl + i);
dd               2968 drivers/infiniband/hw/qib/qib_iba7220.c 		ret = read_7220_creg(dd, creg);
dd               2970 drivers/infiniband/hw/qib/qib_iba7220.c 		ret = read_7220_creg32(dd, creg);
dd               2972 drivers/infiniband/hw/qib/qib_iba7220.c 		if (dd->pport->cpspec->ibdeltainprog)
dd               2974 drivers/infiniband/hw/qib/qib_iba7220.c 		ret -= dd->pport->cpspec->ibsymdelta;
dd               2976 drivers/infiniband/hw/qib/qib_iba7220.c 		if (dd->pport->cpspec->ibdeltainprog)
dd               2978 drivers/infiniband/hw/qib/qib_iba7220.c 		ret -= dd->pport->cpspec->iblnkerrdelta;
dd               3127 drivers/infiniband/hw/qib/qib_iba7220.c static void init_7220_cntrnames(struct qib_devdata *dd)
dd               3132 drivers/infiniband/hw/qib/qib_iba7220.c 	for (i = 0, s = (char *)cntr7220names; s && j <= dd->cfgctxts;
dd               3141 drivers/infiniband/hw/qib/qib_iba7220.c 	dd->cspec->ncntrs = i;
dd               3144 drivers/infiniband/hw/qib/qib_iba7220.c 		dd->cspec->cntrnamelen = sizeof(cntr7220names) - 1;
dd               3146 drivers/infiniband/hw/qib/qib_iba7220.c 		dd->cspec->cntrnamelen = 1 + s - cntr7220names;
dd               3147 drivers/infiniband/hw/qib/qib_iba7220.c 	dd->cspec->cntrs = kmalloc_array(dd->cspec->ncntrs, sizeof(u64),
dd               3152 drivers/infiniband/hw/qib/qib_iba7220.c 	dd->cspec->nportcntrs = i - 1;
dd               3153 drivers/infiniband/hw/qib/qib_iba7220.c 	dd->cspec->portcntrnamelen = sizeof(portcntr7220names) - 1;
dd               3154 drivers/infiniband/hw/qib/qib_iba7220.c 	dd->cspec->portcntrs = kmalloc_array(dd->cspec->nportcntrs,
dd               3159 drivers/infiniband/hw/qib/qib_iba7220.c static u32 qib_read_7220cntrs(struct qib_devdata *dd, loff_t pos, char **namep,
dd               3164 drivers/infiniband/hw/qib/qib_iba7220.c 	if (!dd->cspec->cntrs) {
dd               3171 drivers/infiniband/hw/qib/qib_iba7220.c 		ret = dd->cspec->cntrnamelen;
dd               3175 drivers/infiniband/hw/qib/qib_iba7220.c 		u64 *cntr = dd->cspec->cntrs;
dd               3178 drivers/infiniband/hw/qib/qib_iba7220.c 		ret = dd->cspec->ncntrs * sizeof(u64);
dd               3186 drivers/infiniband/hw/qib/qib_iba7220.c 		for (i = 0; i < dd->cspec->ncntrs; i++)
dd               3187 drivers/infiniband/hw/qib/qib_iba7220.c 			*cntr++ = read_7220_creg32(dd, cntr7220indices[i]);
dd               3193 drivers/infiniband/hw/qib/qib_iba7220.c static u32 qib_read_7220portcntrs(struct qib_devdata *dd, loff_t pos, u32 port,
dd               3198 drivers/infiniband/hw/qib/qib_iba7220.c 	if (!dd->cspec->portcntrs) {
dd               3204 drivers/infiniband/hw/qib/qib_iba7220.c 		ret = dd->cspec->portcntrnamelen;
dd               3208 drivers/infiniband/hw/qib/qib_iba7220.c 		u64 *cntr = dd->cspec->portcntrs;
dd               3209 drivers/infiniband/hw/qib/qib_iba7220.c 		struct qib_pportdata *ppd = &dd->pport[port];
dd               3212 drivers/infiniband/hw/qib/qib_iba7220.c 		ret = dd->cspec->nportcntrs * sizeof(u64);
dd               3219 drivers/infiniband/hw/qib/qib_iba7220.c 		for (i = 0; i < dd->cspec->nportcntrs; i++) {
dd               3225 drivers/infiniband/hw/qib/qib_iba7220.c 				*cntr++ = read_7220_creg32(dd,
dd               3243 drivers/infiniband/hw/qib/qib_iba7220.c 	struct qib_devdata *dd = from_timer(dd, t, stats_timer);
dd               3244 drivers/infiniband/hw/qib/qib_iba7220.c 	struct qib_pportdata *ppd = dd->pport;
dd               3252 drivers/infiniband/hw/qib/qib_iba7220.c 	if (!(dd->flags & QIB_INITTED) || dd->diag_client)
dd               3263 drivers/infiniband/hw/qib/qib_iba7220.c 	spin_lock_irqsave(&dd->eep_st_lock, flags);
dd               3264 drivers/infiniband/hw/qib/qib_iba7220.c 	traffic_wds -= dd->traffic_wds;
dd               3265 drivers/infiniband/hw/qib/qib_iba7220.c 	dd->traffic_wds += traffic_wds;
dd               3266 drivers/infiniband/hw/qib/qib_iba7220.c 	spin_unlock_irqrestore(&dd->eep_st_lock, flags);
dd               3268 drivers/infiniband/hw/qib/qib_iba7220.c 	mod_timer(&dd->stats_timer, jiffies + HZ * ACTIVITY_TIMER);
dd               3274 drivers/infiniband/hw/qib/qib_iba7220.c static int qib_7220_intr_fallback(struct qib_devdata *dd)
dd               3276 drivers/infiniband/hw/qib/qib_iba7220.c 	if (!dd->msi_lo)
dd               3279 drivers/infiniband/hw/qib/qib_iba7220.c 	qib_devinfo(dd->pcidev,
dd               3282 drivers/infiniband/hw/qib/qib_iba7220.c 	qib_free_irq(dd);
dd               3283 drivers/infiniband/hw/qib/qib_iba7220.c 	dd->msi_lo = 0;
dd               3284 drivers/infiniband/hw/qib/qib_iba7220.c 	if (pci_alloc_irq_vectors(dd->pcidev, 1, 1, PCI_IRQ_LEGACY) < 0)
dd               3285 drivers/infiniband/hw/qib/qib_iba7220.c 		qib_dev_err(dd, "Failed to enable INTx\n");
dd               3286 drivers/infiniband/hw/qib/qib_iba7220.c 	qib_setup_7220_interrupt(dd);
dd               3299 drivers/infiniband/hw/qib/qib_iba7220.c 	struct qib_devdata *dd = ppd->dd;
dd               3301 drivers/infiniband/hw/qib/qib_iba7220.c 	prev_val = qib_read_kreg64(dd, kr_xgxs_cfg);
dd               3304 drivers/infiniband/hw/qib/qib_iba7220.c 	qib_write_kreg(dd, kr_control,
dd               3305 drivers/infiniband/hw/qib/qib_iba7220.c 		       dd->control & ~QLOGIC_IB_C_LINKENABLE);
dd               3306 drivers/infiniband/hw/qib/qib_iba7220.c 	qib_write_kreg(dd, kr_xgxs_cfg, val);
dd               3307 drivers/infiniband/hw/qib/qib_iba7220.c 	qib_read_kreg32(dd, kr_scratch);
dd               3308 drivers/infiniband/hw/qib/qib_iba7220.c 	qib_write_kreg(dd, kr_xgxs_cfg, prev_val);
dd               3309 drivers/infiniband/hw/qib/qib_iba7220.c 	qib_write_kreg(dd, kr_control, dd->control);
dd               3330 drivers/infiniband/hw/qib/qib_iba7220.c 	u32 lbuf = ppd->dd->cspec->lastbuf_for_pio;
dd               3338 drivers/infiniband/hw/qib/qib_iba7220.c 	sendctrl_7220_mod(ppd->dd->pport, QIB_SENDCTRL_AVAIL_BLIP);
dd               3339 drivers/infiniband/hw/qib/qib_iba7220.c 	qib_read_kreg64(ppd->dd, kr_scratch); /* extra chip flush */
dd               3340 drivers/infiniband/hw/qib/qib_iba7220.c 	buf = qib_getsendbuf_range(ppd->dd, bnum, lbuf, lbuf);
dd               3356 drivers/infiniband/hw/qib/qib_iba7220.c 		qib_read_kreg64(ppd->dd, kr_scratch); /* extra chip flush */
dd               3357 drivers/infiniband/hw/qib/qib_iba7220.c 		buf = qib_getsendbuf_range(ppd->dd, bnum, lbuf, lbuf);
dd               3378 drivers/infiniband/hw/qib/qib_iba7220.c 	struct qib_devdata *dd = ppd->dd;
dd               3388 drivers/infiniband/hw/qib/qib_iba7220.c 	sendctrl_7220_mod(dd->pport, QIB_SENDCTRL_DISARM_BUF(pnum));
dd               3393 drivers/infiniband/hw/qib/qib_iba7220.c 	if (dd->flags & QIB_USE_SPCL_TRIG) {
dd               3394 drivers/infiniband/hw/qib/qib_iba7220.c 		u32 spcl_off = (pnum >= dd->piobcnt2k) ? 2047 : 1023;
dd               3400 drivers/infiniband/hw/qib/qib_iba7220.c 	qib_sendbuf_done(dd, pnum);
dd               3408 drivers/infiniband/hw/qib/qib_iba7220.c 	struct qib_devdata *dd = ppd->dd;
dd               3443 drivers/infiniband/hw/qib/qib_iba7220.c 	qib_read_kreg64(dd, kr_scratch);
dd               3446 drivers/infiniband/hw/qib/qib_iba7220.c 	qib_read_kreg64(dd, kr_scratch);
dd               3476 drivers/infiniband/hw/qib/qib_iba7220.c 	qib_write_kreg(ppd->dd, kr_ibcddrctrl, ppd->cpspec->ibcddrctrl);
dd               3477 drivers/infiniband/hw/qib/qib_iba7220.c 	qib_write_kreg(ppd->dd, kr_scratch, 0);
dd               3495 drivers/infiniband/hw/qib/qib_iba7220.c 	qib_write_kreg(ppd->dd, kr_ncmodectrl, 0x3b9dc07);
dd               3503 drivers/infiniband/hw/qib/qib_iba7220.c 	toggle_7220_rclkrls(ppd->dd);
dd               3516 drivers/infiniband/hw/qib/qib_iba7220.c 	struct qib_devdata *dd;
dd               3522 drivers/infiniband/hw/qib/qib_iba7220.c 	dd = ppd->dd;
dd               3546 drivers/infiniband/hw/qib/qib_iba7220.c 	toggle_7220_rclkrls(dd);
dd               3555 drivers/infiniband/hw/qib/qib_iba7220.c 	toggle_7220_rclkrls(dd);
dd               3568 drivers/infiniband/hw/qib/qib_iba7220.c 		if (dd->cspec->autoneg_tries == AUTONEG_TRIES) {
dd               3570 drivers/infiniband/hw/qib/qib_iba7220.c 			dd->cspec->autoneg_tries = 0;
dd               3611 drivers/infiniband/hw/qib/qib_iba7220.c 	struct qib_devdata *dd = ppd->dd;
dd               3628 drivers/infiniband/hw/qib/qib_iba7220.c 			qib_sd7220_presets(dd);
dd               3637 drivers/infiniband/hw/qib/qib_iba7220.c 		set_7220_relock_poll(dd, ibup);
dd               3645 drivers/infiniband/hw/qib/qib_iba7220.c 		    dd->cspec->autoneg_tries < AUTONEG_TRIES) {
dd               3647 drivers/infiniband/hw/qib/qib_iba7220.c 			++dd->cspec->autoneg_tries;
dd               3650 drivers/infiniband/hw/qib/qib_iba7220.c 				ppd->cpspec->ibsymsnap = read_7220_creg32(dd,
dd               3652 drivers/infiniband/hw/qib/qib_iba7220.c 				ppd->cpspec->iblnkerrsnap = read_7220_creg32(dd,
dd               3662 drivers/infiniband/hw/qib/qib_iba7220.c 			toggle_7220_rclkrls(dd);
dd               3672 drivers/infiniband/hw/qib/qib_iba7220.c 				dd->cspec->autoneg_tries = 0;
dd               3691 drivers/infiniband/hw/qib/qib_iba7220.c 				qib_write_kreg(dd, kr_ncmodectrl, 0);
dd               3704 drivers/infiniband/hw/qib/qib_iba7220.c 			set_7220_relock_poll(dd, ibup);
dd               3722 drivers/infiniband/hw/qib/qib_iba7220.c 			ppd->cpspec->ibsymdelta += read_7220_creg32(ppd->dd,
dd               3724 drivers/infiniband/hw/qib/qib_iba7220.c 			ppd->cpspec->iblnkerrdelta += read_7220_creg32(ppd->dd,
dd               3731 drivers/infiniband/hw/qib/qib_iba7220.c 		ppd->cpspec->ibsymsnap = read_7220_creg32(ppd->dd,
dd               3733 drivers/infiniband/hw/qib/qib_iba7220.c 		ppd->cpspec->iblnkerrsnap = read_7220_creg32(ppd->dd,
dd               3749 drivers/infiniband/hw/qib/qib_iba7220.c static int gpio_7220_mod(struct qib_devdata *dd, u32 out, u32 dir, u32 mask)
dd               3758 drivers/infiniband/hw/qib/qib_iba7220.c 		spin_lock_irqsave(&dd->cspec->gpio_lock, flags);
dd               3759 drivers/infiniband/hw/qib/qib_iba7220.c 		dd->cspec->extctrl &= ~((u64)mask << SYM_LSB(EXTCtrl, GPIOOe));
dd               3760 drivers/infiniband/hw/qib/qib_iba7220.c 		dd->cspec->extctrl |= ((u64) dir << SYM_LSB(EXTCtrl, GPIOOe));
dd               3761 drivers/infiniband/hw/qib/qib_iba7220.c 		new_out = (dd->cspec->gpio_out & ~mask) | out;
dd               3763 drivers/infiniband/hw/qib/qib_iba7220.c 		qib_write_kreg(dd, kr_extctrl, dd->cspec->extctrl);
dd               3764 drivers/infiniband/hw/qib/qib_iba7220.c 		qib_write_kreg(dd, kr_gpio_out, new_out);
dd               3765 drivers/infiniband/hw/qib/qib_iba7220.c 		dd->cspec->gpio_out = new_out;
dd               3766 drivers/infiniband/hw/qib/qib_iba7220.c 		spin_unlock_irqrestore(&dd->cspec->gpio_lock, flags);
dd               3776 drivers/infiniband/hw/qib/qib_iba7220.c 	read_val = qib_read_kreg64(dd, kr_extstatus);
dd               3785 drivers/infiniband/hw/qib/qib_iba7220.c static void get_7220_chip_params(struct qib_devdata *dd)
dd               3791 drivers/infiniband/hw/qib/qib_iba7220.c 	dd->uregbase = qib_read_kreg32(dd, kr_userregbase);
dd               3793 drivers/infiniband/hw/qib/qib_iba7220.c 	dd->rcvtidcnt = qib_read_kreg32(dd, kr_rcvtidcnt);
dd               3794 drivers/infiniband/hw/qib/qib_iba7220.c 	dd->rcvtidbase = qib_read_kreg32(dd, kr_rcvtidbase);
dd               3795 drivers/infiniband/hw/qib/qib_iba7220.c 	dd->rcvegrbase = qib_read_kreg32(dd, kr_rcvegrbase);
dd               3796 drivers/infiniband/hw/qib/qib_iba7220.c 	dd->palign = qib_read_kreg32(dd, kr_palign);
dd               3797 drivers/infiniband/hw/qib/qib_iba7220.c 	dd->piobufbase = qib_read_kreg64(dd, kr_sendpiobufbase);
dd               3798 drivers/infiniband/hw/qib/qib_iba7220.c 	dd->pio2k_bufbase = dd->piobufbase & 0xffffffff;
dd               3800 drivers/infiniband/hw/qib/qib_iba7220.c 	val = qib_read_kreg64(dd, kr_sendpiosize);
dd               3801 drivers/infiniband/hw/qib/qib_iba7220.c 	dd->piosize2k = val & ~0U;
dd               3802 drivers/infiniband/hw/qib/qib_iba7220.c 	dd->piosize4k = val >> 32;
dd               3807 drivers/infiniband/hw/qib/qib_iba7220.c 	dd->pport->ibmtu = (u32)mtu;
dd               3809 drivers/infiniband/hw/qib/qib_iba7220.c 	val = qib_read_kreg64(dd, kr_sendpiobufcnt);
dd               3810 drivers/infiniband/hw/qib/qib_iba7220.c 	dd->piobcnt2k = val & ~0U;
dd               3811 drivers/infiniband/hw/qib/qib_iba7220.c 	dd->piobcnt4k = val >> 32;
dd               3813 drivers/infiniband/hw/qib/qib_iba7220.c 	dd->pio2kbase = (u32 __iomem *)
dd               3814 drivers/infiniband/hw/qib/qib_iba7220.c 		((char __iomem *) dd->kregbase + dd->pio2k_bufbase);
dd               3815 drivers/infiniband/hw/qib/qib_iba7220.c 	if (dd->piobcnt4k) {
dd               3816 drivers/infiniband/hw/qib/qib_iba7220.c 		dd->pio4kbase = (u32 __iomem *)
dd               3817 drivers/infiniband/hw/qib/qib_iba7220.c 			((char __iomem *) dd->kregbase +
dd               3818 drivers/infiniband/hw/qib/qib_iba7220.c 			 (dd->piobufbase >> 32));
dd               3824 drivers/infiniband/hw/qib/qib_iba7220.c 		dd->align4k = ALIGN(dd->piosize4k, dd->palign);
dd               3827 drivers/infiniband/hw/qib/qib_iba7220.c 	piobufs = dd->piobcnt4k + dd->piobcnt2k;
dd               3829 drivers/infiniband/hw/qib/qib_iba7220.c 	dd->pioavregs = ALIGN(piobufs, sizeof(u64) * BITS_PER_BYTE / 2) /
dd               3838 drivers/infiniband/hw/qib/qib_iba7220.c static void set_7220_baseaddrs(struct qib_devdata *dd)
dd               3842 drivers/infiniband/hw/qib/qib_iba7220.c 	cregbase = qib_read_kreg32(dd, kr_counterregbase);
dd               3843 drivers/infiniband/hw/qib/qib_iba7220.c 	dd->cspec->cregbase = (u64 __iomem *)
dd               3844 drivers/infiniband/hw/qib/qib_iba7220.c 		((char __iomem *) dd->kregbase + cregbase);
dd               3846 drivers/infiniband/hw/qib/qib_iba7220.c 	dd->egrtidbase = (u64 __iomem *)
dd               3847 drivers/infiniband/hw/qib/qib_iba7220.c 		((char __iomem *) dd->kregbase + dd->rcvegrbase);
dd               3861 drivers/infiniband/hw/qib/qib_iba7220.c static int sendctrl_hook(struct qib_devdata *dd,
dd               3870 drivers/infiniband/hw/qib/qib_iba7220.c 		qib_dev_err(dd, "SendCtrl Hook called with offs %X, %s-bit\n",
dd               3878 drivers/infiniband/hw/qib/qib_iba7220.c 	spin_lock_irqsave(&dd->sendctrl_lock, flags);
dd               3888 drivers/infiniband/hw/qib/qib_iba7220.c 			local_data = (u64)qib_read_kreg32(dd, idx);
dd               3890 drivers/infiniband/hw/qib/qib_iba7220.c 			local_data = qib_read_kreg64(dd, idx);
dd               3891 drivers/infiniband/hw/qib/qib_iba7220.c 		qib_dev_err(dd, "Sendctrl -> %X, Shad -> %X\n",
dd               3892 drivers/infiniband/hw/qib/qib_iba7220.c 			    (u32)local_data, (u32)dd->sendctrl);
dd               3894 drivers/infiniband/hw/qib/qib_iba7220.c 		    (dd->sendctrl & SENDCTRL_SHADOWED))
dd               3895 drivers/infiniband/hw/qib/qib_iba7220.c 			qib_dev_err(dd, "Sendctrl read: %X shadow is %X\n",
dd               3896 drivers/infiniband/hw/qib/qib_iba7220.c 				(u32)local_data, (u32) dd->sendctrl);
dd               3910 drivers/infiniband/hw/qib/qib_iba7220.c 		sval = (dd->sendctrl & ~mask);
dd               3912 drivers/infiniband/hw/qib/qib_iba7220.c 		dd->sendctrl = sval;
dd               3914 drivers/infiniband/hw/qib/qib_iba7220.c 		qib_dev_err(dd, "Sendctrl <- %X, Shad <- %X\n",
dd               3916 drivers/infiniband/hw/qib/qib_iba7220.c 		qib_write_kreg(dd, kr_sendctrl, tval);
dd               3917 drivers/infiniband/hw/qib/qib_iba7220.c 		qib_write_kreg(dd, kr_scratch, 0Ull);
dd               3919 drivers/infiniband/hw/qib/qib_iba7220.c 	spin_unlock_irqrestore(&dd->sendctrl_lock, flags);
dd               3934 drivers/infiniband/hw/qib/qib_iba7220.c static int qib_late_7220_initreg(struct qib_devdata *dd)
dd               3939 drivers/infiniband/hw/qib/qib_iba7220.c 	qib_write_kreg(dd, kr_rcvhdrentsize, dd->rcvhdrentsize);
dd               3940 drivers/infiniband/hw/qib/qib_iba7220.c 	qib_write_kreg(dd, kr_rcvhdrsize, dd->rcvhdrsize);
dd               3941 drivers/infiniband/hw/qib/qib_iba7220.c 	qib_write_kreg(dd, kr_rcvhdrcnt, dd->rcvhdrcnt);
dd               3942 drivers/infiniband/hw/qib/qib_iba7220.c 	qib_write_kreg(dd, kr_sendpioavailaddr, dd->pioavailregs_phys);
dd               3943 drivers/infiniband/hw/qib/qib_iba7220.c 	val = qib_read_kreg64(dd, kr_sendpioavailaddr);
dd               3944 drivers/infiniband/hw/qib/qib_iba7220.c 	if (val != dd->pioavailregs_phys) {
dd               3945 drivers/infiniband/hw/qib/qib_iba7220.c 		qib_dev_err(dd,
dd               3947 drivers/infiniband/hw/qib/qib_iba7220.c 			(unsigned long) dd->pioavailregs_phys,
dd               3951 drivers/infiniband/hw/qib/qib_iba7220.c 	qib_register_observer(dd, &sendctrl_observer);
dd               3955 drivers/infiniband/hw/qib/qib_iba7220.c static int qib_init_7220_variables(struct qib_devdata *dd)
dd               3962 drivers/infiniband/hw/qib/qib_iba7220.c 	cpspec = (struct qib_chippport_specific *)(dd + 1);
dd               3964 drivers/infiniband/hw/qib/qib_iba7220.c 	dd->pport = ppd;
dd               3965 drivers/infiniband/hw/qib/qib_iba7220.c 	dd->num_pports = 1;
dd               3967 drivers/infiniband/hw/qib/qib_iba7220.c 	dd->cspec = (struct qib_chip_specific *)(cpspec + dd->num_pports);
dd               3968 drivers/infiniband/hw/qib/qib_iba7220.c 	dd->cspec->dd = dd;
dd               3971 drivers/infiniband/hw/qib/qib_iba7220.c 	spin_lock_init(&dd->cspec->sdepb_lock);
dd               3972 drivers/infiniband/hw/qib/qib_iba7220.c 	spin_lock_init(&dd->cspec->rcvmod_lock);
dd               3973 drivers/infiniband/hw/qib/qib_iba7220.c 	spin_lock_init(&dd->cspec->gpio_lock);
dd               3976 drivers/infiniband/hw/qib/qib_iba7220.c 	dd->revision = readq(&dd->kregbase[kr_revision]);
dd               3978 drivers/infiniband/hw/qib/qib_iba7220.c 	if ((dd->revision & 0xffffffffU) == 0xffffffffU) {
dd               3979 drivers/infiniband/hw/qib/qib_iba7220.c 		qib_dev_err(dd,
dd               3984 drivers/infiniband/hw/qib/qib_iba7220.c 	dd->flags |= QIB_PRESENT;  /* now register routines work */
dd               3986 drivers/infiniband/hw/qib/qib_iba7220.c 	dd->majrev = (u8) SYM_FIELD(dd->revision, Revision_R,
dd               3988 drivers/infiniband/hw/qib/qib_iba7220.c 	dd->minrev = (u8) SYM_FIELD(dd->revision, Revision_R,
dd               3991 drivers/infiniband/hw/qib/qib_iba7220.c 	get_7220_chip_params(dd);
dd               3992 drivers/infiniband/hw/qib/qib_iba7220.c 	qib_7220_boardname(dd);
dd               3998 drivers/infiniband/hw/qib/qib_iba7220.c 	dd->gpio_sda_num = _QIB_GPIO_SDA_NUM;
dd               3999 drivers/infiniband/hw/qib/qib_iba7220.c 	dd->gpio_scl_num = _QIB_GPIO_SCL_NUM;
dd               4000 drivers/infiniband/hw/qib/qib_iba7220.c 	dd->twsi_eeprom_dev = QIB_TWSI_EEPROM_DEV;
dd               4002 drivers/infiniband/hw/qib/qib_iba7220.c 	dd->flags |= QIB_HAS_INTX | QIB_HAS_LINK_LATENCY |
dd               4004 drivers/infiniband/hw/qib/qib_iba7220.c 	dd->flags |= qib_special_trigger ?
dd               4010 drivers/infiniband/hw/qib/qib_iba7220.c 	ret = qib_init_pportdata(ppd, dd, 0, 1);
dd               4029 drivers/infiniband/hw/qib/qib_iba7220.c 		qib_write_kreg(dd, kr_rcvbthqp, QIB_KD_QP);
dd               4035 drivers/infiniband/hw/qib/qib_iba7220.c 	dd->rcvhdrentsize = QIB_RCVHDR_ENTSIZE;
dd               4036 drivers/infiniband/hw/qib/qib_iba7220.c 	dd->rcvhdrsize = QIB_DFLT_RCVHDRSIZE;
dd               4037 drivers/infiniband/hw/qib/qib_iba7220.c 	dd->rhf_offset =
dd               4038 drivers/infiniband/hw/qib/qib_iba7220.c 		dd->rcvhdrentsize - sizeof(u64) / sizeof(u32);
dd               4042 drivers/infiniband/hw/qib/qib_iba7220.c 	dd->rcvegrbufsize = ret != -1 ? max(ret, 2048) : QIB_DEFAULT_MTU;
dd               4043 drivers/infiniband/hw/qib/qib_iba7220.c 	dd->rcvegrbufsize_shift = ilog2(dd->rcvegrbufsize);
dd               4045 drivers/infiniband/hw/qib/qib_iba7220.c 	qib_7220_tidtemplate(dd);
dd               4052 drivers/infiniband/hw/qib/qib_iba7220.c 	dd->rhdrhead_intr_off = 1ULL << 32;
dd               4055 drivers/infiniband/hw/qib/qib_iba7220.c 	timer_setup(&dd->stats_timer, qib_get_7220_faststats, 0);
dd               4056 drivers/infiniband/hw/qib/qib_iba7220.c 	dd->stats_timer.expires = jiffies + ACTIVITY_TIMER * HZ;
dd               4064 drivers/infiniband/hw/qib/qib_iba7220.c 		dd->control |= 1 << 4;
dd               4066 drivers/infiniband/hw/qib/qib_iba7220.c 	dd->ureg_align = 0x10000;  /* 64KB alignment */
dd               4068 drivers/infiniband/hw/qib/qib_iba7220.c 	dd->piosize2kmax_dwords = (dd->piosize2k >> 2)-1;
dd               4069 drivers/infiniband/hw/qib/qib_iba7220.c 	qib_7220_config_ctxts(dd);
dd               4070 drivers/infiniband/hw/qib/qib_iba7220.c 	qib_set_ctxtcnt(dd);  /* needed for PAT setup */
dd               4072 drivers/infiniband/hw/qib/qib_iba7220.c 	ret = init_chip_wc_pat(dd, 0);
dd               4075 drivers/infiniband/hw/qib/qib_iba7220.c 	set_7220_baseaddrs(dd); /* set chip access pointers now */
dd               4081 drivers/infiniband/hw/qib/qib_iba7220.c 	ret = qib_create_ctxts(dd);
dd               4082 drivers/infiniband/hw/qib/qib_iba7220.c 	init_7220_cntrnames(dd);
dd               4095 drivers/infiniband/hw/qib/qib_iba7220.c 	if (dd->flags & QIB_HAS_SEND_DMA) {
dd               4096 drivers/infiniband/hw/qib/qib_iba7220.c 		dd->cspec->sdmabufcnt =  dd->piobcnt4k;
dd               4099 drivers/infiniband/hw/qib/qib_iba7220.c 		dd->cspec->sdmabufcnt = 0;
dd               4100 drivers/infiniband/hw/qib/qib_iba7220.c 		sbufs = dd->piobcnt4k;
dd               4103 drivers/infiniband/hw/qib/qib_iba7220.c 	dd->cspec->lastbuf_for_pio = dd->piobcnt2k + dd->piobcnt4k -
dd               4104 drivers/infiniband/hw/qib/qib_iba7220.c 		dd->cspec->sdmabufcnt;
dd               4105 drivers/infiniband/hw/qib/qib_iba7220.c 	dd->lastctxt_piobuf = dd->cspec->lastbuf_for_pio - sbufs;
dd               4106 drivers/infiniband/hw/qib/qib_iba7220.c 	dd->cspec->lastbuf_for_pio--; /* range is <= , not < */
dd               4107 drivers/infiniband/hw/qib/qib_iba7220.c 	dd->last_pio = dd->cspec->lastbuf_for_pio;
dd               4108 drivers/infiniband/hw/qib/qib_iba7220.c 	dd->pbufsctxt = dd->lastctxt_piobuf /
dd               4109 drivers/infiniband/hw/qib/qib_iba7220.c 		(dd->cfgctxts - dd->first_user_ctxt);
dd               4117 drivers/infiniband/hw/qib/qib_iba7220.c 	if ((dd->pbufsctxt - 2) < updthresh)
dd               4118 drivers/infiniband/hw/qib/qib_iba7220.c 		updthresh = dd->pbufsctxt - 2;
dd               4120 drivers/infiniband/hw/qib/qib_iba7220.c 	dd->cspec->updthresh_dflt = updthresh;
dd               4121 drivers/infiniband/hw/qib/qib_iba7220.c 	dd->cspec->updthresh = updthresh;
dd               4124 drivers/infiniband/hw/qib/qib_iba7220.c 	dd->sendctrl |= (updthresh & SYM_RMASK(SendCtrl, AvailUpdThld))
dd               4127 drivers/infiniband/hw/qib/qib_iba7220.c 	dd->psxmitwait_supported = 1;
dd               4128 drivers/infiniband/hw/qib/qib_iba7220.c 	dd->psxmitwait_check_rate = QIB_7220_PSXMITWAIT_CHECK_RATE;
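
The qib_iba7220.c block above (lines 4095-4121 of that file) splits the chip's PIO send buffers between send DMA, kernel-reserved buffers, and per-user-context slices, then clamps the buffer-availability update threshold so each context keeps a couple of buffers in hand. A minimal standalone sketch of that arithmetic follows; the counts are hypothetical, and the partially elided if/else branch is reconstructed on the assumption that sbufs defaults to max(updthresh, 3) when send DMA is present.

	#include <stdio.h>

	/* Illustrative partitioning of PIO send buffers, modeled on the
	 * qib_iba7220.c init path above. All counts are made-up inputs. */
	int main(void)
	{
		unsigned piobcnt2k = 128, piobcnt4k = 32;   /* hypothetical */
		unsigned cfgctxts = 9, first_user_ctxt = 1; /* hypothetical */
		unsigned updthresh = 8;                     /* default threshold */
		int has_send_dma = 1;
		unsigned sdmabufcnt, sbufs;

		if (has_send_dma) {
			sdmabufcnt = piobcnt4k;      /* SDMA owns the 4k buffers */
			sbufs = updthresh > 3 ? updthresh : 3;  /* assumed branch */
		} else {
			sdmabufcnt = 0;
			sbufs = piobcnt4k;           /* kernel keeps the 4k buffers */
		}

		unsigned lastbuf_for_pio = piobcnt2k + piobcnt4k - sdmabufcnt;
		unsigned lastctxt_piobuf = lastbuf_for_pio - sbufs;
		lastbuf_for_pio--;                   /* range is <=, not < */
		unsigned pbufsctxt = lastctxt_piobuf / (cfgctxts - first_user_ctxt);

		/* Keep at least 2 buffers per context below the update threshold. */
		if (pbufsctxt - 2 < updthresh)
			updthresh = pbufsctxt - 2;

		printf("pio per context: %u, updthresh %u\n", pbufsctxt, updthresh);
		return 0;
	}
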
dd               4137 drivers/infiniband/hw/qib/qib_iba7220.c 	struct qib_devdata *dd = ppd->dd;
dd               4144 drivers/infiniband/hw/qib/qib_iba7220.c 		if ((plen + 1) > dd->piosize2kmax_dwords)
dd               4145 drivers/infiniband/hw/qib/qib_iba7220.c 			first = dd->piobcnt2k;
dd               4149 drivers/infiniband/hw/qib/qib_iba7220.c 		last = dd->cspec->lastbuf_for_pio;
dd               4150 drivers/infiniband/hw/qib/qib_iba7220.c 		buf = qib_getsendbuf_range(dd, pbufnum, first, last);
dd               4159 drivers/infiniband/hw/qib/qib_iba7220.c 	write_7220_creg(ppd->dd, cr_psinterval, intv);
dd               4160 drivers/infiniband/hw/qib/qib_iba7220.c 	write_7220_creg(ppd->dd, cr_psstart, start);
dd               4175 drivers/infiniband/hw/qib/qib_iba7220.c 	qib_write_kreg(ppd->dd, kr_senddmatail, tail);
dd               4229 drivers/infiniband/hw/qib/qib_iba7220.c 	struct qib_devdata *dd = ppd->dd;
dd               4234 drivers/infiniband/hw/qib/qib_iba7220.c 	qib_write_kreg(dd, kr_senddmabase, ppd->sdma_descq_phys);
dd               4238 drivers/infiniband/hw/qib/qib_iba7220.c 	qib_write_kreg(dd, kr_senddmaheadaddr, ppd->sdma_head_phys);
dd               4244 drivers/infiniband/hw/qib/qib_iba7220.c 	n = dd->piobcnt2k + dd->piobcnt4k;
dd               4245 drivers/infiniband/hw/qib/qib_iba7220.c 	i = n - dd->cspec->sdmabufcnt;
dd               4253 drivers/infiniband/hw/qib/qib_iba7220.c 	qib_write_kreg(dd, kr_senddmabufmask0, senddmabufmask[0]);
dd               4254 drivers/infiniband/hw/qib/qib_iba7220.c 	qib_write_kreg(dd, kr_senddmabufmask1, senddmabufmask[1]);
dd               4255 drivers/infiniband/hw/qib/qib_iba7220.c 	qib_write_kreg(dd, kr_senddmabufmask2, senddmabufmask[2]);
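
init_sdma_7220_regs, listed above, hands the tail of the PIO buffer space to the send-DMA engine by setting one bit per buffer across three 64-bit senddmabufmask registers; the per-bit loop itself is elided in this listing. A hedged standalone reconstruction of that mask-building step, with invented buffer counts:

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint64_t senddmabufmask[3] = { 0, 0, 0 };
		/* hypothetical counts, mirroring piobcnt2k/piobcnt4k/sdmabufcnt */
		unsigned piobcnt2k = 128, piobcnt4k = 32, sdmabufcnt = 32;

		unsigned n = piobcnt2k + piobcnt4k;  /* total PIO buffers */
		unsigned i = n - sdmabufcnt;         /* first buffer owned by SDMA */

		/* One bit per buffer, spread across three 64-bit mask words. */
		for (; i < n; i++) {
			unsigned word = i / 64;
			unsigned bit = i & 63;

			senddmabufmask[word] |= 1ULL << bit;
		}

		for (i = 0; i < 3; i++)
			printf("senddmabufmask%u = %#llx\n", i,
			       (unsigned long long)senddmabufmask[i]);
		return 0;
	}
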
dd               4266 drivers/infiniband/hw/qib/qib_iba7220.c 	struct qib_devdata *dd = ppd->dd;
dd               4275 drivers/infiniband/hw/qib/qib_iba7220.c 		(dd->flags & QIB_HAS_SDMA_TIMEOUT);
dd               4279 drivers/infiniband/hw/qib/qib_iba7220.c 		(u16)qib_read_kreg32(dd, kr_senddmahead);
dd               4312 drivers/infiniband/hw/qib/qib_iba7220.c 	u64 hwstatus = qib_read_kreg64(ppd->dd, kr_senddmastatus);
dd               4344 drivers/infiniband/hw/qib/qib_iba7220.c static void qib_7220_initvl15_bufs(struct qib_devdata *dd)
dd               4354 drivers/infiniband/hw/qib/qib_iba7220.c 		rcd->rcvegrcnt = rcd->dd->cspec->rcvegrcnt;
dd               4360 drivers/infiniband/hw/qib/qib_iba7220.c static void qib_7220_txchk_change(struct qib_devdata *dd, u32 start,
dd               4369 drivers/infiniband/hw/qib/qib_iba7220.c 		spin_lock_irqsave(&dd->uctxt_lock, flags);
dd               4370 drivers/infiniband/hw/qib/qib_iba7220.c 		for (i = dd->first_user_ctxt;
dd               4371 drivers/infiniband/hw/qib/qib_iba7220.c 		     dd->cspec->updthresh != dd->cspec->updthresh_dflt
dd               4372 drivers/infiniband/hw/qib/qib_iba7220.c 		     && i < dd->cfgctxts; i++)
dd               4373 drivers/infiniband/hw/qib/qib_iba7220.c 			if (dd->rcd[i] && dd->rcd[i]->subctxt_cnt &&
dd               4374 drivers/infiniband/hw/qib/qib_iba7220.c 			   ((dd->rcd[i]->piocnt / dd->rcd[i]->subctxt_cnt) - 1)
dd               4375 drivers/infiniband/hw/qib/qib_iba7220.c 			   < dd->cspec->updthresh_dflt)
dd               4377 drivers/infiniband/hw/qib/qib_iba7220.c 		spin_unlock_irqrestore(&dd->uctxt_lock, flags);
dd               4378 drivers/infiniband/hw/qib/qib_iba7220.c 		if (i == dd->cfgctxts) {
dd               4379 drivers/infiniband/hw/qib/qib_iba7220.c 			spin_lock_irqsave(&dd->sendctrl_lock, flags);
dd               4380 drivers/infiniband/hw/qib/qib_iba7220.c 			dd->cspec->updthresh = dd->cspec->updthresh_dflt;
dd               4381 drivers/infiniband/hw/qib/qib_iba7220.c 			dd->sendctrl &= ~SYM_MASK(SendCtrl, AvailUpdThld);
dd               4382 drivers/infiniband/hw/qib/qib_iba7220.c 			dd->sendctrl |= (dd->cspec->updthresh &
dd               4385 drivers/infiniband/hw/qib/qib_iba7220.c 			spin_unlock_irqrestore(&dd->sendctrl_lock, flags);
dd               4386 drivers/infiniband/hw/qib/qib_iba7220.c 			sendctrl_7220_mod(dd->pport, QIB_SENDCTRL_AVAIL_BLIP);
dd               4390 drivers/infiniband/hw/qib/qib_iba7220.c 		spin_lock_irqsave(&dd->sendctrl_lock, flags);
dd               4392 drivers/infiniband/hw/qib/qib_iba7220.c 			/ rcd->subctxt_cnt) - 1) < dd->cspec->updthresh) {
dd               4393 drivers/infiniband/hw/qib/qib_iba7220.c 			dd->cspec->updthresh = (rcd->piocnt /
dd               4395 drivers/infiniband/hw/qib/qib_iba7220.c 			dd->sendctrl &= ~SYM_MASK(SendCtrl, AvailUpdThld);
dd               4396 drivers/infiniband/hw/qib/qib_iba7220.c 			dd->sendctrl |= (dd->cspec->updthresh &
dd               4399 drivers/infiniband/hw/qib/qib_iba7220.c 			spin_unlock_irqrestore(&dd->sendctrl_lock, flags);
dd               4400 drivers/infiniband/hw/qib/qib_iba7220.c 			sendctrl_7220_mod(dd->pport, QIB_SENDCTRL_AVAIL_BLIP);
dd               4402 drivers/infiniband/hw/qib/qib_iba7220.c 			spin_unlock_irqrestore(&dd->sendctrl_lock, flags);
dd               4407 drivers/infiniband/hw/qib/qib_iba7220.c static void writescratch(struct qib_devdata *dd, u32 val)
dd               4409 drivers/infiniband/hw/qib/qib_iba7220.c 	qib_write_kreg(dd, kr_scratch, val);
dd               4420 drivers/infiniband/hw/qib/qib_iba7220.c static int qib_7220_tempsense_rd(struct qib_devdata *dd, int regnum)
dd               4436 drivers/infiniband/hw/qib/qib_iba7220.c 	ret = mutex_lock_interruptible(&dd->eep_lock);
dd               4440 drivers/infiniband/hw/qib/qib_iba7220.c 	ret = qib_twsi_blk_rd(dd, QIB_TWSI_TEMP_DEV, regnum, &rdata, 1);
dd               4444 drivers/infiniband/hw/qib/qib_iba7220.c 	mutex_unlock(&dd->eep_lock);
dd               4457 drivers/infiniband/hw/qib/qib_iba7220.c static int qib_7220_notify_dca(struct qib_devdata *dd, unsigned long event)
dd               4464 drivers/infiniband/hw/qib/qib_iba7220.c static int qib_7220_eeprom_wen(struct qib_devdata *dd, int wen)
dd               4480 drivers/infiniband/hw/qib/qib_iba7220.c 	struct qib_devdata *dd;
dd               4484 drivers/infiniband/hw/qib/qib_iba7220.c 	dd = qib_alloc_devdata(pdev, sizeof(struct qib_chip_specific) +
dd               4486 drivers/infiniband/hw/qib/qib_iba7220.c 	if (IS_ERR(dd))
dd               4489 drivers/infiniband/hw/qib/qib_iba7220.c 	dd->f_bringup_serdes    = qib_7220_bringup_serdes;
dd               4490 drivers/infiniband/hw/qib/qib_iba7220.c 	dd->f_cleanup           = qib_setup_7220_cleanup;
dd               4491 drivers/infiniband/hw/qib/qib_iba7220.c 	dd->f_clear_tids        = qib_7220_clear_tids;
dd               4492 drivers/infiniband/hw/qib/qib_iba7220.c 	dd->f_free_irq          = qib_free_irq;
dd               4493 drivers/infiniband/hw/qib/qib_iba7220.c 	dd->f_get_base_info     = qib_7220_get_base_info;
dd               4494 drivers/infiniband/hw/qib/qib_iba7220.c 	dd->f_get_msgheader     = qib_7220_get_msgheader;
dd               4495 drivers/infiniband/hw/qib/qib_iba7220.c 	dd->f_getsendbuf        = qib_7220_getsendbuf;
dd               4496 drivers/infiniband/hw/qib/qib_iba7220.c 	dd->f_gpio_mod          = gpio_7220_mod;
dd               4497 drivers/infiniband/hw/qib/qib_iba7220.c 	dd->f_eeprom_wen        = qib_7220_eeprom_wen;
dd               4498 drivers/infiniband/hw/qib/qib_iba7220.c 	dd->f_hdrqempty         = qib_7220_hdrqempty;
dd               4499 drivers/infiniband/hw/qib/qib_iba7220.c 	dd->f_ib_updown         = qib_7220_ib_updown;
dd               4500 drivers/infiniband/hw/qib/qib_iba7220.c 	dd->f_init_ctxt         = qib_7220_init_ctxt;
dd               4501 drivers/infiniband/hw/qib/qib_iba7220.c 	dd->f_initvl15_bufs     = qib_7220_initvl15_bufs;
dd               4502 drivers/infiniband/hw/qib/qib_iba7220.c 	dd->f_intr_fallback     = qib_7220_intr_fallback;
dd               4503 drivers/infiniband/hw/qib/qib_iba7220.c 	dd->f_late_initreg      = qib_late_7220_initreg;
dd               4504 drivers/infiniband/hw/qib/qib_iba7220.c 	dd->f_setpbc_control    = qib_7220_setpbc_control;
dd               4505 drivers/infiniband/hw/qib/qib_iba7220.c 	dd->f_portcntr          = qib_portcntr_7220;
dd               4506 drivers/infiniband/hw/qib/qib_iba7220.c 	dd->f_put_tid           = qib_7220_put_tid;
dd               4507 drivers/infiniband/hw/qib/qib_iba7220.c 	dd->f_quiet_serdes      = qib_7220_quiet_serdes;
dd               4508 drivers/infiniband/hw/qib/qib_iba7220.c 	dd->f_rcvctrl           = rcvctrl_7220_mod;
dd               4509 drivers/infiniband/hw/qib/qib_iba7220.c 	dd->f_read_cntrs        = qib_read_7220cntrs;
dd               4510 drivers/infiniband/hw/qib/qib_iba7220.c 	dd->f_read_portcntrs    = qib_read_7220portcntrs;
dd               4511 drivers/infiniband/hw/qib/qib_iba7220.c 	dd->f_reset             = qib_setup_7220_reset;
dd               4512 drivers/infiniband/hw/qib/qib_iba7220.c 	dd->f_init_sdma_regs    = init_sdma_7220_regs;
dd               4513 drivers/infiniband/hw/qib/qib_iba7220.c 	dd->f_sdma_busy         = qib_sdma_7220_busy;
dd               4514 drivers/infiniband/hw/qib/qib_iba7220.c 	dd->f_sdma_gethead      = qib_sdma_7220_gethead;
dd               4515 drivers/infiniband/hw/qib/qib_iba7220.c 	dd->f_sdma_sendctrl     = qib_7220_sdma_sendctrl;
dd               4516 drivers/infiniband/hw/qib/qib_iba7220.c 	dd->f_sdma_set_desc_cnt = qib_sdma_set_7220_desc_cnt;
dd               4517 drivers/infiniband/hw/qib/qib_iba7220.c 	dd->f_sdma_update_tail  = qib_sdma_update_7220_tail;
dd               4518 drivers/infiniband/hw/qib/qib_iba7220.c 	dd->f_sdma_hw_clean_up  = qib_7220_sdma_hw_clean_up;
dd               4519 drivers/infiniband/hw/qib/qib_iba7220.c 	dd->f_sdma_hw_start_up  = qib_7220_sdma_hw_start_up;
dd               4520 drivers/infiniband/hw/qib/qib_iba7220.c 	dd->f_sdma_init_early   = qib_7220_sdma_init_early;
dd               4521 drivers/infiniband/hw/qib/qib_iba7220.c 	dd->f_sendctrl          = sendctrl_7220_mod;
dd               4522 drivers/infiniband/hw/qib/qib_iba7220.c 	dd->f_set_armlaunch     = qib_set_7220_armlaunch;
dd               4523 drivers/infiniband/hw/qib/qib_iba7220.c 	dd->f_set_cntr_sample   = qib_set_cntr_7220_sample;
dd               4524 drivers/infiniband/hw/qib/qib_iba7220.c 	dd->f_iblink_state      = qib_7220_iblink_state;
dd               4525 drivers/infiniband/hw/qib/qib_iba7220.c 	dd->f_ibphys_portstate  = qib_7220_phys_portstate;
dd               4526 drivers/infiniband/hw/qib/qib_iba7220.c 	dd->f_get_ib_cfg        = qib_7220_get_ib_cfg;
dd               4527 drivers/infiniband/hw/qib/qib_iba7220.c 	dd->f_set_ib_cfg        = qib_7220_set_ib_cfg;
dd               4528 drivers/infiniband/hw/qib/qib_iba7220.c 	dd->f_set_ib_loopback   = qib_7220_set_loopback;
dd               4529 drivers/infiniband/hw/qib/qib_iba7220.c 	dd->f_set_intr_state    = qib_7220_set_intr_state;
dd               4530 drivers/infiniband/hw/qib/qib_iba7220.c 	dd->f_setextled         = qib_setup_7220_setextled;
dd               4531 drivers/infiniband/hw/qib/qib_iba7220.c 	dd->f_txchk_change      = qib_7220_txchk_change;
dd               4532 drivers/infiniband/hw/qib/qib_iba7220.c 	dd->f_update_usrhead    = qib_update_7220_usrhead;
dd               4533 drivers/infiniband/hw/qib/qib_iba7220.c 	dd->f_wantpiobuf_intr   = qib_wantpiobuf_7220_intr;
dd               4534 drivers/infiniband/hw/qib/qib_iba7220.c 	dd->f_xgxs_reset        = qib_7220_xgxs_reset;
dd               4535 drivers/infiniband/hw/qib/qib_iba7220.c 	dd->f_writescratch      = writescratch;
dd               4536 drivers/infiniband/hw/qib/qib_iba7220.c 	dd->f_tempsense_rd	= qib_7220_tempsense_rd;
dd               4538 drivers/infiniband/hw/qib/qib_iba7220.c 	dd->f_notify_dca = qib_7220_notify_dca;
dd               4546 drivers/infiniband/hw/qib/qib_iba7220.c 	ret = qib_pcie_ddinit(dd, pdev, ent);
dd               4551 drivers/infiniband/hw/qib/qib_iba7220.c 	ret = qib_init_7220_variables(dd);
dd               4558 drivers/infiniband/hw/qib/qib_iba7220.c 	boardid = SYM_FIELD(dd->revision, Revision,
dd               4571 drivers/infiniband/hw/qib/qib_iba7220.c 	if (qib_pcie_params(dd, minwidth, NULL))
dd               4572 drivers/infiniband/hw/qib/qib_iba7220.c 		qib_dev_err(dd,
dd               4575 drivers/infiniband/hw/qib/qib_iba7220.c 	if (qib_read_kreg64(dd, kr_hwerrstatus) &
dd               4577 drivers/infiniband/hw/qib/qib_iba7220.c 		qib_write_kreg(dd, kr_hwerrclear,
dd               4581 drivers/infiniband/hw/qib/qib_iba7220.c 	qib_setup_7220_interrupt(dd);
dd               4582 drivers/infiniband/hw/qib/qib_iba7220.c 	qib_7220_init_hwerrors(dd);
dd               4585 drivers/infiniband/hw/qib/qib_iba7220.c 	qib_write_kreg(dd, kr_hwdiagctrl, 0);
dd               4590 drivers/infiniband/hw/qib/qib_iba7220.c 	qib_pcie_ddcleanup(dd);
dd               4592 drivers/infiniband/hw/qib/qib_iba7220.c 	qib_free_devdata(dd);
dd               4593 drivers/infiniband/hw/qib/qib_iba7220.c 	dd = ERR_PTR(ret);
dd               4595 drivers/infiniband/hw/qib/qib_iba7220.c 	return dd;
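
qib_init_iba7220_funcs, listed above, finishes by populating a per-chip ops vector: every f_* member of qib_devdata points at the 7220-specific implementation, so the common driver core can call dd->f_sendctrl() and friends without knowing which ASIC is present. A stripped-down illustration of that pattern, with invented names and only two ops:

	#include <stdio.h>

	/* Toy version of the per-chip ops vector; names are invented. */
	struct devdata {
		void (*f_sendctrl)(struct devdata *dd, unsigned op);
		int  (*f_tempsense_rd)(struct devdata *dd, int regnum);
	};

	static void sendctrl_7220_mod(struct devdata *dd, unsigned op)
	{
		(void)dd;
		printf("7220 sendctrl op %#x\n", op);
	}

	static int tempsense_7220_rd(struct devdata *dd, int regnum)
	{
		(void)dd; (void)regnum;
		return 42;              /* pretend temperature reading */
	}

	static void init_7220_funcs(struct devdata *dd)
	{
		dd->f_sendctrl     = sendctrl_7220_mod;
		dd->f_tempsense_rd = tempsense_7220_rd;
	}

	int main(void)
	{
		struct devdata dd;

		init_7220_funcs(&dd);
		dd.f_sendctrl(&dd, 0x1);    /* caller stays chip-agnostic */
		printf("temp %d\n", dd.f_tempsense_rd(&dd, 0));
		return 0;
	}
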
dd                162 drivers/infiniband/hw/qib/qib_iba7322.c #define IS_QMH(dd) (SYM_FIELD((dd)->revision, Revision, BoardID) == \
dd                164 drivers/infiniband/hw/qib/qib_iba7322.c #define IS_QME(dd) (SYM_FIELD((dd)->revision, Revision, BoardID) == \
dd                758 drivers/infiniband/hw/qib/qib_iba7322.c static inline void qib_write_kreg(const struct qib_devdata *dd,
dd                767 drivers/infiniband/hw/qib/qib_iba7322.c static void qib_setup_dca(struct qib_devdata *dd);
dd                768 drivers/infiniband/hw/qib/qib_iba7322.c static void setup_dca_notifier(struct qib_devdata *dd, int msixnum);
dd                769 drivers/infiniband/hw/qib/qib_iba7322.c static void reset_dca_notifier(struct qib_devdata *dd, int msixnum);
dd                782 drivers/infiniband/hw/qib/qib_iba7322.c static inline u32 qib_read_ureg32(const struct qib_devdata *dd,
dd                785 drivers/infiniband/hw/qib/qib_iba7322.c 	if (!dd->kregbase || !(dd->flags & QIB_PRESENT))
dd                788 drivers/infiniband/hw/qib/qib_iba7322.c 		(dd->ureg_align * ctxt) + (dd->userbase ?
dd                789 drivers/infiniband/hw/qib/qib_iba7322.c 		 (char __iomem *)dd->userbase :
dd                790 drivers/infiniband/hw/qib/qib_iba7322.c 		 (char __iomem *)dd->kregbase + dd->uregbase)));
dd                803 drivers/infiniband/hw/qib/qib_iba7322.c static inline u64 qib_read_ureg(const struct qib_devdata *dd,
dd                807 drivers/infiniband/hw/qib/qib_iba7322.c 	if (!dd->kregbase || !(dd->flags & QIB_PRESENT))
dd                810 drivers/infiniband/hw/qib/qib_iba7322.c 		(dd->ureg_align * ctxt) + (dd->userbase ?
dd                811 drivers/infiniband/hw/qib/qib_iba7322.c 		 (char __iomem *)dd->userbase :
dd                812 drivers/infiniband/hw/qib/qib_iba7322.c 		 (char __iomem *)dd->kregbase + dd->uregbase)));
dd                824 drivers/infiniband/hw/qib/qib_iba7322.c static inline void qib_write_ureg(const struct qib_devdata *dd,
dd                829 drivers/infiniband/hw/qib/qib_iba7322.c 	if (dd->userbase)
dd                831 drivers/infiniband/hw/qib/qib_iba7322.c 			((char __iomem *) dd->userbase +
dd                832 drivers/infiniband/hw/qib/qib_iba7322.c 			 dd->ureg_align * ctxt);
dd                835 drivers/infiniband/hw/qib/qib_iba7322.c 			(dd->uregbase +
dd                836 drivers/infiniband/hw/qib/qib_iba7322.c 			 (char __iomem *) dd->kregbase +
dd                837 drivers/infiniband/hw/qib/qib_iba7322.c 			 dd->ureg_align * ctxt);
dd                839 drivers/infiniband/hw/qib/qib_iba7322.c 	if (dd->kregbase && (dd->flags & QIB_PRESENT))
dd                843 drivers/infiniband/hw/qib/qib_iba7322.c static inline u32 qib_read_kreg32(const struct qib_devdata *dd,
dd                846 drivers/infiniband/hw/qib/qib_iba7322.c 	if (!dd->kregbase || !(dd->flags & QIB_PRESENT))
dd                848 drivers/infiniband/hw/qib/qib_iba7322.c 	return readl((u32 __iomem *) &dd->kregbase[regno]);
dd                851 drivers/infiniband/hw/qib/qib_iba7322.c static inline u64 qib_read_kreg64(const struct qib_devdata *dd,
dd                854 drivers/infiniband/hw/qib/qib_iba7322.c 	if (!dd->kregbase || !(dd->flags & QIB_PRESENT))
dd                856 drivers/infiniband/hw/qib/qib_iba7322.c 	return readq(&dd->kregbase[regno]);
dd                859 drivers/infiniband/hw/qib/qib_iba7322.c static inline void qib_write_kreg(const struct qib_devdata *dd,
dd                862 drivers/infiniband/hw/qib/qib_iba7322.c 	if (dd->kregbase && (dd->flags & QIB_PRESENT))
dd                863 drivers/infiniband/hw/qib/qib_iba7322.c 		writeq(value, &dd->kregbase[regno]);
dd                873 drivers/infiniband/hw/qib/qib_iba7322.c 	if (!ppd->cpspec->kpregbase || !(ppd->dd->flags & QIB_PRESENT))
dd                881 drivers/infiniband/hw/qib/qib_iba7322.c 	if (ppd->cpspec && ppd->dd && ppd->cpspec->kpregbase &&
dd                882 drivers/infiniband/hw/qib/qib_iba7322.c 	    (ppd->dd->flags & QIB_PRESENT))
dd                893 drivers/infiniband/hw/qib/qib_iba7322.c static inline void qib_write_kreg_ctxt(const struct qib_devdata *dd,
dd                897 drivers/infiniband/hw/qib/qib_iba7322.c 	qib_write_kreg(dd, regno + ctxt, value);
dd                900 drivers/infiniband/hw/qib/qib_iba7322.c static inline u64 read_7322_creg(const struct qib_devdata *dd, u16 regno)
dd                902 drivers/infiniband/hw/qib/qib_iba7322.c 	if (!dd->cspec->cregbase || !(dd->flags & QIB_PRESENT))
dd                904 drivers/infiniband/hw/qib/qib_iba7322.c 	return readq(&dd->cspec->cregbase[regno]);
dd                909 drivers/infiniband/hw/qib/qib_iba7322.c static inline u32 read_7322_creg32(const struct qib_devdata *dd, u16 regno)
dd                911 drivers/infiniband/hw/qib/qib_iba7322.c 	if (!dd->cspec->cregbase || !(dd->flags & QIB_PRESENT))
dd                913 drivers/infiniband/hw/qib/qib_iba7322.c 	return readl(&dd->cspec->cregbase[regno]);
dd                922 drivers/infiniband/hw/qib/qib_iba7322.c 	    (ppd->dd->flags & QIB_PRESENT))
dd                930 drivers/infiniband/hw/qib/qib_iba7322.c 	    !(ppd->dd->flags & QIB_PRESENT))
dd                939 drivers/infiniband/hw/qib/qib_iba7322.c 	    !(ppd->dd->flags & QIB_PRESENT))
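
The qib_iba7322.c accessors above all share one guard: a register read returns all-ones and a write is silently dropped unless the BAR is mapped (kregbase/cregbase non-NULL) and the QIB_PRESENT flag is set, which keeps late interrupts and timers harmless across resets and hot-unplug. A standalone model of that guard, using plain loads and stores where the driver uses readq/writeq:

	#include <stdint.h>
	#include <stdio.h>

	#define PRESENT 0x1   /* invented flag bit */

	struct dev {
		volatile uint64_t *kregbase;  /* stands in for the mapped BAR */
		unsigned flags;
	};

	/* Reads return a poison value and writes are dropped unless the
	 * device is mapped and marked present, mirroring the guards above. */
	static uint64_t read_kreg64(const struct dev *dd, uint16_t regno)
	{
		if (!dd->kregbase || !(dd->flags & PRESENT))
			return ~0ULL;           /* all f's, like -1 */
		return dd->kregbase[regno];
	}

	static void write_kreg(const struct dev *dd, uint16_t regno, uint64_t val)
	{
		if (dd->kregbase && (dd->flags & PRESENT))
			dd->kregbase[regno] = val;
	}

	int main(void)
	{
		static volatile uint64_t regs[16];
		struct dev dd = { regs, PRESENT };

		write_kreg(&dd, 3, 0xabcd);
		printf("%#llx\n", (unsigned long long)read_kreg64(&dd, 3));

		dd.flags = 0;                   /* device yanked / in reset */
		write_kreg(&dd, 3, 0);          /* silently dropped */
		printf("%#llx\n", (unsigned long long)read_kreg64(&dd, 3));
		return 0;
	}
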
dd               1341 drivers/infiniband/hw/qib/qib_iba7322.c 	struct qib_devdata *dd = ppd->dd;
dd               1344 drivers/infiniband/hw/qib/qib_iba7322.c 	u32 piobcnt = dd->piobcnt2k + dd->piobcnt4k + NUM_VL15_BUFS;
dd               1354 drivers/infiniband/hw/qib/qib_iba7322.c 		sbuf[i] = qib_read_kreg64(dd, kr_sendbuffererror + i);
dd               1357 drivers/infiniband/hw/qib/qib_iba7322.c 			qib_write_kreg(dd, kr_sendbuffererror + i, sbuf[i]);
dd               1362 drivers/infiniband/hw/qib/qib_iba7322.c 		qib_disarm_piobufs_set(dd, sbuf, piobcnt);
dd               1418 drivers/infiniband/hw/qib/qib_iba7322.c 	struct qib_devdata *dd = ppd->dd;
dd               1449 drivers/infiniband/hw/qib/qib_iba7322.c 	if (dd->flags & QIB_PIO_FLUSH_WC) {
dd               1457 drivers/infiniband/hw/qib/qib_iba7322.c 	qib_sendbuf_done(dd, bufn);
dd               1465 drivers/infiniband/hw/qib/qib_iba7322.c 	struct qib_devdata *dd = ppd->dd;
dd               1493 drivers/infiniband/hw/qib/qib_iba7322.c 	spin_lock(&dd->sendctrl_lock);
dd               1499 drivers/infiniband/hw/qib/qib_iba7322.c 		qib_write_kreg(dd, kr_scratch, 0);
dd               1511 drivers/infiniband/hw/qib/qib_iba7322.c 	qib_write_kreg(dd, kr_scratch, 0);
dd               1516 drivers/infiniband/hw/qib/qib_iba7322.c 		qib_write_kreg(dd, kr_scratch, 0);
dd               1519 drivers/infiniband/hw/qib/qib_iba7322.c 	spin_unlock(&dd->sendctrl_lock);
dd               1521 drivers/infiniband/hw/qib/qib_iba7322.c 	if ((op & QIB_SDMA_SENDCTRL_OP_DRAIN) && ppd->dd->cspec->r1)
dd               1589 drivers/infiniband/hw/qib/qib_iba7322.c 	struct qib_devdata *dd = ppd->dd;
dd               1596 drivers/infiniband/hw/qib/qib_iba7322.c 		qib_dev_err(dd, "IB%u:%u SDmaUnexpData\n", dd->unit,
dd               1603 drivers/infiniband/hw/qib/qib_iba7322.c 		qib_dev_porterr(dd, ppd->port,
dd               1650 drivers/infiniband/hw/qib/qib_iba7322.c static noinline void handle_7322_errors(struct qib_devdata *dd)
dd               1658 drivers/infiniband/hw/qib/qib_iba7322.c 	errs = qib_read_kreg64(dd, kr_errstatus);
dd               1660 drivers/infiniband/hw/qib/qib_iba7322.c 		qib_devinfo(dd->pcidev,
dd               1666 drivers/infiniband/hw/qib/qib_iba7322.c 	errs &= dd->cspec->errormask;
dd               1667 drivers/infiniband/hw/qib/qib_iba7322.c 	msg = dd->cspec->emsgbuf;
dd               1672 drivers/infiniband/hw/qib/qib_iba7322.c 		qib_7322_handle_hwerrors(dd, msg, sizeof(dd->cspec->emsgbuf));
dd               1676 drivers/infiniband/hw/qib/qib_iba7322.c 		qib_disarm_7322_senderrbufs(dd->pport);
dd               1682 drivers/infiniband/hw/qib/qib_iba7322.c 		qib_disarm_7322_senderrbufs(dd->pport);
dd               1684 drivers/infiniband/hw/qib/qib_iba7322.c 	qib_write_kreg(dd, kr_errclear, errs);
dd               1694 drivers/infiniband/hw/qib/qib_iba7322.c 	err_decode(msg, sizeof(dd->cspec->emsgbuf), errs & ~mask,
dd               1704 drivers/infiniband/hw/qib/qib_iba7322.c 		qib_dev_err(dd,
dd               1706 drivers/infiniband/hw/qib/qib_iba7322.c 		dd->flags &= ~QIB_INITTED;  /* needs re-init */
dd               1708 drivers/infiniband/hw/qib/qib_iba7322.c 		*dd->devstatusp |= QIB_STATUS_HWERROR;
dd               1709 drivers/infiniband/hw/qib/qib_iba7322.c 		for (pidx = 0; pidx < dd->num_pports; ++pidx)
dd               1710 drivers/infiniband/hw/qib/qib_iba7322.c 			if (dd->pport[pidx].link_speed_supported)
dd               1711 drivers/infiniband/hw/qib/qib_iba7322.c 				*dd->pport[pidx].statusp &= ~QIB_STATUS_IB_CONF;
dd               1715 drivers/infiniband/hw/qib/qib_iba7322.c 		qib_dev_err(dd, "%s error\n", msg);
dd               1725 drivers/infiniband/hw/qib/qib_iba7322.c 		qib_handle_urcv(dd, ~0U);
dd               1738 drivers/infiniband/hw/qib/qib_iba7322.c 	struct qib_devdata *dd = (struct qib_devdata *)data;
dd               1740 drivers/infiniband/hw/qib/qib_iba7322.c 	handle_7322_errors(dd);
dd               1741 drivers/infiniband/hw/qib/qib_iba7322.c 	qib_write_kreg(dd, kr_errmask, dd->cspec->errormask);
dd               1804 drivers/infiniband/hw/qib/qib_iba7322.c 		if (!ppd->dd->cspec->r1)
dd               1813 drivers/infiniband/hw/qib/qib_iba7322.c 	if ((IS_QMH(ppd->dd) || IS_QME(ppd->dd)) &&
dd               1825 drivers/infiniband/hw/qib/qib_iba7322.c 		if (!ppd->dd->cspec->r1 &&
dd               1840 drivers/infiniband/hw/qib/qib_iba7322.c 					    ppd->dd->cspec->r1 ?
dd               1845 drivers/infiniband/hw/qib/qib_iba7322.c 				ppd->dd->unit, ppd->port, ibclt);
dd               1861 drivers/infiniband/hw/qib/qib_iba7322.c 	struct qib_devdata *dd = ppd->dd;
dd               1864 drivers/infiniband/hw/qib/qib_iba7322.c 	fmask = qib_read_kreg64(dd, kr_act_fmask);
dd               1870 drivers/infiniband/hw/qib/qib_iba7322.c 		qib_devinfo(dd->pcidev,
dd               1887 drivers/infiniband/hw/qib/qib_iba7322.c 		qib_dev_porterr(dd, ppd->port,
dd               2004 drivers/infiniband/hw/qib/qib_iba7322.c 		qib_dev_porterr(dd, ppd->port, "%s error\n", msg);
dd               2013 drivers/infiniband/hw/qib/qib_iba7322.c static void qib_7322_set_intr_state(struct qib_devdata *dd, u32 enable)
dd               2016 drivers/infiniband/hw/qib/qib_iba7322.c 		if (dd->flags & QIB_BADINTR)
dd               2018 drivers/infiniband/hw/qib/qib_iba7322.c 		qib_write_kreg(dd, kr_intmask, dd->cspec->int_enable_mask);
dd               2020 drivers/infiniband/hw/qib/qib_iba7322.c 		qib_write_kreg(dd, kr_intclear, 0ULL);
dd               2021 drivers/infiniband/hw/qib/qib_iba7322.c 		if (dd->cspec->num_msix_entries) {
dd               2023 drivers/infiniband/hw/qib/qib_iba7322.c 			u64 val = qib_read_kreg64(dd, kr_intgranted);
dd               2026 drivers/infiniband/hw/qib/qib_iba7322.c 				qib_write_kreg(dd, kr_intgranted, val);
dd               2029 drivers/infiniband/hw/qib/qib_iba7322.c 		qib_write_kreg(dd, kr_intmask, 0ULL);
dd               2047 drivers/infiniband/hw/qib/qib_iba7322.c static void qib_7322_clear_freeze(struct qib_devdata *dd)
dd               2052 drivers/infiniband/hw/qib/qib_iba7322.c 	qib_write_kreg(dd, kr_errmask, 0ULL);
dd               2054 drivers/infiniband/hw/qib/qib_iba7322.c 	for (pidx = 0; pidx < dd->num_pports; ++pidx)
dd               2055 drivers/infiniband/hw/qib/qib_iba7322.c 		if (dd->pport[pidx].link_speed_supported)
dd               2056 drivers/infiniband/hw/qib/qib_iba7322.c 			qib_write_kreg_port(dd->pport + pidx, krp_errmask,
dd               2060 drivers/infiniband/hw/qib/qib_iba7322.c 	qib_7322_set_intr_state(dd, 0);
dd               2063 drivers/infiniband/hw/qib/qib_iba7322.c 	qib_write_kreg(dd, kr_control, dd->control);
dd               2064 drivers/infiniband/hw/qib/qib_iba7322.c 	qib_read_kreg32(dd, kr_scratch);
dd               2072 drivers/infiniband/hw/qib/qib_iba7322.c 	qib_write_kreg(dd, kr_hwerrclear, 0ULL);
dd               2073 drivers/infiniband/hw/qib/qib_iba7322.c 	qib_write_kreg(dd, kr_errclear, E_SPKT_ERRS_IGNORE);
dd               2074 drivers/infiniband/hw/qib/qib_iba7322.c 	qib_write_kreg(dd, kr_errmask, dd->cspec->errormask);
dd               2076 drivers/infiniband/hw/qib/qib_iba7322.c 	for (pidx = 0; pidx < dd->num_pports; ++pidx) {
dd               2077 drivers/infiniband/hw/qib/qib_iba7322.c 		if (!dd->pport[pidx].link_speed_supported)
dd               2079 drivers/infiniband/hw/qib/qib_iba7322.c 		qib_write_kreg_port(dd->pport + pidx, krp_errclear, ~0Ull);
dd               2080 drivers/infiniband/hw/qib/qib_iba7322.c 		qib_write_kreg_port(dd->pport + pidx, krp_errmask, ~0Ull);
dd               2082 drivers/infiniband/hw/qib/qib_iba7322.c 	qib_7322_set_intr_state(dd, 1);
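
qib_7322_clear_freeze, above, leaves the chip's error-freeze mode in a fixed order: mask error interrupts, disable interrupts, clear the freeze bit in the control register, flush with a scratch read, then clear latched error state and re-arm the masks. A hedged standalone model of that sequence; the bit position and the all-ones clear values are invented stand-ins for the real masks:

	#include <stdint.h>
	#include <stdio.h>

	#define FREEZE_MODE (1ULL << 1)   /* invented bit position */

	static volatile uint64_t kr_errmask, kr_control, kr_hwerrclear,
		kr_errclear, kr_scratch;
	static uint64_t control_shadow;   /* driver's cached control value */

	static void clear_freeze(void)
	{
		kr_errmask = 0;               /* no error irqs while thawing */
		control_shadow &= ~FREEZE_MODE;
		kr_control = control_shadow;  /* leave freeze mode */
		uint64_t flush = kr_scratch;  /* read flushes the posted write */
		(void)flush;
		kr_hwerrclear = ~0ULL;        /* drop latched hardware errors */
		kr_errclear = ~0ULL;          /* drop latched chip errors */
		kr_errmask = ~0ULL;           /* re-arm error reporting */
	}

	int main(void)
	{
		control_shadow = FREEZE_MODE;
		clear_freeze();
		printf("control now %#llx\n", (unsigned long long)kr_control);
		return 0;
	}
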
dd               2097 drivers/infiniband/hw/qib/qib_iba7322.c static void qib_7322_handle_hwerrors(struct qib_devdata *dd, char *msg,
dd               2104 drivers/infiniband/hw/qib/qib_iba7322.c 	hwerrs = qib_read_kreg64(dd, kr_hwerrstatus);
dd               2108 drivers/infiniband/hw/qib/qib_iba7322.c 		qib_dev_err(dd,
dd               2115 drivers/infiniband/hw/qib/qib_iba7322.c 	qib_write_kreg(dd, kr_hwerrclear, hwerrs &
dd               2118 drivers/infiniband/hw/qib/qib_iba7322.c 	hwerrs &= dd->cspec->hwerrmask;
dd               2123 drivers/infiniband/hw/qib/qib_iba7322.c 		qib_devinfo(dd->pcidev,
dd               2127 drivers/infiniband/hw/qib/qib_iba7322.c 	ctrl = qib_read_kreg32(dd, kr_control);
dd               2128 drivers/infiniband/hw/qib/qib_iba7322.c 	if ((ctrl & SYM_MASK(Control, FreezeMode)) && !dd->diag_client) {
dd               2133 drivers/infiniband/hw/qib/qib_iba7322.c 		    dd->cspec->stay_in_freeze) {
dd               2141 drivers/infiniband/hw/qib/qib_iba7322.c 			if (dd->flags & QIB_INITTED)
dd               2144 drivers/infiniband/hw/qib/qib_iba7322.c 			qib_7322_clear_freeze(dd);
dd               2153 drivers/infiniband/hw/qib/qib_iba7322.c 		dd->cspec->hwerrmask &= ~HWE_MASK(PowerOnBISTFailed);
dd               2154 drivers/infiniband/hw/qib/qib_iba7322.c 		qib_write_kreg(dd, kr_hwerrmask, dd->cspec->hwerrmask);
dd               2161 drivers/infiniband/hw/qib/qib_iba7322.c 	qib_dev_err(dd, "%s hardware error\n", msg);
dd               2169 drivers/infiniband/hw/qib/qib_iba7322.c 		struct qib_pportdata *ppd = dd->pport;
dd               2171 drivers/infiniband/hw/qib/qib_iba7322.c 		for (; pidx < dd->num_pports; ++pidx, ppd++) {
dd               2187 drivers/infiniband/hw/qib/qib_iba7322.c 	if (isfatal && !dd->diag_client) {
dd               2188 drivers/infiniband/hw/qib/qib_iba7322.c 		qib_dev_err(dd,
dd               2190 drivers/infiniband/hw/qib/qib_iba7322.c 			dd->serial);
dd               2195 drivers/infiniband/hw/qib/qib_iba7322.c 		if (dd->freezemsg)
dd               2196 drivers/infiniband/hw/qib/qib_iba7322.c 			snprintf(dd->freezemsg, dd->freezelen,
dd               2198 drivers/infiniband/hw/qib/qib_iba7322.c 		qib_disable_after_error(dd);
dd               2213 drivers/infiniband/hw/qib/qib_iba7322.c static void qib_7322_init_hwerrors(struct qib_devdata *dd)
dd               2218 drivers/infiniband/hw/qib/qib_iba7322.c 	extsval = qib_read_kreg64(dd, kr_extstatus);
dd               2221 drivers/infiniband/hw/qib/qib_iba7322.c 		qib_dev_err(dd, "MemBIST did not complete!\n");
dd               2224 drivers/infiniband/hw/qib/qib_iba7322.c 	qib_write_kreg(dd, kr_hwerrclear, ~HWE_MASK(PowerOnBISTFailed));
dd               2225 drivers/infiniband/hw/qib/qib_iba7322.c 	qib_write_kreg(dd, kr_hwerrmask, dd->cspec->hwerrmask);
dd               2228 drivers/infiniband/hw/qib/qib_iba7322.c 	qib_write_kreg(dd, kr_errclear, ~0ULL);
dd               2230 drivers/infiniband/hw/qib/qib_iba7322.c 	qib_write_kreg(dd, kr_errmask, ~0ULL);
dd               2231 drivers/infiniband/hw/qib/qib_iba7322.c 	dd->cspec->errormask = qib_read_kreg64(dd, kr_errmask);
dd               2232 drivers/infiniband/hw/qib/qib_iba7322.c 	for (pidx = 0; pidx < dd->num_pports; ++pidx)
dd               2233 drivers/infiniband/hw/qib/qib_iba7322.c 		if (dd->pport[pidx].link_speed_supported)
dd               2234 drivers/infiniband/hw/qib/qib_iba7322.c 			qib_write_kreg_port(dd->pport + pidx, krp_errmask,
dd               2244 drivers/infiniband/hw/qib/qib_iba7322.c static void qib_set_7322_armlaunch(struct qib_devdata *dd, u32 enable)
dd               2247 drivers/infiniband/hw/qib/qib_iba7322.c 		qib_write_kreg(dd, kr_errclear, QIB_E_SPIOARMLAUNCH);
dd               2248 drivers/infiniband/hw/qib/qib_iba7322.c 		dd->cspec->errormask |= QIB_E_SPIOARMLAUNCH;
dd               2250 drivers/infiniband/hw/qib/qib_iba7322.c 		dd->cspec->errormask &= ~QIB_E_SPIOARMLAUNCH;
dd               2251 drivers/infiniband/hw/qib/qib_iba7322.c 	qib_write_kreg(dd, kr_errmask, dd->cspec->errormask);
dd               2263 drivers/infiniband/hw/qib/qib_iba7322.c 	struct qib_devdata *dd = ppd->dd;
dd               2301 drivers/infiniband/hw/qib/qib_iba7322.c 	qib_write_kreg(dd, kr_scratch, 0);
dd               2314 drivers/infiniband/hw/qib/qib_iba7322.c #define NUM_RCV_BUF_UNITS(dd) ((64 * 1024) / (RCV_BUF_UNITSZ * dd->num_pports))
dd               2319 drivers/infiniband/hw/qib/qib_iba7322.c 	struct qib_devdata *dd = ppd->dd;
dd               2331 drivers/infiniband/hw/qib/qib_iba7322.c 	totcred = NUM_RCV_BUF_UNITS(dd);
dd               2347 drivers/infiniband/hw/qib/qib_iba7322.c 	qib_write_kreg(dd, kr_scratch, 0ULL);
dd               2360 drivers/infiniband/hw/qib/qib_iba7322.c 	qib_write_kreg(dd, kr_scratch, 0ULL);
dd               2375 drivers/infiniband/hw/qib/qib_iba7322.c 	struct qib_devdata *dd = ppd->dd;
dd               2388 drivers/infiniband/hw/qib/qib_iba7322.c 	qib_write_kreg(dd, kr_scratch, 0ULL);
dd               2480 drivers/infiniband/hw/qib/qib_iba7322.c 		if (dd->base_guid)
dd               2481 drivers/infiniband/hw/qib/qib_iba7322.c 			guid = be64_to_cpu(dd->base_guid) + ppd->port - 1;
dd               2487 drivers/infiniband/hw/qib/qib_iba7322.c 	qib_write_kreg(dd, kr_scratch, 0);
dd               2497 drivers/infiniband/hw/qib/qib_iba7322.c 	qib_write_kreg(dd, kr_scratch, 0ULL);
dd               2502 drivers/infiniband/hw/qib/qib_iba7322.c 	spin_lock_irqsave(&dd->cspec->rcvmod_lock, flags);
dd               2505 drivers/infiniband/hw/qib/qib_iba7322.c 	spin_unlock_irqrestore(&dd->cspec->rcvmod_lock, flags);
dd               2533 drivers/infiniband/hw/qib/qib_iba7322.c 	if (ppd->dd->cspec->r1)
dd               2556 drivers/infiniband/hw/qib/qib_iba7322.c 		struct qib_devdata *dd = ppd->dd;
dd               2560 drivers/infiniband/hw/qib/qib_iba7322.c 		diagc = qib_read_kreg64(dd, kr_hwdiagctrl);
dd               2561 drivers/infiniband/hw/qib/qib_iba7322.c 		qib_write_kreg(dd, kr_hwdiagctrl,
dd               2589 drivers/infiniband/hw/qib/qib_iba7322.c 		qib_write_kreg(dd, kr_hwdiagctrl, diagc);
dd               2617 drivers/infiniband/hw/qib/qib_iba7322.c 	struct qib_devdata *dd = ppd->dd;
dd               2626 drivers/infiniband/hw/qib/qib_iba7322.c 	if (dd->diag_client)
dd               2643 drivers/infiniband/hw/qib/qib_iba7322.c 	spin_lock_irqsave(&dd->cspec->gpio_lock, flags);
dd               2644 drivers/infiniband/hw/qib/qib_iba7322.c 	extctl = dd->cspec->extctrl & (ppd->port == 1 ?
dd               2658 drivers/infiniband/hw/qib/qib_iba7322.c 	dd->cspec->extctrl = extctl;
dd               2659 drivers/infiniband/hw/qib/qib_iba7322.c 	qib_write_kreg(dd, kr_extctrl, dd->cspec->extctrl);
dd               2660 drivers/infiniband/hw/qib/qib_iba7322.c 	spin_unlock_irqrestore(&dd->cspec->gpio_lock, flags);
dd               2668 drivers/infiniband/hw/qib/qib_iba7322.c static int qib_7322_notify_dca(struct qib_devdata *dd, unsigned long event)
dd               2672 drivers/infiniband/hw/qib/qib_iba7322.c 		if (dd->flags & QIB_DCA_ENABLED)
dd               2674 drivers/infiniband/hw/qib/qib_iba7322.c 		if (!dca_add_requester(&dd->pcidev->dev)) {
dd               2675 drivers/infiniband/hw/qib/qib_iba7322.c 			qib_devinfo(dd->pcidev, "DCA enabled\n");
dd               2676 drivers/infiniband/hw/qib/qib_iba7322.c 			dd->flags |= QIB_DCA_ENABLED;
dd               2677 drivers/infiniband/hw/qib/qib_iba7322.c 			qib_setup_dca(dd);
dd               2681 drivers/infiniband/hw/qib/qib_iba7322.c 		if (dd->flags & QIB_DCA_ENABLED) {
dd               2682 drivers/infiniband/hw/qib/qib_iba7322.c 			dca_remove_requester(&dd->pcidev->dev);
dd               2683 drivers/infiniband/hw/qib/qib_iba7322.c 			dd->flags &= ~QIB_DCA_ENABLED;
dd               2684 drivers/infiniband/hw/qib/qib_iba7322.c 			dd->cspec->dca_ctrl = 0;
dd               2685 drivers/infiniband/hw/qib/qib_iba7322.c 			qib_write_kreg(dd, KREG_IDX(DCACtrlA),
dd               2686 drivers/infiniband/hw/qib/qib_iba7322.c 				dd->cspec->dca_ctrl);
dd               2695 drivers/infiniband/hw/qib/qib_iba7322.c 	struct qib_devdata *dd = rcd->dd;
dd               2696 drivers/infiniband/hw/qib/qib_iba7322.c 	struct qib_chip_specific *cspec = dd->cspec;
dd               2698 drivers/infiniband/hw/qib/qib_iba7322.c 	if (!(dd->flags & QIB_DCA_ENABLED))
dd               2707 drivers/infiniband/hw/qib/qib_iba7322.c 			(u64) dca3_get_tag(&dd->pcidev->dev, cpu) << rmp->lsb;
dd               2708 drivers/infiniband/hw/qib/qib_iba7322.c 		qib_devinfo(dd->pcidev,
dd               2711 drivers/infiniband/hw/qib/qib_iba7322.c 		qib_write_kreg(dd, rmp->regno,
dd               2714 drivers/infiniband/hw/qib/qib_iba7322.c 		qib_write_kreg(dd, KREG_IDX(DCACtrlA), cspec->dca_ctrl);
dd               2720 drivers/infiniband/hw/qib/qib_iba7322.c 	struct qib_devdata *dd = ppd->dd;
dd               2721 drivers/infiniband/hw/qib/qib_iba7322.c 	struct qib_chip_specific *cspec = dd->cspec;
dd               2724 drivers/infiniband/hw/qib/qib_iba7322.c 	if (!(dd->flags & QIB_DCA_ENABLED))
dd               2732 drivers/infiniband/hw/qib/qib_iba7322.c 			(u64) dca3_get_tag(&dd->pcidev->dev, cpu) <<
dd               2736 drivers/infiniband/hw/qib/qib_iba7322.c 		qib_devinfo(dd->pcidev,
dd               2739 drivers/infiniband/hw/qib/qib_iba7322.c 		qib_write_kreg(dd, KREG_IDX(DCACtrlF),
dd               2744 drivers/infiniband/hw/qib/qib_iba7322.c 		qib_write_kreg(dd, KREG_IDX(DCACtrlA), cspec->dca_ctrl);
dd               2748 drivers/infiniband/hw/qib/qib_iba7322.c static void qib_setup_dca(struct qib_devdata *dd)
dd               2750 drivers/infiniband/hw/qib/qib_iba7322.c 	struct qib_chip_specific *cspec = dd->cspec;
dd               2781 drivers/infiniband/hw/qib/qib_iba7322.c 		qib_write_kreg(dd, KREG_IDX(DCACtrlB) + i,
dd               2784 drivers/infiniband/hw/qib/qib_iba7322.c 		setup_dca_notifier(dd, i);
dd               2809 drivers/infiniband/hw/qib/qib_iba7322.c 	struct qib_devdata *dd;
dd               2814 drivers/infiniband/hw/qib/qib_iba7322.c 		dd = rcd->dd;
dd               2818 drivers/infiniband/hw/qib/qib_iba7322.c 		dd = ppd->dd;
dd               2820 drivers/infiniband/hw/qib/qib_iba7322.c 	qib_devinfo(dd->pcidev,
dd               2826 drivers/infiniband/hw/qib/qib_iba7322.c static void qib_7322_free_irq(struct qib_devdata *dd)
dd               2831 drivers/infiniband/hw/qib/qib_iba7322.c 	dd->cspec->main_int_mask = ~0ULL;
dd               2833 drivers/infiniband/hw/qib/qib_iba7322.c 	for (i = 0; i < dd->cspec->num_msix_entries; i++) {
dd               2835 drivers/infiniband/hw/qib/qib_iba7322.c 		if (dd->cspec->msix_entries[i].arg) {
dd               2837 drivers/infiniband/hw/qib/qib_iba7322.c 			reset_dca_notifier(dd, i);
dd               2839 drivers/infiniband/hw/qib/qib_iba7322.c 			irq_set_affinity_hint(pci_irq_vector(dd->pcidev, i),
dd               2841 drivers/infiniband/hw/qib/qib_iba7322.c 			free_cpumask_var(dd->cspec->msix_entries[i].mask);
dd               2842 drivers/infiniband/hw/qib/qib_iba7322.c 			pci_free_irq(dd->pcidev, i,
dd               2843 drivers/infiniband/hw/qib/qib_iba7322.c 				     dd->cspec->msix_entries[i].arg);
dd               2848 drivers/infiniband/hw/qib/qib_iba7322.c 	if (!dd->cspec->num_msix_entries)
dd               2849 drivers/infiniband/hw/qib/qib_iba7322.c 		pci_free_irq(dd->pcidev, 0, dd);
dd               2851 drivers/infiniband/hw/qib/qib_iba7322.c 		dd->cspec->num_msix_entries = 0;
dd               2853 drivers/infiniband/hw/qib/qib_iba7322.c 	pci_free_irq_vectors(dd->pcidev);
dd               2856 drivers/infiniband/hw/qib/qib_iba7322.c 	intgranted = qib_read_kreg64(dd, kr_intgranted);
dd               2858 drivers/infiniband/hw/qib/qib_iba7322.c 		qib_write_kreg(dd, kr_intgranted, intgranted);
dd               2861 drivers/infiniband/hw/qib/qib_iba7322.c static void qib_setup_7322_cleanup(struct qib_devdata *dd)
dd               2866 drivers/infiniband/hw/qib/qib_iba7322.c 	if (dd->flags & QIB_DCA_ENABLED) {
dd               2867 drivers/infiniband/hw/qib/qib_iba7322.c 		dca_remove_requester(&dd->pcidev->dev);
dd               2868 drivers/infiniband/hw/qib/qib_iba7322.c 		dd->flags &= ~QIB_DCA_ENABLED;
dd               2869 drivers/infiniband/hw/qib/qib_iba7322.c 		dd->cspec->dca_ctrl = 0;
dd               2870 drivers/infiniband/hw/qib/qib_iba7322.c 		qib_write_kreg(dd, KREG_IDX(DCACtrlA), dd->cspec->dca_ctrl);
dd               2874 drivers/infiniband/hw/qib/qib_iba7322.c 	qib_7322_free_irq(dd);
dd               2875 drivers/infiniband/hw/qib/qib_iba7322.c 	kfree(dd->cspec->cntrs);
dd               2876 drivers/infiniband/hw/qib/qib_iba7322.c 	kfree(dd->cspec->sendchkenable);
dd               2877 drivers/infiniband/hw/qib/qib_iba7322.c 	kfree(dd->cspec->sendgrhchk);
dd               2878 drivers/infiniband/hw/qib/qib_iba7322.c 	kfree(dd->cspec->sendibchk);
dd               2879 drivers/infiniband/hw/qib/qib_iba7322.c 	kfree(dd->cspec->msix_entries);
dd               2880 drivers/infiniband/hw/qib/qib_iba7322.c 	for (i = 0; i < dd->num_pports; i++) {
dd               2885 drivers/infiniband/hw/qib/qib_iba7322.c 		kfree(dd->pport[i].cpspec->portcntrs);
dd               2886 drivers/infiniband/hw/qib/qib_iba7322.c 		if (dd->flags & QIB_HAS_QSFP) {
dd               2887 drivers/infiniband/hw/qib/qib_iba7322.c 			spin_lock_irqsave(&dd->cspec->gpio_lock, flags);
dd               2888 drivers/infiniband/hw/qib/qib_iba7322.c 			dd->cspec->gpio_mask &= ~mask;
dd               2889 drivers/infiniband/hw/qib/qib_iba7322.c 			qib_write_kreg(dd, kr_gpio_mask, dd->cspec->gpio_mask);
dd               2890 drivers/infiniband/hw/qib/qib_iba7322.c 			spin_unlock_irqrestore(&dd->cspec->gpio_lock, flags);
dd               2896 drivers/infiniband/hw/qib/qib_iba7322.c static void sdma_7322_intr(struct qib_devdata *dd, u64 istat)
dd               2898 drivers/infiniband/hw/qib/qib_iba7322.c 	struct qib_pportdata *ppd0 = &dd->pport[0];
dd               2899 drivers/infiniband/hw/qib/qib_iba7322.c 	struct qib_pportdata *ppd1 = &dd->pport[1];
dd               2919 drivers/infiniband/hw/qib/qib_iba7322.c static void qib_wantpiobuf_7322_intr(struct qib_devdata *dd, u32 needint)
dd               2923 drivers/infiniband/hw/qib/qib_iba7322.c 	spin_lock_irqsave(&dd->sendctrl_lock, flags);
dd               2925 drivers/infiniband/hw/qib/qib_iba7322.c 		dd->sendctrl |= SYM_MASK(SendCtrl, SendIntBufAvail);
dd               2927 drivers/infiniband/hw/qib/qib_iba7322.c 		dd->sendctrl &= ~SYM_MASK(SendCtrl, SendIntBufAvail);
dd               2928 drivers/infiniband/hw/qib/qib_iba7322.c 	qib_write_kreg(dd, kr_sendctrl, dd->sendctrl);
dd               2929 drivers/infiniband/hw/qib/qib_iba7322.c 	qib_write_kreg(dd, kr_scratch, 0ULL);
dd               2930 drivers/infiniband/hw/qib/qib_iba7322.c 	spin_unlock_irqrestore(&dd->sendctrl_lock, flags);
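
qib_wantpiobuf_7322_intr, above, shows the driver's standard sendctrl idiom: mutate a cached shadow of the register under sendctrl_lock, write the shadow to the chip, and write the scratch register so the posted MMIO write is flushed before the lock drops. A standalone model using a pthread mutex in place of the kernel spinlock; the mask bit is invented:

	#include <pthread.h>
	#include <stdint.h>
	#include <stdio.h>

	#define SEND_INT_BUFAVAIL (1ULL << 4)  /* invented mask bit */

	static pthread_mutex_t sendctrl_lock = PTHREAD_MUTEX_INITIALIZER;
	static uint64_t sendctrl_shadow;       /* cached register value */
	static volatile uint64_t kr_sendctrl, kr_scratch;

	/* Enable or disable the "PIO buffer available" interrupt: update
	 * the shadow under the lock, push it to hardware, and use the
	 * scratch write to stand in for flushing the posted MMIO write. */
	static void wantpiobuf_intr(int needint)
	{
		pthread_mutex_lock(&sendctrl_lock);
		if (needint)
			sendctrl_shadow |= SEND_INT_BUFAVAIL;
		else
			sendctrl_shadow &= ~SEND_INT_BUFAVAIL;
		kr_sendctrl = sendctrl_shadow;
		kr_scratch = 0;        /* force the write out before unlocking */
		pthread_mutex_unlock(&sendctrl_lock);
	}

	int main(void)
	{
		wantpiobuf_intr(1);
		printf("sendctrl %#llx\n", (unsigned long long)kr_sendctrl);
		wantpiobuf_intr(0);
		printf("sendctrl %#llx\n", (unsigned long long)kr_sendctrl);
		return 0;
	}
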
dd               2938 drivers/infiniband/hw/qib/qib_iba7322.c static noinline void unknown_7322_ibits(struct qib_devdata *dd, u64 istat)
dd               2944 drivers/infiniband/hw/qib/qib_iba7322.c 	qib_dev_err(dd,
dd               2947 drivers/infiniband/hw/qib/qib_iba7322.c 	qib_write_kreg(dd, kr_intmask, (dd->cspec->int_enable_mask & ~kills));
dd               2951 drivers/infiniband/hw/qib/qib_iba7322.c static noinline void unknown_7322_gpio_intr(struct qib_devdata *dd)
dd               2964 drivers/infiniband/hw/qib/qib_iba7322.c 	gpiostatus = qib_read_kreg32(dd, kr_gpio_status);
dd               2972 drivers/infiniband/hw/qib/qib_iba7322.c 	qib_write_kreg(dd, kr_gpio_clear, gpiostatus);
dd               2977 drivers/infiniband/hw/qib/qib_iba7322.c 	for (pidx = 0; pidx < dd->num_pports && (dd->flags & QIB_HAS_QSFP);
dd               2983 drivers/infiniband/hw/qib/qib_iba7322.c 		if (!dd->pport[pidx].link_speed_supported)
dd               2986 drivers/infiniband/hw/qib/qib_iba7322.c 		ppd = dd->pport + pidx;
dd               2988 drivers/infiniband/hw/qib/qib_iba7322.c 		if (gpiostatus & dd->cspec->gpio_mask & mask) {
dd               2993 drivers/infiniband/hw/qib/qib_iba7322.c 			pins = qib_read_kreg64(dd, kr_extstatus);
dd               3004 drivers/infiniband/hw/qib/qib_iba7322.c 		const u32 mask = qib_read_kreg32(dd, kr_gpio_mask);
dd               3010 drivers/infiniband/hw/qib/qib_iba7322.c 		dd->cspec->gpio_mask &= ~gpio_irq;
dd               3011 drivers/infiniband/hw/qib/qib_iba7322.c 		qib_write_kreg(dd, kr_gpio_mask, dd->cspec->gpio_mask);
dd               3019 drivers/infiniband/hw/qib/qib_iba7322.c static noinline void unlikely_7322_intr(struct qib_devdata *dd, u64 istat)
dd               3022 drivers/infiniband/hw/qib/qib_iba7322.c 		unknown_7322_ibits(dd, istat);
dd               3024 drivers/infiniband/hw/qib/qib_iba7322.c 		unknown_7322_gpio_intr(dd);
dd               3026 drivers/infiniband/hw/qib/qib_iba7322.c 		qib_write_kreg(dd, kr_errmask, 0ULL);
dd               3027 drivers/infiniband/hw/qib/qib_iba7322.c 		tasklet_schedule(&dd->error_tasklet);
dd               3029 drivers/infiniband/hw/qib/qib_iba7322.c 	if (istat & INT_MASK_P(Err, 0) && dd->rcd[0])
dd               3030 drivers/infiniband/hw/qib/qib_iba7322.c 		handle_7322_p_errors(dd->rcd[0]->ppd);
dd               3031 drivers/infiniband/hw/qib/qib_iba7322.c 	if (istat & INT_MASK_P(Err, 1) && dd->rcd[1])
dd               3032 drivers/infiniband/hw/qib/qib_iba7322.c 		handle_7322_p_errors(dd->rcd[1]->ppd);
dd               3041 drivers/infiniband/hw/qib/qib_iba7322.c 	struct qib_devdata *dd = rcd->dd;
dd               3042 drivers/infiniband/hw/qib/qib_iba7322.c 	u32 timeout = dd->cspec->rcvavail_timeout[rcd->ctxt];
dd               3055 drivers/infiniband/hw/qib/qib_iba7322.c 	dd->cspec->rcvavail_timeout[rcd->ctxt] = timeout;
dd               3056 drivers/infiniband/hw/qib/qib_iba7322.c 	qib_write_kreg(dd, kr_rcvavailtimeout + rcd->ctxt, timeout);
dd               3069 drivers/infiniband/hw/qib/qib_iba7322.c 	struct qib_devdata *dd = data;
dd               3077 drivers/infiniband/hw/qib/qib_iba7322.c 	if ((dd->flags & (QIB_PRESENT | QIB_BADINTR)) != QIB_PRESENT) {
dd               3088 drivers/infiniband/hw/qib/qib_iba7322.c 	istat = qib_read_kreg64(dd, kr_intstatus);
dd               3091 drivers/infiniband/hw/qib/qib_iba7322.c 		qib_bad_intrstatus(dd);
dd               3092 drivers/infiniband/hw/qib/qib_iba7322.c 		qib_dev_err(dd, "Interrupt status all f's, skipping\n");
dd               3098 drivers/infiniband/hw/qib/qib_iba7322.c 	istat &= dd->cspec->main_int_mask;
dd               3105 drivers/infiniband/hw/qib/qib_iba7322.c 	this_cpu_inc(*dd->int_counter);
dd               3111 drivers/infiniband/hw/qib/qib_iba7322.c 		unlikely_7322_intr(dd, istat);
dd               3119 drivers/infiniband/hw/qib/qib_iba7322.c 	qib_write_kreg(dd, kr_intclear, istat);
dd               3130 drivers/infiniband/hw/qib/qib_iba7322.c 		for (i = 0; i < dd->first_user_ctxt; i++) {
dd               3133 drivers/infiniband/hw/qib/qib_iba7322.c 				if (dd->rcd[i])
dd               3134 drivers/infiniband/hw/qib/qib_iba7322.c 					qib_kreceive(dd->rcd[i], NULL, &npkts);
dd               3141 drivers/infiniband/hw/qib/qib_iba7322.c 			qib_handle_urcv(dd, ctxtrbits);
dd               3146 drivers/infiniband/hw/qib/qib_iba7322.c 		sdma_7322_intr(dd, istat);
dd               3148 drivers/infiniband/hw/qib/qib_iba7322.c 	if ((istat & QIB_I_SPIOBUFAVAIL) && (dd->flags & QIB_INITTED))
dd               3149 drivers/infiniband/hw/qib/qib_iba7322.c 		qib_ib_piobufavail(dd);
dd               3162 drivers/infiniband/hw/qib/qib_iba7322.c 	struct qib_devdata *dd = rcd->dd;
dd               3165 drivers/infiniband/hw/qib/qib_iba7322.c 	if ((dd->flags & (QIB_PRESENT | QIB_BADINTR)) != QIB_PRESENT)
dd               3174 drivers/infiniband/hw/qib/qib_iba7322.c 	this_cpu_inc(*dd->int_counter);
dd               3177 drivers/infiniband/hw/qib/qib_iba7322.c 	qib_write_kreg(dd, kr_intclear, ((1ULL << QIB_I_RCVAVAIL_LSB) |
dd               3190 drivers/infiniband/hw/qib/qib_iba7322.c 	struct qib_devdata *dd = data;
dd               3192 drivers/infiniband/hw/qib/qib_iba7322.c 	if ((dd->flags & (QIB_PRESENT | QIB_BADINTR)) != QIB_PRESENT)
dd               3201 drivers/infiniband/hw/qib/qib_iba7322.c 	this_cpu_inc(*dd->int_counter);
dd               3204 drivers/infiniband/hw/qib/qib_iba7322.c 	qib_write_kreg(dd, kr_intclear, QIB_I_SPIOBUFAVAIL);
dd               3207 drivers/infiniband/hw/qib/qib_iba7322.c 	if (dd->flags & QIB_INITTED)
dd               3208 drivers/infiniband/hw/qib/qib_iba7322.c 		qib_ib_piobufavail(dd);
dd               3210 drivers/infiniband/hw/qib/qib_iba7322.c 		qib_wantpiobuf_7322_intr(dd, 0);
dd               3221 drivers/infiniband/hw/qib/qib_iba7322.c 	struct qib_devdata *dd = ppd->dd;
dd               3223 drivers/infiniband/hw/qib/qib_iba7322.c 	if ((dd->flags & (QIB_PRESENT | QIB_BADINTR)) != QIB_PRESENT)
dd               3232 drivers/infiniband/hw/qib/qib_iba7322.c 	this_cpu_inc(*dd->int_counter);
dd               3235 drivers/infiniband/hw/qib/qib_iba7322.c 	qib_write_kreg(dd, kr_intclear, ppd->hw_pidx ?
dd               3248 drivers/infiniband/hw/qib/qib_iba7322.c 	struct qib_devdata *dd = ppd->dd;
dd               3250 drivers/infiniband/hw/qib/qib_iba7322.c 	if ((dd->flags & (QIB_PRESENT | QIB_BADINTR)) != QIB_PRESENT)
dd               3259 drivers/infiniband/hw/qib/qib_iba7322.c 	this_cpu_inc(*dd->int_counter);
dd               3262 drivers/infiniband/hw/qib/qib_iba7322.c 	qib_write_kreg(dd, kr_intclear, ppd->hw_pidx ?
dd               3275 drivers/infiniband/hw/qib/qib_iba7322.c 	struct qib_devdata *dd = ppd->dd;
dd               3277 drivers/infiniband/hw/qib/qib_iba7322.c 	if ((dd->flags & (QIB_PRESENT | QIB_BADINTR)) != QIB_PRESENT)
dd               3286 drivers/infiniband/hw/qib/qib_iba7322.c 	this_cpu_inc(*dd->int_counter);
dd               3289 drivers/infiniband/hw/qib/qib_iba7322.c 	qib_write_kreg(dd, kr_intclear, ppd->hw_pidx ?
dd               3303 drivers/infiniband/hw/qib/qib_iba7322.c 	struct qib_devdata *dd = ppd->dd;
dd               3305 drivers/infiniband/hw/qib/qib_iba7322.c 	if ((dd->flags & (QIB_PRESENT | QIB_BADINTR)) != QIB_PRESENT)
dd               3314 drivers/infiniband/hw/qib/qib_iba7322.c 	this_cpu_inc(*dd->int_counter);
dd               3317 drivers/infiniband/hw/qib/qib_iba7322.c 	qib_write_kreg(dd, kr_intclear, ppd->hw_pidx ?
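
The four SDMA MSI-X handlers listed above (send DMA, idle, progress, cleanup) differ only in which interrupt-status bit they acknowledge: each bails out unless the device is present and its interrupts are trusted, bumps the per-CPU interrupt counter, acks the port-specific bit, and then runs the stage-specific work. A hedged sketch of that common skeleton, with invented flag bits and a plain counter standing in for this_cpu_inc():

	#include <stdint.h>
	#include <stdio.h>

	#define PRESENT 0x1   /* invented flag bits */
	#define BADINTR 0x2

	struct port {
		unsigned hw_pidx;     /* 0 or 1: which port on the chip */
		unsigned dev_flags;
		uint64_t intclear;    /* stands in for the kr_intclear write */
		unsigned long nints;  /* stands in for the per-CPU counter */
	};

	/* Shared shape of the handlers above: guard, count, ack, work. */
	static int sdma_intr_common(struct port *p, uint64_t ack_p0,
				    uint64_t ack_p1, void (*work)(struct port *))
	{
		if ((p->dev_flags & (PRESENT | BADINTR)) != PRESENT)
			return 0;     /* device gone or interrupts untrusted */
		p->nints++;
		p->intclear = p->hw_pidx ? ack_p1 : ack_p0;  /* ack right port */
		work(p);
		return 1;
	}

	static void do_sdma_work(struct port *p)
	{
		printf("sdma progress on port %u\n", p->hw_pidx);
	}

	int main(void)
	{
		struct port p = { .hw_pidx = 1, .dev_flags = PRESENT };

		sdma_intr_common(&p, 1ULL << 15, 1ULL << 47, do_sdma_work);
		printf("acked %#llx after %lu ints\n",
		       (unsigned long long)p.intclear, p.nints);
		return 0;
	}
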
dd               3327 drivers/infiniband/hw/qib/qib_iba7322.c static void reset_dca_notifier(struct qib_devdata *dd, int msixnum)
dd               3329 drivers/infiniband/hw/qib/qib_iba7322.c 	if (!dd->cspec->msix_entries[msixnum].dca)
dd               3332 drivers/infiniband/hw/qib/qib_iba7322.c 	qib_devinfo(dd->pcidev, "Disabling notifier on HCA %d irq %d\n",
dd               3333 drivers/infiniband/hw/qib/qib_iba7322.c 		    dd->unit, pci_irq_vector(dd->pcidev, msixnum));
dd               3334 drivers/infiniband/hw/qib/qib_iba7322.c 	irq_set_affinity_notifier(pci_irq_vector(dd->pcidev, msixnum), NULL);
dd               3335 drivers/infiniband/hw/qib/qib_iba7322.c 	dd->cspec->msix_entries[msixnum].notifier = NULL;
dd               3338 drivers/infiniband/hw/qib/qib_iba7322.c static void setup_dca_notifier(struct qib_devdata *dd, int msixnum)
dd               3340 drivers/infiniband/hw/qib/qib_iba7322.c 	struct qib_msix_entry *m = &dd->cspec->msix_entries[msixnum];
dd               3350 drivers/infiniband/hw/qib/qib_iba7322.c 		n->notify.irq = pci_irq_vector(dd->pcidev, msixnum);
dd               3355 drivers/infiniband/hw/qib/qib_iba7322.c 		qib_devinfo(dd->pcidev,
dd               3378 drivers/infiniband/hw/qib/qib_iba7322.c static void qib_setup_7322_interrupt(struct qib_devdata *dd, int clearpend)
dd               3386 drivers/infiniband/hw/qib/qib_iba7322.c 	if (!dd->num_pports)
dd               3395 drivers/infiniband/hw/qib/qib_iba7322.c 		qib_7322_set_intr_state(dd, 0);
dd               3398 drivers/infiniband/hw/qib/qib_iba7322.c 		qib_7322_init_hwerrors(dd);
dd               3401 drivers/infiniband/hw/qib/qib_iba7322.c 		qib_write_kreg(dd, kr_intclear, ~0ULL);
dd               3404 drivers/infiniband/hw/qib/qib_iba7322.c 		qib_write_kreg(dd, kr_intgranted, ~0ULL);
dd               3405 drivers/infiniband/hw/qib/qib_iba7322.c 		qib_write_kreg(dd, kr_vecclr_wo_int, ~0ULL);
dd               3408 drivers/infiniband/hw/qib/qib_iba7322.c 	if (!dd->cspec->num_msix_entries) {
dd               3411 drivers/infiniband/hw/qib/qib_iba7322.c 		ret = pci_request_irq(dd->pcidev, 0, qib_7322intr, NULL, dd,
dd               3415 drivers/infiniband/hw/qib/qib_iba7322.c 				dd,
dd               3417 drivers/infiniband/hw/qib/qib_iba7322.c 				pci_irq_vector(dd->pcidev, 0), ret);
dd               3420 drivers/infiniband/hw/qib/qib_iba7322.c 		dd->cspec->main_int_mask = ~0ULL;
dd               3428 drivers/infiniband/hw/qib/qib_iba7322.c 	local_mask = cpumask_of_pcibus(dd->pcidev->bus);
dd               3441 drivers/infiniband/hw/qib/qib_iba7322.c 	for (i = 0; msixnum < dd->cspec->num_msix_entries; i++) {
dd               3451 drivers/infiniband/hw/qib/qib_iba7322.c 				if (irq_table[i].port > dd->num_pports)
dd               3453 drivers/infiniband/hw/qib/qib_iba7322.c 				arg = dd->pport + irq_table[i].port - 1;
dd               3455 drivers/infiniband/hw/qib/qib_iba7322.c 				arg = dd;
dd               3461 drivers/infiniband/hw/qib/qib_iba7322.c 			ret = pci_request_irq(dd->pcidev, msixnum, handler,
dd               3463 drivers/infiniband/hw/qib/qib_iba7322.c 					      dd->unit,
dd               3470 drivers/infiniband/hw/qib/qib_iba7322.c 			arg = dd->rcd[ctxt];
dd               3480 drivers/infiniband/hw/qib/qib_iba7322.c 			ret = pci_request_irq(dd->pcidev, msixnum, handler,
dd               3483 drivers/infiniband/hw/qib/qib_iba7322.c 					      dd->unit);
dd               3491 drivers/infiniband/hw/qib/qib_iba7322.c 			qib_dev_err(dd,
dd               3494 drivers/infiniband/hw/qib/qib_iba7322.c 				    pci_irq_vector(dd->pcidev, msixnum),
dd               3496 drivers/infiniband/hw/qib/qib_iba7322.c 			qib_7322_free_irq(dd);
dd               3497 drivers/infiniband/hw/qib/qib_iba7322.c 			pci_alloc_irq_vectors(dd->pcidev, 1, 1,
dd               3501 drivers/infiniband/hw/qib/qib_iba7322.c 		dd->cspec->msix_entries[msixnum].arg = arg;
dd               3503 drivers/infiniband/hw/qib/qib_iba7322.c 		dd->cspec->msix_entries[msixnum].dca = dca;
dd               3504 drivers/infiniband/hw/qib/qib_iba7322.c 		dd->cspec->msix_entries[msixnum].rcv =
dd               3514 drivers/infiniband/hw/qib/qib_iba7322.c 		qib_read_kreg64(dd, 2 * msixnum + 1 +
dd               3518 drivers/infiniband/hw/qib/qib_iba7322.c 				&dd->cspec->msix_entries[msixnum].mask,
dd               3522 drivers/infiniband/hw/qib/qib_iba7322.c 					dd->cspec->msix_entries[msixnum].mask);
dd               3529 drivers/infiniband/hw/qib/qib_iba7322.c 					dd->cspec->msix_entries[msixnum].mask);
dd               3532 drivers/infiniband/hw/qib/qib_iba7322.c 				pci_irq_vector(dd->pcidev, msixnum),
dd               3533 drivers/infiniband/hw/qib/qib_iba7322.c 				dd->cspec->msix_entries[msixnum].mask);
dd               3539 drivers/infiniband/hw/qib/qib_iba7322.c 		qib_write_kreg(dd, kr_intredirect + i, redirect[i]);
dd               3540 drivers/infiniband/hw/qib/qib_iba7322.c 	dd->cspec->main_int_mask = mask;
dd               3541 drivers/infiniband/hw/qib/qib_iba7322.c 	tasklet_init(&dd->error_tasklet, qib_error_tasklet,
dd               3542 drivers/infiniband/hw/qib/qib_iba7322.c 		(unsigned long)dd);
dd               3551 drivers/infiniband/hw/qib/qib_iba7322.c static unsigned qib_7322_boardname(struct qib_devdata *dd)
dd               3557 drivers/infiniband/hw/qib/qib_iba7322.c 	boardid = SYM_FIELD(dd->revision, Revision, BoardID);
dd               3561 drivers/infiniband/hw/qib/qib_iba7322.c 		dd->boardname = "InfiniPath_QLE7342_Emulation";
dd               3564 drivers/infiniband/hw/qib/qib_iba7322.c 		dd->boardname = "InfiniPath_QLE7340";
dd               3565 drivers/infiniband/hw/qib/qib_iba7322.c 		dd->flags |= QIB_HAS_QSFP;
dd               3569 drivers/infiniband/hw/qib/qib_iba7322.c 		dd->boardname = "InfiniPath_QLE7342";
dd               3570 drivers/infiniband/hw/qib/qib_iba7322.c 		dd->flags |= QIB_HAS_QSFP;
dd               3573 drivers/infiniband/hw/qib/qib_iba7322.c 		dd->boardname = "InfiniPath_QMI7342";
dd               3576 drivers/infiniband/hw/qib/qib_iba7322.c 		dd->boardname = "InfiniPath_Unsupported7342";
dd               3577 drivers/infiniband/hw/qib/qib_iba7322.c 		qib_dev_err(dd, "Unsupported version of QMH7342\n");
dd               3581 drivers/infiniband/hw/qib/qib_iba7322.c 		dd->boardname = "InfiniPath_QMH7342";
dd               3585 drivers/infiniband/hw/qib/qib_iba7322.c 		dd->boardname = "InfiniPath_QME7342";
dd               3588 drivers/infiniband/hw/qib/qib_iba7322.c 		dd->boardname = "InfiniPath_QME7362";
dd               3589 drivers/infiniband/hw/qib/qib_iba7322.c 		dd->flags |= QIB_HAS_QSFP;
dd               3592 drivers/infiniband/hw/qib/qib_iba7322.c 		dd->boardname = "Intel IB QDR 1P FLR-QSFP Adptr";
dd               3593 drivers/infiniband/hw/qib/qib_iba7322.c 		dd->flags |= QIB_HAS_QSFP;
dd               3596 drivers/infiniband/hw/qib/qib_iba7322.c 		dd->boardname = "InfiniPath_QLE7342_TEST";
dd               3597 drivers/infiniband/hw/qib/qib_iba7322.c 		dd->flags |= QIB_HAS_QSFP;
dd               3600 drivers/infiniband/hw/qib/qib_iba7322.c 		dd->boardname = "InfiniPath_QLE73xy_UNKNOWN";
dd               3601 drivers/infiniband/hw/qib/qib_iba7322.c 		qib_dev_err(dd, "Unknown 7322 board type %u\n", boardid);
dd               3604 drivers/infiniband/hw/qib/qib_iba7322.c 	dd->board_atten = 1; /* index into txdds_Xdr */
dd               3606 drivers/infiniband/hw/qib/qib_iba7322.c 	snprintf(dd->boardversion, sizeof(dd->boardversion),
dd               3608 drivers/infiniband/hw/qib/qib_iba7322.c 		 QIB_CHIP_VERS_MAJ, QIB_CHIP_VERS_MIN, dd->boardname,
dd               3609 drivers/infiniband/hw/qib/qib_iba7322.c 		 (unsigned int)SYM_FIELD(dd->revision, Revision_R, Arch),
dd               3610 drivers/infiniband/hw/qib/qib_iba7322.c 		 dd->majrev, dd->minrev,
dd               3611 drivers/infiniband/hw/qib/qib_iba7322.c 		 (unsigned int)SYM_FIELD(dd->revision, Revision_R, SW));
dd               3614 drivers/infiniband/hw/qib/qib_iba7322.c 		qib_devinfo(dd->pcidev,
dd               3616 drivers/infiniband/hw/qib/qib_iba7322.c 			    dd->unit);
dd               3627 drivers/infiniband/hw/qib/qib_iba7322.c static int qib_do_7322_reset(struct qib_devdata *dd)
dd               3637 drivers/infiniband/hw/qib/qib_iba7322.c 	qib_dev_err(dd, "Resetting InfiniPath unit %u\n", dd->unit);
dd               3639 drivers/infiniband/hw/qib/qib_iba7322.c 	qib_pcie_getcmd(dd, &cmdval, &int_line, &clinesz);
dd               3641 drivers/infiniband/hw/qib/qib_iba7322.c 	msix_entries = dd->cspec->num_msix_entries;
dd               3644 drivers/infiniband/hw/qib/qib_iba7322.c 	qib_7322_set_intr_state(dd, 0);
dd               3646 drivers/infiniband/hw/qib/qib_iba7322.c 	qib_7322_free_irq(dd);
dd               3650 drivers/infiniband/hw/qib/qib_iba7322.c 		msix_vecsave = kmalloc_array(2 * dd->cspec->num_msix_entries,
dd               3665 drivers/infiniband/hw/qib/qib_iba7322.c 		vecaddr = qib_read_kreg64(dd, 2 * i +
dd               3667 drivers/infiniband/hw/qib/qib_iba7322.c 		vecdata = qib_read_kreg64(dd, 1 + 2 * i +
dd               3676 drivers/infiniband/hw/qib/qib_iba7322.c 	dd->pport->cpspec->ibdeltainprog = 0;
dd               3677 drivers/infiniband/hw/qib/qib_iba7322.c 	dd->pport->cpspec->ibsymdelta = 0;
dd               3678 drivers/infiniband/hw/qib/qib_iba7322.c 	dd->pport->cpspec->iblnkerrdelta = 0;
dd               3679 drivers/infiniband/hw/qib/qib_iba7322.c 	dd->pport->cpspec->ibmalfdelta = 0;
dd               3681 drivers/infiniband/hw/qib/qib_iba7322.c 	dd->z_int_counter = qib_int_counter(dd);
dd               3688 drivers/infiniband/hw/qib/qib_iba7322.c 	dd->flags &= ~(QIB_INITTED | QIB_PRESENT | QIB_BADINTR);
dd               3689 drivers/infiniband/hw/qib/qib_iba7322.c 	dd->flags |= QIB_DOING_RESET;
dd               3690 drivers/infiniband/hw/qib/qib_iba7322.c 	val = dd->control | QLOGIC_IB_C_RESET;
dd               3691 drivers/infiniband/hw/qib/qib_iba7322.c 	writeq(val, &dd->kregbase[kr_control]);
dd               3701 drivers/infiniband/hw/qib/qib_iba7322.c 		qib_pcie_reenable(dd, cmdval, int_line, clinesz);
dd               3707 drivers/infiniband/hw/qib/qib_iba7322.c 		val = readq(&dd->kregbase[kr_revision]);
dd               3708 drivers/infiniband/hw/qib/qib_iba7322.c 		if (val == dd->revision)
dd               3711 drivers/infiniband/hw/qib/qib_iba7322.c 			qib_dev_err(dd,
dd               3718 drivers/infiniband/hw/qib/qib_iba7322.c 	dd->flags |= QIB_PRESENT; /* it's back */
dd               3725 drivers/infiniband/hw/qib/qib_iba7322.c 			qib_write_kreg(dd, 2 * i +
dd               3728 drivers/infiniband/hw/qib/qib_iba7322.c 			qib_write_kreg(dd, 1 + 2 * i +
dd               3735 drivers/infiniband/hw/qib/qib_iba7322.c 	for (i = 0; i < dd->num_pports; ++i)
dd               3736 drivers/infiniband/hw/qib/qib_iba7322.c 		write_7322_init_portregs(&dd->pport[i]);
dd               3737 drivers/infiniband/hw/qib/qib_iba7322.c 	write_7322_initregs(dd);
dd               3739 drivers/infiniband/hw/qib/qib_iba7322.c 	if (qib_pcie_params(dd, dd->lbus_width, &msix_entries))
dd               3740 drivers/infiniband/hw/qib/qib_iba7322.c 		qib_dev_err(dd,
dd               3743 drivers/infiniband/hw/qib/qib_iba7322.c 	dd->cspec->num_msix_entries = msix_entries;
dd               3744 drivers/infiniband/hw/qib/qib_iba7322.c 	qib_setup_7322_interrupt(dd, 1);
dd               3746 drivers/infiniband/hw/qib/qib_iba7322.c 	for (i = 0; i < dd->num_pports; ++i) {
dd               3747 drivers/infiniband/hw/qib/qib_iba7322.c 		struct qib_pportdata *ppd = &dd->pport[i];
dd               3756 drivers/infiniband/hw/qib/qib_iba7322.c 	dd->flags &= ~QIB_DOING_RESET; /* OK or not, no longer resetting */
dd               3768 drivers/infiniband/hw/qib/qib_iba7322.c static void qib_7322_put_tid(struct qib_devdata *dd, u64 __iomem *tidptr,
dd               3771 drivers/infiniband/hw/qib/qib_iba7322.c 	if (!(dd->flags & QIB_PRESENT))
dd               3773 drivers/infiniband/hw/qib/qib_iba7322.c 	if (pa != dd->tidinvalid) {
dd               3778 drivers/infiniband/hw/qib/qib_iba7322.c 			qib_dev_err(dd, "Physaddr %lx not 2KB aligned!\n",
dd               3783 drivers/infiniband/hw/qib/qib_iba7322.c 			qib_dev_err(dd,
dd               3790 drivers/infiniband/hw/qib/qib_iba7322.c 			chippa |= dd->tidtemplate;
dd               3806 drivers/infiniband/hw/qib/qib_iba7322.c static void qib_7322_clear_tids(struct qib_devdata *dd,
dd               3814 drivers/infiniband/hw/qib/qib_iba7322.c 	if (!dd->kregbase || !rcd)
dd               3819 drivers/infiniband/hw/qib/qib_iba7322.c 	tidinv = dd->tidinvalid;
dd               3821 drivers/infiniband/hw/qib/qib_iba7322.c 		((char __iomem *) dd->kregbase +
dd               3822 drivers/infiniband/hw/qib/qib_iba7322.c 		 dd->rcvtidbase +
dd               3823 drivers/infiniband/hw/qib/qib_iba7322.c 		 ctxt * dd->rcvtidcnt * sizeof(*tidbase));
dd               3825 drivers/infiniband/hw/qib/qib_iba7322.c 	for (i = 0; i < dd->rcvtidcnt; i++)
dd               3826 drivers/infiniband/hw/qib/qib_iba7322.c 		qib_7322_put_tid(dd, &tidbase[i], RCVHQ_RCV_TYPE_EXPECTED,
dd               3830 drivers/infiniband/hw/qib/qib_iba7322.c 		((char __iomem *) dd->kregbase +
dd               3831 drivers/infiniband/hw/qib/qib_iba7322.c 		 dd->rcvegrbase +
dd               3835 drivers/infiniband/hw/qib/qib_iba7322.c 		qib_7322_put_tid(dd, &tidbase[i], RCVHQ_RCV_TYPE_EAGER,
dd               3845 drivers/infiniband/hw/qib/qib_iba7322.c static void qib_7322_tidtemplate(struct qib_devdata *dd)
dd               3856 drivers/infiniband/hw/qib/qib_iba7322.c 	if (dd->rcvegrbufsize == 2048)
dd               3857 drivers/infiniband/hw/qib/qib_iba7322.c 		dd->tidtemplate = IBA7322_TID_SZ_2K;
dd               3858 drivers/infiniband/hw/qib/qib_iba7322.c 	else if (dd->rcvegrbufsize == 4096)
dd               3859 drivers/infiniband/hw/qib/qib_iba7322.c 		dd->tidtemplate = IBA7322_TID_SZ_4K;
dd               3860 drivers/infiniband/hw/qib/qib_iba7322.c 	dd->tidinvalid = 0;
dd               3878 drivers/infiniband/hw/qib/qib_iba7322.c 	if (rcd->dd->cspec->r1)
dd               3880 drivers/infiniband/hw/qib/qib_iba7322.c 	if (rcd->dd->flags & QIB_USE_SPCL_TRIG)
dd               3887 drivers/infiniband/hw/qib/qib_iba7322.c qib_7322_get_msgheader(struct qib_devdata *dd, __le32 *rhf_addr)
dd               3892 drivers/infiniband/hw/qib/qib_iba7322.c 		(rhf_addr - dd->rhf_offset + offset);
dd               3898 drivers/infiniband/hw/qib/qib_iba7322.c static void qib_7322_config_ctxts(struct qib_devdata *dd)
dd               3903 drivers/infiniband/hw/qib/qib_iba7322.c 	nchipctxts = qib_read_kreg32(dd, kr_contextcnt);
dd               3904 drivers/infiniband/hw/qib/qib_iba7322.c 	dd->cspec->numctxts = nchipctxts;
dd               3905 drivers/infiniband/hw/qib/qib_iba7322.c 	if (qib_n_krcv_queues > 1 && dd->num_pports) {
dd               3906 drivers/infiniband/hw/qib/qib_iba7322.c 		dd->first_user_ctxt = NUM_IB_PORTS +
dd               3907 drivers/infiniband/hw/qib/qib_iba7322.c 			(qib_n_krcv_queues - 1) * dd->num_pports;
dd               3908 drivers/infiniband/hw/qib/qib_iba7322.c 		if (dd->first_user_ctxt > nchipctxts)
dd               3909 drivers/infiniband/hw/qib/qib_iba7322.c 			dd->first_user_ctxt = nchipctxts;
dd               3910 drivers/infiniband/hw/qib/qib_iba7322.c 		dd->n_krcv_queues = dd->first_user_ctxt / dd->num_pports;
dd               3912 drivers/infiniband/hw/qib/qib_iba7322.c 		dd->first_user_ctxt = NUM_IB_PORTS;
dd               3913 drivers/infiniband/hw/qib/qib_iba7322.c 		dd->n_krcv_queues = 1;
dd               3917 drivers/infiniband/hw/qib/qib_iba7322.c 		int nctxts = dd->first_user_ctxt + num_online_cpus();
dd               3920 drivers/infiniband/hw/qib/qib_iba7322.c 			dd->ctxtcnt = 6;
dd               3922 drivers/infiniband/hw/qib/qib_iba7322.c 			dd->ctxtcnt = 10;
dd               3924 drivers/infiniband/hw/qib/qib_iba7322.c 			dd->ctxtcnt = nchipctxts;
dd               3925 drivers/infiniband/hw/qib/qib_iba7322.c 	} else if (qib_cfgctxts < dd->num_pports)
dd               3926 drivers/infiniband/hw/qib/qib_iba7322.c 		dd->ctxtcnt = dd->num_pports;
dd               3928 drivers/infiniband/hw/qib/qib_iba7322.c 		dd->ctxtcnt = qib_cfgctxts;
dd               3929 drivers/infiniband/hw/qib/qib_iba7322.c 	if (!dd->ctxtcnt) /* none of the above, set to max */
dd               3930 drivers/infiniband/hw/qib/qib_iba7322.c 		dd->ctxtcnt = nchipctxts;
dd               3937 drivers/infiniband/hw/qib/qib_iba7322.c 	spin_lock_irqsave(&dd->cspec->rcvmod_lock, flags);
dd               3938 drivers/infiniband/hw/qib/qib_iba7322.c 	if (dd->ctxtcnt > 10)
dd               3939 drivers/infiniband/hw/qib/qib_iba7322.c 		dd->rcvctrl |= 2ULL << SYM_LSB(RcvCtrl, ContextCfg);
dd               3940 drivers/infiniband/hw/qib/qib_iba7322.c 	else if (dd->ctxtcnt > 6)
dd               3941 drivers/infiniband/hw/qib/qib_iba7322.c 		dd->rcvctrl |= 1ULL << SYM_LSB(RcvCtrl, ContextCfg);
dd               3945 drivers/infiniband/hw/qib/qib_iba7322.c 	dd->rcvctrl |= 5ULL << SYM_LSB(RcvCtrl, XrcTypeCode);
dd               3951 drivers/infiniband/hw/qib/qib_iba7322.c 	qib_write_kreg(dd, kr_rcvctrl, dd->rcvctrl);
dd               3952 drivers/infiniband/hw/qib/qib_iba7322.c 	spin_unlock_irqrestore(&dd->cspec->rcvmod_lock, flags);
dd               3955 drivers/infiniband/hw/qib/qib_iba7322.c 	dd->cspec->rcvegrcnt = qib_read_kreg32(dd, kr_rcvegrcnt);
dd               3957 drivers/infiniband/hw/qib/qib_iba7322.c 		dd->rcvhdrcnt = max(dd->cspec->rcvegrcnt, qib_rcvhdrcnt);
dd               3959 drivers/infiniband/hw/qib/qib_iba7322.c 		dd->rcvhdrcnt = 2 * max(dd->cspec->rcvegrcnt,
dd               3960 drivers/infiniband/hw/qib/qib_iba7322.c 				    dd->num_pports > 1 ? 1024U : 2048U);
dd               4068 drivers/infiniband/hw/qib/qib_iba7322.c 	struct qib_devdata *dd = ppd->dd;
dd               4153 drivers/infiniband/hw/qib/qib_iba7322.c 			qib_write_kreg(dd, kr_scratch, 0ULL);
dd               4167 drivers/infiniband/hw/qib/qib_iba7322.c 			qib_write_kreg(dd, kr_scratch, 0ULL);
dd               4187 drivers/infiniband/hw/qib/qib_iba7322.c 		qib_write_kreg(dd, kr_scratch, 0ULL);
dd               4204 drivers/infiniband/hw/qib/qib_iba7322.c 		qib_write_kreg(dd, kr_scratch, 0ULL);
dd               4243 drivers/infiniband/hw/qib/qib_iba7322.c 			qib_dev_err(dd, "bad linkcmd req 0x%x\n", val >> 16);
dd               4274 drivers/infiniband/hw/qib/qib_iba7322.c 			qib_dev_err(dd, "bad linkinitcmd req 0x%x\n",
dd               4303 drivers/infiniband/hw/qib/qib_iba7322.c 		if (ppd->dd->cspec->r1) {
dd               4316 drivers/infiniband/hw/qib/qib_iba7322.c 	qib_write_kreg(dd, kr_scratch, 0);
dd               4331 drivers/infiniband/hw/qib/qib_iba7322.c 		qib_devinfo(ppd->dd->pcidev, "Enabling IB%u:%u IBC loopback\n",
dd               4332 drivers/infiniband/hw/qib/qib_iba7322.c 			 ppd->dd->unit, ppd->port);
dd               4338 drivers/infiniband/hw/qib/qib_iba7322.c 		qib_devinfo(ppd->dd->pcidev,
dd               4340 drivers/infiniband/hw/qib/qib_iba7322.c 			ppd->dd->unit, ppd->port);
dd               4351 drivers/infiniband/hw/qib/qib_iba7322.c 		qib_write_kreg(ppd->dd, kr_scratch, 0);
dd               4386 drivers/infiniband/hw/qib/qib_iba7322.c 		struct qib_devdata *dd = ppd->dd;
dd               4389 drivers/infiniband/hw/qib/qib_iba7322.c 		spin_lock_irqsave(&dd->sendctrl_lock, flags);
dd               4392 drivers/infiniband/hw/qib/qib_iba7322.c 		qib_write_kreg(dd, kr_scratch, 0);
dd               4393 drivers/infiniband/hw/qib/qib_iba7322.c 		spin_unlock_irqrestore(&dd->sendctrl_lock, flags);
dd               4441 drivers/infiniband/hw/qib/qib_iba7322.c 		qib_write_ureg(rcd->dd, ur_rcvegrindexhead, egrhd, rcd->ctxt);
dd               4442 drivers/infiniband/hw/qib/qib_iba7322.c 	qib_write_ureg(rcd->dd, ur_rcvhdrhead, hd, rcd->ctxt);
dd               4443 drivers/infiniband/hw/qib/qib_iba7322.c 	qib_write_ureg(rcd->dd, ur_rcvhdrhead, hd, rcd->ctxt);
dd               4450 drivers/infiniband/hw/qib/qib_iba7322.c 	head = qib_read_ureg32(rcd->dd, ur_rcvhdrhead, rcd->ctxt);
dd               4454 drivers/infiniband/hw/qib/qib_iba7322.c 		tail = qib_read_ureg32(rcd->dd, ur_rcvhdrtail, rcd->ctxt);
dd               4484 drivers/infiniband/hw/qib/qib_iba7322.c 	struct qib_devdata *dd = ppd->dd;
dd               4489 drivers/infiniband/hw/qib/qib_iba7322.c 	spin_lock_irqsave(&dd->cspec->rcvmod_lock, flags);
dd               4492 drivers/infiniband/hw/qib/qib_iba7322.c 		dd->rcvctrl |= SYM_MASK(RcvCtrl, TidFlowEnable);
dd               4494 drivers/infiniband/hw/qib/qib_iba7322.c 		dd->rcvctrl &= ~SYM_MASK(RcvCtrl, TidFlowEnable);
dd               4496 drivers/infiniband/hw/qib/qib_iba7322.c 		dd->rcvctrl |= SYM_MASK(RcvCtrl, TailUpd);
dd               4498 drivers/infiniband/hw/qib/qib_iba7322.c 		dd->rcvctrl &= ~SYM_MASK(RcvCtrl, TailUpd);
dd               4504 drivers/infiniband/hw/qib/qib_iba7322.c 		mask = (1ULL << dd->ctxtcnt) - 1;
dd               4508 drivers/infiniband/hw/qib/qib_iba7322.c 		rcd = dd->rcd[ctxt];
dd               4513 drivers/infiniband/hw/qib/qib_iba7322.c 		if (!(dd->flags & QIB_NODMA_RTAIL)) {
dd               4515 drivers/infiniband/hw/qib/qib_iba7322.c 			dd->rcvctrl |= SYM_MASK(RcvCtrl, TailUpd);
dd               4518 drivers/infiniband/hw/qib/qib_iba7322.c 		qib_write_kreg_ctxt(dd, krc_rcvhdrtailaddr, ctxt,
dd               4520 drivers/infiniband/hw/qib/qib_iba7322.c 		qib_write_kreg_ctxt(dd, krc_rcvhdraddr, ctxt,
dd               4528 drivers/infiniband/hw/qib/qib_iba7322.c 		dd->rcvctrl |= mask << SYM_LSB(RcvCtrl, dontDropRHQFull);
dd               4530 drivers/infiniband/hw/qib/qib_iba7322.c 		dd->rcvctrl &= ~(mask << SYM_LSB(RcvCtrl, dontDropRHQFull));
dd               4532 drivers/infiniband/hw/qib/qib_iba7322.c 		dd->rcvctrl |= (mask << SYM_LSB(RcvCtrl, IntrAvail));
dd               4534 drivers/infiniband/hw/qib/qib_iba7322.c 		dd->rcvctrl &= ~(mask << SYM_LSB(RcvCtrl, IntrAvail));
dd               4541 drivers/infiniband/hw/qib/qib_iba7322.c 		qib_write_kreg(dd, kr_rcvctrl, dd->rcvctrl);
dd               4544 drivers/infiniband/hw/qib/qib_iba7322.c 	if ((op & QIB_RCVCTRL_CTXT_ENB) && dd->rcd[ctxt]) {
dd               4551 drivers/infiniband/hw/qib/qib_iba7322.c 		val = qib_read_ureg32(dd, ur_rcvegrindextail, ctxt);
dd               4552 drivers/infiniband/hw/qib/qib_iba7322.c 		qib_write_ureg(dd, ur_rcvegrindexhead, val, ctxt);
dd               4555 drivers/infiniband/hw/qib/qib_iba7322.c 		(void) qib_read_kreg32(dd, kr_scratch);
dd               4556 drivers/infiniband/hw/qib/qib_iba7322.c 		val = qib_read_ureg32(dd, ur_rcvhdrtail, ctxt);
dd               4557 drivers/infiniband/hw/qib/qib_iba7322.c 		dd->rcd[ctxt]->head = val;
dd               4559 drivers/infiniband/hw/qib/qib_iba7322.c 		if (ctxt < dd->first_user_ctxt)
dd               4560 drivers/infiniband/hw/qib/qib_iba7322.c 			val |= dd->rhdrhead_intr_off;
dd               4561 drivers/infiniband/hw/qib/qib_iba7322.c 		qib_write_ureg(dd, ur_rcvhdrhead, val, ctxt);
dd               4563 drivers/infiniband/hw/qib/qib_iba7322.c 		dd->rcd[ctxt] && dd->rhdrhead_intr_off) {
dd               4565 drivers/infiniband/hw/qib/qib_iba7322.c 		val = dd->rcd[ctxt]->head | dd->rhdrhead_intr_off;
dd               4566 drivers/infiniband/hw/qib/qib_iba7322.c 		qib_write_ureg(dd, ur_rcvhdrhead, val, ctxt);
dd               4573 drivers/infiniband/hw/qib/qib_iba7322.c 			qib_write_kreg_ctxt(dd, krc_rcvhdrtailaddr, ctxt, 0);
dd               4574 drivers/infiniband/hw/qib/qib_iba7322.c 			qib_write_kreg_ctxt(dd, krc_rcvhdraddr, ctxt, 0);
dd               4576 drivers/infiniband/hw/qib/qib_iba7322.c 				qib_write_ureg(dd, ur_rcvflowtable + f,
dd               4581 drivers/infiniband/hw/qib/qib_iba7322.c 			for (i = 0; i < dd->cfgctxts; i++) {
dd               4582 drivers/infiniband/hw/qib/qib_iba7322.c 				qib_write_kreg_ctxt(dd, krc_rcvhdrtailaddr,
dd               4584 drivers/infiniband/hw/qib/qib_iba7322.c 				qib_write_kreg_ctxt(dd, krc_rcvhdraddr, i, 0);
dd               4586 drivers/infiniband/hw/qib/qib_iba7322.c 					qib_write_ureg(dd, ur_rcvflowtable + f,
dd               4591 drivers/infiniband/hw/qib/qib_iba7322.c 	spin_unlock_irqrestore(&dd->cspec->rcvmod_lock, flags);
dd               4623 drivers/infiniband/hw/qib/qib_iba7322.c 	struct qib_devdata *dd = ppd->dd;
dd               4627 drivers/infiniband/hw/qib/qib_iba7322.c 	spin_lock_irqsave(&dd->sendctrl_lock, flags);
dd               4631 drivers/infiniband/hw/qib/qib_iba7322.c 		dd->sendctrl = 0;
dd               4633 drivers/infiniband/hw/qib/qib_iba7322.c 		dd->sendctrl &= ~SYM_MASK(SendCtrl, SendBufAvailUpd);
dd               4635 drivers/infiniband/hw/qib/qib_iba7322.c 		dd->sendctrl |= SYM_MASK(SendCtrl, SendBufAvailUpd);
dd               4636 drivers/infiniband/hw/qib/qib_iba7322.c 		if (dd->flags & QIB_USE_SPCL_TRIG)
dd               4637 drivers/infiniband/hw/qib/qib_iba7322.c 			dd->sendctrl |= SYM_MASK(SendCtrl, SpecialTriggerEn);
dd               4649 drivers/infiniband/hw/qib/qib_iba7322.c 		tmp_dd_sendctrl = dd->sendctrl;
dd               4650 drivers/infiniband/hw/qib/qib_iba7322.c 		last = dd->piobcnt2k + dd->piobcnt4k + NUM_VL15_BUFS;
dd               4657 drivers/infiniband/hw/qib/qib_iba7322.c 			qib_write_kreg(dd, kr_sendctrl,
dd               4660 drivers/infiniband/hw/qib/qib_iba7322.c 			qib_write_kreg(dd, kr_scratch, 0);
dd               4676 drivers/infiniband/hw/qib/qib_iba7322.c 		qib_write_kreg(dd, kr_scratch, 0);
dd               4679 drivers/infiniband/hw/qib/qib_iba7322.c 	tmp_dd_sendctrl = dd->sendctrl;
dd               4686 drivers/infiniband/hw/qib/qib_iba7322.c 	    (dd->sendctrl & SYM_MASK(SendCtrl, SendBufAvailUpd)))
dd               4690 drivers/infiniband/hw/qib/qib_iba7322.c 		qib_write_kreg(dd, kr_sendctrl, tmp_dd_sendctrl);
dd               4691 drivers/infiniband/hw/qib/qib_iba7322.c 		qib_write_kreg(dd, kr_scratch, 0);
dd               4696 drivers/infiniband/hw/qib/qib_iba7322.c 		qib_write_kreg(dd, kr_scratch, 0);
dd               4700 drivers/infiniband/hw/qib/qib_iba7322.c 		qib_write_kreg(dd, kr_sendctrl, dd->sendctrl);
dd               4701 drivers/infiniband/hw/qib/qib_iba7322.c 		qib_write_kreg(dd, kr_scratch, 0);
dd               4704 drivers/infiniband/hw/qib/qib_iba7322.c 	spin_unlock_irqrestore(&dd->sendctrl_lock, flags);
dd               4714 drivers/infiniband/hw/qib/qib_iba7322.c 		v = qib_read_kreg32(dd, kr_scratch);
dd               4715 drivers/infiniband/hw/qib/qib_iba7322.c 		qib_write_kreg(dd, kr_scratch, v);
dd               4716 drivers/infiniband/hw/qib/qib_iba7322.c 		v = qib_read_kreg32(dd, kr_scratch);
dd               4717 drivers/infiniband/hw/qib/qib_iba7322.c 		qib_write_kreg(dd, kr_scratch, v);
dd               4718 drivers/infiniband/hw/qib/qib_iba7322.c 		qib_read_kreg32(dd, kr_scratch);
dd               4733 drivers/infiniband/hw/qib/qib_iba7322.c 	struct qib_devdata *dd = ppd->dd;
dd               4781 drivers/infiniband/hw/qib/qib_iba7322.c 		qib_devinfo(ppd->dd->pcidev,
dd               4792 drivers/infiniband/hw/qib/qib_iba7322.c 		for (i = 0; dd->rcd && i < dd->first_user_ctxt; i++) {
dd               4793 drivers/infiniband/hw/qib/qib_iba7322.c 			struct qib_ctxtdata *rcd = dd->rcd[i];
dd               4797 drivers/infiniband/hw/qib/qib_iba7322.c 			ret += read_7322_creg32(dd, cr_base_egrovfl + i);
dd               4989 drivers/infiniband/hw/qib/qib_iba7322.c static void init_7322_cntrnames(struct qib_devdata *dd)
dd               4994 drivers/infiniband/hw/qib/qib_iba7322.c 	for (i = 0, s = (char *)cntr7322names; s && j <= dd->cfgctxts;
dd               5003 drivers/infiniband/hw/qib/qib_iba7322.c 	dd->cspec->ncntrs = i;
dd               5006 drivers/infiniband/hw/qib/qib_iba7322.c 		dd->cspec->cntrnamelen = sizeof(cntr7322names) - 1;
dd               5008 drivers/infiniband/hw/qib/qib_iba7322.c 		dd->cspec->cntrnamelen = 1 + s - cntr7322names;
dd               5009 drivers/infiniband/hw/qib/qib_iba7322.c 	dd->cspec->cntrs = kmalloc_array(dd->cspec->ncntrs, sizeof(u64),
dd               5014 drivers/infiniband/hw/qib/qib_iba7322.c 	dd->cspec->nportcntrs = i - 1;
dd               5015 drivers/infiniband/hw/qib/qib_iba7322.c 	dd->cspec->portcntrnamelen = sizeof(portcntr7322names) - 1;
dd               5016 drivers/infiniband/hw/qib/qib_iba7322.c 	for (i = 0; i < dd->num_pports; ++i) {
dd               5017 drivers/infiniband/hw/qib/qib_iba7322.c 		dd->pport[i].cpspec->portcntrs =
dd               5018 drivers/infiniband/hw/qib/qib_iba7322.c 			kmalloc_array(dd->cspec->nportcntrs, sizeof(u64),
dd               5023 drivers/infiniband/hw/qib/qib_iba7322.c static u32 qib_read_7322cntrs(struct qib_devdata *dd, loff_t pos, char **namep,
dd               5029 drivers/infiniband/hw/qib/qib_iba7322.c 		ret = dd->cspec->cntrnamelen;
dd               5035 drivers/infiniband/hw/qib/qib_iba7322.c 		u64 *cntr = dd->cspec->cntrs;
dd               5038 drivers/infiniband/hw/qib/qib_iba7322.c 		ret = dd->cspec->ncntrs * sizeof(u64);
dd               5045 drivers/infiniband/hw/qib/qib_iba7322.c 		for (i = 0; i < dd->cspec->ncntrs; i++)
dd               5047 drivers/infiniband/hw/qib/qib_iba7322.c 				*cntr++ = read_7322_creg(dd,
dd               5051 drivers/infiniband/hw/qib/qib_iba7322.c 				*cntr++ = read_7322_creg32(dd,
dd               5058 drivers/infiniband/hw/qib/qib_iba7322.c static u32 qib_read_7322portcntrs(struct qib_devdata *dd, loff_t pos, u32 port,
dd               5064 drivers/infiniband/hw/qib/qib_iba7322.c 		ret = dd->cspec->portcntrnamelen;
dd               5070 drivers/infiniband/hw/qib/qib_iba7322.c 		struct qib_pportdata *ppd = &dd->pport[port];
dd               5074 drivers/infiniband/hw/qib/qib_iba7322.c 		ret = dd->cspec->nportcntrs * sizeof(u64);
dd               5081 drivers/infiniband/hw/qib/qib_iba7322.c 		for (i = 0; i < dd->cspec->nportcntrs; i++) {
dd               5112 drivers/infiniband/hw/qib/qib_iba7322.c 	struct qib_devdata *dd = from_timer(dd, t, stats_timer);
dd               5118 drivers/infiniband/hw/qib/qib_iba7322.c 	for (pidx = 0; pidx < dd->num_pports; ++pidx) {
dd               5119 drivers/infiniband/hw/qib/qib_iba7322.c 		ppd = dd->pport + pidx;
dd               5126 drivers/infiniband/hw/qib/qib_iba7322.c 		if (!ppd->link_speed_supported || !(dd->flags & QIB_INITTED)
dd               5127 drivers/infiniband/hw/qib/qib_iba7322.c 		    || dd->diag_client)
dd               5137 drivers/infiniband/hw/qib/qib_iba7322.c 		spin_lock_irqsave(&ppd->dd->eep_st_lock, flags);
dd               5138 drivers/infiniband/hw/qib/qib_iba7322.c 		traffic_wds -= ppd->dd->traffic_wds;
dd               5139 drivers/infiniband/hw/qib/qib_iba7322.c 		ppd->dd->traffic_wds += traffic_wds;
dd               5140 drivers/infiniband/hw/qib/qib_iba7322.c 		spin_unlock_irqrestore(&ppd->dd->eep_st_lock, flags);
dd               5150 drivers/infiniband/hw/qib/qib_iba7322.c 					    ppd->dd->cspec->r1 ?
dd               5156 drivers/infiniband/hw/qib/qib_iba7322.c 	mod_timer(&dd->stats_timer, jiffies + HZ * ACTIVITY_TIMER);
dd               5162 drivers/infiniband/hw/qib/qib_iba7322.c static int qib_7322_intr_fallback(struct qib_devdata *dd)
dd               5164 drivers/infiniband/hw/qib/qib_iba7322.c 	if (!dd->cspec->num_msix_entries)
dd               5167 drivers/infiniband/hw/qib/qib_iba7322.c 	qib_devinfo(dd->pcidev,
dd               5169 drivers/infiniband/hw/qib/qib_iba7322.c 	qib_7322_free_irq(dd);
dd               5170 drivers/infiniband/hw/qib/qib_iba7322.c 	if (pci_alloc_irq_vectors(dd->pcidev, 1, 1, PCI_IRQ_LEGACY) < 0)
dd               5171 drivers/infiniband/hw/qib/qib_iba7322.c 		qib_dev_err(dd, "Failed to enable INTx\n");
dd               5172 drivers/infiniband/hw/qib/qib_iba7322.c 	qib_setup_7322_interrupt(dd, 0);
dd               5188 drivers/infiniband/hw/qib/qib_iba7322.c 	struct qib_devdata *dd = ppd->dd;
dd               5194 drivers/infiniband/hw/qib/qib_iba7322.c 	qib_write_kreg(dd, kr_hwerrmask,
dd               5195 drivers/infiniband/hw/qib/qib_iba7322.c 		       dd->cspec->hwerrmask & ~HWE_MASK(statusValidNoEop));
dd               5201 drivers/infiniband/hw/qib/qib_iba7322.c 	qib_read_kreg32(dd, kr_scratch);
dd               5204 drivers/infiniband/hw/qib/qib_iba7322.c 	qib_write_kreg(dd, kr_scratch, 0ULL);
dd               5205 drivers/infiniband/hw/qib/qib_iba7322.c 	qib_write_kreg(dd, kr_hwerrclear,
dd               5207 drivers/infiniband/hw/qib/qib_iba7322.c 	qib_write_kreg(dd, kr_hwerrmask, dd->cspec->hwerrmask);
dd               5225 drivers/infiniband/hw/qib/qib_iba7322.c 	struct qib_devdata *dd = ppd->dd;
dd               5237 drivers/infiniband/hw/qib/qib_iba7322.c 	dd->f_txchk_change(dd, pnum, 1, TXCHK_CHG_TYPE_DIS1, NULL);
dd               5242 drivers/infiniband/hw/qib/qib_iba7322.c 	if (dd->flags & QIB_USE_SPCL_TRIG) {
dd               5243 drivers/infiniband/hw/qib/qib_iba7322.c 		u32 spcl_off = (pnum >= dd->piobcnt2k) ? 2047 : 1023;
dd               5249 drivers/infiniband/hw/qib/qib_iba7322.c 	qib_sendbuf_done(dd, pnum);
dd               5251 drivers/infiniband/hw/qib/qib_iba7322.c 	dd->f_txchk_change(dd, pnum, 1, TXCHK_CHG_TYPE_ENAB1, NULL);
dd               5259 drivers/infiniband/hw/qib/qib_iba7322.c 	struct qib_devdata *dd = ppd->dd;
dd               5294 drivers/infiniband/hw/qib/qib_iba7322.c 	qib_read_kreg64(dd, kr_scratch);
dd               5297 drivers/infiniband/hw/qib/qib_iba7322.c 	qib_read_kreg64(dd, kr_scratch);
dd               5338 drivers/infiniband/hw/qib/qib_iba7322.c 	qib_write_kreg(ppd->dd, kr_scratch, 0);
dd               5585 drivers/infiniband/hw/qib/qib_iba7322.c 			if (ppd->dd->flags & QIB_HAS_QSFP) {
dd               5653 drivers/infiniband/hw/qib/qib_iba7322.c 			if (ppd->dd->cspec->r1 && ppd->cpspec->ipg_tries <= 10)
dd               5696 drivers/infiniband/hw/qib/qib_iba7322.c static int gpio_7322_mod(struct qib_devdata *dd, u32 out, u32 dir, u32 mask)
dd               5705 drivers/infiniband/hw/qib/qib_iba7322.c 		spin_lock_irqsave(&dd->cspec->gpio_lock, flags);
dd               5706 drivers/infiniband/hw/qib/qib_iba7322.c 		dd->cspec->extctrl &= ~((u64)mask << SYM_LSB(EXTCtrl, GPIOOe));
dd               5707 drivers/infiniband/hw/qib/qib_iba7322.c 		dd->cspec->extctrl |= ((u64) dir << SYM_LSB(EXTCtrl, GPIOOe));
dd               5708 drivers/infiniband/hw/qib/qib_iba7322.c 		new_out = (dd->cspec->gpio_out & ~mask) | out;
dd               5710 drivers/infiniband/hw/qib/qib_iba7322.c 		qib_write_kreg(dd, kr_extctrl, dd->cspec->extctrl);
dd               5711 drivers/infiniband/hw/qib/qib_iba7322.c 		qib_write_kreg(dd, kr_gpio_out, new_out);
dd               5712 drivers/infiniband/hw/qib/qib_iba7322.c 		dd->cspec->gpio_out = new_out;
dd               5713 drivers/infiniband/hw/qib/qib_iba7322.c 		spin_unlock_irqrestore(&dd->cspec->gpio_lock, flags);
dd               5723 drivers/infiniband/hw/qib/qib_iba7322.c 	read_val = qib_read_kreg64(dd, kr_extstatus);
dd               5728 drivers/infiniband/hw/qib/qib_iba7322.c static int qib_7322_eeprom_wen(struct qib_devdata *dd, int wen)
dd               5734 drivers/infiniband/hw/qib/qib_iba7322.c 	prev_wen = ~gpio_7322_mod(dd, 0, 0, 0) >> QIB_EEPROM_WEN_NUM;
dd               5735 drivers/infiniband/hw/qib/qib_iba7322.c 	gpio_7322_mod(dd, wen ? 0 : mask, mask, mask);
dd               5745 drivers/infiniband/hw/qib/qib_iba7322.c static void get_7322_chip_params(struct qib_devdata *dd)
dd               5751 drivers/infiniband/hw/qib/qib_iba7322.c 	dd->palign = qib_read_kreg32(dd, kr_pagealign);
dd               5753 drivers/infiniband/hw/qib/qib_iba7322.c 	dd->uregbase = qib_read_kreg32(dd, kr_userregbase);
dd               5755 drivers/infiniband/hw/qib/qib_iba7322.c 	dd->rcvtidcnt = qib_read_kreg32(dd, kr_rcvtidcnt);
dd               5756 drivers/infiniband/hw/qib/qib_iba7322.c 	dd->rcvtidbase = qib_read_kreg32(dd, kr_rcvtidbase);
dd               5757 drivers/infiniband/hw/qib/qib_iba7322.c 	dd->rcvegrbase = qib_read_kreg32(dd, kr_rcvegrbase);
dd               5758 drivers/infiniband/hw/qib/qib_iba7322.c 	dd->piobufbase = qib_read_kreg64(dd, kr_sendpiobufbase);
dd               5759 drivers/infiniband/hw/qib/qib_iba7322.c 	dd->pio2k_bufbase = dd->piobufbase & 0xffffffff;
dd               5761 drivers/infiniband/hw/qib/qib_iba7322.c 	val = qib_read_kreg64(dd, kr_sendpiobufcnt);
dd               5762 drivers/infiniband/hw/qib/qib_iba7322.c 	dd->piobcnt2k = val & ~0U;
dd               5763 drivers/infiniband/hw/qib/qib_iba7322.c 	dd->piobcnt4k = val >> 32;
dd               5764 drivers/infiniband/hw/qib/qib_iba7322.c 	val = qib_read_kreg64(dd, kr_sendpiosize);
dd               5765 drivers/infiniband/hw/qib/qib_iba7322.c 	dd->piosize2k = val & ~0U;
dd               5766 drivers/infiniband/hw/qib/qib_iba7322.c 	dd->piosize4k = val >> 32;
dd               5771 drivers/infiniband/hw/qib/qib_iba7322.c 	dd->pport[0].ibmtu = (u32)mtu;
dd               5772 drivers/infiniband/hw/qib/qib_iba7322.c 	dd->pport[1].ibmtu = (u32)mtu;
dd               5775 drivers/infiniband/hw/qib/qib_iba7322.c 	dd->pio2kbase = (u32 __iomem *)
dd               5776 drivers/infiniband/hw/qib/qib_iba7322.c 		((char __iomem *) dd->kregbase + dd->pio2k_bufbase);
dd               5777 drivers/infiniband/hw/qib/qib_iba7322.c 	dd->pio4kbase = (u32 __iomem *)
dd               5778 drivers/infiniband/hw/qib/qib_iba7322.c 		((char __iomem *) dd->kregbase +
dd               5779 drivers/infiniband/hw/qib/qib_iba7322.c 		 (dd->piobufbase >> 32));
dd               5785 drivers/infiniband/hw/qib/qib_iba7322.c 	dd->align4k = ALIGN(dd->piosize4k, dd->palign);
dd               5787 drivers/infiniband/hw/qib/qib_iba7322.c 	piobufs = dd->piobcnt4k + dd->piobcnt2k + NUM_VL15_BUFS;
dd               5789 drivers/infiniband/hw/qib/qib_iba7322.c 	dd->pioavregs = ALIGN(piobufs, sizeof(u64) * BITS_PER_BYTE / 2) /
dd               5798 drivers/infiniband/hw/qib/qib_iba7322.c static void qib_7322_set_baseaddrs(struct qib_devdata *dd)
dd               5802 drivers/infiniband/hw/qib/qib_iba7322.c 	cregbase = qib_read_kreg32(dd, kr_counterregbase);
dd               5804 drivers/infiniband/hw/qib/qib_iba7322.c 	dd->cspec->cregbase = (u64 __iomem *)(cregbase +
dd               5805 drivers/infiniband/hw/qib/qib_iba7322.c 		(char __iomem *)dd->kregbase);
dd               5807 drivers/infiniband/hw/qib/qib_iba7322.c 	dd->egrtidbase = (u64 __iomem *)
dd               5808 drivers/infiniband/hw/qib/qib_iba7322.c 		((char __iomem *) dd->kregbase + dd->rcvegrbase);
dd               5811 drivers/infiniband/hw/qib/qib_iba7322.c 	dd->pport[0].cpspec->kpregbase =
dd               5812 drivers/infiniband/hw/qib/qib_iba7322.c 		(u64 __iomem *)((char __iomem *)dd->kregbase);
dd               5813 drivers/infiniband/hw/qib/qib_iba7322.c 	dd->pport[1].cpspec->kpregbase =
dd               5814 drivers/infiniband/hw/qib/qib_iba7322.c 		(u64 __iomem *)(dd->palign +
dd               5815 drivers/infiniband/hw/qib/qib_iba7322.c 		(char __iomem *)dd->kregbase);
dd               5816 drivers/infiniband/hw/qib/qib_iba7322.c 	dd->pport[0].cpspec->cpregbase =
dd               5817 drivers/infiniband/hw/qib/qib_iba7322.c 		(u64 __iomem *)(qib_read_kreg_port(&dd->pport[0],
dd               5818 drivers/infiniband/hw/qib/qib_iba7322.c 		kr_counterregbase) + (char __iomem *)dd->kregbase);
dd               5819 drivers/infiniband/hw/qib/qib_iba7322.c 	dd->pport[1].cpspec->cpregbase =
dd               5820 drivers/infiniband/hw/qib/qib_iba7322.c 		(u64 __iomem *)(qib_read_kreg_port(&dd->pport[1],
dd               5821 drivers/infiniband/hw/qib/qib_iba7322.c 		kr_counterregbase) + (char __iomem *)dd->kregbase);
dd               5837 drivers/infiniband/hw/qib/qib_iba7322.c static int sendctrl_hook(struct qib_devdata *dd,
dd               5853 drivers/infiniband/hw/qib/qib_iba7322.c 	for (pidx = 0; pidx < dd->num_pports; ++pidx) {
dd               5857 drivers/infiniband/hw/qib/qib_iba7322.c 		ppd = dd->pport + pidx;
dd               5862 drivers/infiniband/hw/qib/qib_iba7322.c 		psoffs = (u32) (psptr - dd->kregbase) * sizeof(*psptr);
dd               5868 drivers/infiniband/hw/qib/qib_iba7322.c 	if (pidx >= dd->num_pports)
dd               5878 drivers/infiniband/hw/qib/qib_iba7322.c 	spin_lock_irqsave(&dd->sendctrl_lock, flags);
dd               5888 drivers/infiniband/hw/qib/qib_iba7322.c 			local_data = (u64)qib_read_kreg32(dd, idx);
dd               5890 drivers/infiniband/hw/qib/qib_iba7322.c 			local_data = qib_read_kreg64(dd, idx);
dd               5911 drivers/infiniband/hw/qib/qib_iba7322.c 		qib_write_kreg(dd, idx, tval);
dd               5912 drivers/infiniband/hw/qib/qib_iba7322.c 		qib_write_kreg(dd, kr_scratch, 0Ull);
dd               5914 drivers/infiniband/hw/qib/qib_iba7322.c 	spin_unlock_irqrestore(&dd->sendctrl_lock, flags);
dd               5980 drivers/infiniband/hw/qib/qib_iba7322.c 		if (!ret && !ppd->dd->cspec->r1) {
dd               6022 drivers/infiniband/hw/qib/qib_iba7322.c 	struct qib_devdata *dd = ppd->dd;
dd               6028 drivers/infiniband/hw/qib/qib_iba7322.c 	spin_lock_irqsave(&dd->cspec->gpio_lock, flags);
dd               6029 drivers/infiniband/hw/qib/qib_iba7322.c 	dd->cspec->extctrl |= (mod_prs_bit << SYM_LSB(EXTCtrl, GPIOInvert));
dd               6030 drivers/infiniband/hw/qib/qib_iba7322.c 	dd->cspec->gpio_mask |= mod_prs_bit;
dd               6031 drivers/infiniband/hw/qib/qib_iba7322.c 	qib_write_kreg(dd, kr_extctrl, dd->cspec->extctrl);
dd               6032 drivers/infiniband/hw/qib/qib_iba7322.c 	qib_write_kreg(dd, kr_gpio_mask, dd->cspec->gpio_mask);
dd               6033 drivers/infiniband/hw/qib/qib_iba7322.c 	spin_unlock_irqrestore(&dd->cspec->gpio_lock, flags);
dd               6049 drivers/infiniband/hw/qib/qib_iba7322.c static void set_no_qsfp_atten(struct qib_devdata *dd, int change)
dd               6061 drivers/infiniband/hw/qib/qib_iba7322.c 	for (pidx = 0; pidx < dd->num_pports; ++pidx)
dd               6062 drivers/infiniband/hw/qib/qib_iba7322.c 		dd->pport[pidx].cpspec->no_eep = deflt;
dd               6065 drivers/infiniband/hw/qib/qib_iba7322.c 	if (IS_QME(dd) || IS_QMH(dd))
dd               6103 drivers/infiniband/hw/qib/qib_iba7322.c 		for (pidx = 0; dd->unit == unit && pidx < dd->num_pports;
dd               6105 drivers/infiniband/hw/qib/qib_iba7322.c 			struct qib_pportdata *ppd = &dd->pport[pidx];
dd               6117 drivers/infiniband/hw/qib/qib_iba7322.c 			if (IS_QMH(dd) || IS_QME(dd))
dd               6130 drivers/infiniband/hw/qib/qib_iba7322.c 		for (pidx = 0; pidx < dd->num_pports; ++pidx)
dd               6131 drivers/infiniband/hw/qib/qib_iba7322.c 			if (dd->pport[pidx].link_speed_supported)
dd               6132 drivers/infiniband/hw/qib/qib_iba7322.c 				init_txdds_table(&dd->pport[pidx], 0);
dd               6139 drivers/infiniband/hw/qib/qib_iba7322.c 	struct qib_devdata *dd;
dd               6156 drivers/infiniband/hw/qib/qib_iba7322.c 	xa_for_each(&qib_dev_table, index, dd)
dd               6157 drivers/infiniband/hw/qib/qib_iba7322.c 		if (dd->deviceid == PCI_DEVICE_ID_QLOGIC_IB_7322)
dd               6158 drivers/infiniband/hw/qib/qib_iba7322.c 			set_no_qsfp_atten(dd, 1);
dd               6167 drivers/infiniband/hw/qib/qib_iba7322.c static int qib_late_7322_initreg(struct qib_devdata *dd)
dd               6172 drivers/infiniband/hw/qib/qib_iba7322.c 	qib_write_kreg(dd, kr_rcvhdrentsize, dd->rcvhdrentsize);
dd               6173 drivers/infiniband/hw/qib/qib_iba7322.c 	qib_write_kreg(dd, kr_rcvhdrsize, dd->rcvhdrsize);
dd               6174 drivers/infiniband/hw/qib/qib_iba7322.c 	qib_write_kreg(dd, kr_rcvhdrcnt, dd->rcvhdrcnt);
dd               6175 drivers/infiniband/hw/qib/qib_iba7322.c 	qib_write_kreg(dd, kr_sendpioavailaddr, dd->pioavailregs_phys);
dd               6176 drivers/infiniband/hw/qib/qib_iba7322.c 	val = qib_read_kreg64(dd, kr_sendpioavailaddr);
dd               6177 drivers/infiniband/hw/qib/qib_iba7322.c 	if (val != dd->pioavailregs_phys) {
dd               6178 drivers/infiniband/hw/qib/qib_iba7322.c 		qib_dev_err(dd,
dd               6180 drivers/infiniband/hw/qib/qib_iba7322.c 			(unsigned long) dd->pioavailregs_phys,
dd               6185 drivers/infiniband/hw/qib/qib_iba7322.c 	n = dd->piobcnt2k + dd->piobcnt4k + NUM_VL15_BUFS;
dd               6186 drivers/infiniband/hw/qib/qib_iba7322.c 	qib_7322_txchk_change(dd, 0, n, TXCHK_CHG_TYPE_KERN, NULL);
dd               6188 drivers/infiniband/hw/qib/qib_iba7322.c 	qib_7322_txchk_change(dd, 0, n, TXCHK_CHG_TYPE_ENAB1, NULL);
dd               6190 drivers/infiniband/hw/qib/qib_iba7322.c 	qib_register_observer(dd, &sendctrl_0_observer);
dd               6191 drivers/infiniband/hw/qib/qib_iba7322.c 	qib_register_observer(dd, &sendctrl_1_observer);
dd               6193 drivers/infiniband/hw/qib/qib_iba7322.c 	dd->control &= ~QLOGIC_IB_C_SDMAFETCHPRIOEN;
dd               6194 drivers/infiniband/hw/qib/qib_iba7322.c 	qib_write_kreg(dd, kr_control, dd->control);
dd               6201 drivers/infiniband/hw/qib/qib_iba7322.c 	set_no_qsfp_atten(dd, 0);
dd               6202 drivers/infiniband/hw/qib/qib_iba7322.c 	for (n = 0; n < dd->num_pports; ++n) {
dd               6203 drivers/infiniband/hw/qib/qib_iba7322.c 		struct qib_pportdata *ppd = dd->pport + n;
dd               6208 drivers/infiniband/hw/qib/qib_iba7322.c 		if (dd->flags & QIB_HAS_QSFP)
dd               6211 drivers/infiniband/hw/qib/qib_iba7322.c 	dd->control |= QLOGIC_IB_C_SDMAFETCHPRIOEN;
dd               6212 drivers/infiniband/hw/qib/qib_iba7322.c 	qib_write_kreg(dd, kr_control, dd->control);
dd               6241 drivers/infiniband/hw/qib/qib_iba7322.c 		qib_write_kreg(ppd->dd, kr_scratch, 0);
dd               6272 drivers/infiniband/hw/qib/qib_iba7322.c 	if (ppd->dd->cspec->r1)
dd               6283 drivers/infiniband/hw/qib/qib_iba7322.c static void write_7322_initregs(struct qib_devdata *dd)
dd               6290 drivers/infiniband/hw/qib/qib_iba7322.c 	qib_write_kreg(dd, KREG_IDX(RcvQPMulticastContext_1), 1);
dd               6292 drivers/infiniband/hw/qib/qib_iba7322.c 	for (pidx = 0; pidx < dd->num_pports; ++pidx) {
dd               6296 drivers/infiniband/hw/qib/qib_iba7322.c 		if (dd->n_krcv_queues < 2 ||
dd               6297 drivers/infiniband/hw/qib/qib_iba7322.c 			!dd->pport[pidx].link_speed_supported)
dd               6300 drivers/infiniband/hw/qib/qib_iba7322.c 		ppd = &dd->pport[pidx];
dd               6303 drivers/infiniband/hw/qib/qib_iba7322.c 		spin_lock_irqsave(&dd->cspec->rcvmod_lock, flags);
dd               6305 drivers/infiniband/hw/qib/qib_iba7322.c 		spin_unlock_irqrestore(&dd->cspec->rcvmod_lock, flags);
dd               6310 drivers/infiniband/hw/qib/qib_iba7322.c 		if (dd->num_pports > 1)
dd               6311 drivers/infiniband/hw/qib/qib_iba7322.c 			n = dd->first_user_ctxt / dd->num_pports;
dd               6313 drivers/infiniband/hw/qib/qib_iba7322.c 			n = dd->first_user_ctxt - 1;
dd               6317 drivers/infiniband/hw/qib/qib_iba7322.c 			if (dd->num_pports > 1)
dd               6318 drivers/infiniband/hw/qib/qib_iba7322.c 				ctxt = (i % n) * dd->num_pports + pidx;
dd               6340 drivers/infiniband/hw/qib/qib_iba7322.c 	for (i = 0; i < dd->first_user_ctxt; i++) {
dd               6341 drivers/infiniband/hw/qib/qib_iba7322.c 		dd->cspec->rcvavail_timeout[i] = rcv_int_timeout;
dd               6342 drivers/infiniband/hw/qib/qib_iba7322.c 		qib_write_kreg(dd, kr_rcvavailtimeout + i, rcv_int_timeout);
dd               6351 drivers/infiniband/hw/qib/qib_iba7322.c 	for (i = 0; i < dd->cfgctxts; i++) {
dd               6355 drivers/infiniband/hw/qib/qib_iba7322.c 			qib_write_ureg(dd, ur_rcvflowtable+flow, val, i);
dd               6363 drivers/infiniband/hw/qib/qib_iba7322.c 	if (dd->num_pports)
dd               6364 drivers/infiniband/hw/qib/qib_iba7322.c 		setup_7322_link_recovery(dd->pport, dd->num_pports > 1);
dd               6367 drivers/infiniband/hw/qib/qib_iba7322.c static int qib_init_7322_variables(struct qib_devdata *dd)
dd               6376 drivers/infiniband/hw/qib/qib_iba7322.c 	ppd = (struct qib_pportdata *)(dd + 1);
dd               6377 drivers/infiniband/hw/qib/qib_iba7322.c 	dd->pport = ppd;
dd               6378 drivers/infiniband/hw/qib/qib_iba7322.c 	ppd[0].dd = dd;
dd               6379 drivers/infiniband/hw/qib/qib_iba7322.c 	ppd[1].dd = dd;
dd               6381 drivers/infiniband/hw/qib/qib_iba7322.c 	dd->cspec = (struct qib_chip_specific *)(ppd + 2);
dd               6383 drivers/infiniband/hw/qib/qib_iba7322.c 	ppd[0].cpspec = (struct qib_chippport_specific *)(dd->cspec + 1);
dd               6388 drivers/infiniband/hw/qib/qib_iba7322.c 	spin_lock_init(&dd->cspec->rcvmod_lock);
dd               6389 drivers/infiniband/hw/qib/qib_iba7322.c 	spin_lock_init(&dd->cspec->gpio_lock);
dd               6392 drivers/infiniband/hw/qib/qib_iba7322.c 	dd->revision = readq(&dd->kregbase[kr_revision]);
dd               6394 drivers/infiniband/hw/qib/qib_iba7322.c 	if ((dd->revision & 0xffffffffU) == 0xffffffffU) {
dd               6395 drivers/infiniband/hw/qib/qib_iba7322.c 		qib_dev_err(dd,
dd               6400 drivers/infiniband/hw/qib/qib_iba7322.c 	dd->flags |= QIB_PRESENT;  /* now register routines work */
dd               6402 drivers/infiniband/hw/qib/qib_iba7322.c 	dd->majrev = (u8) SYM_FIELD(dd->revision, Revision_R, ChipRevMajor);
dd               6403 drivers/infiniband/hw/qib/qib_iba7322.c 	dd->minrev = (u8) SYM_FIELD(dd->revision, Revision_R, ChipRevMinor);
dd               6404 drivers/infiniband/hw/qib/qib_iba7322.c 	dd->cspec->r1 = dd->minrev == 1;
dd               6406 drivers/infiniband/hw/qib/qib_iba7322.c 	get_7322_chip_params(dd);
dd               6407 drivers/infiniband/hw/qib/qib_iba7322.c 	features = qib_7322_boardname(dd);
dd               6410 drivers/infiniband/hw/qib/qib_iba7322.c 	sbufcnt = dd->piobcnt2k + dd->piobcnt4k +
dd               6413 drivers/infiniband/hw/qib/qib_iba7322.c 	dd->cspec->sendchkenable =
dd               6414 drivers/infiniband/hw/qib/qib_iba7322.c 		kmalloc_array(sbufcnt, sizeof(*dd->cspec->sendchkenable),
dd               6416 drivers/infiniband/hw/qib/qib_iba7322.c 	dd->cspec->sendgrhchk =
dd               6417 drivers/infiniband/hw/qib/qib_iba7322.c 		kmalloc_array(sbufcnt, sizeof(*dd->cspec->sendgrhchk),
dd               6419 drivers/infiniband/hw/qib/qib_iba7322.c 	dd->cspec->sendibchk =
dd               6420 drivers/infiniband/hw/qib/qib_iba7322.c 		kmalloc_array(sbufcnt, sizeof(*dd->cspec->sendibchk),
dd               6422 drivers/infiniband/hw/qib/qib_iba7322.c 	if (!dd->cspec->sendchkenable || !dd->cspec->sendgrhchk ||
dd               6423 drivers/infiniband/hw/qib/qib_iba7322.c 		!dd->cspec->sendibchk) {
dd               6428 drivers/infiniband/hw/qib/qib_iba7322.c 	ppd = dd->pport;
dd               6434 drivers/infiniband/hw/qib/qib_iba7322.c 	dd->gpio_sda_num = _QIB_GPIO_SDA_NUM;
dd               6435 drivers/infiniband/hw/qib/qib_iba7322.c 	dd->gpio_scl_num = _QIB_GPIO_SCL_NUM;
dd               6436 drivers/infiniband/hw/qib/qib_iba7322.c 	dd->twsi_eeprom_dev = QIB_TWSI_EEPROM_DEV;
dd               6438 drivers/infiniband/hw/qib/qib_iba7322.c 	dd->flags |= QIB_HAS_INTX | QIB_HAS_LINK_LATENCY |
dd               6442 drivers/infiniband/hw/qib/qib_iba7322.c 	dd->flags |= qib_special_trigger ?
dd               6449 drivers/infiniband/hw/qib/qib_iba7322.c 	qib_7322_set_baseaddrs(dd);
dd               6455 drivers/infiniband/hw/qib/qib_iba7322.c 	dd->cspec->int_enable_mask = QIB_I_BITSEXTANT;
dd               6457 drivers/infiniband/hw/qib/qib_iba7322.c 	dd->cspec->hwerrmask = ~0ULL;
dd               6460 drivers/infiniband/hw/qib/qib_iba7322.c 	dd->cspec->hwerrmask &=
dd               6472 drivers/infiniband/hw/qib/qib_iba7322.c 			dd->skip_kctxt_mask |= 1 << pidx;
dd               6478 drivers/infiniband/hw/qib/qib_iba7322.c 				dd->cspec->hwerrmask &= ~(SYM_MASK(HwErrMask,
dd               6482 drivers/infiniband/hw/qib/qib_iba7322.c 				dd->cspec->int_enable_mask &= ~(
dd               6493 drivers/infiniband/hw/qib/qib_iba7322.c 				dd->cspec->hwerrmask &= ~(SYM_MASK(HwErrMask,
dd               6497 drivers/infiniband/hw/qib/qib_iba7322.c 				dd->cspec->int_enable_mask &= ~(
dd               6508 drivers/infiniband/hw/qib/qib_iba7322.c 		dd->num_pports++;
dd               6509 drivers/infiniband/hw/qib/qib_iba7322.c 		ret = qib_init_pportdata(ppd, dd, pidx, dd->num_pports);
dd               6511 drivers/infiniband/hw/qib/qib_iba7322.c 			dd->num_pports--;
dd               6533 drivers/infiniband/hw/qib/qib_iba7322.c 			qib_devinfo(dd->pcidev,
dd               6545 drivers/infiniband/hw/qib/qib_iba7322.c 				qib_devinfo(dd->pcidev,
dd               6558 drivers/infiniband/hw/qib/qib_iba7322.c 		if (ppd->dd->cspec->r1)
dd               6566 drivers/infiniband/hw/qib/qib_iba7322.c 		if (!(dd->flags & QIB_HAS_QSFP)) {
dd               6567 drivers/infiniband/hw/qib/qib_iba7322.c 			if (!IS_QMH(dd) && !IS_QME(dd))
dd               6568 drivers/infiniband/hw/qib/qib_iba7322.c 				qib_devinfo(dd->pcidev,
dd               6570 drivers/infiniband/hw/qib/qib_iba7322.c 					dd->unit, ppd->port);
dd               6571 drivers/infiniband/hw/qib/qib_iba7322.c 			cp->h1_val = IS_QMH(dd) ? H1_FORCE_QMH : H1_FORCE_QME;
dd               6576 drivers/infiniband/hw/qib/qib_iba7322.c 			ppd->cpspec->no_eep = IS_QMH(dd) ?
dd               6590 drivers/infiniband/hw/qib/qib_iba7322.c 	dd->rcvhdrentsize = qib_rcvhdrentsize ?
dd               6592 drivers/infiniband/hw/qib/qib_iba7322.c 	dd->rcvhdrsize = qib_rcvhdrsize ?
dd               6594 drivers/infiniband/hw/qib/qib_iba7322.c 	dd->rhf_offset = dd->rcvhdrentsize - sizeof(u64) / sizeof(u32);
dd               6597 drivers/infiniband/hw/qib/qib_iba7322.c 	dd->rcvegrbufsize = max(mtu, 2048);
dd               6598 drivers/infiniband/hw/qib/qib_iba7322.c 	dd->rcvegrbufsize_shift = ilog2(dd->rcvegrbufsize);
dd               6600 drivers/infiniband/hw/qib/qib_iba7322.c 	qib_7322_tidtemplate(dd);
dd               6606 drivers/infiniband/hw/qib/qib_iba7322.c 	dd->rhdrhead_intr_off =
dd               6610 drivers/infiniband/hw/qib/qib_iba7322.c 	timer_setup(&dd->stats_timer, qib_get_7322_faststats, 0);
dd               6612 drivers/infiniband/hw/qib/qib_iba7322.c 	dd->ureg_align = 0x10000;  /* 64KB alignment */
dd               6614 drivers/infiniband/hw/qib/qib_iba7322.c 	dd->piosize2kmax_dwords = dd->piosize2k >> 2;
dd               6616 drivers/infiniband/hw/qib/qib_iba7322.c 	qib_7322_config_ctxts(dd);
dd               6617 drivers/infiniband/hw/qib/qib_iba7322.c 	qib_set_ctxtcnt(dd);
dd               6626 drivers/infiniband/hw/qib/qib_iba7322.c 	ret = init_chip_wc_pat(dd, 0);
dd               6631 drivers/infiniband/hw/qib/qib_iba7322.c 	vl15off = dd->physaddr + (dd->piobufbase >> 32) +
dd               6632 drivers/infiniband/hw/qib/qib_iba7322.c 		  dd->piobcnt4k * dd->align4k;
dd               6633 drivers/infiniband/hw/qib/qib_iba7322.c 	dd->piovl15base	= ioremap_nocache(vl15off,
dd               6634 drivers/infiniband/hw/qib/qib_iba7322.c 					  NUM_VL15_BUFS * dd->align4k);
dd               6635 drivers/infiniband/hw/qib/qib_iba7322.c 	if (!dd->piovl15base) {
dd               6640 drivers/infiniband/hw/qib/qib_iba7322.c 	qib_7322_set_baseaddrs(dd); /* set chip access pointers now */
dd               6645 drivers/infiniband/hw/qib/qib_iba7322.c 	if (!dd->num_pports) {
dd               6646 drivers/infiniband/hw/qib/qib_iba7322.c 		qib_dev_err(dd, "No ports enabled, giving up initialization\n");
dd               6650 drivers/infiniband/hw/qib/qib_iba7322.c 	write_7322_initregs(dd);
dd               6651 drivers/infiniband/hw/qib/qib_iba7322.c 	ret = qib_create_ctxts(dd);
dd               6652 drivers/infiniband/hw/qib/qib_iba7322.c 	init_7322_cntrnames(dd);
dd               6666 drivers/infiniband/hw/qib/qib_iba7322.c 	if (dd->flags & QIB_HAS_SEND_DMA) {
dd               6667 drivers/infiniband/hw/qib/qib_iba7322.c 		dd->cspec->sdmabufcnt = dd->piobcnt4k;
dd               6670 drivers/infiniband/hw/qib/qib_iba7322.c 		dd->cspec->sdmabufcnt = 0;
dd               6671 drivers/infiniband/hw/qib/qib_iba7322.c 		sbufs = dd->piobcnt4k;
dd               6673 drivers/infiniband/hw/qib/qib_iba7322.c 	dd->cspec->lastbuf_for_pio = dd->piobcnt2k + dd->piobcnt4k -
dd               6674 drivers/infiniband/hw/qib/qib_iba7322.c 		dd->cspec->sdmabufcnt;
dd               6675 drivers/infiniband/hw/qib/qib_iba7322.c 	dd->lastctxt_piobuf = dd->cspec->lastbuf_for_pio - sbufs;
dd               6676 drivers/infiniband/hw/qib/qib_iba7322.c 	dd->cspec->lastbuf_for_pio--; /* range is <= , not < */
dd               6677 drivers/infiniband/hw/qib/qib_iba7322.c 	dd->last_pio = dd->cspec->lastbuf_for_pio;
dd               6678 drivers/infiniband/hw/qib/qib_iba7322.c 	dd->pbufsctxt = (dd->cfgctxts > dd->first_user_ctxt) ?
dd               6679 drivers/infiniband/hw/qib/qib_iba7322.c 		dd->lastctxt_piobuf / (dd->cfgctxts - dd->first_user_ctxt) : 0;
dd               6687 drivers/infiniband/hw/qib/qib_iba7322.c 	if (dd->pbufsctxt >= 2 && dd->pbufsctxt - 2 < updthresh)
dd               6688 drivers/infiniband/hw/qib/qib_iba7322.c 		updthresh = dd->pbufsctxt - 2;
dd               6689 drivers/infiniband/hw/qib/qib_iba7322.c 	dd->cspec->updthresh_dflt = updthresh;
dd               6690 drivers/infiniband/hw/qib/qib_iba7322.c 	dd->cspec->updthresh = updthresh;
dd               6693 drivers/infiniband/hw/qib/qib_iba7322.c 	dd->sendctrl |= ((updthresh & SYM_RMASK(SendCtrl, AvailUpdThld))
dd               6697 drivers/infiniband/hw/qib/qib_iba7322.c 	dd->psxmitwait_supported = 1;
dd               6698 drivers/infiniband/hw/qib/qib_iba7322.c 	dd->psxmitwait_check_rate = QIB_7322_PSXMITWAIT_CHECK_RATE;
dd               6700 drivers/infiniband/hw/qib/qib_iba7322.c 	if (!dd->ctxtcnt)
dd               6701 drivers/infiniband/hw/qib/qib_iba7322.c 		dd->ctxtcnt = 1; /* for other initialization code */
dd               6710 drivers/infiniband/hw/qib/qib_iba7322.c 	struct qib_devdata *dd = ppd->dd;
dd               6714 drivers/infiniband/hw/qib/qib_iba7322.c 		first = dd->piobcnt2k + dd->piobcnt4k + ppd->hw_pidx;
dd               6717 drivers/infiniband/hw/qib/qib_iba7322.c 		if ((plen + 1) > dd->piosize2kmax_dwords)
dd               6718 drivers/infiniband/hw/qib/qib_iba7322.c 			first = dd->piobcnt2k;
dd               6721 drivers/infiniband/hw/qib/qib_iba7322.c 		last = dd->cspec->lastbuf_for_pio;
dd               6723 drivers/infiniband/hw/qib/qib_iba7322.c 	return qib_getsendbuf_range(dd, pbufnum, first, last);
dd               6749 drivers/infiniband/hw/qib/qib_iba7322.c 	qib_dev_porterr(ppd->dd, ppd->port,
dd               6753 drivers/infiniband/hw/qib/qib_iba7322.c 	qib_dev_porterr(ppd->dd, ppd->port,
dd               6757 drivers/infiniband/hw/qib/qib_iba7322.c 	qib_dev_porterr(ppd->dd, ppd->port,
dd               6763 drivers/infiniband/hw/qib/qib_iba7322.c 	qib_dev_porterr(ppd->dd, ppd->port,
dd               6775 drivers/infiniband/hw/qib/qib_iba7322.c 	qib_dev_porterr(ppd->dd, ppd->port,
dd               6782 drivers/infiniband/hw/qib/qib_iba7322.c 	qib_dev_porterr(ppd->dd, ppd->port,
dd               6787 drivers/infiniband/hw/qib/qib_iba7322.c 	qib_dev_porterr(ppd->dd, ppd->port,
dd               6791 drivers/infiniband/hw/qib/qib_iba7322.c 	qib_dev_porterr(ppd->dd, ppd->port,
dd               6795 drivers/infiniband/hw/qib/qib_iba7322.c 	qib_dev_porterr(ppd->dd, ppd->port,
dd               6799 drivers/infiniband/hw/qib/qib_iba7322.c 	qib_dev_porterr(ppd->dd, ppd->port,
dd               6803 drivers/infiniband/hw/qib/qib_iba7322.c 	qib_dev_porterr(ppd->dd, ppd->port,
dd               6807 drivers/infiniband/hw/qib/qib_iba7322.c 	qib_dev_porterr(ppd->dd, ppd->port,
dd               6811 drivers/infiniband/hw/qib/qib_iba7322.c 	qib_dev_porterr(ppd->dd, ppd->port,
dd               6815 drivers/infiniband/hw/qib/qib_iba7322.c 	qib_dev_porterr(ppd->dd, ppd->port,
dd               6875 drivers/infiniband/hw/qib/qib_iba7322.c 	struct qib_devdata *dd = ppd->dd;
dd               6887 drivers/infiniband/hw/qib/qib_iba7322.c 	if (dd->num_pports)
dd               6888 drivers/infiniband/hw/qib/qib_iba7322.c 		n = dd->cspec->sdmabufcnt / dd->num_pports; /* no remainder */
dd               6890 drivers/infiniband/hw/qib/qib_iba7322.c 		n = dd->cspec->sdmabufcnt; /* failsafe for init */
dd               6891 drivers/infiniband/hw/qib/qib_iba7322.c 	erstbuf = (dd->piobcnt2k + dd->piobcnt4k) -
dd               6892 drivers/infiniband/hw/qib/qib_iba7322.c 		((dd->num_pports == 1 || ppd->port == 2) ? n :
dd               6893 drivers/infiniband/hw/qib/qib_iba7322.c 		dd->cspec->sdmabufcnt);
dd               6913 drivers/infiniband/hw/qib/qib_iba7322.c 	struct qib_devdata *dd = ppd->dd;
dd               6922 drivers/infiniband/hw/qib/qib_iba7322.c 		(dd->flags & QIB_HAS_SDMA_TIMEOUT);
dd               6998 drivers/infiniband/hw/qib/qib_iba7322.c static void qib_7322_initvl15_bufs(struct qib_devdata *dd)
dd               7002 drivers/infiniband/hw/qib/qib_iba7322.c 	vl15bufs = dd->piobcnt2k + dd->piobcnt4k;
dd               7003 drivers/infiniband/hw/qib/qib_iba7322.c 	qib_chg_pioavailkernel(dd, vl15bufs, NUM_VL15_BUFS,
dd               7010 drivers/infiniband/hw/qib/qib_iba7322.c 		if (rcd->dd->num_pports > 1) {
dd               7018 drivers/infiniband/hw/qib/qib_iba7322.c 		rcd->rcvegrcnt = rcd->dd->cspec->rcvegrcnt;
dd               7025 drivers/infiniband/hw/qib/qib_iba7322.c static void qib_7322_txchk_change(struct qib_devdata *dd, u32 start,
dd               7052 drivers/infiniband/hw/qib/qib_iba7322.c 					le64_to_cpu(dd->pioavailregs_dma[i]);
dd               7067 drivers/infiniband/hw/qib/qib_iba7322.c 		sendctrl_7322_mod(dd->pport, QIB_SENDCTRL_AVAIL_BLIP);
dd               7079 drivers/infiniband/hw/qib/qib_iba7322.c 			clear_bit(i, dd->cspec->sendchkenable);
dd               7089 drivers/infiniband/hw/qib/qib_iba7322.c 		qib_read_kreg32(dd, kr_scratch);
dd               7091 drivers/infiniband/hw/qib/qib_iba7322.c 			set_bit(i, dd->cspec->sendchkenable);
dd               7097 drivers/infiniband/hw/qib/qib_iba7322.c 			set_bit(i, dd->cspec->sendibchk);
dd               7098 drivers/infiniband/hw/qib/qib_iba7322.c 			clear_bit(i, dd->cspec->sendgrhchk);
dd               7100 drivers/infiniband/hw/qib/qib_iba7322.c 		spin_lock_irqsave(&dd->uctxt_lock, flags);
dd               7102 drivers/infiniband/hw/qib/qib_iba7322.c 		for (i = dd->first_user_ctxt;
dd               7103 drivers/infiniband/hw/qib/qib_iba7322.c 		     dd->cspec->updthresh != dd->cspec->updthresh_dflt
dd               7104 drivers/infiniband/hw/qib/qib_iba7322.c 		     && i < dd->cfgctxts; i++)
dd               7105 drivers/infiniband/hw/qib/qib_iba7322.c 			if (dd->rcd[i] && dd->rcd[i]->subctxt_cnt &&
dd               7106 drivers/infiniband/hw/qib/qib_iba7322.c 			   ((dd->rcd[i]->piocnt / dd->rcd[i]->subctxt_cnt) - 1)
dd               7107 drivers/infiniband/hw/qib/qib_iba7322.c 			   < dd->cspec->updthresh_dflt)
dd               7109 drivers/infiniband/hw/qib/qib_iba7322.c 		spin_unlock_irqrestore(&dd->uctxt_lock, flags);
dd               7110 drivers/infiniband/hw/qib/qib_iba7322.c 		if (i == dd->cfgctxts) {
dd               7111 drivers/infiniband/hw/qib/qib_iba7322.c 			spin_lock_irqsave(&dd->sendctrl_lock, flags);
dd               7112 drivers/infiniband/hw/qib/qib_iba7322.c 			dd->cspec->updthresh = dd->cspec->updthresh_dflt;
dd               7113 drivers/infiniband/hw/qib/qib_iba7322.c 			dd->sendctrl &= ~SYM_MASK(SendCtrl, AvailUpdThld);
dd               7114 drivers/infiniband/hw/qib/qib_iba7322.c 			dd->sendctrl |= (dd->cspec->updthresh &
dd               7117 drivers/infiniband/hw/qib/qib_iba7322.c 			spin_unlock_irqrestore(&dd->sendctrl_lock, flags);
dd               7118 drivers/infiniband/hw/qib/qib_iba7322.c 			sendctrl_7322_mod(dd->pport, QIB_SENDCTRL_AVAIL_BLIP);
dd               7125 drivers/infiniband/hw/qib/qib_iba7322.c 			clear_bit(i, dd->cspec->sendibchk);
dd               7126 drivers/infiniband/hw/qib/qib_iba7322.c 			set_bit(i, dd->cspec->sendgrhchk);
dd               7128 drivers/infiniband/hw/qib/qib_iba7322.c 		spin_lock_irqsave(&dd->sendctrl_lock, flags);
dd               7130 drivers/infiniband/hw/qib/qib_iba7322.c 			/ rcd->subctxt_cnt) - 1) < dd->cspec->updthresh) {
dd               7131 drivers/infiniband/hw/qib/qib_iba7322.c 			dd->cspec->updthresh = (rcd->piocnt /
dd               7133 drivers/infiniband/hw/qib/qib_iba7322.c 			dd->sendctrl &= ~SYM_MASK(SendCtrl, AvailUpdThld);
dd               7134 drivers/infiniband/hw/qib/qib_iba7322.c 			dd->sendctrl |= (dd->cspec->updthresh &
dd               7137 drivers/infiniband/hw/qib/qib_iba7322.c 			spin_unlock_irqrestore(&dd->sendctrl_lock, flags);
dd               7138 drivers/infiniband/hw/qib/qib_iba7322.c 			sendctrl_7322_mod(dd->pport, QIB_SENDCTRL_AVAIL_BLIP);
dd               7140 drivers/infiniband/hw/qib/qib_iba7322.c 			spin_unlock_irqrestore(&dd->sendctrl_lock, flags);
dd               7148 drivers/infiniband/hw/qib/qib_iba7322.c 		qib_write_kreg(dd, kr_sendcheckmask + i,
dd               7149 drivers/infiniband/hw/qib/qib_iba7322.c 			       dd->cspec->sendchkenable[i]);
dd               7152 drivers/infiniband/hw/qib/qib_iba7322.c 		qib_write_kreg(dd, kr_sendgrhcheckmask + i,
dd               7153 drivers/infiniband/hw/qib/qib_iba7322.c 			       dd->cspec->sendgrhchk[i]);
dd               7154 drivers/infiniband/hw/qib/qib_iba7322.c 		qib_write_kreg(dd, kr_sendibpktmask + i,
dd               7155 drivers/infiniband/hw/qib/qib_iba7322.c 			       dd->cspec->sendibchk[i]);
dd               7162 drivers/infiniband/hw/qib/qib_iba7322.c 	qib_read_kreg32(dd, kr_scratch);
dd               7167 drivers/infiniband/hw/qib/qib_iba7322.c static void writescratch(struct qib_devdata *dd, u32 val)
dd               7169 drivers/infiniband/hw/qib/qib_iba7322.c 	qib_write_kreg(dd, kr_scratch, val);
dd               7173 drivers/infiniband/hw/qib/qib_iba7322.c static int qib_7322_tempsense_rd(struct qib_devdata *dd, int regnum)
dd               7192 drivers/infiniband/hw/qib/qib_iba7322.c 	struct qib_devdata *dd;
dd               7196 drivers/infiniband/hw/qib/qib_iba7322.c 	dd = qib_alloc_devdata(pdev,
dd               7200 drivers/infiniband/hw/qib/qib_iba7322.c 	if (IS_ERR(dd))
dd               7203 drivers/infiniband/hw/qib/qib_iba7322.c 	dd->f_bringup_serdes    = qib_7322_bringup_serdes;
dd               7204 drivers/infiniband/hw/qib/qib_iba7322.c 	dd->f_cleanup           = qib_setup_7322_cleanup;
dd               7205 drivers/infiniband/hw/qib/qib_iba7322.c 	dd->f_clear_tids        = qib_7322_clear_tids;
dd               7206 drivers/infiniband/hw/qib/qib_iba7322.c 	dd->f_free_irq          = qib_7322_free_irq;
dd               7207 drivers/infiniband/hw/qib/qib_iba7322.c 	dd->f_get_base_info     = qib_7322_get_base_info;
dd               7208 drivers/infiniband/hw/qib/qib_iba7322.c 	dd->f_get_msgheader     = qib_7322_get_msgheader;
dd               7209 drivers/infiniband/hw/qib/qib_iba7322.c 	dd->f_getsendbuf        = qib_7322_getsendbuf;
dd               7210 drivers/infiniband/hw/qib/qib_iba7322.c 	dd->f_gpio_mod          = gpio_7322_mod;
dd               7211 drivers/infiniband/hw/qib/qib_iba7322.c 	dd->f_eeprom_wen        = qib_7322_eeprom_wen;
dd               7212 drivers/infiniband/hw/qib/qib_iba7322.c 	dd->f_hdrqempty         = qib_7322_hdrqempty;
dd               7213 drivers/infiniband/hw/qib/qib_iba7322.c 	dd->f_ib_updown         = qib_7322_ib_updown;
dd               7214 drivers/infiniband/hw/qib/qib_iba7322.c 	dd->f_init_ctxt         = qib_7322_init_ctxt;
dd               7215 drivers/infiniband/hw/qib/qib_iba7322.c 	dd->f_initvl15_bufs     = qib_7322_initvl15_bufs;
dd               7216 drivers/infiniband/hw/qib/qib_iba7322.c 	dd->f_intr_fallback     = qib_7322_intr_fallback;
dd               7217 drivers/infiniband/hw/qib/qib_iba7322.c 	dd->f_late_initreg      = qib_late_7322_initreg;
dd               7218 drivers/infiniband/hw/qib/qib_iba7322.c 	dd->f_setpbc_control    = qib_7322_setpbc_control;
dd               7219 drivers/infiniband/hw/qib/qib_iba7322.c 	dd->f_portcntr          = qib_portcntr_7322;
dd               7220 drivers/infiniband/hw/qib/qib_iba7322.c 	dd->f_put_tid           = qib_7322_put_tid;
dd               7221 drivers/infiniband/hw/qib/qib_iba7322.c 	dd->f_quiet_serdes      = qib_7322_mini_quiet_serdes;
dd               7222 drivers/infiniband/hw/qib/qib_iba7322.c 	dd->f_rcvctrl           = rcvctrl_7322_mod;
dd               7223 drivers/infiniband/hw/qib/qib_iba7322.c 	dd->f_read_cntrs        = qib_read_7322cntrs;
dd               7224 drivers/infiniband/hw/qib/qib_iba7322.c 	dd->f_read_portcntrs    = qib_read_7322portcntrs;
dd               7225 drivers/infiniband/hw/qib/qib_iba7322.c 	dd->f_reset             = qib_do_7322_reset;
dd               7226 drivers/infiniband/hw/qib/qib_iba7322.c 	dd->f_init_sdma_regs    = init_sdma_7322_regs;
dd               7227 drivers/infiniband/hw/qib/qib_iba7322.c 	dd->f_sdma_busy         = qib_sdma_7322_busy;
dd               7228 drivers/infiniband/hw/qib/qib_iba7322.c 	dd->f_sdma_gethead      = qib_sdma_7322_gethead;
dd               7229 drivers/infiniband/hw/qib/qib_iba7322.c 	dd->f_sdma_sendctrl     = qib_7322_sdma_sendctrl;
dd               7230 drivers/infiniband/hw/qib/qib_iba7322.c 	dd->f_sdma_set_desc_cnt = qib_sdma_set_7322_desc_cnt;
dd               7231 drivers/infiniband/hw/qib/qib_iba7322.c 	dd->f_sdma_update_tail  = qib_sdma_update_7322_tail;
dd               7232 drivers/infiniband/hw/qib/qib_iba7322.c 	dd->f_sendctrl          = sendctrl_7322_mod;
dd               7233 drivers/infiniband/hw/qib/qib_iba7322.c 	dd->f_set_armlaunch     = qib_set_7322_armlaunch;
dd               7234 drivers/infiniband/hw/qib/qib_iba7322.c 	dd->f_set_cntr_sample   = qib_set_cntr_7322_sample;
dd               7235 drivers/infiniband/hw/qib/qib_iba7322.c 	dd->f_iblink_state      = qib_7322_iblink_state;
dd               7236 drivers/infiniband/hw/qib/qib_iba7322.c 	dd->f_ibphys_portstate  = qib_7322_phys_portstate;
dd               7237 drivers/infiniband/hw/qib/qib_iba7322.c 	dd->f_get_ib_cfg        = qib_7322_get_ib_cfg;
dd               7238 drivers/infiniband/hw/qib/qib_iba7322.c 	dd->f_set_ib_cfg        = qib_7322_set_ib_cfg;
dd               7239 drivers/infiniband/hw/qib/qib_iba7322.c 	dd->f_set_ib_loopback   = qib_7322_set_loopback;
dd               7240 drivers/infiniband/hw/qib/qib_iba7322.c 	dd->f_get_ib_table      = qib_7322_get_ib_table;
dd               7241 drivers/infiniband/hw/qib/qib_iba7322.c 	dd->f_set_ib_table      = qib_7322_set_ib_table;
dd               7242 drivers/infiniband/hw/qib/qib_iba7322.c 	dd->f_set_intr_state    = qib_7322_set_intr_state;
dd               7243 drivers/infiniband/hw/qib/qib_iba7322.c 	dd->f_setextled         = qib_setup_7322_setextled;
dd               7244 drivers/infiniband/hw/qib/qib_iba7322.c 	dd->f_txchk_change      = qib_7322_txchk_change;
dd               7245 drivers/infiniband/hw/qib/qib_iba7322.c 	dd->f_update_usrhead    = qib_update_7322_usrhead;
dd               7246 drivers/infiniband/hw/qib/qib_iba7322.c 	dd->f_wantpiobuf_intr   = qib_wantpiobuf_7322_intr;
dd               7247 drivers/infiniband/hw/qib/qib_iba7322.c 	dd->f_xgxs_reset        = qib_7322_mini_pcs_reset;
dd               7248 drivers/infiniband/hw/qib/qib_iba7322.c 	dd->f_sdma_hw_clean_up  = qib_7322_sdma_hw_clean_up;
dd               7249 drivers/infiniband/hw/qib/qib_iba7322.c 	dd->f_sdma_hw_start_up  = qib_7322_sdma_hw_start_up;
dd               7250 drivers/infiniband/hw/qib/qib_iba7322.c 	dd->f_sdma_init_early   = qib_7322_sdma_init_early;
dd               7251 drivers/infiniband/hw/qib/qib_iba7322.c 	dd->f_writescratch      = writescratch;
dd               7252 drivers/infiniband/hw/qib/qib_iba7322.c 	dd->f_tempsense_rd	= qib_7322_tempsense_rd;
dd               7254 drivers/infiniband/hw/qib/qib_iba7322.c 	dd->f_notify_dca	= qib_7322_notify_dca;
dd               7262 drivers/infiniband/hw/qib/qib_iba7322.c 	ret = qib_pcie_ddinit(dd, pdev, ent);
dd               7267 drivers/infiniband/hw/qib/qib_iba7322.c 	ret = qib_init_7322_variables(dd);
dd               7271 drivers/infiniband/hw/qib/qib_iba7322.c 	if (qib_mini_init || !dd->num_pports)
dd               7280 drivers/infiniband/hw/qib/qib_iba7322.c 	tabsize = dd->first_user_ctxt + ARRAY_SIZE(irq_table);
dd               7283 drivers/infiniband/hw/qib/qib_iba7322.c 		     irq_table[i].port <= dd->num_pports) ||
dd               7285 drivers/infiniband/hw/qib/qib_iba7322.c 		     dd->rcd[i - ARRAY_SIZE(irq_table)]))
dd               7289 drivers/infiniband/hw/qib/qib_iba7322.c 		actual_cnt -= dd->num_pports;
dd               7292 drivers/infiniband/hw/qib/qib_iba7322.c 	dd->cspec->msix_entries = kcalloc(tabsize,
dd               7295 drivers/infiniband/hw/qib/qib_iba7322.c 	if (!dd->cspec->msix_entries)
dd               7298 drivers/infiniband/hw/qib/qib_iba7322.c 	if (qib_pcie_params(dd, 8, &tabsize))
dd               7299 drivers/infiniband/hw/qib/qib_iba7322.c 		qib_dev_err(dd,
dd               7302 drivers/infiniband/hw/qib/qib_iba7322.c 	dd->cspec->num_msix_entries = tabsize;
dd               7305 drivers/infiniband/hw/qib/qib_iba7322.c 	qib_setup_7322_interrupt(dd, 1);
dd               7308 drivers/infiniband/hw/qib/qib_iba7322.c 	qib_write_kreg(dd, kr_hwdiagctrl, 0);
dd               7311 drivers/infiniband/hw/qib/qib_iba7322.c 		qib_devinfo(dd->pcidev, "DCA enabled\n");
dd               7312 drivers/infiniband/hw/qib/qib_iba7322.c 		dd->flags |= QIB_DCA_ENABLED;
dd               7313 drivers/infiniband/hw/qib/qib_iba7322.c 		qib_setup_dca(dd);
dd               7319 drivers/infiniband/hw/qib/qib_iba7322.c 	qib_pcie_ddcleanup(dd);
dd               7321 drivers/infiniband/hw/qib/qib_iba7322.c 	qib_free_devdata(dd);
dd               7322 drivers/infiniband/hw/qib/qib_iba7322.c 	dd = ERR_PTR(ret);
dd               7324 drivers/infiniband/hw/qib/qib_iba7322.c 	return dd;
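The long run of assignments above is the driver's chip-ops table: qib_init_iba7322_funcs() points each dd->f_* hook at the IBA7322-specific implementation, so common qib code can drive any supported chip through struct qib_devdata alone. A minimal, self-contained user-space sketch of that function-pointer pattern follows; all names below are illustrative stand-ins, not the driver's actual types or layout.

#include <stdio.h>

/* Hypothetical stand-in for struct qib_devdata and its f_* hooks. */
struct devdata {
	int  (*bringup_serdes)(struct devdata *dd);
	void (*quiet_serdes)(struct devdata *dd);
	int unit;
};

static int chip7322_bringup_serdes(struct devdata *dd)
{
	printf("unit %d: serdes brought up\n", dd->unit);
	return 0;
}

static void chip7322_quiet_serdes(struct devdata *dd)
{
	printf("unit %d: serdes quiesced\n", dd->unit);
}

int main(void)
{
	struct devdata dd = { .unit = 0 };

	/* Same shape as dd->f_bringup_serdes = qib_7322_bringup_serdes; */
	dd.bringup_serdes = chip7322_bringup_serdes;
	dd.quiet_serdes   = chip7322_quiet_serdes;

	dd.bringup_serdes(&dd);	/* common code, chip-specific behavior */
	dd.quiet_serdes(&dd);
	return 0;
}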
dd               7348 drivers/infiniband/hw/qib/qib_iba7322.c 	struct qib_devdata *dd = ppd->dd;
dd               7360 drivers/infiniband/hw/qib/qib_iba7322.c 		regidx += (dd->palign / sizeof(u64));
dd               7366 drivers/infiniband/hw/qib/qib_iba7322.c 	qib_write_kreg(dd, regidx, pack_ent);
dd               7368 drivers/infiniband/hw/qib/qib_iba7322.c 	qib_write_kreg(ppd->dd, kr_scratch, 0);
dd               7641 drivers/infiniband/hw/qib/qib_iba7322.c 		*sdr_dds = txdds_sdr + ppd->dd->board_atten;
dd               7642 drivers/infiniband/hw/qib/qib_iba7322.c 		*ddr_dds = txdds_ddr + ppd->dd->board_atten;
dd               7643 drivers/infiniband/hw/qib/qib_iba7322.c 		*qdr_dds = txdds_qdr + ppd->dd->board_atten;
dd               7670 drivers/infiniband/hw/qib/qib_iba7322.c 	} else if ((IS_QME(ppd->dd) || IS_QMH(ppd->dd)) &&
dd               7675 drivers/infiniband/hw/qib/qib_iba7322.c 			ppd->dd->unit, ppd->port, idx);
dd               7697 drivers/infiniband/hw/qib/qib_iba7322.c 	if (!(ppd->dd->flags & QIB_HAS_QSFP) || override)
dd               7736 drivers/infiniband/hw/qib/qib_iba7322.c static u32 ahb_mod(struct qib_devdata *dd, int quad, int chan, int addr,
dd               7744 drivers/infiniband/hw/qib/qib_iba7322.c 	prev_acc = qib_read_kreg64(dd, KR_AHB_ACC);
dd               7747 drivers/infiniband/hw/qib/qib_iba7322.c 	qib_write_kreg(dd, KR_AHB_ACC, acc);
dd               7750 drivers/infiniband/hw/qib/qib_iba7322.c 		trans = qib_read_kreg64(dd, KR_AHB_TRANS);
dd               7755 drivers/infiniband/hw/qib/qib_iba7322.c 		qib_dev_err(dd, "No ahb_rdy in %d tries\n", AHB_TRANS_TRIES);
dd               7766 drivers/infiniband/hw/qib/qib_iba7322.c 		qib_write_kreg(dd, KR_AHB_TRANS, trans);
dd               7769 drivers/infiniband/hw/qib/qib_iba7322.c 			trans = qib_read_kreg64(dd, KR_AHB_TRANS);
dd               7774 drivers/infiniband/hw/qib/qib_iba7322.c 			qib_dev_err(dd, "No Rd ahb_rdy in %d tries\n",
dd               7779 drivers/infiniband/hw/qib/qib_iba7322.c 		trans = qib_read_kreg64(dd, KR_AHB_TRANS);
dd               7789 drivers/infiniband/hw/qib/qib_iba7322.c 		qib_write_kreg(dd, KR_AHB_TRANS, trans);
dd               7792 drivers/infiniband/hw/qib/qib_iba7322.c 			trans = qib_read_kreg64(dd, KR_AHB_TRANS);
dd               7797 drivers/infiniband/hw/qib/qib_iba7322.c 			qib_dev_err(dd, "No Wr ahb_rdy in %d tries\n",
dd               7804 drivers/infiniband/hw/qib/qib_iba7322.c 	qib_write_kreg(dd, KR_AHB_ACC, prev_acc);
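The ahb_mod() entries above show indirect SerDes register access: save the access register, issue a transaction, and poll KR_AHB_TRANS for a ready bit up to AHB_TRANS_TRIES times before reporting failure. Below is a hedged, runnable analogue of that bounded-poll idiom; the register, the ready-bit position, and the retry bound are all invented for illustration.

#include <stdio.h>

#define AHB_TRANS_TRIES 64		/* illustrative bound; real value is in the driver */
#define AHB_TRANS_RDY   (1ULL << 31)	/* hypothetical ready-bit position */

static unsigned long long fake_trans_reg = AHB_TRANS_RDY;	/* models KR_AHB_TRANS */

static unsigned long long read_trans(void)
{
	return fake_trans_reg;
}

/* Bounded-poll idiom from ahb_mod(): retry until the ready bit appears,
 * then let the caller report "No ahb_rdy in %d tries" on failure. */
static int wait_ahb_rdy(void)
{
	int tries;

	for (tries = 1; tries < AHB_TRANS_TRIES; ++tries)
		if (read_trans() & AHB_TRANS_RDY)
			return 0;
	return -1;
}

int main(void)
{
	if (wait_ahb_rdy() < 0)
		fprintf(stderr, "No ahb_rdy in %d tries\n", AHB_TRANS_TRIES);
	else
		printf("AHB transaction ready\n");
	return 0;
}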
dd               7811 drivers/infiniband/hw/qib/qib_iba7322.c 	struct qib_devdata *dd = ppd->dd;
dd               7815 drivers/infiniband/hw/qib/qib_iba7322.c 		ahb_mod(dd, IBSD(ppd->hw_pidx), (chan + (chan >> 1)), addr,
dd               7817 drivers/infiniband/hw/qib/qib_iba7322.c 		ahb_mod(dd, IBSD(ppd->hw_pidx), (chan + (chan >> 1)), addr,
dd               7829 drivers/infiniband/hw/qib/qib_iba7322.c 			ppd->dd->unit, ppd->port);
dd               7833 drivers/infiniband/hw/qib/qib_iba7322.c 			ppd->dd->unit, ppd->port);
dd               7843 drivers/infiniband/hw/qib/qib_iba7322.c 	if (ppd->dd->cspec->r1)
dd               7875 drivers/infiniband/hw/qib/qib_iba7322.c 	le_val = IS_QME(ppd->dd) ? LE2_QME : LE2_DEFAULT;
dd               7879 drivers/infiniband/hw/qib/qib_iba7322.c 	le_val = IS_QME(ppd->dd) ? 0 : 1;
dd               7883 drivers/infiniband/hw/qib/qib_iba7322.c 	ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 10, 0 << 14, 1 << 14);
dd               7890 drivers/infiniband/hw/qib/qib_iba7322.c 	ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 5, 8 << 11, BMASK(14, 11));
dd               7891 drivers/infiniband/hw/qib/qib_iba7322.c 	ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 7, 8 << 4, BMASK(7, 4));
dd               7892 drivers/infiniband/hw/qib/qib_iba7322.c 	ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 8, 8 << 11, BMASK(14, 11));
dd               7893 drivers/infiniband/hw/qib/qib_iba7322.c 	ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 10, 8 << 4, BMASK(7, 4));
dd               7896 drivers/infiniband/hw/qib/qib_iba7322.c 	ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 6, 4 << 0, BMASK(3, 0));
dd               7897 drivers/infiniband/hw/qib/qib_iba7322.c 	ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 7, 4 << 8, BMASK(11, 8));
dd               7898 drivers/infiniband/hw/qib/qib_iba7322.c 	ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 9, 4 << 0, BMASK(3, 0));
dd               7899 drivers/infiniband/hw/qib/qib_iba7322.c 	ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 10, 4 << 8, BMASK(11, 8));
dd               7902 drivers/infiniband/hw/qib/qib_iba7322.c 	ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 9, 1 << 15, 1 << 15);
dd               7918 drivers/infiniband/hw/qib/qib_iba7322.c 	le_val = (ppd->dd->cspec->r1 || IS_QME(ppd->dd)) ? 0xb6c0 : 0x6bac;
dd               7928 drivers/infiniband/hw/qib/qib_iba7322.c 			    ppd->dd->cspec->r1 ?
dd               7938 drivers/infiniband/hw/qib/qib_iba7322.c 	if (!ppd->dd->cspec->r1) {
dd               7956 drivers/infiniband/hw/qib/qib_iba7322.c 	ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 10, 0 << 14, 1 << 14);
dd               7998 drivers/infiniband/hw/qib/qib_iba7322.c 	if (!ppd->dd->cspec->r1) {
dd               8024 drivers/infiniband/hw/qib/qib_iba7322.c 	ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 5, 8 << 11, BMASK(14, 11));
dd               8025 drivers/infiniband/hw/qib/qib_iba7322.c 	ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 7, 8 << 4, BMASK(7, 4));
dd               8026 drivers/infiniband/hw/qib/qib_iba7322.c 	ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 8, 8 << 11, BMASK(14, 11));
dd               8027 drivers/infiniband/hw/qib/qib_iba7322.c 	ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 10, 8 << 4, BMASK(7, 4));
dd               8030 drivers/infiniband/hw/qib/qib_iba7322.c 	ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 6, 4 << 0, BMASK(3, 0));
dd               8031 drivers/infiniband/hw/qib/qib_iba7322.c 	ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 7, 4 << 8, BMASK(11, 8));
dd               8032 drivers/infiniband/hw/qib/qib_iba7322.c 	ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 9, 4 << 0, BMASK(3, 0));
dd               8033 drivers/infiniband/hw/qib/qib_iba7322.c 	ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 10, 4 << 8, BMASK(11, 8));
dd               8036 drivers/infiniband/hw/qib/qib_iba7322.c 	ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 9, 1 << 15, 1 << 15);
dd               8060 drivers/infiniband/hw/qib/qib_iba7322.c 			rxcaldone = ahb_mod(ppd->dd, IBSD(ppd->hw_pidx),
dd               8073 drivers/infiniband/hw/qib/qib_iba7322.c 			rxcaldone = ahb_mod(ppd->dd, IBSD(ppd->hw_pidx),
dd               8088 drivers/infiniband/hw/qib/qib_iba7322.c 	le_val = IS_QME(ppd->dd) ? LE2_QME : LE2_DEFAULT;
dd               8098 drivers/infiniband/hw/qib/qib_iba7322.c 	le_val = (ppd->dd->cspec->r1 || IS_QME(ppd->dd)) ? 0xb6c0 : 0x6bac;
dd               8119 drivers/infiniband/hw/qib/qib_iba7322.c 			    ppd->dd->cspec->r1 ?
dd               8149 drivers/infiniband/hw/qib/qib_iba7322.c 	ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), (chan + (chan >> 1)),
dd               8157 drivers/infiniband/hw/qib/qib_iba7322.c 		ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), (chan + (chan >> 1)),
dd               8160 drivers/infiniband/hw/qib/qib_iba7322.c 		ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), (chan + (chan >> 1)),
dd               8167 drivers/infiniband/hw/qib/qib_iba7322.c 	ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), (chan + (chan >> 1)),
dd               8169 drivers/infiniband/hw/qib/qib_iba7322.c 	ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), (chan + (chan >> 1)),
dd               8171 drivers/infiniband/hw/qib/qib_iba7322.c 	ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), (chan + (chan >> 1)),
dd               8173 drivers/infiniband/hw/qib/qib_iba7322.c 	ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), (chan + (chan >> 1)),
dd               8234 drivers/infiniband/hw/qib/qib_iba7322.c 	if (!ppd->dd->cspec->r1)
dd               8256 drivers/infiniband/hw/qib/qib_iba7322.c static int qib_r_grab(struct qib_devdata *dd)
dd               8260 drivers/infiniband/hw/qib/qib_iba7322.c 	qib_write_kreg(dd, kr_r_access, val);
dd               8261 drivers/infiniband/hw/qib/qib_iba7322.c 	qib_read_kreg32(dd, kr_scratch);
dd               8268 drivers/infiniband/hw/qib/qib_iba7322.c static int qib_r_wait_for_rdy(struct qib_devdata *dd)
dd               8274 drivers/infiniband/hw/qib/qib_iba7322.c 		val = qib_read_kreg32(dd, kr_r_access);
dd               8281 drivers/infiniband/hw/qib/qib_iba7322.c static int qib_r_shift(struct qib_devdata *dd, int bisten,
dd               8289 drivers/infiniband/hw/qib/qib_iba7322.c 	ret = qib_r_wait_for_rdy(dd);
dd               8303 drivers/infiniband/hw/qib/qib_iba7322.c 		qib_write_kreg(dd, kr_r_access, val);
dd               8304 drivers/infiniband/hw/qib/qib_iba7322.c 		qib_read_kreg32(dd, kr_scratch);
dd               8305 drivers/infiniband/hw/qib/qib_iba7322.c 		ret = qib_r_wait_for_rdy(dd);
dd               8311 drivers/infiniband/hw/qib/qib_iba7322.c 	qib_write_kreg(dd, kr_r_access, val);
dd               8312 drivers/infiniband/hw/qib/qib_iba7322.c 	qib_read_kreg32(dd, kr_scratch);
dd               8313 drivers/infiniband/hw/qib/qib_iba7322.c 	ret = qib_r_wait_for_rdy(dd);
dd               8321 drivers/infiniband/hw/qib/qib_iba7322.c static int qib_r_update(struct qib_devdata *dd, int bisten)
dd               8327 drivers/infiniband/hw/qib/qib_iba7322.c 	ret = qib_r_wait_for_rdy(dd);
dd               8329 drivers/infiniband/hw/qib/qib_iba7322.c 		qib_write_kreg(dd, kr_r_access, val);
dd               8330 drivers/infiniband/hw/qib/qib_iba7322.c 		qib_read_kreg32(dd, kr_scratch);
dd               8433 drivers/infiniband/hw/qib/qib_iba7322.c 	struct qib_devdata *dd = ppd->dd;
dd               8435 drivers/infiniband/hw/qib/qib_iba7322.c 	if (!ppd->dd->cspec->r1)
dd               8438 drivers/infiniband/hw/qib/qib_iba7322.c 		dd->cspec->recovery_ports_initted++;
dd               8441 drivers/infiniband/hw/qib/qib_iba7322.c 	if (!both && dd->cspec->recovery_ports_initted == 1) {
dd               8449 drivers/infiniband/hw/qib/qib_iba7322.c 	if (qib_r_grab(dd) < 0 ||
dd               8450 drivers/infiniband/hw/qib/qib_iba7322.c 		qib_r_shift(dd, BISTEN_ETM, LEN_ETM, reset_atetm, NULL) < 0 ||
dd               8451 drivers/infiniband/hw/qib/qib_iba7322.c 		qib_r_update(dd, BISTEN_ETM) < 0 ||
dd               8452 drivers/infiniband/hw/qib/qib_iba7322.c 		qib_r_shift(dd, BISTEN_AT, LEN_AT, reset_at, NULL) < 0 ||
dd               8453 drivers/infiniband/hw/qib/qib_iba7322.c 		qib_r_update(dd, BISTEN_AT) < 0 ||
dd               8454 drivers/infiniband/hw/qib/qib_iba7322.c 		qib_r_shift(dd, BISTEN_PORT_SEL, LEN_PORT_SEL,
dd               8456 drivers/infiniband/hw/qib/qib_iba7322.c 		qib_r_update(dd, BISTEN_PORT_SEL) < 0 ||
dd               8457 drivers/infiniband/hw/qib/qib_iba7322.c 		qib_r_shift(dd, BISTEN_AT, LEN_AT, at, NULL) < 0 ||
dd               8458 drivers/infiniband/hw/qib/qib_iba7322.c 		qib_r_update(dd, BISTEN_AT) < 0 ||
dd               8459 drivers/infiniband/hw/qib/qib_iba7322.c 		qib_r_shift(dd, BISTEN_ETM, LEN_ETM, etm, NULL) < 0 ||
dd               8460 drivers/infiniband/hw/qib/qib_iba7322.c 		qib_r_update(dd, BISTEN_ETM) < 0)
dd               8461 drivers/infiniband/hw/qib/qib_iba7322.c 		qib_dev_err(dd, "Failed IB link recovery setup\n");
dd               8466 drivers/infiniband/hw/qib/qib_iba7322.c 	struct qib_devdata *dd = ppd->dd;
dd               8469 drivers/infiniband/hw/qib/qib_iba7322.c 	if (dd->cspec->recovery_ports_initted != 1)
dd               8471 drivers/infiniband/hw/qib/qib_iba7322.c 	qib_write_kreg(dd, kr_control, dd->control |
dd               8473 drivers/infiniband/hw/qib/qib_iba7322.c 	(void)qib_read_kreg64(dd, kr_scratch);
dd               8475 drivers/infiniband/hw/qib/qib_iba7322.c 	fmask = qib_read_kreg64(dd, kr_act_fmask);
dd               8482 drivers/infiniband/hw/qib/qib_iba7322.c 		ppd->dd->cspec->stay_in_freeze = 1;
dd               8483 drivers/infiniband/hw/qib/qib_iba7322.c 		qib_7322_set_intr_state(ppd->dd, 0);
dd               8484 drivers/infiniband/hw/qib/qib_iba7322.c 		qib_write_kreg(dd, kr_fmask, 0ULL);
dd               8485 drivers/infiniband/hw/qib/qib_iba7322.c 		qib_dev_err(dd, "HCA unusable until powercycled\n");
dd               8489 drivers/infiniband/hw/qib/qib_iba7322.c 	qib_write_kreg(ppd->dd, kr_hwerrclear,
dd               8493 drivers/infiniband/hw/qib/qib_iba7322.c 	qib_write_kreg(dd, kr_control, dd->control);
dd               8494 drivers/infiniband/hw/qib/qib_iba7322.c 	qib_read_kreg32(dd, kr_scratch);
dd               8501 drivers/infiniband/hw/qib/qib_iba7322.c 		qib_read_kreg32(dd, kr_scratch);
dd                102 drivers/infiniband/hw/qib/qib_init.c void qib_set_ctxtcnt(struct qib_devdata *dd)
dd                105 drivers/infiniband/hw/qib/qib_init.c 		dd->cfgctxts = dd->first_user_ctxt + num_online_cpus();
dd                106 drivers/infiniband/hw/qib/qib_init.c 		if (dd->cfgctxts > dd->ctxtcnt)
dd                107 drivers/infiniband/hw/qib/qib_init.c 			dd->cfgctxts = dd->ctxtcnt;
dd                108 drivers/infiniband/hw/qib/qib_init.c 	} else if (qib_cfgctxts < dd->num_pports)
dd                109 drivers/infiniband/hw/qib/qib_init.c 		dd->cfgctxts = dd->ctxtcnt;
dd                110 drivers/infiniband/hw/qib/qib_init.c 	else if (qib_cfgctxts <= dd->ctxtcnt)
dd                111 drivers/infiniband/hw/qib/qib_init.c 		dd->cfgctxts = qib_cfgctxts;
dd                113 drivers/infiniband/hw/qib/qib_init.c 		dd->cfgctxts = dd->ctxtcnt;
dd                114 drivers/infiniband/hw/qib/qib_init.c 	dd->freectxts = (dd->first_user_ctxt > dd->cfgctxts) ? 0 :
dd                115 drivers/infiniband/hw/qib/qib_init.c 		dd->cfgctxts - dd->first_user_ctxt;
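The qib_set_ctxtcnt() entries above pick dd->cfgctxts: auto-size from num_online_cpus() when no count was requested, otherwise clamp the request against what the chip provides (dd->ctxtcnt), then derive dd->freectxts from the user-context range. A deliberately simplified sketch of that clamping, with a constant standing in for num_online_cpus():

#include <stdio.h>

#define ONLINE_CPUS 4	/* stand-in for num_online_cpus() */

/* Simplified sketch of qib_set_ctxtcnt(): choose a configured context
 * count, never exceeding what the chip provides (ctxtcnt). */
static int pick_cfgctxts(int requested, int ctxtcnt, int first_user_ctxt)
{
	int cfg;

	if (!requested)
		cfg = first_user_ctxt + ONLINE_CPUS;	/* auto-size */
	else
		cfg = requested;
	if (cfg > ctxtcnt)
		cfg = ctxtcnt;				/* clamp to hardware */
	return cfg;
}

int main(void)
{
	int first_user_ctxt = 2;
	int cfg = pick_cfgctxts(0, 18, first_user_ctxt);

	printf("cfgctxts=%d freectxts=%d\n", cfg, cfg - first_user_ctxt);
	return 0;
}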
dd                121 drivers/infiniband/hw/qib/qib_init.c int qib_create_ctxts(struct qib_devdata *dd)
dd                124 drivers/infiniband/hw/qib/qib_init.c 	int local_node_id = pcibus_to_node(dd->pcidev->bus);
dd                128 drivers/infiniband/hw/qib/qib_init.c 	dd->assigned_node_id = local_node_id;
dd                134 drivers/infiniband/hw/qib/qib_init.c 	dd->rcd = kcalloc(dd->ctxtcnt, sizeof(*dd->rcd), GFP_KERNEL);
dd                135 drivers/infiniband/hw/qib/qib_init.c 	if (!dd->rcd)
dd                139 drivers/infiniband/hw/qib/qib_init.c 	for (i = 0; i < dd->first_user_ctxt; ++i) {
dd                143 drivers/infiniband/hw/qib/qib_init.c 		if (dd->skip_kctxt_mask & (1 << i))
dd                146 drivers/infiniband/hw/qib/qib_init.c 		ppd = dd->pport + (i % dd->num_pports);
dd                148 drivers/infiniband/hw/qib/qib_init.c 		rcd = qib_create_ctxtdata(ppd, i, dd->assigned_node_id);
dd                150 drivers/infiniband/hw/qib/qib_init.c 			qib_dev_err(dd,
dd                152 drivers/infiniband/hw/qib/qib_init.c 			kfree(dd->rcd);
dd                153 drivers/infiniband/hw/qib/qib_init.c 			dd->rcd = NULL;
dd                168 drivers/infiniband/hw/qib/qib_init.c 	struct qib_devdata *dd = ppd->dd;
dd                176 drivers/infiniband/hw/qib/qib_init.c 		rcd->dd = dd;
dd                179 drivers/infiniband/hw/qib/qib_init.c 		dd->rcd[ctxt] = rcd;
dd                181 drivers/infiniband/hw/qib/qib_init.c 		if (ctxt < dd->first_user_ctxt) { /* N/A for PSM contexts */
dd                186 drivers/infiniband/hw/qib/qib_init.c 				qib_dev_err(dd,
dd                192 drivers/infiniband/hw/qib/qib_init.c 		dd->f_init_ctxt(rcd);
dd                207 drivers/infiniband/hw/qib/qib_init.c 			rcd->rcvegrbuf_size / dd->rcvegrbufsize;
dd                220 drivers/infiniband/hw/qib/qib_init.c int qib_init_pportdata(struct qib_pportdata *ppd, struct qib_devdata *dd,
dd                225 drivers/infiniband/hw/qib/qib_init.c 	ppd->dd = dd;
dd                297 drivers/infiniband/hw/qib/qib_init.c 		qib_dev_err(dd,
dd                302 drivers/infiniband/hw/qib/qib_init.c 	qib_dev_err(dd, "Congestion Control Agent disabled for port %d\n",
dd                307 drivers/infiniband/hw/qib/qib_init.c static int init_pioavailregs(struct qib_devdata *dd)
dd                312 drivers/infiniband/hw/qib/qib_init.c 	dd->pioavailregs_dma = dma_alloc_coherent(
dd                313 drivers/infiniband/hw/qib/qib_init.c 		&dd->pcidev->dev, PAGE_SIZE, &dd->pioavailregs_phys,
dd                315 drivers/infiniband/hw/qib/qib_init.c 	if (!dd->pioavailregs_dma) {
dd                316 drivers/infiniband/hw/qib/qib_init.c 		qib_dev_err(dd,
dd                327 drivers/infiniband/hw/qib/qib_init.c 		((char *) dd->pioavailregs_dma +
dd                329 drivers/infiniband/hw/qib/qib_init.c 		   dd->pioavregs * sizeof(u64)) & ~L1_CACHE_BYTES));
dd                331 drivers/infiniband/hw/qib/qib_init.c 	dd->devstatusp = status_page;
dd                333 drivers/infiniband/hw/qib/qib_init.c 	for (pidx = 0; pidx < dd->num_pports; ++pidx) {
dd                334 drivers/infiniband/hw/qib/qib_init.c 		dd->pport[pidx].statusp = status_page;
dd                342 drivers/infiniband/hw/qib/qib_init.c 	dd->freezemsg = (char *) status_page;
dd                343 drivers/infiniband/hw/qib/qib_init.c 	*dd->freezemsg = 0;
dd                345 drivers/infiniband/hw/qib/qib_init.c 	ret = (char *) status_page - (char *) dd->pioavailregs_dma;
dd                346 drivers/infiniband/hw/qib/qib_init.c 	dd->freezelen = PAGE_SIZE - ret;
dd                365 drivers/infiniband/hw/qib/qib_init.c static void init_shadow_tids(struct qib_devdata *dd)
dd                371 drivers/infiniband/hw/qib/qib_init.c 				   dd->cfgctxts * dd->rcvtidcnt));
dd                376 drivers/infiniband/hw/qib/qib_init.c 				   dd->cfgctxts * dd->rcvtidcnt));
dd                380 drivers/infiniband/hw/qib/qib_init.c 	dd->pageshadow = pages;
dd                381 drivers/infiniband/hw/qib/qib_init.c 	dd->physshadow = addrs;
dd                387 drivers/infiniband/hw/qib/qib_init.c 	dd->pageshadow = NULL;
dd                394 drivers/infiniband/hw/qib/qib_init.c static int loadtime_init(struct qib_devdata *dd)
dd                398 drivers/infiniband/hw/qib/qib_init.c 	if (((dd->revision >> QLOGIC_IB_R_SOFTWARE_SHIFT) &
dd                400 drivers/infiniband/hw/qib/qib_init.c 		qib_dev_err(dd,
dd                403 drivers/infiniband/hw/qib/qib_init.c 			(int)(dd->revision >>
dd                406 drivers/infiniband/hw/qib/qib_init.c 			(unsigned long long) dd->revision);
dd                411 drivers/infiniband/hw/qib/qib_init.c 	if (dd->revision & QLOGIC_IB_R_EMULATOR_MASK)
dd                412 drivers/infiniband/hw/qib/qib_init.c 		qib_devinfo(dd->pcidev, "%s", dd->boardversion);
dd                414 drivers/infiniband/hw/qib/qib_init.c 	spin_lock_init(&dd->pioavail_lock);
dd                415 drivers/infiniband/hw/qib/qib_init.c 	spin_lock_init(&dd->sendctrl_lock);
dd                416 drivers/infiniband/hw/qib/qib_init.c 	spin_lock_init(&dd->uctxt_lock);
dd                417 drivers/infiniband/hw/qib/qib_init.c 	spin_lock_init(&dd->qib_diag_trans_lock);
dd                418 drivers/infiniband/hw/qib/qib_init.c 	spin_lock_init(&dd->eep_st_lock);
dd                419 drivers/infiniband/hw/qib/qib_init.c 	mutex_init(&dd->eep_lock);
dd                424 drivers/infiniband/hw/qib/qib_init.c 	ret = init_pioavailregs(dd);
dd                425 drivers/infiniband/hw/qib/qib_init.c 	init_shadow_tids(dd);
dd                427 drivers/infiniband/hw/qib/qib_init.c 	qib_get_eeprom_info(dd);
dd                430 drivers/infiniband/hw/qib/qib_init.c 	timer_setup(&dd->intrchk_timer, verify_interrupt, 0);
dd                443 drivers/infiniband/hw/qib/qib_init.c static int init_after_reset(struct qib_devdata *dd)
dd                452 drivers/infiniband/hw/qib/qib_init.c 	for (i = 0; i < dd->num_pports; ++i) {
dd                457 drivers/infiniband/hw/qib/qib_init.c 		dd->f_rcvctrl(dd->pport + i, QIB_RCVCTRL_CTXT_DIS |
dd                461 drivers/infiniband/hw/qib/qib_init.c 		dd->f_sendctrl(dd->pport + i, QIB_SENDCTRL_SEND_DIS |
dd                468 drivers/infiniband/hw/qib/qib_init.c static void enable_chip(struct qib_devdata *dd)
dd                476 drivers/infiniband/hw/qib/qib_init.c 	for (i = 0; i < dd->num_pports; ++i)
dd                477 drivers/infiniband/hw/qib/qib_init.c 		dd->f_sendctrl(dd->pport + i, QIB_SENDCTRL_SEND_ENB |
dd                484 drivers/infiniband/hw/qib/qib_init.c 	rcvmask |= (dd->flags & QIB_NODMA_RTAIL) ?
dd                486 drivers/infiniband/hw/qib/qib_init.c 	for (i = 0; dd->rcd && i < dd->first_user_ctxt; ++i) {
dd                487 drivers/infiniband/hw/qib/qib_init.c 		struct qib_ctxtdata *rcd = dd->rcd[i];
dd                490 drivers/infiniband/hw/qib/qib_init.c 			dd->f_rcvctrl(rcd->ppd, rcvmask, i);
dd                496 drivers/infiniband/hw/qib/qib_init.c 	struct qib_devdata *dd = from_timer(dd, t, intrchk_timer);
dd                499 drivers/infiniband/hw/qib/qib_init.c 	if (!dd)
dd                506 drivers/infiniband/hw/qib/qib_init.c 	int_counter = qib_int_counter(dd) - dd->z_int_counter;
dd                508 drivers/infiniband/hw/qib/qib_init.c 		if (!dd->f_intr_fallback(dd))
dd                509 drivers/infiniband/hw/qib/qib_init.c 			dev_err(&dd->pcidev->dev,
dd                512 drivers/infiniband/hw/qib/qib_init.c 			mod_timer(&dd->intrchk_timer, jiffies + HZ/2);
dd                516 drivers/infiniband/hw/qib/qib_init.c static void init_piobuf_state(struct qib_devdata *dd)
dd                529 drivers/infiniband/hw/qib/qib_init.c 	dd->f_sendctrl(dd->pport, QIB_SENDCTRL_DISARM_ALL);
dd                530 drivers/infiniband/hw/qib/qib_init.c 	for (pidx = 0; pidx < dd->num_pports; ++pidx)
dd                531 drivers/infiniband/hw/qib/qib_init.c 		dd->f_sendctrl(dd->pport + pidx, QIB_SENDCTRL_FLUSH);
dd                539 drivers/infiniband/hw/qib/qib_init.c 	uctxts = dd->cfgctxts - dd->first_user_ctxt;
dd                540 drivers/infiniband/hw/qib/qib_init.c 	dd->ctxts_extrabuf = dd->pbufsctxt ?
dd                541 drivers/infiniband/hw/qib/qib_init.c 		dd->lastctxt_piobuf - (dd->pbufsctxt * uctxts) : 0;
dd                552 drivers/infiniband/hw/qib/qib_init.c 	for (i = 0; i < dd->pioavregs; i++) {
dd                555 drivers/infiniband/hw/qib/qib_init.c 		tmp = dd->pioavailregs_dma[i];
dd                561 drivers/infiniband/hw/qib/qib_init.c 		dd->pioavailshadow[i] = le64_to_cpu(tmp);
dd                563 drivers/infiniband/hw/qib/qib_init.c 	while (i < ARRAY_SIZE(dd->pioavailshadow))
dd                564 drivers/infiniband/hw/qib/qib_init.c 		dd->pioavailshadow[i++] = 0; /* for debugging sanity */
dd                567 drivers/infiniband/hw/qib/qib_init.c 	qib_chg_pioavailkernel(dd, 0, dd->piobcnt2k + dd->piobcnt4k,
dd                569 drivers/infiniband/hw/qib/qib_init.c 	dd->f_initvl15_bufs(dd);
dd                576 drivers/infiniband/hw/qib/qib_init.c static int qib_create_workqueues(struct qib_devdata *dd)
dd                581 drivers/infiniband/hw/qib/qib_init.c 	for (pidx = 0; pidx < dd->num_pports; ++pidx) {
dd                582 drivers/infiniband/hw/qib/qib_init.c 		ppd = dd->pport + pidx;
dd                587 drivers/infiniband/hw/qib/qib_init.c 				dd->unit, pidx);
dd                598 drivers/infiniband/hw/qib/qib_init.c 	for (pidx = 0; pidx < dd->num_pports; ++pidx) {
dd                599 drivers/infiniband/hw/qib/qib_init.c 		ppd = dd->pport + pidx;
dd                632 drivers/infiniband/hw/qib/qib_init.c int qib_init(struct qib_devdata *dd, int reinit)
dd                642 drivers/infiniband/hw/qib/qib_init.c 	for (pidx = 0; pidx < dd->num_pports; ++pidx) {
dd                643 drivers/infiniband/hw/qib/qib_init.c 		ppd = dd->pport + pidx;
dd                652 drivers/infiniband/hw/qib/qib_init.c 		ret = init_after_reset(dd);
dd                654 drivers/infiniband/hw/qib/qib_init.c 		ret = loadtime_init(dd);
dd                662 drivers/infiniband/hw/qib/qib_init.c 	ret = dd->f_late_initreg(dd);
dd                667 drivers/infiniband/hw/qib/qib_init.c 	for (i = 0; dd->rcd && i < dd->first_user_ctxt; ++i) {
dd                674 drivers/infiniband/hw/qib/qib_init.c 		rcd = dd->rcd[i];
dd                678 drivers/infiniband/hw/qib/qib_init.c 		lastfail = qib_create_rcvhdrq(dd, rcd);
dd                682 drivers/infiniband/hw/qib/qib_init.c 			qib_dev_err(dd,
dd                686 drivers/infiniband/hw/qib/qib_init.c 	for (pidx = 0; pidx < dd->num_pports; ++pidx) {
dd                691 drivers/infiniband/hw/qib/qib_init.c 		ppd = dd->pport + pidx;
dd                699 drivers/infiniband/hw/qib/qib_init.c 					 dd->piosize4k : dd->piosize2k,
dd                700 drivers/infiniband/hw/qib/qib_init.c 					 dd->rcvegrbufsize +
dd                701 drivers/infiniband/hw/qib/qib_init.c 					 (dd->rcvhdrentsize << 2));
dd                713 drivers/infiniband/hw/qib/qib_init.c 		lastfail = dd->f_bringup_serdes(ppd);
dd                715 drivers/infiniband/hw/qib/qib_init.c 			qib_devinfo(dd->pcidev,
dd                733 drivers/infiniband/hw/qib/qib_init.c 	enable_chip(dd);
dd                735 drivers/infiniband/hw/qib/qib_init.c 	init_piobuf_state(dd);
dd                740 drivers/infiniband/hw/qib/qib_init.c 		for (pidx = 0; pidx < dd->num_pports; ++pidx) {
dd                741 drivers/infiniband/hw/qib/qib_init.c 			ppd = dd->pport + pidx;
dd                750 drivers/infiniband/hw/qib/qib_init.c 			if (dd->flags & QIB_HAS_SEND_DMA)
dd                757 drivers/infiniband/hw/qib/qib_init.c 		dd->f_set_intr_state(dd, 1);
dd                763 drivers/infiniband/hw/qib/qib_init.c 		mod_timer(&dd->intrchk_timer, jiffies + HZ/2);
dd                765 drivers/infiniband/hw/qib/qib_init.c 		mod_timer(&dd->stats_timer, jiffies + HZ * ACTIVITY_TIMER);
dd                778 drivers/infiniband/hw/qib/qib_init.c int __attribute__((weak)) qib_enable_wc(struct qib_devdata *dd)
dd                783 drivers/infiniband/hw/qib/qib_init.c void __attribute__((weak)) qib_disable_wc(struct qib_devdata *dd)
dd                796 drivers/infiniband/hw/qib/qib_init.c static void qib_stop_timers(struct qib_devdata *dd)
dd                801 drivers/infiniband/hw/qib/qib_init.c 	if (dd->stats_timer.function)
dd                802 drivers/infiniband/hw/qib/qib_init.c 		del_timer_sync(&dd->stats_timer);
dd                803 drivers/infiniband/hw/qib/qib_init.c 	if (dd->intrchk_timer.function)
dd                804 drivers/infiniband/hw/qib/qib_init.c 		del_timer_sync(&dd->intrchk_timer);
dd                805 drivers/infiniband/hw/qib/qib_init.c 	for (pidx = 0; pidx < dd->num_pports; ++pidx) {
dd                806 drivers/infiniband/hw/qib/qib_init.c 		ppd = dd->pport + pidx;
dd                827 drivers/infiniband/hw/qib/qib_init.c static void qib_shutdown_device(struct qib_devdata *dd)
dd                832 drivers/infiniband/hw/qib/qib_init.c 	if (dd->flags & QIB_SHUTDOWN)
dd                834 drivers/infiniband/hw/qib/qib_init.c 	dd->flags |= QIB_SHUTDOWN;
dd                836 drivers/infiniband/hw/qib/qib_init.c 	for (pidx = 0; pidx < dd->num_pports; ++pidx) {
dd                837 drivers/infiniband/hw/qib/qib_init.c 		ppd = dd->pport + pidx;
dd                846 drivers/infiniband/hw/qib/qib_init.c 	dd->flags &= ~QIB_INITTED;
dd                849 drivers/infiniband/hw/qib/qib_init.c 	dd->f_set_intr_state(dd, 0);
dd                851 drivers/infiniband/hw/qib/qib_init.c 	for (pidx = 0; pidx < dd->num_pports; ++pidx) {
dd                852 drivers/infiniband/hw/qib/qib_init.c 		ppd = dd->pport + pidx;
dd                853 drivers/infiniband/hw/qib/qib_init.c 		dd->f_rcvctrl(ppd, QIB_RCVCTRL_TAILUPD_DIS |
dd                861 drivers/infiniband/hw/qib/qib_init.c 		dd->f_sendctrl(ppd, QIB_SENDCTRL_CLEAR);
dd                870 drivers/infiniband/hw/qib/qib_init.c 	for (pidx = 0; pidx < dd->num_pports; ++pidx) {
dd                871 drivers/infiniband/hw/qib/qib_init.c 		ppd = dd->pport + pidx;
dd                872 drivers/infiniband/hw/qib/qib_init.c 		dd->f_setextled(ppd, 0); /* make sure LEDs are off */
dd                874 drivers/infiniband/hw/qib/qib_init.c 		if (dd->flags & QIB_HAS_SEND_DMA)
dd                877 drivers/infiniband/hw/qib/qib_init.c 		dd->f_sendctrl(ppd, QIB_SENDCTRL_AVAIL_DIS |
dd                883 drivers/infiniband/hw/qib/qib_init.c 		dd->f_quiet_serdes(ppd);
dd                905 drivers/infiniband/hw/qib/qib_init.c void qib_free_ctxtdata(struct qib_devdata *dd, struct qib_ctxtdata *rcd)
dd                911 drivers/infiniband/hw/qib/qib_init.c 		dma_free_coherent(&dd->pcidev->dev, rcd->rcvhdrq_size,
dd                915 drivers/infiniband/hw/qib/qib_init.c 			dma_free_coherent(&dd->pcidev->dev, PAGE_SIZE,
dd                928 drivers/infiniband/hw/qib/qib_init.c 			dma_free_coherent(&dd->pcidev->dev, size,
dd                963 drivers/infiniband/hw/qib/qib_init.c static void qib_verify_pioperf(struct qib_devdata *dd)
dd                970 drivers/infiniband/hw/qib/qib_init.c 	piobuf = dd->f_getsendbuf(dd->pport, 0ULL, &pbnum);
dd                972 drivers/infiniband/hw/qib/qib_init.c 		qib_devinfo(dd->pcidev,
dd                996 drivers/infiniband/hw/qib/qib_init.c 	dd->f_set_armlaunch(dd, 0);
dd               1017 drivers/infiniband/hw/qib/qib_init.c 		qib_dev_err(dd,
dd               1027 drivers/infiniband/hw/qib/qib_init.c 	dd->f_sendctrl(dd->pport, QIB_SENDCTRL_DISARM_BUF(pbnum));
dd               1028 drivers/infiniband/hw/qib/qib_init.c 	qib_sendbuf_done(dd, pbnum);
dd               1029 drivers/infiniband/hw/qib/qib_init.c 	dd->f_set_armlaunch(dd, 1);
dd               1032 drivers/infiniband/hw/qib/qib_init.c void qib_free_devdata(struct qib_devdata *dd)
dd               1037 drivers/infiniband/hw/qib/qib_init.c 	__xa_erase(&qib_dev_table, dd->unit);
dd               1041 drivers/infiniband/hw/qib/qib_init.c 	qib_dbg_ibdev_exit(&dd->verbs_dev);
dd               1043 drivers/infiniband/hw/qib/qib_init.c 	free_percpu(dd->int_counter);
dd               1044 drivers/infiniband/hw/qib/qib_init.c 	rvt_dealloc_device(&dd->verbs_dev.rdi);
dd               1047 drivers/infiniband/hw/qib/qib_init.c u64 qib_int_counter(struct qib_devdata *dd)
dd               1053 drivers/infiniband/hw/qib/qib_init.c 		int_counter += *per_cpu_ptr(dd->int_counter, cpu);
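The qib_int_counter() entries above sum a per-CPU counter: dd->int_counter is allocated with alloc_percpu(u64) and each slot is read through per_cpu_ptr(), so interrupt accounting never bounces a shared cache line between CPUs. A user-space analogue using a plain array in place of the kernel's percpu API:

#include <stdio.h>

#define NCPUS 4	/* stand-in for the set of possible CPUs */

/* The driver keeps dd->int_counter as alloc_percpu(u64) and reads each
 * slot with per_cpu_ptr(); a flat array models that here. */
static unsigned long long int_counter[NCPUS];

static unsigned long long sum_int_counter(void)
{
	unsigned long long total = 0;
	int cpu;

	for (cpu = 0; cpu < NCPUS; cpu++)	/* cf. for_each_possible_cpu() */
		total += int_counter[cpu];
	return total;
}

int main(void)
{
	int_counter[0] = 3;	/* interrupts taken on CPU 0 */
	int_counter[2] = 7;	/* ... and on CPU 2 */
	printf("total interrupts: %llu\n", sum_int_counter());
	return 0;
}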
dd               1060 drivers/infiniband/hw/qib/qib_init.c 	struct qib_devdata *dd;
dd               1064 drivers/infiniband/hw/qib/qib_init.c 	xa_for_each(&qib_dev_table, index, dd) {
dd               1065 drivers/infiniband/hw/qib/qib_init.c 		sps_ints += qib_int_counter(dd);
dd               1079 drivers/infiniband/hw/qib/qib_init.c 	struct qib_devdata *dd;
dd               1084 drivers/infiniband/hw/qib/qib_init.c 	dd = (struct qib_devdata *)rvt_alloc_device(sizeof(*dd) + extra,
dd               1086 drivers/infiniband/hw/qib/qib_init.c 	if (!dd)
dd               1089 drivers/infiniband/hw/qib/qib_init.c 	ret = xa_alloc_irq(&qib_dev_table, &dd->unit, dd, xa_limit_32b,
dd               1096 drivers/infiniband/hw/qib/qib_init.c 	rvt_set_ibdev_name(&dd->verbs_dev.rdi, "%s%d", "qib", dd->unit);
dd               1098 drivers/infiniband/hw/qib/qib_init.c 	dd->int_counter = alloc_percpu(u64);
dd               1099 drivers/infiniband/hw/qib/qib_init.c 	if (!dd->int_counter) {
dd               1115 drivers/infiniband/hw/qib/qib_init.c 	qib_dbg_ibdev_init(&dd->verbs_dev);
dd               1117 drivers/infiniband/hw/qib/qib_init.c 	return dd;
dd               1119 drivers/infiniband/hw/qib/qib_init.c 	if (!list_empty(&dd->list))
dd               1120 drivers/infiniband/hw/qib/qib_init.c 		list_del_init(&dd->list);
dd               1121 drivers/infiniband/hw/qib/qib_init.c 	rvt_dealloc_device(&dd->verbs_dev.rdi);
dd               1130 drivers/infiniband/hw/qib/qib_init.c void qib_disable_after_error(struct qib_devdata *dd)
dd               1132 drivers/infiniband/hw/qib/qib_init.c 	if (dd->flags & QIB_INITTED) {
dd               1135 drivers/infiniband/hw/qib/qib_init.c 		dd->flags &= ~QIB_INITTED;
dd               1136 drivers/infiniband/hw/qib/qib_init.c 		if (dd->pport)
dd               1137 drivers/infiniband/hw/qib/qib_init.c 			for (pidx = 0; pidx < dd->num_pports; ++pidx) {
dd               1140 drivers/infiniband/hw/qib/qib_init.c 				ppd = dd->pport + pidx;
dd               1141 drivers/infiniband/hw/qib/qib_init.c 				if (dd->flags & QIB_PRESENT) {
dd               1144 drivers/infiniband/hw/qib/qib_init.c 					dd->f_setextled(ppd, 0);
dd               1155 drivers/infiniband/hw/qib/qib_init.c 	if (dd->devstatusp)
dd               1156 drivers/infiniband/hw/qib/qib_init.c 		*dd->devstatusp |= QIB_STATUS_HWERROR;
dd               1195 drivers/infiniband/hw/qib/qib_init.c 	struct qib_devdata *dd = dev_get_drvdata(device);
dd               1198 drivers/infiniband/hw/qib/qib_init.c 	return dd->f_notify_dca(dd, event);
dd               1291 drivers/infiniband/hw/qib/qib_init.c static void cleanup_device_data(struct qib_devdata *dd)
dd               1299 drivers/infiniband/hw/qib/qib_init.c 	for (pidx = 0; pidx < dd->num_pports; ++pidx) {
dd               1300 drivers/infiniband/hw/qib/qib_init.c 		if (dd->pport[pidx].statusp)
dd               1301 drivers/infiniband/hw/qib/qib_init.c 			*dd->pport[pidx].statusp &= ~QIB_STATUS_CHIP_PRESENT;
dd               1303 drivers/infiniband/hw/qib/qib_init.c 		spin_lock(&dd->pport[pidx].cc_shadow_lock);
dd               1305 drivers/infiniband/hw/qib/qib_init.c 		kfree(dd->pport[pidx].congestion_entries);
dd               1306 drivers/infiniband/hw/qib/qib_init.c 		dd->pport[pidx].congestion_entries = NULL;
dd               1307 drivers/infiniband/hw/qib/qib_init.c 		kfree(dd->pport[pidx].ccti_entries);
dd               1308 drivers/infiniband/hw/qib/qib_init.c 		dd->pport[pidx].ccti_entries = NULL;
dd               1309 drivers/infiniband/hw/qib/qib_init.c 		kfree(dd->pport[pidx].ccti_entries_shadow);
dd               1310 drivers/infiniband/hw/qib/qib_init.c 		dd->pport[pidx].ccti_entries_shadow = NULL;
dd               1311 drivers/infiniband/hw/qib/qib_init.c 		kfree(dd->pport[pidx].congestion_entries_shadow);
dd               1312 drivers/infiniband/hw/qib/qib_init.c 		dd->pport[pidx].congestion_entries_shadow = NULL;
dd               1314 drivers/infiniband/hw/qib/qib_init.c 		spin_unlock(&dd->pport[pidx].cc_shadow_lock);
dd               1317 drivers/infiniband/hw/qib/qib_init.c 	qib_disable_wc(dd);
dd               1319 drivers/infiniband/hw/qib/qib_init.c 	if (dd->pioavailregs_dma) {
dd               1320 drivers/infiniband/hw/qib/qib_init.c 		dma_free_coherent(&dd->pcidev->dev, PAGE_SIZE,
dd               1321 drivers/infiniband/hw/qib/qib_init.c 				  (void *) dd->pioavailregs_dma,
dd               1322 drivers/infiniband/hw/qib/qib_init.c 				  dd->pioavailregs_phys);
dd               1323 drivers/infiniband/hw/qib/qib_init.c 		dd->pioavailregs_dma = NULL;
dd               1326 drivers/infiniband/hw/qib/qib_init.c 	if (dd->pageshadow) {
dd               1327 drivers/infiniband/hw/qib/qib_init.c 		struct page **tmpp = dd->pageshadow;
dd               1328 drivers/infiniband/hw/qib/qib_init.c 		dma_addr_t *tmpd = dd->physshadow;
dd               1331 drivers/infiniband/hw/qib/qib_init.c 		for (ctxt = 0; ctxt < dd->cfgctxts; ctxt++) {
dd               1332 drivers/infiniband/hw/qib/qib_init.c 			int ctxt_tidbase = ctxt * dd->rcvtidcnt;
dd               1333 drivers/infiniband/hw/qib/qib_init.c 			int maxtid = ctxt_tidbase + dd->rcvtidcnt;
dd               1338 drivers/infiniband/hw/qib/qib_init.c 				pci_unmap_page(dd->pcidev, tmpd[i],
dd               1345 drivers/infiniband/hw/qib/qib_init.c 		dd->pageshadow = NULL;
dd               1347 drivers/infiniband/hw/qib/qib_init.c 		dd->physshadow = NULL;
dd               1358 drivers/infiniband/hw/qib/qib_init.c 	spin_lock_irqsave(&dd->uctxt_lock, flags);
dd               1359 drivers/infiniband/hw/qib/qib_init.c 	tmp = dd->rcd;
dd               1360 drivers/infiniband/hw/qib/qib_init.c 	dd->rcd = NULL;
dd               1361 drivers/infiniband/hw/qib/qib_init.c 	spin_unlock_irqrestore(&dd->uctxt_lock, flags);
dd               1362 drivers/infiniband/hw/qib/qib_init.c 	for (ctxt = 0; tmp && ctxt < dd->ctxtcnt; ctxt++) {
dd               1366 drivers/infiniband/hw/qib/qib_init.c 		qib_free_ctxtdata(dd, rcd);
dd               1375 drivers/infiniband/hw/qib/qib_init.c static void qib_postinit_cleanup(struct qib_devdata *dd)
dd               1384 drivers/infiniband/hw/qib/qib_init.c 	if (dd->f_cleanup)
dd               1385 drivers/infiniband/hw/qib/qib_init.c 		dd->f_cleanup(dd);
dd               1387 drivers/infiniband/hw/qib/qib_init.c 	qib_pcie_ddcleanup(dd);
dd               1389 drivers/infiniband/hw/qib/qib_init.c 	cleanup_device_data(dd);
dd               1391 drivers/infiniband/hw/qib/qib_init.c 	qib_free_devdata(dd);
dd               1397 drivers/infiniband/hw/qib/qib_init.c 	struct qib_devdata *dd = NULL;
dd               1410 drivers/infiniband/hw/qib/qib_init.c 		dd = qib_init_iba6120_funcs(pdev, ent);
dd               1415 drivers/infiniband/hw/qib/qib_init.c 		dd = ERR_PTR(-ENODEV);
dd               1420 drivers/infiniband/hw/qib/qib_init.c 		dd = qib_init_iba7220_funcs(pdev, ent);
dd               1424 drivers/infiniband/hw/qib/qib_init.c 		dd = qib_init_iba7322_funcs(pdev, ent);
dd               1434 drivers/infiniband/hw/qib/qib_init.c 	if (IS_ERR(dd))
dd               1435 drivers/infiniband/hw/qib/qib_init.c 		ret = PTR_ERR(dd);
dd               1439 drivers/infiniband/hw/qib/qib_init.c 	ret = qib_create_workqueues(dd);
dd               1444 drivers/infiniband/hw/qib/qib_init.c 	initfail = qib_init(dd, 0);
dd               1446 drivers/infiniband/hw/qib/qib_init.c 	ret = qib_register_ib_device(dd);
dd               1455 drivers/infiniband/hw/qib/qib_init.c 		dd->flags |= QIB_INITTED;
dd               1457 drivers/infiniband/hw/qib/qib_init.c 	j = qib_device_create(dd);
dd               1459 drivers/infiniband/hw/qib/qib_init.c 		qib_dev_err(dd, "Failed to create /dev devices: %d\n", -j);
dd               1460 drivers/infiniband/hw/qib/qib_init.c 	j = qibfs_add(dd);
dd               1462 drivers/infiniband/hw/qib/qib_init.c 		qib_dev_err(dd, "Failed filesystem setup for counters: %d\n",
dd               1466 drivers/infiniband/hw/qib/qib_init.c 		qib_stop_timers(dd);
dd               1468 drivers/infiniband/hw/qib/qib_init.c 		for (pidx = 0; pidx < dd->num_pports; ++pidx)
dd               1469 drivers/infiniband/hw/qib/qib_init.c 			dd->f_quiet_serdes(dd->pport + pidx);
dd               1473 drivers/infiniband/hw/qib/qib_init.c 			(void) qibfs_remove(dd);
dd               1474 drivers/infiniband/hw/qib/qib_init.c 			qib_device_remove(dd);
dd               1477 drivers/infiniband/hw/qib/qib_init.c 			qib_unregister_ib_device(dd);
dd               1478 drivers/infiniband/hw/qib/qib_init.c 		qib_postinit_cleanup(dd);
dd               1484 drivers/infiniband/hw/qib/qib_init.c 	ret = qib_enable_wc(dd);
dd               1486 drivers/infiniband/hw/qib/qib_init.c 		qib_dev_err(dd,
dd               1492 drivers/infiniband/hw/qib/qib_init.c 	qib_verify_pioperf(dd);
dd               1499 drivers/infiniband/hw/qib/qib_init.c 	struct qib_devdata *dd = pci_get_drvdata(pdev);
dd               1503 drivers/infiniband/hw/qib/qib_init.c 	qib_unregister_ib_device(dd);
dd               1510 drivers/infiniband/hw/qib/qib_init.c 		qib_shutdown_device(dd);
dd               1512 drivers/infiniband/hw/qib/qib_init.c 	qib_stop_timers(dd);
dd               1517 drivers/infiniband/hw/qib/qib_init.c 	ret = qibfs_remove(dd);
dd               1519 drivers/infiniband/hw/qib/qib_init.c 		qib_dev_err(dd, "Failed counters filesystem cleanup: %d\n",
dd               1522 drivers/infiniband/hw/qib/qib_init.c 	qib_device_remove(dd);
dd               1524 drivers/infiniband/hw/qib/qib_init.c 	qib_postinit_cleanup(dd);
dd               1529 drivers/infiniband/hw/qib/qib_init.c 	struct qib_devdata *dd = pci_get_drvdata(pdev);
dd               1531 drivers/infiniband/hw/qib/qib_init.c 	qib_shutdown_device(dd);
dd               1543 drivers/infiniband/hw/qib/qib_init.c int qib_create_rcvhdrq(struct qib_devdata *dd, struct qib_ctxtdata *rcd)
dd               1552 drivers/infiniband/hw/qib/qib_init.c 		amt = ALIGN(dd->rcvhdrcnt * dd->rcvhdrentsize *
dd               1554 drivers/infiniband/hw/qib/qib_init.c 		gfp_flags = (rcd->ctxt >= dd->first_user_ctxt) ?
dd               1557 drivers/infiniband/hw/qib/qib_init.c 		old_node_id = dev_to_node(&dd->pcidev->dev);
dd               1558 drivers/infiniband/hw/qib/qib_init.c 		set_dev_node(&dd->pcidev->dev, rcd->node_id);
dd               1560 drivers/infiniband/hw/qib/qib_init.c 			&dd->pcidev->dev, amt, &rcd->rcvhdrq_phys,
dd               1562 drivers/infiniband/hw/qib/qib_init.c 		set_dev_node(&dd->pcidev->dev, old_node_id);
dd               1565 drivers/infiniband/hw/qib/qib_init.c 			qib_dev_err(dd,
dd               1571 drivers/infiniband/hw/qib/qib_init.c 		if (rcd->ctxt >= dd->first_user_ctxt) {
dd               1577 drivers/infiniband/hw/qib/qib_init.c 		if (!(dd->flags & QIB_NODMA_RTAIL)) {
dd               1578 drivers/infiniband/hw/qib/qib_init.c 			set_dev_node(&dd->pcidev->dev, rcd->node_id);
dd               1580 drivers/infiniband/hw/qib/qib_init.c 				&dd->pcidev->dev, PAGE_SIZE, &phys_hdrqtail,
dd               1582 drivers/infiniband/hw/qib/qib_init.c 			set_dev_node(&dd->pcidev->dev, old_node_id);
dd               1598 drivers/infiniband/hw/qib/qib_init.c 	qib_dev_err(dd,
dd               1604 drivers/infiniband/hw/qib/qib_init.c 	dma_free_coherent(&dd->pcidev->dev, amt, rcd->rcvhdrq,
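The qib_create_rcvhdrq() entries above show a NUMA-placement dance: remember the PCI device's node with dev_to_node(), rebind it to the context's node with set_dev_node() around dma_alloc_coherent(), then restore the old node, so the receive header queue lands near the CPUs that will touch it. A loose user-space analogue of that save/override/restore pattern, where a global int models the device node and malloc models the DMA allocator:

#include <stdio.h>
#include <stdlib.h>

/* Loose analogue of the NUMA dance in qib_create_rcvhdrq(). */
static int dev_node;

static void *alloc_on_node(int node, size_t size)
{
	int old_node_id = dev_node;	/* old_node_id = dev_to_node(dev) */
	void *buf;

	dev_node = node;		/* set_dev_node(dev, rcd->node_id) */
	buf = malloc(size);		/* dma_alloc_coherent(dev, size, ...) */
	dev_node = old_node_id;		/* set_dev_node(dev, old_node_id) */
	return buf;
}

int main(void)
{
	void *rcvhdrq = alloc_on_node(1, 4096);

	printf("rcvhdrq %s\n", rcvhdrq ? "allocated" : "allocation failed");
	free(rcvhdrq);
	return 0;
}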
dd               1622 drivers/infiniband/hw/qib/qib_init.c 	struct qib_devdata *dd = rcd->dd;
dd               1638 drivers/infiniband/hw/qib/qib_init.c 	egrsize = dd->rcvegrbufsize;
dd               1662 drivers/infiniband/hw/qib/qib_init.c 		old_node_id = dev_to_node(&dd->pcidev->dev);
dd               1663 drivers/infiniband/hw/qib/qib_init.c 		set_dev_node(&dd->pcidev->dev, rcd->node_id);
dd               1665 drivers/infiniband/hw/qib/qib_init.c 			dma_alloc_coherent(&dd->pcidev->dev, size,
dd               1668 drivers/infiniband/hw/qib/qib_init.c 		set_dev_node(&dd->pcidev->dev, old_node_id);
dd               1683 drivers/infiniband/hw/qib/qib_init.c 			dd->f_put_tid(dd, e + egroff +
dd               1686 drivers/infiniband/hw/qib/qib_init.c 					   dd->kregbase +
dd               1687 drivers/infiniband/hw/qib/qib_init.c 					   dd->rcvegrbase),
dd               1698 drivers/infiniband/hw/qib/qib_init.c 		dma_free_coherent(&dd->pcidev->dev, size,
dd               1715 drivers/infiniband/hw/qib/qib_init.c int init_chip_wc_pat(struct qib_devdata *dd, u32 vl15buflen)
dd               1721 drivers/infiniband/hw/qib/qib_init.c 	u64 qib_pio2koffset = dd->piobufbase & 0xffffffff;
dd               1722 drivers/infiniband/hw/qib/qib_init.c 	u64 qib_pio4koffset = dd->piobufbase >> 32;
dd               1723 drivers/infiniband/hw/qib/qib_init.c 	u64 qib_pio2klen = dd->piobcnt2k * dd->palign;
dd               1724 drivers/infiniband/hw/qib/qib_init.c 	u64 qib_pio4klen = dd->piobcnt4k * dd->align4k;
dd               1725 drivers/infiniband/hw/qib/qib_init.c 	u64 qib_physaddr = dd->physaddr;
dd               1734 drivers/infiniband/hw/qib/qib_init.c 	iounmap(dd->kregbase);
dd               1735 drivers/infiniband/hw/qib/qib_init.c 	dd->kregbase = NULL;
dd               1746 drivers/infiniband/hw/qib/qib_init.c 	if (dd->piobcnt4k == 0) {
dd               1758 drivers/infiniband/hw/qib/qib_init.c 	if (dd->uregbase > qib_kreglen)
dd               1759 drivers/infiniband/hw/qib/qib_init.c 		qib_userlen = dd->ureg_align * dd->cfgctxts;
dd               1771 drivers/infiniband/hw/qib/qib_init.c 		qib_userbase = ioremap_nocache(qib_physaddr + dd->uregbase,
dd               1777 drivers/infiniband/hw/qib/qib_init.c 	dd->kregbase = qib_kregbase;
dd               1778 drivers/infiniband/hw/qib/qib_init.c 	dd->kregend = (u64 __iomem *)
dd               1780 drivers/infiniband/hw/qib/qib_init.c 	dd->piobase = qib_piobase;
dd               1781 drivers/infiniband/hw/qib/qib_init.c 	dd->pio2kbase = (void __iomem *)
dd               1782 drivers/infiniband/hw/qib/qib_init.c 		(((char __iomem *) dd->piobase) +
dd               1784 drivers/infiniband/hw/qib/qib_init.c 	if (dd->piobcnt4k)
dd               1785 drivers/infiniband/hw/qib/qib_init.c 		dd->pio4kbase = (void __iomem *)
dd               1786 drivers/infiniband/hw/qib/qib_init.c 			(((char __iomem *) dd->piobase) +
dd               1790 drivers/infiniband/hw/qib/qib_init.c 		dd->userbase = qib_userbase;
dd                 75 drivers/infiniband/hw/qib/qib_intr.c 	struct qib_devdata *dd = ppd->dd;
dd                 77 drivers/infiniband/hw/qib/qib_intr.c 	event.device = &dd->verbs_dev.rdi.ibdev;
dd                 85 drivers/infiniband/hw/qib/qib_intr.c 	struct qib_devdata *dd = ppd->dd;
dd                 91 drivers/infiniband/hw/qib/qib_intr.c 	lstate = dd->f_iblink_state(ibcs); /* linkstate */
dd                 92 drivers/infiniband/hw/qib/qib_intr.c 	ltstate = dd->f_ibphys_portstate(ibcs);
dd                107 drivers/infiniband/hw/qib/qib_intr.c 		if (dd->f_ib_updown(ppd, 1, ibcs))
dd                113 drivers/infiniband/hw/qib/qib_intr.c 		    dd->f_ib_updown(ppd, 0, ibcs))
dd                150 drivers/infiniband/hw/qib/qib_intr.c 			if (dd->flags & QIB_HAS_SEND_DMA)
dd                154 drivers/infiniband/hw/qib/qib_intr.c 			dd->f_setextled(ppd, 1);
dd                181 drivers/infiniband/hw/qib/qib_intr.c 		ppd->dd->f_portcntr(ppd, QIBPORTCNTR_IBSYMBOLERR);
dd                189 drivers/infiniband/hw/qib/qib_intr.c void qib_handle_urcv(struct qib_devdata *dd, u64 ctxtr)
dd                195 drivers/infiniband/hw/qib/qib_intr.c 	spin_lock_irqsave(&dd->uctxt_lock, flags);
dd                196 drivers/infiniband/hw/qib/qib_intr.c 	for (i = dd->first_user_ctxt; dd->rcd && i < dd->cfgctxts; i++) {
dd                199 drivers/infiniband/hw/qib/qib_intr.c 		rcd = dd->rcd[i];
dd                205 drivers/infiniband/hw/qib/qib_intr.c 			dd->f_rcvctrl(rcd->ppd, QIB_RCVCTRL_INTRAVAIL_DIS,
dd                213 drivers/infiniband/hw/qib/qib_intr.c 	spin_unlock_irqrestore(&dd->uctxt_lock, flags);
dd                216 drivers/infiniband/hw/qib/qib_intr.c void qib_bad_intrstatus(struct qib_devdata *dd)
dd                226 drivers/infiniband/hw/qib/qib_intr.c 	qib_dev_err(dd,
dd                231 drivers/infiniband/hw/qib/qib_intr.c 			dd->f_set_intr_state(dd, 0);
dd                233 drivers/infiniband/hw/qib/qib_intr.c 			qib_dev_err(dd,
dd                235 drivers/infiniband/hw/qib/qib_intr.c 			dd->flags |= QIB_BADINTR;
dd                236 drivers/infiniband/hw/qib/qib_intr.c 			dd->flags &= ~QIB_INITTED;
dd                237 drivers/infiniband/hw/qib/qib_intr.c 			dd->f_free_irq(dd);
dd                209 drivers/infiniband/hw/qib/qib_mad.c 	struct qib_devdata *dd = dd_from_dev(ibdev);
dd                210 drivers/infiniband/hw/qib/qib_mad.c 	struct qib_ibport *ibp = &dd->pport[port_num - 1].ibport_data;
dd                282 drivers/infiniband/hw/qib/qib_mad.c 	struct qib_devdata *dd = dd_from_ibdev(ibdev);
dd                287 drivers/infiniband/hw/qib/qib_mad.c 	if (smp->attr_mod || pidx >= dd->num_pports ||
dd                288 drivers/infiniband/hw/qib/qib_mad.c 	    dd->pport[pidx].guid == 0)
dd                291 drivers/infiniband/hw/qib/qib_mad.c 		nip->port_guid = dd->pport[pidx].guid;
dd                299 drivers/infiniband/hw/qib/qib_mad.c 	nip->node_guid = dd->pport->guid; /* Use first-port GUID as node */
dd                300 drivers/infiniband/hw/qib/qib_mad.c 	nip->partition_cap = cpu_to_be16(qib_get_npkeys(dd));
dd                301 drivers/infiniband/hw/qib/qib_mad.c 	nip->device_id = cpu_to_be16(dd->deviceid);
dd                302 drivers/infiniband/hw/qib/qib_mad.c 	majrev = dd->majrev;
dd                303 drivers/infiniband/hw/qib/qib_mad.c 	minrev = dd->minrev;
dd                316 drivers/infiniband/hw/qib/qib_mad.c 	struct qib_devdata *dd = dd_from_ibdev(ibdev);
dd                325 drivers/infiniband/hw/qib/qib_mad.c 	if (startgx == 0 && pidx < dd->num_pports) {
dd                326 drivers/infiniband/hw/qib/qib_mad.c 		struct qib_pportdata *ppd = dd->pport + pidx;
dd                348 drivers/infiniband/hw/qib/qib_mad.c 	(void) ppd->dd->f_set_ib_cfg(ppd, QIB_IB_CFG_LWID_ENB, w);
dd                353 drivers/infiniband/hw/qib/qib_mad.c 	(void) ppd->dd->f_set_ib_cfg(ppd, QIB_IB_CFG_SPD_ENB, s);
dd                358 drivers/infiniband/hw/qib/qib_mad.c 	return ppd->dd->f_get_ib_cfg(ppd, QIB_IB_CFG_OVERRUN_THRESH);
dd                370 drivers/infiniband/hw/qib/qib_mad.c 	(void) ppd->dd->f_set_ib_cfg(ppd, QIB_IB_CFG_OVERRUN_THRESH,
dd                377 drivers/infiniband/hw/qib/qib_mad.c 	return ppd->dd->f_get_ib_cfg(ppd, QIB_IB_CFG_PHYERR_THRESH);
dd                389 drivers/infiniband/hw/qib/qib_mad.c 	(void) ppd->dd->f_set_ib_cfg(ppd, QIB_IB_CFG_PHYERR_THRESH,
dd                402 drivers/infiniband/hw/qib/qib_mad.c 	return ppd->dd->f_get_ib_cfg(ppd, QIB_IB_CFG_LINKDEFAULT) ==
dd                457 drivers/infiniband/hw/qib/qib_mad.c 	struct qib_devdata *dd;
dd                484 drivers/infiniband/hw/qib/qib_mad.c 	dd = dd_from_ibdev(ibdev);
dd                486 drivers/infiniband/hw/qib/qib_mad.c 	ppd = dd->pport + (port_num - 1);
dd                507 drivers/infiniband/hw/qib/qib_mad.c 	state = dd->f_iblink_state(ppd->lastibcstat);
dd                511 drivers/infiniband/hw/qib/qib_mad.c 		(dd->f_ibphys_portstate(ppd->lastibcstat) << 4) |
dd                538 drivers/infiniband/hw/qib/qib_mad.c 		dd->f_get_ib_cfg(ppd, QIB_IB_CFG_VL_HIGH_CAP);
dd                540 drivers/infiniband/hw/qib/qib_mad.c 		dd->f_get_ib_cfg(ppd, QIB_IB_CFG_VL_LOW_CAP);
dd                546 drivers/infiniband/hw/qib/qib_mad.c 		dd->f_get_ib_cfg(ppd, QIB_IB_CFG_OP_VLS) << 4;
dd                563 drivers/infiniband/hw/qib/qib_mad.c 		v = dd->f_get_ib_cfg(ppd, QIB_IB_CFG_LINKLATENCY);
dd                581 drivers/infiniband/hw/qib/qib_mad.c static int get_pkeys(struct qib_devdata *dd, u8 port, u16 *pkeys)
dd                583 drivers/infiniband/hw/qib/qib_mad.c 	struct qib_pportdata *ppd = dd->pport + port - 1;
dd                589 drivers/infiniband/hw/qib/qib_mad.c 	struct qib_ctxtdata *rcd = dd->rcd[ppd->hw_pidx];
dd                607 drivers/infiniband/hw/qib/qib_mad.c 		struct qib_devdata *dd = dd_from_ibdev(ibdev);
dd                608 drivers/infiniband/hw/qib/qib_mad.c 		unsigned i, n = qib_get_npkeys(dd);
dd                610 drivers/infiniband/hw/qib/qib_mad.c 		get_pkeys(dd, port, p);
dd                623 drivers/infiniband/hw/qib/qib_mad.c 	struct qib_devdata *dd = dd_from_ibdev(ibdev);
dd                630 drivers/infiniband/hw/qib/qib_mad.c 	if (startgx == 0 && pidx < dd->num_pports) {
dd                631 drivers/infiniband/hw/qib/qib_mad.c 		struct qib_pportdata *ppd = dd->pport + pidx;
dd                658 drivers/infiniband/hw/qib/qib_mad.c 	struct qib_devdata *dd;
dd                683 drivers/infiniband/hw/qib/qib_mad.c 	dd = dd_from_ibdev(ibdev);
dd                685 drivers/infiniband/hw/qib/qib_mad.c 	ppd = dd->pport + (port_num - 1);
dd                763 drivers/infiniband/hw/qib/qib_mad.c 		(void) dd->f_set_ib_cfg(ppd, QIB_IB_CFG_LINKDEFAULT,
dd                767 drivers/infiniband/hw/qib/qib_mad.c 		(void) dd->f_set_ib_cfg(ppd, QIB_IB_CFG_LINKDEFAULT,
dd                776 drivers/infiniband/hw/qib/qib_mad.c 	(void) dd->f_set_ib_cfg(ppd, QIB_IB_CFG_VL_HIGH_LIMIT,
dd                791 drivers/infiniband/hw/qib/qib_mad.c 			(void) dd->f_set_ib_cfg(ppd, QIB_IB_CFG_OP_VLS, vls);
dd                988 drivers/infiniband/hw/qib/qib_mad.c static int set_pkeys(struct qib_devdata *dd, u8 port, u16 *pkeys)
dd               1001 drivers/infiniband/hw/qib/qib_mad.c 	ppd = dd->pport + (port - 1);
dd               1002 drivers/infiniband/hw/qib/qib_mad.c 	rcd = dd->rcd[ppd->hw_pidx];
dd               1029 drivers/infiniband/hw/qib/qib_mad.c 		(void) dd->f_set_ib_cfg(ppd, QIB_IB_CFG_PKEYS, 0);
dd               1032 drivers/infiniband/hw/qib/qib_mad.c 		event.device = &dd->verbs_dev.rdi.ibdev;
dd               1045 drivers/infiniband/hw/qib/qib_mad.c 	struct qib_devdata *dd = dd_from_ibdev(ibdev);
dd               1046 drivers/infiniband/hw/qib/qib_mad.c 	unsigned i, n = qib_get_npkeys(dd);
dd               1051 drivers/infiniband/hw/qib/qib_mad.c 	if (startpx != 0 || set_pkeys(dd, port, q) != 0)
dd               1108 drivers/infiniband/hw/qib/qib_mad.c 		(void) ppd->dd->f_get_ib_table(ppd, QIB_IB_TBL_VL_LOW_ARB,
dd               1111 drivers/infiniband/hw/qib/qib_mad.c 		(void) ppd->dd->f_get_ib_table(ppd, QIB_IB_TBL_VL_HIGH_ARB,
dd               1128 drivers/infiniband/hw/qib/qib_mad.c 		(void) ppd->dd->f_set_ib_table(ppd, QIB_IB_TBL_VL_LOW_ARB,
dd               1131 drivers/infiniband/hw/qib/qib_mad.c 		(void) ppd->dd->f_set_ib_table(ppd, QIB_IB_TBL_VL_HIGH_ARB,
dd               1156 drivers/infiniband/hw/qib/qib_mad.c 	struct qib_devdata *dd = dd_from_ibdev(ibdev);
dd               1172 drivers/infiniband/hw/qib/qib_mad.c 			    dd->psxmitwait_supported <<
dd               1188 drivers/infiniband/hw/qib/qib_mad.c 	struct qib_devdata *dd = dd_from_dev(dev);
dd               1202 drivers/infiniband/hw/qib/qib_mad.c 	p->tick = dd->f_get_ib_cfg(ppd, QIB_IB_CFG_PMA_TICKS);
dd               1203 drivers/infiniband/hw/qib/qib_mad.c 	p->sample_status = dd->f_portcntr(ppd, QIBPORTCNTR_PSSTAT);
dd               1226 drivers/infiniband/hw/qib/qib_mad.c 	struct qib_devdata *dd = dd_from_dev(dev);
dd               1244 drivers/infiniband/hw/qib/qib_mad.c 	status = dd->f_portcntr(ppd, QIBPORTCNTR_PSSTAT);
dd               1256 drivers/infiniband/hw/qib/qib_mad.c 		dd->f_set_cntr_sample(ppd, ibp->rvp.pma_sample_interval,
dd               1274 drivers/infiniband/hw/qib/qib_mad.c 		ret = ppd->dd->f_portcntr(ppd, QIBPORTCNTR_PSXMITDATA);
dd               1277 drivers/infiniband/hw/qib/qib_mad.c 		ret = ppd->dd->f_portcntr(ppd, QIBPORTCNTR_PSRCVDATA);
dd               1280 drivers/infiniband/hw/qib/qib_mad.c 		ret = ppd->dd->f_portcntr(ppd, QIBPORTCNTR_PSXMITPKTS);
dd               1283 drivers/infiniband/hw/qib/qib_mad.c 		ret = ppd->dd->f_portcntr(ppd, QIBPORTCNTR_PSRCVPKTS);
dd               1286 drivers/infiniband/hw/qib/qib_mad.c 		ret = ppd->dd->f_portcntr(ppd, QIBPORTCNTR_PSXMITWAIT);
dd               1355 drivers/infiniband/hw/qib/qib_mad.c 	struct qib_devdata *dd = dd_from_dev(dev);
dd               1368 drivers/infiniband/hw/qib/qib_mad.c 		status = dd->f_portcntr(ppd, QIBPORTCNTR_PSSTAT);
dd               1374 drivers/infiniband/hw/qib/qib_mad.c 			dd->f_set_cntr_sample(ppd,
dd               1394 drivers/infiniband/hw/qib/qib_mad.c 	struct qib_devdata *dd = dd_from_dev(dev);
dd               1408 drivers/infiniband/hw/qib/qib_mad.c 		status = dd->f_portcntr(ppd, QIBPORTCNTR_PSSTAT);
dd               1416 drivers/infiniband/hw/qib/qib_mad.c 			dd->f_set_cntr_sample(ppd,
dd               1537 drivers/infiniband/hw/qib/qib_mad.c 	struct qib_devdata *dd = dd_from_ppd(ppd);
dd               1546 drivers/infiniband/hw/qib/qib_mad.c 	if (!dd->psxmitwait_supported)
dd               1585 drivers/infiniband/hw/qib/qib_mad.c 			    (dd->psxmitwait_check_rate &
dd               1773 drivers/infiniband/hw/qib/qib_mad.c 	struct qib_devdata *dd = dd_from_ppd(ppd);
dd               1786 drivers/infiniband/hw/qib/qib_mad.c 		dd->f_set_cntr_sample(ppd, QIB_CONG_TIMER_PSINTERVAL,
dd               1996 drivers/infiniband/hw/qib/qib_mad.c 			ppd->dd->f_set_ib_cfg(ppd, QIB_IB_CFG_PORT,
dd               2452 drivers/infiniband/hw/qib/qib_mad.c 	struct qib_devdata *dd = dd_from_ppd(ppd);
dd               2458 drivers/infiniband/hw/qib/qib_mad.c 		status = dd->f_portcntr(ppd, QIBPORTCNTR_PSSTAT);
dd               2467 drivers/infiniband/hw/qib/qib_mad.c 	dd->f_set_cntr_sample(ppd, QIB_CONG_TIMER_PSINTERVAL, 0x0);
dd               2476 drivers/infiniband/hw/qib/qib_mad.c 	struct qib_devdata *dd = container_of(ibdev,
dd               2480 drivers/infiniband/hw/qib/qib_mad.c 	dd->pport[port_idx].cong_stats.counter = 0;
dd               2481 drivers/infiniband/hw/qib/qib_mad.c 	timer_setup(&dd->pport[port_idx].cong_stats.timer,
dd               2483 drivers/infiniband/hw/qib/qib_mad.c 	dd->pport[port_idx].cong_stats.timer.expires = 0;
dd               2484 drivers/infiniband/hw/qib/qib_mad.c 	add_timer(&dd->pport[port_idx].cong_stats.timer);
dd               2490 drivers/infiniband/hw/qib/qib_mad.c 	struct qib_devdata *dd = container_of(ibdev,
dd               2493 drivers/infiniband/hw/qib/qib_mad.c 	if (dd->pport[port_idx].cong_stats.timer.function)
dd               2494 drivers/infiniband/hw/qib/qib_mad.c 		del_timer_sync(&dd->pport[port_idx].cong_stats.timer);
dd               2496 drivers/infiniband/hw/qib/qib_mad.c 	if (dd->pport[port_idx].ibport_data.smi_ah)
dd               2497 drivers/infiniband/hw/qib/qib_mad.c 		rdma_destroy_ah(&dd->pport[port_idx].ibport_data.smi_ah->ibah,
dd                136 drivers/infiniband/hw/qib/qib_pcie.c int qib_pcie_ddinit(struct qib_devdata *dd, struct pci_dev *pdev,
dd                142 drivers/infiniband/hw/qib/qib_pcie.c 	dd->pcidev = pdev;
dd                143 drivers/infiniband/hw/qib/qib_pcie.c 	pci_set_drvdata(pdev, dd);
dd                148 drivers/infiniband/hw/qib/qib_pcie.c 	dd->kregbase = ioremap_nocache(addr, len);
dd                149 drivers/infiniband/hw/qib/qib_pcie.c 	if (!dd->kregbase)
dd                152 drivers/infiniband/hw/qib/qib_pcie.c 	dd->kregend = (u64 __iomem *)((void __iomem *) dd->kregbase + len);
dd                153 drivers/infiniband/hw/qib/qib_pcie.c 	dd->physaddr = addr;        /* used for io_remap, etc. */
dd                159 drivers/infiniband/hw/qib/qib_pcie.c 	dd->pcibar0 = addr;
dd                160 drivers/infiniband/hw/qib/qib_pcie.c 	dd->pcibar1 = addr >> 32;
dd                161 drivers/infiniband/hw/qib/qib_pcie.c 	dd->deviceid = ent->device; /* save for later use */
dd                162 drivers/infiniband/hw/qib/qib_pcie.c 	dd->vendorid = ent->vendor;
dd                172 drivers/infiniband/hw/qib/qib_pcie.c void qib_pcie_ddcleanup(struct qib_devdata *dd)
dd                174 drivers/infiniband/hw/qib/qib_pcie.c 	u64 __iomem *base = (void __iomem *) dd->kregbase;
dd                176 drivers/infiniband/hw/qib/qib_pcie.c 	dd->kregbase = NULL;
dd                178 drivers/infiniband/hw/qib/qib_pcie.c 	if (dd->piobase)
dd                179 drivers/infiniband/hw/qib/qib_pcie.c 		iounmap(dd->piobase);
dd                180 drivers/infiniband/hw/qib/qib_pcie.c 	if (dd->userbase)
dd                181 drivers/infiniband/hw/qib/qib_pcie.c 		iounmap(dd->userbase);
dd                182 drivers/infiniband/hw/qib/qib_pcie.c 	if (dd->piovl15base)
dd                183 drivers/infiniband/hw/qib/qib_pcie.c 		iounmap(dd->piovl15base);
dd                185 drivers/infiniband/hw/qib/qib_pcie.c 	pci_disable_device(dd->pcidev);
dd                186 drivers/infiniband/hw/qib/qib_pcie.c 	pci_release_regions(dd->pcidev);
dd                188 drivers/infiniband/hw/qib/qib_pcie.c 	pci_set_drvdata(dd->pcidev, NULL);
dd                196 drivers/infiniband/hw/qib/qib_pcie.c static void qib_cache_msi_info(struct qib_devdata *dd, int pos)
dd                198 drivers/infiniband/hw/qib/qib_pcie.c 	struct pci_dev *pdev = dd->pcidev;
dd                201 drivers/infiniband/hw/qib/qib_pcie.c 	pci_read_config_dword(pdev, pos + PCI_MSI_ADDRESS_LO, &dd->msi_lo);
dd                202 drivers/infiniband/hw/qib/qib_pcie.c 	pci_read_config_dword(pdev, pos + PCI_MSI_ADDRESS_HI, &dd->msi_hi);
dd                208 drivers/infiniband/hw/qib/qib_pcie.c 			     &dd->msi_data);
dd                211 drivers/infiniband/hw/qib/qib_pcie.c int qib_pcie_params(struct qib_devdata *dd, u32 minw, u32 *nent)
dd                218 drivers/infiniband/hw/qib/qib_pcie.c 	if (!pci_is_pcie(dd->pcidev)) {
dd                219 drivers/infiniband/hw/qib/qib_pcie.c 		qib_dev_err(dd, "Can't find PCI Express capability!\n");
dd                221 drivers/infiniband/hw/qib/qib_pcie.c 		dd->lbus_width = 1;
dd                222 drivers/infiniband/hw/qib/qib_pcie.c 		dd->lbus_speed = 2500; /* Gen1, 2.5GHz */
dd                227 drivers/infiniband/hw/qib/qib_pcie.c 	if (dd->flags & QIB_HAS_INTX)
dd                230 drivers/infiniband/hw/qib/qib_pcie.c 	nvec = pci_alloc_irq_vectors(dd->pcidev, 1, maxvec, flags);
dd                240 drivers/infiniband/hw/qib/qib_pcie.c 		*nent = !dd->pcidev->msix_enabled ? 0 : nvec;
dd                242 drivers/infiniband/hw/qib/qib_pcie.c 	if (dd->pcidev->msi_enabled)
dd                243 drivers/infiniband/hw/qib/qib_pcie.c 		qib_cache_msi_info(dd, dd->pcidev->msi_cap);
dd                245 drivers/infiniband/hw/qib/qib_pcie.c 	pcie_capability_read_word(dd->pcidev, PCI_EXP_LNKSTA, &linkstat);
dd                253 drivers/infiniband/hw/qib/qib_pcie.c 	dd->lbus_width = linkstat;
dd                257 drivers/infiniband/hw/qib/qib_pcie.c 		dd->lbus_speed = 2500; /* Gen1, 2.5GHz */
dd                260 drivers/infiniband/hw/qib/qib_pcie.c 		dd->lbus_speed = 5000; /* Gen2, 5GHz */
dd                263 drivers/infiniband/hw/qib/qib_pcie.c 		dd->lbus_speed = 2500;
dd                272 drivers/infiniband/hw/qib/qib_pcie.c 		qib_dev_err(dd,
dd                276 drivers/infiniband/hw/qib/qib_pcie.c 	qib_tune_pcie_caps(dd);
dd                278 drivers/infiniband/hw/qib/qib_pcie.c 	qib_tune_pcie_coalesce(dd);
dd                282 drivers/infiniband/hw/qib/qib_pcie.c 	snprintf(dd->lbus_info, sizeof(dd->lbus_info),
dd                283 drivers/infiniband/hw/qib/qib_pcie.c 		 "PCIe,%uMHz,x%u\n", dd->lbus_speed, dd->lbus_width);
dd                295 drivers/infiniband/hw/qib/qib_pcie.c void qib_free_irq(struct qib_devdata *dd)
dd                297 drivers/infiniband/hw/qib/qib_pcie.c 	pci_free_irq(dd->pcidev, 0, dd);
dd                298 drivers/infiniband/hw/qib/qib_pcie.c 	pci_free_irq_vectors(dd->pcidev);
dd                309 drivers/infiniband/hw/qib/qib_pcie.c int qib_reinit_intr(struct qib_devdata *dd)
dd                316 drivers/infiniband/hw/qib/qib_pcie.c 	if (!dd->msi_lo)
dd                319 drivers/infiniband/hw/qib/qib_pcie.c 	pos = dd->pcidev->msi_cap;
dd                321 drivers/infiniband/hw/qib/qib_pcie.c 		qib_dev_err(dd,
dd                327 drivers/infiniband/hw/qib/qib_pcie.c 	pci_write_config_dword(dd->pcidev, pos + PCI_MSI_ADDRESS_LO,
dd                328 drivers/infiniband/hw/qib/qib_pcie.c 			       dd->msi_lo);
dd                329 drivers/infiniband/hw/qib/qib_pcie.c 	pci_write_config_dword(dd->pcidev, pos + PCI_MSI_ADDRESS_HI,
dd                330 drivers/infiniband/hw/qib/qib_pcie.c 			       dd->msi_hi);
dd                331 drivers/infiniband/hw/qib/qib_pcie.c 	pci_read_config_word(dd->pcidev, pos + PCI_MSI_FLAGS, &control);
dd                334 drivers/infiniband/hw/qib/qib_pcie.c 		pci_write_config_word(dd->pcidev, pos + PCI_MSI_FLAGS,
dd                338 drivers/infiniband/hw/qib/qib_pcie.c 	pci_write_config_word(dd->pcidev, pos +
dd                340 drivers/infiniband/hw/qib/qib_pcie.c 			      dd->msi_data);
dd                343 drivers/infiniband/hw/qib/qib_pcie.c 	qib_free_irq(dd);
dd                345 drivers/infiniband/hw/qib/qib_pcie.c 	if (!ret && (dd->flags & QIB_HAS_INTX))
dd                349 drivers/infiniband/hw/qib/qib_pcie.c 	pci_set_master(dd->pcidev);
dd                358 drivers/infiniband/hw/qib/qib_pcie.c void qib_pcie_getcmd(struct qib_devdata *dd, u16 *cmd, u8 *iline, u8 *cline)
dd                360 drivers/infiniband/hw/qib/qib_pcie.c 	pci_read_config_word(dd->pcidev, PCI_COMMAND, cmd);
dd                361 drivers/infiniband/hw/qib/qib_pcie.c 	pci_read_config_byte(dd->pcidev, PCI_INTERRUPT_LINE, iline);
dd                362 drivers/infiniband/hw/qib/qib_pcie.c 	pci_read_config_byte(dd->pcidev, PCI_CACHE_LINE_SIZE, cline);
dd                365 drivers/infiniband/hw/qib/qib_pcie.c void qib_pcie_reenable(struct qib_devdata *dd, u16 cmd, u8 iline, u8 cline)
dd                369 drivers/infiniband/hw/qib/qib_pcie.c 	r = pci_write_config_dword(dd->pcidev, PCI_BASE_ADDRESS_0,
dd                370 drivers/infiniband/hw/qib/qib_pcie.c 				   dd->pcibar0);
dd                372 drivers/infiniband/hw/qib/qib_pcie.c 		qib_dev_err(dd, "rewrite of BAR0 failed: %d\n", r);
dd                373 drivers/infiniband/hw/qib/qib_pcie.c 	r = pci_write_config_dword(dd->pcidev, PCI_BASE_ADDRESS_1,
dd                374 drivers/infiniband/hw/qib/qib_pcie.c 				   dd->pcibar1);
dd                376 drivers/infiniband/hw/qib/qib_pcie.c 		qib_dev_err(dd, "rewrite of BAR1 failed: %d\n", r);
dd                378 drivers/infiniband/hw/qib/qib_pcie.c 	pci_write_config_word(dd->pcidev, PCI_COMMAND, cmd);
dd                379 drivers/infiniband/hw/qib/qib_pcie.c 	pci_write_config_byte(dd->pcidev, PCI_INTERRUPT_LINE, iline);
dd                380 drivers/infiniband/hw/qib/qib_pcie.c 	pci_write_config_byte(dd->pcidev, PCI_CACHE_LINE_SIZE, cline);
dd                381 drivers/infiniband/hw/qib/qib_pcie.c 	r = pci_enable_device(dd->pcidev);
dd                383 drivers/infiniband/hw/qib/qib_pcie.c 		qib_dev_err(dd,
dd                398 drivers/infiniband/hw/qib/qib_pcie.c static void qib_tune_pcie_coalesce(struct qib_devdata *dd)
dd                408 drivers/infiniband/hw/qib/qib_pcie.c 	parent = dd->pcidev->bus->self;
dd                410 drivers/infiniband/hw/qib/qib_pcie.c 		qib_devinfo(dd->pcidev, "Parent not root\n");
dd                465 drivers/infiniband/hw/qib/qib_pcie.c static void qib_tune_pcie_caps(struct qib_devdata *dd)
dd                472 drivers/infiniband/hw/qib/qib_pcie.c 	parent = dd->pcidev->bus->self;
dd                474 drivers/infiniband/hw/qib/qib_pcie.c 		qib_devinfo(dd->pcidev, "Parent not root\n");
dd                478 drivers/infiniband/hw/qib/qib_pcie.c 	if (!pci_is_pcie(parent) || !pci_is_pcie(dd->pcidev))
dd                484 drivers/infiniband/hw/qib/qib_pcie.c 	ep_mpss = dd->pcidev->pcie_mpss;
dd                485 drivers/infiniband/hw/qib/qib_pcie.c 	ep_mps = ffs(pcie_get_mps(dd->pcidev)) - 8;
dd                502 drivers/infiniband/hw/qib/qib_pcie.c 		pcie_set_mps(dd->pcidev, 128 << ep_mps);
dd                516 drivers/infiniband/hw/qib/qib_pcie.c 	ep_mrrs = pcie_get_readrq(dd->pcidev);
dd                524 drivers/infiniband/hw/qib/qib_pcie.c 		pcie_set_readrq(dd->pcidev, ep_mrrs);
dd                536 drivers/infiniband/hw/qib/qib_pcie.c 	struct qib_devdata *dd = pci_get_drvdata(pdev);
dd                552 drivers/infiniband/hw/qib/qib_pcie.c 		if (dd) {
dd                554 drivers/infiniband/hw/qib/qib_pcie.c 			dd->flags &= ~QIB_PRESENT;
dd                555 drivers/infiniband/hw/qib/qib_pcie.c 			qib_disable_after_error(dd);
dd                573 drivers/infiniband/hw/qib/qib_pcie.c 	struct qib_devdata *dd = pci_get_drvdata(pdev);
dd                576 drivers/infiniband/hw/qib/qib_pcie.c 	if (dd && dd->pport) {
dd                577 drivers/infiniband/hw/qib/qib_pcie.c 		words = dd->f_portcntr(dd->pport, QIBPORTCNTR_WORDRCV);
dd                597 drivers/infiniband/hw/qib/qib_pcie.c 	struct qib_devdata *dd = pci_get_drvdata(pdev);
dd                605 drivers/infiniband/hw/qib/qib_pcie.c 	qib_init(dd, 1); /* same as re-init after reset */
dd                134 drivers/infiniband/hw/qib/qib_qp.c 	struct qib_devdata *dd = container_of(verbs_dev, struct qib_devdata,
dd                136 drivers/infiniband/hw/qib/qib_qp.c 	u16 qpt_mask = dd->qpn_mask;
dd                155 drivers/infiniband/hw/qib/qib_qp.c 	if (qpt_mask && ((qpn & qpt_mask) >> 1) >= dd->n_krcv_queues)
dd                173 drivers/infiniband/hw/qib/qib_qp.c 				dd->n_krcv_queues, qpt_mask);
dd                216 drivers/infiniband/hw/qib/qib_qp.c 	struct qib_devdata *dd = container_of(verbs_dev, struct qib_devdata,
dd                220 drivers/infiniband/hw/qib/qib_qp.c 	for (n = 0; n < dd->num_pports; n++) {
dd                221 drivers/infiniband/hw/qib/qib_qp.c 		struct qib_ibport *ibp = &dd->pport[n].ibport_data;
dd                296 drivers/infiniband/hw/qib/qib_qp.c 	struct qib_devdata *dd = container_of(verbs_dev, struct qib_devdata,
dd                302 drivers/infiniband/hw/qib/qib_qp.c 	if (mtu > dd->pport[pidx].ibmtu)
dd                303 drivers/infiniband/hw/qib/qib_qp.c 		pmtu = mtu_to_enum(dd->pport[pidx].ibmtu);
dd                 49 drivers/infiniband/hw/qib/qib_qsfp.c 	struct qib_devdata *dd = ppd->dd;
dd                 55 drivers/infiniband/hw/qib/qib_qsfp.c 	ret = mutex_lock_interruptible(&dd->eep_lock);
dd                 59 drivers/infiniband/hw/qib/qib_qsfp.c 	if (dd->twsi_eeprom_dev == QIB_TWSI_NO_DEV) {
dd                 78 drivers/infiniband/hw/qib/qib_qsfp.c 	dd->f_gpio_mod(dd, out, mask, mask);
dd                 87 drivers/infiniband/hw/qib/qib_qsfp.c 	ret = qib_twsi_reset(dd);
dd                 89 drivers/infiniband/hw/qib/qib_qsfp.c 		qib_dev_porterr(dd, ppd->port,
dd                106 drivers/infiniband/hw/qib/qib_qsfp.c 		ret = qib_twsi_blk_rd(dd, QSFP_DEV, addr, buff + cnt, wlen);
dd                128 drivers/infiniband/hw/qib/qib_qsfp.c 	dd->f_gpio_mod(dd, mask, mask, mask);
dd                136 drivers/infiniband/hw/qib/qib_qsfp.c 		qib_dev_err(dd, "QSFP interface bus stuck non-idle\n");
dd                139 drivers/infiniband/hw/qib/qib_qsfp.c 		qib_dev_porterr(dd, ppd->port, "QSFP failed even retrying\n");
dd                141 drivers/infiniband/hw/qib/qib_qsfp.c 		qib_dev_porterr(dd, ppd->port, "QSFP retries: %d\n", pass);
dd                146 drivers/infiniband/hw/qib/qib_qsfp.c 	mutex_unlock(&dd->eep_lock);
dd                160 drivers/infiniband/hw/qib/qib_qsfp.c 	struct qib_devdata *dd = ppd->dd;
dd                165 drivers/infiniband/hw/qib/qib_qsfp.c 	ret = mutex_lock_interruptible(&dd->eep_lock);
dd                169 drivers/infiniband/hw/qib/qib_qsfp.c 	if (dd->twsi_eeprom_dev == QIB_TWSI_NO_DEV) {
dd                187 drivers/infiniband/hw/qib/qib_qsfp.c 	dd->f_gpio_mod(dd, out, mask, mask);
dd                196 drivers/infiniband/hw/qib/qib_qsfp.c 	ret = qib_twsi_reset(dd);
dd                198 drivers/infiniband/hw/qib/qib_qsfp.c 		qib_dev_porterr(dd, ppd->port,
dd                214 drivers/infiniband/hw/qib/qib_qsfp.c 		ret = qib_twsi_blk_wr(dd, QSFP_DEV, addr, buff + cnt, wlen);
dd                233 drivers/infiniband/hw/qib/qib_qsfp.c 	dd->f_gpio_mod(dd, mask, mask, mask);
dd                242 drivers/infiniband/hw/qib/qib_qsfp.c 	mutex_unlock(&dd->eep_lock);
dd                292 drivers/infiniband/hw/qib/qib_qsfp.c 		qib_dev_porterr(ppd->dd, ppd->port,
dd                305 drivers/infiniband/hw/qib/qib_qsfp.c 			qib_dev_porterr(ppd->dd, ppd->port,
dd                315 drivers/infiniband/hw/qib/qib_qsfp.c 		qib_dev_porterr(ppd->dd, ppd->port,
dd                384 drivers/infiniband/hw/qib/qib_qsfp.c 		qib_dev_porterr(ppd->dd, ppd->port,
dd                422 drivers/infiniband/hw/qib/qib_qsfp.c 		qib_dev_porterr(ppd->dd, ppd->port,
dd                451 drivers/infiniband/hw/qib/qib_qsfp.c 	ret = ppd->dd->f_gpio_mod(ppd->dd, 0, 0, 0);
dd                466 drivers/infiniband/hw/qib/qib_qsfp.c 	struct qib_devdata *dd = qd->ppd->dd;
dd                482 drivers/infiniband/hw/qib/qib_qsfp.c 	dd->f_gpio_mod(dd, highs, mask, mask);
dd                485 drivers/infiniband/hw/qib/qib_qsfp.c 	dd->f_gpio_mod(dd, mask, mask, mask);
dd                599 drivers/infiniband/hw/qib/qib_rc.c 	struct qib_devdata *dd = dd_from_ibdev(qp->ibqp.device);
dd                662 drivers/infiniband/hw/qib/qib_rc.c 	control = dd->f_setpbc_control(ppd, hwords + SIZE_OF_CRC,
dd                667 drivers/infiniband/hw/qib/qib_rc.c 	piobuf = dd->f_getsendbuf(ppd, pbc, &pbufn);
dd                687 drivers/infiniband/hw/qib/qib_rc.c 	if (dd->flags & QIB_PIO_FLUSH_WC) {
dd                697 drivers/infiniband/hw/qib/qib_rc.c 	if (dd->flags & QIB_USE_SPCL_TRIG) {
dd                698 drivers/infiniband/hw/qib/qib_rc.c 		u32 spcl_off = (pbufn >= dd->piobcnt2k) ? 2047 : 1023;
dd                705 drivers/infiniband/hw/qib/qib_rc.c 	qib_sendbuf_done(dd, pbufn);
dd                 98 drivers/infiniband/hw/qib/qib_sd7220.c static int qib_sd7220_reg_mod(struct qib_devdata *dd, int sdnum, u32 loc,
dd                100 drivers/infiniband/hw/qib/qib_sd7220.c static int ibsd_mod_allchnls(struct qib_devdata *dd, int loc, int val,
dd                102 drivers/infiniband/hw/qib/qib_sd7220.c static int qib_sd_trimdone_poll(struct qib_devdata *dd);
dd                103 drivers/infiniband/hw/qib/qib_sd7220.c static void qib_sd_trimdone_monitor(struct qib_devdata *dd, const char *where);
dd                104 drivers/infiniband/hw/qib/qib_sd7220.c static int qib_sd_setvals(struct qib_devdata *dd);
dd                105 drivers/infiniband/hw/qib/qib_sd7220.c static int qib_sd_early(struct qib_devdata *dd);
dd                106 drivers/infiniband/hw/qib/qib_sd7220.c static int qib_sd_dactrim(struct qib_devdata *dd);
dd                107 drivers/infiniband/hw/qib/qib_sd7220.c static int qib_internal_presets(struct qib_devdata *dd);
dd                109 drivers/infiniband/hw/qib/qib_sd7220.c static int qib_sd_trimself(struct qib_devdata *dd, int val);
dd                110 drivers/infiniband/hw/qib/qib_sd7220.c static int epb_access(struct qib_devdata *dd, int sdnum, int claim);
dd                111 drivers/infiniband/hw/qib/qib_sd7220.c static int qib_sd7220_ib_load(struct qib_devdata *dd,
dd                113 drivers/infiniband/hw/qib/qib_sd7220.c static int qib_sd7220_ib_vfy(struct qib_devdata *dd,
dd                126 drivers/infiniband/hw/qib/qib_sd7220.c 	struct qib_devdata *dd = ppd->dd;
dd                128 drivers/infiniband/hw/qib/qib_sd7220.c 	if (!dd->cspec->serdes_first_init_done &&
dd                129 drivers/infiniband/hw/qib/qib_sd7220.c 	    qib_sd7220_ib_vfy(dd, fw) > 0)
dd                130 drivers/infiniband/hw/qib/qib_sd7220.c 		dd->cspec->serdes_first_init_done = 1;
dd                131 drivers/infiniband/hw/qib/qib_sd7220.c 	return dd->cspec->serdes_first_init_done;
dd                143 drivers/infiniband/hw/qib/qib_sd7220.c void qib_sd7220_clr_ibpar(struct qib_devdata *dd)
dd                148 drivers/infiniband/hw/qib/qib_sd7220.c 	ret = qib_sd7220_reg_mod(dd, IB_7220_SERDES, IB_MPREG6,
dd                151 drivers/infiniband/hw/qib/qib_sd7220.c 		qib_dev_err(dd, "Failed clearing IBSerDes Parity err\n");
dd                154 drivers/infiniband/hw/qib/qib_sd7220.c 	ret = qib_sd7220_reg_mod(dd, IB_7220_SERDES, IB_MPREG6, 0,
dd                157 drivers/infiniband/hw/qib/qib_sd7220.c 	qib_read_kreg32(dd, kr_scratch);
dd                159 drivers/infiniband/hw/qib/qib_sd7220.c 	qib_write_kreg(dd, kr_hwerrclear,
dd                161 drivers/infiniband/hw/qib/qib_sd7220.c 	qib_read_kreg32(dd, kr_scratch);
dd                175 drivers/infiniband/hw/qib/qib_sd7220.c static int qib_resync_ibepb(struct qib_devdata *dd)
dd                184 drivers/infiniband/hw/qib/qib_sd7220.c 		ret = qib_sd7220_reg_mod(dd, IB_7220_SERDES, loc, 0, 0);
dd                186 drivers/infiniband/hw/qib/qib_sd7220.c 			qib_dev_err(dd, "Failed read in resync\n");
dd                190 drivers/infiniband/hw/qib/qib_sd7220.c 			qib_dev_err(dd, "unexpected pattern in resync\n");
dd                192 drivers/infiniband/hw/qib/qib_sd7220.c 		ret = qib_sd7220_reg_mod(dd, IB_7220_SERDES, loc, pat, 0xFF);
dd                194 drivers/infiniband/hw/qib/qib_sd7220.c 			qib_dev_err(dd, "Failed write in resync\n");
dd                197 drivers/infiniband/hw/qib/qib_sd7220.c 		ret = qib_sd7220_reg_mod(dd, IB_7220_SERDES, loc, 0, 0);
dd                199 drivers/infiniband/hw/qib/qib_sd7220.c 			qib_dev_err(dd, "Failed re-read in resync\n");
dd                203 drivers/infiniband/hw/qib/qib_sd7220.c 			qib_dev_err(dd, "Failed compare1 in resync\n");
dd                207 drivers/infiniband/hw/qib/qib_sd7220.c 		ret = qib_sd7220_reg_mod(dd, IB_7220_SERDES, loc, 0, 0);
dd                209 drivers/infiniband/hw/qib/qib_sd7220.c 			qib_dev_err(dd, "Failed CMUDONE rd in resync\n");
dd                213 drivers/infiniband/hw/qib/qib_sd7220.c 			qib_dev_err(dd, "Bad CMUDONE value %02X, chn %d\n",
dd                227 drivers/infiniband/hw/qib/qib_sd7220.c static int qib_ibsd_reset(struct qib_devdata *dd, int assert_rst)
dd                233 drivers/infiniband/hw/qib/qib_sd7220.c 	rst_val = qib_read_kreg64(dd, kr_ibserdesctrl);
dd                239 drivers/infiniband/hw/qib/qib_sd7220.c 		spin_lock_irqsave(&dd->cspec->sdepb_lock, flags);
dd                240 drivers/infiniband/hw/qib/qib_sd7220.c 		epb_access(dd, IB_7220_SERDES, 1);
dd                243 drivers/infiniband/hw/qib/qib_sd7220.c 		qib_write_kreg(dd, kr_hwerrmask,
dd                244 drivers/infiniband/hw/qib/qib_sd7220.c 			       dd->cspec->hwerrmask &
dd                246 drivers/infiniband/hw/qib/qib_sd7220.c 		qib_write_kreg(dd, kr_ibserdesctrl, rst_val);
dd                248 drivers/infiniband/hw/qib/qib_sd7220.c 		qib_read_kreg32(dd, kr_scratch);
dd                251 drivers/infiniband/hw/qib/qib_sd7220.c 		epb_access(dd, IB_7220_SERDES, -1);
dd                252 drivers/infiniband/hw/qib/qib_sd7220.c 		spin_unlock_irqrestore(&dd->cspec->sdepb_lock, flags);
dd                264 drivers/infiniband/hw/qib/qib_sd7220.c 		qib_write_kreg(dd, kr_hwerrmask,
dd                265 drivers/infiniband/hw/qib/qib_sd7220.c 			       dd->cspec->hwerrmask &
dd                268 drivers/infiniband/hw/qib/qib_sd7220.c 		ret = qib_resync_ibepb(dd);
dd                270 drivers/infiniband/hw/qib/qib_sd7220.c 			qib_dev_err(dd, "unable to re-sync IB EPB\n");
dd                273 drivers/infiniband/hw/qib/qib_sd7220.c 		ret = qib_sd7220_reg_mod(dd, IB_7220_SERDES, IB_MPREG5, 1, 1);
dd                277 drivers/infiniband/hw/qib/qib_sd7220.c 		ret = qib_sd7220_reg_mod(dd, IB_7220_SERDES, IB_MPREG6, 0x80,
dd                280 drivers/infiniband/hw/qib/qib_sd7220.c 			qib_dev_err(dd, "Failed to set WDOG disable\n");
dd                283 drivers/infiniband/hw/qib/qib_sd7220.c 		qib_write_kreg(dd, kr_ibserdesctrl, rst_val);
dd                285 drivers/infiniband/hw/qib/qib_sd7220.c 		qib_read_kreg32(dd, kr_scratch);
dd                288 drivers/infiniband/hw/qib/qib_sd7220.c 		qib_sd7220_clr_ibpar(dd);
dd                289 drivers/infiniband/hw/qib/qib_sd7220.c 		val = qib_read_kreg64(dd, kr_hwerrstatus);
dd                291 drivers/infiniband/hw/qib/qib_sd7220.c 			qib_dev_err(dd, "IBUC Parity still set after RST\n");
dd                292 drivers/infiniband/hw/qib/qib_sd7220.c 			dd->cspec->hwerrmask &=
dd                295 drivers/infiniband/hw/qib/qib_sd7220.c 		qib_write_kreg(dd, kr_hwerrmask,
dd                296 drivers/infiniband/hw/qib/qib_sd7220.c 			dd->cspec->hwerrmask);
dd                303 drivers/infiniband/hw/qib/qib_sd7220.c static void qib_sd_trimdone_monitor(struct qib_devdata *dd,
dd                315 drivers/infiniband/hw/qib/qib_sd7220.c 	ret = qib_resync_ibepb(dd);
dd                317 drivers/infiniband/hw/qib/qib_sd7220.c 		qib_dev_err(dd, "not able to re-sync IB EPB (%s)\n", where);
dd                320 drivers/infiniband/hw/qib/qib_sd7220.c 	ret = qib_sd7220_reg_mod(dd, IB_7220_SERDES, IB_CTRL2(0), 0, 0);
dd                322 drivers/infiniband/hw/qib/qib_sd7220.c 		qib_dev_err(dd, "Failed TRIMDONE 1st read, (%s)\n", where);
dd                325 drivers/infiniband/hw/qib/qib_sd7220.c 	val = qib_read_kreg64(dd, kr_ibcstatus);
dd                327 drivers/infiniband/hw/qib/qib_sd7220.c 		qib_dev_err(dd, "IBCS TRIMDONE clear (%s)\n", where);
dd                334 drivers/infiniband/hw/qib/qib_sd7220.c 	ret = qib_sd7220_reg_mod(dd, IB_7220_SERDES, IB_MPREG6, 0x80, 0x80);
dd                336 drivers/infiniband/hw/qib/qib_sd7220.c 		qib_dev_err(dd, "Failed Dummy RMW, (%s)\n", where);
dd                343 drivers/infiniband/hw/qib/qib_sd7220.c 		ret = qib_sd7220_reg_mod(dd, IB_7220_SERDES,
dd                346 drivers/infiniband/hw/qib/qib_sd7220.c 			qib_dev_err(dd,
dd                354 drivers/infiniband/hw/qib/qib_sd7220.c 			qib_dev_err(dd,
dd                357 drivers/infiniband/hw/qib/qib_sd7220.c 			probe = qib_sd7220_reg_mod(dd, IB_7220_SERDES,
dd                359 drivers/infiniband/hw/qib/qib_sd7220.c 			qib_dev_err(dd, "probe is %d (%02X)\n",
dd                361 drivers/infiniband/hw/qib/qib_sd7220.c 			probe = qib_sd7220_reg_mod(dd, IB_7220_SERDES,
dd                363 drivers/infiniband/hw/qib/qib_sd7220.c 			qib_dev_err(dd, "re-read: %d (%02X)\n",
dd                365 drivers/infiniband/hw/qib/qib_sd7220.c 			ret = qib_sd7220_reg_mod(dd, IB_7220_SERDES,
dd                368 drivers/infiniband/hw/qib/qib_sd7220.c 				qib_dev_err(dd,
dd                375 drivers/infiniband/hw/qib/qib_sd7220.c 			qib_dev_err(dd,
dd                378 drivers/infiniband/hw/qib/qib_sd7220.c 			ret = qib_sd7220_reg_mod(dd, IB_7220_SERDES,
dd                381 drivers/infiniband/hw/qib/qib_sd7220.c 				qib_dev_err(dd,
dd                394 drivers/infiniband/hw/qib/qib_sd7220.c int qib_sd7220_init(struct qib_devdata *dd)
dd                401 drivers/infiniband/hw/qib/qib_sd7220.c 	was_reset = (qib_read_kreg64(dd, kr_ibserdesctrl) & 1);
dd                404 drivers/infiniband/hw/qib/qib_sd7220.c 		qib_ibsd_reset(dd, 1);
dd                405 drivers/infiniband/hw/qib/qib_sd7220.c 		qib_sd_trimdone_monitor(dd, "Driver-reload");
dd                408 drivers/infiniband/hw/qib/qib_sd7220.c 	ret = request_firmware(&fw, SD7220_FW_NAME, &dd->pcidev->dev);
dd                410 drivers/infiniband/hw/qib/qib_sd7220.c 		qib_dev_err(dd, "Failed to load IB SERDES image\n");
dd                415 drivers/infiniband/hw/qib/qib_sd7220.c 	ret = qib_ibsd_ucode_loaded(dd->pport, fw);
dd                424 drivers/infiniband/hw/qib/qib_sd7220.c 	ret = qib_sd_early(dd);
dd                426 drivers/infiniband/hw/qib/qib_sd7220.c 		qib_dev_err(dd, "Failed to set IB SERDES early defaults\n");
dd                435 drivers/infiniband/hw/qib/qib_sd7220.c 		ret = qib_sd_dactrim(dd);
dd                437 drivers/infiniband/hw/qib/qib_sd7220.c 			qib_dev_err(dd, "Failed IB SERDES DAC trim\n");
dd                447 drivers/infiniband/hw/qib/qib_sd7220.c 	ret = qib_internal_presets(dd);
dd                449 drivers/infiniband/hw/qib/qib_sd7220.c 		qib_dev_err(dd, "Failed to set IB SERDES presets\n");
dd                452 drivers/infiniband/hw/qib/qib_sd7220.c 	ret = qib_sd_trimself(dd, 0x80);
dd                454 drivers/infiniband/hw/qib/qib_sd7220.c 		qib_dev_err(dd, "Failed to set IB SERDES TRIMSELF\n");
dd                464 drivers/infiniband/hw/qib/qib_sd7220.c 		ret = qib_sd7220_ib_load(dd, fw);
dd                466 drivers/infiniband/hw/qib/qib_sd7220.c 			qib_dev_err(dd, "Failed to load IB SERDES image\n");
dd                470 drivers/infiniband/hw/qib/qib_sd7220.c 			vfy = qib_sd7220_ib_vfy(dd, fw);
dd                472 drivers/infiniband/hw/qib/qib_sd7220.c 				qib_dev_err(dd, "SERDES PRAM VFY failed\n");
dd                490 drivers/infiniband/hw/qib/qib_sd7220.c 		ret = ibsd_mod_allchnls(dd, START_EQ1(0), 0, 0x38);
dd                492 drivers/infiniband/hw/qib/qib_sd7220.c 			qib_dev_err(dd, "Failed clearing START_EQ1\n");
dd                496 drivers/infiniband/hw/qib/qib_sd7220.c 		qib_ibsd_reset(dd, 0);
dd                501 drivers/infiniband/hw/qib/qib_sd7220.c 		trim_done = qib_sd_trimdone_poll(dd);
dd                507 drivers/infiniband/hw/qib/qib_sd7220.c 		qib_ibsd_reset(dd, 1);
dd                510 drivers/infiniband/hw/qib/qib_sd7220.c 			qib_dev_err(dd, "No TRIMDONE seen\n");
dd                517 drivers/infiniband/hw/qib/qib_sd7220.c 		qib_sd_trimdone_monitor(dd, "First-reset");
dd                519 drivers/infiniband/hw/qib/qib_sd7220.c 		dd->cspec->serdes_first_init_done = 1;
dd                526 drivers/infiniband/hw/qib/qib_sd7220.c 	if (qib_sd_setvals(dd) >= 0)
dd                532 drivers/infiniband/hw/qib/qib_sd7220.c 	set_7220_relock_poll(dd, -1);
dd                552 drivers/infiniband/hw/qib/qib_sd7220.c static int epb_access(struct qib_devdata *dd, int sdnum, int claim)
dd                580 drivers/infiniband/hw/qib/qib_sd7220.c 	qib_read_kreg32(dd, kr_scratch);
dd                583 drivers/infiniband/hw/qib/qib_sd7220.c 	accval = qib_read_kreg32(dd, acc);
dd                595 drivers/infiniband/hw/qib/qib_sd7220.c 		qib_write_kreg(dd, acc, newval);
dd                597 drivers/infiniband/hw/qib/qib_sd7220.c 		pollval = qib_read_kreg32(dd, acc);
dd                599 drivers/infiniband/hw/qib/qib_sd7220.c 		pollval = qib_read_kreg32(dd, acc);
dd                607 drivers/infiniband/hw/qib/qib_sd7220.c 		qib_write_kreg(dd, acc, newval);
dd                609 drivers/infiniband/hw/qib/qib_sd7220.c 		pollval = qib_read_kreg32(dd, acc);
dd                611 drivers/infiniband/hw/qib/qib_sd7220.c 		pollval = qib_read_kreg32(dd, acc);
dd                621 drivers/infiniband/hw/qib/qib_sd7220.c static int epb_trans(struct qib_devdata *dd, u16 reg, u64 i_val, u64 *o_vp)
dd                626 drivers/infiniband/hw/qib/qib_sd7220.c 	qib_write_kreg(dd, reg, i_val);
dd                628 drivers/infiniband/hw/qib/qib_sd7220.c 	transval = qib_read_kreg64(dd, reg);
dd                631 drivers/infiniband/hw/qib/qib_sd7220.c 		transval = qib_read_kreg32(dd, reg);
dd                656 drivers/infiniband/hw/qib/qib_sd7220.c static int qib_sd7220_reg_mod(struct qib_devdata *dd, int sdnum, u32 loc,
dd                683 drivers/infiniband/hw/qib/qib_sd7220.c 	spin_lock_irqsave(&dd->cspec->sdepb_lock, flags);
dd                685 drivers/infiniband/hw/qib/qib_sd7220.c 	owned = epb_access(dd, sdnum, 1);
dd                687 drivers/infiniband/hw/qib/qib_sd7220.c 		spin_unlock_irqrestore(&dd->cspec->sdepb_lock, flags);
dd                692 drivers/infiniband/hw/qib/qib_sd7220.c 		transval = qib_read_kreg32(dd, trans);
dd                706 drivers/infiniband/hw/qib/qib_sd7220.c 			tries = epb_trans(dd, trans, transval, &transval);
dd                714 drivers/infiniband/hw/qib/qib_sd7220.c 			tries = epb_trans(dd, trans, transval, &transval);
dd                722 drivers/infiniband/hw/qib/qib_sd7220.c 	if (epb_access(dd, sdnum, -1) < 0)
dd                727 drivers/infiniband/hw/qib/qib_sd7220.c 	spin_unlock_irqrestore(&dd->cspec->sdepb_lock, flags);
dd                746 drivers/infiniband/hw/qib/qib_sd7220.c static int qib_sd7220_ram_xfer(struct qib_devdata *dd, int sdnum, u32 loc,
dd                777 drivers/infiniband/hw/qib/qib_sd7220.c 	spin_lock_irqsave(&dd->cspec->sdepb_lock, flags);
dd                779 drivers/infiniband/hw/qib/qib_sd7220.c 	owned = epb_access(dd, sdnum, 1);
dd                781 drivers/infiniband/hw/qib/qib_sd7220.c 		spin_unlock_irqrestore(&dd->cspec->sdepb_lock, flags);
dd                794 drivers/infiniband/hw/qib/qib_sd7220.c 		transval = qib_read_kreg32(dd, trans);
dd                811 drivers/infiniband/hw/qib/qib_sd7220.c 		tries = epb_trans(dd, trans, transval, &transval);
dd                818 drivers/infiniband/hw/qib/qib_sd7220.c 				tries = epb_trans(dd, trans, transval,
dd                824 drivers/infiniband/hw/qib/qib_sd7220.c 				tries = epb_trans(dd, trans, transval,
dd                834 drivers/infiniband/hw/qib/qib_sd7220.c 			tries = epb_trans(dd, trans, transval, &transval);
dd                843 drivers/infiniband/hw/qib/qib_sd7220.c 		tries = epb_trans(dd, trans, transval, &transval);
dd                848 drivers/infiniband/hw/qib/qib_sd7220.c 	if (epb_access(dd, sdnum, -1) < 0)
dd                851 drivers/infiniband/hw/qib/qib_sd7220.c 	spin_unlock_irqrestore(&dd->cspec->sdepb_lock, flags);
dd                859 drivers/infiniband/hw/qib/qib_sd7220.c static int qib_sd7220_prog_ld(struct qib_devdata *dd, int sdnum,
dd                869 drivers/infiniband/hw/qib/qib_sd7220.c 		cnt = qib_sd7220_ram_xfer(dd, sdnum, offset + sofar,
dd                883 drivers/infiniband/hw/qib/qib_sd7220.c static int qib_sd7220_prog_vfy(struct qib_devdata *dd, int sdnum,
dd                895 drivers/infiniband/hw/qib/qib_sd7220.c 		cnt = qib_sd7220_ram_xfer(dd, sdnum, sofar + offset,
dd                912 drivers/infiniband/hw/qib/qib_sd7220.c qib_sd7220_ib_load(struct qib_devdata *dd, const struct firmware *fw)
dd                914 drivers/infiniband/hw/qib/qib_sd7220.c 	return qib_sd7220_prog_ld(dd, IB_7220_SERDES, fw->data, fw->size, 0);
dd                918 drivers/infiniband/hw/qib/qib_sd7220.c qib_sd7220_ib_vfy(struct qib_devdata *dd, const struct firmware *fw)
dd                920 drivers/infiniband/hw/qib/qib_sd7220.c 	return qib_sd7220_prog_vfy(dd, IB_7220_SERDES, fw->data, fw->size, 0);
dd                929 drivers/infiniband/hw/qib/qib_sd7220.c static int qib_sd_trimdone_poll(struct qib_devdata *dd)
dd                940 drivers/infiniband/hw/qib/qib_sd7220.c 		val = qib_read_kreg64(dd, kr_ibcstatus);
dd                948 drivers/infiniband/hw/qib/qib_sd7220.c 		qib_dev_err(dd, "No TRIMDONE in %d tries\n", trim_tmo);
dd               1042 drivers/infiniband/hw/qib/qib_sd7220.c static int qib_sd_setvals(struct qib_devdata *dd)
dd               1051 drivers/infiniband/hw/qib/qib_sd7220.c 	taddr = dd->kregbase + kr_serdes_maptable;
dd               1052 drivers/infiniband/hw/qib/qib_sd7220.c 	iaddr = dd->kregbase + kr_serdes_ddsrxeq0;
dd               1059 drivers/infiniband/hw/qib/qib_sd7220.c 	sdctl = qib_read_kreg64(dd, kr_ibserdesctrl);
dd               1062 drivers/infiniband/hw/qib/qib_sd7220.c 	qib_write_kreg(dd, kr_ibserdesctrl, sdctl);
dd               1071 drivers/infiniband/hw/qib/qib_sd7220.c 		qib_read_kreg32(dd, kr_scratch);
dd               1078 drivers/infiniband/hw/qib/qib_sd7220.c 			qib_read_kreg32(dd, kr_scratch);
dd               1099 drivers/infiniband/hw/qib/qib_sd7220.c 		qib_read_kreg32(dd, kr_scratch);
dd               1104 drivers/infiniband/hw/qib/qib_sd7220.c 			qib_read_kreg32(dd, kr_scratch);
dd               1127 drivers/infiniband/hw/qib/qib_sd7220.c static int ibsd_mod_allchnls(struct qib_devdata *dd, int loc, int val,
dd               1143 drivers/infiniband/hw/qib/qib_sd7220.c 			ret = qib_sd7220_reg_mod(dd, IB_7220_SERDES,
dd               1148 drivers/infiniband/hw/qib/qib_sd7220.c 				qib_dev_err(dd,
dd               1157 drivers/infiniband/hw/qib/qib_sd7220.c 		ret = qib_sd7220_reg_mod(dd, IB_7220_SERDES, loc, val, 0xFF);
dd               1161 drivers/infiniband/hw/qib/qib_sd7220.c 			qib_dev_err(dd,
dd               1173 drivers/infiniband/hw/qib/qib_sd7220.c 		ret = qib_sd7220_reg_mod(dd, IB_7220_SERDES, cloc, val, mask);
dd               1177 drivers/infiniband/hw/qib/qib_sd7220.c 			qib_dev_err(dd,
dd               1191 drivers/infiniband/hw/qib/qib_sd7220.c static int set_dds_vals(struct qib_devdata *dd, struct dds_init *ddi)
dd               1203 drivers/infiniband/hw/qib/qib_sd7220.c 		ret = ibsd_mod_allchnls(dd, EPB_LOC(0, 9, reg), data, 0xFF);
dd               1214 drivers/infiniband/hw/qib/qib_sd7220.c static int set_rxeq_vals(struct qib_devdata *dd, int vsel)
dd               1228 drivers/infiniband/hw/qib/qib_sd7220.c 		ret = ibsd_mod_allchnls(dd, loc, val, 0xFF);
dd               1250 drivers/infiniband/hw/qib/qib_sd7220.c static int qib_internal_presets(struct qib_devdata *dd)
dd               1254 drivers/infiniband/hw/qib/qib_sd7220.c 	ret = set_dds_vals(dd, dds_init_vals + DDS_3M);
dd               1257 drivers/infiniband/hw/qib/qib_sd7220.c 		qib_dev_err(dd, "Failed to set default DDS values\n");
dd               1258 drivers/infiniband/hw/qib/qib_sd7220.c 	ret = set_rxeq_vals(dd, qib_rxeq_set & 3);
dd               1260 drivers/infiniband/hw/qib/qib_sd7220.c 		qib_dev_err(dd, "Failed to set default RXEQ values\n");
dd               1264 drivers/infiniband/hw/qib/qib_sd7220.c int qib_sd7220_presets(struct qib_devdata *dd)
dd               1268 drivers/infiniband/hw/qib/qib_sd7220.c 	if (!dd->cspec->presets_needed)
dd               1270 drivers/infiniband/hw/qib/qib_sd7220.c 	dd->cspec->presets_needed = 0;
dd               1272 drivers/infiniband/hw/qib/qib_sd7220.c 	qib_ibsd_reset(dd, 1);
dd               1274 drivers/infiniband/hw/qib/qib_sd7220.c 	qib_sd_trimdone_monitor(dd, "link-down");
dd               1276 drivers/infiniband/hw/qib/qib_sd7220.c 	ret = qib_internal_presets(dd);
dd               1280 drivers/infiniband/hw/qib/qib_sd7220.c static int qib_sd_trimself(struct qib_devdata *dd, int val)
dd               1284 drivers/infiniband/hw/qib/qib_sd7220.c 	return qib_sd7220_reg_mod(dd, IB_7220_SERDES, loc, val, 0xFF);
dd               1287 drivers/infiniband/hw/qib/qib_sd7220.c static int qib_sd_early(struct qib_devdata *dd)
dd               1291 drivers/infiniband/hw/qib/qib_sd7220.c 	ret = ibsd_mod_allchnls(dd, RXHSCTRL0(0) | EPB_GLOBAL_WR, 0xD4, 0xFF);
dd               1294 drivers/infiniband/hw/qib/qib_sd7220.c 	ret = ibsd_mod_allchnls(dd, START_EQ1(0) | EPB_GLOBAL_WR, 0x10, 0xFF);
dd               1297 drivers/infiniband/hw/qib/qib_sd7220.c 	ret = ibsd_mod_allchnls(dd, START_EQ2(0) | EPB_GLOBAL_WR, 0x30, 0xFF);
dd               1306 drivers/infiniband/hw/qib/qib_sd7220.c static int qib_sd_dactrim(struct qib_devdata *dd)
dd               1310 drivers/infiniband/hw/qib/qib_sd7220.c 	ret = ibsd_mod_allchnls(dd, VCDL_DAC2(0) | EPB_GLOBAL_WR, 0x2D, 0xFF);
dd               1315 drivers/infiniband/hw/qib/qib_sd7220.c 	ret = ibsd_mod_allchnls(dd, VCDL_CTRL2(0), 3, 0xF);
dd               1319 drivers/infiniband/hw/qib/qib_sd7220.c 	ret = ibsd_mod_allchnls(dd, BACTRL(0) | EPB_GLOBAL_WR, 0x40, 0xFF);
dd               1323 drivers/infiniband/hw/qib/qib_sd7220.c 	ret = ibsd_mod_allchnls(dd, LDOUTCTRL1(0) | EPB_GLOBAL_WR, 0x04, 0xFF);
dd               1327 drivers/infiniband/hw/qib/qib_sd7220.c 	ret = ibsd_mod_allchnls(dd, RXHSSTATUS(0) | EPB_GLOBAL_WR, 0x04, 0xFF);
dd               1337 drivers/infiniband/hw/qib/qib_sd7220.c 	ret = ibsd_mod_allchnls(dd, LDOUTCTRL1(0) | EPB_GLOBAL_WR, 0x00, 0xFF);
dd               1345 drivers/infiniband/hw/qib/qib_sd7220.c void toggle_7220_rclkrls(struct qib_devdata *dd)
dd               1350 drivers/infiniband/hw/qib/qib_sd7220.c 	ret = ibsd_mod_allchnls(dd, loc, 0, 0x80);
dd               1352 drivers/infiniband/hw/qib/qib_sd7220.c 		qib_dev_err(dd, "RCLKRLS failed to clear D7\n");
dd               1355 drivers/infiniband/hw/qib/qib_sd7220.c 		ibsd_mod_allchnls(dd, loc, 0x80, 0x80);
dd               1359 drivers/infiniband/hw/qib/qib_sd7220.c 	ret = ibsd_mod_allchnls(dd, loc, 0, 0x80);
dd               1361 drivers/infiniband/hw/qib/qib_sd7220.c 		qib_dev_err(dd, "RCLKRLS failed to clear D7\n");
dd               1364 drivers/infiniband/hw/qib/qib_sd7220.c 		ibsd_mod_allchnls(dd, loc, 0x80, 0x80);
dd               1367 drivers/infiniband/hw/qib/qib_sd7220.c 	dd->f_xgxs_reset(dd->pport);
dd               1376 drivers/infiniband/hw/qib/qib_sd7220.c void shutdown_7220_relock_poll(struct qib_devdata *dd)
dd               1378 drivers/infiniband/hw/qib/qib_sd7220.c 	if (dd->cspec->relock_timer_active)
dd               1379 drivers/infiniband/hw/qib/qib_sd7220.c 		del_timer_sync(&dd->cspec->relock_timer);
dd               1390 drivers/infiniband/hw/qib/qib_sd7220.c 	struct qib_devdata *dd = cs->dd;
dd               1391 drivers/infiniband/hw/qib/qib_sd7220.c 	struct qib_pportdata *ppd = dd->pport;
dd               1400 drivers/infiniband/hw/qib/qib_sd7220.c 	if ((dd->flags & QIB_INITTED) && !(ppd->lflags &
dd               1405 drivers/infiniband/hw/qib/qib_sd7220.c 				toggle_7220_rclkrls(dd);
dd               1417 drivers/infiniband/hw/qib/qib_sd7220.c void set_7220_relock_poll(struct qib_devdata *dd, int ibup)
dd               1419 drivers/infiniband/hw/qib/qib_sd7220.c 	struct qib_chip_specific *cs = dd->cspec;
dd                175 drivers/infiniband/hw/qib/qib_sdma.c 		ppd->dd->f_sendctrl(ppd, QIB_SENDCTRL_DISARM_BUF(bufno));
dd                177 drivers/infiniband/hw/qib/qib_sdma.c 	ppd->dd->f_sdma_hw_start_up(ppd);
dd                226 drivers/infiniband/hw/qib/qib_sdma.c 	ppd->dd->f_sdma_sendctrl(ppd, ss->current_op);
dd                241 drivers/infiniband/hw/qib/qib_sdma.c 	dma_unmap_single(&ppd->dd->pcidev->dev, addr, len, DMA_TO_DEVICE);
dd                251 drivers/infiniband/hw/qib/qib_sdma.c 	ppd->sdma_descq = dma_alloc_coherent(&ppd->dd->pcidev->dev,
dd                256 drivers/infiniband/hw/qib/qib_sdma.c 		qib_dev_err(ppd->dd,
dd                262 drivers/infiniband/hw/qib/qib_sdma.c 	ppd->sdma_head_dma = dma_alloc_coherent(&ppd->dd->pcidev->dev,
dd                265 drivers/infiniband/hw/qib/qib_sdma.c 		qib_dev_err(ppd->dd,
dd                273 drivers/infiniband/hw/qib/qib_sdma.c 	dma_free_coherent(&ppd->dd->pcidev->dev,
dd                285 drivers/infiniband/hw/qib/qib_sdma.c 	struct qib_devdata *dd = ppd->dd;
dd                288 drivers/infiniband/hw/qib/qib_sdma.c 		dma_free_coherent(&dd->pcidev->dev, PAGE_SIZE,
dd                296 drivers/infiniband/hw/qib/qib_sdma.c 		dma_free_coherent(&dd->pcidev->dev,
dd                328 drivers/infiniband/hw/qib/qib_sdma.c 	struct qib_devdata *dd = ppd->dd;
dd                333 drivers/infiniband/hw/qib/qib_sdma.c 	hwhead = dd->f_sdma_gethead(ppd);
dd                411 drivers/infiniband/hw/qib/qib_sdma.c 	struct qib_devdata *dd = ppd->dd;
dd                420 drivers/infiniband/hw/qib/qib_sdma.c 	ppd->dd->f_sdma_init_early(ppd);
dd                442 drivers/infiniband/hw/qib/qib_sdma.c 	ret = dd->f_init_sdma_regs(ppd);
dd                537 drivers/infiniband/hw/qib/qib_sdma.c 		if (ppd->dd->flags & QIB_HAS_SDMA_TIMEOUT)
dd                538 drivers/infiniband/hw/qib/qib_sdma.c 			ppd->dd->f_sdma_set_desc_cnt(ppd,
dd                571 drivers/infiniband/hw/qib/qib_sdma.c 		addr = dma_map_single(&ppd->dd->pcidev->dev, sge->vaddr,
dd                573 drivers/infiniband/hw/qib/qib_sdma.c 		if (dma_mapping_error(&ppd->dd->pcidev->dev, addr)) {
dd                608 drivers/infiniband/hw/qib/qib_sdma.c 	ppd->dd->f_sdma_update_tail(ppd, tail);
dd                654 drivers/infiniband/hw/qib/qib_sdma.c 		dev = &ppd->dd->verbs_dev;
dd                695 drivers/infiniband/hw/qib/qib_sdma.c 	qib_dev_porterr(ppd->dd, ppd->port,
dd                697 drivers/infiniband/hw/qib/qib_sdma.c 	qib_dev_porterr(ppd->dd, ppd->port,
dd                699 drivers/infiniband/hw/qib/qib_sdma.c 	qib_dev_porterr(ppd->dd, ppd->port,
dd                718 drivers/infiniband/hw/qib/qib_sdma.c 		qib_dev_porterr(ppd->dd, ppd->port,
dd                728 drivers/infiniband/hw/qib/qib_sdma.c 		qib_dev_porterr(ppd->dd, ppd->port,
dd                945 drivers/infiniband/hw/qib/qib_sdma.c 			ppd->dd->f_sdma_hw_clean_up(ppd);
dd                 45 drivers/infiniband/hw/qib/qib_sysfs.c 	struct qib_devdata *dd = ppd->dd;
dd                 48 drivers/infiniband/hw/qib/qib_sysfs.c 	ret = dd->f_get_ib_cfg(ppd, QIB_IB_CFG_HRTBT);
dd                 56 drivers/infiniband/hw/qib/qib_sysfs.c 	struct qib_devdata *dd = ppd->dd;
dd                 62 drivers/infiniband/hw/qib/qib_sysfs.c 		qib_dev_err(dd, "attempt to set invalid Heartbeat enable\n");
dd                 73 drivers/infiniband/hw/qib/qib_sysfs.c 	ret = dd->f_set_ib_cfg(ppd, QIB_IB_CFG_HRTBT, val);
dd                 80 drivers/infiniband/hw/qib/qib_sysfs.c 	struct qib_devdata *dd = ppd->dd;
dd                 83 drivers/infiniband/hw/qib/qib_sysfs.c 	r = dd->f_set_ib_loopback(ppd, buf);
dd                 93 drivers/infiniband/hw/qib/qib_sysfs.c 	struct qib_devdata *dd = ppd->dd;
dd                 99 drivers/infiniband/hw/qib/qib_sysfs.c 		qib_dev_err(dd, "attempt to set invalid LED override\n");
dd                480 drivers/infiniband/hw/qib/qib_sysfs.c 	struct qib_devdata *dd = ppd->dd;				\
dd                486 drivers/infiniband/hw/qib/qib_sysfs.c 		qib_dev_err(dd, "Per CPU cntrs can only be zeroed");	\
dd                577 drivers/infiniband/hw/qib/qib_sysfs.c 	struct qib_devdata *dd = dd_from_dev(dev);
dd                580 drivers/infiniband/hw/qib/qib_sysfs.c 	if (!dd->boardname)
dd                583 drivers/infiniband/hw/qib/qib_sysfs.c 		ret = scnprintf(buf, PAGE_SIZE, "%s\n", dd->boardname);
dd                602 drivers/infiniband/hw/qib/qib_sysfs.c 	struct qib_devdata *dd = dd_from_dev(dev);
dd                605 drivers/infiniband/hw/qib/qib_sysfs.c 	return scnprintf(buf, PAGE_SIZE, "%s", dd->boardversion);
dd                614 drivers/infiniband/hw/qib/qib_sysfs.c 	struct qib_devdata *dd = dd_from_dev(dev);
dd                617 drivers/infiniband/hw/qib/qib_sysfs.c 	return scnprintf(buf, PAGE_SIZE, "%s", dd->lbus_info);
dd                626 drivers/infiniband/hw/qib/qib_sysfs.c 	struct qib_devdata *dd = dd_from_dev(dev);
dd                632 drivers/infiniband/hw/qib/qib_sysfs.c 			(dd->first_user_ctxt > dd->cfgctxts) ? 0 :
dd                633 drivers/infiniband/hw/qib/qib_sysfs.c 			(dd->cfgctxts - dd->first_user_ctxt));
dd                642 drivers/infiniband/hw/qib/qib_sysfs.c 	struct qib_devdata *dd = dd_from_dev(dev);
dd                645 drivers/infiniband/hw/qib/qib_sysfs.c 	return scnprintf(buf, PAGE_SIZE, "%u\n", dd->freectxts);
dd                654 drivers/infiniband/hw/qib/qib_sysfs.c 	struct qib_devdata *dd = dd_from_dev(dev);
dd                656 drivers/infiniband/hw/qib/qib_sysfs.c 	buf[sizeof(dd->serial)] = '\0';
dd                657 drivers/infiniband/hw/qib/qib_sysfs.c 	memcpy(buf, dd->serial, sizeof(dd->serial));
dd                669 drivers/infiniband/hw/qib/qib_sysfs.c 	struct qib_devdata *dd = dd_from_dev(dev);
dd                672 drivers/infiniband/hw/qib/qib_sysfs.c 	if (count < 5 || memcmp(buf, "reset", 5) || !dd->diag_client) {
dd                677 drivers/infiniband/hw/qib/qib_sysfs.c 	ret = qib_reset_device(dd->unit);
dd                691 drivers/infiniband/hw/qib/qib_sysfs.c 	struct qib_devdata *dd = dd_from_dev(dev);
dd                700 drivers/infiniband/hw/qib/qib_sysfs.c 		ret = dd->f_tempsense_rd(dd, idx);
dd                745 drivers/infiniband/hw/qib/qib_sysfs.c 	struct qib_devdata *dd = dd_from_ibdev(ibdev);
dd                748 drivers/infiniband/hw/qib/qib_sysfs.c 	if (!port_num || port_num > dd->num_pports) {
dd                749 drivers/infiniband/hw/qib/qib_sysfs.c 		qib_dev_err(dd,
dd                755 drivers/infiniband/hw/qib/qib_sysfs.c 	ppd = &dd->pport[port_num - 1];
dd                760 drivers/infiniband/hw/qib/qib_sysfs.c 		qib_dev_err(dd,
dd                770 drivers/infiniband/hw/qib/qib_sysfs.c 		qib_dev_err(dd,
dd                780 drivers/infiniband/hw/qib/qib_sysfs.c 		qib_dev_err(dd,
dd                793 drivers/infiniband/hw/qib/qib_sysfs.c 		qib_dev_err(dd,
dd                804 drivers/infiniband/hw/qib/qib_sysfs.c 		qib_dev_err(dd,
dd                813 drivers/infiniband/hw/qib/qib_sysfs.c 		qib_dev_err(dd,
dd                819 drivers/infiniband/hw/qib/qib_sysfs.c 	qib_devinfo(dd->pcidev,
dd                821 drivers/infiniband/hw/qib/qib_sysfs.c 		dd->unit, port_num);
dd                842 drivers/infiniband/hw/qib/qib_sysfs.c void qib_verbs_unregister_sysfs(struct qib_devdata *dd)
dd                847 drivers/infiniband/hw/qib/qib_sysfs.c 	for (i = 0; i < dd->num_pports; i++) {
dd                848 drivers/infiniband/hw/qib/qib_sysfs.c 		ppd = &dd->pport[i];
dd                 67 drivers/infiniband/hw/qib/qib_twsi.c static void i2c_wait_for_writes(struct qib_devdata *dd)
dd                 74 drivers/infiniband/hw/qib/qib_twsi.c 	dd->f_gpio_mod(dd, 0, 0, 0);
dd                 89 drivers/infiniband/hw/qib/qib_twsi.c static void scl_out(struct qib_devdata *dd, u8 bit)
dd                 95 drivers/infiniband/hw/qib/qib_twsi.c 	mask = 1UL << dd->gpio_scl_num;
dd                 98 drivers/infiniband/hw/qib/qib_twsi.c 	dd->f_gpio_mod(dd, 0, bit ? 0 : mask, mask);
dd                110 drivers/infiniband/hw/qib/qib_twsi.c 			if (mask & dd->f_gpio_mod(dd, 0, 0, 0))
dd                115 drivers/infiniband/hw/qib/qib_twsi.c 			qib_dev_err(dd, "SCL interface stuck low > %d uSec\n",
dd                118 drivers/infiniband/hw/qib/qib_twsi.c 	i2c_wait_for_writes(dd);
dd                121 drivers/infiniband/hw/qib/qib_twsi.c static void sda_out(struct qib_devdata *dd, u8 bit)
dd                125 drivers/infiniband/hw/qib/qib_twsi.c 	mask = 1UL << dd->gpio_sda_num;
dd                128 drivers/infiniband/hw/qib/qib_twsi.c 	dd->f_gpio_mod(dd, 0, bit ? 0 : mask, mask);
dd                130 drivers/infiniband/hw/qib/qib_twsi.c 	i2c_wait_for_writes(dd);
dd                134 drivers/infiniband/hw/qib/qib_twsi.c static u8 sda_in(struct qib_devdata *dd, int wait)
dd                139 drivers/infiniband/hw/qib/qib_twsi.c 	bnum = dd->gpio_sda_num;
dd                142 drivers/infiniband/hw/qib/qib_twsi.c 	dd->f_gpio_mod(dd, 0, 0, mask);
dd                143 drivers/infiniband/hw/qib/qib_twsi.c 	read_val = dd->f_gpio_mod(dd, 0, 0, 0);
dd                145 drivers/infiniband/hw/qib/qib_twsi.c 		i2c_wait_for_writes(dd);
dd                153 drivers/infiniband/hw/qib/qib_twsi.c static int i2c_ackrcv(struct qib_devdata *dd)
dd                159 drivers/infiniband/hw/qib/qib_twsi.c 	ack_received = sda_in(dd, 1);
dd                160 drivers/infiniband/hw/qib/qib_twsi.c 	scl_out(dd, 1);
dd                161 drivers/infiniband/hw/qib/qib_twsi.c 	ack_received = sda_in(dd, 1) == 0;
dd                162 drivers/infiniband/hw/qib/qib_twsi.c 	scl_out(dd, 0);
dd                166 drivers/infiniband/hw/qib/qib_twsi.c static void stop_cmd(struct qib_devdata *dd);
dd                174 drivers/infiniband/hw/qib/qib_twsi.c static int rd_byte(struct qib_devdata *dd, int last)
dd                182 drivers/infiniband/hw/qib/qib_twsi.c 		scl_out(dd, 1);
dd                183 drivers/infiniband/hw/qib/qib_twsi.c 		data |= sda_in(dd, 0);
dd                184 drivers/infiniband/hw/qib/qib_twsi.c 		scl_out(dd, 0);
dd                187 drivers/infiniband/hw/qib/qib_twsi.c 		scl_out(dd, 1);
dd                188 drivers/infiniband/hw/qib/qib_twsi.c 		stop_cmd(dd);
dd                190 drivers/infiniband/hw/qib/qib_twsi.c 		sda_out(dd, 0);
dd                191 drivers/infiniband/hw/qib/qib_twsi.c 		scl_out(dd, 1);
dd                192 drivers/infiniband/hw/qib/qib_twsi.c 		scl_out(dd, 0);
dd                193 drivers/infiniband/hw/qib/qib_twsi.c 		sda_out(dd, 1);
dd                205 drivers/infiniband/hw/qib/qib_twsi.c static int wr_byte(struct qib_devdata *dd, u8 data)
dd                212 drivers/infiniband/hw/qib/qib_twsi.c 		sda_out(dd, bit);
dd                213 drivers/infiniband/hw/qib/qib_twsi.c 		scl_out(dd, 1);
dd                214 drivers/infiniband/hw/qib/qib_twsi.c 		scl_out(dd, 0);
dd                216 drivers/infiniband/hw/qib/qib_twsi.c 	return (!i2c_ackrcv(dd)) ? 1 : 0;
dd                223 drivers/infiniband/hw/qib/qib_twsi.c static void start_seq(struct qib_devdata *dd)
dd                225 drivers/infiniband/hw/qib/qib_twsi.c 	sda_out(dd, 1);
dd                226 drivers/infiniband/hw/qib/qib_twsi.c 	scl_out(dd, 1);
dd                227 drivers/infiniband/hw/qib/qib_twsi.c 	sda_out(dd, 0);
dd                229 drivers/infiniband/hw/qib/qib_twsi.c 	scl_out(dd, 0);
dd                238 drivers/infiniband/hw/qib/qib_twsi.c static void stop_seq(struct qib_devdata *dd)
dd                240 drivers/infiniband/hw/qib/qib_twsi.c 	scl_out(dd, 0);
dd                241 drivers/infiniband/hw/qib/qib_twsi.c 	sda_out(dd, 0);
dd                242 drivers/infiniband/hw/qib/qib_twsi.c 	scl_out(dd, 1);
dd                243 drivers/infiniband/hw/qib/qib_twsi.c 	sda_out(dd, 1);
dd                252 drivers/infiniband/hw/qib/qib_twsi.c static void stop_cmd(struct qib_devdata *dd)
dd                254 drivers/infiniband/hw/qib/qib_twsi.c 	stop_seq(dd);
dd                263 drivers/infiniband/hw/qib/qib_twsi.c int qib_twsi_reset(struct qib_devdata *dd)
dd                272 drivers/infiniband/hw/qib/qib_twsi.c 	mask = (1UL << dd->gpio_scl_num) | (1UL << dd->gpio_sda_num);
dd                279 drivers/infiniband/hw/qib/qib_twsi.c 	dd->f_gpio_mod(dd, 0, 0, mask);
dd                292 drivers/infiniband/hw/qib/qib_twsi.c 		scl_out(dd, 0);
dd                293 drivers/infiniband/hw/qib/qib_twsi.c 		scl_out(dd, 1);
dd                295 drivers/infiniband/hw/qib/qib_twsi.c 		was_high |= sda_in(dd, 0);
dd                304 drivers/infiniband/hw/qib/qib_twsi.c 		pins = dd->f_gpio_mod(dd, 0, 0, 0);
dd                306 drivers/infiniband/hw/qib/qib_twsi.c 			qib_dev_err(dd, "GPIO pins not at rest: %d\n",
dd                310 drivers/infiniband/hw/qib/qib_twsi.c 		sda_out(dd, 0);
dd                313 drivers/infiniband/hw/qib/qib_twsi.c 		sda_out(dd, 1);
dd                327 drivers/infiniband/hw/qib/qib_twsi.c static int qib_twsi_wr(struct qib_devdata *dd, int data, int flags)
dd                332 drivers/infiniband/hw/qib/qib_twsi.c 		start_seq(dd);
dd                334 drivers/infiniband/hw/qib/qib_twsi.c 	ret = wr_byte(dd, data); /* Leaves SCL low (from i2c_ackrcv()) */
dd                337 drivers/infiniband/hw/qib/qib_twsi.c 		stop_cmd(dd);
dd                356 drivers/infiniband/hw/qib/qib_twsi.c int qib_twsi_blk_rd(struct qib_devdata *dd, int dev, int addr,
dd                367 drivers/infiniband/hw/qib/qib_twsi.c 		ret = qib_twsi_wr(dd, addr, QIB_TWSI_START);
dd                370 drivers/infiniband/hw/qib/qib_twsi.c 		ret = qib_twsi_wr(dd, dev | WRITE_CMD, QIB_TWSI_START);
dd                372 drivers/infiniband/hw/qib/qib_twsi.c 			stop_cmd(dd);
dd                383 drivers/infiniband/hw/qib/qib_twsi.c 		ret = qib_twsi_wr(dd, addr, 0);
dd                387 drivers/infiniband/hw/qib/qib_twsi.c 			qib_dev_err(dd,
dd                393 drivers/infiniband/hw/qib/qib_twsi.c 		ret = qib_twsi_wr(dd, dev | READ_CMD, QIB_TWSI_START);
dd                396 drivers/infiniband/hw/qib/qib_twsi.c 		stop_cmd(dd);
dd                413 drivers/infiniband/hw/qib/qib_twsi.c 		*bp++ = rd_byte(dd, !len);
dd                434 drivers/infiniband/hw/qib/qib_twsi.c int qib_twsi_blk_wr(struct qib_devdata *dd, int dev, int addr,
dd                444 drivers/infiniband/hw/qib/qib_twsi.c 			if (qib_twsi_wr(dd, (addr << 1) | WRITE_CMD,
dd                450 drivers/infiniband/hw/qib/qib_twsi.c 			if (qib_twsi_wr(dd, dev | WRITE_CMD, QIB_TWSI_START))
dd                452 drivers/infiniband/hw/qib/qib_twsi.c 			ret = qib_twsi_wr(dd, addr, 0);
dd                454 drivers/infiniband/hw/qib/qib_twsi.c 				qib_dev_err(dd,
dd                466 drivers/infiniband/hw/qib/qib_twsi.c 			if (qib_twsi_wr(dd, *bp++, 0))
dd                469 drivers/infiniband/hw/qib/qib_twsi.c 		stop_cmd(dd);
dd                483 drivers/infiniband/hw/qib/qib_twsi.c 		while (qib_twsi_wr(dd, dev | READ_CMD, QIB_TWSI_START)) {
dd                484 drivers/infiniband/hw/qib/qib_twsi.c 			stop_cmd(dd);
dd                489 drivers/infiniband/hw/qib/qib_twsi.c 		rd_byte(dd, 1);
dd                496 drivers/infiniband/hw/qib/qib_twsi.c 	stop_cmd(dd);
dd                 61 drivers/infiniband/hw/qib/qib_tx.c void qib_disarm_piobufs(struct qib_devdata *dd, unsigned first, unsigned cnt)
dd                 68 drivers/infiniband/hw/qib/qib_tx.c 	spin_lock_irqsave(&dd->pioavail_lock, flags);
dd                 70 drivers/infiniband/hw/qib/qib_tx.c 		__clear_bit(i, dd->pio_need_disarm);
dd                 71 drivers/infiniband/hw/qib/qib_tx.c 		dd->f_sendctrl(dd->pport, QIB_SENDCTRL_DISARM_BUF(i));
dd                 73 drivers/infiniband/hw/qib/qib_tx.c 	spin_unlock_irqrestore(&dd->pioavail_lock, flags);
dd                 82 drivers/infiniband/hw/qib/qib_tx.c 	struct qib_devdata *dd = rcd->dd;
dd                103 drivers/infiniband/hw/qib/qib_tx.c 	spin_lock_irq(&dd->pioavail_lock);
dd                105 drivers/infiniband/hw/qib/qib_tx.c 		if (__test_and_clear_bit(i, dd->pio_need_disarm)) {
dd                107 drivers/infiniband/hw/qib/qib_tx.c 			dd->f_sendctrl(rcd->ppd, QIB_SENDCTRL_DISARM_BUF(i));
dd                110 drivers/infiniband/hw/qib/qib_tx.c 	spin_unlock_irq(&dd->pioavail_lock);
dd                114 drivers/infiniband/hw/qib/qib_tx.c static struct qib_pportdata *is_sdma_buf(struct qib_devdata *dd, unsigned i)
dd                119 drivers/infiniband/hw/qib/qib_tx.c 	for (pidx = 0; pidx < dd->num_pports; pidx++) {
dd                120 drivers/infiniband/hw/qib/qib_tx.c 		ppd = dd->pport + pidx;
dd                132 drivers/infiniband/hw/qib/qib_tx.c static int find_ctxt(struct qib_devdata *dd, unsigned bufn)
dd                138 drivers/infiniband/hw/qib/qib_tx.c 	spin_lock(&dd->uctxt_lock);
dd                139 drivers/infiniband/hw/qib/qib_tx.c 	for (ctxt = dd->first_user_ctxt; ctxt < dd->cfgctxts; ctxt++) {
dd                140 drivers/infiniband/hw/qib/qib_tx.c 		rcd = dd->rcd[ctxt];
dd                159 drivers/infiniband/hw/qib/qib_tx.c 	spin_unlock(&dd->uctxt_lock);
dd                171 drivers/infiniband/hw/qib/qib_tx.c void qib_disarm_piobufs_set(struct qib_devdata *dd, unsigned long *mask,
dd                178 drivers/infiniband/hw/qib/qib_tx.c 	for (i = 0; i < dd->num_pports; i++)
dd                188 drivers/infiniband/hw/qib/qib_tx.c 		ppd = is_sdma_buf(dd, i);
dd                197 drivers/infiniband/hw/qib/qib_tx.c 		spin_lock_irqsave(&dd->pioavail_lock, flags);
dd                198 drivers/infiniband/hw/qib/qib_tx.c 		if (test_bit(i, dd->pio_writing) ||
dd                199 drivers/infiniband/hw/qib/qib_tx.c 		    (!test_bit(i << 1, dd->pioavailkernel) &&
dd                200 drivers/infiniband/hw/qib/qib_tx.c 		     find_ctxt(dd, i))) {
dd                201 drivers/infiniband/hw/qib/qib_tx.c 			__set_bit(i, dd->pio_need_disarm);
dd                203 drivers/infiniband/hw/qib/qib_tx.c 			dd->f_sendctrl(dd->pport, QIB_SENDCTRL_DISARM_BUF(i));
dd                205 drivers/infiniband/hw/qib/qib_tx.c 		spin_unlock_irqrestore(&dd->pioavail_lock, flags);
dd                209 drivers/infiniband/hw/qib/qib_tx.c 	for (i = 0; i < dd->num_pports; i++)
dd                220 drivers/infiniband/hw/qib/qib_tx.c static void update_send_bufs(struct qib_devdata *dd)
dd                224 drivers/infiniband/hw/qib/qib_tx.c 	const unsigned piobregs = dd->pioavregs;
dd                244 drivers/infiniband/hw/qib/qib_tx.c 	if (!dd->pioavailregs_dma)
dd                246 drivers/infiniband/hw/qib/qib_tx.c 	spin_lock_irqsave(&dd->pioavail_lock, flags);
dd                250 drivers/infiniband/hw/qib/qib_tx.c 		piov = le64_to_cpu(dd->pioavailregs_dma[i]);
dd                251 drivers/infiniband/hw/qib/qib_tx.c 		pchg = dd->pioavailkernel[i] &
dd                252 drivers/infiniband/hw/qib/qib_tx.c 			~(dd->pioavailshadow[i] ^ piov);
dd                254 drivers/infiniband/hw/qib/qib_tx.c 		if (pchg && (pchbusy & dd->pioavailshadow[i])) {
dd                255 drivers/infiniband/hw/qib/qib_tx.c 			pnew = dd->pioavailshadow[i] & ~pchbusy;
dd                257 drivers/infiniband/hw/qib/qib_tx.c 			dd->pioavailshadow[i] = pnew;
dd                260 drivers/infiniband/hw/qib/qib_tx.c 	spin_unlock_irqrestore(&dd->pioavail_lock, flags);
dd                266 drivers/infiniband/hw/qib/qib_tx.c static noinline void no_send_bufs(struct qib_devdata *dd)
dd                268 drivers/infiniband/hw/qib/qib_tx.c 	dd->upd_pio_shadow = 1;
dd                281 drivers/infiniband/hw/qib/qib_tx.c u32 __iomem *qib_getsendbuf_range(struct qib_devdata *dd, u32 *pbufnum,
dd                287 drivers/infiniband/hw/qib/qib_tx.c 	unsigned long *shadow = dd->pioavailshadow;
dd                290 drivers/infiniband/hw/qib/qib_tx.c 	if (!(dd->flags & QIB_PRESENT))
dd                294 drivers/infiniband/hw/qib/qib_tx.c 	if (dd->upd_pio_shadow) {
dd                301 drivers/infiniband/hw/qib/qib_tx.c 		update_send_bufs(dd);
dd                310 drivers/infiniband/hw/qib/qib_tx.c 	spin_lock_irqsave(&dd->pioavail_lock, flags);
dd                311 drivers/infiniband/hw/qib/qib_tx.c 	if (dd->last_pio >= first && dd->last_pio <= last)
dd                312 drivers/infiniband/hw/qib/qib_tx.c 		i = dd->last_pio + 1;
dd                315 drivers/infiniband/hw/qib/qib_tx.c 		nbufs = last - dd->min_kernel_pio + 1;
dd                318 drivers/infiniband/hw/qib/qib_tx.c 			i = !first ? dd->min_kernel_pio : first;
dd                324 drivers/infiniband/hw/qib/qib_tx.c 		__set_bit(i, dd->pio_writing);
dd                326 drivers/infiniband/hw/qib/qib_tx.c 			dd->last_pio = i;
dd                329 drivers/infiniband/hw/qib/qib_tx.c 	spin_unlock_irqrestore(&dd->pioavail_lock, flags);
dd                338 drivers/infiniband/hw/qib/qib_tx.c 		no_send_bufs(dd);
dd                341 drivers/infiniband/hw/qib/qib_tx.c 		if (i < dd->piobcnt2k)
dd                342 drivers/infiniband/hw/qib/qib_tx.c 			buf = (u32 __iomem *)(dd->pio2kbase +
dd                343 drivers/infiniband/hw/qib/qib_tx.c 				i * dd->palign);
dd                344 drivers/infiniband/hw/qib/qib_tx.c 		else if (i < dd->piobcnt2k + dd->piobcnt4k || !dd->piovl15base)
dd                345 drivers/infiniband/hw/qib/qib_tx.c 			buf = (u32 __iomem *)(dd->pio4kbase +
dd                346 drivers/infiniband/hw/qib/qib_tx.c 				(i - dd->piobcnt2k) * dd->align4k);
dd                348 drivers/infiniband/hw/qib/qib_tx.c 			buf = (u32 __iomem *)(dd->piovl15base +
dd                349 drivers/infiniband/hw/qib/qib_tx.c 				(i - (dd->piobcnt2k + dd->piobcnt4k)) *
dd                350 drivers/infiniband/hw/qib/qib_tx.c 				dd->align4k);
dd                353 drivers/infiniband/hw/qib/qib_tx.c 		dd->upd_pio_shadow = 0;
dd                363 drivers/infiniband/hw/qib/qib_tx.c void qib_sendbuf_done(struct qib_devdata *dd, unsigned n)
dd                367 drivers/infiniband/hw/qib/qib_tx.c 	spin_lock_irqsave(&dd->pioavail_lock, flags);
dd                368 drivers/infiniband/hw/qib/qib_tx.c 	__clear_bit(n, dd->pio_writing);
dd                369 drivers/infiniband/hw/qib/qib_tx.c 	if (__test_and_clear_bit(n, dd->pio_need_disarm))
dd                370 drivers/infiniband/hw/qib/qib_tx.c 		dd->f_sendctrl(dd->pport, QIB_SENDCTRL_DISARM_BUF(n));
dd                371 drivers/infiniband/hw/qib/qib_tx.c 	spin_unlock_irqrestore(&dd->pioavail_lock, flags);
dd                381 drivers/infiniband/hw/qib/qib_tx.c void qib_chg_pioavailkernel(struct qib_devdata *dd, unsigned start,
dd                392 drivers/infiniband/hw/qib/qib_tx.c 	spin_lock_irqsave(&dd->pioavail_lock, flags);
dd                414 drivers/infiniband/hw/qib/qib_tx.c 				    dd->pioavailshadow);
dd                416 drivers/infiniband/hw/qib/qib_tx.c 				le64_to_cpu(dd->pioavailregs_dma[i]);
dd                420 drivers/infiniband/hw/qib/qib_tx.c 					  start, dd->pioavailshadow);
dd                423 drivers/infiniband/hw/qib/qib_tx.c 					    + start, dd->pioavailshadow);
dd                424 drivers/infiniband/hw/qib/qib_tx.c 			__set_bit(start, dd->pioavailkernel);
dd                425 drivers/infiniband/hw/qib/qib_tx.c 			if ((start >> 1) < dd->min_kernel_pio)
dd                426 drivers/infiniband/hw/qib/qib_tx.c 				dd->min_kernel_pio = start >> 1;
dd                429 drivers/infiniband/hw/qib/qib_tx.c 				  dd->pioavailshadow);
dd                430 drivers/infiniband/hw/qib/qib_tx.c 			__clear_bit(start, dd->pioavailkernel);
dd                431 drivers/infiniband/hw/qib/qib_tx.c 			if ((start >> 1) > dd->min_kernel_pio)
dd                432 drivers/infiniband/hw/qib/qib_tx.c 				dd->min_kernel_pio = start >> 1;
dd                437 drivers/infiniband/hw/qib/qib_tx.c 	if (dd->min_kernel_pio > 0 && dd->last_pio < dd->min_kernel_pio - 1)
dd                438 drivers/infiniband/hw/qib/qib_tx.c 		dd->last_pio = dd->min_kernel_pio - 1;
dd                439 drivers/infiniband/hw/qib/qib_tx.c 	spin_unlock_irqrestore(&dd->pioavail_lock, flags);
dd                441 drivers/infiniband/hw/qib/qib_tx.c 	dd->f_txchk_change(dd, ostart, len, avail, rcd);
dd                455 drivers/infiniband/hw/qib/qib_tx.c 	struct qib_devdata *dd = ppd->dd;
dd                470 drivers/infiniband/hw/qib/qib_tx.c 	for (ctxt = dd->first_user_ctxt; ctxt < dd->cfgctxts; ctxt++) {
dd                471 drivers/infiniband/hw/qib/qib_tx.c 		spin_lock_irqsave(&dd->uctxt_lock, flags);
dd                472 drivers/infiniband/hw/qib/qib_tx.c 		rcd = dd->rcd[ctxt];
dd                488 drivers/infiniband/hw/qib/qib_tx.c 			spin_unlock_irqrestore(&dd->uctxt_lock, flags);
dd                489 drivers/infiniband/hw/qib/qib_tx.c 			spin_lock_irqsave(&dd->pioavail_lock, flags);
dd                491 drivers/infiniband/hw/qib/qib_tx.c 				__set_bit(i, dd->pio_need_disarm);
dd                492 drivers/infiniband/hw/qib/qib_tx.c 			spin_unlock_irqrestore(&dd->pioavail_lock, flags);
dd                494 drivers/infiniband/hw/qib/qib_tx.c 			spin_unlock_irqrestore(&dd->uctxt_lock, flags);
dd                497 drivers/infiniband/hw/qib/qib_tx.c 	if (!(dd->flags & QIB_HAS_SEND_DMA))
dd                498 drivers/infiniband/hw/qib/qib_tx.c 		dd->f_sendctrl(ppd, QIB_SENDCTRL_DISARM_ALL |
dd                509 drivers/infiniband/hw/qib/qib_tx.c void qib_force_pio_avail_update(struct qib_devdata *dd)
dd                511 drivers/infiniband/hw/qib/qib_tx.c 	dd->f_sendctrl(dd->pport, QIB_SENDCTRL_AVAIL_BLIP);
dd                556 drivers/infiniband/hw/qib/qib_tx.c 	if (!(ppd->dd->flags & QIB_INITTED))
dd                 55 drivers/infiniband/hw/qib/qib_ud.c 	struct qib_devdata *dd = ppd->dd;
dd                 56 drivers/infiniband/hw/qib/qib_ud.c 	struct rvt_dev_info *rdi = &dd->verbs_dev.rdi;
dd                396 drivers/infiniband/hw/qib/qib_ud.c 	struct qib_devdata *dd = ppd->dd;
dd                402 drivers/infiniband/hw/qib/qib_ud.c 	for (i = 0; i < ARRAY_SIZE(dd->rcd[ctxt]->pkeys); ++i)
dd                403 drivers/infiniband/hw/qib/qib_ud.c 		if ((dd->rcd[ctxt]->pkeys[i] & 0x7fff) == pkey)
dd                295 drivers/infiniband/hw/qib/qib_user_sdma.c static int qib_user_sdma_page_to_frags(const struct qib_devdata *dd,
dd                309 drivers/infiniband/hw/qib/qib_user_sdma.c 		dma_map_page(&dd->pcidev->dev,
dd                313 drivers/infiniband/hw/qib/qib_user_sdma.c 	if (dma_mapping_error(&dd->pcidev->dev, dma_addr)) {
dd                384 drivers/infiniband/hw/qib/qib_user_sdma.c 				dma_map_single(&dd->pcidev->dev,
dd                388 drivers/infiniband/hw/qib/qib_user_sdma.c 			if (dma_mapping_error(&dd->pcidev->dev,
dd                475 drivers/infiniband/hw/qib/qib_user_sdma.c 			dma_map_single(&dd->pcidev->dev,
dd                479 drivers/infiniband/hw/qib/qib_user_sdma.c 		if (dma_mapping_error(&dd->pcidev->dev,
dd                557 drivers/infiniband/hw/qib/qib_user_sdma.c static int qib_user_sdma_coalesce(const struct qib_devdata *dd,
dd                591 drivers/infiniband/hw/qib/qib_user_sdma.c 	ret = qib_user_sdma_page_to_frags(dd, pq, pkt,
dd                658 drivers/infiniband/hw/qib/qib_user_sdma.c static int qib_user_sdma_pin_pages(const struct qib_devdata *dd,
dd                687 drivers/infiniband/hw/qib/qib_user_sdma.c 			ret = qib_user_sdma_page_to_frags(dd, pq, pkt,
dd                715 drivers/infiniband/hw/qib/qib_user_sdma.c static int qib_user_sdma_pin_pkt(const struct qib_devdata *dd,
dd                728 drivers/infiniband/hw/qib/qib_user_sdma.c 		ret = qib_user_sdma_pin_pages(dd, pq, pkt, addr,
dd                739 drivers/infiniband/hw/qib/qib_user_sdma.c 		qib_user_sdma_free_pkt_frag(&dd->pcidev->dev, pq, pkt, idx);
dd                745 drivers/infiniband/hw/qib/qib_user_sdma.c 		dma_unmap_single(&dd->pcidev->dev,
dd                757 drivers/infiniband/hw/qib/qib_user_sdma.c static int qib_user_sdma_init_payload(const struct qib_devdata *dd,
dd                767 drivers/infiniband/hw/qib/qib_user_sdma.c 		ret = qib_user_sdma_coalesce(dd, pq, pkt, iov, niov);
dd                769 drivers/infiniband/hw/qib/qib_user_sdma.c 		ret = qib_user_sdma_pin_pkt(dd, pq, pkt, iov, niov);
dd                802 drivers/infiniband/hw/qib/qib_user_sdma.c static int qib_user_sdma_queue_pkts(const struct qib_devdata *dd,
dd                984 drivers/infiniband/hw/qib/qib_user_sdma.c 			ret = qib_user_sdma_init_payload(dd, pq, pkt,
dd                999 drivers/infiniband/hw/qib/qib_user_sdma.c 				dma_addr = dma_map_single(&dd->pcidev->dev,
dd               1001 drivers/infiniband/hw/qib/qib_user_sdma.c 				if (dma_mapping_error(&dd->pcidev->dev,
dd               1035 drivers/infiniband/hw/qib/qib_user_sdma.c 	qib_user_sdma_free_pkt_list(&dd->pcidev->dev, pq, list);
dd               1050 drivers/infiniband/hw/qib/qib_user_sdma.c 	struct qib_devdata *dd = ppd->dd;
dd               1089 drivers/infiniband/hw/qib/qib_user_sdma.c 		qib_user_sdma_free_pkt_list(&dd->pcidev->dev, pq, &free_list);
dd               1128 drivers/infiniband/hw/qib/qib_user_sdma.c 	struct qib_devdata *dd = ppd->dd;
dd               1169 drivers/infiniband/hw/qib/qib_user_sdma.c 		qib_dev_err(dd, "user sdma lists not empty: forcing!\n");
dd               1173 drivers/infiniband/hw/qib/qib_user_sdma.c 		qib_user_sdma_free_pkt_list(&dd->pcidev->dev, pq, &free_list);
dd               1238 drivers/infiniband/hw/qib/qib_user_sdma.c 	struct qib_devdata *dd = ppd->dd;
dd               1280 drivers/infiniband/hw/qib/qib_user_sdma.c 			if (ofs > dd->piosize2kmax_dwords) {
dd               1313 drivers/infiniband/hw/qib/qib_user_sdma.c 		dd->f_sdma_update_tail(ppd, tail_c);
dd               1378 drivers/infiniband/hw/qib/qib_user_sdma.c 	struct qib_devdata *dd = rcd->dd;
dd               1403 drivers/infiniband/hw/qib/qib_user_sdma.c 		ret = qib_user_sdma_queue_pkts(dd, ppd, pq,
dd               1435 drivers/infiniband/hw/qib/qib_user_sdma.c 		qib_user_sdma_free_pkt_list(&dd->pcidev->dev, pq, &list);
dd                277 drivers/infiniband/hw/qib/qib_verbs.c 	struct qib_devdata *dd = ppd->dd;
dd                278 drivers/infiniband/hw/qib/qib_verbs.c 	struct rvt_dev_info *rdi = &dd->verbs_dev.rdi;
dd                677 drivers/infiniband/hw/qib/qib_verbs.c 	dev = &ppd->dd->verbs_dev;
dd                777 drivers/infiniband/hw/qib/qib_verbs.c 	struct qib_devdata *dd = dd_from_dev(dev);
dd                798 drivers/infiniband/hw/qib/qib_verbs.c 	control = dd->f_setpbc_control(ppd, plen, qp->s_srate,
dd                806 drivers/infiniband/hw/qib/qib_verbs.c 	if (dd->flags & QIB_HAS_SDMA_TIMEOUT)
dd                810 drivers/infiniband/hw/qib/qib_verbs.c 	if (plen + 1 > dd->piosize2kmax_dwords)
dd                847 drivers/infiniband/hw/qib/qib_verbs.c 	tx->txreq.addr = dma_map_single(&dd->pcidev->dev, phdr,
dd                849 drivers/infiniband/hw/qib/qib_verbs.c 	if (dma_mapping_error(&dd->pcidev->dev, tx->txreq.addr))
dd                879 drivers/infiniband/hw/qib/qib_verbs.c 	struct qib_devdata *dd;
dd                896 drivers/infiniband/hw/qib/qib_verbs.c 			dd = dd_from_dev(dev);
dd                897 drivers/infiniband/hw/qib/qib_verbs.c 			dd->f_wantpiobuf_intr(dd, 1);
dd                911 drivers/infiniband/hw/qib/qib_verbs.c 	struct qib_devdata *dd = dd_from_ibdev(qp->ibqp.device);
dd                912 drivers/infiniband/hw/qib/qib_verbs.c 	struct qib_pportdata *ppd = dd->pport + qp->port_num - 1;
dd                922 drivers/infiniband/hw/qib/qib_verbs.c 	control = dd->f_setpbc_control(ppd, plen, qp->s_srate,
dd                925 drivers/infiniband/hw/qib/qib_verbs.c 	piobuf = dd->f_getsendbuf(ppd, pbc, &pbufn);
dd                938 drivers/infiniband/hw/qib/qib_verbs.c 	flush_wc = dd->flags & QIB_PIO_FLUSH_WC;
dd                981 drivers/infiniband/hw/qib/qib_verbs.c 	if (dd->flags & QIB_USE_SPCL_TRIG) {
dd                982 drivers/infiniband/hw/qib/qib_verbs.c 		u32 spcl_off = (pbufn >= dd->piobcnt2k) ? 2047 : 1023;
dd                987 drivers/infiniband/hw/qib/qib_verbs.c 	qib_sendbuf_done(dd, pbufn);
dd               1018 drivers/infiniband/hw/qib/qib_verbs.c 	struct qib_devdata *dd = dd_from_ibdev(qp->ibqp.device);
dd               1035 drivers/infiniband/hw/qib/qib_verbs.c 	    !(dd->flags & QIB_HAS_SEND_DMA))
dd               1050 drivers/infiniband/hw/qib/qib_verbs.c 	struct qib_devdata *dd = ppd->dd;
dd               1052 drivers/infiniband/hw/qib/qib_verbs.c 	if (!(dd->flags & QIB_PRESENT)) {
dd               1057 drivers/infiniband/hw/qib/qib_verbs.c 	*swords = dd->f_portcntr(ppd, QIBPORTCNTR_WORDSEND);
dd               1058 drivers/infiniband/hw/qib/qib_verbs.c 	*rwords = dd->f_portcntr(ppd, QIBPORTCNTR_WORDRCV);
dd               1059 drivers/infiniband/hw/qib/qib_verbs.c 	*spkts = dd->f_portcntr(ppd, QIBPORTCNTR_PKTSEND);
dd               1060 drivers/infiniband/hw/qib/qib_verbs.c 	*rpkts = dd->f_portcntr(ppd, QIBPORTCNTR_PKTRCV);
dd               1061 drivers/infiniband/hw/qib/qib_verbs.c 	*xmit_wait = dd->f_portcntr(ppd, QIBPORTCNTR_SENDSTALL);
dd               1081 drivers/infiniband/hw/qib/qib_verbs.c 	if (!(ppd->dd->flags & QIB_PRESENT)) {
dd               1087 drivers/infiniband/hw/qib/qib_verbs.c 		ppd->dd->f_portcntr(ppd, QIBPORTCNTR_IBSYMBOLERR);
dd               1089 drivers/infiniband/hw/qib/qib_verbs.c 		ppd->dd->f_portcntr(ppd, QIBPORTCNTR_IBLINKERRRECOV);
dd               1096 drivers/infiniband/hw/qib/qib_verbs.c 		ppd->dd->f_portcntr(ppd, QIBPORTCNTR_IBLINKDOWN);
dd               1098 drivers/infiniband/hw/qib/qib_verbs.c 		ppd->dd->f_portcntr(ppd, QIBPORTCNTR_RXDROPPKT) +
dd               1099 drivers/infiniband/hw/qib/qib_verbs.c 		ppd->dd->f_portcntr(ppd, QIBPORTCNTR_RCVOVFL) +
dd               1100 drivers/infiniband/hw/qib/qib_verbs.c 		ppd->dd->f_portcntr(ppd, QIBPORTCNTR_ERR_RLEN) +
dd               1101 drivers/infiniband/hw/qib/qib_verbs.c 		ppd->dd->f_portcntr(ppd, QIBPORTCNTR_INVALIDRLEN) +
dd               1102 drivers/infiniband/hw/qib/qib_verbs.c 		ppd->dd->f_portcntr(ppd, QIBPORTCNTR_ERRLINK) +
dd               1103 drivers/infiniband/hw/qib/qib_verbs.c 		ppd->dd->f_portcntr(ppd, QIBPORTCNTR_ERRICRC) +
dd               1104 drivers/infiniband/hw/qib/qib_verbs.c 		ppd->dd->f_portcntr(ppd, QIBPORTCNTR_ERRVCRC) +
dd               1105 drivers/infiniband/hw/qib/qib_verbs.c 		ppd->dd->f_portcntr(ppd, QIBPORTCNTR_ERRLPCRC) +
dd               1106 drivers/infiniband/hw/qib/qib_verbs.c 		ppd->dd->f_portcntr(ppd, QIBPORTCNTR_BADFORMAT);
dd               1108 drivers/infiniband/hw/qib/qib_verbs.c 		ppd->dd->f_portcntr(ppd, QIBPORTCNTR_RXLOCALPHYERR);
dd               1110 drivers/infiniband/hw/qib/qib_verbs.c 		ppd->dd->f_portcntr(ppd, QIBPORTCNTR_RXVLERR);
dd               1112 drivers/infiniband/hw/qib/qib_verbs.c 		ppd->dd->f_portcntr(ppd, QIBPORTCNTR_RCVEBP);
dd               1114 drivers/infiniband/hw/qib/qib_verbs.c 		ppd->dd->f_portcntr(ppd, QIBPORTCNTR_UNSUPVL);
dd               1115 drivers/infiniband/hw/qib/qib_verbs.c 	cntrs->port_xmit_data = ppd->dd->f_portcntr(ppd,
dd               1117 drivers/infiniband/hw/qib/qib_verbs.c 	cntrs->port_rcv_data = ppd->dd->f_portcntr(ppd,
dd               1119 drivers/infiniband/hw/qib/qib_verbs.c 	cntrs->port_xmit_packets = ppd->dd->f_portcntr(ppd,
dd               1121 drivers/infiniband/hw/qib/qib_verbs.c 	cntrs->port_rcv_packets = ppd->dd->f_portcntr(ppd,
dd               1124 drivers/infiniband/hw/qib/qib_verbs.c 		ppd->dd->f_portcntr(ppd, QIBPORTCNTR_LLI);
dd               1126 drivers/infiniband/hw/qib/qib_verbs.c 		ppd->dd->f_portcntr(ppd, QIBPORTCNTR_EXCESSBUFOVFL);
dd               1128 drivers/infiniband/hw/qib/qib_verbs.c 		ppd->dd->f_portcntr(ppd, QIBPORTCNTR_VL15PKTDROP);
dd               1144 drivers/infiniband/hw/qib/qib_verbs.c void qib_ib_piobufavail(struct qib_devdata *dd)
dd               1146 drivers/infiniband/hw/qib/qib_verbs.c 	struct qib_ibdev *dev = &dd->verbs_dev;
dd               1173 drivers/infiniband/hw/qib/qib_verbs.c 	dd->f_wantpiobuf_intr(dd, 0);
dd               1196 drivers/infiniband/hw/qib/qib_verbs.c 	struct qib_devdata *dd = dd_from_dev(ibdev);
dd               1197 drivers/infiniband/hw/qib/qib_verbs.c 	struct qib_pportdata *ppd = &dd->pport[port_num - 1];
dd               1204 drivers/infiniband/hw/qib/qib_verbs.c 	props->state = dd->f_iblink_state(ppd->lastibcstat);
dd               1205 drivers/infiniband/hw/qib/qib_verbs.c 	props->phys_state = dd->f_ibphys_portstate(ppd->lastibcstat);
dd               1241 drivers/infiniband/hw/qib/qib_verbs.c 	struct qib_devdata *dd = dd_from_ibdev(device);
dd               1254 drivers/infiniband/hw/qib/qib_verbs.c 		for (i = 0; i < dd->num_pports; i++) {
dd               1255 drivers/infiniband/hw/qib/qib_verbs.c 			struct qib_ibport *ibp = &dd->pport[i].ibport_data;
dd               1264 drivers/infiniband/hw/qib/qib_verbs.c 		for (i = 0; i < dd->num_pports; i++) {
dd               1265 drivers/infiniband/hw/qib/qib_verbs.c 			struct qib_ibport *ibp = &dd->pport[i].ibport_data;
dd               1280 drivers/infiniband/hw/qib/qib_verbs.c 	struct qib_devdata *dd = dd_from_dev(ibdev);
dd               1281 drivers/infiniband/hw/qib/qib_verbs.c 	struct qib_pportdata *ppd = &dd->pport[port_num - 1];
dd               1345 drivers/infiniband/hw/qib/qib_verbs.c 	struct qib_devdata *dd = dd_from_ppd(ppd);
dd               1349 drivers/infiniband/hw/qib/qib_verbs.c 	attr.type = rdma_ah_find_type(&dd->verbs_dev.rdi.ibdev, port_num);
dd               1364 drivers/infiniband/hw/qib/qib_verbs.c unsigned qib_get_npkeys(struct qib_devdata *dd)
dd               1366 drivers/infiniband/hw/qib/qib_verbs.c 	return ARRAY_SIZE(dd->rcd[0]->pkeys);
dd               1376 drivers/infiniband/hw/qib/qib_verbs.c 	struct qib_devdata *dd = ppd->dd;
dd               1381 drivers/infiniband/hw/qib/qib_verbs.c 	if (!dd->rcd || index >= ARRAY_SIZE(dd->rcd[ctxt]->pkeys))
dd               1384 drivers/infiniband/hw/qib/qib_verbs.c 		ret = dd->rcd[ctxt]->pkeys[index];
dd               1403 drivers/infiniband/hw/qib/qib_verbs.c 	if (ppd->dd->flags & QIB_HAS_LINK_LATENCY)
dd               1437 drivers/infiniband/hw/qib/qib_verbs.c static void qib_fill_device_attr(struct qib_devdata *dd)
dd               1439 drivers/infiniband/hw/qib/qib_verbs.c 	struct rvt_dev_info *rdi = &dd->verbs_dev.rdi;
dd               1452 drivers/infiniband/hw/qib/qib_verbs.c 	rdi->dparms.props.vendor_part_id = dd->deviceid;
dd               1453 drivers/infiniband/hw/qib/qib_verbs.c 	rdi->dparms.props.hw_ver = dd->minrev;
dd               1471 drivers/infiniband/hw/qib/qib_verbs.c 	rdi->dparms.props.max_pkeys = qib_get_npkeys(dd);
dd               1478 drivers/infiniband/hw/qib/qib_verbs.c 	dd->verbs_dev.rdi.post_parms = qib_post_parms;
dd               1481 drivers/infiniband/hw/qib/qib_verbs.c 	dd->verbs_dev.rdi.wc_opcode = ib_qib_wc_opcode;
dd               1498 drivers/infiniband/hw/qib/qib_verbs.c int qib_register_ib_device(struct qib_devdata *dd)
dd               1500 drivers/infiniband/hw/qib/qib_verbs.c 	struct qib_ibdev *dev = &dd->verbs_dev;
dd               1502 drivers/infiniband/hw/qib/qib_verbs.c 	struct qib_pportdata *ppd = dd->pport;
dd               1507 drivers/infiniband/hw/qib/qib_verbs.c 	for (i = 0; i < dd->num_pports; i++)
dd               1520 drivers/infiniband/hw/qib/qib_verbs.c 		dev->pio_hdrs = dma_alloc_coherent(&dd->pcidev->dev,
dd               1552 drivers/infiniband/hw/qib/qib_verbs.c 	ibdev->phys_port_cnt = dd->num_pports;
dd               1553 drivers/infiniband/hw/qib/qib_verbs.c 	ibdev->dev.parent = &dd->pcidev->dev;
dd               1561 drivers/infiniband/hw/qib/qib_verbs.c 	dd->verbs_dev.rdi.driver_f.get_pci_dev = qib_get_pci_dev;
dd               1562 drivers/infiniband/hw/qib/qib_verbs.c 	dd->verbs_dev.rdi.driver_f.check_ah = qib_check_ah;
dd               1563 drivers/infiniband/hw/qib/qib_verbs.c 	dd->verbs_dev.rdi.driver_f.setup_wqe = qib_check_send_wqe;
dd               1564 drivers/infiniband/hw/qib/qib_verbs.c 	dd->verbs_dev.rdi.driver_f.notify_new_ah = qib_notify_new_ah;
dd               1565 drivers/infiniband/hw/qib/qib_verbs.c 	dd->verbs_dev.rdi.driver_f.alloc_qpn = qib_alloc_qpn;
dd               1566 drivers/infiniband/hw/qib/qib_verbs.c 	dd->verbs_dev.rdi.driver_f.qp_priv_alloc = qib_qp_priv_alloc;
dd               1567 drivers/infiniband/hw/qib/qib_verbs.c 	dd->verbs_dev.rdi.driver_f.qp_priv_free = qib_qp_priv_free;
dd               1568 drivers/infiniband/hw/qib/qib_verbs.c 	dd->verbs_dev.rdi.driver_f.free_all_qps = qib_free_all_qps;
dd               1569 drivers/infiniband/hw/qib/qib_verbs.c 	dd->verbs_dev.rdi.driver_f.notify_qp_reset = qib_notify_qp_reset;
dd               1570 drivers/infiniband/hw/qib/qib_verbs.c 	dd->verbs_dev.rdi.driver_f.do_send = qib_do_send;
dd               1571 drivers/infiniband/hw/qib/qib_verbs.c 	dd->verbs_dev.rdi.driver_f.schedule_send = qib_schedule_send;
dd               1572 drivers/infiniband/hw/qib/qib_verbs.c 	dd->verbs_dev.rdi.driver_f.quiesce_qp = qib_quiesce_qp;
dd               1573 drivers/infiniband/hw/qib/qib_verbs.c 	dd->verbs_dev.rdi.driver_f.stop_send_queue = qib_stop_send_queue;
dd               1574 drivers/infiniband/hw/qib/qib_verbs.c 	dd->verbs_dev.rdi.driver_f.flush_qp_waiters = qib_flush_qp_waiters;
dd               1575 drivers/infiniband/hw/qib/qib_verbs.c 	dd->verbs_dev.rdi.driver_f.notify_error_qp = qib_notify_error_qp;
dd               1576 drivers/infiniband/hw/qib/qib_verbs.c 	dd->verbs_dev.rdi.driver_f.notify_restart_rc = qib_restart_rc;
dd               1577 drivers/infiniband/hw/qib/qib_verbs.c 	dd->verbs_dev.rdi.driver_f.mtu_to_path_mtu = qib_mtu_to_path_mtu;
dd               1578 drivers/infiniband/hw/qib/qib_verbs.c 	dd->verbs_dev.rdi.driver_f.mtu_from_qp = qib_mtu_from_qp;
dd               1579 drivers/infiniband/hw/qib/qib_verbs.c 	dd->verbs_dev.rdi.driver_f.get_pmtu_from_attr = qib_get_pmtu_from_attr;
dd               1580 drivers/infiniband/hw/qib/qib_verbs.c 	dd->verbs_dev.rdi.driver_f.schedule_send_no_lock = _qib_schedule_send;
dd               1581 drivers/infiniband/hw/qib/qib_verbs.c 	dd->verbs_dev.rdi.driver_f.query_port_state = qib_query_port;
dd               1582 drivers/infiniband/hw/qib/qib_verbs.c 	dd->verbs_dev.rdi.driver_f.shut_down_port = qib_shut_down_port;
dd               1583 drivers/infiniband/hw/qib/qib_verbs.c 	dd->verbs_dev.rdi.driver_f.cap_mask_chg = qib_cap_mask_chg;
dd               1584 drivers/infiniband/hw/qib/qib_verbs.c 	dd->verbs_dev.rdi.driver_f.notify_create_mad_agent =
dd               1586 drivers/infiniband/hw/qib/qib_verbs.c 	dd->verbs_dev.rdi.driver_f.notify_free_mad_agent =
dd               1589 drivers/infiniband/hw/qib/qib_verbs.c 	dd->verbs_dev.rdi.dparms.max_rdma_atomic = QIB_MAX_RDMA_ATOMIC;
dd               1590 drivers/infiniband/hw/qib/qib_verbs.c 	dd->verbs_dev.rdi.driver_f.get_guid_be = qib_get_guid_be;
dd               1591 drivers/infiniband/hw/qib/qib_verbs.c 	dd->verbs_dev.rdi.dparms.lkey_table_size = qib_lkey_table_size;
dd               1592 drivers/infiniband/hw/qib/qib_verbs.c 	dd->verbs_dev.rdi.dparms.qp_table_size = ib_qib_qp_table_size;
dd               1593 drivers/infiniband/hw/qib/qib_verbs.c 	dd->verbs_dev.rdi.dparms.qpn_start = 1;
dd               1594 drivers/infiniband/hw/qib/qib_verbs.c 	dd->verbs_dev.rdi.dparms.qpn_res_start = QIB_KD_QP;
dd               1595 drivers/infiniband/hw/qib/qib_verbs.c 	dd->verbs_dev.rdi.dparms.qpn_res_end = QIB_KD_QP; /* Reserve one QP */
dd               1596 drivers/infiniband/hw/qib/qib_verbs.c 	dd->verbs_dev.rdi.dparms.qpn_inc = 1;
dd               1597 drivers/infiniband/hw/qib/qib_verbs.c 	dd->verbs_dev.rdi.dparms.qos_shift = 1;
dd               1598 drivers/infiniband/hw/qib/qib_verbs.c 	dd->verbs_dev.rdi.dparms.psn_mask = QIB_PSN_MASK;
dd               1599 drivers/infiniband/hw/qib/qib_verbs.c 	dd->verbs_dev.rdi.dparms.psn_shift = QIB_PSN_SHIFT;
dd               1600 drivers/infiniband/hw/qib/qib_verbs.c 	dd->verbs_dev.rdi.dparms.psn_modify_mask = QIB_PSN_MASK;
dd               1601 drivers/infiniband/hw/qib/qib_verbs.c 	dd->verbs_dev.rdi.dparms.nports = dd->num_pports;
dd               1602 drivers/infiniband/hw/qib/qib_verbs.c 	dd->verbs_dev.rdi.dparms.npkeys = qib_get_npkeys(dd);
dd               1603 drivers/infiniband/hw/qib/qib_verbs.c 	dd->verbs_dev.rdi.dparms.node = dd->assigned_node_id;
dd               1604 drivers/infiniband/hw/qib/qib_verbs.c 	dd->verbs_dev.rdi.dparms.core_cap_flags = RDMA_CORE_PORT_IBA_IB;
dd               1605 drivers/infiniband/hw/qib/qib_verbs.c 	dd->verbs_dev.rdi.dparms.max_mad_size = IB_MGMT_MAD_SIZE;
dd               1606 drivers/infiniband/hw/qib/qib_verbs.c 	dd->verbs_dev.rdi.dparms.sge_copy_mode = RVT_SGE_COPY_MEMCPY;
dd               1608 drivers/infiniband/hw/qib/qib_verbs.c 	qib_fill_device_attr(dd);
dd               1610 drivers/infiniband/hw/qib/qib_verbs.c 	ppd = dd->pport;
dd               1611 drivers/infiniband/hw/qib/qib_verbs.c 	for (i = 0; i < dd->num_pports; i++, ppd++) {
dd               1613 drivers/infiniband/hw/qib/qib_verbs.c 		rvt_init_port(&dd->verbs_dev.rdi,
dd               1616 drivers/infiniband/hw/qib/qib_verbs.c 			      dd->rcd[ctxt]->pkeys);
dd               1618 drivers/infiniband/hw/qib/qib_verbs.c 	rdma_set_device_sysfs_group(&dd->verbs_dev.rdi.ibdev, &qib_attr_group);
dd               1621 drivers/infiniband/hw/qib/qib_verbs.c 	ret = rvt_register_device(&dd->verbs_dev.rdi);
dd               1637 drivers/infiniband/hw/qib/qib_verbs.c 		dma_free_coherent(&dd->pcidev->dev,
dd               1642 drivers/infiniband/hw/qib/qib_verbs.c 	qib_dev_err(dd, "cannot register verbs: %d!\n", -ret);
dd               1646 drivers/infiniband/hw/qib/qib_verbs.c void qib_unregister_ib_device(struct qib_devdata *dd)
dd               1648 drivers/infiniband/hw/qib/qib_verbs.c 	struct qib_ibdev *dev = &dd->verbs_dev;
dd               1650 drivers/infiniband/hw/qib/qib_verbs.c 	qib_verbs_unregister_sysfs(dd);
dd               1652 drivers/infiniband/hw/qib/qib_verbs.c 	rvt_unregister_device(&dd->verbs_dev.rdi);
dd               1655 drivers/infiniband/hw/qib/qib_verbs.c 		qib_dev_err(dd, "piowait list not empty!\n");
dd               1657 drivers/infiniband/hw/qib/qib_verbs.c 		qib_dev_err(dd, "dmawait list not empty!\n");
dd               1659 drivers/infiniband/hw/qib/qib_verbs.c 		qib_dev_err(dd, "txwait list not empty!\n");
dd               1661 drivers/infiniband/hw/qib/qib_verbs.c 		qib_dev_err(dd, "memwait list not empty!\n");
dd               1672 drivers/infiniband/hw/qib/qib_verbs.c 	if (dd->pport->sdma_descq_cnt)
dd               1673 drivers/infiniband/hw/qib/qib_verbs.c 		dma_free_coherent(&dd->pcidev->dev,
dd               1674 drivers/infiniband/hw/qib/qib_verbs.c 				  dd->pport->sdma_descq_cnt *
dd                 46 drivers/infiniband/hw/qib/qib_wc_ppc64.c int qib_enable_wc(struct qib_devdata *dd)
dd                 53 drivers/infiniband/hw/qib/qib_wc_x86_64.c int qib_enable_wc(struct qib_devdata *dd)
dd                 58 drivers/infiniband/hw/qib/qib_wc_x86_64.c 	const unsigned long addr = pci_resource_start(dd->pcidev, 0);
dd                 59 drivers/infiniband/hw/qib/qib_wc_x86_64.c 	const size_t len = pci_resource_len(dd->pcidev, 0);
dd                 72 drivers/infiniband/hw/qib/qib_wc_x86_64.c 	if (dd->piobcnt2k && dd->piobcnt4k) {
dd                 76 drivers/infiniband/hw/qib/qib_wc_x86_64.c 		pio2kbase = dd->piobufbase & 0xffffffffUL;
dd                 77 drivers/infiniband/hw/qib/qib_wc_x86_64.c 		pio4kbase = (dd->piobufbase >> 32) & 0xffffffffUL;
dd                 82 drivers/infiniband/hw/qib/qib_wc_x86_64.c 				dd->piobcnt4k * dd->align4k;
dd                 86 drivers/infiniband/hw/qib/qib_wc_x86_64.c 				dd->piobcnt2k * dd->palign;
dd                 89 drivers/infiniband/hw/qib/qib_wc_x86_64.c 		pioaddr = addr + dd->piobufbase;
dd                 90 drivers/infiniband/hw/qib/qib_wc_x86_64.c 		piolen = dd->piobcnt2k * dd->palign +
dd                 91 drivers/infiniband/hw/qib/qib_wc_x86_64.c 			dd->piobcnt4k * dd->align4k;
dd                107 drivers/infiniband/hw/qib/qib_wc_x86_64.c 			qib_dev_err(dd,
dd                119 drivers/infiniband/hw/qib/qib_wc_x86_64.c 		dd->wc_cookie = arch_phys_wc_add(pioaddr, piolen);
dd                120 drivers/infiniband/hw/qib/qib_wc_x86_64.c 		if (dd->wc_cookie < 0)
dd                122 drivers/infiniband/hw/qib/qib_wc_x86_64.c 			ret = dd->wc_cookie;
dd                132 drivers/infiniband/hw/qib/qib_wc_x86_64.c void qib_disable_wc(struct qib_devdata *dd)
dd                134 drivers/infiniband/hw/qib/qib_wc_x86_64.c 	arch_phys_wc_del(dd->wc_cookie);
dd                 10 drivers/input/mouse/cypress_ps2.h #define ENCODE_CMD(aa, bb, cc, dd) \
dd                 11 drivers/input/mouse/cypress_ps2.h 	(COMPOSIT((aa), 6) | COMPOSIT((bb), 4) | COMPOSIT((cc), 2) | COMPOSIT((dd), 0))
dd                 61 drivers/ipack/devices/scc2698.h 		u8 dd, ip;   /* Input port register of block */
dd                 79 drivers/ipack/devices/scc2698.h 		u8 dd, opcr; /* Output port configuration register of block */
dd                652 drivers/irqchip/irq-stm32-exti.c stm32_exti_host_data *stm32_exti_host_init(const struct stm32_exti_drv_data *dd,
dd                661 drivers/irqchip/irq-stm32-exti.c 	host_data->drv_data = dd;
dd                662 drivers/irqchip/irq-stm32-exti.c 	host_data->chips_data = kcalloc(dd->bank_nr,
dd                 77 drivers/md/dm-dust.c static int dust_remove_block(struct dust_device *dd, unsigned long long block)
dd                 82 drivers/md/dm-dust.c 	spin_lock_irqsave(&dd->dust_lock, flags);
dd                 83 drivers/md/dm-dust.c 	bblock = dust_rb_search(&dd->badblocklist, block);
dd                 86 drivers/md/dm-dust.c 		if (!dd->quiet_mode) {
dd                 90 drivers/md/dm-dust.c 		spin_unlock_irqrestore(&dd->dust_lock, flags);
dd                 94 drivers/md/dm-dust.c 	rb_erase(&bblock->node, &dd->badblocklist);
dd                 95 drivers/md/dm-dust.c 	dd->badblock_count--;
dd                 96 drivers/md/dm-dust.c 	if (!dd->quiet_mode)
dd                 99 drivers/md/dm-dust.c 	spin_unlock_irqrestore(&dd->dust_lock, flags);
dd                104 drivers/md/dm-dust.c static int dust_add_block(struct dust_device *dd, unsigned long long block)
dd                111 drivers/md/dm-dust.c 		if (!dd->quiet_mode)
dd                116 drivers/md/dm-dust.c 	spin_lock_irqsave(&dd->dust_lock, flags);
dd                118 drivers/md/dm-dust.c 	if (!dust_rb_insert(&dd->badblocklist, bblock)) {
dd                119 drivers/md/dm-dust.c 		if (!dd->quiet_mode) {
dd                123 drivers/md/dm-dust.c 		spin_unlock_irqrestore(&dd->dust_lock, flags);
dd                128 drivers/md/dm-dust.c 	dd->badblock_count++;
dd                129 drivers/md/dm-dust.c 	if (!dd->quiet_mode)
dd                131 drivers/md/dm-dust.c 	spin_unlock_irqrestore(&dd->dust_lock, flags);
dd                136 drivers/md/dm-dust.c static int dust_query_block(struct dust_device *dd, unsigned long long block)
dd                141 drivers/md/dm-dust.c 	spin_lock_irqsave(&dd->dust_lock, flags);
dd                142 drivers/md/dm-dust.c 	bblock = dust_rb_search(&dd->badblocklist, block);
dd                147 drivers/md/dm-dust.c 	spin_unlock_irqrestore(&dd->dust_lock, flags);
dd                152 drivers/md/dm-dust.c static int __dust_map_read(struct dust_device *dd, sector_t thisblock)
dd                154 drivers/md/dm-dust.c 	struct badblock *bblk = dust_rb_search(&dd->badblocklist, thisblock);
dd                162 drivers/md/dm-dust.c static int dust_map_read(struct dust_device *dd, sector_t thisblock,
dd                169 drivers/md/dm-dust.c 		thisblock >>= dd->sect_per_block_shift;
dd                170 drivers/md/dm-dust.c 		spin_lock_irqsave(&dd->dust_lock, flags);
dd                171 drivers/md/dm-dust.c 		ret = __dust_map_read(dd, thisblock);
dd                172 drivers/md/dm-dust.c 		spin_unlock_irqrestore(&dd->dust_lock, flags);
dd                178 drivers/md/dm-dust.c static void __dust_map_write(struct dust_device *dd, sector_t thisblock)
dd                180 drivers/md/dm-dust.c 	struct badblock *bblk = dust_rb_search(&dd->badblocklist, thisblock);
dd                183 drivers/md/dm-dust.c 		rb_erase(&bblk->node, &dd->badblocklist);
dd                184 drivers/md/dm-dust.c 		dd->badblock_count--;
dd                186 drivers/md/dm-dust.c 		if (!dd->quiet_mode) {
dd                187 drivers/md/dm-dust.c 			sector_div(thisblock, dd->sect_per_block);
dd                194 drivers/md/dm-dust.c static int dust_map_write(struct dust_device *dd, sector_t thisblock,
dd                200 drivers/md/dm-dust.c 		thisblock >>= dd->sect_per_block_shift;
dd                201 drivers/md/dm-dust.c 		spin_lock_irqsave(&dd->dust_lock, flags);
dd                202 drivers/md/dm-dust.c 		__dust_map_write(dd, thisblock);
dd                203 drivers/md/dm-dust.c 		spin_unlock_irqrestore(&dd->dust_lock, flags);
dd                211 drivers/md/dm-dust.c 	struct dust_device *dd = ti->private;
dd                214 drivers/md/dm-dust.c 	bio_set_dev(bio, dd->dev->bdev);
dd                215 drivers/md/dm-dust.c 	bio->bi_iter.bi_sector = dd->start + dm_target_offset(ti, bio->bi_iter.bi_sector);
dd                218 drivers/md/dm-dust.c 		ret = dust_map_read(dd, bio->bi_iter.bi_sector, dd->fail_read_on_bb);
dd                220 drivers/md/dm-dust.c 		ret = dust_map_write(dd, bio->bi_iter.bi_sector, dd->fail_read_on_bb);
dd                249 drivers/md/dm-dust.c static int dust_clear_badblocks(struct dust_device *dd)
dd                255 drivers/md/dm-dust.c 	spin_lock_irqsave(&dd->dust_lock, flags);
dd                256 drivers/md/dm-dust.c 	badblocklist = dd->badblocklist;
dd                257 drivers/md/dm-dust.c 	badblock_count = dd->badblock_count;
dd                258 drivers/md/dm-dust.c 	dd->badblocklist = RB_ROOT;
dd                259 drivers/md/dm-dust.c 	dd->badblock_count = 0;
dd                260 drivers/md/dm-dust.c 	spin_unlock_irqrestore(&dd->dust_lock, flags);
dd                281 drivers/md/dm-dust.c 	struct dust_device *dd;
dd                321 drivers/md/dm-dust.c 	dd = kzalloc(sizeof(struct dust_device), GFP_KERNEL);
dd                322 drivers/md/dm-dust.c 	if (dd == NULL) {
dd                327 drivers/md/dm-dust.c 	if (dm_get_device(ti, argv[0], dm_table_get_mode(ti->table), &dd->dev)) {
dd                329 drivers/md/dm-dust.c 		kfree(dd);
dd                333 drivers/md/dm-dust.c 	dd->sect_per_block = sect_per_block;
dd                334 drivers/md/dm-dust.c 	dd->blksz = blksz;
dd                335 drivers/md/dm-dust.c 	dd->start = tmp;
dd                337 drivers/md/dm-dust.c 	dd->sect_per_block_shift = __ffs(sect_per_block);
dd                343 drivers/md/dm-dust.c 	dd->fail_read_on_bb = false;
dd                348 drivers/md/dm-dust.c 	dd->badblocklist = RB_ROOT;
dd                349 drivers/md/dm-dust.c 	dd->badblock_count = 0;
dd                350 drivers/md/dm-dust.c 	spin_lock_init(&dd->dust_lock);
dd                352 drivers/md/dm-dust.c 	dd->quiet_mode = false;
dd                354 drivers/md/dm-dust.c 	BUG_ON(dm_set_target_max_io_len(ti, dd->sect_per_block) != 0);
dd                358 drivers/md/dm-dust.c 	ti->private = dd;
dd                365 drivers/md/dm-dust.c 	struct dust_device *dd = ti->private;
dd                367 drivers/md/dm-dust.c 	__dust_clear_badblocks(&dd->badblocklist, dd->badblock_count);
dd                368 drivers/md/dm-dust.c 	dm_put_device(ti, dd->dev);
dd                369 drivers/md/dm-dust.c 	kfree(dd);
dd                375 drivers/md/dm-dust.c 	struct dust_device *dd = ti->private;
dd                376 drivers/md/dm-dust.c 	sector_t size = i_size_read(dd->dev->bdev->bd_inode) >> SECTOR_SHIFT;
dd                390 drivers/md/dm-dust.c 			dd->fail_read_on_bb = false;
dd                394 drivers/md/dm-dust.c 			dd->fail_read_on_bb = true;
dd                397 drivers/md/dm-dust.c 			spin_lock_irqsave(&dd->dust_lock, flags);
dd                399 drivers/md/dm-dust.c 			       dd->badblock_count);
dd                400 drivers/md/dm-dust.c 			spin_unlock_irqrestore(&dd->dust_lock, flags);
dd                403 drivers/md/dm-dust.c 			result = dust_clear_badblocks(dd);
dd                405 drivers/md/dm-dust.c 			if (!dd->quiet_mode)
dd                406 drivers/md/dm-dust.c 				dd->quiet_mode = true;
dd                408 drivers/md/dm-dust.c 				dd->quiet_mode = false;
dd                418 drivers/md/dm-dust.c 		sector_div(size, dd->sect_per_block);
dd                425 drivers/md/dm-dust.c 			result = dust_add_block(dd, block);
dd                427 drivers/md/dm-dust.c 			result = dust_remove_block(dd, block);
dd                429 drivers/md/dm-dust.c 			result = dust_query_block(dd, block);
dd                445 drivers/md/dm-dust.c 	struct dust_device *dd = ti->private;
dd                450 drivers/md/dm-dust.c 		DMEMIT("%s %s %s", dd->dev->name,
dd                451 drivers/md/dm-dust.c 		       dd->fail_read_on_bb ? "fail_read_on_bad_block" : "bypass",
dd                452 drivers/md/dm-dust.c 		       dd->quiet_mode ? "quiet" : "verbose");
dd                456 drivers/md/dm-dust.c 		DMEMIT("%s %llu %u", dd->dev->name,
dd                457 drivers/md/dm-dust.c 		       (unsigned long long)dd->start, dd->blksz);
dd                464 drivers/md/dm-dust.c 	struct dust_device *dd = ti->private;
dd                465 drivers/md/dm-dust.c 	struct dm_dev *dev = dd->dev;
dd                472 drivers/md/dm-dust.c 	if (dd->start ||
dd                482 drivers/md/dm-dust.c 	struct dust_device *dd = ti->private;
dd                484 drivers/md/dm-dust.c 	return fn(ti, dd->dev, dd->start, ti->len, data);
dd               1460 drivers/md/dm-ioctl.c 	struct dm_dev_internal *dd;
dd               1485 drivers/md/dm-ioctl.c 	list_for_each_entry (dd, dm_table_get_devices(table), list)
dd               1486 drivers/md/dm-ioctl.c 		deps->dev[count++] = huge_encode_dev(dd->dm_dev->bdev->bd_dev);
dd                222 drivers/md/dm-table.c 		struct dm_dev_internal *dd =
dd                225 drivers/md/dm-table.c 		       dm_device_name(md), dd->dm_dev->name);
dd                226 drivers/md/dm-table.c 		dm_put_table_device(md, dd->dm_dev);
dd                227 drivers/md/dm-table.c 		kfree(dd);
dd                267 drivers/md/dm-table.c 	struct dm_dev_internal *dd;
dd                269 drivers/md/dm-table.c 	list_for_each_entry (dd, l, list)
dd                270 drivers/md/dm-table.c 		if (dd->dm_dev->bdev->bd_dev == dev)
dd                271 drivers/md/dm-table.c 			return dd;
dd                383 drivers/md/dm-table.c static int upgrade_mode(struct dm_dev_internal *dd, fmode_t new_mode,
dd                389 drivers/md/dm-table.c 	old_dev = dd->dm_dev;
dd                391 drivers/md/dm-table.c 	r = dm_get_table_device(md, dd->dm_dev->bdev->bd_dev,
dd                392 drivers/md/dm-table.c 				dd->dm_dev->mode | new_mode, &new_dev);
dd                396 drivers/md/dm-table.c 	dd->dm_dev = new_dev;
dd                431 drivers/md/dm-table.c 	struct dm_dev_internal *dd;
dd                440 drivers/md/dm-table.c 	dd = find_device(&t->devices, dev);
dd                441 drivers/md/dm-table.c 	if (!dd) {
dd                442 drivers/md/dm-table.c 		dd = kmalloc(sizeof(*dd), GFP_KERNEL);
dd                443 drivers/md/dm-table.c 		if (!dd)
dd                446 drivers/md/dm-table.c 		if ((r = dm_get_table_device(t->md, dev, mode, &dd->dm_dev))) {
dd                447 drivers/md/dm-table.c 			kfree(dd);
dd                451 drivers/md/dm-table.c 		refcount_set(&dd->count, 1);
dd                452 drivers/md/dm-table.c 		list_add(&dd->list, &t->devices);
dd                455 drivers/md/dm-table.c 	} else if (dd->dm_dev->mode != (mode | dd->dm_dev->mode)) {
dd                456 drivers/md/dm-table.c 		r = upgrade_mode(dd, mode, t->md);
dd                460 drivers/md/dm-table.c 	refcount_inc(&dd->count);
dd                462 drivers/md/dm-table.c 	*result = dd->dm_dev;
dd                503 drivers/md/dm-table.c 	struct dm_dev_internal *dd;
dd                505 drivers/md/dm-table.c 	list_for_each_entry(dd, devices, list) {
dd                506 drivers/md/dm-table.c 		if (dd->dm_dev == d) {
dd                516 drivers/md/dm-table.c 	if (refcount_dec_and_test(&dd->count)) {
dd                518 drivers/md/dm-table.c 		list_del(&dd->list);
dd                519 drivers/md/dm-table.c 		kfree(dd);
dd               1203 drivers/md/dm-table.c 	struct dm_dev_internal *dd = NULL;
dd               1213 drivers/md/dm-table.c 	list_for_each_entry(dd, devices, list) {
dd               1214 drivers/md/dm-table.c 		template_disk = dd->dm_dev->bdev->bd_disk;
dd               2089 drivers/md/dm-table.c 	struct dm_dev_internal *dd;
dd               2094 drivers/md/dm-table.c 	list_for_each_entry(dd, devices, list) {
dd               2095 drivers/md/dm-table.c 		struct request_queue *q = bdev_get_queue(dd->dm_dev->bdev);
dd               2103 drivers/md/dm-table.c 				     bdevname(dd->dm_dev->bdev, b));
dd               2109 drivers/md/raid5-cache.c 		int dd;
dd               2138 drivers/md/raid5-cache.c 				conf, le64_to_cpu(payload->location), 0, &dd,
dd                269 drivers/media/platform/exynos4-is/fimc-lite-reg.c 	if (dev->dd->max_dma_bufs == 1)
dd                288 drivers/media/platform/exynos4-is/fimc-lite-reg.c 	if (dev->dd->max_dma_bufs == 1)
dd                554 drivers/media/platform/exynos4-is/fimc-lite.c 	struct flite_drvdata *dd = fimc->dd;
dd                559 drivers/media/platform/exynos4-is/fimc-lite.c 		v4l_bound_align_image(&mf->width, 8, dd->max_width,
dd                560 drivers/media/platform/exynos4-is/fimc-lite.c 				ffs(dd->out_width_align) - 1,
dd                561 drivers/media/platform/exynos4-is/fimc-lite.c 				&mf->height, 0, dd->max_height, 0, 0);
dd                611 drivers/media/platform/exynos4-is/fimc-lite.c 	r->left = round_down(r->left, fimc->dd->win_hor_offs_align);
dd                631 drivers/media/platform/exynos4-is/fimc-lite.c 	r->left = round_down(r->left, fimc->dd->out_hor_offs_align);
dd                694 drivers/media/platform/exynos4-is/fimc-lite.c 	struct flite_drvdata *dd = fimc->dd;
dd                715 drivers/media/platform/exynos4-is/fimc-lite.c 	v4l_bound_align_image(&pixm->width, 8, dd->max_width,
dd                716 drivers/media/platform/exynos4-is/fimc-lite.c 			      ffs(dd->out_width_align) - 1,
dd                717 drivers/media/platform/exynos4-is/fimc-lite.c 			      &pixm->height, 0, dd->max_height, 0, 0);
dd               1484 drivers/media/platform/exynos4-is/fimc-lite.c 	fimc->dd = drv_data;
dd                143 drivers/media/platform/exynos4-is/fimc-lite.h 	struct flite_drvdata	*dd;
dd                137 drivers/misc/mic/vop/vop_main.h 	struct mic_device_desc *dd;
dd                151 drivers/misc/mic/vop/vop_main.h 	return !!vdev->dd->status;
dd                 31 drivers/misc/mic/vop/vop_vringh.c 	if (!vdev->dd || !vdev->dd->type) {
dd                 37 drivers/misc/mic/vop/vop_vringh.c 	if (vdev->dd->type == -1) {
dd                 58 drivers/misc/mic/vop/vop_vringh.c 	struct mic_vqconfig *vqconfig = mic_vq_config(vdev->dd);
dd                 62 drivers/misc/mic/vop/vop_vringh.c 	for (i = 0; i < vdev->dd->num_vq; i++) {
dd                 88 drivers/misc/mic/vop/vop_vringh.c 		__func__, vdev->dd->status, vdev->virtio_id);
dd                 90 drivers/misc/mic/vop/vop_vringh.c 	for (i = 0; i < vdev->dd->num_vq; i++)
dd                 98 drivers/misc/mic/vop/vop_vringh.c 	vdev->dd->status = 0;
dd                102 drivers/misc/mic/vop/vop_vringh.c 	for (i = 0; i < vdev->dd->num_vq; i++) {
dd                111 drivers/misc/mic/vop/vop_vringh.c 	for (i = 0; i < vdev->dd->num_vq; i++)
dd                163 drivers/misc/mic/vop/vop_vringh.c 	for (i = 0; i < vdev->dd->num_vq; i++)
dd                166 drivers/misc/mic/vop/vop_vringh.c 	if (db == -1 || vdev->dd->type == -1) {
dd                171 drivers/misc/mic/vop/vop_vringh.c 	memcpy(mic_vq_configspace(vdev->dd), argp, vdev->dd->config_len);
dd                187 drivers/misc/mic/vop/vop_vringh.c 	for (i = 0; i < vdev->dd->num_vq; i++)
dd                264 drivers/misc/mic/vop/vop_vringh.c 	struct mic_device_desc *dd = NULL;
dd                279 drivers/misc/mic/vop/vop_vringh.c 	ret = vop_copy_dp_entry(vdev, argp, &type, &dd);
dd                286 drivers/misc/mic/vop/vop_vringh.c 	vop_init_device_ctrl(vdev, dd);
dd                288 drivers/misc/mic/vop/vop_vringh.c 	vdev->dd = dd;
dd                290 drivers/misc/mic/vop/vop_vringh.c 	vqconfig = mic_vq_config(dd);
dd                293 drivers/misc/mic/vop/vop_vringh.c 	for (i = 0; i < dd->num_vq; i++) {
dd                326 drivers/misc/mic/vop/vop_vringh.c 				       *(u32 *)mic_vq_features(vdev->dd),
dd                370 drivers/misc/mic/vop/vop_vringh.c 	dd->type = type;
dd                378 drivers/misc/mic/vop/vop_vringh.c 	dev_dbg(&vpdev->dev, "Added virtio id %d db %d\n", dd->type, db);
dd                381 drivers/misc/mic/vop/vop_vringh.c 	vqconfig = mic_vq_config(dd);
dd                432 drivers/misc/mic/vop/vop_vringh.c 	vqconfig = mic_vq_config(vdev->dd);
dd                433 drivers/misc/mic/vop/vop_vringh.c 	for (i = 0; i < vdev->dd->num_vq; i++) {
dd                455 drivers/misc/mic/vop/vop_vringh.c 	vdev->dd->type = -1;
dd                837 drivers/misc/mic/vop/vop_vringh.c 	if (!vdev || copy->vr_idx >= vdev->dd->num_vq)
dd                928 drivers/misc/mic/vop/vop_vringh.c 		struct mic_device_desc dd, *dd_config;
dd                930 drivers/misc/mic/vop/vop_vringh.c 		if (copy_from_user(&dd, argp, sizeof(dd)))
dd                933 drivers/misc/mic/vop/vop_vringh.c 		if (mic_aligned_desc_size(&dd) > MIC_MAX_DESC_BLK_SIZE ||
dd                934 drivers/misc/mic/vop/vop_vringh.c 		    dd.num_vq > MIC_MAX_VRINGS)
dd                937 drivers/misc/mic/vop/vop_vringh.c 		dd_config = memdup_user(argp, mic_desc_size(&dd));
dd                942 drivers/misc/mic/vop/vop_vringh.c 		if (memcmp(&dd, dd_config, sizeof(dd))) {
dd                992 drivers/misc/mic/vop/vop_vringh.c 		buf = memdup_user(argp, vdev->dd->config_len);
dd               1059 drivers/misc/mic/vop/vop_vringh.c 	for (i = 0; i < vdev->dd->num_vq; i++) {
dd                 65 drivers/net/ethernet/broadcom/bnxt/bnxt_debugfs.c 				  struct dentry *dd)
dd                 70 drivers/net/ethernet/broadcom/bnxt/bnxt_debugfs.c 	debugfs_create_file(qname, 0600, dd, dim, &debugfs_dim_fops);
dd                830 drivers/net/fddi/skfp/h/skfbi.h #define	MDRW(dd)	outpw(FM_A(FM_MDRU),(unsigned int)((dd)>>16)) ;\
dd                831 drivers/net/fddi/skfp/h/skfbi.h 			outpw(FM_A(FM_MDRL),(unsigned int)(dd))
dd                 75 drivers/net/wireless/ath/ath9k/ath9k.h int ath_descdma_setup(struct ath_softc *sc, struct ath_descdma *dd,
dd                283 drivers/net/wireless/ath/ath9k/init.c int ath_descdma_setup(struct ath_softc *sc, struct ath_descdma *dd,
dd                308 drivers/net/wireless/ath/ath9k/init.c 	dd->dd_desc_len = desc_len * nbuf * ndesc;
dd                317 drivers/net/wireless/ath/ath9k/init.c 			ATH_DESC_4KB_BOUND_NUM_SKIPPED(dd->dd_desc_len);
dd                322 drivers/net/wireless/ath/ath9k/init.c 			dd->dd_desc_len += dma_len;
dd                329 drivers/net/wireless/ath/ath9k/init.c 	dd->dd_desc = dmam_alloc_coherent(sc->dev, dd->dd_desc_len,
dd                330 drivers/net/wireless/ath/ath9k/init.c 					  &dd->dd_desc_paddr, GFP_KERNEL);
dd                331 drivers/net/wireless/ath/ath9k/init.c 	if (!dd->dd_desc)
dd                334 drivers/net/wireless/ath/ath9k/init.c 	ds = dd->dd_desc;
dd                336 drivers/net/wireless/ath/ath9k/init.c 		name, ds, (u32) dd->dd_desc_len,
dd                337 drivers/net/wireless/ath/ath9k/init.c 		ito64(dd->dd_desc_paddr), /*XXX*/(u32) dd->dd_desc_len);
dd                350 drivers/net/wireless/ath/ath9k/init.c 			bf->bf_daddr = DS2PHYS(dd, ds);
dd                361 drivers/net/wireless/ath/ath9k/init.c 						   ((caddr_t) dd->dd_desc +
dd                362 drivers/net/wireless/ath/ath9k/init.c 						dd->dd_desc_len));
dd                366 drivers/net/wireless/ath/ath9k/init.c 					bf->bf_daddr = DS2PHYS(dd, ds);
dd                381 drivers/net/wireless/ath/ath9k/init.c 			bf->bf_daddr = DS2PHYS(dd, ds);
dd                392 drivers/net/wireless/ath/ath9k/init.c 						   ((caddr_t) dd->dd_desc +
dd                393 drivers/net/wireless/ath/ath9k/init.c 						dd->dd_desc_len));
dd                397 drivers/net/wireless/ath/ath9k/init.c 					bf->bf_daddr = DS2PHYS(dd, ds);
dd               2733 drivers/net/wireless/ath/ath9k/xmit.c 	struct ath_descdma *dd = &sc->txsdma;
dd               2736 drivers/net/wireless/ath/ath9k/xmit.c 	dd->dd_desc_len = size * txs_len;
dd               2737 drivers/net/wireless/ath/ath9k/xmit.c 	dd->dd_desc = dmam_alloc_coherent(sc->dev, dd->dd_desc_len,
dd               2738 drivers/net/wireless/ath/ath9k/xmit.c 					  &dd->dd_desc_paddr, GFP_KERNEL);
dd               2739 drivers/net/wireless/ath/ath9k/xmit.c 	if (!dd->dd_desc)
dd                154 drivers/net/wireless/ath/wil6210/pmc.c 		struct vring_tx_desc dd = {}, *d = &dd;
dd                224 drivers/net/wireless/ath/wil6210/txrx.c 			struct vring_tx_desc dd, *d = &dd;
dd                242 drivers/net/wireless/ath/wil6210/txrx.c 			struct vring_rx_desc dd, *d = &dd;
dd                272 drivers/net/wireless/ath/wil6210/txrx.c 	struct vring_rx_desc dd, *d = &dd;
dd               2037 drivers/net/wireless/ath/wil6210/txrx.c 	struct vring_tx_desc dd, *d = &dd;
dd               2475 drivers/net/wireless/ath/wil6210/txrx.c 			struct vring_tx_desc dd, *d = &dd;
dd                176 drivers/net/wireless/ath/wil6210/txrx_edma.c 	struct wil_rx_enhanced_desc dd, *d = &dd;
dd                465 drivers/net/wireless/ath/wil6210/txrx_edma.c 		struct wil_tx_enhanced_desc dd, *d = &dd;
dd               1230 drivers/net/wireless/ath/wil6210/txrx_edma.c 			struct wil_tx_enhanced_desc dd, *d = &dd;
dd               1535 drivers/net/wireless/ath/wil6210/txrx_edma.c 		struct wil_tx_enhanced_desc dd, *d = &dd;
dd                301 drivers/net/wireless/broadcom/brcm80211/brcmsmac/dma.c static bool dma64_dd_parity(struct dma64desc *dd)
dd                303 drivers/net/wireless/broadcom/brcm80211/brcmsmac/dma.c 	return parity32(dd->addrlow ^ dd->addrhigh ^ dd->ctrl1 ^ dd->ctrl2);
dd                 54 drivers/parport/parport_ax88796.c 	struct ax_drvdata *dd = pp_to_drv(p);
dd                 56 drivers/parport/parport_ax88796.c 	return readb(dd->spp_data);
dd                 62 drivers/parport/parport_ax88796.c 	struct ax_drvdata *dd = pp_to_drv(p);
dd                 64 drivers/parport/parport_ax88796.c 	writeb(data, dd->spp_data);
dd                 70 drivers/parport/parport_ax88796.c 	struct ax_drvdata *dd = pp_to_drv(p);
dd                 71 drivers/parport/parport_ax88796.c 	unsigned int cpr = readb(dd->spp_cpr);
dd                 92 drivers/parport/parport_ax88796.c 	struct ax_drvdata *dd = pp_to_drv(p);
dd                 93 drivers/parport/parport_ax88796.c 	unsigned int cpr = readb(dd->spp_cpr);
dd                109 drivers/parport/parport_ax88796.c 	dev_dbg(dd->dev, "write_control: ctrl=%02x, cpr=%02x\n", control, cpr);
dd                110 drivers/parport/parport_ax88796.c 	writeb(cpr, dd->spp_cpr);
dd                113 drivers/parport/parport_ax88796.c 		dev_err(dd->dev, "write_control: read != set (%02x, %02x)\n",
dd                121 drivers/parport/parport_ax88796.c 	struct ax_drvdata *dd = pp_to_drv(p);
dd                122 drivers/parport/parport_ax88796.c 	unsigned int status = readb(dd->spp_spr);
dd                147 drivers/parport/parport_ax88796.c 	struct ax_drvdata *dd = pp_to_drv(p);
dd                150 drivers/parport/parport_ax88796.c 	dev_dbg(dd->dev, "frob: mask=%02x, val=%02x, old=%02x\n",
dd                160 drivers/parport/parport_ax88796.c 	struct ax_drvdata *dd = pp_to_drv(p);
dd                164 drivers/parport/parport_ax88796.c 	if (!dd->irq_enabled) {
dd                166 drivers/parport/parport_ax88796.c 		dd->irq_enabled = 1;
dd                174 drivers/parport/parport_ax88796.c 	struct ax_drvdata *dd = pp_to_drv(p);
dd                178 drivers/parport/parport_ax88796.c 	if (dd->irq_enabled) {
dd                180 drivers/parport/parport_ax88796.c 		dd->irq_enabled = 0;
dd                188 drivers/parport/parport_ax88796.c 	struct ax_drvdata *dd = pp_to_drv(p);
dd                189 drivers/parport/parport_ax88796.c 	void __iomem *cpr = dd->spp_cpr;
dd                197 drivers/parport/parport_ax88796.c 	struct ax_drvdata *dd = pp_to_drv(p);
dd                198 drivers/parport/parport_ax88796.c 	void __iomem *cpr = dd->spp_cpr;
dd                206 drivers/parport/parport_ax88796.c 	struct ax_drvdata *dd = pp_to_drv(d->port);
dd                210 drivers/parport/parport_ax88796.c 	dev_dbg(dd->dev, "init_state: %p: state=%p\n", d, s);
dd                211 drivers/parport/parport_ax88796.c 	s->u.ax88796.cpr = readb(dd->spp_cpr);
dd                217 drivers/parport/parport_ax88796.c 	struct ax_drvdata *dd = pp_to_drv(p);
dd                219 drivers/parport/parport_ax88796.c 	dev_dbg(dd->dev, "save_state: %p: state=%p\n", p, s);
dd                220 drivers/parport/parport_ax88796.c 	s->u.ax88796.cpr = readb(dd->spp_cpr);
dd                226 drivers/parport/parport_ax88796.c 	struct ax_drvdata *dd = pp_to_drv(p);
dd                228 drivers/parport/parport_ax88796.c 	dev_dbg(dd->dev, "restore_state: %p: state=%p\n", p, s);
dd                229 drivers/parport/parport_ax88796.c 	writeb(s->u.ax88796.cpr, dd->spp_cpr);
dd                271 drivers/parport/parport_ax88796.c 	struct ax_drvdata *dd;
dd                279 drivers/parport/parport_ax88796.c 	dd = kzalloc(sizeof(*dd), GFP_KERNEL);
dd                280 drivers/parport/parport_ax88796.c 	if (!dd)
dd                293 drivers/parport/parport_ax88796.c 	dd->io = request_mem_region(res->start, size, pdev->name);
dd                294 drivers/parport/parport_ax88796.c 	if (dd->io == NULL) {
dd                300 drivers/parport/parport_ax88796.c 	dd->base = ioremap(res->start, size);
dd                301 drivers/parport/parport_ax88796.c 	if (dd->base == NULL) {
dd                311 drivers/parport/parport_ax88796.c 	pp = parport_register_port((unsigned long)dd->base, irq,
dd                321 drivers/parport/parport_ax88796.c 	pp->private_data = dd;
dd                322 drivers/parport/parport_ax88796.c 	dd->parport = pp;
dd                323 drivers/parport/parport_ax88796.c 	dd->dev = _dev;
dd                325 drivers/parport/parport_ax88796.c 	dd->spp_data = dd->base;
dd                326 drivers/parport/parport_ax88796.c 	dd->spp_spr  = dd->base + (spacing * 1);
dd                327 drivers/parport/parport_ax88796.c 	dd->spp_cpr  = dd->base + (spacing * 2);
dd                330 drivers/parport/parport_ax88796.c 	writeb(AX_CPR_STRB, dd->spp_cpr);
dd                340 drivers/parport/parport_ax88796.c 		dd->irq_enabled = 1;
dd                353 drivers/parport/parport_ax88796.c 	iounmap(dd->base);
dd                355 drivers/parport/parport_ax88796.c 	release_mem_region(dd->io->start, size);
dd                357 drivers/parport/parport_ax88796.c 	kfree(dd);
dd                364 drivers/parport/parport_ax88796.c 	struct ax_drvdata *dd = pp_to_drv(p);
dd                368 drivers/parport/parport_ax88796.c 	iounmap(dd->base);
dd                369 drivers/parport/parport_ax88796.c 	release_mem_region(dd->io->start, resource_size(dd->io));
dd                370 drivers/parport/parport_ax88796.c 	kfree(dd);
dd                381 drivers/parport/parport_ax88796.c 	struct ax_drvdata *dd = pp_to_drv(p);
dd                383 drivers/parport/parport_ax88796.c 	parport_ax88796_save_state(p, &dd->suspend);
dd                384 drivers/parport/parport_ax88796.c 	writeb(AX_CPR_nDOE | AX_CPR_STRB, dd->spp_cpr);
dd                391 drivers/parport/parport_ax88796.c 	struct ax_drvdata *dd = pp_to_drv(p);
dd                393 drivers/parport/parport_ax88796.c 	parport_ax88796_restore_state(p, &dd->suspend);
dd               3392 drivers/scsi/aacraid/aachba.c 	struct aac_delete_disk dd;
dd               3399 drivers/scsi/aacraid/aachba.c 	if (copy_from_user(&dd, arg, sizeof (struct aac_delete_disk)))
dd               3402 drivers/scsi/aacraid/aachba.c 	if (dd.cnum >= dev->maximum_num_containers)
dd               3407 drivers/scsi/aacraid/aachba.c 	fsa_dev_ptr[dd.cnum].deleted = 1;
dd               3411 drivers/scsi/aacraid/aachba.c 	fsa_dev_ptr[dd.cnum].valid = 0;
dd               3417 drivers/scsi/aacraid/aachba.c 	struct aac_delete_disk dd;
dd               3424 drivers/scsi/aacraid/aachba.c 	if (copy_from_user(&dd, arg, sizeof (struct aac_delete_disk)))
dd               3427 drivers/scsi/aacraid/aachba.c 	if (dd.cnum >= dev->maximum_num_containers)
dd               3432 drivers/scsi/aacraid/aachba.c 	if (fsa_dev_ptr[dd.cnum].locked)
dd               3438 drivers/scsi/aacraid/aachba.c 		fsa_dev_ptr[dd.cnum].valid = 0;
dd               3439 drivers/scsi/aacraid/aachba.c 		fsa_dev_ptr[dd.cnum].devname[0] = '\0';
dd               1689 drivers/scsi/hpsa.c 	struct raid_map_disk_data *dd = &map->data[0];
dd               1718 drivers/scsi/hpsa.c 			if (dev[j]->ioaccel_handle != dd[i].ioaccel_handle)
dd               3218 drivers/scsi/hpsa.c 	struct raid_map_disk_data *dd = &map_buff->data[0];
dd               3268 drivers/scsi/hpsa.c 			for (col = 0; col < disks_per_row; col++, dd++)
dd               3271 drivers/scsi/hpsa.c 					col, dd->ioaccel_handle,
dd               3272 drivers/scsi/hpsa.c 					dd->xor_mult[0], dd->xor_mult[1]);
dd               3275 drivers/scsi/hpsa.c 			for (col = 0; col < disks_per_row; col++, dd++)
dd               3278 drivers/scsi/hpsa.c 					col, dd->ioaccel_handle,
dd               3279 drivers/scsi/hpsa.c 					dd->xor_mult[0], dd->xor_mult[1]);
dd               5078 drivers/scsi/hpsa.c 	struct raid_map_disk_data *dd = &map->data[0];
dd               5359 drivers/scsi/hpsa.c 	disk_handle = dd[map_index].ioaccel_handle;
dd                782 drivers/scsi/libfc/fc_fcp.c 	struct fcp_txrdy *dd;
dd                818 drivers/scsi/libfc/fc_fcp.c 		dd = fc_frame_payload_get(fp, sizeof(*dd));
dd                819 drivers/scsi/libfc/fc_fcp.c 		WARN_ON(!dd);
dd                822 drivers/scsi/libfc/fc_fcp.c 				      (size_t) ntohl(dd->ft_data_ro),
dd                823 drivers/scsi/libfc/fc_fcp.c 				      (size_t) ntohl(dd->ft_burst_len));
dd               8481 drivers/scsi/megaraid/megaraid_sas_base.c static ssize_t version_show(struct device_driver *dd, char *buf)
dd               8488 drivers/scsi/megaraid/megaraid_sas_base.c static ssize_t release_date_show(struct device_driver *dd, char *buf)
dd               8495 drivers/scsi/megaraid/megaraid_sas_base.c static ssize_t support_poll_for_event_show(struct device_driver *dd, char *buf)
dd               8501 drivers/scsi/megaraid/megaraid_sas_base.c static ssize_t support_device_change_show(struct device_driver *dd, char *buf)
dd               8507 drivers/scsi/megaraid/megaraid_sas_base.c static ssize_t dbg_lvl_show(struct device_driver *dd, char *buf)
dd               8512 drivers/scsi/megaraid/megaraid_sas_base.c static ssize_t dbg_lvl_store(struct device_driver *dd, const char *buf,
dd               8526 drivers/scsi/megaraid/megaraid_sas_base.c support_nvme_encapsulation_show(struct device_driver *dd, char *buf)
dd               8534 drivers/scsi/megaraid/megaraid_sas_base.c support_pci_lane_margining_show(struct device_driver *dd, char *buf)
dd               2356 drivers/scsi/qla2xxx/qla_bsg.c 	struct qla_dport_diag *dd;
dd               2362 drivers/scsi/qla2xxx/qla_bsg.c 	dd = kmalloc(sizeof(*dd), GFP_KERNEL);
dd               2363 drivers/scsi/qla2xxx/qla_bsg.c 	if (!dd) {
dd               2370 drivers/scsi/qla2xxx/qla_bsg.c 	    bsg_job->request_payload.sg_cnt, dd, sizeof(*dd));
dd               2373 drivers/scsi/qla2xxx/qla_bsg.c 	    vha, dd->buf, sizeof(dd->buf), dd->options);
dd               2376 drivers/scsi/qla2xxx/qla_bsg.c 		    bsg_job->reply_payload.sg_cnt, dd, sizeof(*dd));
dd               2379 drivers/scsi/qla2xxx/qla_bsg.c 	bsg_reply->reply_payload_rcv_len = sizeof(*dd);
dd               2388 drivers/scsi/qla2xxx/qla_bsg.c 	kfree(dd);
dd                652 drivers/staging/mt7621-dma/mtk-hsdma.c 	struct dma_device *dd;
dd                689 drivers/staging/mt7621-dma/mtk-hsdma.c 	dd = &hsdma->ddev;
dd                690 drivers/staging/mt7621-dma/mtk-hsdma.c 	dma_cap_set(DMA_MEMCPY, dd->cap_mask);
dd                691 drivers/staging/mt7621-dma/mtk-hsdma.c 	dd->copy_align = HSDMA_ALIGN_SIZE;
dd                692 drivers/staging/mt7621-dma/mtk-hsdma.c 	dd->device_free_chan_resources = mtk_hsdma_free_chan_resources;
dd                693 drivers/staging/mt7621-dma/mtk-hsdma.c 	dd->device_prep_dma_memcpy = mtk_hsdma_prep_dma_memcpy;
dd                694 drivers/staging/mt7621-dma/mtk-hsdma.c 	dd->device_terminate_all = mtk_hsdma_terminate_all;
dd                695 drivers/staging/mt7621-dma/mtk-hsdma.c 	dd->device_tx_status = mtk_hsdma_tx_status;
dd                696 drivers/staging/mt7621-dma/mtk-hsdma.c 	dd->device_issue_pending = mtk_hsdma_issue_pending;
dd                697 drivers/staging/mt7621-dma/mtk-hsdma.c 	dd->dev = &pdev->dev;
dd                698 drivers/staging/mt7621-dma/mtk-hsdma.c 	dd->dev->dma_parms = &hsdma->dma_parms;
dd                699 drivers/staging/mt7621-dma/mtk-hsdma.c 	dma_set_max_seg_size(dd->dev, HSDMA_MAX_PLEN);
dd                700 drivers/staging/mt7621-dma/mtk-hsdma.c 	INIT_LIST_HEAD(&dd->channels);
dd                705 drivers/staging/mt7621-dma/mtk-hsdma.c 	vchan_init(&chan->vchan, dd);
dd                714 drivers/staging/mt7621-dma/mtk-hsdma.c 	ret = dma_async_device_register(dd);
dd                732 drivers/staging/mt7621-dma/mtk-hsdma.c 	dma_async_device_unregister(dd);
dd                797 drivers/staging/ralink-gdma/ralink-gdma.c 	struct dma_device *dd;
dd                840 drivers/staging/ralink-gdma/ralink-gdma.c 	dd = &dma_dev->ddev;
dd                841 drivers/staging/ralink-gdma/ralink-gdma.c 	dma_cap_set(DMA_MEMCPY, dd->cap_mask);
dd                842 drivers/staging/ralink-gdma/ralink-gdma.c 	dma_cap_set(DMA_SLAVE, dd->cap_mask);
dd                843 drivers/staging/ralink-gdma/ralink-gdma.c 	dma_cap_set(DMA_CYCLIC, dd->cap_mask);
dd                844 drivers/staging/ralink-gdma/ralink-gdma.c 	dd->device_free_chan_resources = gdma_dma_free_chan_resources;
dd                845 drivers/staging/ralink-gdma/ralink-gdma.c 	dd->device_prep_dma_memcpy = gdma_dma_prep_dma_memcpy;
dd                846 drivers/staging/ralink-gdma/ralink-gdma.c 	dd->device_prep_slave_sg = gdma_dma_prep_slave_sg;
dd                847 drivers/staging/ralink-gdma/ralink-gdma.c 	dd->device_prep_dma_cyclic = gdma_dma_prep_dma_cyclic;
dd                848 drivers/staging/ralink-gdma/ralink-gdma.c 	dd->device_config = gdma_dma_config;
dd                849 drivers/staging/ralink-gdma/ralink-gdma.c 	dd->device_terminate_all = gdma_dma_terminate_all;
dd                850 drivers/staging/ralink-gdma/ralink-gdma.c 	dd->device_tx_status = gdma_dma_tx_status;
dd                851 drivers/staging/ralink-gdma/ralink-gdma.c 	dd->device_issue_pending = gdma_dma_issue_pending;
dd                853 drivers/staging/ralink-gdma/ralink-gdma.c 	dd->src_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
dd                854 drivers/staging/ralink-gdma/ralink-gdma.c 	dd->dst_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
dd                855 drivers/staging/ralink-gdma/ralink-gdma.c 	dd->directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
dd                856 drivers/staging/ralink-gdma/ralink-gdma.c 	dd->residue_granularity = DMA_RESIDUE_GRANULARITY_SEGMENT;
dd                858 drivers/staging/ralink-gdma/ralink-gdma.c 	dd->dev = &pdev->dev;
dd                859 drivers/staging/ralink-gdma/ralink-gdma.c 	dd->dev->dma_parms = &dma_dev->dma_parms;
dd                860 drivers/staging/ralink-gdma/ralink-gdma.c 	dma_set_max_seg_size(dd->dev, GDMA_REG_CTRL0_TX_MASK);
dd                861 drivers/staging/ralink-gdma/ralink-gdma.c 	INIT_LIST_HEAD(&dd->channels);
dd                867 drivers/staging/ralink-gdma/ralink-gdma.c 		vchan_init(&chan->vchan, dd);
dd                873 drivers/staging/ralink-gdma/ralink-gdma.c 	ret = dma_async_device_register(dd);
dd                891 drivers/staging/ralink-gdma/ralink-gdma.c 	dma_async_device_unregister(dd);
dd                935 drivers/usb/gadget/udc/lpc32xx_udc.c 	struct lpc32xx_usbd_dd_gad	*dd;
dd                937 drivers/usb/gadget/udc/lpc32xx_udc.c 	dd = dma_pool_alloc(udc->dd_cache, GFP_ATOMIC | GFP_DMA, &dma);
dd                938 drivers/usb/gadget/udc/lpc32xx_udc.c 	if (dd)
dd                939 drivers/usb/gadget/udc/lpc32xx_udc.c 		dd->this_dma = dma;
dd                941 drivers/usb/gadget/udc/lpc32xx_udc.c 	return dd;
dd                945 drivers/usb/gadget/udc/lpc32xx_udc.c static void udc_dd_free(struct lpc32xx_udc *udc, struct lpc32xx_usbd_dd_gad *dd)
dd                947 drivers/usb/gadget/udc/lpc32xx_udc.c 	dma_pool_free(udc->dd_cache, dd, dd->this_dma);
dd               1767 drivers/usb/gadget/udc/lpc32xx_udc.c 		struct lpc32xx_usbd_dd_gad *dd;
dd               1774 drivers/usb/gadget/udc/lpc32xx_udc.c 		dd = udc_dd_alloc(udc);
dd               1775 drivers/usb/gadget/udc/lpc32xx_udc.c 		if (!dd) {
dd               1779 drivers/usb/gadget/udc/lpc32xx_udc.c 		req->dd_desc_ptr = dd;
dd               1782 drivers/usb/gadget/udc/lpc32xx_udc.c 		dd->dd_next_phy = dd->dd_next_v = 0;
dd               1783 drivers/usb/gadget/udc/lpc32xx_udc.c 		dd->dd_buffer_addr = req->req.dma;
dd               1784 drivers/usb/gadget/udc/lpc32xx_udc.c 		dd->dd_status = 0;
dd               1788 drivers/usb/gadget/udc/lpc32xx_udc.c 			dd->dd_setup = DD_SETUP_ISO_EP |
dd               1791 drivers/usb/gadget/udc/lpc32xx_udc.c 			dd->dd_iso_ps_mem_addr = dd->this_dma + 24;
dd               1793 drivers/usb/gadget/udc/lpc32xx_udc.c 				dd->iso_status[0] = req->req.length;
dd               1795 drivers/usb/gadget/udc/lpc32xx_udc.c 				dd->iso_status[0] = 0;
dd               1797 drivers/usb/gadget/udc/lpc32xx_udc.c 			dd->dd_setup = DD_SETUP_PACKETLEN(ep->ep.maxpacket) |
dd               1994 drivers/usb/gadget/udc/lpc32xx_udc.c 	struct lpc32xx_usbd_dd_gad *dd;
dd               2005 drivers/usb/gadget/udc/lpc32xx_udc.c 	dd = req->dd_desc_ptr;
dd               2008 drivers/usb/gadget/udc/lpc32xx_udc.c 	if (!(dd->dd_status & DD_STATUS_DD_RETIRED))
dd               2032 drivers/usb/gadget/udc/lpc32xx_udc.c 	status = dd->dd_status;
dd               2071 drivers/usb/gadget/udc/lpc32xx_udc.c 			req->req.actual = dd->iso_status[0] & 0xFFFF;
dd                187 drivers/video/backlight/otm3225a.c 	struct otm3225a_data *dd = lcd_get_data(ld);
dd                189 drivers/video/backlight/otm3225a.c 	if (power == dd->power)
dd                193 drivers/video/backlight/otm3225a.c 		otm3225a_write(dd->spi, display_off, ARRAY_SIZE(display_off));
dd                195 drivers/video/backlight/otm3225a.c 		otm3225a_write(dd->spi, display_on, ARRAY_SIZE(display_on));
dd                196 drivers/video/backlight/otm3225a.c 	dd->power = power;
dd                203 drivers/video/backlight/otm3225a.c 	struct otm3225a_data *dd = lcd_get_data(ld);
dd                205 drivers/video/backlight/otm3225a.c 	return dd->power;
dd                215 drivers/video/backlight/otm3225a.c 	struct otm3225a_data *dd;
dd                219 drivers/video/backlight/otm3225a.c 	dd = devm_kzalloc(dev, sizeof(struct otm3225a_data), GFP_KERNEL);
dd                220 drivers/video/backlight/otm3225a.c 	if (dd == NULL)
dd                223 drivers/video/backlight/otm3225a.c 	ld = devm_lcd_device_register(dev, dev_name(dev), dev, dd,
dd                228 drivers/video/backlight/otm3225a.c 	dd->spi = spi;
dd                229 drivers/video/backlight/otm3225a.c 	dd->ld = ld;
dd                230 drivers/video/backlight/otm3225a.c 	dev_set_drvdata(dev, dd);
dd                223 fs/hpfs/dnode.c 			struct dnode *dd;
dd                224 fs/hpfs/dnode.c 			if ((dd = hpfs_map_dnode(s, de_down_pointer(de), &qbh))) {
dd                225 fs/hpfs/dnode.c 				if (le32_to_cpu(dd->up) != dno || dd->root_dnode) {
dd                226 fs/hpfs/dnode.c 					dd->up = cpu_to_le32(dno);
dd                227 fs/hpfs/dnode.c 					dd->root_dnode = 0;
dd                911 fs/hpfs/dnode.c 			       dnode_secno *dd, struct quad_buffer_head *qbh)
dd                928 fs/hpfs/dnode.c 			if (dd) *dd = dno;
dd                527 net/decnet/dn_nsp_out.c 			int ddl, unsigned char *dd, __le16 rem, __le16 loc)
dd                554 net/decnet/dn_nsp_out.c 		memcpy(msg, dd, ddl);
dd                 70 samples/mic/mpssd/mpssd.c 	struct mic_device_desc dd;
dd                 75 samples/mic/mpssd/mpssd.c 	.dd = {
dd                 90 samples/mic/mpssd/mpssd.c 	struct mic_device_desc dd;
dd                 95 samples/mic/mpssd/mpssd.c 	.dd = {
dd                122 samples/mic/mpssd/mpssd.c 	struct mic_device_desc dd;
dd                127 samples/mic/mpssd/mpssd.c 	.dd = {
dd                561 samples/mic/mpssd/mpssd.c 		virtnet_dev_page.dd.num_vq)) {
dd                783 samples/mic/mpssd/mpssd.c 		virtcons_dev_page.dd.num_vq)) {
dd                913 samples/mic/mpssd/mpssd.c add_virtio_device(struct mic_info *mic, struct mic_device_desc *dd)
dd                925 samples/mic/mpssd/mpssd.c 	err = ioctl(fd, MIC_VIRTIO_ADD_DEVICE, dd);
dd                927 samples/mic/mpssd/mpssd.c 		mpsslog("Could not add %d %s\n", dd->type, strerror(errno));
dd                931 samples/mic/mpssd/mpssd.c 	switch (dd->type) {
dd               1053 samples/mic/mpssd/mpssd.c 	add_virtio_device(mic, &virtblk_dev_page.dd);
dd               1056 samples/mic/mpssd/mpssd.c 				  virtblk_dev_page.dd.num_vq)) {
dd               1073 samples/mic/mpssd/mpssd.c 		MIC_DEVICE_PAGE_END + vr_size * virtblk_dev_page.dd.num_vq);
dd               1668 samples/mic/mpssd/mpssd.c 		add_virtio_device(mic, &virtcons_dev_page.dd);
dd               1669 samples/mic/mpssd/mpssd.c 		add_virtio_device(mic, &virtnet_dev_page.dd);
dd                122 sound/soc/atmel/atmel-classd.c 	struct atmel_classd *dd = snd_soc_card_get_drvdata(rtd->card);
dd                124 sound/soc/atmel/atmel-classd.c 	regmap_write(dd->regmap, CLASSD_THR, 0x0);
dd                126 sound/soc/atmel/atmel-classd.c 	return clk_prepare_enable(dd->pclk);
dd                133 sound/soc/atmel/atmel-classd.c 	struct atmel_classd *dd = snd_soc_card_get_drvdata(rtd->card);
dd                135 sound/soc/atmel/atmel-classd.c 	clk_disable_unprepare(dd->pclk);
dd                163 sound/soc/atmel/atmel-classd.c 	struct atmel_classd *dd = snd_soc_card_get_drvdata(rtd->card);
dd                166 sound/soc/atmel/atmel-classd.c 		dev_err(dd->dev,
dd                177 sound/soc/atmel/atmel-classd.c 	slave_config->dst_addr		= dd->phy_base + CLASSD_THR;
dd                251 sound/soc/atmel/atmel-classd.c 	struct atmel_classd *dd = snd_soc_card_get_drvdata(card);
dd                252 sound/soc/atmel/atmel-classd.c 	const struct atmel_classd_pdata *pdata = dd->pdata;
dd                304 sound/soc/atmel/atmel-classd.c 	struct atmel_classd *dd = snd_soc_card_get_drvdata(card);
dd                306 sound/soc/atmel/atmel-classd.c 	return regcache_sync(dd->regmap);
dd                325 sound/soc/atmel/atmel-classd.c 	struct atmel_classd *dd = snd_soc_card_get_drvdata(rtd->card);
dd                327 sound/soc/atmel/atmel-classd.c 	return clk_prepare_enable(dd->gclk);
dd                381 sound/soc/atmel/atmel-classd.c 	struct atmel_classd *dd = snd_soc_card_get_drvdata(rtd->card);
dd                404 sound/soc/atmel/atmel-classd.c 	clk_disable_unprepare(dd->gclk);
dd                406 sound/soc/atmel/atmel-classd.c 	ret = clk_set_rate(dd->gclk, sample_rates[best].gclk_rate);
dd                416 sound/soc/atmel/atmel-classd.c 	return clk_prepare_enable(dd->gclk);
dd                424 sound/soc/atmel/atmel-classd.c 	struct atmel_classd *dd = snd_soc_card_get_drvdata(rtd->card);
dd                426 sound/soc/atmel/atmel-classd.c 	clk_disable_unprepare(dd->gclk);
dd                499 sound/soc/atmel/atmel-classd.c 	struct atmel_classd *dd = snd_soc_card_get_drvdata(card);
dd                527 sound/soc/atmel/atmel-classd.c 	card->name	= dd->pdata->card_name;
dd                553 sound/soc/atmel/atmel-classd.c 	struct atmel_classd *dd;
dd                567 sound/soc/atmel/atmel-classd.c 	dd = devm_kzalloc(dev, sizeof(*dd), GFP_KERNEL);
dd                568 sound/soc/atmel/atmel-classd.c 	if (!dd)
dd                571 sound/soc/atmel/atmel-classd.c 	dd->pdata = pdata;
dd                573 sound/soc/atmel/atmel-classd.c 	dd->irq = platform_get_irq(pdev, 0);
dd                574 sound/soc/atmel/atmel-classd.c 	if (dd->irq < 0)
dd                575 sound/soc/atmel/atmel-classd.c 		return dd->irq;
dd                577 sound/soc/atmel/atmel-classd.c 	dd->pclk = devm_clk_get(dev, "pclk");
dd                578 sound/soc/atmel/atmel-classd.c 	if (IS_ERR(dd->pclk)) {
dd                579 sound/soc/atmel/atmel-classd.c 		ret = PTR_ERR(dd->pclk);
dd                584 sound/soc/atmel/atmel-classd.c 	dd->gclk = devm_clk_get(dev, "gclk");
dd                585 sound/soc/atmel/atmel-classd.c 	if (IS_ERR(dd->gclk)) {
dd                586 sound/soc/atmel/atmel-classd.c 		ret = PTR_ERR(dd->gclk);
dd                596 sound/soc/atmel/atmel-classd.c 	dd->phy_base = res->start;
dd                597 sound/soc/atmel/atmel-classd.c 	dd->dev = dev;
dd                599 sound/soc/atmel/atmel-classd.c 	dd->regmap = devm_regmap_init_mmio(dev, io_base,
dd                601 sound/soc/atmel/atmel-classd.c 	if (IS_ERR(dd->regmap)) {
dd                602 sound/soc/atmel/atmel-classd.c 		ret = PTR_ERR(dd->regmap);
dd                637 sound/soc/atmel/atmel-classd.c 	snd_soc_card_set_drvdata(card, dd);
dd                108 sound/soc/atmel/atmel-pdmic.c 	struct atmel_pdmic *dd = snd_soc_card_get_drvdata(rtd->card);
dd                111 sound/soc/atmel/atmel-pdmic.c 	ret = clk_prepare_enable(dd->gclk);
dd                115 sound/soc/atmel/atmel-pdmic.c 	ret =  clk_prepare_enable(dd->pclk);
dd                117 sound/soc/atmel/atmel-pdmic.c 		clk_disable_unprepare(dd->gclk);
dd                122 sound/soc/atmel/atmel-pdmic.c 	regmap_write(dd->regmap, PDMIC_CR, 0);
dd                124 sound/soc/atmel/atmel-pdmic.c 	dd->substream = substream;
dd                127 sound/soc/atmel/atmel-pdmic.c 	regmap_write(dd->regmap, PDMIC_IER, PDMIC_IER_OVRE);
dd                136 sound/soc/atmel/atmel-pdmic.c 	struct atmel_pdmic *dd = snd_soc_card_get_drvdata(rtd->card);
dd                139 sound/soc/atmel/atmel-pdmic.c 	regmap_write(dd->regmap, PDMIC_IDR, PDMIC_IDR_OVRE);
dd                141 sound/soc/atmel/atmel-pdmic.c 	clk_disable_unprepare(dd->gclk);
dd                142 sound/soc/atmel/atmel-pdmic.c 	clk_disable_unprepare(dd->pclk);
dd                149 sound/soc/atmel/atmel-pdmic.c 	struct atmel_pdmic *dd = snd_soc_card_get_drvdata(rtd->card);
dd                153 sound/soc/atmel/atmel-pdmic.c 	return regmap_read(dd->regmap, PDMIC_CDR, &val);
dd                201 sound/soc/atmel/atmel-pdmic.c 	struct atmel_pdmic *dd = snd_soc_card_get_drvdata(rtd->card);
dd                207 sound/soc/atmel/atmel-pdmic.c 		dev_err(dd->dev,
dd                212 sound/soc/atmel/atmel-pdmic.c 	slave_config->src_addr		= dd->phy_base + PDMIC_CDR;
dd                350 sound/soc/atmel/atmel-pdmic.c 	struct atmel_pdmic *dd = snd_soc_card_get_drvdata(card);
dd                353 sound/soc/atmel/atmel-pdmic.c 		     (u32)(dd->pdata->mic_offset << PDMIC_DSPR1_OFFSET_SHIFT));
dd                377 sound/soc/atmel/atmel-pdmic.c 	struct atmel_pdmic *dd = snd_soc_card_get_drvdata(rtd->card);
dd                422 sound/soc/atmel/atmel-pdmic.c 	pclk_rate = clk_get_rate(dd->pclk);
dd                423 sound/soc/atmel/atmel-pdmic.c 	gclk_rate = clk_get_rate(dd->gclk);
dd                510 sound/soc/atmel/atmel-pdmic.c 	struct atmel_pdmic *dd = snd_soc_card_get_drvdata(card);
dd                538 sound/soc/atmel/atmel-pdmic.c 	card->name	= dd->pdata->card_name;
dd                544 sound/soc/atmel/atmel-pdmic.c static void atmel_pdmic_get_sample_rate(struct atmel_pdmic *dd,
dd                547 sound/soc/atmel/atmel-pdmic.c 	u32 mic_min_freq = dd->pdata->mic_min_freq;
dd                548 sound/soc/atmel/atmel-pdmic.c 	u32 mic_max_freq = dd->pdata->mic_max_freq;
dd                549 sound/soc/atmel/atmel-pdmic.c 	u32 clk_max_rate = (u32)(clk_get_rate(dd->pclk) >> 1);
dd                550 sound/soc/atmel/atmel-pdmic.c 	u32 clk_min_rate = (u32)(clk_get_rate(dd->gclk) >> 8);
dd                565 sound/soc/atmel/atmel-pdmic.c 	struct atmel_pdmic *dd = (struct atmel_pdmic *)dev_id;
dd                569 sound/soc/atmel/atmel-pdmic.c 	regmap_read(dd->regmap, PDMIC_ISR, &pdmic_isr);
dd                572 sound/soc/atmel/atmel-pdmic.c 		regmap_update_bits(dd->regmap, PDMIC_CR, PDMIC_CR_ENPDM_MASK,
dd                575 sound/soc/atmel/atmel-pdmic.c 		snd_pcm_stop_xrun(dd->substream);
dd                595 sound/soc/atmel/atmel-pdmic.c 	struct atmel_pdmic *dd;
dd                607 sound/soc/atmel/atmel-pdmic.c 	dd = devm_kzalloc(dev, sizeof(*dd), GFP_KERNEL);
dd                608 sound/soc/atmel/atmel-pdmic.c 	if (!dd)
dd                611 sound/soc/atmel/atmel-pdmic.c 	dd->pdata = pdata;
dd                612 sound/soc/atmel/atmel-pdmic.c 	dd->dev = dev;
dd                614 sound/soc/atmel/atmel-pdmic.c 	dd->irq = platform_get_irq(pdev, 0);
dd                615 sound/soc/atmel/atmel-pdmic.c 	if (dd->irq < 0)
dd                616 sound/soc/atmel/atmel-pdmic.c 		return dd->irq;
dd                618 sound/soc/atmel/atmel-pdmic.c 	dd->pclk = devm_clk_get(dev, "pclk");
dd                619 sound/soc/atmel/atmel-pdmic.c 	if (IS_ERR(dd->pclk)) {
dd                620 sound/soc/atmel/atmel-pdmic.c 		ret = PTR_ERR(dd->pclk);
dd                625 sound/soc/atmel/atmel-pdmic.c 	dd->gclk = devm_clk_get(dev, "gclk");
dd                626 sound/soc/atmel/atmel-pdmic.c 	if (IS_ERR(dd->gclk)) {
dd                627 sound/soc/atmel/atmel-pdmic.c 		ret = PTR_ERR(dd->gclk);
dd                635 sound/soc/atmel/atmel-pdmic.c 	ret = clk_set_rate(dd->gclk, clk_get_rate(dd->pclk)/3);
dd                646 sound/soc/atmel/atmel-pdmic.c 	dd->phy_base = res->start;
dd                648 sound/soc/atmel/atmel-pdmic.c 	dd->regmap = devm_regmap_init_mmio(dev, io_base,
dd                650 sound/soc/atmel/atmel-pdmic.c 	if (IS_ERR(dd->regmap)) {
dd                651 sound/soc/atmel/atmel-pdmic.c 		ret = PTR_ERR(dd->regmap);
dd                656 sound/soc/atmel/atmel-pdmic.c 	ret =  devm_request_irq(dev, dd->irq, atmel_pdmic_interrupt, 0,
dd                657 sound/soc/atmel/atmel-pdmic.c 				"PDMIC", (void *)dd);
dd                660 sound/soc/atmel/atmel-pdmic.c 			dd->irq, ret);
dd                665 sound/soc/atmel/atmel-pdmic.c 	atmel_pdmic_get_sample_rate(dd, &rate_min, &rate_max);
dd                704 sound/soc/atmel/atmel-pdmic.c 	snd_soc_card_set_drvdata(card, dd);
dd                401 tools/bpf/bpftool/cfg.c 		struct dump_data dd = {};
dd                404 tools/bpf/bpftool/cfg.c 		kernel_syms_load(&dd);
dd                406 tools/bpf/bpftool/cfg.c 		dump_xlated_for_graph(&dd, bb->head, bb->tail, start_idx);
dd                407 tools/bpf/bpftool/cfg.c 		kernel_syms_destroy(&dd);
dd                418 tools/bpf/bpftool/prog.c 	struct dump_data dd = {};
dd                563 tools/bpf/bpftool/prog.c 				kernel_syms_load(&dd);
dd                573 tools/bpf/bpftool/prog.c 					sym = kernel_syms_search(&dd, ksyms[i]);
dd                629 tools/bpf/bpftool/prog.c 		kernel_syms_load(&dd);
dd                630 tools/bpf/bpftool/prog.c 		dd.nr_jited_ksyms = info->nr_jited_ksyms;
dd                631 tools/bpf/bpftool/prog.c 		dd.jited_ksyms = (__u64 *) info->jited_ksyms;
dd                632 tools/bpf/bpftool/prog.c 		dd.btf = btf;
dd                633 tools/bpf/bpftool/prog.c 		dd.func_info = func_info;
dd                634 tools/bpf/bpftool/prog.c 		dd.finfo_rec_size = info->func_info_rec_size;
dd                635 tools/bpf/bpftool/prog.c 		dd.prog_linfo = prog_linfo;
dd                638 tools/bpf/bpftool/prog.c 			dump_xlated_json(&dd, buf, member_len, opcodes,
dd                641 tools/bpf/bpftool/prog.c 			dump_xlated_plain(&dd, buf, member_len, opcodes,
dd                643 tools/bpf/bpftool/prog.c 		kernel_syms_destroy(&dd);
dd                 23 tools/bpf/bpftool/xlated_dumper.c void kernel_syms_load(struct dump_data *dd)
dd                 35 tools/bpf/bpftool/xlated_dumper.c 		tmp = reallocarray(dd->sym_mapping, dd->sym_count + 1,
dd                 36 tools/bpf/bpftool/xlated_dumper.c 				   sizeof(*dd->sym_mapping));
dd                 39 tools/bpf/bpftool/xlated_dumper.c 			free(dd->sym_mapping);
dd                 40 tools/bpf/bpftool/xlated_dumper.c 			dd->sym_mapping = NULL;
dd                 44 tools/bpf/bpftool/xlated_dumper.c 		dd->sym_mapping = tmp;
dd                 45 tools/bpf/bpftool/xlated_dumper.c 		sym = &dd->sym_mapping[dd->sym_count];
dd                 50 tools/bpf/bpftool/xlated_dumper.c 			dd->address_call_base = sym->address;
dd                 56 tools/bpf/bpftool/xlated_dumper.c 			dd->sym_count++;
dd                 61 tools/bpf/bpftool/xlated_dumper.c 	qsort(dd->sym_mapping, dd->sym_count,
dd                 62 tools/bpf/bpftool/xlated_dumper.c 	      sizeof(*dd->sym_mapping), kernel_syms_cmp);
dd                 65 tools/bpf/bpftool/xlated_dumper.c void kernel_syms_destroy(struct dump_data *dd)
dd                 67 tools/bpf/bpftool/xlated_dumper.c 	free(dd->sym_mapping);
dd                 70 tools/bpf/bpftool/xlated_dumper.c struct kernel_sym *kernel_syms_search(struct dump_data *dd,
dd                 77 tools/bpf/bpftool/xlated_dumper.c 	return dd->sym_mapping ?
dd                 78 tools/bpf/bpftool/xlated_dumper.c 	       bsearch(&sym, dd->sym_mapping, dd->sym_count,
dd                 79 tools/bpf/bpftool/xlated_dumper.c 		       sizeof(*dd->sym_mapping), kernel_syms_cmp) : NULL;
dd                138 tools/bpf/bpftool/xlated_dumper.c static const char *print_call_pcrel(struct dump_data *dd,
dd                143 tools/bpf/bpftool/xlated_dumper.c 	if (!dd->nr_jited_ksyms)
dd                145 tools/bpf/bpftool/xlated_dumper.c 		snprintf(dd->scratch_buff, sizeof(dd->scratch_buff),
dd                148 tools/bpf/bpftool/xlated_dumper.c 		snprintf(dd->scratch_buff, sizeof(dd->scratch_buff),
dd                151 tools/bpf/bpftool/xlated_dumper.c 		snprintf(dd->scratch_buff, sizeof(dd->scratch_buff),
dd                153 tools/bpf/bpftool/xlated_dumper.c 	return dd->scratch_buff;
dd                156 tools/bpf/bpftool/xlated_dumper.c static const char *print_call_helper(struct dump_data *dd,
dd                161 tools/bpf/bpftool/xlated_dumper.c 		snprintf(dd->scratch_buff, sizeof(dd->scratch_buff),
dd                164 tools/bpf/bpftool/xlated_dumper.c 		snprintf(dd->scratch_buff, sizeof(dd->scratch_buff),
dd                166 tools/bpf/bpftool/xlated_dumper.c 	return dd->scratch_buff;
dd                172 tools/bpf/bpftool/xlated_dumper.c 	struct dump_data *dd = private_data;
dd                173 tools/bpf/bpftool/xlated_dumper.c 	unsigned long address = dd->address_call_base + insn->imm;
dd                177 tools/bpf/bpftool/xlated_dumper.c 	    (__u32) insn->imm < dd->nr_jited_ksyms && dd->jited_ksyms)
dd                178 tools/bpf/bpftool/xlated_dumper.c 		address = dd->jited_ksyms[insn->imm];
dd                180 tools/bpf/bpftool/xlated_dumper.c 	sym = kernel_syms_search(dd, address);
dd                182 tools/bpf/bpftool/xlated_dumper.c 		return print_call_pcrel(dd, sym, address, insn);
dd                184 tools/bpf/bpftool/xlated_dumper.c 		return print_call_helper(dd, sym, address);
dd                191 tools/bpf/bpftool/xlated_dumper.c 	struct dump_data *dd = private_data;
dd                194 tools/bpf/bpftool/xlated_dumper.c 		snprintf(dd->scratch_buff, sizeof(dd->scratch_buff),
dd                197 tools/bpf/bpftool/xlated_dumper.c 		snprintf(dd->scratch_buff, sizeof(dd->scratch_buff),
dd                200 tools/bpf/bpftool/xlated_dumper.c 		snprintf(dd->scratch_buff, sizeof(dd->scratch_buff),
dd                202 tools/bpf/bpftool/xlated_dumper.c 	return dd->scratch_buff;
dd                205 tools/bpf/bpftool/xlated_dumper.c void dump_xlated_json(struct dump_data *dd, void *buf, unsigned int len,
dd                208 tools/bpf/bpftool/xlated_dumper.c 	const struct bpf_prog_linfo *prog_linfo = dd->prog_linfo;
dd                213 tools/bpf/bpftool/xlated_dumper.c 		.private_data	= dd,
dd                217 tools/bpf/bpftool/xlated_dumper.c 	struct btf *btf = dd->btf;
dd                224 tools/bpf/bpftool/xlated_dumper.c 	record = dd->func_info;
dd                243 tools/bpf/bpftool/xlated_dumper.c 				record = (void *)record + dd->finfo_rec_size;
dd                290 tools/bpf/bpftool/xlated_dumper.c void dump_xlated_plain(struct dump_data *dd, void *buf, unsigned int len,
dd                293 tools/bpf/bpftool/xlated_dumper.c 	const struct bpf_prog_linfo *prog_linfo = dd->prog_linfo;
dd                298 tools/bpf/bpftool/xlated_dumper.c 		.private_data	= dd,
dd                302 tools/bpf/bpftool/xlated_dumper.c 	struct btf *btf = dd->btf;
dd                308 tools/bpf/bpftool/xlated_dumper.c 	record = dd->func_info;
dd                322 tools/bpf/bpftool/xlated_dumper.c 				record = (void *)record + dd->finfo_rec_size;
dd                354 tools/bpf/bpftool/xlated_dumper.c void dump_xlated_for_graph(struct dump_data *dd, void *buf_start, void *buf_end,
dd                361 tools/bpf/bpftool/xlated_dumper.c 		.private_data	= dd,
dd                 29 tools/bpf/bpftool/xlated_dumper.h void kernel_syms_load(struct dump_data *dd);
dd                 30 tools/bpf/bpftool/xlated_dumper.h void kernel_syms_destroy(struct dump_data *dd);
dd                 31 tools/bpf/bpftool/xlated_dumper.h struct kernel_sym *kernel_syms_search(struct dump_data *dd, unsigned long key);
dd                 32 tools/bpf/bpftool/xlated_dumper.h void dump_xlated_json(struct dump_data *dd, void *buf, unsigned int len,
dd                 34 tools/bpf/bpftool/xlated_dumper.h void dump_xlated_plain(struct dump_data *dd, void *buf, unsigned int len,
dd                 36 tools/bpf/bpftool/xlated_dumper.h void dump_xlated_for_graph(struct dump_data *dd, void *buf, void *buf_end,
dd                 92 tools/testing/selftests/proc/read.c 			DIR *dd;
dd                110 tools/testing/selftests/proc/read.c 			dd = fdopendir(fd);
dd                111 tools/testing/selftests/proc/read.c 			if (!dd)
dd                113 tools/testing/selftests/proc/read.c 			f(dd, level + 1);
dd                114 tools/testing/selftests/proc/read.c 			closedir(dd);