queue              58 arch/m68k/emu/nfblock.c 	struct request_queue *queue;
queue              62 arch/m68k/emu/nfblock.c static blk_qc_t nfhd_make_request(struct request_queue *queue, struct bio *bio)
queue              64 arch/m68k/emu/nfblock.c 	struct nfhd_device *dev = queue->queuedata;
queue             121 arch/m68k/emu/nfblock.c 	dev->queue = blk_alloc_queue(GFP_KERNEL);
queue             122 arch/m68k/emu/nfblock.c 	if (dev->queue == NULL)
queue             125 arch/m68k/emu/nfblock.c 	dev->queue->queuedata = dev;
queue             126 arch/m68k/emu/nfblock.c 	blk_queue_make_request(dev->queue, nfhd_make_request);
queue             127 arch/m68k/emu/nfblock.c 	blk_queue_logical_block_size(dev->queue, bsize);
queue             139 arch/m68k/emu/nfblock.c 	dev->disk->queue = dev->queue;
queue             148 arch/m68k/emu/nfblock.c 	blk_cleanup_queue(dev->queue);
queue             191 arch/m68k/emu/nfblock.c 		blk_cleanup_queue(dev->queue);
queue             325 arch/mips/cavium-octeon/executive/cvmx-helper-rgmii.c 		int queue = cvmx_pko_get_base_queue(ipd_port) + i;
queue             326 arch/mips/cavium-octeon/executive/cvmx-helper-rgmii.c 		cvmx_write_csr(CVMX_PKO_REG_READ_IDX, queue);
queue             329 arch/mips/cavium-octeon/executive/cvmx-helper-rgmii.c 		pko_mem_queue_qos.s.qid = queue;
queue             437 arch/mips/cavium-octeon/executive/cvmx-helper-rgmii.c 		int queue = cvmx_pko_get_base_queue(ipd_port) + i;
queue             438 arch/mips/cavium-octeon/executive/cvmx-helper-rgmii.c 		cvmx_write_csr(CVMX_PKO_REG_READ_IDX, queue);
queue              95 arch/mips/cavium-octeon/executive/cvmx-helper-util.c static int cvmx_helper_setup_red_queue(int queue, int pass_thresh,
queue             107 arch/mips/cavium-octeon/executive/cvmx-helper-util.c 	cvmx_write_csr(CVMX_IPD_QOSX_RED_MARKS(queue), red_marks.u64);
queue             116 arch/mips/cavium-octeon/executive/cvmx-helper-util.c 	cvmx_write_csr(CVMX_IPD_RED_QUEX_PARAM(queue), red_param.u64);
queue             136 arch/mips/cavium-octeon/executive/cvmx-helper-util.c 	int queue;
queue             151 arch/mips/cavium-octeon/executive/cvmx-helper-util.c 	for (queue = 0; queue < 8; queue++)
queue             152 arch/mips/cavium-octeon/executive/cvmx-helper-util.c 		cvmx_helper_setup_red_queue(queue, pass_thresh, drop_thresh);
queue              70 arch/mips/cavium-octeon/executive/cvmx-pko.c 	int queue;
queue              76 arch/mips/cavium-octeon/executive/cvmx-pko.c 	for (queue = 0; queue < num_queues; queue++) {
queue              82 arch/mips/cavium-octeon/executive/cvmx-pko.c 		config.s.index		= queue;
queue              83 arch/mips/cavium-octeon/executive/cvmx-pko.c 		config.s.qid		= base_queue + queue;
queue              85 arch/mips/cavium-octeon/executive/cvmx-pko.c 		config.s.tail		= (queue == (num_queues - 1));
queue              86 arch/mips/cavium-octeon/executive/cvmx-pko.c 		config.s.s_tail		= (queue == static_priority_end);
queue              88 arch/mips/cavium-octeon/executive/cvmx-pko.c 		config.s.static_q	= (queue <= static_priority_end);
queue              92 arch/mips/cavium-octeon/executive/cvmx-pko.c 				CVMX_CMD_QUEUE_PKO(base_queue + queue),
queue             101 arch/mips/cavium-octeon/executive/cvmx-pko.c 			num_queues, queue);
queue             104 arch/mips/cavium-octeon/executive/cvmx-pko.c 				CVMX_CMD_QUEUE_PKO(base_queue + queue));
queue             298 arch/mips/cavium-octeon/executive/cvmx-pko.c 	int queue;
queue             302 arch/mips/cavium-octeon/executive/cvmx-pko.c 	for (queue = 0; queue < CVMX_PKO_MAX_OUTPUT_QUEUES; queue++) {
queue             307 arch/mips/cavium-octeon/executive/cvmx-pko.c 		config.s.queue = queue & 0x7f;
queue             313 arch/mips/cavium-octeon/executive/cvmx-pko.c 			config1.s.qid7 = queue >> 7;
queue             317 arch/mips/cavium-octeon/executive/cvmx-pko.c 		cvmx_cmd_queue_shutdown(CVMX_CMD_QUEUE_PKO(queue));
queue             345 arch/mips/cavium-octeon/executive/cvmx-pko.c 	uint64_t queue;
queue             374 arch/mips/cavium-octeon/executive/cvmx-pko.c 		for (queue = 0; queue < num_queues; queue++) {
queue             377 arch/mips/cavium-octeon/executive/cvmx-pko.c 			    && priority[queue] ==
queue             379 arch/mips/cavium-octeon/executive/cvmx-pko.c 				static_priority_base = queue;
queue             383 arch/mips/cavium-octeon/executive/cvmx-pko.c 			    && priority[queue] != CVMX_PKO_QUEUE_STATIC_PRIORITY
queue             384 arch/mips/cavium-octeon/executive/cvmx-pko.c 			    && queue)
queue             385 arch/mips/cavium-octeon/executive/cvmx-pko.c 				static_priority_end = queue - 1;
queue             388 arch/mips/cavium-octeon/executive/cvmx-pko.c 				 && queue == num_queues - 1)
queue             390 arch/mips/cavium-octeon/executive/cvmx-pko.c 				static_priority_end = queue;
queue             398 arch/mips/cavium-octeon/executive/cvmx-pko.c 			    && (int)queue > static_priority_end
queue             399 arch/mips/cavium-octeon/executive/cvmx-pko.c 			    && priority[queue] ==
queue             405 arch/mips/cavium-octeon/executive/cvmx-pko.c 					(int)queue, static_priority_end);
queue             436 arch/mips/cavium-octeon/executive/cvmx-pko.c 	for (queue = 0; queue < num_queues; queue++) {
queue             440 arch/mips/cavium-octeon/executive/cvmx-pko.c 		config1.s.idx3 = queue >> 3;
queue             441 arch/mips/cavium-octeon/executive/cvmx-pko.c 		config1.s.qid7 = (base_queue + queue) >> 7;
queue             444 arch/mips/cavium-octeon/executive/cvmx-pko.c 		config.s.tail = queue == (num_queues - 1);
queue             445 arch/mips/cavium-octeon/executive/cvmx-pko.c 		config.s.index = queue;
queue             447 arch/mips/cavium-octeon/executive/cvmx-pko.c 		config.s.queue = base_queue + queue;
queue             451 arch/mips/cavium-octeon/executive/cvmx-pko.c 			config.s.static_q = (int)queue <= static_priority_end;
queue             452 arch/mips/cavium-octeon/executive/cvmx-pko.c 			config.s.s_tail = (int)queue == static_priority_end;
queue             459 arch/mips/cavium-octeon/executive/cvmx-pko.c 		switch ((int)priority[queue]) {
queue             496 arch/mips/cavium-octeon/executive/cvmx-pko.c 				(unsigned long long)priority[queue]);
queue             505 arch/mips/cavium-octeon/executive/cvmx-pko.c 						      (base_queue + queue),
queue             535 arch/mips/cavium-octeon/executive/cvmx-pko.c 						  (base_queue + queue));
queue            1176 arch/mips/include/asm/octeon/cvmx-pko-defs.h 		uint64_t queue:7;
queue            1178 arch/mips/include/asm/octeon/cvmx-pko-defs.h 		uint64_t queue:7;
queue             152 arch/mips/include/asm/octeon/cvmx-pko.h 		uint64_t queue:9;
queue             157 arch/mips/include/asm/octeon/cvmx-pko.h 	        uint64_t queue:9;
queue             326 arch/mips/include/asm/octeon/cvmx-pko.h static inline void cvmx_pko_doorbell(uint64_t port, uint64_t queue,
queue             336 arch/mips/include/asm/octeon/cvmx-pko.h 	ptr.s.queue = queue;
queue             378 arch/mips/include/asm/octeon/cvmx-pko.h static inline void cvmx_pko_send_packet_prepare(uint64_t port, uint64_t queue,
queue             396 arch/mips/include/asm/octeon/cvmx-pko.h 		    (CVMX_TAG_SUBGROUP_MASK & queue);
queue             421 arch/mips/include/asm/octeon/cvmx-pko.h 	uint64_t queue,
queue             429 arch/mips/include/asm/octeon/cvmx-pko.h 	result = cvmx_cmd_queue_write2(CVMX_CMD_QUEUE_PKO(queue),
queue             433 arch/mips/include/asm/octeon/cvmx-pko.h 		cvmx_pko_doorbell(port, queue, 2);
queue             464 arch/mips/include/asm/octeon/cvmx-pko.h 	uint64_t queue,
queue             473 arch/mips/include/asm/octeon/cvmx-pko.h 	result = cvmx_cmd_queue_write3(CVMX_CMD_QUEUE_PKO(queue),
queue             477 arch/mips/include/asm/octeon/cvmx-pko.h 		cvmx_pko_doorbell(port, queue, 3);
queue             492 arch/powerpc/include/asm/fsl_hcalls.h static inline unsigned int fh_err_get_info(int queue, uint32_t *bufsize,
queue             503 arch/powerpc/include/asm/fsl_hcalls.h 	r3 = queue;
queue             142 arch/powerpc/kvm/mpic.c 	unsigned long queue[BITS_TO_LONGS((MAX_IRQ + 63) & ~63)];
queue             284 arch/powerpc/kvm/mpic.c 	set_bit(n_IRQ, q->queue);
queue             289 arch/powerpc/kvm/mpic.c 	clear_bit(n_IRQ, q->queue);
queue             299 arch/powerpc/kvm/mpic.c 		irq = find_next_bit(q->queue, opp->max_irq, irq + 1);
queue             142 arch/powerpc/sysdev/xive/common.c 		irq = xive_read_eq(&xc->queue[prio], just_peek);
queue             167 arch/powerpc/sysdev/xive/common.c 		q = &xc->queue[prio];
queue             255 arch/powerpc/sysdev/xive/common.c 		xive_dump_eq("EQ", &xc->queue[xive_irq_priority]);
queue             467 arch/powerpc/sysdev/xive/common.c 	struct xive_q *q = &xc->queue[xive_irq_priority];
queue             491 arch/powerpc/sysdev/xive/common.c 	struct xive_q *q = &xc->queue[xive_irq_priority];
queue            1317 arch/powerpc/sysdev/xive/common.c 	if (xc->queue[xive_irq_priority].qpage)
queue            1326 arch/powerpc/sysdev/xive/common.c 	if (!xc->queue[xive_irq_priority].qpage)
queue             217 arch/powerpc/sysdev/xive/native.c 	struct xive_q *q = &xc->queue[prio];
queue             230 arch/powerpc/sysdev/xive/native.c 	struct xive_q *q = &xc->queue[prio];
queue             512 arch/powerpc/sysdev/xive/spapr.c 	struct xive_q *q = &xc->queue[prio];
queue             526 arch/powerpc/sysdev/xive/spapr.c 	struct xive_q *q = &xc->queue[prio];
queue              27 arch/powerpc/sysdev/xive/xive-internal.h 	struct xive_q queue[XIVE_MAX_QUEUES];
queue             160 arch/um/drivers/ubd_kern.c 	struct request_queue *queue;
queue             822 arch/um/drivers/ubd_kern.c 		blk_queue_max_hw_sectors(ubd_dev->queue, 8 * sizeof(long));
queue             846 arch/um/drivers/ubd_kern.c 		ubd_dev->queue->limits.discard_granularity = SECTOR_SIZE;
queue             847 arch/um/drivers/ubd_kern.c 		ubd_dev->queue->limits.discard_alignment = SECTOR_SIZE;
queue             848 arch/um/drivers/ubd_kern.c 		blk_queue_max_discard_sectors(ubd_dev->queue, UBD_MAX_REQUEST);
queue             849 arch/um/drivers/ubd_kern.c 		blk_queue_max_write_zeroes_sectors(ubd_dev->queue, UBD_MAX_REQUEST);
queue             850 arch/um/drivers/ubd_kern.c 		blk_queue_flag_set(QUEUE_FLAG_DISCARD, ubd_dev->queue);
queue             852 arch/um/drivers/ubd_kern.c 	blk_queue_flag_set(QUEUE_FLAG_NONROT, ubd_dev->queue);
queue             863 arch/um/drivers/ubd_kern.c 	blk_cleanup_queue(ubd_dev->queue);
queue             898 arch/um/drivers/ubd_kern.c 	disk->queue = ubd_devs[unit].queue;
queue             938 arch/um/drivers/ubd_kern.c 	ubd_dev->queue = blk_mq_init_queue(&ubd_dev->tag_set);
queue             939 arch/um/drivers/ubd_kern.c 	if (IS_ERR(ubd_dev->queue)) {
queue             940 arch/um/drivers/ubd_kern.c 		err = PTR_ERR(ubd_dev->queue);
queue             944 arch/um/drivers/ubd_kern.c 	ubd_dev->queue->queuedata = ubd_dev;
queue             945 arch/um/drivers/ubd_kern.c 	blk_queue_write_cache(ubd_dev->queue, true, false);
queue             947 arch/um/drivers/ubd_kern.c 	blk_queue_max_segments(ubd_dev->queue, MAX_SG);
queue             971 arch/um/drivers/ubd_kern.c 	if (!(IS_ERR(ubd_dev->queue)))
queue             972 arch/um/drivers/ubd_kern.c 		blk_cleanup_queue(ubd_dev->queue);
queue            1315 arch/um/drivers/ubd_kern.c 	struct ubd *dev = hctx->queue->queuedata;
queue            1378 arch/um/drivers/ubd_kern.c 	struct ubd *ubd_dev = hctx->queue->queuedata;
queue             477 arch/x86/kvm/x86.c 	queue:
queue             552 arch/x86/kvm/x86.c 		goto queue;
queue              30 arch/xtensa/platforms/iss/simdisk.c 	struct request_queue *queue;
queue             270 arch/xtensa/platforms/iss/simdisk.c 	dev->queue = blk_alloc_queue(GFP_KERNEL);
queue             271 arch/xtensa/platforms/iss/simdisk.c 	if (dev->queue == NULL) {
queue             276 arch/xtensa/platforms/iss/simdisk.c 	blk_queue_make_request(dev->queue, simdisk_make_request);
queue             277 arch/xtensa/platforms/iss/simdisk.c 	dev->queue->queuedata = dev;
queue             287 arch/xtensa/platforms/iss/simdisk.c 	dev->gd->queue = dev->queue;
queue             297 arch/xtensa/platforms/iss/simdisk.c 	blk_cleanup_queue(dev->queue);
queue             298 arch/xtensa/platforms/iss/simdisk.c 	dev->queue = NULL;
queue             352 arch/xtensa/platforms/iss/simdisk.c 	if (dev->queue)
queue             353 arch/xtensa/platforms/iss/simdisk.c 		blk_cleanup_queue(dev->queue);
queue             572 block/bfq-cgroup.c 	blkg = blkg_lookup(blkcg, bfqd->queue);
queue             926 block/bfq-cgroup.c 	list_for_each_entry(blkg, &bfqd->queue->blkg_list, q_node) {
queue            1215 block/bfq-cgroup.c 	ret = blkcg_activate_policy(bfqd->queue, &blkcg_policy_bfq);
queue            1219 block/bfq-cgroup.c 	return blkg_to_bfqg(bfqd->queue->root_blkg);
queue             233 block/bfq-iosched.c 	 (!blk_queue_nonrot(bfqd->queue) ||		\
queue             425 block/bfq-iosched.c 		blk_mq_run_hw_queues(bfqd->queue, true);
queue            2216 block/bfq-iosched.c 	struct request_queue *q = hctx->queue;
queue            3741 block/bfq-iosched.c 		if (blk_queue_nonrot(bfqd->queue))
queue            4082 block/bfq-iosched.c 		!blk_queue_nonrot(bfqd->queue) && !bfqd->hw_tag,
queue            4116 block/bfq-iosched.c 		((!blk_queue_nonrot(bfqd->queue) || !bfqd->hw_tag) &&
queue            4314 block/bfq-iosched.c 			if (blk_queue_nonrot(bfqd->queue) &&
queue            4607 block/bfq-iosched.c 	bfq_dispatch_remove(bfqd->queue, rq);
queue            4641 block/bfq-iosched.c 	struct bfq_data *bfqd = hctx->queue->elevator->elevator_data;
queue            4653 block/bfq-iosched.c 	struct bfq_data *bfqd = hctx->queue->elevator->elevator_data;
queue            4793 block/bfq-iosched.c 	struct bfq_data *bfqd = hctx->queue->elevator->elevator_data;
queue            4810 block/bfq-iosched.c 	bfq_update_dispatch_stats(hctx->queue, rq, in_serv_queue,
queue            4980 block/bfq-iosched.c 		dev_err(bfqq->bfqd->queue->backing_dev_info->dev,
queue            5152 block/bfq-iosched.c 				     bfqd->queue->node);
queue            5491 block/bfq-iosched.c 	struct request_queue *q = hctx->queue;
queue            5593 block/bfq-iosched.c 		blk_queue_nonrot(bfqd->queue) && bfqd->hw_tag;
queue            6364 block/bfq-iosched.c 	struct bfq_data *bfqd = hctx->queue->elevator->elevator_data;
queue            6396 block/bfq-iosched.c 	blkcg_deactivate_policy(bfqd->queue, &blkcg_policy_bfq);
queue            6465 block/bfq-iosched.c 	bfqd->queue = q;
queue            6481 block/bfq-iosched.c 	bfqd->nonrot_with_queueing = blk_queue_nonrot(bfqd->queue);
queue            6519 block/bfq-iosched.c 	bfqd->rate_dur_prod = ref_rate[blk_queue_nonrot(bfqd->queue)] *
queue            6520 block/bfq-iosched.c 		ref_wr_duration[blk_queue_nonrot(bfqd->queue)];
queue            6521 block/bfq-iosched.c 	bfqd->peak_rate = ref_rate[blk_queue_nonrot(bfqd->queue)] * 2 / 3;
queue             445 block/bfq-iosched.h 	struct request_queue *queue;
queue            1069 block/bfq-iosched.h 	blk_add_cgroup_trace_msg((bfqd)->queue,				\
queue            1076 block/bfq-iosched.h 	blk_add_cgroup_trace_msg((bfqd)->queue,				\
queue            1085 block/bfq-iosched.h 	blk_add_trace_msg((bfqd)->queue, "bfq%s%c " fmt, pid_str,	\
queue            1094 block/bfq-iosched.h 	blk_add_trace_msg((bfqd)->queue, "bfq " fmt, ##args)
queue             134 block/bio-integrity.c 	    bvec_gap_to_prev(bio->bi_disk->queue,
queue             203 block/bio-integrity.c 	struct request_queue *q = bio->bi_disk->queue;
queue            1847 block/bio.c    		rq_qos_done_bio(bio->bi_disk->queue, bio);
queue            1863 block/bio.c    		trace_block_bio_complete(bio->bi_disk->queue, bio,
queue            2101 block/bio.c    	struct request_queue *q = bio->bi_disk->queue;
queue             808 block/blk-cgroup.c 	__acquires(rcu) __acquires(&disk->queue->queue_lock)
queue             819 block/blk-cgroup.c 	q = disk->queue;
queue             913 block/blk-cgroup.c 	__releases(&ctx->disk->queue->queue_lock) __releases(rcu)
queue             915 block/blk-cgroup.c 	spin_unlock_irq(&ctx->disk->queue->queue_lock);
queue             859 block/blk-core.c 		trace_block_bio_remap(bio->bi_disk->queue, bio, part_devt(p),
queue             879 block/blk-core.c 	q = bio->bi_disk->queue;
queue            1054 block/blk-core.c 		struct request_queue *q = bio->bi_disk->queue;
queue            1074 block/blk-core.c 				if (q == bio->bi_disk->queue)
queue            1110 block/blk-core.c 	struct request_queue *q = bio->bi_disk->queue;
queue            1158 block/blk-core.c 			count = queue_logical_block_size(bio->bi_disk->queue) >> 9;
queue             123 block/blk-integrity.c 	struct blk_integrity *b1 = &gd1->queue->integrity;
queue             124 block/blk-integrity.c 	struct blk_integrity *b2 = &gd2->queue->integrity;
queue             227 block/blk-integrity.c 	struct blk_integrity *bi = &disk->queue->integrity;
queue             239 block/blk-integrity.c 	struct blk_integrity *bi = &disk->queue->integrity;
queue             401 block/blk-integrity.c 	struct blk_integrity *bi = &disk->queue->integrity;
queue             406 block/blk-integrity.c 		ilog2(queue_logical_block_size(disk->queue));
queue             411 block/blk-integrity.c 	disk->queue->backing_dev_info->capabilities |= BDI_CAP_STABLE_WRITES;
queue             424 block/blk-integrity.c 	disk->queue->backing_dev_info->capabilities &= ~BDI_CAP_STABLE_WRITES;
queue             425 block/blk-integrity.c 	memset(&disk->queue->integrity, 0, sizeof(struct blk_integrity));
queue            2221 block/blk-iocost.c 	ioc = q_to_ioc(disk->queue);
queue            2223 block/blk-iocost.c 		ret = blk_iocost_init(disk->queue);
queue            2226 block/blk-iocost.c 		ioc = q_to_ioc(disk->queue);
queue            2387 block/blk-iocost.c 	ioc = q_to_ioc(disk->queue);
queue            2389 block/blk-iocost.c 		ret = blk_iocost_init(disk->queue);
queue            2392 block/blk-iocost.c 		ioc = q_to_ioc(disk->queue);
queue             414 block/blk-mq-debugfs.c 	blk_mq_tagset_busy_iter(hctx->queue->tag_set, hctx_show_busy_rq,
queue             463 block/blk-mq-debugfs.c 	struct request_queue *q = hctx->queue;
queue             480 block/blk-mq-debugfs.c 	struct request_queue *q = hctx->queue;
queue             497 block/blk-mq-debugfs.c 	struct request_queue *q = hctx->queue;
queue             514 block/blk-mq-debugfs.c 	struct request_queue *q = hctx->queue;
queue              30 block/blk-mq-pci.c 	unsigned int queue, cpu;
queue              32 block/blk-mq-pci.c 	for (queue = 0; queue < qmap->nr_queues; queue++) {
queue              33 block/blk-mq-pci.c 		mask = pci_irq_get_affinity(pdev, queue + offset);
queue              38 block/blk-mq-pci.c 			qmap->mq_map[cpu] = qmap->queue_offset + queue;
queue              28 block/blk-mq-rdma.c 	unsigned int queue, cpu;
queue              30 block/blk-mq-rdma.c 	for (queue = 0; queue < map->nr_queues; queue++) {
queue              31 block/blk-mq-rdma.c 		mask = ib_get_vector_affinity(dev, first_vec + queue);
queue              36 block/blk-mq-rdma.c 			map->mq_map[cpu] = map->queue_offset + queue;
queue              90 block/blk-mq-sched.c 	struct request_queue *q = hctx->queue;
queue             136 block/blk-mq-sched.c 	struct request_queue *q = hctx->queue;
queue             172 block/blk-mq-sched.c 	struct request_queue *q = hctx->queue;
queue             448 block/blk-mq-sched.c 	struct request_queue *q = hctx->queue;
queue             457 block/blk-mq-sched.c 	e = hctx->queue->elevator;
queue              75 block/blk-mq-sched.h 	struct elevator_queue *e = hctx->queue->elevator;
queue              72 block/blk-mq-sysfs.c 	q = ctx->queue;
queue              95 block/blk-mq-sysfs.c 	q = ctx->queue;
queue             118 block/blk-mq-sysfs.c 	q = hctx->queue;
queue             142 block/blk-mq-sysfs.c 	q = hctx->queue;
queue             253 block/blk-mq-sysfs.c 	struct request_queue *q = hctx->queue;
queue             231 block/blk-mq-tag.c 	if (rq && rq->q == hctx->queue)
queue             505 block/blk-mq-tag.c 		struct blk_mq_tag_set *set = hctx->queue->tag_set;
queue              28 block/blk-mq-virtio.c 	unsigned int queue, cpu;
queue              33 block/blk-mq-virtio.c 	for (queue = 0; queue < qmap->nr_queues; queue++) {
queue              34 block/blk-mq-virtio.c 		mask = vdev->config->get_vq_affinity(vdev, first_vec + queue);
queue              39 block/blk-mq-virtio.c 			qmap->mq_map[cpu] = qmap->queue_offset + queue;
queue             835 block/blk-mq.c 	if (rq->state == MQ_RQ_IN_FLIGHT && rq->q == hctx->queue) {
queue            1190 block/blk-mq.c 	if (hctx->queue->elevator)
queue            1429 block/blk-mq.c 	if (hctx->queue->nr_hw_queues == 1)
queue            1505 block/blk-mq.c 	need_run = !blk_queue_quiesced(hctx->queue) &&
queue            1650 block/blk-mq.c 	trace_block_rq_insert(hctx->queue, rq);
queue            1702 block/blk-mq.c 		trace_block_rq_insert(hctx->queue, rq);
queue            1935 block/blk-mq.c 	if (!list_empty(list) && hctx->queue->mq_ops->commit_rqs)
queue            1936 block/blk-mq.c 		hctx->queue->mq_ops->commit_rqs(hctx);
queue            2247 block/blk-mq.c 	ctx = __blk_mq_get_ctx(hctx->queue, cpu);
queue            2372 block/blk-mq.c 	hctx->queue = q;
queue            2434 block/blk-mq.c 		__ctx->queue = q;
queue              35 block/blk-mq.h 	struct request_queue	*queue;
queue             194 block/blk-mq.h 	struct request_queue *q = hctx->queue;
queue             202 block/blk-mq.h 	struct request_queue *q = hctx->queue;
queue             656 block/blk-settings.c 	struct request_queue *t = disk->queue;
queue             942 block/blk-sysfs.c 	struct request_queue *q = disk->queue;
queue            1031 block/blk-sysfs.c 	struct request_queue *q = disk->queue;
queue             199 block/blk-throttle.c 	struct request_queue *queue;
queue             371 block/blk-throttle.c 	if (likely(!blk_trace_note_message_enabled(__td->queue)))	\
queue             374 block/blk-throttle.c 		blk_add_cgroup_trace_msg(__td->queue,			\
queue             377 block/blk-throttle.c 		blk_add_trace_msg(__td->queue, "throtl " fmt, ##args);	\
queue             579 block/blk-throttle.c 	blkg_for_each_descendant_post(blkg, pos_css, td->queue->root_blkg) {
queue            1240 block/blk-throttle.c 	struct request_queue *q = td->queue;
queue            1308 block/blk-throttle.c 	struct request_queue *q = td->queue;
queue            1385 block/blk-throttle.c 			global ? tg->td->queue->root_blkg : tg_to_blkg(tg)) {
queue            1856 block/blk-throttle.c 	blkg_for_each_descendant_post(blkg, pos_css, td->queue->root_blkg) {
queue            1902 block/blk-throttle.c 	blkg_for_each_descendant_post(blkg, pos_css, td->queue->root_blkg) {
queue            2042 block/blk-throttle.c 	if (!blk_queue_nonrot(td->queue))
queue            2235 block/blk-throttle.c 	    !blk_queue_nonrot(td->queue))
queue            2353 block/blk-throttle.c 	blkg_for_each_descendant_post(blkg, pos_css, td->queue->root_blkg)
queue            2397 block/blk-throttle.c 	td->queue = q;
queue              45 block/blk-timeout.c 	int set = test_bit(QUEUE_FLAG_FAIL_IO, &disk->queue->queue_flags);
queue              57 block/blk-timeout.c 		struct request_queue *q = disk->queue;
queue             125 block/blk-zoned.c 	struct request_queue *q = disk->queue;
queue             455 block/blk-zoned.c 	struct request_queue *q = disk->queue;
queue             264 block/bsg-lib.c 	struct request_queue *q = hctx->queue;
queue              30 block/bsg.c    	struct request_queue *queue;
queue             212 block/bsg.c    	struct request_queue *q = bd->queue;
queue             252 block/bsg.c    	bd->queue = rq;
queue             271 block/bsg.c    		if (bd->queue == q) {
queue             297 block/bsg.c    	bd = __bsg_get_device(iminor(inode), bcd->queue);
queue             299 block/bsg.c    		bd = bsg_add_device(inode, bcd->queue, file);
queue             334 block/bsg.c    	int queue;
queue             336 block/bsg.c    	if (get_user(queue, uarg))
queue             338 block/bsg.c    	if (queue < 1)
queue             342 block/bsg.c    	bd->max_queue = queue;
queue             373 block/bsg.c    		return scsi_cmd_ioctl(bd->queue, NULL, file->f_mode, cmd, uarg);
queue             375 block/bsg.c    		return bsg_sg_io(bd->queue, file->f_mode, uarg);
queue             435 block/bsg.c    	bcd->queue = q;
queue             671 block/genhd.c  	if (disk->queue->backing_dev_info->dev) {
queue             673 block/genhd.c  			  &disk->queue->backing_dev_info->dev->kobj,
queue             705 block/genhd.c  		elevator_init_mq(disk->queue);
queue             739 block/genhd.c  		ret = bdi_register_owner(disk->queue->backing_dev_info,
queue             753 block/genhd.c  	WARN_ON_ONCE(!blk_get_queue(disk->queue));
queue             804 block/genhd.c  	if (disk->queue) {
queue             810 block/genhd.c  			bdi_unregister(disk->queue->backing_dev_info);
queue            1171 block/genhd.c  	return sprintf(buf, "%d\n", queue_alignment_offset(disk->queue));
queue            1180 block/genhd.c  	return sprintf(buf, "%d\n", queue_discard_alignment(disk->queue));
queue            1332 block/genhd.c  	if (disk->queue)
queue            1333 block/genhd.c  		blk_put_queue(disk->queue);
queue            1383 block/genhd.c  		inflight = part_in_flight(gp->queue, hd);
queue             463 block/kyber-iosched.c 	struct kyber_queue_data *kqd = hctx->queue->elevator->elevator_data;
queue             569 block/kyber-iosched.c 	struct blk_mq_ctx *ctx = blk_mq_get_ctx(hctx->queue);
queue             576 block/kyber-iosched.c 	merged = blk_mq_bio_list_merge(hctx->queue, rq_list, bio, nr_segs);
queue             801 block/kyber-iosched.c 	struct kyber_queue_data *kqd = hctx->queue->elevator->elevator_data;
queue             383 block/mq-deadline.c 	struct deadline_data *dd = hctx->queue->elevator->elevator_data;
queue             465 block/mq-deadline.c 	struct request_queue *q = hctx->queue;
queue             486 block/mq-deadline.c 	struct request_queue *q = hctx->queue;
queue             526 block/mq-deadline.c 	struct request_queue *q = hctx->queue;
queue             580 block/mq-deadline.c 	struct deadline_data *dd = hctx->queue->elevator->elevator_data;
queue             122 block/partition-generic.c 	struct request_queue *q = part_to_disk(p)->queue;
queue             153 block/partition-generic.c 	struct request_queue *q = part_to_disk(p)->queue;
queue             343 block/partition-generic.c 		queue_limit_alignment_offset(&disk->queue->limits, start);
queue             345 block/partition-generic.c 		queue_limit_discard_alignment(&disk->queue->limits, start);
queue             696 block/scsi_ioctl.c 	return scsi_cmd_ioctl(bd->bd_disk->queue, bd->bd_disk, mode, cmd, arg);
queue             895 crypto/algapi.c void crypto_init_queue(struct crypto_queue *queue, unsigned int max_qlen)
queue             897 crypto/algapi.c 	INIT_LIST_HEAD(&queue->list);
queue             898 crypto/algapi.c 	queue->backlog = &queue->list;
queue             899 crypto/algapi.c 	queue->qlen = 0;
queue             900 crypto/algapi.c 	queue->max_qlen = max_qlen;
queue             904 crypto/algapi.c int crypto_enqueue_request(struct crypto_queue *queue,
queue             909 crypto/algapi.c 	if (unlikely(queue->qlen >= queue->max_qlen)) {
queue             915 crypto/algapi.c 		if (queue->backlog == &queue->list)
queue             916 crypto/algapi.c 			queue->backlog = &request->list;
queue             919 crypto/algapi.c 	queue->qlen++;
queue             920 crypto/algapi.c 	list_add_tail(&request->list, &queue->list);
queue             927 crypto/algapi.c struct crypto_async_request *crypto_dequeue_request(struct crypto_queue *queue)
queue             931 crypto/algapi.c 	if (unlikely(!queue->qlen))
queue             934 crypto/algapi.c 	queue->qlen--;
queue             936 crypto/algapi.c 	if (queue->backlog != &queue->list)
queue             937 crypto/algapi.c 		queue->backlog = queue->backlog->next;
queue             939 crypto/algapi.c 	request = queue->list.next;
queue              37 crypto/cryptd.c 	struct crypto_queue queue;
queue              47 crypto/cryptd.c 	struct cryptd_queue *queue;
queue              52 crypto/cryptd.c 	struct cryptd_queue *queue;
queue              57 crypto/cryptd.c 	struct cryptd_queue *queue;
queue              62 crypto/cryptd.c 	struct cryptd_queue *queue;
queue              95 crypto/cryptd.c static int cryptd_init_queue(struct cryptd_queue *queue,
queue             101 crypto/cryptd.c 	queue->cpu_queue = alloc_percpu(struct cryptd_cpu_queue);
queue             102 crypto/cryptd.c 	if (!queue->cpu_queue)
queue             105 crypto/cryptd.c 		cpu_queue = per_cpu_ptr(queue->cpu_queue, cpu);
queue             106 crypto/cryptd.c 		crypto_init_queue(&cpu_queue->queue, max_cpu_qlen);
queue             113 crypto/cryptd.c static void cryptd_fini_queue(struct cryptd_queue *queue)
queue             119 crypto/cryptd.c 		cpu_queue = per_cpu_ptr(queue->cpu_queue, cpu);
queue             120 crypto/cryptd.c 		BUG_ON(cpu_queue->queue.qlen);
queue             122 crypto/cryptd.c 	free_percpu(queue->cpu_queue);
queue             125 crypto/cryptd.c static int cryptd_enqueue_request(struct cryptd_queue *queue,
queue             133 crypto/cryptd.c 	cpu_queue = this_cpu_ptr(queue->cpu_queue);
queue             134 crypto/cryptd.c 	err = crypto_enqueue_request(&cpu_queue->queue, request);
queue             171 crypto/cryptd.c 	backlog = crypto_get_backlog(&cpu_queue->queue);
queue             172 crypto/cryptd.c 	req = crypto_dequeue_request(&cpu_queue->queue);
queue             183 crypto/cryptd.c 	if (cpu_queue->queue.qlen)
queue             191 crypto/cryptd.c 	return ictx->queue;
queue             344 crypto/cryptd.c 	struct cryptd_queue *queue;
queue             346 crypto/cryptd.c 	queue = cryptd_get_queue(crypto_skcipher_tfm(tfm));
queue             350 crypto/cryptd.c 	return cryptd_enqueue_request(queue, &req->base);
queue             398 crypto/cryptd.c 				  struct cryptd_queue *queue)
queue             422 crypto/cryptd.c 	ctx->queue = queue;
queue             510 crypto/cryptd.c 	struct cryptd_queue *queue =
queue             516 crypto/cryptd.c 	return cryptd_enqueue_request(queue, &req->base);
queue             670 crypto/cryptd.c 			      struct cryptd_queue *queue)
queue             694 crypto/cryptd.c 	ctx->queue = queue;
queue             810 crypto/cryptd.c 	struct cryptd_queue *queue = cryptd_get_queue(crypto_aead_tfm(tfm));
queue             814 crypto/cryptd.c 	return cryptd_enqueue_request(queue, &req->base);
queue             854 crypto/cryptd.c 			      struct cryptd_queue *queue)
queue             875 crypto/cryptd.c 	ctx->queue = queue;
queue             911 crypto/cryptd.c static struct cryptd_queue queue;
queue             923 crypto/cryptd.c 		return cryptd_create_skcipher(tmpl, tb, &queue);
queue             925 crypto/cryptd.c 		return cryptd_create_hash(tmpl, tb, &queue);
queue             927 crypto/cryptd.c 		return cryptd_create_aead(tmpl, tb, &queue);
queue            1129 crypto/cryptd.c 	err = cryptd_init_queue(&queue, cryptd_max_cpu_qlen);
queue            1140 crypto/cryptd.c 	cryptd_fini_queue(&queue);
queue            1149 crypto/cryptd.c 	cryptd_fini_queue(&queue);
queue              87 crypto/crypto_engine.c 	if (!crypto_queue_len(&engine->queue) || !engine->running) {
queue             112 crypto/crypto_engine.c 	backlog = crypto_get_backlog(&engine->queue);
queue             113 crypto/crypto_engine.c 	async_req = crypto_dequeue_request(&engine->queue);
queue             195 crypto/crypto_engine.c 	ret = crypto_enqueue_request(&engine->queue, req);
queue             397 crypto/crypto_engine.c 	while ((crypto_queue_len(&engine->queue) || engine->busy) && limit--) {
queue             403 crypto/crypto_engine.c 	if (crypto_queue_len(&engine->queue) || engine->busy)
queue             448 crypto/crypto_engine.c 	crypto_init_queue(&engine->queue, CRYPTO_ENGINE_MAX_QLEN);
queue            1061 drivers/acpi/osl.c 	struct workqueue_struct *queue;
queue            1098 drivers/acpi/osl.c 		queue = kacpi_notify_wq;
queue            1101 drivers/acpi/osl.c 		queue = kacpid_wq;
queue            1118 drivers/acpi/osl.c 	ret = queue_work_on(0, queue, &dpc->work);
queue             577 drivers/atm/firestream.c static inline struct FS_QENTRY *get_qentry (struct fs_dev *dev, struct queue *q)
queue             583 drivers/atm/firestream.c static void submit_qentry (struct fs_dev *dev, struct queue *q, struct FS_QENTRY *qe)
queue             627 drivers/atm/firestream.c static void submit_queue (struct fs_dev *dev, struct queue *q, 
queue             654 drivers/atm/firestream.c static void submit_command (struct fs_dev *dev, struct queue *q, 
queue             666 drivers/atm/firestream.c static void process_return_queue (struct fs_dev *dev, struct queue *q)
queue             692 drivers/atm/firestream.c static void process_txdone_queue (struct fs_dev *dev, struct queue *q)
queue             762 drivers/atm/firestream.c static void process_incoming (struct fs_dev *dev, struct queue *q)
queue            1393 drivers/atm/firestream.c static int init_q(struct fs_dev *dev, struct queue *txq, int queue,
queue            1402 drivers/atm/firestream.c 		    queue, nentries);
queue            1409 drivers/atm/firestream.c 	write_fs (dev, Q_SA(queue), virt_to_bus(p));
queue            1410 drivers/atm/firestream.c 	write_fs (dev, Q_EA(queue), virt_to_bus(p+nentries-1));
queue            1411 drivers/atm/firestream.c 	write_fs (dev, Q_WP(queue), virt_to_bus(p));
queue            1412 drivers/atm/firestream.c 	write_fs (dev, Q_RP(queue), virt_to_bus(p));
queue            1417 drivers/atm/firestream.c 		write_fs (dev, Q_CNF(queue), 0 ); 
queue            1422 drivers/atm/firestream.c 	txq->offset = queue; 
queue            1429 drivers/atm/firestream.c static int init_fp(struct fs_dev *dev, struct freepool *fp, int queue,
queue            1434 drivers/atm/firestream.c 	fs_dprintk (FS_DEBUG_INIT, "Initializing free pool at %x:\n", queue);
queue            1436 drivers/atm/firestream.c 	write_fs (dev, FP_CNF(queue), (bufsize * RBFP_RBS) | RBFP_RBSVAL | RBFP_CME);
queue            1437 drivers/atm/firestream.c 	write_fs (dev, FP_SA(queue),  0);
queue            1438 drivers/atm/firestream.c 	write_fs (dev, FP_EA(queue),  0);
queue            1439 drivers/atm/firestream.c 	write_fs (dev, FP_CTU(queue), 0);
queue            1440 drivers/atm/firestream.c 	write_fs (dev, FP_CNT(queue), 0);
queue            1442 drivers/atm/firestream.c 	fp->offset = queue; 
queue            1523 drivers/atm/firestream.c static void free_queue(struct fs_dev *dev, struct queue *txq)
queue             468 drivers/atm/firestream.h 	struct queue    hp_txq, lp_txq, tx_relq, st_q;
queue             470 drivers/atm/firestream.h 	struct queue    rx_rq[FS_NR_RX_QUEUES];
queue              95 drivers/atm/idt77252.c 		       struct sk_buff *, int queue);
queue             100 drivers/atm/idt77252.c static void add_rx_skb(struct idt77252_dev *, int queue,
queue             582 drivers/atm/idt77252.c sb_pool_add(struct idt77252_dev *card, struct sk_buff *skb, int queue)
queue             584 drivers/atm/idt77252.c 	struct sb_pool *pool = &card->sbpool[queue];
queue             595 drivers/atm/idt77252.c 	IDT77252_PRV_POOL(skb) = POOL_HANDLE(queue, index);
queue             604 drivers/atm/idt77252.c 	unsigned int queue, index;
queue             609 drivers/atm/idt77252.c 	queue = POOL_QUEUE(handle);
queue             610 drivers/atm/idt77252.c 	if (queue > 3)
queue             617 drivers/atm/idt77252.c 	card->sbpool[queue].skb[index] = NULL;
queue             623 drivers/atm/idt77252.c 	unsigned int queue, index;
queue             625 drivers/atm/idt77252.c 	queue = POOL_QUEUE(handle);
queue             626 drivers/atm/idt77252.c 	if (queue > 3)
queue             633 drivers/atm/idt77252.c 	return card->sbpool[queue].skb[index];
queue            1116 drivers/atm/idt77252.c 	__skb_queue_tail(&rpp->queue, skb);
queue            1144 drivers/atm/idt77252.c 		if (skb_queue_len(&rpp->queue) > 1) {
queue            1160 drivers/atm/idt77252.c 			skb_queue_walk(&rpp->queue, sb)
queue            1238 drivers/atm/idt77252.c 	struct sk_buff	*queue;
queue            1249 drivers/atm/idt77252.c 	queue = card->raw_cell_head;
queue            1250 drivers/atm/idt77252.c 	if (!queue)
queue            1253 drivers/atm/idt77252.c 	head = IDT77252_PRV_PADDR(queue) + (queue->data - queue->head - 16);
queue            1256 drivers/atm/idt77252.c 	dma_sync_single_for_cpu(&card->pcidev->dev, IDT77252_PRV_PADDR(queue),
queue            1257 drivers/atm/idt77252.c 				skb_end_offset(queue) - 16,
queue            1264 drivers/atm/idt77252.c 		header = le32_to_cpu(*(u32 *) &queue->data[0]);
queue            1280 drivers/atm/idt77252.c 				printk(" %02x", queue->data[i]);
queue            1323 drivers/atm/idt77252.c 		skb_put_data(sb, &(queue->data[16]), ATM_CELL_PAYLOAD);
queue            1331 drivers/atm/idt77252.c 		skb_pull(queue, 64);
queue            1333 drivers/atm/idt77252.c 		head = IDT77252_PRV_PADDR(queue)
queue            1334 drivers/atm/idt77252.c 					+ (queue->data - queue->head - 16);
queue            1336 drivers/atm/idt77252.c 		if (queue->len < 128) {
queue            1340 drivers/atm/idt77252.c 			head = le32_to_cpu(*(u32 *) &queue->data[0]);
queue            1341 drivers/atm/idt77252.c 			handle = le32_to_cpu(*(u32 *) &queue->data[4]);
queue            1344 drivers/atm/idt77252.c 			recycle_rx_skb(card, queue);
queue            1348 drivers/atm/idt77252.c 				queue = card->raw_cell_head;
queue            1350 drivers/atm/idt77252.c 							IDT77252_PRV_PADDR(queue),
queue            1351 drivers/atm/idt77252.c 							(skb_end_pointer(queue) -
queue            1352 drivers/atm/idt77252.c 							 queue->data),
queue            1786 drivers/atm/idt77252.c idt77252_fbq_level(struct idt77252_dev *card, int queue)
queue            1788 drivers/atm/idt77252.c 	return (readl(SAR_REG_STAT) >> (16 + (queue << 2))) & 0x0f;
queue            1792 drivers/atm/idt77252.c idt77252_fbq_full(struct idt77252_dev *card, int queue)
queue            1794 drivers/atm/idt77252.c 	return (readl(SAR_REG_STAT) >> (16 + (queue << 2))) == 0x0f;
queue            1798 drivers/atm/idt77252.c push_rx_skb(struct idt77252_dev *card, struct sk_buff *skb, int queue)
queue            1810 drivers/atm/idt77252.c 	switch (queue) {
queue            1827 drivers/atm/idt77252.c 	if (idt77252_fbq_full(card, queue))
queue            1836 drivers/atm/idt77252.c 	writel(handle, card->fbq[queue]);
queue            1837 drivers/atm/idt77252.c 	writel(addr, card->fbq[queue]);
queue            1844 drivers/atm/idt77252.c add_rx_skb(struct idt77252_dev *card, int queue,
queue            1856 drivers/atm/idt77252.c 		if (sb_pool_add(card, skb, queue)) {
queue            1866 drivers/atm/idt77252.c 		if (push_rx_skb(card, skb, queue)) {
queue            1909 drivers/atm/idt77252.c 	skb_queue_head_init(&rpp->queue);
queue            1918 drivers/atm/idt77252.c 	skb_queue_walk_safe(&rpp->queue, skb, tmp)
queue            2520 drivers/atm/idt77252.c 		if (skb_queue_len(&vc->rcv.rx_pool.queue) != 0) {
queue            2910 drivers/atm/idt77252.c 			if (skb_queue_len(&vc->rcv.rx_pool.queue) != 0) {
queue             176 drivers/atm/idt77252.h 	struct sk_buff_head	queue;
queue             343 drivers/atm/idt77252.h #define POOL_HANDLE(queue, index)	(((queue + 1) << 16) | (index))
queue             225 drivers/atm/nicstar.c 	while ((hb = skb_dequeue(&card->hbpool.queue)) != NULL) {
queue             233 drivers/atm/nicstar.c 	while ((iovb = skb_dequeue(&card->iovpool.queue)) != NULL) {
queue             238 drivers/atm/nicstar.c 	while ((lb = skb_dequeue(&card->lbpool.queue)) != NULL)
queue             240 drivers/atm/nicstar.c 	while ((sb = skb_dequeue(&card->sbpool.queue)) != NULL)
queue             643 drivers/atm/nicstar.c 	skb_queue_head_init(&card->hbpool.queue);
queue             657 drivers/atm/nicstar.c 		skb_queue_tail(&card->hbpool.queue, hb);
queue             662 drivers/atm/nicstar.c 	skb_queue_head_init(&card->lbpool.queue);
queue             676 drivers/atm/nicstar.c 		skb_queue_tail(&card->lbpool.queue, lb);
queue             698 drivers/atm/nicstar.c 	skb_queue_head_init(&card->sbpool.queue);
queue             712 drivers/atm/nicstar.c 		skb_queue_tail(&card->sbpool.queue, sb);
queue             728 drivers/atm/nicstar.c 	skb_queue_head_init(&card->iovpool.queue);
queue             742 drivers/atm/nicstar.c 		skb_queue_tail(&card->iovpool.queue, iovb);
queue             820 drivers/atm/nicstar.c 		while ((iovb = skb_dequeue(&card->iovpool.queue)) != NULL)
queue             825 drivers/atm/nicstar.c 		while ((sb = skb_dequeue(&card->sbpool.queue)) != NULL)
queue             831 drivers/atm/nicstar.c 		while ((lb = skb_dequeue(&card->lbpool.queue)) != NULL)
queue             836 drivers/atm/nicstar.c 		while ((hb = skb_dequeue(&card->hbpool.queue)) != NULL)
queue            1002 drivers/atm/nicstar.c 				skb_unlink(handle1, &card->sbpool.queue);
queue            1004 drivers/atm/nicstar.c 				skb_unlink(handle2, &card->sbpool.queue);
queue            1012 drivers/atm/nicstar.c 				skb_unlink(handle1, &card->lbpool.queue);
queue            1014 drivers/atm/nicstar.c 				skb_unlink(handle2, &card->lbpool.queue);
queue            1183 drivers/atm/nicstar.c 			skb_queue_tail(&card->sbpool.queue, sb);
queue            1208 drivers/atm/nicstar.c 			skb_queue_tail(&card->lbpool.queue, lb);
queue            2062 drivers/atm/nicstar.c 		iovb = skb_dequeue(&(card->iovpool.queue));
queue            2078 drivers/atm/nicstar.c 				skb_queue_tail(&card->iovpool.queue, new_iovb);
queue            2219 drivers/atm/nicstar.c 			hb = skb_dequeue(&(card->hbpool.queue));
queue            2241 drivers/atm/nicstar.c 							       queue, new_hb);
queue            2251 drivers/atm/nicstar.c 					skb_queue_tail(&card->hbpool.queue,
queue            2262 drivers/atm/nicstar.c 							       queue, new_hb);
queue            2274 drivers/atm/nicstar.c 					skb_queue_tail(&card->hbpool.queue, hb);
queue            2341 drivers/atm/nicstar.c 		skb_queue_tail(&card->iovpool.queue, iovb);
queue            2349 drivers/atm/nicstar.c 	skb_unlink(sb, &card->sbpool.queue);
queue            2354 drivers/atm/nicstar.c 			skb_queue_tail(&card->sbpool.queue, new_sb);
queue            2364 drivers/atm/nicstar.c 			skb_queue_tail(&card->sbpool.queue, new_sb);
queue            2373 drivers/atm/nicstar.c 	skb_unlink(lb, &card->lbpool.queue);
queue            2378 drivers/atm/nicstar.c 			skb_queue_tail(&card->lbpool.queue, new_lb);
queue            2388 drivers/atm/nicstar.c 			skb_queue_tail(&card->lbpool.queue, new_lb);
queue            2579 drivers/atm/nicstar.c 				skb_queue_tail(&card->sbpool.queue, sb);
queue            2593 drivers/atm/nicstar.c 				skb_queue_tail(&card->lbpool.queue, lb);
queue            2604 drivers/atm/nicstar.c 				hb = skb_dequeue(&card->hbpool.queue);
queue            2623 drivers/atm/nicstar.c 				skb_queue_tail(&card->hbpool.queue, hb);
queue            2634 drivers/atm/nicstar.c 				iovb = skb_dequeue(&card->iovpool.queue);
queue            2653 drivers/atm/nicstar.c 				skb_queue_tail(&card->iovpool.queue, iovb);
queue             690 drivers/atm/nicstar.h 	struct sk_buff_head queue;
queue            1783 drivers/block/amiflop.c 	disk->queue = blk_mq_init_sq_queue(&unit[drive].tag_set, &amiflop_mq_ops,
queue            1785 drivers/block/amiflop.c 	if (IS_ERR(disk->queue)) {
queue            1786 drivers/block/amiflop.c 		disk->queue = NULL;
queue            1797 drivers/block/amiflop.c 	blk_cleanup_queue(disk->queue);
queue            1798 drivers/block/amiflop.c 	disk->queue = NULL;
queue             266 drivers/block/aoe/aoeblk.c 	struct aoedev *d = hctx->queue->queuedata;
queue             412 drivers/block/aoe/aoeblk.c 	d->blkq = gd->queue = q;
queue             117 drivers/block/aoe/aoechr.c 		struct sk_buff_head queue;
queue             118 drivers/block/aoe/aoechr.c 		__skb_queue_head_init(&queue);
queue             119 drivers/block/aoe/aoechr.c 		__skb_queue_tail(&queue, skb);
queue             120 drivers/block/aoe/aoechr.c 		aoenet_xmit(&queue);
queue             374 drivers/block/aoe/aoecmd.c 	struct sk_buff_head queue;
queue             402 drivers/block/aoe/aoecmd.c 		__skb_queue_head_init(&queue);
queue             403 drivers/block/aoe/aoecmd.c 		__skb_queue_tail(&queue, skb);
queue             404 drivers/block/aoe/aoecmd.c 		aoenet_xmit(&queue);
queue             413 drivers/block/aoe/aoecmd.c aoecmd_cfg_pkts(ushort aoemajor, unsigned char aoeminor, struct sk_buff_head *queue)
queue             433 drivers/block/aoe/aoecmd.c 		__skb_queue_tail(queue, skb);
queue             455 drivers/block/aoe/aoecmd.c 	struct sk_buff_head queue;
queue             492 drivers/block/aoe/aoecmd.c 	__skb_queue_head_init(&queue);
queue             493 drivers/block/aoe/aoecmd.c 	__skb_queue_tail(&queue, skb);
queue             494 drivers/block/aoe/aoecmd.c 	aoenet_xmit(&queue);
queue             585 drivers/block/aoe/aoecmd.c 	struct sk_buff_head queue;
queue             618 drivers/block/aoe/aoecmd.c 		__skb_queue_head_init(&queue);
queue             619 drivers/block/aoe/aoecmd.c 		__skb_queue_tail(&queue, skb);
queue             620 drivers/block/aoe/aoecmd.c 		aoenet_xmit(&queue);
queue            1369 drivers/block/aoe/aoecmd.c 	struct sk_buff_head queue;
queue            1371 drivers/block/aoe/aoecmd.c 	__skb_queue_head_init(&queue);
queue            1372 drivers/block/aoe/aoecmd.c 	aoecmd_cfg_pkts(aoemajor, aoeminor, &queue);
queue            1373 drivers/block/aoe/aoecmd.c 	aoenet_xmit(&queue);
queue            1533 drivers/block/aoe/aoecmd.c 	struct sk_buff_head queue;
queue            1600 drivers/block/aoe/aoecmd.c 		__skb_queue_head_init(&queue);
queue            1601 drivers/block/aoe/aoecmd.c 		__skb_queue_tail(&queue, sl);
queue            1602 drivers/block/aoe/aoecmd.c 		aoenet_xmit(&queue);
queue             113 drivers/block/aoe/aoenet.c aoenet_xmit(struct sk_buff_head *queue)
queue             118 drivers/block/aoe/aoenet.c 	skb_queue_walk_safe(queue, skb, tmp) {
queue             119 drivers/block/aoe/aoenet.c 		__skb_unlink(skb, queue);
queue             726 drivers/block/ataflop.c 	struct request_queue *q = unit[drive].disk->queue;
queue            1993 drivers/block/ataflop.c 		unit[i].disk->queue = blk_mq_init_sq_queue(&unit[i].tag_set,
queue            1996 drivers/block/ataflop.c 		if (IS_ERR(unit[i].disk->queue)) {
queue            1998 drivers/block/ataflop.c 			ret = PTR_ERR(unit[i].disk->queue);
queue            1999 drivers/block/ataflop.c 			unit[i].disk->queue = NULL;
queue            2053 drivers/block/ataflop.c 		blk_cleanup_queue(disk->queue);
queue            2107 drivers/block/ataflop.c 		blk_cleanup_queue(unit[i].disk->queue);
queue             440 drivers/block/brd.c 		brd->brd_disk->queue = brd->brd_queue;
queue             531 drivers/block/brd.c 		brd->brd_disk->queue = brd->brd_queue;
queue            2817 drivers/block/drbd/drbd_main.c 	disk->queue = q;
queue            1344 drivers/block/drbd/drbd_nl.c 		b = bdev->backing_bdev->bd_disk->queue;
queue            1389 drivers/block/drbd/drbd_nl.c 		local = queue_max_hw_sectors(bdev->backing_bdev->bd_disk->queue) << 9;
queue            1515 drivers/block/drbd/drbd_nl.c 	struct request_queue * const q = nbc->backing_bdev->bd_disk->queue;
queue             927 drivers/block/drbd/drbd_req.c 		bdi = device->ldev->backing_bdev->bd_disk->queue->backing_dev_info;
queue            2069 drivers/block/drbd/drbd_worker.c static bool dequeue_work_batch(struct drbd_work_queue *queue, struct list_head *work_list)
queue            2071 drivers/block/drbd/drbd_worker.c 	spin_lock_irq(&queue->q_lock);
queue            2072 drivers/block/drbd/drbd_worker.c 	list_splice_tail_init(&queue->q, work_list);
queue            2073 drivers/block/drbd/drbd_worker.c 	spin_unlock_irq(&queue->q_lock);
queue            4563 drivers/block/floppy.c 		disks[drive]->queue = blk_mq_init_sq_queue(&tag_sets[drive],
queue            4566 drivers/block/floppy.c 		if (IS_ERR(disks[drive]->queue)) {
queue            4567 drivers/block/floppy.c 			err = PTR_ERR(disks[drive]->queue);
queue            4568 drivers/block/floppy.c 			disks[drive]->queue = NULL;
queue            4572 drivers/block/floppy.c 		blk_queue_bounce_limit(disks[drive]->queue, BLK_BOUNCE_HIGH);
queue            4573 drivers/block/floppy.c 		blk_queue_max_hw_sectors(disks[drive]->queue, 64);
queue            4738 drivers/block/floppy.c 		if (disks[drive]->queue) {
queue            4740 drivers/block/floppy.c 			blk_cleanup_queue(disks[drive]->queue);
queue            4741 drivers/block/floppy.c 			disks[drive]->queue = NULL;
queue            4969 drivers/block/floppy.c 		blk_cleanup_queue(disks[drive]->queue);
queue            4978 drivers/block/floppy.c 			disks[drive]->queue = NULL;
queue            2089 drivers/block/loop.c 	disk->queue		= lo->lo_queue;
queue             151 drivers/block/mtip32xx/mtip32xx.c 		if (dd->queue)
queue             152 drivers/block/mtip32xx/mtip32xx.c 			blk_queue_flag_set(QUEUE_FLAG_DEAD, dd->queue);
queue             165 drivers/block/mtip32xx/mtip32xx.c 	struct blk_mq_hw_ctx *hctx = dd->queue->queue_hw_ctx[0];
queue             905 drivers/block/mtip32xx/mtip32xx.c 	blk_mq_quiesce_queue(port->dd->queue);
queue             925 drivers/block/mtip32xx/mtip32xx.c 	blk_mq_unquiesce_queue(port->dd->queue);
queue             928 drivers/block/mtip32xx/mtip32xx.c 	blk_mq_unquiesce_queue(port->dd->queue);
queue             985 drivers/block/mtip32xx/mtip32xx.c 	rq = blk_mq_alloc_request(dd->queue, REQ_OP_DRV_IN, BLK_MQ_REQ_RESERVED);
queue            2070 drivers/block/mtip32xx/mtip32xx.c 	nents = blk_rq_map_sg(hctx->queue, rq, command->sg);
queue            2682 drivers/block/mtip32xx/mtip32xx.c 			blk_mq_quiesce_queue(dd->queue);
queue            2694 drivers/block/mtip32xx/mtip32xx.c 			blk_mq_unquiesce_queue(dd->queue);
queue            3425 drivers/block/mtip32xx/mtip32xx.c 	struct driver_data *dd = hctx->queue->queuedata;
queue            3449 drivers/block/mtip32xx/mtip32xx.c 	struct driver_data *dd = hctx->queue->queuedata;
queue            3486 drivers/block/mtip32xx/mtip32xx.c 	struct driver_data *dd = hctx->queue->queuedata;
queue            3639 drivers/block/mtip32xx/mtip32xx.c 	dd->queue = blk_mq_init_queue(&dd->tags);
queue            3640 drivers/block/mtip32xx/mtip32xx.c 	if (IS_ERR(dd->queue)) {
queue            3647 drivers/block/mtip32xx/mtip32xx.c 	dd->disk->queue		= dd->queue;
queue            3648 drivers/block/mtip32xx/mtip32xx.c 	dd->queue->queuedata	= dd;
queue            3668 drivers/block/mtip32xx/mtip32xx.c 	blk_queue_flag_set(QUEUE_FLAG_NONROT, dd->queue);
queue            3669 drivers/block/mtip32xx/mtip32xx.c 	blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, dd->queue);
queue            3670 drivers/block/mtip32xx/mtip32xx.c 	blk_queue_max_segments(dd->queue, MTIP_MAX_SG);
queue            3671 drivers/block/mtip32xx/mtip32xx.c 	blk_queue_physical_block_size(dd->queue, 4096);
queue            3672 drivers/block/mtip32xx/mtip32xx.c 	blk_queue_max_hw_sectors(dd->queue, 0xffff);
queue            3673 drivers/block/mtip32xx/mtip32xx.c 	blk_queue_max_segment_size(dd->queue, 0x400000);
queue            3675 drivers/block/mtip32xx/mtip32xx.c 	blk_queue_io_min(dd->queue, 4096);
queue            3731 drivers/block/mtip32xx/mtip32xx.c 	blk_cleanup_queue(dd->queue);
queue            3801 drivers/block/mtip32xx/mtip32xx.c 	blk_freeze_queue_start(dd->queue);
queue            3802 drivers/block/mtip32xx/mtip32xx.c 	blk_mq_quiesce_queue(dd->queue);
queue            3804 drivers/block/mtip32xx/mtip32xx.c 	blk_mq_unquiesce_queue(dd->queue);
queue            3817 drivers/block/mtip32xx/mtip32xx.c 		if (dd->disk->queue) {
queue            3818 drivers/block/mtip32xx/mtip32xx.c 			blk_cleanup_queue(dd->queue);
queue            3820 drivers/block/mtip32xx/mtip32xx.c 			dd->queue = NULL;
queue            3857 drivers/block/mtip32xx/mtip32xx.c 		if (dd->disk->queue) {
queue            3858 drivers/block/mtip32xx/mtip32xx.c 			blk_cleanup_queue(dd->queue);
queue            3863 drivers/block/mtip32xx/mtip32xx.c 		dd->queue = NULL;
queue            4216 drivers/block/mtip32xx/mtip32xx.c 	blk_set_queue_dying(dd->queue);
queue             434 drivers/block/mtip32xx/mtip32xx.h 	struct request_queue *queue; /* Our request queue. */
queue             226 drivers/block/nbd.c 		q = disk->queue;
queue             305 drivers/block/nbd.c 		nbd->disk->queue->limits.discard_granularity = config->blksize;
queue             306 drivers/block/nbd.c 		nbd->disk->queue->limits.discard_alignment = config->blksize;
queue             307 drivers/block/nbd.c 		blk_queue_max_discard_sectors(nbd->disk->queue, UINT_MAX);
queue             309 drivers/block/nbd.c 	blk_queue_logical_block_size(nbd->disk->queue, config->blksize);
queue             310 drivers/block/nbd.c 	blk_queue_physical_block_size(nbd->disk->queue, config->blksize);
queue             810 drivers/block/nbd.c 	blk_mq_quiesce_queue(nbd->disk->queue);
queue             812 drivers/block/nbd.c 	blk_mq_unquiesce_queue(nbd->disk->queue);
queue            1133 drivers/block/nbd.c 		blk_queue_flag_set(QUEUE_FLAG_DISCARD, nbd->disk->queue);
queue            1136 drivers/block/nbd.c 			blk_queue_write_cache(nbd->disk->queue, true, true);
queue            1138 drivers/block/nbd.c 			blk_queue_write_cache(nbd->disk->queue, true, false);
queue            1141 drivers/block/nbd.c 		blk_queue_write_cache(nbd->disk->queue, false, false);
queue            1214 drivers/block/nbd.c 		nbd->disk->queue->limits.discard_granularity = 0;
queue            1215 drivers/block/nbd.c 		nbd->disk->queue->limits.discard_alignment = 0;
queue            1216 drivers/block/nbd.c 		blk_queue_max_discard_sectors(nbd->disk->queue, UINT_MAX);
queue            1217 drivers/block/nbd.c 		blk_queue_flag_clear(QUEUE_FLAG_DISCARD, nbd->disk->queue);
queue            1346 drivers/block/nbd.c 		blk_queue_rq_timeout(nbd->disk->queue, timeout * HZ);
queue            1707 drivers/block/nbd.c 	disk->queue = q;
queue            1712 drivers/block/nbd.c 	blk_queue_flag_set(QUEUE_FLAG_NONROT, disk->queue);
queue            1713 drivers/block/nbd.c 	blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, disk->queue);
queue            1714 drivers/block/nbd.c 	disk->queue->limits.discard_granularity = 0;
queue            1715 drivers/block/nbd.c 	disk->queue->limits.discard_alignment = 0;
queue            1716 drivers/block/nbd.c 	blk_queue_max_discard_sectors(disk->queue, 0);
queue            1717 drivers/block/nbd.c 	blk_queue_max_segment_size(disk->queue, UINT_MAX);
queue            1718 drivers/block/nbd.c 	blk_queue_max_segments(disk->queue, USHRT_MAX);
queue            1719 drivers/block/nbd.c 	blk_queue_max_hw_sectors(disk->queue, 65536);
queue            1720 drivers/block/nbd.c 	disk->queue->limits.max_sectors = 256;
queue            1544 drivers/block/null_blk_main.c 	disk->queue		= nullb->q;
queue             314 drivers/block/paride/pcd.c 		disk->queue = blk_mq_init_sq_queue(&cd->tag_set, &pcd_mq_ops,
queue             316 drivers/block/paride/pcd.c 		if (IS_ERR(disk->queue)) {
queue             317 drivers/block/paride/pcd.c 			disk->queue = NULL;
queue             323 drivers/block/paride/pcd.c 		disk->queue->queuedata = cd;
queue             324 drivers/block/paride/pcd.c 		blk_queue_bounce_limit(disk->queue, BLK_BOUNCE_HIGH);
queue             759 drivers/block/paride/pcd.c 		blk_cleanup_queue(cd->disk->queue);
queue             760 drivers/block/paride/pcd.c 		cd->disk->queue = NULL;
queue             816 drivers/block/paride/pcd.c 	struct pcd_unit *cd = hctx->queue->queuedata;
queue            1023 drivers/block/paride/pcd.c 			blk_cleanup_queue(cd->disk->queue);
queue            1055 drivers/block/paride/pcd.c 		blk_cleanup_queue(cd->disk->queue);
queue             405 drivers/block/paride/pd.c 		q = disk ? disk->queue : NULL;
queue             758 drivers/block/paride/pd.c 	struct pd_unit *disk = hctx->queue->queuedata;
queue             778 drivers/block/paride/pd.c 	rq = blk_get_request(disk->gd->queue, REQ_OP_DRV_IN, 0);
queue             784 drivers/block/paride/pd.c 	blk_execute_rq(disk->gd->queue, disk->gd, rq, 0);
queue             916 drivers/block/paride/pd.c 	p->queue = blk_mq_init_queue(&disk->tag_set);
queue             917 drivers/block/paride/pd.c 	if (IS_ERR(p->queue)) {
queue             919 drivers/block/paride/pd.c 		p->queue = NULL;
queue             923 drivers/block/paride/pd.c 	p->queue->queuedata = disk;
queue             924 drivers/block/paride/pd.c 	blk_queue_max_hw_sectors(p->queue, cluster);
queue             925 drivers/block/paride/pd.c 	blk_queue_bounce_limit(p->queue, BLK_BOUNCE_HIGH);
queue            1032 drivers/block/paride/pd.c 			blk_cleanup_queue(p->queue);
queue             300 drivers/block/paride/pf.c 		disk->queue = blk_mq_init_sq_queue(&pf->tag_set, &pf_mq_ops,
queue             302 drivers/block/paride/pf.c 		if (IS_ERR(disk->queue)) {
queue             303 drivers/block/paride/pf.c 			disk->queue = NULL;
queue             309 drivers/block/paride/pf.c 		disk->queue->queuedata = pf;
queue             310 drivers/block/paride/pf.c 		blk_queue_max_segments(disk->queue, cluster);
queue             311 drivers/block/paride/pf.c 		blk_queue_bounce_limit(disk->queue, BLK_BOUNCE_HIGH);
queue             768 drivers/block/paride/pf.c 		blk_cleanup_queue(pf->disk->queue);
queue             769 drivers/block/paride/pf.c 		pf->disk->queue = NULL;
queue             874 drivers/block/paride/pf.c 	struct pf_unit *pf = hctx->queue->queuedata;
queue            1038 drivers/block/paride/pf.c 			blk_cleanup_queue(pf->disk->queue);
queue            1068 drivers/block/paride/pf.c 		blk_cleanup_queue(pf->disk->queue);
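
pcd.c, pd.c and pf.c all bring their queues up the same way: blk_mq_init_sq_queue() allocates the tag set and a single-queue request queue together, failure comes back as an ERR_PTR that must be cleared before unwinding, and teardown is blk_cleanup_queue(). A condensed sketch of that lifecycle, with a made-up per-unit structure standing in for the paride drivers' own:

#include <linux/blk-mq.h>
#include <linux/blkdev.h>

struct example_unit {
	struct gendisk *disk;
	struct blk_mq_tag_set tag_set;
};

/* A real driver fills in .queue_rq here; left empty in this sketch. */
static const struct blk_mq_ops example_mq_ops;

static int example_init_queue(struct example_unit *u)
{
	u->disk->queue = blk_mq_init_sq_queue(&u->tag_set, &example_mq_ops,
					      1, BLK_MQ_F_SHOULD_MERGE);
	if (IS_ERR(u->disk->queue)) {
		int err = PTR_ERR(u->disk->queue);

		u->disk->queue = NULL;	/* don't leave a stale ERR_PTR behind */
		return err;
	}

	/* Back-pointer that .queue_rq reads via hctx->queue->queuedata. */
	u->disk->queue->queuedata = u;
	return 0;
}

static void example_exit_queue(struct example_unit *u)
{
	blk_cleanup_queue(u->disk->queue);
	blk_mq_free_tag_set(&u->tag_set);
	u->disk->queue = NULL;
}
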
queue            1246 drivers/block/pktcdvd.c 		clear_bdi_congested(pd->disk->queue->backing_dev_info,
queue            2494 drivers/block/pktcdvd.c 	struct request_queue *q = pd->disk->queue;
queue            2752 drivers/block/pktcdvd.c 	disk->queue = blk_alloc_queue(GFP_KERNEL);
queue            2753 drivers/block/pktcdvd.c 	if (!disk->queue)
queue            2827 drivers/block/pktcdvd.c 	blk_cleanup_queue(pd->disk->queue);
queue              32 drivers/block/ps3disk.c 	struct request_queue *queue;
queue             198 drivers/block/ps3disk.c 	struct request_queue *q = hctx->queue;
queue             270 drivers/block/ps3disk.c 	blk_mq_run_hw_queues(priv->queue, true);
queue             400 drivers/block/ps3disk.c 	struct request_queue *queue;
queue             444 drivers/block/ps3disk.c 	queue = blk_mq_init_sq_queue(&priv->tag_set, &ps3disk_mq_ops, 1,
queue             446 drivers/block/ps3disk.c 	if (IS_ERR(queue)) {
queue             449 drivers/block/ps3disk.c 		error = PTR_ERR(queue);
queue             453 drivers/block/ps3disk.c 	priv->queue = queue;
queue             454 drivers/block/ps3disk.c 	queue->queuedata = dev;
queue             456 drivers/block/ps3disk.c 	blk_queue_max_hw_sectors(queue, dev->bounce_size >> 9);
queue             457 drivers/block/ps3disk.c 	blk_queue_segment_boundary(queue, -1UL);
queue             458 drivers/block/ps3disk.c 	blk_queue_dma_alignment(queue, dev->blk_size-1);
queue             459 drivers/block/ps3disk.c 	blk_queue_logical_block_size(queue, dev->blk_size);
queue             461 drivers/block/ps3disk.c 	blk_queue_write_cache(queue, true, false);
queue             463 drivers/block/ps3disk.c 	blk_queue_max_segments(queue, -1);
queue             464 drivers/block/ps3disk.c 	blk_queue_max_segment_size(queue, dev->bounce_size);
queue             478 drivers/block/ps3disk.c 	gendisk->queue = queue;
queue             495 drivers/block/ps3disk.c 	blk_cleanup_queue(queue);
queue             521 drivers/block/ps3disk.c 	blk_cleanup_queue(priv->queue);
queue              70 drivers/block/ps3vram.c 	struct request_queue *queue;
queue             617 drivers/block/ps3vram.c 	struct request_queue *queue;
queue             740 drivers/block/ps3vram.c 	queue = blk_alloc_queue(GFP_KERNEL);
queue             741 drivers/block/ps3vram.c 	if (!queue) {
queue             747 drivers/block/ps3vram.c 	priv->queue = queue;
queue             748 drivers/block/ps3vram.c 	queue->queuedata = dev;
queue             749 drivers/block/ps3vram.c 	blk_queue_make_request(queue, ps3vram_make_request);
queue             750 drivers/block/ps3vram.c 	blk_queue_max_segments(queue, BLK_MAX_SEGMENTS);
queue             751 drivers/block/ps3vram.c 	blk_queue_max_segment_size(queue, BLK_MAX_SEGMENT_SIZE);
queue             752 drivers/block/ps3vram.c 	blk_queue_max_hw_sectors(queue, BLK_SAFE_MAX_SECTORS);
queue             765 drivers/block/ps3vram.c 	gendisk->queue = queue;
queue             777 drivers/block/ps3vram.c 	blk_cleanup_queue(queue);
queue             809 drivers/block/ps3vram.c 	blk_cleanup_queue(priv->queue);
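
ps3vram.c (like umem.c, rsxx and zram later in this listing) is bio-based rather than blk-mq: it allocates a bare queue with blk_alloc_queue() and installs a make_request handler that consumes each bio directly. A sketch of that skeleton, using the blk_qc_t-returning handler signature these entries show (all names here are placeholders):

#include <linux/blkdev.h>
#include <linux/bio.h>

struct example_dev {
	struct request_queue *queue;
};

static blk_qc_t example_make_request(struct request_queue *queue,
				     struct bio *bio)
{
	struct example_dev *dev = queue->queuedata;

	/* A real driver walks the bio's segments and does the transfer here. */
	(void)dev;
	bio_endio(bio);
	return BLK_QC_T_NONE;
}

static int example_setup(struct example_dev *dev)
{
	dev->queue = blk_alloc_queue(GFP_KERNEL);
	if (!dev->queue)
		return -ENOMEM;

	dev->queue->queuedata = dev;
	blk_queue_make_request(dev->queue, example_make_request);
	blk_queue_max_segments(dev->queue, BLK_MAX_SEGMENTS);
	blk_queue_max_segment_size(dev->queue, BLK_MAX_SEGMENT_SIZE);
	blk_queue_max_hw_sectors(dev->queue, BLK_SAFE_MAX_SECTORS);
	/* ...then attach: gendisk->queue = dev->queue; */
	return 0;
}
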
queue            4922 drivers/block/rbd.c 	blk_cleanup_queue(rbd_dev->disk->queue);
queue            5187 drivers/block/rbd.c 	disk->queue = q;
queue            7130 drivers/block/rbd.c 	blk_put_queue(rbd_dev->disk->queue);
queue            7254 drivers/block/rbd.c 		blk_mq_freeze_queue(rbd_dev->disk->queue);
queue            7255 drivers/block/rbd.c 		blk_set_queue_dying(rbd_dev->disk->queue);
queue             591 drivers/block/rsxx/core.c 					     &card->ctrl[i].queue,
queue             697 drivers/block/rsxx/core.c 		if (list_empty(&card->ctrl[i].queue)) {
queue             126 drivers/block/rsxx/cregs.c 	if (card->creg_ctrl.active || list_empty(&card->creg_ctrl.queue))
queue             130 drivers/block/rsxx/cregs.c 	card->creg_ctrl.active_cmd = list_first_entry(&card->creg_ctrl.queue,
queue             184 drivers/block/rsxx/cregs.c 	list_add_tail(&cmd->list, &card->creg_ctrl.queue);
queue             320 drivers/block/rsxx/cregs.c 	list_for_each_entry_safe(cmd, tmp, &card->creg_ctrl.queue, list) {
queue             705 drivers/block/rsxx/cregs.c 		list_add(&cmd->list, &card->creg_ctrl.queue);
queue             715 drivers/block/rsxx/cregs.c 	if (!list_empty(&card->creg_ctrl.queue))
queue             732 drivers/block/rsxx/cregs.c 	INIT_LIST_HEAD(&card->creg_ctrl.queue);
queue             747 drivers/block/rsxx/cregs.c 	list_for_each_entry_safe(cmd, tmp, &card->creg_ctrl.queue, list) {
queue             101 drivers/block/rsxx/dev.c 	generic_start_io_acct(card->queue, bio_op(bio), bio_sectors(bio),
queue             109 drivers/block/rsxx/dev.c 	generic_end_io_acct(card->queue, bio_op(bio),
queue             251 drivers/block/rsxx/dev.c 	card->queue = blk_alloc_queue(GFP_KERNEL);
queue             252 drivers/block/rsxx/dev.c 	if (!card->queue) {
queue             261 drivers/block/rsxx/dev.c 		blk_cleanup_queue(card->queue);
queue             268 drivers/block/rsxx/dev.c 		blk_queue_dma_alignment(card->queue, blk_size - 1);
queue             269 drivers/block/rsxx/dev.c 		blk_queue_logical_block_size(card->queue, blk_size);
queue             272 drivers/block/rsxx/dev.c 	blk_queue_make_request(card->queue, rsxx_make_request);
queue             273 drivers/block/rsxx/dev.c 	blk_queue_max_hw_sectors(card->queue, blkdev_max_hw_sectors);
queue             274 drivers/block/rsxx/dev.c 	blk_queue_physical_block_size(card->queue, RSXX_HW_BLK_SIZE);
queue             276 drivers/block/rsxx/dev.c 	blk_queue_flag_set(QUEUE_FLAG_NONROT, card->queue);
queue             277 drivers/block/rsxx/dev.c 	blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, card->queue);
queue             279 drivers/block/rsxx/dev.c 		blk_queue_flag_set(QUEUE_FLAG_DISCARD, card->queue);
queue             280 drivers/block/rsxx/dev.c 		blk_queue_max_discard_sectors(card->queue,
queue             282 drivers/block/rsxx/dev.c 		card->queue->limits.discard_granularity = RSXX_HW_BLK_SIZE;
queue             283 drivers/block/rsxx/dev.c 		card->queue->limits.discard_alignment   = RSXX_HW_BLK_SIZE;
queue             286 drivers/block/rsxx/dev.c 	card->queue->queuedata = card;
queue             294 drivers/block/rsxx/dev.c 	card->gendisk->queue = card->queue;
queue             307 drivers/block/rsxx/dev.c 	blk_cleanup_queue(card->queue);
queue             308 drivers/block/rsxx/dev.c 	card->queue->queuedata = NULL;
queue             270 drivers/block/rsxx/dma.c 	list_add(&dma->list, &ctrl->queue);
queue             370 drivers/block/rsxx/dma.c 		cnt = rsxx_cleanup_dma_queue(ctrl, &ctrl->queue, COMPLETE_DMA);
queue             398 drivers/block/rsxx/dma.c 		if (list_empty(&ctrl->queue)) {
queue             409 drivers/block/rsxx/dma.c 		dma = list_entry(ctrl->queue.next, struct rsxx_dma, list);
queue             744 drivers/block/rsxx/dma.c 			list_splice_tail(&dma_list[i], &card->ctrl[i].queue);
queue             825 drivers/block/rsxx/dma.c 	INIT_LIST_HEAD(&ctrl->queue);
queue            1005 drivers/block/rsxx/dma.c 		rsxx_cleanup_dma_queue(ctrl, &ctrl->queue, COMPLETE_DMA);
queue            1061 drivers/block/rsxx/dma.c 		list_splice(&issued_dmas[i], &card->ctrl[i].queue);
queue              96 drivers/block/rsxx/rsxx_priv.h 	struct list_head		queue;
queue             127 drivers/block/rsxx/rsxx_priv.h 		struct list_head	queue;
queue             158 drivers/block/rsxx/rsxx_priv.h 	struct request_queue	*queue;
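
The rsxx entries keep commands and DMAs on plain list_head queues guarded by a lock: INIT_LIST_HEAD() at setup, list_add_tail() to submit, list_first_entry() to activate the oldest entry, and list_for_each_entry_safe() to drain on teardown. A minimal sketch of that software-queue idiom (types and names invented for illustration):

#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/slab.h>

struct example_cmd {
	struct list_head list;
	/* ...command payload... */
};

struct example_ctrl {
	spinlock_t lock;
	struct list_head queue;
};

static void example_ctrl_init(struct example_ctrl *ctrl)
{
	spin_lock_init(&ctrl->lock);
	INIT_LIST_HEAD(&ctrl->queue);
}

static void example_submit(struct example_ctrl *ctrl, struct example_cmd *cmd)
{
	spin_lock(&ctrl->lock);
	list_add_tail(&cmd->list, &ctrl->queue);
	spin_unlock(&ctrl->lock);
}

/* Pop the oldest queued command, as rsxx does when kicking the hardware. */
static struct example_cmd *example_next(struct example_ctrl *ctrl)
{
	struct example_cmd *cmd = NULL;

	spin_lock(&ctrl->lock);
	if (!list_empty(&ctrl->queue)) {
		cmd = list_first_entry(&ctrl->queue, struct example_cmd, list);
		list_del(&cmd->list);
	}
	spin_unlock(&ctrl->lock);
	return cmd;
}

/* Drain anything still pending, e.g. on card removal. */
static void example_drain(struct example_ctrl *ctrl)
{
	struct example_cmd *cmd, *tmp;

	spin_lock(&ctrl->lock);
	list_for_each_entry_safe(cmd, tmp, &ctrl->queue, list) {
		list_del(&cmd->list);
		kfree(cmd);
	}
	spin_unlock(&ctrl->lock);
}
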
queue             218 drivers/block/skd_main.c 	struct request_queue *queue;
queue             632 drivers/block/skd_main.c 	n_sg = blk_rq_map_sg(skdev->queue, req, sgl);
queue             712 drivers/block/skd_main.c 	blk_mq_start_hw_queues(skdev->queue);
queue            2279 drivers/block/skd_main.c 		blk_mq_stop_hw_queues(skdev->queue);
queue            2856 drivers/block/skd_main.c 	skdev->queue = q;
queue            2857 drivers/block/skd_main.c 	disk->queue = q;
queue            2873 drivers/block/skd_main.c 	blk_mq_stop_hw_queues(skdev->queue);
queue            3041 drivers/block/skd_main.c 	if (skdev->queue) {
queue            3042 drivers/block/skd_main.c 		blk_cleanup_queue(skdev->queue);
queue            3043 drivers/block/skd_main.c 		skdev->queue = NULL;
queue            3045 drivers/block/skd_main.c 			disk->queue = NULL;
queue             185 drivers/block/sunvdc.c 		blk_mq_start_stopped_hw_queues(port->disk->queue, true);
queue             537 drivers/block/sunvdc.c 	struct vdc_port *port = hctx->queue->queuedata;
queue             867 drivers/block/sunvdc.c 	g->queue = q;
queue            1078 drivers/block/sunvdc.c 		blk_mq_stop_hw_queues(port->disk->queue);
queue            1085 drivers/block/sunvdc.c 		cleanup_queue(port->disk->queue);
queue            1126 drivers/block/sunvdc.c 	struct request_queue *q = port->disk->queue;
queue            1183 drivers/block/sunvdc.c 	blk_mq_stop_hw_queues(port->disk->queue);
queue             527 drivers/block/swim.c 	struct floppy_state *fs = hctx->queue->queuedata;
queue             848 drivers/block/swim.c 		swd->unit[drive].disk->queue = q;
queue             849 drivers/block/swim.c 		blk_queue_bounce_limit(swd->unit[drive].disk->queue,
queue             851 drivers/block/swim.c 		swd->unit[drive].disk->queue->queuedata = &swd->unit[drive];
queue             878 drivers/block/swim.c 			if (disk->queue) {
queue             879 drivers/block/swim.c 				blk_cleanup_queue(disk->queue);
queue             880 drivers/block/swim.c 				disk->queue = NULL;
queue             957 drivers/block/swim.c 		blk_cleanup_queue(swd->unit[drive].disk->queue);
queue             311 drivers/block/swim3.c 	struct floppy_state *fs = hctx->queue->queuedata;
queue             825 drivers/block/swim3.c 	struct request_queue *q = disks[fs->index]->queue;
queue            1197 drivers/block/swim3.c 	disk->queue = blk_mq_init_sq_queue(&fs->tag_set, &swim3_mq_ops, 2,
queue            1199 drivers/block/swim3.c 	if (IS_ERR(disk->queue)) {
queue            1200 drivers/block/swim3.c 		rc = PTR_ERR(disk->queue);
queue            1201 drivers/block/swim3.c 		disk->queue = NULL;
queue            1204 drivers/block/swim3.c 	blk_queue_bounce_limit(disk->queue, BLK_BOUNCE_HIGH);
queue            1205 drivers/block/swim3.c 	disk->queue->queuedata = fs;
queue            1225 drivers/block/swim3.c 	blk_cleanup_queue(disk->queue);
queue            1226 drivers/block/swim3.c 	disk->queue = NULL;
queue             705 drivers/block/sx8.c 	struct request_queue *q = hctx->queue;
queue            1371 drivers/block/sx8.c 	disk->queue = q;
queue            1385 drivers/block/sx8.c 	if (disk->queue)
queue            1386 drivers/block/sx8.c 		blk_cleanup_queue(disk->queue);
queue             110 drivers/block/umem.c 	struct request_queue *queue;
queue             888 drivers/block/umem.c 	card->queue = blk_alloc_queue_node(GFP_KERNEL, NUMA_NO_NODE);
queue             889 drivers/block/umem.c 	if (!card->queue)
queue             892 drivers/block/umem.c 	blk_queue_make_request(card->queue, mm_make_request);
queue             893 drivers/block/umem.c 	card->queue->queuedata = card;
queue            1031 drivers/block/umem.c 	blk_cleanup_queue(card->queue);
queue            1089 drivers/block/umem.c 		disk->queue = cards[i].queue;
queue             269 drivers/block/virtio_blk.c 		blk_mq_start_stopped_hw_queues(vblk->disk->queue, true);
queue             275 drivers/block/virtio_blk.c 	struct virtio_blk *vblk = hctx->queue->queuedata;
queue             290 drivers/block/virtio_blk.c 	struct virtio_blk *vblk = hctx->queue->queuedata;
queue             343 drivers/block/virtio_blk.c 	num = blk_rq_map_sg(hctx->queue, req, vbr->sg);
queue             388 drivers/block/virtio_blk.c 	struct request_queue *q = vblk->disk->queue;
queue             400 drivers/block/virtio_blk.c 	blk_execute_rq(vblk->disk->queue, vblk->disk, req, false);
queue             520 drivers/block/virtio_blk.c 	struct request_queue *q = vblk->disk->queue;
queue             681 drivers/block/virtio_blk.c 	blk_queue_write_cache(vblk->disk->queue, writeback, false);
queue             883 drivers/block/virtio_blk.c 	vblk->disk->queue = q;
queue            1009 drivers/block/virtio_blk.c 	blk_cleanup_queue(vblk->disk->queue);
queue            1041 drivers/block/virtio_blk.c 	blk_mq_quiesce_queue(vblk->disk->queue);
queue            1058 drivers/block/virtio_blk.c 	blk_mq_unquiesce_queue(vblk->disk->queue);
queue             887 drivers/block/xen-blkfront.c 	struct blkfront_info *info = hctx->queue->queuedata;
queue             994 drivers/block/xen-blkfront.c 	info->rq = gd->queue = rq;
queue             207 drivers/block/xsysace.c 	struct request_queue *queue;
queue             512 drivers/block/xsysace.c 		while ((req = ace_get_next_request(ace->queue)) != NULL)
queue             527 drivers/block/xsysace.c 		if (ace->id_req_count || ace_has_next_request(ace->queue)) {
queue             656 drivers/block/xsysace.c 		req = ace_get_next_request(ace->queue);
queue             867 drivers/block/xsysace.c 	struct ace_device *ace = hctx->queue->queuedata;
queue            1010 drivers/block/xsysace.c 	ace->queue = blk_mq_init_sq_queue(&ace->tag_set, &ace_mq_ops, 2,
queue            1012 drivers/block/xsysace.c 	if (IS_ERR(ace->queue)) {
queue            1013 drivers/block/xsysace.c 		rc = PTR_ERR(ace->queue);
queue            1014 drivers/block/xsysace.c 		ace->queue = NULL;
queue            1017 drivers/block/xsysace.c 	ace->queue->queuedata = ace;
queue            1019 drivers/block/xsysace.c 	blk_queue_logical_block_size(ace->queue, 512);
queue            1020 drivers/block/xsysace.c 	blk_queue_bounce_limit(ace->queue, BLK_BOUNCE_HIGH);
queue            1033 drivers/block/xsysace.c 	ace->gd->queue = ace->queue;
queue            1092 drivers/block/xsysace.c 	ace->gd->queue = NULL;
queue            1095 drivers/block/xsysace.c 	blk_cleanup_queue(ace->queue);
queue            1112 drivers/block/xsysace.c 	if (ace->queue) {
queue            1113 drivers/block/xsysace.c 		blk_cleanup_queue(ace->queue);
queue             376 drivers/block/z2ram.c     z2ram_gendisk->queue = z2_queue;
queue             407 drivers/block/zram/zram_drv.c 	zram->disk->queue->backing_dev_info->capabilities |=
queue             527 drivers/block/zram/zram_drv.c 	zram->disk->queue->backing_dev_info->capabilities &=
queue            1510 drivers/block/zram/zram_drv.c 	struct request_queue *q = zram->disk->queue;
queue            1590 drivers/block/zram/zram_drv.c static blk_qc_t zram_make_request(struct request_queue *queue, struct bio *bio)
queue            1592 drivers/block/zram/zram_drv.c 	struct zram *zram = queue->queuedata;
queue            1878 drivers/block/zram/zram_drv.c 	struct request_queue *queue;
queue            1894 drivers/block/zram/zram_drv.c 	queue = blk_alloc_queue(GFP_KERNEL);
queue            1895 drivers/block/zram/zram_drv.c 	if (!queue) {
queue            1902 drivers/block/zram/zram_drv.c 	blk_queue_make_request(queue, zram_make_request);
queue            1916 drivers/block/zram/zram_drv.c 	zram->disk->queue = queue;
queue            1917 drivers/block/zram/zram_drv.c 	zram->disk->queue->queuedata = zram;
queue            1924 drivers/block/zram/zram_drv.c 	blk_queue_flag_set(QUEUE_FLAG_NONROT, zram->disk->queue);
queue            1925 drivers/block/zram/zram_drv.c 	blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, zram->disk->queue);
queue            1931 drivers/block/zram/zram_drv.c 	blk_queue_physical_block_size(zram->disk->queue, PAGE_SIZE);
queue            1932 drivers/block/zram/zram_drv.c 	blk_queue_logical_block_size(zram->disk->queue,
queue            1934 drivers/block/zram/zram_drv.c 	blk_queue_io_min(zram->disk->queue, PAGE_SIZE);
queue            1935 drivers/block/zram/zram_drv.c 	blk_queue_io_opt(zram->disk->queue, PAGE_SIZE);
queue            1936 drivers/block/zram/zram_drv.c 	zram->disk->queue->limits.discard_granularity = PAGE_SIZE;
queue            1937 drivers/block/zram/zram_drv.c 	blk_queue_max_discard_sectors(zram->disk->queue, UINT_MAX);
queue            1938 drivers/block/zram/zram_drv.c 	blk_queue_flag_set(QUEUE_FLAG_DISCARD, zram->disk->queue);
queue            1949 drivers/block/zram/zram_drv.c 		blk_queue_max_write_zeroes_sectors(zram->disk->queue, UINT_MAX);
queue            1951 drivers/block/zram/zram_drv.c 	zram->disk->queue->backing_dev_info->capabilities |=
queue            1962 drivers/block/zram/zram_drv.c 	blk_cleanup_queue(queue);
queue            1998 drivers/block/zram/zram_drv.c 	blk_cleanup_queue(zram->disk->queue);
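
zram sizes everything to PAGE_SIZE: physical and logical block size, io_min/io_opt, and discard granularity all match the compressed-page unit, with discard and (conditionally) write-zeroes enabled since freeing a page is cheap. A sketch of that limit setup, hedged as illustrative (zram's actual logical block size is a driver constant):

#include <linux/blkdev.h>

/* Sketch: PAGE_SIZE-granular limits, as the zram entries above configure. */
static void example_set_page_granularity(struct request_queue *q)
{
	blk_queue_physical_block_size(q, PAGE_SIZE);
	blk_queue_logical_block_size(q, PAGE_SIZE);
	blk_queue_io_min(q, PAGE_SIZE);
	blk_queue_io_opt(q, PAGE_SIZE);
	q->limits.discard_granularity = PAGE_SIZE;
	blk_queue_max_discard_sectors(q, UINT_MAX);
	blk_queue_flag_set(QUEUE_FLAG_DISCARD, q);
	blk_queue_max_write_zeroes_sectors(q, UINT_MAX);
}
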
queue            1507 drivers/cdrom/cdrom.c int media_changed(struct cdrom_device_info *cdi, int queue)
queue            1509 drivers/cdrom/cdrom.c 	unsigned int mask = (1 << (queue & 1));
queue            1518 drivers/cdrom/cdrom.c 		BUG_ON(!queue);	/* shouldn't be called from VFS path */
queue            2176 drivers/cdrom/cdrom.c 	struct request_queue *q = cdi->disk->queue;
queue             728 drivers/cdrom/gdrom.c 	gd.disk->queue = gd.gdrom_rq;
queue             113 drivers/char/apm-emulation.c 	struct apm_queue	queue;
queue             197 drivers/char/apm-emulation.c 			queue_add_event(&as->queue, event);
queue             212 drivers/char/apm-emulation.c 	if (queue_empty(&as->queue) && fp->f_flags & O_NONBLOCK)
queue             215 drivers/char/apm-emulation.c 	wait_event_interruptible(apm_waitqueue, !queue_empty(&as->queue));
queue             217 drivers/char/apm-emulation.c 	while ((i >= sizeof(event)) && !queue_empty(&as->queue)) {
queue             218 drivers/char/apm-emulation.c 		event = queue_get_event(&as->queue);
queue             245 drivers/char/apm-emulation.c 	return queue_empty(&as->queue) ? 0 : EPOLLIN | EPOLLRDNORM;
queue             538 drivers/char/apm-emulation.c 				queue_add_event(&as->queue, apm_event);
queue              65 drivers/char/ipmi/bt-bmc.c 	wait_queue_head_t	queue;
queue             205 drivers/char/ipmi/bt-bmc.c 	if (wait_event_interruptible(bt_bmc->queue,
queue             279 drivers/char/ipmi/bt-bmc.c 	if (wait_event_interruptible(bt_bmc->queue,
queue             343 drivers/char/ipmi/bt-bmc.c 	poll_wait(file, &bt_bmc->queue, wait);
queue             371 drivers/char/ipmi/bt-bmc.c 	wake_up(&bt_bmc->queue);
queue             392 drivers/char/ipmi/bt-bmc.c 	wake_up(&bt_bmc->queue);
queue             468 drivers/char/ipmi/bt-bmc.c 	init_waitqueue_head(&bt_bmc->queue);
queue             123 drivers/char/ipmi/kcs_bmc.c 			wake_up_interruptible(&kcs_bmc->queue);
queue             263 drivers/char/ipmi/kcs_bmc.c 	poll_wait(filp, &kcs_bmc->queue, wait);
queue             282 drivers/char/ipmi/kcs_bmc.c 		wait_event_interruptible(kcs_bmc->queue,
queue             438 drivers/char/ipmi/kcs_bmc.c 	init_waitqueue_head(&kcs_bmc->queue);
queue              83 drivers/char/ipmi/kcs_bmc.h 	wait_queue_head_t queue;
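
The bt-bmc and kcs_bmc entries are the classic wait-queue quartet: init_waitqueue_head() at probe, wait_event_interruptible() on the read path, poll_wait() in .poll, and wake_up() from the interrupt handler. A compressed sketch, with a data_ready flag standing in for the drivers' real hardware status checks:

#include <linux/wait.h>
#include <linux/poll.h>
#include <linux/fs.h>

struct example_bmc {
	wait_queue_head_t queue;
	bool data_ready;		/* stand-in for a status-register test */
};

static void example_probe(struct example_bmc *bmc)
{
	init_waitqueue_head(&bmc->queue);
}

static int example_wait_for_data(struct example_bmc *bmc)
{
	/* Sleeps until woken with the condition true; -ERESTARTSYS on signal. */
	return wait_event_interruptible(bmc->queue, bmc->data_ready);
}

static __poll_t example_poll(struct file *file, poll_table *wait,
			     struct example_bmc *bmc)
{
	poll_wait(file, &bmc->queue, wait);
	return bmc->data_ready ? EPOLLIN | EPOLLRDNORM : 0;
}

static void example_irq(struct example_bmc *bmc)
{
	bmc->data_ready = true;
	wake_up(&bmc->queue);
}
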
queue             240 drivers/char/tpm/st33zp24/st33zp24.c 			wait_queue_head_t *queue, bool check_cancel)
queue             270 drivers/char/tpm/st33zp24/st33zp24.c 			ret = wait_event_interruptible_timeout(*queue,
queue             171 drivers/char/tpm/tpm_i2c_nuvoton.c 				     u32 timeout, wait_queue_head_t *queue)
queue             173 drivers/char/tpm/tpm_i2c_nuvoton.c 	if ((chip->flags & TPM_CHIP_FLAG_IRQ) && queue) {
queue             179 drivers/char/tpm/tpm_i2c_nuvoton.c 		rc = wait_event_interruptible_timeout(*queue,
queue             220 drivers/char/tpm/tpm_i2c_nuvoton.c 					   wait_queue_head_t *queue)
queue             225 drivers/char/tpm/tpm_i2c_nuvoton.c 					 timeout, queue);
queue              48 drivers/char/tpm/tpm_tis_core.c 		unsigned long timeout, wait_queue_head_t *queue,
queue              68 drivers/char/tpm/tpm_tis_core.c 		rc = wait_event_interruptible_timeout(*queue,
queue              56 drivers/char/tpm/xen-tpmfront.c 		unsigned long timeout, wait_queue_head_t *queue,
queue              76 drivers/char/tpm/xen-tpmfront.c 		rc = wait_event_interruptible_timeout(*queue,
queue             200 drivers/crypto/atmel-aes.c 	struct crypto_queue	queue;
queue             944 drivers/crypto/atmel-aes.c 		ret = crypto_enqueue_request(&dd->queue, new_areq);
queue             949 drivers/crypto/atmel-aes.c 	backlog = crypto_get_backlog(&dd->queue);
queue             950 drivers/crypto/atmel-aes.c 	areq = crypto_dequeue_request(&dd->queue);
queue            2656 drivers/crypto/atmel-aes.c 	crypto_init_queue(&aes_dd->queue, ATMEL_AES_QUEUE_LENGTH);
queue             142 drivers/crypto/atmel-sha.c 	struct crypto_queue	queue;
queue            1062 drivers/crypto/atmel-sha.c 		ret = ahash_enqueue_request(&dd->queue, req);
queue            1069 drivers/crypto/atmel-sha.c 	backlog = crypto_get_backlog(&dd->queue);
queue            1070 drivers/crypto/atmel-sha.c 	async_req = crypto_dequeue_request(&dd->queue);
queue            2763 drivers/crypto/atmel-sha.c 	crypto_init_queue(&sha_dd->queue, ATMEL_SHA_QUEUE_LENGTH);
queue             104 drivers/crypto/atmel-tdes.c 	struct crypto_queue	queue;
queue             596 drivers/crypto/atmel-tdes.c 		ret = ablkcipher_enqueue_request(&dd->queue, req);
queue             601 drivers/crypto/atmel-tdes.c 	backlog = crypto_get_backlog(&dd->queue);
queue             602 drivers/crypto/atmel-tdes.c 	async_req = crypto_dequeue_request(&dd->queue);
queue            1256 drivers/crypto/atmel-tdes.c 	crypto_init_queue(&tdes_dd->queue, ATMEL_TDES_QUEUE_LENGTH);
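
The three Atmel drivers (and hifn_795x, img-hash, marvell/cesa, inside-secure and mediatek below) all dispatch through a struct crypto_queue: crypto_init_queue() at probe, crypto_enqueue_request() from submitters, then crypto_get_backlog()/crypto_dequeue_request() under the device lock before programming the engine. A sketch of that dispatch step, with an invented device structure and queue length:

#include <crypto/algapi.h>
#include <linux/spinlock.h>

#define EXAMPLE_QUEUE_LENGTH 50

struct example_dd {
	spinlock_t lock;
	struct crypto_queue queue;
};

static void example_init(struct example_dd *dd)
{
	spin_lock_init(&dd->lock);
	crypto_init_queue(&dd->queue, EXAMPLE_QUEUE_LENGTH);
}

/*
 * Enqueue a new request (if any) and pull the next one to run. Returns the
 * enqueue status: -EINPROGRESS, or -EBUSY if the request was backlogged.
 */
static int example_handle_queue(struct example_dd *dd,
				struct crypto_async_request *new_areq)
{
	struct crypto_async_request *areq, *backlog;
	int ret = 0;

	spin_lock_bh(&dd->lock);
	if (new_areq)
		ret = crypto_enqueue_request(&dd->queue, new_areq);
	backlog = crypto_get_backlog(&dd->queue);
	areq = crypto_dequeue_request(&dd->queue);
	spin_unlock_bh(&dd->lock);

	if (!areq)
		return ret;

	/* A backlogged submitter is waiting for this notification. */
	if (backlog)
		backlog->complete(backlog, -EINPROGRESS);

	/* ...program the engine with areq here... */
	return ret;
}
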
queue             261 drivers/crypto/axis/artpec6_crypto.c 	struct list_head queue; /* waiting for pdma fifo space */
queue             468 drivers/crypto/axis/artpec6_crypto.c 		list_add_tail(&req->list, &ac->queue);
queue            2051 drivers/crypto/axis/artpec6_crypto.c 	while (!list_empty(&ac->queue) && !artpec6_crypto_busy()) {
queue            2052 drivers/crypto/axis/artpec6_crypto.c 		req = list_first_entry(&ac->queue,
queue            2886 drivers/crypto/axis/artpec6_crypto.c 	INIT_LIST_HEAD(&ac->queue);
queue             180 drivers/crypto/caam/dpseci.c 			u8 queue, const struct dpseci_rx_queue_cfg *cfg)
queue             191 drivers/crypto/caam/dpseci.c 	cmd_params->queue = queue;
queue             214 drivers/crypto/caam/dpseci.c 			u8 queue, struct dpseci_rx_queue_attr *attr)
queue             224 drivers/crypto/caam/dpseci.c 	cmd_params->queue = queue;
queue             254 drivers/crypto/caam/dpseci.c 			u8 queue, struct dpseci_tx_queue_attr *attr)
queue             265 drivers/crypto/caam/dpseci.c 	cmd_params->queue = queue;
queue             154 drivers/crypto/caam/dpseci.h 			u8 queue, const struct dpseci_rx_queue_cfg *cfg);
queue             173 drivers/crypto/caam/dpseci.h 			u8 queue, struct dpseci_rx_queue_attr *attr);
queue             186 drivers/crypto/caam/dpseci.h 			u8 queue, struct dpseci_tx_queue_attr *attr);
queue              85 drivers/crypto/caam/dpseci_cmd.h 	u8 queue;
queue              64 drivers/crypto/cavium/cpt/cptvf.h 	struct command_queue queue[CPT_NUM_QS_PER_VF];
queue              87 drivers/crypto/cavium/cpt/cptvf.h 	struct pending_queue queue[CPT_NUM_QS_PER_VF];
queue              91 drivers/crypto/cavium/cpt/cptvf.h 	for (i = 0, q = &qinfo->queue[i]; i < qinfo->nr_queues; i++, \
queue              92 drivers/crypto/cavium/cpt/cptvf.h 	     q = &qinfo->queue[i])
queue              84 drivers/crypto/cavium/cpt/cptvf_main.c 	struct pending_queue *queue;
queue              86 drivers/crypto/cavium/cpt/cptvf_main.c 	for_each_pending_queue(pqinfo, queue, i) {
queue              87 drivers/crypto/cavium/cpt/cptvf_main.c 		if (!queue->head)
queue              91 drivers/crypto/cavium/cpt/cptvf_main.c 		kzfree((queue->head));
queue              93 drivers/crypto/cavium/cpt/cptvf_main.c 		queue->front = 0;
queue              94 drivers/crypto/cavium/cpt/cptvf_main.c 		queue->rear = 0;
queue             109 drivers/crypto/cavium/cpt/cptvf_main.c 	struct pending_queue *queue = NULL;
queue             116 drivers/crypto/cavium/cpt/cptvf_main.c 	for_each_pending_queue(pqinfo, queue, i) {
queue             117 drivers/crypto/cavium/cpt/cptvf_main.c 		queue->head = kzalloc((size), GFP_KERNEL);
queue             118 drivers/crypto/cavium/cpt/cptvf_main.c 		if (!queue->head) {
queue             123 drivers/crypto/cavium/cpt/cptvf_main.c 		queue->front = 0;
queue             124 drivers/crypto/cavium/cpt/cptvf_main.c 		queue->rear = 0;
queue             125 drivers/crypto/cavium/cpt/cptvf_main.c 		atomic64_set((&queue->pending_count), (0));
queue             128 drivers/crypto/cavium/cpt/cptvf_main.c 		spin_lock_init(&queue->lock);
queue             173 drivers/crypto/cavium/cpt/cptvf_main.c 	struct command_queue *queue = NULL;
queue             180 drivers/crypto/cavium/cpt/cptvf_main.c 		queue = &cqinfo->queue[i];
queue             181 drivers/crypto/cavium/cpt/cptvf_main.c 		if (hlist_empty(&cqinfo->queue[i].chead))
queue             184 drivers/crypto/cavium/cpt/cptvf_main.c 		hlist_for_each_entry_safe(chunk, node, &cqinfo->queue[i].chead,
queue             195 drivers/crypto/cavium/cpt/cptvf_main.c 		queue->nchunks = 0;
queue             196 drivers/crypto/cavium/cpt/cptvf_main.c 		queue->idx = 0;
queue             209 drivers/crypto/cavium/cpt/cptvf_main.c 	struct command_queue *queue = NULL;
queue             227 drivers/crypto/cavium/cpt/cptvf_main.c 		queue = &cqinfo->queue[i];
queue             228 drivers/crypto/cavium/cpt/cptvf_main.c 		INIT_HLIST_HEAD(&cqinfo->queue[i].chead);
queue             242 drivers/crypto/cavium/cpt/cptvf_main.c 					i, queue->nchunks);
queue             248 drivers/crypto/cavium/cpt/cptvf_main.c 			if (queue->nchunks == 0) {
queue             250 drivers/crypto/cavium/cpt/cptvf_main.c 					       &cqinfo->queue[i].chead);
queue             257 drivers/crypto/cavium/cpt/cptvf_main.c 			queue->nchunks++;
queue             269 drivers/crypto/cavium/cpt/cptvf_main.c 		queue->qhead = curr;
queue             270 drivers/crypto/cavium/cpt/cptvf_main.c 		spin_lock_init(&queue->lock);
queue             653 drivers/crypto/cavium/cpt/cptvf_main.c 	base_addr = (u64)(cptvf->cqinfo.queue[0].qhead->dma_addr);
queue              36 drivers/crypto/cavium/cpt/cptvf_reqmanager.c 	struct pending_queue *queue = &pqinfo->queue[qno];
queue              38 drivers/crypto/cavium/cpt/cptvf_reqmanager.c 	queue->front++;
queue              39 drivers/crypto/cavium/cpt/cptvf_reqmanager.c 	if (unlikely(queue->front == pqinfo->qlen))
queue              40 drivers/crypto/cavium/cpt/cptvf_reqmanager.c 		queue->front = 0;
queue             228 drivers/crypto/cavium/cpt/cptvf_reqmanager.c 	struct command_queue *queue;
queue             240 drivers/crypto/cavium/cpt/cptvf_reqmanager.c 	queue = &qinfo->queue[qno];
queue             242 drivers/crypto/cavium/cpt/cptvf_reqmanager.c 	spin_lock(&queue->lock);
queue             243 drivers/crypto/cavium/cpt/cptvf_reqmanager.c 	ent = &queue->qhead->head[queue->idx * qinfo->cmd_size];
queue             246 drivers/crypto/cavium/cpt/cptvf_reqmanager.c 	if (++queue->idx >= queue->qhead->size / 64) {
queue             249 drivers/crypto/cavium/cpt/cptvf_reqmanager.c 		hlist_for_each(node, &queue->chead) {
queue             252 drivers/crypto/cavium/cpt/cptvf_reqmanager.c 			if (chunk == queue->qhead) {
queue             255 drivers/crypto/cavium/cpt/cptvf_reqmanager.c 				queue->qhead = chunk;
queue             259 drivers/crypto/cavium/cpt/cptvf_reqmanager.c 		queue->idx = 0;
queue             265 drivers/crypto/cavium/cpt/cptvf_reqmanager.c 	spin_unlock(&queue->lock);
queue             333 drivers/crypto/cavium/cpt/cptvf_reqmanager.c 	struct pending_queue *pqueue = &pqinfo->queue[qno];
queue             412 drivers/crypto/cavium/cpt/cptvf_reqmanager.c 	int ret = 0, clear = 0, queue = 0;
queue             481 drivers/crypto/cavium/cpt/cptvf_reqmanager.c 	queue = 0;
queue             482 drivers/crypto/cavium/cpt/cptvf_reqmanager.c 	pqueue = &cptvf->pqinfo.queue[queue];
queue             486 drivers/crypto/cavium/cpt/cptvf_reqmanager.c 		process_pending_queue(cptvf, &cptvf->pqinfo, queue);
queue             495 drivers/crypto/cavium/cpt/cptvf_reqmanager.c 			process_pending_queue(cptvf, &cptvf->pqinfo, queue);
queue             501 drivers/crypto/cavium/cpt/cptvf_reqmanager.c 			queue, pqueue->rear, pqueue->front);
queue             530 drivers/crypto/cavium/cpt/cptvf_reqmanager.c 	ret = send_cpt_command(cptvf, &cptinst, queue);
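
The CPT pending queue is a fixed-length ring addressed by front/rear indices; the dequeue path above simply advances front and wraps at qlen. The wrap logic, reduced to its essentials:

#include <linux/types.h>

/* Sketch of the CPT-style pending ring: front/rear indices wrap at qlen. */
struct example_pending_queue {
	u32 front;
	u32 rear;
	u32 qlen;
};

static void example_dequeue(struct example_pending_queue *q)
{
	q->front++;
	if (q->front == q->qlen)
		q->front = 0;
}

static void example_enqueue(struct example_pending_queue *q)
{
	q->rear++;
	if (q->rear == q->qlen)
		q->rear = 0;
}
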
queue             120 drivers/crypto/cavium/zip/zip_deflate.c 	u32 queue;
queue             127 drivers/crypto/cavium/zip/zip_deflate.c 	queue = zip_load_instr(zip_cmd, zip_dev);
queue             146 drivers/crypto/cavium/zip/zip_deflate.c 		zip_update_cmd_bufs(zip_dev, queue);
queue              57 drivers/crypto/cavium/zip/zip_device.c static inline u32 zip_cmd_queue_consumed(struct zip_device *zip_dev, int queue)
queue              59 drivers/crypto/cavium/zip/zip_device.c 	return ((zip_dev->iq[queue].sw_head - zip_dev->iq[queue].sw_tail) *
queue              81 drivers/crypto/cavium/zip/zip_device.c 	u32 queue = 0;
queue              91 drivers/crypto/cavium/zip/zip_device.c 		queue = 0;
queue              93 drivers/crypto/cavium/zip/zip_device.c 		queue = 1;
queue              95 drivers/crypto/cavium/zip/zip_device.c 	zip_dbg("CPU Core: %d Queue number:%d", raw_smp_processor_id(), queue);
queue              98 drivers/crypto/cavium/zip/zip_device.c 	spin_lock(&zip_dev->iq[queue].lock);
queue             109 drivers/crypto/cavium/zip/zip_device.c 	zip_dbg("sw_head : %lx", zip_dev->iq[queue].sw_head);
queue             110 drivers/crypto/cavium/zip/zip_device.c 	zip_dbg("sw_tail : %lx", zip_dev->iq[queue].sw_tail);
queue             112 drivers/crypto/cavium/zip/zip_device.c 	consumed = zip_cmd_queue_consumed(zip_dev, queue);
queue             117 drivers/crypto/cavium/zip/zip_device.c 		memcpy((u8 *)zip_dev->iq[queue].sw_head, (u8 *)instr,
queue             119 drivers/crypto/cavium/zip/zip_device.c 		zip_dev->iq[queue].sw_head += 16; /* 16 64_bit words = 128B */
queue             122 drivers/crypto/cavium/zip/zip_device.c 		ncb_ptr = zip_dev->iq[queue].sw_head;
queue             125 drivers/crypto/cavium/zip/zip_device.c 			ncb_ptr, zip_dev->iq[queue].sw_head - 16);
queue             128 drivers/crypto/cavium/zip/zip_device.c 		zip_dev->iq[queue].sw_head = zip_dev->iq[queue].sw_tail;
queue             130 drivers/crypto/cavium/zip/zip_device.c 		zip_dev->iq[queue].free_flag = 1;
queue             134 drivers/crypto/cavium/zip/zip_device.c 		ncp.s.addr = __pa(zip_dev->iq[queue].sw_head);
queue             137 drivers/crypto/cavium/zip/zip_device.c 			*ncb_ptr, __pa(zip_dev->iq[queue].sw_head));
queue             139 drivers/crypto/cavium/zip/zip_device.c 		zip_dev->iq[queue].pend_cnt++;
queue             144 drivers/crypto/cavium/zip/zip_device.c 		memcpy((u8 *)zip_dev->iq[queue].sw_head, (u8 *)instr,
queue             146 drivers/crypto/cavium/zip/zip_device.c 		zip_dev->iq[queue].sw_head += 16; /* 16 64_bit words = 128B */
queue             148 drivers/crypto/cavium/zip/zip_device.c 		zip_dev->iq[queue].pend_cnt++;
queue             151 drivers/crypto/cavium/zip/zip_device.c 		zip_dev->iq[queue].sw_head, zip_dev->iq[queue].sw_tail,
queue             152 drivers/crypto/cavium/zip/zip_device.c 		zip_dev->iq[queue].hw_tail);
queue             155 drivers/crypto/cavium/zip/zip_device.c 		zip_dev->iq[queue].pend_cnt);
queue             161 drivers/crypto/cavium/zip/zip_device.c 		      (zip_dev->reg_base + ZIP_QUEX_DOORBELL(queue)));
queue             164 drivers/crypto/cavium/zip/zip_device.c 	spin_unlock(&zip_dev->iq[queue].lock);
queue             166 drivers/crypto/cavium/zip/zip_device.c 	return queue;
queue             175 drivers/crypto/cavium/zip/zip_device.c void zip_update_cmd_bufs(struct zip_device *zip_dev, u32 queue)
queue             178 drivers/crypto/cavium/zip/zip_device.c 	spin_lock(&zip_dev->iq[queue].lock);
queue             181 drivers/crypto/cavium/zip/zip_device.c 	if (zip_dev->iq[queue].free_flag == 1) {
queue             184 drivers/crypto/cavium/zip/zip_device.c 		zip_dev->iq[queue].free_flag = 0;
queue             187 drivers/crypto/cavium/zip/zip_device.c 		zip_dev->iq[queue].hw_tail = zip_dev->iq[queue].sw_head;
queue             190 drivers/crypto/cavium/zip/zip_device.c 		zip_dev->iq[queue].hw_tail += 16; /* 16 64_bit words = 128B */
queue             193 drivers/crypto/cavium/zip/zip_device.c 	zip_dev->iq[queue].done_cnt++;
queue             194 drivers/crypto/cavium/zip/zip_device.c 	zip_dev->iq[queue].pend_cnt--;
queue             197 drivers/crypto/cavium/zip/zip_device.c 		zip_dev->iq[queue].sw_head, zip_dev->iq[queue].sw_tail,
queue             198 drivers/crypto/cavium/zip/zip_device.c 		zip_dev->iq[queue].hw_tail);
queue             199 drivers/crypto/cavium/zip/zip_device.c 	zip_dbg(" Got CC : pend_cnt : %d\n", zip_dev->iq[queue].pend_cnt);
queue             201 drivers/crypto/cavium/zip/zip_device.c 	spin_unlock(&zip_dev->iq[queue].lock);
queue             133 drivers/crypto/cavium/zip/zip_inflate.c 	u32 queue;
queue             141 drivers/crypto/cavium/zip/zip_inflate.c 	queue = zip_load_instr(zip_cmd, zip_dev);
queue             169 drivers/crypto/cavium/zip/zip_inflate.c 		zip_update_cmd_bufs(zip_dev, queue);
queue             173 drivers/crypto/cavium/zip/zip_inflate.c 	zip_update_cmd_bufs(zip_dev, queue);
queue             117 drivers/crypto/cavium/zip/zip_main.h void zip_update_cmd_bufs(struct zip_device *zip_dev, u32 queue);
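
zip_load_instr() above drives a software-managed instruction FIFO: under the per-queue lock it copies the 128-byte instruction to sw_head, advances the head, bumps pend_cnt, and finally rings the hardware doorbell. A condensed sketch of the happy path; the doorbell write and structure layout are illustrative, not the driver's actual register interface:

#include <linux/io.h>
#include <linux/spinlock.h>
#include <linux/string.h>

#define EXAMPLE_INSTR_WORDS 16		/* 16 x 64-bit words = 128 bytes */

struct example_iq {
	spinlock_t lock;
	u64 *sw_head;			/* next free slot in the ring */
	u32 pend_cnt;
};

static void example_load_instr(struct example_iq *iq, const u64 *instr,
			       void __iomem *doorbell)
{
	spin_lock(&iq->lock);

	memcpy(iq->sw_head, instr, EXAMPLE_INSTR_WORDS * sizeof(u64));
	iq->sw_head += EXAMPLE_INSTR_WORDS;
	iq->pend_cnt++;

	/* Tell the engine one more instruction is ready. */
	writeq(1, doorbell);

	spin_unlock(&iq->lock);
}
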
queue            1833 drivers/crypto/chelsio/chtls/chtls_cm.c 			   struct chtls_dev *cdev, int status, int queue)
queue            1845 drivers/crypto/chelsio/chtls/chtls_cm.c 		req->status = (queue << 1);
queue            1853 drivers/crypto/chelsio/chtls/chtls_cm.c 	set_wr_txq(reply_skb, CPL_PRIORITY_DATA, queue);
queue            1881 drivers/crypto/chelsio/chtls/chtls_cm.c 				 int status, int queue)
queue            1893 drivers/crypto/chelsio/chtls/chtls_cm.c 		req->status = (queue << 1) | status;
queue            1899 drivers/crypto/chelsio/chtls/chtls_cm.c 	set_wr_txq(reply_skb, CPL_PRIORITY_DATA, queue);
queue            1920 drivers/crypto/chelsio/chtls/chtls_cm.c 	int queue;
queue            1924 drivers/crypto/chelsio/chtls/chtls_cm.c 	queue = csk->txq_idx;
queue            1929 drivers/crypto/chelsio/chtls/chtls_cm.c 		       CPL_ABORT_NO_RST, queue);
queue            1957 drivers/crypto/chelsio/chtls/chtls_cm.c 		int queue = csk->txq_idx;
queue            1960 drivers/crypto/chelsio/chtls/chtls_cm.c 		send_abort_rpl(sk, skb, cdev, CPL_ABORT_NO_RST, queue);
queue            1975 drivers/crypto/chelsio/chtls/chtls_cm.c 	int queue = csk->txq_idx;
queue            2011 drivers/crypto/chelsio/chtls/chtls_cm.c 	chtls_send_abort_rpl(sk, skb, csk->cdev, rst_status, queue);
queue             427 drivers/crypto/hifn_795x.c 	struct crypto_queue	queue;
queue            1821 drivers/crypto/hifn_795x.c 				 dev->success, dev->queue.qlen, dev->queue.max_qlen,
queue            1928 drivers/crypto/hifn_795x.c 	while ((async_req = crypto_dequeue_request(&dev->queue))) {
queue            1990 drivers/crypto/hifn_795x.c 		err = ablkcipher_enqueue_request(&dev->queue, req);
queue            2046 drivers/crypto/hifn_795x.c 		backlog = crypto_get_backlog(&dev->queue);
queue            2047 drivers/crypto/hifn_795x.c 		async_req = crypto_dequeue_request(&dev->queue);
queue            2077 drivers/crypto/hifn_795x.c 	if (dev->started < HIFN_QUEUE_LENGTH &&	dev->queue.qlen)
queue            2457 drivers/crypto/hifn_795x.c 	if (dev->started < HIFN_QUEUE_LENGTH &&	dev->queue.qlen)
queue            2534 drivers/crypto/hifn_795x.c 	crypto_init_queue(&dev->queue, 1);
queue             344 drivers/crypto/hisilicon/qm.c static int qm_mb(struct hisi_qm *qm, u8 cmd, dma_addr_t dma_addr, u16 queue,
queue             351 drivers/crypto/hisilicon/qm.c 		queue, cmd, (unsigned long long)dma_addr);
queue             356 drivers/crypto/hisilicon/qm.c 	mailbox.queue_num = queue;
queue             231 drivers/crypto/hisilicon/sec/sec_algs.c 	struct device *dev = ctx->queue->dev_info->dev;
queue             383 drivers/crypto/hisilicon/sec/sec_algs.c static int sec_send_request(struct sec_request *sec_req, struct sec_queue *queue)
queue             400 drivers/crypto/hisilicon/sec/sec_algs.c 		if (!queue->havesoftqueue ||
queue             401 drivers/crypto/hisilicon/sec/sec_algs.c 		    (kfifo_is_empty(&queue->softqueue) &&
queue             402 drivers/crypto/hisilicon/sec/sec_algs.c 		     sec_queue_empty(queue))) {
queue             403 drivers/crypto/hisilicon/sec/sec_algs.c 			ret = sec_queue_send(queue, &el->req, sec_req);
queue             411 drivers/crypto/hisilicon/sec/sec_algs.c 			kfifo_put(&queue->softqueue, el);
queue             431 drivers/crypto/hisilicon/sec/sec_algs.c 	struct device *dev = ctx->queue->dev_info->dev;
queue             451 drivers/crypto/hisilicon/sec/sec_algs.c 	mutex_lock(&ctx->queue->queuelock);
queue             483 drivers/crypto/hisilicon/sec/sec_algs.c 	if (ctx->queue->havesoftqueue &&
queue             484 drivers/crypto/hisilicon/sec/sec_algs.c 	    !kfifo_is_empty(&ctx->queue->softqueue) &&
queue             485 drivers/crypto/hisilicon/sec/sec_algs.c 	    sec_queue_empty(ctx->queue)) {
queue             486 drivers/crypto/hisilicon/sec/sec_algs.c 		ret = kfifo_get(&ctx->queue->softqueue, &nextrequest);
queue             493 drivers/crypto/hisilicon/sec/sec_algs.c 			sec_queue_send(ctx->queue, &nextrequest->req,
queue             500 drivers/crypto/hisilicon/sec/sec_algs.c 		if (sec_queue_can_enqueue(ctx->queue,
queue             502 drivers/crypto/hisilicon/sec/sec_algs.c 		    (ctx->queue->havesoftqueue &&
queue             503 drivers/crypto/hisilicon/sec/sec_algs.c 		     kfifo_avail(&ctx->queue->softqueue) >
queue             505 drivers/crypto/hisilicon/sec/sec_algs.c 			sec_send_request(backlog_req, ctx->queue);
queue             511 drivers/crypto/hisilicon/sec/sec_algs.c 	mutex_unlock(&ctx->queue->queuelock);
queue             516 drivers/crypto/hisilicon/sec/sec_algs.c 	sec_alg_free_el(sec_req_el, ctx->queue->dev_info);
queue             712 drivers/crypto/hisilicon/sec/sec_algs.c 	struct sec_queue *queue = ctx->queue;
queue             714 drivers/crypto/hisilicon/sec/sec_algs.c 	struct sec_dev_info *info = queue->dev_info;
queue             799 drivers/crypto/hisilicon/sec/sec_algs.c 	mutex_lock(&queue->queuelock);
queue             808 drivers/crypto/hisilicon/sec/sec_algs.c 	if ((!sec_queue_can_enqueue(queue, steps) &&
queue             809 drivers/crypto/hisilicon/sec/sec_algs.c 	     (!queue->havesoftqueue ||
queue             810 drivers/crypto/hisilicon/sec/sec_algs.c 	      kfifo_avail(&queue->softqueue) > steps)) ||
queue             815 drivers/crypto/hisilicon/sec/sec_algs.c 			mutex_unlock(&queue->queuelock);
queue             819 drivers/crypto/hisilicon/sec/sec_algs.c 		mutex_unlock(&queue->queuelock);
queue             822 drivers/crypto/hisilicon/sec/sec_algs.c 	ret = sec_send_request(sec_req, queue);
queue             823 drivers/crypto/hisilicon/sec/sec_algs.c 	mutex_unlock(&queue->queuelock);
queue             878 drivers/crypto/hisilicon/sec/sec_algs.c 	ctx->queue = sec_queue_alloc_start_safe();
queue             879 drivers/crypto/hisilicon/sec/sec_algs.c 	if (IS_ERR(ctx->queue))
queue             880 drivers/crypto/hisilicon/sec/sec_algs.c 		return PTR_ERR(ctx->queue);
queue             882 drivers/crypto/hisilicon/sec/sec_algs.c 	mutex_init(&ctx->queue->queuelock);
queue             883 drivers/crypto/hisilicon/sec/sec_algs.c 	ctx->queue->havesoftqueue = false;
queue             891 drivers/crypto/hisilicon/sec/sec_algs.c 	struct device *dev = ctx->queue->dev_info->dev;
queue             898 drivers/crypto/hisilicon/sec/sec_algs.c 	sec_queue_stop_release(ctx->queue);
queue             910 drivers/crypto/hisilicon/sec/sec_algs.c 	INIT_KFIFO(ctx->queue->softqueue);
queue             911 drivers/crypto/hisilicon/sec/sec_algs.c 	ret = kfifo_alloc(&ctx->queue->softqueue, 512, GFP_KERNEL);
queue             916 drivers/crypto/hisilicon/sec/sec_algs.c 	ctx->queue->havesoftqueue = true;
queue             925 drivers/crypto/hisilicon/sec/sec_algs.c 	kfifo_free(&ctx->queue->softqueue);
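
sec_algs.c parks overflow work on a kfifo of element pointers in front of the hardware queue: kfifo_alloc() when the software queue is enabled, kfifo_put() when the hardware ring is busy, kfifo_get()/kfifo_is_empty() when draining in completion context. A minimal sketch of that pointer-FIFO idiom with invented types:

#include <linux/kfifo.h>
#include <linux/slab.h>

struct example_el {
	int id;
};

struct example_queue {
	DECLARE_KFIFO_PTR(softqueue, struct example_el *);
};

static int example_softqueue_init(struct example_queue *q)
{
	INIT_KFIFO(q->softqueue);
	return kfifo_alloc(&q->softqueue, 512, GFP_KERNEL);
}

static bool example_defer(struct example_queue *q, struct example_el *el)
{
	/* kfifo_put() returns 0 when the FIFO is full. */
	return kfifo_put(&q->softqueue, el);
}

static struct example_el *example_next(struct example_queue *q)
{
	struct example_el *el;

	if (!kfifo_get(&q->softqueue, &el))
		return NULL;
	return el;
}
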
queue             227 drivers/crypto/hisilicon/sec/sec_drv.c static int sec_queue_map_io(struct sec_queue *queue)
queue             229 drivers/crypto/hisilicon/sec/sec_drv.c 	struct device *dev = queue->dev_info->dev;
queue             234 drivers/crypto/hisilicon/sec/sec_drv.c 				    2 + queue->queue_id);
queue             237 drivers/crypto/hisilicon/sec/sec_drv.c 			queue->queue_id);
queue             240 drivers/crypto/hisilicon/sec/sec_drv.c 	queue->regs = ioremap(res->start, resource_size(res));
queue             241 drivers/crypto/hisilicon/sec/sec_drv.c 	if (!queue->regs)
queue             247 drivers/crypto/hisilicon/sec/sec_drv.c static void sec_queue_unmap_io(struct sec_queue *queue)
queue             249 drivers/crypto/hisilicon/sec/sec_drv.c 	 iounmap(queue->regs);
queue             252 drivers/crypto/hisilicon/sec/sec_drv.c static int sec_queue_ar_pkgattr(struct sec_queue *queue, u32 ar_pkg)
queue             254 drivers/crypto/hisilicon/sec/sec_drv.c 	void __iomem *addr = queue->regs +  SEC_Q_ARUSER_CFG_REG;
queue             267 drivers/crypto/hisilicon/sec/sec_drv.c static int sec_queue_aw_pkgattr(struct sec_queue *queue, u32 aw_pkg)
queue             269 drivers/crypto/hisilicon/sec/sec_drv.c 	void __iomem *addr = queue->regs + SEC_Q_AWUSER_CFG_REG;
queue             519 drivers/crypto/hisilicon/sec/sec_drv.c static void sec_queue_ar_alloc(struct sec_queue *queue, u32 alloc)
queue             521 drivers/crypto/hisilicon/sec/sec_drv.c 	void __iomem *addr = queue->regs + SEC_Q_ARUSER_CFG_REG;
queue             536 drivers/crypto/hisilicon/sec/sec_drv.c static void sec_queue_aw_alloc(struct sec_queue *queue, u32 alloc)
queue             538 drivers/crypto/hisilicon/sec/sec_drv.c 	void __iomem *addr = queue->regs + SEC_Q_AWUSER_CFG_REG;
queue             553 drivers/crypto/hisilicon/sec/sec_drv.c static void sec_queue_reorder(struct sec_queue *queue, bool reorder)
queue             555 drivers/crypto/hisilicon/sec/sec_drv.c 	void __iomem *base = queue->regs;
queue             566 drivers/crypto/hisilicon/sec/sec_drv.c static void sec_queue_depth(struct sec_queue *queue, u32 depth)
queue             568 drivers/crypto/hisilicon/sec/sec_drv.c 	void __iomem *addr = queue->regs + SEC_Q_DEPTH_CFG_REG;
queue             578 drivers/crypto/hisilicon/sec/sec_drv.c static void sec_queue_cmdbase_addr(struct sec_queue *queue, u64 addr)
queue             580 drivers/crypto/hisilicon/sec/sec_drv.c 	writel_relaxed(upper_32_bits(addr), queue->regs + SEC_Q_BASE_HADDR_REG);
queue             581 drivers/crypto/hisilicon/sec/sec_drv.c 	writel_relaxed(lower_32_bits(addr), queue->regs + SEC_Q_BASE_LADDR_REG);
queue             584 drivers/crypto/hisilicon/sec/sec_drv.c static void sec_queue_outorder_addr(struct sec_queue *queue, u64 addr)
queue             587 drivers/crypto/hisilicon/sec/sec_drv.c 		       queue->regs + SEC_Q_OUTORDER_BASE_HADDR_REG);
queue             589 drivers/crypto/hisilicon/sec/sec_drv.c 		       queue->regs + SEC_Q_OUTORDER_BASE_LADDR_REG);
queue             592 drivers/crypto/hisilicon/sec/sec_drv.c static void sec_queue_errbase_addr(struct sec_queue *queue, u64 addr)
queue             595 drivers/crypto/hisilicon/sec/sec_drv.c 		       queue->regs + SEC_Q_ERR_BASE_HADDR_REG);
queue             597 drivers/crypto/hisilicon/sec/sec_drv.c 		       queue->regs + SEC_Q_ERR_BASE_LADDR_REG);
queue             600 drivers/crypto/hisilicon/sec/sec_drv.c static void sec_queue_irq_disable(struct sec_queue *queue)
queue             602 drivers/crypto/hisilicon/sec/sec_drv.c 	writel_relaxed((u32)~0, queue->regs + SEC_Q_FLOW_INT_MKS_REG);
queue             605 drivers/crypto/hisilicon/sec/sec_drv.c static void sec_queue_irq_enable(struct sec_queue *queue)
queue             607 drivers/crypto/hisilicon/sec/sec_drv.c 	writel_relaxed(0, queue->regs + SEC_Q_FLOW_INT_MKS_REG);
queue             610 drivers/crypto/hisilicon/sec/sec_drv.c static void sec_queue_abn_irq_disable(struct sec_queue *queue)
queue             612 drivers/crypto/hisilicon/sec/sec_drv.c 	writel_relaxed((u32)~0, queue->regs + SEC_Q_FAIL_INT_MSK_REG);
queue             615 drivers/crypto/hisilicon/sec/sec_drv.c static void sec_queue_stop(struct sec_queue *queue)
queue             617 drivers/crypto/hisilicon/sec/sec_drv.c 	disable_irq(queue->task_irq);
queue             618 drivers/crypto/hisilicon/sec/sec_drv.c 	sec_queue_irq_disable(queue);
queue             619 drivers/crypto/hisilicon/sec/sec_drv.c 	writel_relaxed(0x0, queue->regs + SEC_QUEUE_ENB_REG);
queue             622 drivers/crypto/hisilicon/sec/sec_drv.c static void sec_queue_start(struct sec_queue *queue)
queue             624 drivers/crypto/hisilicon/sec/sec_drv.c 	sec_queue_irq_enable(queue);
queue             625 drivers/crypto/hisilicon/sec/sec_drv.c 	enable_irq(queue->task_irq);
queue             626 drivers/crypto/hisilicon/sec/sec_drv.c 	queue->expected = 0;
queue             627 drivers/crypto/hisilicon/sec/sec_drv.c 	writel_relaxed(SEC_Q_INIT_AND_STAT_CLEAR, queue->regs + SEC_Q_INIT_REG);
queue             628 drivers/crypto/hisilicon/sec/sec_drv.c 	writel_relaxed(0x1, queue->regs + SEC_QUEUE_ENB_REG);
queue             651 drivers/crypto/hisilicon/sec/sec_drv.c static int sec_queue_free(struct sec_queue *queue)
queue             653 drivers/crypto/hisilicon/sec/sec_drv.c 	struct sec_dev_info *info = queue->dev_info;
queue             655 drivers/crypto/hisilicon/sec/sec_drv.c 	if (queue->queue_id >= SEC_Q_NUM) {
queue             656 drivers/crypto/hisilicon/sec/sec_drv.c 		dev_err(info->dev, "No queue %d\n", queue->queue_id);
queue             660 drivers/crypto/hisilicon/sec/sec_drv.c 	if (!queue->in_use) {
queue             661 drivers/crypto/hisilicon/sec/sec_drv.c 		dev_err(info->dev, "Queue %d is idle\n", queue->queue_id);
queue             666 drivers/crypto/hisilicon/sec/sec_drv.c 	queue->in_use = false;
queue             681 drivers/crypto/hisilicon/sec/sec_drv.c 	struct sec_queue *queue = q;
queue             682 drivers/crypto/hisilicon/sec/sec_drv.c 	struct sec_queue_ring_cmd *msg_ring = &queue->ring_cmd;
queue             683 drivers/crypto/hisilicon/sec/sec_drv.c 	struct sec_queue_ring_cq *cq_ring = &queue->ring_cq;
queue             687 drivers/crypto/hisilicon/sec/sec_drv.c 	void __iomem *base = queue->regs;
queue             701 drivers/crypto/hisilicon/sec/sec_drv.c 		set_bit(q_id, queue->unprocessed);
queue             702 drivers/crypto/hisilicon/sec/sec_drv.c 		if (q_id == queue->expected)
queue             703 drivers/crypto/hisilicon/sec/sec_drv.c 			while (test_bit(queue->expected, queue->unprocessed)) {
queue             704 drivers/crypto/hisilicon/sec/sec_drv.c 				clear_bit(queue->expected, queue->unprocessed);
queue             705 drivers/crypto/hisilicon/sec/sec_drv.c 				msg = msg_ring->vaddr + queue->expected;
queue             708 drivers/crypto/hisilicon/sec/sec_drv.c 						queue->shadow[queue->expected]);
queue             709 drivers/crypto/hisilicon/sec/sec_drv.c 				queue->shadow[queue->expected] = NULL;
queue             710 drivers/crypto/hisilicon/sec/sec_drv.c 				queue->expected = (queue->expected + 1) %
queue             723 drivers/crypto/hisilicon/sec/sec_drv.c 	sec_queue_irq_enable(queue);
queue             728 drivers/crypto/hisilicon/sec/sec_drv.c static int sec_queue_irq_init(struct sec_queue *queue)
queue             730 drivers/crypto/hisilicon/sec/sec_drv.c 	struct sec_dev_info *info = queue->dev_info;
queue             731 drivers/crypto/hisilicon/sec/sec_drv.c 	int irq = queue->task_irq;
queue             735 drivers/crypto/hisilicon/sec/sec_drv.c 				   IRQF_TRIGGER_RISING, queue->name, queue);
queue             745 drivers/crypto/hisilicon/sec/sec_drv.c static int sec_queue_irq_uninit(struct sec_queue *queue)
queue             747 drivers/crypto/hisilicon/sec/sec_drv.c 	free_irq(queue->task_irq, queue);
queue             774 drivers/crypto/hisilicon/sec/sec_drv.c 	struct sec_queue *queue;
queue             776 drivers/crypto/hisilicon/sec/sec_drv.c 	queue = sec_alloc_queue(info);
queue             777 drivers/crypto/hisilicon/sec/sec_drv.c 	if (IS_ERR(queue)) {
queue             779 drivers/crypto/hisilicon/sec/sec_drv.c 			PTR_ERR(queue));
queue             780 drivers/crypto/hisilicon/sec/sec_drv.c 		return queue;
queue             783 drivers/crypto/hisilicon/sec/sec_drv.c 	sec_queue_start(queue);
queue             785 drivers/crypto/hisilicon/sec/sec_drv.c 	return queue;
queue             799 drivers/crypto/hisilicon/sec/sec_drv.c 	struct sec_queue *queue = ERR_PTR(-ENODEV);
queue             806 drivers/crypto/hisilicon/sec/sec_drv.c 	queue = sec_queue_alloc_start(info);
queue             811 drivers/crypto/hisilicon/sec/sec_drv.c 	return queue;
queue             821 drivers/crypto/hisilicon/sec/sec_drv.c int sec_queue_stop_release(struct sec_queue *queue)
queue             823 drivers/crypto/hisilicon/sec/sec_drv.c 	struct device *dev = queue->dev_info->dev;
queue             826 drivers/crypto/hisilicon/sec/sec_drv.c 	sec_queue_stop(queue);
queue             828 drivers/crypto/hisilicon/sec/sec_drv.c 	ret = sec_queue_free(queue);
queue             842 drivers/crypto/hisilicon/sec/sec_drv.c bool sec_queue_empty(struct sec_queue *queue)
queue             844 drivers/crypto/hisilicon/sec/sec_drv.c 	struct sec_queue_ring_cmd *msg_ring = &queue->ring_cmd;
queue             857 drivers/crypto/hisilicon/sec/sec_drv.c int sec_queue_send(struct sec_queue *queue, struct sec_bd_info *msg, void *ctx)
queue             859 drivers/crypto/hisilicon/sec/sec_drv.c 	struct sec_queue_ring_cmd *msg_ring = &queue->ring_cmd;
queue             860 drivers/crypto/hisilicon/sec/sec_drv.c 	void __iomem *base = queue->regs;
queue             871 drivers/crypto/hisilicon/sec/sec_drv.c 	queue->shadow[write] = ctx;
queue             884 drivers/crypto/hisilicon/sec/sec_drv.c bool sec_queue_can_enqueue(struct sec_queue *queue, int num)
queue             886 drivers/crypto/hisilicon/sec/sec_drv.c 	struct sec_queue_ring_cmd *msg_ring = &queue->ring_cmd;
queue             891 drivers/crypto/hisilicon/sec/sec_drv.c static void sec_queue_hw_init(struct sec_queue *queue)
queue             893 drivers/crypto/hisilicon/sec/sec_drv.c 	sec_queue_ar_alloc(queue, SEC_QUEUE_AR_FROCE_NOALLOC);
queue             894 drivers/crypto/hisilicon/sec/sec_drv.c 	sec_queue_aw_alloc(queue, SEC_QUEUE_AR_FROCE_NOALLOC);
queue             895 drivers/crypto/hisilicon/sec/sec_drv.c 	sec_queue_ar_pkgattr(queue, 1);
queue             896 drivers/crypto/hisilicon/sec/sec_drv.c 	sec_queue_aw_pkgattr(queue, 1);
queue             899 drivers/crypto/hisilicon/sec/sec_drv.c 	sec_queue_reorder(queue, true);
queue             902 drivers/crypto/hisilicon/sec/sec_drv.c 	writel_relaxed(1, queue->regs + SEC_Q_PROC_NUM_CFG_REG);
queue             904 drivers/crypto/hisilicon/sec/sec_drv.c 	sec_queue_depth(queue, SEC_QUEUE_LEN - 1);
queue             906 drivers/crypto/hisilicon/sec/sec_drv.c 	sec_queue_cmdbase_addr(queue, queue->ring_cmd.paddr);
queue             908 drivers/crypto/hisilicon/sec/sec_drv.c 	sec_queue_outorder_addr(queue, queue->ring_cq.paddr);
queue             910 drivers/crypto/hisilicon/sec/sec_drv.c 	sec_queue_errbase_addr(queue, queue->ring_db.paddr);
queue             912 drivers/crypto/hisilicon/sec/sec_drv.c 	writel_relaxed(0x100, queue->regs + SEC_Q_OT_TH_REG);
queue             914 drivers/crypto/hisilicon/sec/sec_drv.c 	sec_queue_abn_irq_disable(queue);
queue             915 drivers/crypto/hisilicon/sec/sec_drv.c 	sec_queue_irq_disable(queue);
queue             916 drivers/crypto/hisilicon/sec/sec_drv.c 	writel_relaxed(SEC_Q_INIT_AND_STAT_CLEAR, queue->regs + SEC_Q_INIT_REG);
queue            1001 drivers/crypto/hisilicon/sec/sec_drv.c 				struct sec_queue *queue, int queue_id)
queue            1003 drivers/crypto/hisilicon/sec/sec_drv.c 	queue->dev_info = info;
queue            1004 drivers/crypto/hisilicon/sec/sec_drv.c 	queue->queue_id = queue_id;
queue            1005 drivers/crypto/hisilicon/sec/sec_drv.c 	snprintf(queue->name, sizeof(queue->name),
queue            1006 drivers/crypto/hisilicon/sec/sec_drv.c 		 "%s_%d", dev_name(info->dev), queue->queue_id);
queue            1077 drivers/crypto/hisilicon/sec/sec_drv.c static int sec_queue_res_cfg(struct sec_queue *queue)
queue            1079 drivers/crypto/hisilicon/sec/sec_drv.c 	struct device *dev = queue->dev_info->dev;
queue            1080 drivers/crypto/hisilicon/sec/sec_drv.c 	struct sec_queue_ring_cmd *ring_cmd = &queue->ring_cmd;
queue            1081 drivers/crypto/hisilicon/sec/sec_drv.c 	struct sec_queue_ring_cq *ring_cq = &queue->ring_cq;
queue            1082 drivers/crypto/hisilicon/sec/sec_drv.c 	struct sec_queue_ring_db *ring_db = &queue->ring_db;
queue            1107 drivers/crypto/hisilicon/sec/sec_drv.c 	queue->task_irq = platform_get_irq(to_platform_device(dev),
queue            1108 drivers/crypto/hisilicon/sec/sec_drv.c 					   queue->queue_id * 2 + 1);
queue            1109 drivers/crypto/hisilicon/sec/sec_drv.c 	if (queue->task_irq <= 0) {
queue            1117 drivers/crypto/hisilicon/sec/sec_drv.c 	dma_free_coherent(dev, SEC_Q_DB_SIZE, queue->ring_db.vaddr,
queue            1118 drivers/crypto/hisilicon/sec/sec_drv.c 			  queue->ring_db.paddr);
queue            1120 drivers/crypto/hisilicon/sec/sec_drv.c 	dma_free_coherent(dev, SEC_Q_CQ_SIZE, queue->ring_cq.vaddr,
queue            1121 drivers/crypto/hisilicon/sec/sec_drv.c 			  queue->ring_cq.paddr);
queue            1123 drivers/crypto/hisilicon/sec/sec_drv.c 	dma_free_coherent(dev, SEC_Q_CMD_SIZE, queue->ring_cmd.vaddr,
queue            1124 drivers/crypto/hisilicon/sec/sec_drv.c 			  queue->ring_cmd.paddr);
queue            1129 drivers/crypto/hisilicon/sec/sec_drv.c static void sec_queue_free_ring_pages(struct sec_queue *queue)
queue            1131 drivers/crypto/hisilicon/sec/sec_drv.c 	struct device *dev = queue->dev_info->dev;
queue            1133 drivers/crypto/hisilicon/sec/sec_drv.c 	dma_free_coherent(dev, SEC_Q_DB_SIZE, queue->ring_db.vaddr,
queue            1134 drivers/crypto/hisilicon/sec/sec_drv.c 			  queue->ring_db.paddr);
queue            1135 drivers/crypto/hisilicon/sec/sec_drv.c 	dma_free_coherent(dev, SEC_Q_CQ_SIZE, queue->ring_cq.vaddr,
queue            1136 drivers/crypto/hisilicon/sec/sec_drv.c 			  queue->ring_cq.paddr);
queue            1137 drivers/crypto/hisilicon/sec/sec_drv.c 	dma_free_coherent(dev, SEC_Q_CMD_SIZE, queue->ring_cmd.vaddr,
queue            1138 drivers/crypto/hisilicon/sec/sec_drv.c 			  queue->ring_cmd.paddr);
queue            1141 drivers/crypto/hisilicon/sec/sec_drv.c static int sec_queue_config(struct sec_dev_info *info, struct sec_queue *queue,
queue            1146 drivers/crypto/hisilicon/sec/sec_drv.c 	sec_queue_base_init(info, queue, queue_id);
queue            1148 drivers/crypto/hisilicon/sec/sec_drv.c 	ret = sec_queue_res_cfg(queue);
queue            1152 drivers/crypto/hisilicon/sec/sec_drv.c 	ret = sec_queue_map_io(queue);
queue            1155 drivers/crypto/hisilicon/sec/sec_drv.c 		sec_queue_free_ring_pages(queue);
queue            1159 drivers/crypto/hisilicon/sec/sec_drv.c 	sec_queue_hw_init(queue);
queue            1165 drivers/crypto/hisilicon/sec/sec_drv.c 			       struct sec_queue *queue)
queue            1167 drivers/crypto/hisilicon/sec/sec_drv.c 	sec_queue_unmap_io(queue);
queue            1168 drivers/crypto/hisilicon/sec/sec_drv.c 	sec_queue_free_ring_pages(queue);
queue             247 drivers/crypto/hisilicon/sec/sec_drv.h 	struct sec_queue *queue;
queue             417 drivers/crypto/hisilicon/sec/sec_drv.h int sec_queue_send(struct sec_queue *queue, struct sec_bd_info *msg, void *ctx);
queue             418 drivers/crypto/hisilicon/sec/sec_drv.h bool sec_queue_can_enqueue(struct sec_queue *queue, int num);
queue             419 drivers/crypto/hisilicon/sec/sec_drv.h int sec_queue_stop_release(struct sec_queue *queue);
queue             421 drivers/crypto/hisilicon/sec/sec_drv.h bool sec_queue_empty(struct sec_queue *queue);
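
The sec_drv.c lines above allocate three DMA-coherent rings (cmd, cq, db) and free them in reverse order when a later step fails. A minimal sketch of that allocate-and-unwind pattern, assuming only the field names visible in the listing (ring_cmd/ring_cq/ring_db with vaddr/paddr, dev_info->dev, task_irq) and the driver's SEC_Q_*_SIZE constants; the function name and error labels are illustrative, not the driver's actual code:

        #include <linux/dma-mapping.h>
        #include <linux/platform_device.h>

        static int example_sec_queue_res_cfg(struct sec_queue *queue)
        {
                struct device *dev = queue->dev_info->dev;

                queue->ring_cmd.vaddr = dma_alloc_coherent(dev, SEC_Q_CMD_SIZE,
                                                           &queue->ring_cmd.paddr,
                                                           GFP_KERNEL);
                if (!queue->ring_cmd.vaddr)
                        return -ENOMEM;

                queue->ring_cq.vaddr = dma_alloc_coherent(dev, SEC_Q_CQ_SIZE,
                                                          &queue->ring_cq.paddr,
                                                          GFP_KERNEL);
                if (!queue->ring_cq.vaddr)
                        goto err_free_cmd;

                queue->ring_db.vaddr = dma_alloc_coherent(dev, SEC_Q_DB_SIZE,
                                                          &queue->ring_db.paddr,
                                                          GFP_KERNEL);
                if (!queue->ring_db.vaddr)
                        goto err_free_cq;

                /* Per the listing, the completion IRQ index is queue_id * 2 + 1. */
                queue->task_irq = platform_get_irq(to_platform_device(dev),
                                                   queue->queue_id * 2 + 1);
                if (queue->task_irq <= 0)
                        goto err_free_db;

                return 0;

        err_free_db:
                dma_free_coherent(dev, SEC_Q_DB_SIZE, queue->ring_db.vaddr,
                                  queue->ring_db.paddr);
        err_free_cq:
                dma_free_coherent(dev, SEC_Q_CQ_SIZE, queue->ring_cq.vaddr,
                                  queue->ring_cq.paddr);
        err_free_cmd:
                dma_free_coherent(dev, SEC_Q_CMD_SIZE, queue->ring_cmd.vaddr,
                                  queue->ring_cmd.paddr);
                return -ENXIO;
        }

Note how sec_queue_free_ring_pages() in the listing frees the same three rings: the unwind labels simply replay that teardown from the point of failure.
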
queue             131 drivers/crypto/img-hash.c 	struct crypto_queue	queue;
queue             505 drivers/crypto/img-hash.c 		res = ahash_enqueue_request(&hdev->queue, req);
queue             512 drivers/crypto/img-hash.c 	backlog = crypto_get_backlog(&hdev->queue);
queue             513 drivers/crypto/img-hash.c 	async_req = crypto_dequeue_request(&hdev->queue);
queue             958 drivers/crypto/img-hash.c 	crypto_init_queue(&hdev->queue, IMG_HASH_QUEUE_LENGTH);
queue             785 drivers/crypto/inside-secure/safexcel.c 		backlog = crypto_get_backlog(&priv->ring[ring].queue);
queue             786 drivers/crypto/inside-secure/safexcel.c 		req = crypto_dequeue_request(&priv->ring[ring].queue);
queue            1507 drivers/crypto/inside-secure/safexcel.c 		crypto_init_queue(&priv->ring[i].queue,
queue             629 drivers/crypto/inside-secure/safexcel.h 	struct crypto_queue queue;
queue             817 drivers/crypto/inside-secure/safexcel_cipher.c 	enq_ret = crypto_enqueue_request(&priv->ring[ring].queue, base);
queue             965 drivers/crypto/inside-secure/safexcel_cipher.c 	crypto_enqueue_request(&priv->ring[ring].queue, base);
queue            1041 drivers/crypto/inside-secure/safexcel_cipher.c 	ret = crypto_enqueue_request(&priv->ring[ring].queue, base);
queue             446 drivers/crypto/inside-secure/safexcel_hash.c 	enq_ret = crypto_enqueue_request(&priv->ring[ring].queue, async);
queue             537 drivers/crypto/inside-secure/safexcel_hash.c 	crypto_enqueue_request(&priv->ring[ring].queue, &req->base);
queue             629 drivers/crypto/inside-secure/safexcel_hash.c 	ret = crypto_enqueue_request(&priv->ring[ring].queue, &areq->base);
queue              43 drivers/crypto/marvell/cesa.c 	*backlog = crypto_get_backlog(&engine->queue);
queue              44 drivers/crypto/marvell/cesa.c 	req = crypto_dequeue_request(&engine->queue);
queue             176 drivers/crypto/marvell/cesa.c 	ret = crypto_enqueue_request(&engine->queue, req);
queue             551 drivers/crypto/marvell/cesa.c 		crypto_init_queue(&engine->queue, CESA_CRYPTO_DEFAULT_MAX_QLEN);
queue             456 drivers/crypto/marvell/cesa.h 	struct crypto_queue queue;
queue             520 drivers/crypto/mediatek/mtk-aes.c 		ret = crypto_enqueue_request(&aes->queue, new_areq);
queue             525 drivers/crypto/mediatek/mtk-aes.c 	backlog = crypto_get_backlog(&aes->queue);
queue             526 drivers/crypto/mediatek/mtk-aes.c 	areq = crypto_dequeue_request(&aes->queue);
queue            1219 drivers/crypto/mediatek/mtk-aes.c 		crypto_init_queue(&aes[i]->queue, AES_QUEUE_SIZE);
queue             147 drivers/crypto/mediatek/mtk-platform.h 	struct crypto_queue queue;
queue             184 drivers/crypto/mediatek/mtk-platform.h 	struct crypto_queue queue;
queue             661 drivers/crypto/mediatek/mtk-sha.c 		ret = ahash_enqueue_request(&sha->queue, req);
queue             668 drivers/crypto/mediatek/mtk-sha.c 	backlog = crypto_get_backlog(&sha->queue);
queue             669 drivers/crypto/mediatek/mtk-sha.c 	async_req = crypto_dequeue_request(&sha->queue);
queue            1222 drivers/crypto/mediatek/mtk-sha.c 		crypto_init_queue(&sha[i]->queue, SHA_QUEUE_SIZE);
queue              80 drivers/crypto/mxs-dcp.c 	struct crypto_queue		queue[DCP_MAX_CHANS];
queue             403 drivers/crypto/mxs-dcp.c 		backlog = crypto_get_backlog(&sdcp->queue[chan]);
queue             404 drivers/crypto/mxs-dcp.c 		arq = crypto_dequeue_request(&sdcp->queue[chan]);
queue             464 drivers/crypto/mxs-dcp.c 	ret = crypto_enqueue_request(&sdcp->queue[actx->chan], &req->base);
queue             701 drivers/crypto/mxs-dcp.c 		backlog = crypto_get_backlog(&sdcp->queue[chan]);
queue             702 drivers/crypto/mxs-dcp.c 		arq = crypto_dequeue_request(&sdcp->queue[chan]);
queue             778 drivers/crypto/mxs-dcp.c 	ret = crypto_enqueue_request(&sdcp->queue[actx->chan], &req->base);
queue            1081 drivers/crypto/mxs-dcp.c 		crypto_init_queue(&sdcp->queue[i], 50);
queue              70 drivers/crypto/n2_core.c 	struct spu_queue	*queue;
queue            1648 drivers/crypto/n2_core.c 	struct spu_queue *p = qr->queue;
queue            1663 drivers/crypto/n2_core.c 	struct spu_qreg qr = { .queue = p, .type = q_type };
queue            1071 drivers/crypto/omap-aes.c 	return sprintf(buf, "%d\n", dd->engine->queue.max_qlen);
queue            1098 drivers/crypto/omap-aes.c 		dd->engine->queue.max_qlen = value;
queue             230 drivers/crypto/omap-sham.c 	struct crypto_queue	queue;
queue            1125 drivers/crypto/omap-sham.c 		ret = ahash_enqueue_request(&dd->queue, req);
queue            1130 drivers/crypto/omap-sham.c 	backlog = crypto_get_backlog(&dd->queue);
queue            1131 drivers/crypto/omap-sham.c 	async_req = crypto_dequeue_request(&dd->queue);
queue            2038 drivers/crypto/omap-sham.c 	return sprintf(buf, "%d\n", dd->queue.max_qlen);
queue            2063 drivers/crypto/omap-sham.c 	dd->queue.max_qlen = value;
queue            2103 drivers/crypto/omap-sham.c 	crypto_init_queue(&dd->queue, OMAP_SHAM_QUEUE_LENGTH);
queue              82 drivers/crypto/qce/core.c 		ret = crypto_enqueue_request(&qce->queue, req);
queue              90 drivers/crypto/qce/core.c 	backlog = crypto_get_backlog(&qce->queue);
queue              91 drivers/crypto/qce/core.c 	async_req = crypto_dequeue_request(&qce->queue);
queue             222 drivers/crypto/qce/core.c 	crypto_init_queue(&qce->queue, QCE_QUEUE_LENGTH);
queue              30 drivers/crypto/qce/core.h 	struct crypto_queue queue;
queue             191 drivers/crypto/rockchip/rk3288_crypto.c 	ret = crypto_enqueue_request(&dev->queue, async_req);
queue             212 drivers/crypto/rockchip/rk3288_crypto.c 	backlog   = crypto_get_backlog(&dev->queue);
queue             213 drivers/crypto/rockchip/rk3288_crypto.c 	async_req = crypto_dequeue_request(&dev->queue);
queue             395 drivers/crypto/rockchip/rk3288_crypto.c 	crypto_init_queue(&crypto_info->queue, 50);
queue             193 drivers/crypto/rockchip/rk3288_crypto.h 	struct crypto_queue		queue;
queue             315 drivers/crypto/s5p-sss.c 	struct crypto_queue		queue;
queue            2011 drivers/crypto/s5p-sss.c 	backlog   = crypto_get_backlog(&dev->queue);
queue            2012 drivers/crypto/s5p-sss.c 	async_req = crypto_dequeue_request(&dev->queue);
queue            2038 drivers/crypto/s5p-sss.c 	err = ablkcipher_enqueue_request(&dev->queue, req);
queue            2297 drivers/crypto/s5p-sss.c 	crypto_init_queue(&pdata->queue, CRYPTO_QUEUE_LEN);
queue             202 drivers/crypto/sahara.c 	struct crypto_queue	queue;
queue             651 drivers/crypto/sahara.c 	err = ablkcipher_enqueue_request(&dev->queue, req);
queue            1059 drivers/crypto/sahara.c 		backlog = crypto_get_backlog(&dev->queue);
queue            1060 drivers/crypto/sahara.c 		async_req = crypto_dequeue_request(&dev->queue);
queue            1108 drivers/crypto/sahara.c 	ret = crypto_enqueue_request(&dev->queue, &req->base);
queue            1474 drivers/crypto/sahara.c 	crypto_init_queue(&dev->queue, SAHARA_QUEUE_LENGTH);
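
The img-hash, safexcel, cesa, mtk, mxs-dcp, omap, qce, rk3288, s5p-sss and sahara lines above all share one crypto_queue shape: init with a queue length, enqueue under a lock, then dequeue together with the backlog entry and complete the backlog with -EINPROGRESS so a blocked caller may resubmit. A minimal sketch assuming only the <crypto/algapi.h> API shown in the listing; the lock and handler framing are illustrative:

        #include <crypto/algapi.h>
        #include <linux/spinlock.h>

        static struct crypto_queue example_queue;
        static DEFINE_SPINLOCK(example_lock);

        static void example_init(void)
        {
                crypto_init_queue(&example_queue, 50);  /* e.g. mxs-dcp uses 50 */
        }

        /* Returns 1 if a request was taken for processing, 0 if the queue is empty. */
        static int example_handle_one(void)
        {
                struct crypto_async_request *async_req, *backlog;
                unsigned long flags;

                spin_lock_irqsave(&example_lock, flags);
                backlog = crypto_get_backlog(&example_queue);
                async_req = crypto_dequeue_request(&example_queue);
                spin_unlock_irqrestore(&example_lock, flags);

                if (!async_req)
                        return 0;

                /* Tell a backlogged submitter its request is now in flight. */
                if (backlog)
                        backlog->complete(backlog, -EINPROGRESS);

                /* hardware-specific processing of async_req would happen here */
                return 1;
        }
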
queue              88 drivers/dma/at_hdmac.c 	return list_first_entry(&atchan->queue,
queue             264 drivers/dma/at_hdmac.c 	list_for_each_entry_safe(desc, _desc, &atchan->queue, desc_node) {
queue             499 drivers/dma/at_hdmac.c 	if (!list_empty(&atchan->queue))
queue             504 drivers/dma/at_hdmac.c 	list_splice_init(&atchan->queue, &atchan->active_list);
queue             555 drivers/dma/at_hdmac.c 	list_splice_init(&atchan->queue, atchan->active_list.prev);
queue             685 drivers/dma/at_hdmac.c 		list_add_tail(&desc->desc_node, &atchan->queue);
queue            1446 drivers/dma/at_hdmac.c 	list_splice_init(&atchan->queue, &list);
queue            1621 drivers/dma/at_hdmac.c 	BUG_ON(!list_empty(&atchan->queue));
queue            1902 drivers/dma/at_hdmac.c 		INIT_LIST_HEAD(&atchan->queue);
queue             265 drivers/dma/at_hdmac_regs.h 	struct list_head	queue;
queue            1303 drivers/dma/coh901318.c 	struct list_head queue;
queue            1573 drivers/dma/coh901318.c 	list_add_tail(&desc->node, &cohc->queue);
queue            1579 drivers/dma/coh901318.c 	return list_first_entry_or_null(&cohc->queue, struct coh901318_desc,
queue            1669 drivers/dma/coh901318.c 	list_for_each(pos, &cohc->queue) {
queue            2616 drivers/dma/coh901318.c 			INIT_LIST_HEAD(&cohc->queue);
queue              72 drivers/dma/dw/core.c 	list_add_tail(&desc->desc_node, &dwc->queue);
queue             228 drivers/dma/dw/core.c 	if (list_empty(&dwc->queue))
queue             231 drivers/dma/dw/core.c 	list_move(dwc->queue.next, &dwc->active_list);
queue             445 drivers/dma/dw/core.c 	list_move(dwc->queue.next, dwc->active_list.prev);
queue             868 drivers/dma/dw/core.c 	list_splice_init(&dwc->queue, &list);
queue            1027 drivers/dma/dw/core.c 	BUG_ON(!list_empty(&dwc->queue));
queue            1159 drivers/dma/dw/core.c 		INIT_LIST_HEAD(&dwc->queue);
queue             279 drivers/dma/dw/regs.h 	struct list_head	queue;
queue             178 drivers/dma/ep93xx_dma.c 	struct list_head		queue;
queue             730 drivers/dma/ep93xx_dma.c 	if (!list_empty(&edmac->active) || list_empty(&edmac->queue)) {
queue             736 drivers/dma/ep93xx_dma.c 	new = list_first_entry(&edmac->queue, struct ep93xx_dma_desc, node);
queue             850 drivers/dma/ep93xx_dma.c 		list_add_tail(&desc->node, &edmac->queue);
queue             957 drivers/dma/ep93xx_dma.c 	BUG_ON(!list_empty(&edmac->queue));
queue            1216 drivers/dma/ep93xx_dma.c 	list_splice_init(&edmac->queue, &list);
queue            1352 drivers/dma/ep93xx_dma.c 		INIT_LIST_HEAD(&edmac->queue);
queue             159 drivers/dma/fsl-qdma.c 	u8 queue;
queue             169 drivers/dma/fsl-qdma.c 	struct fsl_qdma_queue		*queue;
queue             208 drivers/dma/fsl-qdma.c 	struct fsl_qdma_queue	*queue;
queue             295 drivers/dma/fsl-qdma.c 	struct fsl_qdma_queue *fsl_queue = fsl_chan->queue;
queue             387 drivers/dma/fsl-qdma.c static int fsl_qdma_pre_request_enqueue_desc(struct fsl_qdma_queue *queue)
queue             392 drivers/dma/fsl-qdma.c 	for (i = 0; i < queue->n_cq + FSL_COMMAND_QUEUE_OVERFLLOW; i++) {
queue             397 drivers/dma/fsl-qdma.c 			dma_pool_alloc(queue->comp_pool, GFP_KERNEL,
queue             403 drivers/dma/fsl-qdma.c 			dma_pool_alloc(queue->desc_pool, GFP_KERNEL,
queue             408 drivers/dma/fsl-qdma.c 		list_add_tail(&comp_temp->list, &queue->comp_free);
queue             414 drivers/dma/fsl-qdma.c 	dma_pool_free(queue->comp_pool, comp_temp->virt_addr,
queue             422 drivers/dma/fsl-qdma.c 				 &queue->comp_free, list) {
queue             424 drivers/dma/fsl-qdma.c 			dma_pool_free(queue->comp_pool,
queue             428 drivers/dma/fsl-qdma.c 			dma_pool_free(queue->desc_pool,
queue             448 drivers/dma/fsl-qdma.c 	struct fsl_qdma_queue *queue = fsl_chan->queue;
queue             451 drivers/dma/fsl-qdma.c 		spin_lock_irqsave(&queue->queue_lock, flags);
queue             452 drivers/dma/fsl-qdma.c 		if (!list_empty(&queue->comp_free)) {
queue             453 drivers/dma/fsl-qdma.c 			comp_temp = list_first_entry(&queue->comp_free,
queue             458 drivers/dma/fsl-qdma.c 			spin_unlock_irqrestore(&queue->queue_lock, flags);
queue             462 drivers/dma/fsl-qdma.c 		spin_unlock_irqrestore(&queue->queue_lock, flags);
queue             624 drivers/dma/fsl-qdma.c 	struct fsl_qdma_queue *fsl_queue = fsl_qdma->queue;
queue             638 drivers/dma/fsl-qdma.c 		   __this_cpu_read(pre.queue) &&
queue             645 drivers/dma/fsl-qdma.c 		__this_cpu_write(pre.queue, qdma_ccdf_get_queue(status_addr));
queue             824 drivers/dma/fsl-qdma.c 	struct fsl_qdma_queue *fsl_queue = fsl_qdma->queue;
queue             942 drivers/dma/fsl-qdma.c 	struct fsl_qdma_queue *fsl_queue = fsl_chan->queue;
queue             974 drivers/dma/fsl-qdma.c 	fsl_queue = fsl_comp->qchan->queue;
queue             985 drivers/dma/fsl-qdma.c 	struct fsl_qdma_queue *fsl_queue = fsl_chan->queue;
queue            1020 drivers/dma/fsl-qdma.c 	struct fsl_qdma_queue *fsl_queue = fsl_chan->queue;
queue            1149 drivers/dma/fsl-qdma.c 	fsl_qdma->queue = fsl_qdma_alloc_queue_resources(pdev, fsl_qdma);
queue            1150 drivers/dma/fsl-qdma.c 	if (!fsl_qdma->queue)
queue            1168 drivers/dma/fsl-qdma.c 		fsl_chan->queue = fsl_qdma->queue + i % (fsl_qdma->n_queues *
queue             871 drivers/dma/ipu/ipu_idmac.c 	list_add_tail(&desc->list, &ichan->queue);
queue             922 drivers/dma/ipu/ipu_idmac.c 	INIT_LIST_HEAD(&ichan->queue);
queue            1132 drivers/dma/ipu/ipu_idmac.c 	if ((*desc)->list.next == &ichan->queue)
queue            1206 drivers/dma/ipu/ipu_idmac.c 	if (unlikely(list_empty(&ichan->queue))) {
queue            1229 drivers/dma/ipu/ipu_idmac.c 	desc = list_entry(ichan->queue.next, struct idmac_tx_desc, list);
queue            1411 drivers/dma/ipu/ipu_idmac.c 	list_for_each_safe(list, tmp, &ichan->queue)
queue            1441 drivers/dma/ipu/ipu_idmac.c 	list_splice_init(&ichan->queue, &ichan->free_list);
queue             102 drivers/dma/mediatek/mtk-cqdma.c 	struct list_head queue;
queue             278 drivers/dma/mediatek/mtk-cqdma.c 		if (list_empty(&pc->queue))
queue             284 drivers/dma/mediatek/mtk-cqdma.c 		list_add_tail(&cvd->node, &pc->queue);
queue             303 drivers/dma/mediatek/mtk-cqdma.c 	list_for_each_entry(cvd, &cvc->pc->queue, node)
queue             320 drivers/dma/mediatek/mtk-cqdma.c 	cvd = list_first_entry_or_null(&pc->queue,
queue             351 drivers/dma/mediatek/mtk-cqdma.c 	cvd = list_first_entry_or_null(&pc->queue,
queue             427 drivers/dma/mediatek/mtk-cqdma.c 	list_for_each_entry(vd, &cvc->pc->queue, node)
queue             819 drivers/dma/mediatek/mtk-cqdma.c 		INIT_LIST_HEAD(&cqdma->pc[i]->queue);
queue             100 drivers/dma/pch_dma.c 	struct list_head	queue;
queue             173 drivers/dma/pch_dma.c 	return list_first_entry(&pd_chan->queue,
queue             368 drivers/dma/pch_dma.c 	if (!list_empty(&pd_chan->queue))
queue             372 drivers/dma/pch_dma.c 	list_splice_init(&pd_chan->queue, &pd_chan->active_list);
queue             385 drivers/dma/pch_dma.c 	list_splice_init(&pd_chan->queue, pd_chan->active_list.prev);
queue             419 drivers/dma/pch_dma.c 		list_add_tail(&desc->desc_node, &pd_chan->queue);
queue             536 drivers/dma/pch_dma.c 	BUG_ON(!list_empty(&pd_chan->queue));
queue             663 drivers/dma/pch_dma.c 	list_splice_init(&pd_chan->queue, &list);
queue             898 drivers/dma/pch_dma.c 		INIT_LIST_HEAD(&pd_chan->queue);
queue             469 drivers/dma/ste_dma40.c 	struct list_head		 queue;
queue             970 drivers/dma/ste_dma40.c 	return list_first_entry_or_null(&d40c->queue, struct d40_desc, node);
queue            2572 drivers/dma/ste_dma40.c 	list_splice_tail_init(&d40c->pending_queue, &d40c->queue);
queue            2802 drivers/dma/ste_dma40.c 		INIT_LIST_HEAD(&d40c->queue);
queue              79 drivers/dma/timb_dma.c 	struct list_head	queue;
queue             281 drivers/dma/timb_dma.c 	BUG_ON(list_empty(&td_chan->queue));
queue             284 drivers/dma/timb_dma.c 	td_desc = list_entry(td_chan->queue.next, struct timb_dma_desc,
queue             314 drivers/dma/timb_dma.c 		list_add_tail(&td_desc->desc_node, &td_chan->queue);
queue             443 drivers/dma/timb_dma.c 	BUG_ON(!list_empty(&td_chan->queue));
queue             483 drivers/dma/timb_dma.c 	if (list_empty(&td_chan->active_list) && !list_empty(&td_chan->queue))
queue             555 drivers/dma/timb_dma.c 	list_for_each_entry_safe(td_desc, _td_desc, &td_chan->queue,
queue             585 drivers/dma/timb_dma.c 			if (!list_empty(&td_chan->queue))
queue             699 drivers/dma/timb_dma.c 		INIT_LIST_HEAD(&td_chan->queue);
queue             177 drivers/dma/txx9dmac.c 	return list_entry(dc->queue.next, struct txx9dmac_desc, desc_node);
queue             445 drivers/dma/txx9dmac.c 	} while (!list_empty(&dc->queue));
queue             458 drivers/dma/txx9dmac.c 	if (!list_empty(&dc->queue)) {
queue             521 drivers/dma/txx9dmac.c 	if (list_empty(&dc->active_list) && !list_empty(&dc->queue))
queue             598 drivers/dma/txx9dmac.c 	if (!list_empty(&dc->queue)) {
queue             702 drivers/dma/txx9dmac.c 	list_add_tail(&desc->desc_node, &dc->queue);
queue             909 drivers/dma/txx9dmac.c 	list_splice_init(&dc->queue, &list);
queue             968 drivers/dma/txx9dmac.c 	if (!list_empty(&dc->queue)) {
queue            1055 drivers/dma/txx9dmac.c 	BUG_ON(!list_empty(&dc->queue));
queue            1137 drivers/dma/txx9dmac.c 	INIT_LIST_HEAD(&dc->queue);
queue             173 drivers/dma/txx9dmac.h 	struct list_head	queue;
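
The dmaengine drivers above (at_hdmac, coh901318, dw, ep93xx, pch_dma, timb_dma, txx9dmac, ...) all keep two per-channel lists: descriptors submitted while the hardware is busy park on ->queue, and the completion path splices ->queue onto ->active_list before restarting the engine. A minimal sketch of that two-list scheme using only <linux/list.h>; struct my_chan/my_desc are illustrative:

        #include <linux/list.h>

        struct my_desc {
                struct list_head desc_node;
        };

        struct my_chan {
                struct list_head active_list;   /* owned by the hardware */
                struct list_head queue;         /* waiting for the hardware */
        };

        static void my_chan_init(struct my_chan *c)
        {
                INIT_LIST_HEAD(&c->active_list);
                INIT_LIST_HEAD(&c->queue);
        }

        static void my_submit(struct my_chan *c, struct my_desc *d, bool hw_busy)
        {
                if (hw_busy)
                        list_add_tail(&d->desc_node, &c->queue);      /* park it */
                else
                        list_add_tail(&d->desc_node, &c->active_list);
        }

        static void my_advance(struct my_chan *c)
        {
                /* On completion, promote everything queued in one splice. */
                if (list_empty(&c->active_list) && !list_empty(&c->queue))
                        list_splice_init(&c->queue, &c->active_list);
        }

The splice is what makes termination cheap as well: the `list_splice_init(&chan->queue, &list)` lines in the listing move all pending descriptors onto a local list so they can be freed outside the channel lock.
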
queue             173 drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c 							  adev->gfx.kiq.ring.queue),
queue             179 drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.c 			uint32_t queue, uint32_t vmid)
queue             184 drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.c 	nv_grbm_select(adev, mec, pipe, queue, vmid);
queue             210 drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c 			uint32_t queue, uint32_t vmid)
queue             213 drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c 	uint32_t value = PIPEID(pipe) | MEID(mec) | VMID(vmid) | QUEUEID(queue);
queue             166 drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c 			uint32_t queue, uint32_t vmid)
queue             169 drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c 	uint32_t value = PIPEID(pipe) | MEID(mec) | VMID(vmid) | QUEUEID(queue);
queue              90 drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c 			uint32_t queue, uint32_t vmid)
queue              95 drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c 	soc15_grbm_select(adev, mec, pipe, queue, vmid);
queue             109 drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c 	unsigned instance_bank, sh_bank, se_bank, me, pipe, queue, vmid;
queue             112 drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c 	instance_bank = sh_bank = se_bank = me = pipe = queue = vmid = 0;
queue             137 drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c 		queue = (*pos & GENMASK_ULL(53, 44)) >> 44;
queue             156 drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c 		amdgpu_gfx_select_me_pipe_q(adev, me, pipe, queue, vmid);
queue              38 drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c 				int pipe, int queue)
queue              45 drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c 	bit += queue;
queue              51 drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c 				 int *mec, int *pipe, int *queue)
queue              53 drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c 	*queue = bit % adev->gfx.mec.num_queue_per_pipe;
queue              62 drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c 				     int mec, int pipe, int queue)
queue              64 drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c 	return test_bit(amdgpu_gfx_mec_queue_to_bit(adev, mec, pipe, queue),
queue              69 drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c 			       int me, int pipe, int queue)
queue              76 drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c 	bit += queue;
queue              82 drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c 				int *me, int *pipe, int *queue)
queue              84 drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c 	*queue = bit % adev->gfx.me.num_queue_per_pipe;
queue              92 drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c 				    int me, int pipe, int queue)
queue              94 drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c 	return test_bit(amdgpu_gfx_me_queue_to_bit(adev, me, pipe, queue),
queue             196 drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c 	int i, queue, pipe, mec;
queue             201 drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c 		queue = i % adev->gfx.mec.num_queue_per_pipe;
queue             213 drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c 			if (mec == 0 && queue < 2)
queue             234 drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c 	int i, queue, pipe, me;
queue             237 drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c 		queue = i % adev->gfx.me.num_queue_per_pipe;
queue             247 drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c 		if (me == 0 && queue < 1)
queue             260 drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c 	int mec, pipe, queue;
queue             270 drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c 		amdgpu_gfx_bit_to_mec_queue(adev, queue_bit, &mec, &pipe, &queue);
queue             277 drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c 		if ((mec == 1 && pipe > 1) || queue != 0)
queue             282 drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c 		ring->queue = queue;
queue             314 drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c 	sprintf(ring->name, "kiq_%d.%d.%d", ring->me, ring->pipe, ring->queue);
queue             513 drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c 							kiq_ring->queue);
queue             199 drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h 				 u32 queue, u32 vmid);
queue             376 drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h 				int pipe, int queue);
queue             378 drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h 				 int *mec, int *pipe, int *queue);
queue             380 drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h 				     int pipe, int queue);
queue             382 drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h 			       int pipe, int queue);
queue             384 drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h 				int *me, int *pipe, int *queue);
queue             386 drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h 				    int pipe, int queue);
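
amdgpu_gfx_mec_queue_to_bit() and amdgpu_gfx_bit_to_mec_queue() above linearize a (mec, pipe, queue) triple into a flat bit index and back, as the `bit += queue` and `*queue = bit % adev->gfx.mec.num_queue_per_pipe` lines show. A standalone sketch of that mixed-radix mapping; QUEUES_PER_PIPE and PIPES_PER_MEC are illustrative stand-ins for adev->gfx.mec.num_queue_per_pipe and num_pipe_per_mec:

        #define QUEUES_PER_PIPE 8
        #define PIPES_PER_MEC   4

        static int mec_queue_to_bit(int mec, int pipe, int queue)
        {
                /* queue varies fastest, then pipe, then mec */
                return (mec * PIPES_PER_MEC + pipe) * QUEUES_PER_PIPE + queue;
        }

        static void bit_to_mec_queue(int bit, int *mec, int *pipe, int *queue)
        {
                *queue = bit % QUEUES_PER_PIPE;
                *pipe  = (bit / QUEUES_PER_PIPE) % PIPES_PER_MEC;
                *mec   = bit / (QUEUES_PER_PIPE * PIPES_PER_MEC);
        }

The flat index is what feeds test_bit() in amdgpu_gfx_is_mec_queue_enabled() above: one bitmap covers every compute queue on the chip.
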
queue             198 drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h 	u32			queue;
queue             869 drivers/gpu/drm/amd/amdgpu/cik.c 		     u32 me, u32 pipe, u32 queue, u32 vmid)
queue             875 drivers/gpu/drm/amd/amdgpu/cik.c 		((queue << SRBM_GFX_CNTL__QUEUEID__SHIFT) & SRBM_GFX_CNTL__QUEUEID_MASK));
queue              30 drivers/gpu/drm/amd/amdgpu/cik.h 		     u32 me, u32 pipe, u32 queue, u32 vmid);
queue             283 drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c 			  PACKET3_MAP_QUEUES_QUEUE(ring->queue) |
queue            1240 drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c 				   int me, int pipe, int queue)
queue            1250 drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c 	ring->queue = queue;
queue            1259 drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c 	sprintf(ring->name, "gfx_%d.%d.%d", ring->me, ring->pipe, ring->queue);
queue            1270 drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c 				       int mec, int pipe, int queue)
queue            1281 drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c 	ring->queue = queue;
queue            1288 drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c 	sprintf(ring->name, "comp_%d.%d.%d", ring->me, ring->pipe, ring->queue);
queue            3012 drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c 	tmp |= (ring->me << 5) | (ring->pipe << 3) | (ring->queue);
queue            3162 drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c 		nv_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0);
queue            3180 drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c 		nv_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0);
queue            3515 drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c 		nv_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0);
queue            3522 drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c 		nv_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0);
queue            3544 drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c 		nv_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0);
queue            3659 drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c 			 i, ring->me, ring->pipe, ring->queue);
queue            3671 drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c 			 i, ring->me, ring->pipe, ring->queue);
queue            5017 drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c 			if ((ring->me == me_id) && (ring->pipe == pipe_id) && (ring->queue == queue_id))
queue            5087 drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c 			    ring->queue == queue_id)
queue            3136 drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c 		ring->queue = i;
queue            3137 drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c 		sprintf(ring->name, "comp_%d.%d.%d", ring->me, ring->pipe, ring->queue);
queue            3089 drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c 	cik_srbm_select(adev, ring->me, ring->pipe, ring->queue, 0);
queue            4399 drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c 					int mec, int pipe, int queue)
queue            4408 drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c 	ring->queue = queue;
queue            4413 drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c 	sprintf(ring->name, "comp_%d.%d.%d", ring->me, ring->pipe, ring->queue);
queue            1915 drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c 					int mec, int pipe, int queue)
queue            1926 drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c 	ring->queue = queue;
queue            1933 drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c 	sprintf(ring->name, "comp_%d.%d.%d", ring->me, ring->pipe, ring->queue);
queue            4369 drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c 	tmp |= (ring->me << 5) | (ring->pipe << 3) | (ring->queue);
queue            4422 drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c 				  PACKET3_MAP_QUEUES_QUEUE(ring->queue) |
queue            4654 drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c 		vi_srbm_select(adev, ring->me, ring->pipe, ring->queue, 0);
queue            4663 drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c 		vi_srbm_select(adev, ring->me, ring->pipe, ring->queue, 0);
queue            4687 drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c 		vi_srbm_select(adev, ring->me, ring->pipe, ring->queue, 0);
queue            5067 drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c 			vi_srbm_select(adev, ring->me, ring->pipe, ring->queue, 0);
queue            5162 drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c 			vi_srbm_select(adev, ring->me, ring->pipe, ring->queue, 0);
queue            6352 drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c 	vi_srbm_select(adev, ring->me, ring->pipe, ring->queue, 0);
queue            6751 drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c 			if ((ring->me == me_id) && (ring->pipe == pipe_id) && (ring->queue == queue_id))
queue            6779 drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c 			    ring->queue == queue_id)
queue            2157 drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c 				      int mec, int pipe, int queue)
queue            2168 drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c 	ring->queue = queue;
queue            2175 drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c 	sprintf(ring->name, "comp_%d.%d.%d", ring->me, ring->pipe, ring->queue);
queue            3336 drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c 	tmp |= (ring->me << 5) | (ring->pipe << 3) | (ring->queue);
queue            3389 drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c 				  PACKET3_MAP_QUEUES_QUEUE(ring->queue) |
queue            3714 drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c 		soc15_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0);
queue            3723 drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c 		soc15_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0);
queue            3747 drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c 		soc15_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0);
queue            3971 drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c 				adev->gfx.kiq.ring.queue, 0);
queue            5231 drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c 	soc15_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0);
queue            5686 drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c 			if ((ring->me == me_id) && (ring->pipe == pipe_id) && (ring->queue == queue_id))
queue            5714 drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c 			    ring->queue == queue_id)
queue             132 drivers/gpu/drm/amd/amdgpu/nv.c 		     u32 me, u32 pipe, u32 queue, u32 vmid)
queue             138 drivers/gpu/drm/amd/amdgpu/nv.c 	grbm_gfx_cntl = REG_SET_FIELD(grbm_gfx_cntl, GRBM_GFX_CNTL, QUEUEID, queue);
queue              30 drivers/gpu/drm/amd/amdgpu/nv.h 		    u32 me, u32 pipe, u32 queue, u32 vmid);
queue              30 drivers/gpu/drm/amd/amdgpu/si.h 		     u32 me, u32 pipe, u32 queue, u32 vmid);
queue             287 drivers/gpu/drm/amd/amdgpu/soc15.c 		     u32 me, u32 pipe, u32 queue, u32 vmid)
queue             293 drivers/gpu/drm/amd/amdgpu/soc15.c 	grbm_gfx_cntl = REG_SET_FIELD(grbm_gfx_cntl, GRBM_GFX_CNTL, QUEUEID, queue);
queue              71 drivers/gpu/drm/amd/amdgpu/soc15.h 		    u32 me, u32 pipe, u32 queue, u32 vmid);
queue             360 drivers/gpu/drm/amd/amdgpu/vi.c 		     u32 me, u32 pipe, u32 queue, u32 vmid)
queue             366 drivers/gpu/drm/amd/amdgpu/vi.c 	srbm_gfx_cntl = REG_SET_FIELD(srbm_gfx_cntl, SRBM_GFX_CNTL, QUEUEID, queue);
queue              30 drivers/gpu/drm/amd/amdgpu/vi.h 		    u32 me, u32 pipe, u32 queue, u32 vmid);
queue             223 drivers/gpu/drm/amd/amdkfd/kfd_dbgdev.c 			dbgdev->kq->queue->properties.queue_id);
queue              55 drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c 				struct queue *q);
queue              58 drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c 				struct queue *q);
queue              59 drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c static int allocate_hqd(struct device_queue_manager *dqm, struct queue *q);
queue              61 drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c 				struct queue *q);
queue             135 drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c static int allocate_doorbell(struct qcm_process_device *qpd, struct queue *q)
queue             180 drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c 				struct queue *q)
queue             196 drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c 			struct queue *q)
queue             249 drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c 				struct queue *q)
queue             269 drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c 				struct queue *q,
queue             309 drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c 			q->pipe, q->queue);
queue             340 drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c 					q->queue, &q->properties, current->mm);
queue             382 drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c static int allocate_hqd(struct device_queue_manager *dqm, struct queue *q)
queue             400 drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c 			q->queue = bit;
queue             409 drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c 	pr_debug("hqd slot - pipe %d, queue %d\n", q->pipe, q->queue);
queue             417 drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c 				struct queue *q)
queue             419 drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c 	dqm->allocated_queues[q->pipe] |= (1 << q->queue);
queue             427 drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c 				struct queue *q)
queue             455 drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c 				q->pipe, q->queue);
queue             485 drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c 				struct queue *q)
queue             496 drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c static int update_queue(struct device_queue_manager *dqm, struct queue *q)
queue             529 drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c 				KFD_UNMAP_LATENCY_MS, q->pipe, q->queue);
queue             560 drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c 						   q->pipe, q->queue,
queue             572 drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c 	struct queue *q;
queue             598 drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c 				KFD_UNMAP_LATENCY_MS, q->pipe, q->queue);
queue             615 drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c 	struct queue *q;
queue             652 drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c 	struct queue *q;
queue             706 drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c 				       q->queue, &q->properties, mm);
queue             725 drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c 	struct queue *q;
queue             858 drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c 	int pipe, queue;
queue             876 drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c 		for (queue = 0; queue < get_queues_per_pipe(dqm); queue++)
queue             877 drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c 			if (test_bit(pipe_offset + queue,
queue             879 drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c 				dqm->allocated_queues[pipe] |= 1 << queue;
queue             915 drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c 				struct queue *q)
queue             954 drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c 				struct queue *q)
queue            1132 drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c static int create_queue_cpsch(struct device_queue_manager *dqm, struct queue *q,
queue            1341 drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c 				struct queue *q)
queue            1498 drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c 	struct queue *q, *next;
queue            1537 drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c 			  struct queue *q,
queue            1572 drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c 	struct queue *q, *next;
queue            1886 drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c 	int pipe, queue;
queue            1905 drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c 		for (queue = 0; queue < get_queues_per_pipe(dqm); queue++) {
queue            1906 drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c 			if (!test_bit(pipe_offset + queue,
queue            1911 drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c 				dqm->dev->kgd, pipe, queue, &dump, &n_regs);
queue            1916 drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c 				  pipe, queue);
queue            1924 drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c 		for (queue = 0;
queue            1925 drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c 		     queue < dqm->dev->device_info->num_sdma_queues_per_engine;
queue            1926 drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c 		     queue++) {
queue            1928 drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c 				dqm->dev->kgd, pipe, queue, &dump, &n_regs);
queue            1933 drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c 				  pipe, queue);
queue              86 drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h 				struct queue *q,
queue              91 drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h 				struct queue *q);
queue              94 drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h 				struct queue *q);
queue             135 drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h 				  struct queue *q,
queue             151 drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h 				struct queue *q,
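
allocate_hqd() and deallocate_hqd() above manage hardware-queue slots with one bitmask per pipe; the `dqm->allocated_queues[q->pipe] |= (1 << q->queue)` line in the deallocation path suggests a set bit marks a *free* slot. A hedged sketch of such an allocator under that reading of the listing; PIPE_COUNT and the function names are illustrative:

        #include <linux/bitops.h>
        #include <linux/errno.h>

        #define PIPE_COUNT 4

        static unsigned int allocated_queues[PIPE_COUNT];  /* set bit == free slot */

        static int alloc_hqd(int *pipe_out, int *queue_out)
        {
                int pipe;

                for (pipe = 0; pipe < PIPE_COUNT; pipe++) {
                        if (allocated_queues[pipe]) {
                                int bit = ffs(allocated_queues[pipe]) - 1;

                                allocated_queues[pipe] &= ~(1u << bit); /* claim it */
                                *pipe_out = pipe;
                                *queue_out = bit;
                                return 0;
                        }
                }
                return -EBUSY;  /* every HQD slot on every pipe is taken */
        }

        static void free_hqd(int pipe, int queue)
        {
                allocated_queues[pipe] |= 1u << queue;  /* mark the slot free again */
        }
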
queue              39 drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_cik.c static void init_sdma_vm(struct device_queue_manager *dqm, struct queue *q,
queue              42 drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_cik.c 				struct queue *q,
queue             180 drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_cik.c static void init_sdma_vm(struct device_queue_manager *dqm, struct queue *q,
queue             197 drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_cik.c 				struct queue *q,
queue              31 drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_v10.c static void init_sdma_vm_v10(struct device_queue_manager *dqm, struct queue *q,
queue              83 drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_v10.c static void init_sdma_vm_v10(struct device_queue_manager *dqm, struct queue *q,
queue              32 drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_v9.c static void init_sdma_vm_v9(struct device_queue_manager *dqm, struct queue *q,
queue              80 drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_v9.c static void init_sdma_vm_v9(struct device_queue_manager *dqm, struct queue *q,
queue              45 drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_vi.c static void init_sdma_vm(struct device_queue_manager *dqm, struct queue *q,
queue              48 drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_vi.c 			struct queue *q,
queue             228 drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_vi.c static void init_sdma_vm(struct device_queue_manager *dqm, struct queue *q,
queue             245 drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_vi.c 			struct queue *q,
queue             129 drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c 	if (init_queue(&kq->queue, &prop) != 0)
queue             132 drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c 	kq->queue->device = dev;
queue             133 drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c 	kq->queue->process = kfd_get_process(current);
queue             135 drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c 	kq->queue->mqd_mem_obj = kq->mqd_mgr->allocate_mqd(kq->mqd_mgr->dev,
queue             136 drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c 					&kq->queue->properties);
queue             137 drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c 	if (!kq->queue->mqd_mem_obj)
queue             139 drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c 	kq->mqd_mgr->init_mqd(kq->mqd_mgr, &kq->queue->mqd,
queue             140 drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c 					kq->queue->mqd_mem_obj,
queue             141 drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c 					&kq->queue->gart_mqd_addr,
queue             142 drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c 					&kq->queue->properties);
queue             146 drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c 		kq->queue->pipe = KFD_CIK_HIQ_PIPE;
queue             147 drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c 		kq->queue->queue = KFD_CIK_HIQ_QUEUE;
queue             148 drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c 		kq->mqd_mgr->load_mqd(kq->mqd_mgr, kq->queue->mqd,
queue             149 drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c 				kq->queue->pipe, kq->queue->queue,
queue             150 drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c 				&kq->queue->properties, NULL);
queue             164 drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c 	print_queue(kq->queue);
queue             168 drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c 	kq->mqd_mgr->free_mqd(kq->mqd_mgr, kq->queue->mqd, kq->queue->mqd_mem_obj);
queue             170 drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c 	uninit_queue(kq->queue);
queue             188 drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c 	if (kq->queue->properties.type == KFD_QUEUE_TYPE_HIQ)
queue             190 drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c 					kq->queue->mqd,
queue             193 drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c 					kq->queue->pipe,
queue             194 drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c 					kq->queue->queue);
queue             195 drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c 	else if (kq->queue->properties.type == KFD_QUEUE_TYPE_DIQ)
queue             198 drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c 	kq->mqd_mgr->free_mqd(kq->mqd_mgr, kq->queue->mqd,
queue             199 drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c 				kq->queue->mqd_mem_obj);
queue             206 drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c 					kq->queue->properties.doorbell_ptr);
queue             207 drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c 	uninit_queue(kq->queue);
queue             228 drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c 	queue_size_dwords = kq->queue->properties.queue_size / 4;
queue             292 drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c 			(kq->queue->properties.queue_size / 4);
queue              74 drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.h 	struct queue		*queue;
queue              51 drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue_cik.c 	write_kernel_doorbell(kq->queue->properties.doorbell_ptr,
queue              67 drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue_v10.c 	write_kernel_doorbell64(kq->queue->properties.doorbell_ptr,
queue             152 drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue_v10.c 		struct queue *q, bool is_static)
queue              66 drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue_v9.c 	write_kernel_doorbell64(kq->queue->properties.doorbell_ptr,
queue             176 drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue_v9.c 		struct queue *q, bool is_static)
queue              66 drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue_vi.c 	write_kernel_doorbell(kq->queue->properties.doorbell_ptr,
queue             183 drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue_vi.c 		struct queue *q, bool is_static)
queue             129 drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c 	struct queue *q;
queue             165 drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c 			if (!kq->queue->properties.is_active)
queue             169 drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c 				kq->queue->queue, qpd->is_debug);
queue             173 drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c 						kq->queue,
queue             188 drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c 				q->queue, qpd->is_debug);
queue             251 drivers/gpu/drm/amd/amdkfd/kfd_pm4_headers_vi.h 			uint32_t queue:6;
queue             498 drivers/gpu/drm/amd/amdkfd/kfd_priv.h 	uint32_t queue;
queue             867 drivers/gpu/drm/amd/amdkfd/kfd_priv.h int init_queue(struct queue **q, const struct queue_properties *properties);
queue             868 drivers/gpu/drm/amd/amdkfd/kfd_priv.h void uninit_queue(struct queue *q);
queue             870 drivers/gpu/drm/amd/amdkfd/kfd_priv.h void print_queue(struct queue *q);
queue             893 drivers/gpu/drm/amd/amdkfd/kfd_priv.h 	struct queue *q;
queue             952 drivers/gpu/drm/amd/amdkfd/kfd_priv.h 			struct queue *q, bool is_static);
queue              38 drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c 		    (pqn->kq && pqn->kq->queue->properties.queue_id == qid))
queue             166 drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c 				struct kfd_dev *dev, struct queue **q,
queue             199 drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c 	struct queue *q;
queue             289 drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c 		kq->queue->properties.queue_id = *qid;
queue             504 drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c 	struct queue *q;
queue             532 drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c 			q = pqn->kq->queue;
queue              45 drivers/gpu/drm/amd/amdkfd/kfd_queue.c void print_queue(struct queue *q)
queue              66 drivers/gpu/drm/amd/amdkfd/kfd_queue.c int init_queue(struct queue **q, const struct queue_properties *properties)
queue              68 drivers/gpu/drm/amd/amdkfd/kfd_queue.c 	struct queue *tmp_q;
queue              80 drivers/gpu/drm/amd/amdkfd/kfd_queue.c void uninit_queue(struct queue *q)
queue            1462 drivers/gpu/drm/drm_atomic_helper.c 		ret = wait_event_timeout(dev->vblank[i].queue,
queue             199 drivers/gpu/drm/drm_irq.c 			wake_up(&vblank->queue);
queue             463 drivers/gpu/drm/drm_vblank.c 		init_waitqueue_head(&vblank->queue);
queue             501 drivers/gpu/drm/drm_vblank.c 	return &crtc->dev->vblank[drm_crtc_index(crtc)].queue;
queue            1097 drivers/gpu/drm/drm_vblank.c 	ret = wait_event_timeout(vblank->queue,
queue            1157 drivers/gpu/drm/drm_vblank.c 	wake_up(&vblank->queue);
queue            1676 drivers/gpu/drm/drm_vblank.c 		wait = wait_event_interruptible_timeout(vblank->queue,
queue            1778 drivers/gpu/drm/drm_vblank.c 	wake_up(&vblank->queue);
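
The drm_vblank.c lines above use a classic waitqueue handshake: waiters sleep on vblank->queue until the counter moves, and the vblank interrupt path calls wake_up(). A minimal sketch of that pattern; the counter and the 100 ms timeout are illustrative, only the <linux/wait.h> calls come from the listing:

        #include <linux/wait.h>
        #include <linux/jiffies.h>
        #include <linux/atomic.h>

        static wait_queue_head_t vbl_queue;
        static atomic_t vbl_count;

        static void vbl_init(void)
        {
                init_waitqueue_head(&vbl_queue);
        }

        static void vbl_irq_handler(void)
        {
                atomic_inc(&vbl_count);
                wake_up(&vbl_queue);            /* release anyone in vbl_wait() */
        }

        /* Returns 0 on timeout, nonzero if the count advanced in time. */
        static long vbl_wait(int last_seen)
        {
                return wait_event_timeout(vbl_queue,
                                          atomic_read(&vbl_count) != last_seen,
                                          msecs_to_jiffies(100));
        }
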
queue             489 drivers/gpu/drm/i915/gt/intel_engine_cs.c 	execlists->queue = RB_ROOT_CACHED;
queue            1073 drivers/gpu/drm/i915/gt/intel_engine_cs.c 	if (!RB_EMPTY_ROOT(&engine->execlists.queue.rb_root))
queue             255 drivers/gpu/drm/i915/gt/intel_engine_types.h 	struct rb_root_cached queue;
queue             308 drivers/gpu/drm/i915/gt/intel_lrc.c 	rb = rb_first_cached(&execlists->queue);
queue             507 drivers/gpu/drm/i915/gt/intel_lrc.c 			GEM_BUG_ON(RB_EMPTY_ROOT(&engine->execlists.queue.rb_root));
queue            1268 drivers/gpu/drm/i915/gt/intel_lrc.c 	while ((rb = rb_first_cached(&execlists->queue))) {
queue            1334 drivers/gpu/drm/i915/gt/intel_lrc.c 		rb_erase_cached(&p->node, &execlists->queue);
queue            1697 drivers/gpu/drm/i915/gt/intel_lrc.c 	GEM_BUG_ON(RB_EMPTY_ROOT(&engine->execlists.queue.rb_root));
queue            2599 drivers/gpu/drm/i915/gt/intel_lrc.c 	while ((rb = rb_first_cached(&execlists->queue))) {
queue            2608 drivers/gpu/drm/i915/gt/intel_lrc.c 		rb_erase_cached(&p->node, &execlists->queue);
queue            2637 drivers/gpu/drm/i915/gt/intel_lrc.c 	execlists->queue = RB_ROOT_CACHED;
queue            2655 drivers/gpu/drm/i915/gt/intel_lrc.c 	if (!RB_EMPTY_ROOT(&execlists->queue.rb_root))
queue            3951 drivers/gpu/drm/i915/gt/intel_lrc.c 	for (rb = rb_first_cached(&execlists->queue); rb; rb = rb_next(rb)) {
queue            1185 drivers/gpu/drm/i915/gt/intel_reset.c 		wait_event(gt->reset.queue,
queue            1209 drivers/gpu/drm/i915/gt/intel_reset.c 	wake_up_all(&gt->reset.queue);
queue            1224 drivers/gpu/drm/i915/gt/intel_reset.c 		if (wait_event_interruptible(gt->reset.queue,
queue            1258 drivers/gpu/drm/i915/gt/intel_reset.c 	if (wait_event_interruptible(gt->reset.queue,
queue            1268 drivers/gpu/drm/i915/gt/intel_reset.c 	init_waitqueue_head(&gt->reset.queue);
queue              45 drivers/gpu/drm/i915/gt/intel_reset_types.h 	wait_queue_head_t queue;
queue             562 drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c 	while ((rb = rb_first_cached(&execlists->queue))) {
queue             583 drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c 		rb_erase_cached(&p->node, &execlists->queue);
queue             716 drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c 	while ((rb = rb_first_cached(&execlists->queue))) {
queue             727 drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c 		rb_erase_cached(&p->node, &execlists->queue);
queue             734 drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c 	execlists->queue = RB_ROOT_CACHED;
queue            3564 drivers/gpu/drm/i915/i915_debugfs.c 	wait_event(i915->gt.reset.queue,
queue              51 drivers/gpu/drm/i915/i915_scheduler.c 	GEM_BUG_ON(rb_first_cached(&execlists->queue) !=
queue              52 drivers/gpu/drm/i915/i915_scheduler.c 		   rb_first(&execlists->queue.rb_root));
queue              55 drivers/gpu/drm/i915/i915_scheduler.c 	for (rb = rb_first_cached(&execlists->queue); rb; rb = rb_next(rb)) {
queue              92 drivers/gpu/drm/i915/i915_scheduler.c 	parent = &execlists->queue.rb_root.rb_node;
queue             131 drivers/gpu/drm/i915/i915_scheduler.c 	rb_insert_color_cached(&p->node, &execlists->queue, first);
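
The i915 execlists lines above implement a priority queue on a cached rbtree: rb_first_cached() gives O(1) access to the highest-priority node, and rb_insert_color_cached() keeps that cache valid across inserts. A minimal sketch of the same structure; struct prio_node is illustrative, with higher ->prio sorting leftmost as in the scheduler:

        #include <linux/rbtree.h>

        struct prio_node {
                struct rb_node node;
                int prio;
        };

        static struct rb_root_cached pqueue = RB_ROOT_CACHED;

        static void pqueue_insert(struct prio_node *p)
        {
                struct rb_node **link = &pqueue.rb_root.rb_node, *parent = NULL;
                bool first = true;      /* still leftmost unless we ever go right */

                while (*link) {
                        struct prio_node *it = rb_entry(*link, struct prio_node, node);

                        parent = *link;
                        if (p->prio > it->prio) {
                                link = &parent->rb_left;
                        } else {
                                link = &parent->rb_right;
                                first = false;
                        }
                }
                rb_link_node(&p->node, parent, link);
                rb_insert_color_cached(&p->node, &pqueue, first);
        }

        static struct prio_node *pqueue_pop(void)
        {
                struct rb_node *rb = rb_first_cached(&pqueue);  /* highest prio */

                if (!rb)
                        return NULL;
                rb_erase_cached(rb, &pqueue);
                return rb_entry(rb, struct prio_node, node);
        }
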
queue              22 drivers/gpu/drm/i915/selftests/igt_reset.c 		wait_event(gt->reset.queue,
queue              42 drivers/gpu/drm/i915/selftests/igt_reset.c 	wake_up_all(&gt->reset.queue);
queue              21 drivers/gpu/drm/msm/adreno/a6xx_hfi.c static int a6xx_hfi_queue_read(struct a6xx_hfi_queue *queue, u32 *data,
queue              24 drivers/gpu/drm/msm/adreno/a6xx_hfi.c 	struct a6xx_hfi_queue_header *header = queue->header;
queue              32 drivers/gpu/drm/msm/adreno/a6xx_hfi.c 	hdr = queue->data[index];
queue              46 drivers/gpu/drm/msm/adreno/a6xx_hfi.c 		data[i] = queue->data[index];
queue              55 drivers/gpu/drm/msm/adreno/a6xx_hfi.c 	struct a6xx_hfi_queue *queue, u32 *data, u32 dwords)
queue              57 drivers/gpu/drm/msm/adreno/a6xx_hfi.c 	struct a6xx_hfi_queue_header *header = queue->header;
queue              60 drivers/gpu/drm/msm/adreno/a6xx_hfi.c 	spin_lock(&queue->lock);
queue              66 drivers/gpu/drm/msm/adreno/a6xx_hfi.c 		spin_unlock(&queue->lock);
queue              71 drivers/gpu/drm/msm/adreno/a6xx_hfi.c 		queue->data[index] = data[i];
queue              76 drivers/gpu/drm/msm/adreno/a6xx_hfi.c 	spin_unlock(&queue->lock);
queue              85 drivers/gpu/drm/msm/adreno/a6xx_hfi.c 	struct a6xx_hfi_queue *queue = &gmu->queues[HFI_RESPONSE_QUEUE];
queue             108 drivers/gpu/drm/msm/adreno/a6xx_hfi.c 		ret = a6xx_hfi_queue_read(queue, (u32 *) &resp,
queue             154 drivers/gpu/drm/msm/adreno/a6xx_hfi.c 	struct a6xx_hfi_queue *queue = &gmu->queues[HFI_COMMAND_QUEUE];
queue             158 drivers/gpu/drm/msm/adreno/a6xx_hfi.c 	seqnum = atomic_inc_return(&queue->seqnum) % 0xfff;
queue             164 drivers/gpu/drm/msm/adreno/a6xx_hfi.c 	ret = a6xx_hfi_queue_write(gmu, queue, data, dwords);
queue             314 drivers/gpu/drm/msm/adreno/a6xx_hfi.c 		struct a6xx_hfi_queue *queue = &gmu->queues[i];
queue             316 drivers/gpu/drm/msm/adreno/a6xx_hfi.c 		if (!queue->header)
queue             319 drivers/gpu/drm/msm/adreno/a6xx_hfi.c 		if (queue->header->read_index != queue->header->write_index)
queue             322 drivers/gpu/drm/msm/adreno/a6xx_hfi.c 		queue->header->read_index = 0;
queue             323 drivers/gpu/drm/msm/adreno/a6xx_hfi.c 		queue->header->write_index = 0;
queue             327 drivers/gpu/drm/msm/adreno/a6xx_hfi.c static void a6xx_hfi_queue_init(struct a6xx_hfi_queue *queue,
queue             331 drivers/gpu/drm/msm/adreno/a6xx_hfi.c 	spin_lock_init(&queue->lock);
queue             332 drivers/gpu/drm/msm/adreno/a6xx_hfi.c 	queue->header = header;
queue             333 drivers/gpu/drm/msm/adreno/a6xx_hfi.c 	queue->data = virt;
queue             334 drivers/gpu/drm/msm/adreno/a6xx_hfi.c 	atomic_set(&queue->seqnum, 0);
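
The a6xx HFI lines above implement a shared-memory ring: the producer and consumer each own one index in a header both sides can see, the queue is empty when the indices match, and data wraps modulo the queue size in dwords. A hedged sketch of the read side; struct ring_hdr mirrors the role of a6xx_hfi_queue_header, but its exact layout here is illustrative:

        #include <linux/types.h>

        struct ring_hdr {
                u32 read_index;         /* owned by the consumer */
                u32 write_index;        /* owned by the producer */
                u32 size;               /* capacity in dwords */
        };

        static int ring_read(struct ring_hdr *hdr, u32 *buf, u32 *data, u32 dwords)
        {
                u32 i, index = hdr->read_index;

                if (hdr->read_index == hdr->write_index)
                        return 0;                       /* queue is empty */

                for (i = 0; i < dwords; i++) {
                        data[i] = buf[index];
                        index = (index + 1) % hdr->size; /* wrap at the end */
                }

                hdr->read_index = index;                /* publish consumption */
                return dwords;
        }

This is also why the a6xx_hfi_stop() lines above can reset a queue by simply writing read_index and write_index back to 0.
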
queue             535 drivers/gpu/drm/msm/disp/mdp4/mdp4_crtc.c 	ret = wait_event_timeout(dev->vblank[drm_crtc_index(crtc)].queue,
queue            1135 drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c 	ret = wait_event_timeout(dev->vblank[drm_crtc_index(crtc)].queue,
queue             875 drivers/gpu/drm/msm/msm_drv.c 	struct msm_gpu_submitqueue *queue;
queue             887 drivers/gpu/drm/msm/msm_drv.c 	queue = msm_submitqueue_get(file->driver_priv, args->queueid);
queue             888 drivers/gpu/drm/msm/msm_drv.c 	if (!queue)
queue             891 drivers/gpu/drm/msm/msm_drv.c 	ret = msm_wait_fence(gpu->rb[queue->prio]->fctx, args->fence, &timeout,
queue             894 drivers/gpu/drm/msm/msm_drv.c 	msm_submitqueue_put(queue);
queue             139 drivers/gpu/drm/msm/msm_gem.h 	struct msm_gpu_submitqueue *queue;
queue              29 drivers/gpu/drm/msm/msm_gem_submit.c 		struct msm_gpu_submitqueue *queue, uint32_t nr_bos,
queue              48 drivers/gpu/drm/msm/msm_gem_submit.c 	submit->queue = queue;
queue              49 drivers/gpu/drm/msm/msm_gem_submit.c 	submit->ring = gpu->rb[queue->prio];
queue              67 drivers/gpu/drm/msm/msm_gem_submit.c 	msm_submitqueue_put(submit->queue);
queue             407 drivers/gpu/drm/msm/msm_gem_submit.c 	struct msm_gpu_submitqueue *queue;
queue             431 drivers/gpu/drm/msm/msm_gem_submit.c 	queue = msm_submitqueue_get(ctx, args->queueid);
queue             432 drivers/gpu/drm/msm/msm_gem_submit.c 	if (!queue)
queue             438 drivers/gpu/drm/msm/msm_gem_submit.c 	ring = gpu->rb[queue->prio];
queue             475 drivers/gpu/drm/msm/msm_gem_submit.c 	submit = submit_create(dev, gpu, ctx->aspace, queue, args->nr_bos,
queue             439 drivers/gpu/drm/msm/msm_gpu.c 		submit->queue->faults++;
queue             288 drivers/gpu/drm/msm/msm_gpu.h static inline void msm_submitqueue_put(struct msm_gpu_submitqueue *queue)
queue             290 drivers/gpu/drm/msm/msm_gpu.h 	if (queue)
queue             291 drivers/gpu/drm/msm/msm_gpu.h 		kref_put(&queue->ref, msm_submitqueue_destroy);
queue              12 drivers/gpu/drm/msm/msm_submitqueue.c 	struct msm_gpu_submitqueue *queue = container_of(kref,
queue              15 drivers/gpu/drm/msm/msm_submitqueue.c 	kfree(queue);
queue              60 drivers/gpu/drm/msm/msm_submitqueue.c 	struct msm_gpu_submitqueue *queue;
queue              65 drivers/gpu/drm/msm/msm_submitqueue.c 	queue = kzalloc(sizeof(*queue), GFP_KERNEL);
queue              67 drivers/gpu/drm/msm/msm_submitqueue.c 	if (!queue)
queue              70 drivers/gpu/drm/msm/msm_submitqueue.c 	kref_init(&queue->ref);
queue              71 drivers/gpu/drm/msm/msm_submitqueue.c 	queue->flags = flags;
queue              77 drivers/gpu/drm/msm/msm_submitqueue.c 		queue->prio = prio;
queue              82 drivers/gpu/drm/msm/msm_submitqueue.c 	queue->id = ctx->queueid++;
queue              85 drivers/gpu/drm/msm/msm_submitqueue.c 		*id = queue->id;
queue              87 drivers/gpu/drm/msm/msm_submitqueue.c 	list_add_tail(&queue->node, &ctx->submitqueues);
queue             116 drivers/gpu/drm/msm/msm_submitqueue.c static int msm_submitqueue_query_faults(struct msm_gpu_submitqueue *queue,
queue             119 drivers/gpu/drm/msm/msm_submitqueue.c 	size_t size = min_t(size_t, args->len, sizeof(queue->faults));
queue             124 drivers/gpu/drm/msm/msm_submitqueue.c 		args->len = sizeof(queue->faults);
queue             131 drivers/gpu/drm/msm/msm_submitqueue.c 	ret = copy_to_user(u64_to_user_ptr(args->data), &queue->faults, size);
queue             139 drivers/gpu/drm/msm/msm_submitqueue.c 	struct msm_gpu_submitqueue *queue;
queue             145 drivers/gpu/drm/msm/msm_submitqueue.c 	queue = msm_submitqueue_get(ctx, args->id);
queue             146 drivers/gpu/drm/msm/msm_submitqueue.c 	if (!queue)
queue             150 drivers/gpu/drm/msm/msm_submitqueue.c 		ret = msm_submitqueue_query_faults(queue, args);
queue             152 drivers/gpu/drm/msm/msm_submitqueue.c 	msm_submitqueue_put(queue);
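
The msm_submitqueue lines above follow the standard kref lifecycle: the queue is created with one reference, lookups take another, and msm_submitqueue_put() drops one, with the release callback doing the kfree(). A minimal sketch of that lifecycle; everything except the <linux/kref.h> calls is illustrative:

        #include <linux/kref.h>
        #include <linux/slab.h>

        struct example_queue {
                struct kref ref;
                int id;
        };

        static void example_queue_destroy(struct kref *kref)
        {
                /* Called exactly once, when the last reference drops. */
                kfree(container_of(kref, struct example_queue, ref));
        }

        static struct example_queue *example_queue_create(int id)
        {
                struct example_queue *q = kzalloc(sizeof(*q), GFP_KERNEL);

                if (!q)
                        return NULL;
                kref_init(&q->ref);     /* refcount = 1, owned by the creator */
                q->id = id;
                return q;
        }

        static void example_queue_put(struct example_queue *q)
        {
                if (q)                  /* NULL-tolerant, like msm_submitqueue_put() */
                        kref_put(&q->ref, example_queue_destroy);
        }
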
queue              11 drivers/gpu/drm/nouveau/include/nvkm/engine/sec2.h 	struct nvkm_msgqueue *queue;
queue              11 drivers/gpu/drm/nouveau/include/nvkm/subdev/pmu.h 	struct nvkm_msgqueue *queue;
queue              32 drivers/gpu/drm/nouveau/nvkm/engine/sec2/base.c 	nvkm_msgqueue_del(&sec2->queue);
queue              64 drivers/gpu/drm/nouveau/nvkm/engine/sec2/base.c 	if (!sec2->queue) {
queue              70 drivers/gpu/drm/nouveau/nvkm/engine/sec2/base.c 	nvkm_msgqueue_recv(sec2->queue);
queue              36 drivers/gpu/drm/nouveau/nvkm/falcon/msgqueue.c msg_queue_open(struct nvkm_msgqueue *priv, struct nvkm_msgqueue_queue *queue)
queue              40 drivers/gpu/drm/nouveau/nvkm/falcon/msgqueue.c 	mutex_lock(&queue->mutex);
queue              42 drivers/gpu/drm/nouveau/nvkm/falcon/msgqueue.c 	queue->position = nvkm_falcon_rd32(falcon, queue->tail_reg);
queue              48 drivers/gpu/drm/nouveau/nvkm/falcon/msgqueue.c msg_queue_close(struct nvkm_msgqueue *priv, struct nvkm_msgqueue_queue *queue,
queue              54 drivers/gpu/drm/nouveau/nvkm/falcon/msgqueue.c 		nvkm_falcon_wr32(falcon, queue->tail_reg, queue->position);
queue              56 drivers/gpu/drm/nouveau/nvkm/falcon/msgqueue.c 	mutex_unlock(&queue->mutex);
queue              60 drivers/gpu/drm/nouveau/nvkm/falcon/msgqueue.c msg_queue_empty(struct nvkm_msgqueue *priv, struct nvkm_msgqueue_queue *queue)
queue              65 drivers/gpu/drm/nouveau/nvkm/falcon/msgqueue.c 	head = nvkm_falcon_rd32(falcon, queue->head_reg);
queue              66 drivers/gpu/drm/nouveau/nvkm/falcon/msgqueue.c 	tail = nvkm_falcon_rd32(falcon, queue->tail_reg);
queue              72 drivers/gpu/drm/nouveau/nvkm/falcon/msgqueue.c msg_queue_pop(struct nvkm_msgqueue *priv, struct nvkm_msgqueue_queue *queue,
queue              79 drivers/gpu/drm/nouveau/nvkm/falcon/msgqueue.c 	head = nvkm_falcon_rd32(falcon, queue->head_reg);
queue              81 drivers/gpu/drm/nouveau/nvkm/falcon/msgqueue.c 	if (head < queue->position)
queue              82 drivers/gpu/drm/nouveau/nvkm/falcon/msgqueue.c 		queue->position = queue->offset;
queue              84 drivers/gpu/drm/nouveau/nvkm/falcon/msgqueue.c 	tail = queue->position;
queue              99 drivers/gpu/drm/nouveau/nvkm/falcon/msgqueue.c 	queue->position += ALIGN(size, QUEUE_ALIGNMENT);
queue             105 drivers/gpu/drm/nouveau/nvkm/falcon/msgqueue.c msg_queue_read(struct nvkm_msgqueue *priv, struct nvkm_msgqueue_queue *queue,
queue             111 drivers/gpu/drm/nouveau/nvkm/falcon/msgqueue.c 	err = msg_queue_open(priv, queue);
queue             113 drivers/gpu/drm/nouveau/nvkm/falcon/msgqueue.c 		nvkm_error(subdev, "fail to open queue %d\n", queue->index);
queue             117 drivers/gpu/drm/nouveau/nvkm/falcon/msgqueue.c 	if (msg_queue_empty(priv, queue)) {
queue             122 drivers/gpu/drm/nouveau/nvkm/falcon/msgqueue.c 	err = msg_queue_pop(priv, queue, hdr, HDR_SIZE);
queue             139 drivers/gpu/drm/nouveau/nvkm/falcon/msgqueue.c 		err = msg_queue_pop(priv, queue, (hdr + 1), read_size);
queue             149 drivers/gpu/drm/nouveau/nvkm/falcon/msgqueue.c 	msg_queue_close(priv, queue, (err >= 0));
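The msg_queue_open/pop/read lines above implement the consumer side of a ring buffer in falcon DMEM: the cached read position is compared against the hardware head pointer, and if the head has moved behind it the producer must have wrapped, so reading restarts at the queue's base offset. A compilable sketch of that wrap rule, with plain fields standing in for the head/tail register reads and QUEUE_ALIGNMENT assumed to be 4:

    /* Illustrative only; register access is replaced by struct fields. */
    #include <stdint.h>
    #include <stdio.h>

    #define QUEUE_ALIGNMENT 4
    #define ALIGN_UP(x, a)  (((x) + (a) - 1) & ~((a) - 1))

    struct sketch_msgq {
        uint32_t offset;    /* base of the ring inside DMEM */
        uint32_t position;  /* consumer read pointer */
        uint32_t head;      /* producer write pointer (head_reg) */
    };

    /* Return the DMEM address to read @size bytes from, advancing the
     * consumer position and handling producer wrap-around. */
    static uint32_t msgq_pop_addr(struct sketch_msgq *q, uint32_t size)
    {
        if (q->head < q->position)      /* producer wrapped past the end */
            q->position = q->offset;

        uint32_t tail = q->position;

        q->position += ALIGN_UP(size, QUEUE_ALIGNMENT);
        return tail;
    }

    int main(void)
    {
        struct sketch_msgq q = { .offset = 0x1000, .position = 0x1ff0,
                                 .head = 0x1008 };

        printf("read at 0x%x\n", msgq_pop_addr(&q, 6)); /* wraps to 0x1000 */
        printf("next read at 0x%x\n", q.position);      /* 0x1008 */
        return 0;
    }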
queue             155 drivers/gpu/drm/nouveau/nvkm/falcon/msgqueue.c cmd_queue_has_room(struct nvkm_msgqueue *priv, struct nvkm_msgqueue_queue *queue,
queue             163 drivers/gpu/drm/nouveau/nvkm/falcon/msgqueue.c 	head = nvkm_falcon_rd32(falcon, queue->head_reg);
queue             164 drivers/gpu/drm/nouveau/nvkm/falcon/msgqueue.c 	tail = nvkm_falcon_rd32(falcon, queue->tail_reg);
queue             167 drivers/gpu/drm/nouveau/nvkm/falcon/msgqueue.c 		free = queue->offset + queue->size - head;
queue             172 drivers/gpu/drm/nouveau/nvkm/falcon/msgqueue.c 			head = queue->offset;
queue             183 drivers/gpu/drm/nouveau/nvkm/falcon/msgqueue.c cmd_queue_push(struct nvkm_msgqueue *priv, struct nvkm_msgqueue_queue *queue,
queue             186 drivers/gpu/drm/nouveau/nvkm/falcon/msgqueue.c 	nvkm_falcon_load_dmem(priv->falcon, data, queue->position, size, 0);
queue             187 drivers/gpu/drm/nouveau/nvkm/falcon/msgqueue.c 	queue->position += ALIGN(size, QUEUE_ALIGNMENT);
queue             196 drivers/gpu/drm/nouveau/nvkm/falcon/msgqueue.c cmd_queue_rewind(struct nvkm_msgqueue *priv, struct nvkm_msgqueue_queue *queue)
queue             204 drivers/gpu/drm/nouveau/nvkm/falcon/msgqueue.c 	err = cmd_queue_push(priv, queue, &cmd, cmd.size);
queue             206 drivers/gpu/drm/nouveau/nvkm/falcon/msgqueue.c 		nvkm_error(subdev, "queue %d rewind failed\n", queue->index);
queue             208 drivers/gpu/drm/nouveau/nvkm/falcon/msgqueue.c 	nvkm_error(subdev, "queue %d rewound\n", queue->index);
queue             210 drivers/gpu/drm/nouveau/nvkm/falcon/msgqueue.c 	queue->position = queue->offset;
queue             214 drivers/gpu/drm/nouveau/nvkm/falcon/msgqueue.c cmd_queue_open(struct nvkm_msgqueue *priv, struct nvkm_msgqueue_queue *queue,
queue             221 drivers/gpu/drm/nouveau/nvkm/falcon/msgqueue.c 	mutex_lock(&queue->mutex);
queue             223 drivers/gpu/drm/nouveau/nvkm/falcon/msgqueue.c 	if (!cmd_queue_has_room(priv, queue, size, &rewind)) {
queue             225 drivers/gpu/drm/nouveau/nvkm/falcon/msgqueue.c 		mutex_unlock(&queue->mutex);
queue             229 drivers/gpu/drm/nouveau/nvkm/falcon/msgqueue.c 	queue->position = nvkm_falcon_rd32(falcon, queue->head_reg);
queue             232 drivers/gpu/drm/nouveau/nvkm/falcon/msgqueue.c 		cmd_queue_rewind(priv, queue);
queue             238 drivers/gpu/drm/nouveau/nvkm/falcon/msgqueue.c cmd_queue_close(struct nvkm_msgqueue *priv, struct nvkm_msgqueue_queue *queue,
queue             244 drivers/gpu/drm/nouveau/nvkm/falcon/msgqueue.c 		nvkm_falcon_wr32(falcon, queue->head_reg, queue->position);
queue             246 drivers/gpu/drm/nouveau/nvkm/falcon/msgqueue.c 	mutex_unlock(&queue->mutex);
queue             251 drivers/gpu/drm/nouveau/nvkm/falcon/msgqueue.c 	  struct nvkm_msgqueue_queue *queue)
queue             260 drivers/gpu/drm/nouveau/nvkm/falcon/msgqueue.c 		ret = cmd_queue_open(priv, queue, cmd->size);
queue             266 drivers/gpu/drm/nouveau/nvkm/falcon/msgqueue.c 	ret = cmd_queue_push(priv, queue, cmd, cmd->size);
queue             272 drivers/gpu/drm/nouveau/nvkm/falcon/msgqueue.c 	cmd_queue_close(priv, queue, commit);
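cmd_write() above brackets every submission with cmd_queue_open()/cmd_queue_push()/cmd_queue_close(), and cmd_queue_has_room() decides whether the command fits between the producer head and the end of the ring, or whether a rewind to the base offset is needed first. A sketch of that free-space test under the same head/tail convention, with HDR_SIZE reserved for the rewind command and a hypothetical struct:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define HDR_SIZE 4u

    struct sketch_cmdq {
        uint32_t offset, size;      /* ring placement in DMEM */
        uint32_t head, tail;        /* producer / consumer pointers */
    };

    static bool cmdq_has_room(struct sketch_cmdq *q, uint32_t size,
                              bool *rewind)
    {
        uint32_t head = q->head, tail = q->tail, free = 0;

        *rewind = false;
        if (head >= tail) {
            free = q->offset + q->size - head - HDR_SIZE;
            if (size > free) {
                *rewind = true;     /* must restart at the ring base */
                head = q->offset;
            }
        }
        if (head < tail)
            free = tail - head - 1;

        return size <= free;
    }

    int main(void)
    {
        struct sketch_cmdq q = { .offset = 0, .size = 64,
                                 .head = 60, .tail = 32 };
        bool rewind;
        bool fits = cmdq_has_room(&q, 16, &rewind);

        printf("fits: %d rewind: %d\n", fits, rewind);
        return 0;
    }

The rewind case is why cmd_queue_rewind() exists: the producer writes a dedicated rewind command at the old head so the falcon knows to jump back to the base before reading the real command.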
queue             325 drivers/gpu/drm/nouveau/nvkm/falcon/msgqueue.c 	struct nvkm_msgqueue_queue *queue;
queue             332 drivers/gpu/drm/nouveau/nvkm/falcon/msgqueue.c 	queue = priv->func->cmd_queue(priv, prio);
queue             333 drivers/gpu/drm/nouveau/nvkm/falcon/msgqueue.c 	if (IS_ERR(queue))
queue             334 drivers/gpu/drm/nouveau/nvkm/falcon/msgqueue.c 		return PTR_ERR(queue);
queue             347 drivers/gpu/drm/nouveau/nvkm/falcon/msgqueue.c 	ret = cmd_write(priv, cmd, queue);
queue             435 drivers/gpu/drm/nouveau/nvkm/falcon/msgqueue.c 			   struct nvkm_msgqueue_queue *queue)
queue             451 drivers/gpu/drm/nouveau/nvkm/falcon/msgqueue.c 		while (msg_queue_read(priv, queue, hdr) > 0)
queue             457 drivers/gpu/drm/nouveau/nvkm/falcon/msgqueue.c nvkm_msgqueue_write_cmdline(struct nvkm_msgqueue *queue, void *buf)
queue             459 drivers/gpu/drm/nouveau/nvkm/falcon/msgqueue.c 	if (!queue || !queue->func || !queue->func->init_func)
queue             462 drivers/gpu/drm/nouveau/nvkm/falcon/msgqueue.c 	queue->func->init_func->gen_cmdline(queue, buf);
queue             466 drivers/gpu/drm/nouveau/nvkm/falcon/msgqueue.c nvkm_msgqueue_acr_boot_falcons(struct nvkm_msgqueue *queue,
queue             471 drivers/gpu/drm/nouveau/nvkm/falcon/msgqueue.c 	if (!queue || !queue->func->acr_func)
queue             475 drivers/gpu/drm/nouveau/nvkm/falcon/msgqueue.c 	if (queue->func->acr_func->boot_multiple_falcons)
queue             476 drivers/gpu/drm/nouveau/nvkm/falcon/msgqueue.c 		return queue->func->acr_func->boot_multiple_falcons(queue,
queue             480 drivers/gpu/drm/nouveau/nvkm/falcon/msgqueue.c 	if (!queue->func->acr_func->boot_falcon)
queue             484 drivers/gpu/drm/nouveau/nvkm/falcon/msgqueue.c 		int ret = queue->func->acr_func->boot_falcon(queue, falcon);
queue             495 drivers/gpu/drm/nouveau/nvkm/falcon/msgqueue.c 		  const struct nvkm_secboot *sb, struct nvkm_msgqueue **queue)
queue             502 drivers/gpu/drm/nouveau/nvkm/falcon/msgqueue.c 		ret = msgqueue_0137c63d_new(falcon, sb, queue);
queue             505 drivers/gpu/drm/nouveau/nvkm/falcon/msgqueue.c 		ret = msgqueue_0137bca5_new(falcon, sb, queue);
queue             510 drivers/gpu/drm/nouveau/nvkm/falcon/msgqueue.c 		ret = msgqueue_0148cdec_new(falcon, sb, queue);
queue             520 drivers/gpu/drm/nouveau/nvkm/falcon/msgqueue.c 		(*queue)->fw_version = version;
queue             527 drivers/gpu/drm/nouveau/nvkm/falcon/msgqueue.c nvkm_msgqueue_del(struct nvkm_msgqueue **queue)
queue             529 drivers/gpu/drm/nouveau/nvkm/falcon/msgqueue.c 	if (*queue) {
queue             530 drivers/gpu/drm/nouveau/nvkm/falcon/msgqueue.c 		(*queue)->func->dtor(*queue);
queue             531 drivers/gpu/drm/nouveau/nvkm/falcon/msgqueue.c 		*queue = NULL;
queue             536 drivers/gpu/drm/nouveau/nvkm/falcon/msgqueue.c nvkm_msgqueue_recv(struct nvkm_msgqueue *queue)
queue             538 drivers/gpu/drm/nouveau/nvkm/falcon/msgqueue.c 	if (!queue->func || !queue->func->recv) {
queue             539 drivers/gpu/drm/nouveau/nvkm/falcon/msgqueue.c 		const struct nvkm_subdev *subdev = queue->falcon->owner;
queue             545 drivers/gpu/drm/nouveau/nvkm/falcon/msgqueue.c 	queue->func->recv(queue);
queue             549 drivers/gpu/drm/nouveau/nvkm/falcon/msgqueue.c nvkm_msgqueue_reinit(struct nvkm_msgqueue *queue)
queue             552 drivers/gpu/drm/nouveau/nvkm/falcon/msgqueue.c 	if (!queue)
queue             555 drivers/gpu/drm/nouveau/nvkm/falcon/msgqueue.c 	queue->init_msg_received = false;
queue             556 drivers/gpu/drm/nouveau/nvkm/falcon/msgqueue.c 	reinit_completion(&queue->init_done);
queue             564 drivers/gpu/drm/nouveau/nvkm/falcon/msgqueue.c 		   struct nvkm_msgqueue *queue)
queue             568 drivers/gpu/drm/nouveau/nvkm/falcon/msgqueue.c 	queue->func = func;
queue             569 drivers/gpu/drm/nouveau/nvkm/falcon/msgqueue.c 	queue->falcon = falcon;
queue             570 drivers/gpu/drm/nouveau/nvkm/falcon/msgqueue.c 	mutex_init(&queue->seq_lock);
queue             572 drivers/gpu/drm/nouveau/nvkm/falcon/msgqueue.c 		queue->seq[i].id = i;
queue             574 drivers/gpu/drm/nouveau/nvkm/falcon/msgqueue.c 	init_completion(&queue->init_done);
queue             117 drivers/gpu/drm/nouveau/nvkm/falcon/msgqueue.h 	void (*recv)(struct nvkm_msgqueue *queue);
queue              41 drivers/gpu/drm/nouveau/nvkm/falcon/msgqueue_0137c63d.c 	struct nvkm_msgqueue_queue queue[MSGQUEUE_0137C63D_NUM_QUEUES];
queue              56 drivers/gpu/drm/nouveau/nvkm/falcon/msgqueue_0137c63d.c msgqueue_0137c63d_cmd_queue(struct nvkm_msgqueue *queue,
queue              59 drivers/gpu/drm/nouveau/nvkm/falcon/msgqueue_0137c63d.c 	struct msgqueue_0137c63d *priv = msgqueue_0137c63d(queue);
queue              64 drivers/gpu/drm/nouveau/nvkm/falcon/msgqueue_0137c63d.c 		return &priv->queue[MSGQUEUE_0137C63D_COMMAND_QUEUE_HPQ];
queue              66 drivers/gpu/drm/nouveau/nvkm/falcon/msgqueue_0137c63d.c 		return &priv->queue[MSGQUEUE_0137C63D_COMMAND_QUEUE_LPQ];
queue              74 drivers/gpu/drm/nouveau/nvkm/falcon/msgqueue_0137c63d.c msgqueue_0137c63d_process_msgs(struct nvkm_msgqueue *queue)
queue              76 drivers/gpu/drm/nouveau/nvkm/falcon/msgqueue_0137c63d.c 	struct msgqueue_0137c63d *priv = msgqueue_0137c63d(queue);
queue              78 drivers/gpu/drm/nouveau/nvkm/falcon/msgqueue_0137c63d.c 		&priv->queue[MSGQUEUE_0137C63D_MESSAGE_QUEUE];
queue              91 drivers/gpu/drm/nouveau/nvkm/falcon/msgqueue_0137c63d.c init_gen_cmdline(struct nvkm_msgqueue *queue, void *buf)
queue             117 drivers/gpu/drm/nouveau/nvkm/falcon/msgqueue_0137c63d.c static int acr_init_wpr(struct nvkm_msgqueue *queue);
queue             153 drivers/gpu/drm/nouveau/nvkm/falcon/msgqueue_0137c63d.c 		struct nvkm_msgqueue_queue *queue = &priv->queue[i];
queue             155 drivers/gpu/drm/nouveau/nvkm/falcon/msgqueue_0137c63d.c 		mutex_init(&queue->mutex);
queue             157 drivers/gpu/drm/nouveau/nvkm/falcon/msgqueue_0137c63d.c 		queue->index = init->queue_info[i].index;
queue             158 drivers/gpu/drm/nouveau/nvkm/falcon/msgqueue_0137c63d.c 		queue->offset = init->queue_info[i].offset;
queue             159 drivers/gpu/drm/nouveau/nvkm/falcon/msgqueue_0137c63d.c 		queue->size = init->queue_info[i].size;
queue             162 drivers/gpu/drm/nouveau/nvkm/falcon/msgqueue_0137c63d.c 			queue->head_reg = 0x4a0 + (queue->index * 4);
queue             163 drivers/gpu/drm/nouveau/nvkm/falcon/msgqueue_0137c63d.c 			queue->tail_reg = 0x4b0 + (queue->index * 4);
queue             165 drivers/gpu/drm/nouveau/nvkm/falcon/msgqueue_0137c63d.c 			queue->head_reg = 0x4c8;
queue             166 drivers/gpu/drm/nouveau/nvkm/falcon/msgqueue_0137c63d.c 			queue->tail_reg = 0x4cc;
queue             171 drivers/gpu/drm/nouveau/nvkm/falcon/msgqueue_0137c63d.c 			   i, queue->index, queue->offset, queue->size);
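The init loop above derives each queue's doorbell registers from its index: this firmware lays the command-queue head/tail pointers out as arrays of 32-bit registers starting at 0x4a0/0x4b0, while the single message queue uses the fixed pair 0x4c8/0x4cc. A sketch of that address arithmetic, with the offsets copied from the listing and everything else illustrative:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    static void queue_regs(uint32_t index, bool is_cmd_queue,
                           uint32_t *head_reg, uint32_t *tail_reg)
    {
        if (is_cmd_queue) {
            *head_reg = 0x4a0 + index * 4;  /* one 32-bit slot per queue */
            *tail_reg = 0x4b0 + index * 4;
        } else {
            *head_reg = 0x4c8;              /* message queue: fixed pair */
            *tail_reg = 0x4cc;
        }
    }

    int main(void)
    {
        uint32_t h, t;

        queue_regs(1, true, &h, &t);
        printf("cmd queue 1: head 0x%x tail 0x%x\n", h, t);
        return 0;
    }

The 0148cdec variant later in the listing uses the same scheme with a stride of 8 and different bases (0xa30/0xa34 and 0xa00/0xa04).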
queue             196 drivers/gpu/drm/nouveau/nvkm/falcon/msgqueue_0137c63d.c acr_init_wpr_callback(struct nvkm_msgqueue *queue,
queue             203 drivers/gpu/drm/nouveau/nvkm/falcon/msgqueue_0137c63d.c 	const struct nvkm_subdev *subdev = queue->falcon->owner;
queue             212 drivers/gpu/drm/nouveau/nvkm/falcon/msgqueue_0137c63d.c 	complete_all(&queue->init_done);
queue             216 drivers/gpu/drm/nouveau/nvkm/falcon/msgqueue_0137c63d.c acr_init_wpr(struct nvkm_msgqueue *queue)
queue             236 drivers/gpu/drm/nouveau/nvkm/falcon/msgqueue_0137c63d.c 	nvkm_msgqueue_post(queue, MSGQUEUE_MSG_PRIORITY_HIGH, &cmd.hdr,
queue             343 drivers/gpu/drm/nouveau/nvkm/falcon/msgqueue_0137c63d.c 	struct msgqueue_0137bca5 *queue = msgqueue_0137bca5(priv);
queue             352 drivers/gpu/drm/nouveau/nvkm/falcon/msgqueue_0137c63d.c 	cmd.wpr_lo = lower_32_bits(queue->wpr_addr);
queue             353 drivers/gpu/drm/nouveau/nvkm/falcon/msgqueue_0137c63d.c 	cmd.wpr_hi = upper_32_bits(queue->wpr_addr);
queue             375 drivers/gpu/drm/nouveau/nvkm/falcon/msgqueue_0137c63d.c msgqueue_0137c63d_dtor(struct nvkm_msgqueue *queue)
queue             377 drivers/gpu/drm/nouveau/nvkm/falcon/msgqueue_0137c63d.c 	kfree(msgqueue_0137c63d(queue));
queue             391 drivers/gpu/drm/nouveau/nvkm/falcon/msgqueue_0137c63d.c 		      struct nvkm_msgqueue **queue)
queue             399 drivers/gpu/drm/nouveau/nvkm/falcon/msgqueue_0137c63d.c 	*queue = &ret->base;
queue             417 drivers/gpu/drm/nouveau/nvkm/falcon/msgqueue_0137c63d.c 		      struct nvkm_msgqueue **queue)
queue             425 drivers/gpu/drm/nouveau/nvkm/falcon/msgqueue_0137c63d.c 	*queue = &ret->base.base;
queue              42 drivers/gpu/drm/nouveau/nvkm/falcon/msgqueue_0148cdec.c 	struct nvkm_msgqueue_queue queue[MSGQUEUE_0148CDEC_NUM_QUEUES];
queue              48 drivers/gpu/drm/nouveau/nvkm/falcon/msgqueue_0148cdec.c msgqueue_0148cdec_cmd_queue(struct nvkm_msgqueue *queue,
queue              51 drivers/gpu/drm/nouveau/nvkm/falcon/msgqueue_0148cdec.c 	struct msgqueue_0148cdec *priv = msgqueue_0148cdec(queue);
queue              53 drivers/gpu/drm/nouveau/nvkm/falcon/msgqueue_0148cdec.c 	return &priv->queue[MSGQUEUE_0148CDEC_COMMAND_QUEUE];
queue              57 drivers/gpu/drm/nouveau/nvkm/falcon/msgqueue_0148cdec.c msgqueue_0148cdec_process_msgs(struct nvkm_msgqueue *queue)
queue              59 drivers/gpu/drm/nouveau/nvkm/falcon/msgqueue_0148cdec.c 	struct msgqueue_0148cdec *priv = msgqueue_0148cdec(queue);
queue              61 drivers/gpu/drm/nouveau/nvkm/falcon/msgqueue_0148cdec.c 		&priv->queue[MSGQUEUE_0148CDEC_MESSAGE_QUEUE];
queue              75 drivers/gpu/drm/nouveau/nvkm/falcon/msgqueue_0148cdec.c init_gen_cmdline(struct nvkm_msgqueue *queue, void *buf)
queue             123 drivers/gpu/drm/nouveau/nvkm/falcon/msgqueue_0148cdec.c 		struct nvkm_msgqueue_queue *queue = &priv->queue[id];
queue             125 drivers/gpu/drm/nouveau/nvkm/falcon/msgqueue_0148cdec.c 		mutex_init(&queue->mutex);
queue             127 drivers/gpu/drm/nouveau/nvkm/falcon/msgqueue_0148cdec.c 		queue->index = init->queue_info[i].index;
queue             128 drivers/gpu/drm/nouveau/nvkm/falcon/msgqueue_0148cdec.c 		queue->offset = init->queue_info[i].offset;
queue             129 drivers/gpu/drm/nouveau/nvkm/falcon/msgqueue_0148cdec.c 		queue->size = init->queue_info[i].size;
queue             132 drivers/gpu/drm/nouveau/nvkm/falcon/msgqueue_0148cdec.c 			queue->head_reg = 0xa30 + (queue->index * 8);
queue             133 drivers/gpu/drm/nouveau/nvkm/falcon/msgqueue_0148cdec.c 			queue->tail_reg = 0xa34 + (queue->index * 8);
queue             135 drivers/gpu/drm/nouveau/nvkm/falcon/msgqueue_0148cdec.c 			queue->head_reg = 0xa00 + (queue->index * 8);
queue             136 drivers/gpu/drm/nouveau/nvkm/falcon/msgqueue_0148cdec.c 			queue->tail_reg = 0xa04 + (queue->index * 8);
queue             141 drivers/gpu/drm/nouveau/nvkm/falcon/msgqueue_0148cdec.c 			   id, queue->index, queue->offset, queue->size);
queue             235 drivers/gpu/drm/nouveau/nvkm/falcon/msgqueue_0148cdec.c msgqueue_0148cdec_dtor(struct nvkm_msgqueue *queue)
queue             237 drivers/gpu/drm/nouveau/nvkm/falcon/msgqueue_0148cdec.c 	kfree(msgqueue_0148cdec(queue));
queue             251 drivers/gpu/drm/nouveau/nvkm/falcon/msgqueue_0148cdec.c 		      struct nvkm_msgqueue **queue)
queue             259 drivers/gpu/drm/nouveau/nvkm/falcon/msgqueue_0148cdec.c 	*queue = &ret->base;
queue             147 drivers/gpu/drm/nouveau/nvkm/subdev/pmu/base.c 	nvkm_msgqueue_del(&pmu->queue);
queue              30 drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gm20b.c 	if (!pmu->queue) {
queue              36 drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gm20b.c 	nvkm_msgqueue_recv(pmu->queue);
queue             999 drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r352.c 	struct nvkm_msgqueue *queue;
queue            1021 drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r352.c 		queue = sb->subdev.device->pmu->queue;
queue            1024 drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r352.c 		queue = sb->subdev.device->sec2->queue;
queue            1034 drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r352.c 	ret = nvkm_msgqueue_acr_boot_falcons(queue, falcon_mask);
queue              79 drivers/gpu/drm/nouveau/nvkm/subdev/secboot/ls_ucode_msgqueue.c acr_ls_msgqueue_post_run(struct nvkm_msgqueue *queue,
queue              86 drivers/gpu/drm/nouveau/nvkm/subdev/secboot/ls_ucode_msgqueue.c 	nvkm_msgqueue_write_cmdline(queue, buf);
queue              89 drivers/gpu/drm/nouveau/nvkm/subdev/secboot/ls_ucode_msgqueue.c 	nvkm_msgqueue_reinit(queue);
queue             114 drivers/gpu/drm/nouveau/nvkm/subdev/secboot/ls_ucode_msgqueue.c 				sb, &pmu->queue);
queue             129 drivers/gpu/drm/nouveau/nvkm/subdev/secboot/ls_ucode_msgqueue.c 	ret = acr_ls_msgqueue_post_run(pmu->queue, pmu->falcon, addr_args);
queue             152 drivers/gpu/drm/nouveau/nvkm/subdev/secboot/ls_ucode_msgqueue.c 				sb, &sec->queue);
queue             169 drivers/gpu/drm/nouveau/nvkm/subdev/secboot/ls_ucode_msgqueue.c 	ret = acr_ls_msgqueue_post_run(sec->queue, sec->falcon, addr_args);
queue              34 drivers/gpu/drm/panfrost/panfrost_job.c 	struct panfrost_queue_state queue[NUM_JOB_SLOTS];
queue              49 drivers/gpu/drm/panfrost/panfrost_job.c 	int queue;
queue              67 drivers/gpu/drm/panfrost/panfrost_job.c 	switch (f->queue) {
queue              94 drivers/gpu/drm/panfrost/panfrost_job.c 	fence->queue = js_num;
queue              95 drivers/gpu/drm/panfrost/panfrost_job.c 	fence->seqno = ++js->queue[js_num].emit_seqno;
queue              97 drivers/gpu/drm/panfrost/panfrost_job.c 		       js->queue[js_num].fence_context, fence->seqno);
queue             404 drivers/gpu/drm/panfrost/panfrost_job.c 		struct drm_gpu_scheduler *sched = &pfdev->js->queue[i].sched;
queue             429 drivers/gpu/drm/panfrost/panfrost_job.c 		drm_sched_resubmit_jobs(&pfdev->js->queue[i].sched);
queue             433 drivers/gpu/drm/panfrost/panfrost_job.c 		drm_sched_start(&pfdev->js->queue[i].sched, true);
queue             475 drivers/gpu/drm/panfrost/panfrost_job.c 			drm_sched_fault(&pfdev->js->queue[j].sched);
queue             525 drivers/gpu/drm/panfrost/panfrost_job.c 		js->queue[j].fence_context = dma_fence_context_alloc(1);
queue             527 drivers/gpu/drm/panfrost/panfrost_job.c 		ret = drm_sched_init(&js->queue[j].sched,
queue             543 drivers/gpu/drm/panfrost/panfrost_job.c 		drm_sched_fini(&js->queue[j].sched);
queue             556 drivers/gpu/drm/panfrost/panfrost_job.c 		drm_sched_fini(&js->queue[j].sched);
queue             568 drivers/gpu/drm/panfrost/panfrost_job.c 		rq = &js->queue[i].sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL];
queue             591 drivers/gpu/drm/panfrost/panfrost_job.c 		if (atomic_read(&js->queue[i].sched.hw_rq_count))
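In the panfrost lines above, every job slot carries its own fence_context (allocated once with dma_fence_context_alloc()) and its own emit_seqno counter, so fences are ordered only within a slot, never across slots. A sketch of that per-queue numbering, with invented names and plain integers in place of struct dma_fence:

    #include <stdint.h>
    #include <stdio.h>

    #define NUM_SLOTS 3    /* stands in for NUM_JOB_SLOTS */

    struct sketch_slot {
        uint64_t fence_context;     /* unique id per queue */
        uint64_t emit_seqno;        /* last issued sequence number */
    };

    struct sketch_fence {
        uint64_t context;
        uint64_t seqno;
        int queue;
    };

    static struct sketch_slot slots[NUM_SLOTS];

    static struct sketch_fence fence_create(int slot)
    {
        return (struct sketch_fence){
            .context = slots[slot].fence_context,
            .seqno   = ++slots[slot].emit_seqno, /* pre-increment, as above */
            .queue   = slot,
        };
    }

    int main(void)
    {
        for (int i = 0; i < NUM_SLOTS; i++)
            slots[i].fence_context = 100 + i;   /* stand-in for
                                                   dma_fence_context_alloc() */

        struct sketch_fence f = fence_create(1);

        printf("slot %d fence %llu:%llu\n", f.queue,
               (unsigned long long)f.context,
               (unsigned long long)f.seqno);
        return 0;
    }

v3d_fence_create() later in the listing follows exactly the same shape, one context and seqno counter per v3d_queue.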
queue            1855 drivers/gpu/drm/radeon/cik.c 			    u32 me, u32 pipe, u32 queue, u32 vmid)
queue            1860 drivers/gpu/drm/radeon/cik.c 			     QUEUEID(queue & 0x7));
queue            4162 drivers/gpu/drm/radeon/cik.c 		cik_srbm_select(rdev, ring->me, ring->pipe, ring->queue, 0);
queue            4181 drivers/gpu/drm/radeon/cik.c 		cik_srbm_select(rdev, ring->me, ring->pipe, ring->queue, 0);
queue            4203 drivers/gpu/drm/radeon/cik.c 	cik_srbm_select(rdev, ring->me, ring->pipe, ring->queue, 0);
queue            4621 drivers/gpu/drm/radeon/cik.c 				rdev->ring[idx].queue, 0);
queue            8431 drivers/gpu/drm/radeon/cik.c 	ring->queue = 0; /* first queue */
queue            8443 drivers/gpu/drm/radeon/cik.c 	ring->queue = 1; /* second queue */
queue             858 drivers/gpu/drm/radeon/radeon.h 	u32 queue;
queue             150 drivers/gpu/drm/v3d/v3d_drv.c 		rq = &v3d->queue[i].sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL];
queue              88 drivers/gpu/drm/v3d/v3d_drv.h 	struct v3d_queue_state queue[V3D_MAX_QUEUES];
queue             162 drivers/gpu/drm/v3d/v3d_drv.h 	enum v3d_queue queue;
queue             308 drivers/gpu/drm/v3d/v3d_drv.h struct dma_fence *v3d_fence_create(struct v3d_dev *v3d, enum v3d_queue queue);
queue               6 drivers/gpu/drm/v3d/v3d_fence.c struct dma_fence *v3d_fence_create(struct v3d_dev *v3d, enum v3d_queue queue)
queue              15 drivers/gpu/drm/v3d/v3d_fence.c 	fence->queue = queue;
queue              16 drivers/gpu/drm/v3d/v3d_fence.c 	fence->seqno = ++v3d->queue[queue].emit_seqno;
queue              18 drivers/gpu/drm/v3d/v3d_fence.c 		       v3d->queue[queue].fence_context, fence->seqno);
queue              32 drivers/gpu/drm/v3d/v3d_fence.c 	switch (f->queue) {
queue             467 drivers/gpu/drm/v3d/v3d_gem.c 	     struct v3d_job *job, enum v3d_queue queue)
queue             471 drivers/gpu/drm/v3d/v3d_gem.c 	ret = drm_sched_job_init(&job->base, &v3d_priv->sched_entity[queue],
queue             481 drivers/gpu/drm/v3d/v3d_gem.c 	drm_sched_entity_push_job(&job->base, &v3d_priv->sched_entity[queue]);
queue             835 drivers/gpu/drm/v3d/v3d_gem.c 		v3d->queue[i].fence_context = dma_fence_context_alloc(1);
queue             271 drivers/gpu/drm/v3d/v3d_sched.c 		drm_sched_stop(&v3d->queue[q].sched, sched_job);
queue             280 drivers/gpu/drm/v3d/v3d_sched.c 		drm_sched_resubmit_jobs(&v3d->queue[q].sched);
queue             284 drivers/gpu/drm/v3d/v3d_sched.c 		drm_sched_start(&v3d->queue[q].sched, true);
queue             400 drivers/gpu/drm/v3d/v3d_sched.c 	ret = drm_sched_init(&v3d->queue[V3D_BIN].sched,
queue             410 drivers/gpu/drm/v3d/v3d_sched.c 	ret = drm_sched_init(&v3d->queue[V3D_RENDER].sched,
queue             422 drivers/gpu/drm/v3d/v3d_sched.c 	ret = drm_sched_init(&v3d->queue[V3D_TFU].sched,
queue             435 drivers/gpu/drm/v3d/v3d_sched.c 		ret = drm_sched_init(&v3d->queue[V3D_CSD].sched,
queue             447 drivers/gpu/drm/v3d/v3d_sched.c 		ret = drm_sched_init(&v3d->queue[V3D_CACHE_CLEAN].sched,
queue             469 drivers/gpu/drm/v3d/v3d_sched.c 		if (v3d->queue[q].sched.ready)
queue             470 drivers/gpu/drm/v3d/v3d_sched.c 			drm_sched_fini(&v3d->queue[q].sched);
queue             395 drivers/gpu/drm/via/via_dmablit.c via_dmablit_active(drm_via_blitq_t *blitq, int engine, uint32_t handle, wait_queue_head_t **queue)
queue             410 drivers/gpu/drm/via/via_dmablit.c 	if (queue && active) {
queue             414 drivers/gpu/drm/via/via_dmablit.c 		*queue = blitq->blit_queue + slot;
queue             432 drivers/gpu/drm/via/via_dmablit.c 	wait_queue_head_t *queue;
queue             435 drivers/gpu/drm/via/via_dmablit.c 	if (via_dmablit_active(blitq, engine, handle, &queue)) {
queue             436 drivers/gpu/drm/via/via_dmablit.c 		VIA_WAIT_ON(ret, *queue, 3 * HZ,
queue             162 drivers/gpu/drm/via/via_drv.h #define VIA_WAIT_ON( ret, queue, timeout, condition )		\
queue             166 drivers/gpu/drm/via/via_drv.h 	add_wait_queue(&(queue), &entry);			\
queue             183 drivers/gpu/drm/via/via_drv.h 	remove_wait_queue(&(queue), &entry);			\
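VIA_WAIT_ON() above is the classic open-coded wait loop: add an entry to a wait_queue_head_t, sleep until either the condition holds or the timeout expires, then remove the entry. A userspace sketch of the same sleep-and-recheck shape, with pthreads standing in for the kernel wait queue (illustrative only; build with -lpthread):

    #include <pthread.h>
    #include <stdbool.h>
    #include <stdio.h>
    #include <time.h>

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
    static bool done;

    /* Returns 0 on success, -1 on timeout (the macro returns -EBUSY). */
    static int wait_on(int timeout_sec)
    {
        struct timespec ts;
        int ret = 0;

        clock_gettime(CLOCK_REALTIME, &ts);
        ts.tv_sec += timeout_sec;

        pthread_mutex_lock(&lock);
        while (!done && ret == 0)   /* re-check after every wakeup */
            ret = pthread_cond_timedwait(&cond, &lock, &ts);
        pthread_mutex_unlock(&lock);
        return done ? 0 : -1;
    }

    static void *completer(void *arg)
    {
        (void)arg;
        pthread_mutex_lock(&lock);
        done = true;
        pthread_cond_broadcast(&cond);  /* kernel side: wake_up() */
        pthread_mutex_unlock(&lock);
        return NULL;
    }

    int main(void)
    {
        pthread_t t;

        pthread_create(&t, NULL, completer, NULL);
        printf("wait_on -> %d\n", wait_on(3));
        pthread_join(t, NULL);
        return 0;
    }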
queue              48 drivers/gpu/drm/vmwgfx/ttm_lock.c 	init_waitqueue_head(&lock->queue);
queue              57 drivers/gpu/drm/vmwgfx/ttm_lock.c 		wake_up_all(&lock->queue);
queue              79 drivers/gpu/drm/vmwgfx/ttm_lock.c 		ret = wait_event_interruptible(lock->queue,
queue              82 drivers/gpu/drm/vmwgfx/ttm_lock.c 		wait_event(lock->queue, __ttm_read_lock(lock));
queue             112 drivers/gpu/drm/vmwgfx/ttm_lock.c 			(lock->queue, __ttm_read_trylock(lock, &locked));
queue             114 drivers/gpu/drm/vmwgfx/ttm_lock.c 		wait_event(lock->queue, __ttm_read_trylock(lock, &locked));
queue             128 drivers/gpu/drm/vmwgfx/ttm_lock.c 	wake_up_all(&lock->queue);
queue             153 drivers/gpu/drm/vmwgfx/ttm_lock.c 		ret = wait_event_interruptible(lock->queue,
queue             158 drivers/gpu/drm/vmwgfx/ttm_lock.c 			wake_up_all(&lock->queue);
queue             162 drivers/gpu/drm/vmwgfx/ttm_lock.c 		wait_event(lock->queue, __ttm_write_lock(lock));
queue             171 drivers/gpu/drm/vmwgfx/ttm_lock.c 	wake_up_all(&lock->queue);
queue             193 drivers/gpu/drm/vmwgfx/ttm_lock.c 	wait_event(lock->queue, __ttm_suspend_lock(lock));
queue              70 drivers/gpu/drm/vmwgfx/ttm_lock.h 	wait_queue_head_t queue;
queue            1049 drivers/gpu/drm/vmwgfx/vmwgfx_drv.h extern void vmw_marker_queue_init(struct vmw_marker_queue *queue);
queue            1050 drivers/gpu/drm/vmwgfx/vmwgfx_drv.h extern void vmw_marker_queue_takedown(struct vmw_marker_queue *queue);
queue            1051 drivers/gpu/drm/vmwgfx/vmwgfx_drv.h extern int vmw_marker_push(struct vmw_marker_queue *queue,
queue            1053 drivers/gpu/drm/vmwgfx/vmwgfx_drv.h extern int vmw_marker_pull(struct vmw_marker_queue *queue,
queue            1056 drivers/gpu/drm/vmwgfx/vmwgfx_drv.h 			struct vmw_marker_queue *queue, uint32_t us);
queue              37 drivers/gpu/drm/vmwgfx/vmwgfx_marker.c void vmw_marker_queue_init(struct vmw_marker_queue *queue)
queue              39 drivers/gpu/drm/vmwgfx/vmwgfx_marker.c 	INIT_LIST_HEAD(&queue->head);
queue              40 drivers/gpu/drm/vmwgfx/vmwgfx_marker.c 	queue->lag = 0;
queue              41 drivers/gpu/drm/vmwgfx/vmwgfx_marker.c 	queue->lag_time = ktime_get_raw_ns();
queue              42 drivers/gpu/drm/vmwgfx/vmwgfx_marker.c 	spin_lock_init(&queue->lock);
queue              45 drivers/gpu/drm/vmwgfx/vmwgfx_marker.c void vmw_marker_queue_takedown(struct vmw_marker_queue *queue)
queue              49 drivers/gpu/drm/vmwgfx/vmwgfx_marker.c 	spin_lock(&queue->lock);
queue              50 drivers/gpu/drm/vmwgfx/vmwgfx_marker.c 	list_for_each_entry_safe(marker, next, &queue->head, head) {
queue              53 drivers/gpu/drm/vmwgfx/vmwgfx_marker.c 	spin_unlock(&queue->lock);
queue              56 drivers/gpu/drm/vmwgfx/vmwgfx_marker.c int vmw_marker_push(struct vmw_marker_queue *queue,
queue              66 drivers/gpu/drm/vmwgfx/vmwgfx_marker.c 	spin_lock(&queue->lock);
queue              67 drivers/gpu/drm/vmwgfx/vmwgfx_marker.c 	list_add_tail(&marker->head, &queue->head);
queue              68 drivers/gpu/drm/vmwgfx/vmwgfx_marker.c 	spin_unlock(&queue->lock);
queue              73 drivers/gpu/drm/vmwgfx/vmwgfx_marker.c int vmw_marker_pull(struct vmw_marker_queue *queue,
queue              80 drivers/gpu/drm/vmwgfx/vmwgfx_marker.c 	spin_lock(&queue->lock);
queue              83 drivers/gpu/drm/vmwgfx/vmwgfx_marker.c 	if (list_empty(&queue->head)) {
queue              84 drivers/gpu/drm/vmwgfx/vmwgfx_marker.c 		queue->lag = 0;
queue              85 drivers/gpu/drm/vmwgfx/vmwgfx_marker.c 		queue->lag_time = now;
queue              90 drivers/gpu/drm/vmwgfx/vmwgfx_marker.c 	list_for_each_entry_safe(marker, next, &queue->head, head) {
queue              94 drivers/gpu/drm/vmwgfx/vmwgfx_marker.c 		queue->lag = now - marker->submitted;
queue              95 drivers/gpu/drm/vmwgfx/vmwgfx_marker.c 		queue->lag_time = now;
queue             102 drivers/gpu/drm/vmwgfx/vmwgfx_marker.c 	spin_unlock(&queue->lock);
queue             107 drivers/gpu/drm/vmwgfx/vmwgfx_marker.c static u64 vmw_fifo_lag(struct vmw_marker_queue *queue)
queue             111 drivers/gpu/drm/vmwgfx/vmwgfx_marker.c 	spin_lock(&queue->lock);
queue             113 drivers/gpu/drm/vmwgfx/vmwgfx_marker.c 	queue->lag += now - queue->lag_time;
queue             114 drivers/gpu/drm/vmwgfx/vmwgfx_marker.c 	queue->lag_time = now;
queue             115 drivers/gpu/drm/vmwgfx/vmwgfx_marker.c 	spin_unlock(&queue->lock);
queue             116 drivers/gpu/drm/vmwgfx/vmwgfx_marker.c 	return queue->lag;
queue             120 drivers/gpu/drm/vmwgfx/vmwgfx_marker.c static bool vmw_lag_lt(struct vmw_marker_queue *queue,
queue             125 drivers/gpu/drm/vmwgfx/vmwgfx_marker.c 	return vmw_fifo_lag(queue) <= cond;
queue             129 drivers/gpu/drm/vmwgfx/vmwgfx_marker.c 		 struct vmw_marker_queue *queue, uint32_t us)
queue             135 drivers/gpu/drm/vmwgfx/vmwgfx_marker.c 	while (!vmw_lag_lt(queue, us)) {
queue             136 drivers/gpu/drm/vmwgfx/vmwgfx_marker.c 		spin_lock(&queue->lock);
queue             137 drivers/gpu/drm/vmwgfx/vmwgfx_marker.c 		if (list_empty(&queue->head))
queue             140 drivers/gpu/drm/vmwgfx/vmwgfx_marker.c 			marker = list_first_entry(&queue->head,
queue             144 drivers/gpu/drm/vmwgfx/vmwgfx_marker.c 		spin_unlock(&queue->lock);
queue             152 drivers/gpu/drm/vmwgfx/vmwgfx_marker.c 		(void) vmw_marker_pull(queue, seqno);
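vmwgfx_marker.c above measures how far the FIFO trails software: each pushed marker records its submission time, and when a seqno signals, vmw_marker_pull() retires every marker at or before it and sets the queue's lag to now minus the newest retired marker's timestamp; an empty queue means zero lag. A sketch of that accounting, with an array replacing the kernel list and the wrap-safe seqno comparison of the original simplified to <=:

    #include <stdint.h>
    #include <stdio.h>

    struct sketch_marker { uint32_t seqno; uint64_t submitted; };

    struct sketch_markerq {
        struct sketch_marker m[8];
        int count;
        uint64_t lag;       /* ns the hardware trails submissions */
        uint64_t lag_time;  /* when lag was last updated */
    };

    static void marker_pull(struct sketch_markerq *q, uint32_t signaled,
                            uint64_t now)
    {
        int keep = 0;

        if (q->count == 0) {            /* nothing in flight: no lag */
            q->lag = 0;
            q->lag_time = now;
            return;
        }
        for (int i = 0; i < q->count; i++) {
            if (q->m[i].seqno <= signaled) {    /* retired */
                q->lag = now - q->m[i].submitted;
                q->lag_time = now;
            } else {
                q->m[keep++] = q->m[i];
            }
        }
        q->count = keep;
    }

    int main(void)
    {
        struct sketch_markerq q = { .count = 2,
            .m = { { 1, 100 }, { 2, 150 } } };

        marker_pull(&q, 1, 400);
        printf("lag %llu, pending %d\n",
               (unsigned long long)q.lag, q.count);
        return 0;
    }

vmw_wait_lag() then spins on vmw_lag_lt(), waiting on the oldest marker's seqno until the accumulated lag drops below the caller's threshold.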
queue              37 drivers/gpu/host1x/intr.c 				struct list_head *queue)
queue              42 drivers/gpu/host1x/intr.c 	list_for_each_entry_reverse(pos, queue, list)
queue              48 drivers/gpu/host1x/intr.c 	list_add(&waiter->list, queue);
queue              43 drivers/hid/hid-wiimote-core.c 	struct wiimote_queue *queue = container_of(work, struct wiimote_queue,
queue              45 drivers/hid/hid-wiimote-core.c 	struct wiimote_data *wdata = container_of(queue, struct wiimote_data,
queue              46 drivers/hid/hid-wiimote-core.c 						  queue);
queue              50 drivers/hid/hid-wiimote-core.c 	spin_lock_irqsave(&wdata->queue.lock, flags);
queue              52 drivers/hid/hid-wiimote-core.c 	while (wdata->queue.head != wdata->queue.tail) {
queue              53 drivers/hid/hid-wiimote-core.c 		spin_unlock_irqrestore(&wdata->queue.lock, flags);
queue              55 drivers/hid/hid-wiimote-core.c 				 wdata->queue.outq[wdata->queue.tail].data,
queue              56 drivers/hid/hid-wiimote-core.c 				 wdata->queue.outq[wdata->queue.tail].size);
queue              62 drivers/hid/hid-wiimote-core.c 		spin_lock_irqsave(&wdata->queue.lock, flags);
queue              64 drivers/hid/hid-wiimote-core.c 		wdata->queue.tail = (wdata->queue.tail + 1) % WIIMOTE_BUFSIZE;
queue              67 drivers/hid/hid-wiimote-core.c 	spin_unlock_irqrestore(&wdata->queue.lock, flags);
queue              79 drivers/hid/hid-wiimote-core.c 		spin_lock_irqsave(&wdata->queue.lock, flags);
queue              93 drivers/hid/hid-wiimote-core.c 	spin_lock_irqsave(&wdata->queue.lock, flags);
queue              95 drivers/hid/hid-wiimote-core.c 	memcpy(wdata->queue.outq[wdata->queue.head].data, buffer, count);
queue              96 drivers/hid/hid-wiimote-core.c 	wdata->queue.outq[wdata->queue.head].size = count;
queue              97 drivers/hid/hid-wiimote-core.c 	newhead = (wdata->queue.head + 1) % WIIMOTE_BUFSIZE;
queue              99 drivers/hid/hid-wiimote-core.c 	if (wdata->queue.head == wdata->queue.tail) {
queue             100 drivers/hid/hid-wiimote-core.c 		wdata->queue.head = newhead;
queue             101 drivers/hid/hid-wiimote-core.c 		schedule_work(&wdata->queue.worker);
queue             102 drivers/hid/hid-wiimote-core.c 	} else if (newhead != wdata->queue.tail) {
queue             103 drivers/hid/hid-wiimote-core.c 		wdata->queue.head = newhead;
queue             114 drivers/hid/hid-wiimote-core.c 	spin_unlock_irqrestore(&wdata->queue.lock, flags);
queue            1744 drivers/hid/hid-wiimote-core.c 	spin_lock_init(&wdata->queue.lock);
queue            1745 drivers/hid/hid-wiimote-core.c 	INIT_WORK(&wdata->queue.worker, wiimote_queue_worker);
queue            1779 drivers/hid/hid-wiimote-core.c 	cancel_work_sync(&wdata->queue.worker);
queue             160 drivers/hid/hid-wiimote.h 	struct wiimote_queue queue;
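The wiimote output path above is a fixed-size single-producer ring: head and tail indices advance modulo WIIMOTE_BUFSIZE, head == tail means empty, and an insert that would make head collide with tail is dropped. A compilable sketch of those index rules (locking and the worker that drains the ring are omitted; BUFSIZE is invented):

    #include <stdbool.h>
    #include <stdio.h>

    #define BUFSIZE 4   /* stands in for WIIMOTE_BUFSIZE */

    struct sketch_ring {
        int slot[BUFSIZE];
        unsigned int head, tail;
    };

    static bool ring_push(struct sketch_ring *r, int v)
    {
        unsigned int newhead = (r->head + 1) % BUFSIZE;

        if (newhead == r->tail)
            return false;       /* full: the driver drops the report */
        r->slot[r->head] = v;   /* write payload before moving head */
        r->head = newhead;
        return true;
    }

    static bool ring_pop(struct sketch_ring *r, int *v)
    {
        if (r->head == r->tail)
            return false;       /* empty */
        *v = r->slot[r->tail];
        r->tail = (r->tail + 1) % BUFSIZE;
        return true;
    }

    int main(void)
    {
        struct sketch_ring r = { .head = 0, .tail = 0 };
        int v;

        ring_push(&r, 7);
        ring_push(&r, 8);
        while (ring_pop(&r, &v))
            printf("sent %d\n", v);
        return 0;
    }

In the driver itself the producer also schedules the drain worker only when the ring was empty before the insert, since a non-empty ring means the worker is already pending.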
queue             117 drivers/hsi/clients/hsi_char.c 							struct list_head *queue)
queue             122 drivers/hsi/clients/hsi_char.c 	list_add_tail(&msg->link, queue);
queue             127 drivers/hsi/clients/hsi_char.c 							struct list_head *queue)
queue             134 drivers/hsi/clients/hsi_char.c 	if (list_empty(queue))
queue             137 drivers/hsi/clients/hsi_char.c 	msg = list_first_entry(queue, struct hsi_msg, link);
queue             320 drivers/hsi/controllers/omap_ssi_port.c static int ssi_start_transfer(struct list_head *queue)
queue             325 drivers/hsi/controllers/omap_ssi_port.c 	if (list_empty(queue))
queue             327 drivers/hsi/controllers/omap_ssi_port.c 	msg = list_first_entry(queue, struct hsi_msg, link);
queue             381 drivers/hsi/controllers/omap_ssi_port.c 	struct list_head *queue;
queue             394 drivers/hsi/controllers/omap_ssi_port.c 		queue = &omap_port->txqueue[msg->channel];
queue             397 drivers/hsi/controllers/omap_ssi_port.c 		queue = &omap_port->rxqueue[msg->channel];
queue             403 drivers/hsi/controllers/omap_ssi_port.c 	list_add_tail(&msg->link, queue);
queue             404 drivers/hsi/controllers/omap_ssi_port.c 	err = ssi_start_transfer(queue);
queue             434 drivers/hsi/controllers/omap_ssi_port.c static void ssi_flush_queue(struct list_head *queue, struct hsi_client *cl)
queue             439 drivers/hsi/controllers/omap_ssi_port.c 	list_for_each_safe(node, tmp, queue) {
queue             642 drivers/hsi/controllers/omap_ssi_port.c 							struct list_head *queue)
queue             650 drivers/hsi/controllers/omap_ssi_port.c 		err = ssi_start_transfer(queue);
queue             652 drivers/hsi/controllers/omap_ssi_port.c 			msg = list_first_entry(queue, struct hsi_msg, link);
queue             893 drivers/hsi/controllers/omap_ssi_port.c static void ssi_pio_complete(struct hsi_port *port, struct list_head *queue)
queue             904 drivers/hsi/controllers/omap_ssi_port.c 	msg = list_first_entry(queue, struct hsi_msg, link);
queue             954 drivers/hsi/controllers/omap_ssi_port.c 	ssi_transfer(omap_port, queue);
queue             191 drivers/i2c/busses/i2c-imx.c 	wait_queue_head_t	queue;
queue             455 drivers/i2c/busses/i2c-imx.c 	wait_event_timeout(i2c_imx->queue, i2c_imx->i2csr & I2SR_IIF, HZ / 10);
queue             603 drivers/i2c/busses/i2c-imx.c 		wake_up(&i2c_imx->queue);
queue            1124 drivers/i2c/busses/i2c-imx.c 	init_waitqueue_head(&i2c_imx->queue);
queue              65 drivers/i2c/busses/i2c-mpc.c 	wait_queue_head_t queue;
queue              96 drivers/i2c/busses/i2c-mpc.c 		wake_up(&i2c->queue);
queue             144 drivers/i2c/busses/i2c-mpc.c 		result = wait_event_timeout(i2c->queue,
queue             674 drivers/i2c/busses/i2c-mpc.c 	init_waitqueue_head(&i2c->queue);
queue              29 drivers/i2c/busses/i2c-octeon-core.c 	wake_up(&i2c->queue);
queue              64 drivers/i2c/busses/i2c-octeon-core.c 	time_left = wait_event_timeout(i2c->queue, octeon_i2c_test_iflg(i2c),
queue             158 drivers/i2c/busses/i2c-octeon-core.c 	time_left = wait_event_timeout(i2c->queue,
queue             102 drivers/i2c/busses/i2c-octeon-core.h 	wait_queue_head_t queue;
queue             108 drivers/i2c/busses/i2c-octeon-platdrv.c 	wake_up(&i2c->queue);
queue             192 drivers/i2c/busses/i2c-octeon-platdrv.c 	init_waitqueue_head(&i2c->queue);
queue             183 drivers/i2c/busses/i2c-thunderx-pcidrv.c 	init_waitqueue_head(&i2c->queue);
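These i2c bus drivers share one handshake: the ISR records completion (for i2c-imx, the I2SR_IIF flag) and calls wake_up() on a wait_queue_head_t, while the transfer path blocks in wait_event_timeout() until the condition holds or the timeout lapses. A sketch of that producer/consumer agreement using C11 atomics and a polling deadline in place of the kernel primitives (names invented):

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdio.h>
    #include <time.h>

    static atomic_bool irq_seen;

    /* ISR side: record the event and "wake" the waiter. */
    static void sketch_isr(void)
    {
        atomic_store(&irq_seen, true);
    }

    /* Waiter side: returns true if the event arrived in time. */
    static bool sketch_wait(double timeout_sec)
    {
        struct timespec start, now;

        clock_gettime(CLOCK_MONOTONIC, &start);
        for (;;) {
            if (atomic_load(&irq_seen))
                return true;
            clock_gettime(CLOCK_MONOTONIC, &now);
            double elapsed = (now.tv_sec - start.tv_sec) +
                             (now.tv_nsec - start.tv_nsec) / 1e9;
            if (elapsed > timeout_sec)
                return false;   /* like wait_event_timeout() == 0 */
        }
    }

    int main(void)
    {
        sketch_isr();            /* pretend the controller fired */
        printf("completed: %d\n", sketch_wait(0.1));
        return 0;
    }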
queue              96 drivers/ide/ide-atapi.c 	rq = blk_get_request(drive->queue, REQ_OP_DRV_IN, 0);
queue             101 drivers/ide/ide-atapi.c 		error = blk_rq_map_kern(drive->queue, rq, buf, bufflen,
queue             110 drivers/ide/ide-atapi.c 	blk_execute_rq(drive->queue, disk, rq, 0);
queue             202 drivers/ide/ide-atapi.c 		sense_rq = blk_mq_alloc_request(drive->queue, REQ_OP_DRV_IN,
queue             212 drivers/ide/ide-atapi.c 	err = blk_rq_map_kern(drive->queue, sense_rq, sense, sense_len,
queue             187 drivers/ide/ide-cd.c 			if (queue_logical_block_size(drive->queue) == 2048)
queue             262 drivers/ide/ide-cd.c 		blk_mq_delay_kick_requeue_list(drive->queue, 1);
queue             454 drivers/ide/ide-cd.c 		rq = blk_get_request(drive->queue,
queue             461 drivers/ide/ide-cd.c 			error = blk_rq_map_kern(drive->queue, rq, buffer,
queue             469 drivers/ide/ide-cd.c 		blk_execute_rq(drive->queue, info->disk, rq, 0);
queue             599 drivers/ide/ide-cd.c 		return ide_cdrom_prep_fs(drive->queue, rq);
queue             803 drivers/ide/ide-cd.c 	struct request_queue *q = drive->queue;
queue             855 drivers/ide/ide-cd.c 		struct request_queue *q = drive->queue;
queue            1102 drivers/ide/ide-cd.c 	blk_queue_logical_block_size(drive->queue,
queue            1517 drivers/ide/ide-cd.c 	struct request_queue *q = drive->queue;
queue             299 drivers/ide/ide-cd_ioctl.c 	rq = blk_get_request(drive->queue, REQ_OP_DRV_IN, 0);
queue             302 drivers/ide/ide-cd_ioctl.c 	blk_execute_rq(drive->queue, cd->disk, rq, 0);
queue             162 drivers/ide/ide-devsets.c 	struct request_queue *q = drive->queue;
queue             480 drivers/ide/ide-disk.c 	rq = blk_get_request(drive->queue, REQ_OP_DRV_IN, 0);
queue             485 drivers/ide/ide-disk.c 	blk_execute_rq(drive->queue, NULL, rq, 0);
queue             554 drivers/ide/ide-disk.c 	blk_queue_write_cache(drive->queue, wc, false);
queue             655 drivers/ide/ide-disk.c 	struct request_queue *q = drive->queue;
queue             501 drivers/ide/ide-floppy.c 		blk_queue_max_hw_sectors(drive->queue, 64);
queue             509 drivers/ide/ide-floppy.c 		blk_queue_max_hw_sectors(drive->queue, 64);
queue             239 drivers/ide/ide-io.c 	cmd->sg_nents = blk_rq_map_sg(drive->queue, rq, sg);
queue             446 drivers/ide/ide-io.c 	struct request_queue *q = drive->queue;
queue             568 drivers/ide/ide-io.c 	ide_drive_t *drive = hctx->queue->queuedata;
queue             129 drivers/ide/ide-ioctls.c 		rq = blk_get_request(drive->queue, REQ_OP_DRV_IN, 0);
queue             131 drivers/ide/ide-ioctls.c 		blk_execute_rq(drive->queue, NULL, rq, 0);
queue             226 drivers/ide/ide-ioctls.c 	rq = blk_get_request(drive->queue, REQ_OP_DRV_IN, 0);
queue             230 drivers/ide/ide-ioctls.c 	blk_execute_rq(drive->queue, NULL, rq, 1);
queue              13 drivers/ide/ide-park.c 	struct request_queue *q = drive->queue;
queue              22 drivers/ide/ide-pm.c 	rq = blk_get_request(drive->queue, REQ_OP_DRV_IN, 0);
queue              30 drivers/ide/ide-pm.c 	blk_execute_rq(drive->queue, NULL, rq, 0);
queue              67 drivers/ide/ide-pm.c 	blk_mq_start_stopped_hw_queues(drive->queue, true);
queue              80 drivers/ide/ide-pm.c 	rq = blk_get_request(drive->queue, REQ_OP_DRV_IN, BLK_MQ_REQ_PREEMPT);
queue             202 drivers/ide/ide-pm.c 	struct request_queue *q = drive->queue;
queue             245 drivers/ide/ide-pm.c 		struct request_queue *q = drive->queue;
queue             821 drivers/ide/ide-probe.c 	drive->queue = q;
queue             972 drivers/ide/ide-probe.c 	disk->queue = drive->queue;
queue             986 drivers/ide/ide-probe.c 	blk_cleanup_queue(drive->queue);
queue             987 drivers/ide/ide-probe.c 	drive->queue = NULL;
queue            1166 drivers/ide/ide-probe.c 	blk_mq_quiesce_queue(drive->queue);
queue            1180 drivers/ide/ide-probe.c 	blk_mq_unquiesce_queue(drive->queue);
queue             857 drivers/ide/ide-tape.c 	rq = blk_get_request(drive->queue, REQ_OP_DRV_IN, 0);
queue             864 drivers/ide/ide-tape.c 		ret = blk_rq_map_kern(drive->queue, rq, tape->buf, size,
queue             870 drivers/ide/ide-tape.c 	blk_execute_rq(drive->queue, tape->disk, rq, 0);
queue             426 drivers/ide/ide-taskfile.c 	rq = blk_get_request(drive->queue,
queue             438 drivers/ide/ide-taskfile.c 		error = blk_rq_map_kern(drive->queue, rq, buf,
queue             447 drivers/ide/ide-taskfile.c 	blk_execute_rq(drive->queue, NULL, rq, 0);
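The drivers/ide lines above repeat one synchronous idiom: blk_get_request() allocates a request on drive->queue, blk_rq_map_kern() attaches a kernel buffer where needed, blk_execute_rq() runs it to completion, and the caller reads the result before putting the request back. A very reduced sketch of that shape, with plain functions modeling the block-layer calls (everything here is hypothetical):

    #include <stdio.h>
    #include <string.h>

    struct sketch_request {
        char buf[64];
        size_t len;
        int error;
    };

    /* Stand-in for blk_execute_rq(): run the request to completion. */
    static void execute_rq(struct sketch_request *rq)
    {
        /* a real driver would issue taskfile/packet commands here */
        memcpy(rq->buf, "identify-data", 14);
        rq->len = 14;
        rq->error = 0;
    }

    int main(void)
    {
        struct sketch_request rq = { .len = 0, .error = -1 };

        execute_rq(&rq);
        if (!rq.error)
            printf("got %zu bytes: %s\n", rq.len, rq.buf);
        return 0;
    }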
queue             101 drivers/iio/buffer/industrialio-buffer-dma.c 	dma_free_coherent(block->queue->dev, PAGE_ALIGN(block->size),
queue             104 drivers/iio/buffer/industrialio-buffer-dma.c 	iio_buffer_put(&block->queue->buffer);
queue             167 drivers/iio/buffer/industrialio-buffer-dma.c 	struct iio_dma_buffer_queue *queue, size_t size)
queue             175 drivers/iio/buffer/industrialio-buffer-dma.c 	block->vaddr = dma_alloc_coherent(queue->dev, PAGE_ALIGN(size),
queue             184 drivers/iio/buffer/industrialio-buffer-dma.c 	block->queue = queue;
queue             188 drivers/iio/buffer/industrialio-buffer-dma.c 	iio_buffer_get(&queue->buffer);
queue             195 drivers/iio/buffer/industrialio-buffer-dma.c 	struct iio_dma_buffer_queue *queue = block->queue;
queue             203 drivers/iio/buffer/industrialio-buffer-dma.c 		list_add_tail(&block->head, &queue->outgoing);
queue             216 drivers/iio/buffer/industrialio-buffer-dma.c 	struct iio_dma_buffer_queue *queue = block->queue;
queue             219 drivers/iio/buffer/industrialio-buffer-dma.c 	spin_lock_irqsave(&queue->list_lock, flags);
queue             221 drivers/iio/buffer/industrialio-buffer-dma.c 	spin_unlock_irqrestore(&queue->list_lock, flags);
queue             224 drivers/iio/buffer/industrialio-buffer-dma.c 	wake_up_interruptible_poll(&queue->buffer.pollq, EPOLLIN | EPOLLRDNORM);
queue             238 drivers/iio/buffer/industrialio-buffer-dma.c void iio_dma_buffer_block_list_abort(struct iio_dma_buffer_queue *queue,
queue             244 drivers/iio/buffer/industrialio-buffer-dma.c 	spin_lock_irqsave(&queue->list_lock, flags);
queue             251 drivers/iio/buffer/industrialio-buffer-dma.c 	spin_unlock_irqrestore(&queue->list_lock, flags);
queue             253 drivers/iio/buffer/industrialio-buffer-dma.c 	wake_up_interruptible_poll(&queue->buffer.pollq, EPOLLIN | EPOLLRDNORM);
queue             283 drivers/iio/buffer/industrialio-buffer-dma.c 	struct iio_dma_buffer_queue *queue = iio_buffer_to_queue(buffer);
queue             295 drivers/iio/buffer/industrialio-buffer-dma.c 	size = DIV_ROUND_UP(queue->buffer.bytes_per_datum *
queue             296 drivers/iio/buffer/industrialio-buffer-dma.c 		queue->buffer.length, 2);
queue             298 drivers/iio/buffer/industrialio-buffer-dma.c 	mutex_lock(&queue->lock);
queue             301 drivers/iio/buffer/industrialio-buffer-dma.c 	if (PAGE_ALIGN(queue->fileio.block_size) == PAGE_ALIGN(size))
queue             304 drivers/iio/buffer/industrialio-buffer-dma.c 	queue->fileio.block_size = size;
queue             305 drivers/iio/buffer/industrialio-buffer-dma.c 	queue->fileio.active_block = NULL;
queue             307 drivers/iio/buffer/industrialio-buffer-dma.c 	spin_lock_irq(&queue->list_lock);
queue             308 drivers/iio/buffer/industrialio-buffer-dma.c 	for (i = 0; i < ARRAY_SIZE(queue->fileio.blocks); i++) {
queue             309 drivers/iio/buffer/industrialio-buffer-dma.c 		block = queue->fileio.blocks[i];
queue             321 drivers/iio/buffer/industrialio-buffer-dma.c 	INIT_LIST_HEAD(&queue->outgoing);
queue             322 drivers/iio/buffer/industrialio-buffer-dma.c 	spin_unlock_irq(&queue->list_lock);
queue             324 drivers/iio/buffer/industrialio-buffer-dma.c 	INIT_LIST_HEAD(&queue->incoming);
queue             326 drivers/iio/buffer/industrialio-buffer-dma.c 	for (i = 0; i < ARRAY_SIZE(queue->fileio.blocks); i++) {
queue             327 drivers/iio/buffer/industrialio-buffer-dma.c 		if (queue->fileio.blocks[i]) {
queue             328 drivers/iio/buffer/industrialio-buffer-dma.c 			block = queue->fileio.blocks[i];
queue             341 drivers/iio/buffer/industrialio-buffer-dma.c 			block = iio_dma_buffer_alloc_block(queue, size);
queue             346 drivers/iio/buffer/industrialio-buffer-dma.c 			queue->fileio.blocks[i] = block;
queue             350 drivers/iio/buffer/industrialio-buffer-dma.c 		list_add_tail(&block->head, &queue->incoming);
queue             354 drivers/iio/buffer/industrialio-buffer-dma.c 	mutex_unlock(&queue->lock);
queue             360 drivers/iio/buffer/industrialio-buffer-dma.c static void iio_dma_buffer_submit_block(struct iio_dma_buffer_queue *queue,
queue             370 drivers/iio/buffer/industrialio-buffer-dma.c 	if (!queue->ops)
queue             375 drivers/iio/buffer/industrialio-buffer-dma.c 	ret = queue->ops->submit(queue, block);
queue             404 drivers/iio/buffer/industrialio-buffer-dma.c 	struct iio_dma_buffer_queue *queue = iio_buffer_to_queue(buffer);
queue             407 drivers/iio/buffer/industrialio-buffer-dma.c 	mutex_lock(&queue->lock);
queue             408 drivers/iio/buffer/industrialio-buffer-dma.c 	queue->active = true;
queue             409 drivers/iio/buffer/industrialio-buffer-dma.c 	list_for_each_entry_safe(block, _block, &queue->incoming, head) {
queue             411 drivers/iio/buffer/industrialio-buffer-dma.c 		iio_dma_buffer_submit_block(queue, block);
queue             413 drivers/iio/buffer/industrialio-buffer-dma.c 	mutex_unlock(&queue->lock);
queue             430 drivers/iio/buffer/industrialio-buffer-dma.c 	struct iio_dma_buffer_queue *queue = iio_buffer_to_queue(buffer);
queue             432 drivers/iio/buffer/industrialio-buffer-dma.c 	mutex_lock(&queue->lock);
queue             433 drivers/iio/buffer/industrialio-buffer-dma.c 	queue->active = false;
queue             435 drivers/iio/buffer/industrialio-buffer-dma.c 	if (queue->ops && queue->ops->abort)
queue             436 drivers/iio/buffer/industrialio-buffer-dma.c 		queue->ops->abort(queue);
queue             437 drivers/iio/buffer/industrialio-buffer-dma.c 	mutex_unlock(&queue->lock);
queue             443 drivers/iio/buffer/industrialio-buffer-dma.c static void iio_dma_buffer_enqueue(struct iio_dma_buffer_queue *queue,
queue             448 drivers/iio/buffer/industrialio-buffer-dma.c 	} else if (queue->active) {
queue             449 drivers/iio/buffer/industrialio-buffer-dma.c 		iio_dma_buffer_submit_block(queue, block);
queue             452 drivers/iio/buffer/industrialio-buffer-dma.c 		list_add_tail(&block->head, &queue->incoming);
queue             457 drivers/iio/buffer/industrialio-buffer-dma.c 	struct iio_dma_buffer_queue *queue)
queue             461 drivers/iio/buffer/industrialio-buffer-dma.c 	spin_lock_irq(&queue->list_lock);
queue             462 drivers/iio/buffer/industrialio-buffer-dma.c 	block = list_first_entry_or_null(&queue->outgoing, struct
queue             468 drivers/iio/buffer/industrialio-buffer-dma.c 	spin_unlock_irq(&queue->list_lock);
queue             485 drivers/iio/buffer/industrialio-buffer-dma.c 	struct iio_dma_buffer_queue *queue = iio_buffer_to_queue(buffer);
queue             492 drivers/iio/buffer/industrialio-buffer-dma.c 	mutex_lock(&queue->lock);
queue             494 drivers/iio/buffer/industrialio-buffer-dma.c 	if (!queue->fileio.active_block) {
queue             495 drivers/iio/buffer/industrialio-buffer-dma.c 		block = iio_dma_buffer_dequeue(queue);
queue             500 drivers/iio/buffer/industrialio-buffer-dma.c 		queue->fileio.pos = 0;
queue             501 drivers/iio/buffer/industrialio-buffer-dma.c 		queue->fileio.active_block = block;
queue             503 drivers/iio/buffer/industrialio-buffer-dma.c 		block = queue->fileio.active_block;
queue             507 drivers/iio/buffer/industrialio-buffer-dma.c 	if (n > block->bytes_used - queue->fileio.pos)
queue             508 drivers/iio/buffer/industrialio-buffer-dma.c 		n = block->bytes_used - queue->fileio.pos;
queue             510 drivers/iio/buffer/industrialio-buffer-dma.c 	if (copy_to_user(user_buffer, block->vaddr + queue->fileio.pos, n)) {
queue             515 drivers/iio/buffer/industrialio-buffer-dma.c 	queue->fileio.pos += n;
queue             517 drivers/iio/buffer/industrialio-buffer-dma.c 	if (queue->fileio.pos == block->bytes_used) {
queue             518 drivers/iio/buffer/industrialio-buffer-dma.c 		queue->fileio.active_block = NULL;
queue             519 drivers/iio/buffer/industrialio-buffer-dma.c 		iio_dma_buffer_enqueue(queue, block);
queue             525 drivers/iio/buffer/industrialio-buffer-dma.c 	mutex_unlock(&queue->lock);
queue             540 drivers/iio/buffer/industrialio-buffer-dma.c 	struct iio_dma_buffer_queue *queue = iio_buffer_to_queue(buf);
queue             551 drivers/iio/buffer/industrialio-buffer-dma.c 	mutex_lock(&queue->lock);
queue             552 drivers/iio/buffer/industrialio-buffer-dma.c 	if (queue->fileio.active_block)
queue             553 drivers/iio/buffer/industrialio-buffer-dma.c 		data_available += queue->fileio.active_block->size;
queue             555 drivers/iio/buffer/industrialio-buffer-dma.c 	spin_lock_irq(&queue->list_lock);
queue             556 drivers/iio/buffer/industrialio-buffer-dma.c 	list_for_each_entry(block, &queue->outgoing, head)
queue             558 drivers/iio/buffer/industrialio-buffer-dma.c 	spin_unlock_irq(&queue->list_lock);
queue             559 drivers/iio/buffer/industrialio-buffer-dma.c 	mutex_unlock(&queue->lock);
queue             611 drivers/iio/buffer/industrialio-buffer-dma.c int iio_dma_buffer_init(struct iio_dma_buffer_queue *queue,
queue             614 drivers/iio/buffer/industrialio-buffer-dma.c 	iio_buffer_init(&queue->buffer);
queue             615 drivers/iio/buffer/industrialio-buffer-dma.c 	queue->buffer.length = PAGE_SIZE;
queue             616 drivers/iio/buffer/industrialio-buffer-dma.c 	queue->buffer.watermark = queue->buffer.length / 2;
queue             617 drivers/iio/buffer/industrialio-buffer-dma.c 	queue->dev = dev;
queue             618 drivers/iio/buffer/industrialio-buffer-dma.c 	queue->ops = ops;
queue             620 drivers/iio/buffer/industrialio-buffer-dma.c 	INIT_LIST_HEAD(&queue->incoming);
queue             621 drivers/iio/buffer/industrialio-buffer-dma.c 	INIT_LIST_HEAD(&queue->outgoing);
queue             623 drivers/iio/buffer/industrialio-buffer-dma.c 	mutex_init(&queue->lock);
queue             624 drivers/iio/buffer/industrialio-buffer-dma.c 	spin_lock_init(&queue->list_lock);
queue             637 drivers/iio/buffer/industrialio-buffer-dma.c void iio_dma_buffer_exit(struct iio_dma_buffer_queue *queue)
queue             641 drivers/iio/buffer/industrialio-buffer-dma.c 	mutex_lock(&queue->lock);
queue             643 drivers/iio/buffer/industrialio-buffer-dma.c 	spin_lock_irq(&queue->list_lock);
queue             644 drivers/iio/buffer/industrialio-buffer-dma.c 	for (i = 0; i < ARRAY_SIZE(queue->fileio.blocks); i++) {
queue             645 drivers/iio/buffer/industrialio-buffer-dma.c 		if (!queue->fileio.blocks[i])
queue             647 drivers/iio/buffer/industrialio-buffer-dma.c 		queue->fileio.blocks[i]->state = IIO_BLOCK_STATE_DEAD;
queue             649 drivers/iio/buffer/industrialio-buffer-dma.c 	INIT_LIST_HEAD(&queue->outgoing);
queue             650 drivers/iio/buffer/industrialio-buffer-dma.c 	spin_unlock_irq(&queue->list_lock);
queue             652 drivers/iio/buffer/industrialio-buffer-dma.c 	INIT_LIST_HEAD(&queue->incoming);
queue             654 drivers/iio/buffer/industrialio-buffer-dma.c 	for (i = 0; i < ARRAY_SIZE(queue->fileio.blocks); i++) {
queue             655 drivers/iio/buffer/industrialio-buffer-dma.c 		if (!queue->fileio.blocks[i])
queue             657 drivers/iio/buffer/industrialio-buffer-dma.c 		iio_buffer_block_put(queue->fileio.blocks[i]);
queue             658 drivers/iio/buffer/industrialio-buffer-dma.c 		queue->fileio.blocks[i] = NULL;
queue             660 drivers/iio/buffer/industrialio-buffer-dma.c 	queue->fileio.active_block = NULL;
queue             661 drivers/iio/buffer/industrialio-buffer-dma.c 	queue->ops = NULL;
queue             663 drivers/iio/buffer/industrialio-buffer-dma.c 	mutex_unlock(&queue->lock);
queue             675 drivers/iio/buffer/industrialio-buffer-dma.c void iio_dma_buffer_release(struct iio_dma_buffer_queue *queue)
queue             677 drivers/iio/buffer/industrialio-buffer-dma.c 	mutex_destroy(&queue->lock);
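industrialio-buffer-dma.c above moves capture blocks between two lists: blocks wait on incoming until the DMA engine takes them, land on outgoing when the transfer completes (waking pollers), and are re-enqueued once read() drains them; iio_dma_buffer_exit() marks survivors IIO_BLOCK_STATE_DEAD so late completions are ignored. A sketch of that life cycle with an invented state enum in place of the kernel lists:

    #include <stdio.h>

    enum sketch_state { BLK_DEQUEUED, BLK_QUEUED, BLK_ACTIVE, BLK_DONE };

    struct sketch_block {
        enum sketch_state state;
        unsigned int bytes_used;
    };

    /* DMA completion: mark the block done and hand it to userspace. */
    static void block_done(struct sketch_block *b, unsigned int bytes)
    {
        b->bytes_used = bytes;
        b->state = BLK_DONE;        /* now on the "outgoing" list */
    }

    /* read() drained the block: recycle it for the next capture. */
    static void block_enqueue(struct sketch_block *b)
    {
        b->state = BLK_QUEUED;      /* back on the "incoming" list */
    }

    int main(void)
    {
        struct sketch_block b = { .state = BLK_QUEUED };

        b.state = BLK_ACTIVE;       /* submitted to the DMA engine */
        block_done(&b, 4096);
        printf("readable bytes: %u\n", b.bytes_used);
        block_enqueue(&b);
        return 0;
    }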
queue              31 drivers/iio/buffer/industrialio-buffer-dmaengine.c 	struct iio_dma_buffer_queue queue;
queue              43 drivers/iio/buffer/industrialio-buffer-dmaengine.c 	return container_of(buffer, struct dmaengine_buffer, queue.buffer);
queue              51 drivers/iio/buffer/industrialio-buffer-dmaengine.c 	spin_lock_irqsave(&block->queue->list_lock, flags);
queue              53 drivers/iio/buffer/industrialio-buffer-dmaengine.c 	spin_unlock_irqrestore(&block->queue->list_lock, flags);
queue              57 drivers/iio/buffer/industrialio-buffer-dmaengine.c static int iio_dmaengine_buffer_submit_block(struct iio_dma_buffer_queue *queue,
queue              61 drivers/iio/buffer/industrialio-buffer-dmaengine.c 		iio_buffer_to_dmaengine_buffer(&queue->buffer);
queue              82 drivers/iio/buffer/industrialio-buffer-dmaengine.c 	spin_lock_irq(&dmaengine_buffer->queue.list_lock);
queue              84 drivers/iio/buffer/industrialio-buffer-dmaengine.c 	spin_unlock_irq(&dmaengine_buffer->queue.list_lock);
queue              91 drivers/iio/buffer/industrialio-buffer-dmaengine.c static void iio_dmaengine_buffer_abort(struct iio_dma_buffer_queue *queue)
queue              94 drivers/iio/buffer/industrialio-buffer-dmaengine.c 		iio_buffer_to_dmaengine_buffer(&queue->buffer);
queue              97 drivers/iio/buffer/industrialio-buffer-dmaengine.c 	iio_dma_buffer_block_list_abort(queue, &dmaengine_buffer->active);
queue             105 drivers/iio/buffer/industrialio-buffer-dmaengine.c 	iio_dma_buffer_release(&dmaengine_buffer->queue);
queue             179 drivers/iio/buffer/industrialio-buffer-dmaengine.c 	iio_dma_buffer_init(&dmaengine_buffer->queue, chan->device->dev,
queue             182 drivers/iio/buffer/industrialio-buffer-dmaengine.c 	dmaengine_buffer->queue.buffer.access = &iio_dmaengine_buffer_ops;
queue             184 drivers/iio/buffer/industrialio-buffer-dmaengine.c 	return &dmaengine_buffer->queue.buffer;
queue             203 drivers/iio/buffer/industrialio-buffer-dmaengine.c 	iio_dma_buffer_exit(&dmaengine_buffer->queue);
queue             109 drivers/infiniband/hw/cxgb3/cxio_hal.c 		cqe = cq->queue + Q_PTR2IDX(rptr, cq->size_log2);
queue             170 drivers/infiniband/hw/cxgb3/cxio_hal.c 	cq->queue = dma_alloc_coherent(&(rdev_p->rnic_info.pdev->dev), size,
queue             172 drivers/infiniband/hw/cxgb3/cxio_hal.c 	if (!cq->queue) {
queue             279 drivers/infiniband/hw/cxgb3/cxio_hal.c 	wq->queue = dma_alloc_coherent(&(rdev_p->rnic_info.pdev->dev),
queue             282 drivers/infiniband/hw/cxgb3/cxio_hal.c 	if (!wq->queue)
queue             311 drivers/infiniband/hw/cxgb3/cxio_hal.c 			  * sizeof(struct t3_cqe) + 1, cq->queue,
queue             321 drivers/infiniband/hw/cxgb3/cxio_hal.c 			  * sizeof(union t3_wr), wq->queue,
queue             692 drivers/infiniband/hw/cxgb3/cxio_wr.h 	union t3_wr *queue;		/* DMA accessible memory */
queue             722 drivers/infiniband/hw/cxgb3/cxio_wr.h 	struct t3_cqe *queue;
queue             738 drivers/infiniband/hw/cxgb3/cxio_wr.h 		&cq->queue[1 << cq->size_log2])->cq_err;
queue             744 drivers/infiniband/hw/cxgb3/cxio_wr.h 	 &cq->queue[1 << cq->size_log2])->cq_err = 1;
queue             749 drivers/infiniband/hw/cxgb3/cxio_wr.h 	wq->queue->wq_in_err.err |= 1;
queue             754 drivers/infiniband/hw/cxgb3/cxio_wr.h 	wq->queue->wq_in_err.err |= 2;
queue             759 drivers/infiniband/hw/cxgb3/cxio_wr.h 	wq->queue->wq_in_err.err &= ~2;
queue             764 drivers/infiniband/hw/cxgb3/cxio_wr.h 	return !(wq->queue->wq_in_err.err & 2);
queue             771 drivers/infiniband/hw/cxgb3/cxio_wr.h 	cqe = cq->queue + (Q_PTR2IDX(cq->rptr, cq->size_log2));
queue             796 drivers/infiniband/hw/cxgb3/cxio_wr.h 	cqe = cq->queue + (Q_PTR2IDX(cq->rptr, cq->size_log2));
queue             178 drivers/infiniband/hw/cxgb3/iwch_provider.c 		mm->addr = virt_to_phys(chp->cq.queue);
queue             836 drivers/infiniband/hw/cxgb3/iwch_provider.c 		mm1->addr = virt_to_phys(qhp->wq.queue);
queue             175 drivers/infiniband/hw/cxgb3/iwch_qp.c 			wqe = (union t3_wr *)(wq->queue +
queue             386 drivers/infiniband/hw/cxgb3/iwch_qp.c 		wqe = (union t3_wr *) (qhp->wq.queue + idx);
queue             496 drivers/infiniband/hw/cxgb3/iwch_qp.c 		wqe = (union t3_wr *) (qhp->wq.queue + idx);
queue             804 drivers/infiniband/hw/cxgb3/iwch_qp.c 	union t3_wr *wqe = qhp->wq.queue;
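Throughout the cxgb3 lines above, ring slots are addressed as Q_PTR2IDX(ptr, size_log2): read/write pointers increase monotonically and are masked down to a power-of-two index, so wrap-around falls out of the mask with no branches. A sketch of that addressing (the macro body mirrors the usage above; the ring itself is illustrative):

    #include <stdint.h>
    #include <stdio.h>

    #define Q_PTR2IDX(ptr, size_log2)  ((ptr) & ((1u << (size_log2)) - 1))

    int main(void)
    {
        uint32_t size_log2 = 4;             /* 16-entry ring */
        uint32_t entries[1 << 4] = { 0 };
        uint32_t rptr = 0;

        for (uint32_t wptr = 0; wptr < 20; wptr++)  /* wraps once */
            entries[Q_PTR2IDX(wptr, size_log2)] = wptr;

        printf("slot for rptr 18: %u -> %u\n",
               Q_PTR2IDX(18, size_log2),
               entries[Q_PTR2IDX(rptr + 18, size_log2)]);
        return 0;
    }

Keeping full-width pointers and masking only at access time also lets such code tell a full ring from an empty one by comparing the raw pointers rather than the masked indices.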
queue              65 drivers/infiniband/hw/cxgb4/cq.c 			  cq->memsize, cq->queue,
queue              98 drivers/infiniband/hw/cxgb4/cq.c 	cq->queue = dma_alloc_coherent(&rdev->lldi.pdev->dev, cq->memsize,
queue             100 drivers/infiniband/hw/cxgb4/cq.c 	if (!cq->queue) {
queue             108 drivers/infiniband/hw/cxgb4/cq.c 		((u8 *)cq->queue + (cq->size - 1) *
queue             109 drivers/infiniband/hw/cxgb4/cq.c 		 (sizeof(*cq->queue) / 2)))->qp_err;
queue             112 drivers/infiniband/hw/cxgb4/cq.c 		((u8 *)cq->queue + (cq->size - 1) *
queue             113 drivers/infiniband/hw/cxgb4/cq.c 		 sizeof(*cq->queue)))->qp_err;
queue             174 drivers/infiniband/hw/cxgb4/cq.c 	dma_free_coherent(&rdev->lldi.pdev->dev, cq->memsize, cq->queue,
queue             486 drivers/infiniband/hw/cxgb4/cq.c 		srq->queue[srq->size].status.host_wq_pidx =
queue            1056 drivers/infiniband/hw/cxgb4/cq.c 			(sizeof(*chp->cq.queue) / 2) : sizeof(*chp->cq.queue));
queue            1118 drivers/infiniband/hw/cxgb4/cq.c 		mm->addr = virt_to_phys(chp->cq.queue);
queue             102 drivers/infiniband/hw/cxgb4/qp.c 	dma_free_coherent(&(rdev->lldi.pdev->dev), sq->memsize, sq->queue,
queue             123 drivers/infiniband/hw/cxgb4/qp.c 	sq->queue = (__force union t4_wr *)(rdev->oc_mw_kva + sq->dma_addr -
queue             131 drivers/infiniband/hw/cxgb4/qp.c 	sq->queue = dma_alloc_coherent(&(rdev->lldi.pdev->dev), sq->memsize,
queue             133 drivers/infiniband/hw/cxgb4/qp.c 	if (!sq->queue)
queue             135 drivers/infiniband/hw/cxgb4/qp.c 	sq->phys_addr = virt_to_phys(sq->queue);
queue             163 drivers/infiniband/hw/cxgb4/qp.c 				  wq->rq.memsize, wq->rq.queue,
queue             260 drivers/infiniband/hw/cxgb4/qp.c 	memset(wq->sq.queue, 0, wq->sq.memsize);
queue             264 drivers/infiniband/hw/cxgb4/qp.c 		wq->rq.queue = dma_alloc_coherent(&rdev->lldi.pdev->dev,
queue             268 drivers/infiniband/hw/cxgb4/qp.c 		if (!wq->rq.queue) {
queue             273 drivers/infiniband/hw/cxgb4/qp.c 			 wq->sq.queue,
queue             274 drivers/infiniband/hw/cxgb4/qp.c 			 (unsigned long long)virt_to_phys(wq->sq.queue),
queue             275 drivers/infiniband/hw/cxgb4/qp.c 			 wq->rq.queue,
queue             276 drivers/infiniband/hw/cxgb4/qp.c 			 (unsigned long long)virt_to_phys(wq->rq.queue));
queue             393 drivers/infiniband/hw/cxgb4/qp.c 				  wq->rq.memsize, wq->rq.queue,
queue             429 drivers/infiniband/hw/cxgb4/qp.c 			if (dstp == (u8 *)&sq->queue[sq->size])
queue             430 drivers/infiniband/hw/cxgb4/qp.c 				dstp = (u8 *)sq->queue;
queue             431 drivers/infiniband/hw/cxgb4/qp.c 			if (rem <= (u8 *)&sq->queue[sq->size] - dstp)
queue             434 drivers/infiniband/hw/cxgb4/qp.c 				len = (u8 *)&sq->queue[sq->size] - dstp;
queue             533 drivers/infiniband/hw/cxgb4/qp.c 			ret = build_isgl((__be64 *)sq->queue,
queue             534 drivers/infiniband/hw/cxgb4/qp.c 					 (__be64 *)&sq->queue[sq->size],
queue             584 drivers/infiniband/hw/cxgb4/qp.c 			ret = build_isgl((__be64 *)sq->queue,
queue             585 drivers/infiniband/hw/cxgb4/qp.c 					 (__be64 *)&sq->queue[sq->size],
queue             646 drivers/infiniband/hw/cxgb4/qp.c 		build_isgl((__be64 *)sq->queue, (__be64 *)&sq->queue[sq->size],
queue             650 drivers/infiniband/hw/cxgb4/qp.c 	build_isgl((__be64 *)sq->queue, (__be64 *)&sq->queue[sq->size],
queue             705 drivers/infiniband/hw/cxgb4/qp.c 	wqe = (union t4_wr *)((u8 *)qhp->wq.sq.queue +
queue             763 drivers/infiniband/hw/cxgb4/qp.c 	ret = build_isgl((__be64 *)qhp->wq.rq.queue,
queue             764 drivers/infiniband/hw/cxgb4/qp.c 			 (__be64 *)&qhp->wq.rq.queue[qhp->wq.rq.size],
queue             868 drivers/infiniband/hw/cxgb4/qp.c 			if (++p == (__be64 *)&sq->queue[sq->size])
queue             869 drivers/infiniband/hw/cxgb4/qp.c 				p = (__be64 *)sq->queue;
queue             874 drivers/infiniband/hw/cxgb4/qp.c 			if (++p == (__be64 *)&sq->queue[sq->size])
queue             875 drivers/infiniband/hw/cxgb4/qp.c 				p = (__be64 *)sq->queue;
queue            1141 drivers/infiniband/hw/cxgb4/qp.c 		wqe = (union t4_wr *)((u8 *)qhp->wq.sq.queue +
queue            1295 drivers/infiniband/hw/cxgb4/qp.c 		wqe = (union t4_recv_wr *)((u8 *)qhp->wq.rq.queue +
queue            2169 drivers/infiniband/hw/cxgb4/qp.c 		sizeof(*qhp->wq.sq.queue) + 16 * sizeof(__be64);
queue            2175 drivers/infiniband/hw/cxgb4/qp.c 			sizeof(*qhp->wq.rq.queue);
queue            2298 drivers/infiniband/hw/cxgb4/qp.c 			rq_key_mm->addr = virt_to_phys(qhp->wq.rq.queue);
queue            2326 drivers/infiniband/hw/cxgb4/qp.c 			&qhp->wq.rq.queue[qhp->wq.rq.size].status.qp_err;
queue            2329 drivers/infiniband/hw/cxgb4/qp.c 			&qhp->wq.sq.queue[qhp->wq.sq.size].status.qp_err;
queue            2331 drivers/infiniband/hw/cxgb4/qp.c 			&qhp->wq.sq.queue[qhp->wq.sq.size].status.srqidx;
queue            2510 drivers/infiniband/hw/cxgb4/qp.c 			  wq->memsize, wq->queue,
queue            2553 drivers/infiniband/hw/cxgb4/qp.c 	wq->queue = dma_alloc_coherent(&rdev->lldi.pdev->dev, wq->memsize,
queue            2555 drivers/infiniband/hw/cxgb4/qp.c 	if (!wq->queue)
queue            2629 drivers/infiniband/hw/cxgb4/qp.c 			__func__, srq->idx, wq->qid, srq->pdid, wq->queue,
queue            2630 drivers/infiniband/hw/cxgb4/qp.c 			(u64)virt_to_phys(wq->queue), wq->bar2_va,
queue            2636 drivers/infiniband/hw/cxgb4/qp.c 			  wq->memsize, wq->queue,
queue            2657 drivers/infiniband/hw/cxgb4/qp.c 	dst = (u64 *)((u8 *)srq->queue + srq->wq_pidx * T4_EQ_ENTRY_SIZE);
queue            2660 drivers/infiniband/hw/cxgb4/qp.c 		if (dst >= (u64 *)&srq->queue[srq->size])
queue            2661 drivers/infiniband/hw/cxgb4/qp.c 			dst = (u64 *)srq->queue;
queue            2663 drivers/infiniband/hw/cxgb4/qp.c 		if (dst >= (u64 *)&srq->queue[srq->size])
queue            2664 drivers/infiniband/hw/cxgb4/qp.c 			dst = (u64 *)srq->queue;
queue            2727 drivers/infiniband/hw/cxgb4/qp.c 		sizeof(*srq->wq.queue);
queue            2768 drivers/infiniband/hw/cxgb4/qp.c 		srq_key_mm->addr = virt_to_phys(srq->wq.queue);
queue             402 drivers/infiniband/hw/cxgb4/restrack.c 	hwcqes[0] = chp->cq.queue[idx];
queue             405 drivers/infiniband/hw/cxgb4/restrack.c 	hwcqes[1] = chp->cq.queue[idx];
queue             333 drivers/infiniband/hw/cxgb4/t4.h 	union t4_wr *queue;
queue             362 drivers/infiniband/hw/cxgb4/t4.h 	union  t4_recv_wr *queue;
queue             399 drivers/infiniband/hw/cxgb4/t4.h 	union t4_recv_wr *queue;
queue             438 drivers/infiniband/hw/cxgb4/t4.h 	srq->queue[srq->size].status.host_pidx = srq->pidx;
queue             468 drivers/infiniband/hw/cxgb4/t4.h 	srq->queue[srq->size].status.host_cidx = srq->cidx;
queue             477 drivers/infiniband/hw/cxgb4/t4.h 	srq->queue[srq->size].status.host_cidx = srq->cidx;
queue             519 drivers/infiniband/hw/cxgb4/t4.h 	return wq->rq.queue[wq->rq.size].status.host_wq_pidx;
queue             568 drivers/infiniband/hw/cxgb4/t4.h 	return wq->sq.queue[wq->sq.size].status.host_wq_pidx;
queue             674 drivers/infiniband/hw/cxgb4/t4.h 	wq->rq.queue[wq->rq.size].status.db_off = 1;
queue             679 drivers/infiniband/hw/cxgb4/t4.h 	wq->rq.queue[wq->rq.size].status.db_off = 0;
queue             684 drivers/infiniband/hw/cxgb4/t4.h 	return !wq->rq.queue[wq->rq.size].status.db_off;
queue             692 drivers/infiniband/hw/cxgb4/t4.h 	struct t4_cqe *queue;
queue             771 drivers/infiniband/hw/cxgb4/t4.h 	cq->bits_type_ts = cq->queue[cq->cidx].bits_type_ts;
queue             792 drivers/infiniband/hw/cxgb4/t4.h 	return cq->sw_in_use || t4_valid_cqe(cq, &cq->queue[cq->cidx]);
queue             805 drivers/infiniband/hw/cxgb4/t4.h 	if (cq->queue[prev_cidx].bits_type_ts != cq->bits_type_ts) {
queue             809 drivers/infiniband/hw/cxgb4/t4.h 	} else if (t4_valid_cqe(cq, &cq->queue[cq->cidx])) {
queue             813 drivers/infiniband/hw/cxgb4/t4.h 		*cqe = &cq->queue[cq->cidx];
queue             469 drivers/infiniband/hw/hfi1/tid_rdma.c 			       struct tid_queue *queue)
queue             475 drivers/infiniband/hw/hfi1/tid_rdma.c 	priv = list_first_entry_or_null(&queue->queue_head,
queue             503 drivers/infiniband/hw/hfi1/tid_rdma.c 			       struct tid_queue *queue, struct rvt_qp *qp)
queue             511 drivers/infiniband/hw/hfi1/tid_rdma.c 	fqp = first_qp(rcd, queue);
queue             534 drivers/infiniband/hw/hfi1/tid_rdma.c 			       struct tid_queue *queue, struct rvt_qp *qp)
queue             545 drivers/infiniband/hw/hfi1/tid_rdma.c 	queue->dequeue++;
queue             560 drivers/infiniband/hw/hfi1/tid_rdma.c 				  struct tid_queue *queue, struct rvt_qp *qp)
queue             569 drivers/infiniband/hw/hfi1/tid_rdma.c 		list_add_tail(&priv->tid_wait, &queue->queue_head);
queue             570 drivers/infiniband/hw/hfi1/tid_rdma.c 		priv->tid_enqueue = ++queue->enqueue;
queue             664 drivers/infiniband/hw/hfi1/tid_rdma.c static void _tid_rdma_flush_wait(struct rvt_qp *qp, struct tid_queue *queue)
queue             678 drivers/infiniband/hw/hfi1/tid_rdma.c 		queue->dequeue++;
queue             789 drivers/infiniband/hw/hfi1/tid_rdma.c 		goto queue;
queue             793 drivers/infiniband/hw/hfi1/tid_rdma.c 		goto queue;
queue             809 drivers/infiniband/hw/hfi1/tid_rdma.c queue:
queue            1489 drivers/infiniband/hw/hfi1/tid_rdma.c 		goto queue;
queue            1497 drivers/infiniband/hw/hfi1/tid_rdma.c 		goto queue;
queue            1529 drivers/infiniband/hw/hfi1/tid_rdma.c queue:
queue            3403 drivers/infiniband/hw/hfi1/tid_rdma.c 			     struct tid_queue *queue)
queue            3405 drivers/infiniband/hw/hfi1/tid_rdma.c 	return qpriv->tid_enqueue - queue->dequeue;
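
The hfi1 tid_rdma hits above pair a wait list (queue_head / tid_wait) with two monotonic counters: a waiter records ++queue->enqueue as its tid_enqueue stamp when added, queue->dequeue advances as waiters leave, and the last hit computes qpriv->tid_enqueue - queue->dequeue as the waiter's distance from the head. A hedged standalone model of just that counter arithmetic (struct and variable names below are illustrative, not the kernel types):

	#include <stdio.h>

	/* Model of the two running counters: total ever enqueued and
	 * total ever dequeued. */
	struct tid_queue_model {
		unsigned long enqueue;
		unsigned long dequeue;
	};

	int main(void)
	{
		struct tid_queue_model q = { 0, 0 };
		unsigned long stamp_a = ++q.enqueue;	/* first waiter */
		unsigned long stamp_b = ++q.enqueue;	/* second waiter */

		q.dequeue++;	/* first waiter is removed */

		/* stamp - dequeue: 0 means at the head of the queue. */
		printf("A position: %lu\n", stamp_a - q.dequeue);	/* 0 */
		printf("B position: %lu\n", stamp_b - q.dequeue);	/* 1 */
		return 0;
	}
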
queue            1090 drivers/infiniband/hw/i40iw/i40iw_uk.c void i40iw_clean_cq(void *queue, struct i40iw_cq_uk *cq)
queue            1111 drivers/infiniband/hw/i40iw/i40iw_uk.c 		if ((void *)(unsigned long)comp_ctx == queue)
queue             420 drivers/infiniband/hw/i40iw/i40iw_user.h void i40iw_clean_cq(void *queue, struct i40iw_cq_uk *cq);
queue             163 drivers/infiniband/hw/mthca/mthca_cq.c 		return buf->queue.direct.buf + (entry * MTHCA_CQ_ENTRY_SIZE);
queue             165 drivers/infiniband/hw/mthca/mthca_cq.c 		return buf->queue.page_list[entry * MTHCA_CQ_ENTRY_SIZE / PAGE_SIZE].buf
queue             355 drivers/infiniband/hw/mthca/mthca_cq.c 			      &buf->queue, &buf->is_direct,
queue             368 drivers/infiniband/hw/mthca/mthca_cq.c 	mthca_buf_free(dev, (cqe + 1) * MTHCA_CQ_ENTRY_SIZE, &buf->queue,
queue             186 drivers/infiniband/hw/mthca/mthca_provider.h 	union mthca_buf		queue;
queue             239 drivers/infiniband/hw/mthca/mthca_provider.h 	union mthca_buf		queue;
queue             282 drivers/infiniband/hw/mthca/mthca_provider.h 	union mthca_buf	       queue;
queue             211 drivers/infiniband/hw/mthca/mthca_qp.c 		return qp->queue.direct.buf + (n << qp->rq.wqe_shift);
queue             213 drivers/infiniband/hw/mthca/mthca_qp.c 		return qp->queue.page_list[(n << qp->rq.wqe_shift) >> PAGE_SHIFT].buf +
queue             220 drivers/infiniband/hw/mthca/mthca_qp.c 		return qp->queue.direct.buf + qp->send_wqe_offset +
queue             223 drivers/infiniband/hw/mthca/mthca_qp.c 		return qp->queue.page_list[(qp->send_wqe_offset +
queue            1069 drivers/infiniband/hw/mthca/mthca_qp.c 			      &qp->queue, &qp->is_direct, pd, 0, &qp->mr);
queue            1085 drivers/infiniband/hw/mthca/mthca_qp.c 		       &qp->queue, qp->is_direct, &qp->mr);
queue              77 drivers/infiniband/hw/mthca/mthca_srq.c 		return srq->queue.direct.buf + (n << srq->wqe_shift);
queue              79 drivers/infiniband/hw/mthca/mthca_srq.c 		return srq->queue.page_list[(n << srq->wqe_shift) >> PAGE_SHIFT].buf +
queue             149 drivers/infiniband/hw/mthca/mthca_srq.c 	mthca_buf_free(dev, srq->max << srq->wqe_shift, &srq->queue,
queue             171 drivers/infiniband/hw/mthca/mthca_srq.c 			      &srq->queue, &srq->is_direct, pd, 1, &srq->mr);
queue              81 drivers/infiniband/sw/rdmavt/cq.c 		u_wc = cq->queue;
queue             296 drivers/infiniband/sw/rdmavt/cq.c 		cq->queue = u_wc;
queue             358 drivers/infiniband/sw/rdmavt/cq.c 		if (cq->queue) {
queue             359 drivers/infiniband/sw/rdmavt/cq.c 			if (RDMA_READ_UAPI_ATOMIC(cq->queue->head) !=
queue             360 drivers/infiniband/sw/rdmavt/cq.c 				RDMA_READ_UAPI_ATOMIC(cq->queue->tail))
queue             425 drivers/infiniband/sw/rdmavt/cq.c 		old_u_wc = cq->queue;
queue             460 drivers/infiniband/sw/rdmavt/cq.c 		cq->queue = u_wc;
queue             171 drivers/infiniband/sw/rxe/rxe_comp.c 	wqe = queue_head(qp->sq.queue);
queue             449 drivers/infiniband/sw/rxe/rxe_comp.c 		advance_consumer(qp->sq.queue);
queue             452 drivers/infiniband/sw/rxe/rxe_comp.c 		advance_consumer(qp->sq.queue);
queue             547 drivers/infiniband/sw/rxe/rxe_comp.c 	while ((wqe = queue_head(qp->sq.queue))) {
queue             552 drivers/infiniband/sw/rxe/rxe_comp.c 			advance_consumer(qp->sq.queue);
queue              55 drivers/infiniband/sw/rxe/rxe_cq.c 		count = queue_count(cq->queue);
queue              90 drivers/infiniband/sw/rxe/rxe_cq.c 	cq->queue = rxe_queue_init(rxe, &cqe,
queue              92 drivers/infiniband/sw/rxe/rxe_cq.c 	if (!cq->queue) {
queue              98 drivers/infiniband/sw/rxe/rxe_cq.c 			   cq->queue->buf, cq->queue->buf_size, &cq->queue->ip);
queue             100 drivers/infiniband/sw/rxe/rxe_cq.c 		vfree(cq->queue->buf);
queue             101 drivers/infiniband/sw/rxe/rxe_cq.c 		kfree(cq->queue);
queue             123 drivers/infiniband/sw/rxe/rxe_cq.c 	err = rxe_queue_resize(cq->queue, (unsigned int *)&cqe,
queue             139 drivers/infiniband/sw/rxe/rxe_cq.c 	if (unlikely(queue_full(cq->queue))) {
queue             151 drivers/infiniband/sw/rxe/rxe_cq.c 	memcpy(producer_addr(cq->queue), cqe, sizeof(*cqe));
queue             158 drivers/infiniband/sw/rxe/rxe_cq.c 	advance_producer(cq->queue);
queue             183 drivers/infiniband/sw/rxe/rxe_cq.c 	if (cq->queue)
queue             184 drivers/infiniband/sw/rxe/rxe_cq.c 		rxe_queue_cleanup(cq->queue);
queue             250 drivers/infiniband/sw/rxe/rxe_qp.c 	qp->sq.queue = rxe_queue_init(rxe,
queue             253 drivers/infiniband/sw/rxe/rxe_qp.c 	if (!qp->sq.queue)
queue             257 drivers/infiniband/sw/rxe/rxe_qp.c 			   qp->sq.queue->buf, qp->sq.queue->buf_size,
queue             258 drivers/infiniband/sw/rxe/rxe_qp.c 			   &qp->sq.queue->ip);
queue             261 drivers/infiniband/sw/rxe/rxe_qp.c 		vfree(qp->sq.queue->buf);
queue             262 drivers/infiniband/sw/rxe/rxe_qp.c 		kfree(qp->sq.queue);
queue             266 drivers/infiniband/sw/rxe/rxe_qp.c 	qp->req.wqe_index	= producer_index(qp->sq.queue);
queue             304 drivers/infiniband/sw/rxe/rxe_qp.c 		qp->rq.queue = rxe_queue_init(rxe,
queue             307 drivers/infiniband/sw/rxe/rxe_qp.c 		if (!qp->rq.queue)
queue             311 drivers/infiniband/sw/rxe/rxe_qp.c 				   qp->rq.queue->buf, qp->rq.queue->buf_size,
queue             312 drivers/infiniband/sw/rxe/rxe_qp.c 				   &qp->rq.queue->ip);
queue             314 drivers/infiniband/sw/rxe/rxe_qp.c 			vfree(qp->rq.queue->buf);
queue             315 drivers/infiniband/sw/rxe/rxe_qp.c 			kfree(qp->rq.queue);
queue             374 drivers/infiniband/sw/rxe/rxe_qp.c 	rxe_queue_cleanup(qp->sq.queue);
queue             506 drivers/infiniband/sw/rxe/rxe_qp.c 	if (qp->sq.queue) {
queue             521 drivers/infiniband/sw/rxe/rxe_qp.c 	if (qp->sq.queue) {
queue             524 drivers/infiniband/sw/rxe/rxe_qp.c 		rxe_queue_reset(qp->sq.queue);
queue             548 drivers/infiniband/sw/rxe/rxe_qp.c 	if (qp->sq.queue) {
queue             559 drivers/infiniband/sw/rxe/rxe_qp.c 	if (qp->sq.queue) {
queue             798 drivers/infiniband/sw/rxe/rxe_qp.c 	if (qp->sq.queue) {
queue             811 drivers/infiniband/sw/rxe/rxe_qp.c 	if (qp->sq.queue)
queue             812 drivers/infiniband/sw/rxe/rxe_qp.c 		rxe_queue_cleanup(qp->sq.queue);
queue             817 drivers/infiniband/sw/rxe/rxe_qp.c 	if (qp->rq.queue)
queue             818 drivers/infiniband/sw/rxe/rxe_qp.c 		rxe_queue_cleanup(qp->rq.queue);
queue              97 drivers/infiniband/sw/rxe/rxe_queue.h void rxe_queue_cleanup(struct rxe_queue *queue);
queue              76 drivers/infiniband/sw/rxe/rxe_req.c 	qp->req.wqe_index	= consumer_index(qp->sq.queue);
queue              80 drivers/infiniband/sw/rxe/rxe_req.c 	for (wqe_index = consumer_index(qp->sq.queue);
queue              81 drivers/infiniband/sw/rxe/rxe_req.c 		wqe_index != producer_index(qp->sq.queue);
queue              82 drivers/infiniband/sw/rxe/rxe_req.c 		wqe_index = next_index(qp->sq.queue, wqe_index)) {
queue              83 drivers/infiniband/sw/rxe/rxe_req.c 		wqe = addr_from_index(qp->sq.queue, wqe_index);
queue             134 drivers/infiniband/sw/rxe/rxe_req.c 	struct rxe_send_wqe *wqe = queue_head(qp->sq.queue);
queue             151 drivers/infiniband/sw/rxe/rxe_req.c 				consumer_index(qp->sq.queue)) ||
queue             174 drivers/infiniband/sw/rxe/rxe_req.c 	if (qp->req.wqe_index == producer_index(qp->sq.queue))
queue             177 drivers/infiniband/sw/rxe/rxe_req.c 	wqe = addr_from_index(qp->sq.queue, qp->req.wqe_index);
queue             185 drivers/infiniband/sw/rxe/rxe_req.c 		     (qp->req.wqe_index != consumer_index(qp->sq.queue)))) {
queue             580 drivers/infiniband/sw/rxe/rxe_req.c 		qp->req.wqe_index = next_index(qp->sq.queue, qp->req.wqe_index);
queue             610 drivers/infiniband/sw/rxe/rxe_req.c 		qp->req.wqe_index = consumer_index(qp->sq.queue);
queue             661 drivers/infiniband/sw/rxe/rxe_req.c 		qp->req.wqe_index = next_index(qp->sq.queue,
queue             706 drivers/infiniband/sw/rxe/rxe_req.c 			qp->req.wqe_index = next_index(qp->sq.queue,
queue             319 drivers/infiniband/sw/rxe/rxe_resp.c 	struct rxe_queue *q = srq->rq.queue;
queue             368 drivers/infiniband/sw/rxe/rxe_resp.c 			qp->resp.wqe = queue_head(qp->rq.queue);
queue             395 drivers/infiniband/sw/rxe/rxe_resp.c 		qp->resp.wqe = queue_head(qp->rq.queue);
queue             940 drivers/infiniband/sw/rxe/rxe_resp.c 		advance_consumer(qp->rq.queue);
queue            1216 drivers/infiniband/sw/rxe/rxe_resp.c 	while (!qp->srq && qp->rq.queue && queue_head(qp->rq.queue))
queue            1217 drivers/infiniband/sw/rxe/rxe_resp.c 		advance_consumer(qp->rq.queue);
queue              76 drivers/infiniband/sw/rxe/rxe_srq.c 		if (srq && (attr->srq_limit > srq->rq.queue->buf->index_mask)) {
queue              79 drivers/infiniband/sw/rxe/rxe_srq.c 				 srq->rq.queue->buf->index_mask);
queue             128 drivers/infiniband/sw/rxe/rxe_srq.c 	srq->rq.queue = q;
queue             154 drivers/infiniband/sw/rxe/rxe_srq.c 	struct rxe_queue *q = srq->rq.queue;
queue             179 drivers/infiniband/sw/rxe/rxe_srq.c 	srq->rq.queue = NULL;
queue             253 drivers/infiniband/sw/rxe/rxe_verbs.c 	if (unlikely(queue_full(rq->queue))) {
queue             267 drivers/infiniband/sw/rxe/rxe_verbs.c 	recv_wqe = producer_addr(rq->queue);
queue             285 drivers/infiniband/sw/rxe/rxe_verbs.c 	advance_producer(rq->queue);
queue             370 drivers/infiniband/sw/rxe/rxe_verbs.c 	attr->max_wr = srq->rq.queue->buf->index_mask;
queue             380 drivers/infiniband/sw/rxe/rxe_verbs.c 	if (srq->rq.queue)
queue             381 drivers/infiniband/sw/rxe/rxe_verbs.c 		rxe_queue_cleanup(srq->rq.queue);
queue             648 drivers/infiniband/sw/rxe/rxe_verbs.c 	if (unlikely(queue_full(sq->queue))) {
queue             653 drivers/infiniband/sw/rxe/rxe_verbs.c 	send_wqe = producer_addr(sq->queue);
queue             665 drivers/infiniband/sw/rxe/rxe_verbs.c 	advance_producer(sq->queue);
queue             856 drivers/infiniband/sw/rxe/rxe_verbs.c 		cqe = queue_head(cq->queue);
queue             861 drivers/infiniband/sw/rxe/rxe_verbs.c 		advance_consumer(cq->queue);
queue             871 drivers/infiniband/sw/rxe/rxe_verbs.c 	int count = queue_count(cq->queue);
queue             886 drivers/infiniband/sw/rxe/rxe_verbs.c 	if ((flags & IB_CQ_REPORT_MISSED_EVENTS) && !queue_empty(cq->queue))
queue              90 drivers/infiniband/sw/rxe/rxe_verbs.h 	struct rxe_queue	*queue;
queue             111 drivers/infiniband/sw/rxe/rxe_verbs.h 	struct rxe_queue	*queue;
queue             119 drivers/infiniband/sw/rxe/rxe_verbs.h 	struct rxe_queue	*queue;
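
The rxe hits above all go through a small producer/consumer helper API: queue_full and producer_addr before advance_producer on the posting side, queue_head before advance_consumer on the completion side, plus queue_count and queue_empty for polling. A simplified userspace model of that API, assuming a power-of-two ring with free-running indices; the names mirror the helpers referenced above, but this is not the rxe_queue.h implementation.

	#include <stdbool.h>
	#include <stdio.h>

	#define QUEUE_SIZE 8	/* power of two, so masking wraps indices */

	struct queue_model {
		unsigned int prod;	/* free-running producer index */
		unsigned int cons;	/* free-running consumer index */
		int buf[QUEUE_SIZE];
	};

	static bool queue_full(struct queue_model *q)
	{
		return q->prod - q->cons == QUEUE_SIZE;
	}

	static int *producer_addr(struct queue_model *q)
	{
		return &q->buf[q->prod & (QUEUE_SIZE - 1)];
	}

	static void advance_producer(struct queue_model *q)
	{
		q->prod++;
	}

	/* NULL when empty, otherwise the oldest element. */
	static int *queue_head(struct queue_model *q)
	{
		if (q->prod == q->cons)
			return NULL;
		return &q->buf[q->cons & (QUEUE_SIZE - 1)];
	}

	static void advance_consumer(struct queue_model *q)
	{
		q->cons++;
	}

	int main(void)
	{
		struct queue_model q = { 0, 0, { 0 } };
		int *slot;
		int i;

		for (i = 0; i < 3 && !queue_full(&q); i++) {
			*producer_addr(&q) = i;	/* fill the slot in place */
			advance_producer(&q);	/* then publish it */
		}
		while ((slot = queue_head(&q))) {
			printf("dequeued %d\n", *slot);
			advance_consumer(&q);
		}
		return 0;
	}

Writing into producer_addr() first and advancing the index afterwards is what lets a consumer safely treat everything below the producer index as complete.
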
queue             218 drivers/infiniband/sw/siw/siw.h 	struct siw_cqe *queue;
queue              55 drivers/infiniband/sw/siw/siw_cq.c 	cqe = &cq->queue[cq->cq_get % cq->num_cqe];
queue            1062 drivers/infiniband/sw/siw/siw_qp.c 		cqe = &cq->queue[idx];
queue            1119 drivers/infiniband/sw/siw/siw_qp.c 		cqe = &cq->queue[idx];
queue            1098 drivers/infiniband/sw/siw/siw_verbs.c 	vfree(cq->queue);
queue            1135 drivers/infiniband/sw/siw/siw_verbs.c 		cq->queue = vzalloc(size * sizeof(struct siw_cqe) +
queue            1138 drivers/infiniband/sw/siw/siw_verbs.c 		cq->queue = vmalloc_user(size * sizeof(struct siw_cqe) +
queue            1141 drivers/infiniband/sw/siw/siw_verbs.c 	if (cq->queue == NULL) {
queue            1150 drivers/infiniband/sw/siw/siw_verbs.c 	cq->notify = (struct siw_cq_ctrl *)&cq->queue[size];
queue            1159 drivers/infiniband/sw/siw/siw_verbs.c 			siw_create_uobj(ctx, cq->queue,
queue            1183 drivers/infiniband/sw/siw/siw_verbs.c 	if (cq && cq->queue) {
queue            1189 drivers/infiniband/sw/siw/siw_verbs.c 		vfree(cq->queue);
queue             433 drivers/infiniband/ulp/ipoib/ipoib.h 	struct sk_buff_head   queue;
queue             451 drivers/infiniband/ulp/ipoib/ipoib.h 	struct sk_buff_head queue;
queue            1036 drivers/infiniband/ulp/ipoib/ipoib_cm.c 		while ((skb = __skb_dequeue(&p->neigh->queue)))
queue             613 drivers/infiniband/ulp/ipoib/ipoib_main.c 	while ((skb = __skb_dequeue(&path->queue)))
queue             803 drivers/infiniband/ulp/ipoib/ipoib_main.c 		while ((skb = __skb_dequeue(&path->queue)))
queue             832 drivers/infiniband/ulp/ipoib/ipoib_main.c 			while ((skb = __skb_dequeue(&neigh->queue)))
queue             888 drivers/infiniband/ulp/ipoib/ipoib_main.c 	skb_queue_head_init(&path->queue);
queue             994 drivers/infiniband/ulp/ipoib/ipoib_main.c 			if (skb_queue_len(&neigh->queue) <
queue             997 drivers/infiniband/ulp/ipoib/ipoib_main.c 				__skb_queue_tail(&neigh->queue, skb);
queue            1000 drivers/infiniband/ulp/ipoib/ipoib_main.c 					   skb_queue_len(&neigh->queue));
queue            1015 drivers/infiniband/ulp/ipoib/ipoib_main.c 		if (skb_queue_len(&neigh->queue) < IPOIB_MAX_PATH_REC_QUEUE) {
queue            1017 drivers/infiniband/ulp/ipoib/ipoib_main.c 			__skb_queue_tail(&neigh->queue, skb);
queue            1071 drivers/infiniband/ulp/ipoib/ipoib_main.c 		if (skb_queue_len(&path->queue) < IPOIB_MAX_PATH_REC_QUEUE) {
queue            1073 drivers/infiniband/ulp/ipoib/ipoib_main.c 			__skb_queue_tail(&path->queue, skb);
queue            1169 drivers/infiniband/ulp/ipoib/ipoib_main.c 	if (skb_queue_len(&neigh->queue) < IPOIB_MAX_PATH_REC_QUEUE) {
queue            1172 drivers/infiniband/ulp/ipoib/ipoib_main.c 		__skb_queue_tail(&neigh->queue, skb);
queue            1290 drivers/infiniband/ulp/ipoib/ipoib_main.c 			if (likely(skb_queue_len(&neigh->queue) < IPOIB_MAX_PATH_REC_QUEUE))
queue            1375 drivers/infiniband/ulp/ipoib/ipoib_main.c 	skb_queue_head_init(&neigh->queue);
queue            1448 drivers/infiniband/ulp/ipoib/ipoib_main.c 	while ((skb = __skb_dequeue(&neigh->queue))) {
queue             117 drivers/input/rmi4/rmi_f54.c 	struct vb2_queue queue;
queue             694 drivers/input/rmi4/rmi_f54.c 	f54->queue = rmi_f54_queue;
queue             695 drivers/input/rmi4/rmi_f54.c 	f54->queue.drv_priv = f54;
queue             696 drivers/input/rmi4/rmi_f54.c 	f54->queue.lock = &f54->lock;
queue             697 drivers/input/rmi4/rmi_f54.c 	f54->queue.dev = &fn->dev;
queue             699 drivers/input/rmi4/rmi_f54.c 	ret = vb2_queue_init(&f54->queue);
queue             707 drivers/input/rmi4/rmi_f54.c 	f54->vdev.queue = &f54->queue;
queue              29 drivers/input/serio/serio_raw.c 	unsigned char queue[SERIO_RAW_QUEUE_LEN];
queue             148 drivers/input/serio/serio_raw.c 		*c = serio_raw->queue[serio_raw->tail];
queue             278 drivers/input/serio/serio_raw.c 	serio_raw->queue[head] = data;
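
serio_raw above keeps a fixed byte FIFO: interrupt data is stored at queue[head] and readers consume from queue[tail]. A minimal sketch of that head/tail byte queue; SERIO_RAW_QUEUE_LEN's actual value is not visible in this listing, so 64 below is an assumption, and the helper names are illustrative.

	#include <stdbool.h>
	#include <stdio.h>

	#define SERIO_RAW_QUEUE_LEN 64	/* assumed size */

	struct raw_fifo {
		unsigned char queue[SERIO_RAW_QUEUE_LEN];
		unsigned int head, tail;
	};

	static bool fifo_put(struct raw_fifo *f, unsigned char data)
	{
		unsigned int next = (f->head + 1) % SERIO_RAW_QUEUE_LEN;

		if (next == f->tail)	/* full: one slot kept unused */
			return false;
		f->queue[f->head] = data;
		f->head = next;
		return true;
	}

	static bool fifo_get(struct raw_fifo *f, unsigned char *c)
	{
		if (f->head == f->tail)	/* empty */
			return false;
		*c = f->queue[f->tail];
		f->tail = (f->tail + 1) % SERIO_RAW_QUEUE_LEN;
		return true;
	}

	int main(void)
	{
		struct raw_fifo f = { { 0 }, 0, 0 };
		unsigned char c;

		fifo_put(&f, 0xaa);
		fifo_put(&f, 0x55);
		while (fifo_get(&f, &c))
			printf("read 0x%02x\n", c);
		return 0;
	}
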
queue             248 drivers/input/touchscreen/atmel_mxt_ts.c 	struct vb2_queue queue;
queue            2602 drivers/input/touchscreen/atmel_mxt_ts.c 	dbg->queue = mxt_queue;
queue            2603 drivers/input/touchscreen/atmel_mxt_ts.c 	dbg->queue.drv_priv = data;
queue            2604 drivers/input/touchscreen/atmel_mxt_ts.c 	dbg->queue.lock = &dbg->lock;
queue            2605 drivers/input/touchscreen/atmel_mxt_ts.c 	dbg->queue.dev = &data->client->dev;
queue            2607 drivers/input/touchscreen/atmel_mxt_ts.c 	error = vb2_queue_init(&dbg->queue);
queue            2615 drivers/input/touchscreen/atmel_mxt_ts.c 	dbg->vdev.queue = &dbg->queue;
queue             217 drivers/input/touchscreen/sur40.c 	struct vb2_queue queue;
queue             536 drivers/input/touchscreen/sur40.c 	if (!vb2_start_streaming_called(&sur40->queue))
queue             733 drivers/input/touchscreen/sur40.c 	sur40->queue = sur40_queue;
queue             734 drivers/input/touchscreen/sur40.c 	sur40->queue.drv_priv = sur40;
queue             735 drivers/input/touchscreen/sur40.c 	sur40->queue.lock = &sur40->lock;
queue             736 drivers/input/touchscreen/sur40.c 	sur40->queue.dev = sur40->dev;
queue             739 drivers/input/touchscreen/sur40.c 	error = vb2_queue_init(&sur40->queue);
queue             747 drivers/input/touchscreen/sur40.c 	sur40->vdev.queue = &sur40->queue;
queue              82 drivers/iommu/iova.c 	struct iova_fq __percpu *queue;
queue              88 drivers/iommu/iova.c 	queue = alloc_percpu(struct iova_fq);
queue              89 drivers/iommu/iova.c 	if (!queue)
queue              98 drivers/iommu/iova.c 		fq = per_cpu_ptr(queue, cpu);
queue             107 drivers/iommu/iova.c 	iovad->fq = queue;
queue              33 drivers/ipack/devices/ipoctal.c 	wait_queue_head_t		queue;
queue             395 drivers/lightnvm/core.c 	tdisk->queue = tqueue;
queue             437 drivers/lightnvm/core.c 	tdisk->queue = NULL;
queue             453 drivers/lightnvm/core.c 	struct request_queue *q = tdisk->queue;
queue            1149 drivers/lightnvm/pblk-init.c 	struct request_queue *tqueue = tdisk->queue;
queue              23 drivers/mailbox/ti-msgmgr.c #define Q_DATA_OFFSET(proxy, queue, reg)	\
queue              24 drivers/mailbox/ti-msgmgr.c 		     ((0x10000 * (proxy)) + (0x80 * (queue)) + ((reg) * 4))
queue              25 drivers/mailbox/ti-msgmgr.c #define Q_STATE_OFFSET(queue)			((queue) * 0x4)
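
The two ti-msgmgr macros above are quoted in full, so the register layout can be read off directly: each proxy occupies a 0x10000 window, each queue an 0x80 window inside it, and data registers sit 4 bytes apart, while queue state registers are a flat 4-byte-per-queue array. A small check program using those exact macro bodies (the proxy/queue/register numbers are made up for illustration):

	#include <stdio.h>

	/* Macro bodies copied from the ti-msgmgr.c hits above. */
	#define Q_DATA_OFFSET(proxy, queue, reg) \
		((0x10000 * (proxy)) + (0x80 * (queue)) + ((reg) * 4))
	#define Q_STATE_OFFSET(queue)	((queue) * 0x4)

	int main(void)
	{
		/* proxy 1, queue 2, data register 3:
		 * 0x10000 + 0x100 + 0xc = 0x1010c */
		printf("data offset:  0x%x\n", Q_DATA_OFFSET(1, 2, 3));
		/* queue 2: 0x8 */
		printf("state offset: 0x%x\n", Q_STATE_OFFSET(2));
		return 0;
	}
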
queue             683 drivers/md/bcache/request.c 		generic_end_io_acct(s->d->disk->queue, bio_op(s->orig_bio),
queue            1097 drivers/md/bcache/request.c 	generic_end_io_acct(ddip->d->disk->queue, bio_op(bio),
queue            1281 drivers/md/bcache/request.c 	g->queue->make_request_fn		= cached_dev_make_request;
queue            1282 drivers/md/bcache/request.c 	g->queue->backing_dev_info->congested_fn = cached_dev_congested;
queue            1390 drivers/md/bcache/request.c 	g->queue->make_request_fn		= flash_dev_make_request;
queue            1391 drivers/md/bcache/request.c 	g->queue->backing_dev_info->congested_fn = flash_dev_congested;
queue             795 drivers/md/bcache/super.c 		if (disk->queue)
queue             796 drivers/md/bcache/super.c 			blk_cleanup_queue(disk->queue);
queue             866 drivers/md/bcache/super.c 	d->disk->queue			= q;
queue             878 drivers/md/bcache/super.c 	blk_queue_flag_set(QUEUE_FLAG_NONROT, d->disk->queue);
queue             879 drivers/md/bcache/super.c 	blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, d->disk->queue);
queue             880 drivers/md/bcache/super.c 	blk_queue_flag_set(QUEUE_FLAG_DISCARD, d->disk->queue);
queue            1338 drivers/md/bcache/super.c 	dc->disk.disk->queue->backing_dev_info->ra_pages =
queue            1339 drivers/md/bcache/super.c 		max(dc->disk.disk->queue->backing_dev_info->ra_pages,
queue             269 drivers/md/dm-cache-policy-smq.c static void q_init(struct queue *q, struct entry_space *es, unsigned nr_levels)
queue             287 drivers/md/dm-cache-policy-smq.c static unsigned q_size(struct queue *q)
queue             295 drivers/md/dm-cache-policy-smq.c static void q_push(struct queue *q, struct entry *e)
queue             305 drivers/md/dm-cache-policy-smq.c static void q_push_front(struct queue *q, struct entry *e)
queue             315 drivers/md/dm-cache-policy-smq.c static void q_push_before(struct queue *q, struct entry *old, struct entry *e)
queue             325 drivers/md/dm-cache-policy-smq.c static void q_del(struct queue *q, struct entry *e)
queue             335 drivers/md/dm-cache-policy-smq.c static struct entry *q_peek(struct queue *q, unsigned max_level, bool can_cross_sentinel)
queue             357 drivers/md/dm-cache-policy-smq.c static struct entry *q_pop(struct queue *q)
queue             372 drivers/md/dm-cache-policy-smq.c static struct entry *__redist_pop_from(struct queue *q, unsigned level)
queue             386 drivers/md/dm-cache-policy-smq.c static void q_set_targets_subrange_(struct queue *q, unsigned nr_elts, unsigned lbegin, unsigned lend)
queue             405 drivers/md/dm-cache-policy-smq.c static void q_set_targets(struct queue *q)
queue             427 drivers/md/dm-cache-policy-smq.c static void q_redistribute(struct queue *q)
queue             470 drivers/md/dm-cache-policy-smq.c static void q_requeue(struct queue *q, struct entry *e, unsigned extra_levels,
queue             823 drivers/md/dm-cache-policy-smq.c 	struct queue hotspot;
queue             824 drivers/md/dm-cache-policy-smq.c 	struct queue clean;
queue             825 drivers/md/dm-cache-policy-smq.c 	struct queue dirty;
queue             880 drivers/md/dm-cache-policy-smq.c 	struct queue *q = &mq->dirty;
queue             893 drivers/md/dm-cache-policy-smq.c 	struct queue *q = &mq->clean;
queue              51 drivers/md/dm-core.h 	struct request_queue *queue;
queue            3142 drivers/md/dm-integrity.c 	blk_queue_max_integrity_segments(disk->queue, UINT_MAX);
queue              62 drivers/md/dm-rq.c 	return queue_is_mq(md->queue);
queue             566 drivers/md/dm-rq.c 	q = blk_mq_init_allocated_queue(md->tag_set, md->queue, true);
queue            2128 drivers/md/dm-table.c 	struct request_queue *queue;
queue            2134 drivers/md/dm-table.c 	queue = dm_get_md_queue(md);
queue            2135 drivers/md/dm-table.c 	if (queue)
queue            2136 drivers/md/dm-table.c 		blk_mq_run_hw_queues(queue, true);
queue             428 drivers/md/dm.c 	return md->queue;
queue             645 drivers/md/dm.c 	if (queue_is_mq(md->queue))
queue             646 drivers/md/dm.c 		return blk_mq_queue_inflight(md->queue);
queue             658 drivers/md/dm.c 	generic_start_io_acct(md->queue, bio_op(bio), bio_sectors(bio),
queue             673 drivers/md/dm.c 	generic_end_io_acct(md->queue, bio_op(bio), &dm_disk(md)->part0,
queue             954 drivers/md/dm.c 	blk_queue_flag_clear(QUEUE_FLAG_DISCARD, md->queue);
queue             983 drivers/md/dm.c 		    !bio->bi_disk->queue->limits.max_discard_sectors)
queue             986 drivers/md/dm.c 			 !bio->bi_disk->queue->limits.max_write_same_sectors)
queue             989 drivers/md/dm.c 			 !bio->bi_disk->queue->limits.max_write_zeroes_sectors)
queue            1289 drivers/md/dm.c 		trace_block_bio_remap(clone->bi_disk->queue, clone,
queue            1649 drivers/md/dm.c 							  GFP_NOIO, &md->queue->bio_split);
queue            1665 drivers/md/dm.c 				trace_block_split(md->queue, b, bio->bi_iter.bi_sector);
queue            1729 drivers/md/dm.c 		struct bio *split = bio_split(*bio, len, GFP_NOIO, &md->queue->bio_split);
queue            1732 drivers/md/dm.c 		trace_block_split(md->queue, split, (*bio)->bi_iter.bi_sector);
queue            1764 drivers/md/dm.c 			blk_queue_split(md->queue, &bio);
queue            1813 drivers/md/dm.c 			struct backing_dev_info *bdi = md->queue->backing_dev_info;
queue            1901 drivers/md/dm.c 	if (md->queue)
queue            1902 drivers/md/dm.c 		blk_cleanup_queue(md->queue);
queue            1963 drivers/md/dm.c 	md->queue = blk_alloc_queue_node(GFP_KERNEL, numa_node_id);
queue            1964 drivers/md/dm.c 	if (!md->queue)
queue            1966 drivers/md/dm.c 	md->queue->queuedata = md;
queue            1972 drivers/md/dm.c 	blk_queue_make_request(md->queue, dm_make_request);
queue            1986 drivers/md/dm.c 	md->disk->queue = md->queue;
queue            2129 drivers/md/dm.c 	struct request_queue *q = md->queue;
queue            2262 drivers/md/dm.c 	return &md->queue->limits;
queue            2268 drivers/md/dm.c 	md->queue->backing_dev_info->congested_data = md;
queue            2269 drivers/md/dm.c 	md->queue->backing_dev_info->congested_fn = dm_any_congested;
queue            2305 drivers/md/dm.c 	dm_table_set_restrictions(t, md->queue, &limits);
queue            2382 drivers/md/dm.c 	blk_set_queue_dying(md->queue);
queue            2517 drivers/md/dm.c 			limits = md->queue->limits;
queue            2638 drivers/md/dm.c 		dm_stop_queue(md->queue);
queue            2661 drivers/md/dm.c 			dm_start_queue(md->queue);
queue            2738 drivers/md/dm.c 		dm_start_queue(md->queue);
queue            1030 drivers/md/md-bitmap.c 				if (bitmap->mddev->queue)
queue            1031 drivers/md/md-bitmap.c 					blk_add_trace_msg(bitmap->mddev->queue,
queue            1258 drivers/md/md-bitmap.c 	if (bitmap->mddev->queue)
queue            1259 drivers/md/md-bitmap.c 		blk_add_trace_msg(bitmap->mddev->queue,
queue             133 drivers/md/md-linear.c 		blk_queue_flag_clear(QUEUE_FLAG_DISCARD, mddev->queue);
queue             135 drivers/md/md-linear.c 		blk_queue_flag_set(QUEUE_FLAG_DISCARD, mddev->queue);
queue             279 drivers/md/md-linear.c 		     !blk_queue_discard(bio->bi_disk->queue))) {
queue             284 drivers/md/md-linear.c 			trace_block_bio_remap(bio->bi_disk->queue,
queue            1615 drivers/md/md.c 	bmask = queue_logical_block_size(rdev->bdev->bd_disk->queue)-1;
queue            2029 drivers/md/md.c 		bmask = queue_logical_block_size(rdev->bdev->bd_disk->queue)-1;
queue            2679 drivers/md/md.c 	if (mddev->queue)
queue            2680 drivers/md/md.c 		blk_add_trace_msg(mddev->queue, "md md_update_sb");
queue            3975 drivers/md/md.c 	blk_set_stacking_limits(&mddev->queue->limits);
queue            5377 drivers/md/md.c 	if (mddev->queue)
queue            5378 drivers/md/md.c 		blk_cleanup_queue(mddev->queue);
queue            5481 drivers/md/md.c 	mddev->queue = blk_alloc_queue(GFP_KERNEL);
queue            5482 drivers/md/md.c 	if (!mddev->queue)
queue            5484 drivers/md/md.c 	mddev->queue->queuedata = mddev;
queue            5486 drivers/md/md.c 	blk_queue_make_request(mddev->queue, md_make_request);
queue            5487 drivers/md/md.c 	blk_set_stacking_limits(&mddev->queue->limits);
queue            5491 drivers/md/md.c 		blk_cleanup_queue(mddev->queue);
queue            5492 drivers/md/md.c 		mddev->queue = NULL;
queue            5505 drivers/md/md.c 	disk->queue = mddev->queue;
queue            5506 drivers/md/md.c 	blk_queue_write_cache(mddev->queue, true, true);
queue            5790 drivers/md/md.c 	if (mddev->queue) {
queue            5803 drivers/md/md.c 			blk_queue_flag_set(QUEUE_FLAG_NONROT, mddev->queue);
queue            5805 drivers/md/md.c 			blk_queue_flag_clear(QUEUE_FLAG_NONROT, mddev->queue);
queue            5806 drivers/md/md.c 		mddev->queue->backing_dev_info->congested_data = mddev;
queue            5807 drivers/md/md.c 		mddev->queue->backing_dev_info->congested_fn = md_congested;
queue            6048 drivers/md/md.c 	if (mddev->queue)
queue            6049 drivers/md/md.c 		blk_sync_queue(mddev->queue); /* the unplug fn references 'conf'*/
queue            6189 drivers/md/md.c 		mddev->queue->backing_dev_info->congested_fn = NULL;
queue            7032 drivers/md/md.c 		else if (mddev->queue) {
queue            8793 drivers/md/md.c 			mddev->queue) {
queue             442 drivers/md/md.h 	struct request_queue		*queue;	/* for plugging ... */
queue             783 drivers/md/md.h 	    !bio->bi_disk->queue->limits.max_write_same_sectors)
queue             784 drivers/md/md.h 		mddev->queue->limits.max_write_same_sectors = 0;
queue             790 drivers/md/md.h 	    !bio->bi_disk->queue->limits.max_write_zeroes_sectors)
queue             791 drivers/md/md.h 		mddev->queue->limits.max_write_zeroes_sectors = 0;
queue             107 drivers/md/raid0.c 				      rdev1->bdev->bd_disk->queue));
queue             396 drivers/md/raid0.c 	if (mddev->queue) {
queue             400 drivers/md/raid0.c 		blk_queue_max_hw_sectors(mddev->queue, mddev->chunk_sectors);
queue             401 drivers/md/raid0.c 		blk_queue_max_write_same_sectors(mddev->queue, mddev->chunk_sectors);
queue             402 drivers/md/raid0.c 		blk_queue_max_write_zeroes_sectors(mddev->queue, mddev->chunk_sectors);
queue             403 drivers/md/raid0.c 		blk_queue_max_discard_sectors(mddev->queue, UINT_MAX);
queue             405 drivers/md/raid0.c 		blk_queue_io_min(mddev->queue, mddev->chunk_sectors << 9);
queue             406 drivers/md/raid0.c 		blk_queue_io_opt(mddev->queue,
queue             416 drivers/md/raid0.c 			blk_queue_flag_clear(QUEUE_FLAG_DISCARD, mddev->queue);
queue             418 drivers/md/raid0.c 			blk_queue_flag_set(QUEUE_FLAG_DISCARD, mddev->queue);
queue             428 drivers/md/raid0.c 	if (mddev->queue) {
queue             440 drivers/md/raid0.c 		if (mddev->queue->backing_dev_info->ra_pages < 2* stripe)
queue             441 drivers/md/raid0.c 			mddev->queue->backing_dev_info->ra_pages = 2* stripe;
queue             632 drivers/md/raid0.c 		trace_block_bio_remap(bio->bi_disk->queue, bio,
queue              49 drivers/md/raid1.c 	do { if ((md)->queue) blk_add_trace_msg((md)->queue, "raid1 " fmt, ##args); } while (0)
queue             816 drivers/md/raid1.c 				    !blk_queue_discard(bio->bi_disk->queue)))
queue            1320 drivers/md/raid1.c 	        trace_block_bio_remap(read_bio->bi_disk->queue, read_bio,
queue            1537 drivers/md/raid1.c 			trace_block_bio_remap(mbio->bi_disk->queue,
queue            1800 drivers/md/raid1.c 	if (mddev->queue && blk_queue_discard(bdev_get_queue(rdev->bdev)))
queue            1801 drivers/md/raid1.c 		blk_queue_flag_set(QUEUE_FLAG_DISCARD, mddev->queue);
queue            3111 drivers/md/raid1.c 	if (mddev->queue) {
queue            3112 drivers/md/raid1.c 		blk_queue_max_write_same_sectors(mddev->queue, 0);
queue            3113 drivers/md/raid1.c 		blk_queue_max_write_zeroes_sectors(mddev->queue, 0);
queue            3159 drivers/md/raid1.c 	if (mddev->queue) {
queue            3162 drivers/md/raid1.c 						mddev->queue);
queue            3165 drivers/md/raid1.c 						  mddev->queue);
queue              78 drivers/md/raid10.c 	do { if ((md)->queue) blk_add_trace_msg((md)->queue, "raid10 " fmt, ##args); } while (0)
queue             916 drivers/md/raid10.c 					    !blk_queue_discard(bio->bi_disk->queue)))
queue            1101 drivers/md/raid10.c 				    !blk_queue_discard(bio->bi_disk->queue)))
queue            1221 drivers/md/raid10.c 	        trace_block_bio_remap(read_bio->bi_disk->queue,
queue            1271 drivers/md/raid10.c 		trace_block_bio_remap(mbio->bi_disk->queue,
queue            1815 drivers/md/raid10.c 	if (mddev->queue && blk_queue_discard(bdev_get_queue(rdev->bdev)))
queue            1816 drivers/md/raid10.c 		blk_queue_flag_set(QUEUE_FLAG_DISCARD, mddev->queue);
queue            3763 drivers/md/raid10.c 	if (mddev->queue) {
queue            3764 drivers/md/raid10.c 		blk_queue_max_discard_sectors(mddev->queue,
queue            3766 drivers/md/raid10.c 		blk_queue_max_write_same_sectors(mddev->queue, 0);
queue            3767 drivers/md/raid10.c 		blk_queue_max_write_zeroes_sectors(mddev->queue, 0);
queue            3768 drivers/md/raid10.c 		blk_queue_io_min(mddev->queue, chunk_size);
queue            3770 drivers/md/raid10.c 			blk_queue_io_opt(mddev->queue, chunk_size * conf->geo.raid_disks);
queue            3772 drivers/md/raid10.c 			blk_queue_io_opt(mddev->queue, chunk_size *
queue            3815 drivers/md/raid10.c 	if (mddev->queue) {
queue            3818 drivers/md/raid10.c 						mddev->queue);
queue            3821 drivers/md/raid10.c 						  mddev->queue);
queue            3888 drivers/md/raid10.c 	if (mddev->queue) {
queue            3897 drivers/md/raid10.c 		if (mddev->queue->backing_dev_info->ra_pages < 2 * stripe)
queue            3898 drivers/md/raid10.c 			mddev->queue->backing_dev_info->ra_pages = 2 * stripe;
queue            4741 drivers/md/raid10.c 	if (conf->mddev->queue) {
queue            4745 drivers/md/raid10.c 		if (conf->mddev->queue->backing_dev_info->ra_pages < 2 * stripe)
queue            4746 drivers/md/raid10.c 			conf->mddev->queue->backing_dev_info->ra_pages = 2 * stripe;
queue            1413 drivers/md/raid5-ppl.c 		ppl_conf->block_size = queue_logical_block_size(mddev->queue);
queue            1148 drivers/md/raid5.c 				trace_block_bio_remap(bi->bi_disk->queue,
queue            1198 drivers/md/raid5.c 				trace_block_bio_remap(rbi->bi_disk->queue,
queue            3922 drivers/md/raid5.c 		if (conf->mddev->queue)
queue            3923 drivers/md/raid5.c 			blk_add_trace_msg(conf->mddev->queue,
queue            4006 drivers/md/raid5.c 		if (rcw && conf->mddev->queue)
queue            4007 drivers/md/raid5.c 			blk_add_trace_msg(conf->mddev->queue, "raid5 rcw %llu %d %d %d",
queue            5282 drivers/md/raid5.c 			trace_block_bio_remap(align_bi->bi_disk->queue,
queue            5457 drivers/md/raid5.c 	if (mddev->queue)
queue            5458 drivers/md/raid5.c 		trace_block_unplug(mddev->queue, cnt, !from_schedule);
queue            6557 drivers/md/raid5.c 			mddev->queue->backing_dev_info->capabilities |=
queue            6560 drivers/md/raid5.c 			mddev->queue->backing_dev_info->capabilities &=
queue            7424 drivers/md/raid5.c 	if (mddev->queue) {
queue            7433 drivers/md/raid5.c 		if (mddev->queue->backing_dev_info->ra_pages < 2 * stripe)
queue            7434 drivers/md/raid5.c 			mddev->queue->backing_dev_info->ra_pages = 2 * stripe;
queue            7437 drivers/md/raid5.c 		blk_queue_io_min(mddev->queue, chunk_size);
queue            7438 drivers/md/raid5.c 		blk_queue_io_opt(mddev->queue, chunk_size *
queue            7440 drivers/md/raid5.c 		mddev->queue->limits.raid_partial_stripes_expensive = 1;
queue            7450 drivers/md/raid5.c 		mddev->queue->limits.discard_alignment = stripe;
queue            7451 drivers/md/raid5.c 		mddev->queue->limits.discard_granularity = stripe;
queue            7453 drivers/md/raid5.c 		blk_queue_max_write_same_sectors(mddev->queue, 0);
queue            7454 drivers/md/raid5.c 		blk_queue_max_write_zeroes_sectors(mddev->queue, 0);
queue            7479 drivers/md/raid5.c 		    mddev->queue->limits.max_discard_sectors >= (stripe >> 9) &&
queue            7480 drivers/md/raid5.c 		    mddev->queue->limits.discard_granularity >= stripe)
queue            7482 drivers/md/raid5.c 						mddev->queue);
queue            7485 drivers/md/raid5.c 						mddev->queue);
queue            7487 drivers/md/raid5.c 		blk_queue_max_hw_sectors(mddev->queue, UINT_MAX);
queue            8026 drivers/md/raid5.c 		if (conf->mddev->queue) {
queue            8030 drivers/md/raid5.c 			if (conf->mddev->queue->backing_dev_info->ra_pages < 2 * stripe)
queue            8031 drivers/md/raid5.c 				conf->mddev->queue->backing_dev_info->ra_pages = 2 * stripe;
queue              84 drivers/media/common/saa7146/saa7146_fops.c 		list_add_tail(&buf->vb.queue,&q->queue);
queue             125 drivers/media/common/saa7146/saa7146_fops.c 	if (!list_empty(&q->queue)) {
queue             127 drivers/media/common/saa7146/saa7146_fops.c 		buf = list_entry(q->queue.next,struct saa7146_buf,vb.queue);
queue             128 drivers/media/common/saa7146/saa7146_fops.c 		list_del(&buf->vb.queue);
queue             129 drivers/media/common/saa7146/saa7146_fops.c 		if (!list_empty(&q->queue))
queue             130 drivers/media/common/saa7146/saa7146_fops.c 			next = list_entry(q->queue.next,struct saa7146_buf, vb.queue);
queue             133 drivers/media/common/saa7146/saa7146_fops.c 			buf, q->queue.prev, q->queue.next);
queue             368 drivers/media/common/saa7146/saa7146_vbi.c 	INIT_LIST_HEAD(&vv->vbi_dmaq.queue);
queue            1179 drivers/media/common/saa7146/saa7146_video.c 	INIT_LIST_HEAD(&vv->video_dmaq.queue);
queue            1392 drivers/media/common/videobuf2/videobuf2-core.c 	.queue = vb2_req_queue,
queue             902 drivers/media/common/videobuf2/videobuf2-v4l2.c 	return vdev->queue->owner && vdev->queue->owner != file->private_data;
queue             911 drivers/media/common/videobuf2/videobuf2-v4l2.c 	int res = vb2_verify_memory_type(vdev->queue, p->memory, p->type);
queue             913 drivers/media/common/videobuf2/videobuf2-v4l2.c 	fill_buf_caps(vdev->queue, &p->capabilities);
queue             918 drivers/media/common/videobuf2/videobuf2-v4l2.c 	res = vb2_core_reqbufs(vdev->queue, p->memory, &p->count);
queue             922 drivers/media/common/videobuf2/videobuf2-v4l2.c 		vdev->queue->owner = p->count ? file->private_data : NULL;
queue             931 drivers/media/common/videobuf2/videobuf2-v4l2.c 	int res = vb2_verify_memory_type(vdev->queue, p->memory,
queue             934 drivers/media/common/videobuf2/videobuf2-v4l2.c 	p->index = vdev->queue->num_buffers;
queue             935 drivers/media/common/videobuf2/videobuf2-v4l2.c 	fill_buf_caps(vdev->queue, &p->capabilities);
queue             947 drivers/media/common/videobuf2/videobuf2-v4l2.c 	res = vb2_create_bufs(vdev->queue, p);
queue             949 drivers/media/common/videobuf2/videobuf2-v4l2.c 		vdev->queue->owner = file->private_data;
queue             961 drivers/media/common/videobuf2/videobuf2-v4l2.c 	return vb2_prepare_buf(vdev->queue, vdev->v4l2_dev->mdev, p);
queue             970 drivers/media/common/videobuf2/videobuf2-v4l2.c 	return vb2_querybuf(vdev->queue, p);
queue             980 drivers/media/common/videobuf2/videobuf2-v4l2.c 	return vb2_qbuf(vdev->queue, vdev->v4l2_dev->mdev, p);
queue             990 drivers/media/common/videobuf2/videobuf2-v4l2.c 	return vb2_dqbuf(vdev->queue, p, file->f_flags & O_NONBLOCK);
queue            1000 drivers/media/common/videobuf2/videobuf2-v4l2.c 	return vb2_streamon(vdev->queue, i);
queue            1010 drivers/media/common/videobuf2/videobuf2-v4l2.c 	return vb2_streamoff(vdev->queue, i);
queue            1020 drivers/media/common/videobuf2/videobuf2-v4l2.c 	return vb2_expbuf(vdev->queue, p);
queue            1030 drivers/media/common/videobuf2/videobuf2-v4l2.c 	return vb2_mmap(vdev->queue, vma);
queue            1040 drivers/media/common/videobuf2/videobuf2-v4l2.c 	if (file->private_data == vdev->queue->owner) {
queue            1041 drivers/media/common/videobuf2/videobuf2-v4l2.c 		vb2_queue_release(vdev->queue);
queue            1042 drivers/media/common/videobuf2/videobuf2-v4l2.c 		vdev->queue->owner = NULL;
queue            1053 drivers/media/common/videobuf2/videobuf2-v4l2.c 	struct mutex *lock = vdev->queue->lock ? vdev->queue->lock : vdev->lock;
queue            1063 drivers/media/common/videobuf2/videobuf2-v4l2.c 	struct mutex *lock = vdev->queue->lock ? vdev->queue->lock : vdev->lock;
queue            1066 drivers/media/common/videobuf2/videobuf2-v4l2.c 	if (!(vdev->queue->io_modes & VB2_WRITE))
queue            1072 drivers/media/common/videobuf2/videobuf2-v4l2.c 	err = vb2_write(vdev->queue, buf, count, ppos,
queue            1074 drivers/media/common/videobuf2/videobuf2-v4l2.c 	if (vdev->queue->fileio)
queue            1075 drivers/media/common/videobuf2/videobuf2-v4l2.c 		vdev->queue->owner = file->private_data;
queue            1087 drivers/media/common/videobuf2/videobuf2-v4l2.c 	struct mutex *lock = vdev->queue->lock ? vdev->queue->lock : vdev->lock;
queue            1090 drivers/media/common/videobuf2/videobuf2-v4l2.c 	if (!(vdev->queue->io_modes & VB2_READ))
queue            1096 drivers/media/common/videobuf2/videobuf2-v4l2.c 	err = vb2_read(vdev->queue, buf, count, ppos,
queue            1098 drivers/media/common/videobuf2/videobuf2-v4l2.c 	if (vdev->queue->fileio)
queue            1099 drivers/media/common/videobuf2/videobuf2-v4l2.c 		vdev->queue->owner = file->private_data;
queue            1110 drivers/media/common/videobuf2/videobuf2-v4l2.c 	struct vb2_queue *q = vdev->queue;
queue            1126 drivers/media/common/videobuf2/videobuf2-v4l2.c 	res = vb2_poll(vdev->queue, file, wait);
queue            1143 drivers/media/common/videobuf2/videobuf2-v4l2.c 	return vb2_get_unmapped_area(vdev->queue, addr, len, pgoff, flags);
queue            1208 drivers/media/common/videobuf2/videobuf2-v4l2.c 		if (obj->ops->queue)
queue            1209 drivers/media/common/videobuf2/videobuf2-v4l2.c 			obj->ops->queue(obj);
queue              86 drivers/media/dvb-core/dmxdev.c 		ret = wait_event_interruptible(src->queue,
queue             371 drivers/media/dvb-core/dmxdev.c 	wake_up(&dmxdevfilter->buffer.queue);
queue             396 drivers/media/dvb-core/dmxdev.c 		wake_up(&dmxdevfilter->buffer.queue);
queue             427 drivers/media/dvb-core/dmxdev.c 	wake_up(&dmxdevfilter->buffer.queue);
queue             471 drivers/media/dvb-core/dmxdev.c 			wake_up(&buffer->queue);
queue             482 drivers/media/dvb-core/dmxdev.c 	wake_up(&buffer->queue);
queue             858 drivers/media/dvb-core/dmxdev.c 	wake_up(&dmxdevfilter->buffer.queue);
queue            1198 drivers/media/dvb-core/dmxdev.c 	poll_wait(file, &dmxdevfilter->buffer.queue, wait);
queue            1349 drivers/media/dvb-core/dmxdev.c 	poll_wait(file, &dmxdev->dvr_buffer.queue, wait);
queue              45 drivers/media/dvb-core/dvb_ringbuffer.c 	init_waitqueue_head(&rbuf->queue);
queue             130 drivers/media/dvb-core/dvb_ringbuffer.c 	wake_up(&rbuf->queue);
queue            1424 drivers/media/dvb-frontends/rtl2832_sdr.c 	dev->vdev.queue = &dev->vb_queue;
queue            1425 drivers/media/dvb-frontends/rtl2832_sdr.c 	dev->vdev.queue->lock = &dev->vb_queue_lock;
queue             759 drivers/media/i2c/video-i2c.c 	struct vb2_queue *queue;
queue             789 drivers/media/i2c/video-i2c.c 	queue = &data->vb_vidq;
queue             790 drivers/media/i2c/video-i2c.c 	queue->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
queue             791 drivers/media/i2c/video-i2c.c 	queue->io_modes = VB2_DMABUF | VB2_MMAP | VB2_USERPTR | VB2_READ;
queue             792 drivers/media/i2c/video-i2c.c 	queue->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
queue             793 drivers/media/i2c/video-i2c.c 	queue->drv_priv = data;
queue             794 drivers/media/i2c/video-i2c.c 	queue->buf_struct_size = sizeof(struct video_i2c_buffer);
queue             795 drivers/media/i2c/video-i2c.c 	queue->min_buffers_needed = 1;
queue             796 drivers/media/i2c/video-i2c.c 	queue->ops = &video_i2c_video_qops;
queue             797 drivers/media/i2c/video-i2c.c 	queue->mem_ops = &vb2_vmalloc_memops;
queue             799 drivers/media/i2c/video-i2c.c 	ret = vb2_queue_init(queue);
queue             803 drivers/media/i2c/video-i2c.c 	data->vdev.queue = queue;
queue             804 drivers/media/i2c/video-i2c.c 	data->vdev.queue->lock = &data->queue_lock;
queue            1660 drivers/media/pci/bt8xx/bttv-driver.c 	list_add_tail(&buf->vb.queue,&btv->capture);
queue            3473 drivers/media/pci/bt8xx/bttv-driver.c 		item = list_entry(btv->capture.next, struct bttv_buffer, vb.queue);
queue            3481 drivers/media/pci/bt8xx/bttv-driver.c 		    (item->vb.queue.next != &btv->capture)) {
queue            3482 drivers/media/pci/bt8xx/bttv-driver.c 			item = list_entry(item->vb.queue.next, struct bttv_buffer, vb.queue);
queue            3632 drivers/media/pci/bt8xx/bttv-driver.c 		item = list_entry(btv->capture.next, struct bttv_buffer, vb.queue);
queue            3633 drivers/media/pci/bt8xx/bttv-driver.c 		list_del(&item->vb.queue);
queue            3638 drivers/media/pci/bt8xx/bttv-driver.c 		item = list_entry(btv->vcapture.next, struct bttv_buffer, vb.queue);
queue            3639 drivers/media/pci/bt8xx/bttv-driver.c 		list_del(&item->vb.queue);
queue            3726 drivers/media/pci/bt8xx/bttv-driver.c 		new = list_entry(btv->vcapture.next, struct bttv_buffer, vb.queue);
queue             602 drivers/media/pci/bt8xx/bttv-risc.c 		list_del(&vbi->vb.queue);
queue             644 drivers/media/pci/bt8xx/bttv-risc.c 			if (set->top->vb.queue.next)
queue             645 drivers/media/pci/bt8xx/bttv-risc.c 				list_del(&set->top->vb.queue);
queue             649 drivers/media/pci/bt8xx/bttv-risc.c 			if (set->top->vb.queue.next)
queue             650 drivers/media/pci/bt8xx/bttv-risc.c 				list_del(&set->top->vb.queue);
queue             651 drivers/media/pci/bt8xx/bttv-risc.c 			if (set->bottom->vb.queue.next)
queue             652 drivers/media/pci/bt8xx/bttv-risc.c 				list_del(&set->bottom->vb.queue);
queue             666 drivers/media/pci/bt8xx/bttv-risc.c 		if (set->top->vb.queue.next)
queue             667 drivers/media/pci/bt8xx/bttv-risc.c 			list_del(&set->top->vb.queue);
queue             677 drivers/media/pci/bt8xx/bttv-risc.c 		if (set->bottom->vb.queue.next)
queue             678 drivers/media/pci/bt8xx/bttv-risc.c 			list_del(&set->bottom->vb.queue);
queue             207 drivers/media/pci/bt8xx/bttv-vbi.c 	list_add_tail(&buf->vb.queue,&btv->vcapture);
queue            1265 drivers/media/pci/cobalt/cobalt-v4l2.c 	vdev->queue = q;
queue             690 drivers/media/pci/cx18/cx18-fileops.c 			struct cx18_videobuf_buffer, vb.queue);
queue             691 drivers/media/pci/cx18/cx18-fileops.c 		list_del(&buf->vb.queue);
queue             166 drivers/media/pci/cx18/cx18-mailbox.c 		vb.queue);
queue             192 drivers/media/pci/cx18/cx18-mailbox.c 		list_del(&vb_buf->vb.queue);
queue             231 drivers/media/pci/cx18/cx18-streams.c 	list_add_tail(&buf->vb.queue, &s->vb_capture);
queue            1177 drivers/media/pci/cx23885/cx23885-417.c 			struct cx23885_buffer, queue);
queue            1185 drivers/media/pci/cx23885/cx23885-417.c 			struct cx23885_buffer, queue);
queue            1187 drivers/media/pci/cx23885/cx23885-417.c 		list_del(&buf->queue);
queue            1542 drivers/media/pci/cx23885/cx23885-417.c 	dev->v4l_device->queue = q;
queue             435 drivers/media/pci/cx23885/cx23885-core.c 				 struct cx23885_buffer, queue);
queue             446 drivers/media/pci/cx23885/cx23885-core.c 		list_del(&buf->queue);
queue            1626 drivers/media/pci/cx23885/cx23885-core.c 		list_add_tail(&buf->queue, &cx88q->active);
queue            1632 drivers/media/pci/cx23885/cx23885-core.c 				  queue);
queue            1633 drivers/media/pci/cx23885/cx23885-core.c 		list_add_tail(&buf->queue, &cx88q->active);
queue            1652 drivers/media/pci/cx23885/cx23885-core.c 				 queue);
queue            1653 drivers/media/pci/cx23885/cx23885-core.c 		list_del(&buf->queue);
queue             155 drivers/media/pci/cx23885/cx23885-dvb.c 			struct cx23885_buffer, queue);
queue             199 drivers/media/pci/cx23885/cx23885-vbi.c 		list_add_tail(&buf->queue, &q->active);
queue             207 drivers/media/pci/cx23885/cx23885-vbi.c 			queue);
queue             209 drivers/media/pci/cx23885/cx23885-vbi.c 		list_add_tail(&buf->queue, &q->active);
queue             222 drivers/media/pci/cx23885/cx23885-vbi.c 			struct cx23885_buffer, queue);
queue             238 drivers/media/pci/cx23885/cx23885-vbi.c 			struct cx23885_buffer, queue);
queue             240 drivers/media/pci/cx23885/cx23885-vbi.c 		list_del(&buf->queue);
queue              96 drivers/media/pci/cx23885/cx23885-video.c 			struct cx23885_buffer, queue);
queue             102 drivers/media/pci/cx23885/cx23885-video.c 	list_del(&buf->queue);
queue             469 drivers/media/pci/cx23885/cx23885-video.c 		list_add_tail(&buf->queue, &q->active);
queue             475 drivers/media/pci/cx23885/cx23885-video.c 			queue);
queue             476 drivers/media/pci/cx23885/cx23885-video.c 		list_add_tail(&buf->queue, &q->active);
queue             489 drivers/media/pci/cx23885/cx23885-video.c 			struct cx23885_buffer, queue);
queue             505 drivers/media/pci/cx23885/cx23885-video.c 			struct cx23885_buffer, queue);
queue             507 drivers/media/pci/cx23885/cx23885-video.c 		list_del(&buf->queue);
queue            1302 drivers/media/pci/cx23885/cx23885-video.c 	dev->video_dev->queue = &dev->vb2_vidq;
queue            1320 drivers/media/pci/cx23885/cx23885-video.c 	dev->vbi_dev->queue = &dev->vb2_vbiq;
queue             174 drivers/media/pci/cx23885/cx23885.h 	struct list_head queue;
queue             114 drivers/media/pci/cx25821/cx25821-video.c 					 struct cx25821_buffer, queue);
queue             118 drivers/media/pci/cx25821/cx25821-video.c 			list_del(&buf->queue);
queue             251 drivers/media/pci/cx25821/cx25821-video.c 		list_add_tail(&buf->queue, &q->active);
queue             255 drivers/media/pci/cx25821/cx25821-video.c 				queue);
queue             256 drivers/media/pci/cx25821/cx25821-video.c 		list_add_tail(&buf->queue, &q->active);
queue             267 drivers/media/pci/cx25821/cx25821-video.c 			struct cx25821_buffer, queue);
queue             285 drivers/media/pci/cx25821/cx25821-video.c 			struct cx25821_buffer, queue);
queue             287 drivers/media/pci/cx25821/cx25821-video.c 		list_del(&buf->queue);
queue             756 drivers/media/pci/cx25821/cx25821-video.c 		vdev->queue = q;
queue             115 drivers/media/pci/cx25821/cx25821.h 	struct list_head queue;
queue            1136 drivers/media/pci/cx88/cx88-blackbird.c 	dev->mpeg_dev.queue = &dev->vb2_mpegq;
queue            1449 drivers/media/pci/cx88/cx88-video.c 	dev->video_dev.queue = &dev->vb2_vidq;
queue            1466 drivers/media/pci/cx88/cx88-video.c 	dev->vbi_dev.queue = &dev->vb2_vbiq;
queue             529 drivers/media/pci/dt3155/dt3155.c 	pd->vdev.queue = &pd->vidq;
queue            1405 drivers/media/pci/intel/ipu3/ipu3-cio2.c 	if (cio2->queue[s_asd->csi2.port].sensor)
queue            1408 drivers/media/pci/intel/ipu3/ipu3-cio2.c 	q = &cio2->queue[s_asd->csi2.port];
queue            1427 drivers/media/pci/intel/ipu3/ipu3-cio2.c 	cio2->queue[s_asd->csi2.port].sensor = NULL;
queue            1443 drivers/media/pci/intel/ipu3/ipu3-cio2.c 		q = &cio2->queue[s_asd->csi2.port];
queue            1612 drivers/media/pci/intel/ipu3/ipu3-cio2.c 		 CIO2_ENTITY_NAME " %td", q - cio2->queue);
queue            1641 drivers/media/pci/intel/ipu3/ipu3-cio2.c 		 "%s %td", CIO2_NAME, q - cio2->queue);
queue            1647 drivers/media/pci/intel/ipu3/ipu3-cio2.c 	vdev->queue = &q->vbq;
queue            1700 drivers/media/pci/intel/ipu3/ipu3-cio2.c 		r = cio2_queue_init(cio2, &cio2->queue[i]);
queue            1709 drivers/media/pci/intel/ipu3/ipu3-cio2.c 		cio2_queue_exit(cio2, &cio2->queue[i]);
queue            1719 drivers/media/pci/intel/ipu3/ipu3-cio2.c 		cio2_queue_exit(cio2, &cio2->queue[i]);
queue             358 drivers/media/pci/intel/ipu3/ipu3-cio2.h 	struct cio2_queue queue[CIO2_QUEUES];
queue              43 drivers/media/pci/ngene/ngene-dvb.c 	if (wait_event_interruptible(dev->tsout_rbuf.queue,
queue              64 drivers/media/pci/ngene/ngene-dvb.c 			    dev->tsin_rbuf.queue,
queue              86 drivers/media/pci/ngene/ngene-dvb.c 	poll_wait(file, &rbuf->queue, wait);
queue              87 drivers/media/pci/ngene/ngene-dvb.c 	poll_wait(file, &wbuf->queue, wait);
queue             155 drivers/media/pci/ngene/ngene-dvb.c 			wake_up(&dev->tsin_rbuf.queue);
queue             269 drivers/media/pci/ngene/ngene-dvb.c 	wake_up_interruptible(&dev->tsout_rbuf.queue);
queue             276 drivers/media/pci/saa7134/saa7134-core.c 		} else if (list_empty(&q->queue)) {
queue             277 drivers/media/pci/saa7134/saa7134-core.c 			list_add_tail(&buf->entry, &q->queue);
queue             279 drivers/media/pci/saa7134/saa7134-core.c 			next = list_entry(q->queue.next, struct saa7134_buf,
queue             285 drivers/media/pci/saa7134/saa7134-core.c 		list_add_tail(&buf->entry, &q->queue);
queue             312 drivers/media/pci/saa7134/saa7134-core.c 	if (!list_empty(&q->queue)) {
queue             314 drivers/media/pci/saa7134/saa7134-core.c 		buf = list_entry(q->queue.next, struct saa7134_buf, entry);
queue             316 drivers/media/pci/saa7134/saa7134-core.c 			buf, q->queue.prev, q->queue.next);
queue             318 drivers/media/pci/saa7134/saa7134-core.c 		if (!list_empty(&q->queue))
queue             319 drivers/media/pci/saa7134/saa7134-core.c 			next = list_entry(q->queue.next, struct saa7134_buf, entry);
queue             323 drivers/media/pci/saa7134/saa7134-core.c 			q->queue.prev, q->queue.next);
queue             362 drivers/media/pci/saa7134/saa7134-core.c 	if (!list_empty(&q->queue)) {
queue             363 drivers/media/pci/saa7134/saa7134-core.c 		list_for_each_safe(pos, n, &q->queue) {
queue            1208 drivers/media/pci/saa7134/saa7134-core.c 	dev->video_dev->queue = &dev->video_vbq;
queue            1230 drivers/media/pci/saa7134/saa7134-core.c 	dev->vbi_dev->queue = &dev->vbi_vbq;
queue            1392 drivers/media/pci/saa7134/saa7134-core.c 	if (!list_empty(&q->queue))
queue            1393 drivers/media/pci/saa7134/saa7134-core.c 		next = list_entry(q->queue.next, struct saa7134_buf,
queue             287 drivers/media/pci/saa7134/saa7134-empress.c 	dev->empress_dev->queue = q;
queue             137 drivers/media/pci/saa7134/saa7134-ts.c 		list_for_each_entry_safe(buf, tmp, &dmaq->queue, entry) {
queue             216 drivers/media/pci/saa7134/saa7134-ts.c 	INIT_LIST_HEAD(&dev->ts_q.queue);
queue             174 drivers/media/pci/saa7134/saa7134-vbi.c 	INIT_LIST_HEAD(&dev->vbi_q.queue);
queue             989 drivers/media/pci/saa7134/saa7134-video.c 		list_for_each_entry_safe(buf, tmp, &dmaq->queue, entry) {
queue            2090 drivers/media/pci/saa7134/saa7134-video.c 	INIT_LIST_HEAD(&dev->video_q.queue);
queue             479 drivers/media/pci/saa7134/saa7134.h 	struct list_head           queue;
queue             739 drivers/media/pci/saa7134/saa7134.h 	return vdev->queue == &dev->empress_vbq;
queue            1304 drivers/media/pci/solo6x10/solo6x10-v4l2-enc.c 	solo_enc->vfd->queue = &solo_enc->vidq;
queue             657 drivers/media/pci/solo6x10/solo6x10-v4l2.c 	solo_dev->vfd->queue = &solo_dev->vidq;
queue            1068 drivers/media/pci/sta2x11/sta2x11_vip.c 	vip->video_dev.queue = &vip->vb_vidq;
queue             567 drivers/media/pci/ttpci/av7110.c 		wake_up(&cibuf->queue);
queue             227 drivers/media/pci/ttpci/av7110_av.c 		wake_up(&buf->queue);
queue             233 drivers/media/pci/ttpci/av7110_av.c 			wake_up(&buf->queue);
queue             253 drivers/media/pci/ttpci/av7110_av.c 		wake_up(&buf->queue);
queue             261 drivers/media/pci/ttpci/av7110_av.c 	wake_up(&buf->queue);
queue             392 drivers/media/pci/ttpci/av7110_av.c 			if (wait_event_interruptible(rbuf->queue,
queue             452 drivers/media/pci/ttpci/av7110_av.c 			if (wait_event_interruptible(rb->queue, FREE_COND_TS))
queue             485 drivers/media/pci/ttpci/av7110_av.c 			if (wait_event_interruptible(av7110->avout.queue,
queue             518 drivers/media/pci/ttpci/av7110_av.c 			if (wait_event_interruptible(av7110->avout.queue,
queue             547 drivers/media/pci/ttpci/av7110_av.c 			if (wait_event_interruptible(av7110->aout.queue,
queue             934 drivers/media/pci/ttpci/av7110_av.c 		poll_wait(file, &av7110->avout.queue, wait);
queue             985 drivers/media/pci/ttpci/av7110_av.c 	poll_wait(file, &av7110->aout.queue, wait);
queue              66 drivers/media/pci/ttpci/av7110_ca.c 	wake_up_interruptible(&cibuf->queue);
queue             158 drivers/media/pci/ttpci/av7110_ca.c 		if (wait_event_interruptible(cibuf->queue,
queue             183 drivers/media/pci/ttpci/av7110_ca.c 	if (wait_event_interruptible(cibuf->queue,
queue             222 drivers/media/pci/ttpci/av7110_ca.c 	poll_wait(file, &rbuf->queue, wait);
queue             223 drivers/media/pci/ttpci/av7110_ca.c 	poll_wait(file, &wbuf->queue, wait);
queue            1122 drivers/media/pci/tw5864/tw5864-video.c 	input->vdev.queue = &input->vidq;
queue             963 drivers/media/pci/tw68/tw68-video.c 	dev->vdev.queue = &dev->vidq;
queue            1275 drivers/media/pci/tw686x/tw686x-video.c 		vdev->queue = &vc->vidq;
queue            2380 drivers/media/platform/am437x/am437x-vpfe.c 	vdev->queue = q;
queue             219 drivers/media/platform/aspeed-video.c 	struct vb2_queue queue;
queue            1143 drivers/media/platform/aspeed-video.c 	if (vb2_is_busy(&video->queue))
queue            1507 drivers/media/platform/aspeed-video.c 	struct vb2_queue *vbq = &video->queue;
queue            1563 drivers/media/platform/aspeed-video.c 	vdev->queue = vbq;
queue            1706 drivers/media/platform/aspeed-video.c 	vb2_queue_release(&video->queue);
queue            2084 drivers/media/platform/atmel/atmel-isc-base.c 	vdev->queue		= q;
queue             131 drivers/media/platform/atmel/atmel-isi.c 	struct vb2_queue		queue;
queue             626 drivers/media/platform/atmel/atmel-isi.c 	if (vb2_is_streaming(&isi->queue))
queue            1181 drivers/media/platform/atmel/atmel-isi.c 	q = &isi->queue;
queue            1197 drivers/media/platform/atmel/atmel-isi.c 	isi->vdev->queue = &isi->queue;
queue            1340 drivers/media/platform/davinci/vpbe_display.c 	vpbe_display_layer->video_dev.queue = &vpbe_display_layer->buffer_queue;
queue             462 drivers/media/platform/davinci/vpfe_capture.c 					struct videobuf_buffer, queue);
queue             463 drivers/media/platform/davinci/vpfe_capture.c 	list_del(&vpfe_dev->next_frm->queue);
queue            1219 drivers/media/platform/davinci/vpfe_capture.c 	list_add_tail(&vb->queue, &vpfe_dev->dma_queue);
queue            1435 drivers/media/platform/davinci/vpfe_capture.c 					struct videobuf_buffer, queue);
queue            1438 drivers/media/platform/davinci/vpfe_capture.c 	list_del(&vpfe_dev->cur_frm->queue);
queue            1464 drivers/media/platform/davinci/vpif_capture.c 		vdev->queue = q;
queue            1213 drivers/media/platform/davinci/vpif_display.c 		vdev->queue = q;
queue            1759 drivers/media/platform/exynos4-is/fimc-capture.c 	vfd->queue	= q;
queue             606 drivers/media/platform/exynos4-is/fimc-isp-video.c 	vdev->queue = q;
queue            1270 drivers/media/platform/exynos4-is/fimc-lite.c 	vfd->queue = q;
queue             296 drivers/media/platform/fsl-viu.c 		buf = list_entry(vidq->active.next, struct viu_buf, vb.queue);
queue             303 drivers/media/platform/fsl-viu.c 		list_for_each_entry_safe(buf, prev, &vidq->active, vb.queue) {
queue             304 drivers/media/platform/fsl-viu.c 			list_del(&buf->vb.queue);
queue             316 drivers/media/platform/fsl-viu.c 		buf = list_entry(vidq->queued.next, struct viu_buf, vb.queue);
queue             318 drivers/media/platform/fsl-viu.c 			list_move_tail(&buf->vb.queue, &vidq->active);
queue             332 drivers/media/platform/fsl-viu.c 			list_move_tail(&buf->vb.queue, &vidq->active);
queue             350 drivers/media/platform/fsl-viu.c 		buf = list_entry(vidq->active.next, struct viu_buf, vb.queue);
queue             351 drivers/media/platform/fsl-viu.c 		list_del(&buf->vb.queue);
queue             501 drivers/media/platform/fsl-viu.c 		dprintk(1, "adding vb queue=%p\n", &buf->vb.queue);
queue             507 drivers/media/platform/fsl-viu.c 		list_add_tail(&buf->vb.queue, &vidq->queued);
queue             512 drivers/media/platform/fsl-viu.c 		dprintk(1, "adding vb active=%p\n", &buf->vb.queue);
queue             513 drivers/media/platform/fsl-viu.c 		list_add_tail(&buf->vb.queue, &vidq->active);
queue             521 drivers/media/platform/fsl-viu.c 		dprintk(1, "adding vb queue2=%p\n", &buf->vb.queue);
queue             522 drivers/media/platform/fsl-viu.c 		prev = list_entry(vidq->active.prev, struct viu_buf, vb.queue);
queue             526 drivers/media/platform/fsl-viu.c 			list_add_tail(&buf->vb.queue, &vidq->active);
queue             531 drivers/media/platform/fsl-viu.c 			list_add_tail(&buf->vb.queue, &vidq->queued);
queue             964 drivers/media/platform/fsl-viu.c 					vb.queue);
queue             969 drivers/media/platform/fsl-viu.c 					vb.queue);
queue             970 drivers/media/platform/fsl-viu.c 		list_del(&buf->vb.queue);
queue             973 drivers/media/platform/fsl-viu.c 		list_add_tail(&buf->vb.queue, &vidq->active);
queue            1075 drivers/media/platform/fsl-viu.c 				 struct viu_buf, vb.queue);
queue            1082 drivers/media/platform/fsl-viu.c 			list_del(&buf->vb.queue);
queue             196 drivers/media/platform/marvell-ccic/mcam-core.c 	struct list_head queue;
queue             465 drivers/media/platform/marvell-ccic/mcam-core.c 				queue);
queue             466 drivers/media/platform/marvell-ccic/mcam-core.c 		list_del_init(&buf->queue);
queue             552 drivers/media/platform/marvell-ccic/mcam-core.c 					queue);
queue             553 drivers/media/platform/marvell-ccic/mcam-core.c 		list_del_init(&buf->queue);
queue             607 drivers/media/platform/marvell-ccic/mcam-core.c 	buf = list_first_entry(&cam->buffers, struct mcam_vb_buffer, queue);
queue             608 drivers/media/platform/marvell-ccic/mcam-core.c 	list_del_init(&buf->queue);
queue            1122 drivers/media/platform/marvell-ccic/mcam-core.c 	list_add(&mvb->queue, &cam->buffers);
queue            1139 drivers/media/platform/marvell-ccic/mcam-core.c 	list_for_each_entry_safe(buf, node, &cam->buffers, queue) {
queue            1141 drivers/media/platform/marvell-ccic/mcam-core.c 		list_del(&buf->queue);
queue            1803 drivers/media/platform/marvell-ccic/mcam-core.c 	cam->vdev.queue = &cam->vb_queue;
queue            2012 drivers/media/platform/marvell-ccic/mcam-core.c 			list_add(&cam->vb_bufs[0]->queue, &cam->buffers);
queue              37 drivers/media/platform/mtk-vcodec/mtk_vcodec_dec_drv.c 	wake_up_interruptible(&ctx->queue);
queue             121 drivers/media/platform/mtk-vcodec/mtk_vcodec_dec_drv.c 	init_waitqueue_head(&ctx->queue);
queue             301 drivers/media/platform/mtk-vcodec/mtk_vcodec_dec_drv.c 	init_waitqueue_head(&dev->queue);
queue             284 drivers/media/platform/mtk-vcodec/mtk_vcodec_drv.h 	wait_queue_head_t queue;
queue             360 drivers/media/platform/mtk-vcodec/mtk_vcodec_drv.h 	wait_queue_head_t queue;
queue              34 drivers/media/platform/mtk-vcodec/mtk_vcodec_enc_drv.c 	wake_up_interruptible(&ctx->queue);
queue             141 drivers/media/platform/mtk-vcodec/mtk_vcodec_enc_drv.c 	init_waitqueue_head(&ctx->queue);
queue             318 drivers/media/platform/mtk-vcodec/mtk_vcodec_enc_drv.c 	init_waitqueue_head(&dev->queue);
queue              21 drivers/media/platform/mtk-vcodec/mtk_vcodec_intr.c 	waitqueue = (wait_queue_head_t *)&ctx->queue;
queue             543 drivers/media/platform/omap/omap_vout.c 			struct omap_vout_buffer, queue);
queue             544 drivers/media/platform/omap/omap_vout.c 	list_del(&vout->next_frm->queue);
queue             990 drivers/media/platform/omap/omap_vout.c 	list_add_tail(&voutbuf->queue, &vout->dma_queue);
queue            1003 drivers/media/platform/omap/omap_vout.c 			struct omap_vout_buffer, queue);
queue            1005 drivers/media/platform/omap/omap_vout.c 	list_del(&vout->cur_frm->queue);
queue            1074 drivers/media/platform/omap/omap_vout.c 	list_for_each_entry_safe(buf, tmp, &vout->dma_queue, queue) {
queue            1075 drivers/media/platform/omap/omap_vout.c 		list_del(&buf->queue);
queue            1109 drivers/media/platform/omap/omap_vout.c 	list_for_each_entry_safe(buf, tmp, &vout->dma_queue, queue) {
queue            1110 drivers/media/platform/omap/omap_vout.c 		list_del(&buf->queue);
queue            1405 drivers/media/platform/omap/omap_vout.c 	vfd->queue = vq;
queue             121 drivers/media/platform/omap/omap_voutdef.h 	struct list_head		queue;
queue             368 drivers/media/platform/omap3isp/ispccdc.c 				struct list_head *queue)
queue             374 drivers/media/platform/omap3isp/ispccdc.c 	list_for_each_entry_safe(req, n, queue, list) {
queue            1815 drivers/media/platform/omap3isp/ispccdc.c 	.queue = ccdc_video_queue,
queue             939 drivers/media/platform/omap3isp/ispccp2.c 	.queue = ccp2_video_queue,
queue             822 drivers/media/platform/omap3isp/ispcsi2.c 	.queue = csi2_queue,
queue            1570 drivers/media/platform/omap3isp/isppreview.c 	.queue = preview_video_queue,
queue            1113 drivers/media/platform/omap3isp/ispresizer.c 	.queue = resizer_video_queue,
queue             329 drivers/media/platform/omap3isp/ispvideo.c static int isp_video_queue_setup(struct vb2_queue *queue,
queue             333 drivers/media/platform/omap3isp/ispvideo.c 	struct isp_video_fh *vfh = vb2_get_drv_priv(queue);
queue             420 drivers/media/platform/omap3isp/ispvideo.c 		video->ops->queue(video, buffer);
queue             458 drivers/media/platform/omap3isp/ispvideo.c static int isp_video_start_streaming(struct vb2_queue *queue,
queue             461 drivers/media/platform/omap3isp/ispvideo.c 	struct isp_video_fh *vfh = vb2_get_drv_priv(queue);
queue             630 drivers/media/platform/omap3isp/ispvideo.c 		vb2_discard_done(video->queue);
queue             637 drivers/media/platform/omap3isp/ispvideo.c 		video->ops->queue(video, buf);
queue             908 drivers/media/platform/omap3isp/ispvideo.c 	ret = vb2_reqbufs(&vfh->queue, rb);
queue             922 drivers/media/platform/omap3isp/ispvideo.c 	ret = vb2_querybuf(&vfh->queue, b);
queue             936 drivers/media/platform/omap3isp/ispvideo.c 	ret = vb2_qbuf(&vfh->queue, video->video.v4l2_dev->mdev, b);
queue             950 drivers/media/platform/omap3isp/ispvideo.c 	ret = vb2_dqbuf(&vfh->queue, b, file->f_flags & O_NONBLOCK);
queue            1149 drivers/media/platform/omap3isp/ispvideo.c 	video->queue = &vfh->queue;
queue            1155 drivers/media/platform/omap3isp/ispvideo.c 	ret = vb2_streamon(&vfh->queue, type);
queue            1176 drivers/media/platform/omap3isp/ispvideo.c 	video->queue = NULL;
queue            1203 drivers/media/platform/omap3isp/ispvideo.c 	streaming = vb2_is_streaming(&vfh->queue);
queue            1226 drivers/media/platform/omap3isp/ispvideo.c 	vb2_streamoff(&vfh->queue, type);
queue            1228 drivers/media/platform/omap3isp/ispvideo.c 	video->queue = NULL;
queue            1298 drivers/media/platform/omap3isp/ispvideo.c 	struct vb2_queue *queue;
queue            1320 drivers/media/platform/omap3isp/ispvideo.c 	queue = &handle->queue;
queue            1321 drivers/media/platform/omap3isp/ispvideo.c 	queue->type = video->type;
queue            1322 drivers/media/platform/omap3isp/ispvideo.c 	queue->io_modes = VB2_MMAP | VB2_USERPTR;
queue            1323 drivers/media/platform/omap3isp/ispvideo.c 	queue->drv_priv = handle;
queue            1324 drivers/media/platform/omap3isp/ispvideo.c 	queue->ops = &isp_video_queue_ops;
queue            1325 drivers/media/platform/omap3isp/ispvideo.c 	queue->mem_ops = &vb2_dma_contig_memops;
queue            1326 drivers/media/platform/omap3isp/ispvideo.c 	queue->buf_struct_size = sizeof(struct isp_buffer);
queue            1327 drivers/media/platform/omap3isp/ispvideo.c 	queue->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
queue            1328 drivers/media/platform/omap3isp/ispvideo.c 	queue->dev = video->isp->dev;
queue            1330 drivers/media/platform/omap3isp/ispvideo.c 	ret = vb2_queue_init(&handle->queue);
queue            1363 drivers/media/platform/omap3isp/ispvideo.c 	vb2_queue_release(&handle->queue);
queue            1386 drivers/media/platform/omap3isp/ispvideo.c 	ret = vb2_poll(&vfh->queue, file, wait);
queue            1396 drivers/media/platform/omap3isp/ispvideo.c 	return vb2_mmap(&vfh->queue, vma);
queue             145 drivers/media/platform/omap3isp/ispvideo.h 	int(*queue)(struct isp_video *video, struct isp_buffer *buffer);
queue             171 drivers/media/platform/omap3isp/ispvideo.h 	struct vb2_queue *queue;
queue             185 drivers/media/platform/omap3isp/ispvideo.h 	struct vb2_queue queue;
queue             192 drivers/media/platform/omap3isp/ispvideo.h 				container_of(q, struct isp_video_fh, queue)
queue             672 drivers/media/platform/pxa_camera.c 	struct list_head		queue;
queue             988 drivers/media/platform/pxa_camera.c 	list_del_init(&buf->queue);
queue            1002 drivers/media/platform/pxa_camera.c 				   struct pxa_buffer, queue);
queue            1072 drivers/media/platform/pxa_camera.c 	WARN_ON(buf->inwork || list_empty(&buf->queue));
queue            1090 drivers/media/platform/pxa_camera.c 			      struct pxa_buffer, queue);
queue            1099 drivers/media/platform/pxa_camera.c 		list_for_each_entry(buf, &pcdev->capture, queue)
queue            1204 drivers/media/platform/pxa_camera.c 					 struct pxa_buffer, queue);
queue            1409 drivers/media/platform/pxa_camera.c 	INIT_LIST_HEAD(&buf->queue);
queue            1434 drivers/media/platform/pxa_camera.c 	list_add_tail(&buf->queue, &pcdev->capture);
queue            1556 drivers/media/platform/pxa_camera.c 	list_for_each_entry_safe(buf, tmp, &pcdev->capture, queue)
queue            2153 drivers/media/platform/pxa_camera.c 	pcdev->vdev.queue = &pcdev->vb2_vq;
queue             461 drivers/media/platform/qcom/camss/camss-vfe.c 					  queue);
queue             462 drivers/media/platform/qcom/camss/camss-vfe.c 		list_del(&buffer->queue);
queue             476 drivers/media/platform/qcom/camss/camss-vfe.c 	INIT_LIST_HEAD(&buffer->queue);
queue             477 drivers/media/platform/qcom/camss/camss-vfe.c 	list_add_tail(&buffer->queue, &output->pending_bufs);
queue             491 drivers/media/platform/qcom/camss/camss-vfe.c 	list_for_each_entry_safe(buf, t, &output->pending_bufs, queue) {
queue             493 drivers/media/platform/qcom/camss/camss-vfe.c 		list_del(&buf->queue);
queue             920 drivers/media/platform/qcom/camss/camss-video.c 	vdev->queue = &video->vb2_q;
queue              25 drivers/media/platform/qcom/camss/camss-video.h 	struct list_head queue;
queue             164 drivers/media/platform/qcom/venus/hfi_venus.c 			     struct iface_queue *queue,
queue             172 drivers/media/platform/qcom/venus/hfi_venus.c 	if (!queue->qmem.kva)
queue             175 drivers/media/platform/qcom/venus/hfi_venus.c 	qhdr = queue->qhdr;
queue             208 drivers/media/platform/qcom/venus/hfi_venus.c 	wr_ptr = (u32 *)(queue->qmem.kva + (wr_idx << 2));
queue             217 drivers/media/platform/qcom/venus/hfi_venus.c 		memcpy(queue->qmem.kva, packet + len, new_wr_idx << 2);
queue             233 drivers/media/platform/qcom/venus/hfi_venus.c 			    struct iface_queue *queue, void *pkt, u32 *tx_req)
queue             242 drivers/media/platform/qcom/venus/hfi_venus.c 	if (!queue->qmem.kva)
queue             245 drivers/media/platform/qcom/venus/hfi_venus.c 	qhdr = queue->qhdr;
queue             275 drivers/media/platform/qcom/venus/hfi_venus.c 	rd_ptr = (u32 *)(queue->qmem.kva + (rd_idx << 2));
queue             290 drivers/media/platform/qcom/venus/hfi_venus.c 			memcpy(pkt + len, queue->qmem.kva, new_rd_idx << 2);
queue             379 drivers/media/platform/qcom/venus/hfi_venus.c 	struct iface_queue *queue;
queue             389 drivers/media/platform/qcom/venus/hfi_venus.c 	queue = &hdev->queues[IFACEQ_CMD_IDX];
queue             391 drivers/media/platform/qcom/venus/hfi_venus.c 	ret = venus_write_queue(hdev, queue, pkt, &rx_req);
queue             606 drivers/media/platform/qcom/venus/hfi_venus.c 	struct iface_queue *queue;
queue             613 drivers/media/platform/qcom/venus/hfi_venus.c 	queue = &hdev->queues[IFACEQ_MSG_IDX];
queue             615 drivers/media/platform/qcom/venus/hfi_venus.c 	ret = venus_read_queue(hdev, queue, pkt, &tx_req);
queue             639 drivers/media/platform/qcom/venus/hfi_venus.c 	struct iface_queue *queue;
queue             647 drivers/media/platform/qcom/venus/hfi_venus.c 	queue = &hdev->queues[IFACEQ_DBG_IDX];
queue             649 drivers/media/platform/qcom/venus/hfi_venus.c 	ret = venus_read_queue(hdev, queue, pkt, &tx_req);
queue             706 drivers/media/platform/qcom/venus/hfi_venus.c 	struct iface_queue *queue;
queue             721 drivers/media/platform/qcom/venus/hfi_venus.c 		queue = &hdev->queues[i];
queue             722 drivers/media/platform/qcom/venus/hfi_venus.c 		queue->qmem.da = desc.da + offset;
queue             723 drivers/media/platform/qcom/venus/hfi_venus.c 		queue->qmem.kva = desc.kva + offset;
queue             724 drivers/media/platform/qcom/venus/hfi_venus.c 		queue->qmem.size = IFACEQ_QUEUE_SIZE;
queue             725 drivers/media/platform/qcom/venus/hfi_venus.c 		offset += queue->qmem.size;
queue             726 drivers/media/platform/qcom/venus/hfi_venus.c 		queue->qhdr =
queue             729 drivers/media/platform/qcom/venus/hfi_venus.c 		venus_set_qhdr_defaults(queue->qhdr);
queue             731 drivers/media/platform/qcom/venus/hfi_venus.c 		queue->qhdr->start_addr = queue->qmem.da;
queue             734 drivers/media/platform/qcom/venus/hfi_venus.c 			queue->qhdr->type |= HFI_HOST_TO_CTRL_CMD_Q;
queue             736 drivers/media/platform/qcom/venus/hfi_venus.c 			queue->qhdr->type |= HFI_CTRL_TO_HOST_MSG_Q;
queue             738 drivers/media/platform/qcom/venus/hfi_venus.c 			queue->qhdr->type |= HFI_CTRL_TO_HOST_DBG_Q;
queue             753 drivers/media/platform/qcom/venus/hfi_venus.c 	queue = &hdev->queues[IFACEQ_DBG_IDX];
queue             754 drivers/media/platform/qcom/venus/hfi_venus.c 	queue->qhdr->rx_req = 0;
queue            1271 drivers/media/platform/rcar-vin/rcar-dma.c 	struct vb2_queue *q = &vin->queue;
queue             277 drivers/media/platform/rcar-vin/rcar-v4l2.c 	if (vb2_is_busy(&vin->queue))
queue             707 drivers/media/platform/rcar-vin/rcar-v4l2.c 	if (vb2_is_busy(&vin->queue))
queue             901 drivers/media/platform/rcar-vin/rcar-v4l2.c 	vdev->queue = &vin->queue;
queue             201 drivers/media/platform/rcar-vin/rcar-vin.h 	struct vb2_queue queue;
queue            1075 drivers/media/platform/rcar_drif.c 	sdr->vdev->queue = &sdr->vb_queue;
queue            1076 drivers/media/platform/rcar_drif.c 	sdr->vdev->queue->lock = &sdr->vb_queue_mutex;
queue             143 drivers/media/platform/renesas-ceu.c 	struct list_head queue;
queue             532 drivers/media/platform/renesas-ceu.c 				       queue);
queue             533 drivers/media/platform/renesas-ceu.c 		list_del(&buf->queue);
queue             550 drivers/media/platform/renesas-ceu.c 	list_for_each_entry(buf, &ceudev->capture, queue)
queue             654 drivers/media/platform/renesas-ceu.c 	list_add_tail(&buf->queue, &ceudev->capture);
queue             704 drivers/media/platform/renesas-ceu.c 			       queue);
queue             712 drivers/media/platform/renesas-ceu.c 	list_del(&buf->queue);
queue             730 drivers/media/platform/renesas-ceu.c 	list_for_each_entry(buf, &ceudev->capture, queue)
queue             761 drivers/media/platform/renesas-ceu.c 	list_for_each_entry(buf, &ceudev->capture, queue)
queue            1444 drivers/media/platform/renesas-ceu.c 	vdev->queue		= &ceudev->vb2_vq;
queue             117 drivers/media/platform/s5p-mfc/s5p_mfc.c 	wake_up(&ctx->queue);
queue             127 drivers/media/platform/s5p-mfc/s5p_mfc.c 	wake_up(&dev->queue);
queue             591 drivers/media/platform/s5p-mfc/s5p_mfc.c 		wake_up(&ctx->queue);
queue             598 drivers/media/platform/s5p-mfc/s5p_mfc.c 		wake_up(&ctx->queue);
queue             625 drivers/media/platform/s5p-mfc/s5p_mfc.c 	wake_up(&ctx->queue);
queue             742 drivers/media/platform/s5p-mfc/s5p_mfc.c 	wake_up(&ctx->queue);
queue             769 drivers/media/platform/s5p-mfc/s5p_mfc.c 	init_waitqueue_head(&ctx->queue);
queue            1325 drivers/media/platform/s5p-mfc/s5p_mfc.c 	init_waitqueue_head(&dev->queue);
queue            1474 drivers/media/platform/s5p-mfc/s5p_mfc.c 		ret = wait_event_interruptible_timeout(m_dev->queue,
queue             318 drivers/media/platform/s5p-mfc/s5p_mfc_common.h 	wait_queue_head_t queue;
queue             624 drivers/media/platform/s5p-mfc/s5p_mfc_common.h 	wait_queue_head_t queue;
queue              25 drivers/media/platform/s5p-mfc/s5p_mfc_intr.c 	ret = wait_event_interruptible_timeout(dev->queue,
queue              57 drivers/media/platform/s5p-mfc/s5p_mfc_intr.c 		ret = wait_event_interruptible_timeout(ctx->queue,
queue              62 drivers/media/platform/s5p-mfc/s5p_mfc_intr.c 		ret = wait_event_timeout(ctx->queue,
queue              85 drivers/media/platform/sh_vou.c 	struct vb2_queue queue;
queue             703 drivers/media/platform/sh_vou.c 	if (vb2_is_busy(&vou_dev->queue))
queue             825 drivers/media/platform/sh_vou.c 	if (vb2_is_busy(&vou_dev->queue))
queue             948 drivers/media/platform/sh_vou.c 	if (vb2_is_busy(&vou_dev->queue))
queue            1288 drivers/media/platform/sh_vou.c 	q = &vou_dev->queue;
queue            1303 drivers/media/platform/sh_vou.c 	vdev->queue = q;
queue             157 drivers/media/platform/stm32/stm32-dcmi.c 	struct vb2_queue		queue;
queue            1104 drivers/media/platform/stm32/stm32-dcmi.c 	if (vb2_is_streaming(&dcmi->queue))
queue            1930 drivers/media/platform/stm32/stm32-dcmi.c 	q = &dcmi->queue;
queue            1955 drivers/media/platform/stm32/stm32-dcmi.c 	dcmi->vdev->queue = &dcmi->queue;
queue             149 drivers/media/platform/sunxi/sun4i-csi/sun4i_csi.h 	struct vb2_queue		queue;
queue             403 drivers/media/platform/sunxi/sun4i-csi/sun4i_dma.c 	struct vb2_queue *q = &csi->queue;
queue             460 drivers/media/platform/sunxi/sun4i-csi/sun4i_dma.c 	vb2_queue_release(&csi->queue);
queue             361 drivers/media/platform/sunxi/sun4i-csi/sun4i_v4l2.c 	vdev->queue = &csi->queue;
queue             654 drivers/media/platform/sunxi/sun6i-csi/sun6i_video.c 	vdev->queue		= vidq;
queue            1556 drivers/media/platform/ti-vpe/cal.c 	vfd->queue = q;
queue             109 drivers/media/platform/via-camera.c 	struct list_head		queue;
queue             336 drivers/media/platform/via-camera.c 	return list_entry(cam->buffer_queue.next, struct via_buffer, queue);
queue             372 drivers/media/platform/via-camera.c 	list_del(&vb->queue);
queue             585 drivers/media/platform/via-camera.c 	list_add_tail(&via->queue, &cam->buffer_queue);
queue             653 drivers/media/platform/via-camera.c 	list_for_each_entry_safe(buf, tmp, &cam->buffer_queue, queue) {
queue             654 drivers/media/platform/via-camera.c 		list_del(&buf->queue);
queue             668 drivers/media/platform/via-camera.c 	list_for_each_entry_safe(buf, tmp, &cam->buffer_queue, queue) {
queue             669 drivers/media/platform/via-camera.c 		list_del(&buf->queue);
queue            1263 drivers/media/platform/via-camera.c 	cam->vdev.queue = vq;
queue              26 drivers/media/platform/vimc/vimc-capture.c 	struct vb2_queue queue;
queue             126 drivers/media/platform/vimc/vimc-capture.c 	if (vb2_is_busy(&vcap->queue))
queue             350 drivers/media/platform/vimc/vimc-capture.c 	vb2_queue_release(&vcap->queue);
queue             430 drivers/media/platform/vimc/vimc-capture.c 	q = &vcap->queue;
queue             474 drivers/media/platform/vimc/vimc-capture.c 	vdev->queue = q;
queue             474 drivers/media/platform/vivid/vivid-core.c 	if (vdev->queue)
queue            1280 drivers/media/platform/vivid/vivid-core.c 		vfd->queue = &dev->vb_vid_cap_q;
queue            1326 drivers/media/platform/vivid/vivid-core.c 		vfd->queue = &dev->vb_vid_out_q;
queue            1378 drivers/media/platform/vivid/vivid-core.c 		vfd->queue = &dev->vb_vbi_cap_q;
queue            1410 drivers/media/platform/vivid/vivid-core.c 		vfd->queue = &dev->vb_vbi_out_q;
queue            1441 drivers/media/platform/vivid/vivid-core.c 		vfd->queue = &dev->vb_sdr_cap_q;
queue             261 drivers/media/platform/vivid/vivid-vbi-cap.c 	dev->vbi_cap_dev.queue->type = V4L2_BUF_TYPE_VBI_CAPTURE;
queue             329 drivers/media/platform/vivid/vivid-vbi-cap.c 	dev->vbi_cap_dev.queue->type = V4L2_BUF_TYPE_SLICED_VBI_CAPTURE;
queue             170 drivers/media/platform/vivid/vivid-vbi-out.c 	dev->vbi_out_dev.queue->type = V4L2_BUF_TYPE_VBI_OUTPUT;
queue             215 drivers/media/platform/vivid/vivid-vbi-out.c 	dev->vbi_out_dev.queue->type = V4L2_BUF_TYPE_SLICED_VBI_OUTPUT;
queue              47 drivers/media/platform/vsp1/vsp1_histo.c 			       queue);
queue              48 drivers/media/platform/vsp1/vsp1_histo.c 	list_del(&buf->queue);
queue             130 drivers/media/platform/vsp1/vsp1_histo.c 	list_add_tail(&buf->queue, &histo->irqqueue);
queue             148 drivers/media/platform/vsp1/vsp1_histo.c 	list_for_each_entry(buffer, &histo->irqqueue, queue)
queue             444 drivers/media/platform/vsp1/vsp1_histo.c 	if (f->index > 0 || f->type != histo->queue.type)
queue             459 drivers/media/platform/vsp1/vsp1_histo.c 	if (format->type != histo->queue.type)
queue             562 drivers/media/platform/vsp1/vsp1_histo.c 	histo->queue.type = V4L2_BUF_TYPE_META_CAPTURE;
queue             563 drivers/media/platform/vsp1/vsp1_histo.c 	histo->queue.io_modes = VB2_MMAP | VB2_USERPTR | VB2_DMABUF;
queue             564 drivers/media/platform/vsp1/vsp1_histo.c 	histo->queue.lock = &histo->lock;
queue             565 drivers/media/platform/vsp1/vsp1_histo.c 	histo->queue.drv_priv = histo;
queue             566 drivers/media/platform/vsp1/vsp1_histo.c 	histo->queue.buf_struct_size = sizeof(struct vsp1_histogram_buffer);
queue             567 drivers/media/platform/vsp1/vsp1_histo.c 	histo->queue.ops = &histo_video_queue_qops;
queue             568 drivers/media/platform/vsp1/vsp1_histo.c 	histo->queue.mem_ops = &vb2_vmalloc_memops;
queue             569 drivers/media/platform/vsp1/vsp1_histo.c 	histo->queue.timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
queue             570 drivers/media/platform/vsp1/vsp1_histo.c 	histo->queue.dev = vsp1->dev;
queue             571 drivers/media/platform/vsp1/vsp1_histo.c 	ret = vb2_queue_init(&histo->queue);
queue             578 drivers/media/platform/vsp1/vsp1_histo.c 	histo->video.queue = &histo->queue;
queue              30 drivers/media/platform/vsp1/vsp1_histo.h 	struct list_head queue;
queue              45 drivers/media/platform/vsp1/vsp1_histo.h 	struct vb2_queue queue;
queue             329 drivers/media/platform/vsp1/vsp1_video.c 				struct vsp1_vb2_buffer, queue);
queue             331 drivers/media/platform/vsp1/vsp1_video.c 	list_del(&done->queue);
queue             335 drivers/media/platform/vsp1/vsp1_video.c 					struct vsp1_vb2_buffer, queue);
queue             768 drivers/media/platform/vsp1/vsp1_video.c 	list_add_tail(&buf->queue, &video->irqqueue);
queue             779 drivers/media/platform/vsp1/vsp1_video.c 	if (vb2_is_streaming(&video->queue) &&
queue             842 drivers/media/platform/vsp1/vsp1_video.c 	list_for_each_entry(buffer, &video->irqqueue, queue)
queue             974 drivers/media/platform/vsp1/vsp1_video.c 	if (format->type != video->queue.type)
queue             990 drivers/media/platform/vsp1/vsp1_video.c 	if (format->type != video->queue.type)
queue            1004 drivers/media/platform/vsp1/vsp1_video.c 	if (format->type != video->queue.type)
queue            1013 drivers/media/platform/vsp1/vsp1_video.c 	if (vb2_is_busy(&video->queue)) {
queue            1035 drivers/media/platform/vsp1/vsp1_video.c 	if (video->queue.owner && video->queue.owner != file->private_data)
queue            1068 drivers/media/platform/vsp1/vsp1_video.c 	ret = vb2_streamon(&video->queue, type);
queue            1135 drivers/media/platform/vsp1/vsp1_video.c 	if (video->queue.owner == vfh) {
queue            1136 drivers/media/platform/vsp1/vsp1_video.c 		vb2_queue_release(&video->queue);
queue            1137 drivers/media/platform/vsp1/vsp1_video.c 		video->queue.owner = NULL;
queue            1302 drivers/media/platform/vsp1/vsp1_video.c 	video->queue.type = video->type;
queue            1303 drivers/media/platform/vsp1/vsp1_video.c 	video->queue.io_modes = VB2_MMAP | VB2_USERPTR | VB2_DMABUF;
queue            1304 drivers/media/platform/vsp1/vsp1_video.c 	video->queue.lock = &video->lock;
queue            1305 drivers/media/platform/vsp1/vsp1_video.c 	video->queue.drv_priv = video;
queue            1306 drivers/media/platform/vsp1/vsp1_video.c 	video->queue.buf_struct_size = sizeof(struct vsp1_vb2_buffer);
queue            1307 drivers/media/platform/vsp1/vsp1_video.c 	video->queue.ops = &vsp1_video_queue_qops;
queue            1308 drivers/media/platform/vsp1/vsp1_video.c 	video->queue.mem_ops = &vb2_dma_contig_memops;
queue            1309 drivers/media/platform/vsp1/vsp1_video.c 	video->queue.timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
queue            1310 drivers/media/platform/vsp1/vsp1_video.c 	video->queue.dev = video->vsp1->bus_master;
queue            1311 drivers/media/platform/vsp1/vsp1_video.c 	ret = vb2_queue_init(&video->queue);
queue            1318 drivers/media/platform/vsp1/vsp1_video.c 	video->video.queue = &video->queue;
queue              21 drivers/media/platform/vsp1/vsp1_video.h 	struct list_head queue;
queue              44 drivers/media/platform/vsp1/vsp1_video.h 	struct vb2_queue queue;
queue              63 drivers/media/platform/vsp1/vsp1_wpf.c 	if (vb2_is_busy(&video->queue)) {
queue             293 drivers/media/platform/xilinx/xilinx-dma.c 	struct list_head queue;
queue             305 drivers/media/platform/xilinx/xilinx-dma.c 	list_del(&buf->queue);
queue             352 drivers/media/platform/xilinx/xilinx-dma.c 	if (dma->queue.type == V4L2_BUF_TYPE_VIDEO_CAPTURE) {
queue             381 drivers/media/platform/xilinx/xilinx-dma.c 	list_add_tail(&buf->queue, &dma->queued_bufs);
queue             386 drivers/media/platform/xilinx/xilinx-dma.c 	if (vb2_is_streaming(&dma->queue))
queue             440 drivers/media/platform/xilinx/xilinx-dma.c 	list_for_each_entry_safe(buf, nbuf, &dma->queued_bufs, queue) {
queue             442 drivers/media/platform/xilinx/xilinx-dma.c 		list_del(&buf->queue);
queue             467 drivers/media/platform/xilinx/xilinx-dma.c 	list_for_each_entry_safe(buf, nbuf, &dma->queued_bufs, queue) {
queue             469 drivers/media/platform/xilinx/xilinx-dma.c 		list_del(&buf->queue);
queue             605 drivers/media/platform/xilinx/xilinx-dma.c 	if (vb2_is_busy(&dma->queue))
queue             683 drivers/media/platform/xilinx/xilinx-dma.c 	dma->video.queue = &dma->queue;
queue             710 drivers/media/platform/xilinx/xilinx-dma.c 	dma->queue.type = type;
queue             711 drivers/media/platform/xilinx/xilinx-dma.c 	dma->queue.io_modes = VB2_MMAP | VB2_USERPTR | VB2_DMABUF;
queue             712 drivers/media/platform/xilinx/xilinx-dma.c 	dma->queue.lock = &dma->lock;
queue             713 drivers/media/platform/xilinx/xilinx-dma.c 	dma->queue.drv_priv = dma;
queue             714 drivers/media/platform/xilinx/xilinx-dma.c 	dma->queue.buf_struct_size = sizeof(struct xvip_dma_buffer);
queue             715 drivers/media/platform/xilinx/xilinx-dma.c 	dma->queue.ops = &xvip_dma_queue_qops;
queue             716 drivers/media/platform/xilinx/xilinx-dma.c 	dma->queue.mem_ops = &vb2_dma_contig_memops;
queue             717 drivers/media/platform/xilinx/xilinx-dma.c 	dma->queue.timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC
queue             719 drivers/media/platform/xilinx/xilinx-dma.c 	dma->queue.dev = dma->xdev->dev;
queue             720 drivers/media/platform/xilinx/xilinx-dma.c 	ret = vb2_queue_init(&dma->queue);
queue              86 drivers/media/platform/xilinx/xilinx-dma.h 	struct vb2_queue queue;
queue              61 drivers/media/rc/fintek-cir.h 		wait_queue_head_t queue;
queue            1016 drivers/media/usb/airspy/airspy.c 	s->vdev.queue = &s->vb_queue;
queue            1017 drivers/media/usb/airspy/airspy.c 	s->vdev.queue->lock = &s->vb_queue_lock;
queue            1979 drivers/media/usb/au0828/au0828-video.c 	dev->vdev.queue = &dev->vb_vidq;
queue            1980 drivers/media/usb/au0828/au0828-video.c 	dev->vdev.queue->lock = &dev->vb_queue_lock;
queue            1990 drivers/media/usb/au0828/au0828-video.c 	dev->vbi_dev.queue = &dev->vb_vbiq;
queue            1991 drivers/media/usb/au0828/au0828-video.c 	dev->vbi_dev.queue->lock = &dev->vb_vbi_queue_lock;
queue            1279 drivers/media/usb/cx231xx/cx231xx-417.c 				struct cx231xx_buffer, vb.queue);
queue            1312 drivers/media/usb/cx231xx/cx231xx-417.c 		list_del(&buf->vb.queue);
queue            1335 drivers/media/usb/cx231xx/cx231xx-417.c 			struct cx231xx_buffer, vb.queue);
queue            1343 drivers/media/usb/cx231xx/cx231xx-417.c 	list_del(&buf->vb.queue);
queue            1471 drivers/media/usb/cx231xx/cx231xx-417.c 	list_add_tail(&buf->vb.queue, &vidq->active);
queue             262 drivers/media/usb/cx231xx/cx231xx-vbi.c 	list_add_tail(&buf->vb.queue, &vidq->active);
queue             523 drivers/media/usb/cx231xx/cx231xx-vbi.c 	list_del(&buf->vb.queue);
queue             614 drivers/media/usb/cx231xx/cx231xx-vbi.c 	*buf = list_entry(dma_q->active.next, struct cx231xx_buffer, vb.queue);
queue             179 drivers/media/usb/cx231xx/cx231xx-video.c 	list_del(&buf->vb.queue);
queue             244 drivers/media/usb/cx231xx/cx231xx-video.c 	*buf = list_entry(dma_q->active.next, struct cx231xx_buffer, vb.queue);
queue             832 drivers/media/usb/cx231xx/cx231xx-video.c 	list_add_tail(&buf->vb.queue, &vidq->active);
queue            1618 drivers/media/usb/dvb-usb/cxusb-analog.c 	vb2_queue_release(vdev->queue);
queue            1657 drivers/media/usb/dvb-usb/cxusb-analog.c 	cxdev->videodev->queue = &cxdev->videoqueue;
queue            2762 drivers/media/usb/em28xx/em28xx-video.c 	v4l2->vdev.queue = &v4l2->vb_vidq;
queue            2763 drivers/media/usb/em28xx/em28xx-video.c 	v4l2->vdev.queue->lock = &v4l2->vb_queue_lock;
queue            2805 drivers/media/usb/em28xx/em28xx-video.c 		v4l2->vbi_dev.queue = &v4l2->vb_vbiq;
queue            2806 drivers/media/usb/em28xx/em28xx-video.c 		v4l2->vbi_dev.queue->lock = &v4l2->vb_vbi_queue_lock;
queue            1096 drivers/media/usb/go7007/go7007-v4l2.c 	vdev->queue = &go->vidq;
queue            1096 drivers/media/usb/gspca/gspca.c 	if (vb2_is_busy(&gspca_dev->queue))
queue            1258 drivers/media/usb/gspca/gspca.c 	parm->parm.capture.readbuffers = gspca_dev->queue.min_buffers_needed;
queue            1274 drivers/media/usb/gspca/gspca.c 	parm->parm.capture.readbuffers = gspca_dev->queue.min_buffers_needed;
queue            1510 drivers/media/usb/gspca/gspca.c 	q = &gspca_dev->queue;
queue            1523 drivers/media/usb/gspca/gspca.c 	gspca_dev->vdev.queue = q;
queue            1630 drivers/media/usb/gspca/gspca.c 	vb2_queue_error(&gspca_dev->queue);
queue            1657 drivers/media/usb/gspca/gspca.c 	if (!vb2_start_streaming_called(&gspca_dev->queue))
queue            1689 drivers/media/usb/gspca/gspca.c 	streaming = vb2_start_streaming_called(&gspca_dev->queue);
queue             194 drivers/media/usb/gspca/gspca.h 	struct vb2_queue queue;
queue            1478 drivers/media/usb/hackrf/hackrf.c 	dev->rx_vdev.queue = &dev->rx_vb2_queue;
queue            1479 drivers/media/usb/hackrf/hackrf.c 	dev->rx_vdev.queue->lock = &dev->vb_queue_lock;
queue            1498 drivers/media/usb/hackrf/hackrf.c 	dev->tx_vdev.queue = &dev->tx_vb2_queue;
queue            1499 drivers/media/usb/hackrf/hackrf.c 	dev->tx_vdev.queue->lock = &dev->vb_queue_lock;
queue            1213 drivers/media/usb/msi2500/msi2500.c 	dev->vdev.queue = &dev->vb_queue;
queue            1214 drivers/media/usb/msi2500/msi2500.c 	dev->vdev.queue->lock = &dev->vb_queue_lock;
queue            1062 drivers/media/usb/pwc/pwc-if.c 	pdev->vdev.queue = &pdev->vb_queue;
queue            1063 drivers/media/usb/pwc/pwc-if.c 	pdev->vdev.queue->lock = &pdev->vb_queue_lock;
queue            1643 drivers/media/usb/s2255/s2255drv.c 		vc->vdev.queue = q;
queue             807 drivers/media/usb/stk1160/stk1160-v4l.c 	dev->vdev.queue = &dev->vb_vidq;
queue              92 drivers/media/usb/tm6000/tm6000-video.c 			struct tm6000_buffer, vb.queue);
queue             108 drivers/media/usb/tm6000/tm6000-video.c 	list_del(&buf->vb.queue);
queue             778 drivers/media/usb/tm6000/tm6000-video.c 	list_add_tail(&buf->vb.queue, &vidq->active);
queue             939 drivers/media/usb/usbtv/usbtv-video.c 	usbtv->vdev.queue = &usbtv->vb2q;
queue            1967 drivers/media/usb/uvc/uvc_driver.c 			      struct uvc_video_queue *queue,
queue            1975 drivers/media/usb/uvc/uvc_driver.c 	ret = uvc_queue_init(queue, type, !uvc_no_drop_param);
queue            2051 drivers/media/usb/uvc/uvc_driver.c 					 &stream->queue, stream->type,
queue            2331 drivers/media/usb/uvc/uvc_driver.c 				uvc_queue_streamoff(&stream->queue,
queue            2332 drivers/media/usb/uvc/uvc_driver.c 						    stream->queue.queue.type);
queue              34 drivers/media/usb/uvc/uvc_isight.c static int isight_decode(struct uvc_video_queue *queue, struct uvc_buffer *buf,
queue             120 drivers/media/usb/uvc/uvc_isight.c 			ret = isight_decode(&stream->queue, buf,
queue             130 drivers/media/usb/uvc/uvc_isight.c 				buf = uvc_queue_next_buffer(&stream->queue,
queue              48 drivers/media/usb/uvc/uvc_metadata.c 	if (format->type != vfh->vdev->queue->type)
queue              68 drivers/media/usb/uvc/uvc_metadata.c 	if (format->type != vfh->vdev->queue->type)
queue              99 drivers/media/usb/uvc/uvc_metadata.c 	if (uvc_queue_allocated(&stream->queue))
queue             117 drivers/media/usb/uvc/uvc_metadata.c 	if (fdesc->type != vfh->vdev->queue->type ||
queue             123 drivers/media/usb/uvc/uvc_metadata.c 	fdesc->type = vfh->vdev->queue->type;
queue             163 drivers/media/usb/uvc/uvc_metadata.c 	struct uvc_video_queue *queue = &stream->meta.queue;
queue             171 drivers/media/usb/uvc/uvc_metadata.c 	vdev->queue = &queue->queue;
queue             173 drivers/media/usb/uvc/uvc_metadata.c 	return uvc_register_video_device(dev, stream, vdev, queue,
queue              36 drivers/media/usb/uvc/uvc_queue.c uvc_queue_to_stream(struct uvc_video_queue *queue)
queue              38 drivers/media/usb/uvc/uvc_queue.c 	return container_of(queue, struct uvc_streaming, queue);
queue              51 drivers/media/usb/uvc/uvc_queue.c static void uvc_queue_return_buffers(struct uvc_video_queue *queue,
queue              58 drivers/media/usb/uvc/uvc_queue.c 	while (!list_empty(&queue->irqqueue)) {
queue              59 drivers/media/usb/uvc/uvc_queue.c 		struct uvc_buffer *buf = list_first_entry(&queue->irqqueue,
queue              61 drivers/media/usb/uvc/uvc_queue.c 							  queue);
queue              62 drivers/media/usb/uvc/uvc_queue.c 		list_del(&buf->queue);
queue              76 drivers/media/usb/uvc/uvc_queue.c 	struct uvc_video_queue *queue = vb2_get_drv_priv(vq);
queue              86 drivers/media/usb/uvc/uvc_queue.c 		stream = uvc_queue_to_stream(queue);
queue             107 drivers/media/usb/uvc/uvc_queue.c 	struct uvc_video_queue *queue = vb2_get_drv_priv(vb->vb2_queue);
queue             116 drivers/media/usb/uvc/uvc_queue.c 	if (unlikely(queue->flags & UVC_QUEUE_DISCONNECTED))
queue             134 drivers/media/usb/uvc/uvc_queue.c 	struct uvc_video_queue *queue = vb2_get_drv_priv(vb->vb2_queue);
queue             138 drivers/media/usb/uvc/uvc_queue.c 	spin_lock_irqsave(&queue->irqlock, flags);
queue             139 drivers/media/usb/uvc/uvc_queue.c 	if (likely(!(queue->flags & UVC_QUEUE_DISCONNECTED))) {
queue             141 drivers/media/usb/uvc/uvc_queue.c 		list_add_tail(&buf->queue, &queue->irqqueue);
queue             150 drivers/media/usb/uvc/uvc_queue.c 	spin_unlock_irqrestore(&queue->irqlock, flags);
queue             156 drivers/media/usb/uvc/uvc_queue.c 	struct uvc_video_queue *queue = vb2_get_drv_priv(vb->vb2_queue);
queue             157 drivers/media/usb/uvc/uvc_queue.c 	struct uvc_streaming *stream = uvc_queue_to_stream(queue);
queue             166 drivers/media/usb/uvc/uvc_queue.c 	struct uvc_video_queue *queue = vb2_get_drv_priv(vq);
queue             167 drivers/media/usb/uvc/uvc_queue.c 	struct uvc_streaming *stream = uvc_queue_to_stream(queue);
queue             172 drivers/media/usb/uvc/uvc_queue.c 	queue->buf_used = 0;
queue             178 drivers/media/usb/uvc/uvc_queue.c 	spin_lock_irq(&queue->irqlock);
queue             179 drivers/media/usb/uvc/uvc_queue.c 	uvc_queue_return_buffers(queue, UVC_BUF_STATE_QUEUED);
queue             180 drivers/media/usb/uvc/uvc_queue.c 	spin_unlock_irq(&queue->irqlock);
queue             187 drivers/media/usb/uvc/uvc_queue.c 	struct uvc_video_queue *queue = vb2_get_drv_priv(vq);
queue             192 drivers/media/usb/uvc/uvc_queue.c 		uvc_video_stop_streaming(uvc_queue_to_stream(queue));
queue             194 drivers/media/usb/uvc/uvc_queue.c 	spin_lock_irq(&queue->irqlock);
queue             195 drivers/media/usb/uvc/uvc_queue.c 	uvc_queue_return_buffers(queue, UVC_BUF_STATE_ERROR);
queue             196 drivers/media/usb/uvc/uvc_queue.c 	spin_unlock_irq(&queue->irqlock);
queue             219 drivers/media/usb/uvc/uvc_queue.c int uvc_queue_init(struct uvc_video_queue *queue, enum v4l2_buf_type type,
queue             224 drivers/media/usb/uvc/uvc_queue.c 	queue->queue.type = type;
queue             225 drivers/media/usb/uvc/uvc_queue.c 	queue->queue.io_modes = VB2_MMAP | VB2_USERPTR;
queue             226 drivers/media/usb/uvc/uvc_queue.c 	queue->queue.drv_priv = queue;
queue             227 drivers/media/usb/uvc/uvc_queue.c 	queue->queue.buf_struct_size = sizeof(struct uvc_buffer);
queue             228 drivers/media/usb/uvc/uvc_queue.c 	queue->queue.mem_ops = &vb2_vmalloc_memops;
queue             229 drivers/media/usb/uvc/uvc_queue.c 	queue->queue.timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC
queue             231 drivers/media/usb/uvc/uvc_queue.c 	queue->queue.lock = &queue->mutex;
queue             235 drivers/media/usb/uvc/uvc_queue.c 		queue->queue.ops = &uvc_meta_queue_qops;
queue             238 drivers/media/usb/uvc/uvc_queue.c 		queue->queue.io_modes |= VB2_DMABUF;
queue             239 drivers/media/usb/uvc/uvc_queue.c 		queue->queue.ops = &uvc_queue_qops;
queue             243 drivers/media/usb/uvc/uvc_queue.c 	ret = vb2_queue_init(&queue->queue);
queue             247 drivers/media/usb/uvc/uvc_queue.c 	mutex_init(&queue->mutex);
queue             248 drivers/media/usb/uvc/uvc_queue.c 	spin_lock_init(&queue->irqlock);
queue             249 drivers/media/usb/uvc/uvc_queue.c 	INIT_LIST_HEAD(&queue->irqqueue);
queue             250 drivers/media/usb/uvc/uvc_queue.c 	queue->flags = drop_corrupted ? UVC_QUEUE_DROP_CORRUPTED : 0;
queue             255 drivers/media/usb/uvc/uvc_queue.c void uvc_queue_release(struct uvc_video_queue *queue)
queue             257 drivers/media/usb/uvc/uvc_queue.c 	mutex_lock(&queue->mutex);
queue             258 drivers/media/usb/uvc/uvc_queue.c 	vb2_queue_release(&queue->queue);
queue             259 drivers/media/usb/uvc/uvc_queue.c 	mutex_unlock(&queue->mutex);
queue             266 drivers/media/usb/uvc/uvc_queue.c int uvc_request_buffers(struct uvc_video_queue *queue,
queue             271 drivers/media/usb/uvc/uvc_queue.c 	mutex_lock(&queue->mutex);
queue             272 drivers/media/usb/uvc/uvc_queue.c 	ret = vb2_reqbufs(&queue->queue, rb);
queue             273 drivers/media/usb/uvc/uvc_queue.c 	mutex_unlock(&queue->mutex);
queue             278 drivers/media/usb/uvc/uvc_queue.c int uvc_query_buffer(struct uvc_video_queue *queue, struct v4l2_buffer *buf)
queue             282 drivers/media/usb/uvc/uvc_queue.c 	mutex_lock(&queue->mutex);
queue             283 drivers/media/usb/uvc/uvc_queue.c 	ret = vb2_querybuf(&queue->queue, buf);
queue             284 drivers/media/usb/uvc/uvc_queue.c 	mutex_unlock(&queue->mutex);
queue             289 drivers/media/usb/uvc/uvc_queue.c int uvc_create_buffers(struct uvc_video_queue *queue,
queue             294 drivers/media/usb/uvc/uvc_queue.c 	mutex_lock(&queue->mutex);
queue             295 drivers/media/usb/uvc/uvc_queue.c 	ret = vb2_create_bufs(&queue->queue, cb);
queue             296 drivers/media/usb/uvc/uvc_queue.c 	mutex_unlock(&queue->mutex);
queue             301 drivers/media/usb/uvc/uvc_queue.c int uvc_queue_buffer(struct uvc_video_queue *queue,
queue             306 drivers/media/usb/uvc/uvc_queue.c 	mutex_lock(&queue->mutex);
queue             307 drivers/media/usb/uvc/uvc_queue.c 	ret = vb2_qbuf(&queue->queue, mdev, buf);
queue             308 drivers/media/usb/uvc/uvc_queue.c 	mutex_unlock(&queue->mutex);
queue             313 drivers/media/usb/uvc/uvc_queue.c int uvc_export_buffer(struct uvc_video_queue *queue,
queue             318 drivers/media/usb/uvc/uvc_queue.c 	mutex_lock(&queue->mutex);
queue             319 drivers/media/usb/uvc/uvc_queue.c 	ret = vb2_expbuf(&queue->queue, exp);
queue             320 drivers/media/usb/uvc/uvc_queue.c 	mutex_unlock(&queue->mutex);
queue             325 drivers/media/usb/uvc/uvc_queue.c int uvc_dequeue_buffer(struct uvc_video_queue *queue, struct v4l2_buffer *buf,
queue             330 drivers/media/usb/uvc/uvc_queue.c 	mutex_lock(&queue->mutex);
queue             331 drivers/media/usb/uvc/uvc_queue.c 	ret = vb2_dqbuf(&queue->queue, buf, nonblocking);
queue             332 drivers/media/usb/uvc/uvc_queue.c 	mutex_unlock(&queue->mutex);
queue             337 drivers/media/usb/uvc/uvc_queue.c int uvc_queue_streamon(struct uvc_video_queue *queue, enum v4l2_buf_type type)
queue             341 drivers/media/usb/uvc/uvc_queue.c 	mutex_lock(&queue->mutex);
queue             342 drivers/media/usb/uvc/uvc_queue.c 	ret = vb2_streamon(&queue->queue, type);
queue             343 drivers/media/usb/uvc/uvc_queue.c 	mutex_unlock(&queue->mutex);
queue             348 drivers/media/usb/uvc/uvc_queue.c int uvc_queue_streamoff(struct uvc_video_queue *queue, enum v4l2_buf_type type)
queue             352 drivers/media/usb/uvc/uvc_queue.c 	mutex_lock(&queue->mutex);
queue             353 drivers/media/usb/uvc/uvc_queue.c 	ret = vb2_streamoff(&queue->queue, type);
queue             354 drivers/media/usb/uvc/uvc_queue.c 	mutex_unlock(&queue->mutex);
queue             359 drivers/media/usb/uvc/uvc_queue.c int uvc_queue_mmap(struct uvc_video_queue *queue, struct vm_area_struct *vma)
queue             361 drivers/media/usb/uvc/uvc_queue.c 	return vb2_mmap(&queue->queue, vma);
queue             365 drivers/media/usb/uvc/uvc_queue.c unsigned long uvc_queue_get_unmapped_area(struct uvc_video_queue *queue,
queue             368 drivers/media/usb/uvc/uvc_queue.c 	return vb2_get_unmapped_area(&queue->queue, 0, 0, pgoff, 0);
queue             372 drivers/media/usb/uvc/uvc_queue.c __poll_t uvc_queue_poll(struct uvc_video_queue *queue, struct file *file,
queue             377 drivers/media/usb/uvc/uvc_queue.c 	mutex_lock(&queue->mutex);
queue             378 drivers/media/usb/uvc/uvc_queue.c 	ret = vb2_poll(&queue->queue, file, wait);
queue             379 drivers/media/usb/uvc/uvc_queue.c 	mutex_unlock(&queue->mutex);
queue             391 drivers/media/usb/uvc/uvc_queue.c int uvc_queue_allocated(struct uvc_video_queue *queue)
queue             395 drivers/media/usb/uvc/uvc_queue.c 	mutex_lock(&queue->mutex);
queue             396 drivers/media/usb/uvc/uvc_queue.c 	allocated = vb2_is_busy(&queue->queue);
queue             397 drivers/media/usb/uvc/uvc_queue.c 	mutex_unlock(&queue->mutex);
queue             414 drivers/media/usb/uvc/uvc_queue.c void uvc_queue_cancel(struct uvc_video_queue *queue, int disconnect)
queue             418 drivers/media/usb/uvc/uvc_queue.c 	spin_lock_irqsave(&queue->irqlock, flags);
queue             419 drivers/media/usb/uvc/uvc_queue.c 	uvc_queue_return_buffers(queue, UVC_BUF_STATE_ERROR);
queue             427 drivers/media/usb/uvc/uvc_queue.c 		queue->flags |= UVC_QUEUE_DISCONNECTED;
queue             428 drivers/media/usb/uvc/uvc_queue.c 	spin_unlock_irqrestore(&queue->irqlock, flags);
queue             438 drivers/media/usb/uvc/uvc_queue.c __uvc_queue_get_current_buffer(struct uvc_video_queue *queue)
queue             440 drivers/media/usb/uvc/uvc_queue.c 	if (list_empty(&queue->irqqueue))
queue             443 drivers/media/usb/uvc/uvc_queue.c 	return list_first_entry(&queue->irqqueue, struct uvc_buffer, queue);
queue             446 drivers/media/usb/uvc/uvc_queue.c struct uvc_buffer *uvc_queue_get_current_buffer(struct uvc_video_queue *queue)
queue             451 drivers/media/usb/uvc/uvc_queue.c 	spin_lock_irqsave(&queue->irqlock, flags);
queue             452 drivers/media/usb/uvc/uvc_queue.c 	nextbuf = __uvc_queue_get_current_buffer(queue);
queue             453 drivers/media/usb/uvc/uvc_queue.c 	spin_unlock_irqrestore(&queue->irqlock, flags);
queue             465 drivers/media/usb/uvc/uvc_queue.c static void uvc_queue_buffer_requeue(struct uvc_video_queue *queue,
queue             480 drivers/media/usb/uvc/uvc_queue.c 	struct uvc_video_queue *queue = vb2_get_drv_priv(vb->vb2_queue);
queue             482 drivers/media/usb/uvc/uvc_queue.c 	if ((queue->flags & UVC_QUEUE_DROP_CORRUPTED) && buf->error) {
queue             483 drivers/media/usb/uvc/uvc_queue.c 		uvc_queue_buffer_requeue(queue, buf);
queue             506 drivers/media/usb/uvc/uvc_queue.c struct uvc_buffer *uvc_queue_next_buffer(struct uvc_video_queue *queue,
queue             512 drivers/media/usb/uvc/uvc_queue.c 	spin_lock_irqsave(&queue->irqlock, flags);
queue             513 drivers/media/usb/uvc/uvc_queue.c 	list_del(&buf->queue);
queue             514 drivers/media/usb/uvc/uvc_queue.c 	nextbuf = __uvc_queue_get_current_buffer(queue);
queue             515 drivers/media/usb/uvc/uvc_queue.c 	spin_unlock_irqrestore(&queue->irqlock, flags);
queue             315 drivers/media/usb/uvc/uvc_v4l2.c 	if (uvc_queue_allocated(&stream->queue)) {
queue             390 drivers/media/usb/uvc/uvc_v4l2.c 	if (uvc_queue_streaming(&stream->queue)) {
queue             561 drivers/media/usb/uvc/uvc_v4l2.c 		uvc_queue_release(&stream->queue);
queue             716 drivers/media/usb/uvc/uvc_v4l2.c 	ret = uvc_request_buffers(&stream->queue, rb);
queue             736 drivers/media/usb/uvc/uvc_v4l2.c 	return uvc_query_buffer(&stream->queue, buf);
queue             747 drivers/media/usb/uvc/uvc_v4l2.c 	return uvc_queue_buffer(&stream->queue,
queue             760 drivers/media/usb/uvc/uvc_v4l2.c 	return uvc_export_buffer(&stream->queue, exp);
queue             771 drivers/media/usb/uvc/uvc_v4l2.c 	return uvc_dequeue_buffer(&stream->queue, buf,
queue             786 drivers/media/usb/uvc/uvc_v4l2.c 	return uvc_create_buffers(&stream->queue, cb);
queue             800 drivers/media/usb/uvc/uvc_v4l2.c 	ret = uvc_queue_streamon(&stream->queue, type);
queue             816 drivers/media/usb/uvc/uvc_v4l2.c 	uvc_queue_streamoff(&stream->queue, type);
queue            1441 drivers/media/usb/uvc/uvc_v4l2.c 	return uvc_queue_mmap(&stream->queue, vma);
queue            1451 drivers/media/usb/uvc/uvc_v4l2.c 	return uvc_queue_poll(&stream->queue, file, wait);
queue            1464 drivers/media/usb/uvc/uvc_v4l2.c 	return uvc_queue_get_unmapped_area(&stream->queue, pgoff);
queue            1196 drivers/media/usb/uvc/uvc_video.c 	struct uvc_video_queue *queue = &stream->queue;
queue            1201 drivers/media/usb/uvc/uvc_video.c 	mem = buf->mem + queue->buf_used;
queue            1202 drivers/media/usb/uvc/uvc_video.c 	nbytes = min((unsigned int)len, buf->bytesused - queue->buf_used);
queue            1207 drivers/media/usb/uvc/uvc_video.c 	queue->buf_used += nbytes;
queue            1326 drivers/media/usb/uvc/uvc_video.c 		*meta_buf = uvc_queue_next_buffer(&stream->meta.queue,
queue            1329 drivers/media/usb/uvc/uvc_video.c 	*video_buf = uvc_queue_next_buffer(&stream->queue, *video_buf);
queue            1476 drivers/media/usb/uvc/uvc_video.c 	if (buf->bytesused == stream->queue.buf_used ||
queue            1478 drivers/media/usb/uvc/uvc_video.c 		if (buf->bytesused == stream->queue.buf_used) {
queue            1479 drivers/media/usb/uvc/uvc_video.c 			stream->queue.buf_used = 0;
queue            1482 drivers/media/usb/uvc/uvc_video.c 			uvc_queue_next_buffer(&stream->queue, buf);
queue            1497 drivers/media/usb/uvc/uvc_video.c 	struct uvc_video_queue *queue = &stream->queue;
queue            1498 drivers/media/usb/uvc/uvc_video.c 	struct uvc_video_queue *qmeta = &stream->meta.queue;
queue            1499 drivers/media/usb/uvc/uvc_video.c 	struct vb2_queue *vb2_qmeta = stream->meta.vdev.queue;
queue            1519 drivers/media/usb/uvc/uvc_video.c 		uvc_queue_cancel(queue, urb->status == -ESHUTDOWN);
queue            1525 drivers/media/usb/uvc/uvc_video.c 	buf = uvc_queue_get_current_buffer(queue);
queue            1531 drivers/media/usb/uvc/uvc_video.c 						    struct uvc_buffer, queue);
queue            1925 drivers/media/usb/uvc/uvc_video.c 	if (!uvc_queue_streaming(&stream->queue))
queue            1958 drivers/media/usb/uvc/uvc_video.c 	if (!uvc_queue_streaming(&stream->queue))
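
The uvc_video.c hits show per-frame reassembly: each packet payload is copied at offset queue->buf_used, the copy length is clamped with min() against the bytes still missing, and the buffer completes (and buf_used resets) once buf_used reaches bytesused. A compilable sketch of that accumulation, with made-up struct and function names:

    #include <string.h>

    struct frame {
        unsigned char *mem;         /* destination, like buf->mem */
        unsigned int bytesused;     /* expected total payload */
        unsigned int buf_used;      /* bytes gathered so far */
    };

    /* Copy one packet's payload into the frame; returns 1 when the
     * frame is complete and ready to hand to the consumer. */
    int frame_add_packet(struct frame *f, const void *data, unsigned int len)
    {
        unsigned int nbytes = f->bytesused - f->buf_used;

        if (len < nbytes)
            nbytes = len;           /* the min() clamp seen above */
        memcpy(f->mem + f->buf_used, data, nbytes);
        f->buf_used += nbytes;

        if (f->buf_used == f->bytesused) {
            f->buf_used = 0;        /* reset for the next frame */
            return 1;
        }
        return 0;
    }
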
queue             407 drivers/media/usb/uvc/uvcvideo.h 	struct list_head queue;
queue             426 drivers/media/usb/uvc/uvcvideo.h 	struct vb2_queue queue;
queue             564 drivers/media/usb/uvc/uvcvideo.h 	struct uvc_video_queue queue;
queue             571 drivers/media/usb/uvc/uvcvideo.h 		struct uvc_video_queue queue;
queue             747 drivers/media/usb/uvc/uvcvideo.h int uvc_queue_init(struct uvc_video_queue *queue, enum v4l2_buf_type type,
queue             749 drivers/media/usb/uvc/uvcvideo.h void uvc_queue_release(struct uvc_video_queue *queue);
queue             750 drivers/media/usb/uvc/uvcvideo.h int uvc_request_buffers(struct uvc_video_queue *queue,
queue             752 drivers/media/usb/uvc/uvcvideo.h int uvc_query_buffer(struct uvc_video_queue *queue,
queue             754 drivers/media/usb/uvc/uvcvideo.h int uvc_create_buffers(struct uvc_video_queue *queue,
queue             756 drivers/media/usb/uvc/uvcvideo.h int uvc_queue_buffer(struct uvc_video_queue *queue,
queue             759 drivers/media/usb/uvc/uvcvideo.h int uvc_export_buffer(struct uvc_video_queue *queue,
queue             761 drivers/media/usb/uvc/uvcvideo.h int uvc_dequeue_buffer(struct uvc_video_queue *queue,
queue             763 drivers/media/usb/uvc/uvcvideo.h int uvc_queue_streamon(struct uvc_video_queue *queue, enum v4l2_buf_type type);
queue             764 drivers/media/usb/uvc/uvcvideo.h int uvc_queue_streamoff(struct uvc_video_queue *queue, enum v4l2_buf_type type);
queue             765 drivers/media/usb/uvc/uvcvideo.h void uvc_queue_cancel(struct uvc_video_queue *queue, int disconnect);
queue             766 drivers/media/usb/uvc/uvcvideo.h struct uvc_buffer *uvc_queue_next_buffer(struct uvc_video_queue *queue,
queue             768 drivers/media/usb/uvc/uvcvideo.h struct uvc_buffer *uvc_queue_get_current_buffer(struct uvc_video_queue *queue);
queue             770 drivers/media/usb/uvc/uvcvideo.h int uvc_queue_mmap(struct uvc_video_queue *queue,
queue             772 drivers/media/usb/uvc/uvcvideo.h __poll_t uvc_queue_poll(struct uvc_video_queue *queue, struct file *file,
queue             775 drivers/media/usb/uvc/uvcvideo.h unsigned long uvc_queue_get_unmapped_area(struct uvc_video_queue *queue,
queue             778 drivers/media/usb/uvc/uvcvideo.h int uvc_queue_allocated(struct uvc_video_queue *queue);
queue             779 drivers/media/usb/uvc/uvcvideo.h static inline int uvc_queue_streaming(struct uvc_video_queue *queue)
queue             781 drivers/media/usb/uvc/uvcvideo.h 	return vb2_is_streaming(&queue->queue);
queue             810 drivers/media/usb/uvc/uvcvideo.h 			      struct uvc_video_queue *queue,
queue             413 drivers/media/usb/zr364xx/zr364xx.c 	list_add_tail(&buf->vb.queue, &cam->vidq.active);
queue             530 drivers/media/usb/zr364xx/zr364xx.c 			 struct zr364xx_buffer, vb.queue);
queue             537 drivers/media/usb/zr364xx/zr364xx.c 	list_del(&buf->vb.queue);
queue            3149 drivers/media/v4l2-core/v4l2-ctrls.c 		goto queue;
queue            3176 drivers/media/v4l2-core/v4l2-ctrls.c queue:
queue            3207 drivers/media/v4l2-core/v4l2-ctrls.c 	.queue = v4l2_ctrl_request_queue,
queue            2798 drivers/media/v4l2-core/v4l2-ioctl.c 	if (vdev->queue && vdev->queue->lock &&
queue            2800 drivers/media/v4l2-core/v4l2-ioctl.c 		return vdev->queue->lock;
queue             267 drivers/media/v4l2-core/v4l2-mem2mem.c 				   struct v4l2_m2m_ctx, queue);
queue             331 drivers/media/v4l2-core/v4l2-mem2mem.c 	list_add_tail(&m2m_ctx->queue, &m2m_dev->job_queue);
queue             404 drivers/media/v4l2-core/v4l2-mem2mem.c 		list_del(&m2m_ctx->queue);
queue             427 drivers/media/v4l2-core/v4l2-mem2mem.c 	list_del(&m2m_dev->curr_ctx->queue);
queue             586 drivers/media/v4l2-core/v4l2-mem2mem.c 		list_del(&m2m_ctx->queue);
queue             924 drivers/media/v4l2-core/v4l2-mem2mem.c 	INIT_LIST_HEAD(&m2m_ctx->queue);
queue            1016 drivers/media/v4l2-core/v4l2-mem2mem.c 		if (!obj->ops->queue)
queue            1035 drivers/media/v4l2-core/v4l2-mem2mem.c 		obj->ops->queue(obj);
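
The v4l2-mem2mem hits sketch the job scheduler's FIFO discipline: contexts are list_add_tail()ed onto m2m_dev->job_queue, the runner always picks the first entry, and finished or aborted contexts are list_del()ed. A small userspace model using a head/tail singly linked list in place of the kernel's list_head (names illustrative):

    #include <stddef.h>

    struct job {                    /* stands in for v4l2_m2m_ctx */
        struct job *next;
    };

    struct job_queue {
        struct job *head, *tail;    /* oldest job at head */
    };

    /* list_add_tail(): new work goes to the back. */
    void job_add(struct job_queue *q, struct job *j)
    {
        j->next = NULL;
        if (q->tail)
            q->tail->next = j;
        else
            q->head = j;
        q->tail = j;
    }

    /* The scheduler always runs the oldest queued job. */
    struct job *job_pick(struct job_queue *q)
    {
        return q->head;
    }

    /* list_del() of the running job once it finishes. */
    void job_remove_head(struct job_queue *q)
    {
        if (!q->head)
            return;
        q->head = q->head->next;
        if (!q->head)
            q->tail = NULL;
    }
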
queue             277 drivers/media/v4l2-core/videobuf-core.c 			list_del(&q->bufs[i]->queue);
queue            1898 drivers/memstick/core/ms_block.c 		blk_rq_map_sg(msb->queue, req, sg);
queue            2000 drivers/memstick/core/ms_block.c 	struct memstick_dev *card = hctx->queue->queuedata;
queue            2046 drivers/memstick/core/ms_block.c 	blk_mq_stop_hw_queues(msb->queue);
queue            2082 drivers/memstick/core/ms_block.c 	blk_mq_start_hw_queues(msb->queue);
queue            2119 drivers/memstick/core/ms_block.c 	msb->queue = blk_mq_init_sq_queue(&msb->tag_set, &msb_mq_ops, 2,
queue            2121 drivers/memstick/core/ms_block.c 	if (IS_ERR(msb->queue)) {
queue            2122 drivers/memstick/core/ms_block.c 		rc = PTR_ERR(msb->queue);
queue            2123 drivers/memstick/core/ms_block.c 		msb->queue = NULL;
queue            2127 drivers/memstick/core/ms_block.c 	msb->queue->queuedata = card;
queue            2129 drivers/memstick/core/ms_block.c 	blk_queue_max_hw_sectors(msb->queue, MS_BLOCK_MAX_PAGES);
queue            2130 drivers/memstick/core/ms_block.c 	blk_queue_max_segments(msb->queue, MS_BLOCK_MAX_SEGS);
queue            2131 drivers/memstick/core/ms_block.c 	blk_queue_max_segment_size(msb->queue,
queue            2133 drivers/memstick/core/ms_block.c 	blk_queue_logical_block_size(msb->queue, msb->page_size);
queue            2138 drivers/memstick/core/ms_block.c 	msb->disk->queue = msb->queue;
queue            2212 drivers/memstick/core/ms_block.c 	blk_mq_start_hw_queues(msb->queue);
queue            2216 drivers/memstick/core/ms_block.c 	blk_cleanup_queue(msb->queue);
queue            2218 drivers/memstick/core/ms_block.c 	msb->queue = NULL;
queue             149 drivers/memstick/core/ms_block.h 	struct request_queue		*queue;
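
ms_block.c tests blk_mq_init_sq_queue() with IS_ERR()/PTR_ERR() rather than against NULL, because the returned pointer carries either a real queue or a negative errno encoded at the top of the address space. A self-contained model of that convention; init_queue() here is a made-up stand-in for the real allocator:

    #include <errno.h>
    #include <stdio.h>
    #include <stdlib.h>

    #define MAX_ERRNO 4095

    static void *ERR_PTR(long error) { return (void *)error; }
    static long PTR_ERR(const void *ptr) { return (long)ptr; }
    static int IS_ERR(const void *ptr)
    {
        /* Error values occupy the last 4095 addresses. */
        return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
    }

    static void *init_queue(int fail)
    {
        return fail ? ERR_PTR(-ENOMEM) : malloc(64);
    }

    int main(void)
    {
        void *q = init_queue(1);

        if (IS_ERR(q)) {            /* same shape as the ms_block code */
            long rc = PTR_ERR(q);

            q = NULL;
            fprintf(stderr, "init failed: %ld\n", rc);
        }
        free(q);                    /* free(NULL) is a no-op */
        return 0;
    }
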
queue             139 drivers/memstick/core/mspro_block.c 	struct request_queue  *queue;
queue             806 drivers/memstick/core/mspro_block.c 			blk_mq_stop_hw_queues(msb->queue);
queue             822 drivers/memstick/core/mspro_block.c 	blk_mq_start_hw_queues(msb->queue);
queue             828 drivers/memstick/core/mspro_block.c 	struct memstick_dev *card = hctx->queue->queuedata;
queue            1214 drivers/memstick/core/mspro_block.c 	msb->queue = blk_mq_init_sq_queue(&msb->tag_set, &mspro_mq_ops, 2,
queue            1216 drivers/memstick/core/mspro_block.c 	if (IS_ERR(msb->queue)) {
queue            1217 drivers/memstick/core/mspro_block.c 		rc = PTR_ERR(msb->queue);
queue            1218 drivers/memstick/core/mspro_block.c 		msb->queue = NULL;
queue            1222 drivers/memstick/core/mspro_block.c 	msb->queue->queuedata = card;
queue            1224 drivers/memstick/core/mspro_block.c 	blk_queue_max_hw_sectors(msb->queue, MSPRO_BLOCK_MAX_PAGES);
queue            1225 drivers/memstick/core/mspro_block.c 	blk_queue_max_segments(msb->queue, MSPRO_BLOCK_MAX_SEGS);
queue            1226 drivers/memstick/core/mspro_block.c 	blk_queue_max_segment_size(msb->queue,
queue            1234 drivers/memstick/core/mspro_block.c 	msb->disk->queue = msb->queue;
queue            1238 drivers/memstick/core/mspro_block.c 	blk_queue_logical_block_size(msb->queue, msb->page_size);
queue            1329 drivers/memstick/core/mspro_block.c 	blk_mq_start_hw_queues(msb->queue);
queue            1334 drivers/memstick/core/mspro_block.c 	blk_cleanup_queue(msb->queue);
queue            1336 drivers/memstick/core/mspro_block.c 	msb->queue = NULL;
queue            1355 drivers/memstick/core/mspro_block.c 	blk_mq_stop_hw_queues(msb->queue);
queue            1412 drivers/memstick/core/mspro_block.c 	blk_mq_start_hw_queues(msb->queue);
queue              68 drivers/mfd/ipaq-micro.c 		list_add_tail(&msg->node, &micro->queue);
queue             101 drivers/mfd/ipaq-micro.c 			if (!list_empty(&micro->queue)) {
queue             102 drivers/mfd/ipaq-micro.c 				micro->msg = list_entry(micro->queue.next,
queue             422 drivers/mfd/ipaq-micro.c 	INIT_LIST_HEAD(&micro->queue);
queue              43 drivers/mfd/pcf50633-adc.c 	struct pcf50633_adc_request *queue[PCF50633_MAX_ADC_FIFO_DEPTH];
queue              74 drivers/mfd/pcf50633-adc.c 	if (!adc->queue[head])
queue              77 drivers/mfd/pcf50633-adc.c 	adc_setup(pcf, adc->queue[head]->mux, adc->queue[head]->avg);
queue              91 drivers/mfd/pcf50633-adc.c 	if (adc->queue[tail]) {
queue              97 drivers/mfd/pcf50633-adc.c 	adc->queue[tail] = req;
queue             178 drivers/mfd/pcf50633-adc.c 	req = adc->queue[head];
queue             184 drivers/mfd/pcf50633-adc.c 	adc->queue[head] = NULL;
queue             226 drivers/mfd/pcf50633-adc.c 	if (WARN_ON(adc->queue[head]))
queue             231 drivers/mfd/pcf50633-adc.c 		kfree(adc->queue[i]);
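
The pcf50633-adc hits show a fixed array of request pointers driven by head/tail indices: a pointer still sitting in adc->queue[tail] means the FIFO is full, and completion clears adc->queue[head] to free the slot. A compilable sketch of that pointer-slot FIFO (the depth and names are illustrative):

    #include <stddef.h>

    #define FIFO_DEPTH 8            /* like PCF50633_MAX_ADC_FIFO_DEPTH */

    struct fifo {
        void *slot[FIFO_DEPTH];
        int head, tail;             /* advance modulo FIFO_DEPTH */
    };

    /* Full when the tail slot is still occupied. */
    int fifo_push(struct fifo *f, void *req)
    {
        if (f->slot[f->tail])
            return -1;              /* queue full */
        f->slot[f->tail] = req;
        f->tail = (f->tail + 1) % FIFO_DEPTH;
        return 0;
    }

    /* Consume the head slot and clear it so push() sees it as free. */
    void *fifo_pop(struct fifo *f)
    {
        void *req = f->slot[f->head];

        if (req) {
            f->slot[f->head] = NULL;
            f->head = (f->head + 1) % FIFO_DEPTH;
        }
        return req;
    }
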
queue             281 drivers/misc/genwqe/card_base.h 	struct ddcb_queue queue;	/* genwqe DDCB queue */
queue             384 drivers/misc/genwqe/card_base.h 	struct ddcb_queue *queue;	  /* associated queue */
queue              82 drivers/misc/genwqe/card_ddcb.c static int queue_empty(struct ddcb_queue *queue)
queue              84 drivers/misc/genwqe/card_ddcb.c 	return queue->ddcb_next == queue->ddcb_act;
queue              87 drivers/misc/genwqe/card_ddcb.c static int queue_enqueued_ddcbs(struct ddcb_queue *queue)
queue              89 drivers/misc/genwqe/card_ddcb.c 	if (queue->ddcb_next >= queue->ddcb_act)
queue              90 drivers/misc/genwqe/card_ddcb.c 		return queue->ddcb_next - queue->ddcb_act;
queue              92 drivers/misc/genwqe/card_ddcb.c 	return queue->ddcb_max - (queue->ddcb_act - queue->ddcb_next);
queue              95 drivers/misc/genwqe/card_ddcb.c static int queue_free_ddcbs(struct ddcb_queue *queue)
queue              97 drivers/misc/genwqe/card_ddcb.c 	int free_ddcbs = queue->ddcb_max - queue_enqueued_ddcbs(queue) - 1;
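
queue_enqueued_ddcbs() and queue_free_ddcbs() above are classic ring-buffer arithmetic with one guard slot, so ddcb_next == ddcb_act can only ever mean "empty", never "full". A standalone model of the same math, with the field names shortened:

    #include <assert.h>

    struct ring {
        int act;                    /* consumer index (ddcb_act) */
        int next;                   /* producer index (ddcb_next) */
        int max;                    /* ring size (ddcb_max) */
    };

    int ring_empty(const struct ring *r) { return r->next == r->act; }

    int ring_enqueued(const struct ring *r)
    {
        if (r->next >= r->act)
            return r->next - r->act;
        return r->max - (r->act - r->next); /* producer wrapped */
    }

    int ring_free(const struct ring *r)
    {
        return r->max - ring_enqueued(r) - 1; /* minus the guard slot */
    }

    int main(void)
    {
        struct ring r = { .act = 6, .next = 2, .max = 8 };

        assert(!ring_empty(&r));
        assert(ring_enqueued(&r) == 4); /* slots 6, 7, 0, 1 in flight */
        assert(ring_free(&r) == 3);
        return 0;
    }
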
queue             163 drivers/misc/genwqe/card_ddcb.c static void print_ddcb_info(struct genwqe_dev *cd, struct ddcb_queue *queue)
queue             174 drivers/misc/genwqe/card_ddcb.c 		 cd->card_idx, queue->ddcb_act, queue->ddcb_next);
queue             176 drivers/misc/genwqe/card_ddcb.c 	pddcb = queue->ddcb_vaddr;
queue             177 drivers/misc/genwqe/card_ddcb.c 	for (i = 0; i < queue->ddcb_max; i++) {
queue             180 drivers/misc/genwqe/card_ddcb.c 			i == queue->ddcb_act ? '>' : ' ',
queue             265 drivers/misc/genwqe/card_ddcb.c static int enqueue_ddcb(struct genwqe_dev *cd, struct ddcb_queue *queue,
queue             282 drivers/misc/genwqe/card_ddcb.c 	prev_no = (ddcb_no == 0) ? queue->ddcb_max - 1 : ddcb_no - 1;
queue             283 drivers/misc/genwqe/card_ddcb.c 	prev_ddcb = &queue->ddcb_vaddr[prev_no];
queue             312 drivers/misc/genwqe/card_ddcb.c 	__genwqe_writeq(cd, queue->IO_QUEUE_OFFSET, num); /* start queue */
queue             330 drivers/misc/genwqe/card_ddcb.c 	struct ddcb_queue *queue = req->queue;
queue             331 drivers/misc/genwqe/card_ddcb.c 	struct ddcb *pddcb = &queue->ddcb_vaddr[req->num];
queue             346 drivers/misc/genwqe/card_ddcb.c 			queue->ddcb_max - 1 : ddcb_no - 1;
queue             347 drivers/misc/genwqe/card_ddcb.c 		struct ddcb *prev_pddcb = &queue->ddcb_vaddr[prev_no];
queue             363 drivers/misc/genwqe/card_ddcb.c 				   struct ddcb_queue *queue)
queue             369 drivers/misc/genwqe/card_ddcb.c 	spin_lock_irqsave(&queue->ddcb_lock, flags);
queue             372 drivers/misc/genwqe/card_ddcb.c 	while (!queue_empty(queue) && (ddcbs_finished < queue->ddcb_max)) {
queue             378 drivers/misc/genwqe/card_ddcb.c 		pddcb = &queue->ddcb_vaddr[queue->ddcb_act];
queue             387 drivers/misc/genwqe/card_ddcb.c 		req = queue->ddcb_req[queue->ddcb_act];
queue             405 drivers/misc/genwqe/card_ddcb.c 			u64 ddcb_offs = (u64)pddcb - (u64)queue->ddcb_vaddr;
queue             407 drivers/misc/genwqe/card_ddcb.c 			errcnts = __genwqe_readq(cd, queue->IO_QUEUE_ERRCNTS);
queue             408 drivers/misc/genwqe/card_ddcb.c 			status  = __genwqe_readq(cd, queue->IO_QUEUE_STATUS);
queue             414 drivers/misc/genwqe/card_ddcb.c 				queue->ddcb_daddr + ddcb_offs);
queue             417 drivers/misc/genwqe/card_ddcb.c 		copy_ddcb_results(req, queue->ddcb_act);
queue             418 drivers/misc/genwqe/card_ddcb.c 		queue->ddcb_req[queue->ddcb_act] = NULL; /* take from queue */
queue             439 drivers/misc/genwqe/card_ddcb.c 		queue->ddcbs_completed++;
queue             440 drivers/misc/genwqe/card_ddcb.c 		queue->ddcbs_in_flight--;
queue             444 drivers/misc/genwqe/card_ddcb.c 		wake_up_interruptible(&queue->ddcb_waitqs[queue->ddcb_act]);
queue             445 drivers/misc/genwqe/card_ddcb.c 		wake_up_interruptible(&queue->busy_waitq);
queue             448 drivers/misc/genwqe/card_ddcb.c 		queue->ddcb_act = (queue->ddcb_act + 1) % queue->ddcb_max;
queue             453 drivers/misc/genwqe/card_ddcb.c 	spin_unlock_irqrestore(&queue->ddcb_lock, flags);
queue             478 drivers/misc/genwqe/card_ddcb.c 	struct ddcb_queue *queue;
queue             484 drivers/misc/genwqe/card_ddcb.c 	queue = req->queue;
queue             485 drivers/misc/genwqe/card_ddcb.c 	if (queue == NULL)
queue             489 drivers/misc/genwqe/card_ddcb.c 	if (ddcb_no >= queue->ddcb_max)
queue             492 drivers/misc/genwqe/card_ddcb.c 	rc = wait_event_interruptible_timeout(queue->ddcb_waitqs[ddcb_no],
queue             503 drivers/misc/genwqe/card_ddcb.c 		struct ddcb_queue *queue = req->queue;
queue             511 drivers/misc/genwqe/card_ddcb.c 		genwqe_check_ddcb_queue(cd, req->queue);
queue             521 drivers/misc/genwqe/card_ddcb.c 			__genwqe_readq(cd, queue->IO_QUEUE_STATUS));
queue             523 drivers/misc/genwqe/card_ddcb.c 		pddcb = &queue->ddcb_vaddr[req->num];
queue             526 drivers/misc/genwqe/card_ddcb.c 		print_ddcb_info(cd, req->queue);
queue             563 drivers/misc/genwqe/card_ddcb.c 				  struct ddcb_queue *queue,
queue             569 drivers/misc/genwqe/card_ddcb.c 	if (queue_free_ddcbs(queue) == 0) /* queue is full */
queue             573 drivers/misc/genwqe/card_ddcb.c 	pddcb = &queue->ddcb_vaddr[queue->ddcb_next];
queue             580 drivers/misc/genwqe/card_ddcb.c 	*num = queue->ddcb_next;	/* internal DDCB number */
queue             581 drivers/misc/genwqe/card_ddcb.c 	queue->ddcb_next = (queue->ddcb_next + 1) % queue->ddcb_max;
queue             596 drivers/misc/genwqe/card_ddcb.c 	pddcb->seqnum_16 = cpu_to_be16(queue->ddcb_seq++);
queue             620 drivers/misc/genwqe/card_ddcb.c 	struct ddcb_queue *queue = req->queue;
queue             633 drivers/misc/genwqe/card_ddcb.c 	pddcb = &queue->ddcb_vaddr[req->num];
queue             637 drivers/misc/genwqe/card_ddcb.c 		spin_lock_irqsave(&queue->ddcb_lock, flags);
queue             660 drivers/misc/genwqe/card_ddcb.c 		spin_unlock_irqrestore(&queue->ddcb_lock, flags);
queue             676 drivers/misc/genwqe/card_ddcb.c 		queue->ddcbs_in_flight--;
queue             677 drivers/misc/genwqe/card_ddcb.c 		queue->ddcb_req[req->num] = NULL; /* delete from array */
queue             692 drivers/misc/genwqe/card_ddcb.c 		    (queue->ddcb_act == req->num)) {
queue             693 drivers/misc/genwqe/card_ddcb.c 			queue->ddcb_act = ((queue->ddcb_act + 1) %
queue             694 drivers/misc/genwqe/card_ddcb.c 					   queue->ddcb_max);
queue             697 drivers/misc/genwqe/card_ddcb.c 		spin_unlock_irqrestore(&queue->ddcb_lock, flags);
queue             705 drivers/misc/genwqe/card_ddcb.c 	queue_status = __genwqe_readq(cd, queue->IO_QUEUE_STATUS);
queue             715 drivers/misc/genwqe/card_ddcb.c 	print_ddcb_info(cd, req->queue);
queue             754 drivers/misc/genwqe/card_ddcb.c 	struct ddcb_queue *queue;
queue             767 drivers/misc/genwqe/card_ddcb.c 	queue = req->queue = &cd->queue;
queue             773 drivers/misc/genwqe/card_ddcb.c 		genwqe_check_ddcb_queue(cd, queue);
queue             780 drivers/misc/genwqe/card_ddcb.c 	spin_lock_irqsave(&queue->ddcb_lock, flags);
queue             782 drivers/misc/genwqe/card_ddcb.c 	pddcb = get_next_ddcb(cd, queue, &req->num);	/* get ptr and num */
queue             786 drivers/misc/genwqe/card_ddcb.c 		spin_unlock_irqrestore(&queue->ddcb_lock, flags);
queue             789 drivers/misc/genwqe/card_ddcb.c 			queue->return_on_busy++;
queue             793 drivers/misc/genwqe/card_ddcb.c 		queue->wait_on_busy++;
queue             794 drivers/misc/genwqe/card_ddcb.c 		rc = wait_event_interruptible(queue->busy_waitq,
queue             795 drivers/misc/genwqe/card_ddcb.c 					      queue_free_ddcbs(queue) != 0);
queue             804 drivers/misc/genwqe/card_ddcb.c 	if (queue->ddcb_req[req->num] != NULL) {
queue             805 drivers/misc/genwqe/card_ddcb.c 		spin_unlock_irqrestore(&queue->ddcb_lock, flags);
queue             813 drivers/misc/genwqe/card_ddcb.c 	queue->ddcb_req[req->num] = req;
queue             887 drivers/misc/genwqe/card_ddcb.c 	enqueue_ddcb(cd, queue, pddcb, req->num);
queue             888 drivers/misc/genwqe/card_ddcb.c 	queue->ddcbs_in_flight++;
queue             890 drivers/misc/genwqe/card_ddcb.c 	if (queue->ddcbs_in_flight > queue->ddcbs_max_in_flight)
queue             891 drivers/misc/genwqe/card_ddcb.c 		queue->ddcbs_max_in_flight = queue->ddcbs_in_flight;
queue             894 drivers/misc/genwqe/card_ddcb.c 	spin_unlock_irqrestore(&queue->ddcb_lock, flags);
queue             975 drivers/misc/genwqe/card_ddcb.c 	struct ddcb_queue *queue = &cd->queue;
queue             977 drivers/misc/genwqe/card_ddcb.c 	spin_lock_irqsave(&queue->ddcb_lock, flags);
queue             979 drivers/misc/genwqe/card_ddcb.c 	if (queue_empty(queue)) { /* empty queue */
queue             980 drivers/misc/genwqe/card_ddcb.c 		spin_unlock_irqrestore(&queue->ddcb_lock, flags);
queue             984 drivers/misc/genwqe/card_ddcb.c 	pddcb = &queue->ddcb_vaddr[queue->ddcb_act];
queue             986 drivers/misc/genwqe/card_ddcb.c 		spin_unlock_irqrestore(&queue->ddcb_lock, flags);
queue             990 drivers/misc/genwqe/card_ddcb.c 	spin_unlock_irqrestore(&queue->ddcb_lock, flags);
queue            1005 drivers/misc/genwqe/card_ddcb.c 	struct ddcb_queue *queue = &cd->queue;
queue            1007 drivers/misc/genwqe/card_ddcb.c 	spin_lock_irqsave(&queue->ddcb_lock, flags);
queue            1008 drivers/misc/genwqe/card_ddcb.c 	ddcbs_in_flight += queue->ddcbs_in_flight;
queue            1009 drivers/misc/genwqe/card_ddcb.c 	spin_unlock_irqrestore(&queue->ddcb_lock, flags);
queue            1014 drivers/misc/genwqe/card_ddcb.c static int setup_ddcb_queue(struct genwqe_dev *cd, struct ddcb_queue *queue)
queue            1027 drivers/misc/genwqe/card_ddcb.c 	queue->ddcbs_in_flight = 0;  /* statistics */
queue            1028 drivers/misc/genwqe/card_ddcb.c 	queue->ddcbs_max_in_flight = 0;
queue            1029 drivers/misc/genwqe/card_ddcb.c 	queue->ddcbs_completed = 0;
queue            1030 drivers/misc/genwqe/card_ddcb.c 	queue->return_on_busy = 0;
queue            1031 drivers/misc/genwqe/card_ddcb.c 	queue->wait_on_busy = 0;
queue            1033 drivers/misc/genwqe/card_ddcb.c 	queue->ddcb_seq	  = 0x100; /* start sequence number */
queue            1034 drivers/misc/genwqe/card_ddcb.c 	queue->ddcb_max	  = GENWQE_DDCB_MAX;
queue            1035 drivers/misc/genwqe/card_ddcb.c 	queue->ddcb_vaddr = __genwqe_alloc_consistent(cd, queue_size,
queue            1036 drivers/misc/genwqe/card_ddcb.c 						&queue->ddcb_daddr);
queue            1037 drivers/misc/genwqe/card_ddcb.c 	if (queue->ddcb_vaddr == NULL) {
queue            1042 drivers/misc/genwqe/card_ddcb.c 	queue->ddcb_req = kcalloc(queue->ddcb_max, sizeof(struct ddcb_requ *),
queue            1044 drivers/misc/genwqe/card_ddcb.c 	if (!queue->ddcb_req) {
queue            1049 drivers/misc/genwqe/card_ddcb.c 	queue->ddcb_waitqs = kcalloc(queue->ddcb_max,
queue            1052 drivers/misc/genwqe/card_ddcb.c 	if (!queue->ddcb_waitqs) {
queue            1057 drivers/misc/genwqe/card_ddcb.c 	for (i = 0; i < queue->ddcb_max; i++) {
queue            1058 drivers/misc/genwqe/card_ddcb.c 		pddcb = &queue->ddcb_vaddr[i];		     /* DDCBs */
queue            1062 drivers/misc/genwqe/card_ddcb.c 		queue->ddcb_req[i] = NULL;		     /* requests */
queue            1063 drivers/misc/genwqe/card_ddcb.c 		init_waitqueue_head(&queue->ddcb_waitqs[i]); /* waitqueues */
queue            1066 drivers/misc/genwqe/card_ddcb.c 	queue->ddcb_act  = 0;
queue            1067 drivers/misc/genwqe/card_ddcb.c 	queue->ddcb_next = 0;	/* queue is empty */
queue            1069 drivers/misc/genwqe/card_ddcb.c 	spin_lock_init(&queue->ddcb_lock);
queue            1070 drivers/misc/genwqe/card_ddcb.c 	init_waitqueue_head(&queue->busy_waitq);
queue            1072 drivers/misc/genwqe/card_ddcb.c 	val64 = ((u64)(queue->ddcb_max - 1) <<  8); /* lastptr */
queue            1073 drivers/misc/genwqe/card_ddcb.c 	__genwqe_writeq(cd, queue->IO_QUEUE_CONFIG,  0x07);  /* iCRC/vCRC */
queue            1074 drivers/misc/genwqe/card_ddcb.c 	__genwqe_writeq(cd, queue->IO_QUEUE_SEGMENT, queue->ddcb_daddr);
queue            1075 drivers/misc/genwqe/card_ddcb.c 	__genwqe_writeq(cd, queue->IO_QUEUE_INITSQN, queue->ddcb_seq);
queue            1076 drivers/misc/genwqe/card_ddcb.c 	__genwqe_writeq(cd, queue->IO_QUEUE_WRAP,    val64);
queue            1080 drivers/misc/genwqe/card_ddcb.c 	kfree(queue->ddcb_req);
queue            1081 drivers/misc/genwqe/card_ddcb.c 	queue->ddcb_req = NULL;
queue            1083 drivers/misc/genwqe/card_ddcb.c 	__genwqe_free_consistent(cd, queue_size, queue->ddcb_vaddr,
queue            1084 drivers/misc/genwqe/card_ddcb.c 				queue->ddcb_daddr);
queue            1085 drivers/misc/genwqe/card_ddcb.c 	queue->ddcb_vaddr = NULL;
queue            1086 drivers/misc/genwqe/card_ddcb.c 	queue->ddcb_daddr = 0ull;
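
setup_ddcb_queue() allocates the DDCB ring, the request array and the waitqueue array in sequence, and the err_* tail seen above unwinds whatever succeeded, in reverse order. A userspace sketch of that goto-unwind shape, with calloc()/free() standing in for the DMA-consistent and kcalloc() allocations (names illustrative):

    #include <stdlib.h>

    struct dq {
        void *ring;                 /* like ddcb_vaddr */
        void **reqs;                /* like ddcb_req */
        void **waitqs;              /* like ddcb_waitqs */
    };

    int dq_setup(struct dq *q, int nslots)
    {
        q->ring = calloc(nslots, 64);
        if (!q->ring)
            return -1;

        q->reqs = calloc(nslots, sizeof(*q->reqs));
        if (!q->reqs)
            goto free_ring;

        q->waitqs = calloc(nslots, sizeof(*q->waitqs));
        if (!q->waitqs)
            goto free_reqs;

        return 0;

    free_reqs:                      /* unwind in reverse order */
        free(q->reqs);
        q->reqs = NULL;
    free_ring:
        free(q->ring);
        q->ring = NULL;
        return -1;
    }
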
queue            1091 drivers/misc/genwqe/card_ddcb.c static int ddcb_queue_initialized(struct ddcb_queue *queue)
queue            1093 drivers/misc/genwqe/card_ddcb.c 	return queue->ddcb_vaddr != NULL;
queue            1096 drivers/misc/genwqe/card_ddcb.c static void free_ddcb_queue(struct genwqe_dev *cd, struct ddcb_queue *queue)
queue            1100 drivers/misc/genwqe/card_ddcb.c 	queue_size = roundup(queue->ddcb_max * sizeof(struct ddcb), PAGE_SIZE);
queue            1102 drivers/misc/genwqe/card_ddcb.c 	kfree(queue->ddcb_req);
queue            1103 drivers/misc/genwqe/card_ddcb.c 	queue->ddcb_req = NULL;
queue            1105 drivers/misc/genwqe/card_ddcb.c 	if (queue->ddcb_vaddr) {
queue            1106 drivers/misc/genwqe/card_ddcb.c 		__genwqe_free_consistent(cd, queue_size, queue->ddcb_vaddr,
queue            1107 drivers/misc/genwqe/card_ddcb.c 					queue->ddcb_daddr);
queue            1108 drivers/misc/genwqe/card_ddcb.c 		queue->ddcb_vaddr = NULL;
queue            1109 drivers/misc/genwqe/card_ddcb.c 		queue->ddcb_daddr = 0ull;
queue            1187 drivers/misc/genwqe/card_ddcb.c 		genwqe_check_ddcb_queue(cd, &cd->queue);
queue            1223 drivers/misc/genwqe/card_ddcb.c 	struct ddcb_queue *queue;
queue            1236 drivers/misc/genwqe/card_ddcb.c 	queue = &cd->queue;
queue            1237 drivers/misc/genwqe/card_ddcb.c 	queue->IO_QUEUE_CONFIG  = IO_SLC_QUEUE_CONFIG;
queue            1238 drivers/misc/genwqe/card_ddcb.c 	queue->IO_QUEUE_STATUS  = IO_SLC_QUEUE_STATUS;
queue            1239 drivers/misc/genwqe/card_ddcb.c 	queue->IO_QUEUE_SEGMENT = IO_SLC_QUEUE_SEGMENT;
queue            1240 drivers/misc/genwqe/card_ddcb.c 	queue->IO_QUEUE_INITSQN = IO_SLC_QUEUE_INITSQN;
queue            1241 drivers/misc/genwqe/card_ddcb.c 	queue->IO_QUEUE_OFFSET  = IO_SLC_QUEUE_OFFSET;
queue            1242 drivers/misc/genwqe/card_ddcb.c 	queue->IO_QUEUE_WRAP    = IO_SLC_QUEUE_WRAP;
queue            1243 drivers/misc/genwqe/card_ddcb.c 	queue->IO_QUEUE_WTIME   = IO_SLC_QUEUE_WTIME;
queue            1244 drivers/misc/genwqe/card_ddcb.c 	queue->IO_QUEUE_ERRCNTS = IO_SLC_QUEUE_ERRCNTS;
queue            1245 drivers/misc/genwqe/card_ddcb.c 	queue->IO_QUEUE_LRW     = IO_SLC_QUEUE_LRW;
queue            1247 drivers/misc/genwqe/card_ddcb.c 	rc = setup_ddcb_queue(cd, queue);
queue            1295 drivers/misc/genwqe/card_ddcb.c 	free_ddcb_queue(cd, queue);
queue            1311 drivers/misc/genwqe/card_ddcb.c 	struct ddcb_queue *queue = &cd->queue;
queue            1313 drivers/misc/genwqe/card_ddcb.c 	spin_lock_irqsave(&queue->ddcb_lock, flags);
queue            1315 drivers/misc/genwqe/card_ddcb.c 	for (i = 0; i < queue->ddcb_max; i++)
queue            1316 drivers/misc/genwqe/card_ddcb.c 		wake_up_interruptible(&queue->ddcb_waitqs[queue->ddcb_act]);
queue            1318 drivers/misc/genwqe/card_ddcb.c 	wake_up_interruptible(&queue->busy_waitq);
queue            1319 drivers/misc/genwqe/card_ddcb.c 	spin_unlock_irqrestore(&queue->ddcb_lock, flags);
queue            1337 drivers/misc/genwqe/card_ddcb.c 	struct ddcb_queue *queue = &cd->queue;
queue            1339 drivers/misc/genwqe/card_ddcb.c 	if (!ddcb_queue_initialized(queue))
queue            1388 drivers/misc/genwqe/card_ddcb.c 	if (!ddcb_queue_initialized(&cd->queue))
queue            1399 drivers/misc/genwqe/card_ddcb.c 	free_ddcb_queue(cd, &cd->queue);
queue             215 drivers/misc/genwqe/card_debugfs.c 	struct ddcb_queue *queue;
queue             218 drivers/misc/genwqe/card_debugfs.c 	queue = &cd->queue;
queue             229 drivers/misc/genwqe/card_debugfs.c 		   queue->ddcb_max, (long long)queue->ddcb_daddr,
queue             230 drivers/misc/genwqe/card_debugfs.c 		   (long long)queue->ddcb_daddr +
queue             231 drivers/misc/genwqe/card_debugfs.c 		   (queue->ddcb_max * DDCB_LENGTH),
queue             232 drivers/misc/genwqe/card_debugfs.c 		   queue->ddcb_vaddr, queue->ddcbs_in_flight,
queue             233 drivers/misc/genwqe/card_debugfs.c 		   queue->ddcbs_max_in_flight, queue->ddcbs_completed,
queue             234 drivers/misc/genwqe/card_debugfs.c 		   queue->return_on_busy, queue->wait_on_busy,
queue             247 drivers/misc/genwqe/card_debugfs.c 		   queue->IO_QUEUE_CONFIG,
queue             248 drivers/misc/genwqe/card_debugfs.c 		   __genwqe_readq(cd, queue->IO_QUEUE_CONFIG),
queue             249 drivers/misc/genwqe/card_debugfs.c 		   queue->IO_QUEUE_STATUS,
queue             250 drivers/misc/genwqe/card_debugfs.c 		   __genwqe_readq(cd, queue->IO_QUEUE_STATUS),
queue             251 drivers/misc/genwqe/card_debugfs.c 		   queue->IO_QUEUE_SEGMENT,
queue             252 drivers/misc/genwqe/card_debugfs.c 		   __genwqe_readq(cd, queue->IO_QUEUE_SEGMENT),
queue             253 drivers/misc/genwqe/card_debugfs.c 		   queue->IO_QUEUE_INITSQN,
queue             254 drivers/misc/genwqe/card_debugfs.c 		   __genwqe_readq(cd, queue->IO_QUEUE_INITSQN),
queue             255 drivers/misc/genwqe/card_debugfs.c 		   queue->IO_QUEUE_WRAP,
queue             256 drivers/misc/genwqe/card_debugfs.c 		   __genwqe_readq(cd, queue->IO_QUEUE_WRAP),
queue             257 drivers/misc/genwqe/card_debugfs.c 		   queue->IO_QUEUE_OFFSET,
queue             258 drivers/misc/genwqe/card_debugfs.c 		   __genwqe_readq(cd, queue->IO_QUEUE_OFFSET),
queue             259 drivers/misc/genwqe/card_debugfs.c 		   queue->IO_QUEUE_WTIME,
queue             260 drivers/misc/genwqe/card_debugfs.c 		   __genwqe_readq(cd, queue->IO_QUEUE_WTIME),
queue             261 drivers/misc/genwqe/card_debugfs.c 		   queue->IO_QUEUE_ERRCNTS,
queue             262 drivers/misc/genwqe/card_debugfs.c 		   __genwqe_readq(cd, queue->IO_QUEUE_ERRCNTS),
queue             263 drivers/misc/genwqe/card_debugfs.c 		   queue->IO_QUEUE_LRW,
queue             264 drivers/misc/genwqe/card_debugfs.c 		   __genwqe_readq(cd, queue->IO_QUEUE_LRW));
queue             267 drivers/misc/genwqe/card_debugfs.c 		   queue->ddcb_act, queue->ddcb_next);
queue             269 drivers/misc/genwqe/card_debugfs.c 	pddcb = queue->ddcb_vaddr;
queue             270 drivers/misc/genwqe/card_debugfs.c 	for (i = 0; i < queue->ddcb_max; i++) {
queue              79 drivers/misc/habanalabs/irq.c 	struct hl_hw_queue *queue;
queue             117 drivers/misc/habanalabs/irq.c 		queue = &hdev->kernel_queues[cq->hw_queue_id];
queue             120 drivers/misc/habanalabs/irq.c 			job = queue->shadow_queue[hl_pi_2_offset(shadow_index)];
queue             129 drivers/misc/habanalabs/irq.c 		queue->ci = hl_queue_inc_ptr(queue->ci);
queue             145 drivers/misc/ibmvmc.c 	struct crq_queue *queue = &adapter->queue;
queue             155 drivers/misc/ibmvmc.c 			 queue->msg_token,
queue             156 drivers/misc/ibmvmc.c 			 queue->size * sizeof(*queue->msgs), DMA_BIDIRECTIONAL);
queue             157 drivers/misc/ibmvmc.c 	free_page((unsigned long)queue->msgs);
queue             175 drivers/misc/ibmvmc.c 	struct crq_queue *queue = &adapter->queue;
queue             182 drivers/misc/ibmvmc.c 	memset(queue->msgs, 0x00, PAGE_SIZE);
queue             183 drivers/misc/ibmvmc.c 	queue->cur = 0;
queue             188 drivers/misc/ibmvmc.c 				queue->msg_token, PAGE_SIZE);
queue             205 drivers/misc/ibmvmc.c static struct ibmvmc_crq_msg *crq_queue_next_crq(struct crq_queue *queue)
queue             210 drivers/misc/ibmvmc.c 	spin_lock_irqsave(&queue->lock, flags);
queue             211 drivers/misc/ibmvmc.c 	crq = &queue->msgs[queue->cur];
queue             213 drivers/misc/ibmvmc.c 		if (++queue->cur == queue->size)
queue             214 drivers/misc/ibmvmc.c 			queue->cur = 0;
queue             224 drivers/misc/ibmvmc.c 	spin_unlock_irqrestore(&queue->lock, flags);
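
crq_queue_next_crq() consumes a circular page of CRQ messages under queue->lock, advancing cur with an explicit wraparound; the test that gates the advance is not visible above, so the sketch below assumes a per-slot valid flag set by the producer:

    #include <pthread.h>
    #include <stddef.h>

    struct msg {
        unsigned char valid;        /* assumed producer-set flag */
        /* ... payload ... */
    };

    struct crq {
        struct msg *msgs;           /* one page worth of slots */
        int size, cur;
        pthread_mutex_t lock;       /* stands in for queue->lock */
    };

    /* Return the current message and advance with wraparound, or
     * NULL if the other side has not filled the slot yet. */
    struct msg *crq_next(struct crq *q)
    {
        struct msg *m;

        pthread_mutex_lock(&q->lock);
        m = &q->msgs[q->cur];
        if (m->valid) {
            if (++q->cur == q->size)
                q->cur = 0;         /* the wrap seen above */
        } else {
            m = NULL;
        }
        pthread_mutex_unlock(&q->lock);
        return m;
    }
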
queue             665 drivers/misc/ibmvmc.c 	crq_msg.crq_size = cpu_to_be16(adapter->queue.size);
queue            2077 drivers/misc/ibmvmc.c 		while ((crq = crq_queue_next_crq(&adapter->queue)) != NULL) {
queue            2088 drivers/misc/ibmvmc.c 		crq = crq_queue_next_crq(&adapter->queue);
queue            2116 drivers/misc/ibmvmc.c 	struct crq_queue *queue = &adapter->queue;
queue            2120 drivers/misc/ibmvmc.c 	queue->msgs = (struct ibmvmc_crq_msg *)get_zeroed_page(GFP_KERNEL);
queue            2122 drivers/misc/ibmvmc.c 	if (!queue->msgs)
queue            2125 drivers/misc/ibmvmc.c 	queue->size = PAGE_SIZE / sizeof(*queue->msgs);
queue            2127 drivers/misc/ibmvmc.c 	queue->msg_token = dma_map_single(adapter->dev, queue->msgs,
queue            2128 drivers/misc/ibmvmc.c 					  queue->size * sizeof(*queue->msgs),
queue            2131 drivers/misc/ibmvmc.c 	if (dma_mapping_error(adapter->dev, queue->msg_token))
queue            2136 drivers/misc/ibmvmc.c 				   queue->msg_token, PAGE_SIZE);
queue            2150 drivers/misc/ibmvmc.c 	queue->cur = 0;
queue            2151 drivers/misc/ibmvmc.c 	spin_lock_init(&queue->lock);
queue            2179 drivers/misc/ibmvmc.c 			 queue->msg_token,
queue            2180 drivers/misc/ibmvmc.c 			 queue->size * sizeof(*queue->msgs), DMA_BIDIRECTIONAL);
queue            2182 drivers/misc/ibmvmc.c 	free_page((unsigned long)queue->msgs);
queue             162 drivers/misc/ibmvmc.h 	struct crq_queue queue;
queue             570 drivers/misc/sgi-gru/gru_instructions.h static inline void gru_mesq(void *cb, unsigned long queue,
queue             576 drivers/misc/sgi-gru/gru_instructions.h 	ins->baddr0 = (long)queue;
queue             248 drivers/misc/vmw_vmci/vmci_queue_pair.c 	struct vmci_queue *queue = q;
queue             250 drivers/misc/vmw_vmci/vmci_queue_pair.c 	if (queue) {
queue             256 drivers/misc/vmw_vmci/vmci_queue_pair.c 					  queue->kernel_if->u.g.vas[i],
queue             257 drivers/misc/vmw_vmci/vmci_queue_pair.c 					  queue->kernel_if->u.g.pas[i]);
queue             260 drivers/misc/vmw_vmci/vmci_queue_pair.c 		vfree(queue);
queue             272 drivers/misc/vmw_vmci/vmci_queue_pair.c 	struct vmci_queue *queue;
queue             275 drivers/misc/vmw_vmci/vmci_queue_pair.c 	size_t queue_size = sizeof(*queue) + sizeof(*queue->kernel_if);
queue             283 drivers/misc/vmw_vmci/vmci_queue_pair.c 		 (sizeof(*queue->kernel_if->u.g.pas) +
queue             284 drivers/misc/vmw_vmci/vmci_queue_pair.c 		  sizeof(*queue->kernel_if->u.g.vas)))
queue             287 drivers/misc/vmw_vmci/vmci_queue_pair.c 	pas_size = num_pages * sizeof(*queue->kernel_if->u.g.pas);
queue             288 drivers/misc/vmw_vmci/vmci_queue_pair.c 	vas_size = num_pages * sizeof(*queue->kernel_if->u.g.vas);
queue             291 drivers/misc/vmw_vmci/vmci_queue_pair.c 	queue = vmalloc(queue_size);
queue             292 drivers/misc/vmw_vmci/vmci_queue_pair.c 	if (!queue)
queue             295 drivers/misc/vmw_vmci/vmci_queue_pair.c 	queue->q_header = NULL;
queue             296 drivers/misc/vmw_vmci/vmci_queue_pair.c 	queue->saved_header = NULL;
queue             297 drivers/misc/vmw_vmci/vmci_queue_pair.c 	queue->kernel_if = (struct vmci_queue_kern_if *)(queue + 1);
queue             298 drivers/misc/vmw_vmci/vmci_queue_pair.c 	queue->kernel_if->mutex = NULL;
queue             299 drivers/misc/vmw_vmci/vmci_queue_pair.c 	queue->kernel_if->num_pages = num_pages;
queue             300 drivers/misc/vmw_vmci/vmci_queue_pair.c 	queue->kernel_if->u.g.pas = (dma_addr_t *)(queue->kernel_if + 1);
queue             301 drivers/misc/vmw_vmci/vmci_queue_pair.c 	queue->kernel_if->u.g.vas =
queue             302 drivers/misc/vmw_vmci/vmci_queue_pair.c 		(void **)((u8 *)queue->kernel_if->u.g.pas + pas_size);
queue             303 drivers/misc/vmw_vmci/vmci_queue_pair.c 	queue->kernel_if->host = false;
queue             306 drivers/misc/vmw_vmci/vmci_queue_pair.c 		queue->kernel_if->u.g.vas[i] =
queue             308 drivers/misc/vmw_vmci/vmci_queue_pair.c 					   &queue->kernel_if->u.g.pas[i],
queue             310 drivers/misc/vmw_vmci/vmci_queue_pair.c 		if (!queue->kernel_if->u.g.vas[i]) {
queue             312 drivers/misc/vmw_vmci/vmci_queue_pair.c 			qp_free_queue(queue, i * PAGE_SIZE);
queue             318 drivers/misc/vmw_vmci/vmci_queue_pair.c 	queue->q_header = queue->kernel_if->u.g.vas[0];
queue             320 drivers/misc/vmw_vmci/vmci_queue_pair.c 	return queue;
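
qp_alloc_queue() sizes a single allocation to hold the queue header, its kernel_if, and the pas/vas arrays, then carves the pieces out with pointer arithmetic so one vfree() releases everything. A compilable model of that layout, assuming a 64-bit target where all the members share pointer alignment (structs and names shortened):

    #include <stdint.h>
    #include <stdlib.h>

    struct kif {                    /* like vmci_queue_kern_if */
        size_t num_pages;
        uint64_t *pas;              /* trailing array #1 */
        void **vas;                 /* trailing array #2 */
    };

    struct hdr {                    /* like vmci_queue */
        struct kif *kif;
    };

    struct hdr *alloc_layout(size_t num_pages)
    {
        size_t pas_size = num_pages * sizeof(uint64_t);
        size_t vas_size = num_pages * sizeof(void *);
        struct hdr *h = malloc(sizeof(*h) + sizeof(struct kif) +
                               pas_size + vas_size);

        if (!h)
            return NULL;
        h->kif = (struct kif *)(h + 1);     /* right past the header */
        h->kif->num_pages = num_pages;
        h->kif->pas = (uint64_t *)(h->kif + 1);
        h->kif->vas = (void **)((uint8_t *)h->kif->pas + pas_size);
        return h;                   /* one free(h) releases it all */
    }
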
queue             329 drivers/misc/vmw_vmci/vmci_queue_pair.c static int qp_memcpy_to_queue_iter(struct vmci_queue *queue,
queue             334 drivers/misc/vmw_vmci/vmci_queue_pair.c 	struct vmci_queue_kern_if *kernel_if = queue->kernel_if;
queue             378 drivers/misc/vmw_vmci/vmci_queue_pair.c 				    const struct vmci_queue *queue,
queue             381 drivers/misc/vmw_vmci/vmci_queue_pair.c 	struct vmci_queue_kern_if *kernel_if = queue->kernel_if;
queue             526 drivers/misc/vmw_vmci/vmci_queue_pair.c 	struct vmci_queue *queue;
queue             529 drivers/misc/vmw_vmci/vmci_queue_pair.c 	const size_t queue_size = sizeof(*queue) + sizeof(*(queue->kernel_if));
queue             535 drivers/misc/vmw_vmci/vmci_queue_pair.c 		 sizeof(*queue->kernel_if->u.h.page))
queue             538 drivers/misc/vmw_vmci/vmci_queue_pair.c 	queue_page_size = num_pages * sizeof(*queue->kernel_if->u.h.page);
queue             540 drivers/misc/vmw_vmci/vmci_queue_pair.c 	queue = kzalloc(queue_size + queue_page_size, GFP_KERNEL);
queue             541 drivers/misc/vmw_vmci/vmci_queue_pair.c 	if (queue) {
queue             542 drivers/misc/vmw_vmci/vmci_queue_pair.c 		queue->q_header = NULL;
queue             543 drivers/misc/vmw_vmci/vmci_queue_pair.c 		queue->saved_header = NULL;
queue             544 drivers/misc/vmw_vmci/vmci_queue_pair.c 		queue->kernel_if = (struct vmci_queue_kern_if *)(queue + 1);
queue             545 drivers/misc/vmw_vmci/vmci_queue_pair.c 		queue->kernel_if->host = true;
queue             546 drivers/misc/vmw_vmci/vmci_queue_pair.c 		queue->kernel_if->mutex = NULL;
queue             547 drivers/misc/vmw_vmci/vmci_queue_pair.c 		queue->kernel_if->num_pages = num_pages;
queue             548 drivers/misc/vmw_vmci/vmci_queue_pair.c 		queue->kernel_if->u.h.header_page =
queue             549 drivers/misc/vmw_vmci/vmci_queue_pair.c 		    (struct page **)((u8 *)queue + queue_size);
queue             550 drivers/misc/vmw_vmci/vmci_queue_pair.c 		queue->kernel_if->u.h.page =
queue             551 drivers/misc/vmw_vmci/vmci_queue_pair.c 			&queue->kernel_if->u.h.header_page[1];
queue             554 drivers/misc/vmw_vmci/vmci_queue_pair.c 	return queue;
queue             561 drivers/misc/vmw_vmci/vmci_queue_pair.c static void qp_host_free_queue(struct vmci_queue *queue, u64 queue_size)
queue             563 drivers/misc/vmw_vmci/vmci_queue_pair.c 	kfree(queue);
queue             605 drivers/misc/vmw_vmci/vmci_queue_pair.c static void qp_acquire_queue_mutex(struct vmci_queue *queue)
queue             607 drivers/misc/vmw_vmci/vmci_queue_pair.c 	if (queue->kernel_if->host)
queue             608 drivers/misc/vmw_vmci/vmci_queue_pair.c 		mutex_lock(queue->kernel_if->mutex);
queue             616 drivers/misc/vmw_vmci/vmci_queue_pair.c static void qp_release_queue_mutex(struct vmci_queue *queue)
queue             618 drivers/misc/vmw_vmci/vmci_queue_pair.c 	if (queue->kernel_if->host)
queue             619 drivers/misc/vmw_vmci/vmci_queue_pair.c 		mutex_unlock(queue->kernel_if->mutex);
queue             105 drivers/mmc/core/block.c 	struct mmc_queue queue;
queue             199 drivers/mmc/core/block.c 		blk_put_queue(md->queue.queue);
queue             212 drivers/mmc/core/block.c 	struct mmc_card *card = md->queue.card;
queue             243 drivers/mmc/core/block.c 	mq = &md->queue;
queue             246 drivers/mmc/core/block.c 	req = blk_get_request(mq->queue, REQ_OP_DRV_OUT, 0);
queue             252 drivers/mmc/core/block.c 	blk_execute_rq(mq->queue, NULL, req, 0);
queue             661 drivers/mmc/core/block.c 	card = md->queue.card;
queue             670 drivers/mmc/core/block.c 	mq = &md->queue;
queue             671 drivers/mmc/core/block.c 	req = blk_get_request(mq->queue,
queue             682 drivers/mmc/core/block.c 	blk_execute_rq(mq->queue, NULL, req, 0);
queue             730 drivers/mmc/core/block.c 	card = md->queue.card;
queue             740 drivers/mmc/core/block.c 	mq = &md->queue;
queue             741 drivers/mmc/core/block.c 	req = blk_get_request(mq->queue,
queue             751 drivers/mmc/core/block.c 	blk_execute_rq(mq->queue, NULL, req, 0);
queue            1089 drivers/mmc/core/block.c 	struct mmc_card *card = md->queue.card;
queue            1127 drivers/mmc/core/block.c 	struct mmc_card *card = md->queue.card;
queue            1196 drivers/mmc/core/block.c 	struct mmc_card *card = md->queue.card;
queue            1285 drivers/mmc/core/block.c 	struct mmc_card *card = md->queue.card;
queue            2182 drivers/mmc/core/block.c 	struct mmc_card *card = md->queue.card;
queue            2296 drivers/mmc/core/block.c 	ret = mmc_init_queue(&md->queue, card);
queue            2300 drivers/mmc/core/block.c 	md->queue.blkdata = md;
queue            2308 drivers/mmc/core/block.c 	if (!blk_get_queue(md->queue.queue)) {
queue            2309 drivers/mmc/core/block.c 		mmc_cleanup_queue(&md->queue);
queue            2318 drivers/mmc/core/block.c 	md->disk->queue = md->queue.queue;
queue            2356 drivers/mmc/core/block.c 		blk_queue_write_cache(md->queue.queue, true, true);
queue            2623 drivers/mmc/core/block.c 		card = md->queue.card;
queue            2633 drivers/mmc/core/block.c 		mmc_cleanup_queue(&md->queue);
queue            2662 drivers/mmc/core/block.c 	struct mmc_card *card = md->queue.card;
queue            2710 drivers/mmc/core/block.c 	struct mmc_queue *mq = &md->queue;
queue            2715 drivers/mmc/core/block.c 	req = blk_get_request(mq->queue, REQ_OP_DRV_IN, 0);
queue            2719 drivers/mmc/core/block.c 	blk_execute_rq(mq->queue, NULL, req, 0);
queue            2739 drivers/mmc/core/block.c 	struct mmc_queue *mq = &md->queue;
queue            2751 drivers/mmc/core/block.c 	req = blk_get_request(mq->queue, REQ_OP_DRV_IN, 0);
queue            2758 drivers/mmc/core/block.c 	blk_execute_rq(mq->queue, NULL, req, 0);
queue            2959 drivers/mmc/core/block.c 		mmc_queue_suspend(&md->queue);
queue            2961 drivers/mmc/core/block.c 			mmc_queue_suspend(&part_md->queue);
queue            2991 drivers/mmc/core/block.c 		mmc_queue_resume(&md->queue);
queue            2993 drivers/mmc/core/block.c 			mmc_queue_resume(&part_md->queue);
queue             140 drivers/mmc/core/queue.c 	struct request_queue *q = mq->queue;
queue             237 drivers/mmc/core/queue.c 	mmc_exit_request(mq->queue, req);
queue             358 drivers/mmc/core/queue.c 	blk_queue_flag_set(QUEUE_FLAG_NONROT, mq->queue);
queue             359 drivers/mmc/core/queue.c 	blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, mq->queue);
queue             361 drivers/mmc/core/queue.c 		mmc_queue_setup_discard(mq->queue, card);
queue             364 drivers/mmc/core/queue.c 		blk_queue_bounce_limit(mq->queue, BLK_BOUNCE_HIGH);
queue             365 drivers/mmc/core/queue.c 	blk_queue_max_hw_sectors(mq->queue,
queue             368 drivers/mmc/core/queue.c 		WARN(!blk_queue_can_use_dma_map_merging(mq->queue,
queue             371 drivers/mmc/core/queue.c 	blk_queue_max_segments(mq->queue, mmc_get_max_segments(host));
queue             376 drivers/mmc/core/queue.c 	blk_queue_logical_block_size(mq->queue, block_size);
queue             383 drivers/mmc/core/queue.c 		blk_queue_max_segment_size(mq->queue,
queue             386 drivers/mmc/core/queue.c 	dma_set_max_seg_size(mmc_dev(host), queue_max_segment_size(mq->queue));
queue             454 drivers/mmc/core/queue.c 	mq->queue = blk_mq_init_queue(&mq->tag_set);
queue             455 drivers/mmc/core/queue.c 	if (IS_ERR(mq->queue)) {
queue             456 drivers/mmc/core/queue.c 		ret = PTR_ERR(mq->queue);
queue             461 drivers/mmc/core/queue.c 		mq->queue->backing_dev_info->capabilities |=
queue             464 drivers/mmc/core/queue.c 	mq->queue->queuedata = mq;
queue             465 drivers/mmc/core/queue.c 	blk_queue_rq_timeout(mq->queue, 60 * HZ);
queue             477 drivers/mmc/core/queue.c 	blk_mq_quiesce_queue(mq->queue);
queue             489 drivers/mmc/core/queue.c 	blk_mq_unquiesce_queue(mq->queue);
queue             494 drivers/mmc/core/queue.c 	struct request_queue *q = mq->queue;
queue             523 drivers/mmc/core/queue.c 	return blk_rq_map_sg(mq->queue, req, mqrq->sg);
queue              79 drivers/mmc/core/queue.h 	struct request_queue	*queue;
queue             332 drivers/mmc/host/atmel-mci.c 	struct list_head	queue;
queue            1334 drivers/mmc/host/atmel-mci.c 		list_add_tail(&slot->queue_node, &host->queue);
queue            1457 drivers/mmc/host/atmel-mci.c 		if (list_empty(&host->queue)) {
queue            1573 drivers/mmc/host/atmel-mci.c 	if (!list_empty(&host->queue)) {
queue            1574 drivers/mmc/host/atmel-mci.c 		slot = list_entry(host->queue.next,
queue            2475 drivers/mmc/host/atmel-mci.c 	INIT_LIST_HEAD(&host->queue);
queue            1373 drivers/mmc/host/dw_mmc.c 		list_add_tail(&slot->queue_node, &host->queue);
queue            1801 drivers/mmc/host/dw_mmc.c 	if (!list_empty(&host->queue)) {
queue            1802 drivers/mmc/host/dw_mmc.c 		slot = list_entry(host->queue.next,
queue            3237 drivers/mmc/host/dw_mmc.c 	INIT_LIST_HEAD(&host->queue);
queue             197 drivers/mmc/host/dw_mmc.h 	struct list_head	queue;
queue             190 drivers/mtd/mtd_blkdevs.c 	dev = hctx->queue->queuedata;
queue             455 drivers/mtd/mtd_blkdevs.c 	gd->queue = new->rq;
queue             317 drivers/mtd/ubi/block.c 	struct ubiblock *dev = hctx->queue->queuedata;
queue             442 drivers/mtd/ubi/block.c 	dev->gd->queue = dev->rq;
queue              87 drivers/net/dsa/microchip/ksz8795.c static void ksz8795_set_prio_queue(struct ksz_device *dev, int port, int queue)
queue              92 drivers/net/dsa/microchip/ksz8795.c 	switch (queue) {
queue              95 drivers/net/dsa/microchip/ksz8795.c 		queue = PORT_QUEUE_SPLIT_4;
queue              98 drivers/net/dsa/microchip/ksz8795.c 		queue = PORT_QUEUE_SPLIT_2;
queue             101 drivers/net/dsa/microchip/ksz8795.c 		queue = PORT_QUEUE_SPLIT_1;
queue             106 drivers/net/dsa/microchip/ksz8795.c 	if (queue & PORT_QUEUE_SPLIT_2)
queue             109 drivers/net/dsa/microchip/ksz8795.c 	if (queue & PORT_QUEUE_SPLIT_4)
queue             115 drivers/net/dsa/microchip/ksz8795.c 	if (queue != PORT_QUEUE_SPLIT_1)
queue             140 drivers/net/eql.c static void eql_kill_one_slave(slave_queue_t *queue, slave_t *slave);
queue             147 drivers/net/eql.c 	spin_lock(&eql->queue.lock);
queue             148 drivers/net/eql.c 	head = &eql->queue.all_slaves;
queue             157 drivers/net/eql.c 			eql_kill_one_slave(&eql->queue, slave);
queue             161 drivers/net/eql.c 	spin_unlock(&eql->queue.lock);
queue             184 drivers/net/eql.c 	spin_lock_init(&eql->queue.lock);
queue             185 drivers/net/eql.c 	INIT_LIST_HEAD(&eql->queue.all_slaves);
queue             186 drivers/net/eql.c 	eql->queue.master_dev	= dev;
queue             211 drivers/net/eql.c 	BUG_ON(!list_empty(&eql->queue.all_slaves));
queue             221 drivers/net/eql.c static void eql_kill_one_slave(slave_queue_t *queue, slave_t *slave)
queue             224 drivers/net/eql.c 	queue->num_slaves--;
queue             230 drivers/net/eql.c static void eql_kill_slave_queue(slave_queue_t *queue)
queue             234 drivers/net/eql.c 	spin_lock_bh(&queue->lock);
queue             236 drivers/net/eql.c 	head = &queue->all_slaves;
queue             240 drivers/net/eql.c 		eql_kill_one_slave(queue, s);
queue             243 drivers/net/eql.c 	spin_unlock_bh(&queue->lock);
queue             257 drivers/net/eql.c 	eql_kill_slave_queue(&eql->queue);
queue             296 drivers/net/eql.c static slave_t *__eql_schedule_slaves(slave_queue_t *queue)
queue             305 drivers/net/eql.c 	head = &queue->all_slaves;
queue             325 drivers/net/eql.c 			eql_kill_one_slave(queue, slave);
queue             336 drivers/net/eql.c 	spin_lock(&eql->queue.lock);
queue             338 drivers/net/eql.c 	slave = __eql_schedule_slaves(&eql->queue);
queue             352 drivers/net/eql.c 	spin_unlock(&eql->queue.lock);
queue             362 drivers/net/eql.c static slave_t *__eql_find_slave_dev(slave_queue_t *queue, struct net_device *dev)
queue             366 drivers/net/eql.c 	head = &queue->all_slaves;
queue             377 drivers/net/eql.c static inline int eql_is_full(slave_queue_t *queue)
queue             379 drivers/net/eql.c 	equalizer_t *eql = netdev_priv(queue->master_dev);
queue             381 drivers/net/eql.c 	if (queue->num_slaves >= eql->max_slaves)
queue             387 drivers/net/eql.c static int __eql_insert_slave(slave_queue_t *queue, slave_t *slave)
queue             389 drivers/net/eql.c 	if (!eql_is_full(queue)) {
queue             392 drivers/net/eql.c 		duplicate_slave = __eql_find_slave_dev(queue, slave->dev);
queue             394 drivers/net/eql.c 			eql_kill_one_slave(queue, duplicate_slave);
queue             397 drivers/net/eql.c 		list_add(&slave->list, &queue->all_slaves);
queue             398 drivers/net/eql.c 		queue->num_slaves++;
queue             435 drivers/net/eql.c 			spin_lock_bh(&eql->queue.lock);
queue             436 drivers/net/eql.c 			ret = __eql_insert_slave(&eql->queue, s);
queue             440 drivers/net/eql.c 			spin_unlock_bh(&eql->queue.lock);
queue             464 drivers/net/eql.c 	spin_lock_bh(&eql->queue.lock);
queue             466 drivers/net/eql.c 		slave_t *slave = __eql_find_slave_dev(&eql->queue, slave_dev);
queue             468 drivers/net/eql.c 			eql_kill_one_slave(&eql->queue, slave);
queue             472 drivers/net/eql.c 	spin_unlock_bh(&eql->queue.lock);
queue             494 drivers/net/eql.c 	spin_lock_bh(&eql->queue.lock);
queue             496 drivers/net/eql.c 		slave = __eql_find_slave_dev(&eql->queue, slave_dev);
queue             502 drivers/net/eql.c 	spin_unlock_bh(&eql->queue.lock);
queue             528 drivers/net/eql.c 	spin_lock_bh(&eql->queue.lock);
queue             530 drivers/net/eql.c 		slave = __eql_find_slave_dev(&eql->queue, slave_dev);
queue             538 drivers/net/eql.c 	spin_unlock_bh(&eql->queue.lock);
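
__eql_insert_slave() first checks eql_is_full(), then removes any existing slave for the same device before list_add()ing the new one, so a device can never be enqueued twice. A userspace sketch of that replace-on-insert list (types illustrative, locking omitted):

    #include <stddef.h>

    struct slave {
        struct slave *next;
        const void *dev;            /* identity key, like slave->dev */
    };

    struct slave_queue {
        struct slave *head;
        int num_slaves, max_slaves;
    };

    int insert_slave(struct slave_queue *q, struct slave *s)
    {
        struct slave **pp;

        if (q->num_slaves >= q->max_slaves)
            return -1;              /* eql_is_full(), checked first */

        for (pp = &q->head; *pp; pp = &(*pp)->next) {
            if ((*pp)->dev == s->dev) {
                *pp = (*pp)->next;  /* kill the duplicate */
                q->num_slaves--;
                break;
            }
        }

        s->next = q->head;          /* list_add(): to the front */
        q->head = s;
        q->num_slaves++;
        return 0;
    }
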
queue             171 drivers/net/ethernet/3com/3c509.c 	struct sk_buff *queue[SKB_QUEUE_SIZE];
queue             109 drivers/net/ethernet/amazon/ena/ena_com.c static int ena_com_admin_init_sq(struct ena_com_admin_queue *queue)
queue             111 drivers/net/ethernet/amazon/ena/ena_com.c 	struct ena_com_admin_sq *sq = &queue->sq;
queue             112 drivers/net/ethernet/amazon/ena/ena_com.c 	u16 size = ADMIN_SQ_SIZE(queue->q_depth);
queue             114 drivers/net/ethernet/amazon/ena/ena_com.c 	sq->entries = dma_alloc_coherent(queue->q_dmadev, size, &sq->dma_addr,
queue             131 drivers/net/ethernet/amazon/ena/ena_com.c static int ena_com_admin_init_cq(struct ena_com_admin_queue *queue)
queue             133 drivers/net/ethernet/amazon/ena/ena_com.c 	struct ena_com_admin_cq *cq = &queue->cq;
queue             134 drivers/net/ethernet/amazon/ena/ena_com.c 	u16 size = ADMIN_CQ_SIZE(queue->q_depth);
queue             136 drivers/net/ethernet/amazon/ena/ena_com.c 	cq->entries = dma_alloc_coherent(queue->q_dmadev, size, &cq->dma_addr,
queue             193 drivers/net/ethernet/amazon/ena/ena_com.c static void comp_ctxt_release(struct ena_com_admin_queue *queue,
queue             197 drivers/net/ethernet/amazon/ena/ena_com.c 	atomic_dec(&queue->outstanding_cmds);
queue             200 drivers/net/ethernet/amazon/ena/ena_com.c static struct ena_comp_ctx *get_comp_ctxt(struct ena_com_admin_queue *queue,
queue             203 drivers/net/ethernet/amazon/ena/ena_com.c 	if (unlikely(!queue->comp_ctx)) {
queue             208 drivers/net/ethernet/amazon/ena/ena_com.c 	if (unlikely(command_id >= queue->q_depth)) {
queue             210 drivers/net/ethernet/amazon/ena/ena_com.c 		       command_id, queue->q_depth);
queue             214 drivers/net/ethernet/amazon/ena/ena_com.c 	if (unlikely(queue->comp_ctx[command_id].occupied && capture)) {
queue             220 drivers/net/ethernet/amazon/ena/ena_com.c 		atomic_inc(&queue->outstanding_cmds);
queue             221 drivers/net/ethernet/amazon/ena/ena_com.c 		queue->comp_ctx[command_id].occupied = true;
queue             224 drivers/net/ethernet/amazon/ena/ena_com.c 	return &queue->comp_ctx[command_id];
queue             285 drivers/net/ethernet/amazon/ena/ena_com.c static int ena_com_init_comp_ctxt(struct ena_com_admin_queue *queue)
queue             287 drivers/net/ethernet/amazon/ena/ena_com.c 	size_t size = queue->q_depth * sizeof(struct ena_comp_ctx);
queue             291 drivers/net/ethernet/amazon/ena/ena_com.c 	queue->comp_ctx = devm_kzalloc(queue->q_dmadev, size, GFP_KERNEL);
queue             292 drivers/net/ethernet/amazon/ena/ena_com.c 	if (unlikely(!queue->comp_ctx)) {
queue             297 drivers/net/ethernet/amazon/ena/ena_com.c 	for (i = 0; i < queue->q_depth; i++) {
queue             298 drivers/net/ethernet/amazon/ena/ena_com.c 		comp_ctx = get_comp_ctxt(queue, i, false);
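
ena_com pairs every admin command id with a slot in a comp_ctx array sized to q_depth: get_comp_ctxt() range-checks the id and, when capturing, flags the slot occupied and bumps the outstanding count, while comp_ctxt_release() undoes both. A minimal model of that id-to-context table (locking and the completion payload omitted):

    #include <stddef.h>

    struct comp_ctx { int occupied; };

    struct admin_q {
        struct comp_ctx *ctx;       /* q_depth entries, one per id */
        int q_depth;
        int outstanding;
    };

    /* NULL on a bad id or an already-busy slot (a stale or
     * duplicated completion would trip the occupied check). */
    struct comp_ctx *ctx_get(struct admin_q *q, int command_id)
    {
        if (command_id < 0 || command_id >= q->q_depth)
            return NULL;
        if (q->ctx[command_id].occupied)
            return NULL;
        q->ctx[command_id].occupied = 1;
        q->outstanding++;
        return &q->ctx[command_id];
    }

    void ctx_release(struct admin_q *q, struct comp_ctx *c)
    {
        c->occupied = 0;
        q->outstanding--;
    }
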
queue             463 drivers/net/ethernet/amd/xgbe/xgbe-dev.c 			      unsigned int queue)
queue             469 drivers/net/ethernet/amd/xgbe/xgbe-dev.c 		if (pdata->prio2q_map[prio] != queue)
queue            2274 drivers/net/ethernet/amd/xgbe/xgbe-dev.c 					      unsigned int queue,
queue            2282 drivers/net/ethernet/amd/xgbe/xgbe-dev.c 	if (pdata->pfcq[queue] && (q_fifo_size > pdata->pfc_rfa)) {
queue            2298 drivers/net/ethernet/amd/xgbe/xgbe-dev.c 			pdata->rx_rfa[queue] = 0;
queue            2299 drivers/net/ethernet/amd/xgbe/xgbe-dev.c 			pdata->rx_rfd[queue] = 0;
queue            2305 drivers/net/ethernet/amd/xgbe/xgbe-dev.c 			pdata->rx_rfa[queue] = 0;	/* Full - 1024 bytes */
queue            2306 drivers/net/ethernet/amd/xgbe/xgbe-dev.c 			pdata->rx_rfd[queue] = 1;	/* Full - 1536 bytes */
queue            2312 drivers/net/ethernet/amd/xgbe/xgbe-dev.c 			pdata->rx_rfa[queue] = 2;	/* Full - 2048 bytes */
queue            2313 drivers/net/ethernet/amd/xgbe/xgbe-dev.c 			pdata->rx_rfd[queue] = 5;	/* Full - 3584 bytes */
queue            2334 drivers/net/ethernet/amd/xgbe/xgbe-dev.c 	pdata->rx_rfa[queue] = XGMAC_FLOW_CONTROL_VALUE(rfa);
queue            2335 drivers/net/ethernet/amd/xgbe/xgbe-dev.c 	pdata->rx_rfd[queue] = XGMAC_FLOW_CONTROL_VALUE(rfd);
queue            2602 drivers/net/ethernet/amd/xgbe/xgbe-dev.c 	unsigned int qptc, qptc_extra, queue;
queue            2614 drivers/net/ethernet/amd/xgbe/xgbe-dev.c 	for (i = 0, queue = 0; i < pdata->hw_feat.tc_cnt; i++) {
queue            2617 drivers/net/ethernet/amd/xgbe/xgbe-dev.c 				  "TXq%u mapped to TC%u\n", queue, i);
queue            2618 drivers/net/ethernet/amd/xgbe/xgbe-dev.c 			XGMAC_MTL_IOWRITE_BITS(pdata, queue, MTL_Q_TQOMR,
queue            2620 drivers/net/ethernet/amd/xgbe/xgbe-dev.c 			pdata->q2tc_map[queue++] = i;
queue            2625 drivers/net/ethernet/amd/xgbe/xgbe-dev.c 				  "TXq%u mapped to TC%u\n", queue, i);
queue            2626 drivers/net/ethernet/amd/xgbe/xgbe-dev.c 			XGMAC_MTL_IOWRITE_BITS(pdata, queue, MTL_Q_TQOMR,
queue            2628 drivers/net/ethernet/amd/xgbe/xgbe-dev.c 			pdata->q2tc_map[queue++] = i;
queue            2683 drivers/net/ethernet/amd/xgbe/xgbe-dev.c 	unsigned int offset, queue, prio;
queue            2692 drivers/net/ethernet/amd/xgbe/xgbe-dev.c 	for (i = 0, queue = 0, offset = 0; i < pdata->num_tcs; i++) {
queue            2693 drivers/net/ethernet/amd/xgbe/xgbe-dev.c 		while ((queue < pdata->tx_q_count) &&
queue            2694 drivers/net/ethernet/amd/xgbe/xgbe-dev.c 		       (pdata->q2tc_map[queue] == i))
queue            2695 drivers/net/ethernet/amd/xgbe/xgbe-dev.c 			queue++;
queue            2698 drivers/net/ethernet/amd/xgbe/xgbe-dev.c 			  i, offset, queue - 1);
queue            2699 drivers/net/ethernet/amd/xgbe/xgbe-dev.c 		netdev_set_tc_queue(pdata->netdev, i, queue - offset, offset);
queue            2700 drivers/net/ethernet/amd/xgbe/xgbe-dev.c 		offset = queue;
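
The qptc/qptc_extra arithmetic above spreads tx_q_count TX queues across tc_cnt traffic classes, with the first tx_q_count % tc_cnt classes taking one extra queue, and netdev_set_tc_queue() then publishes each class's (count, offset) pair. The same split as a standalone sketch, with printf() standing in for the MTL register writes:

    #include <stdio.h>

    static void map_queues_to_tcs(unsigned int tx_q_count, unsigned int tc_cnt)
    {
        unsigned int qptc = tx_q_count / tc_cnt;        /* queues per TC */
        unsigned int qptc_extra = tx_q_count % tc_cnt;  /* leftovers */
        unsigned int queue = 0;

        for (unsigned int i = 0; i < tc_cnt; i++) {
            /* the first qptc_extra classes absorb one extra queue */
            unsigned int count = qptc + (i < qptc_extra ? 1 : 0);
            unsigned int offset = queue;

            for (unsigned int j = 0; j < count; j++, queue++)
                printf("TXq%u mapped to TC%u\n", queue, i);
            printf("TC%u: %u queues at offset %u\n", i, count, offset);
        }
    }
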
queue            3214 drivers/net/ethernet/amd/xgbe/xgbe-dev.c 				     unsigned int queue)
queue            3225 drivers/net/ethernet/amd/xgbe/xgbe-dev.c 		tx_status = XGMAC_MTL_IOREAD(pdata, queue, MTL_Q_TQDR);
queue            3236 drivers/net/ethernet/amd/xgbe/xgbe-dev.c 			    queue);
queue            3240 drivers/net/ethernet/amd/xgbe/xgbe-dev.c 				 unsigned int queue)
queue            3247 drivers/net/ethernet/amd/xgbe/xgbe-dev.c 		return xgbe_txq_prepare_tx_stop(pdata, queue);
queue            3250 drivers/net/ethernet/amd/xgbe/xgbe-dev.c 	if (queue < DMA_DSRX_FIRST_QUEUE) {
queue            3252 drivers/net/ethernet/amd/xgbe/xgbe-dev.c 		tx_pos = (queue * DMA_DSR_Q_WIDTH) + DMA_DSR0_TPS_START;
queue            3254 drivers/net/ethernet/amd/xgbe/xgbe-dev.c 		tx_qidx = queue - DMA_DSRX_FIRST_QUEUE;
queue            3279 drivers/net/ethernet/amd/xgbe/xgbe-dev.c 			    queue);
queue            3328 drivers/net/ethernet/amd/xgbe/xgbe-dev.c 				 unsigned int queue)
queue            3339 drivers/net/ethernet/amd/xgbe/xgbe-dev.c 		rx_status = XGMAC_MTL_IOREAD(pdata, queue, MTL_Q_RQDR);
queue            3350 drivers/net/ethernet/amd/xgbe/xgbe-dev.c 			    queue);
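
xgbe_txq_prepare_tx_stop() and xgbe_prepare_rx_stop() above poll an MTL queue status register until the queue drains and warn about the stuck queue on timeout. A hedged userspace sketch of that poll-with-deadline shape (is_busy() stands in for the MTL_Q_TQDR/MTL_Q_RQDR read):

    #include <stdbool.h>
    #include <stdio.h>
    #include <time.h>

    static bool wait_queue_drained(bool (*is_busy)(unsigned int),
                                   unsigned int queue, unsigned int timeout_ms)
    {
        const struct timespec poll = { .tv_nsec = 1000 * 1000 };  /* 1 ms */

        while (timeout_ms--) {
            if (!is_busy(queue))
                return true;            /* queue has emptied */
            nanosleep(&poll, NULL);
        }
        fprintf(stderr, "timed out waiting for queue %u to drain\n", queue);
        return false;
    }
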
queue              61 drivers/net/ethernet/apple/bmac.c 	struct sk_buff_head *queue;
queue            1316 drivers/net/ethernet/apple/bmac.c 	bp->queue = (struct sk_buff_head *)(bp->rx_cmds + N_RX_RING + 1);
queue            1317 drivers/net/ethernet/apple/bmac.c 	skb_queue_head_init(bp->queue);
queue            1452 drivers/net/ethernet/apple/bmac.c 		skb = skb_dequeue(bp->queue);
queue            1464 drivers/net/ethernet/apple/bmac.c 	skb_queue_tail(bp->queue, skb);
queue             311 drivers/net/ethernet/aquantia/atlantic/aq_filters.c 		data->queue = fsp->ring_cookie;
queue             313 drivers/net/ethernet/aquantia/atlantic/aq_filters.c 		data->queue = -1;
queue             348 drivers/net/ethernet/aquantia/atlantic/aq_filters.c 		    aq_vlans[i].queue != AQ_RX_QUEUE_NOT_ASSIGNED &&
queue             370 drivers/net/ethernet/aquantia/atlantic/aq_filters.c 		    aq_vlans[i].queue != AQ_RX_QUEUE_NOT_ASSIGNED)
queue             378 drivers/net/ethernet/aquantia/atlantic/aq_filters.c 				aq_vlans[i].queue = AQ_RX_QUEUE_NOT_ASSIGNED;
queue             386 drivers/net/ethernet/aquantia/atlantic/aq_filters.c 				aq_vlans[i].queue = AQ_RX_QUEUE_NOT_ASSIGNED;
queue             417 drivers/net/ethernet/aquantia/atlantic/aq_filters.c 	aq_vlans[location].queue = fsp->ring_cookie & 0x1FU;
queue            1078 drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c 	if (data->queue < 0) {
queue            1084 drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c 		hw_atl_rpf_etht_rx_queue_set(self, data->queue, data->location);
queue            1124 drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c 			if (aq_vlans[i].queue != 0xFF) {
queue            1126 drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c 							    aq_vlans[i].queue,
queue             514 drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.c 				       u32 queue)
queue             516 drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.c 	aq_hw_write_reg(aq_hw, HW_ATL_RX_INTR_MODERATION_CTL_ADR(queue),
queue             552 drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.c 				       u32 queue)
queue             554 drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.c 	aq_hw_write_reg(aq_hw, HW_ATL_TX_INTR_MODERATION_CTL_ADR(queue),
queue             255 drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.h 				u32 queue);
queue             279 drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.h 				       u32 queue);
queue            2495 drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh_internal.h #define HW_ATL_TX_INTR_MODERATION_CTL_ADR(queue) (0x00008980u + (queue) * 0x4)
queue             249 drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.h 	u8 queue;
queue             253 drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.h 	s8 queue;
queue            1286 drivers/net/ethernet/broadcom/bcmsysport.c 	u16 queue;
queue            1289 drivers/net/ethernet/broadcom/bcmsysport.c 	queue = skb_get_queue_mapping(skb);
queue            1290 drivers/net/ethernet/broadcom/bcmsysport.c 	txq = netdev_get_tx_queue(dev, queue);
queue            1291 drivers/net/ethernet/broadcom/bcmsysport.c 	ring = &priv->tx_rings[queue];
queue            1297 drivers/net/ethernet/broadcom/bcmsysport.c 		netdev_err(dev, "queue %d awake and ring full!\n", queue);
queue            2235 drivers/net/ethernet/broadcom/bcmsysport.c 	u16 queue = skb_get_queue_mapping(skb);
queue            2243 drivers/net/ethernet/broadcom/bcmsysport.c 	q = BRCM_TAG_GET_QUEUE(queue);
queue            2244 drivers/net/ethernet/broadcom/bcmsysport.c 	port = BRCM_TAG_GET_PORT(queue);
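
BRCM_TAG_GET_QUEUE() and BRCM_TAG_GET_PORT() above unpack the skb queue mapping into a switch port and a per-port queue. A sketch of that bit packing; the 3-bit queue field here is an assumption for illustration, not the driver's actual layout:

    #include <stdint.h>

    #define TAG_QUEUE_BITS 3    /* assumed width of the queue field */

    static inline uint16_t tag_make(uint16_t port, uint16_t queue)
    {
        return (uint16_t)((port << TAG_QUEUE_BITS) | queue);
    }

    static inline uint16_t tag_get_queue(uint16_t map)
    {
        return map & ((1u << TAG_QUEUE_BITS) - 1);
    }

    static inline uint16_t tag_get_port(uint16_t map)
    {
        return map >> TAG_QUEUE_BITS;
    }
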
queue             430 drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
queue             439 drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c 	struct bnx2x_agg_info *tpa_info = &fp->tpa_info[queue];
queue             444 drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c 		BNX2X_ERR("start of bin not in stop [%d]\n", queue);
queue             488 drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c 	fp->tpa_queue_used |= (1 << queue);
queue             914 drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c 		u16 len, pad, queue;
queue             972 drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c 				u16 queue = cqe_fp->queue_index;
queue             975 drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c 				   queue);
queue             977 drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c 				bnx2x_tpa_start(fp, queue,
queue             983 drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c 			queue = cqe->end_agg_cqe.queue_index;
queue             984 drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c 			tpa_info = &fp->tpa_info[queue];
queue             987 drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c 			   queue);
queue             496 drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h int bnx2x_set_vf_mac(struct net_device *dev, int queue, u8 *mac);
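
bnx2x_tpa_start() above records the chosen aggregation bin in fp->tpa_queue_used and complains when a start arrives for a bin that was never stopped. The one-bit-per-bin bookkeeping, sketched with illustrative names:

    #include <stdint.h>

    static uint64_t tpa_bins_used;      /* one bit per aggregation bin */

    static void bin_start(unsigned int queue)
    {
        tpa_bins_used |= 1ULL << queue;     /* bin now aggregating */
    }

    static void bin_stop(unsigned int queue)
    {
        tpa_bins_used &= ~(1ULL << queue);  /* bin returned to pool */
    }

    static int bin_busy(unsigned int queue)
    {
        return !!(tpa_bins_used & (1ULL << queue));
    }
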
queue            1426 drivers/net/ethernet/broadcom/genet/bcmgenet.c 	netdev_tx_completed_queue(netdev_get_tx_queue(dev, ring->queue),
queue            1454 drivers/net/ethernet/broadcom/genet/bcmgenet.c 		txq = netdev_get_tx_queue(ring->priv->dev, ring->queue);
queue            1575 drivers/net/ethernet/broadcom/genet/bcmgenet.c 	txq = netdev_get_tx_queue(dev, ring->queue);
queue            1585 drivers/net/ethernet/broadcom/genet/bcmgenet.c 				   __func__, index, ring->queue);
queue            2127 drivers/net/ethernet/broadcom/genet/bcmgenet.c 		ring->queue = 0;
queue            2131 drivers/net/ethernet/broadcom/genet/bcmgenet.c 		ring->queue = index + 1;
queue            2532 drivers/net/ethernet/broadcom/genet/bcmgenet.c 		txq = netdev_get_tx_queue(priv->dev, priv->tx_rings[i].queue);
queue            2536 drivers/net/ethernet/broadcom/genet/bcmgenet.c 	txq = netdev_get_tx_queue(priv->dev, priv->tx_rings[DESC_INDEX].queue);
queue            3026 drivers/net/ethernet/broadcom/genet/bcmgenet.c 	txq = netdev_get_tx_queue(priv->dev, ring->queue);
queue            3049 drivers/net/ethernet/broadcom/genet/bcmgenet.c 		  ring->index, ring->queue,
queue             562 drivers/net/ethernet/broadcom/genet/bcmgenet.h 	unsigned int	queue;		/* queue index */
queue             689 drivers/net/ethernet/cadence/macb.h #define queue_readl(queue, reg)		(queue)->bp->macb_reg_readl((queue)->bp, (queue)->reg)
queue             690 drivers/net/ethernet/cadence/macb.h #define queue_writel(queue, reg, value)	(queue)->bp->macb_reg_writel((queue)->bp, (queue)->reg, (value))
queue            1070 drivers/net/ethernet/cadence/macb.h 	int	(*mog_rx)(struct macb_queue *queue, struct napi_struct *napi,
queue            1251 drivers/net/ethernet/cadence/macb.h int gem_ptp_txstamp(struct macb_queue *queue, struct sk_buff *skb, struct macb_dma_desc *des);
queue            1253 drivers/net/ethernet/cadence/macb.h static inline int gem_ptp_do_txstamp(struct macb_queue *queue, struct sk_buff *skb, struct macb_dma_desc *desc)
queue            1255 drivers/net/ethernet/cadence/macb.h 	if (queue->bp->tstamp_config.tx_type == TSTAMP_DISABLED)
queue            1258 drivers/net/ethernet/cadence/macb.h 	return gem_ptp_txstamp(queue, skb, desc);
queue            1274 drivers/net/ethernet/cadence/macb.h static inline int gem_ptp_do_txstamp(struct macb_queue *queue, struct sk_buff *skb, struct macb_dma_desc *desc)
queue             183 drivers/net/ethernet/cadence/macb_main.c static struct macb_dma_desc *macb_tx_desc(struct macb_queue *queue,
queue             186 drivers/net/ethernet/cadence/macb_main.c 	index = macb_tx_ring_wrap(queue->bp, index);
queue             187 drivers/net/ethernet/cadence/macb_main.c 	index = macb_adj_dma_desc_idx(queue->bp, index);
queue             188 drivers/net/ethernet/cadence/macb_main.c 	return &queue->tx_ring[index];
queue             191 drivers/net/ethernet/cadence/macb_main.c static struct macb_tx_skb *macb_tx_skb(struct macb_queue *queue,
queue             194 drivers/net/ethernet/cadence/macb_main.c 	return &queue->tx_skb[macb_tx_ring_wrap(queue->bp, index)];
queue             197 drivers/net/ethernet/cadence/macb_main.c static dma_addr_t macb_tx_dma(struct macb_queue *queue, unsigned int index)
queue             201 drivers/net/ethernet/cadence/macb_main.c 	offset = macb_tx_ring_wrap(queue->bp, index) *
queue             202 drivers/net/ethernet/cadence/macb_main.c 			macb_dma_desc_get_size(queue->bp);
queue             204 drivers/net/ethernet/cadence/macb_main.c 	return queue->tx_ring_dma + offset;
queue             212 drivers/net/ethernet/cadence/macb_main.c static struct macb_dma_desc *macb_rx_desc(struct macb_queue *queue, unsigned int index)
queue             214 drivers/net/ethernet/cadence/macb_main.c 	index = macb_rx_ring_wrap(queue->bp, index);
queue             215 drivers/net/ethernet/cadence/macb_main.c 	index = macb_adj_dma_desc_idx(queue->bp, index);
queue             216 drivers/net/ethernet/cadence/macb_main.c 	return &queue->rx_ring[index];
queue             219 drivers/net/ethernet/cadence/macb_main.c static void *macb_rx_buffer(struct macb_queue *queue, unsigned int index)
queue             221 drivers/net/ethernet/cadence/macb_main.c 	return queue->rx_buffers + queue->bp->rx_buffer_size *
queue             222 drivers/net/ethernet/cadence/macb_main.c 	       macb_rx_ring_wrap(queue->bp, index);
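
macb_tx_desc(), macb_tx_skb(), macb_rx_desc() and macb_rx_buffer() above all pass a free-running index through macb_tx_ring_wrap()/macb_rx_ring_wrap() before touching the ring. A minimal sketch of that masking idiom, assuming a power-of-two ring size:

    #define RING_SIZE 512   /* assumed power of two, like bp->tx_ring_size */

    struct dma_desc {
        unsigned int addr;
        unsigned int ctrl;
    };

    struct ring {
        struct dma_desc desc[RING_SIZE];
        unsigned int head;  /* free-running producer index */
        unsigned int tail;  /* free-running consumer index */
    };

    static struct dma_desc *ring_desc(struct ring *r, unsigned int index)
    {
        /* the whole trick behind macb_*_ring_wrap(): mask, don't modulo */
        return &r->desc[index & (RING_SIZE - 1)];
    }
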
queue             727 drivers/net/ethernet/cadence/macb_main.c 	struct macb_queue	*queue = container_of(work, struct macb_queue,
queue             729 drivers/net/ethernet/cadence/macb_main.c 	struct macb		*bp = queue->bp;
queue             737 drivers/net/ethernet/cadence/macb_main.c 		    (unsigned int)(queue - bp->queues),
queue             738 drivers/net/ethernet/cadence/macb_main.c 		    queue->tx_tail, queue->tx_head);
queue             762 drivers/net/ethernet/cadence/macb_main.c 	for (tail = queue->tx_tail; tail != queue->tx_head; tail++) {
queue             765 drivers/net/ethernet/cadence/macb_main.c 		desc = macb_tx_desc(queue, tail);
queue             767 drivers/net/ethernet/cadence/macb_main.c 		tx_skb = macb_tx_skb(queue, tail);
queue             775 drivers/net/ethernet/cadence/macb_main.c 				tx_skb = macb_tx_skb(queue, tail);
queue             787 drivers/net/ethernet/cadence/macb_main.c 				queue->stats.tx_packets++;
queue             789 drivers/net/ethernet/cadence/macb_main.c 				queue->stats.tx_bytes += skb->len;
queue             807 drivers/net/ethernet/cadence/macb_main.c 	desc = macb_tx_desc(queue, 0);
queue             815 drivers/net/ethernet/cadence/macb_main.c 	queue_writel(queue, TBQP, lower_32_bits(queue->tx_ring_dma));
queue             818 drivers/net/ethernet/cadence/macb_main.c 		queue_writel(queue, TBQPH, upper_32_bits(queue->tx_ring_dma));
queue             821 drivers/net/ethernet/cadence/macb_main.c 	queue->tx_head = 0;
queue             822 drivers/net/ethernet/cadence/macb_main.c 	queue->tx_tail = 0;
queue             826 drivers/net/ethernet/cadence/macb_main.c 	queue_writel(queue, IER, MACB_TX_INT_FLAGS);
queue             835 drivers/net/ethernet/cadence/macb_main.c static void macb_tx_interrupt(struct macb_queue *queue)
queue             840 drivers/net/ethernet/cadence/macb_main.c 	struct macb *bp = queue->bp;
queue             841 drivers/net/ethernet/cadence/macb_main.c 	u16 queue_index = queue - bp->queues;
queue             847 drivers/net/ethernet/cadence/macb_main.c 		queue_writel(queue, ISR, MACB_BIT(TCOMP));
queue             852 drivers/net/ethernet/cadence/macb_main.c 	head = queue->tx_head;
queue             853 drivers/net/ethernet/cadence/macb_main.c 	for (tail = queue->tx_tail; tail != head; tail++) {
queue             859 drivers/net/ethernet/cadence/macb_main.c 		desc = macb_tx_desc(queue, tail);
queue             874 drivers/net/ethernet/cadence/macb_main.c 			tx_skb = macb_tx_skb(queue, tail);
queue             881 drivers/net/ethernet/cadence/macb_main.c 				    gem_ptp_do_txstamp(queue, skb, desc) == 0) {
queue             891 drivers/net/ethernet/cadence/macb_main.c 				queue->stats.tx_packets++;
queue             893 drivers/net/ethernet/cadence/macb_main.c 				queue->stats.tx_bytes += skb->len;
queue             908 drivers/net/ethernet/cadence/macb_main.c 	queue->tx_tail = tail;
queue             910 drivers/net/ethernet/cadence/macb_main.c 	    CIRC_CNT(queue->tx_head, queue->tx_tail,
queue             915 drivers/net/ethernet/cadence/macb_main.c static void gem_rx_refill(struct macb_queue *queue)
queue             920 drivers/net/ethernet/cadence/macb_main.c 	struct macb *bp = queue->bp;
queue             923 drivers/net/ethernet/cadence/macb_main.c 	while (CIRC_SPACE(queue->rx_prepared_head, queue->rx_tail,
queue             925 drivers/net/ethernet/cadence/macb_main.c 		entry = macb_rx_ring_wrap(bp, queue->rx_prepared_head);
queue             930 drivers/net/ethernet/cadence/macb_main.c 		queue->rx_prepared_head++;
queue             931 drivers/net/ethernet/cadence/macb_main.c 		desc = macb_rx_desc(queue, entry);
queue             933 drivers/net/ethernet/cadence/macb_main.c 		if (!queue->rx_skbuff[entry]) {
queue             951 drivers/net/ethernet/cadence/macb_main.c 			queue->rx_skbuff[entry] = skb;
queue             975 drivers/net/ethernet/cadence/macb_main.c 			queue, queue->rx_prepared_head, queue->rx_tail);
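
gem_rx_refill() above keeps allocating while CIRC_SPACE(rx_prepared_head, rx_tail, ring_size) is non-zero, and the TX paths measure occupancy with CIRC_CNT(). For free-running indices on a power-of-two ring, the kernel's include/linux/circ_buf.h macros reduce to:

    #define CIRC_CNT(head, tail, size)   (((head) - (tail)) & ((size) - 1))
    #define CIRC_SPACE(head, tail, size) CIRC_CNT((tail), ((head) + 1), (size))

CIRC_SPACE() intentionally keeps one slot unused, so a completely full ring is never index-identical to an empty one.
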
queue             979 drivers/net/ethernet/cadence/macb_main.c static void discard_partial_frame(struct macb_queue *queue, unsigned int begin,
queue             985 drivers/net/ethernet/cadence/macb_main.c 		struct macb_dma_desc *desc = macb_rx_desc(queue, frag);
queue             999 drivers/net/ethernet/cadence/macb_main.c static int gem_rx(struct macb_queue *queue, struct napi_struct *napi,
queue            1002 drivers/net/ethernet/cadence/macb_main.c 	struct macb *bp = queue->bp;
queue            1014 drivers/net/ethernet/cadence/macb_main.c 		entry = macb_rx_ring_wrap(bp, queue->rx_tail);
queue            1015 drivers/net/ethernet/cadence/macb_main.c 		desc = macb_rx_desc(queue, entry);
queue            1031 drivers/net/ethernet/cadence/macb_main.c 		queue->rx_tail++;
queue            1038 drivers/net/ethernet/cadence/macb_main.c 			queue->stats.rx_dropped++;
queue            1041 drivers/net/ethernet/cadence/macb_main.c 		skb = queue->rx_skbuff[entry];
queue            1046 drivers/net/ethernet/cadence/macb_main.c 			queue->stats.rx_dropped++;
queue            1050 drivers/net/ethernet/cadence/macb_main.c 		queue->rx_skbuff[entry] = NULL;
queue            1067 drivers/net/ethernet/cadence/macb_main.c 		queue->stats.rx_packets++;
queue            1069 drivers/net/ethernet/cadence/macb_main.c 		queue->stats.rx_bytes += skb->len;
queue            1085 drivers/net/ethernet/cadence/macb_main.c 	gem_rx_refill(queue);
queue            1090 drivers/net/ethernet/cadence/macb_main.c static int macb_rx_frame(struct macb_queue *queue, struct napi_struct *napi,
queue            1098 drivers/net/ethernet/cadence/macb_main.c 	struct macb *bp = queue->bp;
queue            1100 drivers/net/ethernet/cadence/macb_main.c 	desc = macb_rx_desc(queue, last_frag);
queue            1119 drivers/net/ethernet/cadence/macb_main.c 			desc = macb_rx_desc(queue, frag);
queue            1147 drivers/net/ethernet/cadence/macb_main.c 					       macb_rx_buffer(queue, frag),
queue            1150 drivers/net/ethernet/cadence/macb_main.c 		desc = macb_rx_desc(queue, frag);
queue            1172 drivers/net/ethernet/cadence/macb_main.c static inline void macb_init_rx_ring(struct macb_queue *queue)
queue            1174 drivers/net/ethernet/cadence/macb_main.c 	struct macb *bp = queue->bp;
queue            1179 drivers/net/ethernet/cadence/macb_main.c 	addr = queue->rx_buffers_dma;
queue            1181 drivers/net/ethernet/cadence/macb_main.c 		desc = macb_rx_desc(queue, i);
queue            1187 drivers/net/ethernet/cadence/macb_main.c 	queue->rx_tail = 0;
queue            1190 drivers/net/ethernet/cadence/macb_main.c static int macb_rx(struct macb_queue *queue, struct napi_struct *napi,
queue            1193 drivers/net/ethernet/cadence/macb_main.c 	struct macb *bp = queue->bp;
queue            1199 drivers/net/ethernet/cadence/macb_main.c 	for (tail = queue->rx_tail; budget > 0; tail++) {
queue            1200 drivers/net/ethernet/cadence/macb_main.c 		struct macb_dma_desc *desc = macb_rx_desc(queue, tail);
queue            1216 drivers/net/ethernet/cadence/macb_main.c 				discard_partial_frame(queue, first_frag, tail);
queue            1228 drivers/net/ethernet/cadence/macb_main.c 			dropped = macb_rx_frame(queue, napi, first_frag, tail);
queue            1252 drivers/net/ethernet/cadence/macb_main.c 		macb_init_rx_ring(queue);
queue            1253 drivers/net/ethernet/cadence/macb_main.c 		queue_writel(queue, RBQP, queue->rx_ring_dma);
queue            1262 drivers/net/ethernet/cadence/macb_main.c 		queue->rx_tail = first_frag;
queue            1264 drivers/net/ethernet/cadence/macb_main.c 		queue->rx_tail = tail;
queue            1271 drivers/net/ethernet/cadence/macb_main.c 	struct macb_queue *queue = container_of(napi, struct macb_queue, napi);
queue            1272 drivers/net/ethernet/cadence/macb_main.c 	struct macb *bp = queue->bp;
queue            1282 drivers/net/ethernet/cadence/macb_main.c 	work_done = bp->macbgem_ops.mog_rx(queue, napi, budget);
queue            1290 drivers/net/ethernet/cadence/macb_main.c 				queue_writel(queue, ISR, MACB_BIT(RCOMP));
queue            1293 drivers/net/ethernet/cadence/macb_main.c 			queue_writel(queue, IER, bp->rx_intr_mask);
queue            1306 drivers/net/ethernet/cadence/macb_main.c 	struct macb_queue *queue = bp->queues;
queue            1310 drivers/net/ethernet/cadence/macb_main.c 	for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
queue            1311 drivers/net/ethernet/cadence/macb_main.c 		queue_writel(queue, IDR, bp->rx_intr_mask |
queue            1325 drivers/net/ethernet/cadence/macb_main.c 	for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
queue            1326 drivers/net/ethernet/cadence/macb_main.c 		queue_writel(queue, RBQP, lower_32_bits(queue->rx_ring_dma));
queue            1329 drivers/net/ethernet/cadence/macb_main.c 			queue_writel(queue, RBQPH,
queue            1330 drivers/net/ethernet/cadence/macb_main.c 				     upper_32_bits(queue->rx_ring_dma));
queue            1332 drivers/net/ethernet/cadence/macb_main.c 		queue_writel(queue, TBQP, lower_32_bits(queue->tx_ring_dma));
queue            1335 drivers/net/ethernet/cadence/macb_main.c 			queue_writel(queue, TBQPH,
queue            1336 drivers/net/ethernet/cadence/macb_main.c 				     upper_32_bits(queue->tx_ring_dma));
queue            1340 drivers/net/ethernet/cadence/macb_main.c 		queue_writel(queue, IER,
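
The ring-programming loops above write each 64-bit DMA address as a low register (TBQP/RBQP) plus, when the hardware supports 64-bit addressing, a high register (TBQPH/RBQPH). Equivalent userspace helpers for the split (the kernel's upper_32_bits() uses the same double shift so a 32-bit argument is never shifted by its full width):

    #include <stdint.h>

    static inline uint32_t lo32(uint64_t addr)
    {
        return (uint32_t)(addr & 0xffffffffu);
    }

    static inline uint32_t hi32(uint64_t addr)
    {
        /* two 16-bit shifts instead of one 32-bit shift */
        return (uint32_t)((addr >> 16) >> 16);
    }
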
queue            1353 drivers/net/ethernet/cadence/macb_main.c static void macb_tx_restart(struct macb_queue *queue)
queue            1355 drivers/net/ethernet/cadence/macb_main.c 	unsigned int head = queue->tx_head;
queue            1356 drivers/net/ethernet/cadence/macb_main.c 	unsigned int tail = queue->tx_tail;
queue            1357 drivers/net/ethernet/cadence/macb_main.c 	struct macb *bp = queue->bp;
queue            1360 drivers/net/ethernet/cadence/macb_main.c 		queue_writel(queue, ISR, MACB_BIT(TXUBR));
queue            1370 drivers/net/ethernet/cadence/macb_main.c 	struct macb_queue *queue = dev_id;
queue            1371 drivers/net/ethernet/cadence/macb_main.c 	struct macb *bp = queue->bp;
queue            1375 drivers/net/ethernet/cadence/macb_main.c 	status = queue_readl(queue, ISR);
queue            1385 drivers/net/ethernet/cadence/macb_main.c 			queue_writel(queue, IDR, -1);
queue            1387 drivers/net/ethernet/cadence/macb_main.c 				queue_writel(queue, ISR, -1);
queue            1392 drivers/net/ethernet/cadence/macb_main.c 			    (unsigned int)(queue - bp->queues),
queue            1402 drivers/net/ethernet/cadence/macb_main.c 			queue_writel(queue, IDR, bp->rx_intr_mask);
queue            1404 drivers/net/ethernet/cadence/macb_main.c 				queue_writel(queue, ISR, MACB_BIT(RCOMP));
queue            1406 drivers/net/ethernet/cadence/macb_main.c 			if (napi_schedule_prep(&queue->napi)) {
queue            1408 drivers/net/ethernet/cadence/macb_main.c 				__napi_schedule(&queue->napi);
queue            1413 drivers/net/ethernet/cadence/macb_main.c 			queue_writel(queue, IDR, MACB_TX_INT_FLAGS);
queue            1414 drivers/net/ethernet/cadence/macb_main.c 			schedule_work(&queue->tx_error_task);
queue            1417 drivers/net/ethernet/cadence/macb_main.c 				queue_writel(queue, ISR, MACB_TX_ERR_FLAGS);
queue            1423 drivers/net/ethernet/cadence/macb_main.c 			macb_tx_interrupt(queue);
queue            1426 drivers/net/ethernet/cadence/macb_main.c 			macb_tx_restart(queue);
queue            1446 drivers/net/ethernet/cadence/macb_main.c 				queue_writel(queue, ISR, MACB_BIT(RXUBR));
queue            1457 drivers/net/ethernet/cadence/macb_main.c 				queue_writel(queue, ISR, MACB_BIT(ISR_ROVR));
queue            1465 drivers/net/ethernet/cadence/macb_main.c 				queue_writel(queue, ISR, MACB_BIT(HRESP));
queue            1467 drivers/net/ethernet/cadence/macb_main.c 		status = queue_readl(queue, ISR);
queue            1482 drivers/net/ethernet/cadence/macb_main.c 	struct macb_queue *queue;
queue            1487 drivers/net/ethernet/cadence/macb_main.c 	for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue)
queue            1488 drivers/net/ethernet/cadence/macb_main.c 		macb_interrupt(dev->irq, queue);
queue            1494 drivers/net/ethernet/cadence/macb_main.c 				struct macb_queue *queue,
queue            1499 drivers/net/ethernet/cadence/macb_main.c 	unsigned int len, entry, i, tx_head = queue->tx_head;
queue            1526 drivers/net/ethernet/cadence/macb_main.c 		tx_skb = &queue->tx_skb[entry];
queue            1557 drivers/net/ethernet/cadence/macb_main.c 			tx_skb = &queue->tx_skb[entry];
queue            1596 drivers/net/ethernet/cadence/macb_main.c 	desc = macb_tx_desc(queue, entry);
queue            1617 drivers/net/ethernet/cadence/macb_main.c 		tx_skb = &queue->tx_skb[entry];
queue            1618 drivers/net/ethernet/cadence/macb_main.c 		desc = macb_tx_desc(queue, entry);
queue            1629 drivers/net/ethernet/cadence/macb_main.c 		if (i == queue->tx_head) {
queue            1648 drivers/net/ethernet/cadence/macb_main.c 	} while (i != queue->tx_head);
queue            1650 drivers/net/ethernet/cadence/macb_main.c 	queue->tx_head = tx_head;
queue            1657 drivers/net/ethernet/cadence/macb_main.c 	for (i = queue->tx_head; i != tx_head; i++) {
queue            1658 drivers/net/ethernet/cadence/macb_main.c 		tx_skb = macb_tx_skb(queue, i);
queue            1780 drivers/net/ethernet/cadence/macb_main.c 	struct macb_queue *queue = &bp->queues[queue_index];
queue            1843 drivers/net/ethernet/cadence/macb_main.c 	if (CIRC_SPACE(queue->tx_head, queue->tx_tail,
queue            1848 drivers/net/ethernet/cadence/macb_main.c 			   queue->tx_head, queue->tx_tail);
queue            1853 drivers/net/ethernet/cadence/macb_main.c 	if (!macb_tx_map(bp, queue, skb, hdrlen)) {
queue            1864 drivers/net/ethernet/cadence/macb_main.c 	if (CIRC_SPACE(queue->tx_head, queue->tx_tail, bp->tx_ring_size) < 1)
queue            1897 drivers/net/ethernet/cadence/macb_main.c 	struct macb_queue *queue;
queue            1902 drivers/net/ethernet/cadence/macb_main.c 	for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
queue            1903 drivers/net/ethernet/cadence/macb_main.c 		if (!queue->rx_skbuff)
queue            1907 drivers/net/ethernet/cadence/macb_main.c 			skb = queue->rx_skbuff[i];
queue            1912 drivers/net/ethernet/cadence/macb_main.c 			desc = macb_rx_desc(queue, i);
queue            1921 drivers/net/ethernet/cadence/macb_main.c 		kfree(queue->rx_skbuff);
queue            1922 drivers/net/ethernet/cadence/macb_main.c 		queue->rx_skbuff = NULL;
queue            1928 drivers/net/ethernet/cadence/macb_main.c 	struct macb_queue *queue = &bp->queues[0];
queue            1930 drivers/net/ethernet/cadence/macb_main.c 	if (queue->rx_buffers) {
queue            1933 drivers/net/ethernet/cadence/macb_main.c 				  queue->rx_buffers, queue->rx_buffers_dma);
queue            1934 drivers/net/ethernet/cadence/macb_main.c 		queue->rx_buffers = NULL;
queue            1940 drivers/net/ethernet/cadence/macb_main.c 	struct macb_queue *queue;
queue            1946 drivers/net/ethernet/cadence/macb_main.c 	for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
queue            1947 drivers/net/ethernet/cadence/macb_main.c 		kfree(queue->tx_skb);
queue            1948 drivers/net/ethernet/cadence/macb_main.c 		queue->tx_skb = NULL;
queue            1949 drivers/net/ethernet/cadence/macb_main.c 		if (queue->tx_ring) {
queue            1952 drivers/net/ethernet/cadence/macb_main.c 					  queue->tx_ring, queue->tx_ring_dma);
queue            1953 drivers/net/ethernet/cadence/macb_main.c 			queue->tx_ring = NULL;
queue            1955 drivers/net/ethernet/cadence/macb_main.c 		if (queue->rx_ring) {
queue            1958 drivers/net/ethernet/cadence/macb_main.c 					  queue->rx_ring, queue->rx_ring_dma);
queue            1959 drivers/net/ethernet/cadence/macb_main.c 			queue->rx_ring = NULL;
queue            1966 drivers/net/ethernet/cadence/macb_main.c 	struct macb_queue *queue;
queue            1970 drivers/net/ethernet/cadence/macb_main.c 	for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
queue            1972 drivers/net/ethernet/cadence/macb_main.c 		queue->rx_skbuff = kzalloc(size, GFP_KERNEL);
queue            1973 drivers/net/ethernet/cadence/macb_main.c 		if (!queue->rx_skbuff)
queue            1978 drivers/net/ethernet/cadence/macb_main.c 				   bp->rx_ring_size, queue->rx_skbuff);
queue            1985 drivers/net/ethernet/cadence/macb_main.c 	struct macb_queue *queue = &bp->queues[0];
queue            1989 drivers/net/ethernet/cadence/macb_main.c 	queue->rx_buffers = dma_alloc_coherent(&bp->pdev->dev, size,
queue            1990 drivers/net/ethernet/cadence/macb_main.c 					    &queue->rx_buffers_dma, GFP_KERNEL);
queue            1991 drivers/net/ethernet/cadence/macb_main.c 	if (!queue->rx_buffers)
queue            1996 drivers/net/ethernet/cadence/macb_main.c 		   size, (unsigned long)queue->rx_buffers_dma, queue->rx_buffers);
queue            2002 drivers/net/ethernet/cadence/macb_main.c 	struct macb_queue *queue;
queue            2006 drivers/net/ethernet/cadence/macb_main.c 	for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
queue            2008 drivers/net/ethernet/cadence/macb_main.c 		queue->tx_ring = dma_alloc_coherent(&bp->pdev->dev, size,
queue            2009 drivers/net/ethernet/cadence/macb_main.c 						    &queue->tx_ring_dma,
queue            2011 drivers/net/ethernet/cadence/macb_main.c 		if (!queue->tx_ring)
queue            2015 drivers/net/ethernet/cadence/macb_main.c 			   q, size, (unsigned long)queue->tx_ring_dma,
queue            2016 drivers/net/ethernet/cadence/macb_main.c 			   queue->tx_ring);
queue            2019 drivers/net/ethernet/cadence/macb_main.c 		queue->tx_skb = kmalloc(size, GFP_KERNEL);
queue            2020 drivers/net/ethernet/cadence/macb_main.c 		if (!queue->tx_skb)
queue            2024 drivers/net/ethernet/cadence/macb_main.c 		queue->rx_ring = dma_alloc_coherent(&bp->pdev->dev, size,
queue            2025 drivers/net/ethernet/cadence/macb_main.c 						 &queue->rx_ring_dma, GFP_KERNEL);
queue            2026 drivers/net/ethernet/cadence/macb_main.c 		if (!queue->rx_ring)
queue            2030 drivers/net/ethernet/cadence/macb_main.c 			   size, (unsigned long)queue->rx_ring_dma, queue->rx_ring);
queue            2044 drivers/net/ethernet/cadence/macb_main.c 	struct macb_queue *queue;
queue            2049 drivers/net/ethernet/cadence/macb_main.c 	for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
queue            2051 drivers/net/ethernet/cadence/macb_main.c 			desc = macb_tx_desc(queue, i);
queue            2056 drivers/net/ethernet/cadence/macb_main.c 		queue->tx_head = 0;
queue            2057 drivers/net/ethernet/cadence/macb_main.c 		queue->tx_tail = 0;
queue            2059 drivers/net/ethernet/cadence/macb_main.c 		queue->rx_tail = 0;
queue            2060 drivers/net/ethernet/cadence/macb_main.c 		queue->rx_prepared_head = 0;
queue            2062 drivers/net/ethernet/cadence/macb_main.c 		gem_rx_refill(queue);
queue            2086 drivers/net/ethernet/cadence/macb_main.c 	struct macb_queue *queue;
queue            2105 drivers/net/ethernet/cadence/macb_main.c 	for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
queue            2106 drivers/net/ethernet/cadence/macb_main.c 		queue_writel(queue, IDR, -1);
queue            2107 drivers/net/ethernet/cadence/macb_main.c 		queue_readl(queue, ISR);
queue            2109 drivers/net/ethernet/cadence/macb_main.c 			queue_writel(queue, ISR, -1);
queue            2184 drivers/net/ethernet/cadence/macb_main.c 	struct macb_queue *queue;
queue            2192 drivers/net/ethernet/cadence/macb_main.c 		for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
queue            2194 drivers/net/ethernet/cadence/macb_main.c 				queue_writel(queue, RBQS, buffer_size);
queue            2230 drivers/net/ethernet/cadence/macb_main.c 	struct macb_queue *queue;
queue            2267 drivers/net/ethernet/cadence/macb_main.c 	for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
queue            2268 drivers/net/ethernet/cadence/macb_main.c 		queue_writel(queue, RBQP, lower_32_bits(queue->rx_ring_dma));
queue            2271 drivers/net/ethernet/cadence/macb_main.c 			queue_writel(queue, RBQPH, upper_32_bits(queue->rx_ring_dma));
queue            2273 drivers/net/ethernet/cadence/macb_main.c 		queue_writel(queue, TBQP, lower_32_bits(queue->tx_ring_dma));
queue            2276 drivers/net/ethernet/cadence/macb_main.c 			queue_writel(queue, TBQPH, upper_32_bits(queue->tx_ring_dma));
queue            2280 drivers/net/ethernet/cadence/macb_main.c 		queue_writel(queue, IER,
queue            2413 drivers/net/ethernet/cadence/macb_main.c 	struct macb_queue *queue;
queue            2442 drivers/net/ethernet/cadence/macb_main.c 	for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue)
queue            2443 drivers/net/ethernet/cadence/macb_main.c 		napi_enable(&queue->napi);
queue            2467 drivers/net/ethernet/cadence/macb_main.c 	struct macb_queue *queue;
queue            2473 drivers/net/ethernet/cadence/macb_main.c 	for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue)
queue            2474 drivers/net/ethernet/cadence/macb_main.c 		napi_disable(&queue->napi);
queue            2506 drivers/net/ethernet/cadence/macb_main.c 	struct macb_queue *queue;
queue            2528 drivers/net/ethernet/cadence/macb_main.c 	for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue)
queue            2529 drivers/net/ethernet/cadence/macb_main.c 		for (i = 0, stat = &queue->stats.first; i < QUEUE_STATS_LEN; ++i, ++stat)
queue            2598 drivers/net/ethernet/cadence/macb_main.c 	struct macb_queue *queue;
queue            2608 drivers/net/ethernet/cadence/macb_main.c 		for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
queue            3478 drivers/net/ethernet/cadence/macb_main.c 	struct macb_queue *queue;
queue            3493 drivers/net/ethernet/cadence/macb_main.c 		queue = &bp->queues[q];
queue            3494 drivers/net/ethernet/cadence/macb_main.c 		queue->bp = bp;
queue            3495 drivers/net/ethernet/cadence/macb_main.c 		netif_napi_add(dev, &queue->napi, macb_poll, NAPI_POLL_WEIGHT);
queue            3497 drivers/net/ethernet/cadence/macb_main.c 			queue->ISR  = GEM_ISR(hw_q - 1);
queue            3498 drivers/net/ethernet/cadence/macb_main.c 			queue->IER  = GEM_IER(hw_q - 1);
queue            3499 drivers/net/ethernet/cadence/macb_main.c 			queue->IDR  = GEM_IDR(hw_q - 1);
queue            3500 drivers/net/ethernet/cadence/macb_main.c 			queue->IMR  = GEM_IMR(hw_q - 1);
queue            3501 drivers/net/ethernet/cadence/macb_main.c 			queue->TBQP = GEM_TBQP(hw_q - 1);
queue            3502 drivers/net/ethernet/cadence/macb_main.c 			queue->RBQP = GEM_RBQP(hw_q - 1);
queue            3503 drivers/net/ethernet/cadence/macb_main.c 			queue->RBQS = GEM_RBQS(hw_q - 1);
queue            3506 drivers/net/ethernet/cadence/macb_main.c 				queue->TBQPH = GEM_TBQPH(hw_q - 1);
queue            3507 drivers/net/ethernet/cadence/macb_main.c 				queue->RBQPH = GEM_RBQPH(hw_q - 1);
queue            3512 drivers/net/ethernet/cadence/macb_main.c 			queue->ISR  = MACB_ISR;
queue            3513 drivers/net/ethernet/cadence/macb_main.c 			queue->IER  = MACB_IER;
queue            3514 drivers/net/ethernet/cadence/macb_main.c 			queue->IDR  = MACB_IDR;
queue            3515 drivers/net/ethernet/cadence/macb_main.c 			queue->IMR  = MACB_IMR;
queue            3516 drivers/net/ethernet/cadence/macb_main.c 			queue->TBQP = MACB_TBQP;
queue            3517 drivers/net/ethernet/cadence/macb_main.c 			queue->RBQP = MACB_RBQP;
queue            3520 drivers/net/ethernet/cadence/macb_main.c 				queue->TBQPH = MACB_TBQPH;
queue            3521 drivers/net/ethernet/cadence/macb_main.c 				queue->RBQPH = MACB_RBQPH;
queue            3531 drivers/net/ethernet/cadence/macb_main.c 		queue->irq = platform_get_irq(pdev, q);
queue            3532 drivers/net/ethernet/cadence/macb_main.c 		err = devm_request_irq(&pdev->dev, queue->irq, macb_interrupt,
queue            3533 drivers/net/ethernet/cadence/macb_main.c 				       IRQF_SHARED, dev->name, queue);
queue            3537 drivers/net/ethernet/cadence/macb_main.c 				queue->irq, err);
queue            3541 drivers/net/ethernet/cadence/macb_main.c 		INIT_WORK(&queue->tx_error_task, macb_tx_error_task);
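
The probe loop above gives each struct macb_queue its own register offsets (GEM_ISR(hw_q - 1) and friends for hardware queues above 0, the flat MACB_* registers otherwise), which is what keeps the queue_readl()/queue_writel() macros in macb.h layout-agnostic. A sketch of that per-instance offset-table idea:

    #include <stdint.h>

    struct queue_regs {
        unsigned int isr, ier, idr; /* byte offsets, filled in at probe */
    };

    /* stand-in for macb_reg_readl(); base points at the mapped MMIO */
    static inline uint32_t reg_readl(const volatile uint32_t *base,
                                     unsigned int off)
    {
        return base[off / 4];
    }

    static inline uint32_t queue_read_isr(const volatile uint32_t *base,
                                          const struct queue_regs *q)
    {
        return reg_readl(base, q->isr); /* offset picked per queue */
    }
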
queue            4422 drivers/net/ethernet/cadence/macb_main.c 	struct macb_queue *queue = bp->queues;
queue            4437 drivers/net/ethernet/cadence/macb_main.c 		for (q = 0, queue = bp->queues; q < bp->num_queues;
queue            4438 drivers/net/ethernet/cadence/macb_main.c 		     ++q, ++queue)
queue            4439 drivers/net/ethernet/cadence/macb_main.c 			napi_disable(&queue->napi);
queue            4465 drivers/net/ethernet/cadence/macb_main.c 	struct macb_queue *queue = bp->queues;
queue            4486 drivers/net/ethernet/cadence/macb_main.c 		for (q = 0, queue = bp->queues; q < bp->num_queues;
queue            4487 drivers/net/ethernet/cadence/macb_main.c 		     ++q, ++queue)
queue            4488 drivers/net/ethernet/cadence/macb_main.c 			napi_enable(&queue->napi);
queue             296 drivers/net/ethernet/cadence/macb_ptp.c int gem_ptp_txstamp(struct macb_queue *queue, struct sk_buff *skb,
queue             299 drivers/net/ethernet/cadence/macb_ptp.c 	unsigned long tail = READ_ONCE(queue->tx_ts_tail);
queue             300 drivers/net/ethernet/cadence/macb_ptp.c 	unsigned long head = queue->tx_ts_head;
queue             311 drivers/net/ethernet/cadence/macb_ptp.c 	desc_ptp = macb_ptp_desc(queue->bp, desc);
queue             312 drivers/net/ethernet/cadence/macb_ptp.c 	tx_timestamp = &queue->tx_timestamps[head];
queue             319 drivers/net/ethernet/cadence/macb_ptp.c 	smp_store_release(&queue->tx_ts_head,
queue             322 drivers/net/ethernet/cadence/macb_ptp.c 	schedule_work(&queue->tx_ts_task);
queue             328 drivers/net/ethernet/cadence/macb_ptp.c 	struct macb_queue *queue =
queue             334 drivers/net/ethernet/cadence/macb_ptp.c 	head = smp_load_acquire(&queue->tx_ts_head);
queue             335 drivers/net/ethernet/cadence/macb_ptp.c 	tail = queue->tx_ts_tail;
queue             338 drivers/net/ethernet/cadence/macb_ptp.c 		tx_ts = &queue->tx_timestamps[tail];
queue             339 drivers/net/ethernet/cadence/macb_ptp.c 		gem_tstamp_tx(queue->bp, tx_ts->skb, &tx_ts->desc_ptp);
queue             343 drivers/net/ethernet/cadence/macb_ptp.c 		smp_store_release(&queue->tx_ts_tail,
queue             345 drivers/net/ethernet/cadence/macb_ptp.c 		tail = queue->tx_ts_tail;
queue             352 drivers/net/ethernet/cadence/macb_ptp.c 	struct macb_queue *queue;
queue             373 drivers/net/ethernet/cadence/macb_ptp.c 	for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
queue             374 drivers/net/ethernet/cadence/macb_ptp.c 		queue->tx_ts_head = 0;
queue             375 drivers/net/ethernet/cadence/macb_ptp.c 		queue->tx_ts_tail = 0;
queue             376 drivers/net/ethernet/cadence/macb_ptp.c 		INIT_WORK(&queue->tx_ts_task, gem_tx_timestamp_flush);
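
gem_ptp_txstamp() above publishes tx_ts_head with smp_store_release() from the transmit path, and the flush work reads it back with smp_load_acquire(), giving a lock-free single-producer/single-consumer handoff. An analogous SPSC ring in C11 atomics (sizes and names are assumptions):

    #include <stdatomic.h>
    #include <stdbool.h>

    #define TS_RING 16  /* assumed power of two */

    struct ts_ring {
        int slot[TS_RING];
        _Atomic unsigned int head;  /* written only by the producer */
        _Atomic unsigned int tail;  /* written only by the consumer */
    };

    static bool ts_push(struct ts_ring *r, int v)
    {
        unsigned int head = atomic_load_explicit(&r->head, memory_order_relaxed);
        unsigned int tail = atomic_load_explicit(&r->tail, memory_order_acquire);

        if (head - tail == TS_RING)
            return false;                   /* ring full */
        r->slot[head & (TS_RING - 1)] = v;  /* fill the slot... */
        atomic_store_explicit(&r->head, head + 1,
                              memory_order_release);  /* ...then publish it */
        return true;
    }

    static bool ts_pop(struct ts_ring *r, int *v)
    {
        unsigned int tail = atomic_load_explicit(&r->tail, memory_order_relaxed);
        unsigned int head = atomic_load_explicit(&r->head, memory_order_acquire);

        if (tail == head)
            return false;                   /* ring empty */
        *v = r->slot[tail & (TS_RING - 1)];
        atomic_store_explicit(&r->tail, tail + 1, memory_order_release);
        return true;
    }
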
queue            1904 drivers/net/ethernet/chelsio/cxgb3/sge.c 		struct sk_buff_head queue;
queue            1908 drivers/net/ethernet/chelsio/cxgb3/sge.c 		__skb_queue_head_init(&queue);
queue            1909 drivers/net/ethernet/chelsio/cxgb3/sge.c 		skb_queue_splice_init(&q->rx_queue, &queue);
queue            1910 drivers/net/ethernet/chelsio/cxgb3/sge.c 		if (skb_queue_empty(&queue)) {
queue            1918 drivers/net/ethernet/chelsio/cxgb3/sge.c 		skb_queue_walk_safe(&queue, skb, tmp) {
queue            1923 drivers/net/ethernet/chelsio/cxgb3/sge.c 			__skb_unlink(skb, &queue);
queue            1933 drivers/net/ethernet/chelsio/cxgb3/sge.c 		if (!skb_queue_empty(&queue)) {
queue            1936 drivers/net/ethernet/chelsio/cxgb3/sge.c 			skb_queue_splice(&queue, &q->rx_queue);
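
The cxgb3 responder above splices q->rx_queue onto a stack-local list, walks it without holding the queue lock, and splices any unprocessed entries back. The steal-the-whole-list step, as a pthreads sketch:

    #include <pthread.h>
    #include <stddef.h>

    struct node { struct node *next; };

    struct locked_list {
        pthread_mutex_t lock;
        struct node *head;
    };

    static struct node *splice_init(struct locked_list *l)
    {
        pthread_mutex_lock(&l->lock);
        struct node *batch = l->head;   /* take everything in one move */
        l->head = NULL;
        pthread_mutex_unlock(&l->lock);
        return batch;                   /* walk this without the lock */
    }
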
queue            1100 drivers/net/ethernet/chelsio/cxgb4/cxgb4.h 	s8   queue;    /* queue index */
queue            1527 drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c 			unsigned int queue)
queue            1547 drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c 	chan = rxq_to_chan(&adap->sge, queue);
queue            1550 drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c 				SYN_RSS_ENABLE_F | SYN_RSS_QUEUE_V(queue));
queue            1568 drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c 			 unsigned int queue)
queue            1590 drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c 	chan = rxq_to_chan(&adap->sge, queue);
queue            1593 drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c 				SYN_RSS_ENABLE_F | SYN_RSS_QUEUE_V(queue));
queue            1600 drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c 			unsigned int queue, bool ipv6)
queue            1617 drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c 				LISTSVR_IPV6_V(0)) | QUEUENO_V(queue));
queue            2478 drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c 		unsigned int queue, unsigned char port, unsigned char mask)
queue            2527 drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c 	f->fs.iq = queue;
queue            2547 drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c 		unsigned int queue, bool ipv6)
queue            3091 drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c 	qe.queue = index;
queue            3126 drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c 	qe.queue = index;
queue             190 drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h 			unsigned int queue);
queue             193 drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h 			 unsigned int queue);
queue             195 drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h 			unsigned int queue, bool ipv6);
queue             198 drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h 			       unsigned int queue,
queue             201 drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h 			       unsigned int queue, bool ipv6);
queue             229 drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h static inline void set_wr_txq(struct sk_buff *skb, int prio, int queue)
queue             231 drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h 	skb_set_queue_mapping(skb, (queue << 1) | prio);
queue             153 drivers/net/ethernet/chelsio/cxgb4/sched.c 	if (p->queue < 0 || p->queue >= pi->nqsets)
queue             156 drivers/net/ethernet/chelsio/cxgb4/sched.c 	txq = &adap->sge.ethtxq[pi->first_qset + p->queue];
queue             194 drivers/net/ethernet/chelsio/cxgb4/sched.c 	if (p->queue < 0 || p->queue >= pi->nqsets)
queue             201 drivers/net/ethernet/chelsio/cxgb4/sched.c 	txq = &adap->sge.ethtxq[pi->first_qset + p->queue];
queue             836 drivers/net/ethernet/dec/tulip/de4x5.c 	struct sk_buff_head queue;          /* Save the (re-ordered) skb's  */
queue            1142 drivers/net/ethernet/dec/tulip/de4x5.c 	skb_queue_head_init(&lp->cache.queue);
queue            1492 drivers/net/ethernet/dec/tulip/de4x5.c 	if (!skb_queue_empty(&lp->cache.queue) && !lp->interrupt) {
queue            1587 drivers/net/ethernet/dec/tulip/de4x5.c 	while (!skb_queue_empty(&lp->cache.queue) && !netif_queue_stopped(dev) && lp->tx_enable) {
queue            3669 drivers/net/ethernet/dec/tulip/de4x5.c     __skb_queue_purge(&lp->cache.queue);
queue            3762 drivers/net/ethernet/dec/tulip/de4x5.c     __skb_queue_tail(&lp->cache.queue, skb);
queue            3770 drivers/net/ethernet/dec/tulip/de4x5.c     __skb_queue_head(&lp->cache.queue, skb);
queue            3778 drivers/net/ethernet/dec/tulip/de4x5.c     return __skb_dequeue(&lp->cache.queue);
queue            2022 drivers/net/ethernet/freescale/dpaa/dpaa_eth.c 			    int queue,
queue            2028 drivers/net/ethernet/freescale/dpaa/dpaa_eth.c 	egress_fq = priv->egress_fqs[queue];
queue            2030 drivers/net/ethernet/freescale/dpaa/dpaa_eth.c 		fd->cmd |= cpu_to_be32(qman_fq_fqid(priv->conf_fqs[queue]));
queue            2556 drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c 	struct dpni_queue queue;
queue            2573 drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c 					     &queue, &qid);
queue            2684 drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c 	struct dpni_queue queue;
queue            2689 drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c 			     DPNI_QUEUE_RX, 0, fq->flowid, &queue, &qid);
queue            2697 drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c 	queue.destination.id = fq->channel->dpcon_id;
queue            2698 drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c 	queue.destination.type = DPNI_DEST_DPCON;
queue            2699 drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c 	queue.destination.priority = 1;
queue            2700 drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c 	queue.user_context = (u64)(uintptr_t)fq;
queue            2704 drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c 			     &queue);
queue            2732 drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c 	struct dpni_queue queue;
queue            2739 drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c 				     &queue, &qid);
queue            2752 drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c 			     &queue, &qid);
queue            2760 drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c 	queue.destination.id = fq->channel->dpcon_id;
queue            2761 drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c 	queue.destination.type = DPNI_DEST_DPCON;
queue            2762 drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c 	queue.destination.priority = 0;
queue            2763 drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c 	queue.user_context = (u64)(uintptr_t)fq;
queue            2767 drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c 			     &queue);
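
The dpaa2 setup paths above follow a read-modify-write flow: dpni_get_queue() fetches the current attributes, the destination and user context are rewritten, and dpni_set_queue() pushes the whole structure back. A generic sketch of that flow; dev_get_queue()/dev_set_queue() are hypothetical stand-ins, not the MC firmware API:

    #include <stdint.h>

    struct queue_attr {
        struct { uint32_t id; uint8_t priority; } destination;
        uint64_t user_context;
    };

    static struct queue_attr qtable[8]; /* stand-in for device state */

    static int dev_get_queue(unsigned int qid, struct queue_attr *attr)
    {
        *attr = qtable[qid];
        return 0;
    }

    static int dev_set_queue(unsigned int qid, const struct queue_attr *attr)
    {
        qtable[qid] = *attr;
        return 0;
    }

    static int point_queue_at_channel(unsigned int qid, uint32_t channel_id,
                                      void *ctx)
    {
        struct queue_attr attr;
        int err = dev_get_queue(qid, &attr);    /* read current config */

        if (err)
            return err;
        attr.destination.id = channel_id;       /* retarget notifications */
        attr.destination.priority = 1;
        attr.user_context = (uint64_t)(uintptr_t)ctx;
        return dev_set_queue(qid, &attr);       /* write it all back */
    }
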
queue            1380 drivers/net/ethernet/freescale/dpaa2/dpni.c 		   const struct dpni_queue *queue)
queue            1394 drivers/net/ethernet/freescale/dpaa2/dpni.c 	cmd_params->dest_id = cpu_to_le32(queue->destination.id);
queue            1395 drivers/net/ethernet/freescale/dpaa2/dpni.c 	cmd_params->dest_prio = queue->destination.priority;
queue            1396 drivers/net/ethernet/freescale/dpaa2/dpni.c 	dpni_set_field(cmd_params->flags, DEST_TYPE, queue->destination.type);
queue            1397 drivers/net/ethernet/freescale/dpaa2/dpni.c 	dpni_set_field(cmd_params->flags, STASH_CTRL, queue->flc.stash_control);
queue            1399 drivers/net/ethernet/freescale/dpaa2/dpni.c 		       queue->destination.hold_active);
queue            1400 drivers/net/ethernet/freescale/dpaa2/dpni.c 	cmd_params->flc = cpu_to_le64(queue->flc.value);
queue            1401 drivers/net/ethernet/freescale/dpaa2/dpni.c 	cmd_params->user_context = cpu_to_le64(queue->user_context);
queue            1427 drivers/net/ethernet/freescale/dpaa2/dpni.c 		   struct dpni_queue *queue,
queue            1451 drivers/net/ethernet/freescale/dpaa2/dpni.c 	queue->destination.id = le32_to_cpu(rsp_params->dest_id);
queue            1452 drivers/net/ethernet/freescale/dpaa2/dpni.c 	queue->destination.priority = rsp_params->dest_prio;
queue            1453 drivers/net/ethernet/freescale/dpaa2/dpni.c 	queue->destination.type = dpni_get_field(rsp_params->flags,
queue            1455 drivers/net/ethernet/freescale/dpaa2/dpni.c 	queue->flc.stash_control = dpni_get_field(rsp_params->flags,
queue            1457 drivers/net/ethernet/freescale/dpaa2/dpni.c 	queue->destination.hold_active = dpni_get_field(rsp_params->flags,
queue            1459 drivers/net/ethernet/freescale/dpaa2/dpni.c 	queue->flc.value = le64_to_cpu(rsp_params->flc);
queue            1460 drivers/net/ethernet/freescale/dpaa2/dpni.c 	queue->user_context = le64_to_cpu(rsp_params->user_context);
queue             825 drivers/net/ethernet/freescale/dpaa2/dpni.h 		   const struct dpni_queue *queue);
queue             833 drivers/net/ethernet/freescale/dpaa2/dpni.h 		   struct dpni_queue	*queue,
queue             802 drivers/net/ethernet/freescale/fec_main.c 	unsigned short queue;
queue             807 drivers/net/ethernet/freescale/fec_main.c 	queue = skb_get_queue_mapping(skb);
queue             808 drivers/net/ethernet/freescale/fec_main.c 	txq = fep->tx_queue[queue];
queue             809 drivers/net/ethernet/freescale/fec_main.c 	nq = netdev_get_tx_queue(ndev, queue);
queue            2854 drivers/net/ethernet/freescale/fec_main.c fec_enet_alloc_rxq_buffers(struct net_device *ndev, unsigned int queue)
queue            2862 drivers/net/ethernet/freescale/fec_main.c 	rxq = fep->rx_queue[queue];
queue            2896 drivers/net/ethernet/freescale/fec_main.c fec_enet_alloc_txq_buffers(struct net_device *ndev, unsigned int queue)
queue            2903 drivers/net/ethernet/freescale/fec_main.c 	txq = fep->tx_queue[queue];
queue             214 drivers/net/ethernet/freescale/ucc_geth_ethtool.c 	int queue = 0;
queue             221 drivers/net/ethernet/freescale/ucc_geth_ethtool.c 	ring->rx_pending = ug_info->bdRingLenRx[queue];
queue             222 drivers/net/ethernet/freescale/ucc_geth_ethtool.c 	ring->rx_mini_pending = ug_info->bdRingLenRx[queue];
queue             223 drivers/net/ethernet/freescale/ucc_geth_ethtool.c 	ring->rx_jumbo_pending = ug_info->bdRingLenRx[queue];
queue             224 drivers/net/ethernet/freescale/ucc_geth_ethtool.c 	ring->tx_pending = ug_info->bdRingLenTx[queue];
queue             233 drivers/net/ethernet/freescale/ucc_geth_ethtool.c 	int queue = 0, ret = 0;
queue             254 drivers/net/ethernet/freescale/ucc_geth_ethtool.c 	ug_info->bdRingLenRx[queue] = ring->rx_pending;
queue             255 drivers/net/ethernet/freescale/ucc_geth_ethtool.c 	ug_info->bdRingLenTx[queue] = ring->tx_pending;
queue             348 drivers/net/ethernet/hisilicon/hisi_femac.c 				 struct hisi_femac_queue *queue,
queue             351 drivers/net/ethernet/hisilicon/hisi_femac.c 	queue->skb = devm_kcalloc(dev, num, sizeof(struct sk_buff *),
queue             353 drivers/net/ethernet/hisilicon/hisi_femac.c 	if (!queue->skb)
queue             356 drivers/net/ethernet/hisilicon/hisi_femac.c 	queue->dma_phys = devm_kcalloc(dev, num, sizeof(dma_addr_t),
queue             358 drivers/net/ethernet/hisilicon/hisi_femac.c 	if (!queue->dma_phys)
queue             361 drivers/net/ethernet/hisilicon/hisi_femac.c 	queue->num = num;
queue             362 drivers/net/ethernet/hisilicon/hisi_femac.c 	queue->head = 0;
queue             363 drivers/net/ethernet/hisilicon/hisi_femac.c 	queue->tail = 0;
queue             486 drivers/net/ethernet/hisilicon/hns/hnae.h 	void (*get_ring_bdnum_limit)(struct hnae_queue *queue,
queue             471 drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c static void hns_ae_get_ring_bdnum_limit(struct hnae_queue *queue,
queue             649 drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c 	struct hnae_queue *queue;
queue             663 drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c 		queue = handle->qs[idx];
queue             664 drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c 		hns_rcb_update_stats(queue);
queue             666 drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c 		tx_bytes += queue->tx_ring.stats.tx_bytes;
queue             667 drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c 		tx_packets += queue->tx_ring.stats.tx_pkts;
queue             668 drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c 		rx_bytes += queue->rx_ring.stats.rx_bytes;
queue             669 drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c 		rx_packets += queue->rx_ring.stats.rx_pkts;
queue             671 drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c 		rx_errors += queue->rx_ring.stats.err_pkt_len
queue             672 drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c 				+ queue->rx_ring.stats.l2_err
queue             673 drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c 				+ queue->rx_ring.stats.l3l4_csum_err;
queue             817 drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c void hns_rcb_update_stats(struct hnae_queue *queue)
queue             820 drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c 		container_of(queue, struct ring_pair_cb, q);
queue             826 drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c 	hw_stats->rx_pkts += dsaf_read_dev(queue,
queue             828 drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c 	dsaf_write_dev(queue, RCB_RING_RX_RING_PKTNUM_RECORD_REG, 0x1);
queue             835 drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c 	hw_stats->tx_pkts += dsaf_read_dev(queue,
queue             837 drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c 	dsaf_write_dev(queue, RCB_RING_TX_RING_PKTNUM_RECORD_REG, 0x1);
queue             850 drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c void hns_rcb_get_stats(struct hnae_queue *queue, u64 *data)
queue             854 drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c 		container_of(queue, struct ring_pair_cb, q);
queue             861 drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c 		dsaf_read_dev(queue, RCB_RING_TX_RING_FBDNUM_REG);
queue             863 drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c 	regs_buff[4] = queue->tx_ring.stats.tx_pkts;
queue             864 drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c 	regs_buff[5] = queue->tx_ring.stats.tx_bytes;
queue             865 drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c 	regs_buff[6] = queue->tx_ring.stats.tx_err_cnt;
queue             866 drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c 	regs_buff[7] = queue->tx_ring.stats.io_err_cnt;
queue             867 drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c 	regs_buff[8] = queue->tx_ring.stats.sw_err_cnt;
queue             868 drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c 	regs_buff[9] = queue->tx_ring.stats.seg_pkt_cnt;
queue             869 drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c 	regs_buff[10] = queue->tx_ring.stats.restart_queue;
queue             870 drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c 	regs_buff[11] = queue->tx_ring.stats.tx_busy;
queue             876 drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c 		dsaf_read_dev(queue, RCB_RING_RX_RING_FBDNUM_REG);
queue             878 drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c 	regs_buff[16] = queue->rx_ring.stats.rx_pkts;
queue             879 drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c 	regs_buff[17] = queue->rx_ring.stats.rx_bytes;
queue             880 drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c 	regs_buff[18] = queue->rx_ring.stats.rx_err_cnt;
queue             881 drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c 	regs_buff[19] = queue->rx_ring.stats.io_err_cnt;
queue             882 drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c 	regs_buff[20] = queue->rx_ring.stats.sw_err_cnt;
queue             883 drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c 	regs_buff[21] = queue->rx_ring.stats.seg_pkt_cnt;
queue             884 drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c 	regs_buff[22] = queue->rx_ring.stats.reuse_pg_cnt;
queue             885 drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c 	regs_buff[23] = queue->rx_ring.stats.err_pkt_len;
queue             886 drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c 	regs_buff[24] = queue->rx_ring.stats.non_vld_descs;
queue             887 drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c 	regs_buff[25] = queue->rx_ring.stats.err_bd_num;
queue             888 drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c 	regs_buff[26] = queue->rx_ring.stats.l2_err;
queue             889 drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c 	regs_buff[27] = queue->rx_ring.stats.l3l4_csum_err;
queue            1069 drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c void hns_rcb_get_ring_regs(struct hnae_queue *queue, void *data)
queue            1073 drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c 		= container_of(queue, struct ring_pair_cb, q);
queue            1077 drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c 	regs[0] = dsaf_read_dev(queue, RCB_RING_RX_RING_BASEADDR_L_REG);
queue            1078 drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c 	regs[1] = dsaf_read_dev(queue, RCB_RING_RX_RING_BASEADDR_H_REG);
queue            1079 drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c 	regs[2] = dsaf_read_dev(queue, RCB_RING_RX_RING_BD_NUM_REG);
queue            1080 drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c 	regs[3] = dsaf_read_dev(queue, RCB_RING_RX_RING_BD_LEN_REG);
queue            1081 drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c 	regs[4] = dsaf_read_dev(queue, RCB_RING_RX_RING_PKTLINE_REG);
queue            1082 drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c 	regs[5] = dsaf_read_dev(queue, RCB_RING_RX_RING_TAIL_REG);
queue            1083 drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c 	regs[6] = dsaf_read_dev(queue, RCB_RING_RX_RING_HEAD_REG);
queue            1084 drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c 	regs[7] = dsaf_read_dev(queue, RCB_RING_RX_RING_FBDNUM_REG);
queue            1085 drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c 	regs[8] = dsaf_read_dev(queue, RCB_RING_RX_RING_PKTNUM_RECORD_REG);
queue            1087 drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c 	regs[9] = dsaf_read_dev(queue, RCB_RING_TX_RING_BASEADDR_L_REG);
queue            1088 drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c 	regs[10] = dsaf_read_dev(queue, RCB_RING_TX_RING_BASEADDR_H_REG);
queue            1089 drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c 	regs[11] = dsaf_read_dev(queue, RCB_RING_TX_RING_BD_NUM_REG);
queue            1090 drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c 	regs[12] = dsaf_read_dev(queue, RCB_RING_TX_RING_BD_LEN_REG);
queue            1091 drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c 	regs[13] = dsaf_read_dev(queue, RCB_RING_TX_RING_PKTLINE_REG);
queue            1092 drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c 	regs[15] = dsaf_read_dev(queue, RCB_RING_TX_RING_TAIL_REG);
queue            1093 drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c 	regs[16] = dsaf_read_dev(queue, RCB_RING_TX_RING_HEAD_REG);
queue            1094 drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c 	regs[17] = dsaf_read_dev(queue, RCB_RING_TX_RING_FBDNUM_REG);
queue            1095 drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c 	regs[18] = dsaf_read_dev(queue, RCB_RING_TX_RING_OFFSET_REG);
queue            1096 drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c 	regs[19] = dsaf_read_dev(queue, RCB_RING_TX_RING_PKTNUM_RECORD_REG);
queue            1098 drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c 	regs[20] = dsaf_read_dev(queue, RCB_RING_PREFETCH_EN_REG);
queue            1099 drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c 	regs[21] = dsaf_read_dev(queue, RCB_RING_CFG_VF_NUM_REG);
queue            1100 drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c 	regs[22] = dsaf_read_dev(queue, RCB_RING_ASID_REG);
queue            1101 drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c 	regs[23] = dsaf_read_dev(queue, RCB_RING_RX_VM_REG);
queue            1102 drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c 	regs[24] = dsaf_read_dev(queue, RCB_RING_T0_BE_RST);
queue            1103 drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c 	regs[25] = dsaf_read_dev(queue, RCB_RING_COULD_BE_RST);
queue            1104 drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c 	regs[26] = dsaf_read_dev(queue, RCB_RING_WRR_WEIGHT_REG);
queue            1106 drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c 	regs[27] = dsaf_read_dev(queue, RCB_RING_INTMSK_RXWL_REG);
queue            1107 drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c 	regs[28] = dsaf_read_dev(queue, RCB_RING_INTSTS_RX_RING_REG);
queue            1108 drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c 	regs[29] = dsaf_read_dev(queue, RCB_RING_INTMSK_TXWL_REG);
queue            1109 drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c 	regs[30] = dsaf_read_dev(queue, RCB_RING_INTSTS_TX_RING_REG);
queue            1110 drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c 	regs[31] = dsaf_read_dev(queue, RCB_RING_INTMSK_RX_OVERTIME_REG);
queue            1111 drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c 	regs[32] = dsaf_read_dev(queue, RCB_RING_INTSTS_RX_OVERTIME_REG);
queue            1112 drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c 	regs[33] = dsaf_read_dev(queue, RCB_RING_INTMSK_TX_OVERTIME_REG);
queue            1113 drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c 	regs[34] = dsaf_read_dev(queue, RCB_RING_INTSTS_TX_OVERTIME_REG);
queue             148 drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.h void hns_rcb_update_stats(struct hnae_queue *queue);
queue             150 drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.h void hns_rcb_get_stats(struct hnae_queue *queue, u64 *data);
queue             158 drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.h void hns_rcb_get_ring_regs(struct hnae_queue *queue, void *data);
queue             669 drivers/net/ethernet/hisilicon/hns/hns_ethtool.c 	struct hnae_queue *queue;
queue             672 drivers/net/ethernet/hisilicon/hns/hns_ethtool.c 	queue = priv->ae_handle->qs[0];
queue             676 drivers/net/ethernet/hisilicon/hns/hns_ethtool.c 		ops->get_ring_bdnum_limit(queue, &uplimit);
queue             680 drivers/net/ethernet/hisilicon/hns/hns_ethtool.c 	param->rx_pending = queue->rx_ring.desc_num;
queue             681 drivers/net/ethernet/hisilicon/hns/hns_ethtool.c 	param->tx_pending = queue->tx_ring.desc_num;
queue            1083 drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c static int hns3_get_coalesce_per_queue(struct net_device *netdev, u32 queue,
queue            1094 drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c 	if (queue >= queue_num) {
queue            1097 drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c 			   queue, queue_num - 1);
queue            1101 drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c 	tx_vector = priv->ring_data[queue].ring->tqp_vector;
queue            1102 drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c 	rx_vector = priv->ring_data[queue_num + queue].ring->tqp_vector;
queue            1220 drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c 					u32 queue)
queue            1227 drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c 	tx_vector = priv->ring_data[queue].ring->tqp_vector;
queue            1228 drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c 	rx_vector = priv->ring_data[queue_num + queue].ring->tqp_vector;
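In the hns3_ethtool.c lines above, TX and RX rings share one flat ring_data array: the TX ring for queue q sits at index q and its RX partner at index queue_num + q, so a single bounds check (queue >= queue_num) covers both lookups. A minimal userspace model of that layout, with hypothetical names and sizes:

```c
#include <stdio.h>

#define QUEUE_NUM 4 /* hypothetical number of queue pairs */

struct vector { int id; };

/* flat layout modeled on the listing: tx rings first, rx rings after */
static struct vector ring_vectors[2 * QUEUE_NUM];

static int get_vectors(unsigned int queue,
		       struct vector **tx, struct vector **rx)
{
	if (queue >= QUEUE_NUM) {
		fprintf(stderr, "queue %u out of range [0, %d]\n",
			queue, QUEUE_NUM - 1);
		return -1;
	}
	*tx = &ring_vectors[queue];             /* tx ring at [queue] */
	*rx = &ring_vectors[QUEUE_NUM + queue]; /* rx at [queue_num + queue] */
	return 0;
}

int main(void)
{
	struct vector *tx, *rx;
	int i;

	for (i = 0; i < 2 * QUEUE_NUM; i++)
		ring_vectors[i].id = i;
	if (!get_vectors(2, &tx, &rx))
		printf("queue 2: tx vector %d, rx vector %d\n", tx->id, rx->id);
	return 0;
}
```

Keeping both halves in one array lets every per-queue path derive the TX and RX vectors from a single index instead of carrying two tables.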
queue             541 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c 	struct hnae3_queue *queue;
queue             547 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c 		queue = handle->kinfo.tqp[i];
queue             548 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c 		tqp = container_of(queue, struct hclge_tqp, q);
queue             566 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c 		queue = handle->kinfo.tqp[i];
queue             567 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c 		tqp = container_of(queue, struct hclge_tqp, q);
queue            6627 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c 	struct hnae3_queue *queue;
queue            6633 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c 		queue = handle->kinfo.tqp[i];
queue            6634 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c 		tqp = container_of(queue, struct hclge_tqp, q);
queue            8506 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c 	struct hnae3_queue *queue;
queue            8509 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c 	queue = handle->kinfo.tqp[queue_id];
queue            8510 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c 	tqp = container_of(queue, struct hclge_tqp, q);
queue             943 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h static inline int hclge_get_queue_id(struct hnae3_queue *queue)
queue             945 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h 	struct hclge_tqp *tqp = container_of(queue, struct hclge_tqp, q);
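hclge_get_queue_id and the hclge_main.c loops all recover the enclosing hclge_tqp from the embedded hnae3_queue via container_of, which is plain pointer arithmetic on the member offset. A self-contained sketch of the idiom (struct names here are stand-ins, not the driver's):

```c
#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct queue { int dummy; };

struct tqp {
	int index;       /* driver-private state wrapped around the queue */
	struct queue q;  /* embedded handle passed to generic code */
};

int main(void)
{
	struct tqp tqp = { .index = 7 };
	struct queue *q = &tqp.q;   /* what generic code hands back */
	struct tqp *owner = container_of(q, struct tqp, q);

	printf("recovered index = %d\n", owner->index); /* prints 7 */
	return 0;
}
```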
queue              25 drivers/net/ethernet/ibm/ehea/ehea_qmr.c static void *hw_qpageit_get_inc(struct hw_queue *queue)
queue              27 drivers/net/ethernet/ibm/ehea/ehea_qmr.c 	void *retvalue = hw_qeit_get(queue);
queue              29 drivers/net/ethernet/ibm/ehea/ehea_qmr.c 	queue->current_q_offset += queue->pagesize;
queue              30 drivers/net/ethernet/ibm/ehea/ehea_qmr.c 	if (queue->current_q_offset > queue->queue_length) {
queue              31 drivers/net/ethernet/ibm/ehea/ehea_qmr.c 		queue->current_q_offset -= queue->pagesize;
queue              40 drivers/net/ethernet/ibm/ehea/ehea_qmr.c static int hw_queue_ctor(struct hw_queue *queue, const u32 nr_of_pages,
queue              52 drivers/net/ethernet/ibm/ehea/ehea_qmr.c 	queue->queue_length = nr_of_pages * pagesize;
queue              53 drivers/net/ethernet/ibm/ehea/ehea_qmr.c 	queue->queue_pages = kmalloc_array(nr_of_pages, sizeof(void *),
queue              55 drivers/net/ethernet/ibm/ehea/ehea_qmr.c 	if (!queue->queue_pages)
queue              69 drivers/net/ethernet/ibm/ehea/ehea_qmr.c 			(queue->queue_pages)[i] = (struct ehea_page *)kpage;
queue              75 drivers/net/ethernet/ibm/ehea/ehea_qmr.c 	queue->current_q_offset = 0;
queue              76 drivers/net/ethernet/ibm/ehea/ehea_qmr.c 	queue->qe_size = qe_size;
queue              77 drivers/net/ethernet/ibm/ehea/ehea_qmr.c 	queue->pagesize = pagesize;
queue              78 drivers/net/ethernet/ibm/ehea/ehea_qmr.c 	queue->toggle_state = 1;
queue              83 drivers/net/ethernet/ibm/ehea/ehea_qmr.c 		if (!(queue->queue_pages)[i])
queue              85 drivers/net/ethernet/ibm/ehea/ehea_qmr.c 		free_page((unsigned long)(queue->queue_pages)[i]);
queue              90 drivers/net/ethernet/ibm/ehea/ehea_qmr.c static void hw_queue_dtor(struct hw_queue *queue)
queue              95 drivers/net/ethernet/ibm/ehea/ehea_qmr.c 	if (!queue || !queue->queue_pages)
queue              98 drivers/net/ethernet/ibm/ehea/ehea_qmr.c 	pages_per_kpage = PAGE_SIZE / queue->pagesize;
queue             100 drivers/net/ethernet/ibm/ehea/ehea_qmr.c 	nr_pages = queue->queue_length / queue->pagesize;
queue             103 drivers/net/ethernet/ibm/ehea/ehea_qmr.c 		free_page((unsigned long)(queue->queue_pages)[i]);
queue             105 drivers/net/ethernet/ibm/ehea/ehea_qmr.c 	kfree(queue->queue_pages);
queue             196 drivers/net/ethernet/ibm/ehea/ehea_qmr.h static inline void *hw_qeit_calc(struct hw_queue *queue, u64 q_offset)
queue             200 drivers/net/ethernet/ibm/ehea/ehea_qmr.h 	if (q_offset >= queue->queue_length)
queue             201 drivers/net/ethernet/ibm/ehea/ehea_qmr.h 		q_offset -= queue->queue_length;
queue             202 drivers/net/ethernet/ibm/ehea/ehea_qmr.h 	current_page = (queue->queue_pages)[q_offset >> EHEA_PAGESHIFT];
queue             206 drivers/net/ethernet/ibm/ehea/ehea_qmr.h static inline void *hw_qeit_get(struct hw_queue *queue)
queue             208 drivers/net/ethernet/ibm/ehea/ehea_qmr.h 	return hw_qeit_calc(queue, queue->current_q_offset);
queue             211 drivers/net/ethernet/ibm/ehea/ehea_qmr.h static inline void hw_qeit_inc(struct hw_queue *queue)
queue             213 drivers/net/ethernet/ibm/ehea/ehea_qmr.h 	queue->current_q_offset += queue->qe_size;
queue             214 drivers/net/ethernet/ibm/ehea/ehea_qmr.h 	if (queue->current_q_offset >= queue->queue_length) {
queue             215 drivers/net/ethernet/ibm/ehea/ehea_qmr.h 		queue->current_q_offset = 0;
queue             217 drivers/net/ethernet/ibm/ehea/ehea_qmr.h 		queue->toggle_state = (~queue->toggle_state) & 1;
queue             221 drivers/net/ethernet/ibm/ehea/ehea_qmr.h static inline void *hw_qeit_get_inc(struct hw_queue *queue)
queue             223 drivers/net/ethernet/ibm/ehea/ehea_qmr.h 	void *retvalue = hw_qeit_get(queue);
queue             224 drivers/net/ethernet/ibm/ehea/ehea_qmr.h 	hw_qeit_inc(queue);
queue             228 drivers/net/ethernet/ibm/ehea/ehea_qmr.h static inline void *hw_qeit_get_inc_valid(struct hw_queue *queue)
queue             230 drivers/net/ethernet/ibm/ehea/ehea_qmr.h 	struct ehea_cqe *retvalue = hw_qeit_get(queue);
queue             234 drivers/net/ethernet/ibm/ehea/ehea_qmr.h 	if ((valid >> 7) == (queue->toggle_state & 1)) {
queue             236 drivers/net/ethernet/ibm/ehea/ehea_qmr.h 		hw_qeit_inc(queue);
queue             237 drivers/net/ethernet/ibm/ehea/ehea_qmr.h 		pref = hw_qeit_calc(queue, queue->current_q_offset);
queue             245 drivers/net/ethernet/ibm/ehea/ehea_qmr.h static inline void *hw_qeit_get_valid(struct hw_queue *queue)
queue             247 drivers/net/ethernet/ibm/ehea/ehea_qmr.h 	struct ehea_cqe *retvalue = hw_qeit_get(queue);
queue             251 drivers/net/ethernet/ibm/ehea/ehea_qmr.h 	pref = hw_qeit_calc(queue, queue->current_q_offset);
queue             256 drivers/net/ethernet/ibm/ehea/ehea_qmr.h 	if (!((valid >> 7) == (queue->toggle_state & 1)))
queue             261 drivers/net/ethernet/ibm/ehea/ehea_qmr.h static inline void *hw_qeit_reset(struct hw_queue *queue)
queue             263 drivers/net/ethernet/ibm/ehea/ehea_qmr.h 	queue->current_q_offset = 0;
queue             264 drivers/net/ethernet/ibm/ehea/ehea_qmr.h 	return hw_qeit_get(queue);
queue             267 drivers/net/ethernet/ibm/ehea/ehea_qmr.h static inline void *hw_qeit_eq_get_inc(struct hw_queue *queue)
queue             269 drivers/net/ethernet/ibm/ehea/ehea_qmr.h 	u64 last_entry_in_q = queue->queue_length - queue->qe_size;
queue             272 drivers/net/ethernet/ibm/ehea/ehea_qmr.h 	retvalue = hw_qeit_get(queue);
queue             273 drivers/net/ethernet/ibm/ehea/ehea_qmr.h 	queue->current_q_offset += queue->qe_size;
queue             274 drivers/net/ethernet/ibm/ehea/ehea_qmr.h 	if (queue->current_q_offset > last_entry_in_q) {
queue             275 drivers/net/ethernet/ibm/ehea/ehea_qmr.h 		queue->current_q_offset = 0;
queue             276 drivers/net/ethernet/ibm/ehea/ehea_qmr.h 		queue->toggle_state = (~queue->toggle_state) & 1;
queue             281 drivers/net/ethernet/ibm/ehea/ehea_qmr.h static inline void *hw_eqit_eq_get_inc_valid(struct hw_queue *queue)
queue             283 drivers/net/ethernet/ibm/ehea/ehea_qmr.h 	void *retvalue = hw_qeit_get(queue);
queue             285 drivers/net/ethernet/ibm/ehea/ehea_qmr.h 	if ((qe >> 7) == (queue->toggle_state & 1))
queue             286 drivers/net/ethernet/ibm/ehea/ehea_qmr.h 		hw_qeit_eq_get_inc(queue);
queue             295 drivers/net/ethernet/ibm/ehea/ehea_qmr.h 	struct hw_queue *queue;
queue             298 drivers/net/ethernet/ibm/ehea/ehea_qmr.h 		queue = &qp->hw_rqueue1;
queue             300 drivers/net/ethernet/ibm/ehea/ehea_qmr.h 		queue = &qp->hw_rqueue2;
queue             302 drivers/net/ethernet/ibm/ehea/ehea_qmr.h 		queue = &qp->hw_rqueue3;
queue             304 drivers/net/ethernet/ibm/ehea/ehea_qmr.h 	return hw_qeit_get_inc(queue);
queue             310 drivers/net/ethernet/ibm/ehea/ehea_qmr.h 	struct hw_queue *queue = &my_qp->hw_squeue;
queue             313 drivers/net/ethernet/ibm/ehea/ehea_qmr.h 	*wqe_index = (queue->current_q_offset) >> (7 + EHEA_SG_SQ);
queue             327 drivers/net/ethernet/ibm/ehea/ehea_qmr.h 	struct hw_queue *queue = &qp->hw_rqueue1;
queue             329 drivers/net/ethernet/ibm/ehea/ehea_qmr.h 	*wqe_index = (queue->current_q_offset) >> (7 + EHEA_SG_RQ1);
queue             330 drivers/net/ethernet/ibm/ehea/ehea_qmr.h 	return hw_qeit_get_valid(queue);
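The ehea_qmr.h iterators above implement a circular queue in which a single toggle bit replaces head/tail comparison: hw_qeit_inc flips toggle_state on every wrap, and an entry is valid only while the producer-written valid bit (bit 7) matches the consumer's current toggle. A compact userspace model of that scheme, with the field names trimmed:

```c
#include <stdint.h>
#include <stdio.h>

#define QE_SIZE 8
#define NUM_QES 4

struct hwq {
	uint8_t  mem[NUM_QES * QE_SIZE];
	uint64_t offset;   /* current_q_offset */
	int      toggle;   /* toggle_state, flipped on wrap */
};

static void *qeit_get(struct hwq *q)
{
	return &q->mem[q->offset];
}

static void qeit_inc(struct hwq *q)
{
	q->offset += QE_SIZE;
	if (q->offset >= sizeof(q->mem)) {
		q->offset = 0;
		q->toggle = (~q->toggle) & 1; /* wrapped: flip the epoch bit */
	}
}

/* an entry is valid while its bit 7 matches the consumer's toggle */
static void *qeit_get_valid(struct hwq *q)
{
	uint8_t *qe = qeit_get(q);

	return ((qe[0] >> 7) == (q->toggle & 1)) ? qe : NULL;
}

int main(void)
{
	struct hwq q = { .toggle = 1 }; /* ctor starts with toggle_state = 1 */

	q.mem[0] = 0x80; /* producer marks entry 0 for epoch 1 via bit 7 */
	printf("entry 0 valid: %s\n", qeit_get_valid(&q) ? "yes" : "no");
	qeit_inc(&q);
	printf("entry 1 valid: %s\n", qeit_get_valid(&q) ? "yes" : "no");
	return 0;
}
```

The toggle bit acts as an epoch counter: the producer stamps each entry with the current epoch in bit 7, so the consumer can detect "nothing new" without any separate head/tail exchange with the hardware.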
queue            3321 drivers/net/ethernet/ibm/ibmvnic.c 	struct ibmvnic_crq_queue *queue = &adapter->crq;
queue            3324 drivers/net/ethernet/ibm/ibmvnic.c 	crq = &queue->msgs[queue->cur];
queue            3326 drivers/net/ethernet/ibm/ibmvnic.c 		if (++queue->cur == queue->size)
queue            3327 drivers/net/ethernet/ibm/ibmvnic.c 			queue->cur = 0;
queue            4643 drivers/net/ethernet/ibm/ibmvnic.c 	struct ibmvnic_crq_queue *queue = &adapter->crq;
queue            4648 drivers/net/ethernet/ibm/ibmvnic.c 	spin_lock_irqsave(&queue->lock, flags);
queue            4667 drivers/net/ethernet/ibm/ibmvnic.c 	spin_unlock_irqrestore(&queue->lock, flags);
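The ibmvnic lines show the plainer cursor style of ring consumer: index into a fixed msgs array, post-increment, and reset to zero on reaching size, all under the queue's spinlock. Sketched here with the struct fields abbreviated:

```c
#include <stdio.h>

#define CRQ_SIZE 4

struct crq_queue {
	int msgs[CRQ_SIZE];
	int cur;
	int size;
};

/* return the current slot and advance the cursor, wrapping at size */
static int *next_crq(struct crq_queue *q)
{
	int *crq = &q->msgs[q->cur];

	if (++q->cur == q->size)
		q->cur = 0;
	return crq;
}

int main(void)
{
	struct crq_queue q = { .msgs = {10, 11, 12, 13}, .size = CRQ_SIZE };
	int i;

	for (i = 0; i < 6; i++)   /* walks 10, 11, 12, 13, 10, 11 */
		printf("%d ", *next_crq(&q));
	printf("\n");
	return 0;
}
```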
queue             502 drivers/net/ethernet/intel/fm10k/fm10k_pf.c 	u16 vsi, queue, pc, q_idx;
queue             523 drivers/net/ethernet/intel/fm10k/fm10k_pf.c 		for (queue = 0; queue < queue_count; queue++, q_idx++) {
queue             539 drivers/net/ethernet/intel/fm10k/fm10k_pf.c 		for (queue = 0; queue < queue_count; queue++) {
queue            1329 drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h 	__le16	queue;
queue            1056 drivers/net/ethernet/intel/i40e/i40e_common.c void i40e_pre_tx_queue_cfg(struct i40e_hw *hw, u32 queue, bool enable)
queue            1058 drivers/net/ethernet/intel/i40e/i40e_common.c 	u32 abs_queue_idx = hw->func_caps.base_queue + queue;
queue            4341 drivers/net/ethernet/intel/i40e/i40e_common.c 				u16 vsi_seid, u16 queue, bool is_add,
queue            4360 drivers/net/ethernet/intel/i40e/i40e_common.c 		cmd->queue = cpu_to_le16(queue);
queue            2731 drivers/net/ethernet/intel/i40e/i40e_ethtool.c 			       int queue)
queue            2743 drivers/net/ethernet/intel/i40e/i40e_ethtool.c 	if (queue < 0)
queue            2744 drivers/net/ethernet/intel/i40e/i40e_ethtool.c 		queue = 0;
queue            2745 drivers/net/ethernet/intel/i40e/i40e_ethtool.c 	else if (queue >= vsi->num_queue_pairs)
queue            2748 drivers/net/ethernet/intel/i40e/i40e_ethtool.c 	rx_ring = vsi->rx_rings[queue];
queue            2749 drivers/net/ethernet/intel/i40e/i40e_ethtool.c 	tx_ring = vsi->tx_rings[queue];
queue            2795 drivers/net/ethernet/intel/i40e/i40e_ethtool.c static int i40e_get_per_queue_coalesce(struct net_device *netdev, u32 queue,
queue            2798 drivers/net/ethernet/intel/i40e/i40e_ethtool.c 	return __i40e_get_coalesce(netdev, ec, queue);
queue            2811 drivers/net/ethernet/intel/i40e/i40e_ethtool.c 				   int queue)
queue            2813 drivers/net/ethernet/intel/i40e/i40e_ethtool.c 	struct i40e_ring *rx_ring = vsi->rx_rings[queue];
queue            2814 drivers/net/ethernet/intel/i40e/i40e_ethtool.c 	struct i40e_ring *tx_ring = vsi->tx_rings[queue];
queue            2860 drivers/net/ethernet/intel/i40e/i40e_ethtool.c 			       int queue)
queue            2871 drivers/net/ethernet/intel/i40e/i40e_ethtool.c 	if (queue < 0) {
queue            2874 drivers/net/ethernet/intel/i40e/i40e_ethtool.c 	} else if (queue < vsi->num_queue_pairs) {
queue            2875 drivers/net/ethernet/intel/i40e/i40e_ethtool.c 		cur_rx_itr = vsi->rx_rings[queue]->itr_setting;
queue            2876 drivers/net/ethernet/intel/i40e/i40e_ethtool.c 		cur_tx_itr = vsi->tx_rings[queue]->itr_setting;
queue            2936 drivers/net/ethernet/intel/i40e/i40e_ethtool.c 	if (queue < 0) {
queue            2940 drivers/net/ethernet/intel/i40e/i40e_ethtool.c 		i40e_set_itr_per_queue(vsi, ec, queue);
queue            2967 drivers/net/ethernet/intel/i40e/i40e_ethtool.c static int i40e_set_per_queue_coalesce(struct net_device *netdev, u32 queue,
queue            2970 drivers/net/ethernet/intel/i40e/i40e_ethtool.c 	return __i40e_set_coalesce(netdev, ec, queue);
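Both the i40e getter and setter funnel through __i40e_{get,set}_coalesce with the ethtool per-queue convention visible above: a negative queue means the device-wide default (the getter falls back to queue 0, the setter loops over every queue), while anything at or beyond num_queue_pairs is rejected. A sketch of that dispatch, with hypothetical names:

```c
#include <stdio.h>

#define NUM_QUEUE_PAIRS 4 /* hypothetical */

static int itr[NUM_QUEUE_PAIRS];

static int set_one_queue(int queue, int val)
{
	itr[queue] = val;
	return 0;
}

/* queue < 0: apply to every queue; otherwise validate and apply to one */
static int set_coalesce(int queue, int val)
{
	int i;

	if (queue < 0) {
		for (i = 0; i < NUM_QUEUE_PAIRS; i++)
			set_one_queue(i, val);
		return 0;
	}
	if (queue >= NUM_QUEUE_PAIRS)
		return -1; /* the driver returns -EINVAL here */
	return set_one_queue(queue, val);
}

int main(void)
{
	int i;

	set_coalesce(-1, 100); /* device-wide default */
	set_coalesce(2, 250);  /* per-queue override */
	for (i = 0; i < NUM_QUEUE_PAIRS; i++)
		printf("queue %d itr %d\n", i, itr[i]);
	return 0;
}
```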
queue            1046 drivers/net/ethernet/intel/i40e/i40e_lan_hmc.c 						      u16 queue)
queue            1052 drivers/net/ethernet/intel/i40e/i40e_lan_hmc.c 				     I40E_HMC_LAN_TX, queue);
queue            1066 drivers/net/ethernet/intel/i40e/i40e_lan_hmc.c 						    u16 queue,
queue            1073 drivers/net/ethernet/intel/i40e/i40e_lan_hmc.c 				     I40E_HMC_LAN_TX, queue);
queue            1087 drivers/net/ethernet/intel/i40e/i40e_lan_hmc.c 						      u16 queue)
queue            1093 drivers/net/ethernet/intel/i40e/i40e_lan_hmc.c 				     I40E_HMC_LAN_RX, queue);
queue            1107 drivers/net/ethernet/intel/i40e/i40e_lan_hmc.c 						    u16 queue,
queue            1114 drivers/net/ethernet/intel/i40e/i40e_lan_hmc.c 				     I40E_HMC_LAN_RX, queue);
queue             148 drivers/net/ethernet/intel/i40e/i40e_lan_hmc.h 						      u16 queue);
queue             150 drivers/net/ethernet/intel/i40e/i40e_lan_hmc.h 						    u16 queue,
queue             153 drivers/net/ethernet/intel/i40e/i40e_lan_hmc.h 						      u16 queue);
queue             155 drivers/net/ethernet/intel/i40e/i40e_lan_hmc.h 						    u16 queue,
queue            8693 drivers/net/ethernet/intel/i40e/i40e_main.c 	u32 queue = le32_to_cpu(data->prtdcb_rupto);
queue            8700 drivers/net/ethernet/intel/i40e/i40e_main.c 		queue, qtx_ctl);
queue            10277 drivers/net/ethernet/intel/i40e/i40e_main.c 		u16 queue = ((reg & I40E_GL_MDET_TX_QUEUE_MASK) >>
queue            10282 drivers/net/ethernet/intel/i40e/i40e_main.c 				 event, queue, pf_num, vf_num);
queue            10292 drivers/net/ethernet/intel/i40e/i40e_main.c 		u16 queue = ((reg & I40E_GL_MDET_RX_QUEUE_MASK) >>
queue            10297 drivers/net/ethernet/intel/i40e/i40e_main.c 				 event, queue, func);
queue             310 drivers/net/ethernet/intel/i40e/i40e_prototype.h void i40e_pre_tx_queue_cfg(struct i40e_hw *hw, u32 queue, bool enable);
queue             394 drivers/net/ethernet/intel/i40e/i40e_prototype.h 				u16 vsi_seid, u16 queue, bool is_add,
queue             642 drivers/net/ethernet/intel/iavf/iavf_ethtool.c 			       struct ethtool_coalesce *ec, int queue)
queue             654 drivers/net/ethernet/intel/iavf/iavf_ethtool.c 	if (queue < 0)
queue             655 drivers/net/ethernet/intel/iavf/iavf_ethtool.c 		queue = 0;
queue             656 drivers/net/ethernet/intel/iavf/iavf_ethtool.c 	else if (queue >= adapter->num_active_queues)
queue             659 drivers/net/ethernet/intel/iavf/iavf_ethtool.c 	rx_ring = &adapter->rx_rings[queue];
queue             660 drivers/net/ethernet/intel/iavf/iavf_ethtool.c 	tx_ring = &adapter->tx_rings[queue];
queue             698 drivers/net/ethernet/intel/iavf/iavf_ethtool.c static int iavf_get_per_queue_coalesce(struct net_device *netdev, u32 queue,
queue             701 drivers/net/ethernet/intel/iavf/iavf_ethtool.c 	return __iavf_get_coalesce(netdev, ec, queue);
queue             713 drivers/net/ethernet/intel/iavf/iavf_ethtool.c 				   struct ethtool_coalesce *ec, int queue)
queue             715 drivers/net/ethernet/intel/iavf/iavf_ethtool.c 	struct iavf_ring *rx_ring = &adapter->rx_rings[queue];
queue             716 drivers/net/ethernet/intel/iavf/iavf_ethtool.c 	struct iavf_ring *tx_ring = &adapter->tx_rings[queue];
queue             751 drivers/net/ethernet/intel/iavf/iavf_ethtool.c 			       struct ethtool_coalesce *ec, int queue)
queue             779 drivers/net/ethernet/intel/iavf/iavf_ethtool.c 	if (queue < 0) {
queue             782 drivers/net/ethernet/intel/iavf/iavf_ethtool.c 	} else if (queue < adapter->num_active_queues) {
queue             783 drivers/net/ethernet/intel/iavf/iavf_ethtool.c 		iavf_set_itr_per_queue(adapter, ec, queue);
queue             814 drivers/net/ethernet/intel/iavf/iavf_ethtool.c static int iavf_set_per_queue_coalesce(struct net_device *netdev, u32 queue,
queue             817 drivers/net/ethernet/intel/iavf/iavf_ethtool.c 	return __iavf_set_coalesce(netdev, ec, queue);
queue            1242 drivers/net/ethernet/intel/ice/ice_main.c 		u16 queue = ((reg & GL_MDET_TX_PQM_QNUM_M) >>
queue            1247 drivers/net/ethernet/intel/ice/ice_main.c 				 event, queue, pf_num, vf_num);
queue            1260 drivers/net/ethernet/intel/ice/ice_main.c 		u16 queue = ((reg & GL_MDET_TX_TCLAN_QNUM_M) >>
queue            1265 drivers/net/ethernet/intel/ice/ice_main.c 				 event, queue, pf_num, vf_num);
queue            1278 drivers/net/ethernet/intel/ice/ice_main.c 		u16 queue = ((reg & GL_MDET_RX_QNUM_M) >>
queue            1283 drivers/net/ethernet/intel/ice/ice_main.c 				 event, queue, pf_num, vf_num);
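The ice (and i40e) malicious-driver-detection handlers recover the offending queue number from an event register with the usual mask-and-shift pair. The idiom is plain bitfield extraction; the mask and shift below are invented for illustration, not the hardware's layout:

```c
#include <stdint.h>
#include <stdio.h>

/* hypothetical register layout: queue number in bits 16..30 */
#define QNUM_M (0x7FFFu << 16)
#define QNUM_S 16

static uint16_t mdet_queue(uint32_t reg)
{
	return (uint16_t)((reg & QNUM_M) >> QNUM_S);
}

int main(void)
{
	uint32_t reg = (123u << QNUM_S) | 0x5; /* fake MDET event word */

	printf("event on queue %u\n", mdet_queue(reg)); /* prints 123 */
	return 0;
}
```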
queue             456 drivers/net/ethernet/intel/igb/igb.h 	u8 queue;
queue             731 drivers/net/ethernet/intel/igb/igb.h 				const u8 *addr, u8 queue, u8 flags);
queue             733 drivers/net/ethernet/intel/igb/igb.h 				const u8 *addr, u8 queue, u8 flags);
queue            1619 drivers/net/ethernet/intel/igb/igb_main.c static void set_tx_desc_fetch_prio(struct e1000_hw *hw, int queue,
queue            1625 drivers/net/ethernet/intel/igb/igb_main.c 	WARN_ON(queue < 0 || queue > 4);
queue            1627 drivers/net/ethernet/intel/igb/igb_main.c 	val = rd32(E1000_I210_TXDCTL(queue));
queue            1634 drivers/net/ethernet/intel/igb/igb_main.c 	wr32(E1000_I210_TXDCTL(queue), val);
queue            1637 drivers/net/ethernet/intel/igb/igb_main.c static void set_queue_mode(struct e1000_hw *hw, int queue, enum queue_mode mode)
queue            1642 drivers/net/ethernet/intel/igb/igb_main.c 	WARN_ON(queue < 0 || queue > 1);
queue            1644 drivers/net/ethernet/intel/igb/igb_main.c 	val = rd32(E1000_I210_TQAVCC(queue));
queue            1651 drivers/net/ethernet/intel/igb/igb_main.c 	wr32(E1000_I210_TQAVCC(queue), val);
queue            1688 drivers/net/ethernet/intel/igb/igb_main.c static void igb_config_tx_modes(struct igb_adapter *adapter, int queue)
queue            1690 drivers/net/ethernet/intel/igb/igb_main.c 	struct igb_ring *ring = adapter->tx_ring[queue];
queue            1697 drivers/net/ethernet/intel/igb/igb_main.c 	WARN_ON(queue < 0 || queue > 1);
queue            1704 drivers/net/ethernet/intel/igb/igb_main.c 		set_tx_desc_fetch_prio(hw, queue, TX_QUEUE_PRIO_HIGH);
queue            1705 drivers/net/ethernet/intel/igb/igb_main.c 		set_queue_mode(hw, queue, QUEUE_MODE_STREAM_RESERVATION);
queue            1707 drivers/net/ethernet/intel/igb/igb_main.c 		set_tx_desc_fetch_prio(hw, queue, TX_QUEUE_PRIO_LOW);
queue            1708 drivers/net/ethernet/intel/igb/igb_main.c 		set_queue_mode(hw, queue, QUEUE_MODE_STRICT_PRIORITY);
queue            1712 drivers/net/ethernet/intel/igb/igb_main.c 	if (ring->cbs_enable || queue == 0) {
queue            1722 drivers/net/ethernet/intel/igb/igb_main.c 		if (queue == 0 && !ring->cbs_enable) {
queue            1795 drivers/net/ethernet/intel/igb/igb_main.c 		tqavcc = rd32(E1000_I210_TQAVCC(queue));
queue            1798 drivers/net/ethernet/intel/igb/igb_main.c 		wr32(E1000_I210_TQAVCC(queue), tqavcc);
queue            1800 drivers/net/ethernet/intel/igb/igb_main.c 		wr32(E1000_I210_TQAVHC(queue),
queue            1805 drivers/net/ethernet/intel/igb/igb_main.c 		tqavcc = rd32(E1000_I210_TQAVCC(queue));
queue            1807 drivers/net/ethernet/intel/igb/igb_main.c 		wr32(E1000_I210_TQAVCC(queue), tqavcc);
queue            1810 drivers/net/ethernet/intel/igb/igb_main.c 		wr32(E1000_I210_TQAVHC(queue), 0);
queue            1858 drivers/net/ethernet/intel/igb/igb_main.c 		   queue,
queue            1863 drivers/net/ethernet/intel/igb/igb_main.c static int igb_save_txtime_params(struct igb_adapter *adapter, int queue,
queue            1868 drivers/net/ethernet/intel/igb/igb_main.c 	if (queue < 0 || queue > adapter->num_tx_queues)
queue            1871 drivers/net/ethernet/intel/igb/igb_main.c 	ring = adapter->tx_ring[queue];
queue            1877 drivers/net/ethernet/intel/igb/igb_main.c static int igb_save_cbs_params(struct igb_adapter *adapter, int queue,
queue            1883 drivers/net/ethernet/intel/igb/igb_main.c 	if (queue < 0 || queue > adapter->num_tx_queues)
queue            1886 drivers/net/ethernet/intel/igb/igb_main.c 	ring = adapter->tx_ring[queue];
queue            2541 drivers/net/ethernet/intel/igb/igb_main.c static void igb_offload_apply(struct igb_adapter *adapter, s32 queue)
queue            2548 drivers/net/ethernet/intel/igb/igb_main.c 	igb_config_tx_modes(adapter, queue);
queue            2565 drivers/net/ethernet/intel/igb/igb_main.c 	if (qopt->queue < 0 || qopt->queue > 1)
queue            2568 drivers/net/ethernet/intel/igb/igb_main.c 	err = igb_save_cbs_params(adapter, qopt->queue, qopt->enable,
queue            2574 drivers/net/ethernet/intel/igb/igb_main.c 	igb_offload_apply(adapter, qopt->queue);
queue            2799 drivers/net/ethernet/intel/igb/igb_main.c 	if (qopt->queue < 0 || qopt->queue > 1)
queue            2802 drivers/net/ethernet/intel/igb/igb_main.c 	err = igb_save_txtime_params(adapter, qopt->queue, qopt->enable);
queue            2806 drivers/net/ethernet/intel/igb/igb_main.c 	igb_offload_apply(adapter, qopt->queue);
queue            7143 drivers/net/ethernet/intel/igb/igb_main.c 		adapter->mac_table[i].queue = 0;
queue            7148 drivers/net/ethernet/intel/igb/igb_main.c static int igb_available_rars(struct igb_adapter *adapter, u8 queue)
queue            7163 drivers/net/ethernet/intel/igb/igb_main.c 		    (adapter->mac_table[i].queue != queue))
queue            7178 drivers/net/ethernet/intel/igb/igb_main.c 	mac_table->queue = adapter->vfs_allocated_count;
queue            7211 drivers/net/ethernet/intel/igb/igb_main.c 				    const u8 *addr, const u8 queue,
queue            7232 drivers/net/ethernet/intel/igb/igb_main.c 		adapter->mac_table[i].queue = queue;
queue            7243 drivers/net/ethernet/intel/igb/igb_main.c 			      const u8 queue)
queue            7245 drivers/net/ethernet/intel/igb/igb_main.c 	return igb_add_mac_filter_flags(adapter, addr, queue, 0);
queue            7255 drivers/net/ethernet/intel/igb/igb_main.c 				    const u8 *addr, const u8 queue,
queue            7275 drivers/net/ethernet/intel/igb/igb_main.c 		if (adapter->mac_table[i].queue != queue)
queue            7286 drivers/net/ethernet/intel/igb/igb_main.c 			adapter->mac_table[i].queue =
queue            7290 drivers/net/ethernet/intel/igb/igb_main.c 			adapter->mac_table[i].queue = 0;
queue            7302 drivers/net/ethernet/intel/igb/igb_main.c 			      const u8 queue)
queue            7304 drivers/net/ethernet/intel/igb/igb_main.c 	return igb_del_mac_filter_flags(adapter, addr, queue, 0);
queue            7308 drivers/net/ethernet/intel/igb/igb_main.c 				const u8 *addr, u8 queue, u8 flags)
queue            7318 drivers/net/ethernet/intel/igb/igb_main.c 	return igb_add_mac_filter_flags(adapter, addr, queue,
queue            7323 drivers/net/ethernet/intel/igb/igb_main.c 				const u8 *addr, u8 queue, u8 flags)
queue            7325 drivers/net/ethernet/intel/igb/igb_main.c 	return igb_del_mac_filter_flags(adapter, addr, queue,
queue            9113 drivers/net/ethernet/intel/igb/igb_main.c 				    adapter->mac_table[index].queue;
queue            9117 drivers/net/ethernet/intel/igb/igb_main.c 				    adapter->mac_table[index].queue;
queue            9140 drivers/net/ethernet/intel/igb/igb_main.c 	adapter->mac_table[rar_entry].queue = vf;
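igb mirrors its receive-address registers in a small mac_table whose entries pair an address with the queue (pool) it steers to: add scans for a free slot, and delete requires both the address and the queue to match before the slot is cleared, as the loops above show. A linear-scan model of that table, with the sizes and flag set reduced:

```c
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define TABLE_SIZE 4
#define F_IN_USE   0x1

struct mac_entry {
	uint8_t addr[6];
	uint8_t queue;  /* pool/VF the address steers to */
	uint8_t flags;
};

static struct mac_entry table[TABLE_SIZE];

static int add_filter(const uint8_t *addr, uint8_t queue)
{
	int i;

	for (i = 0; i < TABLE_SIZE; i++) {
		if (table[i].flags & F_IN_USE)
			continue;
		memcpy(table[i].addr, addr, 6);
		table[i].queue = queue;
		table[i].flags |= F_IN_USE;
		return i;
	}
	return -1; /* table full */
}

static int del_filter(const uint8_t *addr, uint8_t queue)
{
	int i;

	for (i = 0; i < TABLE_SIZE; i++) {
		if (!(table[i].flags & F_IN_USE))
			continue;
		if (memcmp(table[i].addr, addr, 6) || table[i].queue != queue)
			continue; /* both addr and queue must match */
		memset(&table[i], 0, sizeof(table[i]));
		return 0;
	}
	return -1; /* no such filter */
}

int main(void)
{
	uint8_t mac[6] = { 0x02, 0, 0, 0, 0, 0x42 };

	printf("add -> slot %d\n", add_filter(mac, 3));
	printf("del(queue 1) -> %d (queue mismatch)\n", del_filter(mac, 1));
	printf("del(queue 3) -> %d\n", del_filter(mac, 3));
	return 0;
}
```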
queue              37 drivers/net/ethernet/intel/igc/igc.h 				const u8 *addr, u8 queue, u8 flags);
queue              39 drivers/net/ethernet/intel/igc/igc.h 				const u8 *addr, u8 queue, u8 flags);
queue             338 drivers/net/ethernet/intel/igc/igc.h 	u8 queue;
queue            2380 drivers/net/ethernet/intel/igc/igc_main.c 			adapter->mac_table[index].queue;
queue            2427 drivers/net/ethernet/intel/igc/igc_main.c 				    const u8 *addr, const u8 queue,
queue            2447 drivers/net/ethernet/intel/igc/igc_main.c 		adapter->mac_table[i].queue = queue;
queue            2458 drivers/net/ethernet/intel/igc/igc_main.c 				const u8 *addr, u8 queue, u8 flags)
queue            2460 drivers/net/ethernet/intel/igc/igc_main.c 	return igc_add_mac_filter_flags(adapter, addr, queue,
queue            2471 drivers/net/ethernet/intel/igc/igc_main.c 				    const u8 *addr, const u8 queue,
queue            2490 drivers/net/ethernet/intel/igc/igc_main.c 		if (adapter->mac_table[i].queue != queue)
queue            2503 drivers/net/ethernet/intel/igc/igc_main.c 			adapter->mac_table[i].queue = 0;
queue            2515 drivers/net/ethernet/intel/igc/igc_main.c 				const u8 *addr, u8 queue, u8 flags)
queue            2517 drivers/net/ethernet/intel/igc/igc_main.c 	return igc_del_mac_filter_flags(adapter, addr, queue,
queue             878 drivers/net/ethernet/intel/ixgbe/ixgbe.h 			 const u8 *addr, u16 queue);
queue             880 drivers/net/ethernet/intel/ixgbe/ixgbe.h 			 const u8 *addr, u16 queue);
queue             897 drivers/net/ethernet/intel/ixgbe/ixgbe.h 					  u8 queue);
queue             902 drivers/net/ethernet/intel/ixgbe/ixgbe.h 					  u16 soft_id, u8 queue);
queue            1366 drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c 					  u8 queue)
queue            1397 drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c 	fdircmd |= (u32)queue << IXGBE_FDIRCMD_RX_QUEUE_SHIFT;
queue            1409 drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c 	hw_dbg(hw, "Tx Queue=%x hash=%x\n", queue, (u32)fdirhashcmd);
queue            1632 drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c 					  u16 soft_id, u8 queue)
queue            1677 drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c 	if (queue == IXGBE_FDIR_DROP_QUEUE)
queue            1680 drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c 	fdircmd |= (u32)queue << IXGBE_FDIRCMD_RX_QUEUE_SHIFT;
queue            2682 drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c 	u8 queue;
queue            2692 drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c 		queue = IXGBE_FDIR_DROP_QUEUE;
queue            2706 drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c 			queue = adapter->rx_ring[ring]->reg_idx;
queue            2708 drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c 			queue = ((vf - 1) *
queue            2788 drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c 				&input->filter, input->sw_idx, queue);
queue             857 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 			   u8 queue, u8 msix_vector)
queue             866 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 		index = (((direction * 64) + queue) >> 2) & 0x1F;
queue             868 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 		ivar &= ~(0xFF << (8 * (queue & 0x3)));
queue             869 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 		ivar |= (msix_vector << (8 * (queue & 0x3)));
queue             880 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 			index = ((queue & 1) * 8);
queue             889 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 			index = ((16 * (queue & 1)) + (8 * direction));
queue             890 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 			ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(queue >> 1));
queue             893 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 			IXGBE_WRITE_REG(hw, IXGBE_IVAR(queue >> 1), ivar);
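ixgbe_set_ivar packs four 8-bit MSI-X vector assignments into each 32-bit IVAR register; on the 82599-class path above the byte lane is (16 * (queue & 1)) + (8 * direction), and the register itself is selected by queue >> 1, so each register covers two queues in both directions. The read-modify-write can be modeled directly:

```c
#include <stdint.h>
#include <stdio.h>

/* model of one IVAR register: 4 byte lanes, 2 queues x 2 directions */
static uint32_t ivar_reg;

static void set_ivar(int direction, int queue, uint8_t msix_vector)
{
	int index = (16 * (queue & 1)) + (8 * direction);

	ivar_reg &= ~(0xFFu << index);          /* clear the byte lane */
	ivar_reg |= (uint32_t)msix_vector << index;
}

int main(void)
{
	set_ivar(0, 0, 0x11); /* rx, even queue -> bits  7..0  */
	set_ivar(1, 0, 0x22); /* tx, even queue -> bits 15..8  */
	set_ivar(0, 1, 0x33); /* rx, odd queue  -> bits 23..16 */
	set_ivar(1, 1, 0x44); /* tx, odd queue  -> bits 31..24 */
	printf("ivar = 0x%08x\n", ivar_reg);    /* 0x44332211 */
	return 0;
}
```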
queue            5242 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 	u8 queue;
queue            5252 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 			queue = IXGBE_FDIR_DROP_QUEUE;
queue            5271 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 				queue = adapter->rx_ring[ring]->reg_idx;
queue            5273 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 				queue = ((vf - 1) *
queue            5278 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 				&filter->filter, filter->sw_idx, queue);
queue            9256 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 	u8 queue;
queue            9268 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 			data->queue = adapter->rx_ring[vadapter->rx_base_queue]->reg_idx;
queue            9269 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 			data->action = data->queue;
queue            9278 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 				  u8 *queue, u64 *action)
queue            9289 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 			*queue = vf * __ALIGN_MASK(1, ~vmdq->mask);
queue            9300 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 	data.queue = 0;
queue            9304 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 		*queue = data.queue;
queue            9313 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 			    struct tcf_exts *exts, u64 *action, u8 *queue)
queue            9325 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 			*queue = IXGBE_FDIR_DROP_QUEUE;
queue            9336 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 						      queue, action);
queue            9346 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 			    struct tcf_exts *exts, u64 *action, u8 *queue)
queue            9415 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 	u8 queue;
queue            9558 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 			       &queue);
queue            9578 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 						    input->sw_idx, queue);
queue             702 drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c 	u32 queue;
queue             746 drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c 	for (queue = 0; queue < q_per_pool; queue++) {
queue             747 drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c 		unsigned int reg_idx = (vf * q_per_pool) + queue;
queue            1520 drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c 	u16 queue, queues_per_pool;
queue            1559 drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c 	for (queue = 0; queue < queues_per_pool; queue++) {
queue            1560 drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c 		unsigned int reg_idx = (vf * queues_per_pool) + queue;
queue              22 drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.h int ixgbe_ndo_set_vf_mac(struct net_device *netdev, int queue, u8 *mac);
queue              23 drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.h int ixgbe_ndo_set_vf_vlan(struct net_device *netdev, int queue, u16 vlan,
queue             170 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c 			     u8 queue, u8 msix_vector)
queue             185 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c 		index = ((16 * (queue & 1)) + (8 * direction));
queue             186 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c 		ivar = IXGBE_READ_REG(hw, IXGBE_VTIVAR(queue >> 1));
queue             189 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c 		IXGBE_WRITE_REG(hw, IXGBE_VTIVAR(queue >> 1), ivar);
queue             457 drivers/net/ethernet/lantiq_etop.c 	int queue = skb_get_queue_mapping(skb);
queue             458 drivers/net/ethernet/lantiq_etop.c 	struct netdev_queue *txq = netdev_get_tx_queue(dev, queue);
queue             460 drivers/net/ethernet/lantiq_etop.c 	struct ltq_etop_chan *ch = &priv->ch[(queue << 1) | 1];
queue             999 drivers/net/ethernet/marvell/mv643xx_eth.c 	int length, queue, ret;
queue            1003 drivers/net/ethernet/marvell/mv643xx_eth.c 	queue = skb_get_queue_mapping(skb);
queue            1004 drivers/net/ethernet/marvell/mv643xx_eth.c 	txq = mp->txq + queue;
queue            1005 drivers/net/ethernet/marvell/mv643xx_eth.c 	nq = netdev_get_tx_queue(dev, queue);
queue            2254 drivers/net/ethernet/marvell/mv643xx_eth.c 		int queue;
queue            2274 drivers/net/ethernet/marvell/mv643xx_eth.c 		queue = fls(queue_mask) - 1;
queue            2275 drivers/net/ethernet/marvell/mv643xx_eth.c 		queue_mask = 1 << queue;
queue            2282 drivers/net/ethernet/marvell/mv643xx_eth.c 			txq_kick(mp->txq + queue);
queue            2284 drivers/net/ethernet/marvell/mv643xx_eth.c 			work_done += txq_reclaim(mp->txq + queue, work_tbd, 0);
queue            2285 drivers/net/ethernet/marvell/mv643xx_eth.c 			txq_maybe_wake(mp->txq + queue);
queue            2287 drivers/net/ethernet/marvell/mv643xx_eth.c 			work_done += rxq_process(mp->rxq + queue, work_tbd);
queue            2289 drivers/net/ethernet/marvell/mv643xx_eth.c 			work_done += rxq_refill(mp->rxq + queue, work_tbd);
queue            1149 drivers/net/ethernet/marvell/mvneta.c 	int queue;
queue            1154 drivers/net/ethernet/marvell/mvneta.c 	for (queue = 0; queue < txq_number; queue++) {
queue            1155 drivers/net/ethernet/marvell/mvneta.c 		struct mvneta_tx_queue *txq = &pp->txqs[queue];
queue            1157 drivers/net/ethernet/marvell/mvneta.c 			q_map |= (1 << queue);
queue            1163 drivers/net/ethernet/marvell/mvneta.c 	for (queue = 0; queue < rxq_number; queue++) {
queue            1164 drivers/net/ethernet/marvell/mvneta.c 		struct mvneta_rx_queue *rxq = &pp->rxqs[queue];
queue            1167 drivers/net/ethernet/marvell/mvneta.c 			q_map |= (1 << queue);
queue            1270 drivers/net/ethernet/marvell/mvneta.c static void mvneta_set_ucast_table(struct mvneta_port *pp, int queue)
queue            1275 drivers/net/ethernet/marvell/mvneta.c 	if (queue == -1) {
queue            1278 drivers/net/ethernet/marvell/mvneta.c 		val = 0x1 | (queue << 1);
queue            1287 drivers/net/ethernet/marvell/mvneta.c static void mvneta_set_special_mcast_table(struct mvneta_port *pp, int queue)
queue            1292 drivers/net/ethernet/marvell/mvneta.c 	if (queue == -1) {
queue            1295 drivers/net/ethernet/marvell/mvneta.c 		val = 0x1 | (queue << 1);
queue            1305 drivers/net/ethernet/marvell/mvneta.c static void mvneta_set_other_mcast_table(struct mvneta_port *pp, int queue)
queue            1310 drivers/net/ethernet/marvell/mvneta.c 	if (queue == -1) {
queue            1315 drivers/net/ethernet/marvell/mvneta.c 		val = 0x1 | (queue << 1);
queue            1372 drivers/net/ethernet/marvell/mvneta.c 	int queue;
queue            1425 drivers/net/ethernet/marvell/mvneta.c 	for (queue = 0; queue < txq_number; queue++) {
queue            1426 drivers/net/ethernet/marvell/mvneta.c 		mvreg_write(pp, MVETH_TXQ_TOKEN_COUNT_REG(queue), 0);
queue            1427 drivers/net/ethernet/marvell/mvneta.c 		mvreg_write(pp, MVETH_TXQ_TOKEN_CFG_REG(queue), 0);
queue            1492 drivers/net/ethernet/marvell/mvneta.c 	int queue;
queue            1514 drivers/net/ethernet/marvell/mvneta.c 	for (queue = 0; queue < txq_number; queue++) {
queue            1515 drivers/net/ethernet/marvell/mvneta.c 		val = mvreg_read(pp, MVNETA_TXQ_TOKEN_SIZE_REG(queue));
queue            1522 drivers/net/ethernet/marvell/mvneta.c 			mvreg_write(pp, MVNETA_TXQ_TOKEN_SIZE_REG(queue), val);
queue            1529 drivers/net/ethernet/marvell/mvneta.c 				  int queue)
queue            1546 drivers/net/ethernet/marvell/mvneta.c 	if (queue == -1) {
queue            1551 drivers/net/ethernet/marvell/mvneta.c 		unicast_reg |= ((0x01 | (queue << 1)) << (8 * reg_offset));
queue            1559 drivers/net/ethernet/marvell/mvneta.c 				int queue)
queue            1564 drivers/net/ethernet/marvell/mvneta.c 	if (queue != -1) {
queue            1574 drivers/net/ethernet/marvell/mvneta.c 	mvneta_set_ucast_addr(pp, addr[5], queue);
queue            1761 drivers/net/ethernet/marvell/mvneta.c 	int queue = fls(cause) - 1;
queue            1763 drivers/net/ethernet/marvell/mvneta.c 	return &pp->txqs[queue];
queue            2574 drivers/net/ethernet/marvell/mvneta.c 					  int queue)
queue            2588 drivers/net/ethernet/marvell/mvneta.c 	if (queue == -1)
queue            2592 drivers/net/ethernet/marvell/mvneta.c 		smc_table_reg |= ((0x01 | (queue << 1)) << (8 * reg_offset));
queue            2609 drivers/net/ethernet/marvell/mvneta.c 					int queue)
queue            2620 drivers/net/ethernet/marvell/mvneta.c 	if (queue == -1) {
queue            2625 drivers/net/ethernet/marvell/mvneta.c 		omc_table_reg |= ((0x01 | (queue << 1)) << (8 * reg_offset));
queue            2641 drivers/net/ethernet/marvell/mvneta.c 				 int queue)
queue            2646 drivers/net/ethernet/marvell/mvneta.c 		mvneta_set_special_mcast_addr(pp, p_addr[5], queue);
queue            2651 drivers/net/ethernet/marvell/mvneta.c 	if (queue == -1) {
queue            2668 drivers/net/ethernet/marvell/mvneta.c 	mvneta_set_other_mcast_addr(pp, crc_result, queue);
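Throughout the mvneta filter-table helpers above, one table entry occupies one byte of a 32-bit register: the low bit marks the entry as a "pass" and the next bits carry the destination rx queue, hence the recurring 0x1 | (queue << 1) shifted by 8 * reg_offset, with queue == -1 clearing the entry. A model of updating one such packed register:

```c
#include <stdint.h>
#include <stdio.h>

/* four table entries per 32-bit register, one byte each */
static uint32_t table_reg;

static void set_entry(unsigned int offset /* 0..3 */, int queue)
{
	table_reg &= ~(0xFFu << (8 * offset)); /* drop the old entry */
	if (queue != -1)                       /* -1 means "reject" */
		table_reg |= (uint32_t)(0x1 | (queue << 1)) << (8 * offset);
}

int main(void)
{
	set_entry(0, 2);  /* pass, rx queue 2 -> byte 0 = 0x05 */
	set_entry(3, 0);  /* pass, rx queue 0 -> byte 3 = 0x01 */
	printf("reg = 0x%08x\n", table_reg); /* 0x01000005 */
	set_entry(0, -1); /* clear entry 0 */
	printf("reg = 0x%08x\n", table_reg); /* 0x01000000 */
	return 0;
}
```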
queue            2875 drivers/net/ethernet/marvell/mvneta.c 	int queue;
queue            2878 drivers/net/ethernet/marvell/mvneta.c 	for (queue = 0; queue < txq_number; queue++)
queue            2879 drivers/net/ethernet/marvell/mvneta.c 		mvneta_txq_done_force(pp, &pp->txqs[queue]);
queue            3114 drivers/net/ethernet/marvell/mvneta.c 	int queue;
queue            3116 drivers/net/ethernet/marvell/mvneta.c 	for (queue = 0; queue < txq_number; queue++)
queue            3117 drivers/net/ethernet/marvell/mvneta.c 		mvneta_txq_deinit(pp, &pp->txqs[queue]);
queue            3123 drivers/net/ethernet/marvell/mvneta.c 	int queue;
queue            3125 drivers/net/ethernet/marvell/mvneta.c 	for (queue = 0; queue < rxq_number; queue++)
queue            3126 drivers/net/ethernet/marvell/mvneta.c 		mvneta_rxq_deinit(pp, &pp->rxqs[queue]);
queue            3133 drivers/net/ethernet/marvell/mvneta.c 	int queue;
queue            3135 drivers/net/ethernet/marvell/mvneta.c 	for (queue = 0; queue < rxq_number; queue++) {
queue            3136 drivers/net/ethernet/marvell/mvneta.c 		int err = mvneta_rxq_init(pp, &pp->rxqs[queue]);
queue            3140 drivers/net/ethernet/marvell/mvneta.c 				   __func__, queue);
queue            3152 drivers/net/ethernet/marvell/mvneta.c 	int queue;
queue            3154 drivers/net/ethernet/marvell/mvneta.c 	for (queue = 0; queue < txq_number; queue++) {
queue            3155 drivers/net/ethernet/marvell/mvneta.c 		int err = mvneta_txq_init(pp, &pp->txqs[queue]);
queue            3158 drivers/net/ethernet/marvell/mvneta.c 				   __func__, queue);
queue            3983 drivers/net/ethernet/marvell/mvneta.c 	int queue;
queue            3985 drivers/net/ethernet/marvell/mvneta.c 	for (queue = 0; queue < rxq_number; queue++) {
queue            3986 drivers/net/ethernet/marvell/mvneta.c 		struct mvneta_rx_queue *rxq = &pp->rxqs[queue];
queue            3993 drivers/net/ethernet/marvell/mvneta.c 	for (queue = 0; queue < txq_number; queue++) {
queue            3994 drivers/net/ethernet/marvell/mvneta.c 		struct mvneta_tx_queue *txq = &pp->txqs[queue];
queue            4375 drivers/net/ethernet/marvell/mvneta.c 	int queue;
queue            4388 drivers/net/ethernet/marvell/mvneta.c 	for (queue = 0; queue < txq_number; queue++) {
queue            4389 drivers/net/ethernet/marvell/mvneta.c 		struct mvneta_tx_queue *txq = &pp->txqs[queue];
queue            4390 drivers/net/ethernet/marvell/mvneta.c 		txq->id = queue;
queue            4400 drivers/net/ethernet/marvell/mvneta.c 	for (queue = 0; queue < rxq_number; queue++) {
queue            4401 drivers/net/ethernet/marvell/mvneta.c 		struct mvneta_rx_queue *rxq = &pp->rxqs[queue];
queue            4402 drivers/net/ethernet/marvell/mvneta.c 		rxq->id = queue;
queue            4759 drivers/net/ethernet/marvell/mvneta.c 	int queue;
queue            4781 drivers/net/ethernet/marvell/mvneta.c 	for (queue = 0; queue < rxq_number; queue++) {
queue            4782 drivers/net/ethernet/marvell/mvneta.c 		struct mvneta_rx_queue *rxq = &pp->rxqs[queue];
queue            4787 drivers/net/ethernet/marvell/mvneta.c 	for (queue = 0; queue < txq_number; queue++) {
queue            4788 drivers/net/ethernet/marvell/mvneta.c 		struct mvneta_tx_queue *txq = &pp->txqs[queue];
queue            4806 drivers/net/ethernet/marvell/mvneta.c 	int err, queue;
queue            4832 drivers/net/ethernet/marvell/mvneta.c 	for (queue = 0; queue < rxq_number; queue++) {
queue            4833 drivers/net/ethernet/marvell/mvneta.c 		struct mvneta_rx_queue *rxq = &pp->rxqs[queue];
queue            4839 drivers/net/ethernet/marvell/mvneta.c 	for (queue = 0; queue < txq_number; queue++) {
queue            4840 drivers/net/ethernet/marvell/mvneta.c 		struct mvneta_tx_queue *txq = &pp->txqs[queue];
queue            1124 drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.c 		if (act->queue.ctx)
queue            1139 drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.c 		if (act->queue.ctx) {
queue            1141 drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.c 			ctx = mvpp22_rss_ctx(port, act->queue.ctx);
queue            1148 drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.c 			qh = ((act->queue.index + port->first_rxq) >> 3) &
queue            1150 drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.c 			ql = (act->queue.index + port->first_rxq) &
queue            1316 drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.c 	if (act->queue.ctx && act->queue.index)
queue            1696 drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c 	int tx_port_num, val, queue, lrxq;
queue            1717 drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c 	for (queue = 0; queue < MVPP2_MAX_TXQ; queue++)
queue            1719 drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c 			    MVPP2_TXQ_SCHED_TOKEN_CNTR_REG(queue), 0);
queue            1741 drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c 		queue = port->rxqs[lrxq]->id;
queue            1742 drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c 		val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(queue));
queue            1745 drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c 		mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(queue), val);
queue            1756 drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c 	int lrxq, queue;
queue            1759 drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c 		queue = port->rxqs[lrxq]->id;
queue            1760 drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c 		val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(queue));
queue            1762 drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c 		mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(queue), val);
queue            1769 drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c 	int lrxq, queue;
queue            1772 drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c 		queue = port->rxqs[lrxq]->id;
queue            1773 drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c 		val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(queue));
queue            1775 drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c 		mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(queue), val);
queue            1785 drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c 	int queue;
queue            1790 drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c 	for (queue = 0; queue < port->ntxqs; queue++) {
queue            1791 drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c 		struct mvpp2_tx_queue *txq = port->txqs[queue];
queue            1794 drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c 			qmap |= (1 << queue);
queue            2078 drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c 	int queue;
queue            2084 drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c 	for (queue = 0; queue < port->ntxqs; queue++) {
queue            2085 drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c 		int id = port->txqs[queue]->id;
queue            2253 drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c 	int queue = fls(cause) - 1;
queue            2255 drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c 	return port->rxqs[queue];
queue            2261 drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c 	int queue = fls(cause) - 1;
queue            2263 drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c 	return port->txqs[queue];
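mvneta and mvpp2 both map an interrupt cause bitmask to a queue with fls(cause) - 1: service the highest-numbered pending queue first, and let the caller clear that bit and loop until the mask is empty. The same pattern in portable userspace C, with a hand-rolled fls:

```c
#include <stdio.h>

/* find-last-set: 1-based index of the highest set bit, 0 if none,
 * matching the semantics of the kernel's fls()
 */
static int fls_u32(unsigned int x)
{
	int r = 0;

	while (x) {
		x >>= 1;
		r++;
	}
	return r;
}

int main(void)
{
	unsigned int cause = 0x16; /* queues 1, 2 and 4 pending */

	while (cause) {
		int queue = fls_u32(cause) - 1; /* highest pending queue */

		printf("servicing queue %d\n", queue);
		cause &= ~(1u << queue);        /* done, clear its bit */
	}
	return 0;
}
```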
queue            2638 drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c 	int queue;
queue            2647 drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c 	for (queue = 0; queue < port->ntxqs; queue++) {
queue            2648 drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c 		txq = port->txqs[queue];
queue            2662 drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c 	int queue;
queue            2664 drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c 	for (queue = 0; queue < port->nrxqs; queue++)
queue            2665 drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c 		mvpp2_rxq_deinit(port, port->rxqs[queue]);
queue            2671 drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c 	int queue, err;
queue            2673 drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c 	for (queue = 0; queue < port->nrxqs; queue++) {
queue            2674 drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c 		err = mvpp2_rxq_init(port, port->rxqs[queue]);
queue            2689 drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c 	int queue, err, cpu;
queue            2691 drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c 	for (queue = 0; queue < port->ntxqs; queue++) {
queue            2692 drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c 		txq = port->txqs[queue];
queue            2698 drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c 		cpu = queue % num_present_cpus();
queue            2699 drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c 		netif_set_xps_queue(port->dev, cpumask_of(cpu), queue);
queue            2704 drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c 		for (queue = 0; queue < port->ntxqs; queue++) {
queue            2705 drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c 			txq = port->txqs[queue];
queue            4043 drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c 	int queue;
queue            4045 drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c 	for (queue = 0; queue < port->nrxqs; queue++) {
queue            4046 drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c 		struct mvpp2_rx_queue *rxq = port->rxqs[queue];
queue            4059 drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c 	for (queue = 0; queue < port->ntxqs; queue++) {
queue            4060 drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c 		struct mvpp2_tx_queue *txq = port->txqs[queue];
queue            4546 drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c 	int queue, err;
queue            4570 drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c 	for (queue = 0; queue < port->ntxqs; queue++) {
queue            4571 drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c 		int queue_phy_id = mvpp2_txq_phys(port->id, queue);
queue            4587 drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c 		txq->log_id = queue;
queue            4594 drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c 		port->txqs[queue] = txq;
queue            4605 drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c 	for (queue = 0; queue < port->nrxqs; queue++) {
queue            4615 drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c 		rxq->id = port->first_rxq + queue;
queue            4617 drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c 		rxq->logic_rxq = queue;
queue            4619 drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c 		port->rxqs[queue] = rxq;
queue            4625 drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c 	for (queue = 0; queue < port->nrxqs; queue++) {
queue            4626 drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c 		struct mvpp2_rx_queue *rxq = port->rxqs[queue];
queue            4661 drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c 	for (queue = 0; queue < port->ntxqs; queue++) {
queue            4662 drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c 		if (!port->txqs[queue])
queue            4664 drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c 		free_percpu(port->txqs[queue]->pcpu);
queue             538 drivers/net/ethernet/marvell/skge.h #define RB_ADDR(offs, queue) ((u16)B16_RAM_REGS + (u16)(queue) + (offs))
queue             824 drivers/net/ethernet/marvell/sky2.h #define RB_ADDR(offs, queue) ((u16) B16_RAM_REGS + (queue) + (offs))
queue            2559 drivers/net/ethernet/mellanox/mlx4/en_netdev.c static int mlx4_en_set_vf_mac(struct net_device *dev, int queue, u8 *mac)
queue            2564 drivers/net/ethernet/mellanox/mlx4/en_netdev.c 	return mlx4_set_vf_mac(mdev->dev, en_priv->port, queue, mac);
queue            2638 drivers/net/ethernet/myricom/myri10ge/myri10ge.c 	u16 pseudo_hdr_offset, cksum_offset, queue;
queue            2642 drivers/net/ethernet/myricom/myri10ge/myri10ge.c 	queue = skb_get_queue_mapping(skb);
queue            2643 drivers/net/ethernet/myricom/myri10ge/myri10ge.c 	ss = &mgp->ss[queue];
queue            2644 drivers/net/ethernet/myricom/myri10ge/myri10ge.c 	netdev_queue = netdev_get_tx_queue(mgp->dev, queue);
queue            4001 drivers/net/ethernet/neterion/s2io.c 	u16 frg_cnt, frg_len, i, queue, queue_len, put_off, get_off;
queue            4030 drivers/net/ethernet/neterion/s2io.c 	queue = 0;
queue            4045 drivers/net/ethernet/neterion/s2io.c 					queue = (ntohs(th->source) +
queue            4048 drivers/net/ethernet/neterion/s2io.c 					if (queue >= queue_len)
queue            4049 drivers/net/ethernet/neterion/s2io.c 						queue = queue_len - 1;
queue            4052 drivers/net/ethernet/neterion/s2io.c 					queue = (ntohs(th->source) +
queue            4055 drivers/net/ethernet/neterion/s2io.c 					if (queue >= queue_len)
queue            4056 drivers/net/ethernet/neterion/s2io.c 						queue = queue_len - 1;
queue            4057 drivers/net/ethernet/neterion/s2io.c 					queue += sp->udp_fifo_idx;
queue            4065 drivers/net/ethernet/neterion/s2io.c 		queue = config->fifo_mapping
queue            4067 drivers/net/ethernet/neterion/s2io.c 	fifo = &mac_control->fifos[queue];
queue            4144 drivers/net/ethernet/neterion/s2io.c 	tx_fifo = mac_control->tx_FIFO_start[queue];
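The s2io TX path above steers packets by hashing the TCP/UDP source and destination ports into a fifo index and clamping to the last fifo when the hash overshoots (UDP traffic is additionally offset by udp_fifo_idx). The listing shows only the sum and the clamp; the sketch below assumes a power-of-two selector mask, which is what makes the clamp reachable when the fifo count is not a power of two:

```c
#include <stdint.h>
#include <stdio.h>

/* round up to a power-of-two mask (assumed selector scheme) */
static unsigned int mask_for(unsigned int len)
{
	unsigned int m = 1;

	while (m < len)
		m <<= 1;
	return m - 1;
}

static unsigned int pick_fifo(uint16_t sport, uint16_t dport,
			      unsigned int len)
{
	unsigned int fifo = (sport + dport) & mask_for(len);

	if (fifo >= len)     /* mask can overshoot for non-power-of-two len */
		fifo = len - 1;
	return fifo;
}

int main(void)
{
	printf("4 fifos: %u\n", pick_fifo(43210, 80, 4));  /* 43290 & 3 = 2 */
	printf("3 fifos: %u\n", pick_fifo(1, 2, 3));       /* 3 & 3 -> clamp 2 */
	return 0;
}
```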
queue              50 drivers/net/ethernet/netronome/nfp/abm/ctrl.c 		  unsigned int queue, bool is_u64, u64 *res)
queue              58 drivers/net/ethernet/netronome/nfp/abm/ctrl.c 	qid = band * NFP_NET_MAX_RX_RINGS + alink->queue_base + queue;
queue              67 drivers/net/ethernet/netronome/nfp/abm/ctrl.c 			alink->id, band, queue, alink->queue_base);
queue              99 drivers/net/ethernet/netronome/nfp/abm/ctrl.c 			   unsigned int queue, u32 val)
queue             103 drivers/net/ethernet/netronome/nfp/abm/ctrl.c 	threshold = band * NFP_NET_MAX_RX_RINGS + alink->queue_base + queue;
queue             132 drivers/net/ethernet/netronome/nfp/abm/ctrl.c 			   unsigned int queue, enum nfp_abm_q_action act)
queue             136 drivers/net/ethernet/netronome/nfp/abm/ctrl.c 	qid = band * NFP_NET_MAX_RX_RINGS + alink->queue_base + queue;
queue             141 drivers/net/ethernet/netronome/nfp/abm/ctrl.c u64 nfp_abm_ctrl_stat_non_sto(struct nfp_abm_link *alink, unsigned int queue)
queue             149 drivers/net/ethernet/netronome/nfp/abm/ctrl.c 				      band, queue, true, &val))
queue             157 drivers/net/ethernet/netronome/nfp/abm/ctrl.c u64 nfp_abm_ctrl_stat_sto(struct nfp_abm_link *alink, unsigned int queue)
queue             165 drivers/net/ethernet/netronome/nfp/abm/ctrl.c 				      band, queue, true, &val))
queue             175 drivers/net/ethernet/netronome/nfp/abm/ctrl.c 			unsigned int queue, unsigned int off, u64 *val)
queue             179 drivers/net/ethernet/netronome/nfp/abm/ctrl.c 			unsigned int id = alink->queue_base + queue;
queue             190 drivers/net/ethernet/netronome/nfp/abm/ctrl.c 					 NFP_Q_STAT_STRIDE, off, band, queue,
queue             196 drivers/net/ethernet/netronome/nfp/abm/ctrl.c 			      unsigned int queue, struct nfp_alink_stats *stats)
queue             200 drivers/net/ethernet/netronome/nfp/abm/ctrl.c 	err = nfp_abm_ctrl_stat_basic(alink, band, queue, NFP_Q_STAT_PKTS,
queue             205 drivers/net/ethernet/netronome/nfp/abm/ctrl.c 	err = nfp_abm_ctrl_stat_basic(alink, band, queue, NFP_Q_STAT_BYTES,
queue             211 drivers/net/ethernet/netronome/nfp/abm/ctrl.c 				NFP_QLVL_BLOG_BYTES, band, queue, false,
queue             218 drivers/net/ethernet/netronome/nfp/abm/ctrl.c 				band, queue, false, &stats->backlog_pkts);
queue             224 drivers/net/ethernet/netronome/nfp/abm/ctrl.c 				band, queue, true, &stats->drops);
queue             230 drivers/net/ethernet/netronome/nfp/abm/ctrl.c 				 band, queue, true, &stats->overlimits);
queue             234 drivers/net/ethernet/netronome/nfp/abm/ctrl.c 			       unsigned int band, unsigned int queue,
queue             241 drivers/net/ethernet/netronome/nfp/abm/ctrl.c 				band, queue, true, &xstats->pdrop);
queue             247 drivers/net/ethernet/netronome/nfp/abm/ctrl.c 				 band, queue, true, &xstats->ecn_marked);
queue             256 drivers/net/ethernet/netronome/nfp/abm/main.h 			   unsigned int queue, u32 val);
queue             260 drivers/net/ethernet/netronome/nfp/abm/main.h 			   unsigned int queue, enum nfp_abm_q_action act);
queue             262 drivers/net/ethernet/netronome/nfp/abm/main.h 			      unsigned int band, unsigned int queue,
queue             265 drivers/net/ethernet/netronome/nfp/abm/main.h 			       unsigned int band, unsigned int queue,
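
The nfp/abm lines above repeatedly compute qid = band * NFP_NET_MAX_RX_RINGS + alink->queue_base + queue: per-band state is laid out in fixed-size blocks, and each link addresses its queues at queue_base within a block. A sketch of that flattening, with an assumed ring count for illustration:

#include <stdio.h>

/*
 * Model of the qid arithmetic in nfp/abm/ctrl.c above.  The value of
 * NFP_NET_MAX_RX_RINGS is an assumption for this sketch.
 */
#define NFP_NET_MAX_RX_RINGS 128	/* assumed value */

static unsigned int abm_qid(unsigned int band, unsigned int queue_base,
			    unsigned int queue)
{
	return band * NFP_NET_MAX_RX_RINGS + queue_base + queue;
}

int main(void)
{
	/* band 2, link starting at ring 8, third queue of the link */
	printf("qid=%u\n", abm_qid(2, 8, 2));	/* 2*128 + 8 + 2 = 266 */
	return 0;
}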
queue              46 drivers/net/ethernet/netronome/nfp/abm/qdisc.c 			 unsigned int queue)
queue              56 drivers/net/ethernet/netronome/nfp/abm/qdisc.c 		err = nfp_abm_ctrl_read_q_stats(alink, i, queue,
queue              60 drivers/net/ethernet/netronome/nfp/abm/qdisc.c 				i, queue, err);
queue              62 drivers/net/ethernet/netronome/nfp/abm/qdisc.c 		err = nfp_abm_ctrl_read_q_xstats(alink, i, queue,
queue              66 drivers/net/ethernet/netronome/nfp/abm/qdisc.c 				i, queue, err);
queue             139 drivers/net/ethernet/netronome/nfp/abm/qdisc.c 		     unsigned int queue, struct nfp_alink_stats *prev_stats,
queue             151 drivers/net/ethernet/netronome/nfp/abm/qdisc.c 	err = nfp_abm_ctrl_read_q_stats(alink, band, queue, prev_stats);
queue             155 drivers/net/ethernet/netronome/nfp/abm/qdisc.c 			band, queue, err);
queue             159 drivers/net/ethernet/netronome/nfp/abm/qdisc.c 	err = nfp_abm_ctrl_read_q_xstats(alink, band, queue, prev_xstats);
queue             163 drivers/net/ethernet/netronome/nfp/abm/qdisc.c 			band, queue, err);
queue             174 drivers/net/ethernet/netronome/nfp/abm/qdisc.c 		   unsigned int queue)
queue             180 drivers/net/ethernet/netronome/nfp/abm/qdisc.c 		err = __nfp_abm_stats_init(alink, i, queue,
queue             192 drivers/net/ethernet/netronome/nfp/abm/qdisc.c 			    unsigned int queue)
queue             209 drivers/net/ethernet/netronome/nfp/abm/qdisc.c 		if (nfp_abm_stats_init(alink, qdisc, queue))
queue             218 drivers/net/ethernet/netronome/nfp/abm/qdisc.c 		nfp_abm_ctrl_set_q_lvl(alink, i, queue,
queue             222 drivers/net/ethernet/netronome/nfp/abm/qdisc.c 		nfp_abm_ctrl_set_q_act(alink, i, queue, act);
queue             830 drivers/net/ethernet/netronome/nfp/abm/qdisc.c 					   opt->graft_params.queue);
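
The qdisc.c lines above read per-queue stats into prev_stats/prev_xstats when a qdisc is grafted: hardware counters are free-running, so the driver snapshots a baseline at attach time and reports deltas afterwards. A hedged sketch of that pattern, where read_hw_pkts() merely stands in for the firmware read:

#include <stdint.h>
#include <stdio.h>

/*
 * Baseline-stats pattern suggested by nfp/abm/qdisc.c above: snapshot
 * the free-running counter at graft time, report the delta later.
 */
struct alink_stats { uint64_t pkts; };

static uint64_t read_hw_pkts(void)
{
	static uint64_t counter = 1000;	/* pretend free-running counter */
	return counter += 50;
}

static void stats_init(struct alink_stats *prev)
{
	prev->pkts = read_hw_pkts();	/* snapshot at graft time */
}

static uint64_t stats_report(const struct alink_stats *prev)
{
	return read_hw_pkts() - prev->pkts;	/* delta since graft */
}

int main(void)
{
	struct alink_stats prev;

	stats_init(&prev);
	printf("pkts since graft: %llu\n",
	       (unsigned long long)stats_report(&prev));
	return 0;
}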
queue             118 drivers/net/ethernet/netronome/nfp/ccm_mbox.c 	return skb_queue_is_first(&nn->mbox_cmsg.queue, skb);
queue             133 drivers/net/ethernet/netronome/nfp/ccm_mbox.c 	skb = skb_peek(&nn->mbox_cmsg.queue);
queue             159 drivers/net/ethernet/netronome/nfp/ccm_mbox.c 	skb = __skb_peek(&nn->mbox_cmsg.queue);
queue             192 drivers/net/ethernet/netronome/nfp/ccm_mbox.c 		skb = skb_queue_next(&nn->mbox_cmsg.queue, skb);
queue             203 drivers/net/ethernet/netronome/nfp/ccm_mbox.c 	skb = __skb_peek(&nn->mbox_cmsg.queue);
queue             210 drivers/net/ethernet/netronome/nfp/ccm_mbox.c 		skb = skb_queue_next(&nn->mbox_cmsg.queue, skb);
queue             330 drivers/net/ethernet/netronome/nfp/ccm_mbox.c 	spin_lock_bh(&nn->mbox_cmsg.queue.lock);
queue             332 drivers/net/ethernet/netronome/nfp/ccm_mbox.c 		skb = __skb_dequeue(&nn->mbox_cmsg.queue);
queue             351 drivers/net/ethernet/netronome/nfp/ccm_mbox.c 	spin_unlock_bh(&nn->mbox_cmsg.queue.lock);
queue             360 drivers/net/ethernet/netronome/nfp/ccm_mbox.c 	spin_lock_bh(&nn->mbox_cmsg.queue.lock);
queue             362 drivers/net/ethernet/netronome/nfp/ccm_mbox.c 		skb = __skb_dequeue(&nn->mbox_cmsg.queue);
queue             371 drivers/net/ethernet/netronome/nfp/ccm_mbox.c 	spin_unlock_bh(&nn->mbox_cmsg.queue.lock);
queue             375 drivers/net/ethernet/netronome/nfp/ccm_mbox.c 	__releases(&nn->mbox_cmsg.queue.lock)
queue             385 drivers/net/ethernet/netronome/nfp/ccm_mbox.c 	last = skb = __skb_peek(&nn->mbox_cmsg.queue);
queue             388 drivers/net/ethernet/netronome/nfp/ccm_mbox.c 	while (!skb_queue_is_last(&nn->mbox_cmsg.queue, last)) {
queue             389 drivers/net/ethernet/netronome/nfp/ccm_mbox.c 		skb = skb_queue_next(&nn->mbox_cmsg.queue, last);
queue             399 drivers/net/ethernet/netronome/nfp/ccm_mbox.c 	spin_unlock_bh(&nn->mbox_cmsg.queue.lock);
queue             436 drivers/net/ethernet/netronome/nfp/ccm_mbox.c 	__releases(&nn->mbox_cmsg.queue.lock)
queue             441 drivers/net/ethernet/netronome/nfp/ccm_mbox.c 		spin_unlock_bh(&nn->mbox_cmsg.queue.lock);
queue             449 drivers/net/ethernet/netronome/nfp/ccm_mbox.c 	__skb_unlink(skb, &nn->mbox_cmsg.queue);
queue             453 drivers/net/ethernet/netronome/nfp/ccm_mbox.c 	spin_unlock_bh(&nn->mbox_cmsg.queue.lock);
queue             522 drivers/net/ethernet/netronome/nfp/ccm_mbox.c 	assert_spin_locked(&nn->mbox_cmsg.queue.lock);
queue             524 drivers/net/ethernet/netronome/nfp/ccm_mbox.c 	if (!critical && nn->mbox_cmsg.queue.qlen >= NFP_CCM_MAX_QLEN) {
queue             534 drivers/net/ethernet/netronome/nfp/ccm_mbox.c 	__skb_queue_tail(&nn->mbox_cmsg.queue, skb);
queue             551 drivers/net/ethernet/netronome/nfp/ccm_mbox.c 	spin_lock_bh(&nn->mbox_cmsg.queue.lock);
queue             561 drivers/net/ethernet/netronome/nfp/ccm_mbox.c 		spin_unlock_bh(&nn->mbox_cmsg.queue.lock);
queue             574 drivers/net/ethernet/netronome/nfp/ccm_mbox.c 		spin_lock_bh(&nn->mbox_cmsg.queue.lock);
queue             591 drivers/net/ethernet/netronome/nfp/ccm_mbox.c 	spin_unlock_bh(&nn->mbox_cmsg.queue.lock);
queue             613 drivers/net/ethernet/netronome/nfp/ccm_mbox.c 	spin_lock_bh(&nn->mbox_cmsg.queue.lock);
queue             615 drivers/net/ethernet/netronome/nfp/ccm_mbox.c 	skb = __skb_peek(&nn->mbox_cmsg.queue);
queue             618 drivers/net/ethernet/netronome/nfp/ccm_mbox.c 		spin_unlock_bh(&nn->mbox_cmsg.queue.lock);
queue             633 drivers/net/ethernet/netronome/nfp/ccm_mbox.c 	skb = skb_peek(&nn->mbox_cmsg.queue);
queue             660 drivers/net/ethernet/netronome/nfp/ccm_mbox.c 	spin_lock_bh(&nn->mbox_cmsg.queue.lock);
queue             678 drivers/net/ethernet/netronome/nfp/ccm_mbox.c 	spin_unlock_bh(&nn->mbox_cmsg.queue.lock);
queue             683 drivers/net/ethernet/netronome/nfp/ccm_mbox.c 	spin_unlock_bh(&nn->mbox_cmsg.queue.lock);
queue             728 drivers/net/ethernet/netronome/nfp/ccm_mbox.c 	skb_queue_head_init(&nn->mbox_cmsg.queue);
queue             742 drivers/net/ethernet/netronome/nfp/ccm_mbox.c 	WARN_ON(!skb_queue_empty(&nn->mbox_cmsg.queue));
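
The ccm_mbox.c lines above show the sk_buff_head idiom: the queue carries its own lock, callers take it explicitly (spin_lock_bh(&...queue.lock)) and then use the unlocked __skb_* helpers, and enqueue is refused once qlen hits a cap (NFP_CCM_MAX_QLEN in the driver). A userspace model with a pthread mutex in the lock's role; MAX_QLEN and the struct names are stand-ins:

#include <pthread.h>
#include <stdio.h>

/*
 * Model of the locked-FIFO pattern in ccm_mbox.c above.  The "__"
 * helpers assume the caller already holds q->lock, mirroring
 * __skb_queue_tail()/__skb_dequeue().
 */
#define MAX_QLEN 8			/* assumed cap */

struct msg { struct msg *next; int id; };

struct msg_queue {
	pthread_mutex_t lock;		/* plays the role of queue.lock */
	struct msg *head, *tail;
	unsigned int qlen;
};

static int __enqueue(struct msg_queue *q, struct msg *m)
{
	if (q->qlen >= MAX_QLEN)
		return -1;		/* queue full, reject */
	m->next = NULL;
	if (q->tail)
		q->tail->next = m;
	else
		q->head = m;
	q->tail = m;
	q->qlen++;
	return 0;
}

static struct msg *__dequeue(struct msg_queue *q)
{
	struct msg *m = q->head;

	if (m) {
		q->head = m->next;
		if (!q->head)
			q->tail = NULL;
		q->qlen--;
	}
	return m;
}

int main(void)
{
	struct msg_queue q = { .lock = PTHREAD_MUTEX_INITIALIZER };
	struct msg a = { .id = 1 }, *m;

	pthread_mutex_lock(&q.lock);	/* spin_lock_bh(&...queue.lock) */
	__enqueue(&q, &a);
	m = __dequeue(&q);
	pthread_mutex_unlock(&q.lock);

	printf("dequeued id=%d\n", m ? m->id : -1);
	return 0;
}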
queue             380 drivers/net/ethernet/netronome/nfp/flower/main.c 	const u8 queue = 0;
queue             434 drivers/net/ethernet/netronome/nfp/flower/main.c 						    i, queue);
queue             403 drivers/net/ethernet/netronome/nfp/nfp_net.h 			struct sk_buff_head queue;
queue             679 drivers/net/ethernet/netronome/nfp/nfp_net.h 		struct sk_buff_head queue;
queue            2091 drivers/net/ethernet/netronome/nfp/nfp_net_common.c 			__skb_queue_tail(&r_vec->queue, skb);
queue            2093 drivers/net/ethernet/netronome/nfp/nfp_net_common.c 			__skb_queue_head(&r_vec->queue, skb);
queue            2173 drivers/net/ethernet/netronome/nfp/nfp_net_common.c 	while ((skb = __skb_dequeue(&r_vec->queue)))
queue            2332 drivers/net/ethernet/netronome/nfp/nfp_net_common.c 			__skb_queue_head_init(&r_vec->queue);
queue             165 drivers/net/ethernet/qlogic/qed/qed_sp.h 	struct list_head		*queue;
queue              57 drivers/net/ethernet/qlogic/qed/qed_sp_commands.c 	if (p_ent->queue == &p_hwfn->p_spq->unlimited_pending)
queue             637 drivers/net/ethernet/qlogic/qed/qed_spq.c 		p_ent->queue = &p_spq->unlimited_pending;
queue             642 drivers/net/ethernet/qlogic/qed/qed_spq.c 		p_ent->queue = &p_spq->pending;
queue             686 drivers/net/ethernet/qlogic/qed/qed_spq.c 	if (p_ent->queue == &p_spq->unlimited_pending) {
queue             890 drivers/net/ethernet/qlogic/qed/qed_spq.c 				   p_ent->queue == &p_spq->unlimited_pending);
queue             892 drivers/net/ethernet/qlogic/qed/qed_spq.c 		if (p_ent->queue == &p_spq->unlimited_pending) {
queue            1764 drivers/net/ethernet/qlogic/qede/qede_filter.c 			if (act->queue.vf)
queue            1767 drivers/net/ethernet/qlogic/qede/qede_filter.c 			if (act->queue.index >= QEDE_RSS_COUNT(edev)) {
queue             192 drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c static inline u32 sxgbe_tx_avail(struct sxgbe_tx_queue *queue, int tx_qsize)
queue             194 drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c 	return queue->dirty_tx + tx_qsize - queue->cur_tx - 1;
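
The sxgbe_tx_avail() line above is the classic producer/consumer ring computation: with free-running unsigned indices, the free descriptor count is dirty + size - cur - 1 (one slot stays empty so full and empty are distinguishable), and unsigned wraparound keeps it correct after the indices overflow. A self-contained check:

#include <stdio.h>

/* Ring free-slot math from sxgbe_tx_avail() above. */
static unsigned int tx_avail(unsigned int dirty_tx, unsigned int cur_tx,
			     unsigned int qsize)
{
	return dirty_tx + qsize - cur_tx - 1;
}

int main(void)
{
	/* 256-entry ring, 10 descriptors in flight */
	printf("avail=%u\n", tx_avail(100, 110, 256));		/* 245 */
	/* still correct across 32-bit index wraparound */
	printf("avail=%u\n", tx_avail(0xfffffffau, 4u, 256));	/* 245 */
	return 0;
}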
queue             967 drivers/net/ethernet/sfc/ef10.c 			if (tx_queue->queue == nic_data->pio_write_vi_base) {
queue             976 drivers/net/ethernet/sfc/ef10.c 					       tx_queue->queue);
queue             988 drivers/net/ethernet/sfc/ef10.c 					  tx_queue->queue, index, rc);
queue             997 drivers/net/ethernet/sfc/ef10.c 					  tx_queue->queue, index,
queue            2317 drivers/net/ethernet/sfc/ef10.c 			ER_DZ_TX_DESC_UPD, tx_queue->queue);
queue            2410 drivers/net/ethernet/sfc/ef10.c 	bool csum_offload = tx_queue->queue & EFX_TXQ_TYPE_OFFLOAD;
queue            2449 drivers/net/ethernet/sfc/ef10.c 	MCDI_SET_DWORD(inbuf, INIT_TXQ_IN_LABEL, tx_queue->queue);
queue            2450 drivers/net/ethernet/sfc/ef10.c 	MCDI_SET_DWORD(inbuf, INIT_TXQ_IN_INSTANCE, tx_queue->queue);
queue            2457 drivers/net/ethernet/sfc/ef10.c 		  tx_queue->queue, entries, (u64)dma_addr);
queue            2526 drivers/net/ethernet/sfc/ef10.c 		    tx_queue->queue);
queue            2538 drivers/net/ethernet/sfc/ef10.c 		       tx_queue->queue);
queue            2567 drivers/net/ethernet/sfc/ef10.c 			ER_DZ_TX_DESC_UPD_DWORD, tx_queue->queue);
queue             479 drivers/net/ethernet/sfc/efx.c 		tx_queue->queue = i * EFX_TXQ_TYPES + j;
queue            1748 drivers/net/ethernet/sfc/efx.c 			tx_queue->queue -= (efx->tx_channel_offset *
queue             252 drivers/net/ethernet/sfc/ethtool.c #define EFX_TX_QUEUE_NAME(_tx_queue) "txq%d", _tx_queue->queue
queue             253 drivers/net/ethernet/sfc/ethtool.c #define EFX_RX_QUEUE_NAME(_rx_queue) "rxq%d", _rx_queue->queue
queue             281 drivers/net/ethernet/sfc/ethtool.c 			      &lb_tests->tx_sent[tx_queue->queue],
queue             285 drivers/net/ethernet/sfc/ethtool.c 			      &lb_tests->tx_done[tx_queue->queue],
queue             385 drivers/net/ethernet/sfc/ethtool.c 					 channel->tx_queue[0].queue /
queue             443 drivers/net/ethernet/sfc/falcon/efx.c 		tx_queue->queue = i * EF4_TXQ_TYPES + j;
queue            1613 drivers/net/ethernet/sfc/falcon/efx.c 			tx_queue->queue -= (efx->tx_channel_offset *
queue             234 drivers/net/ethernet/sfc/falcon/ethtool.c #define EF4_TX_QUEUE_NAME(_tx_queue) "txq%d", _tx_queue->queue
queue             235 drivers/net/ethernet/sfc/falcon/ethtool.c #define EF4_RX_QUEUE_NAME(_rx_queue) "rxq%d", _rx_queue->queue
queue             263 drivers/net/ethernet/sfc/falcon/ethtool.c 			      &lb_tests->tx_sent[tx_queue->queue],
queue             267 drivers/net/ethernet/sfc/falcon/ethtool.c 			      &lb_tests->tx_done[tx_queue->queue],
queue             367 drivers/net/ethernet/sfc/falcon/ethtool.c 					 channel->tx_queue[0].queue /
queue              74 drivers/net/ethernet/sfc/falcon/farch.c 			   (_tx_queue)->queue)
queue             280 drivers/net/ethernet/sfc/falcon/farch.c 			FR_AZ_TX_DESC_UPD_DWORD_P0, tx_queue->queue);
queue             298 drivers/net/ethernet/sfc/falcon/farch.c 			FR_BZ_TX_DESC_UPD_P0, tx_queue->queue);
queue             390 drivers/net/ethernet/sfc/falcon/farch.c 			      FRF_AZ_TX_DESCQ_LABEL, tx_queue->queue,
queue             397 drivers/net/ethernet/sfc/falcon/farch.c 		int csum = tx_queue->queue & EF4_TXQ_TYPE_OFFLOAD;
queue             404 drivers/net/ethernet/sfc/falcon/farch.c 			 tx_queue->queue);
queue             411 drivers/net/ethernet/sfc/falcon/farch.c 		if (tx_queue->queue & EF4_TXQ_TYPE_OFFLOAD)
queue             412 drivers/net/ethernet/sfc/falcon/farch.c 			__clear_bit_le(tx_queue->queue, &reg);
queue             414 drivers/net/ethernet/sfc/falcon/farch.c 			__set_bit_le(tx_queue->queue, &reg);
queue             421 drivers/net/ethernet/sfc/falcon/farch.c 				     (tx_queue->queue & EF4_TXQ_TYPE_HIGHPRI) ?
queue             425 drivers/net/ethernet/sfc/falcon/farch.c 				 tx_queue->queue);
queue             439 drivers/net/ethernet/sfc/falcon/farch.c 			     FRF_AZ_TX_FLUSH_DESCQ, tx_queue->queue);
queue             451 drivers/net/ethernet/sfc/falcon/farch.c 			 tx_queue->queue);
queue             624 drivers/net/ethernet/sfc/falcon/farch.c 					FR_BZ_TX_DESC_PTR_TBL, tx_queue->queue);
queue             631 drivers/net/ethernet/sfc/falcon/farch.c 					  tx_queue->queue);
queue             640 drivers/net/ethernet/sfc/falcon/farch.c 					  "the queue\n", tx_queue->queue);
queue             215 drivers/net/ethernet/sfc/falcon/net_driver.h 	unsigned queue;
queue            1212 drivers/net/ethernet/sfc/falcon/net_driver.h 		 tx_queue->queue & EF4_TXQ_TYPE_HIGHPRI);
queue              71 drivers/net/ethernet/sfc/falcon/nic.h 	if (tx_queue->queue & EF4_TXQ_TYPE_OFFLOAD)
queue             446 drivers/net/ethernet/sfc/falcon/selftest.c 				  "%d in %s loopback test\n", tx_queue->queue,
queue             498 drivers/net/ethernet/sfc/falcon/selftest.c 			  tx_queue->queue, tx_done, state->packet_count,
queue             509 drivers/net/ethernet/sfc/falcon/selftest.c 			  tx_queue->queue, rx_good, state->packet_count,
queue             516 drivers/net/ethernet/sfc/falcon/selftest.c 	lb_tests->tx_sent[tx_queue->queue] += state->packet_count;
queue             517 drivers/net/ethernet/sfc/falcon/selftest.c 	lb_tests->tx_done[tx_queue->queue] += tx_done;
queue             544 drivers/net/ethernet/sfc/falcon/selftest.c 			  tx_queue->queue, LOOPBACK_MODE(efx),
queue             571 drivers/net/ethernet/sfc/falcon/selftest.c 		  "of %d packets\n", tx_queue->queue, LOOPBACK_MODE(efx),
queue             661 drivers/net/ethernet/sfc/falcon/selftest.c 			state->offload_csum = (tx_queue->queue &
queue              74 drivers/net/ethernet/sfc/falcon/tx.c 			   tx_queue->queue, tx_queue->read_count);
queue             372 drivers/net/ethernet/sfc/falcon/tx.c 				  tx_queue->queue, read_ptr);
queue             420 drivers/net/ethernet/sfc/falcon/tx.c 				    tx_queue->queue / EF4_TXQ_TYPES +
queue             421 drivers/net/ethernet/sfc/falcon/tx.c 				    ((tx_queue->queue & EF4_TXQ_TYPE_HIGHPRI) ?
queue             458 drivers/net/ethernet/sfc/falcon/tx.c 				if (!(tx_queue->queue & EF4_TXQ_TYPE_HIGHPRI))
queue             551 drivers/net/ethernet/sfc/falcon/tx.c 		  tx_queue->queue, efx->txq_entries, tx_queue->ptr_mask);
queue             587 drivers/net/ethernet/sfc/falcon/tx.c 		  "initialising TX queue %d\n", tx_queue->queue);
queue             611 drivers/net/ethernet/sfc/falcon/tx.c 		  "shutting down TX queue %d\n", tx_queue->queue);
queue             636 drivers/net/ethernet/sfc/falcon/tx.c 		  "destroying TX queue %d\n", tx_queue->queue);
queue              76 drivers/net/ethernet/sfc/farch.c 			   (_tx_queue)->queue)
queue             289 drivers/net/ethernet/sfc/farch.c 			FR_AZ_TX_DESC_UPD_DWORD_P0, tx_queue->queue);
queue             307 drivers/net/ethernet/sfc/farch.c 			FR_BZ_TX_DESC_UPD_P0, tx_queue->queue);
queue             381 drivers/net/ethernet/sfc/farch.c 	int csum = tx_queue->queue & EFX_TXQ_TYPE_OFFLOAD;
queue             397 drivers/net/ethernet/sfc/farch.c 			      FRF_AZ_TX_DESCQ_LABEL, tx_queue->queue,
queue             407 drivers/net/ethernet/sfc/farch.c 			 tx_queue->queue);
queue             411 drivers/net/ethernet/sfc/farch.c 			     (tx_queue->queue & EFX_TXQ_TYPE_HIGHPRI) ?
queue             414 drivers/net/ethernet/sfc/farch.c 	efx_writeo_table(efx, &reg, FR_BZ_TX_PACE_TBL, tx_queue->queue);
queue             427 drivers/net/ethernet/sfc/farch.c 			     FRF_AZ_TX_FLUSH_DESCQ, tx_queue->queue);
queue             439 drivers/net/ethernet/sfc/farch.c 			 tx_queue->queue);
queue             606 drivers/net/ethernet/sfc/farch.c 					FR_BZ_TX_DESC_PTR_TBL, tx_queue->queue);
queue             613 drivers/net/ethernet/sfc/farch.c 					  tx_queue->queue);
queue             622 drivers/net/ethernet/sfc/farch.c 					  "the queue\n", tx_queue->queue);
queue             241 drivers/net/ethernet/sfc/net_driver.h 	unsigned queue;
queue            1493 drivers/net/ethernet/sfc/net_driver.h 		 tx_queue->queue & EFX_TXQ_TYPE_HIGHPRI);
queue              69 drivers/net/ethernet/sfc/nic.h 	if (tx_queue->queue & EFX_TXQ_TYPE_OFFLOAD)
queue             446 drivers/net/ethernet/sfc/selftest.c 				  "%d in %s loopback test\n", tx_queue->queue,
queue             498 drivers/net/ethernet/sfc/selftest.c 			  tx_queue->queue, tx_done, state->packet_count,
queue             509 drivers/net/ethernet/sfc/selftest.c 			  tx_queue->queue, rx_good, state->packet_count,
queue             516 drivers/net/ethernet/sfc/selftest.c 	lb_tests->tx_sent[tx_queue->queue] += state->packet_count;
queue             517 drivers/net/ethernet/sfc/selftest.c 	lb_tests->tx_done[tx_queue->queue] += tx_done;
queue             544 drivers/net/ethernet/sfc/selftest.c 			  tx_queue->queue, LOOPBACK_MODE(efx),
queue             571 drivers/net/ethernet/sfc/selftest.c 		  "of %d packets\n", tx_queue->queue, LOOPBACK_MODE(efx),
queue             661 drivers/net/ethernet/sfc/selftest.c 			state->offload_csum = (tx_queue->queue &
queue            1493 drivers/net/ethernet/sfc/siena_sriov.c 	unsigned queue, qid;
queue            1495 drivers/net/ethernet/sfc/siena_sriov.c 	queue = EFX_QWORD_FIELD(*event,  FSF_AZ_DRIVER_EV_SUBDATA);
queue            1496 drivers/net/ethernet/sfc/siena_sriov.c 	if (map_vi_index(efx, queue, &vf, &qid))
queue            1512 drivers/net/ethernet/sfc/siena_sriov.c 	unsigned ev_failed, queue, qid;
queue            1514 drivers/net/ethernet/sfc/siena_sriov.c 	queue = EFX_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_RX_DESCQ_ID);
queue            1517 drivers/net/ethernet/sfc/siena_sriov.c 	if (map_vi_index(efx, queue, &vf, &qid))
queue              97 drivers/net/ethernet/sfc/tx.c 			   tx_queue->queue, tx_queue->read_count);
queue             623 drivers/net/ethernet/sfc/tx.c 				  tx_queue->queue, read_ptr);
queue             677 drivers/net/ethernet/sfc/tx.c 				    tx_queue->queue / EFX_TXQ_TYPES +
queue             678 drivers/net/ethernet/sfc/tx.c 				    ((tx_queue->queue & EFX_TXQ_TYPE_HIGHPRI) ?
queue             715 drivers/net/ethernet/sfc/tx.c 				if (!(tx_queue->queue & EFX_TXQ_TYPE_HIGHPRI))
queue             808 drivers/net/ethernet/sfc/tx.c 		  tx_queue->queue, efx->txq_entries, tx_queue->ptr_mask);
queue             844 drivers/net/ethernet/sfc/tx.c 		  "initialising TX queue %d\n", tx_queue->queue);
queue             876 drivers/net/ethernet/sfc/tx.c 		  "shutting down TX queue %d\n", tx_queue->queue);
queue             901 drivers/net/ethernet/sfc/tx.c 		  "destroying TX queue %d\n", tx_queue->queue);
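
Across the sfc and falcon listings above, the TX queue number packs both channel and queue type: allocation does queue = channel * EFX_TXQ_TYPES + type, the channel is recovered with queue / EFX_TXQ_TYPES, and the type bits double as flags tested with queue & EFX_TXQ_TYPE_OFFLOAD / EFX_TXQ_TYPE_HIGHPRI. A sketch of that encoding; the three constant values mirror the scheme implied by the listing but are assumptions here:

#include <stdio.h>

/* Model of the sfc TX queue numbering visible above. */
#define EFX_TXQ_TYPES		4	/* assumed: two flag bits */
#define EFX_TXQ_TYPE_OFFLOAD	1	/* csum offload flag */
#define EFX_TXQ_TYPE_HIGHPRI	2	/* high-priority flag */

int main(void)
{
	unsigned int channel = 3;
	unsigned int queue = channel * EFX_TXQ_TYPES + EFX_TXQ_TYPE_OFFLOAD;

	printf("channel=%u offload=%u highpri=%u\n",
	       queue / EFX_TXQ_TYPES,		/* back to the channel */
	       !!(queue & EFX_TXQ_TYPE_OFFLOAD),
	       !!(queue & EFX_TXQ_TYPE_HIGHPRI));
	return 0;
}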
queue              86 drivers/net/ethernet/stmicro/stmmac/dwmac4.h #define GMAC_RX_QUEUE_CLEAR(queue)	~(GENMASK(1, 0) << ((queue) * 2))
queue              87 drivers/net/ethernet/stmicro/stmmac/dwmac4.h #define GMAC_RX_AV_QUEUE_ENABLE(queue)	BIT((queue) * 2)
queue              88 drivers/net/ethernet/stmicro/stmmac/dwmac4.h #define GMAC_RX_DCB_QUEUE_ENABLE(queue)	BIT(((queue) * 2) + 1)
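
The three dwmac4.h macros above manage a two-bit field per queue in the RX queue control register: bit 2q enables the queue for AV traffic, bit 2q+1 for DCB/generic traffic, and the CLEAR mask zeroes both before a new mode is written. A self-contained sketch (BIT/GENMASK are redefined locally so it builds outside the kernel):

#include <stdio.h>

/* Two-bits-per-queue enable field, as in the dwmac4.h macros above. */
#define BIT(n)		(1UL << (n))
#define GENMASK(h, l)	(((1UL << ((h) - (l) + 1)) - 1) << (l))

#define RX_QUEUE_CLEAR(q)	(~(GENMASK(1, 0) << ((q) * 2)))
#define RX_AV_QUEUE_ENABLE(q)	BIT((q) * 2)
#define RX_DCB_QUEUE_ENABLE(q)	BIT(((q) * 2) + 1)

int main(void)
{
	unsigned long ctrl0 = 0;

	ctrl0 &= RX_QUEUE_CLEAR(1);		/* wipe queue 1's two bits */
	ctrl0 |= RX_DCB_QUEUE_ENABLE(1);	/* enable it in DCB mode */
	printf("ctrl0=%#lx\n", ctrl0);		/* 0x8: bit 3 set */
	return 0;
}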
queue              60 drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c 				   u8 mode, u32 queue)
queue              65 drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c 	value &= GMAC_RX_QUEUE_CLEAR(queue);
queue              67 drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c 		value |= GMAC_RX_AV_QUEUE_ENABLE(queue);
queue              69 drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c 		value |= GMAC_RX_DCB_QUEUE_ENABLE(queue);
queue              75 drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c 				     u32 prio, u32 queue)
queue              81 drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c 	base_register = (queue < 4) ? GMAC_RXQ_CTRL2 : GMAC_RXQ_CTRL3;
queue              82 drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c 	if (queue >= 4)
queue              83 drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c 		queue -= 4;
queue              87 drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c 	value &= ~GMAC_RXQCTRL_PSRQX_MASK(queue);
queue              88 drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c 	value |= (prio << GMAC_RXQCTRL_PSRQX_SHIFT(queue)) &
queue              89 drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c 						GMAC_RXQCTRL_PSRQX_MASK(queue);
queue              94 drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c 				     u32 prio, u32 queue)
queue             100 drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c 	base_register = (queue < 4) ? GMAC_TXQ_PRTY_MAP0 : GMAC_TXQ_PRTY_MAP1;
queue             101 drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c 	if (queue >= 4)
queue             102 drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c 		queue -= 4;
queue             106 drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c 	value &= ~GMAC_TXQCTRL_PSTQX_MASK(queue);
queue             107 drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c 	value |= (prio << GMAC_TXQCTRL_PSTQX_SHIFT(queue)) &
queue             108 drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c 						GMAC_TXQCTRL_PSTQX_MASK(queue);
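
dwmac4_rx_queue_prio() and dwmac4_tx_queue_prio() above share a register-split pattern: queues 0-3 live in one 32-bit register and queues 4-7 in the next, eight bits per queue, so the code picks the register, rebases the queue index with queue -= 4, then masks and shifts the priority into place. A sketch where two u32 variables stand in for the GMAC_RXQ_CTRL2/CTRL3 MMIO registers:

#include <stdint.h>
#include <stdio.h>

/* Split-register per-queue priority update, as in dwmac4_core.c above. */
#define PSRQ_MASK(q)	(0xffUL << ((q) * 8))
#define PSRQ_SHIFT(q)	((q) * 8)

static void set_queue_prio(uint32_t regs[2], uint32_t prio, uint32_t queue)
{
	uint32_t *reg = (queue < 4) ? &regs[0] : &regs[1];

	if (queue >= 4)
		queue -= 4;		/* rebase into the high register */

	*reg &= ~PSRQ_MASK(queue);
	*reg |= (prio << PSRQ_SHIFT(queue)) & PSRQ_MASK(queue);
}

int main(void)
{
	uint32_t regs[2] = { 0, 0 };

	set_queue_prio(regs, 0x5, 1);	/* queue 1 -> regs[0], byte 1 */
	set_queue_prio(regs, 0x7, 6);	/* queue 6 -> regs[1], byte 2 */
	printf("ctrl2=%#x ctrl3=%#x\n", regs[0], regs[1]);
	return 0;
}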
queue             114 drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c 				    u8 packet, u32 queue)
queue             131 drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c 	value |= (queue << route_possibilities[packet-1].reg_shift) &
queue             195 drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c 					   u32 weight, u32 queue)
queue             198 drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c 	u32 value = readl(ioaddr + MTL_TXQX_WEIGHT_BASE_ADDR(queue));
queue             202 drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c 	writel(value, ioaddr + MTL_TXQX_WEIGHT_BASE_ADDR(queue));
queue             205 drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c static void dwmac4_map_mtl_dma(struct mac_device_info *hw, u32 queue, u32 chan)
queue             210 drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c 	if (queue < 4)
queue             215 drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c 	if (queue == 0 || queue == 4) {
queue             219 drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c 		value &= ~MTL_RXQ_DMA_QXMDMACH_MASK(queue);
queue             220 drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c 		value |= MTL_RXQ_DMA_QXMDMACH(chan, queue);
queue             223 drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c 	if (queue < 4)
queue             231 drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c 			      u32 high_credit, u32 low_credit, u32 queue)
queue             236 drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c 	pr_debug("Queue %d configured as AVB. Parameters:\n", queue);
queue             243 drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c 	value = readl(ioaddr + MTL_ETSX_CTRL_BASE_ADDR(queue));
queue             246 drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c 	writel(value, ioaddr + MTL_ETSX_CTRL_BASE_ADDR(queue));
queue             249 drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c 	value = readl(ioaddr + MTL_SEND_SLP_CREDX_BASE_ADDR(queue));
queue             252 drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c 	writel(value, ioaddr + MTL_SEND_SLP_CREDX_BASE_ADDR(queue));
queue             255 drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c 	dwmac4_set_mtl_tx_queue_weight(hw, idle_slope, queue);
queue             258 drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c 	value = readl(ioaddr + MTL_HIGH_CREDX_BASE_ADDR(queue));
queue             261 drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c 	writel(value, ioaddr + MTL_HIGH_CREDX_BASE_ADDR(queue));
queue             264 drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c 	value = readl(ioaddr + MTL_LOW_CREDX_BASE_ADDR(queue));
queue             267 drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c 	writel(value, ioaddr + MTL_LOW_CREDX_BASE_ADDR(queue));
queue             481 drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c 	u32 queue = 0;
queue             496 drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c 		for (queue = 0; queue < tx_cnt; queue++) {
queue             503 drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c 			writel(flow, ioaddr + GMAC_QX_TX_FLOW_CTRL(queue));
queue             506 drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c 		for (queue = 0; queue < tx_cnt; queue++)
queue             507 drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c 			writel(0, ioaddr + GMAC_QX_TX_FLOW_CTRL(queue));
queue             638 drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c 	u32 queue;
queue             640 drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c 	for (queue = 0; queue < tx_queues; queue++) {
queue             641 drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c 		value = readl(ioaddr + MTL_CHAN_TX_DEBUG(queue));
queue             665 drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c 	for (queue = 0; queue < rx_queues; queue++) {
queue             666 drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c 		value = readl(ioaddr + MTL_CHAN_RX_DEBUG(queue));
queue              82 drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c 				     u32 queue)
queue              87 drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c 	value = readl(ioaddr + XGMAC_RXQ_CTRL0) & ~XGMAC_RXQEN(queue);
queue              89 drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c 		value |= 0x1 << XGMAC_RXQEN_SHIFT(queue);
queue              91 drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c 		value |= 0x2 << XGMAC_RXQEN_SHIFT(queue);
queue              96 drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c 				   u32 queue)
queue             101 drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c 	reg = (queue < 4) ? XGMAC_RXQ_CTRL2 : XGMAC_RXQ_CTRL3;
queue             102 drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c 	if (queue >= 4)
queue             103 drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c 		queue -= 4;
queue             106 drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c 	value &= ~XGMAC_PSRQ(queue);
queue             107 drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c 	value |= (prio << XGMAC_PSRQ_SHIFT(queue)) & XGMAC_PSRQ(queue);
queue             113 drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c 				   u32 queue)
queue             118 drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c 	reg = (queue < 4) ? XGMAC_TC_PRTY_MAP0 : XGMAC_TC_PRTY_MAP1;
queue             119 drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c 	if (queue >= 4)
queue             120 drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c 		queue -= 4;
queue             123 drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c 	value &= ~XGMAC_PSTC(queue);
queue             124 drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c 	value |= (prio << XGMAC_PSTC_SHIFT(queue)) & XGMAC_PSTC(queue);
queue             190 drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c 					     u32 weight, u32 queue)
queue             194 drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c 	writel(weight, ioaddr + XGMAC_MTL_TCx_QUANTUM_WEIGHT(queue));
queue             197 drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c static void dwxgmac2_map_mtl_to_dma(struct mac_device_info *hw, u32 queue,
queue             203 drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c 	reg = (queue < 4) ? XGMAC_MTL_RXQ_DMA_MAP0 : XGMAC_MTL_RXQ_DMA_MAP1;
queue             204 drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c 	if (queue >= 4)
queue             205 drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c 		queue -= 4;
queue             208 drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c 	value &= ~XGMAC_QxMDMACH(queue);
queue             209 drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c 	value |= (chan << XGMAC_QxMDMACH_SHIFT(queue)) & XGMAC_QxMDMACH(queue);
queue             216 drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c 				u32 high_credit, u32 low_credit, u32 queue)
queue             221 drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c 	writel(send_slope, ioaddr + XGMAC_MTL_TCx_SENDSLOPE(queue));
queue             222 drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c 	writel(idle_slope, ioaddr + XGMAC_MTL_TCx_QUANTUM_WEIGHT(queue));
queue             223 drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c 	writel(high_credit, ioaddr + XGMAC_MTL_TCx_HICREDIT(queue));
queue             224 drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c 	writel(low_credit, ioaddr + XGMAC_MTL_TCx_LOCREDIT(queue));
queue             226 drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c 	value = readl(ioaddr + XGMAC_MTL_TCx_ETS_CONTROL(queue));
queue             229 drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c 	writel(value, ioaddr + XGMAC_MTL_TCx_ETS_CONTROL(queue));
queue             287 drivers/net/ethernet/stmicro/stmmac/hwif.h 	void (*rx_queue_enable)(struct mac_device_info *hw, u8 mode, u32 queue);
queue             289 drivers/net/ethernet/stmicro/stmmac/hwif.h 	void (*rx_queue_prio)(struct mac_device_info *hw, u32 prio, u32 queue);
queue             291 drivers/net/ethernet/stmicro/stmmac/hwif.h 	void (*tx_queue_prio)(struct mac_device_info *hw, u32 prio, u32 queue);
queue             294 drivers/net/ethernet/stmicro/stmmac/hwif.h 				 u32 queue);
queue             301 drivers/net/ethernet/stmicro/stmmac/hwif.h 					u32 weight, u32 queue);
queue             303 drivers/net/ethernet/stmicro/stmmac/hwif.h 	void (*map_mtl_to_dma)(struct mac_device_info *hw, u32 queue, u32 chan);
queue             307 drivers/net/ethernet/stmicro/stmmac/hwif.h 			   u32 queue);
queue             145 drivers/net/ethernet/stmicro/stmmac/stmmac_main.c 	u32 queue;
queue             147 drivers/net/ethernet/stmicro/stmmac/stmmac_main.c 	for (queue = 0; queue < maxq; queue++) {
queue             148 drivers/net/ethernet/stmicro/stmmac/stmmac_main.c 		struct stmmac_channel *ch = &priv->channel[queue];
queue             150 drivers/net/ethernet/stmicro/stmmac/stmmac_main.c 		if (queue < rx_queues_cnt)
queue             152 drivers/net/ethernet/stmicro/stmmac/stmmac_main.c 		if (queue < tx_queues_cnt)
queue             166 drivers/net/ethernet/stmicro/stmmac/stmmac_main.c 	u32 queue;
queue             168 drivers/net/ethernet/stmicro/stmmac/stmmac_main.c 	for (queue = 0; queue < maxq; queue++) {
queue             169 drivers/net/ethernet/stmicro/stmmac/stmmac_main.c 		struct stmmac_channel *ch = &priv->channel[queue];
queue             171 drivers/net/ethernet/stmicro/stmmac/stmmac_main.c 		if (queue < rx_queues_cnt)
queue             173 drivers/net/ethernet/stmicro/stmmac/stmmac_main.c 		if (queue < tx_queues_cnt)
queue             185 drivers/net/ethernet/stmicro/stmmac/stmmac_main.c 	u32 queue;
queue             187 drivers/net/ethernet/stmicro/stmmac/stmmac_main.c 	for (queue = 0; queue < tx_queues_cnt; queue++)
queue             188 drivers/net/ethernet/stmicro/stmmac/stmmac_main.c 		netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue));
queue             198 drivers/net/ethernet/stmicro/stmmac/stmmac_main.c 	u32 queue;
queue             200 drivers/net/ethernet/stmicro/stmmac/stmmac_main.c 	for (queue = 0; queue < tx_queues_cnt; queue++)
queue             201 drivers/net/ethernet/stmicro/stmmac/stmmac_main.c 		netif_tx_start_queue(netdev_get_tx_queue(priv->dev, queue));
queue             291 drivers/net/ethernet/stmicro/stmmac/stmmac_main.c static inline u32 stmmac_tx_avail(struct stmmac_priv *priv, u32 queue)
queue             293 drivers/net/ethernet/stmicro/stmmac/stmmac_main.c 	struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
queue             309 drivers/net/ethernet/stmicro/stmmac/stmmac_main.c static inline u32 stmmac_rx_dirty(struct stmmac_priv *priv, u32 queue)
queue             311 drivers/net/ethernet/stmicro/stmmac/stmmac_main.c 	struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
queue             331 drivers/net/ethernet/stmicro/stmmac/stmmac_main.c 	u32 queue;
queue             334 drivers/net/ethernet/stmicro/stmmac/stmmac_main.c 	for (queue = 0; queue < tx_cnt; queue++) {
queue             335 drivers/net/ethernet/stmicro/stmmac/stmmac_main.c 		struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
queue            1061 drivers/net/ethernet/stmicro/stmmac/stmmac_main.c 	u32 queue;
queue            1064 drivers/net/ethernet/stmicro/stmmac/stmmac_main.c 	for (queue = 0; queue < rx_cnt; queue++) {
queue            1065 drivers/net/ethernet/stmicro/stmmac/stmmac_main.c 		struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
queue            1067 drivers/net/ethernet/stmicro/stmmac/stmmac_main.c 		pr_info("\tRX Queue %u rings\n", queue);
queue            1083 drivers/net/ethernet/stmicro/stmmac/stmmac_main.c 	u32 queue;
queue            1086 drivers/net/ethernet/stmicro/stmmac/stmmac_main.c 	for (queue = 0; queue < tx_cnt; queue++) {
queue            1087 drivers/net/ethernet/stmicro/stmmac/stmmac_main.c 		struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
queue            1089 drivers/net/ethernet/stmicro/stmmac/stmmac_main.c 		pr_info("\tTX Queue %d rings\n", queue);
queue            1134 drivers/net/ethernet/stmicro/stmmac/stmmac_main.c static void stmmac_clear_rx_descriptors(struct stmmac_priv *priv, u32 queue)
queue            1136 drivers/net/ethernet/stmicro/stmmac/stmmac_main.c 	struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
queue            1160 drivers/net/ethernet/stmicro/stmmac/stmmac_main.c static void stmmac_clear_tx_descriptors(struct stmmac_priv *priv, u32 queue)
queue            1162 drivers/net/ethernet/stmicro/stmmac/stmmac_main.c 	struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
queue            1185 drivers/net/ethernet/stmicro/stmmac/stmmac_main.c 	u32 queue;
queue            1188 drivers/net/ethernet/stmicro/stmmac/stmmac_main.c 	for (queue = 0; queue < rx_queue_cnt; queue++)
queue            1189 drivers/net/ethernet/stmicro/stmmac/stmmac_main.c 		stmmac_clear_rx_descriptors(priv, queue);
queue            1192 drivers/net/ethernet/stmicro/stmmac/stmmac_main.c 	for (queue = 0; queue < tx_queue_cnt; queue++)
queue            1193 drivers/net/ethernet/stmicro/stmmac/stmmac_main.c 		stmmac_clear_tx_descriptors(priv, queue);
queue            1207 drivers/net/ethernet/stmicro/stmmac/stmmac_main.c 				  int i, gfp_t flags, u32 queue)
queue            1209 drivers/net/ethernet/stmicro/stmmac/stmmac_main.c 	struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
queue            1241 drivers/net/ethernet/stmicro/stmmac/stmmac_main.c static void stmmac_free_rx_buffer(struct stmmac_priv *priv, u32 queue, int i)
queue            1243 drivers/net/ethernet/stmicro/stmmac/stmmac_main.c 	struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
queue            1261 drivers/net/ethernet/stmicro/stmmac/stmmac_main.c static void stmmac_free_tx_buffer(struct stmmac_priv *priv, u32 queue, int i)
queue            1263 drivers/net/ethernet/stmicro/stmmac/stmmac_main.c 	struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
queue            1299 drivers/net/ethernet/stmicro/stmmac/stmmac_main.c 	int queue;
queue            1306 drivers/net/ethernet/stmicro/stmmac/stmmac_main.c 	for (queue = 0; queue < rx_count; queue++) {
queue            1307 drivers/net/ethernet/stmicro/stmmac/stmmac_main.c 		struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
queue            1313 drivers/net/ethernet/stmicro/stmmac/stmmac_main.c 		stmmac_clear_rx_descriptors(priv, queue);
queue            1324 drivers/net/ethernet/stmicro/stmmac/stmmac_main.c 						     queue);
queue            1346 drivers/net/ethernet/stmicro/stmmac/stmmac_main.c 	while (queue >= 0) {
queue            1348 drivers/net/ethernet/stmicro/stmmac/stmmac_main.c 			stmmac_free_rx_buffer(priv, queue, i);
queue            1350 drivers/net/ethernet/stmicro/stmmac/stmmac_main.c 		if (queue == 0)
queue            1354 drivers/net/ethernet/stmicro/stmmac/stmmac_main.c 		queue--;
queue            1371 drivers/net/ethernet/stmicro/stmmac/stmmac_main.c 	u32 queue;
queue            1374 drivers/net/ethernet/stmicro/stmmac/stmmac_main.c 	for (queue = 0; queue < tx_queue_cnt; queue++) {
queue            1375 drivers/net/ethernet/stmicro/stmmac/stmmac_main.c 		struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
queue            1411 drivers/net/ethernet/stmicro/stmmac/stmmac_main.c 		netdev_tx_reset_queue(netdev_get_tx_queue(priv->dev, queue));
queue            1449 drivers/net/ethernet/stmicro/stmmac/stmmac_main.c static void dma_free_rx_skbufs(struct stmmac_priv *priv, u32 queue)
queue            1454 drivers/net/ethernet/stmicro/stmmac/stmmac_main.c 		stmmac_free_rx_buffer(priv, queue, i);
queue            1462 drivers/net/ethernet/stmicro/stmmac/stmmac_main.c static void dma_free_tx_skbufs(struct stmmac_priv *priv, u32 queue)
queue            1467 drivers/net/ethernet/stmicro/stmmac/stmmac_main.c 		stmmac_free_tx_buffer(priv, queue, i);
queue            1477 drivers/net/ethernet/stmicro/stmmac/stmmac_main.c 	u32 queue;
queue            1480 drivers/net/ethernet/stmicro/stmmac/stmmac_main.c 	for (queue = 0; queue < rx_count; queue++) {
queue            1481 drivers/net/ethernet/stmicro/stmmac/stmmac_main.c 		struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
queue            1484 drivers/net/ethernet/stmicro/stmmac/stmmac_main.c 		dma_free_rx_skbufs(priv, queue);
queue            1509 drivers/net/ethernet/stmicro/stmmac/stmmac_main.c 	u32 queue;
queue            1512 drivers/net/ethernet/stmicro/stmmac/stmmac_main.c 	for (queue = 0; queue < tx_count; queue++) {
queue            1513 drivers/net/ethernet/stmicro/stmmac/stmmac_main.c 		struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
queue            1516 drivers/net/ethernet/stmicro/stmmac/stmmac_main.c 		dma_free_tx_skbufs(priv, queue);
queue            1545 drivers/net/ethernet/stmicro/stmmac/stmmac_main.c 	u32 queue;
queue            1548 drivers/net/ethernet/stmicro/stmmac/stmmac_main.c 	for (queue = 0; queue < rx_count; queue++) {
queue            1549 drivers/net/ethernet/stmicro/stmmac/stmmac_main.c 		struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
queue            1553 drivers/net/ethernet/stmicro/stmmac/stmmac_main.c 		rx_q->queue_index = queue;
queue            1614 drivers/net/ethernet/stmicro/stmmac/stmmac_main.c 	u32 queue;
queue            1617 drivers/net/ethernet/stmicro/stmmac/stmmac_main.c 	for (queue = 0; queue < tx_count; queue++) {
queue            1618 drivers/net/ethernet/stmicro/stmmac/stmmac_main.c 		struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
queue            1620 drivers/net/ethernet/stmicro/stmmac/stmmac_main.c 		tx_q->queue_index = queue;
queue            1702 drivers/net/ethernet/stmicro/stmmac/stmmac_main.c 	int queue;
queue            1705 drivers/net/ethernet/stmicro/stmmac/stmmac_main.c 	for (queue = 0; queue < rx_queues_count; queue++) {
queue            1706 drivers/net/ethernet/stmicro/stmmac/stmmac_main.c 		mode = priv->plat->rx_queues_cfg[queue].mode_to_use;
queue            1707 drivers/net/ethernet/stmicro/stmmac/stmmac_main.c 		stmmac_rx_queue_enable(priv, priv->hw, mode, queue);
queue            1870 drivers/net/ethernet/stmicro/stmmac/stmmac_main.c static int stmmac_tx_clean(struct stmmac_priv *priv, int budget, u32 queue)
queue            1872 drivers/net/ethernet/stmicro/stmmac/stmmac_main.c 	struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
queue            1876 drivers/net/ethernet/stmicro/stmmac/stmmac_main.c 	__netif_tx_lock_bh(netdev_get_tx_queue(priv->dev, queue));
queue            1950 drivers/net/ethernet/stmicro/stmmac/stmmac_main.c 	netdev_tx_completed_queue(netdev_get_tx_queue(priv->dev, queue),
queue            1954 drivers/net/ethernet/stmicro/stmmac/stmmac_main.c 								queue))) &&
queue            1955 drivers/net/ethernet/stmicro/stmmac/stmmac_main.c 	    stmmac_tx_avail(priv, queue) > STMMAC_TX_THRESH) {
queue            1959 drivers/net/ethernet/stmicro/stmmac/stmmac_main.c 		netif_tx_wake_queue(netdev_get_tx_queue(priv->dev, queue));
queue            1971 drivers/net/ethernet/stmicro/stmmac/stmmac_main.c 	__netif_tx_unlock_bh(netdev_get_tx_queue(priv->dev, queue));
queue            2245 drivers/net/ethernet/stmicro/stmmac/stmmac_main.c static void stmmac_tx_timer_arm(struct stmmac_priv *priv, u32 queue)
queue            2247 drivers/net/ethernet/stmicro/stmmac/stmmac_main.c 	struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
queue            2326 drivers/net/ethernet/stmicro/stmmac/stmmac_main.c 	u32 queue;
queue            2328 drivers/net/ethernet/stmicro/stmmac/stmmac_main.c 	for (queue = 0; queue < tx_queues_count; queue++) {
queue            2329 drivers/net/ethernet/stmicro/stmmac/stmmac_main.c 		weight = priv->plat->tx_queues_cfg[queue].weight;
queue            2330 drivers/net/ethernet/stmicro/stmmac/stmmac_main.c 		stmmac_set_mtl_tx_queue_weight(priv, priv->hw, weight, queue);
queue            2343 drivers/net/ethernet/stmicro/stmmac/stmmac_main.c 	u32 queue;
queue            2346 drivers/net/ethernet/stmicro/stmmac/stmmac_main.c 	for (queue = 1; queue < tx_queues_count; queue++) {
queue            2347 drivers/net/ethernet/stmicro/stmmac/stmmac_main.c 		mode_to_use = priv->plat->tx_queues_cfg[queue].mode_to_use;
queue            2352 drivers/net/ethernet/stmicro/stmmac/stmmac_main.c 				priv->plat->tx_queues_cfg[queue].send_slope,
queue            2353 drivers/net/ethernet/stmicro/stmmac/stmmac_main.c 				priv->plat->tx_queues_cfg[queue].idle_slope,
queue            2354 drivers/net/ethernet/stmicro/stmmac/stmmac_main.c 				priv->plat->tx_queues_cfg[queue].high_credit,
queue            2355 drivers/net/ethernet/stmicro/stmmac/stmmac_main.c 				priv->plat->tx_queues_cfg[queue].low_credit,
queue            2356 drivers/net/ethernet/stmicro/stmmac/stmmac_main.c 				queue);
queue            2368 drivers/net/ethernet/stmicro/stmmac/stmmac_main.c 	u32 queue;
queue            2371 drivers/net/ethernet/stmicro/stmmac/stmmac_main.c 	for (queue = 0; queue < rx_queues_count; queue++) {
queue            2372 drivers/net/ethernet/stmicro/stmmac/stmmac_main.c 		chan = priv->plat->rx_queues_cfg[queue].chan;
queue            2373 drivers/net/ethernet/stmicro/stmmac/stmmac_main.c 		stmmac_map_mtl_to_dma(priv, priv->hw, queue, chan);
queue            2385 drivers/net/ethernet/stmicro/stmmac/stmmac_main.c 	u32 queue;
queue            2388 drivers/net/ethernet/stmicro/stmmac/stmmac_main.c 	for (queue = 0; queue < rx_queues_count; queue++) {
queue            2389 drivers/net/ethernet/stmicro/stmmac/stmmac_main.c 		if (!priv->plat->rx_queues_cfg[queue].use_prio)
queue            2392 drivers/net/ethernet/stmicro/stmmac/stmmac_main.c 		prio = priv->plat->rx_queues_cfg[queue].prio;
queue            2393 drivers/net/ethernet/stmicro/stmmac/stmmac_main.c 		stmmac_rx_queue_prio(priv, priv->hw, prio, queue);
queue            2405 drivers/net/ethernet/stmicro/stmmac/stmmac_main.c 	u32 queue;
queue            2408 drivers/net/ethernet/stmicro/stmmac/stmmac_main.c 	for (queue = 0; queue < tx_queues_count; queue++) {
queue            2409 drivers/net/ethernet/stmicro/stmmac/stmmac_main.c 		if (!priv->plat->tx_queues_cfg[queue].use_prio)
queue            2412 drivers/net/ethernet/stmicro/stmmac/stmmac_main.c 		prio = priv->plat->tx_queues_cfg[queue].prio;
queue            2413 drivers/net/ethernet/stmicro/stmmac/stmmac_main.c 		stmmac_tx_queue_prio(priv, priv->hw, prio, queue);
queue            2425 drivers/net/ethernet/stmicro/stmmac/stmmac_main.c 	u32 queue;
queue            2428 drivers/net/ethernet/stmicro/stmmac/stmmac_main.c 	for (queue = 0; queue < rx_queues_count; queue++) {
queue            2430 drivers/net/ethernet/stmicro/stmmac/stmmac_main.c 		if (priv->plat->rx_queues_cfg[queue].pkt_route == 0x0)
queue            2433 drivers/net/ethernet/stmicro/stmmac/stmmac_main.c 		packet = priv->plat->rx_queues_cfg[queue].pkt_route;
queue            2434 drivers/net/ethernet/stmicro/stmmac/stmmac_main.c 		stmmac_rx_queue_routing(priv, priv->hw, packet, queue);
queue            2850 drivers/net/ethernet/stmicro/stmmac/stmmac_main.c 				 int total_len, bool last_segment, u32 queue)
queue            2852 drivers/net/ethernet/stmicro/stmmac/stmmac_main.c 	struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
queue            2916 drivers/net/ethernet/stmicro/stmmac/stmmac_main.c 	u32 queue = skb_get_queue_mapping(skb);
queue            2926 drivers/net/ethernet/stmicro/stmmac/stmmac_main.c 	tx_q = &priv->tx_queue[queue];
queue            2932 drivers/net/ethernet/stmicro/stmmac/stmmac_main.c 	if (unlikely(stmmac_tx_avail(priv, queue) <
queue            2934 drivers/net/ethernet/stmicro/stmmac/stmmac_main.c 		if (!netif_tx_queue_stopped(netdev_get_tx_queue(dev, queue))) {
queue            2936 drivers/net/ethernet/stmicro/stmmac/stmmac_main.c 								queue));
queue            3002 drivers/net/ethernet/stmicro/stmmac/stmmac_main.c 	stmmac_tso_allocator(priv, des, tmp_pay_len, (nfrags == 0), queue);
queue            3015 drivers/net/ethernet/stmicro/stmmac/stmmac_main.c 				     (i == nfrags - 1), queue);
queue            3032 drivers/net/ethernet/stmicro/stmmac/stmmac_main.c 		stmmac_tx_timer_arm(priv, queue);
queue            3047 drivers/net/ethernet/stmicro/stmmac/stmmac_main.c 	if (unlikely(stmmac_tx_avail(priv, queue) <= (MAX_SKB_FRAGS + 1))) {
queue            3050 drivers/net/ethernet/stmicro/stmmac/stmmac_main.c 		netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue));
queue            3104 drivers/net/ethernet/stmicro/stmmac/stmmac_main.c 	netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len);
queue            3107 drivers/net/ethernet/stmicro/stmmac/stmmac_main.c 	stmmac_set_tx_tail_ptr(priv, priv->ioaddr, tx_q->tx_tail_addr, queue);
queue            3108 drivers/net/ethernet/stmicro/stmmac/stmmac_main.c 	stmmac_tx_timer_arm(priv, queue);
queue            3132 drivers/net/ethernet/stmicro/stmmac/stmmac_main.c 	u32 queue = skb_get_queue_mapping(skb);
queue            3142 drivers/net/ethernet/stmicro/stmmac/stmmac_main.c 	tx_q = &priv->tx_queue[queue];
queue            3153 drivers/net/ethernet/stmicro/stmmac/stmmac_main.c 	if (unlikely(stmmac_tx_avail(priv, queue) < nfrags + 1)) {
queue            3154 drivers/net/ethernet/stmicro/stmmac/stmmac_main.c 		if (!netif_tx_queue_stopped(netdev_get_tx_queue(dev, queue))) {
queue            3156 drivers/net/ethernet/stmicro/stmmac/stmmac_main.c 								queue));
queue            3238 drivers/net/ethernet/stmicro/stmmac/stmmac_main.c 		stmmac_tx_timer_arm(priv, queue);
queue            3277 drivers/net/ethernet/stmicro/stmmac/stmmac_main.c 	if (unlikely(stmmac_tx_avail(priv, queue) <= (MAX_SKB_FRAGS + 1))) {
queue            3280 drivers/net/ethernet/stmicro/stmmac/stmmac_main.c 		netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue));
queue            3330 drivers/net/ethernet/stmicro/stmmac/stmmac_main.c 	netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len);
queue            3335 drivers/net/ethernet/stmicro/stmmac/stmmac_main.c 	stmmac_set_tx_tail_ptr(priv, priv->ioaddr, tx_q->tx_tail_addr, queue);
queue            3336 drivers/net/ethernet/stmicro/stmmac/stmmac_main.c 	stmmac_tx_timer_arm(priv, queue);
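
The stmmac_xmit() and stmmac_tx_clean() lines above form a flow-control handshake: the transmit path stops the software queue once the ring can no longer hold a worst-case frame (MAX_SKB_FRAGS + 1 descriptors), and the completion path wakes it when availability climbs back over STMMAC_TX_THRESH. A compact model of that handshake; the constants and the stopped flag are stand-ins for the real ring size and netif_tx_stop_queue()/netif_tx_wake_queue():

#include <stdbool.h>
#include <stdio.h>

/* Stop-on-full / wake-on-clean handshake, as in stmmac_main.c above. */
#define RING_SIZE	64
#define MAX_SKB_FRAGS	17
#define TX_THRESH	(RING_SIZE / 4)

static unsigned int cur_tx, dirty_tx;
static bool stopped;

static unsigned int tx_avail(void)
{
	return dirty_tx + RING_SIZE - cur_tx - 1;	/* ring math */
}

static int xmit_one(unsigned int ndesc)
{
	if (stopped)
		return -1;
	cur_tx += ndesc;			/* produce descriptors */
	if (tx_avail() <= MAX_SKB_FRAGS + 1) {
		stopped = true;			/* netif_tx_stop_queue() */
		printf("queue stopped, avail=%u\n", tx_avail());
	}
	return 0;
}

static void tx_clean(unsigned int ndesc)
{
	dirty_tx += ndesc;			/* reclaim descriptors */
	if (stopped && tx_avail() > TX_THRESH) {
		stopped = false;		/* netif_tx_wake_queue() */
		printf("queue woken, avail=%u\n", tx_avail());
	}
}

int main(void)
{
	while (xmit_one(8) == 0)
		;
	tx_clean(32);
	return 0;
}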
queue            3384 drivers/net/ethernet/stmicro/stmmac/stmmac_main.c static inline void stmmac_rx_refill(struct stmmac_priv *priv, u32 queue)
queue            3386 drivers/net/ethernet/stmicro/stmmac/stmmac_main.c 	struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
queue            3387 drivers/net/ethernet/stmicro/stmmac/stmmac_main.c 	int len, dirty = stmmac_rx_dirty(priv, queue);
queue            3445 drivers/net/ethernet/stmicro/stmmac/stmmac_main.c 	stmmac_set_rx_tail_ptr(priv, priv->ioaddr, rx_q->rx_tail_addr, queue);
queue            3456 drivers/net/ethernet/stmicro/stmmac/stmmac_main.c static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
queue            3458 drivers/net/ethernet/stmicro/stmmac/stmmac_main.c 	struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
queue            3459 drivers/net/ethernet/stmicro/stmmac/stmmac_main.c 	struct stmmac_channel *ch = &priv->channel[queue];
queue            3643 drivers/net/ethernet/stmicro/stmmac/stmmac_main.c 		skb_record_rx_queue(skb, queue);
queue            3658 drivers/net/ethernet/stmicro/stmmac/stmmac_main.c 	stmmac_rx_refill(priv, queue);
queue            3851 drivers/net/ethernet/stmicro/stmmac/stmmac_main.c 	u32 queue;
queue            3880 drivers/net/ethernet/stmicro/stmmac/stmmac_main.c 		for (queue = 0; queue < queues_count; queue++) {
queue            3881 drivers/net/ethernet/stmicro/stmmac/stmmac_main.c 			struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
queue            3884 drivers/net/ethernet/stmicro/stmmac/stmmac_main.c 								queue);
queue            3891 drivers/net/ethernet/stmicro/stmmac/stmmac_main.c 						       queue);
queue            4069 drivers/net/ethernet/stmicro/stmmac/stmmac_main.c 	u32 queue;
queue            4074 drivers/net/ethernet/stmicro/stmmac/stmmac_main.c 	for (queue = 0; queue < rx_count; queue++) {
queue            4075 drivers/net/ethernet/stmicro/stmmac/stmmac_main.c 		struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
queue            4077 drivers/net/ethernet/stmicro/stmmac/stmmac_main.c 		seq_printf(seq, "RX Queue %d:\n", queue);
queue            4090 drivers/net/ethernet/stmicro/stmmac/stmmac_main.c 	for (queue = 0; queue < tx_count; queue++) {
queue            4091 drivers/net/ethernet/stmicro/stmmac/stmmac_main.c 		struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
queue            4093 drivers/net/ethernet/stmicro/stmmac/stmmac_main.c 		seq_printf(seq, "TX Queue %d:\n", queue);
queue            4470 drivers/net/ethernet/stmicro/stmmac/stmmac_main.c 	u32 queue, rxq, maxq;
queue            4628 drivers/net/ethernet/stmicro/stmmac/stmmac_main.c 	for (queue = 0; queue < maxq; queue++) {
queue            4629 drivers/net/ethernet/stmicro/stmmac/stmmac_main.c 		struct stmmac_channel *ch = &priv->channel[queue];
queue            4632 drivers/net/ethernet/stmicro/stmmac/stmmac_main.c 		ch->index = queue;
queue            4634 drivers/net/ethernet/stmicro/stmmac/stmmac_main.c 		if (queue < priv->plat->rx_queues_to_use) {
queue            4638 drivers/net/ethernet/stmicro/stmmac/stmmac_main.c 		if (queue < priv->plat->tx_queues_to_use) {
queue            4700 drivers/net/ethernet/stmicro/stmmac/stmmac_main.c 	for (queue = 0; queue < maxq; queue++) {
queue            4701 drivers/net/ethernet/stmicro/stmmac/stmmac_main.c 		struct stmmac_channel *ch = &priv->channel[queue];
queue            4703 drivers/net/ethernet/stmicro/stmmac/stmmac_main.c 		if (queue < priv->plat->rx_queues_to_use)
queue            4705 drivers/net/ethernet/stmicro/stmmac/stmmac_main.c 		if (queue < priv->plat->tx_queues_to_use)
queue            4817 drivers/net/ethernet/stmicro/stmmac/stmmac_main.c 	u32 queue;
queue            4819 drivers/net/ethernet/stmicro/stmmac/stmmac_main.c 	for (queue = 0; queue < rx_cnt; queue++) {
queue            4820 drivers/net/ethernet/stmicro/stmmac/stmmac_main.c 		struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
queue            4826 drivers/net/ethernet/stmicro/stmmac/stmmac_main.c 	for (queue = 0; queue < tx_cnt; queue++) {
queue            4827 drivers/net/ethernet/stmicro/stmmac/stmmac_main.c 		struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
queue             135 drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c 	u8 queue = 0;
queue             176 drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c 		if (queue >= plat->rx_queues_to_use)
queue             180 drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c 			plat->rx_queues_cfg[queue].mode_to_use = MTL_QUEUE_DCB;
queue             182 drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c 			plat->rx_queues_cfg[queue].mode_to_use = MTL_QUEUE_AVB;
queue             184 drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c 			plat->rx_queues_cfg[queue].mode_to_use = MTL_QUEUE_DCB;
queue             187 drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c 					 &plat->rx_queues_cfg[queue].chan))
queue             188 drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c 			plat->rx_queues_cfg[queue].chan = queue;
queue             192 drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c 					&plat->rx_queues_cfg[queue].prio)) {
queue             193 drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c 			plat->rx_queues_cfg[queue].prio = 0;
queue             194 drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c 			plat->rx_queues_cfg[queue].use_prio = false;
queue             196 drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c 			plat->rx_queues_cfg[queue].use_prio = true;
queue             201 drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c 			plat->rx_queues_cfg[queue].pkt_route = PACKET_AVCPQ;
queue             203 drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c 			plat->rx_queues_cfg[queue].pkt_route = PACKET_PTPQ;
queue             205 drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c 			plat->rx_queues_cfg[queue].pkt_route = PACKET_DCBCPQ;
queue             207 drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c 			plat->rx_queues_cfg[queue].pkt_route = PACKET_UPQ;
queue             209 drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c 			plat->rx_queues_cfg[queue].pkt_route = PACKET_MCBCQ;
queue             211 drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c 			plat->rx_queues_cfg[queue].pkt_route = 0x0;
queue             213 drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c 		queue++;
queue             215 drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c 	if (queue != plat->rx_queues_to_use) {
queue             237 drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c 	queue = 0;
queue             241 drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c 		if (queue >= plat->tx_queues_to_use)
queue             245 drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c 					 &plat->tx_queues_cfg[queue].weight))
queue             246 drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c 			plat->tx_queues_cfg[queue].weight = 0x10 + queue;
queue             249 drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c 			plat->tx_queues_cfg[queue].mode_to_use = MTL_QUEUE_DCB;
queue             252 drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c 			plat->tx_queues_cfg[queue].mode_to_use = MTL_QUEUE_AVB;
queue             256 drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c 				&plat->tx_queues_cfg[queue].send_slope))
queue             257 drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c 				plat->tx_queues_cfg[queue].send_slope = 0x0;
queue             259 drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c 				&plat->tx_queues_cfg[queue].idle_slope))
queue             260 drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c 				plat->tx_queues_cfg[queue].idle_slope = 0x0;
queue             262 drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c 				&plat->tx_queues_cfg[queue].high_credit))
queue             263 drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c 				plat->tx_queues_cfg[queue].high_credit = 0x0;
queue             265 drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c 				&plat->tx_queues_cfg[queue].low_credit))
queue             266 drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c 				plat->tx_queues_cfg[queue].low_credit = 0x0;
queue             268 drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c 			plat->tx_queues_cfg[queue].mode_to_use = MTL_QUEUE_DCB;
queue             272 drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c 					&plat->tx_queues_cfg[queue].prio)) {
queue             273 drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c 			plat->tx_queues_cfg[queue].prio = 0;
queue             274 drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c 			plat->tx_queues_cfg[queue].use_prio = false;
queue             276 drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c 			plat->tx_queues_cfg[queue].use_prio = true;
queue             279 drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c 		queue++;
queue             281 drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c 	if (queue != plat->tx_queues_to_use) {
queue            1641 drivers/net/ethernet/stmicro/stmmac/stmmac_selftests.c static int __stmmac_test_jumbo(struct stmmac_priv *priv, u16 queue)
queue            1648 drivers/net/ethernet/stmicro/stmmac/stmmac_selftests.c 	attr.queue_mapping = queue;
queue             313 drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c 	u32 queue = qopt->queue;
queue             320 drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c 	if (queue <= 0 || queue >= tx_queues_count)
queue             327 drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c 	mode_to_use = priv->plat->tx_queues_cfg[queue].mode_to_use;
queue             329 drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c 		ret = stmmac_dma_qmode(priv, priv->ioaddr, queue, MTL_QUEUE_AVB);
queue             333 drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c 		priv->plat->tx_queues_cfg[queue].mode_to_use = MTL_QUEUE_AVB;
queue             335 drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c 		return stmmac_dma_qmode(priv, priv->ioaddr, queue, MTL_QUEUE_DCB);
queue             344 drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c 	priv->plat->tx_queues_cfg[queue].idle_slope = value & GENMASK(31, 0);
queue             347 drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c 	priv->plat->tx_queues_cfg[queue].send_slope = value & GENMASK(31, 0);
queue             350 drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c 	priv->plat->tx_queues_cfg[queue].high_credit = value & GENMASK(31, 0);
queue             353 drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c 	priv->plat->tx_queues_cfg[queue].low_credit = value & GENMASK(31, 0);
queue             356 drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c 				priv->plat->tx_queues_cfg[queue].send_slope,
queue             357 drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c 				priv->plat->tx_queues_cfg[queue].idle_slope,
queue             358 drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c 				priv->plat->tx_queues_cfg[queue].high_credit,
queue             359 drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c 				priv->plat->tx_queues_cfg[queue].low_credit,
queue             360 drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c 				queue);
queue             365 drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c 			queue, qopt->sendslope, qopt->idleslope,
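
The stmmac_tc.c hits store each computed CBS parameter (idle_slope, send_slope, high_credit, low_credit) masked with GENMASK(31, 0), i.e. only the low 32 bits of a 64-bit intermediate are kept. A sketch of just that masking step, with GENMASK redefined locally; the slope arithmetic itself is not reproduced here:

	#include <stdint.h>
	#include <stdio.h>

	/* local 64-bit GENMASK analogue, bits h..l set */
	#define GENMASK(h, l) (((~0ULL) << (l)) & (~0ULL >> (63 - (h))))

	int main(void)
	{
		int64_t value = -42;            /* e.g. a negative credit value */
		uint32_t low_credit = value & GENMASK(31, 0);

		printf("0x%08x\n", low_credit); /* two's-complement low word */
		return 0;
	}
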
queue             574 drivers/net/ethernet/synopsys/dwc-xlgmac-hw.c 				   unsigned int queue)
queue             585 drivers/net/ethernet/synopsys/dwc-xlgmac-hw.c 		rx_status = readl(XLGMAC_MTL_REG(pdata, queue, MTL_Q_RQDR));
queue             599 drivers/net/ethernet/synopsys/dwc-xlgmac-hw.c 			    queue);
queue            1507 drivers/net/ethernet/synopsys/dwc-xlgmac-hw.c 	unsigned int qptc, qptc_extra, queue;
queue            1518 drivers/net/ethernet/synopsys/dwc-xlgmac-hw.c 	for (i = 0, queue = 0; i < pdata->hw_feat.tc_cnt; i++) {
queue            1521 drivers/net/ethernet/synopsys/dwc-xlgmac-hw.c 				  "TXq%u mapped to TC%u\n", queue, i);
queue            1522 drivers/net/ethernet/synopsys/dwc-xlgmac-hw.c 			regval = readl(XLGMAC_MTL_REG(pdata, queue,
queue            1528 drivers/net/ethernet/synopsys/dwc-xlgmac-hw.c 			writel(regval, XLGMAC_MTL_REG(pdata, queue,
queue            1530 drivers/net/ethernet/synopsys/dwc-xlgmac-hw.c 			queue++;
queue            1535 drivers/net/ethernet/synopsys/dwc-xlgmac-hw.c 				  "TXq%u mapped to TC%u\n", queue, i);
queue            1536 drivers/net/ethernet/synopsys/dwc-xlgmac-hw.c 			regval = readl(XLGMAC_MTL_REG(pdata, queue,
queue            1542 drivers/net/ethernet/synopsys/dwc-xlgmac-hw.c 			writel(regval, XLGMAC_MTL_REG(pdata, queue,
queue            1544 drivers/net/ethernet/synopsys/dwc-xlgmac-hw.c 			queue++;
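
The dwc-xlgmac-hw.c lines distribute TX queues over traffic classes: qptc full shares per TC, with qptc_extra leftover queues spread across the first classes (hence the two "TXq%u mapped to TC%u" blocks). A hedged reconstruction of the counting logic only; the queue and TC counts below are made up:

	#include <stdio.h>

	int main(void)
	{
		unsigned int tx_q_count = 10, tc_cnt = 4;   /* assumed counts */
		unsigned int qptc = tx_q_count / tc_cnt;
		unsigned int qptc_extra = tx_q_count % tc_cnt;
		unsigned int i, j, queue = 0;

		for (i = 0; i < tc_cnt; i++) {
			for (j = 0; j < qptc; j++)
				printf("TXq%u mapped to TC%u\n", queue++, i);
			if (i < qptc_extra)     /* one leftover queue for this TC */
				printf("TXq%u mapped to TC%u\n", queue++, i);
		}
		return 0;
	}
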
queue             537 drivers/net/ethernet/ti/cpmac.c 	int queue;
queue             549 drivers/net/ethernet/ti/cpmac.c 	queue = skb_get_queue_mapping(skb);
queue             550 drivers/net/ethernet/ti/cpmac.c 	netif_stop_subqueue(dev, queue);
queue             552 drivers/net/ethernet/ti/cpmac.c 	desc = &priv->desc_ring[queue];
queue             575 drivers/net/ethernet/ti/cpmac.c 	cpmac_write(priv->regs, CPMAC_TX_PTR(queue), (u32)desc->mapping);
queue             580 drivers/net/ethernet/ti/cpmac.c static void cpmac_end_xmit(struct net_device *dev, int queue)
queue             585 drivers/net/ethernet/ti/cpmac.c 	desc = &priv->desc_ring[queue];
queue             586 drivers/net/ethernet/ti/cpmac.c 	cpmac_write(priv->regs, CPMAC_TX_ACK(queue), (u32)desc->mapping);
queue             601 drivers/net/ethernet/ti/cpmac.c 		if (__netif_subqueue_stopped(dev, queue))
queue             602 drivers/net/ethernet/ti/cpmac.c 			netif_wake_subqueue(dev, queue);
queue             606 drivers/net/ethernet/ti/cpmac.c 		if (__netif_subqueue_stopped(dev, queue))
queue             607 drivers/net/ethernet/ti/cpmac.c 			netif_wake_subqueue(dev, queue);
queue             771 drivers/net/ethernet/ti/cpmac.c 	int queue;
queue             785 drivers/net/ethernet/ti/cpmac.c 		queue = (status >> 8) & 7;
queue             787 drivers/net/ethernet/ti/cpmac.c 			cpmac_write(priv->regs, CPMAC_RX_INT_CLEAR, 1 << queue);
queue            1541 drivers/net/ethernet/ti/cpsw.c 	tc = netdev_txq_to_tc(priv->ndev, qopt->queue);
queue            2218 drivers/net/ethernet/ti/cpsw.c static int cpsw_ndo_set_tx_maxrate(struct net_device *ndev, int queue, u32 rate)
queue            2227 drivers/net/ethernet/ti/cpsw.c 	ch_rate = netdev_get_tx_queue(ndev, queue)->tx_maxrate;
queue            2250 drivers/net/ethernet/ti/cpsw.c 	ret = cpdma_chan_set_rate(cpsw->txv[queue].ch, ch_rate);
queue            2262 drivers/net/ethernet/ti/cpsw.c 		netdev_get_tx_queue(slave->ndev, queue)->tx_maxrate = rate;
queue             536 drivers/net/ethernet/ti/cpsw_ethtool.c 	struct netdev_queue *queue;
queue             553 drivers/net/ethernet/ti/cpsw_ethtool.c 		queue = netdev_get_tx_queue(priv->ndev, *ch);
queue             554 drivers/net/ethernet/ti/cpsw_ethtool.c 		queue->tx_maxrate = 0;
queue             612 drivers/net/ethernet/xscale/ixp4xx_eth.c static inline int queue_get_desc(unsigned int queue, struct port *port,
queue             618 drivers/net/ethernet/xscale/ixp4xx_eth.c 	if (!(phys = qmgr_get_entry(queue)))
queue             631 drivers/net/ethernet/xscale/ixp4xx_eth.c static inline void queue_put_desc(unsigned int queue, u32 phys,
queue             636 drivers/net/ethernet/xscale/ixp4xx_eth.c 	qmgr_put_entry(queue, phys);
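
The ixp4xx_eth.c hits above (and the matching ixp4xx_hss.c helpers further down) share one pattern: qmgr_get_entry() pops a physical-address token from a hardware queue, with 0 meaning empty, and qmgr_put_entry() pushes one back. A user-space ring analogue; qmgr_get_entry()/qmgr_put_entry() here are local stand-ins, not the qmgr API:

	#include <stdint.h>
	#include <stdio.h>

	#define QLEN 8
	static uint32_t q[QLEN];
	static unsigned int head, tail;

	static void qmgr_put_entry(uint32_t phys) { q[tail++ % QLEN] = phys; }

	static uint32_t qmgr_get_entry(void)
	{
		return (head == tail) ? 0 : q[head++ % QLEN];
	}

	static int queue_get_desc(void)
	{
		uint32_t phys = qmgr_get_entry();

		if (!phys)               /* same shape as the driver's early-out */
			return -1;
		return (int)(phys & 0xff);   /* fake descriptor index */
	}

	int main(void)
	{
		qmgr_put_entry(0x1000 | 3);
		printf("desc %d\n", queue_get_desc());   /* prints "desc 3" */
		printf("desc %d\n", queue_get_desc());   /* prints "desc -1" */
		return 0;
	}
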
queue             274 drivers/net/fddi/skfp/fplustm.c 	struct s_smt_rx_queue	*queue ;
queue             279 drivers/net/fddi/skfp/fplustm.c 	smc->hw.fp.rx[QUEUE_R1] = queue = &smc->hw.fp.rx_q[QUEUE_R1] ;
queue             280 drivers/net/fddi/skfp/fplustm.c 	queue->rx_bmu_ctl = (HW_PTR) ADDR(B0_R1_CSR) ;
queue             281 drivers/net/fddi/skfp/fplustm.c 	queue->rx_bmu_dsc = (HW_PTR) ADDR(B4_R1_DA) ;
queue             286 drivers/net/fddi/skfp/fplustm.c 	smc->hw.fp.rx[QUEUE_R2] = queue = &smc->hw.fp.rx_q[QUEUE_R2] ;
queue             287 drivers/net/fddi/skfp/fplustm.c 	queue->rx_bmu_ctl = (HW_PTR) ADDR(B0_R2_CSR) ;
queue             288 drivers/net/fddi/skfp/fplustm.c 	queue->rx_bmu_dsc = (HW_PTR) ADDR(B4_R2_DA) ;
queue             304 drivers/net/fddi/skfp/fplustm.c 	struct s_smt_tx_queue	*queue ;
queue             309 drivers/net/fddi/skfp/fplustm.c 	smc->hw.fp.tx[QUEUE_S] = queue = &smc->hw.fp.tx_q[QUEUE_S] ;
queue             310 drivers/net/fddi/skfp/fplustm.c 	queue->tx_bmu_ctl = (HW_PTR) ADDR(B0_XS_CSR) ;
queue             311 drivers/net/fddi/skfp/fplustm.c 	queue->tx_bmu_dsc = (HW_PTR) ADDR(B5_XS_DA) ;
queue             320 drivers/net/fddi/skfp/fplustm.c 	smc->hw.fp.tx[QUEUE_A0] = queue = &smc->hw.fp.tx_q[QUEUE_A0] ;
queue             321 drivers/net/fddi/skfp/fplustm.c 	queue->tx_bmu_ctl = (HW_PTR) ADDR(B0_XA_CSR) ;
queue             322 drivers/net/fddi/skfp/fplustm.c 	queue->tx_bmu_dsc = (HW_PTR) ADDR(B5_XA_DA) ;
queue             586 drivers/net/fddi/skfp/fplustm.c void enable_tx_irq(struct s_smc *smc, u_short queue)
queue             593 drivers/net/fddi/skfp/fplustm.c 	if (queue == 0) {
queue             596 drivers/net/fddi/skfp/fplustm.c 	if (queue == 1) {
queue             621 drivers/net/fddi/skfp/fplustm.c void disable_tx_irq(struct s_smc *smc, u_short queue)
queue             628 drivers/net/fddi/skfp/fplustm.c 	if (queue == 0) {
queue             631 drivers/net/fddi/skfp/fplustm.c 	if (queue == 1) {
queue             242 drivers/net/fddi/skfp/h/hwmtm.h #define	HWM_GET_TX_USED(smc,queue)	(int) (smc)->hw.fp.tx_q[queue].tx_used
queue             260 drivers/net/fddi/skfp/h/hwmtm.h #define	HWM_GET_CURR_TXD(smc,queue)	(struct s_smt_fp_txd volatile *)\
queue             261 drivers/net/fddi/skfp/h/hwmtm.h 					(smc)->hw.fp.tx_q[queue].tx_curr_put
queue              81 drivers/net/fddi/skfp/hwmtm.c static u_long repair_txd_ring(struct s_smc *smc, struct s_smt_tx_queue *queue);
queue              82 drivers/net/fddi/skfp/hwmtm.c static u_long repair_rxd_ring(struct s_smc *smc, struct s_smt_rx_queue *queue);
queue             354 drivers/net/fddi/skfp/hwmtm.c 	struct s_smt_tx_queue *queue ;
queue             362 drivers/net/fddi/skfp/hwmtm.c 	queue = smc->hw.fp.tx[QUEUE_A0] ;
queue             368 drivers/net/fddi/skfp/hwmtm.c 	queue->tx_curr_put = queue->tx_curr_get = ds ;
queue             370 drivers/net/fddi/skfp/hwmtm.c 	queue->tx_free = HWM_ASYNC_TXD_COUNT ;
queue             371 drivers/net/fddi/skfp/hwmtm.c 	queue->tx_used = 0 ;
queue             376 drivers/net/fddi/skfp/hwmtm.c 	queue = smc->hw.fp.tx[QUEUE_S] ;
queue             382 drivers/net/fddi/skfp/hwmtm.c 	queue->tx_curr_put = queue->tx_curr_get = ds ;
queue             383 drivers/net/fddi/skfp/hwmtm.c 	queue->tx_free = HWM_SYNC_TXD_COUNT ;
queue             384 drivers/net/fddi/skfp/hwmtm.c 	queue->tx_used = 0 ;
queue             391 drivers/net/fddi/skfp/hwmtm.c 	struct s_smt_rx_queue *queue ;
queue             398 drivers/net/fddi/skfp/hwmtm.c 	queue = smc->hw.fp.rx[QUEUE_R1] ;
queue             404 drivers/net/fddi/skfp/hwmtm.c 	queue->rx_curr_put = queue->rx_curr_get = ds ;
queue             405 drivers/net/fddi/skfp/hwmtm.c 	queue->rx_free = SMT_R1_RXD_COUNT ;
queue             406 drivers/net/fddi/skfp/hwmtm.c 	queue->rx_used = 0 ;
queue             582 drivers/net/fddi/skfp/hwmtm.c static u_long repair_txd_ring(struct s_smc *smc, struct s_smt_tx_queue *queue)
queue             592 drivers/net/fddi/skfp/hwmtm.c 	t = queue->tx_curr_get ;
queue             593 drivers/net/fddi/skfp/hwmtm.c 	tx_used = queue->tx_used ;
queue             594 drivers/net/fddi/skfp/hwmtm.c 	for (i = tx_used+queue->tx_free-1 ; i ; i-- ) {
queue             599 drivers/net/fddi/skfp/hwmtm.c 	t = queue->tx_curr_get ;
queue             634 drivers/net/fddi/skfp/hwmtm.c static u_long repair_rxd_ring(struct s_smc *smc, struct s_smt_rx_queue *queue)
queue             644 drivers/net/fddi/skfp/hwmtm.c 	r = queue->rx_curr_get ;
queue             645 drivers/net/fddi/skfp/hwmtm.c 	rx_used = queue->rx_used ;
queue             651 drivers/net/fddi/skfp/hwmtm.c 	r = queue->rx_curr_get ;
queue            1043 drivers/net/fddi/skfp/hwmtm.c 	struct s_smt_rx_queue *queue ;	/* points to the queue ctl struct */
queue            1056 drivers/net/fddi/skfp/hwmtm.c 	queue = smc->hw.fp.rx[QUEUE_R1] ;
queue            1059 drivers/net/fddi/skfp/hwmtm.c 		r = queue->rx_curr_get ;
queue            1060 drivers/net/fddi/skfp/hwmtm.c 		rx_used = queue->rx_used ;
queue            1152 drivers/net/fddi/skfp/hwmtm.c 		rxd = queue->rx_curr_get ;
queue            1153 drivers/net/fddi/skfp/hwmtm.c 		queue->rx_curr_get = r ;
queue            1154 drivers/net/fddi/skfp/hwmtm.c 		queue->rx_free += frag_count ;
queue            1155 drivers/net/fddi/skfp/hwmtm.c 		queue->rx_used = rx_used ;
queue            1357 drivers/net/fddi/skfp/hwmtm.c 		DB_RX(3, "next RxD is %p", queue->rx_curr_get);
queue            1358 drivers/net/fddi/skfp/hwmtm.c 		NDD_TRACE("RHx1",queue->rx_curr_get,0,0) ;
queue            1366 drivers/net/fddi/skfp/hwmtm.c 		DB_RX(3, "next RxD is %p", queue->rx_curr_get);
queue            1367 drivers/net/fddi/skfp/hwmtm.c 		NDD_TRACE("RHx2",queue->rx_curr_get,0,0) ;
queue            1463 drivers/net/fddi/skfp/hwmtm.c 	struct s_smt_rx_queue *queue ;
queue            1473 drivers/net/fddi/skfp/hwmtm.c 	queue = smc->hw.fp.rx[QUEUE_R1] ;
queue            1479 drivers/net/fddi/skfp/hwmtm.c 	r = queue->rx_curr_get ;
queue            1480 drivers/net/fddi/skfp/hwmtm.c 	while (queue->rx_used) {
queue            1488 drivers/net/fddi/skfp/hwmtm.c 		while (r != queue->rx_curr_put &&
queue            1500 drivers/net/fddi/skfp/hwmtm.c 		for (r=queue->rx_curr_get,i=frag_count; i ; r=r->rxd_next,i--){
queue            1506 drivers/net/fddi/skfp/hwmtm.c 		      queue->rx_curr_get, frag_count);
queue            1507 drivers/net/fddi/skfp/hwmtm.c 		mac_drv_clear_rxd(smc,queue->rx_curr_get,frag_count) ;
queue            1509 drivers/net/fddi/skfp/hwmtm.c 		queue->rx_curr_get = next_rxd ;
queue            1510 drivers/net/fddi/skfp/hwmtm.c 		queue->rx_used -= frag_count ;
queue            1511 drivers/net/fddi/skfp/hwmtm.c 		queue->rx_free += frag_count ;
queue            1628 drivers/net/fddi/skfp/hwmtm.c 	struct s_smt_tx_queue *queue ;
queue            1631 drivers/net/fddi/skfp/hwmtm.c 	queue = smc->os.hwm.tx_p ;
queue            1639 drivers/net/fddi/skfp/hwmtm.c 	t = queue->tx_curr_put ;
queue            1655 drivers/net/fddi/skfp/hwmtm.c 		outpd(queue->tx_bmu_ctl,CSR_START) ;
queue            1665 drivers/net/fddi/skfp/hwmtm.c 		queue->tx_free-- ;
queue            1666 drivers/net/fddi/skfp/hwmtm.c 		queue->tx_used++ ;
queue            1667 drivers/net/fddi/skfp/hwmtm.c 		queue->tx_curr_put = t->txd_next ;
queue            1723 drivers/net/fddi/skfp/hwmtm.c 	NDD_TRACE("THfE",t,queue->tx_free,0) ;
queue            1812 drivers/net/fddi/skfp/hwmtm.c 	struct s_smt_tx_queue *queue ;
queue            1848 drivers/net/fddi/skfp/hwmtm.c 	queue = smc->hw.fp.tx[QUEUE_A0] ;
queue            1859 drivers/net/fddi/skfp/hwmtm.c 	if (!smc->hw.mac_ring_is_up || frag_count > queue->tx_free) {
queue            1877 drivers/net/fddi/skfp/hwmtm.c 		t = queue->tx_curr_put ;
queue            1896 drivers/net/fddi/skfp/hwmtm.c 			outpd(queue->tx_bmu_ctl,CSR_START) ;
queue            1902 drivers/net/fddi/skfp/hwmtm.c 			queue->tx_curr_put = t = t->txd_next ;
queue            1903 drivers/net/fddi/skfp/hwmtm.c 			queue->tx_free-- ;
queue            1904 drivers/net/fddi/skfp/hwmtm.c 			queue->tx_used++ ;
queue            1920 drivers/net/fddi/skfp/hwmtm.c 	NDD_TRACE("THSE",t,queue->tx_free,frag_count) ;
queue            1939 drivers/net/fddi/skfp/hwmtm.c 	struct s_smt_tx_queue *queue ;
queue            1950 drivers/net/fddi/skfp/hwmtm.c 		queue = smc->hw.fp.tx[i] ;
queue            1951 drivers/net/fddi/skfp/hwmtm.c 		t1 = queue->tx_curr_get ;
queue            1962 drivers/net/fddi/skfp/hwmtm.c 				if (tbctrl & BMU_OWN || !queue->tx_used){
queue            1970 drivers/net/fddi/skfp/hwmtm.c 			t1 = queue->tx_curr_get ;
queue            1991 drivers/net/fddi/skfp/hwmtm.c 				      queue->tx_curr_get);
queue            1992 drivers/net/fddi/skfp/hwmtm.c 				mac_drv_tx_complete(smc,queue->tx_curr_get) ;
queue            1995 drivers/net/fddi/skfp/hwmtm.c 			queue->tx_curr_get = t1 ;
queue            1996 drivers/net/fddi/skfp/hwmtm.c 			queue->tx_free += frag_count ;
queue            1997 drivers/net/fddi/skfp/hwmtm.c 			queue->tx_used -= frag_count ;
queue            2030 drivers/net/fddi/skfp/hwmtm.c 	struct s_smt_tx_queue *queue ;
queue            2041 drivers/net/fddi/skfp/hwmtm.c 		queue = smc->hw.fp.tx[i] ;
queue            2047 drivers/net/fddi/skfp/hwmtm.c 		t = queue->tx_curr_get ;
queue            2048 drivers/net/fddi/skfp/hwmtm.c 		tx_used = queue->tx_used ;
queue            2065 drivers/net/fddi/skfp/hwmtm.c 		queue = smc->hw.fp.tx[i] ;
queue            2066 drivers/net/fddi/skfp/hwmtm.c 		t = queue->tx_curr_get ;
queue            2080 drivers/net/fddi/skfp/hwmtm.c 		queue->tx_curr_put = queue->tx_curr_get->txd_next ;
queue            2081 drivers/net/fddi/skfp/hwmtm.c 		queue->tx_curr_get = queue->tx_curr_put ;
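
Throughout the skfp hwmtm.c hits the descriptor queues keep four pieces of state moving in lockstep: a put cursor, a get cursor, and free/used counters (produce: tx_free--, tx_used++, put = put->txd_next; complete: get advances by frag_count, tx_free += frag_count, tx_used -= frag_count). A minimal circular-ring sketch of that accounting; the txd struct is a stand-in:

	#include <stdio.h>

	#define RING 4

	struct txd { struct txd *next; int id; };

	static struct txd ring[RING];
	static struct txd *curr_put, *curr_get;
	static int tx_free = RING, tx_used;

	static int produce(void)
	{
		if (!tx_free)
			return -1;
		printf("queued txd %d\n", curr_put->id);
		curr_put = curr_put->next;   /* advance put cursor */
		tx_free--; tx_used++;
		return 0;
	}

	static void complete(int frag_count)
	{
		while (frag_count--) {
			printf("completed txd %d\n", curr_get->id);
			curr_get = curr_get->next;
			tx_free++; tx_used--;
		}
	}

	int main(void)
	{
		int i;

		for (i = 0; i < RING; i++) {   /* link descriptors into a ring */
			ring[i].next = &ring[(i + 1) % RING];
			ring[i].id = i;
		}
		curr_put = curr_get = ring;    /* both cursors start together */
		produce(); produce();
		complete(2);
		return 0;
	}
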
queue             146 drivers/net/fddi/skfp/skfddi.c extern void enable_tx_irq(struct s_smc *smc, u_short queue);
queue            1105 drivers/net/fddi/skfp/skfddi.c 	int queue;
queue            1124 drivers/net/fddi/skfp/skfddi.c 		queue = (fc & FC_SYNC_BIT) ? QUEUE_S : QUEUE_A0;
queue            1142 drivers/net/fddi/skfp/skfddi.c 		frame_status = hwm_tx_init(smc, fc, 1, skb->len, queue);
queue            1170 drivers/net/fddi/skfp/skfddi.c 		txd = (struct s_smt_fp_txd *) HWM_GET_CURR_TXD(smc, queue);
queue             800 drivers/net/fjes/fjes_main.c 	struct netdev_queue *queue = netdev_get_tx_queue(netdev, 0);
queue             802 drivers/net/fjes/fjes_main.c 	netif_tx_wake_queue(queue);
queue              48 drivers/net/hyperv/netvsc_trace.h 	       __field(	 u16,  queue	   )
queue              55 drivers/net/hyperv/netvsc_trace.h 	       __entry->queue	 = q;
queue              61 drivers/net/hyperv/netvsc_trace.h 		 __get_str(name), __entry->queue, __entry->req_id,
queue             255 drivers/net/tap.c 	struct tap_queue *queue = NULL;
queue             273 drivers/net/tap.c 		queue = rcu_dereference(tap->taps[rxq % numvtaps]);
queue             283 drivers/net/tap.c 		queue = rcu_dereference(tap->taps[rxq]);
queue             288 drivers/net/tap.c 	queue = rcu_dereference(tap->taps[0]);
queue             290 drivers/net/tap.c 	return queue;
queue             273 drivers/net/tun.c 	struct sk_buff_head *queue = &tfile->sk.sk_write_queue;
queue             280 drivers/net/tun.c 	spin_lock(&queue->lock);
queue             281 drivers/net/tun.c 	skb_queue_splice_tail_init(queue, &process_queue);
queue             282 drivers/net/tun.c 	spin_unlock(&queue->lock);
queue             290 drivers/net/tun.c 		spin_lock(&queue->lock);
queue             291 drivers/net/tun.c 		skb_queue_splice(&process_queue, queue);
queue             292 drivers/net/tun.c 		spin_unlock(&queue->lock);
queue            1545 drivers/net/tun.c 	struct sk_buff_head *queue = &tfile->sk.sk_write_queue;
queue            1550 drivers/net/tun.c 	if (!rx_batched || (!more && skb_queue_empty(queue))) {
queue            1558 drivers/net/tun.c 	spin_lock(&queue->lock);
queue            1559 drivers/net/tun.c 	if (!more || skb_queue_len(queue) == rx_batched) {
queue            1561 drivers/net/tun.c 		skb_queue_splice_tail_init(queue, &process_queue);
queue            1564 drivers/net/tun.c 		__skb_queue_tail(queue, skb);
queue            1566 drivers/net/tun.c 	spin_unlock(&queue->lock);
queue            1993 drivers/net/tun.c 		struct sk_buff_head *queue = &tfile->sk.sk_write_queue;
queue            1996 drivers/net/tun.c 		spin_lock_bh(&queue->lock);
queue            1997 drivers/net/tun.c 		__skb_queue_tail(queue, skb);
queue            1998 drivers/net/tun.c 		queue_len = skb_queue_len(queue);
queue            1999 drivers/net/tun.c 		spin_unlock(&queue->lock);
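
The tun.c hits show the RX batching policy: under the queue lock, a packet is either appended to sk_write_queue or, when `more` is false or the queue already holds rx_batched entries, the whole queue is spliced to a local list and flushed outside the lock. A single-threaded sketch of the flush decision only; locking is omitted and the queue is a plain counter:

	#include <stdbool.h>
	#include <stdio.h>

	static int queue_len;
	static const int rx_batched = 4;   /* assumed batch size */

	static void flush(void)
	{
		/* splice-and-process stand-in; current packet flushes too */
		printf("flushing %d packet(s)\n", queue_len + 1);
		queue_len = 0;
	}

	static void rx_one(bool more)
	{
		if (!more || queue_len == rx_batched)
			flush();              /* batch full or end of a burst */
		else
			queue_len++;          /* keep accumulating */
	}

	int main(void)
	{
		for (int i = 0; i < 6; i++)
			rx_one(i < 5);        /* 5 "more" packets, then a tail */
		return 0;
	}
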
queue            2901 drivers/net/virtio_net.c static ssize_t mergeable_rx_buffer_size_show(struct netdev_rx_queue *queue,
queue            2904 drivers/net/virtio_net.c 	struct virtnet_info *vi = netdev_priv(queue->dev);
queue            2905 drivers/net/virtio_net.c 	unsigned int queue_index = get_netdev_rx_queue_index(queue);
queue             581 drivers/net/wan/farsync.c fst_q_work_item(u64 * queue, int card_index)
queue             598 drivers/net/wan/farsync.c 	*queue |= mask;
queue             579 drivers/net/wan/ixp4xx_hss.c static inline int queue_get_desc(unsigned int queue, struct port *port,
queue             585 drivers/net/wan/ixp4xx_hss.c 	if (!(phys = qmgr_get_entry(queue)))
queue             598 drivers/net/wan/ixp4xx_hss.c static inline void queue_put_desc(unsigned int queue, u32 phys,
queue             603 drivers/net/wan/ixp4xx_hss.c 	qmgr_put_entry(queue, phys);
queue             491 drivers/net/wimax/i2400m/rx.c 	struct sk_buff_head queue;
queue             500 drivers/net/wimax/i2400m/rx.c 	skb_queue_head_init(&roq->queue);
queue             676 drivers/net/wimax/i2400m/rx.c 	if (skb_queue_empty(&roq->queue)) {
queue             678 drivers/net/wimax/i2400m/rx.c 		__skb_queue_head(&roq->queue, skb);
queue             682 drivers/net/wimax/i2400m/rx.c 	skb_itr = skb_peek_tail(&roq->queue);
queue             689 drivers/net/wimax/i2400m/rx.c 		__skb_queue_tail(&roq->queue, skb);
queue             698 drivers/net/wimax/i2400m/rx.c 	skb_queue_walk(&roq->queue, skb_itr) {
queue             706 drivers/net/wimax/i2400m/rx.c 			__skb_queue_before(&roq->queue, skb_itr, skb);
queue             715 drivers/net/wimax/i2400m/rx.c 	skb_queue_walk(&roq->queue, skb_itr) {
queue             756 drivers/net/wimax/i2400m/rx.c 	skb_queue_walk_safe(&roq->queue, skb_itr, tmp_itr) {
queue             765 drivers/net/wimax/i2400m/rx.c 			__skb_unlink(skb_itr, &roq->queue);
queue             794 drivers/net/wimax/i2400m/rx.c 			     roq->ws, skb_queue_len(&roq->queue),
queue             796 drivers/net/wimax/i2400m/rx.c 	skb_queue_walk_safe(&roq->queue, skb_itr, tmp_itr) {
queue             800 drivers/net/wimax/i2400m/rx.c 		__skb_unlink(skb_itr, &roq->queue);
queue             829 drivers/net/wimax/i2400m/rx.c 	len = skb_queue_len(&roq->queue);
queue             863 drivers/net/wimax/i2400m/rx.c 	len = skb_queue_len(&roq->queue);
queue             892 drivers/net/wimax/i2400m/rx.c 	len = skb_queue_len(&roq->queue);
queue             933 drivers/net/wimax/i2400m/rx.c 		__skb_queue_purge(&i2400m->rx_roq[itr].queue);
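
The i2400m rx.c hits implement a reorder queue with three cases: queue empty means insert at head, a sequence at or past the tail's means append, otherwise walk and __skb_queue_before() the first element with a bigger sequence number. A linked-list sketch of that ordered insert; the window-normalization against roq->ws is omitted, and a singly linked list stands in for sk_buff_head:

	#include <stdio.h>
	#include <stdlib.h>

	struct pkt { int seq; struct pkt *next; };
	static struct pkt *head;

	static void roq_queue(int seq)
	{
		struct pkt *p = malloc(sizeof(*p)), **pp = &head;

		p->seq = seq;
		while (*pp && (*pp)->seq <= seq)   /* find first bigger seq */
			pp = &(*pp)->next;
		p->next = *pp;                     /* covers all three cases */
		*pp = p;
	}

	int main(void)
	{
		int seqs[] = { 3, 1, 4, 2 };

		for (unsigned int i = 0; i < 4; i++)
			roq_queue(seqs[i]);
		for (struct pkt *p = head; p; p = p->next)
			printf("%d ", p->seq);     /* prints: 1 2 3 4 */
		printf("\n");
		return 0;
	}
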
queue            1512 drivers/net/wireless/ath/ath5k/ath5k.h int ath5k_hw_start_tx_dma(struct ath5k_hw *ah, unsigned int queue);
queue            1513 drivers/net/wireless/ath/ath5k/ath5k.h int ath5k_hw_stop_beacon_queue(struct ath5k_hw *ah, unsigned int queue);
queue            1514 drivers/net/wireless/ath/ath5k/ath5k.h u32 ath5k_hw_get_txdp(struct ath5k_hw *ah, unsigned int queue);
queue            1515 drivers/net/wireless/ath/ath5k/ath5k.h int ath5k_hw_set_txdp(struct ath5k_hw *ah, unsigned int queue,
queue            1562 drivers/net/wireless/ath/ath5k/ath5k.h int ath5k_hw_get_tx_queueprops(struct ath5k_hw *ah, int queue,
queue            1564 drivers/net/wireless/ath/ath5k/ath5k.h int ath5k_hw_set_tx_queueprops(struct ath5k_hw *ah, int queue,
queue            1570 drivers/net/wireless/ath/ath5k/ath5k.h 				  unsigned int queue);
queue            1571 drivers/net/wireless/ath/ath5k/ath5k.h u32 ath5k_hw_num_tx_pending(struct ath5k_hw *ah, unsigned int queue);
queue            1572 drivers/net/wireless/ath/ath5k/ath5k.h void ath5k_hw_release_tx_queue(struct ath5k_hw *ah, unsigned int queue);
queue            1573 drivers/net/wireless/ath/ath5k/ath5k.h int ath5k_hw_reset_tx_queue(struct ath5k_hw *ah, unsigned int queue);
queue             130 drivers/net/wireless/ath/ath5k/dma.c ath5k_hw_start_tx_dma(struct ath5k_hw *ah, unsigned int queue)
queue             134 drivers/net/wireless/ath/ath5k/dma.c 	AR5K_ASSERT_ENTRY(queue, ah->ah_capabilities.cap_queues.q_tx_num);
queue             137 drivers/net/wireless/ath/ath5k/dma.c 	if (ah->ah_txq[queue].tqi_type == AR5K_TX_QUEUE_INACTIVE)
queue             146 drivers/net/wireless/ath/ath5k/dma.c 		switch (ah->ah_txq[queue].tqi_type) {
queue             168 drivers/net/wireless/ath/ath5k/dma.c 		if (AR5K_REG_READ_Q(ah, AR5K_QCU_TXD, queue))
queue             172 drivers/net/wireless/ath/ath5k/dma.c 		AR5K_REG_WRITE_Q(ah, AR5K_QCU_TXE, queue);
queue             188 drivers/net/wireless/ath/ath5k/dma.c ath5k_hw_stop_tx_dma(struct ath5k_hw *ah, unsigned int queue)
queue             193 drivers/net/wireless/ath/ath5k/dma.c 	AR5K_ASSERT_ENTRY(queue, ah->ah_capabilities.cap_queues.q_tx_num);
queue             196 drivers/net/wireless/ath/ath5k/dma.c 	if (ah->ah_txq[queue].tqi_type == AR5K_TX_QUEUE_INACTIVE)
queue             205 drivers/net/wireless/ath/ath5k/dma.c 		switch (ah->ah_txq[queue].tqi_type) {
queue             228 drivers/net/wireless/ath/ath5k/dma.c 		AR5K_REG_ENABLE_BITS(ah, AR5K_QUEUE_MISC(queue),
queue             234 drivers/net/wireless/ath/ath5k/dma.c 		AR5K_REG_WRITE_Q(ah, AR5K_QCU_TXD, queue);
queue             238 drivers/net/wireless/ath/ath5k/dma.c 		(AR5K_REG_READ_Q(ah, AR5K_QCU_TXE, queue) != 0);
queue             242 drivers/net/wireless/ath/ath5k/dma.c 		if (AR5K_REG_READ_Q(ah, AR5K_QCU_TXE, queue))
queue             244 drivers/net/wireless/ath/ath5k/dma.c 				"queue %i didn't stop !\n", queue);
queue             250 drivers/net/wireless/ath/ath5k/dma.c 				AR5K_QUEUE_STATUS(queue)) &
queue             286 drivers/net/wireless/ath/ath5k/dma.c 					AR5K_QUEUE_STATUS(queue)) &
queue             297 drivers/net/wireless/ath/ath5k/dma.c 					queue);
queue             303 drivers/net/wireless/ath/ath5k/dma.c 		AR5K_REG_DISABLE_BITS(ah, AR5K_QUEUE_MISC(queue),
queue             311 drivers/net/wireless/ath/ath5k/dma.c 					queue, pending);
queue             328 drivers/net/wireless/ath/ath5k/dma.c ath5k_hw_stop_beacon_queue(struct ath5k_hw *ah, unsigned int queue)
queue             331 drivers/net/wireless/ath/ath5k/dma.c 	ret = ath5k_hw_stop_tx_dma(ah, queue);
queue             353 drivers/net/wireless/ath/ath5k/dma.c ath5k_hw_get_txdp(struct ath5k_hw *ah, unsigned int queue)
queue             357 drivers/net/wireless/ath/ath5k/dma.c 	AR5K_ASSERT_ENTRY(queue, ah->ah_capabilities.cap_queues.q_tx_num);
queue             364 drivers/net/wireless/ath/ath5k/dma.c 		switch (ah->ah_txq[queue].tqi_type) {
queue             376 drivers/net/wireless/ath/ath5k/dma.c 		tx_reg = AR5K_QUEUE_TXDP(queue);
queue             396 drivers/net/wireless/ath/ath5k/dma.c ath5k_hw_set_txdp(struct ath5k_hw *ah, unsigned int queue, u32 phys_addr)
queue             400 drivers/net/wireless/ath/ath5k/dma.c 	AR5K_ASSERT_ENTRY(queue, ah->ah_capabilities.cap_queues.q_tx_num);
queue             407 drivers/net/wireless/ath/ath5k/dma.c 		switch (ah->ah_txq[queue].tqi_type) {
queue             424 drivers/net/wireless/ath/ath5k/dma.c 		if (AR5K_REG_READ_Q(ah, AR5K_QCU_TXE, queue))
queue             427 drivers/net/wireless/ath/ath5k/dma.c 		tx_reg = AR5K_QUEUE_TXDP(queue);
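
ath5k_hw_stop_tx_dma in the dma.c hits requests a stop via AR5K_QCU_TXD, then polls AR5K_QCU_TXE and the queue status register in a bounded busy-wait before reporting "queue %i didn't stop". A generic poll-with-timeout sketch of that pattern; hw_pending() is a stand-in for the register read and the drain time is faked:

	#include <stdbool.h>
	#include <stdio.h>

	static int countdown = 3;                 /* fake hardware drain time */
	static bool hw_pending(void) { return countdown-- > 0; }

	static int stop_queue(unsigned int queue)
	{
		int i;

		for (i = 100; i > 0 && hw_pending(); i--)
			;                         /* usleep_range() in-kernel */
		if (i <= 0) {
			printf("queue %u didn't stop!\n", queue);
			return -1;
		}
		return 0;
	}

	int main(void)
	{
		return stop_queue(0) ? 1 : 0;
	}
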
queue             574 drivers/net/wireless/ath/ath5k/mac80211-ops.c ath5k_conf_tx(struct ieee80211_hw *hw, struct ieee80211_vif *vif, u16 queue,
queue             581 drivers/net/wireless/ath/ath5k/mac80211-ops.c 	if (queue >= ah->ah_capabilities.cap_queues.q_tx_num)
queue             586 drivers/net/wireless/ath/ath5k/mac80211-ops.c 	ath5k_hw_get_tx_queueprops(ah, queue, &qi);
queue             596 drivers/net/wireless/ath/ath5k/mac80211-ops.c 		  queue, params->aifs, params->cw_min,
queue             599 drivers/net/wireless/ath/ath5k/mac80211-ops.c 	if (ath5k_hw_set_tx_queueprops(ah, queue, &qi)) {
queue             601 drivers/net/wireless/ath/ath5k/mac80211-ops.c 			  "Unable to update hardware queue %u!\n", queue);
queue             604 drivers/net/wireless/ath/ath5k/mac80211-ops.c 		ath5k_hw_reset_tx_queue(ah, queue);
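
ath5k_conf_tx in the mac80211-ops.c hits bounds-checks the queue against q_tx_num, reads the current queue properties, overwrites aifs/cw_min/cw_max/txop from the mac80211 params, and writes them back (resetting the queue on success). A struct-level sketch of that flow; the names mirror the driver loosely and the get-modify-set round trip is collapsed:

	#include <stdio.h>

	#define Q_TX_NUM 10

	struct txq_info { int aifs, cw_min, cw_max, txop; };
	static struct txq_info txq[Q_TX_NUM];

	static int conf_tx(unsigned int queue, const struct txq_info *params)
	{
		if (queue >= Q_TX_NUM)      /* reject out-of-range queues */
			return -1;
		txq[queue] = *params;       /* get-modify-set collapsed here */
		printf("queue %u: aifs %d cw_min %d cw_max %d txop %d\n",
		       queue, params->aifs, params->cw_min,
		       params->cw_max, params->txop);
		return 0;
	}

	int main(void)
	{
		struct txq_info best_effort = { 3, 15, 1023, 0 };

		return conf_tx(0, &best_effort) ? 1 : 0;
	}
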
queue              63 drivers/net/wireless/ath/ath5k/qcu.c ath5k_hw_num_tx_pending(struct ath5k_hw *ah, unsigned int queue)
queue              66 drivers/net/wireless/ath/ath5k/qcu.c 	AR5K_ASSERT_ENTRY(queue, ah->ah_capabilities.cap_queues.q_tx_num);
queue              69 drivers/net/wireless/ath/ath5k/qcu.c 	if (ah->ah_txq[queue].tqi_type == AR5K_TX_QUEUE_INACTIVE)
queue              76 drivers/net/wireless/ath/ath5k/qcu.c 	pending = ath5k_hw_reg_read(ah, AR5K_QUEUE_STATUS(queue));
queue              82 drivers/net/wireless/ath/ath5k/qcu.c 	if (!pending && AR5K_REG_READ_Q(ah, AR5K_QCU_TXE, queue))
queue              94 drivers/net/wireless/ath/ath5k/qcu.c ath5k_hw_release_tx_queue(struct ath5k_hw *ah, unsigned int queue)
queue              96 drivers/net/wireless/ath/ath5k/qcu.c 	if (WARN_ON(queue >= ah->ah_capabilities.cap_queues.q_tx_num))
queue             100 drivers/net/wireless/ath/ath5k/qcu.c 	ah->ah_txq[queue].tqi_type = AR5K_TX_QUEUE_INACTIVE;
queue             102 drivers/net/wireless/ath/ath5k/qcu.c 	AR5K_Q_DISABLE_BITS(ah->ah_txq_status, queue);
queue             138 drivers/net/wireless/ath/ath5k/qcu.c ath5k_hw_get_tx_queueprops(struct ath5k_hw *ah, int queue,
queue             141 drivers/net/wireless/ath/ath5k/qcu.c 	memcpy(queue_info, &ah->ah_txq[queue], sizeof(struct ath5k_txq_info));
queue             154 drivers/net/wireless/ath/ath5k/qcu.c ath5k_hw_set_tx_queueprops(struct ath5k_hw *ah, int queue,
queue             159 drivers/net/wireless/ath/ath5k/qcu.c 	AR5K_ASSERT_ENTRY(queue, ah->ah_capabilities.cap_queues.q_tx_num);
queue             161 drivers/net/wireless/ath/ath5k/qcu.c 	qi = &ah->ah_txq[queue];
queue             206 drivers/net/wireless/ath/ath5k/qcu.c 	unsigned int queue;
queue             216 drivers/net/wireless/ath/ath5k/qcu.c 			queue = AR5K_TX_QUEUE_ID_NOQCU_DATA;
queue             220 drivers/net/wireless/ath/ath5k/qcu.c 			queue = AR5K_TX_QUEUE_ID_NOQCU_BEACON;
queue             228 drivers/net/wireless/ath/ath5k/qcu.c 			queue = queue_info->tqi_subtype;
queue             231 drivers/net/wireless/ath/ath5k/qcu.c 			queue = AR5K_TX_QUEUE_ID_UAPSD;
queue             234 drivers/net/wireless/ath/ath5k/qcu.c 			queue = AR5K_TX_QUEUE_ID_BEACON;
queue             237 drivers/net/wireless/ath/ath5k/qcu.c 			queue = AR5K_TX_QUEUE_ID_CAB;
queue             247 drivers/net/wireless/ath/ath5k/qcu.c 	memset(&ah->ah_txq[queue], 0, sizeof(struct ath5k_txq_info));
queue             248 drivers/net/wireless/ath/ath5k/qcu.c 	ah->ah_txq[queue].tqi_type = queue_type;
queue             252 drivers/net/wireless/ath/ath5k/qcu.c 		ret = ath5k_hw_set_tx_queueprops(ah, queue, queue_info);
queue             262 drivers/net/wireless/ath/ath5k/qcu.c 	AR5K_Q_ENABLE_BITS(ah->ah_txq_status, queue);
queue             264 drivers/net/wireless/ath/ath5k/qcu.c 	return queue;
queue             282 drivers/net/wireless/ath/ath5k/qcu.c 				  unsigned int queue)
queue             286 drivers/net/wireless/ath/ath5k/qcu.c 		struct ath5k_txq_info *tq = &ah->ah_txq[queue];
queue             288 drivers/net/wireless/ath/ath5k/qcu.c 		if (queue > 0)
queue             311 drivers/net/wireless/ath/ath5k/qcu.c 			AR5K_QUEUE_DFS_RETRY_LIMIT(queue));
queue             324 drivers/net/wireless/ath/ath5k/qcu.c ath5k_hw_reset_tx_queue(struct ath5k_hw *ah, unsigned int queue)
queue             326 drivers/net/wireless/ath/ath5k/qcu.c 	struct ath5k_txq_info *tq = &ah->ah_txq[queue];
queue             328 drivers/net/wireless/ath/ath5k/qcu.c 	AR5K_ASSERT_ENTRY(queue, ah->ah_capabilities.cap_queues.q_tx_num);
queue             344 drivers/net/wireless/ath/ath5k/qcu.c 		AR5K_QUEUE_DFS_LOCAL_IFS(queue));
queue             349 drivers/net/wireless/ath/ath5k/qcu.c 	ath5k_hw_set_tx_retry_limits(ah, queue);
queue             357 drivers/net/wireless/ath/ath5k/qcu.c 	AR5K_REG_ENABLE_BITS(ah, AR5K_QUEUE_DFS_MISC(queue),
queue             362 drivers/net/wireless/ath/ath5k/qcu.c 		AR5K_REG_ENABLE_BITS(ah, AR5K_QUEUE_DFS_MISC(queue),
queue             371 drivers/net/wireless/ath/ath5k/qcu.c 					AR5K_QUEUE_CBRCFG(queue));
queue             373 drivers/net/wireless/ath/ath5k/qcu.c 		AR5K_REG_ENABLE_BITS(ah, AR5K_QUEUE_MISC(queue),
queue             377 drivers/net/wireless/ath/ath5k/qcu.c 			AR5K_REG_ENABLE_BITS(ah, AR5K_QUEUE_MISC(queue),
queue             386 drivers/net/wireless/ath/ath5k/qcu.c 					AR5K_QUEUE_RDYTIMECFG(queue));
queue             392 drivers/net/wireless/ath/ath5k/qcu.c 					AR5K_QUEUE_DFS_CHANNEL_TIME(queue));
queue             395 drivers/net/wireless/ath/ath5k/qcu.c 			AR5K_REG_ENABLE_BITS(ah, AR5K_QUEUE_MISC(queue),
queue             402 drivers/net/wireless/ath/ath5k/qcu.c 					AR5K_QUEUE_DFS_MISC(queue));
queue             407 drivers/net/wireless/ath/ath5k/qcu.c 					AR5K_QUEUE_DFS_MISC(queue));
queue             414 drivers/net/wireless/ath/ath5k/qcu.c 		AR5K_REG_ENABLE_BITS(ah, AR5K_QUEUE_MISC(queue),
queue             419 drivers/net/wireless/ath/ath5k/qcu.c 		AR5K_REG_ENABLE_BITS(ah, AR5K_QUEUE_DFS_MISC(queue),
queue             429 drivers/net/wireless/ath/ath5k/qcu.c 		AR5K_REG_ENABLE_BITS(ah, AR5K_QUEUE_MISC(queue),
queue             439 drivers/net/wireless/ath/ath5k/qcu.c 					AR5K_QUEUE_RDYTIMECFG(queue));
queue             441 drivers/net/wireless/ath/ath5k/qcu.c 		AR5K_REG_ENABLE_BITS(ah, AR5K_QUEUE_DFS_MISC(queue),
queue             447 drivers/net/wireless/ath/ath5k/qcu.c 		AR5K_REG_ENABLE_BITS(ah, AR5K_QUEUE_MISC(queue),
queue             463 drivers/net/wireless/ath/ath5k/qcu.c 		AR5K_Q_ENABLE_BITS(ah->ah_txq_imr_txok, queue);
queue             466 drivers/net/wireless/ath/ath5k/qcu.c 		AR5K_Q_ENABLE_BITS(ah->ah_txq_imr_txerr, queue);
queue             469 drivers/net/wireless/ath/ath5k/qcu.c 		AR5K_Q_ENABLE_BITS(ah->ah_txq_imr_txurn, queue);
queue             472 drivers/net/wireless/ath/ath5k/qcu.c 		AR5K_Q_ENABLE_BITS(ah->ah_txq_imr_txdesc, queue);
queue             475 drivers/net/wireless/ath/ath5k/qcu.c 		AR5K_Q_ENABLE_BITS(ah->ah_txq_imr_txeol, queue);
queue             478 drivers/net/wireless/ath/ath5k/qcu.c 		AR5K_Q_ENABLE_BITS(ah->ah_txq_imr_cbrorn, queue);
queue             481 drivers/net/wireless/ath/ath5k/qcu.c 		AR5K_Q_ENABLE_BITS(ah->ah_txq_imr_cbrurn, queue);
queue             484 drivers/net/wireless/ath/ath5k/qcu.c 		AR5K_Q_ENABLE_BITS(ah->ah_txq_imr_qtrig, queue);
queue             487 drivers/net/wireless/ath/ath5k/qcu.c 		AR5K_Q_ENABLE_BITS(ah->ah_txq_imr_nofrm, queue);
queue             539 drivers/net/wireless/ath/ath5k/qcu.c 	AR5K_REG_WRITE_Q(ah, AR5K_QUEUE_QCUMASK(queue), queue);
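
The qcu.c hits end with AR5K_Q_ENABLE_BITS setting bit `queue` in each per-event interrupt-mask word (txok, txerr, txurn, ...), and release_tx_queue clearing it. A bit-mask bookkeeping sketch, assuming the macros are plain bit set/clear operations:

	#include <stdint.h>
	#include <stdio.h>

	#define Q_ENABLE_BITS(var, queue)  ((var) |= (1U << (queue)))
	#define Q_DISABLE_BITS(var, queue) ((var) &= ~(1U << (queue)))

	int main(void)
	{
		uint32_t imr_txok = 0, imr_txerr = 0;

		Q_ENABLE_BITS(imr_txok, 3);    /* queue 3 wants TXOK irqs */
		Q_ENABLE_BITS(imr_txerr, 3);
		Q_DISABLE_BITS(imr_txerr, 3);  /* release_tx_queue-style clear */
		printf("txok=0x%x txerr=0x%x\n", imr_txok, imr_txerr);
		return 0;
	}
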
queue             663 drivers/net/wireless/ath/ath6kl/htc.h static inline int get_queue_depth(struct list_head *queue)
queue             668 drivers/net/wireless/ath/ath6kl/htc.h 	list_for_each(tmp_list, queue)
queue             601 drivers/net/wireless/ath/ath6kl/htc_mbox.c 				   struct list_head *queue)
queue             630 drivers/net/wireless/ath/ath6kl/htc_mbox.c 		list_move_tail(&packet->list, queue);
queue             683 drivers/net/wireless/ath/ath6kl/htc_mbox.c 					 struct list_head *queue)
queue             695 drivers/net/wireless/ath/ath6kl/htc_mbox.c 		if (list_empty(queue))
queue             698 drivers/net/wireless/ath/ath6kl/htc_mbox.c 		packet = list_first_entry(queue, struct htc_packet, list);
queue             737 drivers/net/wireless/ath/ath6kl/htc_mbox.c 				list_add(&packet->list, queue);
queue             756 drivers/net/wireless/ath/ath6kl/htc_mbox.c 				 struct list_head *queue,
queue             773 drivers/net/wireless/ath/ath6kl/htc_mbox.c 		n_scat = get_queue_depth(queue);
queue             823 drivers/net/wireless/ath/ath6kl/htc_mbox.c 						       queue);
queue            1125 drivers/net/wireless/ath/ath6kl/htc_mbox.c 	struct list_head queue;
queue            1141 drivers/net/wireless/ath/ath6kl/htc_mbox.c 		INIT_LIST_HEAD(&queue);
queue            1142 drivers/net/wireless/ath/ath6kl/htc_mbox.c 		list_add(&packet->list, &queue);
queue            1143 drivers/net/wireless/ath/ath6kl/htc_mbox.c 		htc_tx_complete(endpoint, &queue);
queue            1271 drivers/net/wireless/ath/ath6kl/htc_mbox.c 	struct list_head queue;
queue            1273 drivers/net/wireless/ath/ath6kl/htc_mbox.c 	INIT_LIST_HEAD(&queue);
queue            1274 drivers/net/wireless/ath/ath6kl/htc_mbox.c 	list_add_tail(&packet->list, &queue);
queue            1275 drivers/net/wireless/ath/ath6kl/htc_mbox.c 	return ath6kl_htc_mbox_add_rxbuf_multiple(target, &queue);
queue            1368 drivers/net/wireless/ath/ath6kl/htc_mbox.c 			       u32 *lk_ahds, struct list_head *queue, int n_msg)
queue            1448 drivers/net/wireless/ath/ath6kl/htc_mbox.c 		list_add_tail(&packet->list, queue);
queue            1472 drivers/net/wireless/ath/ath6kl/htc_mbox.c 			       struct list_head *queue)
queue            1538 drivers/net/wireless/ath/ath6kl/htc_mbox.c 					     queue, n_msg);
queue            1557 drivers/net/wireless/ath/ath6kl/htc_mbox.c 		list_for_each_entry_safe(packet, tmp_pkt, queue, list) {
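
Both htc_mbox.c here and htc_pipe.c below wrap a single packet into a temporary on-stack list head so the single-buffer entry point can reuse the batch API (INIT_LIST_HEAD, list_add_tail, then the *_multiple call). A sketch of that pattern with a minimal list_head inlined; the function names are stand-ins:

	#include <stdio.h>

	struct list_head { struct list_head *next, *prev; };

	#define INIT_LIST_HEAD(h) ((h)->next = (h)->prev = (h))

	static void list_add_tail(struct list_head *n, struct list_head *h)
	{
		n->prev = h->prev; n->next = h;
		h->prev->next = n; h->prev = n;
	}

	struct htc_packet { struct list_head list; int id; };

	static int add_rxbuf_multiple(struct list_head *queue)
	{
		int n = 0;

		for (struct list_head *p = queue->next; p != queue; p = p->next)
			n++;
		printf("batch of %d packet(s)\n", n);
		return 0;
	}

	static int add_rxbuf(struct htc_packet *packet)
	{
		struct list_head queue;

		INIT_LIST_HEAD(&queue);
		list_add_tail(&packet->list, &queue);
		return add_rxbuf_multiple(&queue);  /* single rides the batch path */
	}

	int main(void)
	{
		struct htc_packet pkt = { .id = 1 };

		return add_rxbuf(&pkt);
	}
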
queue              93 drivers/net/wireless/ath/ath6kl/htc_pipe.c 					struct list_head *queue)
queue             169 drivers/net/wireless/ath/ath6kl/htc_pipe.c 		list_add_tail(&packet->list, queue);
queue             175 drivers/net/wireless/ath/ath6kl/htc_pipe.c 			   struct list_head *queue, int resources)
queue             198 drivers/net/wireless/ath/ath6kl/htc_pipe.c 		list_add_tail(&packet->list, queue);
queue            1542 drivers/net/wireless/ath/ath6kl/htc_pipe.c 	struct list_head queue;
queue            1549 drivers/net/wireless/ath/ath6kl/htc_pipe.c 	INIT_LIST_HEAD(&queue);
queue            1550 drivers/net/wireless/ath/ath6kl/htc_pipe.c 	list_add_tail(&packet->list, &queue);
queue            1552 drivers/net/wireless/ath/ath6kl/htc_pipe.c 	return htc_send_packets_multiple(target, &queue);
queue             881 drivers/net/wireless/ath/ath6kl/txrx.c 	struct list_head queue;
queue             889 drivers/net/wireless/ath/ath6kl/txrx.c 	INIT_LIST_HEAD(&queue);
queue             909 drivers/net/wireless/ath/ath6kl/txrx.c 		list_add_tail(&packet->list, &queue);
queue             912 drivers/net/wireless/ath/ath6kl/txrx.c 	if (!list_empty(&queue))
queue             913 drivers/net/wireless/ath/ath6kl/txrx.c 		ath6kl_htc_add_rxbuf_multiple(ar->htc_target, &queue);
queue            1098 drivers/net/wireless/ath/ath9k/ar9003_hw.c static bool ath9k_hw_verify_hang(struct ath_hw *ah, unsigned int queue)
queue            1105 drivers/net/wireless/ath/ath9k/ar9003_hw.c 		if (queue < 6)
queue            1112 drivers/net/wireless/ath/ath9k/ar9003_hw.c 		dcu_chain_state = (dma_dbg_chain >> (5 * queue)) & 0x1f;
queue            1120 drivers/net/wireless/ath/ath9k/ar9003_hw.c 		"MAC Hang signature found for queue: %d\n", queue);
queue            1082 drivers/net/wireless/ath/ath9k/ar9003_mci.c 					u32 *payload, bool queue)
queue            1103 drivers/net/wireless/ath/ath9k/ar9003_mci.c 		mci->update_2g5g = queue;
queue            1107 drivers/net/wireless/ath/ath9k/ar9003_mci.c 		mci->wlan_channels_update = queue;
queue            1112 drivers/net/wireless/ath/ath9k/ar9003_mci.c 			mci->unhalt_bt_gpm = queue;
queue            1114 drivers/net/wireless/ath/ath9k/ar9003_mci.c 			if (!queue)
queue            1121 drivers/net/wireless/ath/ath9k/ar9003_mci.c 			mci->halted_bt_gpm = !queue;
queue             594 drivers/net/wireless/ath/ath9k/ath9k.h void ath_update_max_aggr_framelen(struct ath_softc *sc, int queue, int txop);
queue             614 drivers/net/wireless/ath/ath9k/ath9k.h void ath9k_wake_tx_queue(struct ieee80211_hw *hw, struct ieee80211_txq *queue);
queue             235 drivers/net/wireless/ath/ath9k/hif_usb.c 					    struct sk_buff_head *queue,
queue             240 drivers/net/wireless/ath/ath9k/hif_usb.c 	while ((skb = __skb_dequeue(queue)) != NULL) {
queue             577 drivers/net/wireless/ath/ath9k/htc.h int get_hw_qnum(u16 queue, int *hwq_map);
queue            1371 drivers/net/wireless/ath/ath9k/htc_drv_main.c 			     struct ieee80211_vif *vif, u16 queue,
queue            1379 drivers/net/wireless/ath/ath9k/htc_drv_main.c 	if (queue >= IEEE80211_NUM_ACS)
queue            1392 drivers/net/wireless/ath/ath9k/htc_drv_main.c 	qnum = get_hw_qnum(queue, priv->hwq_map);
queue            1396 drivers/net/wireless/ath/ath9k/htc_drv_main.c 		queue, qnum, params->aifs, params->cw_min,
queue              40 drivers/net/wireless/ath/ath9k/htc_drv_txrx.c int get_hw_qnum(u16 queue, int *hwq_map)
queue              42 drivers/net/wireless/ath/ath9k/htc_drv_txrx.c 	switch (queue) {
queue             525 drivers/net/wireless/ath/ath9k/htc_drv_txrx.c 				       struct sk_buff_head *queue)
queue             529 drivers/net/wireless/ath/ath9k/htc_drv_txrx.c 	while ((skb = skb_dequeue(queue)) != NULL) {
queue             733 drivers/net/wireless/ath/ath9k/htc_drv_txrx.c 	struct sk_buff_head queue;
queue             735 drivers/net/wireless/ath/ath9k/htc_drv_txrx.c 	skb_queue_head_init(&queue);
queue             741 drivers/net/wireless/ath/ath9k/htc_drv_txrx.c 			__skb_queue_tail(&queue, skb);
queue             748 drivers/net/wireless/ath/ath9k/htc_drv_txrx.c 		skb_queue_walk_safe(&queue, skb, tmp) {
queue             749 drivers/net/wireless/ath/ath9k/htc_drv_txrx.c 			__skb_unlink(skb, &queue);
queue            1631 drivers/net/wireless/ath/ath9k/main.c 			 struct ieee80211_vif *vif, u16 queue,
queue            1640 drivers/net/wireless/ath/ath9k/main.c 	if (queue >= IEEE80211_NUM_ACS)
queue            1643 drivers/net/wireless/ath/ath9k/main.c 	txq = sc->tx.txq_map[queue];
queue            1657 drivers/net/wireless/ath/ath9k/main.c 		queue, txq->axq_qnum, params->aifs, params->cw_min,
queue            1660 drivers/net/wireless/ath/ath9k/main.c 	ath_update_max_aggr_framelen(sc, queue, qi.tqi_burstTime);
queue             118 drivers/net/wireless/ath/ath9k/xmit.c 	struct ieee80211_txq *queue =
queue             121 drivers/net/wireless/ath/ath9k/xmit.c 	ieee80211_schedule_txq(sc->hw, queue);
queue             124 drivers/net/wireless/ath/ath9k/xmit.c void ath9k_wake_tx_queue(struct ieee80211_hw *hw, struct ieee80211_txq *queue)
queue             128 drivers/net/wireless/ath/ath9k/xmit.c 	struct ath_atx_tid *tid = (struct ath_atx_tid *) queue->drv_priv;
queue             132 drivers/net/wireless/ath/ath9k/xmit.c 		queue->sta ? queue->sta->addr : queue->vif->addr,
queue            1109 drivers/net/wireless/ath/ath9k/xmit.c void ath_update_max_aggr_framelen(struct ath_softc *sc, int queue, int txop)
queue            1118 drivers/net/wireless/ath/ath9k/xmit.c 	cur_ht20 = sc->tx.max_aggr_framelen[queue][MCS_HT20];
queue            1119 drivers/net/wireless/ath/ath9k/xmit.c 	cur_ht20_sgi = sc->tx.max_aggr_framelen[queue][MCS_HT20_SGI];
queue            1120 drivers/net/wireless/ath/ath9k/xmit.c 	cur_ht40 = sc->tx.max_aggr_framelen[queue][MCS_HT40];
queue            1121 drivers/net/wireless/ath/ath9k/xmit.c 	cur_ht40_sgi = sc->tx.max_aggr_framelen[queue][MCS_HT40_SGI];
queue            1919 drivers/net/wireless/ath/ath9k/xmit.c 	struct ieee80211_txq *queue;
queue            1936 drivers/net/wireless/ath/ath9k/xmit.c 	while ((queue = ieee80211_next_txq(hw, txq->mac80211_qnum))) {
queue            1939 drivers/net/wireless/ath/ath9k/xmit.c 		tid = (struct ath_atx_tid *)queue->drv_priv;
queue            1945 drivers/net/wireless/ath/ath9k/xmit.c 		ieee80211_return_txq(hw, queue, force);
queue             135 drivers/net/wireless/ath/carl9170/carl9170.h 	struct sk_buff_head queue;
queue             335 drivers/net/wireless/ath/carl9170/debug.c 		    " currently queued:%d\n", skb_queue_len(&iter->queue));
queue             338 drivers/net/wireless/ath/carl9170/debug.c 		skb_queue_walk(&iter->queue, skb) {
queue             357 drivers/net/wireless/ath/carl9170/debug.c 	ssize_t *len, size_t bufsize, struct sk_buff_head *queue)
queue             363 drivers/net/wireless/ath/carl9170/debug.c 	spin_lock_bh(&queue->lock);
queue             364 drivers/net/wireless/ath/carl9170/debug.c 	skb_queue_walk(queue, skb) {
queue             370 drivers/net/wireless/ath/carl9170/debug.c 	spin_unlock_bh(&queue->lock);
queue             266 drivers/net/wireless/ath/carl9170/fwcmd.h 	u8 queue:2;
queue             211 drivers/net/wireless/ath/carl9170/main.c 		while ((skb = __skb_dequeue(&tid_info->queue)))
queue             264 drivers/net/wireless/ath/carl9170/main.c 			while ((skb = __skb_dequeue(&tid_info->queue)))
queue             329 drivers/net/wireless/ath/carl9170/main.c #define CARL9170_FILL_QUEUE(queue, ai_fs, cwmin, cwmax, _txop)		\
queue             331 drivers/net/wireless/ath/carl9170/main.c 	queue.aifs = ai_fs;						\
queue             332 drivers/net/wireless/ath/carl9170/main.c 	queue.cw_min = cwmin;						\
queue             333 drivers/net/wireless/ath/carl9170/main.c 	queue.cw_max = cwmax;						\
queue             334 drivers/net/wireless/ath/carl9170/main.c 	queue.txop = _txop;						\
queue            1383 drivers/net/wireless/ath/carl9170/main.c 			       struct ieee80211_vif *vif, u16 queue,
queue            1390 drivers/net/wireless/ath/carl9170/main.c 	memcpy(&ar->edcf[ar9170_qmap[queue]], param, sizeof(*param));
queue            1443 drivers/net/wireless/ath/carl9170/main.c 		skb_queue_head_init(&tid_info->queue);
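
carl9170's main.c defines CARL9170_FILL_QUEUE to stamp aifs/cw_min/cw_max/txop into an EDCA entry. A loose reconstruction of the macro and its usage; the parameter values below are illustrative only, not the driver's defaults:

	#include <stdio.h>

	struct edcf_queue { int aifs, cw_min, cw_max, txop; };

	#define FILL_QUEUE(queue, ai_fs, cwmin, cwmax, _txop) do {	\
		(queue).aifs = ai_fs;					\
		(queue).cw_min = cwmin;					\
		(queue).cw_max = cwmax;					\
		(queue).txop = _txop;					\
	} while (0)

	int main(void)
	{
		struct edcf_queue edcf[4];

		FILL_QUEUE(edcf[0], 3, 15, 1023, 0);   /* best effort (example) */
		FILL_QUEUE(edcf[1], 2, 7, 15, 94);     /* video-ish (example) */
		printf("q0 aifs=%d q1 txop=%d\n", edcf[0].aifs, edcf[1].txop);
		return 0;
	}
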
queue             577 drivers/net/wireless/ath/carl9170/rx.c 	unsigned int queue;
queue             585 drivers/net/wireless/ath/carl9170/rx.c 	queue = TID_TO_WME_AC(((le16_to_cpu(bar->control) &
queue             590 drivers/net/wireless/ath/carl9170/rx.c 	list_for_each_entry_rcu(entry, &ar->bar_list[queue], list) {
queue             608 drivers/net/wireless/ath/carl9170/rx.c 			spin_lock_bh(&ar->bar_list_lock[queue]);
queue             610 drivers/net/wireless/ath/carl9170/rx.c 			spin_unlock_bh(&ar->bar_list_lock[queue]);
queue              49 drivers/net/wireless/ath/carl9170/tx.c 						unsigned int queue)
queue              52 drivers/net/wireless/ath/carl9170/tx.c 		return queue;
queue              78 drivers/net/wireless/ath/carl9170/tx.c 	int queue, i;
queue              83 drivers/net/wireless/ath/carl9170/tx.c 	queue = skb_get_queue_mapping(skb);
queue              92 drivers/net/wireless/ath/carl9170/tx.c 	ar->tx_stats[queue].len++;
queue              93 drivers/net/wireless/ath/carl9170/tx.c 	ar->tx_stats[queue].count++;
queue             158 drivers/net/wireless/ath/carl9170/tx.c 	int queue;
queue             160 drivers/net/wireless/ath/carl9170/tx.c 	queue = skb_get_queue_mapping(skb);
queue             164 drivers/net/wireless/ath/carl9170/tx.c 	ar->tx_stats[queue].len--;
queue             455 drivers/net/wireless/ath/carl9170/tx.c 		int queue = skb_get_queue_mapping(skb);
queue             458 drivers/net/wireless/ath/carl9170/tx.c 		list_for_each_entry_rcu(entry, &ar->bar_list[queue], list) {
queue             460 drivers/net/wireless/ath/carl9170/tx.c 				spin_lock_bh(&ar->bar_list_lock[queue]);
queue             462 drivers/net/wireless/ath/carl9170/tx.c 				spin_unlock_bh(&ar->bar_list_lock[queue]);
queue             469 drivers/net/wireless/ath/carl9170/tx.c 		       queue, bar->ra, bar->ta, bar->control,
queue             514 drivers/net/wireless/ath/carl9170/tx.c 					       struct sk_buff_head *queue)
queue             518 drivers/net/wireless/ath/carl9170/tx.c 	spin_lock_bh(&queue->lock);
queue             519 drivers/net/wireless/ath/carl9170/tx.c 	skb_queue_walk(queue, skb) {
queue             525 drivers/net/wireless/ath/carl9170/tx.c 		__skb_unlink(skb, queue);
queue             526 drivers/net/wireless/ath/carl9170/tx.c 		spin_unlock_bh(&queue->lock);
queue             531 drivers/net/wireless/ath/carl9170/tx.c 	spin_unlock_bh(&queue->lock);
queue             617 drivers/net/wireless/ath/carl9170/tx.c 		skb = skb_peek(&iter->queue);
queue            1134 drivers/net/wireless/ath/carl9170/tx.c 	u16 seq, queue, tmpssn;
queue            1161 drivers/net/wireless/ath/carl9170/tx.c 		queue = TID_TO_WME_AC(tid_info->tid);
queue            1168 drivers/net/wireless/ath/carl9170/tx.c 		first = skb_peek(&tid_info->queue);
queue            1179 drivers/net/wireless/ath/carl9170/tx.c 		while ((skb = skb_peek(&tid_info->queue))) {
queue            1199 drivers/net/wireless/ath/carl9170/tx.c 			__skb_unlink(skb, &tid_info->queue);
queue            1207 drivers/net/wireless/ath/carl9170/tx.c 		if (skb_queue_empty(&tid_info->queue) ||
queue            1208 drivers/net/wireless/ath/carl9170/tx.c 		    carl9170_get_seq(skb_peek(&tid_info->queue)) !=
queue            1230 drivers/net/wireless/ath/carl9170/tx.c 		spin_lock_bh(&ar->tx_pending[queue].lock);
queue            1231 drivers/net/wireless/ath/carl9170/tx.c 		skb_queue_splice_tail_init(&agg, &ar->tx_pending[queue]);
queue            1232 drivers/net/wireless/ath/carl9170/tx.c 		spin_unlock_bh(&ar->tx_pending[queue].lock);
queue            1243 drivers/net/wireless/ath/carl9170/tx.c 					    struct sk_buff_head *queue)
queue            1251 drivers/net/wireless/ath/carl9170/tx.c 	spin_lock_bh(&queue->lock);
queue            1252 drivers/net/wireless/ath/carl9170/tx.c 	skb = skb_peek(queue);
queue            1259 drivers/net/wireless/ath/carl9170/tx.c 	__skb_unlink(skb, queue);
queue            1260 drivers/net/wireless/ath/carl9170/tx.c 	spin_unlock_bh(&queue->lock);
queue            1269 drivers/net/wireless/ath/carl9170/tx.c 	spin_unlock_bh(&queue->lock);
queue            1327 drivers/net/wireless/ath/carl9170/tx.c 		unsigned int queue = skb_get_queue_mapping(skb);
queue            1332 drivers/net/wireless/ath/carl9170/tx.c 			spin_lock_bh(&ar->bar_list_lock[queue]);
queue            1333 drivers/net/wireless/ath/carl9170/tx.c 			list_add_tail_rcu(&entry->list, &ar->bar_list[queue]);
queue            1334 drivers/net/wireless/ath/carl9170/tx.c 			spin_unlock_bh(&ar->bar_list_lock[queue]);
queue            1430 drivers/net/wireless/ath/carl9170/tx.c 		__skb_queue_tail(&agg->queue, skb);
queue            1435 drivers/net/wireless/ath/carl9170/tx.c 	skb_queue_reverse_walk(&agg->queue, iter) {
queue            1439 drivers/net/wireless/ath/carl9170/tx.c 			__skb_queue_after(&agg->queue, iter, skb);
queue            1444 drivers/net/wireless/ath/carl9170/tx.c 	__skb_queue_head(&agg->queue, skb);
queue            1448 drivers/net/wireless/ath/carl9170/tx.c 		if (agg->snx == carl9170_get_seq(skb_peek(&agg->queue))) {
queue            1513 drivers/net/wireless/ath/carl9170/tx.c 		unsigned int queue = skb_get_queue_mapping(skb);
queue            1517 drivers/net/wireless/ath/carl9170/tx.c 		skb_queue_tail(&ar->tx_pending[queue], skb);
queue             253 drivers/net/wireless/ath/carl9170/wlan.h 	u8 queue:2;
queue             708 drivers/net/wireless/broadcom/b43/b43.h #define B43_QOS_PARAMS(queue)	(B43_SHM_SH_EDCFQ + \
queue             709 drivers/net/wireless/broadcom/b43/b43.h 				 (B43_NR_QOSPARAMS * sizeof(u16) * (queue)))
queue            3789 drivers/net/wireless/broadcom/b43/main.c 	unsigned int queue = (unsigned int)_queue;
queue            3792 drivers/net/wireless/broadcom/b43/main.c 	if (queue >= ARRAY_SIZE(wl->qos_params)) {
queue            3806 drivers/net/wireless/broadcom/b43/main.c 	memcpy(&(wl->qos_params[queue].p), params, sizeof(*params));
queue            3808 drivers/net/wireless/broadcom/b43/main.c 	b43_qos_params_upload(dev, &(wl->qos_params[queue].p),
queue            3809 drivers/net/wireless/broadcom/b43/main.c 			      b43_qos_shm_offsets[queue]);
queue             152 drivers/net/wireless/broadcom/b43/pio.c 		p->queue = q;
queue             209 drivers/net/wireless/broadcom/b43/pio.c #define destroy_queue_tx(pio, queue) do {				\
queue             210 drivers/net/wireless/broadcom/b43/pio.c 	b43_destroy_pioqueue_tx((pio)->queue, __stringify(queue));	\
queue             211 drivers/net/wireless/broadcom/b43/pio.c 	(pio)->queue = NULL;						\
queue             214 drivers/net/wireless/broadcom/b43/pio.c #define destroy_queue_rx(pio, queue) do {				\
queue             215 drivers/net/wireless/broadcom/b43/pio.c 	b43_destroy_pioqueue_rx((pio)->queue, __stringify(queue));	\
queue             216 drivers/net/wireless/broadcom/b43/pio.c 	(pio)->queue = NULL;						\
queue             352 drivers/net/wireless/broadcom/b43/pio.c 	struct b43_pio_txqueue *q = pack->queue;
queue             422 drivers/net/wireless/broadcom/b43/pio.c 	struct b43_pio_txqueue *q = pack->queue;
queue              61 drivers/net/wireless/broadcom/b43/pio.h 	struct b43_pio_txqueue *queue;
queue            2507 drivers/net/wireless/broadcom/b43legacy/main.c 				struct ieee80211_vif *vif, u16 queue,
queue              22 drivers/net/wireless/broadcom/b43legacy/pio.c static void tx_start(struct b43legacy_pioqueue *queue)
queue              24 drivers/net/wireless/broadcom/b43legacy/pio.c 	b43legacy_pio_write(queue, B43legacy_PIO_TXCTL,
queue              28 drivers/net/wireless/broadcom/b43legacy/pio.c static void tx_octet(struct b43legacy_pioqueue *queue,
queue              31 drivers/net/wireless/broadcom/b43legacy/pio.c 	if (queue->need_workarounds) {
queue              32 drivers/net/wireless/broadcom/b43legacy/pio.c 		b43legacy_pio_write(queue, B43legacy_PIO_TXDATA, octet);
queue              33 drivers/net/wireless/broadcom/b43legacy/pio.c 		b43legacy_pio_write(queue, B43legacy_PIO_TXCTL,
queue              36 drivers/net/wireless/broadcom/b43legacy/pio.c 		b43legacy_pio_write(queue, B43legacy_PIO_TXCTL,
queue              38 drivers/net/wireless/broadcom/b43legacy/pio.c 		b43legacy_pio_write(queue, B43legacy_PIO_TXDATA, octet);
queue              63 drivers/net/wireless/broadcom/b43legacy/pio.c static void tx_data(struct b43legacy_pioqueue *queue,
queue              71 drivers/net/wireless/broadcom/b43legacy/pio.c 	if (queue->need_workarounds) {
queue              74 drivers/net/wireless/broadcom/b43legacy/pio.c 		b43legacy_pio_write(queue, B43legacy_PIO_TXDATA, data);
queue              76 drivers/net/wireless/broadcom/b43legacy/pio.c 	b43legacy_pio_write(queue, B43legacy_PIO_TXCTL,
queue              82 drivers/net/wireless/broadcom/b43legacy/pio.c 		b43legacy_pio_write(queue, B43legacy_PIO_TXDATA, data);
queue              85 drivers/net/wireless/broadcom/b43legacy/pio.c 		tx_octet(queue, packet[octets -
queue              89 drivers/net/wireless/broadcom/b43legacy/pio.c static void tx_complete(struct b43legacy_pioqueue *queue,
queue              92 drivers/net/wireless/broadcom/b43legacy/pio.c 	if (queue->need_workarounds) {
queue              93 drivers/net/wireless/broadcom/b43legacy/pio.c 		b43legacy_pio_write(queue, B43legacy_PIO_TXDATA,
queue              95 drivers/net/wireless/broadcom/b43legacy/pio.c 		b43legacy_pio_write(queue, B43legacy_PIO_TXCTL,
queue              99 drivers/net/wireless/broadcom/b43legacy/pio.c 		b43legacy_pio_write(queue, B43legacy_PIO_TXCTL,
queue             103 drivers/net/wireless/broadcom/b43legacy/pio.c static u16 generate_cookie(struct b43legacy_pioqueue *queue,
queue             113 drivers/net/wireless/broadcom/b43legacy/pio.c 	switch (queue->mmio_base) {
queue             141 drivers/net/wireless/broadcom/b43legacy/pio.c 	struct b43legacy_pioqueue *queue = NULL;
queue             146 drivers/net/wireless/broadcom/b43legacy/pio.c 		queue = pio->queue0;
queue             149 drivers/net/wireless/broadcom/b43legacy/pio.c 		queue = pio->queue1;
queue             152 drivers/net/wireless/broadcom/b43legacy/pio.c 		queue = pio->queue2;
queue             155 drivers/net/wireless/broadcom/b43legacy/pio.c 		queue = pio->queue3;
queue             163 drivers/net/wireless/broadcom/b43legacy/pio.c 	*packet = &(queue->tx_packets_cache[packetindex]);
queue             165 drivers/net/wireless/broadcom/b43legacy/pio.c 	return queue;
queue             172 drivers/net/wireless/broadcom/b43legacy/pio.c static int pio_tx_write_fragment(struct b43legacy_pioqueue *queue,
queue             185 drivers/net/wireless/broadcom/b43legacy/pio.c 	err = b43legacy_generate_txhdr(queue->dev,
queue             188 drivers/net/wireless/broadcom/b43legacy/pio.c 				 generate_cookie(queue, packet));
queue             192 drivers/net/wireless/broadcom/b43legacy/pio.c 	tx_start(queue);
queue             194 drivers/net/wireless/broadcom/b43legacy/pio.c 	if (queue->need_workarounds)
queue             196 drivers/net/wireless/broadcom/b43legacy/pio.c 	tx_data(queue, txhdr, (u8 *)skb->data, octets);
queue             197 drivers/net/wireless/broadcom/b43legacy/pio.c 	tx_complete(queue, skb);
queue             205 drivers/net/wireless/broadcom/b43legacy/pio.c 	struct b43legacy_pioqueue *queue = packet->queue;
queue             213 drivers/net/wireless/broadcom/b43legacy/pio.c 	list_move(&packet->list, &queue->txfree);
queue             214 drivers/net/wireless/broadcom/b43legacy/pio.c 	queue->nr_txfree++;
queue             219 drivers/net/wireless/broadcom/b43legacy/pio.c 	struct b43legacy_pioqueue *queue = packet->queue;
queue             225 drivers/net/wireless/broadcom/b43legacy/pio.c 	if (queue->tx_devq_size < octets) {
queue             226 drivers/net/wireless/broadcom/b43legacy/pio.c 		b43legacywarn(queue->dev->wl, "PIO queue too small. "
queue             232 drivers/net/wireless/broadcom/b43legacy/pio.c 	B43legacy_WARN_ON(queue->tx_devq_packets >
queue             234 drivers/net/wireless/broadcom/b43legacy/pio.c 	B43legacy_WARN_ON(queue->tx_devq_used > queue->tx_devq_size);
queue             239 drivers/net/wireless/broadcom/b43legacy/pio.c 	if (queue->tx_devq_packets == B43legacy_PIO_MAXTXDEVQPACKETS)
queue             241 drivers/net/wireless/broadcom/b43legacy/pio.c 	if (queue->tx_devq_used + octets > queue->tx_devq_size)
queue             244 drivers/net/wireless/broadcom/b43legacy/pio.c 	err = pio_tx_write_fragment(queue, skb, packet,
queue             256 drivers/net/wireless/broadcom/b43legacy/pio.c 	queue->tx_devq_packets++;
queue             257 drivers/net/wireless/broadcom/b43legacy/pio.c 	queue->tx_devq_used += octets;
queue             262 drivers/net/wireless/broadcom/b43legacy/pio.c 	list_move_tail(&packet->list, &queue->txrunning);
queue             269 drivers/net/wireless/broadcom/b43legacy/pio.c 	struct b43legacy_pioqueue *queue = (struct b43legacy_pioqueue *)d;
queue             270 drivers/net/wireless/broadcom/b43legacy/pio.c 	struct b43legacy_wldev *dev = queue->dev;
queue             277 drivers/net/wireless/broadcom/b43legacy/pio.c 	if (queue->tx_frozen)
queue             279 drivers/net/wireless/broadcom/b43legacy/pio.c 	txctl = b43legacy_pio_read(queue, B43legacy_PIO_TXCTL);
queue             283 drivers/net/wireless/broadcom/b43legacy/pio.c 	list_for_each_entry_safe(packet, tmp_packet, &queue->txqueue, list) {
queue             299 drivers/net/wireless/broadcom/b43legacy/pio.c static void setup_txqueues(struct b43legacy_pioqueue *queue)
queue             304 drivers/net/wireless/broadcom/b43legacy/pio.c 	queue->nr_txfree = B43legacy_PIO_MAXTXPACKETS;
queue             306 drivers/net/wireless/broadcom/b43legacy/pio.c 		packet = &(queue->tx_packets_cache[i]);
queue             308 drivers/net/wireless/broadcom/b43legacy/pio.c 		packet->queue = queue;
queue             311 drivers/net/wireless/broadcom/b43legacy/pio.c 		list_add(&packet->list, &queue->txfree);
queue             319 drivers/net/wireless/broadcom/b43legacy/pio.c 	struct b43legacy_pioqueue *queue;
queue             323 drivers/net/wireless/broadcom/b43legacy/pio.c 	queue = kzalloc(sizeof(*queue), GFP_KERNEL);
queue             324 drivers/net/wireless/broadcom/b43legacy/pio.c 	if (!queue)
queue             327 drivers/net/wireless/broadcom/b43legacy/pio.c 	queue->dev = dev;
queue             328 drivers/net/wireless/broadcom/b43legacy/pio.c 	queue->mmio_base = pio_mmio_base;
queue             329 drivers/net/wireless/broadcom/b43legacy/pio.c 	queue->need_workarounds = (dev->dev->id.revision < 3);
queue             331 drivers/net/wireless/broadcom/b43legacy/pio.c 	INIT_LIST_HEAD(&queue->txfree);
queue             332 drivers/net/wireless/broadcom/b43legacy/pio.c 	INIT_LIST_HEAD(&queue->txqueue);
queue             333 drivers/net/wireless/broadcom/b43legacy/pio.c 	INIT_LIST_HEAD(&queue->txrunning);
queue             334 drivers/net/wireless/broadcom/b43legacy/pio.c 	tasklet_init(&queue->txtask, tx_tasklet,
queue             335 drivers/net/wireless/broadcom/b43legacy/pio.c 		     (unsigned long)queue);
queue             341 drivers/net/wireless/broadcom/b43legacy/pio.c 	qsize = b43legacy_read16(dev, queue->mmio_base
queue             355 drivers/net/wireless/broadcom/b43legacy/pio.c 	queue->tx_devq_size = qsize;
queue             357 drivers/net/wireless/broadcom/b43legacy/pio.c 	setup_txqueues(queue);
queue             360 drivers/net/wireless/broadcom/b43legacy/pio.c 	return queue;
queue             363 drivers/net/wireless/broadcom/b43legacy/pio.c 	kfree(queue);
queue             364 drivers/net/wireless/broadcom/b43legacy/pio.c 	queue = NULL;
queue             368 drivers/net/wireless/broadcom/b43legacy/pio.c static void cancel_transfers(struct b43legacy_pioqueue *queue)
queue             372 drivers/net/wireless/broadcom/b43legacy/pio.c 	tasklet_kill(&queue->txtask);
queue             374 drivers/net/wireless/broadcom/b43legacy/pio.c 	list_for_each_entry_safe(packet, tmp_packet, &queue->txrunning, list)
queue             376 drivers/net/wireless/broadcom/b43legacy/pio.c 	list_for_each_entry_safe(packet, tmp_packet, &queue->txqueue, list)
queue             380 drivers/net/wireless/broadcom/b43legacy/pio.c static void b43legacy_destroy_pioqueue(struct b43legacy_pioqueue *queue)
queue             382 drivers/net/wireless/broadcom/b43legacy/pio.c 	if (!queue)
queue             385 drivers/net/wireless/broadcom/b43legacy/pio.c 	cancel_transfers(queue);
queue             386 drivers/net/wireless/broadcom/b43legacy/pio.c 	kfree(queue);
queue             410 drivers/net/wireless/broadcom/b43legacy/pio.c 	struct b43legacy_pioqueue *queue;
queue             413 drivers/net/wireless/broadcom/b43legacy/pio.c 	queue = b43legacy_setup_pioqueue(dev, B43legacy_MMIO_PIO1_BASE);
queue             414 drivers/net/wireless/broadcom/b43legacy/pio.c 	if (!queue)
queue             416 drivers/net/wireless/broadcom/b43legacy/pio.c 	pio->queue0 = queue;
queue             418 drivers/net/wireless/broadcom/b43legacy/pio.c 	queue = b43legacy_setup_pioqueue(dev, B43legacy_MMIO_PIO2_BASE);
queue             419 drivers/net/wireless/broadcom/b43legacy/pio.c 	if (!queue)
queue             421 drivers/net/wireless/broadcom/b43legacy/pio.c 	pio->queue1 = queue;
queue             423 drivers/net/wireless/broadcom/b43legacy/pio.c 	queue = b43legacy_setup_pioqueue(dev, B43legacy_MMIO_PIO3_BASE);
queue             424 drivers/net/wireless/broadcom/b43legacy/pio.c 	if (!queue)
queue             426 drivers/net/wireless/broadcom/b43legacy/pio.c 	pio->queue2 = queue;
queue             428 drivers/net/wireless/broadcom/b43legacy/pio.c 	queue = b43legacy_setup_pioqueue(dev, B43legacy_MMIO_PIO4_BASE);
queue             429 drivers/net/wireless/broadcom/b43legacy/pio.c 	if (!queue)
queue             431 drivers/net/wireless/broadcom/b43legacy/pio.c 	pio->queue3 = queue;
queue             456 drivers/net/wireless/broadcom/b43legacy/pio.c 	struct b43legacy_pioqueue *queue = dev->pio.queue1;
queue             459 drivers/net/wireless/broadcom/b43legacy/pio.c 	B43legacy_WARN_ON(queue->tx_suspended);
queue             460 drivers/net/wireless/broadcom/b43legacy/pio.c 	B43legacy_WARN_ON(list_empty(&queue->txfree));
queue             462 drivers/net/wireless/broadcom/b43legacy/pio.c 	packet = list_entry(queue->txfree.next, struct b43legacy_pio_txpacket,
queue             466 drivers/net/wireless/broadcom/b43legacy/pio.c 	list_move_tail(&packet->list, &queue->txqueue);
queue             467 drivers/net/wireless/broadcom/b43legacy/pio.c 	queue->nr_txfree--;
queue             468 drivers/net/wireless/broadcom/b43legacy/pio.c 	B43legacy_WARN_ON(queue->nr_txfree >= B43legacy_PIO_MAXTXPACKETS);
queue             470 drivers/net/wireless/broadcom/b43legacy/pio.c 	tasklet_schedule(&queue->txtask);
queue             478 drivers/net/wireless/broadcom/b43legacy/pio.c 	struct b43legacy_pioqueue *queue;
queue             483 drivers/net/wireless/broadcom/b43legacy/pio.c 	queue = parse_cookie(dev, status->cookie, &packet);
queue             484 drivers/net/wireless/broadcom/b43legacy/pio.c 	B43legacy_WARN_ON(!queue);
queue             489 drivers/net/wireless/broadcom/b43legacy/pio.c 	queue->tx_devq_packets--;
queue             490 drivers/net/wireless/broadcom/b43legacy/pio.c 	queue->tx_devq_used -= (packet->skb->len +
queue             532 drivers/net/wireless/broadcom/b43legacy/pio.c 	if (!list_empty(&queue->txqueue))
queue             533 drivers/net/wireless/broadcom/b43legacy/pio.c 		tasklet_schedule(&queue->txtask);
queue             536 drivers/net/wireless/broadcom/b43legacy/pio.c static void pio_rx_error(struct b43legacy_pioqueue *queue,
queue             542 drivers/net/wireless/broadcom/b43legacy/pio.c 	b43legacyerr(queue->dev->wl, "PIO RX error: %s\n", error);
queue             543 drivers/net/wireless/broadcom/b43legacy/pio.c 	b43legacy_pio_write(queue, B43legacy_PIO_RXCTL,
queue             546 drivers/net/wireless/broadcom/b43legacy/pio.c 		B43legacy_WARN_ON(queue->mmio_base != B43legacy_MMIO_PIO1_BASE);
queue             549 drivers/net/wireless/broadcom/b43legacy/pio.c 			b43legacy_pio_read(queue, B43legacy_PIO_RXDATA);
queue             554 drivers/net/wireless/broadcom/b43legacy/pio.c void b43legacy_pio_rx(struct b43legacy_pioqueue *queue)
queue             565 drivers/net/wireless/broadcom/b43legacy/pio.c 	tmp = b43legacy_pio_read(queue, B43legacy_PIO_RXCTL);
queue             568 drivers/net/wireless/broadcom/b43legacy/pio.c 	b43legacy_pio_write(queue, B43legacy_PIO_RXCTL,
queue             572 drivers/net/wireless/broadcom/b43legacy/pio.c 		tmp = b43legacy_pio_read(queue, B43legacy_PIO_RXCTL);
queue             577 drivers/net/wireless/broadcom/b43legacy/pio.c 	b43legacydbg(queue->dev->wl, "PIO RX timed out\n");
queue             581 drivers/net/wireless/broadcom/b43legacy/pio.c 	len = b43legacy_pio_read(queue, B43legacy_PIO_RXDATA);
queue             583 drivers/net/wireless/broadcom/b43legacy/pio.c 		pio_rx_error(queue, 0, "len > 0x700");
queue             586 drivers/net/wireless/broadcom/b43legacy/pio.c 	if (unlikely(len == 0 && queue->mmio_base !=
queue             588 drivers/net/wireless/broadcom/b43legacy/pio.c 		pio_rx_error(queue, 0, "len == 0");
queue             592 drivers/net/wireless/broadcom/b43legacy/pio.c 	if (queue->mmio_base == B43legacy_MMIO_PIO4_BASE)
queue             597 drivers/net/wireless/broadcom/b43legacy/pio.c 		tmp = b43legacy_pio_read(queue, B43legacy_PIO_RXDATA);
queue             603 drivers/net/wireless/broadcom/b43legacy/pio.c 		pio_rx_error(queue,
queue             604 drivers/net/wireless/broadcom/b43legacy/pio.c 			     (queue->mmio_base == B43legacy_MMIO_PIO1_BASE),
queue             608 drivers/net/wireless/broadcom/b43legacy/pio.c 	if (queue->mmio_base == B43legacy_MMIO_PIO4_BASE) {
queue             613 drivers/net/wireless/broadcom/b43legacy/pio.c 		b43legacy_handle_hwtxstatus(queue->dev, hw);
queue             620 drivers/net/wireless/broadcom/b43legacy/pio.c 		pio_rx_error(queue, 1, "OOM");
queue             625 drivers/net/wireless/broadcom/b43legacy/pio.c 		tmp = b43legacy_pio_read(queue, B43legacy_PIO_RXDATA);
queue             629 drivers/net/wireless/broadcom/b43legacy/pio.c 		tmp = b43legacy_pio_read(queue, B43legacy_PIO_RXDATA);
queue             632 drivers/net/wireless/broadcom/b43legacy/pio.c 	b43legacy_rx(queue->dev, skb, rxhdr);
queue             635 drivers/net/wireless/broadcom/b43legacy/pio.c void b43legacy_pio_tx_suspend(struct b43legacy_pioqueue *queue)
queue             637 drivers/net/wireless/broadcom/b43legacy/pio.c 	b43legacy_power_saving_ctl_bits(queue->dev, -1, 1);
queue             638 drivers/net/wireless/broadcom/b43legacy/pio.c 	b43legacy_pio_write(queue, B43legacy_PIO_TXCTL,
queue             639 drivers/net/wireless/broadcom/b43legacy/pio.c 			    b43legacy_pio_read(queue, B43legacy_PIO_TXCTL)
queue             643 drivers/net/wireless/broadcom/b43legacy/pio.c void b43legacy_pio_tx_resume(struct b43legacy_pioqueue *queue)
queue             645 drivers/net/wireless/broadcom/b43legacy/pio.c 	b43legacy_pio_write(queue, B43legacy_PIO_TXCTL,
queue             646 drivers/net/wireless/broadcom/b43legacy/pio.c 			    b43legacy_pio_read(queue, B43legacy_PIO_TXCTL)
queue             648 drivers/net/wireless/broadcom/b43legacy/pio.c 	b43legacy_power_saving_ctl_bits(queue->dev, -1, -1);
queue             649 drivers/net/wireless/broadcom/b43legacy/pio.c 	tasklet_schedule(&queue->txtask);
queue              43 drivers/net/wireless/broadcom/b43legacy/pio.h 	struct b43legacy_pioqueue *queue;
queue              49 drivers/net/wireless/broadcom/b43legacy/pio.h 			      (packet)->queue->tx_packets_cache))
queue              84 drivers/net/wireless/broadcom/b43legacy/pio.h u16 b43legacy_pio_read(struct b43legacy_pioqueue *queue,
queue              87 drivers/net/wireless/broadcom/b43legacy/pio.h 	return b43legacy_read16(queue->dev, queue->mmio_base + offset);
queue              91 drivers/net/wireless/broadcom/b43legacy/pio.h void b43legacy_pio_write(struct b43legacy_pioqueue *queue,
queue              94 drivers/net/wireless/broadcom/b43legacy/pio.h 	b43legacy_write16(queue->dev, queue->mmio_base + offset, value);
queue             105 drivers/net/wireless/broadcom/b43legacy/pio.h void b43legacy_pio_rx(struct b43legacy_pioqueue *queue);
queue             108 drivers/net/wireless/broadcom/b43legacy/pio.h void b43legacy_pio_tx_suspend(struct b43legacy_pioqueue *queue);
queue             109 drivers/net/wireless/broadcom/b43legacy/pio.h void b43legacy_pio_tx_resume(struct b43legacy_pioqueue *queue);
queue             137 drivers/net/wireless/broadcom/b43legacy/pio.h void b43legacy_pio_rx(struct b43legacy_pioqueue *queue)
queue             141 drivers/net/wireless/broadcom/b43legacy/pio.h void b43legacy_pio_tx_suspend(struct b43legacy_pioqueue *queue)
queue             145 drivers/net/wireless/broadcom/b43legacy/pio.h void b43legacy_pio_tx_resume(struct b43legacy_pioqueue *queue)
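The b43legacy PIO excerpts above hand a cookie from generate_cookie(queue, packet) to the hardware and later recover both the queue and the packet in parse_cookie() when TX status arrives; the packet index itself comes from the packet's position in the tx_packets_cache array. The exact bit layout is not visible in these lines, so the sketch below is a minimal userspace model under an assumed layout (queue id in the top bits, packet index in the rest); make_cookie(), parse_cookie() as written here, and the 12-bit split are illustrative stand-ins, not the driver's encoding.

	#include <assert.h>
	#include <stdint.h>
	#include <stdio.h>

	/* Hypothetical layout: top 4 bits = queue id (there are four PIO
	 * queues above), low 12 bits = packet index in the cache array. */
	static uint16_t make_cookie(unsigned int queue_id, unsigned int packet_index)
	{
		assert(queue_id < 4);
		assert(packet_index < 0x1000);
		return (uint16_t)((queue_id << 12) | packet_index);
	}

	static void parse_cookie(uint16_t cookie, unsigned int *queue_id,
				 unsigned int *packet_index)
	{
		*queue_id = cookie >> 12;
		*packet_index = cookie & 0x0fff;
	}

	int main(void)
	{
		unsigned int q, idx;

		parse_cookie(make_cookie(2, 37), &q, &idx);
		printf("queue %u, packet %u\n", q, idx); /* queue 2, packet 37 */
		return 0;
	}

The point of the round trip is that TX status reports only carry the cookie, so everything needed to find the original queue and packet must be packed into it up front.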
queue            1256 drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c 	struct sk_buff_head *queue;
queue            1280 drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c 		queue = &pq->q[prec].skblist;
queue            1282 drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c 		p_head = skb_peek(queue);
queue            1283 drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c 		p_tail = skb_peek_tail(queue);
queue            1295 drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c 			p_tail = skb_queue_prev(queue, p_tail);
queue            1300 drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c 			__skb_queue_tail(queue, p);
queue            1309 drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c 				__skb_queue_after(queue, p_tail, p);
queue            1312 drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c 				__skb_insert(p, p_tail->prev, p_tail, queue);
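The fwsignal.c lines above insert a packet into a precedence queue by peeking at the tail, stepping backwards with skb_queue_prev() until the right position is found, then linking the new packet in with __skb_queue_after() or __skb_insert(). Below is a self-contained userspace model of that tail-first ordered insertion on a circular doubly-linked list with a sentinel head; struct node and the seq ordering key are stand-ins for the skb machinery, not the driver's types.

	#include <stdio.h>

	struct node {
		struct node *prev, *next;
		int seq;
	};

	/* Start at the tail and walk backwards while the new element sorts
	 * earlier (like skb_queue_prev()), then splice it in after the node
	 * we stopped at (like __skb_queue_after()). */
	static void insert_sorted(struct node *head, struct node *n)
	{
		struct node *pos = head->prev;          /* tail */

		while (pos != head && n->seq < pos->seq)
			pos = pos->prev;

		n->prev = pos;
		n->next = pos->next;
		pos->next->prev = n;
		pos->next = n;
	}

	int main(void)
	{
		struct node head = { &head, &head, 0 };
		struct node nodes[4] = { {0,0,10}, {0,0,30}, {0,0,20}, {0,0,25} };

		for (int i = 0; i < 4; i++)
			insert_sorted(&head, &nodes[i]);
		for (struct node *p = head.next; p != &head; p = p->next)
			printf("%d ", p->seq);          /* 10 20 25 30 */
		printf("\n");
		return 0;
	}

Walking from the tail is a deliberate choice when arrivals are mostly in order: the common case terminates after one comparison.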
queue             220 drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.c 	struct list_head queue;
queue             568 drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.c 					struct brcmf_msgbuf_work_item, queue);
queue             569 drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.c 		list_del(&work->queue);
queue             685 drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.c 	list_add_tail(&create->queue, &msgbuf->work_queue);
queue            1640 drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.c 						queue);
queue            1641 drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.c 			list_del(&work->queue);
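msgbuf.c above queues work items by embedding a list_head named queue inside each item, appending with list_add_tail(&create->queue, &msgbuf->work_queue) and later recovering the enclosing item from the node before list_del(). A compact userspace sketch of that intrusive-list pattern follows, with container_of() rebuilt from offsetof() so it compiles standalone; struct work_item is a stand-in for the driver's brcmf_msgbuf_work_item.

	#include <stddef.h>
	#include <stdio.h>

	#define container_of(ptr, type, member) \
		((type *)((char *)(ptr) - offsetof(type, member)))

	struct list_head { struct list_head *prev, *next; };

	struct work_item {
		struct list_head queue;   /* embedded node, as in the excerpt */
		int id;
	};

	static void list_add_tail(struct list_head *n, struct list_head *head)
	{
		n->prev = head->prev;
		n->next = head;
		head->prev->next = n;
		head->prev = n;
	}

	static void list_del(struct list_head *n)
	{
		n->prev->next = n->next;
		n->next->prev = n->prev;
	}

	int main(void)
	{
		struct list_head wq = { &wq, &wq };
		struct work_item a = { .id = 1 }, b = { .id = 2 };

		list_add_tail(&a.queue, &wq);
		list_add_tail(&b.queue, &wq);

		while (wq.next != &wq) {
			/* like list_first_entry(..., struct ..., queue) */
			struct work_item *w =
				container_of(wq.next, struct work_item, queue);
			list_del(&w->queue);
			printf("work item %d\n", w->id);
		}
		return 0;
	}

Intrusive lists avoid a separate allocation per link: the node lives inside the work item, so queueing never fails and removal is O(1).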
queue             844 drivers/net/wireless/broadcom/brcm80211/brcmsmac/ampdu.c 	u8 bitmap[8], queue, tid;
queue             871 drivers/net/wireless/broadcom/brcm80211/brcmsmac/ampdu.c 	queue = txs->frameid & TXFID_QUEUE_MASK;
queue             926 drivers/net/wireless/broadcom/brcm80211/brcmsmac/ampdu.c 				if (brcms_c_ffpld_check_txfunfl(wlc, queue) > 0)
queue             992 drivers/net/wireless/broadcom/brcm80211/brcmsmac/ampdu.c 				ret = brcms_c_txfifo(wlc, queue, p);
queue             998 drivers/net/wireless/broadcom/brcm80211/brcmsmac/ampdu.c 				WARN_ONCE(ret, "queue %d out of txds\n", queue);
queue            1022 drivers/net/wireless/broadcom/brcm80211/brcmsmac/ampdu.c 		p = dma_getnexttxp(wlc->hw->di[queue], DMA_RANGE_TRANSMITTED);
queue            1068 drivers/net/wireless/broadcom/brcm80211/brcmsmac/ampdu.c 		u8 queue = txs->frameid & TXFID_QUEUE_MASK;
queue            1082 drivers/net/wireless/broadcom/brcm80211/brcmsmac/ampdu.c 			p = dma_getnexttxp(wlc->hw->di[queue],
queue             792 drivers/net/wireless/broadcom/brcm80211/brcmsmac/mac80211_if.c brcms_ops_conf_tx(struct ieee80211_hw *hw, struct ieee80211_vif *vif, u16 queue,
queue             798 drivers/net/wireless/broadcom/brcm80211/brcmsmac/mac80211_if.c 	brcms_c_wme_setparams(wl->wlc, queue, params, true);
queue             837 drivers/net/wireless/broadcom/brcm80211/brcmsmac/main.c 	uint queue = NFIFO;
queue             868 drivers/net/wireless/broadcom/brcm80211/brcmsmac/main.c 	queue = txs->frameid & TXFID_QUEUE_MASK;
queue             869 drivers/net/wireless/broadcom/brcm80211/brcmsmac/main.c 	if (queue >= NFIFO) {
queue             870 drivers/net/wireless/broadcom/brcm80211/brcmsmac/main.c 		brcms_err(wlc->hw->d11core, "queue %u >= NFIFO\n", queue);
queue             874 drivers/net/wireless/broadcom/brcm80211/brcmsmac/main.c 	dma = wlc->hw->di[queue];
queue             876 drivers/net/wireless/broadcom/brcm80211/brcmsmac/main.c 	p = dma_getnexttxp(wlc->hw->di[queue], DMA_RANGE_TRANSMITTED);
queue             943 drivers/net/wireless/broadcom/brcm80211/brcmsmac/main.c 		if (queue < IEEE80211_NUM_ACS) {
queue             944 drivers/net/wireless/broadcom/brcm80211/brcmsmac/main.c 			sfbl = GFIELD(wlc->wme_retries[wme_fifo2ac[queue]],
queue             946 drivers/net/wireless/broadcom/brcm80211/brcmsmac/main.c 			lfbl = GFIELD(wlc->wme_retries[wme_fifo2ac[queue]],
queue            1016 drivers/net/wireless/broadcom/brcm80211/brcmsmac/main.c 	if (dma && queue < NFIFO) {
queue            1017 drivers/net/wireless/broadcom/brcm80211/brcmsmac/main.c 		u16 ac_queue = brcms_fifo_to_ac(queue);
queue            1018 drivers/net/wireless/broadcom/brcm80211/brcmsmac/main.c 		if (dma->txavail > TX_HEADROOM && queue < TX_BCMC_FIFO &&
queue            6211 drivers/net/wireless/broadcom/brcm80211/brcmsmac/main.c 		     uint nfrags, uint queue, uint next_frag_len)
queue            6266 drivers/net/wireless/broadcom/brcm80211/brcmsmac/main.c 		if (queue == TX_BCMC_FIFO) {
queue            6282 drivers/net/wireless/broadcom/brcm80211/brcmsmac/main.c 			    (queue & TXFID_QUEUE_MASK);
queue            6285 drivers/net/wireless/broadcom/brcm80211/brcmsmac/main.c 	frameid |= queue & TXFID_QUEUE_MASK;
queue            6826 drivers/net/wireless/broadcom/brcm80211/brcmsmac/main.c 				if (wlc->fragthresh[queue] !=
queue            6828 drivers/net/wireless/broadcom/brcm80211/brcmsmac/main.c 					wlc->fragthresh[queue] =
queue            6833 drivers/net/wireless/broadcom/brcm80211/brcmsmac/main.c 					   wlc->pub->unit, fifo_names[queue],
queue            6841 drivers/net/wireless/broadcom/brcm80211/brcmsmac/main.c 					   fifo_names[queue],
queue            6842 drivers/net/wireless/broadcom/brcm80211/brcmsmac/main.c 					   phylen, wlc->fragthresh[queue],
queue            6925 drivers/net/wireless/broadcom/brcm80211/brcmsmac/main.c 	u16 queue;
queue            6936 drivers/net/wireless/broadcom/brcm80211/brcmsmac/main.c 	queue = skb_get_queue_mapping(p);
queue            6938 drivers/net/wireless/broadcom/brcm80211/brcmsmac/main.c 	    !ieee80211_queue_stopped(wlc->pub->ieee_hw, queue))
queue            6939 drivers/net/wireless/broadcom/brcm80211/brcmsmac/main.c 		ieee80211_stop_queue(wlc->pub->ieee_hw, queue);
queue            5155 drivers/net/wireless/intel/ipw2x00/ipw2200.c 		rxq->queue[rxq->write] = rxb;
queue            8267 drivers/net/wireless/intel/ipw2x00/ipw2200.c 		rxb = priv->rxq->queue[i];
queue            8272 drivers/net/wireless/intel/ipw2x00/ipw2200.c 		priv->rxq->queue[i] = NULL;
queue             708 drivers/net/wireless/intel/ipw2x00/ipw2200.h 	struct ipw_rx_mem_buffer *queue[RX_QUEUE_SIZE];
queue             950 drivers/net/wireless/intel/iwlegacy/3945-mac.c 		rxq->queue[rxq->write] = rxb;
queue            1211 drivers/net/wireless/intel/iwlegacy/3945-mac.c 		rxb = rxq->queue[i];
queue            1218 drivers/net/wireless/intel/iwlegacy/3945-mac.c 		rxq->queue[i] = NULL;
queue             106 drivers/net/wireless/intel/iwlegacy/4965-mac.c 		rxq->queue[i] = NULL;
queue             259 drivers/net/wireless/intel/iwlegacy/4965-mac.c 		rxb = rxq->queue[rxq->write];
queue             270 drivers/net/wireless/intel/iwlegacy/4965-mac.c 		rxq->queue[rxq->write] = rxb;
queue            4242 drivers/net/wireless/intel/iwlegacy/4965-mac.c 		rxb = rxq->queue[i];
queue            4249 drivers/net/wireless/intel/iwlegacy/4965-mac.c 		rxq->queue[i] = NULL;
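The ipw2200 and iwlegacy lines above all park receive buffers in a fixed queue[RX_QUEUE_SIZE] array at a write index (rxq->queue[rxq->write] = rxb) and NULL out slots as the ring is drained. A minimal userspace model of that restock step follows; the power-of-two mask wrap is an assumption here, since the drivers' actual index arithmetic is not shown in these excerpts.

	#include <stdio.h>

	#define RX_QUEUE_SIZE 256

	struct rx_buf { int id; };

	struct rx_queue {
		struct rx_buf *queue[RX_QUEUE_SIZE];
		unsigned int write;
	};

	/* Park a buffer at the write index and advance it with wraparound
	 * (assumes RX_QUEUE_SIZE is a power of two). */
	static void rxq_restock(struct rx_queue *rxq, struct rx_buf *rxb)
	{
		rxq->queue[rxq->write] = rxb;
		rxq->write = (rxq->write + 1) & (RX_QUEUE_SIZE - 1);
	}

	int main(void)
	{
		static struct rx_queue rxq;
		static struct rx_buf bufs[4];

		for (int i = 0; i < 4; i++) {
			bufs[i].id = i;
			rxq_restock(&rxq, &bufs[i]);
		}
		printf("slot 3 holds buffer %d\n", rxq.queue[3]->id); /* 3 */
		return 0;
	}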
queue            4492 drivers/net/wireless/intel/iwlegacy/common.c il_mac_conf_tx(struct ieee80211_hw *hw, struct ieee80211_vif *vif, u16 queue,
queue            4506 drivers/net/wireless/intel/iwlegacy/common.c 	if (queue >= AC_NUM) {
queue            4507 drivers/net/wireless/intel/iwlegacy/common.c 		D_MAC80211("leave - queue >= AC_NUM %d\n", queue);
queue            4511 drivers/net/wireless/intel/iwlegacy/common.c 	q = AC_NUM - 1 - queue;
queue             596 drivers/net/wireless/intel/iwlegacy/common.h 	struct il_rx_buf *queue[RX_QUEUE_SIZE];
queue            1689 drivers/net/wireless/intel/iwlegacy/common.h 		   u16 queue, const struct ieee80211_tx_queue_params *params);
queue            2272 drivers/net/wireless/intel/iwlegacy/common.h 	u8 queue = txq->swq_id;
queue            2273 drivers/net/wireless/intel/iwlegacy/common.h 	u8 ac = queue & 3;
queue            2274 drivers/net/wireless/intel/iwlegacy/common.h 	u8 hwq = (queue >> 2) & 0x1f;
queue            2283 drivers/net/wireless/intel/iwlegacy/common.h 	u8 queue = txq->swq_id;
queue            2284 drivers/net/wireless/intel/iwlegacy/common.h 	u8 ac = queue & 3;
queue            2285 drivers/net/wireless/intel/iwlegacy/common.h 	u8 hwq = (queue >> 2) & 0x1f;
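iwlegacy's common.h above decodes a software queue id in place: the low two bits are the access category (queue & 3) and the next five bits are the hardware queue ((queue >> 2) & 0x1f). The sketch below round-trips that encoding; make_swq_id() is an illustrative helper for the sketch, not a function shown in the driver.

	#include <stdint.h>
	#include <stdio.h>

	/* Pack an AC into bits 0-1 and a hardware queue into bits 2-6,
	 * matching the decode visible in the excerpt. */
	static uint8_t make_swq_id(uint8_t ac, uint8_t hwq)
	{
		return (uint8_t)((hwq << 2) | (ac & 3));
	}

	int main(void)
	{
		uint8_t queue = make_swq_id(1, 9);
		uint8_t ac = queue & 3;
		uint8_t hwq = (queue >> 2) & 0x1f;

		printf("ac=%u hwq=%u\n", ac, hwq); /* ac=1 hwq=9 */
		return 0;
	}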
queue            1160 drivers/net/wireless/intel/iwlwifi/dvm/mac80211.c 			      struct ieee80211_vif *vif, u16 queue,
queue            1178 drivers/net/wireless/intel/iwlwifi/dvm/mac80211.c 	if (queue >= AC_NUM) {
queue            1179 drivers/net/wireless/intel/iwlwifi/dvm/mac80211.c 		IWL_DEBUG_MAC80211(priv, "leave - queue >= AC_NUM %d\n", queue);
queue            1183 drivers/net/wireless/intel/iwlwifi/dvm/mac80211.c 	q = AC_NUM - 1 - queue;
queue            2050 drivers/net/wireless/intel/iwlwifi/dvm/main.c static void iwl_stop_sw_queue(struct iwl_op_mode *op_mode, int queue)
queue            2053 drivers/net/wireless/intel/iwlwifi/dvm/main.c 	int mq = priv->queue_to_mac80211[queue];
queue            2061 drivers/net/wireless/intel/iwlwifi/dvm/main.c 			queue, mq);
queue            2069 drivers/net/wireless/intel/iwlwifi/dvm/main.c static void iwl_wake_sw_queue(struct iwl_op_mode *op_mode, int queue)
queue            2072 drivers/net/wireless/intel/iwlwifi/dvm/main.c 	int mq = priv->queue_to_mac80211[queue];
queue            2080 drivers/net/wireless/intel/iwlwifi/dvm/main.c 			queue, mq);
queue             153 drivers/net/wireless/intel/iwlwifi/iwl-op-mode.h 		       struct iwl_rx_cmd_buffer *rxb, unsigned int queue);
queue             156 drivers/net/wireless/intel/iwlwifi/iwl-op-mode.h 	void (*queue_full)(struct iwl_op_mode *op_mode, int queue);
queue             157 drivers/net/wireless/intel/iwlwifi/iwl-op-mode.h 	void (*queue_not_full)(struct iwl_op_mode *op_mode, int queue);
queue             197 drivers/net/wireless/intel/iwlwifi/iwl-op-mode.h 				      unsigned int queue)
queue             199 drivers/net/wireless/intel/iwlwifi/iwl-op-mode.h 	op_mode->ops->rx_rss(op_mode, napi, rxb, queue);
queue             210 drivers/net/wireless/intel/iwlwifi/iwl-op-mode.h 					  int queue)
queue             212 drivers/net/wireless/intel/iwlwifi/iwl-op-mode.h 	op_mode->ops->queue_full(op_mode, queue);
queue             216 drivers/net/wireless/intel/iwlwifi/iwl-op-mode.h 					      int queue)
queue             218 drivers/net/wireless/intel/iwlwifi/iwl-op-mode.h 	op_mode->ops->queue_not_full(op_mode, queue);
queue             559 drivers/net/wireless/intel/iwlwifi/iwl-trans.h 		  struct iwl_device_tx_cmd *dev_cmd, int queue);
queue             560 drivers/net/wireless/intel/iwlwifi/iwl-trans.h 	void (*reclaim)(struct iwl_trans *trans, int queue, int ssn,
queue             563 drivers/net/wireless/intel/iwlwifi/iwl-trans.h 	void (*set_q_ptrs)(struct iwl_trans *trans, int queue, int ptr);
queue             565 drivers/net/wireless/intel/iwlwifi/iwl-trans.h 	bool (*txq_enable)(struct iwl_trans *trans, int queue, u16 ssn,
queue             568 drivers/net/wireless/intel/iwlwifi/iwl-trans.h 	void (*txq_disable)(struct iwl_trans *trans, int queue,
queue             575 drivers/net/wireless/intel/iwlwifi/iwl-trans.h 	void (*txq_free)(struct iwl_trans *trans, int queue);
queue             576 drivers/net/wireless/intel/iwlwifi/iwl-trans.h 	int (*rxq_dma_data)(struct iwl_trans *trans, int queue,
queue             583 drivers/net/wireless/intel/iwlwifi/iwl-trans.h 	int (*wait_txq_empty)(struct iwl_trans *trans, int queue);
queue             951 drivers/net/wireless/intel/iwlwifi/iwl-trans.h 			       struct iwl_device_tx_cmd *dev_cmd, int queue)
queue             961 drivers/net/wireless/intel/iwlwifi/iwl-trans.h 	return trans->ops->tx(trans, skb, dev_cmd, queue);
queue             964 drivers/net/wireless/intel/iwlwifi/iwl-trans.h static inline void iwl_trans_reclaim(struct iwl_trans *trans, int queue,
queue             972 drivers/net/wireless/intel/iwlwifi/iwl-trans.h 	trans->ops->reclaim(trans, queue, ssn, skbs);
queue             975 drivers/net/wireless/intel/iwlwifi/iwl-trans.h static inline void iwl_trans_set_q_ptrs(struct iwl_trans *trans, int queue,
queue             983 drivers/net/wireless/intel/iwlwifi/iwl-trans.h 	trans->ops->set_q_ptrs(trans, queue, ptr);
queue             986 drivers/net/wireless/intel/iwlwifi/iwl-trans.h static inline void iwl_trans_txq_disable(struct iwl_trans *trans, int queue,
queue             989 drivers/net/wireless/intel/iwlwifi/iwl-trans.h 	trans->ops->txq_disable(trans, queue, configure_scd);
queue             993 drivers/net/wireless/intel/iwlwifi/iwl-trans.h iwl_trans_txq_enable_cfg(struct iwl_trans *trans, int queue, u16 ssn,
queue            1004 drivers/net/wireless/intel/iwlwifi/iwl-trans.h 	return trans->ops->txq_enable(trans, queue, ssn,
queue            1009 drivers/net/wireless/intel/iwlwifi/iwl-trans.h iwl_trans_get_rxq_dma_data(struct iwl_trans *trans, int queue,
queue            1015 drivers/net/wireless/intel/iwlwifi/iwl-trans.h 	return trans->ops->rxq_dma_data(trans, queue, data);
queue            1019 drivers/net/wireless/intel/iwlwifi/iwl-trans.h iwl_trans_txq_free(struct iwl_trans *trans, int queue)
queue            1024 drivers/net/wireless/intel/iwlwifi/iwl-trans.h 	trans->ops->txq_free(trans, queue);
queue            1048 drivers/net/wireless/intel/iwlwifi/iwl-trans.h 						 int queue, bool shared_mode)
queue            1051 drivers/net/wireless/intel/iwlwifi/iwl-trans.h 		trans->ops->txq_set_shared_mode(trans, queue, shared_mode);
queue            1054 drivers/net/wireless/intel/iwlwifi/iwl-trans.h static inline void iwl_trans_txq_enable(struct iwl_trans *trans, int queue,
queue            1067 drivers/net/wireless/intel/iwlwifi/iwl-trans.h 	iwl_trans_txq_enable_cfg(trans, queue, ssn, &cfg, queue_wdg_timeout);
queue            1071 drivers/net/wireless/intel/iwlwifi/iwl-trans.h void iwl_trans_ac_txq_enable(struct iwl_trans *trans, int queue, int fifo,
queue            1082 drivers/net/wireless/intel/iwlwifi/iwl-trans.h 	iwl_trans_txq_enable_cfg(trans, queue, 0, &cfg, queue_wdg_timeout);
queue            1124 drivers/net/wireless/intel/iwlwifi/iwl-trans.h static inline int iwl_trans_wait_txq_empty(struct iwl_trans *trans, int queue)
queue            1134 drivers/net/wireless/intel/iwlwifi/iwl-trans.h 	return trans->ops->wait_txq_empty(trans, queue);
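The iwl-trans.h lines above show the transport hidden behind a table of function pointers (tx, reclaim, txq_enable, txq_free, ...) that static inline wrappers such as iwl_trans_tx() dispatch through. Below is a miniature, self-contained model of that vtable pattern; struct trans, pcie_tx(), and the simplified signatures are stand-ins, not the real API.

	#include <stdio.h>

	struct trans;

	/* Per-backend operations table, as in struct iwl_trans_ops. */
	struct trans_ops {
		int (*tx)(struct trans *trans, int queue);
		void (*txq_disable)(struct trans *trans, int queue,
				    int configure_scd);
	};

	struct trans {
		const struct trans_ops *ops;
	};

	/* Thin inline wrapper doing the dispatch, like iwl_trans_tx(). */
	static inline int trans_tx(struct trans *t, int queue)
	{
		return t->ops->tx(t, queue);
	}

	static int pcie_tx(struct trans *t, int queue)
	{
		(void)t;
		printf("pcie tx on queue %d\n", queue);
		return 0;
	}

	static void pcie_txq_disable(struct trans *t, int queue, int configure_scd)
	{
		(void)t; (void)configure_scd;
		printf("pcie disable queue %d\n", queue);
	}

	static const struct trans_ops pcie_ops = {
		.tx = pcie_tx,
		.txq_disable = pcie_txq_disable,
	};

	int main(void)
	{
		struct trans t = { .ops = &pcie_ops };

		trans_tx(&t, 5);
		t.ops->txq_disable(&t, 5, 1);
		return 0;
	}

The payoff of the wrapper layer is that callers never name a backend: the same iwl_trans_* call sites work for any ops table plugged into the trans object.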
queue             675 drivers/net/wireless/intel/iwlwifi/mvm/mvm.h 	int queue;
queue             742 drivers/net/wireless/intel/iwlwifi/mvm/mvm.h 			sizeof(*buf) * buf->queue);
queue            1290 drivers/net/wireless/intel/iwlwifi/mvm/mvm.h static inline bool iwl_mvm_is_dqa_data_queue(struct iwl_mvm *mvm, u8 queue)
queue            1292 drivers/net/wireless/intel/iwlwifi/mvm/mvm.h 	return (queue >= IWL_MVM_DQA_MIN_DATA_QUEUE) &&
queue            1293 drivers/net/wireless/intel/iwlwifi/mvm/mvm.h 	       (queue <= IWL_MVM_DQA_MAX_DATA_QUEUE);
queue            1296 drivers/net/wireless/intel/iwlwifi/mvm/mvm.h static inline bool iwl_mvm_is_dqa_mgmt_queue(struct iwl_mvm *mvm, u8 queue)
queue            1298 drivers/net/wireless/intel/iwlwifi/mvm/mvm.h 	return (queue >= IWL_MVM_DQA_MIN_MGMT_QUEUE) &&
queue            1299 drivers/net/wireless/intel/iwlwifi/mvm/mvm.h 	       (queue <= IWL_MVM_DQA_MAX_MGMT_QUEUE);
queue            1610 drivers/net/wireless/intel/iwlwifi/mvm/mvm.h 			struct iwl_rx_cmd_buffer *rxb, int queue);
queue            1612 drivers/net/wireless/intel/iwlwifi/mvm/mvm.h 				struct iwl_rx_cmd_buffer *rxb, int queue);
queue            1614 drivers/net/wireless/intel/iwlwifi/mvm/mvm.h 			      struct iwl_rx_cmd_buffer *rxb, int queue);
queue            1616 drivers/net/wireless/intel/iwlwifi/mvm/mvm.h 				  struct iwl_rx_cmd_buffer *rxb, int queue);
queue            1620 drivers/net/wireless/intel/iwlwifi/mvm/mvm.h 			    struct iwl_rx_cmd_buffer *rxb, int queue);
queue            1958 drivers/net/wireless/intel/iwlwifi/mvm/mvm.h int iwl_mvm_reconfig_scd(struct iwl_mvm *mvm, int queue, int fifo, int sta_id,
queue            1085 drivers/net/wireless/intel/iwlwifi/mvm/ops.c static int iwl_mvm_is_static_queue(struct iwl_mvm *mvm, int queue)
queue            1087 drivers/net/wireless/intel/iwlwifi/mvm/ops.c 	return queue == mvm->aux_queue || queue == mvm->probe_queue ||
queue            1088 drivers/net/wireless/intel/iwlwifi/mvm/ops.c 		queue == mvm->p2p_dev_queue || queue == mvm->snif_queue;
queue            1357 drivers/net/wireless/intel/iwlwifi/mvm/ops.c 			      unsigned int queue)
queue            1364 drivers/net/wireless/intel/iwlwifi/mvm/ops.c 		iwl_mvm_rx_frame_release(mvm, napi, rxb, queue);
queue            1367 drivers/net/wireless/intel/iwlwifi/mvm/ops.c 		iwl_mvm_rx_queue_notif(mvm, napi, rxb, queue);
queue            1369 drivers/net/wireless/intel/iwlwifi/mvm/ops.c 		iwl_mvm_rx_mpdu_mq(mvm, napi, rxb, queue);
queue              96 drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c 				   int queue, struct ieee80211_sta *sta)
queue             123 drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c 	if (queue == 0)
queue             159 drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c 	res = memcmp(pn, ptk_pn->q[queue].pn[tid], IEEE80211_CCMP_PN_LEN);
queue             165 drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c 	memcpy(ptk_pn->q[queue].pn[tid], pn, IEEE80211_CCMP_PN_LEN);
queue             273 drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c 					    struct sk_buff *skb, int queue,
queue             277 drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c 	if (iwl_mvm_check_pn(mvm, skb, queue, sta))
queue             309 drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c 			     u32 pkt_flags, int queue, u8 *crypt_len)
queue             418 drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c static bool iwl_mvm_is_dup(struct ieee80211_sta *sta, int queue,
queue             431 drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c 	dup_data = &mvm_sta->dup_data[queue];
queue             545 drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c 		&baid_data->entries[reorder_buf->queue *
queue             584 drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c 							reorder_buf->queue,
queue             612 drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c 		&baid_data->entries[buf->queue * baid_data->entries_per_queue];
queue             678 drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c static void iwl_mvm_del_ba(struct iwl_mvm *mvm, int queue,
queue             699 drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c 	reorder_buf = &ba_data->reorder_buf[queue];
queue             716 drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c 					      u8 baid, u16 nssn, int queue,
queue             740 drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c 	reorder_buf = &ba_data->reorder_buf[queue];
queue             752 drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c 			      struct napi_struct *napi, int queue,
queue             756 drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c 					  data->nssn, queue,
queue             761 drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c 			    struct iwl_rx_cmd_buffer *rxb, int queue)
queue             780 drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c 		iwl_mvm_del_ba(mvm, queue, (void *)internal_notif->data);
queue             783 drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c 		iwl_mvm_nssn_sync(mvm, napi, queue,
queue             798 drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c 				     u32 reorder, u32 gp2, int queue)
queue             837 drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c 			 sta->addr, queue, tid);
queue             850 drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c 			    int queue,
queue             920 drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c 	buffer = &baid_data->reorder_buf[queue];
queue             921 drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c 	entries = &baid_data->entries[queue * baid_data->entries_per_queue];
queue             959 drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c 				 rx_status->device_timestamp, queue);
queue            1251 drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c 				       u32 rate_n_flags, int queue)
queue            1381 drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c 			  u32 rate_n_flags, u16 phy_info, int queue)
queue            1415 drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c 	if (!queue && !(phy_info & IWL_RX_MPDU_PHY_AMPDU)) {
queue            1424 drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c 					   rate_n_flags, queue);
queue            1427 drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c 	if (!queue && (phy_info & IWL_RX_MPDU_PHY_TSF_OVERLOAD) &&
queue            1549 drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c 			struct iwl_rx_cmd_buffer *rxb, int queue)
queue            1641 drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c 			      phy_info, queue);
queue            1648 drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c 			      le32_to_cpu(pkt->len_n_flags), queue,
queue            1692 drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c 	if (!queue && (phy_info & IWL_RX_MPDU_PHY_AMPDU)) {
queue            1782 drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c 		if (iwl_mvm_is_dup(sta, queue, rx_status, hdr, desc)) {
queue            1852 drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c 	if (!queue) {
queue            1869 drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c 	if (!iwl_mvm_reorder(mvm, napi, queue, sta, skb, desc))
queue            1870 drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c 		iwl_mvm_pass_packet_to_mac80211(mvm, napi, skb, queue,
queue            1877 drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c 				struct iwl_rx_cmd_buffer *rxb, int queue)
queue            1953 drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c 			      phy_info, queue);
queue            2019 drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c 			      struct iwl_rx_cmd_buffer *rxb, int queue)
queue            2026 drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c 					  queue, 0);
queue            2030 drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c 				  struct iwl_rx_cmd_buffer *rxb, int queue)
queue            2063 drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c 	iwl_mvm_release_frames_from_notif(mvm, napi, baid, nssn, queue, 0);
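In the rxmq.c excerpts, every BAID's reorder entries live in one flat allocation and each RX queue owns a contiguous slice of it: &baid_data->entries[queue * baid_data->entries_per_queue]. A small model of that slicing, with int entries standing in for the real reorder-buffer entry type:

	#include <stdio.h>
	#include <stdlib.h>

	struct baid_data {
		int num_queues;
		int entries_per_queue;
		int *entries;   /* flat [num_queues * entries_per_queue] */
	};

	/* Queue q owns entries[q * entries_per_queue .. + entries_per_queue). */
	static int *queue_slice(struct baid_data *d, int queue)
	{
		return &d->entries[queue * d->entries_per_queue];
	}

	int main(void)
	{
		struct baid_data d = { .num_queues = 4, .entries_per_queue = 64 };

		d.entries = calloc((size_t)d.num_queues * d.entries_per_queue,
				   sizeof(*d.entries));
		if (!d.entries)
			return 1;
		queue_slice(&d, 2)[5] = 42;            /* queue 2, entry 5 */
		printf("%d\n", d.entries[2 * 64 + 5]); /* 42 */
		free(d.entries);
		return 0;
	}

One allocation instead of one per queue keeps setup and teardown simple, and the per-queue slices never overlap, so each RX queue can touch its own slice without locking against the others.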
queue             300 drivers/net/wireless/intel/iwlwifi/mvm/sta.c static int iwl_mvm_invalidate_sta_queue(struct iwl_mvm *mvm, int queue,
queue             313 drivers/net/wireless/intel/iwlwifi/mvm/sta.c 	sta_id = mvm->queue_info[queue].ra_sta_id;
queue             349 drivers/net/wireless/intel/iwlwifi/mvm/sta.c 			       int queue, u8 tid, u8 flags)
queue             352 drivers/net/wireless/intel/iwlwifi/mvm/sta.c 		.scd_queue = queue,
queue             358 drivers/net/wireless/intel/iwlwifi/mvm/sta.c 		iwl_trans_txq_free(mvm->trans, queue);
queue             363 drivers/net/wireless/intel/iwlwifi/mvm/sta.c 	if (WARN_ON(mvm->queue_info[queue].tid_bitmap == 0))
queue             366 drivers/net/wireless/intel/iwlwifi/mvm/sta.c 	mvm->queue_info[queue].tid_bitmap &= ~BIT(tid);
queue             368 drivers/net/wireless/intel/iwlwifi/mvm/sta.c 	cmd.action = mvm->queue_info[queue].tid_bitmap ?
queue             371 drivers/net/wireless/intel/iwlwifi/mvm/sta.c 		mvm->queue_info[queue].status = IWL_MVM_QUEUE_FREE;
queue             375 drivers/net/wireless/intel/iwlwifi/mvm/sta.c 			    queue,
queue             376 drivers/net/wireless/intel/iwlwifi/mvm/sta.c 			    mvm->queue_info[queue].tid_bitmap);
queue             382 drivers/net/wireless/intel/iwlwifi/mvm/sta.c 	cmd.sta_id = mvm->queue_info[queue].ra_sta_id;
queue             383 drivers/net/wireless/intel/iwlwifi/mvm/sta.c 	cmd.tid = mvm->queue_info[queue].txq_tid;
queue             386 drivers/net/wireless/intel/iwlwifi/mvm/sta.c 	WARN(mvm->queue_info[queue].tid_bitmap,
queue             388 drivers/net/wireless/intel/iwlwifi/mvm/sta.c 	     queue, mvm->queue_info[queue].tid_bitmap);
queue             391 drivers/net/wireless/intel/iwlwifi/mvm/sta.c 	mvm->queue_info[queue].tid_bitmap = 0;
queue             401 drivers/net/wireless/intel/iwlwifi/mvm/sta.c 	mvm->queue_info[queue].reserved = false;
queue             403 drivers/net/wireless/intel/iwlwifi/mvm/sta.c 	iwl_trans_txq_disable(mvm->trans, queue, false);
queue             409 drivers/net/wireless/intel/iwlwifi/mvm/sta.c 			queue, ret);
queue             413 drivers/net/wireless/intel/iwlwifi/mvm/sta.c static int iwl_mvm_get_queue_agg_tids(struct iwl_mvm *mvm, int queue)
queue             427 drivers/net/wireless/intel/iwlwifi/mvm/sta.c 	sta_id = mvm->queue_info[queue].ra_sta_id;
queue             428 drivers/net/wireless/intel/iwlwifi/mvm/sta.c 	tid_bitmap = mvm->queue_info[queue].tid_bitmap;
queue             453 drivers/net/wireless/intel/iwlwifi/mvm/sta.c static int iwl_mvm_remove_sta_queue_marking(struct iwl_mvm *mvm, int queue)
queue             467 drivers/net/wireless/intel/iwlwifi/mvm/sta.c 	sta_id = mvm->queue_info[queue].ra_sta_id;
queue             468 drivers/net/wireless/intel/iwlwifi/mvm/sta.c 	tid_bitmap = mvm->queue_info[queue].tid_bitmap;
queue             494 drivers/net/wireless/intel/iwlwifi/mvm/sta.c 	mvmsta->tfd_queue_msk &= ~BIT(queue); /* Don't use this queue anymore */
queue             512 drivers/net/wireless/intel/iwlwifi/mvm/sta.c static int iwl_mvm_free_inactive_queue(struct iwl_mvm *mvm, int queue,
queue             527 drivers/net/wireless/intel/iwlwifi/mvm/sta.c 	sta_id = mvm->queue_info[queue].ra_sta_id;
queue             528 drivers/net/wireless/intel/iwlwifi/mvm/sta.c 	tid = mvm->queue_info[queue].txq_tid;
queue             536 drivers/net/wireless/intel/iwlwifi/mvm/sta.c 	disable_agg_tids = iwl_mvm_remove_sta_queue_marking(mvm, queue);
queue             539 drivers/net/wireless/intel/iwlwifi/mvm/sta.c 		iwl_mvm_invalidate_sta_queue(mvm, queue,
queue             542 drivers/net/wireless/intel/iwlwifi/mvm/sta.c 	ret = iwl_mvm_disable_txq(mvm, old_sta, queue, tid, 0);
queue             546 drivers/net/wireless/intel/iwlwifi/mvm/sta.c 			queue, ret);
queue             553 drivers/net/wireless/intel/iwlwifi/mvm/sta.c 		iwl_mvm_invalidate_sta_queue(mvm, queue, 0, true);
queue             561 drivers/net/wireless/intel/iwlwifi/mvm/sta.c 	int queue = 0;
queue             597 drivers/net/wireless/intel/iwlwifi/mvm/sta.c 		queue = ac_to_queue[IEEE80211_AC_BE];
queue             600 drivers/net/wireless/intel/iwlwifi/mvm/sta.c 		queue = ac_to_queue[ac];
queue             604 drivers/net/wireless/intel/iwlwifi/mvm/sta.c 		queue = ac_to_queue[IEEE80211_AC_VI];
queue             607 drivers/net/wireless/intel/iwlwifi/mvm/sta.c 		queue = ac_to_queue[IEEE80211_AC_BK];
queue             610 drivers/net/wireless/intel/iwlwifi/mvm/sta.c 		queue = ac_to_queue[IEEE80211_AC_VI];
queue             613 drivers/net/wireless/intel/iwlwifi/mvm/sta.c 		queue = ac_to_queue[IEEE80211_AC_VO];
queue             616 drivers/net/wireless/intel/iwlwifi/mvm/sta.c 	if (!iwl_mvm_is_dqa_data_queue(mvm, queue) &&
queue             617 drivers/net/wireless/intel/iwlwifi/mvm/sta.c 	    !iwl_mvm_is_dqa_mgmt_queue(mvm, queue) &&
queue             618 drivers/net/wireless/intel/iwlwifi/mvm/sta.c 	    (queue != IWL_MVM_DQA_BSS_CLIENT_QUEUE)) {
queue             623 drivers/net/wireless/intel/iwlwifi/mvm/sta.c 	return queue;
queue             632 drivers/net/wireless/intel/iwlwifi/mvm/sta.c static int iwl_mvm_redirect_queue(struct iwl_mvm *mvm, int queue, int tid,
queue             637 drivers/net/wireless/intel/iwlwifi/mvm/sta.c 		.scd_queue = queue,
queue             654 drivers/net/wireless/intel/iwlwifi/mvm/sta.c 	if (ac <= mvm->queue_info[queue].mac80211_ac && !force) {
queue             657 drivers/net/wireless/intel/iwlwifi/mvm/sta.c 				    queue);
queue             661 drivers/net/wireless/intel/iwlwifi/mvm/sta.c 	cmd.sta_id = mvm->queue_info[queue].ra_sta_id;
queue             662 drivers/net/wireless/intel/iwlwifi/mvm/sta.c 	cmd.tx_fifo = iwl_mvm_ac_to_tx_fifo[mvm->queue_info[queue].mac80211_ac];
queue             663 drivers/net/wireless/intel/iwlwifi/mvm/sta.c 	cmd.tid = mvm->queue_info[queue].txq_tid;
queue             664 drivers/net/wireless/intel/iwlwifi/mvm/sta.c 	shared_queue = hweight16(mvm->queue_info[queue].tid_bitmap) > 1;
queue             667 drivers/net/wireless/intel/iwlwifi/mvm/sta.c 			    queue, iwl_mvm_ac_to_tx_fifo[ac]);
queue             672 drivers/net/wireless/intel/iwlwifi/mvm/sta.c 	ret = iwl_trans_wait_tx_queues_empty(mvm->trans, BIT(queue));
queue             675 drivers/net/wireless/intel/iwlwifi/mvm/sta.c 			queue);
queue             681 drivers/net/wireless/intel/iwlwifi/mvm/sta.c 	iwl_trans_txq_disable(mvm->trans, queue, false);
queue             684 drivers/net/wireless/intel/iwlwifi/mvm/sta.c 		IWL_ERR(mvm, "Failed SCD disable TXQ %d (ret=%d)\n", queue,
queue             688 drivers/net/wireless/intel/iwlwifi/mvm/sta.c 	iwl_trans_txq_enable_cfg(mvm->trans, queue, ssn, NULL, wdg_timeout);
queue             691 drivers/net/wireless/intel/iwlwifi/mvm/sta.c 	mvm->queue_info[queue].txq_tid = tid;
queue             696 drivers/net/wireless/intel/iwlwifi/mvm/sta.c 	iwl_mvm_reconfig_scd(mvm, queue, iwl_mvm_ac_to_tx_fifo[ac],
queue             700 drivers/net/wireless/intel/iwlwifi/mvm/sta.c 	mvm->queue_info[queue].mac80211_ac = ac;
queue             709 drivers/net/wireless/intel/iwlwifi/mvm/sta.c 		iwl_trans_txq_set_shared_mode(mvm->trans, queue, true);
queue             746 drivers/net/wireless/intel/iwlwifi/mvm/sta.c 	int queue, size = max_t(u32, IWL_DEFAULT_QUEUE_SIZE,
queue             754 drivers/net/wireless/intel/iwlwifi/mvm/sta.c 	queue = iwl_trans_txq_alloc(mvm->trans,
queue             758 drivers/net/wireless/intel/iwlwifi/mvm/sta.c 	if (queue < 0) {
queue             761 drivers/net/wireless/intel/iwlwifi/mvm/sta.c 				    sta_id, tid, queue);
queue             762 drivers/net/wireless/intel/iwlwifi/mvm/sta.c 		return queue;
queue             766 drivers/net/wireless/intel/iwlwifi/mvm/sta.c 			    queue, sta_id, tid);
queue             768 drivers/net/wireless/intel/iwlwifi/mvm/sta.c 	IWL_DEBUG_TX_QUEUES(mvm, "Enabling TXQ #%d\n", queue);
queue             770 drivers/net/wireless/intel/iwlwifi/mvm/sta.c 	return queue;
queue             782 drivers/net/wireless/intel/iwlwifi/mvm/sta.c 	int queue = -1;
queue             789 drivers/net/wireless/intel/iwlwifi/mvm/sta.c 	queue = iwl_mvm_tvqm_enable_txq(mvm, mvmsta->sta_id, tid, wdg_timeout);
queue             790 drivers/net/wireless/intel/iwlwifi/mvm/sta.c 	if (queue < 0)
queue             791 drivers/net/wireless/intel/iwlwifi/mvm/sta.c 		return queue;
queue             793 drivers/net/wireless/intel/iwlwifi/mvm/sta.c 	mvmtxq->txq_id = queue;
queue             794 drivers/net/wireless/intel/iwlwifi/mvm/sta.c 	mvm->tvqm_info[queue].txq_tid = tid;
queue             795 drivers/net/wireless/intel/iwlwifi/mvm/sta.c 	mvm->tvqm_info[queue].sta_id = mvmsta->sta_id;
queue             797 drivers/net/wireless/intel/iwlwifi/mvm/sta.c 	IWL_DEBUG_TX_QUEUES(mvm, "Allocated queue is %d\n", queue);
queue             800 drivers/net/wireless/intel/iwlwifi/mvm/sta.c 	mvmsta->tid_data[tid].txq_id = queue;
queue             808 drivers/net/wireless/intel/iwlwifi/mvm/sta.c 				       int queue, u8 sta_id, u8 tid)
queue             813 drivers/net/wireless/intel/iwlwifi/mvm/sta.c 	if (mvm->queue_info[queue].tid_bitmap & BIT(tid)) {
queue             815 drivers/net/wireless/intel/iwlwifi/mvm/sta.c 			queue, tid);
queue             820 drivers/net/wireless/intel/iwlwifi/mvm/sta.c 	if (mvm->queue_info[queue].tid_bitmap)
queue             823 drivers/net/wireless/intel/iwlwifi/mvm/sta.c 	mvm->queue_info[queue].tid_bitmap |= BIT(tid);
queue             824 drivers/net/wireless/intel/iwlwifi/mvm/sta.c 	mvm->queue_info[queue].ra_sta_id = sta_id;
queue             828 drivers/net/wireless/intel/iwlwifi/mvm/sta.c 			mvm->queue_info[queue].mac80211_ac =
queue             831 drivers/net/wireless/intel/iwlwifi/mvm/sta.c 			mvm->queue_info[queue].mac80211_ac = IEEE80211_AC_VO;
queue             833 drivers/net/wireless/intel/iwlwifi/mvm/sta.c 		mvm->queue_info[queue].txq_tid = tid;
queue             840 drivers/net/wireless/intel/iwlwifi/mvm/sta.c 		mvmtxq->txq_id = queue;
queue             845 drivers/net/wireless/intel/iwlwifi/mvm/sta.c 			    queue, mvm->queue_info[queue].tid_bitmap);
queue             851 drivers/net/wireless/intel/iwlwifi/mvm/sta.c 			       int queue, u16 ssn,
queue             856 drivers/net/wireless/intel/iwlwifi/mvm/sta.c 		.scd_queue = queue,
queue             871 drivers/net/wireless/intel/iwlwifi/mvm/sta.c 	if (!iwl_mvm_update_txq_mapping(mvm, sta, queue, cfg->sta_id, cfg->tid))
queue             874 drivers/net/wireless/intel/iwlwifi/mvm/sta.c 	inc_ssn = iwl_trans_txq_enable_cfg(mvm->trans, queue, ssn,
queue             880 drivers/net/wireless/intel/iwlwifi/mvm/sta.c 	     "Failed to configure queue %d on FIFO %d\n", queue, cfg->fifo);
queue             885 drivers/net/wireless/intel/iwlwifi/mvm/sta.c static void iwl_mvm_change_queue_tid(struct iwl_mvm *mvm, int queue)
queue             888 drivers/net/wireless/intel/iwlwifi/mvm/sta.c 		.scd_queue = queue,
queue             900 drivers/net/wireless/intel/iwlwifi/mvm/sta.c 	tid_bitmap = mvm->queue_info[queue].tid_bitmap;
queue             902 drivers/net/wireless/intel/iwlwifi/mvm/sta.c 	if (WARN(!tid_bitmap, "TXQ %d has no tids assigned to it\n", queue))
queue             913 drivers/net/wireless/intel/iwlwifi/mvm/sta.c 			queue, ret);
queue             917 drivers/net/wireless/intel/iwlwifi/mvm/sta.c 	mvm->queue_info[queue].txq_tid = tid;
queue             919 drivers/net/wireless/intel/iwlwifi/mvm/sta.c 			    queue, tid);
queue             922 drivers/net/wireless/intel/iwlwifi/mvm/sta.c static void iwl_mvm_unshare_queue(struct iwl_mvm *mvm, int queue)
queue             939 drivers/net/wireless/intel/iwlwifi/mvm/sta.c 	sta_id = mvm->queue_info[queue].ra_sta_id;
queue             940 drivers/net/wireless/intel/iwlwifi/mvm/sta.c 	tid_bitmap = mvm->queue_info[queue].tid_bitmap;
queue             946 drivers/net/wireless/intel/iwlwifi/mvm/sta.c 			queue, tid_bitmap);
queue             950 drivers/net/wireless/intel/iwlwifi/mvm/sta.c 	IWL_DEBUG_TX_QUEUES(mvm, "Unsharing TXQ %d, keeping tid %d\n", queue,
queue             964 drivers/net/wireless/intel/iwlwifi/mvm/sta.c 	ret = iwl_mvm_redirect_queue(mvm, queue, tid,
queue             969 drivers/net/wireless/intel/iwlwifi/mvm/sta.c 		IWL_ERR(mvm, "Failed to redirect TXQ %d\n", queue);
queue             991 drivers/net/wireless/intel/iwlwifi/mvm/sta.c 					    queue);
queue             994 drivers/net/wireless/intel/iwlwifi/mvm/sta.c 			iwl_trans_txq_set_shared_mode(mvm->trans, queue, false);
queue             998 drivers/net/wireless/intel/iwlwifi/mvm/sta.c 	mvm->queue_info[queue].status = IWL_MVM_QUEUE_READY;
queue            1009 drivers/net/wireless/intel/iwlwifi/mvm/sta.c 					 struct iwl_mvm_sta *mvmsta, int queue,
queue            1034 drivers/net/wireless/intel/iwlwifi/mvm/sta.c 	if (tid_bitmap == mvm->queue_info[queue].tid_bitmap) {
queue            1035 drivers/net/wireless/intel/iwlwifi/mvm/sta.c 		IWL_DEBUG_TX_QUEUES(mvm, "Queue %d is inactive\n", queue);
queue            1047 drivers/net/wireless/intel/iwlwifi/mvm/sta.c 		mvm->queue_info[queue].tid_bitmap &= ~BIT(tid);
queue            1049 drivers/net/wireless/intel/iwlwifi/mvm/sta.c 		tid_bitmap = mvm->queue_info[queue].tid_bitmap;
queue            1062 drivers/net/wireless/intel/iwlwifi/mvm/sta.c 		if (!(tid_bitmap & BIT(mvm->queue_info[queue].txq_tid)))
queue            1063 drivers/net/wireless/intel/iwlwifi/mvm/sta.c 			set_bit(queue, changetid_queues);
queue            1067 drivers/net/wireless/intel/iwlwifi/mvm/sta.c 				    tid, queue);
queue            1071 drivers/net/wireless/intel/iwlwifi/mvm/sta.c 			    "TXQ #%d left with tid bitmap 0x%x\n", queue,
queue            1072 drivers/net/wireless/intel/iwlwifi/mvm/sta.c 			    mvm->queue_info[queue].tid_bitmap);
queue            1078 drivers/net/wireless/intel/iwlwifi/mvm/sta.c 	tid_bitmap = mvm->queue_info[queue].tid_bitmap;
queue            1081 drivers/net/wireless/intel/iwlwifi/mvm/sta.c 	if (hweight16(mvm->queue_info[queue].tid_bitmap) == 1 &&
queue            1082 drivers/net/wireless/intel/iwlwifi/mvm/sta.c 	    mvm->queue_info[queue].status == IWL_MVM_QUEUE_SHARED) {
queue            1084 drivers/net/wireless/intel/iwlwifi/mvm/sta.c 				    queue);
queue            1085 drivers/net/wireless/intel/iwlwifi/mvm/sta.c 		set_bit(queue, unshare_queues);
queue            1213 drivers/net/wireless/intel/iwlwifi/mvm/sta.c 	int queue = -1;
queue            1232 drivers/net/wireless/intel/iwlwifi/mvm/sta.c 		queue = iwl_mvm_find_free_queue(mvm, mvmsta->sta_id,
queue            1235 drivers/net/wireless/intel/iwlwifi/mvm/sta.c 		if (queue >= IWL_MVM_DQA_MIN_MGMT_QUEUE)
queue            1237 drivers/net/wireless/intel/iwlwifi/mvm/sta.c 					    queue);
queue            1242 drivers/net/wireless/intel/iwlwifi/mvm/sta.c 	if ((queue < 0 && mvmsta->reserved_queue != IEEE80211_INVAL_HW_QUEUE) &&
queue            1245 drivers/net/wireless/intel/iwlwifi/mvm/sta.c 		queue = mvmsta->reserved_queue;
queue            1246 drivers/net/wireless/intel/iwlwifi/mvm/sta.c 		mvm->queue_info[queue].reserved = true;
queue            1247 drivers/net/wireless/intel/iwlwifi/mvm/sta.c 		IWL_DEBUG_TX_QUEUES(mvm, "Using reserved queue #%d\n", queue);
queue            1250 drivers/net/wireless/intel/iwlwifi/mvm/sta.c 	if (queue < 0)
queue            1251 drivers/net/wireless/intel/iwlwifi/mvm/sta.c 		queue = iwl_mvm_find_free_queue(mvm, mvmsta->sta_id,
queue            1254 drivers/net/wireless/intel/iwlwifi/mvm/sta.c 	if (queue < 0) {
queue            1256 drivers/net/wireless/intel/iwlwifi/mvm/sta.c 		queue = iwl_mvm_inactivity_check(mvm, mvmsta->sta_id);
queue            1260 drivers/net/wireless/intel/iwlwifi/mvm/sta.c 	if (queue <= 0) {
queue            1261 drivers/net/wireless/intel/iwlwifi/mvm/sta.c 		queue = iwl_mvm_get_shared_queue(mvm, tfd_queue_mask, ac);
queue            1262 drivers/net/wireless/intel/iwlwifi/mvm/sta.c 		if (queue > 0) {
queue            1264 drivers/net/wireless/intel/iwlwifi/mvm/sta.c 			mvm->queue_info[queue].status = IWL_MVM_QUEUE_SHARED;
queue            1274 drivers/net/wireless/intel/iwlwifi/mvm/sta.c 	if (queue > 0 && !shared_queue)
queue            1275 drivers/net/wireless/intel/iwlwifi/mvm/sta.c 		mvm->queue_info[queue].status = IWL_MVM_QUEUE_READY;
queue            1278 drivers/net/wireless/intel/iwlwifi/mvm/sta.c 	if (WARN_ON(queue <= 0)) {
queue            1281 drivers/net/wireless/intel/iwlwifi/mvm/sta.c 		return queue;
queue            1290 drivers/net/wireless/intel/iwlwifi/mvm/sta.c 	cfg.aggregate = (queue >= IWL_MVM_DQA_MIN_DATA_QUEUE ||
queue            1291 drivers/net/wireless/intel/iwlwifi/mvm/sta.c 			 queue == IWL_MVM_DQA_BSS_CLIENT_QUEUE);
queue            1295 drivers/net/wireless/intel/iwlwifi/mvm/sta.c 			    shared_queue ? "shared " : "", queue,
queue            1300 drivers/net/wireless/intel/iwlwifi/mvm/sta.c 		disable_agg_tids = iwl_mvm_get_queue_agg_tids(mvm, queue);
queue            1304 drivers/net/wireless/intel/iwlwifi/mvm/sta.c 					    queue);
queue            1305 drivers/net/wireless/intel/iwlwifi/mvm/sta.c 			iwl_mvm_invalidate_sta_queue(mvm, queue,
queue            1310 drivers/net/wireless/intel/iwlwifi/mvm/sta.c 	inc_ssn = iwl_mvm_enable_txq(mvm, sta, queue, ssn, &cfg, wdg_timeout);
queue            1319 drivers/net/wireless/intel/iwlwifi/mvm/sta.c 		iwl_trans_txq_set_shared_mode(mvm->trans, queue, true);
queue            1331 drivers/net/wireless/intel/iwlwifi/mvm/sta.c 	mvmsta->tid_data[tid].txq_id = queue;
queue            1332 drivers/net/wireless/intel/iwlwifi/mvm/sta.c 	mvmsta->tfd_queue_msk |= BIT(queue);
queue            1335 drivers/net/wireless/intel/iwlwifi/mvm/sta.c 	if (mvmsta->reserved_queue == queue)
queue            1346 drivers/net/wireless/intel/iwlwifi/mvm/sta.c 			ret = iwl_mvm_sta_tx_agg(mvm, sta, tid, queue, true);
queue            1352 drivers/net/wireless/intel/iwlwifi/mvm/sta.c 		ret = iwl_mvm_redirect_queue(mvm, queue, tid, ac, ssn,
queue            1362 drivers/net/wireless/intel/iwlwifi/mvm/sta.c 	iwl_mvm_disable_txq(mvm, sta, queue, tid, 0);
queue            1413 drivers/net/wireless/intel/iwlwifi/mvm/sta.c 	int queue;
queue            1427 drivers/net/wireless/intel/iwlwifi/mvm/sta.c 		queue = IWL_MVM_DQA_BSS_CLIENT_QUEUE;
queue            1429 drivers/net/wireless/intel/iwlwifi/mvm/sta.c 		queue = iwl_mvm_find_free_queue(mvm, mvmsta->sta_id,
queue            1432 drivers/net/wireless/intel/iwlwifi/mvm/sta.c 	if (queue < 0) {
queue            1434 drivers/net/wireless/intel/iwlwifi/mvm/sta.c 		queue = iwl_mvm_inactivity_check(mvm, mvmsta->sta_id);
queue            1435 drivers/net/wireless/intel/iwlwifi/mvm/sta.c 		if (queue < 0) {
queue            1440 drivers/net/wireless/intel/iwlwifi/mvm/sta.c 	mvm->queue_info[queue].status = IWL_MVM_QUEUE_RESERVED;
queue            1442 drivers/net/wireless/intel/iwlwifi/mvm/sta.c 	mvmsta->reserved_queue = queue;
queue            1445 drivers/net/wireless/intel/iwlwifi/mvm/sta.c 			    queue, mvmsta->sta_id);
queue            1965 drivers/net/wireless/intel/iwlwifi/mvm/sta.c static void iwl_mvm_enable_aux_snif_queue(struct iwl_mvm *mvm, u16 queue,
queue            1981 drivers/net/wireless/intel/iwlwifi/mvm/sta.c 	iwl_mvm_enable_txq(mvm, NULL, queue, 0, &cfg, wdg_timeout);
queue            1999 drivers/net/wireless/intel/iwlwifi/mvm/sta.c 					  u16 *queue, int fifo)
queue            2005 drivers/net/wireless/intel/iwlwifi/mvm/sta.c 		iwl_mvm_enable_aux_snif_queue(mvm, *queue, sta->sta_id, fifo);
queue            2010 drivers/net/wireless/intel/iwlwifi/mvm/sta.c 			iwl_mvm_disable_txq(mvm, NULL, *queue,
queue            2028 drivers/net/wireless/intel/iwlwifi/mvm/sta.c 		*queue = txq;
queue            2109 drivers/net/wireless/intel/iwlwifi/mvm/sta.c 	int queue;
queue            2126 drivers/net/wireless/intel/iwlwifi/mvm/sta.c 			queue = mvm->probe_queue;
queue            2128 drivers/net/wireless/intel/iwlwifi/mvm/sta.c 			queue = mvm->p2p_dev_queue;
queue            2134 drivers/net/wireless/intel/iwlwifi/mvm/sta.c 		bsta->tfd_queue_msk |= BIT(queue);
queue            2136 drivers/net/wireless/intel/iwlwifi/mvm/sta.c 		iwl_mvm_enable_txq(mvm, NULL, queue, 0, &cfg, wdg_timeout);
queue            2155 drivers/net/wireless/intel/iwlwifi/mvm/sta.c 		queue = iwl_mvm_tvqm_enable_txq(mvm, bsta->sta_id,
queue            2158 drivers/net/wireless/intel/iwlwifi/mvm/sta.c 		if (queue < 0) {
queue            2160 drivers/net/wireless/intel/iwlwifi/mvm/sta.c 			return queue;
queue            2165 drivers/net/wireless/intel/iwlwifi/mvm/sta.c 			mvm->probe_queue = queue;
queue            2167 drivers/net/wireless/intel/iwlwifi/mvm/sta.c 			mvm->p2p_dev_queue = queue;
queue            2177 drivers/net/wireless/intel/iwlwifi/mvm/sta.c 	int queue;
queue            2186 drivers/net/wireless/intel/iwlwifi/mvm/sta.c 		queue = mvm->probe_queue;
queue            2189 drivers/net/wireless/intel/iwlwifi/mvm/sta.c 		queue = mvm->p2p_dev_queue;
queue            2197 drivers/net/wireless/intel/iwlwifi/mvm/sta.c 	iwl_mvm_disable_txq(mvm, NULL, queue, IWL_MAX_TID_COUNT, 0);
queue            2201 drivers/net/wireless/intel/iwlwifi/mvm/sta.c 	WARN_ON(!(mvmvif->bcast_sta.tfd_queue_msk & BIT(queue)));
queue            2202 drivers/net/wireless/intel/iwlwifi/mvm/sta.c 	mvmvif->bcast_sta.tfd_queue_msk &= ~BIT(queue);
queue            2347 drivers/net/wireless/intel/iwlwifi/mvm/sta.c 		int queue = iwl_mvm_tvqm_enable_txq(mvm, msta->sta_id,
queue            2350 drivers/net/wireless/intel/iwlwifi/mvm/sta.c 		if (queue < 0) {
queue            2351 drivers/net/wireless/intel/iwlwifi/mvm/sta.c 			ret = queue;
queue            2354 drivers/net/wireless/intel/iwlwifi/mvm/sta.c 		mvmvif->cab_queue = queue;
queue            2517 drivers/net/wireless/intel/iwlwifi/mvm/sta.c 		reorder_buf->queue = i;
queue            2691 drivers/net/wireless/intel/iwlwifi/mvm/sta.c 		       int tid, u8 queue, bool start)
queue            2701 drivers/net/wireless/intel/iwlwifi/mvm/sta.c 		mvm_sta->tfd_queue_msk |= BIT(queue);
queue            2873 drivers/net/wireless/intel/iwlwifi/mvm/sta.c 	int queue, ret;
queue            2897 drivers/net/wireless/intel/iwlwifi/mvm/sta.c 	queue = tid_data->txq_id;
queue            2919 drivers/net/wireless/intel/iwlwifi/mvm/sta.c 		ret = iwl_mvm_sta_tx_agg(mvm, sta, tid, queue, true);
queue            2927 drivers/net/wireless/intel/iwlwifi/mvm/sta.c 	queue_status = mvm->queue_info[queue].status;
queue            2930 drivers/net/wireless/intel/iwlwifi/mvm/sta.c 	if (mvm->queue_info[queue].status == IWL_MVM_QUEUE_READY)
queue            2943 drivers/net/wireless/intel/iwlwifi/mvm/sta.c 						     BIT(queue));
queue            2950 drivers/net/wireless/intel/iwlwifi/mvm/sta.c 		ret = iwl_mvm_reconfig_scd(mvm, queue, cfg.fifo,
queue            2955 drivers/net/wireless/intel/iwlwifi/mvm/sta.c 				"Error reconfiguring TXQ #%d\n", queue);
queue            2961 drivers/net/wireless/intel/iwlwifi/mvm/sta.c 		iwl_mvm_enable_txq(mvm, sta, queue, ssn,
queue            2966 drivers/net/wireless/intel/iwlwifi/mvm/sta.c 		ret = iwl_mvm_sta_tx_agg(mvm, sta, tid, queue, true);
queue            2972 drivers/net/wireless/intel/iwlwifi/mvm/sta.c 	mvm->queue_info[queue].status = IWL_MVM_QUEUE_READY;
queue             541 drivers/net/wireless/intel/iwlwifi/mvm/sta.h 		       int tid, u8 queue, bool start);
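A recurring pattern across the sta.c lines above: each queue carries a tid_bitmap of the TIDs mapped onto it; hweight16(tid_bitmap) > 1 marks the queue shared, clearing BIT(tid) unmaps a TID, and an empty bitmap means the queue can be torn down and marked free. A minimal model of that bookkeeping, with hweight16() reimplemented so the sketch builds standalone:

	#include <stdint.h>
	#include <stdio.h>

	#define BIT(n) (1u << (n))

	/* Population count, standing in for the kernel's hweight16(). */
	static int hweight16(uint16_t w)
	{
		int n = 0;

		while (w) {
			n += w & 1;
			w >>= 1;
		}
		return n;
	}

	int main(void)
	{
		uint16_t tid_bitmap = 0;

		tid_bitmap |= BIT(3);           /* map TID 3 to the queue  */
		tid_bitmap |= BIT(5);           /* map TID 5: now shared   */
		printf("shared: %d\n", hweight16(tid_bitmap) > 1); /* 1 */

		tid_bitmap &= ~BIT(5);          /* unmap TID 5             */
		printf("shared: %d\n", hweight16(tid_bitmap) > 1); /* 0 */
		printf("free: %d\n", tid_bitmap == 0);             /* 0 */
		return 0;
	}

Keeping the whole mapping in one 16-bit word is what makes the shared/unshare decisions above cheap: both tests reduce to a popcount or a compare against zero.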
queue             717 drivers/net/wireless/intel/iwlwifi/mvm/tx.c 	int queue = -1;
queue             742 drivers/net/wireless/intel/iwlwifi/mvm/tx.c 			queue = iwl_mvm_get_ctrl_vif_queue(mvm, &info, hdr);
queue             744 drivers/net/wireless/intel/iwlwifi/mvm/tx.c 			queue = mvm->snif_queue;
queue             757 drivers/net/wireless/intel/iwlwifi/mvm/tx.c 			queue = mvm->aux_queue;
queue             761 drivers/net/wireless/intel/iwlwifi/mvm/tx.c 	if (queue < 0) {
queue             769 drivers/net/wireless/intel/iwlwifi/mvm/tx.c 	IWL_DEBUG_TX(mvm, "station Id %d, queue=%d\n", sta_id, queue);
queue             778 drivers/net/wireless/intel/iwlwifi/mvm/tx.c 	if (iwl_trans_tx(mvm->trans, skb, dev_cmd, queue)) {
queue            1727 drivers/net/wireless/intel/iwlwifi/mvm/tx.c 	int queue = SEQ_TO_QUEUE(sequence);
queue            1730 drivers/net/wireless/intel/iwlwifi/mvm/tx.c 	if (WARN_ON_ONCE(queue < IWL_MVM_DQA_MIN_DATA_QUEUE &&
queue            1731 drivers/net/wireless/intel/iwlwifi/mvm/tx.c 			 (queue != IWL_MVM_DQA_BSS_CLIENT_QUEUE)))
queue             609 drivers/net/wireless/intel/iwlwifi/mvm/utils.c int iwl_mvm_reconfig_scd(struct iwl_mvm *mvm, int queue, int fifo, int sta_id,
queue             613 drivers/net/wireless/intel/iwlwifi/mvm/utils.c 		.scd_queue = queue,
queue             619 drivers/net/wireless/intel/iwlwifi/mvm/utils.c 		.aggregate = (queue >= IWL_MVM_DQA_MIN_DATA_QUEUE ||
queue             620 drivers/net/wireless/intel/iwlwifi/mvm/utils.c 			      queue == IWL_MVM_DQA_BSS_CLIENT_QUEUE),
queue             628 drivers/net/wireless/intel/iwlwifi/mvm/utils.c 	if (WARN(mvm->queue_info[queue].tid_bitmap == 0,
queue             629 drivers/net/wireless/intel/iwlwifi/mvm/utils.c 		 "Trying to reconfig unallocated queue %d\n", queue))
queue             632 drivers/net/wireless/intel/iwlwifi/mvm/utils.c 	IWL_DEBUG_TX_QUEUES(mvm, "Reconfig SCD for TXQ #%d\n", queue);
queue             636 drivers/net/wireless/intel/iwlwifi/mvm/utils.c 		  queue, fifo, ret);
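
utils.c:619-620 above derives the .aggregate flag of the SCD reconfig command purely from where the queue number sits in the DQA layout. A self-contained restatement of that test, with placeholder values standing in for the real IWL_MVM_DQA_* constants:

    #include <stdbool.h>

    #define DQA_MIN_DATA_QUEUE      10      /* placeholder, not the real value */
    #define DQA_BSS_CLIENT_QUEUE     4      /* placeholder, not the real value */

    /* data queues and the BSS client queue carry aggregated traffic */
    static bool queue_is_aggregate(int queue)
    {
            return queue >= DQA_MIN_DATA_QUEUE ||
                   queue == DQA_BSS_CLIENT_QUEUE;
    }
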
queue             219 drivers/net/wireless/intel/iwlwifi/pcie/internal.h 	struct iwl_rx_mem_buffer *queue[RX_QUEUE_SIZE];
queue             683 drivers/net/wireless/intel/iwlwifi/pcie/internal.h bool iwl_trans_pcie_txq_enable(struct iwl_trans *trans, int queue, u16 ssn,
queue             686 drivers/net/wireless/intel/iwlwifi/pcie/internal.h void iwl_trans_pcie_txq_disable(struct iwl_trans *trans, int queue,
queue            1112 drivers/net/wireless/intel/iwlwifi/pcie/internal.h void iwl_trans_pcie_dyn_txq_free(struct iwl_trans *trans, int queue);
queue             368 drivers/net/wireless/intel/iwlwifi/pcie/rx.c 		rxb = rxq->queue[rxq->write];
queue             379 drivers/net/wireless/intel/iwlwifi/pcie/rx.c 		rxq->queue[rxq->write] = rxb;
queue            1063 drivers/net/wireless/intel/iwlwifi/pcie/rx.c 		def_rxq->queue[i] = NULL;
queue            1396 drivers/net/wireless/intel/iwlwifi/pcie/rx.c 		rxb = rxq->queue[i];
queue            1397 drivers/net/wireless/intel/iwlwifi/pcie/rx.c 		rxq->queue[i] = NULL;
queue            1429 drivers/net/wireless/intel/iwlwifi/pcie/rx.c static void iwl_pcie_rx_handle(struct iwl_trans *trans, int queue)
queue            1437 drivers/net/wireless/intel/iwlwifi/pcie/rx.c 	if (WARN_ON_ONCE(!trans_pcie->rxq || !trans_pcie->rxq[queue].bd))
queue            1440 drivers/net/wireless/intel/iwlwifi/pcie/rx.c 	rxq = &trans_pcie->rxq[queue];
queue            1554 drivers/net/wireless/intel/iwlwifi/pcie/rx.c 	u8 queue = entry->entry;
queue            1555 drivers/net/wireless/intel/iwlwifi/pcie/rx.c 	struct msix_entry *entries = entry - queue;
queue            2219 drivers/net/wireless/intel/iwlwifi/pcie/trans.c 	int queue;
queue            2221 drivers/net/wireless/intel/iwlwifi/pcie/trans.c 	for_each_set_bit(queue, &txqs, BITS_PER_LONG) {
queue            2222 drivers/net/wireless/intel/iwlwifi/pcie/trans.c 		struct iwl_txq *txq = trans_pcie->txq[queue];
queue            2233 drivers/net/wireless/intel/iwlwifi/pcie/trans.c 				    freeze ? "Freezing" : "Waking", queue);
queue            2327 drivers/net/wireless/intel/iwlwifi/pcie/trans.c static int iwl_trans_pcie_rxq_dma_data(struct iwl_trans *trans, int queue,
queue            2332 drivers/net/wireless/intel/iwlwifi/pcie/trans.c 	if (queue >= trans->num_rx_queues || !trans_pcie->rxq)
queue            2335 drivers/net/wireless/intel/iwlwifi/pcie/trans.c 	data->fr_bd_cb = trans_pcie->rxq[queue].bd_dma;
queue            2336 drivers/net/wireless/intel/iwlwifi/pcie/trans.c 	data->urbd_stts_wrptr = trans_pcie->rxq[queue].rb_stts_dma;
queue            2337 drivers/net/wireless/intel/iwlwifi/pcie/trans.c 	data->ur_bd_cb = trans_pcie->rxq[queue].used_bd_dma;
queue            2979 drivers/net/wireless/intel/iwlwifi/pcie/trans.c 		struct iwl_rx_mem_buffer *rxb = rxq->queue[i];
queue            1268 drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c void iwl_trans_pcie_dyn_txq_free(struct iwl_trans *trans, int queue)
queue            1278 drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c 	if (!test_and_clear_bit(queue, trans_pcie->queue_used)) {
queue            1280 drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c 			  "queue %d not used", queue);
queue            1284 drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c 	iwl_pcie_gen2_txq_unmap(trans, queue);
queue            1286 drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c 	iwl_pcie_gen2_txq_free_memory(trans, trans_pcie->txq[queue]);
queue            1287 drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c 	trans_pcie->txq[queue] = NULL;
queue            1289 drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c 	IWL_DEBUG_TX_QUEUES(trans, "Deactivate queue %d\n", queue);
queue            1311 drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c 	struct iwl_txq *queue;
queue            1316 drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c 		queue = kzalloc(sizeof(*queue), GFP_KERNEL);
queue            1317 drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c 		if (!queue) {
queue            1321 drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c 		trans_pcie->txq[txq_id] = queue;
queue            1322 drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c 		ret = iwl_pcie_txq_alloc(trans, queue, queue_size, true);
queue            1328 drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c 		queue = trans_pcie->txq[txq_id];
queue            1331 drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c 	ret = iwl_pcie_txq_init(trans, queue, queue_size,
queue             590 drivers/net/wireless/intersil/p54/fwio.c 	memcpy(edcf->queue, priv->qos_params, sizeof(edcf->queue));
queue             404 drivers/net/wireless/intersil/p54/lmac.h 	struct p54_edcf_queue_param queue[8];
queue             448 drivers/net/wireless/intersil/p54/lmac.h 	u8 queue;
queue             407 drivers/net/wireless/intersil/p54/main.c 		       struct ieee80211_vif *vif, u16 queue,
queue             414 drivers/net/wireless/intersil/p54/main.c 	P54_SET_QUEUE(priv->qos_params[queue], params->aifs,
queue             101 drivers/net/wireless/intersil/p54/p54.h #define P54_SET_QUEUE(queue, ai_fs, cw_min, cw_max, _txop)	\
queue             103 drivers/net/wireless/intersil/p54/p54.h 	queue.aifs = cpu_to_le16(ai_fs);			\
queue             104 drivers/net/wireless/intersil/p54/p54.h 	queue.cwmin = cpu_to_le16(cw_min);			\
queue             105 drivers/net/wireless/intersil/p54/p54.h 	queue.cwmax = cpu_to_le16(cw_max);			\
queue             106 drivers/net/wireless/intersil/p54/p54.h 	queue.txop = cpu_to_le16(_txop);			\
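
p54.h:101-106 above is a statement-style macro that takes the destination struct by name, so main.c:414 can fill one entry of priv->qos_params in a single call. A compilable sketch of the same idea, assuming a little-endian host for cpu_to_le16() and adding the usual do { } while (0) hygiene that the original omits:

    #include <stdint.h>

    #define cpu_to_le16(x)  ((uint16_t)(x))         /* assumes little-endian host */

    struct edcf_param {
            uint16_t aifs, cwmin, cwmax, txop;
    };

    #define SET_QUEUE(queue, ai_fs, cw_min, cw_max, _txop)  \
            do {                                            \
                    (queue).aifs  = cpu_to_le16(ai_fs);     \
                    (queue).cwmin = cpu_to_le16(cw_min);    \
                    (queue).cwmax = cpu_to_le16(cw_max);    \
                    (queue).txop  = cpu_to_le16(_txop);     \
            } while (0)

    static struct edcf_param qos_params[8];

    /* mirrors the call shape seen at p54/main.c:414 */
    static void conf_tx(unsigned int queue, uint16_t aifs, uint16_t cw_min,
                        uint16_t cw_max, uint16_t txop)
    {
            SET_QUEUE(qos_params[queue], aifs, cw_min, cw_max, txop);
    }
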
queue             189 drivers/net/wireless/intersil/p54/txrx.c 	struct p54_tx_queue_stats *queue;
queue             195 drivers/net/wireless/intersil/p54/txrx.c 	queue = &priv->tx_stats[p54_queue];
queue             198 drivers/net/wireless/intersil/p54/txrx.c 	if (unlikely(queue->len >= queue->limit && IS_QOS_QUEUE(p54_queue))) {
queue             203 drivers/net/wireless/intersil/p54/txrx.c 	queue->len++;
queue             204 drivers/net/wireless/intersil/p54/txrx.c 	queue->count++;
queue             206 drivers/net/wireless/intersil/p54/txrx.c 	if (unlikely(queue->len == queue->limit && IS_QOS_QUEUE(p54_queue))) {
queue             690 drivers/net/wireless/intersil/p54/txrx.c 				u8 *queue, u32 *extra_len, u16 *flags, u16 *aid,
queue             709 drivers/net/wireless/intersil/p54/txrx.c 	*queue = skb_get_queue_mapping(skb) + P54_QUEUE_DATA;
queue             729 drivers/net/wireless/intersil/p54/txrx.c 			*queue = P54_QUEUE_CAB;
queue             753 drivers/net/wireless/intersil/p54/txrx.c 				*queue = P54_QUEUE_BEACON;
queue             792 drivers/net/wireless/intersil/p54/txrx.c 	u8 rate, queue = 0, crypt_offset = 0;
queue             799 drivers/net/wireless/intersil/p54/txrx.c 	p54_tx_80211_header(priv, skb, info, control->sta, &queue, &extra_len,
queue             802 drivers/net/wireless/intersil/p54/txrx.c 	if (p54_tx_qos_accounting_alloc(priv, skb, queue)) {
queue             926 drivers/net/wireless/intersil/p54/txrx.c 	txhdr->hw_queue = queue;
queue             927 drivers/net/wireless/intersil/p54/txrx.c 	txhdr->backlog = priv->tx_stats[queue].len - 1;
queue             212 drivers/net/wireless/intersil/prism54/isl_38xx.c isl38xx_in_queue(isl38xx_control_block *cb, int queue)
queue             214 drivers/net/wireless/intersil/prism54/isl_38xx.c 	const s32 delta = (le32_to_cpu(cb->driver_curr_frag[queue]) -
queue             215 drivers/net/wireless/intersil/prism54/isl_38xx.c 			   le32_to_cpu(cb->device_curr_frag[queue]));
queue             222 drivers/net/wireless/intersil/prism54/isl_38xx.c 	switch (queue) {
queue             147 drivers/net/wireless/intersil/prism54/isl_38xx.h int isl38xx_in_queue(isl38xx_control_block *cb, int queue);
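
isl_38xx.c:212-215 above computes queue occupancy as the difference between two monotonically increasing fragment counters, one advanced by the driver and one by the device. A sketch assuming a little-endian host (the le32_to_cpu() conversions are dropped) and an illustrative queue count; the switch at isl_38xx.c:222, which sanity-checks the delta per queue type, is elided:

    #include <stdint.h>

    struct ctrl_block {                     /* trimmed isl38xx_control_block */
            uint32_t driver_curr_frag[6];   /* array size is illustrative */
            uint32_t device_curr_frag[6];
    };

    static int in_queue(const struct ctrl_block *cb, int queue)
    {
            /* unsigned subtraction makes counter wraparound harmless */
            int32_t delta = (int32_t)(cb->driver_curr_frag[queue] -
                                      cb->device_curr_frag[queue]);
            return delta;                   /* fragments still pending */
    }
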
queue            1850 drivers/net/wireless/mac80211_hwsim.c 	struct ieee80211_vif *vif, u16 queue,
queue            1855 drivers/net/wireless/mac80211_hwsim.c 		  __func__, queue,
queue            5361 drivers/net/wireless/marvell/mwl8k.c 			 struct ieee80211_vif *vif, u16 queue,
queue            5369 drivers/net/wireless/marvell/mwl8k.c 		BUG_ON(queue > MWL8K_TX_WMM_QUEUES - 1);
queue            5370 drivers/net/wireless/marvell/mwl8k.c 		memcpy(&priv->wmm_params[queue], params, sizeof(*params));
queue            5376 drivers/net/wireless/marvell/mwl8k.c 			int q = MWL8K_TX_WMM_QUEUES - 1 - queue;
queue             169 drivers/net/wireless/mediatek/mt76/mt7603/mac.c 	int i, port, queue;
queue             173 drivers/net/wireless/mediatek/mt76/mt7603/mac.c 		queue = 8; /* free queue */
queue             176 drivers/net/wireless/mediatek/mt76/mt7603/mac.c 		queue = 1; /* MCU queue */
queue             189 drivers/net/wireless/mediatek/mt76/mt7603/mac.c 			FIELD_PREP(MT_DMA_FQCR0_DEST_QUEUE_ID, queue));
queue             495 drivers/net/wireless/mediatek/mt76/mt7603/main.c mt7603_conf_tx(struct ieee80211_hw *hw, struct ieee80211_vif *vif, u16 queue,
queue             503 drivers/net/wireless/mediatek/mt76/mt7603/main.c 	queue = dev->mt76.q_tx[queue].q->hw_idx;
queue             513 drivers/net/wireless/mediatek/mt76/mt7603/main.c 	val = mt76_rr(dev, MT_WMM_TXOP(queue));
queue             514 drivers/net/wireless/mediatek/mt76/mt7603/main.c 	val &= ~(MT_WMM_TXOP_MASK << MT_WMM_TXOP_SHIFT(queue));
queue             515 drivers/net/wireless/mediatek/mt76/mt7603/main.c 	val |= params->txop << MT_WMM_TXOP_SHIFT(queue);
queue             516 drivers/net/wireless/mediatek/mt76/mt7603/main.c 	mt76_wr(dev, MT_WMM_TXOP(queue), val);
queue             519 drivers/net/wireless/mediatek/mt76/mt7603/main.c 	val &= ~(MT_WMM_AIFSN_MASK << MT_WMM_AIFSN_SHIFT(queue));
queue             520 drivers/net/wireless/mediatek/mt76/mt7603/main.c 	val |= params->aifs << MT_WMM_AIFSN_SHIFT(queue);
queue             524 drivers/net/wireless/mediatek/mt76/mt7603/main.c 	val &= ~(MT_WMM_CWMIN_MASK << MT_WMM_CWMIN_SHIFT(queue));
queue             525 drivers/net/wireless/mediatek/mt76/mt7603/main.c 	val |= cw_min << MT_WMM_CWMIN_SHIFT(queue);
queue             528 drivers/net/wireless/mediatek/mt76/mt7603/main.c 	val = mt76_rr(dev, MT_WMM_CWMAX(queue));
queue             529 drivers/net/wireless/mediatek/mt76/mt7603/main.c 	val &= ~(MT_WMM_CWMAX_MASK << MT_WMM_CWMAX_SHIFT(queue));
queue             530 drivers/net/wireless/mediatek/mt76/mt7603/main.c 	val |= cw_max << MT_WMM_CWMAX_SHIFT(queue);
queue             531 drivers/net/wireless/mediatek/mt76/mt7603/main.c 	mt76_wr(dev, MT_WMM_CWMAX(queue), val);
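
mt7603/main.c:513-531 above performs the same read-modify-write on four WMM registers, one bit-field per queue: read, clear this queue's field, OR in the new value, write back. A generic sketch of that update, with a fake register array replacing mt76_rr()/mt76_wr() and an assumed 4-bit field per queue (not the actual MT7603 register layout):

    #include <stdint.h>

    static uint32_t regs[64];               /* fake register file */

    static uint32_t mt_rr(unsigned int reg)             { return regs[reg]; }
    static void mt_wr(unsigned int reg, uint32_t v)     { regs[reg] = v; }

    #define FIELD_MASK      0xfu
    #define FIELD_SHIFT(q)  ((q) * 4)       /* assumed: 4 bits per queue */

    static void set_queue_field(unsigned int reg, unsigned int queue,
                                uint32_t val)
    {
            uint32_t v = mt_rr(reg);

            v &= ~(FIELD_MASK << FIELD_SHIFT(queue));       /* clear old field */
            v |= (val & FIELD_MASK) << FIELD_SHIFT(queue);  /* insert new one  */
            mt_wr(reg, v);
    }
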
queue             249 drivers/net/wireless/mediatek/mt76/mt7615/main.c mt7615_conf_tx(struct ieee80211_hw *hw, struct ieee80211_vif *vif, u16 queue,
queue             255 drivers/net/wireless/mediatek/mt76/mt7615/main.c 	queue += mvif->wmm_idx * MT7615_MAX_WMM_SETS;
queue             257 drivers/net/wireless/mediatek/mt76/mt7615/main.c 	return mt7615_mcu_set_wmm(dev, queue, params);
queue             638 drivers/net/wireless/mediatek/mt76/mt7615/mcu.c int mt7615_mcu_set_wmm(struct mt7615_dev *dev, u8 queue,
queue             650 drivers/net/wireless/mediatek/mt76/mt7615/mcu.c 		u8 queue;
queue             658 drivers/net/wireless/mediatek/mt76/mt7615/mcu.c 		.queue = queue,
queue             187 drivers/net/wireless/mediatek/mt76/mt7615/mt7615.h int mt7615_mcu_set_wmm(struct mt7615_dev *dev, u8 queue,
queue             154 drivers/net/wireless/mediatek/mt76/mt76x02.h 		    u16 queue, const struct ieee80211_tx_queue_params *params);
queue             486 drivers/net/wireless/mediatek/mt76/mt76x02_util.c 		    u16 queue, const struct ieee80211_tx_queue_params *params)
queue             492 drivers/net/wireless/mediatek/mt76/mt76x02_util.c 	qid = dev->mt76.q_tx[queue].q->hw_idx;
queue             371 drivers/net/wireless/mediatek/mt7601u/mt7601u.h 		    u16 queue, const struct ieee80211_tx_queue_params *params);
queue             261 drivers/net/wireless/mediatek/mt7601u/tx.c 		    u16 queue, const struct ieee80211_tx_queue_params *params)
queue             264 drivers/net/wireless/mediatek/mt7601u/tx.c 	u8 cw_min = 5, cw_max = 10, hw_q = q2hwq(queue);
queue             627 drivers/net/wireless/ralink/rt2x00/rt2400pci.c static void rt2400pci_start_queue(struct data_queue *queue)
queue             629 drivers/net/wireless/ralink/rt2x00/rt2400pci.c 	struct rt2x00_dev *rt2x00dev = queue->rt2x00dev;
queue             632 drivers/net/wireless/ralink/rt2x00/rt2400pci.c 	switch (queue->qid) {
queue             650 drivers/net/wireless/ralink/rt2x00/rt2400pci.c static void rt2400pci_kick_queue(struct data_queue *queue)
queue             652 drivers/net/wireless/ralink/rt2x00/rt2400pci.c 	struct rt2x00_dev *rt2x00dev = queue->rt2x00dev;
queue             655 drivers/net/wireless/ralink/rt2x00/rt2400pci.c 	switch (queue->qid) {
queue             676 drivers/net/wireless/ralink/rt2x00/rt2400pci.c static void rt2400pci_stop_queue(struct data_queue *queue)
queue             678 drivers/net/wireless/ralink/rt2x00/rt2400pci.c 	struct rt2x00_dev *rt2x00dev = queue->rt2x00dev;
queue             681 drivers/net/wireless/ralink/rt2x00/rt2400pci.c 	switch (queue->qid) {
queue             719 drivers/net/wireless/ralink/rt2x00/rt2400pci.c 	if (entry->queue->qid == QID_RX) {
queue             737 drivers/net/wireless/ralink/rt2x00/rt2400pci.c 	if (entry->queue->qid == QID_RX) {
queue            1168 drivers/net/wireless/ralink/rt2x00/rt2400pci.c 	struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
queue            1210 drivers/net/wireless/ralink/rt2x00/rt2400pci.c 	struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
queue            1254 drivers/net/wireless/ralink/rt2x00/rt2400pci.c 	    entry->queue->rt2x00dev->rssi_offset;
queue            1268 drivers/net/wireless/ralink/rt2x00/rt2400pci.c 	struct data_queue *queue = rt2x00queue_get_tx_queue(rt2x00dev, queue_idx);
queue            1274 drivers/net/wireless/ralink/rt2x00/rt2400pci.c 	while (!rt2x00queue_empty(queue)) {
queue            1275 drivers/net/wireless/ralink/rt2x00/rt2400pci.c 		entry = rt2x00queue_get_entry(queue, Q_INDEX_DONE);
queue            1655 drivers/net/wireless/ralink/rt2x00/rt2400pci.c 			     struct ieee80211_vif *vif, u16 queue,
queue            1665 drivers/net/wireless/ralink/rt2x00/rt2400pci.c 	if (queue != 0)
queue            1668 drivers/net/wireless/ralink/rt2x00/rt2400pci.c 	if (rt2x00mac_conf_tx(hw, vif, queue, params))
queue            1756 drivers/net/wireless/ralink/rt2x00/rt2400pci.c static void rt2400pci_queue_init(struct data_queue *queue)
queue            1758 drivers/net/wireless/ralink/rt2x00/rt2400pci.c 	switch (queue->qid) {
queue            1760 drivers/net/wireless/ralink/rt2x00/rt2400pci.c 		queue->limit = 24;
queue            1761 drivers/net/wireless/ralink/rt2x00/rt2400pci.c 		queue->data_size = DATA_FRAME_SIZE;
queue            1762 drivers/net/wireless/ralink/rt2x00/rt2400pci.c 		queue->desc_size = RXD_DESC_SIZE;
queue            1763 drivers/net/wireless/ralink/rt2x00/rt2400pci.c 		queue->priv_size = sizeof(struct queue_entry_priv_mmio);
queue            1770 drivers/net/wireless/ralink/rt2x00/rt2400pci.c 		queue->limit = 24;
queue            1771 drivers/net/wireless/ralink/rt2x00/rt2400pci.c 		queue->data_size = DATA_FRAME_SIZE;
queue            1772 drivers/net/wireless/ralink/rt2x00/rt2400pci.c 		queue->desc_size = TXD_DESC_SIZE;
queue            1773 drivers/net/wireless/ralink/rt2x00/rt2400pci.c 		queue->priv_size = sizeof(struct queue_entry_priv_mmio);
queue            1777 drivers/net/wireless/ralink/rt2x00/rt2400pci.c 		queue->limit = 1;
queue            1778 drivers/net/wireless/ralink/rt2x00/rt2400pci.c 		queue->data_size = MGMT_FRAME_SIZE;
queue            1779 drivers/net/wireless/ralink/rt2x00/rt2400pci.c 		queue->desc_size = TXD_DESC_SIZE;
queue            1780 drivers/net/wireless/ralink/rt2x00/rt2400pci.c 		queue->priv_size = sizeof(struct queue_entry_priv_mmio);
queue            1784 drivers/net/wireless/ralink/rt2x00/rt2400pci.c 		queue->limit = 8;
queue            1785 drivers/net/wireless/ralink/rt2x00/rt2400pci.c 		queue->data_size = DATA_FRAME_SIZE;
queue            1786 drivers/net/wireless/ralink/rt2x00/rt2400pci.c 		queue->desc_size = TXD_DESC_SIZE;
queue            1787 drivers/net/wireless/ralink/rt2x00/rt2400pci.c 		queue->priv_size = sizeof(struct queue_entry_priv_mmio);
queue             286 drivers/net/wireless/ralink/rt2x00/rt2500pci.c 	struct data_queue *queue = rt2x00dev->bcn;
queue             297 drivers/net/wireless/ralink/rt2x00/rt2500pci.c 		rt2x00_set_field32(&reg, BCNCSR1_BEACON_CWMIN, queue->cw_min);
queue             716 drivers/net/wireless/ralink/rt2x00/rt2500pci.c static void rt2500pci_start_queue(struct data_queue *queue)
queue             718 drivers/net/wireless/ralink/rt2x00/rt2500pci.c 	struct rt2x00_dev *rt2x00dev = queue->rt2x00dev;
queue             721 drivers/net/wireless/ralink/rt2x00/rt2500pci.c 	switch (queue->qid) {
queue             739 drivers/net/wireless/ralink/rt2x00/rt2500pci.c static void rt2500pci_kick_queue(struct data_queue *queue)
queue             741 drivers/net/wireless/ralink/rt2x00/rt2500pci.c 	struct rt2x00_dev *rt2x00dev = queue->rt2x00dev;
queue             744 drivers/net/wireless/ralink/rt2x00/rt2500pci.c 	switch (queue->qid) {
queue             765 drivers/net/wireless/ralink/rt2x00/rt2500pci.c static void rt2500pci_stop_queue(struct data_queue *queue)
queue             767 drivers/net/wireless/ralink/rt2x00/rt2500pci.c 	struct rt2x00_dev *rt2x00dev = queue->rt2x00dev;
queue             770 drivers/net/wireless/ralink/rt2x00/rt2500pci.c 	switch (queue->qid) {
queue             808 drivers/net/wireless/ralink/rt2x00/rt2500pci.c 	if (entry->queue->qid == QID_RX) {
queue             826 drivers/net/wireless/ralink/rt2x00/rt2500pci.c 	if (entry->queue->qid == QID_RX) {
queue            1264 drivers/net/wireless/ralink/rt2x00/rt2500pci.c 	rt2x00_set_field32(&word, TXD_W2_AIFS, entry->queue->aifs);
queue            1265 drivers/net/wireless/ralink/rt2x00/rt2500pci.c 	rt2x00_set_field32(&word, TXD_W2_CWMIN, entry->queue->cw_min);
queue            1266 drivers/net/wireless/ralink/rt2x00/rt2500pci.c 	rt2x00_set_field32(&word, TXD_W2_CWMAX, entry->queue->cw_max);
queue            1320 drivers/net/wireless/ralink/rt2x00/rt2500pci.c 	struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
queue            1379 drivers/net/wireless/ralink/rt2x00/rt2500pci.c 	    entry->queue->rt2x00dev->rssi_offset;
queue            1396 drivers/net/wireless/ralink/rt2x00/rt2500pci.c 	struct data_queue *queue = rt2x00queue_get_tx_queue(rt2x00dev, queue_idx);
queue            1402 drivers/net/wireless/ralink/rt2x00/rt2500pci.c 	while (!rt2x00queue_empty(queue)) {
queue            1403 drivers/net/wireless/ralink/rt2x00/rt2500pci.c 		entry = rt2x00queue_get_entry(queue, Q_INDEX_DONE);
queue            2055 drivers/net/wireless/ralink/rt2x00/rt2500pci.c static void rt2500pci_queue_init(struct data_queue *queue)
queue            2057 drivers/net/wireless/ralink/rt2x00/rt2500pci.c 	switch (queue->qid) {
queue            2059 drivers/net/wireless/ralink/rt2x00/rt2500pci.c 		queue->limit = 32;
queue            2060 drivers/net/wireless/ralink/rt2x00/rt2500pci.c 		queue->data_size = DATA_FRAME_SIZE;
queue            2061 drivers/net/wireless/ralink/rt2x00/rt2500pci.c 		queue->desc_size = RXD_DESC_SIZE;
queue            2062 drivers/net/wireless/ralink/rt2x00/rt2500pci.c 		queue->priv_size = sizeof(struct queue_entry_priv_mmio);
queue            2069 drivers/net/wireless/ralink/rt2x00/rt2500pci.c 		queue->limit = 32;
queue            2070 drivers/net/wireless/ralink/rt2x00/rt2500pci.c 		queue->data_size = DATA_FRAME_SIZE;
queue            2071 drivers/net/wireless/ralink/rt2x00/rt2500pci.c 		queue->desc_size = TXD_DESC_SIZE;
queue            2072 drivers/net/wireless/ralink/rt2x00/rt2500pci.c 		queue->priv_size = sizeof(struct queue_entry_priv_mmio);
queue            2076 drivers/net/wireless/ralink/rt2x00/rt2500pci.c 		queue->limit = 1;
queue            2077 drivers/net/wireless/ralink/rt2x00/rt2500pci.c 		queue->data_size = MGMT_FRAME_SIZE;
queue            2078 drivers/net/wireless/ralink/rt2x00/rt2500pci.c 		queue->desc_size = TXD_DESC_SIZE;
queue            2079 drivers/net/wireless/ralink/rt2x00/rt2500pci.c 		queue->priv_size = sizeof(struct queue_entry_priv_mmio);
queue            2083 drivers/net/wireless/ralink/rt2x00/rt2500pci.c 		queue->limit = 8;
queue            2084 drivers/net/wireless/ralink/rt2x00/rt2500pci.c 		queue->data_size = DATA_FRAME_SIZE;
queue            2085 drivers/net/wireless/ralink/rt2x00/rt2500pci.c 		queue->desc_size = TXD_DESC_SIZE;
queue            2086 drivers/net/wireless/ralink/rt2x00/rt2500pci.c 		queue->priv_size = sizeof(struct queue_entry_priv_mmio);
queue             717 drivers/net/wireless/ralink/rt2x00/rt2500usb.c static void rt2500usb_start_queue(struct data_queue *queue)
queue             719 drivers/net/wireless/ralink/rt2x00/rt2500usb.c 	struct rt2x00_dev *rt2x00dev = queue->rt2x00dev;
queue             722 drivers/net/wireless/ralink/rt2x00/rt2500usb.c 	switch (queue->qid) {
queue             740 drivers/net/wireless/ralink/rt2x00/rt2500usb.c static void rt2500usb_stop_queue(struct data_queue *queue)
queue             742 drivers/net/wireless/ralink/rt2x00/rt2500usb.c 	struct rt2x00_dev *rt2x00dev = queue->rt2x00dev;
queue             745 drivers/net/wireless/ralink/rt2x00/rt2500usb.c 	switch (queue->qid) {
queue            1086 drivers/net/wireless/ralink/rt2x00/rt2500usb.c 	rt2x00_set_field32(&word, TXD_W1_AIFS, entry->queue->aifs);
queue            1087 drivers/net/wireless/ralink/rt2x00/rt2500usb.c 	rt2x00_set_field32(&word, TXD_W1_CWMIN, entry->queue->cw_min);
queue            1088 drivers/net/wireless/ralink/rt2x00/rt2500usb.c 	rt2x00_set_field32(&word, TXD_W1_CWMAX, entry->queue->cw_max);
queue            1121 drivers/net/wireless/ralink/rt2x00/rt2500usb.c 	struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
queue            1124 drivers/net/wireless/ralink/rt2x00/rt2500usb.c 	int pipe = usb_sndbulkpipe(usb_dev, entry->queue->usb_endpoint);
queue            1208 drivers/net/wireless/ralink/rt2x00/rt2500usb.c 	length += (2 * !(length % entry->queue->usb_maxpacket));
queue            1219 drivers/net/wireless/ralink/rt2x00/rt2500usb.c 	struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
queue            1225 drivers/net/wireless/ralink/rt2x00/rt2500usb.c 			entry->queue->desc_size));
queue            1297 drivers/net/wireless/ralink/rt2x00/rt2500usb.c 	if (!test_bit(DEVICE_STATE_ENABLED_RADIO, &entry->queue->rt2x00dev->flags))
queue            1846 drivers/net/wireless/ralink/rt2x00/rt2500usb.c static void rt2500usb_queue_init(struct data_queue *queue)
queue            1848 drivers/net/wireless/ralink/rt2x00/rt2500usb.c 	switch (queue->qid) {
queue            1850 drivers/net/wireless/ralink/rt2x00/rt2500usb.c 		queue->limit = 32;
queue            1851 drivers/net/wireless/ralink/rt2x00/rt2500usb.c 		queue->data_size = DATA_FRAME_SIZE;
queue            1852 drivers/net/wireless/ralink/rt2x00/rt2500usb.c 		queue->desc_size = RXD_DESC_SIZE;
queue            1853 drivers/net/wireless/ralink/rt2x00/rt2500usb.c 		queue->priv_size = sizeof(struct queue_entry_priv_usb);
queue            1860 drivers/net/wireless/ralink/rt2x00/rt2500usb.c 		queue->limit = 32;
queue            1861 drivers/net/wireless/ralink/rt2x00/rt2500usb.c 		queue->data_size = DATA_FRAME_SIZE;
queue            1862 drivers/net/wireless/ralink/rt2x00/rt2500usb.c 		queue->desc_size = TXD_DESC_SIZE;
queue            1863 drivers/net/wireless/ralink/rt2x00/rt2500usb.c 		queue->priv_size = sizeof(struct queue_entry_priv_usb);
queue            1867 drivers/net/wireless/ralink/rt2x00/rt2500usb.c 		queue->limit = 1;
queue            1868 drivers/net/wireless/ralink/rt2x00/rt2500usb.c 		queue->data_size = MGMT_FRAME_SIZE;
queue            1869 drivers/net/wireless/ralink/rt2x00/rt2500usb.c 		queue->desc_size = TXD_DESC_SIZE;
queue            1870 drivers/net/wireless/ralink/rt2x00/rt2500usb.c 		queue->priv_size = sizeof(struct queue_entry_priv_usb_bcn);
queue            1874 drivers/net/wireless/ralink/rt2x00/rt2500usb.c 		queue->limit = 8;
queue            1875 drivers/net/wireless/ralink/rt2x00/rt2500usb.c 		queue->data_size = DATA_FRAME_SIZE;
queue            1876 drivers/net/wireless/ralink/rt2x00/rt2500usb.c 		queue->desc_size = TXD_DESC_SIZE;
queue            1877 drivers/net/wireless/ralink/rt2x00/rt2500usb.c 		queue->priv_size = sizeof(struct queue_entry_priv_usb);
queue             816 drivers/net/wireless/ralink/rt2x00/rt2800lib.c 	rt2x00_set_field32(&word, TXWI_W1_PACKETID_QUEUE, entry->queue->qid);
queue             829 drivers/net/wireless/ralink/rt2x00/rt2800lib.c 	for (i = 2; i < entry->queue->winfo_size / sizeof(__le32); i++)
queue             914 drivers/net/wireless/ralink/rt2x00/rt2800lib.c 	rxdesc->rssi = rt2800_agc_to_rssi(entry->queue->rt2x00dev, word);
queue             918 drivers/net/wireless/ralink/rt2x00/rt2800lib.c 	skb_pull(entry->skb, entry->queue->winfo_size);
queue             987 drivers/net/wireless/ralink/rt2x00/rt2800lib.c 		rt2x00_dbg(entry->queue->rt2x00dev,
queue             989 drivers/net/wireless/ralink/rt2x00/rt2800lib.c 			   entry->queue->qid, entry->entry_idx);
queue             999 drivers/net/wireless/ralink/rt2x00/rt2800lib.c 	struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
queue            1100 drivers/net/wireless/ralink/rt2x00/rt2800lib.c 	struct data_queue *queue;
queue            1112 drivers/net/wireless/ralink/rt2x00/rt2800lib.c 		queue = rt2x00queue_get_tx_queue(rt2x00dev, qid);
queue            1114 drivers/net/wireless/ralink/rt2x00/rt2800lib.c 		if (unlikely(rt2x00queue_empty(queue))) {
queue            1120 drivers/net/wireless/ralink/rt2x00/rt2800lib.c 		entry = rt2x00queue_get_entry(queue, Q_INDEX_DONE);
queue            1151 drivers/net/wireless/ralink/rt2x00/rt2800lib.c 		rt2x00_dbg(entry->queue->rt2x00dev,
queue            1153 drivers/net/wireless/ralink/rt2x00/rt2800lib.c 			   entry->entry_idx, entry->queue->qid);
queue            1159 drivers/net/wireless/ralink/rt2x00/rt2800lib.c 	struct data_queue *queue;
queue            1162 drivers/net/wireless/ralink/rt2x00/rt2800lib.c 	tx_queue_for_each(rt2x00dev, queue) {
queue            1163 drivers/net/wireless/ralink/rt2x00/rt2800lib.c 		entry = rt2x00queue_get_entry(queue, Q_INDEX_DONE);
queue            1178 drivers/net/wireless/ralink/rt2x00/rt2800lib.c 	struct data_queue *queue;
queue            1180 drivers/net/wireless/ralink/rt2x00/rt2800lib.c 	tx_queue_for_each(rt2x00dev, queue) {
queue            1181 drivers/net/wireless/ralink/rt2x00/rt2800lib.c 		if (rt2x00queue_get_entry(queue, Q_INDEX_DMA_DONE) !=
queue            1182 drivers/net/wireless/ralink/rt2x00/rt2800lib.c 		    rt2x00queue_get_entry(queue, Q_INDEX_DONE))
queue            1191 drivers/net/wireless/ralink/rt2x00/rt2800lib.c 	struct data_queue *queue;
queue            1201 drivers/net/wireless/ralink/rt2x00/rt2800lib.c 	tx_queue_for_each(rt2x00dev, queue) {
queue            1202 drivers/net/wireless/ralink/rt2x00/rt2800lib.c 		while (!rt2x00queue_empty(queue)) {
queue            1203 drivers/net/wireless/ralink/rt2x00/rt2800lib.c 			entry = rt2x00queue_get_entry(queue, Q_INDEX_DONE);
queue            1219 drivers/net/wireless/ralink/rt2x00/rt2800lib.c static int rt2800_check_hung(struct data_queue *queue)
queue            1221 drivers/net/wireless/ralink/rt2x00/rt2800lib.c 	unsigned int cur_idx = rt2800_drv_get_dma_done(queue);
queue            1223 drivers/net/wireless/ralink/rt2x00/rt2800lib.c 	if (queue->wd_idx != cur_idx)
queue            1224 drivers/net/wireless/ralink/rt2x00/rt2800lib.c 		queue->wd_count = 0;
queue            1226 drivers/net/wireless/ralink/rt2x00/rt2800lib.c 		queue->wd_count++;
queue            1228 drivers/net/wireless/ralink/rt2x00/rt2800lib.c 	return queue->wd_count > 16;
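
rt2800lib.c:1219-1228 above is the TX/RX hang detector: the watchdog remembers the last DMA-done index, and if it has not advanced after more than 16 consecutive polls the queue is declared hung. A self-contained sketch with struct dq standing in for data_queue and the wd_idx update made explicit:

    struct dq {
            unsigned int wd_idx;            /* index seen at the previous poll */
            unsigned int wd_count;          /* consecutive polls without progress */
    };

    static int check_hung(struct dq *q, unsigned int cur_idx)
    {
            if (q->wd_idx != cur_idx)
                    q->wd_count = 0;        /* progress: reset the stall counter */
            else
                    q->wd_count++;

            q->wd_idx = cur_idx;            /* remember for the next poll */
            return q->wd_count > 16;        /* hung after 16 stalled polls */
    }
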
queue            1233 drivers/net/wireless/ralink/rt2x00/rt2800lib.c 	struct data_queue *queue;
queue            1240 drivers/net/wireless/ralink/rt2x00/rt2800lib.c 	queue_for_each(rt2x00dev, queue) {
queue            1241 drivers/net/wireless/ralink/rt2x00/rt2800lib.c 		switch (queue->qid) {
queue            1247 drivers/net/wireless/ralink/rt2x00/rt2800lib.c 			if (rt2x00queue_empty(queue))
queue            1249 drivers/net/wireless/ralink/rt2x00/rt2800lib.c 			hung_tx = rt2800_check_hung(queue);
queue            1258 drivers/net/wireless/ralink/rt2x00/rt2800lib.c 			hung_rx = rt2800_check_hung(queue);
queue            1290 drivers/net/wireless/ralink/rt2x00/rt2800lib.c 	struct data_queue *queue = rt2x00dev->bcn;
queue            1299 drivers/net/wireless/ralink/rt2x00/rt2800lib.c 	for (i = 0; i < queue->limit; i++) {
queue            1300 drivers/net/wireless/ralink/rt2x00/rt2800lib.c 		entry = &queue->entries[i];
queue            1322 drivers/net/wireless/ralink/rt2x00/rt2800lib.c 	struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
queue            1327 drivers/net/wireless/ralink/rt2x00/rt2800lib.c 	const int txwi_desc_size = entry->queue->winfo_size;
queue            1416 drivers/net/wireless/ralink/rt2x00/rt2800lib.c 	struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
queue            1862 drivers/net/wireless/ralink/rt2x00/rt2800lib.c 	struct data_queue *queue = rt2x00dev->bcn;
queue            1871 drivers/net/wireless/ralink/rt2x00/rt2800lib.c 	for (i = 0; i < queue->limit; i++) {
queue            1872 drivers/net/wireless/ralink/rt2x00/rt2800lib.c 		entry = &queue->entries[i];
queue            10380 drivers/net/wireless/ralink/rt2x00/rt2800lib.c 	struct data_queue *queue;
queue            10403 drivers/net/wireless/ralink/rt2x00/rt2800lib.c 	queue = rt2x00queue_get_tx_queue(rt2x00dev, queue_idx);
queue            10411 drivers/net/wireless/ralink/rt2x00/rt2800lib.c 	rt2x00_set_field32(&reg, field, queue->txop);
queue            10419 drivers/net/wireless/ralink/rt2x00/rt2800lib.c 	rt2x00_set_field32(&reg, field, queue->aifs);
queue            10423 drivers/net/wireless/ralink/rt2x00/rt2800lib.c 	rt2x00_set_field32(&reg, field, queue->cw_min);
queue            10427 drivers/net/wireless/ralink/rt2x00/rt2800lib.c 	rt2x00_set_field32(&reg, field, queue->cw_max);
queue            10434 drivers/net/wireless/ralink/rt2x00/rt2800lib.c 	rt2x00_set_field32(&reg, EDCA_AC0_CFG_TX_OP, queue->txop);
queue            10435 drivers/net/wireless/ralink/rt2x00/rt2800lib.c 	rt2x00_set_field32(&reg, EDCA_AC0_CFG_AIFSN, queue->aifs);
queue            10436 drivers/net/wireless/ralink/rt2x00/rt2800lib.c 	rt2x00_set_field32(&reg, EDCA_AC0_CFG_CWMIN, queue->cw_min);
queue            10437 drivers/net/wireless/ralink/rt2x00/rt2800lib.c 	rt2x00_set_field32(&reg, EDCA_AC0_CFG_CWMAX, queue->cw_max);
queue              68 drivers/net/wireless/ralink/rt2x00/rt2800lib.h 	unsigned int (*drv_get_dma_done)(struct data_queue *queue);
queue             165 drivers/net/wireless/ralink/rt2x00/rt2800lib.h 	const struct rt2800_ops *rt2800ops = entry->queue->rt2x00dev->ops->drv;
queue             170 drivers/net/wireless/ralink/rt2x00/rt2800lib.h static inline unsigned int rt2800_drv_get_dma_done(struct data_queue *queue)
queue             172 drivers/net/wireless/ralink/rt2x00/rt2800lib.h 	const struct rt2800_ops *rt2800ops = queue->rt2x00dev->ops->drv;
queue             174 drivers/net/wireless/ralink/rt2x00/rt2800lib.h 	return rt2800ops->drv_get_dma_done(queue);
queue              27 drivers/net/wireless/ralink/rt2x00/rt2800mmio.c unsigned int rt2800mmio_get_dma_done(struct data_queue *queue)
queue              29 drivers/net/wireless/ralink/rt2x00/rt2800mmio.c 	struct rt2x00_dev *rt2x00dev = queue->rt2x00dev;
queue              33 drivers/net/wireless/ralink/rt2x00/rt2800mmio.c 	switch (queue->qid) {
queue              38 drivers/net/wireless/ralink/rt2x00/rt2800mmio.c 		qid = queue->qid;
queue              45 drivers/net/wireless/ralink/rt2x00/rt2800mmio.c 		entry = rt2x00queue_get_entry(queue, Q_INDEX_DMA_DONE);
queue              74 drivers/net/wireless/ralink/rt2x00/rt2800mmio.c 	const unsigned int txwi_size = entry->queue->winfo_size;
queue             420 drivers/net/wireless/ralink/rt2x00/rt2800mmio.c void rt2800mmio_start_queue(struct data_queue *queue)
queue             422 drivers/net/wireless/ralink/rt2x00/rt2800mmio.c 	struct rt2x00_dev *rt2x00dev = queue->rt2x00dev;
queue             425 drivers/net/wireless/ralink/rt2x00/rt2800mmio.c 	switch (queue->qid) {
queue             451 drivers/net/wireless/ralink/rt2x00/rt2800mmio.c void rt2800mmio_kick_queue(struct data_queue *queue)
queue             453 drivers/net/wireless/ralink/rt2x00/rt2800mmio.c 	struct rt2x00_dev *rt2x00dev = queue->rt2x00dev;
queue             456 drivers/net/wireless/ralink/rt2x00/rt2800mmio.c 	switch (queue->qid) {
queue             461 drivers/net/wireless/ralink/rt2x00/rt2800mmio.c 		WARN_ON_ONCE(rt2x00queue_empty(queue));
queue             462 drivers/net/wireless/ralink/rt2x00/rt2800mmio.c 		entry = rt2x00queue_get_entry(queue, Q_INDEX);
queue             463 drivers/net/wireless/ralink/rt2x00/rt2800mmio.c 		rt2x00mmio_register_write(rt2x00dev, TX_CTX_IDX(queue->qid),
queue             469 drivers/net/wireless/ralink/rt2x00/rt2800mmio.c 		entry = rt2x00queue_get_entry(queue, Q_INDEX);
queue             479 drivers/net/wireless/ralink/rt2x00/rt2800mmio.c void rt2800mmio_flush_queue(struct data_queue *queue, bool drop)
queue             481 drivers/net/wireless/ralink/rt2x00/rt2800mmio.c 	struct rt2x00_dev *rt2x00dev = queue->rt2x00dev;
queue             485 drivers/net/wireless/ralink/rt2x00/rt2800mmio.c 	switch (queue->qid) {
queue             504 drivers/net/wireless/ralink/rt2x00/rt2800mmio.c 		if (rt2x00queue_empty(queue))
queue             523 drivers/net/wireless/ralink/rt2x00/rt2800mmio.c void rt2800mmio_stop_queue(struct data_queue *queue)
queue             525 drivers/net/wireless/ralink/rt2x00/rt2800mmio.c 	struct rt2x00_dev *rt2x00dev = queue->rt2x00dev;
queue             528 drivers/net/wireless/ralink/rt2x00/rt2800mmio.c 	switch (queue->qid) {
queue             560 drivers/net/wireless/ralink/rt2x00/rt2800mmio.c void rt2800mmio_queue_init(struct data_queue *queue)
queue             562 drivers/net/wireless/ralink/rt2x00/rt2800mmio.c 	struct rt2x00_dev *rt2x00dev = queue->rt2x00dev;
queue             567 drivers/net/wireless/ralink/rt2x00/rt2800mmio.c 	switch (queue->qid) {
queue             569 drivers/net/wireless/ralink/rt2x00/rt2800mmio.c 		queue->limit = 128;
queue             570 drivers/net/wireless/ralink/rt2x00/rt2800mmio.c 		queue->data_size = AGGREGATION_SIZE;
queue             571 drivers/net/wireless/ralink/rt2x00/rt2800mmio.c 		queue->desc_size = RXD_DESC_SIZE;
queue             572 drivers/net/wireless/ralink/rt2x00/rt2800mmio.c 		queue->winfo_size = rxwi_size;
queue             573 drivers/net/wireless/ralink/rt2x00/rt2800mmio.c 		queue->priv_size = sizeof(struct queue_entry_priv_mmio);
queue             580 drivers/net/wireless/ralink/rt2x00/rt2800mmio.c 		queue->limit = 64;
queue             581 drivers/net/wireless/ralink/rt2x00/rt2800mmio.c 		queue->data_size = AGGREGATION_SIZE;
queue             582 drivers/net/wireless/ralink/rt2x00/rt2800mmio.c 		queue->desc_size = TXD_DESC_SIZE;
queue             583 drivers/net/wireless/ralink/rt2x00/rt2800mmio.c 		queue->winfo_size = txwi_size;
queue             584 drivers/net/wireless/ralink/rt2x00/rt2800mmio.c 		queue->priv_size = sizeof(struct queue_entry_priv_mmio);
queue             588 drivers/net/wireless/ralink/rt2x00/rt2800mmio.c 		queue->limit = 8;
queue             589 drivers/net/wireless/ralink/rt2x00/rt2800mmio.c 		queue->data_size = 0; /* No DMA required for beacons */
queue             590 drivers/net/wireless/ralink/rt2x00/rt2800mmio.c 		queue->desc_size = TXD_DESC_SIZE;
queue             591 drivers/net/wireless/ralink/rt2x00/rt2800mmio.c 		queue->winfo_size = txwi_size;
queue             592 drivers/net/wireless/ralink/rt2x00/rt2800mmio.c 		queue->priv_size = sizeof(struct queue_entry_priv_mmio);
queue             612 drivers/net/wireless/ralink/rt2x00/rt2800mmio.c 	if (entry->queue->qid == QID_RX) {
queue             628 drivers/net/wireless/ralink/rt2x00/rt2800mmio.c 	struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
queue             631 drivers/net/wireless/ralink/rt2x00/rt2800mmio.c 	if (entry->queue->qid == QID_RX) {
queue             652 drivers/net/wireless/ralink/rt2x00/rt2800mmio.c 		if (entry->queue->length == 1)
queue             117 drivers/net/wireless/ralink/rt2x00/rt2800mmio.h unsigned int rt2800mmio_get_dma_done(struct data_queue *queue);
queue             139 drivers/net/wireless/ralink/rt2x00/rt2800mmio.h void rt2800mmio_start_queue(struct data_queue *queue);
queue             140 drivers/net/wireless/ralink/rt2x00/rt2800mmio.h void rt2800mmio_kick_queue(struct data_queue *queue);
queue             141 drivers/net/wireless/ralink/rt2x00/rt2800mmio.h void rt2800mmio_flush_queue(struct data_queue *queue, bool drop);
queue             142 drivers/net/wireless/ralink/rt2x00/rt2800mmio.h void rt2800mmio_stop_queue(struct data_queue *queue);
queue             143 drivers/net/wireless/ralink/rt2x00/rt2800mmio.h void rt2800mmio_queue_init(struct data_queue *queue);
queue              46 drivers/net/wireless/ralink/rt2x00/rt2800usb.c static void rt2800usb_start_queue(struct data_queue *queue)
queue              48 drivers/net/wireless/ralink/rt2x00/rt2800usb.c 	struct rt2x00_dev *rt2x00dev = queue->rt2x00dev;
queue              51 drivers/net/wireless/ralink/rt2x00/rt2800usb.c 	switch (queue->qid) {
queue              69 drivers/net/wireless/ralink/rt2x00/rt2800usb.c static void rt2800usb_stop_queue(struct data_queue *queue)
queue              71 drivers/net/wireless/ralink/rt2x00/rt2800usb.c 	struct rt2x00_dev *rt2x00dev = queue->rt2x00dev;
queue              74 drivers/net/wireless/ralink/rt2x00/rt2800usb.c 	switch (queue->qid) {
queue             157 drivers/net/wireless/ralink/rt2x00/rt2800usb.c 	struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
queue             382 drivers/net/wireless/ralink/rt2x00/rt2800usb.c static unsigned int rt2800usb_get_dma_done(struct data_queue *queue)
queue             386 drivers/net/wireless/ralink/rt2x00/rt2800usb.c 	entry = rt2x00queue_get_entry(queue, Q_INDEX_DMA_DONE);
queue             395 drivers/net/wireless/ralink/rt2x00/rt2800usb.c 	if (entry->queue->qid == QID_BEACON)
queue             434 drivers/net/wireless/ralink/rt2x00/rt2800usb.c 	skbdesc->desc_len = TXINFO_DESC_SIZE + entry->queue->winfo_size;
queue             514 drivers/net/wireless/ralink/rt2x00/rt2800usb.c 			rx_pkt_len > entry->queue->data_size)) {
queue             515 drivers/net/wireless/ralink/rt2x00/rt2800usb.c 		rt2x00_err(entry->queue->rt2x00dev,
queue             712 drivers/net/wireless/ralink/rt2x00/rt2800usb.c static void rt2800usb_queue_init(struct data_queue *queue)
queue             714 drivers/net/wireless/ralink/rt2x00/rt2800usb.c 	struct rt2x00_dev *rt2x00dev = queue->rt2x00dev;
queue             719 drivers/net/wireless/ralink/rt2x00/rt2800usb.c 	switch (queue->qid) {
queue             721 drivers/net/wireless/ralink/rt2x00/rt2800usb.c 		queue->limit = 128;
queue             722 drivers/net/wireless/ralink/rt2x00/rt2800usb.c 		queue->data_size = AGGREGATION_SIZE;
queue             723 drivers/net/wireless/ralink/rt2x00/rt2800usb.c 		queue->desc_size = RXINFO_DESC_SIZE;
queue             724 drivers/net/wireless/ralink/rt2x00/rt2800usb.c 		queue->winfo_size = rxwi_size;
queue             725 drivers/net/wireless/ralink/rt2x00/rt2800usb.c 		queue->priv_size = sizeof(struct queue_entry_priv_usb);
queue             732 drivers/net/wireless/ralink/rt2x00/rt2800usb.c 		queue->limit = 16;
queue             733 drivers/net/wireless/ralink/rt2x00/rt2800usb.c 		queue->data_size = AGGREGATION_SIZE;
queue             734 drivers/net/wireless/ralink/rt2x00/rt2800usb.c 		queue->desc_size = TXINFO_DESC_SIZE;
queue             735 drivers/net/wireless/ralink/rt2x00/rt2800usb.c 		queue->winfo_size = txwi_size;
queue             736 drivers/net/wireless/ralink/rt2x00/rt2800usb.c 		queue->priv_size = sizeof(struct queue_entry_priv_usb);
queue             740 drivers/net/wireless/ralink/rt2x00/rt2800usb.c 		queue->limit = 8;
queue             741 drivers/net/wireless/ralink/rt2x00/rt2800usb.c 		queue->data_size = MGMT_FRAME_SIZE;
queue             742 drivers/net/wireless/ralink/rt2x00/rt2800usb.c 		queue->desc_size = TXINFO_DESC_SIZE;
queue             743 drivers/net/wireless/ralink/rt2x00/rt2800usb.c 		queue->winfo_size = txwi_size;
queue             744 drivers/net/wireless/ralink/rt2x00/rt2800usb.c 		queue->priv_size = sizeof(struct queue_entry_priv_usb);
queue             568 drivers/net/wireless/ralink/rt2x00/rt2x00.h 	void (*start_queue) (struct data_queue *queue);
queue             569 drivers/net/wireless/ralink/rt2x00/rt2x00.h 	void (*kick_queue) (struct data_queue *queue);
queue             570 drivers/net/wireless/ralink/rt2x00/rt2x00.h 	void (*stop_queue) (struct data_queue *queue);
queue             571 drivers/net/wireless/ralink/rt2x00/rt2x00.h 	void (*flush_queue) (struct data_queue *queue, bool drop);
queue             637 drivers/net/wireless/ralink/rt2x00/rt2x00.h 	void (*queue_init)(struct data_queue *queue);
queue            1302 drivers/net/wireless/ralink/rt2x00/rt2x00.h 			 const enum data_queue_qid queue)
queue            1304 drivers/net/wireless/ralink/rt2x00/rt2x00.h 	if (queue < rt2x00dev->ops->tx_queues && rt2x00dev->tx)
queue            1305 drivers/net/wireless/ralink/rt2x00/rt2x00.h 		return &rt2x00dev->tx[queue];
queue            1307 drivers/net/wireless/ralink/rt2x00/rt2x00.h 	if (queue == QID_ATIM)
queue            1318 drivers/net/wireless/ralink/rt2x00/rt2x00.h struct queue_entry *rt2x00queue_get_entry(struct data_queue *queue,
queue            1329 drivers/net/wireless/ralink/rt2x00/rt2x00.h void rt2x00queue_pause_queue(struct data_queue *queue);
queue            1338 drivers/net/wireless/ralink/rt2x00/rt2x00.h void rt2x00queue_unpause_queue(struct data_queue *queue);
queue            1346 drivers/net/wireless/ralink/rt2x00/rt2x00.h void rt2x00queue_start_queue(struct data_queue *queue);
queue            1354 drivers/net/wireless/ralink/rt2x00/rt2x00.h void rt2x00queue_stop_queue(struct data_queue *queue);
queue            1364 drivers/net/wireless/ralink/rt2x00/rt2x00.h void rt2x00queue_flush_queue(struct data_queue *queue, bool drop);
queue            1472 drivers/net/wireless/ralink/rt2x00/rt2x00.h 		      struct ieee80211_vif *vif, u16 queue,
queue             171 drivers/net/wireless/ralink/rt2x00/rt2x00debug.c 	dump_hdr->queue_index = entry->queue->qid;
queue             303 drivers/net/wireless/ralink/rt2x00/rt2x00debug.c 	struct data_queue *queue;
queue             320 drivers/net/wireless/ralink/rt2x00/rt2x00debug.c 	queue_for_each(intf->rt2x00dev, queue) {
queue             321 drivers/net/wireless/ralink/rt2x00/rt2x00debug.c 		spin_lock_irqsave(&queue->index_lock, irqflags);
queue             324 drivers/net/wireless/ralink/rt2x00/rt2x00debug.c 				queue->qid, (unsigned int)queue->flags,
queue             325 drivers/net/wireless/ralink/rt2x00/rt2x00debug.c 				queue->count, queue->limit, queue->length,
queue             326 drivers/net/wireless/ralink/rt2x00/rt2x00debug.c 				queue->index[Q_INDEX],
queue             327 drivers/net/wireless/ralink/rt2x00/rt2x00debug.c 				queue->index[Q_INDEX_DMA_DONE],
queue             328 drivers/net/wireless/ralink/rt2x00/rt2x00debug.c 				queue->index[Q_INDEX_DONE]);
queue             330 drivers/net/wireless/ralink/rt2x00/rt2x00debug.c 		spin_unlock_irqrestore(&queue->index_lock, irqflags);
queue             263 drivers/net/wireless/ralink/rt2x00/rt2x00dev.c 	struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
queue             404 drivers/net/wireless/ralink/rt2x00/rt2x00dev.c 	spin_lock_bh(&entry->queue->tx_lock);
queue             405 drivers/net/wireless/ralink/rt2x00/rt2x00dev.c 	if (!rt2x00queue_threshold(entry->queue))
queue             406 drivers/net/wireless/ralink/rt2x00/rt2x00dev.c 		rt2x00queue_unpause_queue(entry->queue);
queue             407 drivers/net/wireless/ralink/rt2x00/rt2x00dev.c 	spin_unlock_bh(&entry->queue->tx_lock);
queue             413 drivers/net/wireless/ralink/rt2x00/rt2x00dev.c 	struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
queue             463 drivers/net/wireless/ralink/rt2x00/rt2x00dev.c 	struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
queue             739 drivers/net/wireless/ralink/rt2x00/rt2x00dev.c 	struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
queue             777 drivers/net/wireless/ralink/rt2x00/rt2x00dev.c 		     rxdesc.size > entry->queue->data_size)) {
queue             779 drivers/net/wireless/ralink/rt2x00/rt2x00dev.c 			   rxdesc.size, entry->queue->data_size);
queue             136 drivers/net/wireless/ralink/rt2x00/rt2x00lib.h int rt2x00queue_write_tx_frame(struct data_queue *queue, struct sk_buff *skb,
queue              20 drivers/net/wireless/ralink/rt2x00/rt2x00mac.c 				struct data_queue *queue,
queue              80 drivers/net/wireless/ralink/rt2x00/rt2x00mac.c 	retval = rt2x00queue_write_tx_frame(queue, skb, NULL, true);
queue              96 drivers/net/wireless/ralink/rt2x00/rt2x00mac.c 	struct data_queue *queue = NULL;
queue             114 drivers/net/wireless/ralink/rt2x00/rt2x00mac.c 	queue = rt2x00queue_get_tx_queue(rt2x00dev, qid);
queue             115 drivers/net/wireless/ralink/rt2x00/rt2x00mac.c 	if (unlikely(!queue)) {
queue             134 drivers/net/wireless/ralink/rt2x00/rt2x00mac.c 		if (rt2x00queue_available(queue) <= 1) {
queue             139 drivers/net/wireless/ralink/rt2x00/rt2x00mac.c 			spin_lock(&queue->tx_lock);
queue             140 drivers/net/wireless/ralink/rt2x00/rt2x00mac.c 			if (rt2x00queue_threshold(queue))
queue             141 drivers/net/wireless/ralink/rt2x00/rt2x00mac.c 				rt2x00queue_pause_queue(queue);
queue             142 drivers/net/wireless/ralink/rt2x00/rt2x00mac.c 			spin_unlock(&queue->tx_lock);
queue             147 drivers/net/wireless/ralink/rt2x00/rt2x00mac.c 		if (rt2x00mac_tx_rts_cts(rt2x00dev, queue, skb))
queue             151 drivers/net/wireless/ralink/rt2x00/rt2x00mac.c 	if (unlikely(rt2x00queue_write_tx_frame(queue, skb, control->sta, false)))
queue             188 drivers/net/wireless/ralink/rt2x00/rt2x00mac.c 	struct data_queue *queue = rt2x00dev->bcn;
queue             206 drivers/net/wireless/ralink/rt2x00/rt2x00mac.c 	for (i = 0; i < queue->limit; i++) {
queue             207 drivers/net/wireless/ralink/rt2x00/rt2x00mac.c 		entry = &queue->entries[i];
queue             212 drivers/net/wireless/ralink/rt2x00/rt2x00mac.c 	if (unlikely(i == queue->limit))
queue             652 drivers/net/wireless/ralink/rt2x00/rt2x00mac.c 	struct data_queue *queue;
queue             654 drivers/net/wireless/ralink/rt2x00/rt2x00mac.c 	queue = rt2x00queue_get_tx_queue(rt2x00dev, queue_idx);
queue             655 drivers/net/wireless/ralink/rt2x00/rt2x00mac.c 	if (unlikely(!queue))
queue             663 drivers/net/wireless/ralink/rt2x00/rt2x00mac.c 		queue->cw_min = fls(params->cw_min);
queue             665 drivers/net/wireless/ralink/rt2x00/rt2x00mac.c 		queue->cw_min = 5; /* cw_min: 2^5 = 32. */
queue             668 drivers/net/wireless/ralink/rt2x00/rt2x00mac.c 		queue->cw_max = fls(params->cw_max);
queue             670 drivers/net/wireless/ralink/rt2x00/rt2x00mac.c 	queue->cw_max = 10; /* cw_max: 2^10 = 1024. */
queue             672 drivers/net/wireless/ralink/rt2x00/rt2x00mac.c 	queue->aifs = params->aifs;
queue             673 drivers/net/wireless/ralink/rt2x00/rt2x00mac.c 	queue->txop = params->txop;
queue             677 drivers/net/wireless/ralink/rt2x00/rt2x00mac.c 		   queue_idx, queue->cw_min, queue->cw_max, queue->aifs,
queue             678 drivers/net/wireless/ralink/rt2x00/rt2x00mac.c 		   queue->txop);
queue             697 drivers/net/wireless/ralink/rt2x00/rt2x00mac.c 	struct data_queue *queue;
queue             704 drivers/net/wireless/ralink/rt2x00/rt2x00mac.c 	tx_queue_for_each(rt2x00dev, queue)
queue             705 drivers/net/wireless/ralink/rt2x00/rt2x00mac.c 		rt2x00queue_flush_queue(queue, drop);
queue             782 drivers/net/wireless/ralink/rt2x00/rt2x00mac.c 	struct data_queue *queue;
queue             784 drivers/net/wireless/ralink/rt2x00/rt2x00mac.c 	tx_queue_for_each(rt2x00dev, queue) {
queue             785 drivers/net/wireless/ralink/rt2x00/rt2x00mac.c 		*tx += queue->length;
queue             786 drivers/net/wireless/ralink/rt2x00/rt2x00mac.c 		*tx_max += queue->limit;
queue             797 drivers/net/wireless/ralink/rt2x00/rt2x00mac.c 	struct data_queue *queue;
queue             799 drivers/net/wireless/ralink/rt2x00/rt2x00mac.c 	tx_queue_for_each(rt2x00dev, queue) {
queue             800 drivers/net/wireless/ralink/rt2x00/rt2x00mac.c 		if (!rt2x00queue_empty(queue))
queue              51 drivers/net/wireless/ralink/rt2x00/rt2x00mmio.c 	struct data_queue *queue = rt2x00dev->rx;
queue              58 drivers/net/wireless/ralink/rt2x00/rt2x00mmio.c 		entry = rt2x00queue_get_entry(queue, Q_INDEX);
queue              69 drivers/net/wireless/ralink/rt2x00/rt2x00mmio.c 		skbdesc->desc_len = entry->queue->desc_size;
queue              88 drivers/net/wireless/ralink/rt2x00/rt2x00mmio.c void rt2x00mmio_flush_queue(struct data_queue *queue, bool drop)
queue              92 drivers/net/wireless/ralink/rt2x00/rt2x00mmio.c 	for (i = 0; !rt2x00queue_empty(queue) && i < 10; i++)
queue             101 drivers/net/wireless/ralink/rt2x00/rt2x00mmio.c 				      struct data_queue *queue)
queue             112 drivers/net/wireless/ralink/rt2x00/rt2x00mmio.c 				  queue->limit * queue->desc_size, &dma,
queue             120 drivers/net/wireless/ralink/rt2x00/rt2x00mmio.c 	for (i = 0; i < queue->limit; i++) {
queue             121 drivers/net/wireless/ralink/rt2x00/rt2x00mmio.c 		entry_priv = queue->entries[i].priv_data;
queue             122 drivers/net/wireless/ralink/rt2x00/rt2x00mmio.c 		entry_priv->desc = addr + i * queue->desc_size;
queue             123 drivers/net/wireless/ralink/rt2x00/rt2x00mmio.c 		entry_priv->desc_dma = dma + i * queue->desc_size;
queue             130 drivers/net/wireless/ralink/rt2x00/rt2x00mmio.c 				      struct data_queue *queue)
queue             133 drivers/net/wireless/ralink/rt2x00/rt2x00mmio.c 	    queue->entries[0].priv_data;
queue             137 drivers/net/wireless/ralink/rt2x00/rt2x00mmio.c 				  queue->limit * queue->desc_size,
queue             144 drivers/net/wireless/ralink/rt2x00/rt2x00mmio.c 	struct data_queue *queue;
queue             150 drivers/net/wireless/ralink/rt2x00/rt2x00mmio.c 	queue_for_each(rt2x00dev, queue) {
queue             151 drivers/net/wireless/ralink/rt2x00/rt2x00mmio.c 		status = rt2x00mmio_alloc_queue_dma(rt2x00dev, queue);
queue             171 drivers/net/wireless/ralink/rt2x00/rt2x00mmio.c 	queue_for_each(rt2x00dev, queue)
queue             172 drivers/net/wireless/ralink/rt2x00/rt2x00mmio.c 		rt2x00mmio_free_queue_dma(rt2x00dev, queue);
queue             180 drivers/net/wireless/ralink/rt2x00/rt2x00mmio.c 	struct data_queue *queue;
queue             190 drivers/net/wireless/ralink/rt2x00/rt2x00mmio.c 	queue_for_each(rt2x00dev, queue)
queue             191 drivers/net/wireless/ralink/rt2x00/rt2x00mmio.c 		rt2x00mmio_free_queue_dma(rt2x00dev, queue);
queue              95 drivers/net/wireless/ralink/rt2x00/rt2x00mmio.h void rt2x00mmio_flush_queue(struct data_queue *queue, bool drop);
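
rt2x00mmio.c:101-123 above allocates one DMA-coherent block for a whole queue and hands each entry its slice of it. A sketch of that layout using the real dma_alloc_coherent() kernel API; struct entry_priv is a trimmed stand-in for queue_entry_priv_mmio, and teardown plus error handling beyond -ENOMEM are elided:

    #include <linux/dma-mapping.h>
    #include <linux/slab.h>

    struct entry_priv {
            void            *desc;          /* CPU address of this descriptor */
            dma_addr_t      desc_dma;       /* matching bus address */
    };

    static int alloc_queue_dma(struct device *dev, struct entry_priv *entries,
                               unsigned int limit, unsigned int desc_size)
    {
            dma_addr_t dma;
            void *addr;
            unsigned int i;

            /* one contiguous block: limit descriptors of desc_size bytes each */
            addr = dma_alloc_coherent(dev, limit * desc_size, &dma, GFP_KERNEL);
            if (!addr)
                    return -ENOMEM;

            for (i = 0; i < limit; i++) {
                    entries[i].desc     = addr + i * desc_size;
                    entries[i].desc_dma = dma  + i * desc_size;
            }
            return 0;
    }
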
queue              25 drivers/net/wireless/ralink/rt2x00/rt2x00queue.c 	struct data_queue *queue = entry->queue;
queue              26 drivers/net/wireless/ralink/rt2x00/rt2x00queue.c 	struct rt2x00_dev *rt2x00dev = queue->rt2x00dev;
queue              37 drivers/net/wireless/ralink/rt2x00/rt2x00queue.c 	frame_size = queue->data_size + queue->desc_size + queue->winfo_size;
queue              95 drivers/net/wireless/ralink/rt2x00/rt2x00queue.c 	struct device *dev = entry->queue->rt2x00dev->dev;
queue             112 drivers/net/wireless/ralink/rt2x00/rt2x00queue.c 	struct device *dev = entry->queue->rt2x00dev->dev;
queue             490 drivers/net/wireless/ralink/rt2x00/rt2x00queue.c 	struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
queue             502 drivers/net/wireless/ralink/rt2x00/rt2x00queue.c 			   entry->queue->qid, DRV_PROJECT);
queue             531 drivers/net/wireless/ralink/rt2x00/rt2x00queue.c 	struct data_queue *queue = entry->queue;
queue             533 drivers/net/wireless/ralink/rt2x00/rt2x00queue.c 	queue->rt2x00dev->ops->lib->write_tx_desc(entry, txdesc);
queue             539 drivers/net/wireless/ralink/rt2x00/rt2x00queue.c 	rt2x00debug_dump_frame(queue->rt2x00dev, DUMP_FRAME_TX, entry);
queue             542 drivers/net/wireless/ralink/rt2x00/rt2x00queue.c static void rt2x00queue_kick_tx_queue(struct data_queue *queue,
queue             554 drivers/net/wireless/ralink/rt2x00/rt2x00queue.c 	if (rt2x00queue_threshold(queue) ||
queue             556 drivers/net/wireless/ralink/rt2x00/rt2x00queue.c 		queue->rt2x00dev->ops->lib->kick_queue(queue);
queue             561 drivers/net/wireless/ralink/rt2x00/rt2x00queue.c 	struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
queue             601 drivers/net/wireless/ralink/rt2x00/rt2x00queue.c int rt2x00queue_write_tx_frame(struct data_queue *queue, struct sk_buff *skb,
queue             616 drivers/net/wireless/ralink/rt2x00/rt2x00queue.c 	rt2x00queue_create_tx_descriptor(queue->rt2x00dev, skb, &txdesc, sta);
queue             641 drivers/net/wireless/ralink/rt2x00/rt2x00queue.c 		if (rt2x00_has_cap_flag(queue->rt2x00dev, REQUIRE_COPY_IV))
queue             655 drivers/net/wireless/ralink/rt2x00/rt2x00queue.c 	if (rt2x00_has_cap_flag(queue->rt2x00dev, REQUIRE_L2PAD))
queue             657 drivers/net/wireless/ralink/rt2x00/rt2x00queue.c 	else if (rt2x00_has_cap_flag(queue->rt2x00dev, REQUIRE_DMA))
queue             663 drivers/net/wireless/ralink/rt2x00/rt2x00queue.c 	spin_lock(&queue->tx_lock);
queue             665 drivers/net/wireless/ralink/rt2x00/rt2x00queue.c 	if (unlikely(rt2x00queue_full(queue))) {
queue             666 drivers/net/wireless/ralink/rt2x00/rt2x00queue.c 		rt2x00_dbg(queue->rt2x00dev, "Dropping frame due to full tx queue %d\n",
queue             667 drivers/net/wireless/ralink/rt2x00/rt2x00queue.c 			   queue->qid);
queue             672 drivers/net/wireless/ralink/rt2x00/rt2x00queue.c 	entry = rt2x00queue_get_entry(queue, Q_INDEX);
queue             676 drivers/net/wireless/ralink/rt2x00/rt2x00queue.c 		rt2x00_err(queue->rt2x00dev,
queue             679 drivers/net/wireless/ralink/rt2x00/rt2x00queue.c 			   queue->qid, DRV_PROJECT);
queue             707 drivers/net/wireless/ralink/rt2x00/rt2x00queue.c 	rt2x00queue_kick_tx_queue(queue, &txdesc);
queue             715 drivers/net/wireless/ralink/rt2x00/rt2x00queue.c 	if (rt2x00queue_threshold(queue))
queue             716 drivers/net/wireless/ralink/rt2x00/rt2x00queue.c 		rt2x00queue_pause_queue(queue);
queue             718 drivers/net/wireless/ralink/rt2x00/rt2x00queue.c 	spin_unlock(&queue->tx_lock);
queue             786 drivers/net/wireless/ralink/rt2x00/rt2x00queue.c bool rt2x00queue_for_each_entry(struct data_queue *queue,
queue             799 drivers/net/wireless/ralink/rt2x00/rt2x00queue.c 		rt2x00_err(queue->rt2x00dev,
queue             811 drivers/net/wireless/ralink/rt2x00/rt2x00queue.c 	spin_lock_irqsave(&queue->index_lock, irqflags);
queue             812 drivers/net/wireless/ralink/rt2x00/rt2x00queue.c 	index_start = queue->index[start];
queue             813 drivers/net/wireless/ralink/rt2x00/rt2x00queue.c 	index_end = queue->index[end];
queue             814 drivers/net/wireless/ralink/rt2x00/rt2x00queue.c 	spin_unlock_irqrestore(&queue->index_lock, irqflags);
queue             822 drivers/net/wireless/ralink/rt2x00/rt2x00queue.c 			if (fn(&queue->entries[i], data))
queue             826 drivers/net/wireless/ralink/rt2x00/rt2x00queue.c 		for (i = index_start; i < queue->limit; i++) {
queue             827 drivers/net/wireless/ralink/rt2x00/rt2x00queue.c 			if (fn(&queue->entries[i], data))
queue             832 drivers/net/wireless/ralink/rt2x00/rt2x00queue.c 			if (fn(&queue->entries[i], data))
queue             841 drivers/net/wireless/ralink/rt2x00/rt2x00queue.c struct queue_entry *rt2x00queue_get_entry(struct data_queue *queue,
queue             848 drivers/net/wireless/ralink/rt2x00/rt2x00queue.c 		rt2x00_err(queue->rt2x00dev, "Entry requested from invalid index type (%d)\n",
queue             853 drivers/net/wireless/ralink/rt2x00/rt2x00queue.c 	spin_lock_irqsave(&queue->index_lock, irqflags);
queue             855 drivers/net/wireless/ralink/rt2x00/rt2x00queue.c 	entry = &queue->entries[queue->index[index]];
queue             857 drivers/net/wireless/ralink/rt2x00/rt2x00queue.c 	spin_unlock_irqrestore(&queue->index_lock, irqflags);
queue             865 drivers/net/wireless/ralink/rt2x00/rt2x00queue.c 	struct data_queue *queue = entry->queue;
queue             869 drivers/net/wireless/ralink/rt2x00/rt2x00queue.c 		rt2x00_err(queue->rt2x00dev,
queue             874 drivers/net/wireless/ralink/rt2x00/rt2x00queue.c 	spin_lock_irqsave(&queue->index_lock, irqflags);
queue             876 drivers/net/wireless/ralink/rt2x00/rt2x00queue.c 	queue->index[index]++;
queue             877 drivers/net/wireless/ralink/rt2x00/rt2x00queue.c 	if (queue->index[index] >= queue->limit)
queue             878 drivers/net/wireless/ralink/rt2x00/rt2x00queue.c 		queue->index[index] = 0;
queue             883 drivers/net/wireless/ralink/rt2x00/rt2x00queue.c 		queue->length++;
queue             885 drivers/net/wireless/ralink/rt2x00/rt2x00queue.c 		queue->length--;
queue             886 drivers/net/wireless/ralink/rt2x00/rt2x00queue.c 		queue->count++;
queue             889 drivers/net/wireless/ralink/rt2x00/rt2x00queue.c 	spin_unlock_irqrestore(&queue->index_lock, irqflags);
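The rt2x00queue_index_inc() lines above show the driver's ring arithmetic: an index is advanced, wrapped at queue->limit, and the fill level is adjusted depending on whether the producer (Q_INDEX) or the consumer (Q_INDEX_DONE) moved. A minimal userspace sketch of that logic, with illustrative names and the locking omitted:

    struct toy_queue {
        unsigned int index[2];   /* [0] producer (Q_INDEX), [1] consumer (Q_INDEX_DONE) */
        unsigned int limit;      /* ring size */
        unsigned int length;     /* entries currently in flight */
        unsigned long count;     /* total entries completed */
    };

    static void toy_index_inc(struct toy_queue *q, int done)
    {
        q->index[done]++;
        if (q->index[done] >= q->limit)
            q->index[done] = 0;          /* wrap around the ring */

        if (!done) {
            q->length++;                 /* one more frame pending */
        } else {
            q->length--;                 /* consumer caught up by one */
            q->count++;
        }
    }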
queue             892 drivers/net/wireless/ralink/rt2x00/rt2x00queue.c static void rt2x00queue_pause_queue_nocheck(struct data_queue *queue)
queue             894 drivers/net/wireless/ralink/rt2x00/rt2x00queue.c 	switch (queue->qid) {
queue             903 drivers/net/wireless/ralink/rt2x00/rt2x00queue.c 		ieee80211_stop_queue(queue->rt2x00dev->hw, queue->qid);
queue             909 drivers/net/wireless/ralink/rt2x00/rt2x00queue.c void rt2x00queue_pause_queue(struct data_queue *queue)
queue             911 drivers/net/wireless/ralink/rt2x00/rt2x00queue.c 	if (!test_bit(DEVICE_STATE_PRESENT, &queue->rt2x00dev->flags) ||
queue             912 drivers/net/wireless/ralink/rt2x00/rt2x00queue.c 	    !test_bit(QUEUE_STARTED, &queue->flags) ||
queue             913 drivers/net/wireless/ralink/rt2x00/rt2x00queue.c 	    test_and_set_bit(QUEUE_PAUSED, &queue->flags))
queue             916 drivers/net/wireless/ralink/rt2x00/rt2x00queue.c 	rt2x00queue_pause_queue_nocheck(queue);
queue             920 drivers/net/wireless/ralink/rt2x00/rt2x00queue.c void rt2x00queue_unpause_queue(struct data_queue *queue)
queue             922 drivers/net/wireless/ralink/rt2x00/rt2x00queue.c 	if (!test_bit(DEVICE_STATE_PRESENT, &queue->rt2x00dev->flags) ||
queue             923 drivers/net/wireless/ralink/rt2x00/rt2x00queue.c 	    !test_bit(QUEUE_STARTED, &queue->flags) ||
queue             924 drivers/net/wireless/ralink/rt2x00/rt2x00queue.c 	    !test_and_clear_bit(QUEUE_PAUSED, &queue->flags))
queue             927 drivers/net/wireless/ralink/rt2x00/rt2x00queue.c 	switch (queue->qid) {
queue             936 drivers/net/wireless/ralink/rt2x00/rt2x00queue.c 		ieee80211_wake_queue(queue->rt2x00dev->hw, queue->qid);
queue             943 drivers/net/wireless/ralink/rt2x00/rt2x00queue.c 		queue->rt2x00dev->ops->lib->kick_queue(queue);
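The pause/unpause pair above is guarded by test_and_set_bit()/test_and_clear_bit() on QUEUE_PAUSED, so only one of two racing callers acts on each transition. A hedged userspace model using C11 atomics in place of the kernel bitops (all names are illustrative):

    #include <stdatomic.h>
    #include <stdbool.h>

    enum { STARTED = 1u << 0, PAUSED = 1u << 1 };

    /* Userspace stand-ins for test_and_set_bit()/test_and_clear_bit(). */
    static bool test_and_set(atomic_uint *flags, unsigned int bit)
    {
        return atomic_fetch_or(flags, bit) & bit;
    }

    static bool test_and_clear(atomic_uint *flags, unsigned int bit)
    {
        return atomic_fetch_and(flags, ~bit) & bit;
    }

    static void toy_pause(atomic_uint *flags)
    {
        if (!(atomic_load(flags) & STARTED) || test_and_set(flags, PAUSED))
            return;             /* not started, or someone already paused */
        /* ...stop the corresponding mac80211 queue here... */
    }

    static void toy_unpause(atomic_uint *flags)
    {
        if (!(atomic_load(flags) & STARTED) || !test_and_clear(flags, PAUSED))
            return;             /* not started, or was not paused */
        /* ...wake the mac80211 queue / kick the hardware here... */
    }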
queue             950 drivers/net/wireless/ralink/rt2x00/rt2x00queue.c void rt2x00queue_start_queue(struct data_queue *queue)
queue             952 drivers/net/wireless/ralink/rt2x00/rt2x00queue.c 	mutex_lock(&queue->status_lock);
queue             954 drivers/net/wireless/ralink/rt2x00/rt2x00queue.c 	if (!test_bit(DEVICE_STATE_PRESENT, &queue->rt2x00dev->flags) ||
queue             955 drivers/net/wireless/ralink/rt2x00/rt2x00queue.c 	    test_and_set_bit(QUEUE_STARTED, &queue->flags)) {
queue             956 drivers/net/wireless/ralink/rt2x00/rt2x00queue.c 		mutex_unlock(&queue->status_lock);
queue             960 drivers/net/wireless/ralink/rt2x00/rt2x00queue.c 	set_bit(QUEUE_PAUSED, &queue->flags);
queue             962 drivers/net/wireless/ralink/rt2x00/rt2x00queue.c 	queue->rt2x00dev->ops->lib->start_queue(queue);
queue             964 drivers/net/wireless/ralink/rt2x00/rt2x00queue.c 	rt2x00queue_unpause_queue(queue);
queue             966 drivers/net/wireless/ralink/rt2x00/rt2x00queue.c 	mutex_unlock(&queue->status_lock);
queue             970 drivers/net/wireless/ralink/rt2x00/rt2x00queue.c void rt2x00queue_stop_queue(struct data_queue *queue)
queue             972 drivers/net/wireless/ralink/rt2x00/rt2x00queue.c 	mutex_lock(&queue->status_lock);
queue             974 drivers/net/wireless/ralink/rt2x00/rt2x00queue.c 	if (!test_and_clear_bit(QUEUE_STARTED, &queue->flags)) {
queue             975 drivers/net/wireless/ralink/rt2x00/rt2x00queue.c 		mutex_unlock(&queue->status_lock);
queue             979 drivers/net/wireless/ralink/rt2x00/rt2x00queue.c 	rt2x00queue_pause_queue_nocheck(queue);
queue             981 drivers/net/wireless/ralink/rt2x00/rt2x00queue.c 	queue->rt2x00dev->ops->lib->stop_queue(queue);
queue             983 drivers/net/wireless/ralink/rt2x00/rt2x00queue.c 	mutex_unlock(&queue->status_lock);
queue             987 drivers/net/wireless/ralink/rt2x00/rt2x00queue.c void rt2x00queue_flush_queue(struct data_queue *queue, bool drop)
queue             990 drivers/net/wireless/ralink/rt2x00/rt2x00queue.c 		(queue->qid == QID_AC_VO) ||
queue             991 drivers/net/wireless/ralink/rt2x00/rt2x00queue.c 		(queue->qid == QID_AC_VI) ||
queue             992 drivers/net/wireless/ralink/rt2x00/rt2x00queue.c 		(queue->qid == QID_AC_BE) ||
queue             993 drivers/net/wireless/ralink/rt2x00/rt2x00queue.c 		(queue->qid == QID_AC_BK);
queue             995 drivers/net/wireless/ralink/rt2x00/rt2x00queue.c 	if (rt2x00queue_empty(queue))
queue            1005 drivers/net/wireless/ralink/rt2x00/rt2x00queue.c 		queue->rt2x00dev->ops->lib->kick_queue(queue);
queue            1012 drivers/net/wireless/ralink/rt2x00/rt2x00queue.c 	if (likely(queue->rt2x00dev->ops->lib->flush_queue))
queue            1013 drivers/net/wireless/ralink/rt2x00/rt2x00queue.c 		queue->rt2x00dev->ops->lib->flush_queue(queue, drop);
queue            1018 drivers/net/wireless/ralink/rt2x00/rt2x00queue.c 	if (unlikely(!rt2x00queue_empty(queue)))
queue            1019 drivers/net/wireless/ralink/rt2x00/rt2x00queue.c 		rt2x00_warn(queue->rt2x00dev, "Queue %d failed to flush\n",
queue            1020 drivers/net/wireless/ralink/rt2x00/rt2x00queue.c 			    queue->qid);
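rt2x00queue_flush_queue() above follows a kick / driver-flush / verify shape. A condensed sketch under the assumption that the driver hook is optional; the callback type and names here are hypothetical:

    struct flush_ops { void (*flush)(void *queue, int drop); };

    static int toy_flush(void *queue, const struct flush_ops *ops, int drop,
                         int (*empty)(void *queue))
    {
        if (empty(queue))
            return 0;                  /* nothing queued, done */
        if (ops->flush)
            ops->flush(queue, drop);   /* most drivers provide a hook */
        return empty(queue) ? 0 : -1;  /* caller warns "Queue %d failed to flush" */
    }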
queue            1026 drivers/net/wireless/ralink/rt2x00/rt2x00queue.c 	struct data_queue *queue;
queue            1032 drivers/net/wireless/ralink/rt2x00/rt2x00queue.c 	tx_queue_for_each(rt2x00dev, queue)
queue            1033 drivers/net/wireless/ralink/rt2x00/rt2x00queue.c 		rt2x00queue_start_queue(queue);
queue            1041 drivers/net/wireless/ralink/rt2x00/rt2x00queue.c 	struct data_queue *queue;
queue            1051 drivers/net/wireless/ralink/rt2x00/rt2x00queue.c 	tx_queue_for_each(rt2x00dev, queue)
queue            1052 drivers/net/wireless/ralink/rt2x00/rt2x00queue.c 		rt2x00queue_stop_queue(queue);
queue            1060 drivers/net/wireless/ralink/rt2x00/rt2x00queue.c 	struct data_queue *queue;
queue            1062 drivers/net/wireless/ralink/rt2x00/rt2x00queue.c 	tx_queue_for_each(rt2x00dev, queue)
queue            1063 drivers/net/wireless/ralink/rt2x00/rt2x00queue.c 		rt2x00queue_flush_queue(queue, drop);
queue            1069 drivers/net/wireless/ralink/rt2x00/rt2x00queue.c static void rt2x00queue_reset(struct data_queue *queue)
queue            1074 drivers/net/wireless/ralink/rt2x00/rt2x00queue.c 	spin_lock_irqsave(&queue->index_lock, irqflags);
queue            1076 drivers/net/wireless/ralink/rt2x00/rt2x00queue.c 	queue->count = 0;
queue            1077 drivers/net/wireless/ralink/rt2x00/rt2x00queue.c 	queue->length = 0;
queue            1080 drivers/net/wireless/ralink/rt2x00/rt2x00queue.c 		queue->index[i] = 0;
queue            1082 drivers/net/wireless/ralink/rt2x00/rt2x00queue.c 	spin_unlock_irqrestore(&queue->index_lock, irqflags);
queue            1087 drivers/net/wireless/ralink/rt2x00/rt2x00queue.c 	struct data_queue *queue;
queue            1090 drivers/net/wireless/ralink/rt2x00/rt2x00queue.c 	queue_for_each(rt2x00dev, queue) {
queue            1091 drivers/net/wireless/ralink/rt2x00/rt2x00queue.c 		rt2x00queue_reset(queue);
queue            1093 drivers/net/wireless/ralink/rt2x00/rt2x00queue.c 		for (i = 0; i < queue->limit; i++)
queue            1094 drivers/net/wireless/ralink/rt2x00/rt2x00queue.c 			rt2x00dev->ops->lib->clear_entry(&queue->entries[i]);
queue            1098 drivers/net/wireless/ralink/rt2x00/rt2x00queue.c static int rt2x00queue_alloc_entries(struct data_queue *queue)
queue            1104 drivers/net/wireless/ralink/rt2x00/rt2x00queue.c 	rt2x00queue_reset(queue);
queue            1109 drivers/net/wireless/ralink/rt2x00/rt2x00queue.c 	entry_size = sizeof(*entries) + queue->priv_size;
queue            1110 drivers/net/wireless/ralink/rt2x00/rt2x00queue.c 	entries = kcalloc(queue->limit, entry_size, GFP_KERNEL);
queue            1118 drivers/net/wireless/ralink/rt2x00/rt2x00queue.c 	for (i = 0; i < queue->limit; i++) {
queue            1120 drivers/net/wireless/ralink/rt2x00/rt2x00queue.c 		entries[i].queue = queue;
queue            1124 drivers/net/wireless/ralink/rt2x00/rt2x00queue.c 		    QUEUE_ENTRY_PRIV_OFFSET(entries, i, queue->limit,
queue            1125 drivers/net/wireless/ralink/rt2x00/rt2x00queue.c 					    sizeof(*entries), queue->priv_size);
queue            1130 drivers/net/wireless/ralink/rt2x00/rt2x00queue.c 	queue->entries = entries;
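rt2x00queue_alloc_entries() packs the entry array and the per-entry driver private data into a single kcalloc() of limit * (sizeof(*entries) + priv_size); QUEUE_ENTRY_PRIV_OFFSET then recovers blob i from the region after the array. A compilable userspace approximation (struct and names are illustrative):

    #include <stdlib.h>

    struct toy_entry { void *priv_data; };

    static struct toy_entry *toy_alloc_entries(unsigned int limit, size_t priv_size)
    {
        struct toy_entry *e = calloc(limit, sizeof(*e) + priv_size);
        char *priv_base;
        unsigned int i;

        if (!e)
            return NULL;
        priv_base = (char *)&e[limit];   /* private blobs live after the array */
        for (i = 0; i < limit; i++)
            e[i].priv_data = priv_base + i * priv_size;
        return e;
    }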
queue            1135 drivers/net/wireless/ralink/rt2x00/rt2x00queue.c static void rt2x00queue_free_skbs(struct data_queue *queue)
queue            1139 drivers/net/wireless/ralink/rt2x00/rt2x00queue.c 	if (!queue->entries)
queue            1142 drivers/net/wireless/ralink/rt2x00/rt2x00queue.c 	for (i = 0; i < queue->limit; i++) {
queue            1143 drivers/net/wireless/ralink/rt2x00/rt2x00queue.c 		rt2x00queue_free_skb(&queue->entries[i]);
queue            1147 drivers/net/wireless/ralink/rt2x00/rt2x00queue.c static int rt2x00queue_alloc_rxskbs(struct data_queue *queue)
queue            1152 drivers/net/wireless/ralink/rt2x00/rt2x00queue.c 	for (i = 0; i < queue->limit; i++) {
queue            1153 drivers/net/wireless/ralink/rt2x00/rt2x00queue.c 		skb = rt2x00queue_alloc_rxskb(&queue->entries[i], GFP_KERNEL);
queue            1156 drivers/net/wireless/ralink/rt2x00/rt2x00queue.c 		queue->entries[i].skb = skb;
queue            1164 drivers/net/wireless/ralink/rt2x00/rt2x00queue.c 	struct data_queue *queue;
queue            1171 drivers/net/wireless/ralink/rt2x00/rt2x00queue.c 	tx_queue_for_each(rt2x00dev, queue) {
queue            1172 drivers/net/wireless/ralink/rt2x00/rt2x00queue.c 		status = rt2x00queue_alloc_entries(queue);
queue            1203 drivers/net/wireless/ralink/rt2x00/rt2x00queue.c 	struct data_queue *queue;
queue            1207 drivers/net/wireless/ralink/rt2x00/rt2x00queue.c 	queue_for_each(rt2x00dev, queue) {
queue            1208 drivers/net/wireless/ralink/rt2x00/rt2x00queue.c 		kfree(queue->entries);
queue            1209 drivers/net/wireless/ralink/rt2x00/rt2x00queue.c 		queue->entries = NULL;
queue            1214 drivers/net/wireless/ralink/rt2x00/rt2x00queue.c 			     struct data_queue *queue, enum data_queue_qid qid)
queue            1216 drivers/net/wireless/ralink/rt2x00/rt2x00queue.c 	mutex_init(&queue->status_lock);
queue            1217 drivers/net/wireless/ralink/rt2x00/rt2x00queue.c 	spin_lock_init(&queue->tx_lock);
queue            1218 drivers/net/wireless/ralink/rt2x00/rt2x00queue.c 	spin_lock_init(&queue->index_lock);
queue            1220 drivers/net/wireless/ralink/rt2x00/rt2x00queue.c 	queue->rt2x00dev = rt2x00dev;
queue            1221 drivers/net/wireless/ralink/rt2x00/rt2x00queue.c 	queue->qid = qid;
queue            1222 drivers/net/wireless/ralink/rt2x00/rt2x00queue.c 	queue->txop = 0;
queue            1223 drivers/net/wireless/ralink/rt2x00/rt2x00queue.c 	queue->aifs = 2;
queue            1224 drivers/net/wireless/ralink/rt2x00/rt2x00queue.c 	queue->cw_min = 5;
queue            1225 drivers/net/wireless/ralink/rt2x00/rt2x00queue.c 	queue->cw_max = 10;
queue            1227 drivers/net/wireless/ralink/rt2x00/rt2x00queue.c 	rt2x00dev->ops->queue_init(queue);
queue            1229 drivers/net/wireless/ralink/rt2x00/rt2x00queue.c 	queue->threshold = DIV_ROUND_UP(queue->limit, 10);
queue            1234 drivers/net/wireless/ralink/rt2x00/rt2x00queue.c 	struct data_queue *queue;
queue            1248 drivers/net/wireless/ralink/rt2x00/rt2x00queue.c 	queue = kcalloc(rt2x00dev->data_queues, sizeof(*queue), GFP_KERNEL);
queue            1249 drivers/net/wireless/ralink/rt2x00/rt2x00queue.c 	if (!queue)
queue            1255 drivers/net/wireless/ralink/rt2x00/rt2x00queue.c 	rt2x00dev->rx = queue;
queue            1256 drivers/net/wireless/ralink/rt2x00/rt2x00queue.c 	rt2x00dev->tx = &queue[1];
queue            1257 drivers/net/wireless/ralink/rt2x00/rt2x00queue.c 	rt2x00dev->bcn = &queue[1 + rt2x00dev->ops->tx_queues];
queue            1258 drivers/net/wireless/ralink/rt2x00/rt2x00queue.c 	rt2x00dev->atim = req_atim ? &queue[2 + rt2x00dev->ops->tx_queues] : NULL;
queue            1272 drivers/net/wireless/ralink/rt2x00/rt2x00queue.c 	tx_queue_for_each(rt2x00dev, queue)
queue            1273 drivers/net/wireless/ralink/rt2x00/rt2x00queue.c 		rt2x00queue_init(rt2x00dev, queue, qid++);
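rt2x00queue_allocate() above carves one contiguous array into the RX queue, the TX queues, the beacon queue and, when the hardware requires it, the ATIM queue, in that order. A minimal sketch of the same layout in plain C (toy types, no error handling beyond the allocation):

    #include <stdlib.h>

    struct toy_queue { int qid; };

    struct toy_dev {
        unsigned int tx_queues;     /* number of TX data queues */
        struct toy_queue *rx, *tx, *bcn, *atim;
    };

    static int toy_allocate(struct toy_dev *dev, int want_atim)
    {
        unsigned int n = 1 + dev->tx_queues + 1 + (want_atim ? 1 : 0);
        struct toy_queue *q = calloc(n, sizeof(*q));

        if (!q)
            return -1;

        dev->rx   = q;                          /* queue[0]            */
        dev->tx   = &q[1];                      /* queue[1..tx_queues] */
        dev->bcn  = &q[1 + dev->tx_queues];     /* after the TX queues */
        dev->atim = want_atim ? &q[2 + dev->tx_queues] : NULL;
        return 0;
    }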
queue             372 drivers/net/wireless/ralink/rt2x00/rt2x00queue.h 	struct data_queue *queue;
queue             580 drivers/net/wireless/ralink/rt2x00/rt2x00queue.h bool rt2x00queue_for_each_entry(struct data_queue *queue,
queue             591 drivers/net/wireless/ralink/rt2x00/rt2x00queue.h static inline int rt2x00queue_empty(struct data_queue *queue)
queue             593 drivers/net/wireless/ralink/rt2x00/rt2x00queue.h 	return queue->length == 0;
queue             600 drivers/net/wireless/ralink/rt2x00/rt2x00queue.h static inline int rt2x00queue_full(struct data_queue *queue)
queue             602 drivers/net/wireless/ralink/rt2x00/rt2x00queue.h 	return queue->length == queue->limit;
queue             609 drivers/net/wireless/ralink/rt2x00/rt2x00queue.h static inline int rt2x00queue_available(struct data_queue *queue)
queue             611 drivers/net/wireless/ralink/rt2x00/rt2x00queue.h 	return queue->limit - queue->length;
queue             618 drivers/net/wireless/ralink/rt2x00/rt2x00queue.h static inline int rt2x00queue_threshold(struct data_queue *queue)
queue             620 drivers/net/wireless/ralink/rt2x00/rt2x00queue.h 	return rt2x00queue_available(queue) < queue->threshold;
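The four rt2x00queue.h helpers above are pure arithmetic on length versus limit; crossing the threshold (free space below limit/10, per rt2x00queue_init) is what triggers the pause in the TX write path. Restated compactly:

    struct fill_state { unsigned int length, limit, threshold; };

    static int q_empty(const struct fill_state *q) { return q->length == 0; }
    static int q_full(const struct fill_state *q)  { return q->length == q->limit; }
    static unsigned int q_available(const struct fill_state *q)
    {
        return q->limit - q->length;
    }
    static int q_threshold(const struct fill_state *q)
    {
        /* true once free space drops below the pause threshold */
        return q_available(q) < q->threshold;
    }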
queue             249 drivers/net/wireless/ralink/rt2x00/rt2x00usb.c 	struct data_queue *queue;
queue             252 drivers/net/wireless/ralink/rt2x00/rt2x00usb.c 	tx_queue_for_each(rt2x00dev, queue) {
queue             253 drivers/net/wireless/ralink/rt2x00/rt2x00usb.c 		while (!rt2x00queue_empty(queue)) {
queue             254 drivers/net/wireless/ralink/rt2x00/rt2x00usb.c 			entry = rt2x00queue_get_entry(queue, Q_INDEX_DONE);
queue             268 drivers/net/wireless/ralink/rt2x00/rt2x00usb.c 	struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
queue             295 drivers/net/wireless/ralink/rt2x00/rt2x00usb.c 	struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
queue             323 drivers/net/wireless/ralink/rt2x00/rt2x00usb.c 			  usb_sndbulkpipe(usb_dev, entry->queue->usb_endpoint),
queue             360 drivers/net/wireless/ralink/rt2x00/rt2x00usb.c 		skbdesc->desc_len = entry->queue->desc_size;
queue             372 drivers/net/wireless/ralink/rt2x00/rt2x00usb.c 	struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
queue             382 drivers/net/wireless/ralink/rt2x00/rt2x00usb.c 	if (urb->actual_length < entry->queue->desc_size || urb->status)
queue             398 drivers/net/wireless/ralink/rt2x00/rt2x00usb.c 	struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
queue             409 drivers/net/wireless/ralink/rt2x00/rt2x00usb.c 			  usb_rcvbulkpipe(usb_dev, entry->queue->usb_endpoint),
queue             424 drivers/net/wireless/ralink/rt2x00/rt2x00usb.c void rt2x00usb_kick_queue(struct data_queue *queue)
queue             426 drivers/net/wireless/ralink/rt2x00/rt2x00usb.c 	switch (queue->qid) {
queue             431 drivers/net/wireless/ralink/rt2x00/rt2x00usb.c 		if (!rt2x00queue_empty(queue))
queue             432 drivers/net/wireless/ralink/rt2x00/rt2x00usb.c 			rt2x00queue_for_each_entry(queue,
queue             439 drivers/net/wireless/ralink/rt2x00/rt2x00usb.c 		if (!rt2x00queue_full(queue))
queue             440 drivers/net/wireless/ralink/rt2x00/rt2x00usb.c 			rt2x00queue_for_each_entry(queue,
queue             454 drivers/net/wireless/ralink/rt2x00/rt2x00usb.c 	struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
queue             466 drivers/net/wireless/ralink/rt2x00/rt2x00usb.c 	if ((entry->queue->qid == QID_BEACON) &&
queue             473 drivers/net/wireless/ralink/rt2x00/rt2x00usb.c void rt2x00usb_flush_queue(struct data_queue *queue, bool drop)
queue             479 drivers/net/wireless/ralink/rt2x00/rt2x00usb.c 		rt2x00queue_for_each_entry(queue, Q_INDEX_DONE, Q_INDEX, NULL,
queue             485 drivers/net/wireless/ralink/rt2x00/rt2x00usb.c 	switch (queue->qid) {
queue             490 drivers/net/wireless/ralink/rt2x00/rt2x00usb.c 		completion = &queue->rt2x00dev->txdone_work;
queue             493 drivers/net/wireless/ralink/rt2x00/rt2x00usb.c 		completion = &queue->rt2x00dev->rxdone_work;
queue             505 drivers/net/wireless/ralink/rt2x00/rt2x00usb.c 		if (rt2x00queue_empty(queue))
queue             512 drivers/net/wireless/ralink/rt2x00/rt2x00usb.c 		queue_work(queue->rt2x00dev->workqueue, completion);
queue             523 drivers/net/wireless/ralink/rt2x00/rt2x00usb.c static void rt2x00usb_watchdog_tx_dma(struct data_queue *queue)
queue             525 drivers/net/wireless/ralink/rt2x00/rt2x00usb.c 	rt2x00_warn(queue->rt2x00dev, "TX queue %d DMA timed out, invoke forced reset\n",
queue             526 drivers/net/wireless/ralink/rt2x00/rt2x00usb.c 		    queue->qid);
queue             528 drivers/net/wireless/ralink/rt2x00/rt2x00usb.c 	rt2x00queue_stop_queue(queue);
queue             529 drivers/net/wireless/ralink/rt2x00/rt2x00usb.c 	rt2x00queue_flush_queue(queue, true);
queue             530 drivers/net/wireless/ralink/rt2x00/rt2x00usb.c 	rt2x00queue_start_queue(queue);
queue             533 drivers/net/wireless/ralink/rt2x00/rt2x00usb.c static int rt2x00usb_dma_timeout(struct data_queue *queue)
queue             537 drivers/net/wireless/ralink/rt2x00/rt2x00usb.c 	entry = rt2x00queue_get_entry(queue, Q_INDEX_DMA_DONE);
queue             543 drivers/net/wireless/ralink/rt2x00/rt2x00usb.c 	struct data_queue *queue;
queue             545 drivers/net/wireless/ralink/rt2x00/rt2x00usb.c 	tx_queue_for_each(rt2x00dev, queue) {
queue             546 drivers/net/wireless/ralink/rt2x00/rt2x00usb.c 		if (!rt2x00queue_empty(queue)) {
queue             547 drivers/net/wireless/ralink/rt2x00/rt2x00usb.c 			if (rt2x00usb_dma_timeout(queue))
queue             548 drivers/net/wireless/ralink/rt2x00/rt2x00usb.c 				rt2x00usb_watchdog_tx_dma(queue);
queue             571 drivers/net/wireless/ralink/rt2x00/rt2x00usb.c 	if (entry->queue->qid == QID_RX)
queue             576 drivers/net/wireless/ralink/rt2x00/rt2x00usb.c static void rt2x00usb_assign_endpoint(struct data_queue *queue,
queue             579 drivers/net/wireless/ralink/rt2x00/rt2x00usb.c 	struct usb_device *usb_dev = to_usb_device_intf(queue->rt2x00dev->dev);
queue             582 drivers/net/wireless/ralink/rt2x00/rt2x00usb.c 	queue->usb_endpoint = usb_endpoint_num(ep_desc);
queue             584 drivers/net/wireless/ralink/rt2x00/rt2x00usb.c 	if (queue->qid == QID_RX) {
queue             585 drivers/net/wireless/ralink/rt2x00/rt2x00usb.c 		pipe = usb_rcvbulkpipe(usb_dev, queue->usb_endpoint);
queue             586 drivers/net/wireless/ralink/rt2x00/rt2x00usb.c 		queue->usb_maxpacket = usb_maxpacket(usb_dev, pipe, 0);
queue             588 drivers/net/wireless/ralink/rt2x00/rt2x00usb.c 		pipe = usb_sndbulkpipe(usb_dev, queue->usb_endpoint);
queue             589 drivers/net/wireless/ralink/rt2x00/rt2x00usb.c 		queue->usb_maxpacket = usb_maxpacket(usb_dev, pipe, 1);
queue             592 drivers/net/wireless/ralink/rt2x00/rt2x00usb.c 	if (!queue->usb_maxpacket)
queue             593 drivers/net/wireless/ralink/rt2x00/rt2x00usb.c 		queue->usb_maxpacket = 1;
queue             601 drivers/net/wireless/ralink/rt2x00/rt2x00usb.c 	struct data_queue *queue = rt2x00dev->tx;
queue             617 drivers/net/wireless/ralink/rt2x00/rt2x00usb.c 			   (queue != queue_end(rt2x00dev))) {
queue             618 drivers/net/wireless/ralink/rt2x00/rt2x00usb.c 			rt2x00usb_assign_endpoint(queue, ep_desc);
queue             619 drivers/net/wireless/ralink/rt2x00/rt2x00usb.c 			queue = queue_next(queue);
queue             638 drivers/net/wireless/ralink/rt2x00/rt2x00usb.c 	txall_queue_for_each(rt2x00dev, queue) {
queue             639 drivers/net/wireless/ralink/rt2x00/rt2x00usb.c 		if (!queue->usb_endpoint)
queue             640 drivers/net/wireless/ralink/rt2x00/rt2x00usb.c 			rt2x00usb_assign_endpoint(queue, tx_ep_desc);
queue             646 drivers/net/wireless/ralink/rt2x00/rt2x00usb.c static int rt2x00usb_alloc_entries(struct data_queue *queue)
queue             648 drivers/net/wireless/ralink/rt2x00/rt2x00usb.c 	struct rt2x00_dev *rt2x00dev = queue->rt2x00dev;
queue             653 drivers/net/wireless/ralink/rt2x00/rt2x00usb.c 	for (i = 0; i < queue->limit; i++) {
queue             654 drivers/net/wireless/ralink/rt2x00/rt2x00usb.c 		entry_priv = queue->entries[i].priv_data;
queue             665 drivers/net/wireless/ralink/rt2x00/rt2x00usb.c 	if (queue->qid != QID_BEACON ||
queue             669 drivers/net/wireless/ralink/rt2x00/rt2x00usb.c 	for (i = 0; i < queue->limit; i++) {
queue             670 drivers/net/wireless/ralink/rt2x00/rt2x00usb.c 		bcn_priv = queue->entries[i].priv_data;
queue             679 drivers/net/wireless/ralink/rt2x00/rt2x00usb.c static void rt2x00usb_free_entries(struct data_queue *queue)
queue             681 drivers/net/wireless/ralink/rt2x00/rt2x00usb.c 	struct rt2x00_dev *rt2x00dev = queue->rt2x00dev;
queue             686 drivers/net/wireless/ralink/rt2x00/rt2x00usb.c 	if (!queue->entries)
queue             689 drivers/net/wireless/ralink/rt2x00/rt2x00usb.c 	for (i = 0; i < queue->limit; i++) {
queue             690 drivers/net/wireless/ralink/rt2x00/rt2x00usb.c 		entry_priv = queue->entries[i].priv_data;
queue             700 drivers/net/wireless/ralink/rt2x00/rt2x00usb.c 	if (queue->qid != QID_BEACON ||
queue             704 drivers/net/wireless/ralink/rt2x00/rt2x00usb.c 	for (i = 0; i < queue->limit; i++) {
queue             705 drivers/net/wireless/ralink/rt2x00/rt2x00usb.c 		bcn_priv = queue->entries[i].priv_data;
queue             713 drivers/net/wireless/ralink/rt2x00/rt2x00usb.c 	struct data_queue *queue;
queue             726 drivers/net/wireless/ralink/rt2x00/rt2x00usb.c 	queue_for_each(rt2x00dev, queue) {
queue             727 drivers/net/wireless/ralink/rt2x00/rt2x00usb.c 		status = rt2x00usb_alloc_entries(queue);
queue             743 drivers/net/wireless/ralink/rt2x00/rt2x00usb.c 	struct data_queue *queue;
queue             750 drivers/net/wireless/ralink/rt2x00/rt2x00usb.c 	queue_for_each(rt2x00dev, queue)
queue             751 drivers/net/wireless/ralink/rt2x00/rt2x00usb.c 		rt2x00usb_free_entries(queue);
queue             365 drivers/net/wireless/ralink/rt2x00/rt2x00usb.h void rt2x00usb_kick_queue(struct data_queue *queue);
queue             376 drivers/net/wireless/ralink/rt2x00/rt2x00usb.h void rt2x00usb_flush_queue(struct data_queue *queue, bool drop);
queue            1041 drivers/net/wireless/ralink/rt2x00/rt61pci.c static void rt61pci_start_queue(struct data_queue *queue)
queue            1043 drivers/net/wireless/ralink/rt2x00/rt61pci.c 	struct rt2x00_dev *rt2x00dev = queue->rt2x00dev;
queue            1046 drivers/net/wireless/ralink/rt2x00/rt61pci.c 	switch (queue->qid) {
queue            1064 drivers/net/wireless/ralink/rt2x00/rt61pci.c static void rt61pci_kick_queue(struct data_queue *queue)
queue            1066 drivers/net/wireless/ralink/rt2x00/rt61pci.c 	struct rt2x00_dev *rt2x00dev = queue->rt2x00dev;
queue            1069 drivers/net/wireless/ralink/rt2x00/rt61pci.c 	switch (queue->qid) {
queue            1095 drivers/net/wireless/ralink/rt2x00/rt61pci.c static void rt61pci_stop_queue(struct data_queue *queue)
queue            1097 drivers/net/wireless/ralink/rt2x00/rt61pci.c 	struct rt2x00_dev *rt2x00dev = queue->rt2x00dev;
queue            1100 drivers/net/wireless/ralink/rt2x00/rt61pci.c 	switch (queue->qid) {
queue            1292 drivers/net/wireless/ralink/rt2x00/rt61pci.c 	if (entry->queue->qid == QID_RX) {
queue            1310 drivers/net/wireless/ralink/rt2x00/rt61pci.c 	if (entry->queue->qid == QID_RX) {
queue            1787 drivers/net/wireless/ralink/rt2x00/rt61pci.c 	rt2x00_set_field32(&word, TXD_W1_HOST_Q_ID, entry->queue->qid);
queue            1788 drivers/net/wireless/ralink/rt2x00/rt61pci.c 	rt2x00_set_field32(&word, TXD_W1_AIFSN, entry->queue->aifs);
queue            1789 drivers/net/wireless/ralink/rt2x00/rt61pci.c 	rt2x00_set_field32(&word, TXD_W1_CWMIN, entry->queue->cw_min);
queue            1790 drivers/net/wireless/ralink/rt2x00/rt61pci.c 	rt2x00_set_field32(&word, TXD_W1_CWMAX, entry->queue->cw_max);
queue            1812 drivers/net/wireless/ralink/rt2x00/rt61pci.c 	rt2x00_set_field32(&word, TXD_W5_PID_TYPE, entry->queue->qid);
queue            1815 drivers/net/wireless/ralink/rt2x00/rt61pci.c 			   TXPOWER_TO_DEV(entry->queue->rt2x00dev->tx_power));
queue            1819 drivers/net/wireless/ralink/rt2x00/rt61pci.c 	if (entry->queue->qid != QID_BEACON) {
queue            1865 drivers/net/wireless/ralink/rt2x00/rt61pci.c 	skbdesc->desc_len = (entry->queue->qid == QID_BEACON) ? TXINFO_SIZE :
queue            1875 drivers/net/wireless/ralink/rt2x00/rt61pci.c 	struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
queue            1939 drivers/net/wireless/ralink/rt2x00/rt61pci.c 	struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
queue            1997 drivers/net/wireless/ralink/rt2x00/rt61pci.c 	struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
queue            2061 drivers/net/wireless/ralink/rt2x00/rt61pci.c 	struct data_queue *queue;
queue            2091 drivers/net/wireless/ralink/rt2x00/rt61pci.c 		queue = rt2x00queue_get_tx_queue(rt2x00dev, type);
queue            2092 drivers/net/wireless/ralink/rt2x00/rt61pci.c 		if (unlikely(!queue))
queue            2100 drivers/net/wireless/ralink/rt2x00/rt61pci.c 		if (unlikely(index >= queue->limit))
queue            2103 drivers/net/wireless/ralink/rt2x00/rt61pci.c 		entry = &queue->entries[index];
queue            2111 drivers/net/wireless/ralink/rt2x00/rt61pci.c 		entry_done = rt2x00queue_get_entry(queue, Q_INDEX_DONE);
queue            2120 drivers/net/wireless/ralink/rt2x00/rt61pci.c 			entry_done = rt2x00queue_get_entry(queue, Q_INDEX_DONE);
queue            2802 drivers/net/wireless/ralink/rt2x00/rt61pci.c 	struct data_queue *queue;
queue            2825 drivers/net/wireless/ralink/rt2x00/rt61pci.c 	queue = rt2x00queue_get_tx_queue(rt2x00dev, queue_idx);
queue            2833 drivers/net/wireless/ralink/rt2x00/rt61pci.c 	rt2x00_set_field32(&reg, field, queue->txop);
queue            2841 drivers/net/wireless/ralink/rt2x00/rt61pci.c 	rt2x00_set_field32(&reg, field, queue->aifs);
queue            2845 drivers/net/wireless/ralink/rt2x00/rt61pci.c 	rt2x00_set_field32(&reg, field, queue->cw_min);
queue            2849 drivers/net/wireless/ralink/rt2x00/rt61pci.c 	rt2x00_set_field32(&reg, field, queue->cw_max);
queue            2928 drivers/net/wireless/ralink/rt2x00/rt61pci.c static void rt61pci_queue_init(struct data_queue *queue)
queue            2930 drivers/net/wireless/ralink/rt2x00/rt61pci.c 	switch (queue->qid) {
queue            2932 drivers/net/wireless/ralink/rt2x00/rt61pci.c 		queue->limit = 32;
queue            2933 drivers/net/wireless/ralink/rt2x00/rt61pci.c 		queue->data_size = DATA_FRAME_SIZE;
queue            2934 drivers/net/wireless/ralink/rt2x00/rt61pci.c 		queue->desc_size = RXD_DESC_SIZE;
queue            2935 drivers/net/wireless/ralink/rt2x00/rt61pci.c 		queue->priv_size = sizeof(struct queue_entry_priv_mmio);
queue            2942 drivers/net/wireless/ralink/rt2x00/rt61pci.c 		queue->limit = 32;
queue            2943 drivers/net/wireless/ralink/rt2x00/rt61pci.c 		queue->data_size = DATA_FRAME_SIZE;
queue            2944 drivers/net/wireless/ralink/rt2x00/rt61pci.c 		queue->desc_size = TXD_DESC_SIZE;
queue            2945 drivers/net/wireless/ralink/rt2x00/rt61pci.c 		queue->priv_size = sizeof(struct queue_entry_priv_mmio);
queue            2949 drivers/net/wireless/ralink/rt2x00/rt61pci.c 		queue->limit = 4;
queue            2950 drivers/net/wireless/ralink/rt2x00/rt61pci.c 		queue->data_size = 0; /* No DMA required for beacons */
queue            2951 drivers/net/wireless/ralink/rt2x00/rt61pci.c 		queue->desc_size = TXINFO_SIZE;
queue            2952 drivers/net/wireless/ralink/rt2x00/rt61pci.c 		queue->priv_size = sizeof(struct queue_entry_priv_mmio);
queue            1013 drivers/net/wireless/ralink/rt2x00/rt73usb.c static void rt73usb_start_queue(struct data_queue *queue)
queue            1015 drivers/net/wireless/ralink/rt2x00/rt73usb.c 	struct rt2x00_dev *rt2x00dev = queue->rt2x00dev;
queue            1018 drivers/net/wireless/ralink/rt2x00/rt73usb.c 	switch (queue->qid) {
queue            1036 drivers/net/wireless/ralink/rt2x00/rt73usb.c static void rt73usb_stop_queue(struct data_queue *queue)
queue            1038 drivers/net/wireless/ralink/rt2x00/rt73usb.c 	struct rt2x00_dev *rt2x00dev = queue->rt2x00dev;
queue            1041 drivers/net/wireless/ralink/rt2x00/rt73usb.c 	switch (queue->qid) {
queue            1481 drivers/net/wireless/ralink/rt2x00/rt73usb.c 	rt2x00_set_field32(&word, TXD_W1_HOST_Q_ID, entry->queue->qid);
queue            1482 drivers/net/wireless/ralink/rt2x00/rt73usb.c 	rt2x00_set_field32(&word, TXD_W1_AIFSN, entry->queue->aifs);
queue            1483 drivers/net/wireless/ralink/rt2x00/rt73usb.c 	rt2x00_set_field32(&word, TXD_W1_CWMIN, entry->queue->cw_min);
queue            1484 drivers/net/wireless/ralink/rt2x00/rt73usb.c 	rt2x00_set_field32(&word, TXD_W1_CWMAX, entry->queue->cw_max);
queue            1506 drivers/net/wireless/ralink/rt2x00/rt73usb.c 			   TXPOWER_TO_DEV(entry->queue->rt2x00dev->tx_power));
queue            1524 drivers/net/wireless/ralink/rt2x00/rt73usb.c 	struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
queue            1590 drivers/net/wireless/ralink/rt2x00/rt73usb.c 	struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
queue            1624 drivers/net/wireless/ralink/rt2x00/rt73usb.c 	length += (4 * !(length % entry->queue->usb_maxpacket));
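The rt73usb line above pads the TX length by 4 bytes whenever it is an exact multiple of the bulk endpoint's max packet size, so a transfer never ends exactly on a USB packet boundary. As a standalone function (the zero clamp mirrors rt2x00usb_assign_endpoint(), which forces usb_maxpacket to at least 1):

    static unsigned int pad_tx_len(unsigned int length, unsigned int maxpacket)
    {
        if (maxpacket == 0)
            maxpacket = 1;                    /* defensive, as in the driver */
        length += 4 * !(length % maxpacket);  /* +4 only on an exact multiple */
        return length;
    }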
queue            1670 drivers/net/wireless/ralink/rt2x00/rt73usb.c 	struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
queue            1742 drivers/net/wireless/ralink/rt2x00/rt73usb.c 	skb_pull(entry->skb, entry->queue->desc_size);
queue            2225 drivers/net/wireless/ralink/rt2x00/rt73usb.c 	struct data_queue *queue;
queue            2248 drivers/net/wireless/ralink/rt2x00/rt73usb.c 	queue = rt2x00queue_get_tx_queue(rt2x00dev, queue_idx);
queue            2256 drivers/net/wireless/ralink/rt2x00/rt73usb.c 	rt2x00_set_field32(&reg, field, queue->txop);
queue            2264 drivers/net/wireless/ralink/rt2x00/rt73usb.c 	rt2x00_set_field32(&reg, field, queue->aifs);
queue            2268 drivers/net/wireless/ralink/rt2x00/rt73usb.c 	rt2x00_set_field32(&reg, field, queue->cw_min);
queue            2272 drivers/net/wireless/ralink/rt2x00/rt73usb.c 	rt2x00_set_field32(&reg, field, queue->cw_max);
queue            2348 drivers/net/wireless/ralink/rt2x00/rt73usb.c static void rt73usb_queue_init(struct data_queue *queue)
queue            2350 drivers/net/wireless/ralink/rt2x00/rt73usb.c 	switch (queue->qid) {
queue            2352 drivers/net/wireless/ralink/rt2x00/rt73usb.c 		queue->limit = 32;
queue            2353 drivers/net/wireless/ralink/rt2x00/rt73usb.c 		queue->data_size = DATA_FRAME_SIZE;
queue            2354 drivers/net/wireless/ralink/rt2x00/rt73usb.c 		queue->desc_size = RXD_DESC_SIZE;
queue            2355 drivers/net/wireless/ralink/rt2x00/rt73usb.c 		queue->priv_size = sizeof(struct queue_entry_priv_usb);
queue            2362 drivers/net/wireless/ralink/rt2x00/rt73usb.c 		queue->limit = 32;
queue            2363 drivers/net/wireless/ralink/rt2x00/rt73usb.c 		queue->data_size = DATA_FRAME_SIZE;
queue            2364 drivers/net/wireless/ralink/rt2x00/rt73usb.c 		queue->desc_size = TXD_DESC_SIZE;
queue            2365 drivers/net/wireless/ralink/rt2x00/rt73usb.c 		queue->priv_size = sizeof(struct queue_entry_priv_usb);
queue            2369 drivers/net/wireless/ralink/rt2x00/rt73usb.c 		queue->limit = 4;
queue            2370 drivers/net/wireless/ralink/rt2x00/rt73usb.c 		queue->data_size = MGMT_FRAME_SIZE;
queue            2371 drivers/net/wireless/ralink/rt2x00/rt73usb.c 		queue->desc_size = TXINFO_SIZE;
queue            2372 drivers/net/wireless/ralink/rt2x00/rt73usb.c 		queue->priv_size = sizeof(struct queue_entry_priv_usb);
queue             347 drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c 	while (skb_queue_len(&ring->queue)) {
queue             357 drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c 		skb = __skb_dequeue(&ring->queue);
queue             371 drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c 		if (ring->entries - skb_queue_len(&ring->queue) == 2)
queue             544 drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c 	idx = (ring->idx + skb_queue_len(&ring->queue)) % ring->entries;
queue             573 drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c 	__skb_queue_tail(&ring->queue, skb);
queue             574 drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c 	if (ring->entries - skb_queue_len(&ring->queue) < 2)
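The rtl8180 TX path above derives the next descriptor slot from the hardware index plus the software queue depth, modulo the ring size, and stops the mac80211 queue once fewer than two slots remain free. A toy restatement:

    struct toy_ring { unsigned int idx, entries, queued; };

    static unsigned int toy_next_slot(const struct toy_ring *r)
    {
        return (r->idx + r->queued) % r->entries;
    }

    static int toy_should_stop(const struct toy_ring *r)
    {
        return r->entries - r->queued < 2;   /* keep one slot of headroom */
    }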
queue            1088 drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c 	skb_queue_head_init(&priv->tx_ring[prio].queue);
queue            1102 drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c 	while (skb_queue_len(&ring->queue)) {
queue            1104 drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c 		struct sk_buff *skb = __skb_dequeue(&ring->queue);
queue            1375 drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c static void rtl8187se_conf_ac_parm(struct ieee80211_hw *dev, u8 queue)
queue            1387 drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c 	params = &priv->queue_param[queue];
queue            1402 drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c 	switch (queue) {
queue            1419 drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c 			    struct ieee80211_vif *vif, u16 queue,
queue            1433 drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c 		priv->queue_param[queue] = *params;
queue            1434 drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c 		rtl8187se_conf_ac_parm(dev, queue);
queue              93 drivers/net/wireless/realtek/rtl818x/rtl8180/rtl8180.h 	struct sk_buff_head queue;
queue             199 drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c 			skb_queue_tail(&priv->b_tx_status.queue, skb);
queue             202 drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c 			while (skb_queue_len(&priv->b_tx_status.queue) > 5) {
queue             208 drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c 				old_skb = skb_dequeue(&priv->b_tx_status.queue);
queue             222 drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c 		skb_queue_tail(&priv->b_tx_status.queue, skb);
queue             508 drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c 		spin_lock_irqsave(&priv->b_tx_status.queue.lock, flags);
queue             510 drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c 		skb_queue_reverse_walk(&priv->b_tx_status.queue, iter) {
queue             532 drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c 			__skb_unlink(skb, &priv->b_tx_status.queue);
queue             539 drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c 		spin_unlock_irqrestore(&priv->b_tx_status.queue.lock, flags);
queue             903 drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c 	length = skb_queue_len(&priv->b_tx_status.queue);
queue             909 drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c 	while (skb_queue_len(&priv->b_tx_status.queue) > 0) {
queue             912 drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c 		old_skb = skb_dequeue(&priv->b_tx_status.queue);
queue            1044 drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c 	while ((skb = skb_dequeue(&priv->b_tx_status.queue)))
queue            1199 drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c 		int queue;
queue            1233 drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c 		for (queue = 0; queue < 4; queue++)
queue            1234 drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c 			rtl818x_iowrite8(priv, (u8 *) rtl8187b_ac_addr[queue],
queue            1235 drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c 					 priv->aifsn[queue] * priv->slot_time +
queue            1341 drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c 			   struct ieee80211_vif *vif, u16 queue,
queue            1347 drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c 	if (queue > 3)
queue            1354 drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c 		priv->aifsn[queue] = params->aifs;
queue            1363 drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c 		rtl818x_iowrite32(priv, rtl8187b_ac_addr[queue],
queue            1368 drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c 		if (queue != 0)
queue            1630 drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c 	skb_queue_head_init(&priv->b_tx_status.queue);
queue             145 drivers/net/wireless/realtek/rtl818x/rtl8187/rtl8187.h 		struct sk_buff_head queue;
queue            4581 drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c static u32 rtl8xxxu_80211_to_rtl_queue(u32 queue)
queue            4585 drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c 	switch (queue) {
queue            4608 drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c 	u32 queue;
queue            4611 drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c 		queue = TXDESC_QUEUE_MGNT;
queue            4613 drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c 		queue = rtl8xxxu_80211_to_rtl_queue(skb_get_queue_mapping(skb));
queue            4615 drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c 	return queue;
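rtl8xxxu_queue_select() above routes management frames to the dedicated MGNT queue and maps everything else from the mac80211 AC index to a hardware queue id. A condensed model (the queue constants here are illustrative, not the chip's):

    enum { TOYQ_VO, TOYQ_VI, TOYQ_BE, TOYQ_BK, TOYQ_MGNT };

    static unsigned int toy_queue_select(int is_mgmt, unsigned int ac)
    {
        if (is_mgmt)
            return TOYQ_MGNT;
        switch (ac) {           /* mac80211 ACs: 0 = VO .. 3 = BK */
        case 0: return TOYQ_VO;
        case 1: return TOYQ_VI;
        case 2: return TOYQ_BE;
        default: return TOYQ_BK;
        }
    }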
queue            4919 drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c 	u32 queue, rts_rate;
queue            4965 drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c 	queue = rtl8xxxu_queue_select(hw, skb);
queue            4966 drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c 	tx_desc->txdw1 = cpu_to_le32(queue << TXDESC_QUEUE_SHIFT);
queue            5020 drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c 	usb_fill_bulk_urb(&tx_urb->urb, priv->udev, priv->pipe_out[queue],
queue            5544 drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c 			    struct ieee80211_vif *vif, u16 queue,
queue            5562 drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c 		__func__, queue, val32, param->acm, acm_ctrl);
queue            5564 drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c 	switch (queue) {
queue             561 drivers/net/wireless/realtek/rtlwifi/base.c 	skb_queue_head_init(&rtlpriv->tx_report.queue);
queue            1573 drivers/net/wireless/realtek/rtlwifi/base.c 	__skb_queue_tail(&tx_report->queue, skb);
queue            1652 drivers/net/wireless/realtek/rtlwifi/base.c 	struct sk_buff_head *queue = &tx_report->queue;
queue            1669 drivers/net/wireless/realtek/rtlwifi/base.c 	skb_queue_walk(queue, skb) {
queue            1672 drivers/net/wireless/realtek/rtlwifi/base.c 			skb_unlink(skb, queue);
queue            1950 drivers/net/wireless/realtek/rtlwifi/base.c 	struct sk_buff_head *queue = &tx_report->queue;
queue            1954 drivers/net/wireless/realtek/rtlwifi/base.c 	skb_queue_walk_safe(queue, skb, tmp) {
queue            1959 drivers/net/wireless/realtek/rtlwifi/base.c 		skb_unlink(skb, queue);
queue             954 drivers/net/wireless/realtek/rtlwifi/core.c static int _rtl_get_hal_qnum(u16 queue)
queue             958 drivers/net/wireless/realtek/rtlwifi/core.c 	switch (queue) {
queue             983 drivers/net/wireless/realtek/rtlwifi/core.c 			  struct ieee80211_vif *vif, u16 queue,
queue             990 drivers/net/wireless/realtek/rtlwifi/core.c 	if (queue >= AC_MAX) {
queue             992 drivers/net/wireless/realtek/rtlwifi/core.c 			 "queue number %d is incorrect!\n", queue);
queue             996 drivers/net/wireless/realtek/rtlwifi/core.c 	aci = _rtl_get_hal_qnum(queue);
queue            1858 drivers/net/wireless/realtek/rtlwifi/core.c 	pskb = __skb_dequeue(&ring->queue);
queue            1867 drivers/net/wireless/realtek/rtlwifi/core.c 	__skb_queue_tail(&ring->queue, skb);
queue             504 drivers/net/wireless/realtek/rtlwifi/pci.c 			    (ring->entries - skb_queue_len(&ring->queue) >
queue             533 drivers/net/wireless/realtek/rtlwifi/pci.c 	while (skb_queue_len(&ring->queue)) {
queue             549 drivers/net/wireless/realtek/rtlwifi/pci.c 		skb = __skb_dequeue(&ring->queue);
queue             563 drivers/net/wireless/realtek/rtlwifi/pci.c 			 skb_queue_len(&ring->queue),
queue             610 drivers/net/wireless/realtek/rtlwifi/pci.c 		if ((ring->entries - skb_queue_len(&ring->queue)) <= 4) {
queue             614 drivers/net/wireless/realtek/rtlwifi/pci.c 				 skb_queue_len(&ring->queue));
queue            1089 drivers/net/wireless/realtek/rtlwifi/pci.c 	pskb = __skb_dequeue(&ring->queue);
queue            1116 drivers/net/wireless/realtek/rtlwifi/pci.c 	__skb_queue_tail(&ring->queue, pskb);
queue            1252 drivers/net/wireless/realtek/rtlwifi/pci.c 	skb_queue_head_init(&rtlpci->tx_ring[prio].queue);
queue            1341 drivers/net/wireless/realtek/rtlwifi/pci.c 	while (skb_queue_len(&ring->queue)) {
queue            1343 drivers/net/wireless/realtek/rtlwifi/pci.c 		struct sk_buff *skb = __skb_dequeue(&ring->queue);
queue            1520 drivers/net/wireless/realtek/rtlwifi/pci.c 			while (skb_queue_len(&ring->queue)) {
queue            1523 drivers/net/wireless/realtek/rtlwifi/pci.c 					__skb_dequeue(&ring->queue);
queue            1638 drivers/net/wireless/realtek/rtlwifi/pci.c 			idx = (ring->idx + skb_queue_len(&ring->queue)) %
queue            1655 drivers/net/wireless/realtek/rtlwifi/pci.c 				 skb_queue_len(&ring->queue));
queue            1677 drivers/net/wireless/realtek/rtlwifi/pci.c 	__skb_queue_tail(&ring->queue, skb);
queue            1687 drivers/net/wireless/realtek/rtlwifi/pci.c 	if ((ring->entries - skb_queue_len(&ring->queue)) < 2 &&
queue            1692 drivers/net/wireless/realtek/rtlwifi/pci.c 			 skb_queue_len(&ring->queue));
queue            1725 drivers/net/wireless/realtek/rtlwifi/pci.c 		queue_len = skb_queue_len(&ring->queue);
queue             150 drivers/net/wireless/realtek/rtlwifi/pci.h 	struct sk_buff_head queue;
queue              74 drivers/net/wireless/realtek/rtlwifi/rtl8188ee/hw.c 	while (skb_queue_len(&ring->queue)) {
queue              76 drivers/net/wireless/realtek/rtlwifi/rtl8188ee/hw.c 		struct sk_buff *skb = __skb_dequeue(&ring->queue);
queue             173 drivers/net/wireless/realtek/rtlwifi/rtl8188ee/hw.c 	u8 queue;
queue             185 drivers/net/wireless/realtek/rtlwifi/rtl8188ee/hw.c 	for (queue = 0; queue < RTL_PCI_MAX_TX_QUEUE_COUNT; queue++) {
queue             186 drivers/net/wireless/realtek/rtlwifi/rtl8188ee/hw.c 		ring = &rtlpci->tx_ring[queue];
queue             187 drivers/net/wireless/realtek/rtlwifi/rtl8188ee/hw.c 		if (skb_queue_len(&ring->queue)) {
queue            2227 drivers/net/wireless/realtek/rtlwifi/rtl8188ee/phy.c 			    skb_queue_len(&ring->queue) == 0) {
queue            2234 drivers/net/wireless/realtek/rtlwifi/rtl8188ee/phy.c 					 skb_queue_len(&ring->queue));
queue            2244 drivers/net/wireless/realtek/rtlwifi/rtl8188ee/phy.c 					  skb_queue_len(&ring->queue));
queue            2270 drivers/net/wireless/realtek/rtlwifi/rtl8188ee/phy.c 				if (skb_queue_len(&ring->queue) == 0) {
queue            2277 drivers/net/wireless/realtek/rtlwifi/rtl8188ee/phy.c 						 skb_queue_len(&ring->queue));
queue            2287 drivers/net/wireless/realtek/rtlwifi/rtl8188ee/phy.c 						 skb_queue_len(&ring->queue));
queue             497 drivers/net/wireless/realtek/rtlwifi/rtl8192ce/phy.c 				    skb_queue_len(&ring->queue) == 0) {
queue             504 drivers/net/wireless/realtek/rtlwifi/rtl8192ce/phy.c 						 skb_queue_len(&ring->queue));
queue             514 drivers/net/wireless/realtek/rtlwifi/rtl8192ce/phy.c 						 skb_queue_len(&ring->queue));
queue             467 drivers/net/wireless/realtek/rtlwifi/rtl8192cu/mac.c 			    u16 queue, u16 txop, u8 cw_min, u8 cw_max, u8 aifs)
queue             480 drivers/net/wireless/realtek/rtlwifi/rtl8192cu/mac.c 	rtl_write_dword(rtlpriv, (REG_EDCA_VO_PARAM + (queue * 4)), value);
queue              33 drivers/net/wireless/realtek/rtlwifi/rtl8192cu/mac.h 							u16 queue,
queue             419 drivers/net/wireless/realtek/rtlwifi/rtl8192cu/phy.c 			if (skb_queue_len(&ring->queue) == 0 ||
queue             428 drivers/net/wireless/realtek/rtlwifi/rtl8192cu/phy.c 					 skb_queue_len(&ring->queue));
queue             437 drivers/net/wireless/realtek/rtlwifi/rtl8192cu/phy.c 					 skb_queue_len(&ring->queue));
queue             462 drivers/net/wireless/realtek/rtlwifi/rtl8192cu/phy.c 			if (skb_queue_len(&ring->queue) == 0) {
queue             469 drivers/net/wireless/realtek/rtlwifi/rtl8192cu/phy.c 					 skb_queue_len(&ring->queue));
queue             478 drivers/net/wireless/realtek/rtlwifi/rtl8192cu/phy.c 					 skb_queue_len(&ring->queue));
queue             466 drivers/net/wireless/realtek/rtlwifi/rtl8192de/fw.c 	pskb = __skb_dequeue(&ring->queue);
queue             473 drivers/net/wireless/realtek/rtlwifi/rtl8192de/fw.c 	__skb_queue_tail(&ring->queue, skb);
queue            3116 drivers/net/wireless/realtek/rtlwifi/rtl8192de/phy.c 			if (skb_queue_len(&ring->queue) == 0 ||
queue            3129 drivers/net/wireless/realtek/rtlwifi/rtl8192de/phy.c 					 skb_queue_len(&ring->queue));
queue            3138 drivers/net/wireless/realtek/rtlwifi/rtl8192de/phy.c 					 skb_queue_len(&ring->queue));
queue             150 drivers/net/wireless/realtek/rtlwifi/rtl8192ee/hw.c 	u8 queue;
queue             163 drivers/net/wireless/realtek/rtlwifi/rtl8192ee/hw.c 	for (queue = 0; queue < RTL_PCI_MAX_TX_QUEUE_COUNT; queue++) {
queue             164 drivers/net/wireless/realtek/rtlwifi/rtl8192ee/hw.c 		ring = &rtlpci->tx_ring[queue];
queue             165 drivers/net/wireless/realtek/rtlwifi/rtl8192ee/hw.c 		if (skb_queue_len(&ring->queue)) {
queue            3097 drivers/net/wireless/realtek/rtlwifi/rtl8192ee/phy.c 			    skb_queue_len(&ring->queue) == 0) {
queue            3104 drivers/net/wireless/realtek/rtlwifi/rtl8192ee/phy.c 					 skb_queue_len(&ring->queue));
queue            3114 drivers/net/wireless/realtek/rtlwifi/rtl8192ee/phy.c 					  skb_queue_len(&ring->queue));
queue            3140 drivers/net/wireless/realtek/rtlwifi/rtl8192ee/phy.c 			if (skb_queue_len(&ring->queue) == 0) {
queue            3147 drivers/net/wireless/realtek/rtlwifi/rtl8192ee/phy.c 					 skb_queue_len(&ring->queue));
queue            3156 drivers/net/wireless/realtek/rtlwifi/rtl8192ee/phy.c 					  skb_queue_len(&ring->queue));
queue             123 drivers/net/wireless/realtek/rtlwifi/rtl8192se/fw.c 	idx = (ring->idx + skb_queue_len(&ring->queue)) % ring->entries;
queue             126 drivers/net/wireless/realtek/rtlwifi/rtl8192se/fw.c 	__skb_queue_tail(&ring->queue, skb);
queue             584 drivers/net/wireless/realtek/rtlwifi/rtl8192se/phy.c 				if (skb_queue_len(&ring->queue) == 0 ||
queue             592 drivers/net/wireless/realtek/rtlwifi/rtl8192se/phy.c 						 skb_queue_len(&ring->queue));
queue             603 drivers/net/wireless/realtek/rtlwifi/rtl8192se/phy.c 						 skb_queue_len(&ring->queue));
queue            1631 drivers/net/wireless/realtek/rtlwifi/rtl8723ae/phy.c 			    skb_queue_len(&ring->queue) == 0) {
queue            1638 drivers/net/wireless/realtek/rtlwifi/rtl8723ae/phy.c 					 skb_queue_len(&ring->queue));
queue            1648 drivers/net/wireless/realtek/rtlwifi/rtl8723ae/phy.c 					  skb_queue_len(&ring->queue));
queue              36 drivers/net/wireless/realtek/rtlwifi/rtl8723be/hw.c 	while (skb_queue_len(&ring->queue)) {
queue              38 drivers/net/wireless/realtek/rtlwifi/rtl8723be/hw.c 		struct sk_buff *skb = __skb_dequeue(&ring->queue);
queue             176 drivers/net/wireless/realtek/rtlwifi/rtl8723be/hw.c 	u8 queue;
queue             188 drivers/net/wireless/realtek/rtlwifi/rtl8723be/hw.c 	for (queue = 0; queue < RTL_PCI_MAX_TX_QUEUE_COUNT; queue++) {
queue             189 drivers/net/wireless/realtek/rtlwifi/rtl8723be/hw.c 		ring = &rtlpci->tx_ring[queue];
queue             190 drivers/net/wireless/realtek/rtlwifi/rtl8723be/hw.c 		if (skb_queue_len(&ring->queue)) {
queue            2560 drivers/net/wireless/realtek/rtlwifi/rtl8723be/phy.c 			    skb_queue_len(&ring->queue) == 0) {
queue            2567 drivers/net/wireless/realtek/rtlwifi/rtl8723be/phy.c 					 skb_queue_len(&ring->queue));
queue            2577 drivers/net/wireless/realtek/rtlwifi/rtl8723be/phy.c 					  skb_queue_len(&ring->queue));
queue            2604 drivers/net/wireless/realtek/rtlwifi/rtl8723be/phy.c 			if (skb_queue_len(&ring->queue) == 0) {
queue            2611 drivers/net/wireless/realtek/rtlwifi/rtl8723be/phy.c 					 skb_queue_len(&ring->queue));
queue            2621 drivers/net/wireless/realtek/rtlwifi/rtl8723be/phy.c 					 skb_queue_len(&ring->queue));
queue             231 drivers/net/wireless/realtek/rtlwifi/rtl8723com/fw_common.c 	pskb = __skb_dequeue(&ring->queue);
queue             241 drivers/net/wireless/realtek/rtlwifi/rtl8723com/fw_common.c 	__skb_queue_tail(&ring->queue, skb);
queue              32 drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c 	while (skb_queue_len(&ring->queue)) {
queue              34 drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c 		struct sk_buff *skb = __skb_dequeue(&ring->queue);
queue             174 drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c 	u8 queue;
queue             186 drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c 	for (queue = 0; queue < RTL_PCI_MAX_TX_QUEUE_COUNT; queue++) {
queue             187 drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c 		ring = &rtlpci->tx_ring[queue];
queue             188 drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c 		if (skb_queue_len(&ring->queue)) {
queue            4771 drivers/net/wireless/realtek/rtlwifi/rtl8821ae/phy.c 			    skb_queue_len(&ring->queue) == 0) {
queue            4778 drivers/net/wireless/realtek/rtlwifi/rtl8821ae/phy.c 					 skb_queue_len(&ring->queue));
queue            4788 drivers/net/wireless/realtek/rtlwifi/rtl8821ae/phy.c 					  skb_queue_len(&ring->queue));
queue            2001 drivers/net/wireless/realtek/rtlwifi/wifi.h 	struct sk_buff_head queue;
queue             455 drivers/net/wireless/realtek/rtw88/coex.c 	skb_queue_tail(&coex->queue, skb);
queue             469 drivers/net/wireless/realtek/rtw88/coex.c 	if (!wait_event_timeout(coex->wait, !skb_queue_empty(&coex->queue),
queue             475 drivers/net/wireless/realtek/rtw88/coex.c 	skb_resp = skb_dequeue(&coex->queue);
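The rtw88 coex lines above implement a request/response handshake: the caller sleeps in wait_event_timeout() until the response skb queue is non-empty, then dequeues the reply. A userspace analogue with pthreads in place of the kernel wait queue (everything here is a model, not the driver's API):

    #include <pthread.h>
    #include <stdbool.h>
    #include <time.h>

    struct coex_wait {
        pthread_mutex_t lock;
        pthread_cond_t cond;
        bool have_resp;          /* stands in for !skb_queue_empty() */
    };

    static bool wait_for_resp(struct coex_wait *w, int timeout_ms)
    {
        struct timespec ts;
        bool ok;

        clock_gettime(CLOCK_REALTIME, &ts);
        ts.tv_sec += timeout_ms / 1000;
        ts.tv_nsec += (timeout_ms % 1000) * 1000000L;
        if (ts.tv_nsec >= 1000000000L) { ts.tv_sec++; ts.tv_nsec -= 1000000000L; }

        pthread_mutex_lock(&w->lock);
        while (!w->have_resp) {
            if (pthread_cond_timedwait(&w->cond, &w->lock, &ts))
                break;           /* ETIMEDOUT: give up, like the driver */
        }
        ok = w->have_resp;
        w->have_resp = false;    /* consume the response */
        pthread_mutex_unlock(&w->lock);
        return ok;
    }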
queue            1161 drivers/net/wireless/realtek/rtw88/main.c 	skb_queue_head_init(&rtwdev->coex.queue);
queue            1162 drivers/net/wireless/realtek/rtw88/main.c 	skb_queue_head_init(&rtwdev->tx_report.queue);
queue            1208 drivers/net/wireless/realtek/rtw88/main.c 	skb_queue_purge(&rtwdev->tx_report.queue);
queue             574 drivers/net/wireless/realtek/rtw88/main.h 	struct sk_buff_head queue;
queue            1065 drivers/net/wireless/realtek/rtw88/main.h 	struct sk_buff_head queue;
queue              28 drivers/net/wireless/realtek/rtw88/pci.c static u8 rtw_pci_get_tx_qsel(struct sk_buff *skb, u8 queue)
queue              30 drivers/net/wireless/realtek/rtw88/pci.c 	switch (queue) {
queue             102 drivers/net/wireless/realtek/rtw88/pci.c 	skb_queue_walk_safe(&tx_ring->queue, skb, tmp) {
queue             103 drivers/net/wireless/realtek/rtw88/pci.c 		__skb_unlink(skb, &tx_ring->queue);
queue             193 drivers/net/wireless/realtek/rtw88/pci.c 	skb_queue_head_init(&tx_ring->queue);
queue             506 drivers/net/wireless/realtek/rtw88/pci.c 	u8 queue;
queue             508 drivers/net/wireless/realtek/rtw88/pci.c 	for (queue = 0; queue < RTK_MAX_TX_QUEUE_NUM; queue++) {
queue             509 drivers/net/wireless/realtek/rtw88/pci.c 		tx_ring = &rtwpci->tx_rings[queue];
queue             551 drivers/net/wireless/realtek/rtw88/pci.c 	u8 queue;
queue             554 drivers/net/wireless/realtek/rtw88/pci.c 		queue = RTW_TX_QUEUE_BCN;
queue             556 drivers/net/wireless/realtek/rtw88/pci.c 		queue = RTW_TX_QUEUE_MGMT;
queue             558 drivers/net/wireless/realtek/rtw88/pci.c 		queue = ac_to_hwq[IEEE80211_AC_BE];
queue             560 drivers/net/wireless/realtek/rtw88/pci.c 		queue = ac_to_hwq[q_mapping];
queue             562 drivers/net/wireless/realtek/rtw88/pci.c 	return queue;
queue             568 drivers/net/wireless/realtek/rtw88/pci.c 	struct sk_buff *prev = skb_dequeue(&ring->queue);
queue             605 drivers/net/wireless/realtek/rtw88/pci.c 			struct sk_buff *skb, u8 queue)
queue             620 drivers/net/wireless/realtek/rtw88/pci.c 	ring = &rtwpci->tx_rings[queue];
queue             624 drivers/net/wireless/realtek/rtw88/pci.c 	if (queue == RTW_TX_QUEUE_BCN)
queue             631 drivers/net/wireless/realtek/rtw88/pci.c 	pkt_info->qsel = rtw_pci_get_tx_qsel(skb, queue);
queue             642 drivers/net/wireless/realtek/rtw88/pci.c 	if (queue == RTW_TX_QUEUE_BCN)
queue             654 drivers/net/wireless/realtek/rtw88/pci.c 	skb_queue_tail(&ring->queue, skb);
queue             657 drivers/net/wireless/realtek/rtw88/pci.c 	if (queue != RTW_TX_QUEUE_BCN) {
queue             660 drivers/net/wireless/realtek/rtw88/pci.c 		bd_idx = rtw_pci_tx_queue_idx_addr[queue];
queue             723 drivers/net/wireless/realtek/rtw88/pci.c 	u8 queue = rtw_hw_queue_mapping(skb);
queue             726 drivers/net/wireless/realtek/rtw88/pci.c 	ret = rtw_pci_xmit(rtwdev, pkt_info, skb, queue);
queue             730 drivers/net/wireless/realtek/rtw88/pci.c 	ring = &rtwpci->tx_rings[queue];
queue             764 drivers/net/wireless/realtek/rtw88/pci.c 		skb = skb_dequeue(&ring->queue);
queue             165 drivers/net/wireless/realtek/rtw88/pci.h 	struct sk_buff_head queue;
queue             197 drivers/net/wireless/realtek/rtw88/pci.h static u32 max_num_of_tx_queue(u8 queue)
queue             201 drivers/net/wireless/realtek/rtw88/pci.h 	switch (queue) {
queue             153 drivers/net/wireless/realtek/rtw88/tx.c 	if (skb_queue_len(&tx_report->queue) == 0)
queue             159 drivers/net/wireless/realtek/rtw88/tx.c 	skb_queue_purge(&tx_report->queue);
queue             174 drivers/net/wireless/realtek/rtw88/tx.c 	__skb_queue_tail(&tx_report->queue, skb);
queue             211 drivers/net/wireless/realtek/rtw88/tx.c 	skb_queue_walk_safe(&tx_report->queue, cur, tmp) {
queue             214 drivers/net/wireless/realtek/rtw88/tx.c 			__skb_unlink(cur, &tx_report->queue);
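
Between them, main.c:1162/:1208, pci.c:102-103/:654 and tx.c:153-214 above trace the full sk_buff_head lifecycle: skb_queue_head_init() once, skb_queue_tail() from the producer, skb_queue_walk_safe() plus __skb_unlink() for selective removal, and skb_queue_purge() at teardown. A minimal sketch of the selective-removal step; the sequence-number match is a hypothetical stand-in (rtw88 keeps the real one in the skb control buffer), and the lockless __skb_unlink() relies on the caller serializing access, as the driver does with its own spinlock:

        #include <linux/skbuff.h>

        static bool ex_report_match(struct sk_buff *skb, u8 sn)
        {
                return (u8)skb->cb[0] == sn;    /* stand-in for the real match */
        }

        static void ex_report_ack(struct sk_buff_head *q, u8 sn)
        {
                struct sk_buff *skb, *tmp;

                skb_queue_walk_safe(q, skb, tmp) {
                        if (!ex_report_match(skb, sn))
                                continue;
                        __skb_unlink(skb, q);
                        dev_kfree_skb_any(skb);
                        break;
                }
        }
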
queue             885 drivers/net/wireless/rsi/rsi_91x_mac80211.c 				struct ieee80211_vif *vif, u16 queue,
queue             892 drivers/net/wireless/rsi/rsi_91x_mac80211.c 	if (queue >= IEEE80211_NUM_ACS)
queue             897 drivers/net/wireless/rsi/rsi_91x_mac80211.c 		__func__, queue, params->aifs,
queue             902 drivers/net/wireless/rsi/rsi_91x_mac80211.c 	switch (queue) {
queue              26 drivers/net/wireless/st/cw1200/queue.c static inline void __cw1200_queue_lock(struct cw1200_queue *queue)
queue              28 drivers/net/wireless/st/cw1200/queue.c 	struct cw1200_queue_stats *stats = queue->stats;
queue              29 drivers/net/wireless/st/cw1200/queue.c 	if (queue->tx_locked_cnt++ == 0) {
queue              31 drivers/net/wireless/st/cw1200/queue.c 			 queue->queue_id);
queue              32 drivers/net/wireless/st/cw1200/queue.c 		ieee80211_stop_queue(stats->priv->hw, queue->queue_id);
queue              36 drivers/net/wireless/st/cw1200/queue.c static inline void __cw1200_queue_unlock(struct cw1200_queue *queue)
queue              38 drivers/net/wireless/st/cw1200/queue.c 	struct cw1200_queue_stats *stats = queue->stats;
queue              39 drivers/net/wireless/st/cw1200/queue.c 	BUG_ON(!queue->tx_locked_cnt);
queue              40 drivers/net/wireless/st/cw1200/queue.c 	if (--queue->tx_locked_cnt == 0) {
queue              42 drivers/net/wireless/st/cw1200/queue.c 			 queue->queue_id);
queue              43 drivers/net/wireless/st/cw1200/queue.c 		ieee80211_wake_queue(stats->priv->hw, queue->queue_id);
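
__cw1200_queue_lock()/__cw1200_queue_unlock() above are a counted stop/wake: only the 0 -> 1 transition stops the mac80211 queue and only the 1 -> 0 transition wakes it, so nested lock requests compose correctly. Stripped to the pattern (the struct fields are assumptions standing in for cw1200's queue/stats split):

        #include <net/mac80211.h>

        struct ex_queue {
                struct ieee80211_hw *hw;
                int queue_id;
                int tx_locked_cnt;      /* callers hold the queue spinlock */
        };

        static void ex_queue_lock(struct ex_queue *q)
        {
                if (q->tx_locked_cnt++ == 0)
                        ieee80211_stop_queue(q->hw, q->queue_id);
        }

        static void ex_queue_unlock(struct ex_queue *q)
        {
                if (WARN_ON(!q->tx_locked_cnt))
                        return;
                if (--q->tx_locked_cnt == 0)
                        ieee80211_wake_queue(q->hw, q->queue_id);
        }
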
queue              89 drivers/net/wireless/st/cw1200/queue.c static void __cw1200_queue_gc(struct cw1200_queue *queue,
queue              93 drivers/net/wireless/st/cw1200/queue.c 	struct cw1200_queue_stats *stats = queue->stats;
queue              97 drivers/net/wireless/st/cw1200/queue.c 	list_for_each_entry_safe(item, tmp, &queue->queue, head) {
queue              98 drivers/net/wireless/st/cw1200/queue.c 		if (jiffies - item->queue_timestamp < queue->ttl)
queue             100 drivers/net/wireless/st/cw1200/queue.c 		--queue->num_queued;
queue             101 drivers/net/wireless/st/cw1200/queue.c 		--queue->link_map_cache[item->txpriv.link_id];
queue             110 drivers/net/wireless/st/cw1200/queue.c 		list_move_tail(&item->head, &queue->free_pool);
queue             116 drivers/net/wireless/st/cw1200/queue.c 	if (queue->overfull) {
queue             117 drivers/net/wireless/st/cw1200/queue.c 		if (queue->num_queued <= (queue->capacity >> 1)) {
queue             118 drivers/net/wireless/st/cw1200/queue.c 			queue->overfull = false;
queue             120 drivers/net/wireless/st/cw1200/queue.c 				__cw1200_queue_unlock(queue);
queue             122 drivers/net/wireless/st/cw1200/queue.c 			unsigned long tmo = item->queue_timestamp + queue->ttl;
queue             123 drivers/net/wireless/st/cw1200/queue.c 			mod_timer(&queue->gc, tmo);
queue             133 drivers/net/wireless/st/cw1200/queue.c 	struct cw1200_queue *queue =
queue             134 drivers/net/wireless/st/cw1200/queue.c 		from_timer(queue, t, gc);
queue             136 drivers/net/wireless/st/cw1200/queue.c 	spin_lock_bh(&queue->lock);
queue             137 drivers/net/wireless/st/cw1200/queue.c 	__cw1200_queue_gc(queue, &list, true);
queue             138 drivers/net/wireless/st/cw1200/queue.c 	spin_unlock_bh(&queue->lock);
queue             139 drivers/net/wireless/st/cw1200/queue.c 	cw1200_queue_post_gc(queue->stats, &list);
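
queue.c:89-139 above: the GC walks the queue under queue->lock, expires items older than queue->ttl, and re-arms the timer for the oldest survivor. The timer_setup()/from_timer() pairing at :133-134 and :179 is the stock kernel idiom for embedding a timer in a containing struct; a reduced sketch:

        #include <linux/timer.h>
        #include <linux/spinlock.h>

        struct ex_gc_queue {
                spinlock_t lock;
                struct timer_list gc;
                unsigned long ttl;      /* item lifetime, in jiffies */
        };

        static void ex_gc_fn(struct timer_list *t)
        {
                struct ex_gc_queue *q = from_timer(q, t, gc);

                spin_lock_bh(&q->lock);
                /* ... expire items whose timestamp + q->ttl has passed,
                 *     mod_timer(&q->gc, oldest + q->ttl) if any remain ... */
                spin_unlock_bh(&q->lock);
        }

        static void ex_gc_init(struct ex_gc_queue *q, unsigned long ttl)
        {
                spin_lock_init(&q->lock);
                q->ttl = ttl;
                timer_setup(&q->gc, ex_gc_fn, 0);
        }
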
queue             162 drivers/net/wireless/st/cw1200/queue.c int cw1200_queue_init(struct cw1200_queue *queue,
queue             170 drivers/net/wireless/st/cw1200/queue.c 	memset(queue, 0, sizeof(*queue));
queue             171 drivers/net/wireless/st/cw1200/queue.c 	queue->stats = stats;
queue             172 drivers/net/wireless/st/cw1200/queue.c 	queue->capacity = capacity;
queue             173 drivers/net/wireless/st/cw1200/queue.c 	queue->queue_id = queue_id;
queue             174 drivers/net/wireless/st/cw1200/queue.c 	queue->ttl = ttl;
queue             175 drivers/net/wireless/st/cw1200/queue.c 	INIT_LIST_HEAD(&queue->queue);
queue             176 drivers/net/wireless/st/cw1200/queue.c 	INIT_LIST_HEAD(&queue->pending);
queue             177 drivers/net/wireless/st/cw1200/queue.c 	INIT_LIST_HEAD(&queue->free_pool);
queue             178 drivers/net/wireless/st/cw1200/queue.c 	spin_lock_init(&queue->lock);
queue             179 drivers/net/wireless/st/cw1200/queue.c 	timer_setup(&queue->gc, cw1200_queue_gc, 0);
queue             181 drivers/net/wireless/st/cw1200/queue.c 	queue->pool = kcalloc(capacity, sizeof(struct cw1200_queue_item),
queue             183 drivers/net/wireless/st/cw1200/queue.c 	if (!queue->pool)
queue             186 drivers/net/wireless/st/cw1200/queue.c 	queue->link_map_cache = kcalloc(stats->map_capacity, sizeof(int),
queue             188 drivers/net/wireless/st/cw1200/queue.c 	if (!queue->link_map_cache) {
queue             189 drivers/net/wireless/st/cw1200/queue.c 		kfree(queue->pool);
queue             190 drivers/net/wireless/st/cw1200/queue.c 		queue->pool = NULL;
queue             195 drivers/net/wireless/st/cw1200/queue.c 		list_add_tail(&queue->pool[i].head, &queue->free_pool);
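
cw1200_queue_init() above pre-allocates every queue item with kcalloc() and threads them all onto free_pool, so the TX hot path only ever does list_move_tail() between free_pool, queue and pending, never an allocation. The pool setup, reduced to a sketch:

        #include <linux/list.h>
        #include <linux/slab.h>

        struct ex_item {
                struct list_head head;
                /* ... per-frame bookkeeping ... */
        };

        static struct ex_item *ex_pool_init(struct list_head *free_pool,
                                            size_t capacity)
        {
                struct ex_item *pool = kcalloc(capacity, sizeof(*pool),
                                               GFP_KERNEL);
                size_t i;

                if (!pool)
                        return NULL;
                INIT_LIST_HEAD(free_pool);
                for (i = 0; i < capacity; i++)
                        list_add_tail(&pool[i].head, free_pool);
                return pool;
        }
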
queue             200 drivers/net/wireless/st/cw1200/queue.c int cw1200_queue_clear(struct cw1200_queue *queue)
queue             204 drivers/net/wireless/st/cw1200/queue.c 	struct cw1200_queue_stats *stats = queue->stats;
queue             207 drivers/net/wireless/st/cw1200/queue.c 	spin_lock_bh(&queue->lock);
queue             208 drivers/net/wireless/st/cw1200/queue.c 	queue->generation++;
queue             209 drivers/net/wireless/st/cw1200/queue.c 	list_splice_tail_init(&queue->queue, &queue->pending);
queue             210 drivers/net/wireless/st/cw1200/queue.c 	list_for_each_entry_safe(item, tmp, &queue->pending, head) {
queue             214 drivers/net/wireless/st/cw1200/queue.c 		list_move_tail(&item->head, &queue->free_pool);
queue             216 drivers/net/wireless/st/cw1200/queue.c 	queue->num_queued = 0;
queue             217 drivers/net/wireless/st/cw1200/queue.c 	queue->num_pending = 0;
queue             221 drivers/net/wireless/st/cw1200/queue.c 		stats->num_queued -= queue->link_map_cache[i];
queue             222 drivers/net/wireless/st/cw1200/queue.c 		stats->link_map_cache[i] -= queue->link_map_cache[i];
queue             223 drivers/net/wireless/st/cw1200/queue.c 		queue->link_map_cache[i] = 0;
queue             226 drivers/net/wireless/st/cw1200/queue.c 	if (queue->overfull) {
queue             227 drivers/net/wireless/st/cw1200/queue.c 		queue->overfull = false;
queue             228 drivers/net/wireless/st/cw1200/queue.c 		__cw1200_queue_unlock(queue);
queue             230 drivers/net/wireless/st/cw1200/queue.c 	spin_unlock_bh(&queue->lock);
queue             242 drivers/net/wireless/st/cw1200/queue.c void cw1200_queue_deinit(struct cw1200_queue *queue)
queue             244 drivers/net/wireless/st/cw1200/queue.c 	cw1200_queue_clear(queue);
queue             245 drivers/net/wireless/st/cw1200/queue.c 	del_timer_sync(&queue->gc);
queue             246 drivers/net/wireless/st/cw1200/queue.c 	INIT_LIST_HEAD(&queue->free_pool);
queue             247 drivers/net/wireless/st/cw1200/queue.c 	kfree(queue->pool);
queue             248 drivers/net/wireless/st/cw1200/queue.c 	kfree(queue->link_map_cache);
queue             249 drivers/net/wireless/st/cw1200/queue.c 	queue->pool = NULL;
queue             250 drivers/net/wireless/st/cw1200/queue.c 	queue->link_map_cache = NULL;
queue             251 drivers/net/wireless/st/cw1200/queue.c 	queue->capacity = 0;
queue             254 drivers/net/wireless/st/cw1200/queue.c size_t cw1200_queue_get_num_queued(struct cw1200_queue *queue,
queue             259 drivers/net/wireless/st/cw1200/queue.c 	size_t map_capacity = queue->stats->map_capacity;
queue             264 drivers/net/wireless/st/cw1200/queue.c 	spin_lock_bh(&queue->lock);
queue             266 drivers/net/wireless/st/cw1200/queue.c 		ret = queue->num_queued - queue->num_pending;
queue             271 drivers/net/wireless/st/cw1200/queue.c 				ret += queue->link_map_cache[i];
queue             274 drivers/net/wireless/st/cw1200/queue.c 	spin_unlock_bh(&queue->lock);
queue             278 drivers/net/wireless/st/cw1200/queue.c int cw1200_queue_put(struct cw1200_queue *queue,
queue             283 drivers/net/wireless/st/cw1200/queue.c 	struct cw1200_queue_stats *stats = queue->stats;
queue             285 drivers/net/wireless/st/cw1200/queue.c 	if (txpriv->link_id >= queue->stats->map_capacity)
queue             288 drivers/net/wireless/st/cw1200/queue.c 	spin_lock_bh(&queue->lock);
queue             289 drivers/net/wireless/st/cw1200/queue.c 	if (!WARN_ON(list_empty(&queue->free_pool))) {
queue             291 drivers/net/wireless/st/cw1200/queue.c 			&queue->free_pool, struct cw1200_queue_item, head);
queue             294 drivers/net/wireless/st/cw1200/queue.c 		list_move_tail(&item->head, &queue->queue);
queue             298 drivers/net/wireless/st/cw1200/queue.c 		item->packet_id = cw1200_queue_mk_packet_id(queue->generation,
queue             299 drivers/net/wireless/st/cw1200/queue.c 							    queue->queue_id,
queue             301 drivers/net/wireless/st/cw1200/queue.c 							    item - queue->pool);
queue             304 drivers/net/wireless/st/cw1200/queue.c 		++queue->num_queued;
queue             305 drivers/net/wireless/st/cw1200/queue.c 		++queue->link_map_cache[txpriv->link_id];
queue             315 drivers/net/wireless/st/cw1200/queue.c 		if (queue->overfull == false &&
queue             316 drivers/net/wireless/st/cw1200/queue.c 		    queue->num_queued >=
queue             317 drivers/net/wireless/st/cw1200/queue.c 		    (queue->capacity - (num_present_cpus() - 1))) {
queue             318 drivers/net/wireless/st/cw1200/queue.c 			queue->overfull = true;
queue             319 drivers/net/wireless/st/cw1200/queue.c 			__cw1200_queue_lock(queue);
queue             320 drivers/net/wireless/st/cw1200/queue.c 			mod_timer(&queue->gc, jiffies);
queue             325 drivers/net/wireless/st/cw1200/queue.c 	spin_unlock_bh(&queue->lock);
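
queue.c:315-320 above, with its counterpart at :476-479 in cw1200_queue_remove() below, is watermark flow control with hysteresis: put() declares the queue overfull a few slots before true capacity (num_present_cpus() - 1 frames may already be in flight on other CPUs) and stops TX; remove() only resumes once occupancy drops to half capacity, so the queue does not flap at the boundary. The accounting, schematically (fields as in the sketches above):

        #include <linux/cpumask.h>

        struct ex_wm_queue {
                size_t num_queued, capacity;
                bool overfull;
        };

        static void ex_account_put(struct ex_wm_queue *q)
        {
                if (!q->overfull &&
                    q->num_queued >= q->capacity - (num_present_cpus() - 1)) {
                        q->overfull = true;
                        /* ex_queue_lock(...): stop the mac80211 queue */
                }
        }

        static void ex_account_remove(struct ex_wm_queue *q)
        {
                if (q->overfull && q->num_queued <= (q->capacity >> 1)) {
                        q->overfull = false;
                        /* ex_queue_unlock(...): wake the mac80211 queue */
                }
        }
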
queue             329 drivers/net/wireless/st/cw1200/queue.c int cw1200_queue_get(struct cw1200_queue *queue,
queue             337 drivers/net/wireless/st/cw1200/queue.c 	struct cw1200_queue_stats *stats = queue->stats;
queue             340 drivers/net/wireless/st/cw1200/queue.c 	spin_lock_bh(&queue->lock);
queue             341 drivers/net/wireless/st/cw1200/queue.c 	list_for_each_entry(item, &queue->queue, head) {
queue             353 drivers/net/wireless/st/cw1200/queue.c 		list_move_tail(&item->head, &queue->pending);
queue             354 drivers/net/wireless/st/cw1200/queue.c 		++queue->num_pending;
queue             355 drivers/net/wireless/st/cw1200/queue.c 		--queue->link_map_cache[item->txpriv.link_id];
queue             364 drivers/net/wireless/st/cw1200/queue.c 	spin_unlock_bh(&queue->lock);
queue             370 drivers/net/wireless/st/cw1200/queue.c int cw1200_queue_requeue(struct cw1200_queue *queue, u32 packet_id)
queue             375 drivers/net/wireless/st/cw1200/queue.c 	struct cw1200_queue_stats *stats = queue->stats;
queue             380 drivers/net/wireless/st/cw1200/queue.c 	item = &queue->pool[item_id];
queue             382 drivers/net/wireless/st/cw1200/queue.c 	spin_lock_bh(&queue->lock);
queue             383 drivers/net/wireless/st/cw1200/queue.c 	BUG_ON(queue_id != queue->queue_id);
queue             384 drivers/net/wireless/st/cw1200/queue.c 	if (queue_generation != queue->generation) {
queue             386 drivers/net/wireless/st/cw1200/queue.c 	} else if (item_id >= (unsigned) queue->capacity) {
queue             393 drivers/net/wireless/st/cw1200/queue.c 		--queue->num_pending;
queue             394 drivers/net/wireless/st/cw1200/queue.c 		++queue->link_map_cache[item->txpriv.link_id];
queue             406 drivers/net/wireless/st/cw1200/queue.c 		list_move(&item->head, &queue->queue);
queue             408 drivers/net/wireless/st/cw1200/queue.c 	spin_unlock_bh(&queue->lock);
queue             412 drivers/net/wireless/st/cw1200/queue.c int cw1200_queue_requeue_all(struct cw1200_queue *queue)
queue             415 drivers/net/wireless/st/cw1200/queue.c 	struct cw1200_queue_stats *stats = queue->stats;
queue             416 drivers/net/wireless/st/cw1200/queue.c 	spin_lock_bh(&queue->lock);
queue             418 drivers/net/wireless/st/cw1200/queue.c 	list_for_each_entry_safe_reverse(item, tmp, &queue->pending, head) {
queue             419 drivers/net/wireless/st/cw1200/queue.c 		--queue->num_pending;
queue             420 drivers/net/wireless/st/cw1200/queue.c 		++queue->link_map_cache[item->txpriv.link_id];
queue             428 drivers/net/wireless/st/cw1200/queue.c 		item->packet_id = cw1200_queue_mk_packet_id(queue->generation,
queue             429 drivers/net/wireless/st/cw1200/queue.c 							    queue->queue_id,
queue             431 drivers/net/wireless/st/cw1200/queue.c 							    item - queue->pool);
queue             432 drivers/net/wireless/st/cw1200/queue.c 		list_move(&item->head, &queue->queue);
queue             434 drivers/net/wireless/st/cw1200/queue.c 	spin_unlock_bh(&queue->lock);
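
cw1200_queue_requeue() and cw1200_queue_remove() above both decode packet_id back into (queue_generation, queue_id, item_generation, item_id), BUG on a queue_id mismatch, and treat a stale queue_generation as "the queue was cleared since this frame left" rather than as an error. The cookie packs the four fields one byte each into a u32; the helpers below assume that layout (the actual mk/parse pair lives in queue.h and is not part of the index above):

        #include <linux/types.h>

        static inline u32 ex_mk_packet_id(u8 queue_generation, u8 queue_id,
                                          u8 item_generation, u8 item_id)
        {
                return ((u32)item_id << 0) |
                       ((u32)item_generation << 8) |
                       ((u32)queue_id << 16) |
                       ((u32)queue_generation << 24);
        }

        static inline void ex_parse_packet_id(u32 packet_id,
                                              u8 *queue_generation, u8 *queue_id,
                                              u8 *item_generation, u8 *item_id)
        {
                *item_id          = (packet_id >> 0) & 0xff;
                *item_generation  = (packet_id >> 8) & 0xff;
                *queue_id         = (packet_id >> 16) & 0xff;
                *queue_generation = (packet_id >> 24) & 0xff;
        }
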
queue             439 drivers/net/wireless/st/cw1200/queue.c int cw1200_queue_remove(struct cw1200_queue *queue, u32 packet_id)
queue             444 drivers/net/wireless/st/cw1200/queue.c 	struct cw1200_queue_stats *stats = queue->stats;
queue             451 drivers/net/wireless/st/cw1200/queue.c 	item = &queue->pool[item_id];
queue             453 drivers/net/wireless/st/cw1200/queue.c 	spin_lock_bh(&queue->lock);
queue             454 drivers/net/wireless/st/cw1200/queue.c 	BUG_ON(queue_id != queue->queue_id);
queue             455 drivers/net/wireless/st/cw1200/queue.c 	if (queue_generation != queue->generation) {
queue             457 drivers/net/wireless/st/cw1200/queue.c 	} else if (item_id >= (unsigned) queue->capacity) {
queue             467 drivers/net/wireless/st/cw1200/queue.c 		--queue->num_pending;
queue             468 drivers/net/wireless/st/cw1200/queue.c 		--queue->num_queued;
queue             469 drivers/net/wireless/st/cw1200/queue.c 		++queue->num_sent;
queue             474 drivers/net/wireless/st/cw1200/queue.c 		list_move(&item->head, &queue->free_pool);
queue             476 drivers/net/wireless/st/cw1200/queue.c 		if (queue->overfull &&
queue             477 drivers/net/wireless/st/cw1200/queue.c 		    (queue->num_queued <= (queue->capacity >> 1))) {
queue             478 drivers/net/wireless/st/cw1200/queue.c 			queue->overfull = false;
queue             479 drivers/net/wireless/st/cw1200/queue.c 			__cw1200_queue_unlock(queue);
queue             482 drivers/net/wireless/st/cw1200/queue.c 	spin_unlock_bh(&queue->lock);
queue             490 drivers/net/wireless/st/cw1200/queue.c int cw1200_queue_get_skb(struct cw1200_queue *queue, u32 packet_id,
queue             500 drivers/net/wireless/st/cw1200/queue.c 	item = &queue->pool[item_id];
queue             502 drivers/net/wireless/st/cw1200/queue.c 	spin_lock_bh(&queue->lock);
queue             503 drivers/net/wireless/st/cw1200/queue.c 	BUG_ON(queue_id != queue->queue_id);
queue             504 drivers/net/wireless/st/cw1200/queue.c 	if (queue_generation != queue->generation) {
queue             506 drivers/net/wireless/st/cw1200/queue.c 	} else if (item_id >= (unsigned) queue->capacity) {
queue             516 drivers/net/wireless/st/cw1200/queue.c 	spin_unlock_bh(&queue->lock);
queue             520 drivers/net/wireless/st/cw1200/queue.c void cw1200_queue_lock(struct cw1200_queue *queue)
queue             522 drivers/net/wireless/st/cw1200/queue.c 	spin_lock_bh(&queue->lock);
queue             523 drivers/net/wireless/st/cw1200/queue.c 	__cw1200_queue_lock(queue);
queue             524 drivers/net/wireless/st/cw1200/queue.c 	spin_unlock_bh(&queue->lock);
queue             527 drivers/net/wireless/st/cw1200/queue.c void cw1200_queue_unlock(struct cw1200_queue *queue)
queue             529 drivers/net/wireless/st/cw1200/queue.c 	spin_lock_bh(&queue->lock);
queue             530 drivers/net/wireless/st/cw1200/queue.c 	__cw1200_queue_unlock(queue);
queue             531 drivers/net/wireless/st/cw1200/queue.c 	spin_unlock_bh(&queue->lock);
queue             534 drivers/net/wireless/st/cw1200/queue.c bool cw1200_queue_get_xmit_timestamp(struct cw1200_queue *queue,
queue             541 drivers/net/wireless/st/cw1200/queue.c 	spin_lock_bh(&queue->lock);
queue             542 drivers/net/wireless/st/cw1200/queue.c 	ret = !list_empty(&queue->pending);
queue             544 drivers/net/wireless/st/cw1200/queue.c 		list_for_each_entry(item, &queue->pending, head) {
queue             551 drivers/net/wireless/st/cw1200/queue.c 	spin_unlock_bh(&queue->lock);
queue              33 drivers/net/wireless/st/cw1200/queue.h 	struct list_head	queue;
queue              68 drivers/net/wireless/st/cw1200/queue.h int cw1200_queue_init(struct cw1200_queue *queue,
queue              73 drivers/net/wireless/st/cw1200/queue.h int cw1200_queue_clear(struct cw1200_queue *queue);
queue              75 drivers/net/wireless/st/cw1200/queue.h void cw1200_queue_deinit(struct cw1200_queue *queue);
queue              77 drivers/net/wireless/st/cw1200/queue.h size_t cw1200_queue_get_num_queued(struct cw1200_queue *queue,
queue              79 drivers/net/wireless/st/cw1200/queue.h int cw1200_queue_put(struct cw1200_queue *queue,
queue              82 drivers/net/wireless/st/cw1200/queue.h int cw1200_queue_get(struct cw1200_queue *queue,
queue              87 drivers/net/wireless/st/cw1200/queue.h int cw1200_queue_requeue(struct cw1200_queue *queue, u32 packet_id);
queue              88 drivers/net/wireless/st/cw1200/queue.h int cw1200_queue_requeue_all(struct cw1200_queue *queue);
queue              89 drivers/net/wireless/st/cw1200/queue.h int cw1200_queue_remove(struct cw1200_queue *queue,
queue              91 drivers/net/wireless/st/cw1200/queue.h int cw1200_queue_get_skb(struct cw1200_queue *queue, u32 packet_id,
queue              94 drivers/net/wireless/st/cw1200/queue.h void cw1200_queue_lock(struct cw1200_queue *queue);
queue              95 drivers/net/wireless/st/cw1200/queue.h void cw1200_queue_unlock(struct cw1200_queue *queue);
queue              96 drivers/net/wireless/st/cw1200/queue.h bool cw1200_queue_get_xmit_timestamp(struct cw1200_queue *queue,
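
Taken together, the queue.h entries above define the complete TX bookkeeping API. A hypothetical driver-side round trip (argument lists abbreviated to what the index shows; the real callers are in txrx.c and wsm.c below):

        cw1200_queue_init(&q, &stats, queue_id, capacity, ttl);

        cw1200_queue_put(&q, skb, &txpriv);       /* driver enqueues        */
        cw1200_queue_get(&q, link_id_map, ...);   /* queue -> pending, frame
                                                   * handed to firmware     */
        /* on TX confirmation: */
        cw1200_queue_remove(&q, packet_id);       /* success: free the slot */
        /* or, if the firmware asked for a retry: */
        cw1200_queue_requeue(&q, packet_id);      /* pending -> queue       */

        cw1200_queue_deinit(&q);
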
queue             361 drivers/net/wireless/st/cw1200/scan.c 	struct cw1200_queue *queue = &priv->tx_queue[queue_id];
queue             399 drivers/net/wireless/st/cw1200/scan.c 	if (cw1200_queue_get_skb(queue,	priv->pending_frame_id,
queue             457 drivers/net/wireless/st/cw1200/scan.c 	BUG_ON(cw1200_queue_remove(queue, priv->pending_frame_id));
queue             609 drivers/net/wireless/st/cw1200/sta.c 		   u16 queue, const struct ieee80211_tx_queue_params *params)
queue             618 drivers/net/wireless/st/cw1200/sta.c 	if (queue < dev->queues) {
queue             621 drivers/net/wireless/st/cw1200/sta.c 		WSM_TX_QUEUE_SET(&priv->tx_queue_params, queue, 0, 0, 0);
queue             623 drivers/net/wireless/st/cw1200/sta.c 					      &priv->tx_queue_params.params[queue], queue);
queue             629 drivers/net/wireless/st/cw1200/sta.c 		WSM_EDCA_SET(&priv->edca, queue, params->aifs,
queue             846 drivers/net/wireless/st/cw1200/sta.c 	struct cw1200_queue *queue = &priv->tx_queue[queue_id];
queue             855 drivers/net/wireless/st/cw1200/sta.c 	cw1200_queue_requeue(queue, priv->pending_frame_id);
queue              31 drivers/net/wireless/st/cw1200/sta.h 		   u16 queue, const struct ieee80211_tx_queue_params *params);
queue             402 drivers/net/wireless/st/cw1200/txrx.c 	unsigned queue;
queue             469 drivers/net/wireless/st/cw1200/txrx.c 	if (t->sta && (t->sta->uapsd_queues & BIT(t->queue)))
queue             579 drivers/net/wireless/st/cw1200/txrx.c 	wsm->queue_id = wsm_queue_id_to_wsm(t->queue);
queue             710 drivers/net/wireless/st/cw1200/txrx.c 		.queue = skb_get_queue_mapping(skb),
queue             732 drivers/net/wireless/st/cw1200/txrx.c 	if (WARN_ON(t.queue >= 4))
queue             740 drivers/net/wireless/st/cw1200/txrx.c 		 skb->len, t.queue, t.txpriv.link_id,
queue             771 drivers/net/wireless/st/cw1200/txrx.c 		BUG_ON(cw1200_queue_put(&priv->tx_queue[t.queue],
queue             855 drivers/net/wireless/st/cw1200/txrx.c 	struct cw1200_queue *queue = &priv->tx_queue[queue_id];
queue             886 drivers/net/wireless/st/cw1200/txrx.c 		cw1200_queue_requeue(queue, arg->packet_id);
queue             896 drivers/net/wireless/st/cw1200/txrx.c 	} else if (!cw1200_queue_get_skb(queue, arg->packet_id,
queue             956 drivers/net/wireless/st/cw1200/txrx.c 		cw1200_queue_remove(queue, arg->packet_id);
queue            1060 drivers/net/wireless/st/cw1200/wsm.c 	arg.queue = (flags >> 1) & 3;
queue            1459 drivers/net/wireless/st/cw1200/wsm.c 			       struct cw1200_queue *queue)
queue            1556 drivers/net/wireless/st/cw1200/wsm.c 		BUG_ON(cw1200_queue_remove(queue, wsm->packet_id));
queue            1665 drivers/net/wireless/st/cw1200/wsm.c 	struct cw1200_queue *queue = NULL;
queue            1691 drivers/net/wireless/st/cw1200/wsm.c 			ret = wsm_get_tx_queue_and_mask(priv, &queue,
queue            1693 drivers/net/wireless/st/cw1200/wsm.c 			queue_num = queue - priv->tx_queue;
queue            1711 drivers/net/wireless/st/cw1200/wsm.c 			if (cw1200_queue_get(queue,
queue            1717 drivers/net/wireless/st/cw1200/wsm.c 					       tx_info, txpriv, queue))
queue            1732 drivers/net/wireless/st/cw1200/wsm.c 					     (int)cw1200_queue_get_num_queued(queue, tx_allowed_mask) + 1);
queue            1099 drivers/net/wireless/st/cw1200/wsm.h #define WSM_TX_QUEUE_SET(queue_params, queue, ack_policy, allowed_time,\
queue            1102 drivers/net/wireless/st/cw1200/wsm.h 	struct wsm_set_tx_queue_params *p = &(queue_params)->params[queue]; \
queue            1256 drivers/net/wireless/st/cw1200/wsm.h 	/* [out] */ int queue;
queue            1023 drivers/net/wireless/ti/wl1251/acx.c int wl1251_acx_tid_cfg(struct wl1251 *wl, u8 queue,
queue            1032 drivers/net/wireless/ti/wl1251/acx.c 		     "ps_scheme %d ack_policy %d", queue, type, tsid,
queue            1039 drivers/net/wireless/ti/wl1251/acx.c 	acx->queue = queue;
queue            1296 drivers/net/wireless/ti/wl1251/acx.h 	u8 queue;
queue            1480 drivers/net/wireless/ti/wl1251/acx.h int wl1251_acx_tid_cfg(struct wl1251 *wl, u8 queue,
queue            1285 drivers/net/wireless/ti/wl1251/main.c 			     struct ieee80211_vif *vif, u16 queue,
queue            1294 drivers/net/wireless/ti/wl1251/main.c 	wl1251_debug(DEBUG_MAC80211, "mac80211 conf tx %d", queue);
queue            1301 drivers/net/wireless/ti/wl1251/main.c 	ret = wl1251_acx_ac_cfg(wl, wl1251_tx_get_queue(queue),
queue            1312 drivers/net/wireless/ti/wl1251/main.c 	ret = wl1251_acx_tid_cfg(wl, wl1251_tx_get_queue(queue),
queue            1314 drivers/net/wireless/ti/wl1251/main.c 				 wl1251_tx_get_queue(queue), ps_scheme,
queue             197 drivers/net/wireless/ti/wl1251/tx.h static inline int wl1251_tx_get_queue(int queue)
queue             199 drivers/net/wireless/ti/wl1251/tx.h 	switch (queue) {
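
wl1251_tx_get_queue() at tx.h:197-199 above (and wl1271_tx_get_queue() below) translates mac80211's queue index, which is ordered VO, VI, BE, BK, into the firmware's own AC numbering. The shape of that switch, with FW_AC_* standing in for the driver's real constants:

        #include <net/mac80211.h>

        enum ex_fw_ac { FW_AC_BE = 0, FW_AC_BK, FW_AC_VI, FW_AC_VO };  /* hypothetical */

        static inline enum ex_fw_ac ex_tx_get_queue(int queue)
        {
                switch (queue) {
                case IEEE80211_AC_VO: return FW_AC_VO;
                case IEEE80211_AC_VI: return FW_AC_VI;
                case IEEE80211_AC_BE: return FW_AC_BE;
                case IEEE80211_AC_BK: return FW_AC_BK;
                default:              return FW_AC_BE; /* safe fallback */
                }
        }
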
queue            4915 drivers/net/wireless/ti/wlcore/main.c 			     struct ieee80211_vif *vif, u16 queue,
queue            4928 drivers/net/wireless/ti/wlcore/main.c 	wl1271_debug(DEBUG_MAC80211, "mac80211 conf tx %d", queue);
queue            4948 drivers/net/wireless/ti/wlcore/main.c 	ret = wl1271_acx_ac_cfg(wl, wlvif, wl1271_tx_get_queue(queue),
queue            4954 drivers/net/wireless/ti/wlcore/main.c 	ret = wl1271_acx_tid_cfg(wl, wlvif, wl1271_tx_get_queue(queue),
queue            4956 drivers/net/wireless/ti/wlcore/main.c 				 wl1271_tx_get_queue(queue),
queue            1198 drivers/net/wireless/ti/wlcore/tx.c 			      u8 queue, enum wlcore_queue_stop_reason reason)
queue            1200 drivers/net/wireless/ti/wlcore/tx.c 	int hwq = wlcore_tx_get_mac80211_queue(wlvif, queue);
queue            1212 drivers/net/wireless/ti/wlcore/tx.c void wlcore_stop_queue(struct wl1271 *wl, struct wl12xx_vif *wlvif, u8 queue,
queue            1218 drivers/net/wireless/ti/wlcore/tx.c 	wlcore_stop_queue_locked(wl, wlvif, queue, reason);
queue            1222 drivers/net/wireless/ti/wlcore/tx.c void wlcore_wake_queue(struct wl1271 *wl, struct wl12xx_vif *wlvif, u8 queue,
queue            1226 drivers/net/wireless/ti/wlcore/tx.c 	int hwq = wlcore_tx_get_mac80211_queue(wlvif, queue);
queue            1285 drivers/net/wireless/ti/wlcore/tx.c 				       struct wl12xx_vif *wlvif, u8 queue,
queue            1292 drivers/net/wireless/ti/wlcore/tx.c 	stopped = wlcore_is_queue_stopped_by_reason_locked(wl, wlvif, queue,
queue            1300 drivers/net/wireless/ti/wlcore/tx.c 				       struct wl12xx_vif *wlvif, u8 queue,
queue            1303 drivers/net/wireless/ti/wlcore/tx.c 	int hwq = wlcore_tx_get_mac80211_queue(wlvif, queue);
queue            1310 drivers/net/wireless/ti/wlcore/tx.c 				    u8 queue)
queue            1312 drivers/net/wireless/ti/wlcore/tx.c 	int hwq = wlcore_tx_get_mac80211_queue(wlvif, queue);
queue             184 drivers/net/wireless/ti/wlcore/tx.h static inline int wl1271_tx_get_queue(int queue)
queue             186 drivers/net/wireless/ti/wlcore/tx.h 	switch (queue) {
queue             201 drivers/net/wireless/ti/wlcore/tx.h int wlcore_tx_get_mac80211_queue(struct wl12xx_vif *wlvif, int queue)
queue             205 drivers/net/wireless/ti/wlcore/tx.h 	switch (queue) {
queue             249 drivers/net/wireless/ti/wlcore/tx.h 			      u8 queue, enum wlcore_queue_stop_reason reason);
queue             250 drivers/net/wireless/ti/wlcore/tx.h void wlcore_stop_queue(struct wl1271 *wl, struct wl12xx_vif *wlvif, u8 queue,
queue             252 drivers/net/wireless/ti/wlcore/tx.h void wlcore_wake_queue(struct wl1271 *wl, struct wl12xx_vif *wlvif, u8 queue,
queue             259 drivers/net/wireless/ti/wlcore/tx.h 				       struct wl12xx_vif *wlvif, u8 queue,
queue             264 drivers/net/wireless/ti/wlcore/tx.h 					 u8 queue,
queue             267 drivers/net/wireless/ti/wlcore/tx.h 				    u8 queue);
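
The wlcore stop/wake helpers above add a twist over cw1200's counter: each stopped hw queue carries a reason bitmask, so independent subsystems can stop the same queue without stepping on each other, and the queue only wakes when every reason is cleared. A sketch of the pattern, assuming a per-hwq unsigned long reason mask (the field name is an assumption; wlcore keeps its mask on struct wl1271):

        #include <linux/bitops.h>
        #include <net/mac80211.h>

        struct ex_wl {
                struct ieee80211_hw *hw;
                unsigned long queue_stop_reasons[16];   /* size illustrative */
        };

        static void ex_stop_queue(struct ex_wl *wl, int hwq, int reason)
        {
                bool was_stopped = !!wl->queue_stop_reasons[hwq];

                set_bit(reason, &wl->queue_stop_reasons[hwq]);
                if (!was_stopped)
                        ieee80211_stop_queue(wl->hw, hwq);
        }

        static void ex_wake_queue(struct ex_wl *wl, int hwq, int reason)
        {
                clear_bit(reason, &wl->queue_stop_reasons[hwq]);
                if (!wl->queue_stop_reasons[hwq])
                        ieee80211_wake_queue(wl->hw, hwq);
        }
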
queue             335 drivers/net/xen-netback/common.h int xenvif_init_queue(struct xenvif_queue *queue);
queue             336 drivers/net/xen-netback/common.h void xenvif_deinit_queue(struct xenvif_queue *queue);
queue             338 drivers/net/xen-netback/common.h int xenvif_connect_data(struct xenvif_queue *queue,
queue             354 drivers/net/xen-netback/common.h int xenvif_queue_stopped(struct xenvif_queue *queue);
queue             355 drivers/net/xen-netback/common.h void xenvif_wake_queue(struct xenvif_queue *queue);
queue             358 drivers/net/xen-netback/common.h void xenvif_unmap_frontend_data_rings(struct xenvif_queue *queue);
queue             359 drivers/net/xen-netback/common.h int xenvif_map_frontend_data_rings(struct xenvif_queue *queue,
queue             364 drivers/net/xen-netback/common.h void xenvif_napi_schedule_or_enable_events(struct xenvif_queue *queue);
queue             369 drivers/net/xen-netback/common.h int xenvif_tx_action(struct xenvif_queue *queue, int budget);
queue             372 drivers/net/xen-netback/common.h void xenvif_kick_thread(struct xenvif_queue *queue);
queue             378 drivers/net/xen-netback/common.h void xenvif_rx_action(struct xenvif_queue *queue);
queue             379 drivers/net/xen-netback/common.h void xenvif_rx_queue_tail(struct xenvif_queue *queue, struct sk_buff *skb);
queue             387 drivers/net/xen-netback/common.h void xenvif_idx_unmap(struct xenvif_queue *queue, u16 pending_idx);
queue             389 drivers/net/xen-netback/common.h static inline pending_ring_idx_t nr_pending_reqs(struct xenvif_queue *queue)
queue             392 drivers/net/xen-netback/common.h 		queue->pending_prod + queue->pending_cons;
queue             408 drivers/net/xen-netback/common.h void xenvif_skb_zerocopy_prepare(struct xenvif_queue *queue,
queue             410 drivers/net/xen-netback/common.h void xenvif_skb_zerocopy_complete(struct xenvif_queue *queue);
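
nr_pending_reqs() (common.h:389, continued at :392 above) is free-running ring arithmetic: pending_prod counts slots returned to the pending ring, pending_cons counts slots consumed from it, so outstanding requests are MAX_PENDING_REQS - (prod - cons). The indices are only masked when used, via pending_index() as seen throughout netback.c below. A sketch with an illustrative ring size (it must stay a power of two for the mask to work):

        #define EX_MAX_PENDING_REQS 256         /* value is illustrative */

        static inline u16 ex_pending_index(unsigned int i)
        {
                return i & (EX_MAX_PENDING_REQS - 1);
        }

        static inline unsigned int ex_nr_pending_reqs(unsigned int prod,
                                                      unsigned int cons)
        {
                return EX_MAX_PENDING_REQS - prod + cons;
        }
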
queue              55 drivers/net/xen-netback/interface.c void xenvif_skb_zerocopy_prepare(struct xenvif_queue *queue,
queue              59 drivers/net/xen-netback/interface.c 	atomic_inc(&queue->inflight_packets);
queue              62 drivers/net/xen-netback/interface.c void xenvif_skb_zerocopy_complete(struct xenvif_queue *queue)
queue              64 drivers/net/xen-netback/interface.c 	atomic_dec(&queue->inflight_packets);
queue              70 drivers/net/xen-netback/interface.c 	wake_up(&queue->dealloc_wq);
queue              82 drivers/net/xen-netback/interface.c 	struct xenvif_queue *queue = dev_id;
queue              84 drivers/net/xen-netback/interface.c 	if (RING_HAS_UNCONSUMED_REQUESTS(&queue->tx))
queue              85 drivers/net/xen-netback/interface.c 		napi_schedule(&queue->napi);
queue              92 drivers/net/xen-netback/interface.c 	struct xenvif_queue *queue =
queue             100 drivers/net/xen-netback/interface.c 	if (unlikely(queue->vif->disabled)) {
queue             105 drivers/net/xen-netback/interface.c 	work_done = xenvif_tx_action(queue, budget);
queue             112 drivers/net/xen-netback/interface.c 		if (likely(!queue->rate_limited))
queue             113 drivers/net/xen-netback/interface.c 			xenvif_napi_schedule_or_enable_events(queue);
queue             121 drivers/net/xen-netback/interface.c 	struct xenvif_queue *queue = dev_id;
queue             123 drivers/net/xen-netback/interface.c 	xenvif_kick_thread(queue);
queue             136 drivers/net/xen-netback/interface.c int xenvif_queue_stopped(struct xenvif_queue *queue)
queue             138 drivers/net/xen-netback/interface.c 	struct net_device *dev = queue->vif->dev;
queue             139 drivers/net/xen-netback/interface.c 	unsigned int id = queue->id;
queue             143 drivers/net/xen-netback/interface.c void xenvif_wake_queue(struct xenvif_queue *queue)
queue             145 drivers/net/xen-netback/interface.c 	struct net_device *dev = queue->vif->dev;
queue             146 drivers/net/xen-netback/interface.c 	unsigned int id = queue->id;
queue             180 drivers/net/xen-netback/interface.c 	struct xenvif_queue *queue = NULL;
queue             202 drivers/net/xen-netback/interface.c 	queue = &vif->queues[index];
queue             205 drivers/net/xen-netback/interface.c 	if (queue->task == NULL ||
queue             206 drivers/net/xen-netback/interface.c 	    queue->dealloc_task == NULL ||
queue             227 drivers/net/xen-netback/interface.c 	xenvif_rx_queue_tail(queue, skb);
queue             228 drivers/net/xen-netback/interface.c 	xenvif_kick_thread(queue);
queue             241 drivers/net/xen-netback/interface.c 	struct xenvif_queue *queue = NULL;
queue             254 drivers/net/xen-netback/interface.c 		queue = &vif->queues[index];
queue             255 drivers/net/xen-netback/interface.c 		rx_bytes += queue->stats.rx_bytes;
queue             256 drivers/net/xen-netback/interface.c 		rx_packets += queue->stats.rx_packets;
queue             257 drivers/net/xen-netback/interface.c 		tx_bytes += queue->stats.tx_bytes;
queue             258 drivers/net/xen-netback/interface.c 		tx_packets += queue->stats.tx_packets;
queue             273 drivers/net/xen-netback/interface.c 	struct xenvif_queue *queue = NULL;
queue             278 drivers/net/xen-netback/interface.c 		queue = &vif->queues[queue_index];
queue             279 drivers/net/xen-netback/interface.c 		napi_enable(&queue->napi);
queue             280 drivers/net/xen-netback/interface.c 		enable_irq(queue->tx_irq);
queue             281 drivers/net/xen-netback/interface.c 		if (queue->tx_irq != queue->rx_irq)
queue             282 drivers/net/xen-netback/interface.c 			enable_irq(queue->rx_irq);
queue             283 drivers/net/xen-netback/interface.c 		xenvif_napi_schedule_or_enable_events(queue);
queue             289 drivers/net/xen-netback/interface.c 	struct xenvif_queue *queue = NULL;
queue             294 drivers/net/xen-netback/interface.c 		queue = &vif->queues[queue_index];
queue             295 drivers/net/xen-netback/interface.c 		disable_irq(queue->tx_irq);
queue             296 drivers/net/xen-netback/interface.c 		if (queue->tx_irq != queue->rx_irq)
queue             297 drivers/net/xen-netback/interface.c 			disable_irq(queue->rx_irq);
queue             298 drivers/net/xen-netback/interface.c 		napi_disable(&queue->napi);
queue             299 drivers/net/xen-netback/interface.c 		del_timer_sync(&queue->credit_timeout);
queue             526 drivers/net/xen-netback/interface.c int xenvif_init_queue(struct xenvif_queue *queue)
queue             530 drivers/net/xen-netback/interface.c 	queue->credit_bytes = queue->remaining_credit = ~0UL;
queue             531 drivers/net/xen-netback/interface.c 	queue->credit_usec  = 0UL;
queue             532 drivers/net/xen-netback/interface.c 	timer_setup(&queue->credit_timeout, xenvif_tx_credit_callback, 0);
queue             533 drivers/net/xen-netback/interface.c 	queue->credit_window_start = get_jiffies_64();
queue             535 drivers/net/xen-netback/interface.c 	queue->rx_queue_max = XENVIF_RX_QUEUE_BYTES;
queue             537 drivers/net/xen-netback/interface.c 	skb_queue_head_init(&queue->rx_queue);
queue             538 drivers/net/xen-netback/interface.c 	skb_queue_head_init(&queue->tx_queue);
queue             540 drivers/net/xen-netback/interface.c 	queue->pending_cons = 0;
queue             541 drivers/net/xen-netback/interface.c 	queue->pending_prod = MAX_PENDING_REQS;
queue             543 drivers/net/xen-netback/interface.c 		queue->pending_ring[i] = i;
queue             545 drivers/net/xen-netback/interface.c 	spin_lock_init(&queue->callback_lock);
queue             546 drivers/net/xen-netback/interface.c 	spin_lock_init(&queue->response_lock);
queue             553 drivers/net/xen-netback/interface.c 				 queue->mmap_pages);
queue             555 drivers/net/xen-netback/interface.c 		netdev_err(queue->vif->dev, "Could not reserve mmap_pages\n");
queue             560 drivers/net/xen-netback/interface.c 		queue->pending_tx_info[i].callback_struct = (struct ubuf_info)
queue             564 drivers/net/xen-netback/interface.c 		queue->grant_tx_handle[i] = NETBACK_INVALID_HANDLE;
queue             629 drivers/net/xen-netback/interface.c int xenvif_connect_data(struct xenvif_queue *queue,
queue             638 drivers/net/xen-netback/interface.c 	BUG_ON(queue->tx_irq);
queue             639 drivers/net/xen-netback/interface.c 	BUG_ON(queue->task);
queue             640 drivers/net/xen-netback/interface.c 	BUG_ON(queue->dealloc_task);
queue             642 drivers/net/xen-netback/interface.c 	err = xenvif_map_frontend_data_rings(queue, tx_ring_ref,
queue             647 drivers/net/xen-netback/interface.c 	init_waitqueue_head(&queue->wq);
queue             648 drivers/net/xen-netback/interface.c 	init_waitqueue_head(&queue->dealloc_wq);
queue             649 drivers/net/xen-netback/interface.c 	atomic_set(&queue->inflight_packets, 0);
queue             651 drivers/net/xen-netback/interface.c 	netif_napi_add(queue->vif->dev, &queue->napi, xenvif_poll,
queue             657 drivers/net/xen-netback/interface.c 			queue->vif->domid, tx_evtchn, xenvif_interrupt, 0,
queue             658 drivers/net/xen-netback/interface.c 			queue->name, queue);
queue             661 drivers/net/xen-netback/interface.c 		queue->tx_irq = queue->rx_irq = err;
queue             662 drivers/net/xen-netback/interface.c 		disable_irq(queue->tx_irq);
queue             665 drivers/net/xen-netback/interface.c 		snprintf(queue->tx_irq_name, sizeof(queue->tx_irq_name),
queue             666 drivers/net/xen-netback/interface.c 			 "%s-tx", queue->name);
queue             668 drivers/net/xen-netback/interface.c 			queue->vif->domid, tx_evtchn, xenvif_tx_interrupt, 0,
queue             669 drivers/net/xen-netback/interface.c 			queue->tx_irq_name, queue);
queue             672 drivers/net/xen-netback/interface.c 		queue->tx_irq = err;
queue             673 drivers/net/xen-netback/interface.c 		disable_irq(queue->tx_irq);
queue             675 drivers/net/xen-netback/interface.c 		snprintf(queue->rx_irq_name, sizeof(queue->rx_irq_name),
queue             676 drivers/net/xen-netback/interface.c 			 "%s-rx", queue->name);
queue             678 drivers/net/xen-netback/interface.c 			queue->vif->domid, rx_evtchn, xenvif_rx_interrupt, 0,
queue             679 drivers/net/xen-netback/interface.c 			queue->rx_irq_name, queue);
queue             682 drivers/net/xen-netback/interface.c 		queue->rx_irq = err;
queue             683 drivers/net/xen-netback/interface.c 		disable_irq(queue->rx_irq);
queue             686 drivers/net/xen-netback/interface.c 	queue->stalled = true;
queue             689 drivers/net/xen-netback/interface.c 			      (void *)queue, "%s-guest-rx", queue->name);
queue             691 drivers/net/xen-netback/interface.c 		pr_warn("Could not allocate kthread for %s\n", queue->name);
queue             695 drivers/net/xen-netback/interface.c 	queue->task = task;
queue             699 drivers/net/xen-netback/interface.c 			      (void *)queue, "%s-dealloc", queue->name);
queue             701 drivers/net/xen-netback/interface.c 		pr_warn("Could not allocate kthread for %s\n", queue->name);
queue             705 drivers/net/xen-netback/interface.c 	queue->dealloc_task = task;
queue             707 drivers/net/xen-netback/interface.c 	wake_up_process(queue->task);
queue             708 drivers/net/xen-netback/interface.c 	wake_up_process(queue->dealloc_task);
queue             713 drivers/net/xen-netback/interface.c 	unbind_from_irqhandler(queue->rx_irq, queue);
queue             714 drivers/net/xen-netback/interface.c 	queue->rx_irq = 0;
queue             716 drivers/net/xen-netback/interface.c 	unbind_from_irqhandler(queue->tx_irq, queue);
queue             717 drivers/net/xen-netback/interface.c 	queue->tx_irq = 0;
queue             719 drivers/net/xen-netback/interface.c 	xenvif_unmap_frontend_data_rings(queue);
queue             720 drivers/net/xen-netback/interface.c 	netif_napi_del(&queue->napi);
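
xenvif_connect_data() above handles both event-channel layouts: a single shared channel (tx_evtchn == rx_evtchn), where one handler does both jobs and tx_irq == rx_irq, or split channels with dedicated "%s-tx"/"%s-rx" handlers. The shared-channel handler is simply the two split handlers run back to back, as in the real xenvif_interrupt():

        #include <linux/interrupt.h>

        static irqreturn_t ex_shared_interrupt(int irq, void *dev_id)
        {
                xenvif_tx_interrupt(irq, dev_id);  /* napi_schedule() if TX work */
                xenvif_rx_interrupt(irq, dev_id);  /* kick the RX kthread */
                return IRQ_HANDLED;
        }
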
queue             740 drivers/net/xen-netback/interface.c 	struct xenvif_queue *queue = NULL;
queue             747 drivers/net/xen-netback/interface.c 		queue = &vif->queues[queue_index];
queue             749 drivers/net/xen-netback/interface.c 		netif_napi_del(&queue->napi);
queue             751 drivers/net/xen-netback/interface.c 		if (queue->task) {
queue             752 drivers/net/xen-netback/interface.c 			kthread_stop(queue->task);
queue             753 drivers/net/xen-netback/interface.c 			put_task_struct(queue->task);
queue             754 drivers/net/xen-netback/interface.c 			queue->task = NULL;
queue             757 drivers/net/xen-netback/interface.c 		if (queue->dealloc_task) {
queue             758 drivers/net/xen-netback/interface.c 			kthread_stop(queue->dealloc_task);
queue             759 drivers/net/xen-netback/interface.c 			queue->dealloc_task = NULL;
queue             762 drivers/net/xen-netback/interface.c 		if (queue->tx_irq) {
queue             763 drivers/net/xen-netback/interface.c 			if (queue->tx_irq == queue->rx_irq)
queue             764 drivers/net/xen-netback/interface.c 				unbind_from_irqhandler(queue->tx_irq, queue);
queue             766 drivers/net/xen-netback/interface.c 				unbind_from_irqhandler(queue->tx_irq, queue);
queue             767 drivers/net/xen-netback/interface.c 				unbind_from_irqhandler(queue->rx_irq, queue);
queue             769 drivers/net/xen-netback/interface.c 			queue->tx_irq = 0;
queue             772 drivers/net/xen-netback/interface.c 		xenvif_unmap_frontend_data_rings(queue);
queue             797 drivers/net/xen-netback/interface.c void xenvif_deinit_queue(struct xenvif_queue *queue)
queue             799 drivers/net/xen-netback/interface.c 	gnttab_free_pages(MAX_PENDING_REQS, queue->mmap_pages);
queue              99 drivers/net/xen-netback/netback.c static void xenvif_idx_release(struct xenvif_queue *queue, u16 pending_idx,
queue             102 drivers/net/xen-netback/netback.c static void make_tx_response(struct xenvif_queue *queue,
queue             106 drivers/net/xen-netback/netback.c static void push_tx_responses(struct xenvif_queue *queue);
queue             108 drivers/net/xen-netback/netback.c static inline int tx_work_todo(struct xenvif_queue *queue);
queue             110 drivers/net/xen-netback/netback.c static inline unsigned long idx_to_pfn(struct xenvif_queue *queue,
queue             113 drivers/net/xen-netback/netback.c 	return page_to_pfn(queue->mmap_pages[idx]);
queue             116 drivers/net/xen-netback/netback.c static inline unsigned long idx_to_kaddr(struct xenvif_queue *queue,
queue             119 drivers/net/xen-netback/netback.c 	return (unsigned long)pfn_to_kaddr(idx_to_pfn(queue, idx));
queue             152 drivers/net/xen-netback/netback.c void xenvif_kick_thread(struct xenvif_queue *queue)
queue             154 drivers/net/xen-netback/netback.c 	wake_up(&queue->wq);
queue             157 drivers/net/xen-netback/netback.c void xenvif_napi_schedule_or_enable_events(struct xenvif_queue *queue)
queue             161 drivers/net/xen-netback/netback.c 	RING_FINAL_CHECK_FOR_REQUESTS(&queue->tx, more_to_do);
queue             164 drivers/net/xen-netback/netback.c 		napi_schedule(&queue->napi);
queue             167 drivers/net/xen-netback/netback.c static void tx_add_credit(struct xenvif_queue *queue)
queue             175 drivers/net/xen-netback/netback.c 	max_burst = max(131072UL, queue->credit_bytes);
queue             178 drivers/net/xen-netback/netback.c 	max_credit = queue->remaining_credit + queue->credit_bytes;
queue             179 drivers/net/xen-netback/netback.c 	if (max_credit < queue->remaining_credit)
queue             182 drivers/net/xen-netback/netback.c 	queue->remaining_credit = min(max_credit, max_burst);
queue             183 drivers/net/xen-netback/netback.c 	queue->rate_limited = false;
queue             188 drivers/net/xen-netback/netback.c 	struct xenvif_queue *queue = from_timer(queue, t, credit_timeout);
queue             189 drivers/net/xen-netback/netback.c 	tx_add_credit(queue);
queue             190 drivers/net/xen-netback/netback.c 	xenvif_napi_schedule_or_enable_events(queue);
queue             193 drivers/net/xen-netback/netback.c static void xenvif_tx_err(struct xenvif_queue *queue,
queue             197 drivers/net/xen-netback/netback.c 	RING_IDX cons = queue->tx.req_cons;
queue             201 drivers/net/xen-netback/netback.c 		spin_lock_irqsave(&queue->response_lock, flags);
queue             202 drivers/net/xen-netback/netback.c 		make_tx_response(queue, txp, extra_count, XEN_NETIF_RSP_ERROR);
queue             203 drivers/net/xen-netback/netback.c 		push_tx_responses(queue);
queue             204 drivers/net/xen-netback/netback.c 		spin_unlock_irqrestore(&queue->response_lock, flags);
queue             207 drivers/net/xen-netback/netback.c 		RING_COPY_REQUEST(&queue->tx, cons++, txp);
queue             210 drivers/net/xen-netback/netback.c 	queue->tx.req_cons = cons;
queue             222 drivers/net/xen-netback/netback.c static int xenvif_count_requests(struct xenvif_queue *queue,
queue             228 drivers/net/xen-netback/netback.c 	RING_IDX cons = queue->tx.req_cons;
queue             240 drivers/net/xen-netback/netback.c 			netdev_err(queue->vif->dev,
queue             243 drivers/net/xen-netback/netback.c 			xenvif_fatal_tx_err(queue->vif);
queue             251 drivers/net/xen-netback/netback.c 			netdev_err(queue->vif->dev,
queue             254 drivers/net/xen-netback/netback.c 			xenvif_fatal_tx_err(queue->vif);
queue             267 drivers/net/xen-netback/netback.c 				netdev_dbg(queue->vif->dev,
queue             276 drivers/net/xen-netback/netback.c 		RING_COPY_REQUEST(&queue->tx, cons + slots, txp);
queue             289 drivers/net/xen-netback/netback.c 				netdev_dbg(queue->vif->dev,
queue             299 drivers/net/xen-netback/netback.c 			netdev_err(queue->vif->dev, "Cross page boundary, txp->offset: %u, size: %u\n",
queue             301 drivers/net/xen-netback/netback.c 			xenvif_fatal_tx_err(queue->vif);
queue             313 drivers/net/xen-netback/netback.c 		xenvif_tx_err(queue, first, extra_count, cons + slots);
queue             327 drivers/net/xen-netback/netback.c static inline void xenvif_tx_create_map_op(struct xenvif_queue *queue,
queue             333 drivers/net/xen-netback/netback.c 	queue->pages_to_map[mop-queue->tx_map_ops] = queue->mmap_pages[pending_idx];
queue             334 drivers/net/xen-netback/netback.c 	gnttab_set_map_op(mop, idx_to_kaddr(queue, pending_idx),
queue             336 drivers/net/xen-netback/netback.c 			  txp->gref, queue->vif->domid);
queue             338 drivers/net/xen-netback/netback.c 	memcpy(&queue->pending_tx_info[pending_idx].req, txp,
queue             340 drivers/net/xen-netback/netback.c 	queue->pending_tx_info[pending_idx].extra_count = extra_count;
queue             360 drivers/net/xen-netback/netback.c static struct gnttab_map_grant_ref *xenvif_get_requests(struct xenvif_queue *queue,
queue             381 drivers/net/xen-netback/netback.c 		index = pending_index(queue->pending_cons++);
queue             382 drivers/net/xen-netback/netback.c 		pending_idx = queue->pending_ring[index];
queue             383 drivers/net/xen-netback/netback.c 		xenvif_tx_create_map_op(queue, pending_idx, txp, 0, gop);
queue             394 drivers/net/xen-netback/netback.c 			index = pending_index(queue->pending_cons++);
queue             395 drivers/net/xen-netback/netback.c 			pending_idx = queue->pending_ring[index];
queue             396 drivers/net/xen-netback/netback.c 			xenvif_tx_create_map_op(queue, pending_idx, txp, 0,
queue             408 drivers/net/xen-netback/netback.c static inline void xenvif_grant_handle_set(struct xenvif_queue *queue,
queue             412 drivers/net/xen-netback/netback.c 	if (unlikely(queue->grant_tx_handle[pending_idx] !=
queue             414 drivers/net/xen-netback/netback.c 		netdev_err(queue->vif->dev,
queue             419 drivers/net/xen-netback/netback.c 	queue->grant_tx_handle[pending_idx] = handle;
queue             422 drivers/net/xen-netback/netback.c static inline void xenvif_grant_handle_reset(struct xenvif_queue *queue,
queue             425 drivers/net/xen-netback/netback.c 	if (unlikely(queue->grant_tx_handle[pending_idx] ==
queue             427 drivers/net/xen-netback/netback.c 		netdev_err(queue->vif->dev,
queue             432 drivers/net/xen-netback/netback.c 	queue->grant_tx_handle[pending_idx] = NETBACK_INVALID_HANDLE;
queue             435 drivers/net/xen-netback/netback.c static int xenvif_tx_check_gop(struct xenvif_queue *queue,
queue             459 drivers/net/xen-netback/netback.c 			netdev_dbg(queue->vif->dev,
queue             466 drivers/net/xen-netback/netback.c 			xenvif_idx_release(queue, pending_idx,
queue             481 drivers/net/xen-netback/netback.c 			xenvif_grant_handle_set(queue,
queue             486 drivers/net/xen-netback/netback.c 				xenvif_idx_unmap(queue, pending_idx);
queue             492 drivers/net/xen-netback/netback.c 					xenvif_idx_release(queue, pending_idx,
queue             495 drivers/net/xen-netback/netback.c 					xenvif_idx_release(queue, pending_idx,
queue             503 drivers/net/xen-netback/netback.c 			netdev_dbg(queue->vif->dev,
queue             510 drivers/net/xen-netback/netback.c 		xenvif_idx_release(queue, pending_idx, XEN_NETIF_RSP_ERROR);
queue             520 drivers/net/xen-netback/netback.c 			xenvif_idx_release(queue,
queue             527 drivers/net/xen-netback/netback.c 			xenvif_idx_unmap(queue, pending_idx);
queue             528 drivers/net/xen-netback/netback.c 			xenvif_idx_release(queue, pending_idx,
queue             538 drivers/net/xen-netback/netback.c 				xenvif_idx_unmap(queue, pending_idx);
queue             539 drivers/net/xen-netback/netback.c 				xenvif_idx_release(queue, pending_idx,
queue             560 drivers/net/xen-netback/netback.c static void xenvif_fill_frags(struct xenvif_queue *queue, struct sk_buff *skb)
queue             578 drivers/net/xen-netback/netback.c 				&callback_param(queue, pending_idx);
queue             580 drivers/net/xen-netback/netback.c 			callback_param(queue, prev_pending_idx).ctx =
queue             581 drivers/net/xen-netback/netback.c 				&callback_param(queue, pending_idx);
queue             583 drivers/net/xen-netback/netback.c 		callback_param(queue, pending_idx).ctx = NULL;
queue             586 drivers/net/xen-netback/netback.c 		txp = &queue->pending_tx_info[pending_idx].req;
queue             587 drivers/net/xen-netback/netback.c 		page = virt_to_page(idx_to_kaddr(queue, pending_idx));
queue             594 drivers/net/xen-netback/netback.c 		get_page(queue->mmap_pages[pending_idx]);
queue             598 drivers/net/xen-netback/netback.c static int xenvif_get_extras(struct xenvif_queue *queue,
queue             604 drivers/net/xen-netback/netback.c 	RING_IDX cons = queue->tx.req_cons;
queue             608 drivers/net/xen-netback/netback.c 			netdev_err(queue->vif->dev, "Missing extra info\n");
queue             609 drivers/net/xen-netback/netback.c 			xenvif_fatal_tx_err(queue->vif);
queue             613 drivers/net/xen-netback/netback.c 		RING_COPY_REQUEST(&queue->tx, cons, &extra);
queue             615 drivers/net/xen-netback/netback.c 		queue->tx.req_cons = ++cons;
queue             620 drivers/net/xen-netback/netback.c 			netdev_err(queue->vif->dev,
queue             622 drivers/net/xen-netback/netback.c 			xenvif_fatal_tx_err(queue->vif);
queue             661 drivers/net/xen-netback/netback.c static int checksum_setup(struct xenvif_queue *queue, struct sk_buff *skb)
queue             671 drivers/net/xen-netback/netback.c 		queue->stats.rx_gso_checksum_fixup++;
queue             683 drivers/net/xen-netback/netback.c static bool tx_credit_exceeded(struct xenvif_queue *queue, unsigned size)
queue             686 drivers/net/xen-netback/netback.c 	u64 next_credit = queue->credit_window_start +
queue             687 drivers/net/xen-netback/netback.c 		msecs_to_jiffies(queue->credit_usec / 1000);
queue             690 drivers/net/xen-netback/netback.c 	if (timer_pending(&queue->credit_timeout)) {
queue             691 drivers/net/xen-netback/netback.c 		queue->rate_limited = true;
queue             697 drivers/net/xen-netback/netback.c 		queue->credit_window_start = now;
queue             698 drivers/net/xen-netback/netback.c 		tx_add_credit(queue);
queue             702 drivers/net/xen-netback/netback.c 	if (size > queue->remaining_credit) {
queue             703 drivers/net/xen-netback/netback.c 		mod_timer(&queue->credit_timeout,
queue             705 drivers/net/xen-netback/netback.c 		queue->credit_window_start = next_credit;
queue             706 drivers/net/xen-netback/netback.c 		queue->rate_limited = true;
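
tx_add_credit() and tx_credit_exceeded() above implement a token bucket: each window of credit_usec replenishes credit_bytes (capped at a 128 KiB burst), requests spend remaining_credit, and an oversized request arms credit_timeout for the end of the window instead of being dropped; rate_limited tells xenvif_poll() (interface.c:112 above) not to re-schedule NAPI in the meantime. The arithmetic reduced to a sketch (the struct and helper are illustrative):

        #include <linux/jiffies.h>

        struct ex_credit {
                u64 window_start;       /* jiffies_64 timestamp */
                unsigned long usec;     /* window length */
                unsigned long bytes;    /* credit per window */
                unsigned long remaining;
        };

        static bool ex_credit_exceeded(struct ex_credit *c, unsigned int size)
        {
                u64 now = get_jiffies_64();
                u64 next = c->window_start + msecs_to_jiffies(c->usec / 1000);

                if (time_after_eq64(now, next)) {       /* window over: refill */
                        unsigned long burst = max(131072UL, c->bytes);

                        c->window_start = now;
                        c->remaining = min(c->remaining + c->bytes, burst);
                }
                if (size > c->remaining)
                        return true;    /* caller arms the credit timer */
                c->remaining -= size;
                return false;
        }
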
queue             788 drivers/net/xen-netback/netback.c static void xenvif_tx_build_gops(struct xenvif_queue *queue,
queue             793 drivers/net/xen-netback/netback.c 	struct gnttab_map_grant_ref *gop = queue->tx_map_ops;
queue             798 drivers/net/xen-netback/netback.c 	while (skb_queue_len(&queue->tx_queue) < budget) {
queue             809 drivers/net/xen-netback/netback.c 		if (queue->tx.sring->req_prod - queue->tx.req_cons >
queue             811 drivers/net/xen-netback/netback.c 			netdev_err(queue->vif->dev,
queue             814 drivers/net/xen-netback/netback.c 				   queue->tx.sring->req_prod, queue->tx.req_cons,
queue             816 drivers/net/xen-netback/netback.c 			xenvif_fatal_tx_err(queue->vif);
queue             820 drivers/net/xen-netback/netback.c 		work_to_do = RING_HAS_UNCONSUMED_REQUESTS(&queue->tx);
queue             824 drivers/net/xen-netback/netback.c 		idx = queue->tx.req_cons;
queue             826 drivers/net/xen-netback/netback.c 		RING_COPY_REQUEST(&queue->tx, idx, &txreq);
queue             829 drivers/net/xen-netback/netback.c 		if (txreq.size > queue->remaining_credit &&
queue             830 drivers/net/xen-netback/netback.c 		    tx_credit_exceeded(queue, txreq.size))
queue             833 drivers/net/xen-netback/netback.c 		queue->remaining_credit -= txreq.size;
queue             836 drivers/net/xen-netback/netback.c 		queue->tx.req_cons = ++idx;
queue             841 drivers/net/xen-netback/netback.c 			work_to_do = xenvif_get_extras(queue, extras,
queue             844 drivers/net/xen-netback/netback.c 			idx = queue->tx.req_cons;
queue             853 drivers/net/xen-netback/netback.c 			ret = xenvif_mcast_add(queue->vif, extra->u.mcast.addr);
queue             855 drivers/net/xen-netback/netback.c 			make_tx_response(queue, &txreq, extra_count,
queue             859 drivers/net/xen-netback/netback.c 			push_tx_responses(queue);
queue             867 drivers/net/xen-netback/netback.c 			xenvif_mcast_del(queue->vif, extra->u.mcast.addr);
queue             869 drivers/net/xen-netback/netback.c 			make_tx_response(queue, &txreq, extra_count,
queue             871 drivers/net/xen-netback/netback.c 			push_tx_responses(queue);
queue             875 drivers/net/xen-netback/netback.c 		ret = xenvif_count_requests(queue, &txreq, extra_count,
queue             883 drivers/net/xen-netback/netback.c 			netdev_dbg(queue->vif->dev,
queue             885 drivers/net/xen-netback/netback.c 			xenvif_tx_err(queue, &txreq, extra_count, idx);
queue             891 drivers/net/xen-netback/netback.c 			netdev_err(queue->vif->dev,
queue             895 drivers/net/xen-netback/netback.c 			xenvif_fatal_tx_err(queue->vif);
queue             899 drivers/net/xen-netback/netback.c 		index = pending_index(queue->pending_cons);
queue             900 drivers/net/xen-netback/netback.c 		pending_idx = queue->pending_ring[index];
queue             908 drivers/net/xen-netback/netback.c 			netdev_dbg(queue->vif->dev,
queue             910 drivers/net/xen-netback/netback.c 			xenvif_tx_err(queue, &txreq, extra_count, idx);
queue             930 drivers/net/xen-netback/netback.c 				xenvif_tx_err(queue, &txreq, extra_count, idx);
queue             932 drivers/net/xen-netback/netback.c 					netdev_err(queue->vif->dev,
queue             942 drivers/net/xen-netback/netback.c 			if (xenvif_set_skb_gso(queue->vif, skb, gso)) {
queue             981 drivers/net/xen-netback/netback.c 		queue->tx_copy_ops[*copy_ops].source.u.ref = txreq.gref;
queue             982 drivers/net/xen-netback/netback.c 		queue->tx_copy_ops[*copy_ops].source.domid = queue->vif->domid;
queue             983 drivers/net/xen-netback/netback.c 		queue->tx_copy_ops[*copy_ops].source.offset = txreq.offset;
queue             985 drivers/net/xen-netback/netback.c 		queue->tx_copy_ops[*copy_ops].dest.u.gmfn =
queue             987 drivers/net/xen-netback/netback.c 		queue->tx_copy_ops[*copy_ops].dest.domid = DOMID_SELF;
queue             988 drivers/net/xen-netback/netback.c 		queue->tx_copy_ops[*copy_ops].dest.offset =
queue             991 drivers/net/xen-netback/netback.c 		queue->tx_copy_ops[*copy_ops].len = data_len;
queue             992 drivers/net/xen-netback/netback.c 		queue->tx_copy_ops[*copy_ops].flags = GNTCOPY_source_gref;
queue             999 drivers/net/xen-netback/netback.c 			xenvif_tx_create_map_op(queue, pending_idx, &txreq,
queue            1005 drivers/net/xen-netback/netback.c 			memcpy(&queue->pending_tx_info[pending_idx].req,
queue            1007 drivers/net/xen-netback/netback.c 			queue->pending_tx_info[pending_idx].extra_count =
queue            1011 drivers/net/xen-netback/netback.c 		queue->pending_cons++;
queue            1013 drivers/net/xen-netback/netback.c 		gop = xenvif_get_requests(queue, skb, txfrags, gop,
queue            1016 drivers/net/xen-netback/netback.c 		__skb_queue_tail(&queue->tx_queue, skb);
queue            1018 drivers/net/xen-netback/netback.c 		queue->tx.req_cons = idx;
queue            1020 drivers/net/xen-netback/netback.c 		if (((gop-queue->tx_map_ops) >= ARRAY_SIZE(queue->tx_map_ops)) ||
queue            1021 drivers/net/xen-netback/netback.c 		    (*copy_ops >= ARRAY_SIZE(queue->tx_copy_ops)))
queue            1025 drivers/net/xen-netback/netback.c 	(*map_ops) = gop - queue->tx_map_ops;
queue            1032 drivers/net/xen-netback/netback.c static int xenvif_handle_frag_list(struct xenvif_queue *queue, struct sk_buff *skb)
queue            1040 drivers/net/xen-netback/netback.c 	queue->stats.tx_zerocopy_sent += 2;
queue            1041 drivers/net/xen-netback/netback.c 	queue->stats.tx_frag_overflow++;
queue            1043 drivers/net/xen-netback/netback.c 	xenvif_fill_frags(queue, nskb);
queue            1082 drivers/net/xen-netback/netback.c 	atomic_inc(&queue->inflight_packets);
queue            1094 drivers/net/xen-netback/netback.c static int xenvif_tx_submit(struct xenvif_queue *queue)
queue            1096 drivers/net/xen-netback/netback.c 	struct gnttab_map_grant_ref *gop_map = queue->tx_map_ops;
queue            1097 drivers/net/xen-netback/netback.c 	struct gnttab_copy *gop_copy = queue->tx_copy_ops;
queue            1101 drivers/net/xen-netback/netback.c 	while ((skb = __skb_dequeue(&queue->tx_queue)) != NULL) {
queue            1107 drivers/net/xen-netback/netback.c 		txp = &queue->pending_tx_info[pending_idx].req;
queue            1110 drivers/net/xen-netback/netback.c 		if (unlikely(xenvif_tx_check_gop(queue, skb, &gop_map, &gop_copy))) {
queue            1126 drivers/net/xen-netback/netback.c 		callback_param(queue, pending_idx).ctx = NULL;
queue            1133 drivers/net/xen-netback/netback.c 			xenvif_idx_release(queue, pending_idx,
queue            1142 drivers/net/xen-netback/netback.c 		xenvif_fill_frags(queue, skb);
queue            1146 drivers/net/xen-netback/netback.c 			xenvif_skb_zerocopy_prepare(queue, nskb);
queue            1147 drivers/net/xen-netback/netback.c 			if (xenvif_handle_frag_list(queue, skb)) {
queue            1149 drivers/net/xen-netback/netback.c 					netdev_err(queue->vif->dev,
queue            1151 drivers/net/xen-netback/netback.c 				xenvif_skb_zerocopy_prepare(queue, skb);
queue            1160 drivers/net/xen-netback/netback.c 		skb->dev      = queue->vif->dev;
queue            1164 drivers/net/xen-netback/netback.c 		if (checksum_setup(queue, skb)) {
queue            1165 drivers/net/xen-netback/netback.c 			netdev_dbg(queue->vif->dev,
queue            1169 drivers/net/xen-netback/netback.c 				xenvif_skb_zerocopy_prepare(queue, skb);
queue            1199 drivers/net/xen-netback/netback.c 		queue->stats.rx_bytes += skb->len;
queue            1200 drivers/net/xen-netback/netback.c 		queue->stats.rx_packets++;
queue            1210 drivers/net/xen-netback/netback.c 			xenvif_skb_zerocopy_prepare(queue, skb);
queue            1211 drivers/net/xen-netback/netback.c 			queue->stats.tx_zerocopy_sent++;
queue            1224 drivers/net/xen-netback/netback.c 	struct xenvif_queue *queue = ubuf_to_queue(ubuf);
queue            1229 drivers/net/xen-netback/netback.c 	spin_lock_irqsave(&queue->callback_lock, flags);
queue            1233 drivers/net/xen-netback/netback.c 		BUG_ON(queue->dealloc_prod - queue->dealloc_cons >=
queue            1235 drivers/net/xen-netback/netback.c 		index = pending_index(queue->dealloc_prod);
queue            1236 drivers/net/xen-netback/netback.c 		queue->dealloc_ring[index] = pending_idx;
queue            1241 drivers/net/xen-netback/netback.c 		queue->dealloc_prod++;
queue            1243 drivers/net/xen-netback/netback.c 	spin_unlock_irqrestore(&queue->callback_lock, flags);
queue            1246 drivers/net/xen-netback/netback.c 		queue->stats.tx_zerocopy_success++;
queue            1248 drivers/net/xen-netback/netback.c 		queue->stats.tx_zerocopy_fail++;
queue            1249 drivers/net/xen-netback/netback.c 	xenvif_skb_zerocopy_complete(queue);
queue            1252 drivers/net/xen-netback/netback.c static inline void xenvif_tx_dealloc_action(struct xenvif_queue *queue)
queue            1259 drivers/net/xen-netback/netback.c 	dc = queue->dealloc_cons;
queue            1260 drivers/net/xen-netback/netback.c 	gop = queue->tx_unmap_ops;
queue            1264 drivers/net/xen-netback/netback.c 		dp = queue->dealloc_prod;
queue            1272 drivers/net/xen-netback/netback.c 			BUG_ON(gop - queue->tx_unmap_ops >= MAX_PENDING_REQS);
queue            1274 drivers/net/xen-netback/netback.c 				queue->dealloc_ring[pending_index(dc++)];
queue            1276 drivers/net/xen-netback/netback.c 			pending_idx_release[gop - queue->tx_unmap_ops] =
queue            1278 drivers/net/xen-netback/netback.c 			queue->pages_to_unmap[gop - queue->tx_unmap_ops] =
queue            1279 drivers/net/xen-netback/netback.c 				queue->mmap_pages[pending_idx];
queue            1281 drivers/net/xen-netback/netback.c 					    idx_to_kaddr(queue, pending_idx),
queue            1283 drivers/net/xen-netback/netback.c 					    queue->grant_tx_handle[pending_idx]);
queue            1284 drivers/net/xen-netback/netback.c 			xenvif_grant_handle_reset(queue, pending_idx);
queue            1288 drivers/net/xen-netback/netback.c 	} while (dp != queue->dealloc_prod);
queue            1290 drivers/net/xen-netback/netback.c 	queue->dealloc_cons = dc;
queue            1292 drivers/net/xen-netback/netback.c 	if (gop - queue->tx_unmap_ops > 0) {
queue            1294 drivers/net/xen-netback/netback.c 		ret = gnttab_unmap_refs(queue->tx_unmap_ops,
queue            1296 drivers/net/xen-netback/netback.c 					queue->pages_to_unmap,
queue            1297 drivers/net/xen-netback/netback.c 					gop - queue->tx_unmap_ops);
queue            1299 drivers/net/xen-netback/netback.c 			netdev_err(queue->vif->dev, "Unmap fail: nr_ops %tu ret %d\n",
queue            1300 drivers/net/xen-netback/netback.c 				   gop - queue->tx_unmap_ops, ret);
queue            1301 drivers/net/xen-netback/netback.c 			for (i = 0; i < gop - queue->tx_unmap_ops; ++i) {
queue            1303 drivers/net/xen-netback/netback.c 					netdev_err(queue->vif->dev,
queue            1313 drivers/net/xen-netback/netback.c 	for (i = 0; i < gop - queue->tx_unmap_ops; ++i)
queue            1314 drivers/net/xen-netback/netback.c 		xenvif_idx_release(queue, pending_idx_release[i],
queue            1320 drivers/net/xen-netback/netback.c int xenvif_tx_action(struct xenvif_queue *queue, int budget)
queue            1325 drivers/net/xen-netback/netback.c 	if (unlikely(!tx_work_todo(queue)))
queue            1328 drivers/net/xen-netback/netback.c 	xenvif_tx_build_gops(queue, budget, &nr_cops, &nr_mops);
queue            1333 drivers/net/xen-netback/netback.c 	gnttab_batch_copy(queue->tx_copy_ops, nr_cops);
queue            1335 drivers/net/xen-netback/netback.c 		ret = gnttab_map_refs(queue->tx_map_ops,
queue            1337 drivers/net/xen-netback/netback.c 				      queue->pages_to_map,
queue            1342 drivers/net/xen-netback/netback.c 	work_done = xenvif_tx_submit(queue);
queue            1347 drivers/net/xen-netback/netback.c static void xenvif_idx_release(struct xenvif_queue *queue, u16 pending_idx,
queue            1354 drivers/net/xen-netback/netback.c 	pending_tx_info = &queue->pending_tx_info[pending_idx];
queue            1356 drivers/net/xen-netback/netback.c 	spin_lock_irqsave(&queue->response_lock, flags);
queue            1358 drivers/net/xen-netback/netback.c 	make_tx_response(queue, &pending_tx_info->req,
queue            1365 drivers/net/xen-netback/netback.c 	index = pending_index(queue->pending_prod++);
queue            1366 drivers/net/xen-netback/netback.c 	queue->pending_ring[index] = pending_idx;
queue            1368 drivers/net/xen-netback/netback.c 	push_tx_responses(queue);
queue            1370 drivers/net/xen-netback/netback.c 	spin_unlock_irqrestore(&queue->response_lock, flags);
queue            1374 drivers/net/xen-netback/netback.c static void make_tx_response(struct xenvif_queue *queue,
queue            1379 drivers/net/xen-netback/netback.c 	RING_IDX i = queue->tx.rsp_prod_pvt;
queue            1382 drivers/net/xen-netback/netback.c 	resp = RING_GET_RESPONSE(&queue->tx, i);
queue            1387 drivers/net/xen-netback/netback.c 		RING_GET_RESPONSE(&queue->tx, ++i)->status = XEN_NETIF_RSP_NULL;
queue            1389 drivers/net/xen-netback/netback.c 	queue->tx.rsp_prod_pvt = ++i;
queue            1392 drivers/net/xen-netback/netback.c static void push_tx_responses(struct xenvif_queue *queue)
queue            1396 drivers/net/xen-netback/netback.c 	RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&queue->tx, notify);
queue            1398 drivers/net/xen-netback/netback.c 		notify_remote_via_irq(queue->tx_irq);
queue            1401 drivers/net/xen-netback/netback.c void xenvif_idx_unmap(struct xenvif_queue *queue, u16 pending_idx)
queue            1407 drivers/net/xen-netback/netback.c 			    idx_to_kaddr(queue, pending_idx),
queue            1409 drivers/net/xen-netback/netback.c 			    queue->grant_tx_handle[pending_idx]);
queue            1410 drivers/net/xen-netback/netback.c 	xenvif_grant_handle_reset(queue, pending_idx);
queue            1413 drivers/net/xen-netback/netback.c 				&queue->mmap_pages[pending_idx], 1);
queue            1415 drivers/net/xen-netback/netback.c 		netdev_err(queue->vif->dev,
queue            1426 drivers/net/xen-netback/netback.c static inline int tx_work_todo(struct xenvif_queue *queue)
queue            1428 drivers/net/xen-netback/netback.c 	if (likely(RING_HAS_UNCONSUMED_REQUESTS(&queue->tx)))
queue            1434 drivers/net/xen-netback/netback.c static inline bool tx_dealloc_work_todo(struct xenvif_queue *queue)
queue            1436 drivers/net/xen-netback/netback.c 	return queue->dealloc_cons != queue->dealloc_prod;
queue            1439 drivers/net/xen-netback/netback.c void xenvif_unmap_frontend_data_rings(struct xenvif_queue *queue)
queue            1441 drivers/net/xen-netback/netback.c 	if (queue->tx.sring)
queue            1442 drivers/net/xen-netback/netback.c 		xenbus_unmap_ring_vfree(xenvif_to_xenbus_device(queue->vif),
queue            1443 drivers/net/xen-netback/netback.c 					queue->tx.sring);
queue            1444 drivers/net/xen-netback/netback.c 	if (queue->rx.sring)
queue            1445 drivers/net/xen-netback/netback.c 		xenbus_unmap_ring_vfree(xenvif_to_xenbus_device(queue->vif),
queue            1446 drivers/net/xen-netback/netback.c 					queue->rx.sring);
queue            1449 drivers/net/xen-netback/netback.c int xenvif_map_frontend_data_rings(struct xenvif_queue *queue,
queue            1459 drivers/net/xen-netback/netback.c 	err = xenbus_map_ring_valloc(xenvif_to_xenbus_device(queue->vif),
queue            1465 drivers/net/xen-netback/netback.c 	BACK_RING_INIT(&queue->tx, txs, XEN_PAGE_SIZE);
queue            1467 drivers/net/xen-netback/netback.c 	err = xenbus_map_ring_valloc(xenvif_to_xenbus_device(queue->vif),
queue            1473 drivers/net/xen-netback/netback.c 	BACK_RING_INIT(&queue->rx, rxs, XEN_PAGE_SIZE);
queue            1478 drivers/net/xen-netback/netback.c 	xenvif_unmap_frontend_data_rings(queue);
queue            1482 drivers/net/xen-netback/netback.c static bool xenvif_dealloc_kthread_should_stop(struct xenvif_queue *queue)
queue            1488 drivers/net/xen-netback/netback.c 		!atomic_read(&queue->inflight_packets);
queue            1493 drivers/net/xen-netback/netback.c 	struct xenvif_queue *queue = data;
queue            1496 drivers/net/xen-netback/netback.c 		wait_event_interruptible(queue->dealloc_wq,
queue            1497 drivers/net/xen-netback/netback.c 					 tx_dealloc_work_todo(queue) ||
queue            1498 drivers/net/xen-netback/netback.c 					 xenvif_dealloc_kthread_should_stop(queue));
queue            1499 drivers/net/xen-netback/netback.c 		if (xenvif_dealloc_kthread_should_stop(queue))
queue            1502 drivers/net/xen-netback/netback.c 		xenvif_tx_dealloc_action(queue);
queue            1507 drivers/net/xen-netback/netback.c 	if (tx_dealloc_work_todo(queue))
queue            1508 drivers/net/xen-netback/netback.c 		xenvif_tx_dealloc_action(queue);
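
The netback.c hits above end with the tx dealloc path, which drains a producer/consumer ring of pending indices (dealloc_prod/dealloc_cons, mapped onto slots by pending_index()). A minimal userspace C sketch of that ring idiom, assuming a power-of-two ring; all names here are illustrative, not the driver's:

	#include <assert.h>
	#include <stdint.h>
	#include <stdio.h>

	#define MAX_PENDING_REQS 256u            /* must be a power of two */
	#define pending_index(x) ((x) & (MAX_PENDING_REQS - 1))

	static uint16_t ring[MAX_PENDING_REQS];
	static uint32_t prod, cons;              /* free-running counters */

	static void produce(uint16_t idx)
	{
		/* prod - cons is the fill level even across wraparound */
		assert(prod - cons < MAX_PENDING_REQS);
		ring[pending_index(prod++)] = idx;
	}

	static int consume(uint16_t *idx)
	{
		if (cons == prod)
			return 0;                /* ring empty */
		*idx = ring[pending_index(cons++)];
		return 1;
	}

	int main(void)
	{
		uint16_t v;

		produce(7);
		produce(42);
		while (consume(&v))
			printf("dealloc pending_idx %u\n", (unsigned)v);
		return 0;
	}

Keeping the counters free-running (rather than wrapping them at the ring size) is what lets checks like the BUG_ON at line 1233 above compare prod - cons directly against the ring capacity.
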
queue              36 drivers/net/xen-netback/rx.c static bool xenvif_rx_ring_slots_available(struct xenvif_queue *queue)
queue              42 drivers/net/xen-netback/rx.c 	skb = skb_peek(&queue->rx_queue);
queue              53 drivers/net/xen-netback/rx.c 		prod = queue->rx.sring->req_prod;
queue              54 drivers/net/xen-netback/rx.c 		cons = queue->rx.req_cons;
queue              59 drivers/net/xen-netback/rx.c 		queue->rx.sring->req_event = prod + 1;
queue              65 drivers/net/xen-netback/rx.c 	} while (queue->rx.sring->req_prod != prod);
queue              70 drivers/net/xen-netback/rx.c void xenvif_rx_queue_tail(struct xenvif_queue *queue, struct sk_buff *skb)
queue              74 drivers/net/xen-netback/rx.c 	spin_lock_irqsave(&queue->rx_queue.lock, flags);
queue              76 drivers/net/xen-netback/rx.c 	__skb_queue_tail(&queue->rx_queue, skb);
queue              78 drivers/net/xen-netback/rx.c 	queue->rx_queue_len += skb->len;
queue              79 drivers/net/xen-netback/rx.c 	if (queue->rx_queue_len > queue->rx_queue_max) {
queue              80 drivers/net/xen-netback/rx.c 		struct net_device *dev = queue->vif->dev;
queue              82 drivers/net/xen-netback/rx.c 		netif_tx_stop_queue(netdev_get_tx_queue(dev, queue->id));
queue              85 drivers/net/xen-netback/rx.c 	spin_unlock_irqrestore(&queue->rx_queue.lock, flags);
queue              88 drivers/net/xen-netback/rx.c static struct sk_buff *xenvif_rx_dequeue(struct xenvif_queue *queue)
queue              92 drivers/net/xen-netback/rx.c 	spin_lock_irq(&queue->rx_queue.lock);
queue              94 drivers/net/xen-netback/rx.c 	skb = __skb_dequeue(&queue->rx_queue);
queue              96 drivers/net/xen-netback/rx.c 		queue->rx_queue_len -= skb->len;
queue              97 drivers/net/xen-netback/rx.c 		if (queue->rx_queue_len < queue->rx_queue_max) {
queue             100 drivers/net/xen-netback/rx.c 			txq = netdev_get_tx_queue(queue->vif->dev, queue->id);
queue             105 drivers/net/xen-netback/rx.c 	spin_unlock_irq(&queue->rx_queue.lock);
queue             110 drivers/net/xen-netback/rx.c static void xenvif_rx_queue_purge(struct xenvif_queue *queue)
queue             114 drivers/net/xen-netback/rx.c 	while ((skb = xenvif_rx_dequeue(queue)) != NULL)
queue             118 drivers/net/xen-netback/rx.c static void xenvif_rx_queue_drop_expired(struct xenvif_queue *queue)
queue             123 drivers/net/xen-netback/rx.c 		skb = skb_peek(&queue->rx_queue);
queue             128 drivers/net/xen-netback/rx.c 		xenvif_rx_dequeue(queue);
queue             133 drivers/net/xen-netback/rx.c static void xenvif_rx_copy_flush(struct xenvif_queue *queue)
queue             138 drivers/net/xen-netback/rx.c 	gnttab_batch_copy(queue->rx_copy.op, queue->rx_copy.num);
queue             140 drivers/net/xen-netback/rx.c 	for (i = 0; i < queue->rx_copy.num; i++) {
queue             143 drivers/net/xen-netback/rx.c 		op = &queue->rx_copy.op[i];
queue             151 drivers/net/xen-netback/rx.c 			rsp = RING_GET_RESPONSE(&queue->rx,
queue             152 drivers/net/xen-netback/rx.c 						queue->rx_copy.idx[i]);
queue             157 drivers/net/xen-netback/rx.c 	queue->rx_copy.num = 0;
queue             160 drivers/net/xen-netback/rx.c 	RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&queue->rx, notify);
queue             162 drivers/net/xen-netback/rx.c 		notify_remote_via_irq(queue->rx_irq);
queue             164 drivers/net/xen-netback/rx.c 	__skb_queue_purge(queue->rx_copy.completed);
queue             167 drivers/net/xen-netback/rx.c static void xenvif_rx_copy_add(struct xenvif_queue *queue,
queue             175 drivers/net/xen-netback/rx.c 	if (queue->rx_copy.num == COPY_BATCH_SIZE)
queue             176 drivers/net/xen-netback/rx.c 		xenvif_rx_copy_flush(queue);
queue             178 drivers/net/xen-netback/rx.c 	op = &queue->rx_copy.op[queue->rx_copy.num];
queue             196 drivers/net/xen-netback/rx.c 	op->dest.domid    = queue->vif->domid;
queue             200 drivers/net/xen-netback/rx.c 	queue->rx_copy.idx[queue->rx_copy.num] = queue->rx.req_cons;
queue             201 drivers/net/xen-netback/rx.c 	queue->rx_copy.num++;
queue             226 drivers/net/xen-netback/rx.c static void xenvif_rx_next_skb(struct xenvif_queue *queue,
queue             232 drivers/net/xen-netback/rx.c 	skb = xenvif_rx_dequeue(queue);
queue             234 drivers/net/xen-netback/rx.c 	queue->stats.tx_bytes += skb->len;
queue             235 drivers/net/xen-netback/rx.c 	queue->stats.tx_packets++;
queue             246 drivers/net/xen-netback/rx.c 	if ((1 << gso_type) & queue->vif->gso_mask) {
queue             289 drivers/net/xen-netback/rx.c static void xenvif_rx_complete(struct xenvif_queue *queue,
queue             293 drivers/net/xen-netback/rx.c 	queue->rx.rsp_prod_pvt = queue->rx.req_cons;
queue             295 drivers/net/xen-netback/rx.c 	__skb_queue_tail(queue->rx_copy.completed, pkt->skb);
queue             316 drivers/net/xen-netback/rx.c static void xenvif_rx_next_chunk(struct xenvif_queue *queue,
queue             354 drivers/net/xen-netback/rx.c static void xenvif_rx_data_slot(struct xenvif_queue *queue,
queue             366 drivers/net/xen-netback/rx.c 		xenvif_rx_next_chunk(queue, pkt, offset, &data, &len);
queue             367 drivers/net/xen-netback/rx.c 		xenvif_rx_copy_add(queue, req, offset, data, len);
queue             398 drivers/net/xen-netback/rx.c static void xenvif_rx_extra_slot(struct xenvif_queue *queue,
queue             422 drivers/net/xen-netback/rx.c static void xenvif_rx_skb(struct xenvif_queue *queue)
queue             426 drivers/net/xen-netback/rx.c 	xenvif_rx_next_skb(queue, &pkt);
queue             428 drivers/net/xen-netback/rx.c 	queue->last_rx_time = jiffies;
queue             434 drivers/net/xen-netback/rx.c 		req = RING_GET_REQUEST(&queue->rx, queue->rx.req_cons);
queue             435 drivers/net/xen-netback/rx.c 		rsp = RING_GET_RESPONSE(&queue->rx, queue->rx.req_cons);
queue             439 drivers/net/xen-netback/rx.c 			xenvif_rx_extra_slot(queue, &pkt, req, rsp);
queue             441 drivers/net/xen-netback/rx.c 			xenvif_rx_data_slot(queue, &pkt, req, rsp);
queue             443 drivers/net/xen-netback/rx.c 		queue->rx.req_cons++;
queue             447 drivers/net/xen-netback/rx.c 	xenvif_rx_complete(queue, &pkt);
queue             452 drivers/net/xen-netback/rx.c void xenvif_rx_action(struct xenvif_queue *queue)
queue             458 drivers/net/xen-netback/rx.c 	queue->rx_copy.completed = &completed_skbs;
queue             460 drivers/net/xen-netback/rx.c 	while (xenvif_rx_ring_slots_available(queue) &&
queue             462 drivers/net/xen-netback/rx.c 		xenvif_rx_skb(queue);
queue             467 drivers/net/xen-netback/rx.c 	xenvif_rx_copy_flush(queue);
queue             470 drivers/net/xen-netback/rx.c static bool xenvif_rx_queue_stalled(struct xenvif_queue *queue)
queue             474 drivers/net/xen-netback/rx.c 	prod = queue->rx.sring->req_prod;
queue             475 drivers/net/xen-netback/rx.c 	cons = queue->rx.req_cons;
queue             477 drivers/net/xen-netback/rx.c 	return !queue->stalled &&
queue             480 drivers/net/xen-netback/rx.c 			   queue->last_rx_time + queue->vif->stall_timeout);
queue             483 drivers/net/xen-netback/rx.c static bool xenvif_rx_queue_ready(struct xenvif_queue *queue)
queue             487 drivers/net/xen-netback/rx.c 	prod = queue->rx.sring->req_prod;
queue             488 drivers/net/xen-netback/rx.c 	cons = queue->rx.req_cons;
queue             490 drivers/net/xen-netback/rx.c 	return queue->stalled && prod - cons >= 1;
queue             493 drivers/net/xen-netback/rx.c static bool xenvif_have_rx_work(struct xenvif_queue *queue)
queue             495 drivers/net/xen-netback/rx.c 	return xenvif_rx_ring_slots_available(queue) ||
queue             496 drivers/net/xen-netback/rx.c 		(queue->vif->stall_timeout &&
queue             497 drivers/net/xen-netback/rx.c 		 (xenvif_rx_queue_stalled(queue) ||
queue             498 drivers/net/xen-netback/rx.c 		  xenvif_rx_queue_ready(queue))) ||
queue             500 drivers/net/xen-netback/rx.c 		queue->vif->disabled;
queue             503 drivers/net/xen-netback/rx.c static long xenvif_rx_queue_timeout(struct xenvif_queue *queue)
queue             508 drivers/net/xen-netback/rx.c 	skb = skb_peek(&queue->rx_queue);
queue             526 drivers/net/xen-netback/rx.c static void xenvif_wait_for_rx_work(struct xenvif_queue *queue)
queue             530 drivers/net/xen-netback/rx.c 	if (xenvif_have_rx_work(queue))
queue             536 drivers/net/xen-netback/rx.c 		prepare_to_wait(&queue->wq, &wait, TASK_INTERRUPTIBLE);
queue             537 drivers/net/xen-netback/rx.c 		if (xenvif_have_rx_work(queue))
queue             539 drivers/net/xen-netback/rx.c 		ret = schedule_timeout(xenvif_rx_queue_timeout(queue));
queue             543 drivers/net/xen-netback/rx.c 	finish_wait(&queue->wq, &wait);
queue             546 drivers/net/xen-netback/rx.c static void xenvif_queue_carrier_off(struct xenvif_queue *queue)
queue             548 drivers/net/xen-netback/rx.c 	struct xenvif *vif = queue->vif;
queue             550 drivers/net/xen-netback/rx.c 	queue->stalled = true;
queue             561 drivers/net/xen-netback/rx.c static void xenvif_queue_carrier_on(struct xenvif_queue *queue)
queue             563 drivers/net/xen-netback/rx.c 	struct xenvif *vif = queue->vif;
queue             565 drivers/net/xen-netback/rx.c 	queue->last_rx_time = jiffies; /* Reset Rx stall detection. */
queue             566 drivers/net/xen-netback/rx.c 	queue->stalled = false;
queue             579 drivers/net/xen-netback/rx.c 	struct xenvif_queue *queue = data;
queue             580 drivers/net/xen-netback/rx.c 	struct xenvif *vif = queue->vif;
queue             583 drivers/net/xen-netback/rx.c 		xenvif_queue_carrier_on(queue);
queue             586 drivers/net/xen-netback/rx.c 		xenvif_wait_for_rx_work(queue);
queue             598 drivers/net/xen-netback/rx.c 		if (unlikely(vif->disabled && queue->id == 0)) {
queue             603 drivers/net/xen-netback/rx.c 		if (!skb_queue_empty(&queue->rx_queue))
queue             604 drivers/net/xen-netback/rx.c 			xenvif_rx_action(queue);
queue             611 drivers/net/xen-netback/rx.c 			if (xenvif_rx_queue_stalled(queue))
queue             612 drivers/net/xen-netback/rx.c 				xenvif_queue_carrier_off(queue);
queue             613 drivers/net/xen-netback/rx.c 			else if (xenvif_rx_queue_ready(queue))
queue             614 drivers/net/xen-netback/rx.c 				xenvif_queue_carrier_on(queue);
queue             622 drivers/net/xen-netback/rx.c 		xenvif_rx_queue_drop_expired(queue);
queue             628 drivers/net/xen-netback/rx.c 	xenvif_rx_queue_purge(queue);
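
The rx.c hits above include a byte-budget backpressure scheme: xenvif_rx_queue_tail() adds skb->len to rx_queue_len and stops the upper tx queue past rx_queue_max, while xenvif_rx_dequeue() subtracts and wakes it again. A userspace C sketch of the same pattern, with sizes and names illustrative:

	#include <stdbool.h>
	#include <stdio.h>

	static unsigned long rx_queue_len;
	static const unsigned long rx_queue_max = 32768;
	static bool tx_stopped;

	static void rx_enqueue(unsigned long pkt_len)
	{
		rx_queue_len += pkt_len;
		if (rx_queue_len > rx_queue_max && !tx_stopped) {
			tx_stopped = true;       /* netif_tx_stop_queue() role */
			printf("queue stopped at %lu bytes\n", rx_queue_len);
		}
	}

	static void rx_dequeue(unsigned long pkt_len)
	{
		rx_queue_len -= pkt_len;
		if (rx_queue_len < rx_queue_max && tx_stopped) {
			tx_stopped = false;      /* netif_tx_wake_queue() role */
			printf("queue woken at %lu bytes\n", rx_queue_len);
		}
	}

	int main(void)
	{
		rx_enqueue(20000);
		rx_enqueue(20000);
		rx_dequeue(20000);
		rx_dequeue(20000);
		return 0;
	}
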
queue              14 drivers/net/xen-netback/xenbus.c 			      struct xenvif_queue *queue);
queue              28 drivers/net/xen-netback/xenbus.c 	struct xenvif_queue *queue = m->private;
queue              29 drivers/net/xen-netback/xenbus.c 	struct xen_netif_tx_back_ring *tx_ring = &queue->tx;
queue              30 drivers/net/xen-netback/xenbus.c 	struct xen_netif_rx_back_ring *rx_ring = &queue->rx;
queue              36 drivers/net/xen-netback/xenbus.c 		seq_printf(m, "Queue %d\nTX: nr_ents %u\n", queue->id,
queue              52 drivers/net/xen-netback/xenbus.c 			   queue->pending_prod,
queue              53 drivers/net/xen-netback/xenbus.c 			   queue->pending_cons,
queue              54 drivers/net/xen-netback/xenbus.c 			   nr_pending_reqs(queue));
queue              56 drivers/net/xen-netback/xenbus.c 			   queue->dealloc_prod,
queue              57 drivers/net/xen-netback/xenbus.c 			   queue->dealloc_cons,
queue              58 drivers/net/xen-netback/xenbus.c 			   queue->dealloc_prod - queue->dealloc_cons);
queue              83 drivers/net/xen-netback/xenbus.c 		   queue->napi.state, queue->napi.weight,
queue              84 drivers/net/xen-netback/xenbus.c 		   skb_queue_len(&queue->tx_queue),
queue              85 drivers/net/xen-netback/xenbus.c 		   timer_pending(&queue->credit_timeout),
queue              86 drivers/net/xen-netback/xenbus.c 		   queue->credit_bytes,
queue              87 drivers/net/xen-netback/xenbus.c 		   queue->credit_usec,
queue              88 drivers/net/xen-netback/xenbus.c 		   queue->remaining_credit,
queue              89 drivers/net/xen-netback/xenbus.c 		   queue->credit_timeout.expires,
queue              92 drivers/net/xen-netback/xenbus.c 	dev_queue = netdev_get_tx_queue(queue->vif->dev, queue->id);
queue              95 drivers/net/xen-netback/xenbus.c 		   queue->rx_queue_len, queue->rx_queue_max,
queue              96 drivers/net/xen-netback/xenbus.c 		   skb_queue_len(&queue->rx_queue),
queue             109 drivers/net/xen-netback/xenbus.c 	struct xenvif_queue *queue =
queue             131 drivers/net/xen-netback/xenbus.c 		xenvif_interrupt(0, (void *)queue);
queue             134 drivers/net/xen-netback/xenbus.c 			queue->id);
queue             143 drivers/net/xen-netback/xenbus.c 	void *queue = NULL;
queue             146 drivers/net/xen-netback/xenbus.c 		queue = inode->i_private;
queue             147 drivers/net/xen-netback/xenbus.c 	ret = single_open(filp, xenvif_read_io_ring, queue);
queue             690 drivers/net/xen-netback/xenbus.c 		struct xenvif_queue *queue = &vif->queues[queue_index];
queue             692 drivers/net/xen-netback/xenbus.c 		queue->credit_bytes = credit_bytes;
queue             693 drivers/net/xen-netback/xenbus.c 		queue->credit_usec = credit_usec;
queue             694 drivers/net/xen-netback/xenbus.c 		if (!mod_timer_pending(&queue->credit_timeout, jiffies) &&
queue             695 drivers/net/xen-netback/xenbus.c 			queue->remaining_credit > queue->credit_bytes) {
queue             696 drivers/net/xen-netback/xenbus.c 			queue->remaining_credit = queue->credit_bytes;
queue             883 drivers/net/xen-netback/xenbus.c 	struct xenvif_queue *queue;
queue             928 drivers/net/xen-netback/xenbus.c 		queue = &be->vif->queues[queue_index];
queue             929 drivers/net/xen-netback/xenbus.c 		queue->vif = be->vif;
queue             930 drivers/net/xen-netback/xenbus.c 		queue->id = queue_index;
queue             931 drivers/net/xen-netback/xenbus.c 		snprintf(queue->name, sizeof(queue->name), "%s-q%u",
queue             932 drivers/net/xen-netback/xenbus.c 				be->vif->dev->name, queue->id);
queue             934 drivers/net/xen-netback/xenbus.c 		err = xenvif_init_queue(queue);
queue             946 drivers/net/xen-netback/xenbus.c 		queue->credit_bytes = credit_bytes;
queue             947 drivers/net/xen-netback/xenbus.c 		queue->remaining_credit = credit_bytes;
queue             948 drivers/net/xen-netback/xenbus.c 		queue->credit_usec = credit_usec;
queue             950 drivers/net/xen-netback/xenbus.c 		err = connect_data_rings(be, queue);
queue             957 drivers/net/xen-netback/xenbus.c 			xenvif_deinit_queue(queue);
queue            1002 drivers/net/xen-netback/xenbus.c 			      struct xenvif_queue *queue)
queue            1005 drivers/net/xen-netback/xenbus.c 	unsigned int num_queues = queue->vif->num_queues;
queue            1036 drivers/net/xen-netback/xenbus.c 			 queue->id);
queue            1066 drivers/net/xen-netback/xenbus.c 	err = xenvif_connect_data(queue, tx_ring_ref, rx_ring_ref,
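
The xenbus.c hits above configure per-queue credit_bytes/credit_usec and clamp remaining_credit, i.e. a token-bucket style tx rate limit. A sketch of that accounting under the assumption of a monotonic microsecond clock standing in for the kernel's credit timer; the struct and helpers are hypothetical:

	#include <stdbool.h>
	#include <stdio.h>

	struct credit {
		unsigned long credit_bytes;  /* budget per window */
		unsigned long credit_usec;   /* window length */
		unsigned long remaining;
		unsigned long window_start;  /* in fake usec */
	};

	static void credit_refill(struct credit *c, unsigned long now)
	{
		if (now - c->window_start >= c->credit_usec) {
			c->remaining = c->credit_bytes;
			c->window_start = now;
		}
	}

	static bool credit_consume(struct credit *c, unsigned long now,
				   unsigned long bytes)
	{
		credit_refill(c, now);
		if (bytes > c->remaining)
			return false;            /* throttle the sender */
		c->remaining -= bytes;
		return true;
	}

	int main(void)
	{
		struct credit c = { 10000, 1000, 10000, 0 };

		printf("send 8k: %d\n", credit_consume(&c, 100, 8000));  /* ok */
		printf("send 8k: %d\n", credit_consume(&c, 200, 8000));  /* throttled */
		printf("send 8k: %d\n", credit_consume(&c, 1300, 8000)); /* refilled */
		return 0;
	}
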
queue             205 drivers/net/xen-netfront.c static struct sk_buff *xennet_get_rx_skb(struct netfront_queue *queue,
queue             209 drivers/net/xen-netfront.c 	struct sk_buff *skb = queue->rx_skbs[i];
queue             210 drivers/net/xen-netfront.c 	queue->rx_skbs[i] = NULL;
queue             214 drivers/net/xen-netfront.c static grant_ref_t xennet_get_rx_ref(struct netfront_queue *queue,
queue             218 drivers/net/xen-netfront.c 	grant_ref_t ref = queue->grant_rx_ref[i];
queue             219 drivers/net/xen-netfront.c 	queue->grant_rx_ref[i] = GRANT_INVALID_REF;
queue             235 drivers/net/xen-netfront.c 	struct netfront_queue *queue = from_timer(queue, t, rx_refill_timer);
queue             236 drivers/net/xen-netfront.c 	napi_schedule(&queue->napi);
queue             239 drivers/net/xen-netfront.c static int netfront_tx_slot_available(struct netfront_queue *queue)
queue             241 drivers/net/xen-netfront.c 	return (queue->tx.req_prod_pvt - queue->tx.rsp_cons) <
queue             245 drivers/net/xen-netfront.c static void xennet_maybe_wake_tx(struct netfront_queue *queue)
queue             247 drivers/net/xen-netfront.c 	struct net_device *dev = queue->info->netdev;
queue             248 drivers/net/xen-netfront.c 	struct netdev_queue *dev_queue = netdev_get_tx_queue(dev, queue->id);
queue             251 drivers/net/xen-netfront.c 	    netfront_tx_slot_available(queue) &&
queue             253 drivers/net/xen-netfront.c 		netif_tx_wake_queue(netdev_get_tx_queue(dev, queue->id));
queue             257 drivers/net/xen-netfront.c static struct sk_buff *xennet_alloc_one_rx_buffer(struct netfront_queue *queue)
queue             262 drivers/net/xen-netfront.c 	skb = __netdev_alloc_skb(queue->info->netdev,
queue             277 drivers/net/xen-netfront.c 	skb->dev = queue->info->netdev;
queue             283 drivers/net/xen-netfront.c static void xennet_alloc_rx_buffers(struct netfront_queue *queue)
queue             285 drivers/net/xen-netfront.c 	RING_IDX req_prod = queue->rx.req_prod_pvt;
queue             289 drivers/net/xen-netfront.c 	if (unlikely(!netif_carrier_ok(queue->info->netdev)))
queue             292 drivers/net/xen-netfront.c 	for (req_prod = queue->rx.req_prod_pvt;
queue             293 drivers/net/xen-netfront.c 	     req_prod - queue->rx.rsp_cons < NET_RX_RING_SIZE;
queue             301 drivers/net/xen-netfront.c 		skb = xennet_alloc_one_rx_buffer(queue);
queue             309 drivers/net/xen-netfront.c 		BUG_ON(queue->rx_skbs[id]);
queue             310 drivers/net/xen-netfront.c 		queue->rx_skbs[id] = skb;
queue             312 drivers/net/xen-netfront.c 		ref = gnttab_claim_grant_reference(&queue->gref_rx_head);
queue             314 drivers/net/xen-netfront.c 		queue->grant_rx_ref[id] = ref;
queue             318 drivers/net/xen-netfront.c 		req = RING_GET_REQUEST(&queue->rx, req_prod);
queue             320 drivers/net/xen-netfront.c 							 queue->info->xbdev->otherend_id,
queue             327 drivers/net/xen-netfront.c 	queue->rx.req_prod_pvt = req_prod;
queue             334 drivers/net/xen-netfront.c 	if (req_prod - queue->rx.rsp_cons < NET_RX_SLOTS_MIN ||
queue             336 drivers/net/xen-netfront.c 		mod_timer(&queue->rx_refill_timer, jiffies + (HZ/10));
queue             340 drivers/net/xen-netfront.c 	RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&queue->rx, notify);
queue             342 drivers/net/xen-netfront.c 		notify_remote_via_irq(queue->rx_irq);
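
xennet_alloc_rx_buffers(), referenced above, refills the rx ring until it is full, bails out early on allocation failure, and re-arms a refill timer instead of notifying when too few requests are outstanding. A userspace sketch of that refill policy; the allocation failure is simulated and the low-water constant plays the NET_RX_SLOTS_MIN role:

	#include <stdbool.h>
	#include <stdio.h>

	#define RING_SIZE  256u
	#define SLOTS_MIN    8u   /* low-water mark */

	static unsigned req_prod, rsp_cons;

	static bool alloc_buffer(unsigned i)
	{
		return i < 40;           /* pretend the allocator fails later */
	}

	static void refill(void)
	{
		unsigned prod, posted = 0;

		for (prod = req_prod; prod - rsp_cons < RING_SIZE; prod++) {
			if (!alloc_buffer(posted))
				break;       /* retry from the timer later */
			posted++;
		}
		req_prod = prod;

		if (req_prod - rsp_cons < SLOTS_MIN)
			printf("re-arm refill timer\n");  /* mod_timer() role */
		else
			printf("posted %u buffers, notify backend\n", posted);
	}

	int main(void)
	{
		refill();
		return 0;
	}
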
queue             350 drivers/net/xen-netfront.c 	struct netfront_queue *queue = NULL;
queue             356 drivers/net/xen-netfront.c 		queue = &np->queues[i];
queue             357 drivers/net/xen-netfront.c 		napi_enable(&queue->napi);
queue             359 drivers/net/xen-netfront.c 		spin_lock_bh(&queue->rx_lock);
queue             361 drivers/net/xen-netfront.c 			xennet_alloc_rx_buffers(queue);
queue             362 drivers/net/xen-netfront.c 			queue->rx.sring->rsp_event = queue->rx.rsp_cons + 1;
queue             363 drivers/net/xen-netfront.c 			if (RING_HAS_UNCONSUMED_RESPONSES(&queue->rx))
queue             364 drivers/net/xen-netfront.c 				napi_schedule(&queue->napi);
queue             366 drivers/net/xen-netfront.c 		spin_unlock_bh(&queue->rx_lock);
queue             374 drivers/net/xen-netfront.c static void xennet_tx_buf_gc(struct netfront_queue *queue)
queue             381 drivers/net/xen-netfront.c 	BUG_ON(!netif_carrier_ok(queue->info->netdev));
queue             384 drivers/net/xen-netfront.c 		prod = queue->tx.sring->rsp_prod;
queue             387 drivers/net/xen-netfront.c 		for (cons = queue->tx.rsp_cons; cons != prod; cons++) {
queue             390 drivers/net/xen-netfront.c 			txrsp = RING_GET_RESPONSE(&queue->tx, cons);
queue             395 drivers/net/xen-netfront.c 			skb = queue->tx_skbs[id].skb;
queue             397 drivers/net/xen-netfront.c 				queue->grant_tx_ref[id]) != 0)) {
queue             403 drivers/net/xen-netfront.c 				queue->grant_tx_ref[id], GNTMAP_readonly);
queue             405 drivers/net/xen-netfront.c 				&queue->gref_tx_head, queue->grant_tx_ref[id]);
queue             406 drivers/net/xen-netfront.c 			queue->grant_tx_ref[id] = GRANT_INVALID_REF;
queue             407 drivers/net/xen-netfront.c 			queue->grant_tx_page[id] = NULL;
queue             408 drivers/net/xen-netfront.c 			add_id_to_freelist(&queue->tx_skb_freelist, queue->tx_skbs, id);
queue             412 drivers/net/xen-netfront.c 		queue->tx.rsp_cons = prod;
queue             414 drivers/net/xen-netfront.c 		RING_FINAL_CHECK_FOR_RESPONSES(&queue->tx, more_to_do);
queue             417 drivers/net/xen-netfront.c 	xennet_maybe_wake_tx(queue);
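
The xennet_tx_buf_gc() lines above show the classic completion-reap loop: snapshot the producer index, reap every response up to it, publish the consumer index, then re-check whether the producer moved and loop if so. A single-threaded C sketch of the control flow (the peer is stepped by hand here; in the real shared-memory ring a barrier sits where the comment says, and RING_FINAL_CHECK_FOR_RESPONSES does the re-check):

	#include <stdio.h>

	static unsigned rsp_prod, rsp_cons;

	static void backend_completes(unsigned n)   /* stand-in for the peer */
	{
		rsp_prod += n;
	}

	static void tx_buf_gc(void)
	{
		unsigned prod;

		do {
			prod = rsp_prod;     /* snapshot producer */
			/* virt_rmb() here in the shared-memory version */
			while (rsp_cons != prod) {
				printf("reap id %u\n", rsp_cons);
				rsp_cons++;  /* free grant ref + skb slot */
			}
			if (rsp_cons == rsp_prod)
				break;       /* nothing arrived meanwhile */
		} while (1);
	}

	int main(void)
	{
		backend_completes(3);
		tx_buf_gc();
		return 0;
	}
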
queue             421 drivers/net/xen-netfront.c 	struct netfront_queue *queue;
queue             437 drivers/net/xen-netfront.c 	struct netfront_queue *queue = info->queue;
queue             440 drivers/net/xen-netfront.c 	id = get_id_from_freelist(&queue->tx_skb_freelist, queue->tx_skbs);
queue             441 drivers/net/xen-netfront.c 	tx = RING_GET_REQUEST(&queue->tx, queue->tx.req_prod_pvt++);
queue             442 drivers/net/xen-netfront.c 	ref = gnttab_claim_grant_reference(&queue->gref_tx_head);
queue             445 drivers/net/xen-netfront.c 	gnttab_grant_foreign_access_ref(ref, queue->info->xbdev->otherend_id,
queue             448 drivers/net/xen-netfront.c 	queue->tx_skbs[id].skb = skb;
queue             449 drivers/net/xen-netfront.c 	queue->grant_tx_page[id] = page;
queue             450 drivers/net/xen-netfront.c 	queue->grant_tx_ref[id] = ref;
queue             463 drivers/net/xen-netfront.c 	struct netfront_queue *queue, struct sk_buff *skb,
queue             467 drivers/net/xen-netfront.c 		.queue = queue,
queue             489 drivers/net/xen-netfront.c 	struct netfront_queue *queue, struct xen_netif_tx_request *tx,
queue             494 drivers/net/xen-netfront.c 		.queue = queue,
queue             577 drivers/net/xen-netfront.c 	struct netfront_queue *queue = NULL;
queue             587 drivers/net/xen-netfront.c 	queue = &np->queues[queue_index];
queue             625 drivers/net/xen-netfront.c 	spin_lock_irqsave(&queue->tx_lock, flags);
queue             630 drivers/net/xen-netfront.c 		spin_unlock_irqrestore(&queue->tx_lock, flags);
queue             635 drivers/net/xen-netfront.c 	first_tx = tx = xennet_make_first_txreq(queue, skb,
queue             656 drivers/net/xen-netfront.c 			RING_GET_REQUEST(&queue->tx, queue->tx.req_prod_pvt++);
queue             672 drivers/net/xen-netfront.c 	tx = xennet_make_txreqs(queue, tx, skb, page, offset, len);
queue             677 drivers/net/xen-netfront.c 		tx = xennet_make_txreqs(queue, tx, skb, skb_frag_page(frag),
queue             685 drivers/net/xen-netfront.c 	RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&queue->tx, notify);
queue             687 drivers/net/xen-netfront.c 		notify_remote_via_irq(queue->tx_irq);
queue             695 drivers/net/xen-netfront.c 	xennet_tx_buf_gc(queue);
queue             697 drivers/net/xen-netfront.c 	if (!netfront_tx_slot_available(queue))
queue             698 drivers/net/xen-netfront.c 		netif_tx_stop_queue(netdev_get_tx_queue(dev, queue->id));
queue             700 drivers/net/xen-netfront.c 	spin_unlock_irqrestore(&queue->tx_lock, flags);
queue             715 drivers/net/xen-netfront.c 	struct netfront_queue *queue;
queue             718 drivers/net/xen-netfront.c 		queue = &np->queues[i];
queue             719 drivers/net/xen-netfront.c 		napi_disable(&queue->napi);
queue             724 drivers/net/xen-netfront.c static void xennet_move_rx_slot(struct netfront_queue *queue, struct sk_buff *skb,
queue             727 drivers/net/xen-netfront.c 	int new = xennet_rxidx(queue->rx.req_prod_pvt);
queue             729 drivers/net/xen-netfront.c 	BUG_ON(queue->rx_skbs[new]);
queue             730 drivers/net/xen-netfront.c 	queue->rx_skbs[new] = skb;
queue             731 drivers/net/xen-netfront.c 	queue->grant_rx_ref[new] = ref;
queue             732 drivers/net/xen-netfront.c 	RING_GET_REQUEST(&queue->rx, queue->rx.req_prod_pvt)->id = new;
queue             733 drivers/net/xen-netfront.c 	RING_GET_REQUEST(&queue->rx, queue->rx.req_prod_pvt)->gref = ref;
queue             734 drivers/net/xen-netfront.c 	queue->rx.req_prod_pvt++;
queue             737 drivers/net/xen-netfront.c static int xennet_get_extras(struct netfront_queue *queue,
queue             743 drivers/net/xen-netfront.c 	struct device *dev = &queue->info->netdev->dev;
queue             744 drivers/net/xen-netfront.c 	RING_IDX cons = queue->rx.rsp_cons;
queue             759 drivers/net/xen-netfront.c 			RING_GET_RESPONSE(&queue->rx, ++cons);
queue             772 drivers/net/xen-netfront.c 		skb = xennet_get_rx_skb(queue, cons);
queue             773 drivers/net/xen-netfront.c 		ref = xennet_get_rx_ref(queue, cons);
queue             774 drivers/net/xen-netfront.c 		xennet_move_rx_slot(queue, skb, ref);
queue             777 drivers/net/xen-netfront.c 	queue->rx.rsp_cons = cons;
queue             781 drivers/net/xen-netfront.c static int xennet_get_responses(struct netfront_queue *queue,
queue             787 drivers/net/xen-netfront.c 	struct device *dev = &queue->info->netdev->dev;
queue             788 drivers/net/xen-netfront.c 	RING_IDX cons = queue->rx.rsp_cons;
queue             789 drivers/net/xen-netfront.c 	struct sk_buff *skb = xennet_get_rx_skb(queue, cons);
queue             790 drivers/net/xen-netfront.c 	grant_ref_t ref = xennet_get_rx_ref(queue, cons);
queue             797 drivers/net/xen-netfront.c 		err = xennet_get_extras(queue, extras, rp);
queue             798 drivers/net/xen-netfront.c 		cons = queue->rx.rsp_cons;
queue             807 drivers/net/xen-netfront.c 			xennet_move_rx_slot(queue, skb, ref);
queue             828 drivers/net/xen-netfront.c 		gnttab_release_grant_reference(&queue->gref_rx_head, ref);
queue             843 drivers/net/xen-netfront.c 		rx = RING_GET_RESPONSE(&queue->rx, cons + slots);
queue             844 drivers/net/xen-netfront.c 		skb = xennet_get_rx_skb(queue, cons + slots);
queue             845 drivers/net/xen-netfront.c 		ref = xennet_get_rx_ref(queue, cons + slots);
queue             856 drivers/net/xen-netfront.c 		queue->rx.rsp_cons = cons + slots;
queue             890 drivers/net/xen-netfront.c static int xennet_fill_frags(struct netfront_queue *queue,
queue             894 drivers/net/xen-netfront.c 	RING_IDX cons = queue->rx.rsp_cons;
queue             899 drivers/net/xen-netfront.c 			RING_GET_RESPONSE(&queue->rx, ++cons);
queue             909 drivers/net/xen-netfront.c 			queue->rx.rsp_cons = ++cons + skb_queue_len(list);
queue             922 drivers/net/xen-netfront.c 	queue->rx.rsp_cons = cons;
queue             951 drivers/net/xen-netfront.c static int handle_incoming_queue(struct netfront_queue *queue,
queue             954 drivers/net/xen-netfront.c 	struct netfront_stats *rx_stats = this_cpu_ptr(queue->info->rx_stats);
queue             965 drivers/net/xen-netfront.c 		skb->protocol = eth_type_trans(skb, queue->info->netdev);
queue             968 drivers/net/xen-netfront.c 		if (checksum_setup(queue->info->netdev, skb)) {
queue             971 drivers/net/xen-netfront.c 			queue->info->netdev->stats.rx_errors++;
queue             981 drivers/net/xen-netfront.c 		napi_gro_receive(&queue->napi, skb);
queue             989 drivers/net/xen-netfront.c 	struct netfront_queue *queue = container_of(napi, struct netfront_queue, napi);
queue             990 drivers/net/xen-netfront.c 	struct net_device *dev = queue->info->netdev;
queue            1002 drivers/net/xen-netfront.c 	spin_lock(&queue->rx_lock);
queue            1008 drivers/net/xen-netfront.c 	rp = queue->rx.sring->rsp_prod;
queue            1011 drivers/net/xen-netfront.c 	i = queue->rx.rsp_cons;
queue            1014 drivers/net/xen-netfront.c 		memcpy(rx, RING_GET_RESPONSE(&queue->rx, i), sizeof(*rx));
queue            1017 drivers/net/xen-netfront.c 		err = xennet_get_responses(queue, &rinfo, rp, &tmpq);
queue            1024 drivers/net/xen-netfront.c 			i = queue->rx.rsp_cons;
queue            1036 drivers/net/xen-netfront.c 				queue->rx.rsp_cons += skb_queue_len(&tmpq);
queue            1050 drivers/net/xen-netfront.c 		if (unlikely(xennet_fill_frags(queue, skb, &tmpq)))
queue            1060 drivers/net/xen-netfront.c 		i = ++queue->rx.rsp_cons;
queue            1066 drivers/net/xen-netfront.c 	work_done -= handle_incoming_queue(queue, &rxq);
queue            1068 drivers/net/xen-netfront.c 	xennet_alloc_rx_buffers(queue);
queue            1075 drivers/net/xen-netfront.c 		RING_FINAL_CHECK_FOR_RESPONSES(&queue->rx, more_to_do);
queue            1080 drivers/net/xen-netfront.c 	spin_unlock(&queue->rx_lock);
queue            1129 drivers/net/xen-netfront.c static void xennet_release_tx_bufs(struct netfront_queue *queue)
queue            1136 drivers/net/xen-netfront.c 		if (skb_entry_is_link(&queue->tx_skbs[i]))
queue            1139 drivers/net/xen-netfront.c 		skb = queue->tx_skbs[i].skb;
queue            1140 drivers/net/xen-netfront.c 		get_page(queue->grant_tx_page[i]);
queue            1141 drivers/net/xen-netfront.c 		gnttab_end_foreign_access(queue->grant_tx_ref[i],
queue            1143 drivers/net/xen-netfront.c 					  (unsigned long)page_address(queue->grant_tx_page[i]));
queue            1144 drivers/net/xen-netfront.c 		queue->grant_tx_page[i] = NULL;
queue            1145 drivers/net/xen-netfront.c 		queue->grant_tx_ref[i] = GRANT_INVALID_REF;
queue            1146 drivers/net/xen-netfront.c 		add_id_to_freelist(&queue->tx_skb_freelist, queue->tx_skbs, i);
queue            1151 drivers/net/xen-netfront.c static void xennet_release_rx_bufs(struct netfront_queue *queue)
queue            1155 drivers/net/xen-netfront.c 	spin_lock_bh(&queue->rx_lock);
queue            1161 drivers/net/xen-netfront.c 		skb = queue->rx_skbs[id];
queue            1165 drivers/net/xen-netfront.c 		ref = queue->grant_rx_ref[id];
queue            1177 drivers/net/xen-netfront.c 		queue->grant_rx_ref[id] = GRANT_INVALID_REF;
queue            1182 drivers/net/xen-netfront.c 	spin_unlock_bh(&queue->rx_lock);
queue            1223 drivers/net/xen-netfront.c 	struct netfront_queue *queue = dev_id;
queue            1226 drivers/net/xen-netfront.c 	spin_lock_irqsave(&queue->tx_lock, flags);
queue            1227 drivers/net/xen-netfront.c 	xennet_tx_buf_gc(queue);
queue            1228 drivers/net/xen-netfront.c 	spin_unlock_irqrestore(&queue->tx_lock, flags);
queue            1235 drivers/net/xen-netfront.c 	struct netfront_queue *queue = dev_id;
queue            1236 drivers/net/xen-netfront.c 	struct net_device *dev = queue->info->netdev;
queue            1239 drivers/net/xen-netfront.c 		   RING_HAS_UNCONSUMED_RESPONSES(&queue->rx)))
queue            1240 drivers/net/xen-netfront.c 		napi_schedule(&queue->napi);
queue            1393 drivers/net/xen-netfront.c 		struct netfront_queue *queue = &info->queues[i];
queue            1395 drivers/net/xen-netfront.c 		del_timer_sync(&queue->rx_refill_timer);
queue            1397 drivers/net/xen-netfront.c 		if (queue->tx_irq && (queue->tx_irq == queue->rx_irq))
queue            1398 drivers/net/xen-netfront.c 			unbind_from_irqhandler(queue->tx_irq, queue);
queue            1399 drivers/net/xen-netfront.c 		if (queue->tx_irq && (queue->tx_irq != queue->rx_irq)) {
queue            1400 drivers/net/xen-netfront.c 			unbind_from_irqhandler(queue->tx_irq, queue);
queue            1401 drivers/net/xen-netfront.c 			unbind_from_irqhandler(queue->rx_irq, queue);
queue            1403 drivers/net/xen-netfront.c 		queue->tx_evtchn = queue->rx_evtchn = 0;
queue            1404 drivers/net/xen-netfront.c 		queue->tx_irq = queue->rx_irq = 0;
queue            1407 drivers/net/xen-netfront.c 			napi_synchronize(&queue->napi);
queue            1409 drivers/net/xen-netfront.c 		xennet_release_tx_bufs(queue);
queue            1410 drivers/net/xen-netfront.c 		xennet_release_rx_bufs(queue);
queue            1411 drivers/net/xen-netfront.c 		gnttab_free_grant_references(queue->gref_tx_head);
queue            1412 drivers/net/xen-netfront.c 		gnttab_free_grant_references(queue->gref_rx_head);
queue            1415 drivers/net/xen-netfront.c 		xennet_end_access(queue->tx_ring_ref, queue->tx.sring);
queue            1416 drivers/net/xen-netfront.c 		xennet_end_access(queue->rx_ring_ref, queue->rx.sring);
queue            1418 drivers/net/xen-netfront.c 		queue->tx_ring_ref = GRANT_INVALID_REF;
queue            1419 drivers/net/xen-netfront.c 		queue->rx_ring_ref = GRANT_INVALID_REF;
queue            1420 drivers/net/xen-netfront.c 		queue->tx.sring = NULL;
queue            1421 drivers/net/xen-netfront.c 		queue->rx.sring = NULL;
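
The teardown run ending above resets every reference to an invalid sentinel (GRANT_INVALID_REF, NULL srings) after revoking access, so a repeated or partially completed teardown is harmless. A small sketch of that idiom, assuming netfront's convention that 0 is the invalid grant reference:

	#include <stdio.h>

	#define GRANT_INVALID_REF 0u

	struct ring_end {
		unsigned ring_ref;
		void *sring;
	};

	static void end_access(struct ring_end *r)
	{
		if (r->ring_ref != GRANT_INVALID_REF)
			printf("revoke grant %u and free page\n", r->ring_ref);
		r->ring_ref = GRANT_INVALID_REF;   /* safe to call again */
		r->sring = NULL;
	}

	int main(void)
	{
		struct ring_end tx = { 17, (void *)&tx };

		end_access(&tx);
		end_access(&tx);                   /* second call is a no-op */
		return 0;
	}
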
queue            1463 drivers/net/xen-netfront.c static int setup_netfront_single(struct netfront_queue *queue)
queue            1467 drivers/net/xen-netfront.c 	err = xenbus_alloc_evtchn(queue->info->xbdev, &queue->tx_evtchn);
queue            1471 drivers/net/xen-netfront.c 	err = bind_evtchn_to_irqhandler(queue->tx_evtchn,
queue            1473 drivers/net/xen-netfront.c 					0, queue->info->netdev->name, queue);
queue            1476 drivers/net/xen-netfront.c 	queue->rx_evtchn = queue->tx_evtchn;
queue            1477 drivers/net/xen-netfront.c 	queue->rx_irq = queue->tx_irq = err;
queue            1482 drivers/net/xen-netfront.c 	xenbus_free_evtchn(queue->info->xbdev, queue->tx_evtchn);
queue            1483 drivers/net/xen-netfront.c 	queue->tx_evtchn = 0;
queue            1488 drivers/net/xen-netfront.c static int setup_netfront_split(struct netfront_queue *queue)
queue            1492 drivers/net/xen-netfront.c 	err = xenbus_alloc_evtchn(queue->info->xbdev, &queue->tx_evtchn);
queue            1495 drivers/net/xen-netfront.c 	err = xenbus_alloc_evtchn(queue->info->xbdev, &queue->rx_evtchn);
queue            1499 drivers/net/xen-netfront.c 	snprintf(queue->tx_irq_name, sizeof(queue->tx_irq_name),
queue            1500 drivers/net/xen-netfront.c 		 "%s-tx", queue->name);
queue            1501 drivers/net/xen-netfront.c 	err = bind_evtchn_to_irqhandler(queue->tx_evtchn,
queue            1503 drivers/net/xen-netfront.c 					0, queue->tx_irq_name, queue);
queue            1506 drivers/net/xen-netfront.c 	queue->tx_irq = err;
queue            1508 drivers/net/xen-netfront.c 	snprintf(queue->rx_irq_name, sizeof(queue->rx_irq_name),
queue            1509 drivers/net/xen-netfront.c 		 "%s-rx", queue->name);
queue            1510 drivers/net/xen-netfront.c 	err = bind_evtchn_to_irqhandler(queue->rx_evtchn,
queue            1512 drivers/net/xen-netfront.c 					0, queue->rx_irq_name, queue);
queue            1515 drivers/net/xen-netfront.c 	queue->rx_irq = err;
queue            1520 drivers/net/xen-netfront.c 	unbind_from_irqhandler(queue->tx_irq, queue);
queue            1521 drivers/net/xen-netfront.c 	queue->tx_irq = 0;
queue            1523 drivers/net/xen-netfront.c 	xenbus_free_evtchn(queue->info->xbdev, queue->rx_evtchn);
queue            1524 drivers/net/xen-netfront.c 	queue->rx_evtchn = 0;
queue            1526 drivers/net/xen-netfront.c 	xenbus_free_evtchn(queue->info->xbdev, queue->tx_evtchn);
queue            1527 drivers/net/xen-netfront.c 	queue->tx_evtchn = 0;
queue            1533 drivers/net/xen-netfront.c 			struct netfront_queue *queue, unsigned int feature_split_evtchn)
queue            1540 drivers/net/xen-netfront.c 	queue->tx_ring_ref = GRANT_INVALID_REF;
queue            1541 drivers/net/xen-netfront.c 	queue->rx_ring_ref = GRANT_INVALID_REF;
queue            1542 drivers/net/xen-netfront.c 	queue->rx.sring = NULL;
queue            1543 drivers/net/xen-netfront.c 	queue->tx.sring = NULL;
queue            1552 drivers/net/xen-netfront.c 	FRONT_RING_INIT(&queue->tx, txs, XEN_PAGE_SIZE);
queue            1557 drivers/net/xen-netfront.c 	queue->tx_ring_ref = gref;
queue            1566 drivers/net/xen-netfront.c 	FRONT_RING_INIT(&queue->rx, rxs, XEN_PAGE_SIZE);
queue            1571 drivers/net/xen-netfront.c 	queue->rx_ring_ref = gref;
queue            1574 drivers/net/xen-netfront.c 		err = setup_netfront_split(queue);
queue            1580 drivers/net/xen-netfront.c 		err = setup_netfront_single(queue);
queue            1591 drivers/net/xen-netfront.c 	gnttab_end_foreign_access_ref(queue->rx_ring_ref, 0);
queue            1595 drivers/net/xen-netfront.c 	gnttab_end_foreign_access_ref(queue->tx_ring_ref, 0);
queue            1606 drivers/net/xen-netfront.c static int xennet_init_queue(struct netfront_queue *queue)
queue            1612 drivers/net/xen-netfront.c 	spin_lock_init(&queue->tx_lock);
queue            1613 drivers/net/xen-netfront.c 	spin_lock_init(&queue->rx_lock);
queue            1615 drivers/net/xen-netfront.c 	timer_setup(&queue->rx_refill_timer, rx_refill_timeout, 0);
queue            1617 drivers/net/xen-netfront.c 	devid = strrchr(queue->info->xbdev->nodename, '/') + 1;
queue            1618 drivers/net/xen-netfront.c 	snprintf(queue->name, sizeof(queue->name), "vif%s-q%u",
queue            1619 drivers/net/xen-netfront.c 		 devid, queue->id);
queue            1622 drivers/net/xen-netfront.c 	queue->tx_skb_freelist = 0;
queue            1624 drivers/net/xen-netfront.c 		skb_entry_set_link(&queue->tx_skbs[i], i+1);
queue            1625 drivers/net/xen-netfront.c 		queue->grant_tx_ref[i] = GRANT_INVALID_REF;
queue            1626 drivers/net/xen-netfront.c 		queue->grant_tx_page[i] = NULL;
queue            1631 drivers/net/xen-netfront.c 		queue->rx_skbs[i] = NULL;
queue            1632 drivers/net/xen-netfront.c 		queue->grant_rx_ref[i] = GRANT_INVALID_REF;
queue            1637 drivers/net/xen-netfront.c 					  &queue->gref_tx_head) < 0) {
queue            1645 drivers/net/xen-netfront.c 					  &queue->gref_rx_head) < 0) {
queue            1654 drivers/net/xen-netfront.c 	gnttab_free_grant_references(queue->gref_tx_head);
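
xennet_init_queue() above threads the free tx slots together with skb_entry_set_link(&queue->tx_skbs[i], i+1), giving an index-linked freelist headed by tx_skb_freelist: free slots of the array store the index of the next free slot, so get/put are O(1) with no extra memory. A sketch of the data structure, omitting the driver's trick of tagging the link to distinguish it from a pointer:

	#include <assert.h>
	#include <stdio.h>

	#define NR_SLOTS 8

	static unsigned link[NR_SLOTS];  /* link[i] = next free slot after i */
	static unsigned freelist;        /* index of first free slot */

	static void freelist_init(void)
	{
		unsigned i;

		freelist = 0;
		for (i = 0; i < NR_SLOTS; i++)
			link[i] = i + 1;     /* NR_SLOTS acts as end-of-list */
	}

	static unsigned get_id(void)
	{
		unsigned id = freelist;

		assert(id < NR_SLOTS);       /* list not exhausted */
		freelist = link[id];
		return id;
	}

	static void put_id(unsigned id)
	{
		link[id] = freelist;
		freelist = id;
	}

	int main(void)
	{
		unsigned a, b;

		freelist_init();
		a = get_id();
		b = get_id();
		printf("got %u then %u\n", a, b); /* 0 then 1 */
		put_id(a);
		printf("reused %u\n", get_id());  /* 0 again */
		return 0;
	}
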
queue            1659 drivers/net/xen-netfront.c static int write_queue_xenstore_keys(struct netfront_queue *queue,
queue            1666 drivers/net/xen-netfront.c 	struct xenbus_device *dev = queue->info->xbdev;
queue            1682 drivers/net/xen-netfront.c 				dev->nodename, queue->id);
queue            1689 drivers/net/xen-netfront.c 			queue->tx_ring_ref);
queue            1696 drivers/net/xen-netfront.c 			queue->rx_ring_ref);
queue            1705 drivers/net/xen-netfront.c 	if (queue->tx_evtchn == queue->rx_evtchn) {
queue            1708 drivers/net/xen-netfront.c 				"event-channel", "%u", queue->tx_evtchn);
queue            1716 drivers/net/xen-netfront.c 				"event-channel-tx", "%u", queue->tx_evtchn);
queue            1723 drivers/net/xen-netfront.c 				"event-channel-rx", "%u", queue->rx_evtchn);
queue            1746 drivers/net/xen-netfront.c 		struct netfront_queue *queue = &info->queues[i];
queue            1749 drivers/net/xen-netfront.c 			napi_disable(&queue->napi);
queue            1750 drivers/net/xen-netfront.c 		netif_napi_del(&queue->napi);
queue            1769 drivers/net/xen-netfront.c 		struct netfront_queue *queue = &info->queues[i];
queue            1771 drivers/net/xen-netfront.c 		queue->id = i;
queue            1772 drivers/net/xen-netfront.c 		queue->info = info;
queue            1774 drivers/net/xen-netfront.c 		ret = xennet_init_queue(queue);
queue            1782 drivers/net/xen-netfront.c 		netif_napi_add(queue->info->netdev, &queue->napi,
queue            1785 drivers/net/xen-netfront.c 			napi_enable(&queue->napi);
queue            1807 drivers/net/xen-netfront.c 	struct netfront_queue *queue = NULL;
queue            1843 drivers/net/xen-netfront.c 		queue = &info->queues[i];
queue            1844 drivers/net/xen-netfront.c 		err = setup_netfront(dev, queue, feature_split_evtchn);
queue            1874 drivers/net/xen-netfront.c 			queue = &info->queues[i];
queue            1875 drivers/net/xen-netfront.c 			err = write_queue_xenstore_keys(queue, &xbt, 1); /* hierarchical */
queue            1951 drivers/net/xen-netfront.c 	struct netfront_queue *queue = NULL;
queue            1987 drivers/net/xen-netfront.c 		queue = &np->queues[j];
queue            1989 drivers/net/xen-netfront.c 		notify_remote_via_irq(queue->tx_irq);
queue            1990 drivers/net/xen-netfront.c 		if (queue->tx_irq != queue->rx_irq)
queue            1991 drivers/net/xen-netfront.c 			notify_remote_via_irq(queue->rx_irq);
queue            1993 drivers/net/xen-netfront.c 		spin_lock_irq(&queue->tx_lock);
queue            1994 drivers/net/xen-netfront.c 		xennet_tx_buf_gc(queue);
queue            1995 drivers/net/xen-netfront.c 		spin_unlock_irq(&queue->tx_lock);
queue            1997 drivers/net/xen-netfront.c 		spin_lock_bh(&queue->rx_lock);
queue            1998 drivers/net/xen-netfront.c 		xennet_alloc_rx_buffers(queue);
queue            1999 drivers/net/xen-netfront.c 		spin_unlock_bh(&queue->rx_lock);
queue             449 drivers/nfc/pn533/pn533.c 	INIT_LIST_HEAD(&cmd->queue);
queue             450 drivers/nfc/pn533/pn533.c 	list_add_tail(&cmd->queue, &dev->cmd_queue);
queue             548 drivers/nfc/pn533/pn533.c 	cmd = list_first_entry(&dev->cmd_queue, struct pn533_cmd, queue);
queue             550 drivers/nfc/pn533/pn533.c 	list_del(&cmd->queue);
queue            2675 drivers/nfc/pn533/pn533.c 	list_for_each_entry_safe(cmd, n, &priv->cmd_queue, queue) {
queue            2676 drivers/nfc/pn533/pn533.c 		list_del(&cmd->queue);
queue             177 drivers/nfc/pn533/pn533.h 	struct list_head queue;
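
The pn533 hits above chain struct pn533_cmd through an embedded list_head named queue: list_add_tail() to submit, list_first_entry() + list_del() to dispatch, list_for_each_entry_safe() to flush on teardown. A userspace analogue using BSD <sys/queue.h> TAILQ macros in place of the kernel list API:

	#include <stdio.h>
	#include <stdlib.h>
	#include <sys/queue.h>

	struct cmd {
		int code;
		TAILQ_ENTRY(cmd) queue;  /* like: struct list_head queue; */
	};

	TAILQ_HEAD(cmd_queue, cmd);

	int main(void)
	{
		struct cmd_queue q = TAILQ_HEAD_INITIALIZER(q);
		struct cmd *c;
		int i;

		for (i = 0; i < 3; i++) {               /* submit path */
			c = malloc(sizeof(*c));
			c->code = 0x40 + i;
			TAILQ_INSERT_TAIL(&q, c, queue);
		}

		while ((c = TAILQ_FIRST(&q)) != NULL) { /* dispatch path */
			TAILQ_REMOVE(&q, c, queue);
			printf("run cmd 0x%02x\n", c->code);
			free(c);
		}
		return 0;
	}
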
queue             270 drivers/nvdimm/blk.c 	disk->queue		= q;
queue            1538 drivers/nvdimm/btt.c 	btt->btt_disk->queue = btt->btt_queue;
queue            1540 drivers/nvdimm/btt.c 	btt->btt_disk->queue->backing_dev_info->capabilities |=
queue             413 drivers/nvdimm/core.c 	blk_queue_max_integrity_segments(disk->queue, 1);
queue             404 drivers/nvdimm/nd.h 	if (!blk_queue_io_stat(disk->queue))
queue             408 drivers/nvdimm/nd.h 	generic_start_io_acct(disk->queue, bio_op(bio), bio_sectors(bio),
queue             416 drivers/nvdimm/nd.h 	generic_end_io_acct(disk->queue, bio_op(bio), &disk->part0, start);
queue             454 drivers/nvdimm/pmem.c 	disk->queue		= q;
queue             456 drivers/nvdimm/pmem.c 	disk->queue->backing_dev_info->capabilities |= BDI_CAP_SYNCHRONOUS_IO;
queue             106 drivers/nvme/host/core.c 	blk_set_queue_dying(ns->queue);
queue             108 drivers/nvme/host/core.c 	blk_mq_unquiesce_queue(ns->queue);
queue            1314 drivers/nvme/host/core.c 	return nvme_submit_user_cmd(ns->queue, &c,
queue            1434 drivers/nvme/host/core.c 	status = nvme_submit_user_cmd(ns ? ns->queue : ctrl->admin_q, &c,
queue            1481 drivers/nvme/host/core.c 	status = nvme_submit_user_cmd(ns ? ns->queue : ctrl->admin_q, &c,
queue            1668 drivers/nvme/host/core.c 	blk_queue_max_integrity_segments(disk->queue, 1);
queue            1679 drivers/nvme/host/core.c 	blk_queue_chunk_sectors(ns->queue, rounddown_pow_of_two(chunk_size));
queue            1685 drivers/nvme/host/core.c 	struct request_queue *queue = disk->queue;
queue            1686 drivers/nvme/host/core.c 	u32 size = queue_logical_block_size(queue);
queue            1689 drivers/nvme/host/core.c 		blk_queue_flag_clear(QUEUE_FLAG_DISCARD, queue);
queue            1699 drivers/nvme/host/core.c 	queue->limits.discard_alignment = 0;
queue            1700 drivers/nvme/host/core.c 	queue->limits.discard_granularity = size;
queue            1703 drivers/nvme/host/core.c 	if (blk_queue_flag_test_and_set(QUEUE_FLAG_DISCARD, queue))
queue            1706 drivers/nvme/host/core.c 	blk_queue_max_discard_sectors(queue, UINT_MAX);
queue            1707 drivers/nvme/host/core.c 	blk_queue_max_discard_segments(queue, NVME_DSM_MAX_RANGES);
queue            1710 drivers/nvme/host/core.c 		blk_queue_max_write_zeroes_sectors(queue, UINT_MAX);
queue            1736 drivers/nvme/host/core.c 	blk_queue_max_write_zeroes_sectors(disk->queue, max_sectors);
queue            1778 drivers/nvme/host/core.c 	blk_mq_freeze_queue(disk->queue);
queue            1803 drivers/nvme/host/core.c 	blk_queue_logical_block_size(disk->queue, bs);
queue            1809 drivers/nvme/host/core.c 	blk_queue_physical_block_size(disk->queue, min(phys_bs, atomic_bs));
queue            1810 drivers/nvme/host/core.c 	blk_queue_io_min(disk->queue, phys_bs);
queue            1811 drivers/nvme/host/core.c 	blk_queue_io_opt(disk->queue, io_opt);
queue            1830 drivers/nvme/host/core.c 	blk_mq_unfreeze_queue(disk->queue);
queue            1859 drivers/nvme/host/core.c 		blk_queue_stack_limits(ns->head->disk->queue, ns->queue);
queue            1953 drivers/nvme/host/core.c 	ret = nvme_submit_sync_cmd(ns->queue, &c, data, 16);
queue            3472 drivers/nvme/host/core.c 		blk_queue_io_min(ns->queue, bs * ns->sws);
queue            3474 drivers/nvme/host/core.c 			blk_queue_io_opt(ns->queue, bs * ns->sws * ns->sgs);
queue            3492 drivers/nvme/host/core.c 	ns->queue = blk_mq_init_queue(ctrl->tagset);
queue            3493 drivers/nvme/host/core.c 	if (IS_ERR(ns->queue)) {
queue            3494 drivers/nvme/host/core.c 		ret = PTR_ERR(ns->queue);
queue            3499 drivers/nvme/host/core.c 		ns->queue->backing_dev_info->capabilities
queue            3502 drivers/nvme/host/core.c 	blk_queue_flag_set(QUEUE_FLAG_NONROT, ns->queue);
queue            3504 drivers/nvme/host/core.c 		blk_queue_flag_set(QUEUE_FLAG_PCI_P2PDMA, ns->queue);
queue            3506 drivers/nvme/host/core.c 	ns->queue->queuedata = ns;
queue            3512 drivers/nvme/host/core.c 	blk_queue_logical_block_size(ns->queue, 1 << ns->lba_shift);
queue            3513 drivers/nvme/host/core.c 	nvme_set_queue_limits(ctrl, ns->queue);
queue            3538 drivers/nvme/host/core.c 	disk->queue = ns->queue;
queue            3568 drivers/nvme/host/core.c 	ns->disk->queue = NULL;
queue            3578 drivers/nvme/host/core.c 	blk_cleanup_queue(ns->queue);
queue            3602 drivers/nvme/host/core.c 		blk_cleanup_queue(ns->queue);
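
The nvme_alloc_ns hits trace the per-namespace queue lifecycle end to end: allocate against the controller's shared tag set, configure, attach to the gendisk, and blk_cleanup_queue() on teardown. Stitched together (error label illustrative, intermediate setup elided):

        ns->queue = blk_mq_init_queue(ctrl->tagset);
        if (IS_ERR(ns->queue)) {
                ret = PTR_ERR(ns->queue);
                goto out_free_ns;
        }
        blk_queue_flag_set(QUEUE_FLAG_NONROT, ns->queue);       /* SSD semantics */
        ns->queue->queuedata = ns;              /* back-pointer for ->queue_rq() */
        blk_queue_logical_block_size(ns->queue, 1 << ns->lba_shift);
        nvme_set_queue_limits(ctrl, ns->queue);
        /* ... gendisk setup elided ... */
        disk->queue = ns->queue;
        /* ... teardown / error unwind ... */
        blk_cleanup_queue(ns->queue);
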
queue            4137 drivers/nvme/host/core.c 		blk_mq_unfreeze_queue(ns->queue);
queue            4148 drivers/nvme/host/core.c 		timeout = blk_mq_freeze_queue_wait_timeout(ns->queue, timeout);
queue            4162 drivers/nvme/host/core.c 		blk_mq_freeze_queue_wait(ns->queue);
queue            4173 drivers/nvme/host/core.c 		blk_freeze_queue_start(ns->queue);
queue            4184 drivers/nvme/host/core.c 		blk_mq_quiesce_queue(ns->queue);
queue            4195 drivers/nvme/host/core.c 		blk_mq_unquiesce_queue(ns->queue);
queue            4207 drivers/nvme/host/core.c 		blk_sync_queue(ns->queue);
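
The tail of core.c applies one blk-mq operation per namespace; the enclosing loop (elided by the index) plausibly reads, for the quiesce case:

        down_read(&ctrl->namespaces_rwsem);     /* assumed lock for this era */
        list_for_each_entry(ns, &ctrl->namespaces, list)
                blk_mq_quiesce_queue(ns->queue);        /* stop ->queue_rq() dispatch */
        up_read(&ctrl->namespaces_rwsem);

Quiesce blocks dispatch without failing requests already queued; the matching unquiesce walk restarts the hardware contexts.
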
queue              54 drivers/nvme/host/fc.c 	struct nvme_fc_queue	*queue;
queue              84 drivers/nvme/host/fc.c 	struct nvme_fc_queue	*queue;
queue            1180 drivers/nvme/host/fc.c 	struct nvme_fc_queue *queue, u16 qsize, u16 ersp_ratio)
queue            1222 drivers/nvme/host/fc.c 	lsop->queue = queue;
queue            1268 drivers/nvme/host/fc.c 			queue->qnum, validation_errors[fcret]);
queue            1272 drivers/nvme/host/fc.c 		queue->connection_id =
queue            1274 drivers/nvme/host/fc.c 		set_bit(NVME_FC_Q_CONNECTED, &queue->flags);
queue            1283 drivers/nvme/host/fc.c 			queue->qnum, ret);
queue            1288 drivers/nvme/host/fc.c nvme_fc_connect_queue(struct nvme_fc_ctrl *ctrl, struct nvme_fc_queue *queue,
queue            1327 drivers/nvme/host/fc.c 	conn_rqst->connect_cmd.qid  = cpu_to_be16(queue->qnum);
queue            1330 drivers/nvme/host/fc.c 	lsop->queue = queue;
queue            1367 drivers/nvme/host/fc.c 			queue->qnum, validation_errors[fcret]);
queue            1369 drivers/nvme/host/fc.c 		queue->connection_id =
queue            1371 drivers/nvme/host/fc.c 		set_bit(NVME_FC_Q_CONNECTED, &queue->flags);
queue            1380 drivers/nvme/host/fc.c 			queue->qnum, ret);
queue            1517 drivers/nvme/host/fc.c 					op->queue->lldd_handle,
queue            1560 drivers/nvme/host/fc.c 	struct nvme_fc_queue *queue = op->queue;
queue            1697 drivers/nvme/host/fc.c 		nvme_complete_async_event(&queue->ctrl->ctrl, status, &result);
queue            1715 drivers/nvme/host/fc.c 		struct nvme_fc_queue *queue, struct nvme_fc_fcp_op *op,
queue            1730 drivers/nvme/host/fc.c 	op->queue = queue;
queue            1768 drivers/nvme/host/fc.c 	struct nvme_fc_queue *queue = &ctrl->queues[queue_idx];
queue            1771 drivers/nvme/host/fc.c 	res = __nvme_fc_init_request(ctrl, queue, &op->op, rq, queue->rqcnt++);
queue            1839 drivers/nvme/host/fc.c 	struct nvme_fc_queue *queue = &ctrl->queues[qidx];
queue            1841 drivers/nvme/host/fc.c 	hctx->driver_data = queue;
queue            1842 drivers/nvme/host/fc.c 	queue->hctx = hctx;
queue            1870 drivers/nvme/host/fc.c 	struct nvme_fc_queue *queue;
queue            1872 drivers/nvme/host/fc.c 	queue = &ctrl->queues[idx];
queue            1873 drivers/nvme/host/fc.c 	memset(queue, 0, sizeof(*queue));
queue            1874 drivers/nvme/host/fc.c 	queue->ctrl = ctrl;
queue            1875 drivers/nvme/host/fc.c 	queue->qnum = idx;
queue            1876 drivers/nvme/host/fc.c 	atomic_set(&queue->csn, 0);
queue            1877 drivers/nvme/host/fc.c 	queue->dev = ctrl->dev;
queue            1880 drivers/nvme/host/fc.c 		queue->cmnd_capsule_len = ctrl->ctrl.ioccsz * 16;
queue            1882 drivers/nvme/host/fc.c 		queue->cmnd_capsule_len = sizeof(struct nvme_command);
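
nvme_fc_init_queue's lines show each queue being zeroed and stamped with its owner, index, and capsule length; in the sketch below, the admin/I-O split is inferred from the two capsule assignments:

        memset(queue, 0, sizeof(*queue));
        queue->ctrl = ctrl;
        queue->qnum = idx;
        atomic_set(&queue->csn, 0);     /* per-queue command sequence number */
        queue->dev = ctrl->dev;

        if (idx)        /* I/O queue: honour the fabric's in-capsule data size */
                queue->cmnd_capsule_len = ctrl->ctrl.ioccsz * 16;
        else            /* admin queue: bare SQE only */
                queue->cmnd_capsule_len = sizeof(struct nvme_command);
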
queue            1905 drivers/nvme/host/fc.c nvme_fc_free_queue(struct nvme_fc_queue *queue)
queue            1907 drivers/nvme/host/fc.c 	if (!test_and_clear_bit(NVME_FC_Q_CONNECTED, &queue->flags))
queue            1910 drivers/nvme/host/fc.c 	clear_bit(NVME_FC_Q_LIVE, &queue->flags);
queue            1917 drivers/nvme/host/fc.c 	queue->connection_id = 0;
queue            1918 drivers/nvme/host/fc.c 	atomic_set(&queue->csn, 0);
queue            1923 drivers/nvme/host/fc.c 	struct nvme_fc_queue *queue, unsigned int qidx)
queue            1927 drivers/nvme/host/fc.c 				queue->lldd_handle);
queue            1928 drivers/nvme/host/fc.c 	queue->lldd_handle = NULL;
queue            1942 drivers/nvme/host/fc.c 	struct nvme_fc_queue *queue, unsigned int qidx, u16 qsize)
queue            1946 drivers/nvme/host/fc.c 	queue->lldd_handle = NULL;
queue            1949 drivers/nvme/host/fc.c 				qidx, qsize, &queue->lldd_handle);
queue            1957 drivers/nvme/host/fc.c 	struct nvme_fc_queue *queue = &ctrl->queues[ctrl->ctrl.queue_count - 1];
queue            1960 drivers/nvme/host/fc.c 	for (i = ctrl->ctrl.queue_count - 1; i >= 1; i--, queue--)
queue            1961 drivers/nvme/host/fc.c 		__nvme_fc_delete_hw_queue(ctrl, queue, i);
queue            1967 drivers/nvme/host/fc.c 	struct nvme_fc_queue *queue = &ctrl->queues[1];
queue            1970 drivers/nvme/host/fc.c 	for (i = 1; i < ctrl->ctrl.queue_count; i++, queue++) {
queue            1971 drivers/nvme/host/fc.c 		ret = __nvme_fc_create_hw_queue(ctrl, queue, i, qsize);
queue            2207 drivers/nvme/host/fc.c nvme_fc_start_fcp_op(struct nvme_fc_ctrl *ctrl, struct nvme_fc_queue *queue,
queue            2226 drivers/nvme/host/fc.c 	cmdiu->connection_id = cpu_to_be64(queue->connection_id);
queue            2244 drivers/nvme/host/fc.c 	op->fcp_req.sqid = cpu_to_le16(queue->qnum);
queue            2284 drivers/nvme/host/fc.c 	cmdiu->csn = cpu_to_be32(atomic_inc_return(&queue->csn));
queue            2287 drivers/nvme/host/fc.c 					queue->lldd_handle, &op->fcp_req);
queue            2324 drivers/nvme/host/fc.c 	struct nvme_ns *ns = hctx->queue->queuedata;
queue            2325 drivers/nvme/host/fc.c 	struct nvme_fc_queue *queue = hctx->driver_data;
queue            2326 drivers/nvme/host/fc.c 	struct nvme_fc_ctrl *ctrl = queue->ctrl;
queue            2332 drivers/nvme/host/fc.c 	bool queue_ready = test_bit(NVME_FC_Q_LIVE, &queue->flags);
queue            2337 drivers/nvme/host/fc.c 	    !nvmf_check_ready(&queue->ctrl->ctrl, rq, queue_ready))
queue            2338 drivers/nvme/host/fc.c 		return nvmf_fail_nonready_command(&queue->ctrl->ctrl, rq);
queue            2362 drivers/nvme/host/fc.c 	return nvme_fc_start_fcp_op(ctrl, queue, op, data_len, io_dir);
queue            2384 drivers/nvme/host/fc.c 	ret = nvme_fc_start_fcp_op(ctrl, aen_op->queue, aen_op, 0,
queue             874 drivers/nvme/host/lightnvm.c 	ret = nvme_nvm_submit_user_cmd(ns->queue, ns, &c,
queue             918 drivers/nvme/host/lightnvm.c 	q = admin ? ns->ctrl->admin_q : ns->queue;
queue             950 drivers/nvme/host/lightnvm.c 	struct request_queue *q = ns->queue;
queue              22 drivers/nvme/host/multipath.c 			blk_mq_unfreeze_queue(h->disk->queue);
queue              32 drivers/nvme/host/multipath.c 			blk_mq_freeze_queue_wait(h->disk->queue);
queue              42 drivers/nvme/host/multipath.c 			blk_freeze_queue_start(h->disk->queue);
queue             318 drivers/nvme/host/multipath.c 		trace_block_bio_remap(bio->bi_disk->queue, bio,
queue             400 drivers/nvme/host/multipath.c 	head->disk->queue = q;
queue             677 drivers/nvme/host/multipath.c 	blk_set_queue_dying(head->disk->queue);
queue             681 drivers/nvme/host/multipath.c 	blk_cleanup_queue(head->disk->queue);
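
multipath.c uses the split freeze API so every path can begin freezing before anything blocks; the three phases pair up as:

        blk_freeze_queue_start(q);      /* phase 1: new submitters start blocking */
        /* ... kick off the freeze on the remaining queues ... */
        blk_mq_freeze_queue_wait(q);    /* phase 2: drain in-flight requests */
        /* ... mutate state while no I/O can run ... */
        blk_mq_unfreeze_queue(q);       /* phase 3: release submitters */
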
queue             356 drivers/nvme/host/nvme.h 	struct request_queue *queue;
queue             550 drivers/nvme/host/nvme.h 		trace_block_bio_complete(ns->head->disk->queue,
queue             865 drivers/nvme/host/pci.c 	struct nvme_ns *ns = hctx->queue->queuedata;
queue              64 drivers/nvme/host/rdma.c 	struct nvme_rdma_queue  *queue;
queue             153 drivers/nvme/host/rdma.c static inline int nvme_rdma_queue_idx(struct nvme_rdma_queue *queue)
queue             155 drivers/nvme/host/rdma.c 	return queue - queue->ctrl->queues;
queue             158 drivers/nvme/host/rdma.c static bool nvme_rdma_poll_queue(struct nvme_rdma_queue *queue)
queue             160 drivers/nvme/host/rdma.c 	return nvme_rdma_queue_idx(queue) >
queue             161 drivers/nvme/host/rdma.c 		queue->ctrl->io_queues[HCTX_TYPE_DEFAULT] +
queue             162 drivers/nvme/host/rdma.c 		queue->ctrl->io_queues[HCTX_TYPE_READ];
queue             165 drivers/nvme/host/rdma.c static inline size_t nvme_rdma_inline_data_size(struct nvme_rdma_queue *queue)
queue             167 drivers/nvme/host/rdma.c 	return queue->cmnd_capsule_len - sizeof(struct nvme_command);
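
Two idioms worth noting in those helpers: the queue index is pure pointer arithmetic against the controller's contiguous queue array, and the inline-data budget is whatever the capsule holds beyond the SQE. Annotated:

        static inline int nvme_rdma_queue_idx(struct nvme_rdma_queue *queue)
        {
                /* queues[] is contiguous, so element minus base is the index */
                return queue - queue->ctrl->queues;
        }

        static inline size_t nvme_rdma_inline_data_size(struct nvme_rdma_queue *queue)
        {
                /* capsule = 64-byte SQE + optional in-capsule data */
                return queue->cmnd_capsule_len - sizeof(struct nvme_command);
        }
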
queue             240 drivers/nvme/host/rdma.c static int nvme_rdma_wait_for_cm(struct nvme_rdma_queue *queue)
queue             244 drivers/nvme/host/rdma.c 	ret = wait_for_completion_interruptible_timeout(&queue->cm_done,
queue             250 drivers/nvme/host/rdma.c 	WARN_ON_ONCE(queue->cm_error > 0);
queue             251 drivers/nvme/host/rdma.c 	return queue->cm_error;
queue             254 drivers/nvme/host/rdma.c static int nvme_rdma_create_qp(struct nvme_rdma_queue *queue, const int factor)
queue             256 drivers/nvme/host/rdma.c 	struct nvme_rdma_device *dev = queue->device;
queue             263 drivers/nvme/host/rdma.c 	init_attr.cap.max_send_wr = factor * queue->queue_size + 1;
queue             265 drivers/nvme/host/rdma.c 	init_attr.cap.max_recv_wr = queue->queue_size + 1;
queue             270 drivers/nvme/host/rdma.c 	init_attr.send_cq = queue->ib_cq;
queue             271 drivers/nvme/host/rdma.c 	init_attr.recv_cq = queue->ib_cq;
queue             273 drivers/nvme/host/rdma.c 	ret = rdma_create_qp(queue->cm_id, dev->pd, &init_attr);
queue             275 drivers/nvme/host/rdma.c 	queue->qp = queue->cm_id->qp;
queue             294 drivers/nvme/host/rdma.c 	struct nvme_rdma_queue *queue = &ctrl->queues[queue_idx];
queue             301 drivers/nvme/host/rdma.c 	req->queue = queue;
queue             310 drivers/nvme/host/rdma.c 	struct nvme_rdma_queue *queue = &ctrl->queues[hctx_idx + 1];
queue             314 drivers/nvme/host/rdma.c 	hctx->driver_data = queue;
queue             322 drivers/nvme/host/rdma.c 	struct nvme_rdma_queue *queue = &ctrl->queues[0];
queue             326 drivers/nvme/host/rdma.c 	hctx->driver_data = queue;
queue             400 drivers/nvme/host/rdma.c static void nvme_rdma_destroy_queue_ib(struct nvme_rdma_queue *queue)
queue             405 drivers/nvme/host/rdma.c 	if (!test_and_clear_bit(NVME_RDMA_Q_TR_READY, &queue->flags))
queue             408 drivers/nvme/host/rdma.c 	dev = queue->device;
queue             411 drivers/nvme/host/rdma.c 	ib_mr_pool_destroy(queue->qp, &queue->qp->rdma_mrs);
queue             418 drivers/nvme/host/rdma.c 	ib_destroy_qp(queue->qp);
queue             419 drivers/nvme/host/rdma.c 	ib_free_cq(queue->ib_cq);
queue             421 drivers/nvme/host/rdma.c 	nvme_rdma_free_ring(ibdev, queue->rsp_ring, queue->queue_size,
queue             433 drivers/nvme/host/rdma.c static int nvme_rdma_create_queue_ib(struct nvme_rdma_queue *queue)
queue             438 drivers/nvme/host/rdma.c 	int comp_vector, idx = nvme_rdma_queue_idx(queue);
queue             442 drivers/nvme/host/rdma.c 	queue->device = nvme_rdma_find_get_device(queue->cm_id);
queue             443 drivers/nvme/host/rdma.c 	if (!queue->device) {
queue             444 drivers/nvme/host/rdma.c 		dev_err(queue->cm_id->device->dev.parent,
queue             448 drivers/nvme/host/rdma.c 	ibdev = queue->device->dev;
queue             457 drivers/nvme/host/rdma.c 	if (nvme_rdma_poll_queue(queue))
queue             463 drivers/nvme/host/rdma.c 	queue->ib_cq = ib_alloc_cq(ibdev, queue,
queue             464 drivers/nvme/host/rdma.c 				cq_factor * queue->queue_size + 1,
queue             466 drivers/nvme/host/rdma.c 	if (IS_ERR(queue->ib_cq)) {
queue             467 drivers/nvme/host/rdma.c 		ret = PTR_ERR(queue->ib_cq);
queue             471 drivers/nvme/host/rdma.c 	ret = nvme_rdma_create_qp(queue, send_wr_factor);
queue             475 drivers/nvme/host/rdma.c 	queue->rsp_ring = nvme_rdma_alloc_ring(ibdev, queue->queue_size,
queue             477 drivers/nvme/host/rdma.c 	if (!queue->rsp_ring) {
queue             488 drivers/nvme/host/rdma.c 	ret = ib_mr_pool_init(queue->qp, &queue->qp->rdma_mrs,
queue             489 drivers/nvme/host/rdma.c 			      queue->queue_size,
queue             493 drivers/nvme/host/rdma.c 		dev_err(queue->ctrl->ctrl.device,
queue             495 drivers/nvme/host/rdma.c 			queue->queue_size, idx);
queue             499 drivers/nvme/host/rdma.c 	set_bit(NVME_RDMA_Q_TR_READY, &queue->flags);
queue             504 drivers/nvme/host/rdma.c 	nvme_rdma_free_ring(ibdev, queue->rsp_ring, queue->queue_size,
queue             507 drivers/nvme/host/rdma.c 	rdma_destroy_qp(queue->cm_id);
queue             509 drivers/nvme/host/rdma.c 	ib_free_cq(queue->ib_cq);
queue             511 drivers/nvme/host/rdma.c 	nvme_rdma_dev_put(queue->device);
queue             518 drivers/nvme/host/rdma.c 	struct nvme_rdma_queue *queue;
queue             522 drivers/nvme/host/rdma.c 	queue = &ctrl->queues[idx];
queue             523 drivers/nvme/host/rdma.c 	queue->ctrl = ctrl;
queue             524 drivers/nvme/host/rdma.c 	init_completion(&queue->cm_done);
queue             527 drivers/nvme/host/rdma.c 		queue->cmnd_capsule_len = ctrl->ctrl.ioccsz * 16;
queue             529 drivers/nvme/host/rdma.c 		queue->cmnd_capsule_len = sizeof(struct nvme_command);
queue             531 drivers/nvme/host/rdma.c 	queue->queue_size = queue_size;
queue             533 drivers/nvme/host/rdma.c 	queue->cm_id = rdma_create_id(&init_net, nvme_rdma_cm_handler, queue,
queue             535 drivers/nvme/host/rdma.c 	if (IS_ERR(queue->cm_id)) {
queue             537 drivers/nvme/host/rdma.c 			"failed to create CM ID: %ld\n", PTR_ERR(queue->cm_id));
queue             538 drivers/nvme/host/rdma.c 		return PTR_ERR(queue->cm_id);
queue             544 drivers/nvme/host/rdma.c 	queue->cm_error = -ETIMEDOUT;
queue             545 drivers/nvme/host/rdma.c 	ret = rdma_resolve_addr(queue->cm_id, src_addr,
queue             554 drivers/nvme/host/rdma.c 	ret = nvme_rdma_wait_for_cm(queue);
queue             561 drivers/nvme/host/rdma.c 	set_bit(NVME_RDMA_Q_ALLOCATED, &queue->flags);
queue             566 drivers/nvme/host/rdma.c 	rdma_destroy_id(queue->cm_id);
queue             567 drivers/nvme/host/rdma.c 	nvme_rdma_destroy_queue_ib(queue);
queue             571 drivers/nvme/host/rdma.c static void __nvme_rdma_stop_queue(struct nvme_rdma_queue *queue)
queue             573 drivers/nvme/host/rdma.c 	rdma_disconnect(queue->cm_id);
queue             574 drivers/nvme/host/rdma.c 	ib_drain_qp(queue->qp);
queue             577 drivers/nvme/host/rdma.c static void nvme_rdma_stop_queue(struct nvme_rdma_queue *queue)
queue             579 drivers/nvme/host/rdma.c 	if (!test_and_clear_bit(NVME_RDMA_Q_LIVE, &queue->flags))
queue             581 drivers/nvme/host/rdma.c 	__nvme_rdma_stop_queue(queue);
queue             584 drivers/nvme/host/rdma.c static void nvme_rdma_free_queue(struct nvme_rdma_queue *queue)
queue             586 drivers/nvme/host/rdma.c 	if (!test_and_clear_bit(NVME_RDMA_Q_ALLOCATED, &queue->flags))
queue             589 drivers/nvme/host/rdma.c 	nvme_rdma_destroy_queue_ib(queue);
queue             590 drivers/nvme/host/rdma.c 	rdma_destroy_id(queue->cm_id);
queue             611 drivers/nvme/host/rdma.c 	struct nvme_rdma_queue *queue = &ctrl->queues[idx];
queue             612 drivers/nvme/host/rdma.c 	bool poll = nvme_rdma_poll_queue(queue);
queue             621 drivers/nvme/host/rdma.c 		set_bit(NVME_RDMA_Q_LIVE, &queue->flags);
queue             623 drivers/nvme/host/rdma.c 		if (test_bit(NVME_RDMA_Q_ALLOCATED, &queue->flags))
queue             624 drivers/nvme/host/rdma.c 			__nvme_rdma_stop_queue(queue);
queue            1099 drivers/nvme/host/rdma.c 	struct nvme_rdma_queue *queue = cq->cq_context;
queue            1100 drivers/nvme/host/rdma.c 	struct nvme_rdma_ctrl *ctrl = queue->ctrl;
queue            1132 drivers/nvme/host/rdma.c static int nvme_rdma_inv_rkey(struct nvme_rdma_queue *queue,
queue            1146 drivers/nvme/host/rdma.c 	return ib_post_send(queue->qp, &wr, NULL);
queue            1149 drivers/nvme/host/rdma.c static void nvme_rdma_unmap_data(struct nvme_rdma_queue *queue,
queue            1153 drivers/nvme/host/rdma.c 	struct nvme_rdma_device *dev = queue->device;
queue            1160 drivers/nvme/host/rdma.c 		ib_mr_pool_put(queue->qp, &queue->qp->rdma_mrs, req->mr);
queue            1181 drivers/nvme/host/rdma.c static int nvme_rdma_map_sg_inline(struct nvme_rdma_queue *queue,
queue            1194 drivers/nvme/host/rdma.c 		sge->lkey = queue->device->pd->local_dma_lkey;
queue            1198 drivers/nvme/host/rdma.c 	sg->addr = cpu_to_le64(queue->ctrl->ctrl.icdoff);
queue            1206 drivers/nvme/host/rdma.c static int nvme_rdma_map_sg_single(struct nvme_rdma_queue *queue,
queue            1213 drivers/nvme/host/rdma.c 	put_unaligned_le32(queue->device->pd->unsafe_global_rkey, sg->key);
queue            1218 drivers/nvme/host/rdma.c static int nvme_rdma_map_sg_fr(struct nvme_rdma_queue *queue,
queue            1225 drivers/nvme/host/rdma.c 	req->mr = ib_mr_pool_get(queue->qp, &queue->qp->rdma_mrs);
queue            1235 drivers/nvme/host/rdma.c 		ib_mr_pool_put(queue->qp, &queue->qp->rdma_mrs, req->mr);
queue            1264 drivers/nvme/host/rdma.c static int nvme_rdma_map_data(struct nvme_rdma_queue *queue,
queue            1268 drivers/nvme/host/rdma.c 	struct nvme_rdma_device *dev = queue->device;
queue            1297 drivers/nvme/host/rdma.c 		if (rq_data_dir(rq) == WRITE && nvme_rdma_queue_idx(queue) &&
queue            1298 drivers/nvme/host/rdma.c 		    queue->ctrl->use_inline_data &&
queue            1300 drivers/nvme/host/rdma.c 				nvme_rdma_inline_data_size(queue)) {
queue            1301 drivers/nvme/host/rdma.c 			ret = nvme_rdma_map_sg_inline(queue, req, c, count);
queue            1306 drivers/nvme/host/rdma.c 			ret = nvme_rdma_map_sg_single(queue, req, c);
queue            1311 drivers/nvme/host/rdma.c 	ret = nvme_rdma_map_sg_fr(queue, req, c, count);
queue            1342 drivers/nvme/host/rdma.c static int nvme_rdma_post_send(struct nvme_rdma_queue *queue,
queue            1351 drivers/nvme/host/rdma.c 	sge->lkey   = queue->device->pd->local_dma_lkey;
queue            1365 drivers/nvme/host/rdma.c 	ret = ib_post_send(queue->qp, first, NULL);
queue            1367 drivers/nvme/host/rdma.c 		dev_err(queue->ctrl->ctrl.device,
queue            1373 drivers/nvme/host/rdma.c static int nvme_rdma_post_recv(struct nvme_rdma_queue *queue,
queue            1382 drivers/nvme/host/rdma.c 	list.lkey   = queue->device->pd->local_dma_lkey;
queue            1391 drivers/nvme/host/rdma.c 	ret = ib_post_recv(queue->qp, &wr, NULL);
queue            1393 drivers/nvme/host/rdma.c 		dev_err(queue->ctrl->ctrl.device,
queue            1399 drivers/nvme/host/rdma.c static struct blk_mq_tags *nvme_rdma_tagset(struct nvme_rdma_queue *queue)
queue            1401 drivers/nvme/host/rdma.c 	u32 queue_idx = nvme_rdma_queue_idx(queue);
queue            1404 drivers/nvme/host/rdma.c 		return queue->ctrl->admin_tag_set.tags[queue_idx];
queue            1405 drivers/nvme/host/rdma.c 	return queue->ctrl->tag_set.tags[queue_idx - 1];
queue            1417 drivers/nvme/host/rdma.c 	struct nvme_rdma_queue *queue = &ctrl->queues[0];
queue            1418 drivers/nvme/host/rdma.c 	struct ib_device *dev = queue->device->dev;
queue            1437 drivers/nvme/host/rdma.c 	ret = nvme_rdma_post_send(queue, sqe, &sge, 1, NULL);
queue            1441 drivers/nvme/host/rdma.c static void nvme_rdma_process_nvme_rsp(struct nvme_rdma_queue *queue,
queue            1447 drivers/nvme/host/rdma.c 	rq = blk_mq_tag_to_rq(nvme_rdma_tagset(queue), cqe->command_id);
queue            1449 drivers/nvme/host/rdma.c 		dev_err(queue->ctrl->ctrl.device,
queue            1451 drivers/nvme/host/rdma.c 			cqe->command_id, queue->qp->qp_num);
queue            1452 drivers/nvme/host/rdma.c 		nvme_rdma_error_recovery(queue->ctrl);
queue            1462 drivers/nvme/host/rdma.c 			dev_err(queue->ctrl->ctrl.device,
queue            1465 drivers/nvme/host/rdma.c 			nvme_rdma_error_recovery(queue->ctrl);
queue            1470 drivers/nvme/host/rdma.c 		ret = nvme_rdma_inv_rkey(queue, req);
queue            1472 drivers/nvme/host/rdma.c 			dev_err(queue->ctrl->ctrl.device,
queue            1475 drivers/nvme/host/rdma.c 			nvme_rdma_error_recovery(queue->ctrl);
queue            1489 drivers/nvme/host/rdma.c 	struct nvme_rdma_queue *queue = cq->cq_context;
queue            1490 drivers/nvme/host/rdma.c 	struct ib_device *ibdev = queue->device->dev;
queue            1506 drivers/nvme/host/rdma.c 	if (unlikely(nvme_rdma_queue_idx(queue) == 0 &&
queue            1508 drivers/nvme/host/rdma.c 		nvme_complete_async_event(&queue->ctrl->ctrl, cqe->status,
queue            1511 drivers/nvme/host/rdma.c 		nvme_rdma_process_nvme_rsp(queue, cqe, wc);
queue            1514 drivers/nvme/host/rdma.c 	nvme_rdma_post_recv(queue, qe);
queue            1517 drivers/nvme/host/rdma.c static int nvme_rdma_conn_established(struct nvme_rdma_queue *queue)
queue            1521 drivers/nvme/host/rdma.c 	for (i = 0; i < queue->queue_size; i++) {
queue            1522 drivers/nvme/host/rdma.c 		ret = nvme_rdma_post_recv(queue, &queue->rsp_ring[i]);
queue            1530 drivers/nvme/host/rdma.c 	nvme_rdma_destroy_queue_ib(queue);
queue            1534 drivers/nvme/host/rdma.c static int nvme_rdma_conn_rejected(struct nvme_rdma_queue *queue,
queue            1537 drivers/nvme/host/rdma.c 	struct rdma_cm_id *cm_id = queue->cm_id;
queue            1549 drivers/nvme/host/rdma.c 		dev_err(queue->ctrl->ctrl.device,
queue            1553 drivers/nvme/host/rdma.c 		dev_err(queue->ctrl->ctrl.device,
queue            1560 drivers/nvme/host/rdma.c static int nvme_rdma_addr_resolved(struct nvme_rdma_queue *queue)
queue            1562 drivers/nvme/host/rdma.c 	struct nvme_ctrl *ctrl = &queue->ctrl->ctrl;
queue            1565 drivers/nvme/host/rdma.c 	ret = nvme_rdma_create_queue_ib(queue);
queue            1570 drivers/nvme/host/rdma.c 		rdma_set_service_type(queue->cm_id, ctrl->opts->tos);
queue            1571 drivers/nvme/host/rdma.c 	ret = rdma_resolve_route(queue->cm_id, NVME_RDMA_CONNECT_TIMEOUT_MS);
queue            1574 drivers/nvme/host/rdma.c 			queue->cm_error);
queue            1581 drivers/nvme/host/rdma.c 	nvme_rdma_destroy_queue_ib(queue);
queue            1585 drivers/nvme/host/rdma.c static int nvme_rdma_route_resolved(struct nvme_rdma_queue *queue)
queue            1587 drivers/nvme/host/rdma.c 	struct nvme_rdma_ctrl *ctrl = queue->ctrl;
queue            1592 drivers/nvme/host/rdma.c 	param.qp_num = queue->qp->qp_num;
queue            1595 drivers/nvme/host/rdma.c 	param.responder_resources = queue->device->dev->attrs.max_qp_rd_atom;
queue            1603 drivers/nvme/host/rdma.c 	priv.qid = cpu_to_le16(nvme_rdma_queue_idx(queue));
queue            1617 drivers/nvme/host/rdma.c 		priv.hrqsize = cpu_to_le16(queue->queue_size);
queue            1618 drivers/nvme/host/rdma.c 		priv.hsqsize = cpu_to_le16(queue->ctrl->ctrl.sqsize);
queue            1621 drivers/nvme/host/rdma.c 	ret = rdma_connect(queue->cm_id, &param);
queue            1631 drivers/nvme/host/rdma.c 	nvme_rdma_destroy_queue_ib(queue);
queue            1638 drivers/nvme/host/rdma.c 	struct nvme_rdma_queue *queue = cm_id->context;
queue            1641 drivers/nvme/host/rdma.c 	dev_dbg(queue->ctrl->ctrl.device, "%s (%d): status %d id %p\n",
queue            1647 drivers/nvme/host/rdma.c 		cm_error = nvme_rdma_addr_resolved(queue);
queue            1650 drivers/nvme/host/rdma.c 		cm_error = nvme_rdma_route_resolved(queue);
queue            1653 drivers/nvme/host/rdma.c 		queue->cm_error = nvme_rdma_conn_established(queue);
queue            1655 drivers/nvme/host/rdma.c 		complete(&queue->cm_done);
queue            1658 drivers/nvme/host/rdma.c 		nvme_rdma_destroy_queue_ib(queue);
queue            1659 drivers/nvme/host/rdma.c 		cm_error = nvme_rdma_conn_rejected(queue, ev);
queue            1664 drivers/nvme/host/rdma.c 		nvme_rdma_destroy_queue_ib(queue);
queue            1667 drivers/nvme/host/rdma.c 		dev_dbg(queue->ctrl->ctrl.device,
queue            1674 drivers/nvme/host/rdma.c 		dev_dbg(queue->ctrl->ctrl.device,
queue            1676 drivers/nvme/host/rdma.c 		nvme_rdma_error_recovery(queue->ctrl);
queue            1682 drivers/nvme/host/rdma.c 		dev_err(queue->ctrl->ctrl.device,
queue            1684 drivers/nvme/host/rdma.c 		nvme_rdma_error_recovery(queue->ctrl);
queue            1689 drivers/nvme/host/rdma.c 		queue->cm_error = cm_error;
queue            1690 drivers/nvme/host/rdma.c 		complete(&queue->cm_done);
queue            1700 drivers/nvme/host/rdma.c 	struct nvme_rdma_queue *queue = req->queue;
queue            1701 drivers/nvme/host/rdma.c 	struct nvme_rdma_ctrl *ctrl = queue->ctrl;
queue            1704 drivers/nvme/host/rdma.c 		 rq->tag, nvme_rdma_queue_idx(queue));
queue            1735 drivers/nvme/host/rdma.c 	struct nvme_ns *ns = hctx->queue->queuedata;
queue            1736 drivers/nvme/host/rdma.c 	struct nvme_rdma_queue *queue = hctx->driver_data;
queue            1742 drivers/nvme/host/rdma.c 	bool queue_ready = test_bit(NVME_RDMA_Q_LIVE, &queue->flags);
queue            1748 drivers/nvme/host/rdma.c 	if (!nvmf_check_ready(&queue->ctrl->ctrl, rq, queue_ready))
queue            1749 drivers/nvme/host/rdma.c 		return nvmf_fail_nonready_command(&queue->ctrl->ctrl, rq);
queue            1751 drivers/nvme/host/rdma.c 	dev = queue->device->dev;
queue            1769 drivers/nvme/host/rdma.c 	err = nvme_rdma_map_data(queue, rq, c);
queue            1771 drivers/nvme/host/rdma.c 		dev_err(queue->ctrl->ctrl.device,
queue            1782 drivers/nvme/host/rdma.c 	err = nvme_rdma_post_send(queue, sqe, req->sge, req->num_sge,
queue            1785 drivers/nvme/host/rdma.c 		nvme_rdma_unmap_data(queue, rq);
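
nvme_rdma_queue_rq's excerpts follow the standard fabrics dispatch shape: test liveness, defer or fail via nvmf_check_ready(), map the data, post the send, and unmap if the post fails. Condensed sketch (the fifth post_send argument, a chained registration WR, is elided by the index and shown here as NULL):

        bool queue_ready = test_bit(NVME_RDMA_Q_LIVE, &queue->flags);

        if (!nvmf_check_ready(&queue->ctrl->ctrl, rq, queue_ready))
                return nvmf_fail_nonready_command(&queue->ctrl->ctrl, rq);

        err = nvme_rdma_map_data(queue, rq, c);
        if (unlikely(err < 0))
                goto err_out;           /* illustrative label */

        err = nvme_rdma_post_send(queue, sqe, req->sge, req->num_sge, NULL);
        if (unlikely(err))
                nvme_rdma_unmap_data(queue, rq);        /* undo DMA mapping */
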
queue            1804 drivers/nvme/host/rdma.c 	struct nvme_rdma_queue *queue = hctx->driver_data;
queue            1806 drivers/nvme/host/rdma.c 	return ib_process_cq_direct(queue->ib_cq, -1);
queue            1812 drivers/nvme/host/rdma.c 	struct nvme_rdma_queue *queue = req->queue;
queue            1813 drivers/nvme/host/rdma.c 	struct ib_device *ibdev = queue->device->dev;
queue            1815 drivers/nvme/host/rdma.c 	nvme_rdma_unmap_data(queue, rq);
queue              33 drivers/nvme/host/tcp.c 	struct nvme_tcp_queue	*queue;
queue             130 drivers/nvme/host/tcp.c static inline int nvme_tcp_queue_id(struct nvme_tcp_queue *queue)
queue             132 drivers/nvme/host/tcp.c 	return queue - queue->ctrl->queues;
queue             135 drivers/nvme/host/tcp.c static inline struct blk_mq_tags *nvme_tcp_tagset(struct nvme_tcp_queue *queue)
queue             137 drivers/nvme/host/tcp.c 	u32 queue_idx = nvme_tcp_queue_id(queue);
queue             140 drivers/nvme/host/tcp.c 		return queue->ctrl->admin_tag_set.tags[queue_idx];
queue             141 drivers/nvme/host/tcp.c 	return queue->ctrl->tag_set.tags[queue_idx - 1];
queue             144 drivers/nvme/host/tcp.c static inline u8 nvme_tcp_hdgst_len(struct nvme_tcp_queue *queue)
queue             146 drivers/nvme/host/tcp.c 	return queue->hdr_digest ? NVME_TCP_DIGEST_LENGTH : 0;
queue             149 drivers/nvme/host/tcp.c static inline u8 nvme_tcp_ddgst_len(struct nvme_tcp_queue *queue)
queue             151 drivers/nvme/host/tcp.c 	return queue->data_digest ? NVME_TCP_DIGEST_LENGTH : 0;
queue             154 drivers/nvme/host/tcp.c static inline size_t nvme_tcp_inline_data_size(struct nvme_tcp_queue *queue)
queue             156 drivers/nvme/host/tcp.c 	return queue->cmnd_capsule_len - sizeof(struct nvme_command);
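
Because both NVMe/TCP digests are negotiated per queue, every size computation funnels through these helpers, which return either 0 or the 4-byte NVME_TCP_DIGEST_LENGTH. Typical use, as in the receive setup further down:

        /* bytes to read before the header is complete on this queue */
        size_t hdr_bytes = sizeof(struct nvme_tcp_rsp_pdu) +
                           nvme_tcp_hdgst_len(queue);

        /* trailing data digest, if negotiated, adds ddgst_len more bytes */
        size_t trailer = nvme_tcp_ddgst_len(queue);
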
queue             161 drivers/nvme/host/tcp.c 	return req == &req->queue->ctrl->async_req;
queue             174 drivers/nvme/host/tcp.c 		req->data_len <= nvme_tcp_inline_data_size(req->queue);
queue             252 drivers/nvme/host/tcp.c 	struct nvme_tcp_queue *queue = req->queue;
queue             254 drivers/nvme/host/tcp.c 	spin_lock(&queue->lock);
queue             255 drivers/nvme/host/tcp.c 	list_add_tail(&req->entry, &queue->send_list);
queue             256 drivers/nvme/host/tcp.c 	spin_unlock(&queue->lock);
queue             258 drivers/nvme/host/tcp.c 	queue_work_on(queue->io_cpu, nvme_tcp_wq, &queue->io_work);
queue             262 drivers/nvme/host/tcp.c nvme_tcp_fetch_request(struct nvme_tcp_queue *queue)
queue             266 drivers/nvme/host/tcp.c 	spin_lock(&queue->lock);
queue             267 drivers/nvme/host/tcp.c 	req = list_first_entry_or_null(&queue->send_list,
queue             271 drivers/nvme/host/tcp.c 	spin_unlock(&queue->lock);
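
The send path is a spinlock-guarded producer/consumer: queue_request() appends under queue->lock and kicks io_work on the queue's pinned CPU, while fetch_request() pops from the head. The pop's unlink step is elided by the index; a hedged completion:

        static struct nvme_tcp_request *
        nvme_tcp_fetch_request(struct nvme_tcp_queue *queue)
        {
                struct nvme_tcp_request *req;

                spin_lock(&queue->lock);
                req = list_first_entry_or_null(&queue->send_list,
                                struct nvme_tcp_request, entry);
                if (req)
                        list_del(&req->entry);  /* inferred unlink step */
                spin_unlock(&queue->lock);

                return req;
        }
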
queue             304 drivers/nvme/host/tcp.c static int nvme_tcp_verify_hdgst(struct nvme_tcp_queue *queue,
queue             312 drivers/nvme/host/tcp.c 		dev_err(queue->ctrl->ctrl.device,
queue             314 drivers/nvme/host/tcp.c 			nvme_tcp_queue_id(queue));
queue             319 drivers/nvme/host/tcp.c 	nvme_tcp_hdgst(queue->rcv_hash, pdu, pdu_len);
queue             322 drivers/nvme/host/tcp.c 		dev_err(queue->ctrl->ctrl.device,
queue             331 drivers/nvme/host/tcp.c static int nvme_tcp_check_ddgst(struct nvme_tcp_queue *queue, void *pdu)
queue             334 drivers/nvme/host/tcp.c 	u8 digest_len = nvme_tcp_hdgst_len(queue);
queue             341 drivers/nvme/host/tcp.c 		dev_err(queue->ctrl->ctrl.device,
queue             343 drivers/nvme/host/tcp.c 		nvme_tcp_queue_id(queue));
queue             346 drivers/nvme/host/tcp.c 	crypto_ahash_init(queue->rcv_hash);
queue             366 drivers/nvme/host/tcp.c 	struct nvme_tcp_queue *queue = &ctrl->queues[queue_idx];
queue             367 drivers/nvme/host/tcp.c 	u8 hdgst = nvme_tcp_hdgst_len(queue);
queue             369 drivers/nvme/host/tcp.c 	req->pdu = page_frag_alloc(&queue->pf_cache,
queue             375 drivers/nvme/host/tcp.c 	req->queue = queue;
queue             385 drivers/nvme/host/tcp.c 	struct nvme_tcp_queue *queue = &ctrl->queues[hctx_idx + 1];
queue             387 drivers/nvme/host/tcp.c 	hctx->driver_data = queue;
queue             395 drivers/nvme/host/tcp.c 	struct nvme_tcp_queue *queue = &ctrl->queues[0];
queue             397 drivers/nvme/host/tcp.c 	hctx->driver_data = queue;
queue             402 drivers/nvme/host/tcp.c nvme_tcp_recv_state(struct nvme_tcp_queue *queue)
queue             404 drivers/nvme/host/tcp.c 	return  (queue->pdu_remaining) ? NVME_TCP_RECV_PDU :
queue             405 drivers/nvme/host/tcp.c 		(queue->ddgst_remaining) ? NVME_TCP_RECV_DDGST :
queue             409 drivers/nvme/host/tcp.c static void nvme_tcp_init_recv_ctx(struct nvme_tcp_queue *queue)
queue             411 drivers/nvme/host/tcp.c 	queue->pdu_remaining = sizeof(struct nvme_tcp_rsp_pdu) +
queue             412 drivers/nvme/host/tcp.c 				nvme_tcp_hdgst_len(queue);
queue             413 drivers/nvme/host/tcp.c 	queue->pdu_offset = 0;
queue             414 drivers/nvme/host/tcp.c 	queue->data_remaining = -1;
queue             415 drivers/nvme/host/tcp.c 	queue->ddgst_remaining = 0;
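
Those counters are the whole receive state machine; nvme_tcp_recv_state() above derives the state from whichever counter is still outstanding. Annotated re-arm:

        /* state is implied by the first nonzero counter:
         *   pdu_remaining   -> NVME_TCP_RECV_PDU   (header bytes outstanding)
         *   ddgst_remaining -> NVME_TCP_RECV_DDGST (trailing digest outstanding)
         *   otherwise       -> NVME_TCP_RECV_DATA
         * data_remaining == -1 means "length not yet known from the header". */
        queue->pdu_remaining = sizeof(struct nvme_tcp_rsp_pdu) +
                                nvme_tcp_hdgst_len(queue);
        queue->pdu_offset = 0;
        queue->data_remaining = -1;
        queue->ddgst_remaining = 0;
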
queue             426 drivers/nvme/host/tcp.c static int nvme_tcp_process_nvme_cqe(struct nvme_tcp_queue *queue,
queue             431 drivers/nvme/host/tcp.c 	rq = blk_mq_tag_to_rq(nvme_tcp_tagset(queue), cqe->command_id);
queue             433 drivers/nvme/host/tcp.c 		dev_err(queue->ctrl->ctrl.device,
queue             435 drivers/nvme/host/tcp.c 			nvme_tcp_queue_id(queue), cqe->command_id);
queue             436 drivers/nvme/host/tcp.c 		nvme_tcp_error_recovery(&queue->ctrl->ctrl);
queue             441 drivers/nvme/host/tcp.c 	queue->nr_cqe++;
queue             446 drivers/nvme/host/tcp.c static int nvme_tcp_handle_c2h_data(struct nvme_tcp_queue *queue,
queue             451 drivers/nvme/host/tcp.c 	rq = blk_mq_tag_to_rq(nvme_tcp_tagset(queue), pdu->command_id);
queue             453 drivers/nvme/host/tcp.c 		dev_err(queue->ctrl->ctrl.device,
queue             455 drivers/nvme/host/tcp.c 			nvme_tcp_queue_id(queue), pdu->command_id);
queue             460 drivers/nvme/host/tcp.c 		dev_err(queue->ctrl->ctrl.device,
queue             462 drivers/nvme/host/tcp.c 			nvme_tcp_queue_id(queue), rq->tag);
queue             466 drivers/nvme/host/tcp.c 	queue->data_remaining = le32_to_cpu(pdu->data_length);
queue             470 drivers/nvme/host/tcp.c 		dev_err(queue->ctrl->ctrl.device,
queue             472 drivers/nvme/host/tcp.c 			nvme_tcp_queue_id(queue), rq->tag);
queue             473 drivers/nvme/host/tcp.c 		nvme_tcp_error_recovery(&queue->ctrl->ctrl);
queue             480 drivers/nvme/host/tcp.c static int nvme_tcp_handle_comp(struct nvme_tcp_queue *queue,
queue             492 drivers/nvme/host/tcp.c 	if (unlikely(nvme_tcp_queue_id(queue) == 0 &&
queue             494 drivers/nvme/host/tcp.c 		nvme_complete_async_event(&queue->ctrl->ctrl, cqe->status,
queue             497 drivers/nvme/host/tcp.c 		ret = nvme_tcp_process_nvme_cqe(queue, cqe);
queue             506 drivers/nvme/host/tcp.c 	struct nvme_tcp_queue *queue = req->queue;
queue             508 drivers/nvme/host/tcp.c 	u8 hdgst = nvme_tcp_hdgst_len(queue);
queue             509 drivers/nvme/host/tcp.c 	u8 ddgst = nvme_tcp_ddgst_len(queue);
queue             515 drivers/nvme/host/tcp.c 		dev_err(queue->ctrl->ctrl.device,
queue             523 drivers/nvme/host/tcp.c 		dev_err(queue->ctrl->ctrl.device,
queue             533 drivers/nvme/host/tcp.c 	if (queue->hdr_digest)
queue             535 drivers/nvme/host/tcp.c 	if (queue->data_digest)
queue             548 drivers/nvme/host/tcp.c static int nvme_tcp_handle_r2t(struct nvme_tcp_queue *queue,
queue             555 drivers/nvme/host/tcp.c 	rq = blk_mq_tag_to_rq(nvme_tcp_tagset(queue), pdu->command_id);
queue             557 drivers/nvme/host/tcp.c 		dev_err(queue->ctrl->ctrl.device,
queue             559 drivers/nvme/host/tcp.c 			nvme_tcp_queue_id(queue), pdu->command_id);
queue             576 drivers/nvme/host/tcp.c static int nvme_tcp_recv_pdu(struct nvme_tcp_queue *queue, struct sk_buff *skb,
queue             580 drivers/nvme/host/tcp.c 	char *pdu = queue->pdu;
queue             581 drivers/nvme/host/tcp.c 	size_t rcv_len = min_t(size_t, *len, queue->pdu_remaining);
queue             585 drivers/nvme/host/tcp.c 		&pdu[queue->pdu_offset], rcv_len);
queue             589 drivers/nvme/host/tcp.c 	queue->pdu_remaining -= rcv_len;
queue             590 drivers/nvme/host/tcp.c 	queue->pdu_offset += rcv_len;
queue             593 drivers/nvme/host/tcp.c 	if (queue->pdu_remaining)
queue             596 drivers/nvme/host/tcp.c 	hdr = queue->pdu;
queue             597 drivers/nvme/host/tcp.c 	if (queue->hdr_digest) {
queue             598 drivers/nvme/host/tcp.c 		ret = nvme_tcp_verify_hdgst(queue, queue->pdu, hdr->hlen);
queue             604 drivers/nvme/host/tcp.c 	if (queue->data_digest) {
queue             605 drivers/nvme/host/tcp.c 		ret = nvme_tcp_check_ddgst(queue, queue->pdu);
queue             612 drivers/nvme/host/tcp.c 		return nvme_tcp_handle_c2h_data(queue, (void *)queue->pdu);
queue             614 drivers/nvme/host/tcp.c 		nvme_tcp_init_recv_ctx(queue);
queue             615 drivers/nvme/host/tcp.c 		return nvme_tcp_handle_comp(queue, (void *)queue->pdu);
queue             617 drivers/nvme/host/tcp.c 		nvme_tcp_init_recv_ctx(queue);
queue             618 drivers/nvme/host/tcp.c 		return nvme_tcp_handle_r2t(queue, (void *)queue->pdu);
queue             620 drivers/nvme/host/tcp.c 		dev_err(queue->ctrl->ctrl.device,
queue             633 drivers/nvme/host/tcp.c static int nvme_tcp_recv_data(struct nvme_tcp_queue *queue, struct sk_buff *skb,
queue             636 drivers/nvme/host/tcp.c 	struct nvme_tcp_data_pdu *pdu = (void *)queue->pdu;
queue             640 drivers/nvme/host/tcp.c 	rq = blk_mq_tag_to_rq(nvme_tcp_tagset(queue), pdu->command_id);
queue             642 drivers/nvme/host/tcp.c 		dev_err(queue->ctrl->ctrl.device,
queue             644 drivers/nvme/host/tcp.c 			nvme_tcp_queue_id(queue), pdu->command_id);
queue             652 drivers/nvme/host/tcp.c 		recv_len = min_t(size_t, *len, queue->data_remaining);
queue             664 drivers/nvme/host/tcp.c 				dev_err(queue->ctrl->ctrl.device,
queue             666 drivers/nvme/host/tcp.c 					nvme_tcp_queue_id(queue), rq->tag);
queue             667 drivers/nvme/host/tcp.c 				nvme_tcp_init_recv_ctx(queue);
queue             677 drivers/nvme/host/tcp.c 		if (queue->data_digest)
queue             679 drivers/nvme/host/tcp.c 				&req->iter, recv_len, queue->rcv_hash);
queue             684 drivers/nvme/host/tcp.c 			dev_err(queue->ctrl->ctrl.device,
queue             686 drivers/nvme/host/tcp.c 				nvme_tcp_queue_id(queue), rq->tag);
queue             692 drivers/nvme/host/tcp.c 		queue->data_remaining -= recv_len;
queue             695 drivers/nvme/host/tcp.c 	if (!queue->data_remaining) {
queue             696 drivers/nvme/host/tcp.c 		if (queue->data_digest) {
queue             697 drivers/nvme/host/tcp.c 			nvme_tcp_ddgst_final(queue->rcv_hash, &queue->exp_ddgst);
queue             698 drivers/nvme/host/tcp.c 			queue->ddgst_remaining = NVME_TCP_DIGEST_LENGTH;
queue             702 drivers/nvme/host/tcp.c 				queue->nr_cqe++;
queue             704 drivers/nvme/host/tcp.c 			nvme_tcp_init_recv_ctx(queue);
queue             711 drivers/nvme/host/tcp.c static int nvme_tcp_recv_ddgst(struct nvme_tcp_queue *queue,
queue             714 drivers/nvme/host/tcp.c 	struct nvme_tcp_data_pdu *pdu = (void *)queue->pdu;
queue             715 drivers/nvme/host/tcp.c 	char *ddgst = (char *)&queue->recv_ddgst;
queue             716 drivers/nvme/host/tcp.c 	size_t recv_len = min_t(size_t, *len, queue->ddgst_remaining);
queue             717 drivers/nvme/host/tcp.c 	off_t off = NVME_TCP_DIGEST_LENGTH - queue->ddgst_remaining;
queue             724 drivers/nvme/host/tcp.c 	queue->ddgst_remaining -= recv_len;
queue             727 drivers/nvme/host/tcp.c 	if (queue->ddgst_remaining)
queue             730 drivers/nvme/host/tcp.c 	if (queue->recv_ddgst != queue->exp_ddgst) {
queue             731 drivers/nvme/host/tcp.c 		dev_err(queue->ctrl->ctrl.device,
queue             733 drivers/nvme/host/tcp.c 			le32_to_cpu(queue->recv_ddgst),
queue             734 drivers/nvme/host/tcp.c 			le32_to_cpu(queue->exp_ddgst));
queue             739 drivers/nvme/host/tcp.c 		struct request *rq = blk_mq_tag_to_rq(nvme_tcp_tagset(queue),
queue             743 drivers/nvme/host/tcp.c 		queue->nr_cqe++;
queue             746 drivers/nvme/host/tcp.c 	nvme_tcp_init_recv_ctx(queue);
queue             753 drivers/nvme/host/tcp.c 	struct nvme_tcp_queue *queue = desc->arg.data;
queue             758 drivers/nvme/host/tcp.c 		switch (nvme_tcp_recv_state(queue)) {
queue             760 drivers/nvme/host/tcp.c 			result = nvme_tcp_recv_pdu(queue, skb, &offset, &len);
queue             763 drivers/nvme/host/tcp.c 			result = nvme_tcp_recv_data(queue, skb, &offset, &len);
queue             766 drivers/nvme/host/tcp.c 			result = nvme_tcp_recv_ddgst(queue, skb, &offset, &len);
queue             772 drivers/nvme/host/tcp.c 			dev_err(queue->ctrl->ctrl.device,
queue             774 drivers/nvme/host/tcp.c 			queue->rd_enabled = false;
queue             775 drivers/nvme/host/tcp.c 			nvme_tcp_error_recovery(&queue->ctrl->ctrl);
queue             785 drivers/nvme/host/tcp.c 	struct nvme_tcp_queue *queue;
queue             788 drivers/nvme/host/tcp.c 	queue = sk->sk_user_data;
queue             789 drivers/nvme/host/tcp.c 	if (likely(queue && queue->rd_enabled))
queue             790 drivers/nvme/host/tcp.c 		queue_work_on(queue->io_cpu, nvme_tcp_wq, &queue->io_work);
queue             796 drivers/nvme/host/tcp.c 	struct nvme_tcp_queue *queue;
queue             799 drivers/nvme/host/tcp.c 	queue = sk->sk_user_data;
queue             800 drivers/nvme/host/tcp.c 	if (likely(queue && sk_stream_is_writeable(sk))) {
queue             802 drivers/nvme/host/tcp.c 		queue_work_on(queue->io_cpu, nvme_tcp_wq, &queue->io_work);
queue             809 drivers/nvme/host/tcp.c 	struct nvme_tcp_queue *queue;
queue             812 drivers/nvme/host/tcp.c 	queue = sk->sk_user_data;
queue             813 drivers/nvme/host/tcp.c 	if (!queue)
queue             823 drivers/nvme/host/tcp.c 		nvme_tcp_error_recovery(&queue->ctrl->ctrl);
queue             826 drivers/nvme/host/tcp.c 		dev_info(queue->ctrl->ctrl.device,
queue             828 drivers/nvme/host/tcp.c 			nvme_tcp_queue_id(queue), sk->sk_state);
queue             831 drivers/nvme/host/tcp.c 	queue->state_change(sk);
queue             836 drivers/nvme/host/tcp.c static inline void nvme_tcp_done_send_req(struct nvme_tcp_queue *queue)
queue             838 drivers/nvme/host/tcp.c 	queue->request = NULL;
queue             848 drivers/nvme/host/tcp.c 	struct nvme_tcp_queue *queue = req->queue;
queue             857 drivers/nvme/host/tcp.c 		if (last && !queue->data_digest)
queue             864 drivers/nvme/host/tcp.c 			ret = sock_no_sendpage(queue->sock, page, offset, len,
queue             867 drivers/nvme/host/tcp.c 			ret = kernel_sendpage(queue->sock, page, offset, len,
queue             874 drivers/nvme/host/tcp.c 		if (queue->data_digest)
queue             875 drivers/nvme/host/tcp.c 			nvme_tcp_ddgst_update(queue->snd_hash, page,
queue             880 drivers/nvme/host/tcp.c 			if (queue->data_digest) {
queue             881 drivers/nvme/host/tcp.c 				nvme_tcp_ddgst_final(queue->snd_hash,
queue             886 drivers/nvme/host/tcp.c 				nvme_tcp_done_send_req(queue);
queue             896 drivers/nvme/host/tcp.c 	struct nvme_tcp_queue *queue = req->queue;
queue             900 drivers/nvme/host/tcp.c 	u8 hdgst = nvme_tcp_hdgst_len(queue);
queue             904 drivers/nvme/host/tcp.c 	if (queue->hdr_digest && !req->offset)
queue             905 drivers/nvme/host/tcp.c 		nvme_tcp_hdgst(queue->snd_hash, pdu, sizeof(*pdu));
queue             907 drivers/nvme/host/tcp.c 	ret = kernel_sendpage(queue->sock, virt_to_page(pdu),
queue             916 drivers/nvme/host/tcp.c 			if (queue->data_digest)
queue             917 drivers/nvme/host/tcp.c 				crypto_ahash_init(queue->snd_hash);
queue             920 drivers/nvme/host/tcp.c 			nvme_tcp_done_send_req(queue);
queue             931 drivers/nvme/host/tcp.c 	struct nvme_tcp_queue *queue = req->queue;
queue             933 drivers/nvme/host/tcp.c 	u8 hdgst = nvme_tcp_hdgst_len(queue);
queue             937 drivers/nvme/host/tcp.c 	if (queue->hdr_digest && !req->offset)
queue             938 drivers/nvme/host/tcp.c 		nvme_tcp_hdgst(queue->snd_hash, pdu, sizeof(*pdu));
queue             940 drivers/nvme/host/tcp.c 	ret = kernel_sendpage(queue->sock, virt_to_page(pdu),
queue             949 drivers/nvme/host/tcp.c 		if (queue->data_digest)
queue             950 drivers/nvme/host/tcp.c 			crypto_ahash_init(queue->snd_hash);
queue             962 drivers/nvme/host/tcp.c 	struct nvme_tcp_queue *queue = req->queue;
queue             970 drivers/nvme/host/tcp.c 	ret = kernel_sendmsg(queue->sock, &msg, &iov, 1, iov.iov_len);
queue             975 drivers/nvme/host/tcp.c 		nvme_tcp_done_send_req(queue);
queue             983 drivers/nvme/host/tcp.c static int nvme_tcp_try_send(struct nvme_tcp_queue *queue)
queue             988 drivers/nvme/host/tcp.c 	if (!queue->request) {
queue             989 drivers/nvme/host/tcp.c 		queue->request = nvme_tcp_fetch_request(queue);
queue             990 drivers/nvme/host/tcp.c 		if (!queue->request)
queue             993 drivers/nvme/host/tcp.c 	req = queue->request;
queue            1023 drivers/nvme/host/tcp.c static int nvme_tcp_try_recv(struct nvme_tcp_queue *queue)
queue            1025 drivers/nvme/host/tcp.c 	struct socket *sock = queue->sock;
queue            1030 drivers/nvme/host/tcp.c 	rd_desc.arg.data = queue;
queue            1033 drivers/nvme/host/tcp.c 	queue->nr_cqe = 0;
queue            1041 drivers/nvme/host/tcp.c 	struct nvme_tcp_queue *queue =
queue            1049 drivers/nvme/host/tcp.c 		result = nvme_tcp_try_send(queue);
queue            1053 drivers/nvme/host/tcp.c 			dev_err(queue->ctrl->ctrl.device,
queue            1061 drivers/nvme/host/tcp.c 				nvme_tcp_fail_request(queue->request);
queue            1062 drivers/nvme/host/tcp.c 			nvme_tcp_done_send_req(queue);
queue            1066 drivers/nvme/host/tcp.c 		result = nvme_tcp_try_recv(queue);
queue            1075 drivers/nvme/host/tcp.c 	queue_work_on(queue->io_cpu, nvme_tcp_wq, &queue->io_work);
queue            1078 drivers/nvme/host/tcp.c static void nvme_tcp_free_crypto(struct nvme_tcp_queue *queue)
queue            1080 drivers/nvme/host/tcp.c 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(queue->rcv_hash);
queue            1082 drivers/nvme/host/tcp.c 	ahash_request_free(queue->rcv_hash);
queue            1083 drivers/nvme/host/tcp.c 	ahash_request_free(queue->snd_hash);
queue            1087 drivers/nvme/host/tcp.c static int nvme_tcp_alloc_crypto(struct nvme_tcp_queue *queue)
queue            1095 drivers/nvme/host/tcp.c 	queue->snd_hash = ahash_request_alloc(tfm, GFP_KERNEL);
queue            1096 drivers/nvme/host/tcp.c 	if (!queue->snd_hash)
queue            1098 drivers/nvme/host/tcp.c 	ahash_request_set_callback(queue->snd_hash, 0, NULL, NULL);
queue            1100 drivers/nvme/host/tcp.c 	queue->rcv_hash = ahash_request_alloc(tfm, GFP_KERNEL);
queue            1101 drivers/nvme/host/tcp.c 	if (!queue->rcv_hash)
queue            1103 drivers/nvme/host/tcp.c 	ahash_request_set_callback(queue->rcv_hash, 0, NULL, NULL);
queue            1107 drivers/nvme/host/tcp.c 	ahash_request_free(queue->snd_hash);
queue            1122 drivers/nvme/host/tcp.c 	struct nvme_tcp_queue *queue = &ctrl->queues[0];
queue            1124 drivers/nvme/host/tcp.c 	u8 hdgst = nvme_tcp_hdgst_len(queue);
queue            1126 drivers/nvme/host/tcp.c 	async->pdu = page_frag_alloc(&queue->pf_cache,
queue            1132 drivers/nvme/host/tcp.c 	async->queue = &ctrl->queues[0];
queue            1139 drivers/nvme/host/tcp.c 	struct nvme_tcp_queue *queue = &ctrl->queues[qid];
queue            1141 drivers/nvme/host/tcp.c 	if (!test_and_clear_bit(NVME_TCP_Q_ALLOCATED, &queue->flags))
queue            1144 drivers/nvme/host/tcp.c 	if (queue->hdr_digest || queue->data_digest)
queue            1145 drivers/nvme/host/tcp.c 		nvme_tcp_free_crypto(queue);
queue            1147 drivers/nvme/host/tcp.c 	sock_release(queue->sock);
queue            1148 drivers/nvme/host/tcp.c 	kfree(queue->pdu);
queue            1151 drivers/nvme/host/tcp.c static int nvme_tcp_init_connection(struct nvme_tcp_queue *queue)
queue            1177 drivers/nvme/host/tcp.c 	if (queue->hdr_digest)
queue            1179 drivers/nvme/host/tcp.c 	if (queue->data_digest)
queue            1184 drivers/nvme/host/tcp.c 	ret = kernel_sendmsg(queue->sock, &msg, &iov, 1, iov.iov_len);
queue            1191 drivers/nvme/host/tcp.c 	ret = kernel_recvmsg(queue->sock, &msg, &iov, 1,
queue            1199 drivers/nvme/host/tcp.c 			nvme_tcp_queue_id(queue), icresp->hdr.type);
queue            1205 drivers/nvme/host/tcp.c 			nvme_tcp_queue_id(queue), icresp->hdr.plen);
queue            1211 drivers/nvme/host/tcp.c 			nvme_tcp_queue_id(queue), icresp->pfv);
queue            1216 drivers/nvme/host/tcp.c 	if ((queue->data_digest && !ctrl_ddgst) ||
queue            1217 drivers/nvme/host/tcp.c 	    (!queue->data_digest && ctrl_ddgst)) {
queue            1219 drivers/nvme/host/tcp.c 			nvme_tcp_queue_id(queue),
queue            1220 drivers/nvme/host/tcp.c 			queue->data_digest ? "enabled" : "disabled",
queue            1226 drivers/nvme/host/tcp.c 	if ((queue->hdr_digest && !ctrl_hdgst) ||
queue            1227 drivers/nvme/host/tcp.c 	    (!queue->hdr_digest && ctrl_hdgst)) {
queue            1229 drivers/nvme/host/tcp.c 			nvme_tcp_queue_id(queue),
queue            1230 drivers/nvme/host/tcp.c 			queue->hdr_digest ? "enabled" : "disabled",
queue            1237 drivers/nvme/host/tcp.c 			nvme_tcp_queue_id(queue), icresp->cpda);
queue            1253 drivers/nvme/host/tcp.c 	struct nvme_tcp_queue *queue = &ctrl->queues[qid];
queue            1257 drivers/nvme/host/tcp.c 	queue->ctrl = ctrl;
queue            1258 drivers/nvme/host/tcp.c 	INIT_LIST_HEAD(&queue->send_list);
queue            1259 drivers/nvme/host/tcp.c 	spin_lock_init(&queue->lock);
queue            1260 drivers/nvme/host/tcp.c 	INIT_WORK(&queue->io_work, nvme_tcp_io_work);
queue            1261 drivers/nvme/host/tcp.c 	queue->queue_size = queue_size;
queue            1264 drivers/nvme/host/tcp.c 		queue->cmnd_capsule_len = nctrl->ioccsz * 16;
queue            1266 drivers/nvme/host/tcp.c 		queue->cmnd_capsule_len = sizeof(struct nvme_command) +
queue            1270 drivers/nvme/host/tcp.c 			IPPROTO_TCP, &queue->sock);
queue            1279 drivers/nvme/host/tcp.c 	ret = kernel_setsockopt(queue->sock, IPPROTO_TCP, TCP_SYNCNT,
queue            1289 drivers/nvme/host/tcp.c 	ret = kernel_setsockopt(queue->sock, IPPROTO_TCP,
queue            1302 drivers/nvme/host/tcp.c 	ret = kernel_setsockopt(queue->sock, SOL_SOCKET, SO_LINGER,
queue            1313 drivers/nvme/host/tcp.c 		ret = kernel_setsockopt(queue->sock, SOL_IP, IP_TOS,
queue            1322 drivers/nvme/host/tcp.c 	queue->sock->sk->sk_allocation = GFP_ATOMIC;
queue            1327 drivers/nvme/host/tcp.c 	queue->io_cpu = cpumask_next_wrap(n - 1, cpu_online_mask, -1, false);
queue            1328 drivers/nvme/host/tcp.c 	queue->request = NULL;
queue            1329 drivers/nvme/host/tcp.c 	queue->data_remaining = 0;
queue            1330 drivers/nvme/host/tcp.c 	queue->ddgst_remaining = 0;
queue            1331 drivers/nvme/host/tcp.c 	queue->pdu_remaining = 0;
queue            1332 drivers/nvme/host/tcp.c 	queue->pdu_offset = 0;
queue            1333 drivers/nvme/host/tcp.c 	sk_set_memalloc(queue->sock->sk);
queue            1336 drivers/nvme/host/tcp.c 		ret = kernel_bind(queue->sock, (struct sockaddr *)&ctrl->src_addr,
queue            1346 drivers/nvme/host/tcp.c 	queue->hdr_digest = nctrl->opts->hdr_digest;
queue            1347 drivers/nvme/host/tcp.c 	queue->data_digest = nctrl->opts->data_digest;
queue            1348 drivers/nvme/host/tcp.c 	if (queue->hdr_digest || queue->data_digest) {
queue            1349 drivers/nvme/host/tcp.c 		ret = nvme_tcp_alloc_crypto(queue);
queue            1358 drivers/nvme/host/tcp.c 			nvme_tcp_hdgst_len(queue);
queue            1359 drivers/nvme/host/tcp.c 	queue->pdu = kmalloc(rcv_pdu_size, GFP_KERNEL);
queue            1360 drivers/nvme/host/tcp.c 	if (!queue->pdu) {
queue            1366 drivers/nvme/host/tcp.c 			nvme_tcp_queue_id(queue));
queue            1368 drivers/nvme/host/tcp.c 	ret = kernel_connect(queue->sock, (struct sockaddr *)&ctrl->addr,
queue            1376 drivers/nvme/host/tcp.c 	ret = nvme_tcp_init_connection(queue);
queue            1380 drivers/nvme/host/tcp.c 	queue->rd_enabled = true;
queue            1381 drivers/nvme/host/tcp.c 	set_bit(NVME_TCP_Q_ALLOCATED, &queue->flags);
queue            1382 drivers/nvme/host/tcp.c 	nvme_tcp_init_recv_ctx(queue);
queue            1384 drivers/nvme/host/tcp.c 	write_lock_bh(&queue->sock->sk->sk_callback_lock);
queue            1385 drivers/nvme/host/tcp.c 	queue->sock->sk->sk_user_data = queue;
queue            1386 drivers/nvme/host/tcp.c 	queue->state_change = queue->sock->sk->sk_state_change;
queue            1387 drivers/nvme/host/tcp.c 	queue->data_ready = queue->sock->sk->sk_data_ready;
queue            1388 drivers/nvme/host/tcp.c 	queue->write_space = queue->sock->sk->sk_write_space;
queue            1389 drivers/nvme/host/tcp.c 	queue->sock->sk->sk_data_ready = nvme_tcp_data_ready;
queue            1390 drivers/nvme/host/tcp.c 	queue->sock->sk->sk_state_change = nvme_tcp_state_change;
queue            1391 drivers/nvme/host/tcp.c 	queue->sock->sk->sk_write_space = nvme_tcp_write_space;
queue            1393 drivers/nvme/host/tcp.c 	queue->sock->sk->sk_ll_usec = 1;
queue            1395 drivers/nvme/host/tcp.c 	write_unlock_bh(&queue->sock->sk->sk_callback_lock);
queue            1400 drivers/nvme/host/tcp.c 	kernel_sock_shutdown(queue->sock, SHUT_RDWR);
queue            1402 drivers/nvme/host/tcp.c 	kfree(queue->pdu);
queue            1404 drivers/nvme/host/tcp.c 	if (queue->hdr_digest || queue->data_digest)
queue            1405 drivers/nvme/host/tcp.c 		nvme_tcp_free_crypto(queue);
queue            1407 drivers/nvme/host/tcp.c 	sock_release(queue->sock);
queue            1408 drivers/nvme/host/tcp.c 	queue->sock = NULL;
queue            1412 drivers/nvme/host/tcp.c static void nvme_tcp_restore_sock_calls(struct nvme_tcp_queue *queue)
queue            1414 drivers/nvme/host/tcp.c 	struct socket *sock = queue->sock;
queue            1418 drivers/nvme/host/tcp.c 	sock->sk->sk_data_ready = queue->data_ready;
queue            1419 drivers/nvme/host/tcp.c 	sock->sk->sk_state_change = queue->state_change;
queue            1420 drivers/nvme/host/tcp.c 	sock->sk->sk_write_space  = queue->write_space;
queue            1424 drivers/nvme/host/tcp.c static void __nvme_tcp_stop_queue(struct nvme_tcp_queue *queue)
queue            1426 drivers/nvme/host/tcp.c 	kernel_sock_shutdown(queue->sock, SHUT_RDWR);
queue            1427 drivers/nvme/host/tcp.c 	nvme_tcp_restore_sock_calls(queue);
queue            1428 drivers/nvme/host/tcp.c 	cancel_work_sync(&queue->io_work);
queue            1434 drivers/nvme/host/tcp.c 	struct nvme_tcp_queue *queue = &ctrl->queues[qid];
queue            1436 drivers/nvme/host/tcp.c 	if (!test_and_clear_bit(NVME_TCP_Q_LIVE, &queue->flags))
queue            1439 drivers/nvme/host/tcp.c 	__nvme_tcp_stop_queue(queue);
queue            1996 drivers/nvme/host/tcp.c static void nvme_tcp_set_sg_inline(struct nvme_tcp_queue *queue,
queue            2001 drivers/nvme/host/tcp.c 	sg->addr = cpu_to_le64(queue->ctrl->ctrl.icdoff);
queue            2020 drivers/nvme/host/tcp.c 	struct nvme_tcp_queue *queue = &ctrl->queues[0];
queue            2023 drivers/nvme/host/tcp.c 	u8 hdgst = nvme_tcp_hdgst_len(queue);
queue            2027 drivers/nvme/host/tcp.c 	if (queue->hdr_digest)
queue            2049 drivers/nvme/host/tcp.c 	struct nvme_tcp_ctrl *ctrl = req->queue->ctrl;
queue            2062 drivers/nvme/host/tcp.c 		nvme_tcp_queue_id(req->queue), rq->tag, pdu->hdr.type);
queue            2082 drivers/nvme/host/tcp.c static blk_status_t nvme_tcp_map_data(struct nvme_tcp_queue *queue,
queue            2094 drivers/nvme/host/tcp.c 	    req->data_len <= nvme_tcp_inline_data_size(queue))
queue            2095 drivers/nvme/host/tcp.c 		nvme_tcp_set_sg_inline(queue, c, req->data_len);
queue            2107 drivers/nvme/host/tcp.c 	struct nvme_tcp_queue *queue = req->queue;
queue            2108 drivers/nvme/host/tcp.c 	u8 hdgst = nvme_tcp_hdgst_len(queue), ddgst = 0;
queue            2125 drivers/nvme/host/tcp.c 	    req->data_len <= nvme_tcp_inline_data_size(queue))
queue            2132 drivers/nvme/host/tcp.c 	if (queue->hdr_digest)
queue            2134 drivers/nvme/host/tcp.c 	if (queue->data_digest && req->pdu_len) {
queue            2136 drivers/nvme/host/tcp.c 		ddgst = nvme_tcp_ddgst_len(queue);
queue            2143 drivers/nvme/host/tcp.c 	ret = nvme_tcp_map_data(queue, rq);
queue            2146 drivers/nvme/host/tcp.c 		dev_err(queue->ctrl->ctrl.device,
queue            2157 drivers/nvme/host/tcp.c 	struct nvme_ns *ns = hctx->queue->queuedata;
queue            2158 drivers/nvme/host/tcp.c 	struct nvme_tcp_queue *queue = hctx->driver_data;
queue            2161 drivers/nvme/host/tcp.c 	bool queue_ready = test_bit(NVME_TCP_Q_LIVE, &queue->flags);
queue            2164 drivers/nvme/host/tcp.c 	if (!nvmf_check_ready(&queue->ctrl->ctrl, rq, queue_ready))
queue            2165 drivers/nvme/host/tcp.c 		return nvmf_fail_nonready_command(&queue->ctrl->ctrl, rq);
queue            2225 drivers/nvme/host/tcp.c 	struct nvme_tcp_queue *queue = hctx->driver_data;
queue            2226 drivers/nvme/host/tcp.c 	struct sock *sk = queue->sock->sk;
queue            2230 drivers/nvme/host/tcp.c 	nvme_tcp_try_recv(queue);
queue            2231 drivers/nvme/host/tcp.c 	return queue->nr_cqe;
queue              80 drivers/nvme/target/fc.c 	struct nvmet_fc_tgt_queue	*queue;
queue             159 drivers/nvme/target/fc.c 	return (fodptr - fodptr->queue->fod);
queue             223 drivers/nvme/target/fc.c static void nvmet_fc_tgt_q_put(struct nvmet_fc_tgt_queue *queue);
queue             224 drivers/nvme/target/fc.c static int nvmet_fc_tgt_q_get(struct nvmet_fc_tgt_queue *queue);
queue             418 drivers/nvme/target/fc.c 				struct nvmet_fc_tgt_queue *queue)
queue             420 drivers/nvme/target/fc.c 	struct nvmet_fc_fcp_iod *fod = queue->fod;
queue             423 drivers/nvme/target/fc.c 	for (i = 0; i < queue->sqsize; fod++, i++) {
queue             426 drivers/nvme/target/fc.c 		fod->queue = queue;
queue             431 drivers/nvme/target/fc.c 		list_add_tail(&fod->fcp_list, &queue->fod_list);
queue             453 drivers/nvme/target/fc.c 				struct nvmet_fc_tgt_queue *queue)
queue             455 drivers/nvme/target/fc.c 	struct nvmet_fc_fcp_iod *fod = queue->fod;
queue             458 drivers/nvme/target/fc.c 	for (i = 0; i < queue->sqsize; fod++, i++) {
queue             466 drivers/nvme/target/fc.c nvmet_fc_alloc_fcp_iod(struct nvmet_fc_tgt_queue *queue)
queue             470 drivers/nvme/target/fc.c 	lockdep_assert_held(&queue->qlock);
queue             472 drivers/nvme/target/fc.c 	fod = list_first_entry_or_null(&queue->fod_list,
queue             489 drivers/nvme/target/fc.c 		       struct nvmet_fc_tgt_queue *queue,
queue             498 drivers/nvme/target/fc.c 	fcpreq->hwqid = queue->qid ?
queue             499 drivers/nvme/target/fc.c 			((queue->qid - 1) % tgtport->ops->max_hw_queues) : 0;
queue             511 drivers/nvme/target/fc.c 	nvmet_fc_queue_fcp_req(fod->tgtport, fod->queue, fod->fcpreq);
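The hwqid computation above maps NVMe queue ids onto the LLDD's hardware queues: the admin queue (qid 0) is pinned to hardware queue 0, and I/O queues wrap round-robin over tgtport->ops->max_hw_queues. A tiny standalone restatement of that mapping (pick_hwqid is a made-up helper name):

    #include <stdio.h>

    static int pick_hwqid(int qid, int max_hw_queues)
    {
            /* admin queue -> hwq 0; I/O queues wrap round-robin */
            return qid ? (qid - 1) % max_hw_queues : 0;
    }

    int main(void)
    {
            for (int qid = 0; qid < 6; qid++)
                    printf("qid %d -> hwq %d\n", qid, pick_hwqid(qid, 4));
            return 0;
    }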
queue             516 drivers/nvme/target/fc.c nvmet_fc_free_fcp_iod(struct nvmet_fc_tgt_queue *queue,
queue             538 drivers/nvme/target/fc.c 	nvmet_fc_tgt_q_put(queue);
queue             540 drivers/nvme/target/fc.c 	spin_lock_irqsave(&queue->qlock, flags);
queue             541 drivers/nvme/target/fc.c 	deferfcp = list_first_entry_or_null(&queue->pending_cmd_list,
queue             544 drivers/nvme/target/fc.c 		list_add_tail(&fod->fcp_list, &fod->queue->fod_list);
queue             545 drivers/nvme/target/fc.c 		spin_unlock_irqrestore(&queue->qlock, flags);
queue             555 drivers/nvme/target/fc.c 	list_add_tail(&deferfcp->req_list, &queue->avail_defer_list);
queue             557 drivers/nvme/target/fc.c 	spin_unlock_irqrestore(&queue->qlock, flags);
queue             577 drivers/nvme/target/fc.c 	queue_work(queue->work_q, &fod->defer_work);
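nvmet_fc_alloc_fcp_iod() and nvmet_fc_free_fcp_iod() above implement a classic lock-protected object pool: pop the first entry off fod_list under qlock with list_first_entry_or_null(), and on free either hand the slot to a deferred command or push it back with list_add_tail(). A userspace sketch of just the pool half, with a mutex standing in for the spinlock and a singly linked list standing in for list_head (all names invented):

    #include <pthread.h>
    #include <stdio.h>

    struct iod {
            struct iod *next;
            int tag;
    };

    struct pool {
            pthread_mutex_t lock;
            struct iod *free_head;  /* NULL when exhausted */
    };

    static struct iod *pool_get(struct pool *p)
    {
            pthread_mutex_lock(&p->lock);
            struct iod *iod = p->free_head; /* "first entry or null" */
            if (iod)
                    p->free_head = iod->next;
            pthread_mutex_unlock(&p->lock);
            return iod;
    }

    static void pool_put(struct pool *p, struct iod *iod)
    {
            pthread_mutex_lock(&p->lock);
            iod->next = p->free_head;       /* return slot to the list */
            p->free_head = iod;
            pthread_mutex_unlock(&p->lock);
    }

    int main(void)
    {
            struct iod slots[2] = { { &slots[1], 0 }, { NULL, 1 } };
            struct pool p = { PTHREAD_MUTEX_INITIALIZER, &slots[0] };
            struct iod *a = pool_get(&p);
            printf("got tag %d\n", a->tag);
            pool_put(&p, a);
            return 0;
    }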
queue             584 drivers/nvme/target/fc.c 	struct nvmet_fc_tgt_queue *queue;
queue             591 drivers/nvme/target/fc.c 	queue = kzalloc(struct_size(queue, fod, sqsize), GFP_KERNEL);
queue             592 drivers/nvme/target/fc.c 	if (!queue)
queue             598 drivers/nvme/target/fc.c 	queue->work_q = alloc_workqueue("ntfc%d.%d.%d", 0, 0,
queue             601 drivers/nvme/target/fc.c 	if (!queue->work_q)
queue             604 drivers/nvme/target/fc.c 	queue->qid = qid;
queue             605 drivers/nvme/target/fc.c 	queue->sqsize = sqsize;
queue             606 drivers/nvme/target/fc.c 	queue->assoc = assoc;
queue             607 drivers/nvme/target/fc.c 	INIT_LIST_HEAD(&queue->fod_list);
queue             608 drivers/nvme/target/fc.c 	INIT_LIST_HEAD(&queue->avail_defer_list);
queue             609 drivers/nvme/target/fc.c 	INIT_LIST_HEAD(&queue->pending_cmd_list);
queue             610 drivers/nvme/target/fc.c 	atomic_set(&queue->connected, 0);
queue             611 drivers/nvme/target/fc.c 	atomic_set(&queue->sqtail, 0);
queue             612 drivers/nvme/target/fc.c 	atomic_set(&queue->rsn, 1);
queue             613 drivers/nvme/target/fc.c 	atomic_set(&queue->zrspcnt, 0);
queue             614 drivers/nvme/target/fc.c 	spin_lock_init(&queue->qlock);
queue             615 drivers/nvme/target/fc.c 	kref_init(&queue->ref);
queue             617 drivers/nvme/target/fc.c 	nvmet_fc_prep_fcp_iodlist(assoc->tgtport, queue);
queue             619 drivers/nvme/target/fc.c 	ret = nvmet_sq_init(&queue->nvme_sq);
queue             625 drivers/nvme/target/fc.c 	assoc->queues[qid] = queue;
queue             628 drivers/nvme/target/fc.c 	return queue;
queue             631 drivers/nvme/target/fc.c 	nvmet_fc_destroy_fcp_iodlist(assoc->tgtport, queue);
queue             632 drivers/nvme/target/fc.c 	destroy_workqueue(queue->work_q);
queue             636 drivers/nvme/target/fc.c 	kfree(queue);
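The allocation at fc.c:591 above sizes the queue header and its fod[] array in a single kzalloc: struct_size(queue, fod, sqsize) expands to sizeof(*queue) + sqsize * sizeof(queue->fod[0]) with overflow checking. A plain-C approximation of the flexible-array-member idiom it relies on (no overflow check, and the types are illustrative):

    #include <stdio.h>
    #include <stdlib.h>

    struct elem { int tag; };

    struct tgt_queue {
            int qid;
            size_t sqsize;
            struct elem fod[];      /* flexible array, sized at alloc time */
    };

    int main(void)
    {
            size_t sqsize = 32;
            struct tgt_queue *q =
                    calloc(1, sizeof(*q) + sqsize * sizeof(q->fod[0]));
            if (!q)
                    return 1;
            q->sqsize = sqsize;
            q->fod[31].tag = 31;    /* last slot is valid storage */
            printf("queue with %zu fods\n", q->sqsize);
            free(q);
            return 0;
    }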
queue             644 drivers/nvme/target/fc.c 	struct nvmet_fc_tgt_queue *queue =
queue             648 drivers/nvme/target/fc.c 	spin_lock_irqsave(&queue->assoc->tgtport->lock, flags);
queue             649 drivers/nvme/target/fc.c 	queue->assoc->queues[queue->qid] = NULL;
queue             650 drivers/nvme/target/fc.c 	spin_unlock_irqrestore(&queue->assoc->tgtport->lock, flags);
queue             652 drivers/nvme/target/fc.c 	nvmet_fc_destroy_fcp_iodlist(queue->assoc->tgtport, queue);
queue             654 drivers/nvme/target/fc.c 	nvmet_fc_tgt_a_put(queue->assoc);
queue             656 drivers/nvme/target/fc.c 	destroy_workqueue(queue->work_q);
queue             658 drivers/nvme/target/fc.c 	kfree(queue);
queue             662 drivers/nvme/target/fc.c nvmet_fc_tgt_q_put(struct nvmet_fc_tgt_queue *queue)
queue             664 drivers/nvme/target/fc.c 	kref_put(&queue->ref, nvmet_fc_tgt_queue_free);
queue             668 drivers/nvme/target/fc.c nvmet_fc_tgt_q_get(struct nvmet_fc_tgt_queue *queue)
queue             670 drivers/nvme/target/fc.c 	return kref_get_unless_zero(&queue->ref);
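The pair above is the standard kref lifetime pattern: nvmet_fc_tgt_q_put() drops a reference and the release callback frees the queue on the last put, while lookups use kref_get_unless_zero() so a queue whose refcount already reached zero cannot be resurrected. A userspace sketch with C11 atomics (obj, obj_get_unless_zero, and obj_put are illustrative names):

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct obj {
            atomic_int ref;
    };

    static bool obj_get_unless_zero(struct obj *o)
    {
            int v = atomic_load(&o->ref);
            while (v > 0) {
                    /* CAS loop: only increment while still nonzero */
                    if (atomic_compare_exchange_weak(&o->ref, &v, v + 1))
                            return true;
            }
            return false;   /* object is already dying */
    }

    static void obj_put(struct obj *o)
    {
            if (atomic_fetch_sub(&o->ref, 1) == 1) {
                    printf("last put, freeing\n");
                    free(o);
            }
    }

    int main(void)
    {
            struct obj *o = malloc(sizeof(*o));
            atomic_init(&o->ref, 1);
            if (obj_get_unless_zero(o)) /* 1 -> 2 */
                    obj_put(o);         /* 2 -> 1 */
            obj_put(o);                 /* 1 -> 0, frees */
            return 0;
    }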
queue             675 drivers/nvme/target/fc.c nvmet_fc_delete_target_queue(struct nvmet_fc_tgt_queue *queue)
queue             677 drivers/nvme/target/fc.c 	struct nvmet_fc_tgtport *tgtport = queue->assoc->tgtport;
queue             678 drivers/nvme/target/fc.c 	struct nvmet_fc_fcp_iod *fod = queue->fod;
queue             684 drivers/nvme/target/fc.c 	disconnect = atomic_xchg(&queue->connected, 0);
queue             686 drivers/nvme/target/fc.c 	spin_lock_irqsave(&queue->qlock, flags);
queue             688 drivers/nvme/target/fc.c 	for (i = 0; i < queue->sqsize; fod++, i++) {
queue             710 drivers/nvme/target/fc.c 	list_for_each_entry_safe(deferfcp, tempptr, &queue->avail_defer_list,
queue             717 drivers/nvme/target/fc.c 		deferfcp = list_first_entry_or_null(&queue->pending_cmd_list,
queue             723 drivers/nvme/target/fc.c 		spin_unlock_irqrestore(&queue->qlock, flags);
queue             735 drivers/nvme/target/fc.c 		nvmet_fc_tgt_q_put(queue);
queue             739 drivers/nvme/target/fc.c 		spin_lock_irqsave(&queue->qlock, flags);
queue             741 drivers/nvme/target/fc.c 	spin_unlock_irqrestore(&queue->qlock, flags);
queue             743 drivers/nvme/target/fc.c 	flush_workqueue(queue->work_q);
queue             746 drivers/nvme/target/fc.c 		nvmet_sq_destroy(&queue->nvme_sq);
queue             748 drivers/nvme/target/fc.c 	nvmet_fc_tgt_q_put(queue);
queue             756 drivers/nvme/target/fc.c 	struct nvmet_fc_tgt_queue *queue;
queue             767 drivers/nvme/target/fc.c 			queue = assoc->queues[qid];
queue             768 drivers/nvme/target/fc.c 			if (queue &&
queue             769 drivers/nvme/target/fc.c 			    (!atomic_read(&queue->connected) ||
queue             770 drivers/nvme/target/fc.c 			     !nvmet_fc_tgt_q_get(queue)))
queue             771 drivers/nvme/target/fc.c 				queue = NULL;
queue             773 drivers/nvme/target/fc.c 			return queue;
queue             875 drivers/nvme/target/fc.c 	struct nvmet_fc_tgt_queue *queue;
queue             881 drivers/nvme/target/fc.c 		queue = assoc->queues[i];
queue             882 drivers/nvme/target/fc.c 		if (queue) {
queue             883 drivers/nvme/target/fc.c 			if (!nvmet_fc_tgt_q_get(queue))
queue             886 drivers/nvme/target/fc.c 			nvmet_fc_delete_target_queue(queue);
queue             887 drivers/nvme/target/fc.c 			nvmet_fc_tgt_q_put(queue);
queue            1157 drivers/nvme/target/fc.c 	struct nvmet_fc_tgt_queue *queue;
queue            1171 drivers/nvme/target/fc.c 			queue = assoc->queues[0];
queue            1172 drivers/nvme/target/fc.c 			if (queue && queue->nvme_sq.ctrl == ctrl) {
queue            1328 drivers/nvme/target/fc.c 	struct nvmet_fc_tgt_queue *queue;
queue            1363 drivers/nvme/target/fc.c 			queue = nvmet_fc_alloc_target_queue(iod->assoc, 0,
queue            1365 drivers/nvme/target/fc.c 			if (!queue)
queue            1381 drivers/nvme/target/fc.c 	queue->ersp_ratio = be16_to_cpu(rqst->assoc_cmd.ersp_ratio);
queue            1382 drivers/nvme/target/fc.c 	atomic_set(&queue->connected, 1);
queue            1383 drivers/nvme/target/fc.c 	queue->sqhd = 0;	/* best place to init value */
queue            1414 drivers/nvme/target/fc.c 	struct nvmet_fc_tgt_queue *queue;
queue            1450 drivers/nvme/target/fc.c 			queue = nvmet_fc_alloc_target_queue(iod->assoc,
queue            1453 drivers/nvme/target/fc.c 			if (!queue)
queue            1474 drivers/nvme/target/fc.c 	queue->ersp_ratio = be16_to_cpu(rqst->connect_cmd.ersp_ratio);
queue            1475 drivers/nvme/target/fc.c 	atomic_set(&queue->connected, 1);
queue            1476 drivers/nvme/target/fc.c 	queue->sqhd = 0;	/* best place to init value */
queue            1807 drivers/nvme/target/fc.c 	rspcnt = atomic_inc_return(&fod->queue->zrspcnt);
queue            1808 drivers/nvme/target/fc.c 	if (!(rspcnt % fod->queue->ersp_ratio) ||
queue            1813 drivers/nvme/target/fc.c 	    queue_90percent_full(fod->queue, le16_to_cpu(cqe->sq_head)))
queue            1825 drivers/nvme/target/fc.c 		rsn = atomic_inc_return(&fod->queue->rsn);
queue            1854 drivers/nvme/target/fc.c 	nvmet_fc_free_fcp_iod(fod->queue, fod);
queue            2036 drivers/nvme/target/fc.c 			nvmet_fc_free_fcp_iod(fod->queue, fod);
queue            2060 drivers/nvme/target/fc.c 		nvmet_fc_free_fcp_iod(fod->queue, fod);
queue            2094 drivers/nvme/target/fc.c 		fod->queue->sqhd = cqe->sq_head;
queue            2105 drivers/nvme/target/fc.c 		cqe->sq_head = fod->queue->sqhd;	/* echo last cqe sqhd */
queue            2106 drivers/nvme/target/fc.c 		cqe->sq_id = cpu_to_le16(fod->queue->qid);
queue            2197 drivers/nvme/target/fc.c 				&fod->queue->nvme_cq,
queue            2198 drivers/nvme/target/fc.c 				&fod->queue->nvme_sq,
queue            2209 drivers/nvme/target/fc.c 	atomic_inc(&fod->queue->sqtail);
queue            2295 drivers/nvme/target/fc.c 	struct nvmet_fc_tgt_queue *queue;
queue            2307 drivers/nvme/target/fc.c 	queue = nvmet_fc_find_target_queue(tgtport,
queue            2309 drivers/nvme/target/fc.c 	if (!queue)
queue            2319 drivers/nvme/target/fc.c 	spin_lock_irqsave(&queue->qlock, flags);
queue            2321 drivers/nvme/target/fc.c 	fod = nvmet_fc_alloc_fcp_iod(queue);
queue            2323 drivers/nvme/target/fc.c 		spin_unlock_irqrestore(&queue->qlock, flags);
queue            2330 drivers/nvme/target/fc.c 		nvmet_fc_queue_fcp_req(tgtport, queue, fcpreq);
queue            2336 drivers/nvme/target/fc.c 		spin_unlock_irqrestore(&queue->qlock, flags);
queue            2338 drivers/nvme/target/fc.c 		nvmet_fc_tgt_q_put(queue);
queue            2342 drivers/nvme/target/fc.c 	deferfcp = list_first_entry_or_null(&queue->avail_defer_list,
queue            2348 drivers/nvme/target/fc.c 		spin_unlock_irqrestore(&queue->qlock, flags);
queue            2354 drivers/nvme/target/fc.c 			nvmet_fc_tgt_q_put(queue);
queue            2357 drivers/nvme/target/fc.c 		spin_lock_irqsave(&queue->qlock, flags);
queue            2366 drivers/nvme/target/fc.c 	list_add_tail(&deferfcp->req_list, &queue->pending_cmd_list);
queue            2370 drivers/nvme/target/fc.c 	spin_unlock_irqrestore(&queue->qlock, flags);
queue            2404 drivers/nvme/target/fc.c 	struct nvmet_fc_tgt_queue *queue;
queue            2411 drivers/nvme/target/fc.c 	queue = fod->queue;
queue            2413 drivers/nvme/target/fc.c 	spin_lock_irqsave(&queue->qlock, flags);
queue            2425 drivers/nvme/target/fc.c 	spin_unlock_irqrestore(&queue->qlock, flags);
queue              23 drivers/nvme/target/loop.c 	struct nvme_loop_queue	*queue;
queue              70 drivers/nvme/target/loop.c static inline int nvme_loop_queue_idx(struct nvme_loop_queue *queue)
queue              72 drivers/nvme/target/loop.c 	return queue - queue->ctrl->queues;
queue              84 drivers/nvme/target/loop.c static struct blk_mq_tags *nvme_loop_tagset(struct nvme_loop_queue *queue)
queue              86 drivers/nvme/target/loop.c 	u32 queue_idx = nvme_loop_queue_idx(queue);
queue              89 drivers/nvme/target/loop.c 		return queue->ctrl->admin_tag_set.tags[queue_idx];
queue              90 drivers/nvme/target/loop.c 	return queue->ctrl->tag_set.tags[queue_idx - 1];
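nvme_loop_queue_idx() above recovers a queue's index by pointer subtraction against the start of the ctrl->queues array; index 0 then selects the admin tag set, and everything else indexes the I/O tag set at queue_idx - 1. A compressed illustration of the pointer-arithmetic half (struct names invented):

    #include <stdio.h>

    struct loop_queue { int dummy; };

    struct ctrl {
            struct loop_queue queues[4];    /* [0] = admin, rest = I/O */
    };

    static int queue_idx(struct ctrl *c, struct loop_queue *q)
    {
            return (int)(q - c->queues);    /* position within the array */
    }

    int main(void)
    {
            struct ctrl c;
            printf("idx=%d is_admin=%d\n",
                   queue_idx(&c, &c.queues[2]),
                   queue_idx(&c, &c.queues[2]) == 0);
            return 0;
    }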
queue              95 drivers/nvme/target/loop.c 	struct nvme_loop_queue *queue =
queue             105 drivers/nvme/target/loop.c 	if (unlikely(nvme_loop_queue_idx(queue) == 0 &&
queue             107 drivers/nvme/target/loop.c 		nvme_complete_async_event(&queue->ctrl->ctrl, cqe->status,
queue             112 drivers/nvme/target/loop.c 		rq = blk_mq_tag_to_rq(nvme_loop_tagset(queue), cqe->command_id);
queue             114 drivers/nvme/target/loop.c 			dev_err(queue->ctrl->ctrl.device,
queue             116 drivers/nvme/target/loop.c 				cqe->command_id, nvme_loop_queue_idx(queue));
queue             135 drivers/nvme/target/loop.c 	struct nvme_ns *ns = hctx->queue->queuedata;
queue             136 drivers/nvme/target/loop.c 	struct nvme_loop_queue *queue = hctx->driver_data;
queue             139 drivers/nvme/target/loop.c 	bool queue_ready = test_bit(NVME_LOOP_Q_LIVE, &queue->flags);
queue             142 drivers/nvme/target/loop.c 	if (!nvmf_check_ready(&queue->ctrl->ctrl, req, queue_ready))
queue             143 drivers/nvme/target/loop.c 		return nvmf_fail_nonready_command(&queue->ctrl->ctrl, req);
queue             151 drivers/nvme/target/loop.c 	iod->req.port = queue->ctrl->port;
queue             152 drivers/nvme/target/loop.c 	if (!nvmet_req_init(&iod->req, &queue->nvme_cq,
queue             153 drivers/nvme/target/loop.c 			&queue->nvme_sq, &nvme_loop_ops))
queue             177 drivers/nvme/target/loop.c 	struct nvme_loop_queue *queue = &ctrl->queues[0];
queue             185 drivers/nvme/target/loop.c 	if (!nvmet_req_init(&iod->req, &queue->nvme_cq, &queue->nvme_sq,
queue             199 drivers/nvme/target/loop.c 	iod->queue = &ctrl->queues[queue_idx];
queue             219 drivers/nvme/target/loop.c 	struct nvme_loop_queue *queue = &ctrl->queues[hctx_idx + 1];
queue             223 drivers/nvme/target/loop.c 	hctx->driver_data = queue;
queue             231 drivers/nvme/target/loop.c 	struct nvme_loop_queue *queue = &ctrl->queues[0];
queue             235 drivers/nvme/target/loop.c 	hctx->driver_data = queue;
queue              40 drivers/nvme/target/rdma.c 	struct nvmet_rdma_queue	*queue;
queue              54 drivers/nvme/target/rdma.c 	struct nvmet_rdma_queue	*queue;
queue             133 drivers/nvme/target/rdma.c static void nvmet_rdma_queue_disconnect(struct nvmet_rdma_queue *queue);
queue             168 drivers/nvme/target/rdma.c nvmet_rdma_get_rsp(struct nvmet_rdma_queue *queue)
queue             173 drivers/nvme/target/rdma.c 	spin_lock_irqsave(&queue->rsps_lock, flags);
queue             174 drivers/nvme/target/rdma.c 	rsp = list_first_entry_or_null(&queue->free_rsps,
queue             178 drivers/nvme/target/rdma.c 	spin_unlock_irqrestore(&queue->rsps_lock, flags);
queue             186 drivers/nvme/target/rdma.c 		ret = nvmet_rdma_alloc_rsp(queue->dev, rsp);
queue             204 drivers/nvme/target/rdma.c 		nvmet_rdma_free_rsp(rsp->queue->dev, rsp);
queue             209 drivers/nvme/target/rdma.c 	spin_lock_irqsave(&rsp->queue->rsps_lock, flags);
queue             210 drivers/nvme/target/rdma.c 	list_add_tail(&rsp->free_list, &rsp->queue->free_rsps);
queue             211 drivers/nvme/target/rdma.c 	spin_unlock_irqrestore(&rsp->queue->rsps_lock, flags);
queue             406 drivers/nvme/target/rdma.c nvmet_rdma_alloc_rsps(struct nvmet_rdma_queue *queue)
queue             408 drivers/nvme/target/rdma.c 	struct nvmet_rdma_device *ndev = queue->dev;
queue             409 drivers/nvme/target/rdma.c 	int nr_rsps = queue->recv_queue_size * 2;
queue             412 drivers/nvme/target/rdma.c 	queue->rsps = kcalloc(nr_rsps, sizeof(struct nvmet_rdma_rsp),
queue             414 drivers/nvme/target/rdma.c 	if (!queue->rsps)
queue             418 drivers/nvme/target/rdma.c 		struct nvmet_rdma_rsp *rsp = &queue->rsps[i];
queue             424 drivers/nvme/target/rdma.c 		list_add_tail(&rsp->free_list, &queue->free_rsps);
queue             431 drivers/nvme/target/rdma.c 		struct nvmet_rdma_rsp *rsp = &queue->rsps[i];
queue             436 drivers/nvme/target/rdma.c 	kfree(queue->rsps);
queue             441 drivers/nvme/target/rdma.c static void nvmet_rdma_free_rsps(struct nvmet_rdma_queue *queue)
queue             443 drivers/nvme/target/rdma.c 	struct nvmet_rdma_device *ndev = queue->dev;
queue             444 drivers/nvme/target/rdma.c 	int i, nr_rsps = queue->recv_queue_size * 2;
queue             447 drivers/nvme/target/rdma.c 		struct nvmet_rdma_rsp *rsp = &queue->rsps[i];
queue             452 drivers/nvme/target/rdma.c 	kfree(queue->rsps);
queue             467 drivers/nvme/target/rdma.c 		ret = ib_post_recv(cmd->queue->cm_id->qp, &cmd->wr, NULL);
queue             475 drivers/nvme/target/rdma.c static void nvmet_rdma_process_wr_wait_list(struct nvmet_rdma_queue *queue)
queue             477 drivers/nvme/target/rdma.c 	spin_lock(&queue->rsp_wr_wait_lock);
queue             478 drivers/nvme/target/rdma.c 	while (!list_empty(&queue->rsp_wr_wait_list)) {
queue             482 drivers/nvme/target/rdma.c 		rsp = list_entry(queue->rsp_wr_wait_list.next,
queue             486 drivers/nvme/target/rdma.c 		spin_unlock(&queue->rsp_wr_wait_lock);
queue             488 drivers/nvme/target/rdma.c 		spin_lock(&queue->rsp_wr_wait_lock);
queue             491 drivers/nvme/target/rdma.c 			list_add(&rsp->wait_list, &queue->rsp_wr_wait_list);
queue             495 drivers/nvme/target/rdma.c 	spin_unlock(&queue->rsp_wr_wait_lock);
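nvmet_rdma_process_wr_wait_list() above drains rsp_wr_wait_list in the usual drop-the-lock-to-work shape: each response is unlinked under rsp_wr_wait_lock, executed with the lock released, and pushed back onto the head (and the loop abandoned) if it still cannot make progress. A sketch of that control flow, with do_work and the node layout invented for illustration:

    #include <pthread.h>
    #include <stdbool.h>
    #include <stdio.h>

    struct node { struct node *next; int id; };

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static struct node *wait_list;

    static bool do_work(struct node *n)
    {
            printf("processing %d\n", n->id);
            return n->id != 2;      /* pretend id 2 still lacks resources */
    }

    static void process_wait_list(void)
    {
            pthread_mutex_lock(&lock);
            while (wait_list) {
                    struct node *n = wait_list;
                    wait_list = n->next;

                    pthread_mutex_unlock(&lock);    /* work unlocked */
                    bool ok = do_work(n);
                    pthread_mutex_lock(&lock);

                    if (!ok) {      /* re-queue at the head, retry later */
                            n->next = wait_list;
                            wait_list = n;
                            break;
                    }
            }
            pthread_mutex_unlock(&lock);
    }

    int main(void)
    {
            struct node n2 = { NULL, 2 }, n1 = { &n2, 1 };
            wait_list = &n1;
            process_wait_list();
            return 0;
    }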
queue             501 drivers/nvme/target/rdma.c 	struct nvmet_rdma_queue *queue = rsp->queue;
queue             503 drivers/nvme/target/rdma.c 	atomic_add(1 + rsp->n_rdma, &queue->sq_wr_avail);
queue             506 drivers/nvme/target/rdma.c 		rdma_rw_ctx_destroy(&rsp->rw, queue->cm_id->qp,
queue             507 drivers/nvme/target/rdma.c 				queue->cm_id->port_num, rsp->req.sg,
queue             514 drivers/nvme/target/rdma.c 	if (unlikely(!list_empty_careful(&queue->rsp_wr_wait_list)))
queue             515 drivers/nvme/target/rdma.c 		nvmet_rdma_process_wr_wait_list(queue);
queue             520 drivers/nvme/target/rdma.c static void nvmet_rdma_error_comp(struct nvmet_rdma_queue *queue)
queue             522 drivers/nvme/target/rdma.c 	if (queue->nvme_sq.ctrl) {
queue             523 drivers/nvme/target/rdma.c 		nvmet_ctrl_fatal_error(queue->nvme_sq.ctrl);
queue             530 drivers/nvme/target/rdma.c 		nvmet_rdma_queue_disconnect(queue);
queue             538 drivers/nvme/target/rdma.c 	struct nvmet_rdma_queue *queue = cq->cq_context;
queue             546 drivers/nvme/target/rdma.c 		nvmet_rdma_error_comp(queue);
queue             554 drivers/nvme/target/rdma.c 	struct rdma_cm_id *cm_id = rsp->queue->cm_id;
queue             570 drivers/nvme/target/rdma.c 	nvmet_rdma_post_recv(rsp->queue->dev, rsp->cmd);
queue             572 drivers/nvme/target/rdma.c 	ib_dma_sync_single_for_device(rsp->queue->dev->device,
queue             586 drivers/nvme/target/rdma.c 	struct nvmet_rdma_queue *queue = cq->cq_context;
queue             589 drivers/nvme/target/rdma.c 	atomic_add(rsp->n_rdma, &queue->sq_wr_avail);
queue             590 drivers/nvme/target/rdma.c 	rdma_rw_ctx_destroy(&rsp->rw, queue->cm_id->qp,
queue             591 drivers/nvme/target/rdma.c 			queue->cm_id->port_num, rsp->req.sg,
queue             601 drivers/nvme/target/rdma.c 			nvmet_rdma_error_comp(queue);
queue             645 drivers/nvme/target/rdma.c 	if (off + len > rsp->queue->dev->inline_data_size) {
queue             663 drivers/nvme/target/rdma.c 	struct rdma_cm_id *cm_id = rsp->queue->cm_id;
queue             733 drivers/nvme/target/rdma.c 	struct nvmet_rdma_queue *queue = rsp->queue;
queue             736 drivers/nvme/target/rdma.c 			&queue->sq_wr_avail) < 0)) {
queue             738 drivers/nvme/target/rdma.c 				1 + rsp->n_rdma, queue->idx,
queue             739 drivers/nvme/target/rdma.c 				queue->nvme_sq.ctrl->cntlid);
queue             740 drivers/nvme/target/rdma.c 		atomic_add(1 + rsp->n_rdma, &queue->sq_wr_avail);
queue             745 drivers/nvme/target/rdma.c 		if (rdma_rw_ctx_post(&rsp->rw, queue->cm_id->qp,
queue             746 drivers/nvme/target/rdma.c 				queue->cm_id->port_num, &rsp->read_cqe, NULL))
queue             755 drivers/nvme/target/rdma.c static void nvmet_rdma_handle_command(struct nvmet_rdma_queue *queue,
queue             760 drivers/nvme/target/rdma.c 	ib_dma_sync_single_for_cpu(queue->dev->device,
queue             763 drivers/nvme/target/rdma.c 	ib_dma_sync_single_for_cpu(queue->dev->device,
queue             767 drivers/nvme/target/rdma.c 	if (!nvmet_req_init(&cmd->req, &queue->nvme_cq,
queue             768 drivers/nvme/target/rdma.c 			&queue->nvme_sq, &nvmet_rdma_ops))
queue             776 drivers/nvme/target/rdma.c 		spin_lock(&queue->rsp_wr_wait_lock);
queue             777 drivers/nvme/target/rdma.c 		list_add_tail(&cmd->wait_list, &queue->rsp_wr_wait_list);
queue             778 drivers/nvme/target/rdma.c 		spin_unlock(&queue->rsp_wr_wait_lock);
queue             791 drivers/nvme/target/rdma.c 	struct nvmet_rdma_queue *queue = cq->cq_context;
queue             799 drivers/nvme/target/rdma.c 			nvmet_rdma_error_comp(queue);
queue             806 drivers/nvme/target/rdma.c 		nvmet_rdma_error_comp(queue);
queue             810 drivers/nvme/target/rdma.c 	cmd->queue = queue;
queue             811 drivers/nvme/target/rdma.c 	rsp = nvmet_rdma_get_rsp(queue);
queue             818 drivers/nvme/target/rdma.c 		nvmet_rdma_post_recv(queue->dev, cmd);
queue             821 drivers/nvme/target/rdma.c 	rsp->queue = queue;
queue             825 drivers/nvme/target/rdma.c 	rsp->req.port = queue->port;
queue             828 drivers/nvme/target/rdma.c 	if (unlikely(queue->state != NVMET_RDMA_Q_LIVE)) {
queue             831 drivers/nvme/target/rdma.c 		spin_lock_irqsave(&queue->state_lock, flags);
queue             832 drivers/nvme/target/rdma.c 		if (queue->state == NVMET_RDMA_Q_CONNECTING)
queue             833 drivers/nvme/target/rdma.c 			list_add_tail(&rsp->wait_list, &queue->rsp_wait_list);
queue             836 drivers/nvme/target/rdma.c 		spin_unlock_irqrestore(&queue->state_lock, flags);
queue             840 drivers/nvme/target/rdma.c 	nvmet_rdma_handle_command(queue, rsp);
queue             974 drivers/nvme/target/rdma.c static int nvmet_rdma_create_queue_ib(struct nvmet_rdma_queue *queue)
queue             977 drivers/nvme/target/rdma.c 	struct nvmet_rdma_device *ndev = queue->dev;
queue             984 drivers/nvme/target/rdma.c 	comp_vector = !queue->host_qid ? 0 :
queue             985 drivers/nvme/target/rdma.c 		queue->idx % ndev->device->num_comp_vectors;
queue             990 drivers/nvme/target/rdma.c 	nr_cqe = queue->recv_queue_size + 2 * queue->send_queue_size;
queue             992 drivers/nvme/target/rdma.c 	queue->cq = ib_alloc_cq(ndev->device, queue,
queue             995 drivers/nvme/target/rdma.c 	if (IS_ERR(queue->cq)) {
queue             996 drivers/nvme/target/rdma.c 		ret = PTR_ERR(queue->cq);
queue            1003 drivers/nvme/target/rdma.c 	qp_attr.qp_context = queue;
queue            1005 drivers/nvme/target/rdma.c 	qp_attr.send_cq = queue->cq;
queue            1006 drivers/nvme/target/rdma.c 	qp_attr.recv_cq = queue->cq;
queue            1010 drivers/nvme/target/rdma.c 	qp_attr.cap.max_send_wr = queue->send_queue_size + 1;
queue            1011 drivers/nvme/target/rdma.c 	qp_attr.cap.max_rdma_ctxs = queue->send_queue_size;
queue            1019 drivers/nvme/target/rdma.c 		qp_attr.cap.max_recv_wr = 1 + queue->recv_queue_size;
queue            1023 drivers/nvme/target/rdma.c 	ret = rdma_create_qp(queue->cm_id, ndev->pd, &qp_attr);
queue            1029 drivers/nvme/target/rdma.c 	atomic_set(&queue->sq_wr_avail, qp_attr.cap.max_send_wr);
queue            1032 drivers/nvme/target/rdma.c 		 __func__, queue->cq->cqe, qp_attr.cap.max_send_sge,
queue            1033 drivers/nvme/target/rdma.c 		 qp_attr.cap.max_send_wr, queue->cm_id);
queue            1036 drivers/nvme/target/rdma.c 		for (i = 0; i < queue->recv_queue_size; i++) {
queue            1037 drivers/nvme/target/rdma.c 			queue->cmds[i].queue = queue;
queue            1038 drivers/nvme/target/rdma.c 			ret = nvmet_rdma_post_recv(ndev, &queue->cmds[i]);
queue            1048 drivers/nvme/target/rdma.c 	rdma_destroy_qp(queue->cm_id);
queue            1050 drivers/nvme/target/rdma.c 	ib_free_cq(queue->cq);
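The sizing above allocates one CQE per receive slot plus two per send slot (nr_cqe = recv_queue_size + 2 * send_queue_size), presumably to cover both the send completion and a possible RDMA READ completion per command, and it spreads I/O queues across completion vectors while pinning admin queues (host_qid 0) to vector 0. A trivial standalone restatement of the two computations (pick_comp_vector is a made-up helper):

    #include <stdio.h>

    static int pick_comp_vector(int host_qid, int idx, int num_vectors)
    {
            /* admin queue -> vector 0; I/O queues round-robin */
            return !host_qid ? 0 : idx % num_vectors;
    }

    int main(void)
    {
            int recv_queue_size = 128, send_queue_size = 128;
            int nr_cqe = recv_queue_size + 2 * send_queue_size;

            printf("nr_cqe=%d vec=%d\n", nr_cqe,
                   pick_comp_vector(1, 5, 4)); /* I/O queue 5, 4 vectors */
            return 0;
    }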
queue            1054 drivers/nvme/target/rdma.c static void nvmet_rdma_destroy_queue_ib(struct nvmet_rdma_queue *queue)
queue            1056 drivers/nvme/target/rdma.c 	struct ib_qp *qp = queue->cm_id->qp;
queue            1059 drivers/nvme/target/rdma.c 	rdma_destroy_id(queue->cm_id);
queue            1061 drivers/nvme/target/rdma.c 	ib_free_cq(queue->cq);
queue            1064 drivers/nvme/target/rdma.c static void nvmet_rdma_free_queue(struct nvmet_rdma_queue *queue)
queue            1066 drivers/nvme/target/rdma.c 	pr_debug("freeing queue %d\n", queue->idx);
queue            1068 drivers/nvme/target/rdma.c 	nvmet_sq_destroy(&queue->nvme_sq);
queue            1070 drivers/nvme/target/rdma.c 	nvmet_rdma_destroy_queue_ib(queue);
queue            1071 drivers/nvme/target/rdma.c 	if (!queue->dev->srq) {
queue            1072 drivers/nvme/target/rdma.c 		nvmet_rdma_free_cmds(queue->dev, queue->cmds,
queue            1073 drivers/nvme/target/rdma.c 				queue->recv_queue_size,
queue            1074 drivers/nvme/target/rdma.c 				!queue->host_qid);
queue            1076 drivers/nvme/target/rdma.c 	nvmet_rdma_free_rsps(queue);
queue            1077 drivers/nvme/target/rdma.c 	ida_simple_remove(&nvmet_rdma_queue_ida, queue->idx);
queue            1078 drivers/nvme/target/rdma.c 	kfree(queue);
queue            1083 drivers/nvme/target/rdma.c 	struct nvmet_rdma_queue *queue =
queue            1085 drivers/nvme/target/rdma.c 	struct nvmet_rdma_device *dev = queue->dev;
queue            1087 drivers/nvme/target/rdma.c 	nvmet_rdma_free_queue(queue);
queue            1094 drivers/nvme/target/rdma.c 				struct nvmet_rdma_queue *queue)
queue            1105 drivers/nvme/target/rdma.c 	queue->host_qid = le16_to_cpu(req->qid);
queue            1111 drivers/nvme/target/rdma.c 	queue->recv_queue_size = le16_to_cpu(req->hsqsize) + 1;
queue            1112 drivers/nvme/target/rdma.c 	queue->send_queue_size = le16_to_cpu(req->hrqsize);
queue            1114 drivers/nvme/target/rdma.c 	if (!queue->host_qid && queue->recv_queue_size > NVME_AQ_DEPTH)
queue            1141 drivers/nvme/target/rdma.c 	struct nvmet_rdma_queue *queue;
queue            1144 drivers/nvme/target/rdma.c 	queue = kzalloc(sizeof(*queue), GFP_KERNEL);
queue            1145 drivers/nvme/target/rdma.c 	if (!queue) {
queue            1150 drivers/nvme/target/rdma.c 	ret = nvmet_sq_init(&queue->nvme_sq);
queue            1156 drivers/nvme/target/rdma.c 	ret = nvmet_rdma_parse_cm_connect_req(&event->param.conn, queue);
queue            1164 drivers/nvme/target/rdma.c 	INIT_WORK(&queue->release_work, nvmet_rdma_release_queue_work);
queue            1165 drivers/nvme/target/rdma.c 	queue->dev = ndev;
queue            1166 drivers/nvme/target/rdma.c 	queue->cm_id = cm_id;
queue            1168 drivers/nvme/target/rdma.c 	spin_lock_init(&queue->state_lock);
queue            1169 drivers/nvme/target/rdma.c 	queue->state = NVMET_RDMA_Q_CONNECTING;
queue            1170 drivers/nvme/target/rdma.c 	INIT_LIST_HEAD(&queue->rsp_wait_list);
queue            1171 drivers/nvme/target/rdma.c 	INIT_LIST_HEAD(&queue->rsp_wr_wait_list);
queue            1172 drivers/nvme/target/rdma.c 	spin_lock_init(&queue->rsp_wr_wait_lock);
queue            1173 drivers/nvme/target/rdma.c 	INIT_LIST_HEAD(&queue->free_rsps);
queue            1174 drivers/nvme/target/rdma.c 	spin_lock_init(&queue->rsps_lock);
queue            1175 drivers/nvme/target/rdma.c 	INIT_LIST_HEAD(&queue->queue_list);
queue            1177 drivers/nvme/target/rdma.c 	queue->idx = ida_simple_get(&nvmet_rdma_queue_ida, 0, 0, GFP_KERNEL);
queue            1178 drivers/nvme/target/rdma.c 	if (queue->idx < 0) {
queue            1183 drivers/nvme/target/rdma.c 	ret = nvmet_rdma_alloc_rsps(queue);
queue            1190 drivers/nvme/target/rdma.c 		queue->cmds = nvmet_rdma_alloc_cmds(ndev,
queue            1191 drivers/nvme/target/rdma.c 				queue->recv_queue_size,
queue            1192 drivers/nvme/target/rdma.c 				!queue->host_qid);
queue            1193 drivers/nvme/target/rdma.c 		if (IS_ERR(queue->cmds)) {
queue            1199 drivers/nvme/target/rdma.c 	ret = nvmet_rdma_create_queue_ib(queue);
queue            1207 drivers/nvme/target/rdma.c 	return queue;
queue            1211 drivers/nvme/target/rdma.c 		nvmet_rdma_free_cmds(queue->dev, queue->cmds,
queue            1212 drivers/nvme/target/rdma.c 				queue->recv_queue_size,
queue            1213 drivers/nvme/target/rdma.c 				!queue->host_qid);
queue            1216 drivers/nvme/target/rdma.c 	nvmet_rdma_free_rsps(queue);
queue            1218 drivers/nvme/target/rdma.c 	ida_simple_remove(&nvmet_rdma_queue_ida, queue->idx);
queue            1220 drivers/nvme/target/rdma.c 	nvmet_sq_destroy(&queue->nvme_sq);
queue            1222 drivers/nvme/target/rdma.c 	kfree(queue);
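nvmet_rdma_alloc_queue() above, whose unwind tail is visible in these hits (free cmds, free rsps, ida remove, destroy sq, kfree), is the kernel's canonical goto-unwind error handling: each successfully acquired resource adds one label, and a failure jumps to the label that releases everything acquired so far, in reverse order. A minimal standalone C sketch of the shape:

    #include <stdio.h>
    #include <stdlib.h>

    static int setup(void)
    {
            char *a, *b;
            int ret = -1;

            a = malloc(64);
            if (!a)
                    goto out;
            b = malloc(64);
            if (!b)
                    goto out_free_a;        /* undo only what succeeded */

            printf("all resources acquired\n");
            free(b);
            free(a);
            return 0;

    out_free_a:
            free(a);
    out:
            return ret;
    }

    int main(void) { return setup(); }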
queue            1230 drivers/nvme/target/rdma.c 	struct nvmet_rdma_queue *queue = priv;
queue            1234 drivers/nvme/target/rdma.c 		rdma_notify(queue->cm_id, event->event);
queue            1244 drivers/nvme/target/rdma.c 		struct nvmet_rdma_queue *queue,
queue            1254 drivers/nvme/target/rdma.c 		queue->dev->device->attrs.max_qp_init_rd_atom);
queue            1258 drivers/nvme/target/rdma.c 	priv.crqsize = cpu_to_le16(queue->recv_queue_size);
queue            1271 drivers/nvme/target/rdma.c 	struct nvmet_rdma_queue *queue;
queue            1280 drivers/nvme/target/rdma.c 	queue = nvmet_rdma_alloc_queue(ndev, cm_id, event);
queue            1281 drivers/nvme/target/rdma.c 	if (!queue) {
queue            1285 drivers/nvme/target/rdma.c 	queue->port = cm_id->context;
queue            1287 drivers/nvme/target/rdma.c 	if (queue->host_qid == 0) {
queue            1292 drivers/nvme/target/rdma.c 	ret = nvmet_rdma_cm_accept(cm_id, queue, &event->param.conn);
queue            1294 drivers/nvme/target/rdma.c 		schedule_work(&queue->release_work);
queue            1300 drivers/nvme/target/rdma.c 	list_add_tail(&queue->queue_list, &nvmet_rdma_queue_list);
queue            1311 drivers/nvme/target/rdma.c static void nvmet_rdma_queue_established(struct nvmet_rdma_queue *queue)
queue            1315 drivers/nvme/target/rdma.c 	spin_lock_irqsave(&queue->state_lock, flags);
queue            1316 drivers/nvme/target/rdma.c 	if (queue->state != NVMET_RDMA_Q_CONNECTING) {
queue            1320 drivers/nvme/target/rdma.c 	queue->state = NVMET_RDMA_Q_LIVE;
queue            1322 drivers/nvme/target/rdma.c 	while (!list_empty(&queue->rsp_wait_list)) {
queue            1325 drivers/nvme/target/rdma.c 		cmd = list_first_entry(&queue->rsp_wait_list,
queue            1329 drivers/nvme/target/rdma.c 		spin_unlock_irqrestore(&queue->state_lock, flags);
queue            1330 drivers/nvme/target/rdma.c 		nvmet_rdma_handle_command(queue, cmd);
queue            1331 drivers/nvme/target/rdma.c 		spin_lock_irqsave(&queue->state_lock, flags);
queue            1335 drivers/nvme/target/rdma.c 	spin_unlock_irqrestore(&queue->state_lock, flags);
queue            1338 drivers/nvme/target/rdma.c static void __nvmet_rdma_queue_disconnect(struct nvmet_rdma_queue *queue)
queue            1343 drivers/nvme/target/rdma.c 	pr_debug("cm_id= %p queue->state= %d\n", queue->cm_id, queue->state);
queue            1345 drivers/nvme/target/rdma.c 	spin_lock_irqsave(&queue->state_lock, flags);
queue            1346 drivers/nvme/target/rdma.c 	switch (queue->state) {
queue            1349 drivers/nvme/target/rdma.c 		queue->state = NVMET_RDMA_Q_DISCONNECTING;
queue            1355 drivers/nvme/target/rdma.c 	spin_unlock_irqrestore(&queue->state_lock, flags);
queue            1358 drivers/nvme/target/rdma.c 		rdma_disconnect(queue->cm_id);
queue            1359 drivers/nvme/target/rdma.c 		schedule_work(&queue->release_work);
queue            1363 drivers/nvme/target/rdma.c static void nvmet_rdma_queue_disconnect(struct nvmet_rdma_queue *queue)
queue            1368 drivers/nvme/target/rdma.c 	if (!list_empty(&queue->queue_list)) {
queue            1369 drivers/nvme/target/rdma.c 		list_del_init(&queue->queue_list);
queue            1375 drivers/nvme/target/rdma.c 		__nvmet_rdma_queue_disconnect(queue);
queue            1379 drivers/nvme/target/rdma.c 		struct nvmet_rdma_queue *queue)
queue            1381 drivers/nvme/target/rdma.c 	WARN_ON_ONCE(queue->state != NVMET_RDMA_Q_CONNECTING);
queue            1384 drivers/nvme/target/rdma.c 	if (!list_empty(&queue->queue_list))
queue            1385 drivers/nvme/target/rdma.c 		list_del_init(&queue->queue_list);
queue            1388 drivers/nvme/target/rdma.c 	pr_err("failed to connect queue %d\n", queue->idx);
queue            1389 drivers/nvme/target/rdma.c 	schedule_work(&queue->release_work);
queue            1408 drivers/nvme/target/rdma.c 		struct nvmet_rdma_queue *queue)
queue            1412 drivers/nvme/target/rdma.c 	if (queue) {
queue            1442 drivers/nvme/target/rdma.c 	struct nvmet_rdma_queue *queue = NULL;
queue            1446 drivers/nvme/target/rdma.c 		queue = cm_id->qp->qp_context;
queue            1457 drivers/nvme/target/rdma.c 		nvmet_rdma_queue_established(queue);
queue            1462 drivers/nvme/target/rdma.c 		nvmet_rdma_queue_disconnect(queue);
queue            1465 drivers/nvme/target/rdma.c 		ret = nvmet_rdma_device_removal(cm_id, queue);
queue            1473 drivers/nvme/target/rdma.c 		nvmet_rdma_queue_connect_fail(cm_id, queue);
queue            1486 drivers/nvme/target/rdma.c 	struct nvmet_rdma_queue *queue;
queue            1490 drivers/nvme/target/rdma.c 	list_for_each_entry(queue, &nvmet_rdma_queue_list, queue_list) {
queue            1491 drivers/nvme/target/rdma.c 		if (queue->nvme_sq.ctrl == ctrl) {
queue            1492 drivers/nvme/target/rdma.c 			list_del_init(&queue->queue_list);
queue            1495 drivers/nvme/target/rdma.c 			__nvmet_rdma_queue_disconnect(queue);
queue            1596 drivers/nvme/target/rdma.c 		struct rdma_cm_id *req_cm_id = rsp->queue->cm_id;
queue            1619 drivers/nvme/target/rdma.c 	struct nvmet_rdma_queue *queue, *tmp;
queue            1640 drivers/nvme/target/rdma.c 	list_for_each_entry_safe(queue, tmp, &nvmet_rdma_queue_list,
queue            1642 drivers/nvme/target/rdma.c 		if (queue->dev->device != ib_device)
queue            1645 drivers/nvme/target/rdma.c 		pr_info("Removing queue %d\n", queue->idx);
queue            1646 drivers/nvme/target/rdma.c 		list_del_init(&queue->queue_list);
queue            1647 drivers/nvme/target/rdma.c 		__nvmet_rdma_queue_disconnect(queue);
queue              46 drivers/nvme/target/tcp.c 	struct nvmet_tcp_queue		*queue;
queue             150 drivers/nvme/target/tcp.c static inline u16 nvmet_tcp_cmd_tag(struct nvmet_tcp_queue *queue,
queue             153 drivers/nvme/target/tcp.c 	return cmd - queue->cmds;
queue             181 drivers/nvme/target/tcp.c nvmet_tcp_get_cmd(struct nvmet_tcp_queue *queue)
queue             185 drivers/nvme/target/tcp.c 	cmd = list_first_entry_or_null(&queue->free_list,
queue             201 drivers/nvme/target/tcp.c 	if (unlikely(cmd == &cmd->queue->connect))
queue             204 drivers/nvme/target/tcp.c 	list_add_tail(&cmd->entry, &cmd->queue->free_list);
queue             207 drivers/nvme/target/tcp.c static inline u8 nvmet_tcp_hdgst_len(struct nvmet_tcp_queue *queue)
queue             209 drivers/nvme/target/tcp.c 	return queue->hdr_digest ? NVME_TCP_DIGEST_LENGTH : 0;
queue             212 drivers/nvme/target/tcp.c static inline u8 nvmet_tcp_ddgst_len(struct nvmet_tcp_queue *queue)
queue             214 drivers/nvme/target/tcp.c 	return queue->data_digest ? NVME_TCP_DIGEST_LENGTH : 0;
queue             227 drivers/nvme/target/tcp.c static int nvmet_tcp_verify_hdgst(struct nvmet_tcp_queue *queue,
queue             236 drivers/nvme/target/tcp.c 			queue->idx);
queue             241 drivers/nvme/target/tcp.c 	nvmet_tcp_hdgst(queue->rcv_hash, pdu, len);
queue             245 drivers/nvme/target/tcp.c 			queue->idx, le32_to_cpu(recv_digest),
queue             253 drivers/nvme/target/tcp.c static int nvmet_tcp_check_ddgst(struct nvmet_tcp_queue *queue, void *pdu)
queue             256 drivers/nvme/target/tcp.c 	u8 digest_len = nvmet_tcp_hdgst_len(queue);
queue             263 drivers/nvme/target/tcp.c 		pr_err("queue %d: data digest flag is cleared\n", queue->idx);
queue             309 drivers/nvme/target/tcp.c static void nvmet_tcp_fatal_error(struct nvmet_tcp_queue *queue)
queue             311 drivers/nvme/target/tcp.c 	queue->rcv_state = NVMET_TCP_RECV_ERR;
queue             312 drivers/nvme/target/tcp.c 	if (queue->nvme_sq.ctrl)
queue             313 drivers/nvme/target/tcp.c 		nvmet_ctrl_fatal_error(queue->nvme_sq.ctrl);
queue             315 drivers/nvme/target/tcp.c 		kernel_sock_shutdown(queue->sock, SHUT_RDWR);
queue             366 drivers/nvme/target/tcp.c 	struct nvmet_tcp_queue *queue = cmd->queue;
queue             367 drivers/nvme/target/tcp.c 	u8 hdgst = nvmet_tcp_hdgst_len(cmd->queue);
queue             368 drivers/nvme/target/tcp.c 	u8 ddgst = nvmet_tcp_ddgst_len(cmd->queue);
queue             374 drivers/nvme/target/tcp.c 	pdu->hdr.flags = NVME_TCP_F_DATA_LAST | (queue->nvme_sq.sqhd_disabled ?
queue             385 drivers/nvme/target/tcp.c 	if (queue->data_digest) {
queue             387 drivers/nvme/target/tcp.c 		nvmet_tcp_ddgst(queue->snd_hash, cmd);
queue             390 drivers/nvme/target/tcp.c 	if (cmd->queue->hdr_digest) {
queue             392 drivers/nvme/target/tcp.c 		nvmet_tcp_hdgst(queue->snd_hash, pdu, sizeof(*pdu));
queue             399 drivers/nvme/target/tcp.c 	struct nvmet_tcp_queue *queue = cmd->queue;
queue             400 drivers/nvme/target/tcp.c 	u8 hdgst = nvmet_tcp_hdgst_len(cmd->queue);
queue             412 drivers/nvme/target/tcp.c 	pdu->ttag = nvmet_tcp_cmd_tag(cmd->queue, cmd);
queue             415 drivers/nvme/target/tcp.c 	if (cmd->queue->hdr_digest) {
queue             417 drivers/nvme/target/tcp.c 		nvmet_tcp_hdgst(queue->snd_hash, pdu, sizeof(*pdu));
queue             424 drivers/nvme/target/tcp.c 	struct nvmet_tcp_queue *queue = cmd->queue;
queue             425 drivers/nvme/target/tcp.c 	u8 hdgst = nvmet_tcp_hdgst_len(cmd->queue);
queue             435 drivers/nvme/target/tcp.c 	if (cmd->queue->hdr_digest) {
queue             437 drivers/nvme/target/tcp.c 		nvmet_tcp_hdgst(queue->snd_hash, pdu, sizeof(*pdu));
queue             441 drivers/nvme/target/tcp.c static void nvmet_tcp_process_resp_list(struct nvmet_tcp_queue *queue)
queue             445 drivers/nvme/target/tcp.c 	node = llist_del_all(&queue->resp_list);
queue             453 drivers/nvme/target/tcp.c 		list_add(&cmd->entry, &queue->resp_send_list);
queue             455 drivers/nvme/target/tcp.c 		queue->send_list_len++;
queue             459 drivers/nvme/target/tcp.c static struct nvmet_tcp_cmd *nvmet_tcp_fetch_cmd(struct nvmet_tcp_queue *queue)
queue             461 drivers/nvme/target/tcp.c 	queue->snd_cmd = list_first_entry_or_null(&queue->resp_send_list,
queue             463 drivers/nvme/target/tcp.c 	if (!queue->snd_cmd) {
queue             464 drivers/nvme/target/tcp.c 		nvmet_tcp_process_resp_list(queue);
queue             465 drivers/nvme/target/tcp.c 		queue->snd_cmd =
queue             466 drivers/nvme/target/tcp.c 			list_first_entry_or_null(&queue->resp_send_list,
queue             468 drivers/nvme/target/tcp.c 		if (unlikely(!queue->snd_cmd))
queue             472 drivers/nvme/target/tcp.c 	list_del_init(&queue->snd_cmd->entry);
queue             473 drivers/nvme/target/tcp.c 	queue->send_list_len--;
queue             475 drivers/nvme/target/tcp.c 	if (nvmet_tcp_need_data_out(queue->snd_cmd))
queue             476 drivers/nvme/target/tcp.c 		nvmet_setup_c2h_data_pdu(queue->snd_cmd);
queue             477 drivers/nvme/target/tcp.c 	else if (nvmet_tcp_need_data_in(queue->snd_cmd))
queue             478 drivers/nvme/target/tcp.c 		nvmet_setup_r2t_pdu(queue->snd_cmd);
queue             480 drivers/nvme/target/tcp.c 		nvmet_setup_response_pdu(queue->snd_cmd);
queue             482 drivers/nvme/target/tcp.c 	return queue->snd_cmd;
queue             489 drivers/nvme/target/tcp.c 	struct nvmet_tcp_queue	*queue = cmd->queue;
queue             491 drivers/nvme/target/tcp.c 	llist_add(&cmd->lentry, &queue->resp_list);
queue             492 drivers/nvme/target/tcp.c 	queue_work_on(cmd->queue->cpu, nvmet_tcp_wq, &cmd->queue->io_work);
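nvmet_tcp_queue_response() and nvmet_tcp_process_resp_list() above form a lock-free multi-producer, single-consumer handoff: completion contexts llist_add() responses onto resp_list, and io_work detaches the whole chain at once with llist_del_all() before splicing it into the ordered resp_send_list. A userspace sketch of the same Treiber-stack idea with C11 atomics (lnode and the helper names are invented):

    #include <stdatomic.h>
    #include <stdio.h>

    struct lnode { struct lnode *next; int id; };

    static _Atomic(struct lnode *) resp_list;

    static void llist_push(struct lnode *n)         /* many producers */
    {
            struct lnode *old = atomic_load(&resp_list);
            do {
                    n->next = old;
            } while (!atomic_compare_exchange_weak(&resp_list, &old, n));
    }

    static struct lnode *llist_del_all(void)        /* one consumer */
    {
            return atomic_exchange(&resp_list, NULL);
    }

    int main(void)
    {
            struct lnode a = { NULL, 1 }, b = { NULL, 2 };
            llist_push(&a);
            llist_push(&b);
            for (struct lnode *n = llist_del_all(); n; n = n->next)
                    printf("draining %d\n", n->id); /* LIFO: 2 then 1 */
            return 0;
    }

The kernel side re-links the detached chain into resp_send_list so sends go out in a sensible order; the sketch keeps only the detach step.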
queue             497 drivers/nvme/target/tcp.c 	u8 hdgst = nvmet_tcp_hdgst_len(cmd->queue);
queue             501 drivers/nvme/target/tcp.c 	ret = kernel_sendpage(cmd->queue->sock, virt_to_page(cmd->data_pdu),
queue             520 drivers/nvme/target/tcp.c 	struct nvmet_tcp_queue *queue = cmd->queue;
queue             528 drivers/nvme/target/tcp.c 		if ((!last_in_batch && cmd->queue->send_list_len) ||
queue             530 drivers/nvme/target/tcp.c 		    queue->data_digest || !queue->nvme_sq.sqhd_disabled)
queue             533 drivers/nvme/target/tcp.c 		ret = kernel_sendpage(cmd->queue->sock, page, cmd->offset,
queue             548 drivers/nvme/target/tcp.c 	if (queue->data_digest) {
queue             552 drivers/nvme/target/tcp.c 		if (queue->nvme_sq.sqhd_disabled) {
queue             553 drivers/nvme/target/tcp.c 			cmd->queue->snd_cmd = NULL;
queue             560 drivers/nvme/target/tcp.c 	if (queue->nvme_sq.sqhd_disabled) {
queue             572 drivers/nvme/target/tcp.c 	u8 hdgst = nvmet_tcp_hdgst_len(cmd->queue);
queue             577 drivers/nvme/target/tcp.c 	if (!last_in_batch && cmd->queue->send_list_len)
queue             582 drivers/nvme/target/tcp.c 	ret = kernel_sendpage(cmd->queue->sock, virt_to_page(cmd->rsp_pdu),
queue             594 drivers/nvme/target/tcp.c 	cmd->queue->snd_cmd = NULL;
queue             601 drivers/nvme/target/tcp.c 	u8 hdgst = nvmet_tcp_hdgst_len(cmd->queue);
queue             606 drivers/nvme/target/tcp.c 	if (!last_in_batch && cmd->queue->send_list_len)
queue             611 drivers/nvme/target/tcp.c 	ret = kernel_sendpage(cmd->queue->sock, virt_to_page(cmd->r2t_pdu),
queue             621 drivers/nvme/target/tcp.c 	cmd->queue->snd_cmd = NULL;
queue             627 drivers/nvme/target/tcp.c 	struct nvmet_tcp_queue *queue = cmd->queue;
queue             635 drivers/nvme/target/tcp.c 	ret = kernel_sendmsg(queue->sock, &msg, &iov, 1, iov.iov_len);
queue             641 drivers/nvme/target/tcp.c 	if (queue->nvme_sq.sqhd_disabled) {
queue             642 drivers/nvme/target/tcp.c 		cmd->queue->snd_cmd = NULL;
queue             650 drivers/nvme/target/tcp.c static int nvmet_tcp_try_send_one(struct nvmet_tcp_queue *queue,
queue             653 drivers/nvme/target/tcp.c 	struct nvmet_tcp_cmd *cmd = queue->snd_cmd;
queue             656 drivers/nvme/target/tcp.c 	if (!cmd || queue->state == NVMET_TCP_Q_DISCONNECTING) {
queue             657 drivers/nvme/target/tcp.c 		cmd = nvmet_tcp_fetch_cmd(queue);
queue             699 drivers/nvme/target/tcp.c static int nvmet_tcp_try_send(struct nvmet_tcp_queue *queue,
queue             705 drivers/nvme/target/tcp.c 		ret = nvmet_tcp_try_send_one(queue, i == budget - 1);
queue             714 drivers/nvme/target/tcp.c static void nvmet_prepare_receive_pdu(struct nvmet_tcp_queue *queue)
queue             716 drivers/nvme/target/tcp.c 	queue->offset = 0;
queue             717 drivers/nvme/target/tcp.c 	queue->left = sizeof(struct nvme_tcp_hdr);
queue             718 drivers/nvme/target/tcp.c 	queue->cmd = NULL;
queue             719 drivers/nvme/target/tcp.c 	queue->rcv_state = NVMET_TCP_RECV_PDU;
queue             722 drivers/nvme/target/tcp.c static void nvmet_tcp_free_crypto(struct nvmet_tcp_queue *queue)
queue             724 drivers/nvme/target/tcp.c 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(queue->rcv_hash);
queue             726 drivers/nvme/target/tcp.c 	ahash_request_free(queue->rcv_hash);
queue             727 drivers/nvme/target/tcp.c 	ahash_request_free(queue->snd_hash);
queue             731 drivers/nvme/target/tcp.c static int nvmet_tcp_alloc_crypto(struct nvmet_tcp_queue *queue)
queue             739 drivers/nvme/target/tcp.c 	queue->snd_hash = ahash_request_alloc(tfm, GFP_KERNEL);
queue             740 drivers/nvme/target/tcp.c 	if (!queue->snd_hash)
queue             742 drivers/nvme/target/tcp.c 	ahash_request_set_callback(queue->snd_hash, 0, NULL, NULL);
queue             744 drivers/nvme/target/tcp.c 	queue->rcv_hash = ahash_request_alloc(tfm, GFP_KERNEL);
queue             745 drivers/nvme/target/tcp.c 	if (!queue->rcv_hash)
queue             747 drivers/nvme/target/tcp.c 	ahash_request_set_callback(queue->rcv_hash, 0, NULL, NULL);
queue             751 drivers/nvme/target/tcp.c 	ahash_request_free(queue->snd_hash);
queue             758 drivers/nvme/target/tcp.c static int nvmet_tcp_handle_icreq(struct nvmet_tcp_queue *queue)
queue             760 drivers/nvme/target/tcp.c 	struct nvme_tcp_icreq_pdu *icreq = &queue->pdu.icreq;
queue             761 drivers/nvme/target/tcp.c 	struct nvme_tcp_icresp_pdu *icresp = &queue->pdu.icresp;
queue             769 drivers/nvme/target/tcp.c 		nvmet_tcp_fatal_error(queue);
queue             773 drivers/nvme/target/tcp.c 		pr_err("queue %d: bad pfv %d\n", queue->idx, icreq->pfv);
queue             778 drivers/nvme/target/tcp.c 		pr_err("queue %d: unsupported hpda %d\n", queue->idx,
queue             783 drivers/nvme/target/tcp.c 	queue->hdr_digest = !!(icreq->digest & NVME_TCP_HDR_DIGEST_ENABLE);
queue             784 drivers/nvme/target/tcp.c 	queue->data_digest = !!(icreq->digest & NVME_TCP_DATA_DIGEST_ENABLE);
queue             785 drivers/nvme/target/tcp.c 	if (queue->hdr_digest || queue->data_digest) {
queue             786 drivers/nvme/target/tcp.c 		ret = nvmet_tcp_alloc_crypto(queue);
queue             799 drivers/nvme/target/tcp.c 	if (queue->hdr_digest)
queue             801 drivers/nvme/target/tcp.c 	if (queue->data_digest)
queue             806 drivers/nvme/target/tcp.c 	ret = kernel_sendmsg(queue->sock, &msg, &iov, 1, iov.iov_len);
queue             810 drivers/nvme/target/tcp.c 	queue->state = NVMET_TCP_Q_LIVE;
queue             811 drivers/nvme/target/tcp.c 	nvmet_prepare_receive_pdu(queue);
queue             814 drivers/nvme/target/tcp.c 	if (queue->hdr_digest || queue->data_digest)
queue             815 drivers/nvme/target/tcp.c 		nvmet_tcp_free_crypto(queue);
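nvmet_tcp_handle_icreq() above negotiates digests by mirroring the host's ICReq flags: hdr_digest and data_digest are taken from the digest-enable bits, hash contexts are allocated only if at least one is set, and the same bits are echoed back in the ICResp. A self-contained restatement of the flag handling (constants redefined locally for illustration, matching the bit positions used by the NVMe/TCP spec as far as I know):

    #include <stdbool.h>
    #include <stdio.h>

    #define HDR_DIGEST_ENABLE  (1 << 0)
    #define DATA_DIGEST_ENABLE (1 << 1)

    struct q { bool hdr_digest, data_digest; };

    static void negotiate(struct q *q, unsigned char icreq_digest)
    {
            q->hdr_digest  = !!(icreq_digest & HDR_DIGEST_ENABLE);
            q->data_digest = !!(icreq_digest & DATA_DIGEST_ENABLE);
    }

    int main(void)
    {
            struct q q;
            negotiate(&q, HDR_DIGEST_ENABLE);
            printf("hdgst=%d ddgst=%d\n", q.hdr_digest, q.data_digest);
            return 0;
    }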
queue             819 drivers/nvme/target/tcp.c static void nvmet_tcp_handle_req_failure(struct nvmet_tcp_queue *queue,
queue             829 drivers/nvme/target/tcp.c 		nvmet_prepare_receive_pdu(queue);
queue             835 drivers/nvme/target/tcp.c 		pr_err("queue %d: failed to map data\n", queue->idx);
queue             836 drivers/nvme/target/tcp.c 		nvmet_tcp_fatal_error(queue);
queue             840 drivers/nvme/target/tcp.c 	queue->rcv_state = NVMET_TCP_RECV_DATA;
queue             845 drivers/nvme/target/tcp.c static int nvmet_tcp_handle_h2c_data_pdu(struct nvmet_tcp_queue *queue)
queue             847 drivers/nvme/target/tcp.c 	struct nvme_tcp_data_pdu *data = &queue->pdu.data;
queue             850 drivers/nvme/target/tcp.c 	cmd = &queue->cmds[data->ttag];
queue             865 drivers/nvme/target/tcp.c 	queue->cmd = cmd;
queue             866 drivers/nvme/target/tcp.c 	queue->rcv_state = NVMET_TCP_RECV_DATA;
queue             871 drivers/nvme/target/tcp.c static int nvmet_tcp_done_recv_pdu(struct nvmet_tcp_queue *queue)
queue             873 drivers/nvme/target/tcp.c 	struct nvme_tcp_hdr *hdr = &queue->pdu.cmd.hdr;
queue             874 drivers/nvme/target/tcp.c 	struct nvme_command *nvme_cmd = &queue->pdu.cmd.cmd;
queue             878 drivers/nvme/target/tcp.c 	if (unlikely(queue->state == NVMET_TCP_Q_CONNECTING)) {
queue             882 drivers/nvme/target/tcp.c 			nvmet_tcp_fatal_error(queue);
queue             885 drivers/nvme/target/tcp.c 		return nvmet_tcp_handle_icreq(queue);
queue             889 drivers/nvme/target/tcp.c 		ret = nvmet_tcp_handle_h2c_data_pdu(queue);
queue             895 drivers/nvme/target/tcp.c 	queue->cmd = nvmet_tcp_get_cmd(queue);
queue             896 drivers/nvme/target/tcp.c 	if (unlikely(!queue->cmd)) {
queue             899 drivers/nvme/target/tcp.c 			queue->idx, queue->nr_cmds, queue->send_list_len,
queue             901 drivers/nvme/target/tcp.c 		nvmet_tcp_fatal_error(queue);
queue             905 drivers/nvme/target/tcp.c 	req = &queue->cmd->req;
queue             908 drivers/nvme/target/tcp.c 	if (unlikely(!nvmet_req_init(req, &queue->nvme_cq,
queue             909 drivers/nvme/target/tcp.c 			&queue->nvme_sq, &nvmet_tcp_ops))) {
queue             915 drivers/nvme/target/tcp.c 		nvmet_tcp_handle_req_failure(queue, queue->cmd, req);
queue             919 drivers/nvme/target/tcp.c 	ret = nvmet_tcp_map_data(queue->cmd);
queue             921 drivers/nvme/target/tcp.c 		pr_err("queue %d: failed to map data\n", queue->idx);
queue             922 drivers/nvme/target/tcp.c 		if (nvmet_tcp_has_inline_data(queue->cmd))
queue             923 drivers/nvme/target/tcp.c 			nvmet_tcp_fatal_error(queue);
queue             930 drivers/nvme/target/tcp.c 	if (nvmet_tcp_need_data_in(queue->cmd)) {
queue             931 drivers/nvme/target/tcp.c 		if (nvmet_tcp_has_inline_data(queue->cmd)) {
queue             932 drivers/nvme/target/tcp.c 			queue->rcv_state = NVMET_TCP_RECV_DATA;
queue             933 drivers/nvme/target/tcp.c 			nvmet_tcp_map_pdu_iovec(queue->cmd);
queue             937 drivers/nvme/target/tcp.c 		nvmet_tcp_queue_response(&queue->cmd->req);
queue             941 drivers/nvme/target/tcp.c 	nvmet_req_execute(&queue->cmd->req);
queue             943 drivers/nvme/target/tcp.c 	nvmet_prepare_receive_pdu(queue);
queue             975 drivers/nvme/target/tcp.c static int nvmet_tcp_try_recv_pdu(struct nvmet_tcp_queue *queue)
queue             977 drivers/nvme/target/tcp.c 	struct nvme_tcp_hdr *hdr = &queue->pdu.cmd.hdr;
queue             983 drivers/nvme/target/tcp.c 	iov.iov_base = (void *)&queue->pdu + queue->offset;
queue             984 drivers/nvme/target/tcp.c 	iov.iov_len = queue->left;
queue             985 drivers/nvme/target/tcp.c 	len = kernel_recvmsg(queue->sock, &msg, &iov, 1,
queue             990 drivers/nvme/target/tcp.c 	queue->offset += len;
queue             991 drivers/nvme/target/tcp.c 	queue->left -= len;
queue             992 drivers/nvme/target/tcp.c 	if (queue->left)
queue             995 drivers/nvme/target/tcp.c 	if (queue->offset == sizeof(struct nvme_tcp_hdr)) {
queue             996 drivers/nvme/target/tcp.c 		u8 hdgst = nvmet_tcp_hdgst_len(queue);
queue            1000 drivers/nvme/target/tcp.c 			nvmet_tcp_fatal_error(queue);
queue            1009 drivers/nvme/target/tcp.c 		queue->left = hdr->hlen - queue->offset + hdgst;
queue            1013 drivers/nvme/target/tcp.c 	if (queue->hdr_digest &&
queue            1014 drivers/nvme/target/tcp.c 	    nvmet_tcp_verify_hdgst(queue, &queue->pdu, queue->offset)) {
queue            1015 drivers/nvme/target/tcp.c 		nvmet_tcp_fatal_error(queue); /* fatal */
queue            1019 drivers/nvme/target/tcp.c 	if (queue->data_digest &&
queue            1020 drivers/nvme/target/tcp.c 	    nvmet_tcp_check_ddgst(queue, &queue->pdu)) {
queue            1021 drivers/nvme/target/tcp.c 		nvmet_tcp_fatal_error(queue); /* fatal */
queue            1025 drivers/nvme/target/tcp.c 	return nvmet_tcp_done_recv_pdu(queue);
queue            1030 drivers/nvme/target/tcp.c 	struct nvmet_tcp_queue *queue = cmd->queue;
queue            1032 drivers/nvme/target/tcp.c 	nvmet_tcp_ddgst(queue->rcv_hash, cmd);
queue            1033 drivers/nvme/target/tcp.c 	queue->offset = 0;
queue            1034 drivers/nvme/target/tcp.c 	queue->left = NVME_TCP_DIGEST_LENGTH;
queue            1035 drivers/nvme/target/tcp.c 	queue->rcv_state = NVMET_TCP_RECV_DDGST;
queue            1038 drivers/nvme/target/tcp.c static int nvmet_tcp_try_recv_data(struct nvmet_tcp_queue *queue)
queue            1040 drivers/nvme/target/tcp.c 	struct nvmet_tcp_cmd  *cmd = queue->cmd;
queue            1044 drivers/nvme/target/tcp.c 		ret = sock_recvmsg(cmd->queue->sock, &cmd->recv_msg,
queue            1057 drivers/nvme/target/tcp.c 		if (queue->data_digest) {
queue            1064 drivers/nvme/target/tcp.c 	nvmet_prepare_receive_pdu(queue);
queue            1068 drivers/nvme/target/tcp.c static int nvmet_tcp_try_recv_ddgst(struct nvmet_tcp_queue *queue)
queue            1070 drivers/nvme/target/tcp.c 	struct nvmet_tcp_cmd *cmd = queue->cmd;
queue            1074 drivers/nvme/target/tcp.c 		.iov_base = (void *)&cmd->recv_ddgst + queue->offset,
queue            1075 drivers/nvme/target/tcp.c 		.iov_len = queue->left
queue            1078 drivers/nvme/target/tcp.c 	ret = kernel_recvmsg(queue->sock, &msg, &iov, 1,
queue            1083 drivers/nvme/target/tcp.c 	queue->offset += ret;
queue            1084 drivers/nvme/target/tcp.c 	queue->left -= ret;
queue            1085 drivers/nvme/target/tcp.c 	if (queue->left)
queue            1088 drivers/nvme/target/tcp.c 	if (queue->data_digest && cmd->exp_ddgst != cmd->recv_ddgst) {
queue            1090 drivers/nvme/target/tcp.c 			queue->idx, cmd->req.cmd->common.command_id,
queue            1091 drivers/nvme/target/tcp.c 			queue->pdu.cmd.hdr.type, le32_to_cpu(cmd->recv_ddgst),
queue            1094 drivers/nvme/target/tcp.c 		nvmet_tcp_fatal_error(queue);
queue            1104 drivers/nvme/target/tcp.c 	nvmet_prepare_receive_pdu(queue);
queue            1108 drivers/nvme/target/tcp.c static int nvmet_tcp_try_recv_one(struct nvmet_tcp_queue *queue)
queue            1112 drivers/nvme/target/tcp.c 	if (unlikely(queue->rcv_state == NVMET_TCP_RECV_ERR))
queue            1115 drivers/nvme/target/tcp.c 	if (queue->rcv_state == NVMET_TCP_RECV_PDU) {
queue            1116 drivers/nvme/target/tcp.c 		result = nvmet_tcp_try_recv_pdu(queue);
queue            1121 drivers/nvme/target/tcp.c 	if (queue->rcv_state == NVMET_TCP_RECV_DATA) {
queue            1122 drivers/nvme/target/tcp.c 		result = nvmet_tcp_try_recv_data(queue);
queue            1127 drivers/nvme/target/tcp.c 	if (queue->rcv_state == NVMET_TCP_RECV_DDGST) {
queue            1128 drivers/nvme/target/tcp.c 		result = nvmet_tcp_try_recv_ddgst(queue);
queue            1142 drivers/nvme/target/tcp.c static int nvmet_tcp_try_recv(struct nvmet_tcp_queue *queue,
queue            1148 drivers/nvme/target/tcp.c 		ret = nvmet_tcp_try_recv_one(queue);
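nvmet_tcp_try_recv_one() and nvmet_tcp_try_recv() above run the per-queue receive state machine (PDU header, then data, then data digest) under a budget, so a stream of inbound PDUs cannot pin io_work indefinitely; the caller re-queues the work item when the budget is exhausted. A toy model of the dispatch-plus-budget structure (states advance unconditionally here, where the real code advances only once enough bytes have arrived):

    #include <stdio.h>

    enum rcv_state { RECV_PDU, RECV_DATA, RECV_DDGST, RECV_ERR };

    static int try_recv_one(enum rcv_state *st)
    {
            switch (*st) {
            case RECV_PDU:   *st = RECV_DATA;  return 1;
            case RECV_DATA:  *st = RECV_DDGST; return 1;
            case RECV_DDGST: *st = RECV_PDU;   return 1; /* pdu done */
            default:         return -1;
            }
    }

    static int try_recv(enum rcv_state *st, int budget, int *ops)
    {
            int i, ret = 0;

            for (i = 0; i < budget; i++) {
                    ret = try_recv_one(st);
                    if (ret <= 0)
                            break;
                    (*ops)++;       /* caller re-queues if budget spent */
            }
            return ret;
    }

    int main(void)
    {
            enum rcv_state st = RECV_PDU;
            int ops = 0;
            try_recv(&st, 8, &ops);
            printf("ops=%d\n", ops);
            return 0;
    }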
queue            1157 drivers/nvme/target/tcp.c static void nvmet_tcp_schedule_release_queue(struct nvmet_tcp_queue *queue)
queue            1159 drivers/nvme/target/tcp.c 	spin_lock(&queue->state_lock);
queue            1160 drivers/nvme/target/tcp.c 	if (queue->state != NVMET_TCP_Q_DISCONNECTING) {
queue            1161 drivers/nvme/target/tcp.c 		queue->state = NVMET_TCP_Q_DISCONNECTING;
queue            1162 drivers/nvme/target/tcp.c 		schedule_work(&queue->release_work);
queue            1164 drivers/nvme/target/tcp.c 	spin_unlock(&queue->state_lock);
queue            1169 drivers/nvme/target/tcp.c 	struct nvmet_tcp_queue *queue =
queue            1177 drivers/nvme/target/tcp.c 		ret = nvmet_tcp_try_recv(queue, NVMET_TCP_RECV_BUDGET, &ops);
queue            1182 drivers/nvme/target/tcp.c 				kernel_sock_shutdown(queue->sock, SHUT_RDWR);
queue            1184 drivers/nvme/target/tcp.c 				nvmet_tcp_fatal_error(queue);
queue            1188 drivers/nvme/target/tcp.c 		ret = nvmet_tcp_try_send(queue, NVMET_TCP_SEND_BUDGET, &ops);
queue            1194 drivers/nvme/target/tcp.c 				kernel_sock_shutdown(queue->sock, SHUT_RDWR);
queue            1196 drivers/nvme/target/tcp.c 				nvmet_tcp_fatal_error(queue);
queue            1206 drivers/nvme/target/tcp.c 		queue_work_on(queue->cpu, nvmet_tcp_wq, &queue->io_work);
queue            1209 drivers/nvme/target/tcp.c static int nvmet_tcp_alloc_cmd(struct nvmet_tcp_queue *queue,
queue            1212 drivers/nvme/target/tcp.c 	u8 hdgst = nvmet_tcp_hdgst_len(queue);
queue            1214 drivers/nvme/target/tcp.c 	c->queue = queue;
queue            1215 drivers/nvme/target/tcp.c 	c->req.port = queue->port->nport;
queue            1217 drivers/nvme/target/tcp.c 	c->cmd_pdu = page_frag_alloc(&queue->pf_cache,
queue            1223 drivers/nvme/target/tcp.c 	c->rsp_pdu = page_frag_alloc(&queue->pf_cache,
queue            1229 drivers/nvme/target/tcp.c 	c->data_pdu = page_frag_alloc(&queue->pf_cache,
queue            1234 drivers/nvme/target/tcp.c 	c->r2t_pdu = page_frag_alloc(&queue->pf_cache,
queue            1241 drivers/nvme/target/tcp.c 	list_add_tail(&c->entry, &queue->free_list);
queue            1261 drivers/nvme/target/tcp.c static int nvmet_tcp_alloc_cmds(struct nvmet_tcp_queue *queue)
queue            1264 drivers/nvme/target/tcp.c 	int i, ret = -EINVAL, nr_cmds = queue->nr_cmds;
queue            1271 drivers/nvme/target/tcp.c 		ret = nvmet_tcp_alloc_cmd(queue, cmds + i);
queue            1276 drivers/nvme/target/tcp.c 	queue->cmds = cmds;
queue            1287 drivers/nvme/target/tcp.c static void nvmet_tcp_free_cmds(struct nvmet_tcp_queue *queue)
queue            1289 drivers/nvme/target/tcp.c 	struct nvmet_tcp_cmd *cmds = queue->cmds;
queue            1292 drivers/nvme/target/tcp.c 	for (i = 0; i < queue->nr_cmds; i++)
queue            1295 drivers/nvme/target/tcp.c 	nvmet_tcp_free_cmd(&queue->connect);
queue            1299 drivers/nvme/target/tcp.c static void nvmet_tcp_restore_socket_callbacks(struct nvmet_tcp_queue *queue)
queue            1301 drivers/nvme/target/tcp.c 	struct socket *sock = queue->sock;
queue            1304 drivers/nvme/target/tcp.c 	sock->sk->sk_data_ready =  queue->data_ready;
queue            1305 drivers/nvme/target/tcp.c 	sock->sk->sk_state_change = queue->state_change;
queue            1306 drivers/nvme/target/tcp.c 	sock->sk->sk_write_space = queue->write_space;
queue            1319 drivers/nvme/target/tcp.c static void nvmet_tcp_uninit_data_in_cmds(struct nvmet_tcp_queue *queue)
queue            1321 drivers/nvme/target/tcp.c 	struct nvmet_tcp_cmd *cmd = queue->cmds;
queue            1324 drivers/nvme/target/tcp.c 	for (i = 0; i < queue->nr_cmds; i++, cmd++) {
queue            1329 drivers/nvme/target/tcp.c 	if (!queue->nr_cmds && nvmet_tcp_need_data_in(&queue->connect)) {
queue            1331 drivers/nvme/target/tcp.c 		nvmet_tcp_finish_cmd(&queue->connect);
queue            1337 drivers/nvme/target/tcp.c 	struct nvmet_tcp_queue *queue =
queue            1341 drivers/nvme/target/tcp.c 	list_del_init(&queue->queue_list);
queue            1344 drivers/nvme/target/tcp.c 	nvmet_tcp_restore_socket_callbacks(queue);
queue            1345 drivers/nvme/target/tcp.c 	flush_work(&queue->io_work);
queue            1347 drivers/nvme/target/tcp.c 	nvmet_tcp_uninit_data_in_cmds(queue);
queue            1348 drivers/nvme/target/tcp.c 	nvmet_sq_destroy(&queue->nvme_sq);
queue            1349 drivers/nvme/target/tcp.c 	cancel_work_sync(&queue->io_work);
queue            1350 drivers/nvme/target/tcp.c 	sock_release(queue->sock);
queue            1351 drivers/nvme/target/tcp.c 	nvmet_tcp_free_cmds(queue);
queue            1352 drivers/nvme/target/tcp.c 	if (queue->hdr_digest || queue->data_digest)
queue            1353 drivers/nvme/target/tcp.c 		nvmet_tcp_free_crypto(queue);
queue            1354 drivers/nvme/target/tcp.c 	ida_simple_remove(&nvmet_tcp_queue_ida, queue->idx);
queue            1356 drivers/nvme/target/tcp.c 	kfree(queue);
queue            1361 drivers/nvme/target/tcp.c 	struct nvmet_tcp_queue *queue;
queue            1364 drivers/nvme/target/tcp.c 	queue = sk->sk_user_data;
queue            1365 drivers/nvme/target/tcp.c 	if (likely(queue))
queue            1366 drivers/nvme/target/tcp.c 		queue_work_on(queue->cpu, nvmet_tcp_wq, &queue->io_work);
queue            1372 drivers/nvme/target/tcp.c 	struct nvmet_tcp_queue *queue;
queue            1375 drivers/nvme/target/tcp.c 	queue = sk->sk_user_data;
queue            1376 drivers/nvme/target/tcp.c 	if (unlikely(!queue))
queue            1379 drivers/nvme/target/tcp.c 	if (unlikely(queue->state == NVMET_TCP_Q_CONNECTING)) {
queue            1380 drivers/nvme/target/tcp.c 		queue->write_space(sk);
queue            1386 drivers/nvme/target/tcp.c 		queue_work_on(queue->cpu, nvmet_tcp_wq, &queue->io_work);
queue            1394 drivers/nvme/target/tcp.c 	struct nvmet_tcp_queue *queue;
queue            1397 drivers/nvme/target/tcp.c 	queue = sk->sk_user_data;
queue            1398 drivers/nvme/target/tcp.c 	if (!queue)
queue            1407 drivers/nvme/target/tcp.c 		nvmet_tcp_schedule_release_queue(queue);
queue            1411 drivers/nvme/target/tcp.c 			queue->idx, sk->sk_state);
queue            1417 drivers/nvme/target/tcp.c static int nvmet_tcp_set_queue_sock(struct nvmet_tcp_queue *queue)
queue            1419 drivers/nvme/target/tcp.c 	struct socket *sock = queue->sock;
queue            1425 drivers/nvme/target/tcp.c 		(struct sockaddr *)&queue->sockaddr);
queue            1430 drivers/nvme/target/tcp.c 		(struct sockaddr *)&queue->sockaddr_peer);
queue            1455 drivers/nvme/target/tcp.c 	sock->sk->sk_user_data = queue;
queue            1456 drivers/nvme/target/tcp.c 	queue->data_ready = sock->sk->sk_data_ready;
queue            1458 drivers/nvme/target/tcp.c 	queue->state_change = sock->sk->sk_state_change;
queue            1460 drivers/nvme/target/tcp.c 	queue->write_space = sock->sk->sk_write_space;
queue            1470 drivers/nvme/target/tcp.c 	struct nvmet_tcp_queue *queue;
queue            1473 drivers/nvme/target/tcp.c 	queue = kzalloc(sizeof(*queue), GFP_KERNEL);
queue            1474 drivers/nvme/target/tcp.c 	if (!queue)
queue            1477 drivers/nvme/target/tcp.c 	INIT_WORK(&queue->release_work, nvmet_tcp_release_queue_work);
queue            1478 drivers/nvme/target/tcp.c 	INIT_WORK(&queue->io_work, nvmet_tcp_io_work);
queue            1479 drivers/nvme/target/tcp.c 	queue->sock = newsock;
queue            1480 drivers/nvme/target/tcp.c 	queue->port = port;
queue            1481 drivers/nvme/target/tcp.c 	queue->nr_cmds = 0;
queue            1482 drivers/nvme/target/tcp.c 	spin_lock_init(&queue->state_lock);
queue            1483 drivers/nvme/target/tcp.c 	queue->state = NVMET_TCP_Q_CONNECTING;
queue            1484 drivers/nvme/target/tcp.c 	INIT_LIST_HEAD(&queue->free_list);
queue            1485 drivers/nvme/target/tcp.c 	init_llist_head(&queue->resp_list);
queue            1486 drivers/nvme/target/tcp.c 	INIT_LIST_HEAD(&queue->resp_send_list);
queue            1488 drivers/nvme/target/tcp.c 	queue->idx = ida_simple_get(&nvmet_tcp_queue_ida, 0, 0, GFP_KERNEL);
queue            1489 drivers/nvme/target/tcp.c 	if (queue->idx < 0) {
queue            1490 drivers/nvme/target/tcp.c 		ret = queue->idx;
queue            1494 drivers/nvme/target/tcp.c 	ret = nvmet_tcp_alloc_cmd(queue, &queue->connect);
queue            1498 drivers/nvme/target/tcp.c 	ret = nvmet_sq_init(&queue->nvme_sq);
queue            1504 drivers/nvme/target/tcp.c 	queue->cpu = port->last_cpu;
queue            1505 drivers/nvme/target/tcp.c 	nvmet_prepare_receive_pdu(queue);
queue            1508 drivers/nvme/target/tcp.c 	list_add_tail(&queue->queue_list, &nvmet_tcp_queue_list);
queue            1511 drivers/nvme/target/tcp.c 	ret = nvmet_tcp_set_queue_sock(queue);
queue            1515 drivers/nvme/target/tcp.c 	queue_work_on(queue->cpu, nvmet_tcp_wq, &queue->io_work);
queue            1520 drivers/nvme/target/tcp.c 	list_del_init(&queue->queue_list);
queue            1522 drivers/nvme/target/tcp.c 	nvmet_sq_destroy(&queue->nvme_sq);
queue            1524 drivers/nvme/target/tcp.c 	nvmet_tcp_free_cmd(&queue->connect);
queue            1526 drivers/nvme/target/tcp.c 	ida_simple_remove(&nvmet_tcp_queue_ida, queue->idx);
queue            1528 drivers/nvme/target/tcp.c 	kfree(queue);
queue            1675 drivers/nvme/target/tcp.c 	struct nvmet_tcp_queue *queue;
queue            1678 drivers/nvme/target/tcp.c 	list_for_each_entry(queue, &nvmet_tcp_queue_list, queue_list)
queue            1679 drivers/nvme/target/tcp.c 		if (queue->nvme_sq.ctrl == ctrl)
queue            1680 drivers/nvme/target/tcp.c 			kernel_sock_shutdown(queue->sock, SHUT_RDWR);
queue            1686 drivers/nvme/target/tcp.c 	struct nvmet_tcp_queue *queue =
queue            1694 drivers/nvme/target/tcp.c 	queue->nr_cmds = sq->size * 2;
queue            1695 drivers/nvme/target/tcp.c 	if (nvmet_tcp_alloc_cmds(queue))
queue            1708 drivers/nvme/target/tcp.c 		struct nvmet_tcp_queue *queue = cmd->queue;
queue            1710 drivers/nvme/target/tcp.c 		sprintf(traddr, "%pISc", (struct sockaddr *)&queue->sockaddr);
queue            1749 drivers/nvme/target/tcp.c 	struct nvmet_tcp_queue *queue;
queue            1755 drivers/nvme/target/tcp.c 	list_for_each_entry(queue, &nvmet_tcp_queue_list, queue_list)
queue            1756 drivers/nvme/target/tcp.c 		kernel_sock_shutdown(queue->sock, SHUT_RDWR);
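
The nvme-target TCP entries above outline a per-queue receive state machine: nvmet_tcp_try_recv_one() dispatches on queue->rcv_state (PDU header, data, then data digest), nvmet_tcp_try_recv() drives it under a budget, and nvmet_tcp_io_work() alternates receive and send budgets before requeueing itself on the queue's CPU. A minimal userspace sketch of that budgeted dispatch, with hypothetical states and handlers standing in for the driver's:

#include <stdio.h>

enum rcv_state { RECV_PDU, RECV_DATA, RECV_DDGST, RECV_ERR };

struct queue { enum rcv_state rcv_state; int pending; };

/* Each handler consumes one unit of work and advances the state. */
static int try_recv_pdu(struct queue *q)   { q->rcv_state = RECV_DATA;  return 1; }
static int try_recv_data(struct queue *q)  { q->rcv_state = RECV_DDGST; return 1; }
static int try_recv_ddgst(struct queue *q) { q->rcv_state = RECV_PDU; q->pending--; return 1; }

/* Models nvmet_tcp_try_recv_one(): dispatch on the current state;
 * <= 0 means "error or no progress". */
static int try_recv_one(struct queue *q)
{
	if (q->rcv_state == RECV_ERR || !q->pending)
		return 0;
	switch (q->rcv_state) {
	case RECV_PDU:   return try_recv_pdu(q);
	case RECV_DATA:  return try_recv_data(q);
	case RECV_DDGST: return try_recv_ddgst(q);
	default:         return 0;
	}
}

/* Models nvmet_tcp_try_recv(): loop until the budget is spent or the
 * handlers stop making progress, counting completed operations. */
static int try_recv(struct queue *q, int budget, int *ops)
{
	int i, ret;

	for (i = 0; i < budget; i++) {
		ret = try_recv_one(q);
		if (ret <= 0)
			return ret;
		(*ops)++;
	}
	return 1;
}

int main(void)
{
	struct queue q = { .rcv_state = RECV_PDU, .pending = 2 };
	int ops = 0;
	int again = try_recv(&q, 4, &ops);

	printf("ops=%d requeue=%d\n", ops, again > 0 && q.pending);
	return 0;
}

If the budget runs out while work remains, the real io_work requeues itself rather than spinning, which keeps one busy connection from monopolizing the workqueue worker.
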
queue             308 drivers/pci/hotplug/cpqphp.h 	wait_queue_head_t queue;	/* sleep & wake process */
queue             710 drivers/pci/hotplug/cpqphp.h 	add_wait_queue(&ctrl->queue, &wait);
queue             713 drivers/pci/hotplug/cpqphp.h 	remove_wait_queue(&ctrl->queue, &wait);
queue            1034 drivers/pci/hotplug/cpqphp_core.c 	init_waitqueue_head(&ctrl->queue);
queue             907 drivers/pci/hotplug/cpqphp_ctrl.c 		wake_up_interruptible(&ctrl->queue);
queue             937 drivers/pci/hotplug/cpqphp_ctrl.c 		wake_up_interruptible(&ctrl->queue);
queue              92 drivers/pci/hotplug/pciehp.h 	wait_queue_head_t queue;
queue             122 drivers/pci/hotplug/pciehp_hpc.c 		rc = wait_event_timeout(ctrl->queue, !ctrl->cmd_busy, timeout);
queue             598 drivers/pci/hotplug/pciehp_hpc.c 		wake_up(&ctrl->queue);
queue             880 drivers/pci/hotplug/pciehp_hpc.c 	init_waitqueue_head(&ctrl->queue);
queue              98 drivers/pci/hotplug/shpchp.h 	wait_queue_head_t queue;	/* sleep & wake process */
queue             280 drivers/pci/hotplug/shpchp_hpc.c 		rc = wait_event_interruptible_timeout(ctrl->queue,
queue             809 drivers/pci/hotplug/shpchp_hpc.c 		wake_up_interruptible(&ctrl->queue);
queue            1002 drivers/pci/hotplug/shpchp_hpc.c 	init_waitqueue_head(&ctrl->queue);
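
The hotplug controllers above (cpqphp, pciehp, shpchp) share one completion idiom: probe calls init_waitqueue_head(&ctrl->queue), the command issuer sleeps in wait_event_timeout(ctrl->queue, !ctrl->cmd_busy, timeout), and the interrupt handler clears the busy flag and wakes the waiter. A condensed kernel-style sketch of just that call pattern (not a buildable driver; cmd_busy handling is simplified, and the real drivers serialize it under their own locks):

#include <linux/wait.h>
#include <linux/types.h>

struct ctrl {
	wait_queue_head_t queue;
	bool cmd_busy;
};

static void ctrl_init(struct ctrl *ctrl)
{
	init_waitqueue_head(&ctrl->queue);
}

/* Issuer: returns 0 on timeout, remaining jiffies otherwise. */
static long ctrl_wait_cmd(struct ctrl *ctrl, unsigned long timeout)
{
	return wait_event_timeout(ctrl->queue, !ctrl->cmd_busy, timeout);
}

/* Interrupt handler: mark the command done and wake the issuer. */
static void ctrl_irq_complete(struct ctrl *ctrl)
{
	ctrl->cmd_busy = false;
	wake_up(&ctrl->queue);
}
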
queue             123 drivers/perf/fsl_imx8_ddr_perf.c 	IMX8_DDR_PMU_EVENT_ATTR(read-queue-depth, 0x08),
queue             124 drivers/perf/fsl_imx8_ddr_perf.c 	IMX8_DDR_PMU_EVENT_ATTR(write-queue-depth, 0x09),
queue             355 drivers/perf/xgene_pmu.c 	XGENE_PMU_EVENT_ATTR(collision-queue-not-empty,		0x10),
queue             356 drivers/perf/xgene_pmu.c 	XGENE_PMU_EVENT_ATTR(collision-queue-full,		0x11),
queue             565 drivers/perf/xgene_pmu.c 	XGENE_PMU_EVENT_ATTR(queue-fill-gt-thresh,		0x22),
queue             566 drivers/perf/xgene_pmu.c 	XGENE_PMU_EVENT_ATTR(queue-rds-gt-thresh,		0x23),
queue             567 drivers/perf/xgene_pmu.c 	XGENE_PMU_EVENT_ATTR(queue-wrs-gt-thresh,		0x24),
queue             104 drivers/pps/kapi.c 	init_waitqueue_head(&pps->queue);
queue             216 drivers/pps/kapi.c 		wake_up_interruptible_all(&pps->queue);
queue              42 drivers/pps/pps.c 	poll_wait(file, &pps->queue, wait);
queue              60 drivers/pps/pps.c 		err = wait_event_interruptible(pps->queue,
queue              73 drivers/pps/pps.c 					pps->queue,
queue             403 drivers/ptp/ptp_chardev.c 	struct timestamp_event_queue *queue = &ptp->tsevq;
queue             421 drivers/ptp/ptp_chardev.c 				     ptp->defunct || queue_cnt(queue))) {
queue             437 drivers/ptp/ptp_chardev.c 	spin_lock_irqsave(&queue->lock, flags);
queue             439 drivers/ptp/ptp_chardev.c 	qcnt = queue_cnt(queue);
queue             445 drivers/ptp/ptp_chardev.c 		event[i] = queue->buf[queue->head];
queue             446 drivers/ptp/ptp_chardev.c 		queue->head = (queue->head + 1) % PTP_MAX_TIMESTAMPS;
queue             449 drivers/ptp/ptp_chardev.c 	spin_unlock_irqrestore(&queue->lock, flags);
queue              41 drivers/ptp/ptp_clock.c static void enqueue_external_timestamp(struct timestamp_event_queue *queue,
queue              51 drivers/ptp/ptp_clock.c 	spin_lock_irqsave(&queue->lock, flags);
queue              53 drivers/ptp/ptp_clock.c 	dst = &queue->buf[queue->tail];
queue              58 drivers/ptp/ptp_clock.c 	if (!queue_free(queue))
queue              59 drivers/ptp/ptp_clock.c 		queue->head = (queue->head + 1) % PTP_MAX_TIMESTAMPS;
queue              61 drivers/ptp/ptp_clock.c 	queue->tail = (queue->tail + 1) % PTP_MAX_TIMESTAMPS;
queue              63 drivers/ptp/ptp_clock.c 	spin_unlock_irqrestore(&queue->lock, flags);
queue              66 drivers/ptp/ptp_sysfs.c 	struct timestamp_event_queue *queue = &ptp->tsevq;
queue              77 drivers/ptp/ptp_sysfs.c 	spin_lock_irqsave(&queue->lock, flags);
queue              78 drivers/ptp/ptp_sysfs.c 	qcnt = queue_cnt(queue);
queue              80 drivers/ptp/ptp_sysfs.c 		event = queue->buf[queue->head];
queue              81 drivers/ptp/ptp_sysfs.c 		queue->head = (queue->head + 1) % PTP_MAX_TIMESTAMPS;
queue              83 drivers/ptp/ptp_sysfs.c 	spin_unlock_irqrestore(&queue->lock, flags);
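
The pps/ptp entries model the timestamp FIFO as a fixed ring indexed by head and tail modulo PTP_MAX_TIMESTAMPS: enqueue_external_timestamp() writes at tail and, when queue_free() reports no space, advances head so the oldest event is overwritten rather than blocking the producer. A compilable userspace model of the same arithmetic (locking omitted; the driver holds queue->lock around both paths):

#include <stdio.h>

#define PTP_MAX_TIMESTAMPS 128	/* same bound as the excerpts */

struct tsevq {
	int buf[PTP_MAX_TIMESTAMPS];
	int head, tail;
};

static int queue_cnt(const struct tsevq *q)
{
	int cnt = q->tail - q->head;
	return cnt < 0 ? cnt + PTP_MAX_TIMESTAMPS : cnt;
}

/* One slot stays reserved so head == tail always means "empty". */
static int queue_free(const struct tsevq *q)
{
	return PTP_MAX_TIMESTAMPS - 1 - queue_cnt(q);
}

static void enqueue(struct tsevq *q, int ev)
{
	q->buf[q->tail] = ev;
	if (!queue_free(q))	/* full: drop the oldest event */
		q->head = (q->head + 1) % PTP_MAX_TIMESTAMPS;
	q->tail = (q->tail + 1) % PTP_MAX_TIMESTAMPS;
}

static int dequeue(struct tsevq *q, int *ev)
{
	if (!queue_cnt(q))
		return 0;
	*ev = q->buf[q->head];
	q->head = (q->head + 1) % PTP_MAX_TIMESTAMPS;
	return 1;
}

int main(void)
{
	struct tsevq q = { .head = 0, .tail = 0 };
	int ev;

	for (int i = 0; i < 200; i++)	/* deliberately overrun the ring */
		enqueue(&q, i);
	dequeue(&q, &ev);
	printf("cnt=%d oldest=%d\n", queue_cnt(&q), ev);
	return 0;
}
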
queue             705 drivers/rapidio/devices/tsi721.h 	struct list_head	queue;
queue             546 drivers/rapidio/devices/tsi721_dma.c 	if (!desc && !bdma_chan->active_tx && !list_empty(&bdma_chan->queue)) {
queue             547 drivers/rapidio/devices/tsi721_dma.c 		desc = list_first_entry(&bdma_chan->queue,
queue             714 drivers/rapidio/devices/tsi721_dma.c 	list_add_tail(&desc->desc_node, &bdma_chan->queue);
queue             922 drivers/rapidio/devices/tsi721_dma.c 	list_splice_init(&bdma_chan->queue, &list);
queue             988 drivers/rapidio/devices/tsi721_dma.c 		INIT_LIST_HEAD(&bdma_chan->queue);
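
The tsi721 entries use a plain list_head as the channel's software submit queue: submission appends with list_add_tail(), the completion path pops the head with list_first_entry() when no transfer is active, and teardown moves everything onto a temporary list with list_splice_init(). A condensed kernel-style sketch of those three operations (locking elided; the driver holds the channel lock around each):

#include <linux/list.h>

struct tsi_desc {
	struct list_head desc_node;
};

struct bdma_chan {
	struct list_head queue;
};

static void chan_init(struct bdma_chan *c)
{
	INIT_LIST_HEAD(&c->queue);
}

/* Submission: append the descriptor to the pending queue. */
static void chan_submit(struct bdma_chan *c, struct tsi_desc *d)
{
	list_add_tail(&d->desc_node, &c->queue);
}

/* Completion path: peek the next pending descriptor, if any. */
static struct tsi_desc *chan_next(struct bdma_chan *c)
{
	if (list_empty(&c->queue))
		return NULL;
	return list_first_entry(&c->queue, struct tsi_desc, desc_node);
}

/* Teardown: move the whole queue onto tmp, leaving it empty. */
static void chan_drain(struct bdma_chan *c, struct list_head *tmp)
{
	list_splice_init(&c->queue, tmp);
}
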
queue              77 drivers/rpmsg/rpmsg_char.c 	struct sk_buff_head queue;
queue             114 drivers/rpmsg/rpmsg_char.c 	skb_queue_tail(&eptdev->queue, skb);
queue             160 drivers/rpmsg/rpmsg_char.c 	while (!skb_queue_empty(&eptdev->queue)) {
queue             161 drivers/rpmsg/rpmsg_char.c 		skb = skb_dequeue(&eptdev->queue);
queue             184 drivers/rpmsg/rpmsg_char.c 	if (skb_queue_empty(&eptdev->queue)) {
queue             192 drivers/rpmsg/rpmsg_char.c 					     !skb_queue_empty(&eptdev->queue) ||
queue             203 drivers/rpmsg/rpmsg_char.c 	skb = skb_dequeue(&eptdev->queue);
queue             268 drivers/rpmsg/rpmsg_char.c 	if (!skb_queue_empty(&eptdev->queue))
queue             361 drivers/rpmsg/rpmsg_char.c 	skb_queue_head_init(&eptdev->queue);
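
rpmsg_char shows the classic sk_buff mailbox between an rx callback and read(): the callback appends with skb_queue_tail(), read() blocks until the queue is non-empty, and poll() simply tests skb_queue_empty(). A condensed kernel-style sketch of that producer/consumer pair, with a hypothetical readq struct bundling the queue and its wait head:

#include <linux/skbuff.h>
#include <linux/wait.h>

struct readq {
	struct sk_buff_head queue;	/* has its own internal lock */
	wait_queue_head_t wait;
};

static void readq_init(struct readq *r)
{
	skb_queue_head_init(&r->queue);
	init_waitqueue_head(&r->wait);
}

/* Producer side: the endpoint rx callback. */
static void readq_push(struct readq *r, struct sk_buff *skb)
{
	skb_queue_tail(&r->queue, skb);
	wake_up_interruptible(&r->wait);
}

/* Consumer side: read() blocks until data or a signal arrives. */
static struct sk_buff *readq_pop(struct readq *r)
{
	if (wait_event_interruptible(r->wait, !skb_queue_empty(&r->queue)))
		return NULL;	/* interrupted: -ERESTARTSYS upstream */
	return skb_dequeue(&r->queue);
}
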
queue            3075 drivers/s390/block/dasd.c 	struct dasd_block *block = hctx->queue->queuedata;
queue              76 drivers/s390/block/dasd_genhd.c 	gdp->queue = block->request_queue;
queue             418 drivers/s390/block/dcssblk.c 	dev_info->gd->queue = NULL;
queue             640 drivers/s390/block/dcssblk.c 	dev_info->gd->queue = dev_info->dcssblk_queue;
queue             708 drivers/s390/block/dcssblk.c 	dev_info->gd->queue = NULL;
queue             720 drivers/s390/block/dcssblk.c 	dev_info->gd->queue = NULL;
queue             790 drivers/s390/block/dcssblk.c 	dev_info->gd->queue = NULL;
queue             285 drivers/s390/block/scm_blk.c 	struct scm_device *scmdev = hctx->queue->queuedata;
queue             487 drivers/s390/block/scm_blk.c 	bdev->gendisk->queue = rq;
queue             518 drivers/s390/block/scm_blk.c 	blk_cleanup_queue(bdev->gendisk->queue);
queue             378 drivers/s390/block/xpram.c 		disk->queue = xpram_queues[i];
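
The dasd and scm_blk excerpts both open their .queue_rq handler by recovering the driver context from hctx->queue->queuedata, which the driver stored when it created the request queue. A minimal kernel-style sketch of that recovery (foo_device and foo_queue_rq are hypothetical, and the request is completed immediately instead of being handed to hardware):

#include <linux/blk-mq.h>

struct foo_device {
	int id;		/* placeholder for real device state */
};

static blk_status_t foo_queue_rq(struct blk_mq_hw_ctx *hctx,
				 const struct blk_mq_queue_data *bd)
{
	struct foo_device *dev = hctx->queue->queuedata;

	(void)dev;	/* a real driver programs the hardware here */
	blk_mq_start_request(bd->rq);
	blk_mq_end_request(bd->rq, BLK_STS_OK);
	return BLK_STS_OK;
}
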
queue              88 drivers/s390/cio/qdio.h static inline int do_sqbs(u64 token, unsigned char state, int queue,
queue              93 drivers/s390/cio/qdio.h 	unsigned long _queuestart = ((unsigned long)queue << 32) | *start;
queue             106 drivers/s390/cio/qdio.h static inline int do_eqbs(u64 token, unsigned char *state, int queue,
queue             111 drivers/s390/cio/qdio.h 	unsigned long _queuestart = ((unsigned long)queue << 32) | *start;
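
do_sqbs()/do_eqbs() above hand the SQBS/EQBS millicode both the queue number and the start index in a single 64-bit register: queue in the upper word, index in the lower. The packing is just a shift and an or:

#include <stdio.h>
#include <stdint.h>

/* Same layout as _queuestart in qdio.h: queue << 32 | start. */
static uint64_t pack_queuestart(int queue, unsigned int start)
{
	return ((uint64_t)queue << 32) | start;
}

int main(void)
{
	uint64_t qs = pack_queuestart(3, 17);

	printf("queue=%u start=%u\n",
	       (unsigned int)(qs >> 32), (unsigned int)(qs & 0xffffffff));
	return 0;
}
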
queue             708 drivers/s390/crypto/ap_bus.c 	int rc, card, queue, devres, drvres;
queue             712 drivers/s390/crypto/ap_bus.c 		queue = AP_QID_QUEUE(to_ap_queue(dev)->qid);
queue             715 drivers/s390/crypto/ap_bus.c 			&& test_bit_inv(queue, ap_perms.aqm);
queue             721 drivers/s390/crypto/ap_bus.c 			       card, queue);
queue             734 drivers/s390/crypto/ap_bus.c int ap_owned_by_def_drv(int card, int queue)
queue             738 drivers/s390/crypto/ap_bus.c 	if (card < 0 || card >= AP_DEVICES || queue < 0 || queue >= AP_DOMAINS)
queue             744 drivers/s390/crypto/ap_bus.c 	    && test_bit_inv(queue, ap_perms.aqm))
queue             756 drivers/s390/crypto/ap_bus.c 	int card, queue, rc = 0;
queue             763 drivers/s390/crypto/ap_bus.c 			for (queue = 0; !rc && queue < AP_DOMAINS; queue++)
queue             764 drivers/s390/crypto/ap_bus.c 				if (test_bit_inv(queue, aqm) &&
queue             765 drivers/s390/crypto/ap_bus.c 				    test_bit_inv(queue, ap_perms.aqm))
queue             778 drivers/s390/crypto/ap_bus.c 	int card, queue, devres, drvres, rc;
queue             788 drivers/s390/crypto/ap_bus.c 		queue = AP_QID_QUEUE(to_ap_queue(dev)->qid);
queue             791 drivers/s390/crypto/ap_bus.c 			&& test_bit_inv(queue, ap_perms.aqm);
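
The ap_bus checks above gate each queue device on two permission bitmaps: the card index must be set in ap_perms.apm and the queue (domain) index in ap_perms.aqm, with range checks first. A compilable userspace model of that test; the kernel uses test_bit_inv() (s390's MSB-first bit numbering), for which a plain LSB-first bit test stands in here:

#include <stdbool.h>
#include <stdio.h>

#define AP_DEVICES 256
#define AP_DOMAINS 256
#define BITS_PER_LONG (8 * sizeof(unsigned long))

struct ap_perms {
	unsigned long apm[AP_DEVICES / BITS_PER_LONG];	/* cards */
	unsigned long aqm[AP_DOMAINS / BITS_PER_LONG];	/* queues */
};

static bool test_bit_ls(int nr, const unsigned long *map)
{
	return (map[nr / BITS_PER_LONG] >> (nr % BITS_PER_LONG)) & 1;
}

/* Models ap_owned_by_def_drv(): range-check, then require both bits. */
static bool ap_dev_allowed(const struct ap_perms *p, int card, int queue)
{
	if (card < 0 || card >= AP_DEVICES || queue < 0 || queue >= AP_DOMAINS)
		return false;
	return test_bit_ls(card, p->apm) && test_bit_ls(queue, p->aqm);
}

int main(void)
{
	struct ap_perms perms = { .apm = { 1UL << 4 }, .aqm = { 1UL << 7 } };

	printf("%d %d\n", ap_dev_allowed(&perms, 4, 7),
			  ap_dev_allowed(&perms, 4, 8));
	return 0;
}
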
queue             564 drivers/s390/crypto/zcrypt_api.c static inline bool zcrypt_check_queue(struct ap_perms *perms, int queue)
queue             566 drivers/s390/crypto/zcrypt_api.c 	return test_bit_inv(queue, perms->aqm) ? true : false;
queue             574 drivers/s390/crypto/zcrypt_api.c 	if (!zq || !try_module_get(zq->queue->ap_dev.drv->driver.owner))
queue             577 drivers/s390/crypto/zcrypt_api.c 	get_device(&zq->queue->ap_dev.device);
queue             581 drivers/s390/crypto/zcrypt_api.c 	*pmod = zq->queue->ap_dev.drv->driver.owner;
queue             593 drivers/s390/crypto/zcrypt_api.c 	put_device(&zq->queue->ap_dev.device);
queue             623 drivers/s390/crypto/zcrypt_api.c 		return zq->queue->total_request_count >
queue             624 drivers/s390/crypto/zcrypt_api.c 			pref_zq->queue->total_request_count;
queue             684 drivers/s390/crypto/zcrypt_api.c 						AP_QID_QUEUE(zq->queue->qid)))
queue             702 drivers/s390/crypto/zcrypt_api.c 	qid = pref_zq->queue->qid;
queue             768 drivers/s390/crypto/zcrypt_api.c 						AP_QID_QUEUE(zq->queue->qid)))
queue             786 drivers/s390/crypto/zcrypt_api.c 	qid = pref_zq->queue->qid;
queue             853 drivers/s390/crypto/zcrypt_api.c 			     tdom != AP_QID_QUEUE(zq->queue->qid)))
queue             857 drivers/s390/crypto/zcrypt_api.c 						AP_QID_QUEUE(zq->queue->qid)))
queue             876 drivers/s390/crypto/zcrypt_api.c 	qid = pref_zq->queue->qid;
queue             990 drivers/s390/crypto/zcrypt_api.c 			     !is_desired_ep11_queue(zq->queue->qid,
queue             995 drivers/s390/crypto/zcrypt_api.c 						AP_QID_QUEUE(zq->queue->qid)))
queue            1013 drivers/s390/crypto/zcrypt_api.c 	qid = pref_zq->queue->qid;
queue            1078 drivers/s390/crypto/zcrypt_api.c 	qid = pref_zq->queue->qid;
queue            1097 drivers/s390/crypto/zcrypt_api.c 	int card, queue;
queue            1105 drivers/s390/crypto/zcrypt_api.c 			card = AP_QID_CARD(zq->queue->qid);
queue            1108 drivers/s390/crypto/zcrypt_api.c 			queue = AP_QID_QUEUE(zq->queue->qid);
queue            1109 drivers/s390/crypto/zcrypt_api.c 			stat = &devstatus[card * AP_DOMAINS + queue];
queue            1112 drivers/s390/crypto/zcrypt_api.c 			stat->qid = zq->queue->qid;
queue            1124 drivers/s390/crypto/zcrypt_api.c 	int card, queue;
queue            1132 drivers/s390/crypto/zcrypt_api.c 			card = AP_QID_CARD(zq->queue->qid);
queue            1133 drivers/s390/crypto/zcrypt_api.c 			queue = AP_QID_QUEUE(zq->queue->qid);
queue            1134 drivers/s390/crypto/zcrypt_api.c 			stat = &devstatus[card * AP_DOMAINS + queue];
queue            1137 drivers/s390/crypto/zcrypt_api.c 			stat->qid = zq->queue->qid;
queue            1145 drivers/s390/crypto/zcrypt_api.c int zcrypt_device_status_ext(int card, int queue,
queue            1156 drivers/s390/crypto/zcrypt_api.c 			if (card == AP_QID_CARD(zq->queue->qid) &&
queue            1157 drivers/s390/crypto/zcrypt_api.c 			    queue == AP_QID_QUEUE(zq->queue->qid)) {
queue            1160 drivers/s390/crypto/zcrypt_api.c 				devstat->qid = zq->queue->qid;
queue            1183 drivers/s390/crypto/zcrypt_api.c 			card = AP_QID_CARD(zq->queue->qid);
queue            1184 drivers/s390/crypto/zcrypt_api.c 			if (AP_QID_QUEUE(zq->queue->qid) != ap_domain_index
queue            1204 drivers/s390/crypto/zcrypt_api.c 			card = AP_QID_CARD(zq->queue->qid);
queue            1205 drivers/s390/crypto/zcrypt_api.c 			if (AP_QID_QUEUE(zq->queue->qid) != ap_domain_index
queue            1208 drivers/s390/crypto/zcrypt_api.c 			spin_lock(&zq->queue->lock);
queue            1210 drivers/s390/crypto/zcrypt_api.c 				zq->queue->pendingq_count +
queue            1211 drivers/s390/crypto/zcrypt_api.c 				zq->queue->requestq_count;
queue            1212 drivers/s390/crypto/zcrypt_api.c 			spin_unlock(&zq->queue->lock);
queue            1231 drivers/s390/crypto/zcrypt_api.c 			card = AP_QID_CARD(zq->queue->qid);
queue            1232 drivers/s390/crypto/zcrypt_api.c 			if (AP_QID_QUEUE(zq->queue->qid) != ap_domain_index
queue            1235 drivers/s390/crypto/zcrypt_api.c 			spin_lock(&zq->queue->lock);
queue            1236 drivers/s390/crypto/zcrypt_api.c 			cnt = zq->queue->total_request_count;
queue            1237 drivers/s390/crypto/zcrypt_api.c 			spin_unlock(&zq->queue->lock);
queue            1256 drivers/s390/crypto/zcrypt_api.c 			if (AP_QID_QUEUE(zq->queue->qid) != ap_domain_index)
queue            1258 drivers/s390/crypto/zcrypt_api.c 			spin_lock(&zq->queue->lock);
queue            1259 drivers/s390/crypto/zcrypt_api.c 			pendingq_count += zq->queue->pendingq_count;
queue            1260 drivers/s390/crypto/zcrypt_api.c 			spin_unlock(&zq->queue->lock);
queue            1279 drivers/s390/crypto/zcrypt_api.c 			if (AP_QID_QUEUE(zq->queue->qid) != ap_domain_index)
queue            1281 drivers/s390/crypto/zcrypt_api.c 			spin_lock(&zq->queue->lock);
queue            1282 drivers/s390/crypto/zcrypt_api.c 			requestq_count += zq->queue->requestq_count;
queue            1283 drivers/s390/crypto/zcrypt_api.c 			spin_unlock(&zq->queue->lock);
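
Inside zcrypt_api.c the preferred queue is chosen by load: the comparison excerpted above treats a candidate as worse when its total_request_count exceeds the current preference's, so requests drift toward the least-loaded eligible queue. A userspace model of that selection loop (eligibility reduced to a single online flag; the real code also filters by domain and permissions):

#include <stddef.h>
#include <stdio.h>

struct zq {
	unsigned long total_request_count;
	int online;
};

/* Pick the eligible queue that has served the fewest requests. */
static struct zq *pick_queue(struct zq *qs, size_t n)
{
	struct zq *pref = NULL;
	size_t i;

	for (i = 0; i < n; i++) {
		if (!qs[i].online)
			continue;
		if (!pref || qs[i].total_request_count <
			     pref->total_request_count)
			pref = &qs[i];
	}
	return pref;
}

int main(void)
{
	struct zq qs[] = { { 40, 1 }, { 12, 1 }, { 7, 0 }, { 30, 1 } };
	struct zq *pref = pick_queue(qs, 4);

	printf("picked count=%lu\n", pref ? pref->total_request_count : 0);
	return 0;
}
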
queue              96 drivers/s390/crypto/zcrypt_api.h 	struct ap_queue *queue;		/* The "real" ap queue device. */
queue             144 drivers/s390/crypto/zcrypt_api.h int zcrypt_device_status_ext(int card, int queue,
queue             175 drivers/s390/crypto/zcrypt_cex2a.c 	zq->queue = aq;
queue             220 drivers/s390/crypto/zcrypt_cex2c.c 	zq->queue = aq;
queue             117 drivers/s390/crypto/zcrypt_cex4.c 	cca_get_info(AP_QID_CARD(zq->queue->qid),
queue             118 drivers/s390/crypto/zcrypt_cex4.c 		     AP_QID_QUEUE(zq->queue->qid),
queue             381 drivers/s390/crypto/zcrypt_cex4.c 	zq->queue = aq;
queue              84 drivers/s390/crypto/zcrypt_error.h 	int card = AP_QID_CARD(zq->queue->qid);
queue              85 drivers/s390/crypto/zcrypt_error.h 	int queue = AP_QID_QUEUE(zq->queue->qid);
queue             102 drivers/s390/crypto/zcrypt_error.h 			   card, queue, ehdr->reply_code);
queue             114 drivers/s390/crypto/zcrypt_error.h 		       card, queue);
queue             117 drivers/s390/crypto/zcrypt_error.h 			   card, queue, ehdr->reply_code);
queue             124 drivers/s390/crypto/zcrypt_error.h 		       card, queue);
queue             135 drivers/s390/crypto/zcrypt_error.h 				   card, queue, apfs, ehdr->reply_code);
queue             139 drivers/s390/crypto/zcrypt_error.h 				   card, queue, ehdr->reply_code);
queue             147 drivers/s390/crypto/zcrypt_error.h 		       card, queue);
queue             150 drivers/s390/crypto/zcrypt_error.h 			   card, queue, ehdr->reply_code);
queue             155 drivers/s390/crypto/zcrypt_error.h 		       card, queue);
queue             158 drivers/s390/crypto/zcrypt_error.h 			   card, queue, ehdr->reply_code);
queue             360 drivers/s390/crypto/zcrypt_msgtype50.c 		       AP_QID_CARD(zq->queue->qid),
queue             361 drivers/s390/crypto/zcrypt_msgtype50.c 		       AP_QID_QUEUE(zq->queue->qid));
queue             364 drivers/s390/crypto/zcrypt_msgtype50.c 			   AP_QID_CARD(zq->queue->qid),
queue             365 drivers/s390/crypto/zcrypt_msgtype50.c 			   AP_QID_QUEUE(zq->queue->qid),
queue             397 drivers/s390/crypto/zcrypt_msgtype50.c 		       AP_QID_CARD(zq->queue->qid),
queue             398 drivers/s390/crypto/zcrypt_msgtype50.c 		       AP_QID_QUEUE(zq->queue->qid));
queue             401 drivers/s390/crypto/zcrypt_msgtype50.c 			   AP_QID_CARD(zq->queue->qid),
queue             402 drivers/s390/crypto/zcrypt_msgtype50.c 			   AP_QID_QUEUE(zq->queue->qid),
queue             478 drivers/s390/crypto/zcrypt_msgtype50.c 	ap_queue_message(zq->queue, &ap_msg);
queue             487 drivers/s390/crypto/zcrypt_msgtype50.c 		ap_cancel_message(zq->queue, &ap_msg);
queue             524 drivers/s390/crypto/zcrypt_msgtype50.c 	ap_queue_message(zq->queue, &ap_msg);
queue             533 drivers/s390/crypto/zcrypt_msgtype50.c 		ap_cancel_message(zq->queue, &ap_msg);
queue             297 drivers/s390/crypto/zcrypt_msgtype6.c 	msg->cprbx.domain = AP_QID_QUEUE(zq->queue->qid);
queue             367 drivers/s390/crypto/zcrypt_msgtype6.c 	msg->cprbx.domain = AP_QID_QUEUE(zq->queue->qid);
queue             657 drivers/s390/crypto/zcrypt_msgtype6.c 				   AP_QID_CARD(zq->queue->qid),
queue             658 drivers/s390/crypto/zcrypt_msgtype6.c 				   AP_QID_QUEUE(zq->queue->qid),
queue             664 drivers/s390/crypto/zcrypt_msgtype6.c 		       AP_QID_CARD(zq->queue->qid),
queue             665 drivers/s390/crypto/zcrypt_msgtype6.c 		       AP_QID_QUEUE(zq->queue->qid));
queue             668 drivers/s390/crypto/zcrypt_msgtype6.c 			   AP_QID_CARD(zq->queue->qid),
queue             669 drivers/s390/crypto/zcrypt_msgtype6.c 			   AP_QID_QUEUE(zq->queue->qid),
queue             808 drivers/s390/crypto/zcrypt_msgtype6.c 		       AP_QID_CARD(zq->queue->qid),
queue             809 drivers/s390/crypto/zcrypt_msgtype6.c 		       AP_QID_QUEUE(zq->queue->qid));
queue             812 drivers/s390/crypto/zcrypt_msgtype6.c 			   AP_QID_CARD(zq->queue->qid),
queue             813 drivers/s390/crypto/zcrypt_msgtype6.c 			   AP_QID_QUEUE(zq->queue->qid),
queue             842 drivers/s390/crypto/zcrypt_msgtype6.c 		       AP_QID_CARD(zq->queue->qid),
queue             843 drivers/s390/crypto/zcrypt_msgtype6.c 		       AP_QID_QUEUE(zq->queue->qid));
queue             846 drivers/s390/crypto/zcrypt_msgtype6.c 			   AP_QID_CARD(zq->queue->qid),
queue             847 drivers/s390/crypto/zcrypt_msgtype6.c 			   AP_QID_QUEUE(zq->queue->qid),
queue             871 drivers/s390/crypto/zcrypt_msgtype6.c 		       AP_QID_CARD(zq->queue->qid),
queue             872 drivers/s390/crypto/zcrypt_msgtype6.c 		       AP_QID_QUEUE(zq->queue->qid));
queue             875 drivers/s390/crypto/zcrypt_msgtype6.c 			   AP_QID_CARD(zq->queue->qid),
queue             876 drivers/s390/crypto/zcrypt_msgtype6.c 			   AP_QID_QUEUE(zq->queue->qid),
queue             901 drivers/s390/crypto/zcrypt_msgtype6.c 		       AP_QID_CARD(zq->queue->qid),
queue             902 drivers/s390/crypto/zcrypt_msgtype6.c 		       AP_QID_QUEUE(zq->queue->qid));
queue             905 drivers/s390/crypto/zcrypt_msgtype6.c 			   AP_QID_CARD(zq->queue->qid),
queue             906 drivers/s390/crypto/zcrypt_msgtype6.c 			   AP_QID_QUEUE(zq->queue->qid),
queue            1034 drivers/s390/crypto/zcrypt_msgtype6.c 	ap_queue_message(zq->queue, &ap_msg);
queue            1044 drivers/s390/crypto/zcrypt_msgtype6.c 		ap_cancel_message(zq->queue, &ap_msg);
queue            1078 drivers/s390/crypto/zcrypt_msgtype6.c 	ap_queue_message(zq->queue, &ap_msg);
queue            1088 drivers/s390/crypto/zcrypt_msgtype6.c 		ap_cancel_message(zq->queue, &ap_msg);
queue            1137 drivers/s390/crypto/zcrypt_msgtype6.c 	ap_queue_message(zq->queue, ap_msg);
queue            1145 drivers/s390/crypto/zcrypt_msgtype6.c 		ap_cancel_message(zq->queue, ap_msg);
queue            1217 drivers/s390/crypto/zcrypt_msgtype6.c 					AP_QID_QUEUE(zq->queue->qid);
queue            1235 drivers/s390/crypto/zcrypt_msgtype6.c 					AP_QID_QUEUE(zq->queue->qid);
queue            1239 drivers/s390/crypto/zcrypt_msgtype6.c 	ap_queue_message(zq->queue, ap_msg);
queue            1247 drivers/s390/crypto/zcrypt_msgtype6.c 		ap_cancel_message(zq->queue, ap_msg);
queue            1297 drivers/s390/crypto/zcrypt_msgtype6.c 	msg->cprbx.domain = AP_QID_QUEUE(zq->queue->qid);
queue            1300 drivers/s390/crypto/zcrypt_msgtype6.c 	ap_queue_message(zq->queue, ap_msg);
queue            1308 drivers/s390/crypto/zcrypt_msgtype6.c 		ap_cancel_message(zq->queue, ap_msg);
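
Both msgtype handlers repeat one submit shape: ap_queue_message() enqueues the request on the AP queue, the caller waits for the reply, and if the wait is interrupted by a signal, ap_cancel_message() revokes the request so its buffers can be freed safely. A condensed sketch of that shape, assuming the zcrypt/ap_bus headers for the types; the completion here is a stand-in for the driver's actual reply notification, which is not shown in the excerpts:

#include <linux/completion.h>

static long submit_and_wait(struct zcrypt_queue *zq,
			    struct ap_message *ap_msg,
			    struct completion *done)
{
	long rc;

	ap_queue_message(zq->queue, ap_msg);	/* hand off to the AP queue */
	rc = wait_for_completion_interruptible(done);
	if (rc)
		/* A signal beat the reply: pull the request back. */
		ap_cancel_message(zq->queue, ap_msg);
	return rc;
}
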
queue              64 drivers/s390/crypto/zcrypt_queue.c 		   AP_QID_CARD(zq->queue->qid),
queue              65 drivers/s390/crypto/zcrypt_queue.c 		   AP_QID_QUEUE(zq->queue->qid),
queue              69 drivers/s390/crypto/zcrypt_queue.c 		ap_flush_queue(zq->queue);
queue             100 drivers/s390/crypto/zcrypt_queue.c 		ap_flush_queue(zq->queue);
queue             162 drivers/s390/crypto/zcrypt_queue.c 	zc = zq->queue->card->private;
queue             168 drivers/s390/crypto/zcrypt_queue.c 		   AP_QID_CARD(zq->queue->qid), AP_QID_QUEUE(zq->queue->qid));
queue             174 drivers/s390/crypto/zcrypt_queue.c 	rc = sysfs_create_group(&zq->queue->ap_dev.device.kobj,
queue             178 drivers/s390/crypto/zcrypt_queue.c 	get_device(&zq->queue->ap_dev.device);
queue             188 drivers/s390/crypto/zcrypt_queue.c 	sysfs_remove_group(&zq->queue->ap_dev.device.kobj,
queue             190 drivers/s390/crypto/zcrypt_queue.c 	put_device(&zq->queue->ap_dev.device);
queue             211 drivers/s390/crypto/zcrypt_queue.c 		   AP_QID_CARD(zq->queue->qid), AP_QID_QUEUE(zq->queue->qid));
queue             221 drivers/s390/crypto/zcrypt_queue.c 	sysfs_remove_group(&zq->queue->ap_dev.device.kobj,
queue             223 drivers/s390/crypto/zcrypt_queue.c 	put_device(&zq->queue->ap_dev.device);
queue             543 drivers/s390/net/qeth_core.h static inline void qeth_tx_arm_timer(struct qeth_qdio_out_q *queue)
queue             545 drivers/s390/net/qeth_core.h 	if (timer_pending(&queue->timer))
queue             547 drivers/s390/net/qeth_core.h 	mod_timer(&queue->timer, usecs_to_jiffies(QETH_TX_TIMER_USECS) +
queue             551 drivers/s390/net/qeth_core.h static inline bool qeth_out_queue_is_full(struct qeth_qdio_out_q *queue)
queue             553 drivers/s390/net/qeth_core.h 	return atomic_read(&queue->used_buffers) >= QDIO_MAX_BUFFERS_PER_Q;
queue             556 drivers/s390/net/qeth_core.h static inline bool qeth_out_queue_is_empty(struct qeth_qdio_out_q *queue)
queue             558 drivers/s390/net/qeth_core.h 	return atomic_read(&queue->used_buffers) == 0;
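
The two inline helpers above are qeth's backpressure test: used_buffers counts in-flight QDIO buffers atomically, and the TX paths stop the netdev queue when it reaches QDIO_MAX_BUFFERS_PER_Q, waking it once completions drain it (the same pairing appears later in __qeth_xmit and the completion handlers). A condensed kernel-style sketch of that gate, assuming the qeth_core.h context for the types:

/* Submit side: account one buffer; stop the queue when full. */
static void tx_account_submit(struct qeth_qdio_out_q *queue,
			      struct netdev_queue *txq)
{
	if (atomic_inc_return(&queue->used_buffers) >=
	    QDIO_MAX_BUFFERS_PER_Q)
		netif_tx_stop_queue(txq);
}

/* Completion side: release buffers; wake once no longer full. */
static void tx_account_complete(struct qeth_qdio_out_q *queue,
				struct netdev_queue *txq, int count)
{
	atomic_sub(count, &queue->used_buffers);
	if (netif_tx_queue_stopped(txq) && !qeth_out_queue_is_full(queue))
		netif_tx_wake_queue(txq);
}
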
queue            1073 drivers/s390/net/qeth_core.h int qeth_do_send_packet(struct qeth_card *card, struct qeth_qdio_out_q *queue,
queue            1098 drivers/s390/net/qeth_core.h 	      struct qeth_qdio_out_q *queue, int ipv,
queue            1099 drivers/s390/net/qeth_core.h 	      void (*fill_header)(struct qeth_qdio_out_q *queue,
queue              71 drivers/s390/net/qeth_core_main.c static void qeth_notify_skbs(struct qeth_qdio_out_q *queue,
queue             482 drivers/s390/net/qeth_core_main.c static inline int qeth_is_cq(struct qeth_card *card, unsigned int queue)
queue             486 drivers/s390/net/qeth_core_main.c 	    queue != 0 &&
queue             487 drivers/s390/net/qeth_core_main.c 	    queue == card->qdio.no_in_queues - 1;
queue            1094 drivers/s390/net/qeth_core_main.c 	struct qeth_qdio_out_q *queue = buf->q;
queue            1101 drivers/s390/net/qeth_core_main.c 		qeth_notify_skbs(queue, buf, TX_NOTIFY_GENERALERROR);
queue            1107 drivers/s390/net/qeth_core_main.c 	QETH_TXQ_STAT_INC(queue, bufs);
queue            1108 drivers/s390/net/qeth_core_main.c 	QETH_TXQ_STAT_ADD(queue, buf_elements, buf->next_element_to_fill);
queue            1116 drivers/s390/net/qeth_core_main.c 			QETH_TXQ_STAT_ADD(queue, tx_errors, packets);
queue            1118 drivers/s390/net/qeth_core_main.c 			QETH_TXQ_STAT_ADD(queue, tx_packets, packets);
queue            1119 drivers/s390/net/qeth_core_main.c 			QETH_TXQ_STAT_ADD(queue, tx_bytes, bytes);
queue            1121 drivers/s390/net/qeth_core_main.c 				QETH_TXQ_STAT_ADD(queue, skbs_csum, packets);
queue            1123 drivers/s390/net/qeth_core_main.c 				QETH_TXQ_STAT_INC(queue, skbs_sg);
queue            1125 drivers/s390/net/qeth_core_main.c 				QETH_TXQ_STAT_INC(queue, skbs_tso);
queue            1126 drivers/s390/net/qeth_core_main.c 				QETH_TXQ_STAT_ADD(queue, tso_bytes, bytes);
queue            1134 drivers/s390/net/qeth_core_main.c static void qeth_clear_output_buffer(struct qeth_qdio_out_q *queue,
queue            1142 drivers/s390/net/qeth_core_main.c 		atomic_dec(&queue->set_pci_flags_count);
queue            1146 drivers/s390/net/qeth_core_main.c 	for (i = 0; i < queue->max_elements; ++i) {
queue            1153 drivers/s390/net/qeth_core_main.c 	qeth_scrub_qdio_buffer(buf->buffer, queue->max_elements);
queue            2322 drivers/s390/net/qeth_core_main.c 	struct qeth_qdio_out_q *queue = from_timer(queue, timer, timer);
queue            2324 drivers/s390/net/qeth_core_main.c 	napi_schedule(&queue->napi);
queue            2325 drivers/s390/net/qeth_core_main.c 	QETH_TXQ_STAT_INC(queue, completion_timer);
queue            2349 drivers/s390/net/qeth_core_main.c 		struct qeth_qdio_out_q *queue;
queue            2351 drivers/s390/net/qeth_core_main.c 		queue = qeth_alloc_output_queue();
queue            2352 drivers/s390/net/qeth_core_main.c 		if (!queue)
queue            2355 drivers/s390/net/qeth_core_main.c 		QETH_CARD_HEX(card, 2, &queue, sizeof(void *));
queue            2356 drivers/s390/net/qeth_core_main.c 		card->qdio.out_qs[i] = queue;
queue            2357 drivers/s390/net/qeth_core_main.c 		queue->card = card;
queue            2358 drivers/s390/net/qeth_core_main.c 		queue->queue_no = i;
queue            2359 drivers/s390/net/qeth_core_main.c 		timer_setup(&queue->timer, qeth_tx_completion_timer, 0);
queue            2363 drivers/s390/net/qeth_core_main.c 			WARN_ON(queue->bufs[j]);
queue            2364 drivers/s390/net/qeth_core_main.c 			if (qeth_init_qdio_out_buf(queue, j))
queue            2699 drivers/s390/net/qeth_core_main.c 		struct qeth_qdio_out_q *queue = card->qdio.out_qs[i];
queue            2701 drivers/s390/net/qeth_core_main.c 		qdio_reset_buffers(queue->qdio_bufs, QDIO_MAX_BUFFERS_PER_Q);
queue            2702 drivers/s390/net/qeth_core_main.c 		queue->max_elements = QETH_MAX_BUFFER_ELEMENTS(card);
queue            2703 drivers/s390/net/qeth_core_main.c 		queue->next_buf_to_fill = 0;
queue            2704 drivers/s390/net/qeth_core_main.c 		queue->do_pack = 0;
queue            2705 drivers/s390/net/qeth_core_main.c 		queue->prev_hdr = NULL;
queue            2706 drivers/s390/net/qeth_core_main.c 		queue->bulk_start = 0;
queue            2707 drivers/s390/net/qeth_core_main.c 		atomic_set(&queue->used_buffers, 0);
queue            2708 drivers/s390/net/qeth_core_main.c 		atomic_set(&queue->set_pci_flags_count, 0);
queue            2709 drivers/s390/net/qeth_core_main.c 		atomic_set(&queue->state, QETH_OUT_Q_UNLOCKED);
queue            3124 drivers/s390/net/qeth_core_main.c 	struct qeth_qdio_q *queue = card->qdio.in_q;
queue            3131 drivers/s390/net/qeth_core_main.c 	count = (index < queue->next_buf_to_init)?
queue            3133 drivers/s390/net/qeth_core_main.c 		(queue->next_buf_to_init - index) :
queue            3135 drivers/s390/net/qeth_core_main.c 		(queue->next_buf_to_init + QDIO_MAX_BUFFERS_PER_Q - index);
queue            3138 drivers/s390/net/qeth_core_main.c 		for (i = queue->next_buf_to_init;
queue            3139 drivers/s390/net/qeth_core_main.c 		     i < queue->next_buf_to_init + count; ++i) {
queue            3141 drivers/s390/net/qeth_core_main.c 				&queue->bufs[i % QDIO_MAX_BUFFERS_PER_Q])) {
queue            3179 drivers/s390/net/qeth_core_main.c 			     queue->next_buf_to_init, count);
queue            3183 drivers/s390/net/qeth_core_main.c 		queue->next_buf_to_init = (queue->next_buf_to_init + count) %
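
The refill arithmetic above computes how many slots lie between the completion index and next_buf_to_init on the 128-entry QDIO ring, i.e. (next_buf_to_init - index) mod QDIO_MAX_BUFFERS_PER_Q, written as a ternary to handle wraparound. Compilable form:

#include <stdio.h>

#define QDIO_MAX_BUFFERS_PER_Q 128

static int ring_distance(int index, int next_buf_to_init)
{
	return (index < next_buf_to_init) ?
		next_buf_to_init - index :
		next_buf_to_init + QDIO_MAX_BUFFERS_PER_Q - index;
}

int main(void)
{
	/* No wrap: 20 slots.  With wrap: 5 + 128 - 120 = 13 slots. */
	printf("%d %d\n", ring_distance(10, 30), ring_distance(120, 5));
	return 0;
}
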
queue            3222 drivers/s390/net/qeth_core_main.c static int qeth_prep_flush_pack_buffer(struct qeth_qdio_out_q *queue)
queue            3226 drivers/s390/net/qeth_core_main.c 	buffer = queue->bufs[queue->next_buf_to_fill];
queue            3231 drivers/s390/net/qeth_core_main.c 		queue->next_buf_to_fill =
queue            3232 drivers/s390/net/qeth_core_main.c 			(queue->next_buf_to_fill + 1) % QDIO_MAX_BUFFERS_PER_Q;
queue            3242 drivers/s390/net/qeth_core_main.c static void qeth_switch_to_packing_if_needed(struct qeth_qdio_out_q *queue)
queue            3244 drivers/s390/net/qeth_core_main.c 	if (!queue->do_pack) {
queue            3245 drivers/s390/net/qeth_core_main.c 		if (atomic_read(&queue->used_buffers)
queue            3248 drivers/s390/net/qeth_core_main.c 			QETH_CARD_TEXT(queue->card, 6, "np->pack");
queue            3249 drivers/s390/net/qeth_core_main.c 			QETH_TXQ_STAT_INC(queue, packing_mode_switch);
queue            3250 drivers/s390/net/qeth_core_main.c 			queue->do_pack = 1;
queue            3261 drivers/s390/net/qeth_core_main.c static int qeth_switch_to_nonpacking_if_needed(struct qeth_qdio_out_q *queue)
queue            3263 drivers/s390/net/qeth_core_main.c 	if (queue->do_pack) {
queue            3264 drivers/s390/net/qeth_core_main.c 		if (atomic_read(&queue->used_buffers)
queue            3267 drivers/s390/net/qeth_core_main.c 			QETH_CARD_TEXT(queue->card, 6, "pack->np");
queue            3268 drivers/s390/net/qeth_core_main.c 			QETH_TXQ_STAT_INC(queue, packing_mode_switch);
queue            3269 drivers/s390/net/qeth_core_main.c 			queue->do_pack = 0;
queue            3270 drivers/s390/net/qeth_core_main.c 			return qeth_prep_flush_pack_buffer(queue);
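
qeth_switch_to_packing_if_needed() and its counterpart above implement hysteresis: packing mode is entered when used_buffers climbs past a high watermark and left (flushing the half-filled pack buffer) only once it falls to a low watermark, so the queue does not flap at the boundary. A userspace model with illustrative watermark values (the kernel's QETH_*_WATERMARK_PACK constants are not shown in the excerpts):

#include <stdio.h>

#define HIGH_WATERMARK_PACK 5	/* illustrative, not the kernel value */
#define LOW_WATERMARK_PACK  2

struct outq { int used_buffers; int do_pack; };

static void check_packing(struct outq *q)
{
	if (!q->do_pack && q->used_buffers >= HIGH_WATERMARK_PACK)
		q->do_pack = 1;
	else if (q->do_pack && q->used_buffers <= LOW_WATERMARK_PACK)
		q->do_pack = 0;	/* real code also flushes the pack buffer */
}

int main(void)
{
	struct outq q = { 0, 0 };

	for (int load = 0; load <= 6; load++) {	/* ramp the queue up */
		q.used_buffers = load;
		check_packing(&q);
	}
	printf("after ramp-up: do_pack=%d\n", q.do_pack);

	q.used_buffers = 1;			/* drain below the low mark */
	check_packing(&q);
	printf("after drain: do_pack=%d\n", q.do_pack);
	return 0;
}
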
queue            3276 drivers/s390/net/qeth_core_main.c static void qeth_flush_buffers(struct qeth_qdio_out_q *queue, int index,
queue            3279 drivers/s390/net/qeth_core_main.c 	struct qeth_card *card = queue->card;
queue            3287 drivers/s390/net/qeth_core_main.c 		buf = queue->bufs[bidx];
queue            3291 drivers/s390/net/qeth_core_main.c 		if (queue->bufstates)
queue            3292 drivers/s390/net/qeth_core_main.c 			queue->bufstates[bidx].user = buf;
queue            3294 drivers/s390/net/qeth_core_main.c 		if (IS_IQD(queue->card))
queue            3297 drivers/s390/net/qeth_core_main.c 		if (!queue->do_pack) {
queue            3298 drivers/s390/net/qeth_core_main.c 			if ((atomic_read(&queue->used_buffers) >=
queue            3301 drivers/s390/net/qeth_core_main.c 			    !atomic_read(&queue->set_pci_flags_count)) {
queue            3304 drivers/s390/net/qeth_core_main.c 				atomic_inc(&queue->set_pci_flags_count);
queue            3308 drivers/s390/net/qeth_core_main.c 			if (!atomic_read(&queue->set_pci_flags_count)) {
queue            3317 drivers/s390/net/qeth_core_main.c 				atomic_inc(&queue->set_pci_flags_count);
queue            3324 drivers/s390/net/qeth_core_main.c 	if (atomic_read(&queue->set_pci_flags_count))
queue            3326 drivers/s390/net/qeth_core_main.c 	rc = do_QDIO(CARD_DDEV(queue->card), qdio_flags,
queue            3327 drivers/s390/net/qeth_core_main.c 		     queue->queue_no, index, count);
queue            3331 drivers/s390/net/qeth_core_main.c 		napi_schedule(&queue->napi);
queue            3337 drivers/s390/net/qeth_core_main.c 		QETH_CARD_TEXT(queue->card, 2, "flushbuf");
queue            3338 drivers/s390/net/qeth_core_main.c 		QETH_CARD_TEXT_(queue->card, 2, " q%d", queue->queue_no);
queue            3339 drivers/s390/net/qeth_core_main.c 		QETH_CARD_TEXT_(queue->card, 2, " idx%d", index);
queue            3340 drivers/s390/net/qeth_core_main.c 		QETH_CARD_TEXT_(queue->card, 2, " c%d", count);
queue            3341 drivers/s390/net/qeth_core_main.c 		QETH_CARD_TEXT_(queue->card, 2, " err%d", rc);
queue            3345 drivers/s390/net/qeth_core_main.c 		qeth_schedule_recovery(queue->card);
queue            3350 drivers/s390/net/qeth_core_main.c static void qeth_flush_queue(struct qeth_qdio_out_q *queue)
queue            3352 drivers/s390/net/qeth_core_main.c 	qeth_flush_buffers(queue, queue->bulk_start, 1);
queue            3354 drivers/s390/net/qeth_core_main.c 	queue->bulk_start = QDIO_BUFNR(queue->bulk_start + 1);
queue            3355 drivers/s390/net/qeth_core_main.c 	queue->prev_hdr = NULL;
queue            3358 drivers/s390/net/qeth_core_main.c static void qeth_check_outbound_queue(struct qeth_qdio_out_q *queue)
queue            3368 drivers/s390/net/qeth_core_main.c 	if ((atomic_read(&queue->used_buffers) <= QETH_LOW_WATERMARK_PACK) ||
queue            3369 drivers/s390/net/qeth_core_main.c 	    !atomic_read(&queue->set_pci_flags_count)) {
queue            3370 drivers/s390/net/qeth_core_main.c 		if (atomic_xchg(&queue->state, QETH_OUT_Q_LOCKED_FLUSH) ==
queue            3377 drivers/s390/net/qeth_core_main.c 			index = queue->next_buf_to_fill;
queue            3378 drivers/s390/net/qeth_core_main.c 			q_was_packing = queue->do_pack;
queue            3381 drivers/s390/net/qeth_core_main.c 			flush_cnt += qeth_switch_to_nonpacking_if_needed(queue);
queue            3383 drivers/s390/net/qeth_core_main.c 			    !atomic_read(&queue->set_pci_flags_count))
queue            3384 drivers/s390/net/qeth_core_main.c 				flush_cnt += qeth_prep_flush_pack_buffer(queue);
queue            3386 drivers/s390/net/qeth_core_main.c 				QETH_TXQ_STAT_ADD(queue, bufs_pack, flush_cnt);
queue            3388 drivers/s390/net/qeth_core_main.c 				qeth_flush_buffers(queue, index, flush_cnt);
queue            3389 drivers/s390/net/qeth_core_main.c 			atomic_set(&queue->state, QETH_OUT_Q_UNLOCKED);
queue            3394 drivers/s390/net/qeth_core_main.c static void qeth_qdio_start_poll(struct ccw_device *ccwdev, int queue,
queue            3427 drivers/s390/net/qeth_core_main.c 				 unsigned int queue, int first_element,
queue            3434 drivers/s390/net/qeth_core_main.c 	if (!qeth_is_cq(card, queue))
queue            3462 drivers/s390/net/qeth_core_main.c 	rc = do_QDIO(CARD_DDEV(card), QDIO_FLAG_SYNC_INPUT, queue,
queue            3475 drivers/s390/net/qeth_core_main.c 				    unsigned int qdio_err, int queue,
queue            3481 drivers/s390/net/qeth_core_main.c 	QETH_CARD_TEXT_(card, 2, "qihq%d", queue);
queue            3484 drivers/s390/net/qeth_core_main.c 	if (qeth_is_cq(card, queue))
queue            3485 drivers/s390/net/qeth_core_main.c 		qeth_qdio_cq_handler(card, qdio_err, queue, first_elem, count);
queue            3496 drivers/s390/net/qeth_core_main.c 	struct qeth_qdio_out_q *queue = card->qdio.out_qs[__queue];
queue            3512 drivers/s390/net/qeth_core_main.c 		buffer = queue->bufs[bidx];
queue            3514 drivers/s390/net/qeth_core_main.c 		qeth_clear_output_buffer(queue, buffer, qdio_error, 0);
queue            3517 drivers/s390/net/qeth_core_main.c 	atomic_sub(count, &queue->used_buffers);
queue            3518 drivers/s390/net/qeth_core_main.c 	qeth_check_outbound_queue(queue);
queue            3525 drivers/s390/net/qeth_core_main.c 	if (netif_tx_queue_stopped(txq) && !qeth_out_queue_is_full(queue))
queue            3637 drivers/s390/net/qeth_core_main.c static int qeth_add_hw_header(struct qeth_qdio_out_q *queue,
queue            3643 drivers/s390/net/qeth_core_main.c 	const unsigned int max_elements = queue->max_elements;
queue            3682 drivers/s390/net/qeth_core_main.c 			QETH_TXQ_STAT_INC(queue, skbs_linearized_fail);
queue            3686 drivers/s390/net/qeth_core_main.c 		QETH_TXQ_STAT_INC(queue, skbs_linearized);
queue            3708 drivers/s390/net/qeth_core_main.c static bool qeth_iqd_may_bulk(struct qeth_qdio_out_q *queue,
queue            3713 drivers/s390/net/qeth_core_main.c 	struct qeth_hdr *prev_hdr = queue->prev_hdr;
queue            3827 drivers/s390/net/qeth_core_main.c static int __qeth_xmit(struct qeth_card *card, struct qeth_qdio_out_q *queue,
queue            3832 drivers/s390/net/qeth_core_main.c 	struct qeth_qdio_out_buffer *buffer = queue->bufs[queue->bulk_start];
queue            3847 drivers/s390/net/qeth_core_main.c 	if ((buffer->next_element_to_fill + elements > queue->max_elements) ||
queue            3848 drivers/s390/net/qeth_core_main.c 	    !qeth_iqd_may_bulk(queue, buffer, skb, hdr)) {
queue            3850 drivers/s390/net/qeth_core_main.c 		qeth_flush_queue(queue);
queue            3851 drivers/s390/net/qeth_core_main.c 		buffer = queue->bufs[queue->bulk_start];
queue            3859 drivers/s390/net/qeth_core_main.c 	    atomic_inc_return(&queue->used_buffers) >= QDIO_MAX_BUFFERS_PER_Q) {
queue            3863 drivers/s390/net/qeth_core_main.c 		QETH_TXQ_STAT_INC(queue, stopped);
queue            3870 drivers/s390/net/qeth_core_main.c 	queue->prev_hdr = hdr;
queue            3875 drivers/s390/net/qeth_core_main.c 	if (flush || next_element >= queue->max_elements) {
queue            3877 drivers/s390/net/qeth_core_main.c 		qeth_flush_queue(queue);
queue            3880 drivers/s390/net/qeth_core_main.c 	if (stopped && !qeth_out_queue_is_full(queue))
queue            3885 drivers/s390/net/qeth_core_main.c int qeth_do_send_packet(struct qeth_card *card, struct qeth_qdio_out_q *queue,
queue            3901 drivers/s390/net/qeth_core_main.c 	while (atomic_cmpxchg(&queue->state, QETH_OUT_Q_UNLOCKED,
queue            3903 drivers/s390/net/qeth_core_main.c 	start_index = queue->next_buf_to_fill;
queue            3904 drivers/s390/net/qeth_core_main.c 	buffer = queue->bufs[queue->next_buf_to_fill];
queue            3910 drivers/s390/net/qeth_core_main.c 		atomic_set(&queue->state, QETH_OUT_Q_UNLOCKED);
queue            3917 drivers/s390/net/qeth_core_main.c 	qeth_switch_to_packing_if_needed(queue);
queue            3918 drivers/s390/net/qeth_core_main.c 	if (queue->do_pack) {
queue            3922 drivers/s390/net/qeth_core_main.c 		    queue->max_elements) {
queue            3926 drivers/s390/net/qeth_core_main.c 			queue->next_buf_to_fill =
queue            3927 drivers/s390/net/qeth_core_main.c 				(queue->next_buf_to_fill + 1) %
queue            3929 drivers/s390/net/qeth_core_main.c 			buffer = queue->bufs[queue->next_buf_to_fill];
queue            3934 drivers/s390/net/qeth_core_main.c 				qeth_flush_buffers(queue, start_index,
queue            3936 drivers/s390/net/qeth_core_main.c 				atomic_set(&queue->state,
queue            3945 drivers/s390/net/qeth_core_main.c 	    atomic_inc_return(&queue->used_buffers) >= QDIO_MAX_BUFFERS_PER_Q) {
queue            3949 drivers/s390/net/qeth_core_main.c 		QETH_TXQ_STAT_INC(queue, stopped);
queue            3956 drivers/s390/net/qeth_core_main.c 	if (queue->do_pack)
queue            3957 drivers/s390/net/qeth_core_main.c 		QETH_TXQ_STAT_INC(queue, skbs_pack);
queue            3958 drivers/s390/net/qeth_core_main.c 	if (!queue->do_pack || stopped || next_element >= queue->max_elements) {
queue            3961 drivers/s390/net/qeth_core_main.c 		queue->next_buf_to_fill = (queue->next_buf_to_fill + 1) %
queue            3966 drivers/s390/net/qeth_core_main.c 		qeth_flush_buffers(queue, start_index, flush_count);
queue            3967 drivers/s390/net/qeth_core_main.c 	else if (!atomic_read(&queue->set_pci_flags_count))
queue            3968 drivers/s390/net/qeth_core_main.c 		atomic_xchg(&queue->state, QETH_OUT_Q_LOCKED_FLUSH);
queue            3975 drivers/s390/net/qeth_core_main.c 	while (atomic_dec_return(&queue->state)) {
queue            3976 drivers/s390/net/qeth_core_main.c 		start_index = queue->next_buf_to_fill;
queue            3978 drivers/s390/net/qeth_core_main.c 		tmp = qeth_switch_to_nonpacking_if_needed(queue);
queue            3983 drivers/s390/net/qeth_core_main.c 		if (!tmp && !atomic_read(&queue->set_pci_flags_count))
queue            3984 drivers/s390/net/qeth_core_main.c 			tmp = qeth_prep_flush_pack_buffer(queue);
queue            3986 drivers/s390/net/qeth_core_main.c 			qeth_flush_buffers(queue, start_index, tmp);
queue            3993 drivers/s390/net/qeth_core_main.c 		QETH_TXQ_STAT_ADD(queue, bufs_pack, flush_count);
queue            3995 drivers/s390/net/qeth_core_main.c 	if (stopped && !qeth_out_queue_is_full(queue))
queue            4018 drivers/s390/net/qeth_core_main.c 	      struct qeth_qdio_out_q *queue, int ipv,
queue            4019 drivers/s390/net/qeth_core_main.c 	      void (*fill_header)(struct qeth_qdio_out_q *queue,
queue            4044 drivers/s390/net/qeth_core_main.c 	push_len = qeth_add_hw_header(queue, skb, &hdr, hw_hdr_len, proto_len,
queue            4054 drivers/s390/net/qeth_core_main.c 	fill_header(queue, hdr, skb, ipv, frame_len);
queue            4060 drivers/s390/net/qeth_core_main.c 		rc = __qeth_xmit(card, queue, skb, elements, hdr, data_offset,
queue            4065 drivers/s390/net/qeth_core_main.c 		rc = qeth_do_send_packet(card, queue, skb, hdr, data_offset,
queue            5216 drivers/s390/net/qeth_core_main.c static void qeth_iqd_tx_complete(struct qeth_qdio_out_q *queue,
queue            5219 drivers/s390/net/qeth_core_main.c 	struct qeth_qdio_out_buffer *buffer = queue->bufs[bidx];
queue            5221 drivers/s390/net/qeth_core_main.c 	struct qeth_card *card = queue->card;
queue            5223 drivers/s390/net/qeth_core_main.c 	if (queue->bufstates && (queue->bufstates[bidx].flags &
queue            5230 drivers/s390/net/qeth_core_main.c 			qeth_notify_skbs(queue, buffer, TX_NOTIFY_PENDING);
queue            5235 drivers/s390/net/qeth_core_main.c 		qeth_scrub_qdio_buffer(buffer->buffer, queue->max_elements);
queue            5236 drivers/s390/net/qeth_core_main.c 		if (qeth_init_qdio_out_buf(queue, bidx)) {
queue            5245 drivers/s390/net/qeth_core_main.c 		qeth_notify_skbs(queue, buffer,
queue            5247 drivers/s390/net/qeth_core_main.c 	qeth_clear_output_buffer(queue, buffer, error, budget);
queue            5252 drivers/s390/net/qeth_core_main.c 	struct qeth_qdio_out_q *queue = qeth_napi_to_out_queue(napi);
queue            5253 drivers/s390/net/qeth_core_main.c 	unsigned int queue_no = queue->queue_no;
queue            5254 drivers/s390/net/qeth_core_main.c 	struct qeth_card *card = queue->card;
queue            5267 drivers/s390/net/qeth_core_main.c 		if (qeth_out_queue_is_empty(queue)) {
queue            5274 drivers/s390/net/qeth_core_main.c 			QETH_TXQ_STAT_INC(queue, completion_yield);
queue            5285 drivers/s390/net/qeth_core_main.c 				qeth_tx_arm_timer(queue);
queue            5293 drivers/s390/net/qeth_core_main.c 			buffer = queue->bufs[bidx];
queue            5298 drivers/s390/net/qeth_core_main.c 			qeth_iqd_tx_complete(queue, bidx, error, budget);
queue            5299 drivers/s390/net/qeth_core_main.c 			qeth_cleanup_handled_pending(queue, bidx, false);
queue            5303 drivers/s390/net/qeth_core_main.c 		atomic_sub(completed, &queue->used_buffers);
queue            5312 drivers/s390/net/qeth_core_main.c 		    !qeth_out_queue_is_full(queue))
queue            6217 drivers/s390/net/qeth_core_main.c 	struct qeth_qdio_out_q *queue;
queue            6229 drivers/s390/net/qeth_core_main.c 		queue = card->qdio.out_qs[i];
queue            6231 drivers/s390/net/qeth_core_main.c 		stats->tx_packets += queue->stats.tx_packets;
queue            6232 drivers/s390/net/qeth_core_main.c 		stats->tx_bytes += queue->stats.tx_bytes;
queue            6233 drivers/s390/net/qeth_core_main.c 		stats->tx_errors += queue->stats.tx_errors;
queue            6234 drivers/s390/net/qeth_core_main.c 		stats->tx_dropped += queue->stats.tx_dropped;
queue            6264 drivers/s390/net/qeth_core_main.c 		struct qeth_qdio_out_q *queue;
queue            6267 drivers/s390/net/qeth_core_main.c 		qeth_for_each_output_queue(card, queue, i) {
queue            6268 drivers/s390/net/qeth_core_main.c 			netif_tx_napi_add(dev, &queue->napi, qeth_tx_poll,
queue            6270 drivers/s390/net/qeth_core_main.c 			napi_enable(&queue->napi);
queue            6271 drivers/s390/net/qeth_core_main.c 			napi_schedule(&queue->napi);
queue            6286 drivers/s390/net/qeth_core_main.c 		struct qeth_qdio_out_q *queue;
queue            6290 drivers/s390/net/qeth_core_main.c 		qeth_for_each_output_queue(card, queue, i) {
queue            6291 drivers/s390/net/qeth_core_main.c 			napi_disable(&queue->napi);
queue            6292 drivers/s390/net/qeth_core_main.c 			del_timer_sync(&queue->timer);
queue            6299 drivers/s390/net/qeth_core_main.c 		qeth_for_each_output_queue(card, queue, i)
queue            6300 drivers/s390/net/qeth_core_main.c 			netif_napi_del(&queue->napi);
queue             340 drivers/s390/net/qeth_core_sys.c 	struct qeth_qdio_out_q *queue;
queue             355 drivers/s390/net/qeth_core_sys.c 			queue = card->qdio.out_qs[i];
queue             356 drivers/s390/net/qeth_core_sys.c 			if (!queue)
queue             358 drivers/s390/net/qeth_core_sys.c 			memset(&queue->stats, 0, sizeof(queue->stats));
queue             165 drivers/s390/net/qeth_l2_main.c static void qeth_l2_fill_header(struct qeth_qdio_out_q *queue,
queue             552 drivers/s390/net/qeth_l2_main.c 			    struct qeth_qdio_out_q *queue)
queue             575 drivers/s390/net/qeth_l2_main.c 	if (elements > queue->max_elements) {
queue             580 drivers/s390/net/qeth_l2_main.c 	rc = qeth_do_send_packet(card, queue, skb, hdr, hd_len, hd_len,
queue             593 drivers/s390/net/qeth_l2_main.c 	struct qeth_qdio_out_q *queue;
queue             600 drivers/s390/net/qeth_l2_main.c 	queue = card->qdio.out_qs[txq];
queue             603 drivers/s390/net/qeth_l2_main.c 		rc = qeth_l2_xmit_osn(card, skb, queue);
queue             605 drivers/s390/net/qeth_l2_main.c 		rc = qeth_xmit(card, skb, queue, qeth_get_ip_version(skb),
queue             611 drivers/s390/net/qeth_l2_main.c 	QETH_TXQ_STAT_INC(queue, tx_dropped);
queue            1931 drivers/s390/net/qeth_l3_main.c static void qeth_l3_fill_header(struct qeth_qdio_out_q *queue,
queue            1937 drivers/s390/net/qeth_l3_main.c 	struct qeth_card *card = queue->card;
queue            2023 drivers/s390/net/qeth_l3_main.c 			struct qeth_qdio_out_q *queue, int ipv)
queue            2037 drivers/s390/net/qeth_l3_main.c 	return qeth_xmit(card, skb, queue, ipv, qeth_l3_fill_header);
queue            2046 drivers/s390/net/qeth_l3_main.c 	struct qeth_qdio_out_q *queue;
queue            2052 drivers/s390/net/qeth_l3_main.c 		queue = card->qdio.out_qs[qeth_iqd_translate_txq(dev, txq)];
queue            2061 drivers/s390/net/qeth_l3_main.c 		queue = card->qdio.out_qs[txq];
queue            2069 drivers/s390/net/qeth_l3_main.c 		rc = qeth_l3_xmit(card, skb, queue, ipv);
queue            2071 drivers/s390/net/qeth_l3_main.c 		rc = qeth_xmit(card, skb, queue, ipv, qeth_l3_fill_header);
queue            2077 drivers/s390/net/qeth_l3_main.c 	QETH_TXQ_STAT_INC(queue, tx_dropped);
queue              86 drivers/s390/virtio/virtio_ccw.c 	__u64 queue;
queue             461 drivers/s390/virtio/virtio_ccw.c 		info->info_block->l.queue = 0;
queue             520 drivers/s390/virtio/virtio_ccw.c 	u64 queue;
queue             558 drivers/s390/virtio/virtio_ccw.c 	queue = virtqueue_get_desc_addr(vq);
queue             560 drivers/s390/virtio/virtio_ccw.c 		info->info_block->l.queue = queue;
queue             566 drivers/s390/virtio/virtio_ccw.c 		info->info_block->s.desc = queue;
queue             289 drivers/scsi/3w-9xxx.c 	int first_reset = 0, queue = 0, retval = 1;
queue             334 drivers/scsi/3w-9xxx.c 		queue = 0;
queue             348 drivers/scsi/3w-9xxx.c 				queue = 1;
queue             353 drivers/scsi/3w-9xxx.c 			queue = 1;
queue             357 drivers/scsi/3w-9xxx.c 		if (queue)
queue             565 drivers/scsi/3w-sas.c 	int first_reset = 0, queue = 0, retval = 1;
queue             605 drivers/scsi/3w-sas.c 		queue = 0;
queue             619 drivers/scsi/3w-sas.c 				queue = 1;
queue             624 drivers/scsi/3w-sas.c 			queue = 1;
queue             628 drivers/scsi/3w-sas.c 		if (queue)
queue             683 drivers/scsi/3w-xxxx.c 	int queue = 0;
queue             764 drivers/scsi/3w-xxxx.c 			queue = 0;
queue             780 drivers/scsi/3w-xxxx.c 						queue = 1;
queue             798 drivers/scsi/3w-xxxx.c 					queue = 1;
queue             802 drivers/scsi/3w-xxxx.c 			if (queue == 1) {
queue            1010 drivers/scsi/aacraid/aacraid.h 	struct aac_queue queue[8];
queue             398 drivers/scsi/aacraid/comminit.c 	comm->queue[HostNormCmdQueue].base = queues;
queue             399 drivers/scsi/aacraid/comminit.c 	aac_queue_init(dev, &comm->queue[HostNormCmdQueue], headers, HOST_NORM_CMD_ENTRIES);
queue             404 drivers/scsi/aacraid/comminit.c 	comm->queue[HostHighCmdQueue].base = queues;
queue             405 drivers/scsi/aacraid/comminit.c 	aac_queue_init(dev, &comm->queue[HostHighCmdQueue], headers, HOST_HIGH_CMD_ENTRIES);
queue             411 drivers/scsi/aacraid/comminit.c 	comm->queue[AdapNormCmdQueue].base = queues;
queue             412 drivers/scsi/aacraid/comminit.c 	aac_queue_init(dev, &comm->queue[AdapNormCmdQueue], headers, ADAP_NORM_CMD_ENTRIES);
queue             418 drivers/scsi/aacraid/comminit.c 	comm->queue[AdapHighCmdQueue].base = queues;
queue             419 drivers/scsi/aacraid/comminit.c 	aac_queue_init(dev, &comm->queue[AdapHighCmdQueue], headers, ADAP_HIGH_CMD_ENTRIES);
queue             425 drivers/scsi/aacraid/comminit.c 	comm->queue[HostNormRespQueue].base = queues;
queue             426 drivers/scsi/aacraid/comminit.c 	aac_queue_init(dev, &comm->queue[HostNormRespQueue], headers, HOST_NORM_RESP_ENTRIES);
queue             431 drivers/scsi/aacraid/comminit.c 	comm->queue[HostHighRespQueue].base = queues;
queue             432 drivers/scsi/aacraid/comminit.c 	aac_queue_init(dev, &comm->queue[HostHighRespQueue], headers, HOST_HIGH_RESP_ENTRIES);
queue             438 drivers/scsi/aacraid/comminit.c 	comm->queue[AdapNormRespQueue].base = queues;
queue             439 drivers/scsi/aacraid/comminit.c 	aac_queue_init(dev, &comm->queue[AdapNormRespQueue], headers, ADAP_NORM_RESP_ENTRIES);
queue             445 drivers/scsi/aacraid/comminit.c 	comm->queue[AdapHighRespQueue].base = queues;
queue             446 drivers/scsi/aacraid/comminit.c 	aac_queue_init(dev, &comm->queue[AdapHighRespQueue], headers, ADAP_HIGH_RESP_ENTRIES);
queue             448 drivers/scsi/aacraid/comminit.c 	comm->queue[AdapNormCmdQueue].lock = comm->queue[HostNormRespQueue].lock;
queue             449 drivers/scsi/aacraid/comminit.c 	comm->queue[AdapHighCmdQueue].lock = comm->queue[HostHighRespQueue].lock;
queue             450 drivers/scsi/aacraid/comminit.c 	comm->queue[AdapNormRespQueue].lock = comm->queue[HostNormCmdQueue].lock;
queue             451 drivers/scsi/aacraid/comminit.c 	comm->queue[AdapHighRespQueue].lock = comm->queue[HostHighCmdQueue].lock;
queue             369 drivers/scsi/aacraid/commsup.c 	q = &dev->queues->queue[qid];
queue             650 drivers/scsi/aacraid/commsup.c 					struct aac_queue * q = &dev->queues->queue[AdapNormCmdQueue];
queue             918 drivers/scsi/aacraid/commsup.c 			q = &dev->queues->queue[AdapNormRespQueue];
queue            2175 drivers/scsi/aacraid/commsup.c 	t_lock = dev->queues->queue[HostNormCmdQueue].lock;
queue            2178 drivers/scsi/aacraid/commsup.c 	while (!list_empty(&(dev->queues->queue[HostNormCmdQueue].cmdq))) {
queue            2187 drivers/scsi/aacraid/commsup.c 		entry = dev->queues->queue[HostNormCmdQueue].cmdq.next;
queue            2190 drivers/scsi/aacraid/commsup.c 		t_lock = dev->queues->queue[HostNormCmdQueue].lock;
queue            2282 drivers/scsi/aacraid/commsup.c 		t_lock = dev->queues->queue[HostNormCmdQueue].lock;
queue            2288 drivers/scsi/aacraid/commsup.c 	t_lock = dev->queues->queue[HostNormCmdQueue].lock;
queue            2457 drivers/scsi/aacraid/commsup.c 	add_wait_queue(&dev->queues->queue[HostNormCmdQueue].cmdready, &wait);
queue            2529 drivers/scsi/aacraid/commsup.c 		remove_wait_queue(&dev->queues->queue[HostNormCmdQueue].cmdready, &wait);
queue              72 drivers/scsi/aacraid/dpcsup.c 		atomic_dec(&dev->queues->queue[AdapNormCmdQueue].numpending);
queue             278 drivers/scsi/aacraid/dpcsup.c 		struct aac_queue *q = &dev->queues->queue[HostNormCmdQueue];
queue             344 drivers/scsi/aacraid/dpcsup.c 		atomic_dec(&dev->queues->queue[AdapNormCmdQueue].numpending);
queue              55 drivers/scsi/aacraid/rx.c 			aac_command_normal(&dev->queues->queue[HostNormCmdQueue]);
queue              59 drivers/scsi/aacraid/rx.c 			aac_response_normal(&dev->queues->queue[HostNormRespQueue]);
queue             391 drivers/scsi/aacraid/rx.c 	struct aac_queue *q = &dev->queues->queue[AdapNormCmdQueue];
queue             414 drivers/scsi/aacraid/rx.c 	struct aac_queue *q = &dev->queues->queue[AdapNormCmdQueue];
queue              55 drivers/scsi/aacraid/sa.c 			aac_command_normal(&dev->queues->queue[HostNormCmdQueue]);
queue              58 drivers/scsi/aacraid/sa.c 			aac_response_normal(&dev->queues->queue[HostNormRespQueue]);
queue             477 drivers/scsi/aacraid/src.c 	struct aac_queue *q = &dev->queues->queue[AdapNormCmdQueue];
queue             241 drivers/scsi/aic7xxx/aic7xxx_core.c 					       struct scb_tailq *queue);
queue             932 drivers/scsi/aic7xxx/aic7xxx_core.c ahc_run_untagged_queue(struct ahc_softc *ahc, struct scb_tailq *queue)
queue             939 drivers/scsi/aic7xxx/aic7xxx_core.c 	if ((scb = TAILQ_FIRST(queue)) != NULL
queue              55 drivers/scsi/arm/queue.c int queue_initialise (Queue_t *queue)
queue              60 drivers/scsi/arm/queue.c 	spin_lock_init(&queue->queue_lock);
queue              61 drivers/scsi/arm/queue.c 	INIT_LIST_HEAD(&queue->head);
queue              62 drivers/scsi/arm/queue.c 	INIT_LIST_HEAD(&queue->free);
queue              70 drivers/scsi/arm/queue.c 	queue->alloc = q = kmalloc_array(nqueues, sizeof(QE_t), GFP_KERNEL);
queue              75 drivers/scsi/arm/queue.c 			list_add(&q->list, &queue->free);
queue              79 drivers/scsi/arm/queue.c 	return queue->alloc != NULL;
queue              87 drivers/scsi/arm/queue.c void queue_free (Queue_t *queue)
queue              89 drivers/scsi/arm/queue.c 	if (!list_empty(&queue->head))
queue              90 drivers/scsi/arm/queue.c 		printk(KERN_WARNING "freeing non-empty queue %p\n", queue);
queue              91 drivers/scsi/arm/queue.c 	kfree(queue->alloc);
queue             103 drivers/scsi/arm/queue.c int __queue_add(Queue_t *queue, struct scsi_cmnd *SCpnt, int head)
queue             110 drivers/scsi/arm/queue.c 	spin_lock_irqsave(&queue->queue_lock, flags);
queue             111 drivers/scsi/arm/queue.c 	if (list_empty(&queue->free))
queue             114 drivers/scsi/arm/queue.c 	l = queue->free.next;
queue             124 drivers/scsi/arm/queue.c 		list_add(l, &queue->head);
queue             126 drivers/scsi/arm/queue.c 		list_add_tail(l, &queue->head);
queue             130 drivers/scsi/arm/queue.c 	spin_unlock_irqrestore(&queue->queue_lock, flags);
queue             134 drivers/scsi/arm/queue.c static struct scsi_cmnd *__queue_remove(Queue_t *queue, struct list_head *ent)
queue             146 drivers/scsi/arm/queue.c 	list_add(ent, &queue->free);
queue             158 drivers/scsi/arm/queue.c struct scsi_cmnd *queue_remove_exclude(Queue_t *queue, unsigned long *exclude)
queue             164 drivers/scsi/arm/queue.c 	spin_lock_irqsave(&queue->queue_lock, flags);
queue             165 drivers/scsi/arm/queue.c 	list_for_each(l, &queue->head) {
queue             169 drivers/scsi/arm/queue.c 			SCpnt = __queue_remove(queue, l);
queue             173 drivers/scsi/arm/queue.c 	spin_unlock_irqrestore(&queue->queue_lock, flags);
queue             184 drivers/scsi/arm/queue.c struct scsi_cmnd *queue_remove(Queue_t *queue)
queue             189 drivers/scsi/arm/queue.c 	spin_lock_irqsave(&queue->queue_lock, flags);
queue             190 drivers/scsi/arm/queue.c 	if (!list_empty(&queue->head))
queue             191 drivers/scsi/arm/queue.c 		SCpnt = __queue_remove(queue, queue->head.next);
queue             192 drivers/scsi/arm/queue.c 	spin_unlock_irqrestore(&queue->queue_lock, flags);
queue             206 drivers/scsi/arm/queue.c struct scsi_cmnd *queue_remove_tgtluntag(Queue_t *queue, int target, int lun,
queue             213 drivers/scsi/arm/queue.c 	spin_lock_irqsave(&queue->queue_lock, flags);
queue             214 drivers/scsi/arm/queue.c 	list_for_each(l, &queue->head) {
queue             218 drivers/scsi/arm/queue.c 			SCpnt = __queue_remove(queue, l);
queue             222 drivers/scsi/arm/queue.c 	spin_unlock_irqrestore(&queue->queue_lock, flags);
queue             234 drivers/scsi/arm/queue.c void queue_remove_all_target(Queue_t *queue, int target)
queue             239 drivers/scsi/arm/queue.c 	spin_lock_irqsave(&queue->queue_lock, flags);
queue             240 drivers/scsi/arm/queue.c 	list_for_each(l, &queue->head) {
queue             243 drivers/scsi/arm/queue.c 			__queue_remove(queue, l);
queue             245 drivers/scsi/arm/queue.c 	spin_unlock_irqrestore(&queue->queue_lock, flags);
queue             257 drivers/scsi/arm/queue.c int queue_probetgtlun (Queue_t *queue, int target, int lun)
queue             263 drivers/scsi/arm/queue.c 	spin_lock_irqsave(&queue->queue_lock, flags);
queue             264 drivers/scsi/arm/queue.c 	list_for_each(l, &queue->head) {
queue             271 drivers/scsi/arm/queue.c 	spin_unlock_irqrestore(&queue->queue_lock, flags);
queue             283 drivers/scsi/arm/queue.c int queue_remove_cmd(Queue_t *queue, struct scsi_cmnd *SCpnt)
queue             289 drivers/scsi/arm/queue.c 	spin_lock_irqsave(&queue->queue_lock, flags);
queue             290 drivers/scsi/arm/queue.c 	list_for_each(l, &queue->head) {
queue             293 drivers/scsi/arm/queue.c 			__queue_remove(queue, l);
queue             298 drivers/scsi/arm/queue.c 	spin_unlock_irqrestore(&queue->queue_lock, flags);
queue              22 drivers/scsi/arm/queue.h extern int queue_initialise (Queue_t *queue);
queue              29 drivers/scsi/arm/queue.h extern void queue_free (Queue_t *queue);
queue              37 drivers/scsi/arm/queue.h extern struct scsi_cmnd *queue_remove (Queue_t *queue);
queue              46 drivers/scsi/arm/queue.h extern struct scsi_cmnd *queue_remove_exclude(Queue_t *queue,
queue              49 drivers/scsi/arm/queue.h #define queue_add_cmd_ordered(queue,SCpnt) \
queue              50 drivers/scsi/arm/queue.h 	__queue_add(queue,SCpnt,(SCpnt)->cmnd[0] == REQUEST_SENSE)
queue              51 drivers/scsi/arm/queue.h #define queue_add_cmd_tail(queue,SCpnt) \
queue              52 drivers/scsi/arm/queue.h 	__queue_add(queue,SCpnt,0)
queue              61 drivers/scsi/arm/queue.h extern int __queue_add(Queue_t *queue, struct scsi_cmnd *SCpnt, int head);
queue              72 drivers/scsi/arm/queue.h extern struct scsi_cmnd *queue_remove_tgtluntag(Queue_t *queue, int target,
queue              82 drivers/scsi/arm/queue.h extern void queue_remove_all_target(Queue_t *queue, int target);
queue              93 drivers/scsi/arm/queue.h extern int queue_probetgtlun (Queue_t *queue, int target, int lun);
queue             102 drivers/scsi/arm/queue.h int queue_remove_cmd(Queue_t *queue, struct scsi_cmnd *SCpnt);
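-- Note: the drivers/scsi/arm/queue.c and queue.h entries above implement a spinlock-protected command queue backed by a preallocated pool of entries kept on a free list; __queue_add() takes an entry from the free list and __queue_remove() returns it there. A minimal userspace sketch of that pattern follows (hypothetical names; a pthread mutex stands in for the kernel spinlock, and a singly-linked LIFO stands in for the driver's list_head-based FIFO):

    #include <pthread.h>
    #include <stdlib.h>

    struct qe {
        struct qe *next;
        void *payload;              /* stands in for struct scsi_cmnd * */
    };

    struct queue {
        struct qe *head;            /* queued entries (LIFO here for brevity) */
        struct qe *free;            /* spare entries */
        struct qe *pool;            /* one allocation backs both lists */
        pthread_mutex_t lock;
    };

    static int queue_initialise(struct queue *q, int nqueues)
    {
        pthread_mutex_init(&q->lock, NULL);
        q->head = q->free = NULL;
        q->pool = calloc(nqueues, sizeof(*q->pool));
        if (!q->pool)
            return 0;
        for (int i = 0; i < nqueues; i++) {     /* seed the free list */
            q->pool[i].next = q->free;
            q->free = &q->pool[i];
        }
        return 1;
    }

    static int queue_add(struct queue *q, void *payload)
    {
        int ret = 0;

        pthread_mutex_lock(&q->lock);
        if (q->free) {              /* move one entry: free list -> queue */
            struct qe *e = q->free;
            q->free = e->next;
            e->payload = payload;
            e->next = q->head;
            q->head = e;
            ret = 1;
        }
        pthread_mutex_unlock(&q->lock);
        return ret;
    }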
queue             766 drivers/scsi/bfa/bfa_core.c 	int	queue;
queue             777 drivers/scsi/bfa/bfa_core.c 		for (queue = 0; queue < BFI_IOC_MAX_CQS; queue++)
queue             778 drivers/scsi/bfa/bfa_core.c 			bfa_isr_rspq(bfa, queue);
queue             790 drivers/scsi/bfa/bfa_core.c 		for (queue = 0; queue < BFI_IOC_MAX_CQS; queue++)
queue             791 drivers/scsi/bfa/bfa_core.c 			bfa_isr_reqq(bfa, queue);
queue             804 drivers/scsi/bfa/bfa_core.c 	int queue;
queue             817 drivers/scsi/bfa/bfa_core.c 		for (queue = 0; queue < BFI_IOC_MAX_CQS; queue++)
queue             818 drivers/scsi/bfa/bfa_core.c 			if (bfa_isr_rspq(bfa, queue))
queue             830 drivers/scsi/bfa/bfa_core.c 		for (queue = 0; queue < BFI_IOC_MAX_CQS; queue++)
queue             831 drivers/scsi/bfa/bfa_core.c 			bfa_isr_reqq(bfa, queue);
queue             539 drivers/scsi/bfa/bfa_ioc.h 	u8	queue;
queue            5762 drivers/scsi/bfa/bfa_svc.c 		res->queue  = fcdiag->qtest.all;
queue            5776 drivers/scsi/bfa/bfa_svc.c 	req = bfa_reqq_next(fcdiag->bfa, fcdiag->qtest.queue);
queue            5787 drivers/scsi/bfa/bfa_svc.c 	bfa_trc(fcdiag, fcdiag->qtest.queue);
queue            5789 drivers/scsi/bfa/bfa_svc.c 	bfa_reqq_produce(fcdiag->bfa, fcdiag->qtest.queue, req->mh);
queue            5826 drivers/scsi/bfa/bfa_svc.c 			fcdiag->qtest.queue < (BFI_IOC_MAX_CQS - 1)) {
queue            5828 drivers/scsi/bfa/bfa_svc.c 			fcdiag->qtest.queue++;
queue            5842 drivers/scsi/bfa/bfa_svc.c 	res->queue = fcdiag->qtest.queue;
queue            6056 drivers/scsi/bfa/bfa_svc.c bfa_fcdiag_queuetest(struct bfa_s *bfa, u32 force, u32 queue,
queue            6063 drivers/scsi/bfa/bfa_svc.c 	bfa_trc(fcdiag, queue);
queue            6086 drivers/scsi/bfa/bfa_svc.c 	if (queue < BFI_IOC_MAX_CQS) {
queue            6087 drivers/scsi/bfa/bfa_svc.c 		fcdiag->qtest.result->queue  = (u8)queue;
queue            6088 drivers/scsi/bfa/bfa_svc.c 		fcdiag->qtest.queue = (u8)queue;
queue            6091 drivers/scsi/bfa/bfa_svc.c 		fcdiag->qtest.result->queue  = 0;
queue            6092 drivers/scsi/bfa/bfa_svc.c 		fcdiag->qtest.queue = 0;
queue             696 drivers/scsi/bfa/bfa_svc.h 	u8	queue;
queue             744 drivers/scsi/bfa/bfa_svc.h 			u32 queue, struct bfa_diag_qtest_result_s *result,
queue            1713 drivers/scsi/bfa/bfad_bsg.c 				iocmd->queue, &iocmd->result,
queue             620 drivers/scsi/bfa/bfad_bsg.h 	u32	queue;
queue             161 drivers/scsi/cxlflash/common.h 	struct list_head queue;
queue             184 drivers/scsi/cxlflash/common.h 	INIT_LIST_HEAD(&afuc->queue);
queue             205 drivers/scsi/cxlflash/main.c 		if (!list_empty(&cmd->queue))
queue             484 drivers/scsi/cxlflash/main.c 	INIT_LIST_HEAD(&cmd->queue);
queue            1403 drivers/scsi/cxlflash/main.c 		list_add_tail(&cmd->queue, doneq);
queue            1438 drivers/scsi/cxlflash/main.c 	list_for_each_entry_safe(cmd, tmp, doneq, queue)
queue            2335 drivers/scsi/cxlflash/main.c 	INIT_LIST_HEAD(&cmd->queue);
queue             903 drivers/scsi/esas2r/esas2r_main.c 				     struct list_head *queue)
queue             910 drivers/scsi/esas2r/esas2r_main.c 	list_for_each_safe(element, next, queue) {
queue             917 drivers/scsi/esas2r/esas2r_main.c 			if (queue == &a->active_list) {
queue             979 drivers/scsi/esas2r/esas2r_main.c 	struct list_head *queue;
queue            1001 drivers/scsi/esas2r/esas2r_main.c 	queue = &a->defer_list;
queue            1005 drivers/scsi/esas2r/esas2r_main.c 	result = esas2r_check_active_queue(a, &abort_request, cmd, queue);
queue            1010 drivers/scsi/esas2r/esas2r_main.c 	} else if (result == 2 && (queue == &a->defer_list)) {
queue            1011 drivers/scsi/esas2r/esas2r_main.c 		queue = &a->active_list;
queue             670 drivers/scsi/hisi_sas/hisi_sas_main.c 			int queue = i % hisi_hba->queue_count;
queue             671 drivers/scsi/hisi_sas/hisi_sas_main.c 			struct hisi_sas_dq *dq = &hisi_hba->dq[queue];
queue            1496 drivers/scsi/hisi_sas/hisi_sas_v1_hw.c 	int queue = cq->id;
queue            1499 drivers/scsi/hisi_sas/hisi_sas_v1_hw.c 			hisi_hba->complete_hdr[queue];
queue            1503 drivers/scsi/hisi_sas/hisi_sas_v1_hw.c 	hisi_sas_write32(hisi_hba, OQ_INT_SRC, 1 << queue);
queue            1505 drivers/scsi/hisi_sas/hisi_sas_v1_hw.c 			COMPL_Q_0_WR_PTR + (0x14 * queue));
queue            1523 drivers/scsi/hisi_sas/hisi_sas_v1_hw.c 		slot->cmplt_queue = queue;
queue            1532 drivers/scsi/hisi_sas/hisi_sas_v1_hw.c 	hisi_sas_write32(hisi_hba, COMPL_Q_0_RD_PTR + (0x14 * queue), rd_point);
queue             861 drivers/scsi/hisi_sas/hisi_sas_v2_hw.c 			int queue = i % hisi_hba->queue_count;
queue             862 drivers/scsi/hisi_sas/hisi_sas_v2_hw.c 			struct hisi_sas_dq *dq = &hisi_hba->dq[queue];
queue            3115 drivers/scsi/hisi_sas/hisi_sas_v2_hw.c 	int queue = cq->id;
queue            3120 drivers/scsi/hisi_sas/hisi_sas_v2_hw.c 	complete_queue = hisi_hba->complete_hdr[queue];
queue            3123 drivers/scsi/hisi_sas/hisi_sas_v2_hw.c 				   (0x14 * queue));
queue            3154 drivers/scsi/hisi_sas/hisi_sas_v2_hw.c 				slot->cmplt_queue = queue;
queue            3166 drivers/scsi/hisi_sas/hisi_sas_v2_hw.c 			slot->cmplt_queue = queue;
queue            3176 drivers/scsi/hisi_sas/hisi_sas_v2_hw.c 	hisi_sas_write32(hisi_hba, COMPL_Q_0_RD_PTR + (0x14 * queue), rd_point);
queue            3183 drivers/scsi/hisi_sas/hisi_sas_v2_hw.c 	int queue = cq->id;
queue            3185 drivers/scsi/hisi_sas/hisi_sas_v2_hw.c 	hisi_sas_write32(hisi_hba, OQ_INT_SRC, 1 << queue);
queue            2300 drivers/scsi/hisi_sas/hisi_sas_v3_hw.c 	int queue = cq->id;
queue            2302 drivers/scsi/hisi_sas/hisi_sas_v3_hw.c 	complete_queue = hisi_hba->complete_hdr[queue];
queue            2305 drivers/scsi/hisi_sas/hisi_sas_v3_hw.c 				   (0x14 * queue));
queue            2320 drivers/scsi/hisi_sas/hisi_sas_v3_hw.c 			slot->cmplt_queue = queue;
queue            2331 drivers/scsi/hisi_sas/hisi_sas_v3_hw.c 	hisi_sas_write32(hisi_hba, COMPL_Q_0_RD_PTR + (0x14 * queue), rd_point);
queue            2338 drivers/scsi/hisi_sas/hisi_sas_v3_hw.c 	int queue = cq->id;
queue            2340 drivers/scsi/hisi_sas/hisi_sas_v3_hw.c 	hisi_sas_write32(hisi_hba, OQ_INT_SRC, 1 << queue);
queue            2350 drivers/scsi/hisi_sas/hisi_sas_v3_hw.c 	int queue, cpu;
queue            2352 drivers/scsi/hisi_sas/hisi_sas_v3_hw.c 	for (queue = 0; queue < nvecs; queue++) {
queue            2353 drivers/scsi/hisi_sas/hisi_sas_v3_hw.c 		struct hisi_sas_cq *cq = &hisi_hba->cq[queue];
queue            2355 drivers/scsi/hisi_sas/hisi_sas_v3_hw.c 		mask = pci_irq_get_affinity(hisi_hba->pci_dev, queue +
queue            2361 drivers/scsi/hisi_sas/hisi_sas_v3_hw.c 			hisi_hba->reply_map[cpu] = queue;
queue            6959 drivers/scsi/hpsa.c static struct ctlr_info *queue_to_hba(u8 *queue)
queue            6961 drivers/scsi/hpsa.c 	return container_of((queue - *queue), struct ctlr_info, q[0]);
queue            6964 drivers/scsi/hpsa.c static irqreturn_t hpsa_intx_discard_completions(int irq, void *queue)
queue            6966 drivers/scsi/hpsa.c 	struct ctlr_info *h = queue_to_hba(queue);
queue            6967 drivers/scsi/hpsa.c 	u8 q = *(u8 *) queue;
queue            6984 drivers/scsi/hpsa.c static irqreturn_t hpsa_msix_discard_completions(int irq, void *queue)
queue            6986 drivers/scsi/hpsa.c 	struct ctlr_info *h = queue_to_hba(queue);
queue            6988 drivers/scsi/hpsa.c 	u8 q = *(u8 *) queue;
queue            7000 drivers/scsi/hpsa.c static irqreturn_t do_hpsa_intr_intx(int irq, void *queue)
queue            7002 drivers/scsi/hpsa.c 	struct ctlr_info *h = queue_to_hba((u8 *) queue);
queue            7004 drivers/scsi/hpsa.c 	u8 q = *(u8 *) queue;
queue            7019 drivers/scsi/hpsa.c static irqreturn_t do_hpsa_intr_msi(int irq, void *queue)
queue            7021 drivers/scsi/hpsa.c 	struct ctlr_info *h = queue_to_hba(queue);
queue            7023 drivers/scsi/hpsa.c 	u8 q = *(u8 *) queue;
queue            7441 drivers/scsi/hpsa.c 	unsigned int queue, cpu;
queue            7443 drivers/scsi/hpsa.c 	for (queue = 0; queue < h->msix_vectors; queue++) {
queue            7444 drivers/scsi/hpsa.c 		mask = pci_irq_get_affinity(h->pdev, queue);
queue            7449 drivers/scsi/hpsa.c 			h->reply_map[cpu] = queue;
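-- Note: hpsa.c's queue_to_hba() above recovers the controller structure from a pointer into its per-queue index array: each h->q[i] slot stores its own index i, so (queue - *queue) lands back on &h->q[0], and container_of() does the rest. A standalone demonstration of that pointer trick (hypothetical struct; offsetof() spelled out in place of the kernel's container_of()):

    #include <stddef.h>
    #include <stdio.h>

    struct ctlr {
        int id;
        unsigned char q[8];     /* q[i] == i, one slot per interrupt vector */
    };

    /* Each slot stores its own index, so subtracting it walks back to q[0]. */
    static struct ctlr *queue_to_ctlr(unsigned char *queue)
    {
        unsigned char *q0 = queue - *queue;
        return (struct ctlr *)((char *)q0 - offsetof(struct ctlr, q));
    }

    int main(void)
    {
        struct ctlr h = { .id = 42 };
        for (unsigned char i = 0; i < 8; i++)
            h.q[i] = i;
        printf("%d\n", queue_to_ctlr(&h.q[5])->id);     /* prints 42 */
        return 0;
    }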
queue             552 drivers/scsi/ibmvscsi/ibmvfc.c 	list_for_each_entry(tgt, &vhost->targets, queue)
queue             585 drivers/scsi/ibmvscsi/ibmvfc.c 		list_for_each_entry(tgt, &vhost->targets, queue)
queue             762 drivers/scsi/ibmvscsi/ibmvfc.c 	list_add_tail(&evt->queue, &vhost->free);
queue             803 drivers/scsi/ibmvscsi/ibmvfc.c 	list_del(&evt->queue);
queue             822 drivers/scsi/ibmvscsi/ibmvfc.c 	list_for_each_entry_safe(evt, pos, &vhost->sent, queue)
queue             908 drivers/scsi/ibmvscsi/ibmvfc.c 	list_for_each_entry(tgt, &vhost->targets, queue)
queue            1223 drivers/scsi/ibmvscsi/ibmvfc.c 		list_add_tail(&evt->queue, &vhost->free);
queue            1242 drivers/scsi/ibmvscsi/ibmvfc.c 		list_del(&pool->events[i].queue);
queue            1268 drivers/scsi/ibmvscsi/ibmvfc.c 	evt = list_entry(vhost->free.next, struct ibmvfc_event, queue);
queue            1270 drivers/scsi/ibmvscsi/ibmvfc.c 	list_del(&evt->queue);
queue            1414 drivers/scsi/ibmvscsi/ibmvfc.c 	list_add_tail(&evt->queue, &vhost->sent);
queue            1426 drivers/scsi/ibmvscsi/ibmvfc.c 		list_del(&evt->queue);
queue            1501 drivers/scsi/ibmvscsi/ibmvfc.c 	list_for_each_entry(tgt, &vhost->targets, queue) {
queue            1760 drivers/scsi/ibmvscsi/ibmvfc.c 	list_for_each_entry(tgt, &vhost->targets, queue) {
queue            2098 drivers/scsi/ibmvscsi/ibmvfc.c 		list_for_each_entry(evt, &vhost->sent, queue) {
queue            2112 drivers/scsi/ibmvscsi/ibmvfc.c 				list_for_each_entry(evt, &vhost->sent, queue) {
queue            2157 drivers/scsi/ibmvscsi/ibmvfc.c 	list_for_each_entry(evt, &vhost->sent, queue) {
queue            2288 drivers/scsi/ibmvscsi/ibmvfc.c 	list_for_each_entry(evt, &vhost->sent, queue) {
queue            2677 drivers/scsi/ibmvscsi/ibmvfc.c 		list_for_each_entry(tgt, &vhost->targets, queue) {
queue            2788 drivers/scsi/ibmvscsi/ibmvfc.c 	list_del(&evt->queue);
queue            3130 drivers/scsi/ibmvscsi/ibmvfc.c 	struct ibmvfc_crq_queue *queue = &vhost->crq;
queue            3133 drivers/scsi/ibmvscsi/ibmvfc.c 	crq = &queue->msgs[queue->cur];
queue            3135 drivers/scsi/ibmvscsi/ibmvfc.c 		if (++queue->cur == queue->size)
queue            3136 drivers/scsi/ibmvscsi/ibmvfc.c 			queue->cur = 0;
queue            3887 drivers/scsi/ibmvscsi/ibmvfc.c 	list_for_each_entry(tgt, &vhost->targets, queue) {
queue            3907 drivers/scsi/ibmvscsi/ibmvfc.c 	list_add_tail(&tgt->queue, &vhost->targets);
queue            4182 drivers/scsi/ibmvscsi/ibmvfc.c 	list_for_each_entry(tgt, &vhost->targets, queue) {
queue            4213 drivers/scsi/ibmvscsi/ibmvfc.c 		list_for_each_entry(tgt, &vhost->targets, queue)
queue            4216 drivers/scsi/ibmvscsi/ibmvfc.c 		list_for_each_entry(tgt, &vhost->targets, queue)
queue            4288 drivers/scsi/ibmvscsi/ibmvfc.c 		list_del(&tgt->queue);
queue            4376 drivers/scsi/ibmvscsi/ibmvfc.c 		list_for_each_entry(tgt, &vhost->targets, queue)
queue            4381 drivers/scsi/ibmvscsi/ibmvfc.c 		list_for_each_entry(tgt, &vhost->targets, queue) {
queue            4393 drivers/scsi/ibmvscsi/ibmvfc.c 		list_for_each_entry(tgt, &vhost->targets, queue) {
queue            4398 drivers/scsi/ibmvscsi/ibmvfc.c 				list_del(&tgt->queue);
queue            4446 drivers/scsi/ibmvscsi/ibmvfc.c 		list_for_each_entry(tgt, &vhost->targets, queue) {
queue            4705 drivers/scsi/ibmvscsi/ibmvfc.c 		list_for_each_entry(tgt, &vhost->targets, queue) {
queue            4819 drivers/scsi/ibmvscsi/ibmvfc.c 	list_add_tail(&vhost->queue, &ibmvfc_head);
queue            4875 drivers/scsi/ibmvscsi/ibmvfc.c 	list_del(&vhost->queue);
queue             604 drivers/scsi/ibmvscsi/ibmvfc.h 	struct list_head queue;
queue             626 drivers/scsi/ibmvscsi/ibmvfc.h 	struct list_head queue;
queue             679 drivers/scsi/ibmvscsi/ibmvfc.h 	struct list_head queue;
queue             140 drivers/scsi/ibmvscsi/ibmvscsi.c static void ibmvscsi_release_crq_queue(struct crq_queue *queue,
queue             154 drivers/scsi/ibmvscsi/ibmvscsi.c 			 queue->msg_token,
queue             155 drivers/scsi/ibmvscsi/ibmvscsi.c 			 queue->size * sizeof(*queue->msgs), DMA_BIDIRECTIONAL);
queue             156 drivers/scsi/ibmvscsi/ibmvscsi.c 	free_page((unsigned long)queue->msgs);
queue             166 drivers/scsi/ibmvscsi/ibmvscsi.c static struct viosrp_crq *crq_queue_next_crq(struct crq_queue *queue)
queue             171 drivers/scsi/ibmvscsi/ibmvscsi.c 	spin_lock_irqsave(&queue->lock, flags);
queue             172 drivers/scsi/ibmvscsi/ibmvscsi.c 	crq = &queue->msgs[queue->cur];
queue             174 drivers/scsi/ibmvscsi/ibmvscsi.c 		if (++queue->cur == queue->size)
queue             175 drivers/scsi/ibmvscsi/ibmvscsi.c 			queue->cur = 0;
queue             183 drivers/scsi/ibmvscsi/ibmvscsi.c 	spin_unlock_irqrestore(&queue->lock, flags);
queue             220 drivers/scsi/ibmvscsi/ibmvscsi.c 		while ((crq = crq_queue_next_crq(&hostdata->queue)) != NULL) {
queue             227 drivers/scsi/ibmvscsi/ibmvscsi.c 		crq = crq_queue_next_crq(&hostdata->queue);
queue             284 drivers/scsi/ibmvscsi/ibmvscsi.c static int ibmvscsi_reset_crq_queue(struct crq_queue *queue,
queue             298 drivers/scsi/ibmvscsi/ibmvscsi.c 	memset(queue->msgs, 0x00, PAGE_SIZE);
queue             299 drivers/scsi/ibmvscsi/ibmvscsi.c 	queue->cur = 0;
queue             306 drivers/scsi/ibmvscsi/ibmvscsi.c 				queue->msg_token, PAGE_SIZE);
queue             325 drivers/scsi/ibmvscsi/ibmvscsi.c static int ibmvscsi_init_crq_queue(struct crq_queue *queue,
queue             333 drivers/scsi/ibmvscsi/ibmvscsi.c 	queue->msgs = (struct viosrp_crq *)get_zeroed_page(GFP_KERNEL);
queue             335 drivers/scsi/ibmvscsi/ibmvscsi.c 	if (!queue->msgs)
queue             337 drivers/scsi/ibmvscsi/ibmvscsi.c 	queue->size = PAGE_SIZE / sizeof(*queue->msgs);
queue             339 drivers/scsi/ibmvscsi/ibmvscsi.c 	queue->msg_token = dma_map_single(hostdata->dev, queue->msgs,
queue             340 drivers/scsi/ibmvscsi/ibmvscsi.c 					  queue->size * sizeof(*queue->msgs),
queue             343 drivers/scsi/ibmvscsi/ibmvscsi.c 	if (dma_mapping_error(hostdata->dev, queue->msg_token))
queue             351 drivers/scsi/ibmvscsi/ibmvscsi.c 				queue->msg_token, PAGE_SIZE);
queue             354 drivers/scsi/ibmvscsi/ibmvscsi.c 		rc = ibmvscsi_reset_crq_queue(queue,
queue             366 drivers/scsi/ibmvscsi/ibmvscsi.c 	queue->cur = 0;
queue             367 drivers/scsi/ibmvscsi/ibmvscsi.c 	spin_lock_init(&queue->lock);
queue             398 drivers/scsi/ibmvscsi/ibmvscsi.c 			 queue->msg_token,
queue             399 drivers/scsi/ibmvscsi/ibmvscsi.c 			 queue->size * sizeof(*queue->msgs), DMA_BIDIRECTIONAL);
queue             401 drivers/scsi/ibmvscsi/ibmvscsi.c 	free_page((unsigned long)queue->msgs);
queue             412 drivers/scsi/ibmvscsi/ibmvscsi.c static int ibmvscsi_reenable_crq_queue(struct crq_queue *queue,
queue            2117 drivers/scsi/ibmvscsi/ibmvscsi.c 		rc = ibmvscsi_reset_crq_queue(&hostdata->queue, hostdata);
queue            2126 drivers/scsi/ibmvscsi/ibmvscsi.c 		rc = ibmvscsi_reenable_crq_queue(&hostdata->queue, hostdata);
queue            2244 drivers/scsi/ibmvscsi/ibmvscsi.c 	rc = ibmvscsi_init_crq_queue(&hostdata->queue, hostdata, max_events);
queue            2309 drivers/scsi/ibmvscsi/ibmvscsi.c 	ibmvscsi_release_crq_queue(&hostdata->queue, hostdata, max_events);
queue            2330 drivers/scsi/ibmvscsi/ibmvscsi.c 	ibmvscsi_release_crq_queue(&hostdata->queue, hostdata,
queue              92 drivers/scsi/ibmvscsi/ibmvscsi.h 	struct crq_queue queue;
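-- Note: crq_queue_next_crq() above (and the equivalent ibmvfc CRQ code earlier) consumes a fixed-size ring: it checks a valid flag at the read cursor, advances the cursor with wraparound, and reports nothing new once it hits an unproduced slot. A simplified single-threaded sketch; the driver additionally holds queue->lock and the ring lives in a DMA-mapped page:

    #include <stddef.h>

    struct crq {
        unsigned char valid;    /* set by the producer, cleared after handling */
        unsigned int data;
    };

    struct crq_queue {
        struct crq *msgs;       /* ring storage, 'size' entries */
        size_t size, cur;       /* capacity and read cursor */
    };

    static struct crq *crq_queue_next(struct crq_queue *q)
    {
        struct crq *c = &q->msgs[q->cur];

        if (!c->valid)
            return NULL;        /* nothing new at the cursor yet */
        if (++q->cur == q->size)
            q->cur = 0;         /* wrap around */
        return c;
    }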
queue              38 drivers/scsi/ibmvscsi_tgt/libsrp.c 	kfifo_init(&q->queue, (void *)q->pool, max * sizeof(void *));
queue              41 drivers/scsi/ibmvscsi_tgt/libsrp.c 		kfifo_in(&q->queue, (void *)&iue, sizeof(void *));
queue             142 drivers/scsi/ibmvscsi_tgt/libsrp.c 	if (kfifo_out_locked(&target->iu_queue.queue, (void *)&iue,
queue             157 drivers/scsi/ibmvscsi_tgt/libsrp.c 	kfifo_in_locked(&iue->target->iu_queue.queue, (void *)&iue,
queue              79 drivers/scsi/ibmvscsi_tgt/libsrp.h 	struct kfifo queue;
queue             702 drivers/scsi/ipr.c 			struct ipr_cmnd, queue);
queue             703 drivers/scsi/ipr.c 		list_del(&ipr_cmd->queue);
queue             829 drivers/scsi/ipr.c 	list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
queue             872 drivers/scsi/ipr.c 	list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
queue             913 drivers/scsi/ipr.c 					temp, &hrrq->hrrq_pending_q, queue) {
queue             914 drivers/scsi/ipr.c 			list_del(&ipr_cmd->queue);
queue             982 drivers/scsi/ipr.c 	list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
queue            1116 drivers/scsi/ipr.c 		list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
queue            1117 drivers/scsi/ipr.c 		list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_pending_q);
queue            1141 drivers/scsi/ipr.c 		list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_free_q);
queue            1211 drivers/scsi/ipr.c 			list_for_each_entry(gscsi_res, &ioa_cfg->used_res_q, queue) {
queue            1412 drivers/scsi/ipr.c 		list_for_each_entry(gscsi_res, &ioa_cfg->used_res_q, queue)
queue            1446 drivers/scsi/ipr.c 	list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
queue            1462 drivers/scsi/ipr.c 				 struct ipr_resource_entry, queue);
queue            1464 drivers/scsi/ipr.c 		list_del(&res->queue);
queue            1466 drivers/scsi/ipr.c 		list_add_tail(&res->queue, &ioa_cfg->used_res_q);
queue            1478 drivers/scsi/ipr.c 			list_move_tail(&res->queue, &ioa_cfg->free_res_q);
queue            1504 drivers/scsi/ipr.c 	list_del_init(&hostrcb->queue);
queue            1505 drivers/scsi/ipr.c 	list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
queue            2613 drivers/scsi/ipr.c 					struct ipr_hostrcb, queue);
queue            2618 drivers/scsi/ipr.c 						struct ipr_hostrcb, queue);
queue            2621 drivers/scsi/ipr.c 	list_del_init(&hostrcb->queue);
queue            2648 drivers/scsi/ipr.c 	list_del_init(&hostrcb->queue);
queue            2649 drivers/scsi/ipr.c 	list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
queue            2661 drivers/scsi/ipr.c 	list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_report_q);
queue            2789 drivers/scsi/ipr.c 	list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
queue            3345 drivers/scsi/ipr.c 		list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
queue            3351 drivers/scsi/ipr.c 						list_move_tail(&res->queue, &ioa_cfg->free_res_q);
queue            3364 drivers/scsi/ipr.c 	list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
queue            4182 drivers/scsi/ipr.c 					struct ipr_hostrcb, queue);
queue            4205 drivers/scsi/ipr.c 					struct ipr_hostrcb, queue);
queue            4212 drivers/scsi/ipr.c 	list_move_tail(&hostrcb->queue, &ioa_cfg->hostrcb_free_q);
queue            4778 drivers/scsi/ipr.c 	list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
queue            4883 drivers/scsi/ipr.c 	list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
queue            5072 drivers/scsi/ipr.c 	list_for_each_entry(loop_cmd, &ipr_cmd->hrrq->hrrq_free_q, queue) {
queue            5246 drivers/scsi/ipr.c 	list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
queue            5421 drivers/scsi/ipr.c 		list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
queue            5437 drivers/scsi/ipr.c 	list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
queue            5562 drivers/scsi/ipr.c 	list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
queue            5648 drivers/scsi/ipr.c 				list_del(&ioa_cfg->reset_cmd->queue);
queue            5663 drivers/scsi/ipr.c 		list_del(&ioa_cfg->reset_cmd->queue);
queue            5747 drivers/scsi/ipr.c 		list_move_tail(&ipr_cmd->queue, doneq);
queue            5782 drivers/scsi/ipr.c 	list_for_each_entry_safe(ipr_cmd, temp, &doneq, queue) {
queue            5783 drivers/scsi/ipr.c 		list_del(&ipr_cmd->queue);
queue            5851 drivers/scsi/ipr.c 	list_for_each_entry_safe(ipr_cmd, temp, &doneq, queue) {
queue            5852 drivers/scsi/ipr.c 		list_del(&ipr_cmd->queue);
queue            5901 drivers/scsi/ipr.c 	list_for_each_entry_safe(ipr_cmd, temp, &doneq, queue) {
queue            5902 drivers/scsi/ipr.c 		list_del(&ipr_cmd->queue);
queue            6056 drivers/scsi/ipr.c 	list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
queue            6494 drivers/scsi/ipr.c 	list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
queue            6523 drivers/scsi/ipr.c 		list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
queue            6645 drivers/scsi/ipr.c 		list_add_tail(&ipr_cmd->queue, &hrrq->hrrq_free_q);
queue            6653 drivers/scsi/ipr.c 		list_add_tail(&ipr_cmd->queue, &hrrq->hrrq_free_q);
queue            6664 drivers/scsi/ipr.c 	list_add_tail(&ipr_cmd->queue, &hrrq->hrrq_pending_q);
queue            6816 drivers/scsi/ipr.c 		list_for_each_entry(ipr_cmd, &hrrq->hrrq_pending_q, queue) {
queue            6887 drivers/scsi/ipr.c 	list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
queue            7053 drivers/scsi/ipr.c 		list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
queue            7070 drivers/scsi/ipr.c 	list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
queue            7239 drivers/scsi/ipr.c 	list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
queue            7274 drivers/scsi/ipr.c 	list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
queue            7283 drivers/scsi/ipr.c 		list_del_init(&ioa_cfg->hostrcb[j]->queue);
queue            7298 drivers/scsi/ipr.c 	list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
queue            7344 drivers/scsi/ipr.c 	list_for_each_entry_continue(res, &ioa_cfg->used_res_q, queue) {
queue            7581 drivers/scsi/ipr.c 				    struct ipr_resource_entry, queue);
queue            7633 drivers/scsi/ipr.c 	list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
queue            7655 drivers/scsi/ipr.c 					    struct ipr_resource_entry, queue);
queue            7808 drivers/scsi/ipr.c 	list_for_each_entry_safe(res, temp, &ioa_cfg->used_res_q, queue)
queue            7809 drivers/scsi/ipr.c 		list_move_tail(&res->queue, &old_res);
queue            7823 drivers/scsi/ipr.c 		list_for_each_entry_safe(res, temp, &old_res, queue) {
queue            7825 drivers/scsi/ipr.c 				list_move_tail(&res->queue, &ioa_cfg->used_res_q);
queue            7839 drivers/scsi/ipr.c 					 struct ipr_resource_entry, queue);
queue            7840 drivers/scsi/ipr.c 			list_move_tail(&res->queue, &ioa_cfg->used_res_q);
queue            7850 drivers/scsi/ipr.c 	list_for_each_entry_safe(res, temp, &old_res, queue) {
queue            7854 drivers/scsi/ipr.c 			list_move_tail(&res->queue, &ioa_cfg->used_res_q);
queue            7858 drivers/scsi/ipr.c 	list_for_each_entry_safe(res, temp, &old_res, queue) {
queue            7860 drivers/scsi/ipr.c 		list_move_tail(&res->queue, &ioa_cfg->free_res_q);
queue            8140 drivers/scsi/ipr.c 			list_add_tail(&ipr_cmd->queue,
queue            8284 drivers/scsi/ipr.c 		list_del(&ipr_cmd->queue);
queue            8310 drivers/scsi/ipr.c 	list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
queue            8403 drivers/scsi/ipr.c 	list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
queue            8472 drivers/scsi/ipr.c 	list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
queue            8564 drivers/scsi/ipr.c 			     struct ipr_hostrcb, queue);
queue            8565 drivers/scsi/ipr.c 	list_del_init(&hostrcb->queue);
queue            8582 drivers/scsi/ipr.c 	list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_free_q);
queue            9003 drivers/scsi/ipr.c 		list_for_each_entry(loop_cmd, &hrrq->hrrq_pending_q, queue) {
queue            9006 drivers/scsi/ipr.c 			list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
queue            9042 drivers/scsi/ipr.c 			list_for_each_entry(hcam_cmd, &hrrq->hrrq_pending_q, queue) {
queue            9209 drivers/scsi/ipr.c 			list_add_tail(&ipr_cmd->queue,
queue            9348 drivers/scsi/ipr.c 	list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
queue            9724 drivers/scsi/ipr.c 		list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
queue            9753 drivers/scsi/ipr.c 		list_add_tail(&ioa_cfg->res_entries[i].queue, &ioa_cfg->free_res_q);
queue            9805 drivers/scsi/ipr.c 		list_add_tail(&ioa_cfg->hostrcb[i]->queue, &ioa_cfg->hostrcb_free_q);
queue            10396 drivers/scsi/ipr.c 	list_add_tail(&ioa_cfg->queue, &ipr_ioa_head);
queue            10488 drivers/scsi/ipr.c 	list_del(&ioa_cfg->queue);
queue            10792 drivers/scsi/ipr.c 	list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
queue            10812 drivers/scsi/ipr.c 	list_for_each_entry(ioa_cfg, &ipr_ioa_head, queue) {
queue            1234 drivers/scsi/ipr.h 	struct list_head queue;
queue            1326 drivers/scsi/ipr.h 	struct list_head queue;
queue            1469 drivers/scsi/ipr.h 	struct list_head queue;
queue            1612 drivers/scsi/ipr.h 	struct list_head queue;
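-- Note: throughout ipr.c above, commands, resource entries, and hostrcbs embed a struct list_head member named "queue" and migrate between free, pending, report, and done lists via list_add_tail()/list_move_tail(); the interrupt path also harvests completions onto a local doneq under the lock and processes them afterwards (cxlflash follows the same doneq idiom). A minimal standalone version of the intrusive-list primitives involved; the kernel's <linux/list.h> is the real implementation:

    struct list_head { struct list_head *next, *prev; };

    #define LIST_HEAD_INIT(name) { &(name), &(name) }

    static void list_add_tail(struct list_head *n, struct list_head *head)
    {
        n->prev = head->prev;
        n->next = head;
        head->prev->next = n;
        head->prev = n;
    }

    static void list_del(struct list_head *n)
    {
        n->prev->next = n->next;
        n->next->prev = n->prev;
    }

    /* list_move_tail(&res->queue, &used_res_q): unlink, then re-queue at tail */
    static void list_move_tail(struct list_head *n, struct list_head *head)
    {
        list_del(n);
        list_add_tail(n, head);
    }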
queue            2753 drivers/scsi/ips.c ips_putq_scb_head(ips_scb_queue_t * queue, ips_scb_t * item)
queue            2760 drivers/scsi/ips.c 	item->q_next = queue->head;
queue            2761 drivers/scsi/ips.c 	queue->head = item;
queue            2763 drivers/scsi/ips.c 	if (!queue->tail)
queue            2764 drivers/scsi/ips.c 		queue->tail = item;
queue            2766 drivers/scsi/ips.c 	queue->count++;
queue            2781 drivers/scsi/ips.c ips_removeq_scb_head(ips_scb_queue_t * queue)
queue            2787 drivers/scsi/ips.c 	item = queue->head;
queue            2793 drivers/scsi/ips.c 	queue->head = item->q_next;
queue            2796 drivers/scsi/ips.c 	if (queue->tail == item)
queue            2797 drivers/scsi/ips.c 		queue->tail = NULL;
queue            2799 drivers/scsi/ips.c 	queue->count--;
queue            2816 drivers/scsi/ips.c ips_removeq_scb(ips_scb_queue_t * queue, ips_scb_t * item)
queue            2825 drivers/scsi/ips.c 	if (item == queue->head) {
queue            2826 drivers/scsi/ips.c 		return (ips_removeq_scb_head(queue));
queue            2829 drivers/scsi/ips.c 	p = queue->head;
queue            2839 drivers/scsi/ips.c 			queue->tail = p;
queue            2842 drivers/scsi/ips.c 		queue->count--;
queue            2861 drivers/scsi/ips.c static void ips_putq_wait_tail(ips_wait_queue_entry_t *queue, struct scsi_cmnd *item)
queue            2870 drivers/scsi/ips.c 	if (queue->tail)
queue            2871 drivers/scsi/ips.c 		queue->tail->host_scribble = (char *) item;
queue            2873 drivers/scsi/ips.c 	queue->tail = item;
queue            2875 drivers/scsi/ips.c 	if (!queue->head)
queue            2876 drivers/scsi/ips.c 		queue->head = item;
queue            2878 drivers/scsi/ips.c 	queue->count++;
queue            2892 drivers/scsi/ips.c static struct scsi_cmnd *ips_removeq_wait_head(ips_wait_queue_entry_t *queue)
queue            2898 drivers/scsi/ips.c 	item = queue->head;
queue            2904 drivers/scsi/ips.c 	queue->head = (struct scsi_cmnd *) item->host_scribble;
queue            2907 drivers/scsi/ips.c 	if (queue->tail == item)
queue            2908 drivers/scsi/ips.c 		queue->tail = NULL;
queue            2910 drivers/scsi/ips.c 	queue->count--;
queue            2926 drivers/scsi/ips.c static struct scsi_cmnd *ips_removeq_wait(ips_wait_queue_entry_t *queue,
queue            2936 drivers/scsi/ips.c 	if (item == queue->head) {
queue            2937 drivers/scsi/ips.c 		return (ips_removeq_wait_head(queue));
queue            2940 drivers/scsi/ips.c 	p = queue->head;
queue            2950 drivers/scsi/ips.c 			queue->tail = p;
queue            2953 drivers/scsi/ips.c 		queue->count--;
queue            2973 drivers/scsi/ips.c ips_putq_copp_tail(ips_copp_queue_t * queue, ips_copp_wait_item_t * item)
queue            2982 drivers/scsi/ips.c 	if (queue->tail)
queue            2983 drivers/scsi/ips.c 		queue->tail->next = item;
queue            2985 drivers/scsi/ips.c 	queue->tail = item;
queue            2987 drivers/scsi/ips.c 	if (!queue->head)
queue            2988 drivers/scsi/ips.c 		queue->head = item;
queue            2990 drivers/scsi/ips.c 	queue->count++;
queue            3005 drivers/scsi/ips.c ips_removeq_copp_head(ips_copp_queue_t * queue)
queue            3011 drivers/scsi/ips.c 	item = queue->head;
queue            3017 drivers/scsi/ips.c 	queue->head = item->next;
queue            3020 drivers/scsi/ips.c 	if (queue->tail == item)
queue            3021 drivers/scsi/ips.c 		queue->tail = NULL;
queue            3023 drivers/scsi/ips.c 	queue->count--;
queue            3040 drivers/scsi/ips.c ips_removeq_copp(ips_copp_queue_t * queue, ips_copp_wait_item_t * item)
queue            3049 drivers/scsi/ips.c 	if (item == queue->head) {
queue            3050 drivers/scsi/ips.c 		return (ips_removeq_copp_head(queue));
queue            3053 drivers/scsi/ips.c 	p = queue->head;
queue            3063 drivers/scsi/ips.c 			queue->tail = p;
queue            3066 drivers/scsi/ips.c 		queue->count--;
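-- Note: ips.c above maintains three ad-hoc singly-linked queues (scb, wait, copp) that all follow the same head/tail/count discipline, with removal from the middle handled by walking from head. A generic sketch of the put-tail and remove-head operations; locking is omitted, as in the original helpers, which run under the host adapter lock:

    struct item { struct item *next; };

    struct sl_queue {
        struct item *head, *tail;
        int count;
    };

    static void q_put_tail(struct sl_queue *q, struct item *it)
    {
        it->next = NULL;
        if (q->tail)
            q->tail->next = it;
        q->tail = it;
        if (!q->head)
            q->head = it;
        q->count++;
    }

    static struct item *q_remove_head(struct sl_queue *q)
    {
        struct item *it = q->head;

        if (!it)
            return NULL;
        q->head = it->next;
        if (q->tail == it)      /* queue is now empty */
            q->tail = NULL;
        q->count--;
        return it;
    }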
queue             466 drivers/scsi/libiscsi.c 	kfifo_in(&session->cmdpool.queue, (void*)&task, sizeof(void*));
queue             700 drivers/scsi/libiscsi.c 		if (!kfifo_out(&session->cmdpool.queue,
queue            1603 drivers/scsi/libiscsi.c 	if (!kfifo_out(&conn->session->cmdpool.queue,
queue            2553 drivers/scsi/libiscsi.c 	kfifo_init(&q->queue, (void*)q->pool, max * sizeof(void*));
queue            2561 drivers/scsi/libiscsi.c 		kfifo_in(&q->queue, (void*)&q->pool[i], sizeof(void*));
queue            2918 drivers/scsi/libiscsi.c 	if (!kfifo_out(&session->cmdpool.queue,
queue            2938 drivers/scsi/libiscsi.c 	kfifo_in(&session->cmdpool.queue, (void*)&conn->login_task,
queue            2982 drivers/scsi/libiscsi.c 	kfifo_in(&session->cmdpool.queue, (void*)&conn->login_task,
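-- Note: libiscsi.c above keeps its task pool in a kfifo of pointers: allocating a task is kfifo_out() of one void *, releasing it is kfifo_in() (libsrp.c and libiscsi_tcp.c use the same scheme for IUs and R2Ts). A minimal ring-of-pointers pool sketching the idea; the real kfifo is lock-free for a single producer/consumer pair, whereas this sketch is unsynchronized:

    #include <stddef.h>

    struct ptr_pool {
        void **slots;           /* 'cap' preallocated slots */
        size_t cap, head, tail; /* head/tail increase monotonically */
    };

    static int pool_put(struct ptr_pool *p, void *obj)
    {
        if (p->tail - p->head == p->cap)
            return 0;                           /* full */
        p->slots[p->tail++ % p->cap] = obj;
        return 1;
    }

    static void *pool_get(struct ptr_pool *p)
    {
        if (p->head == p->tail)
            return NULL;                        /* empty */
        return p->slots[p->head++ % p->cap];
    }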
queue             466 drivers/scsi/libiscsi_tcp.c 		kfifo_in(&tcp_task->r2tpool.queue, (void*)&r2t,
queue             473 drivers/scsi/libiscsi_tcp.c 		kfifo_in(&tcp_task->r2tpool.queue, (void*)&r2t,
queue             586 drivers/scsi/libiscsi_tcp.c 	rc = kfifo_out(&tcp_task->r2tpool.queue, (void *)&r2t, sizeof(void *));
queue            1008 drivers/scsi/libiscsi_tcp.c 				kfifo_in(&tcp_task->r2tpool.queue,
queue            14471 drivers/scsi/lpfc/lpfc_sli.c lpfc_sli4_queue_free(struct lpfc_queue *queue)
queue            14475 drivers/scsi/lpfc/lpfc_sli.c 	if (!queue)
queue            14478 drivers/scsi/lpfc/lpfc_sli.c 	if (!list_empty(&queue->wq_list))
queue            14479 drivers/scsi/lpfc/lpfc_sli.c 		list_del(&queue->wq_list);
queue            14481 drivers/scsi/lpfc/lpfc_sli.c 	while (!list_empty(&queue->page_list)) {
queue            14482 drivers/scsi/lpfc/lpfc_sli.c 		list_remove_head(&queue->page_list, dmabuf, struct lpfc_dmabuf,
queue            14484 drivers/scsi/lpfc/lpfc_sli.c 		dma_free_coherent(&queue->phba->pcidev->dev, queue->page_size,
queue            14488 drivers/scsi/lpfc/lpfc_sli.c 	if (queue->rqbp) {
queue            14489 drivers/scsi/lpfc/lpfc_sli.c 		lpfc_free_rq_buffer(queue->phba, queue);
queue            14490 drivers/scsi/lpfc/lpfc_sli.c 		kfree(queue->rqbp);
queue            14493 drivers/scsi/lpfc/lpfc_sli.c 	if (!list_empty(&queue->cpu_list))
queue            14494 drivers/scsi/lpfc/lpfc_sli.c 		list_del(&queue->cpu_list);
queue            14496 drivers/scsi/lpfc/lpfc_sli.c 	kfree(queue);
queue            14516 drivers/scsi/lpfc/lpfc_sli.c 	struct lpfc_queue *queue;
queue            14530 drivers/scsi/lpfc/lpfc_sli.c 	queue = kzalloc_node(sizeof(*queue) + (sizeof(void *) * pgcnt),
queue            14532 drivers/scsi/lpfc/lpfc_sli.c 	if (!queue)
queue            14535 drivers/scsi/lpfc/lpfc_sli.c 	INIT_LIST_HEAD(&queue->list);
queue            14536 drivers/scsi/lpfc/lpfc_sli.c 	INIT_LIST_HEAD(&queue->_poll_list);
queue            14537 drivers/scsi/lpfc/lpfc_sli.c 	INIT_LIST_HEAD(&queue->wq_list);
queue            14538 drivers/scsi/lpfc/lpfc_sli.c 	INIT_LIST_HEAD(&queue->wqfull_list);
queue            14539 drivers/scsi/lpfc/lpfc_sli.c 	INIT_LIST_HEAD(&queue->page_list);
queue            14540 drivers/scsi/lpfc/lpfc_sli.c 	INIT_LIST_HEAD(&queue->child_list);
queue            14541 drivers/scsi/lpfc/lpfc_sli.c 	INIT_LIST_HEAD(&queue->cpu_list);
queue            14546 drivers/scsi/lpfc/lpfc_sli.c 	queue->page_count = pgcnt;
queue            14547 drivers/scsi/lpfc/lpfc_sli.c 	queue->q_pgs = (void **)&queue[1];
queue            14548 drivers/scsi/lpfc/lpfc_sli.c 	queue->entry_cnt_per_pg = hw_page_size / entry_size;
queue            14549 drivers/scsi/lpfc/lpfc_sli.c 	queue->entry_size = entry_size;
queue            14550 drivers/scsi/lpfc/lpfc_sli.c 	queue->entry_count = entry_count;
queue            14551 drivers/scsi/lpfc/lpfc_sli.c 	queue->page_size = hw_page_size;
queue            14552 drivers/scsi/lpfc/lpfc_sli.c 	queue->phba = phba;
queue            14554 drivers/scsi/lpfc/lpfc_sli.c 	for (x = 0; x < queue->page_count; x++) {
queue            14567 drivers/scsi/lpfc/lpfc_sli.c 		list_add_tail(&dmabuf->list, &queue->page_list);
queue            14569 drivers/scsi/lpfc/lpfc_sli.c 		queue->q_pgs[x] = dmabuf->virt;
queue            14571 drivers/scsi/lpfc/lpfc_sli.c 	INIT_WORK(&queue->irqwork, lpfc_sli4_hba_process_cq);
queue            14572 drivers/scsi/lpfc/lpfc_sli.c 	INIT_WORK(&queue->spwork, lpfc_sli4_sp_process_cq);
queue            14573 drivers/scsi/lpfc/lpfc_sli.c 	INIT_DELAYED_WORK(&queue->sched_irqwork, lpfc_sli4_dly_hba_process_cq);
queue            14574 drivers/scsi/lpfc/lpfc_sli.c 	INIT_DELAYED_WORK(&queue->sched_spwork, lpfc_sli4_dly_sp_process_cq);
queue            14578 drivers/scsi/lpfc/lpfc_sli.c 	return queue;
queue            14580 drivers/scsi/lpfc/lpfc_sli.c 	lpfc_sli4_queue_free(queue);
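-- Note: lpfc_sli4_queue_alloc() above makes a single allocation sized sizeof(*queue) plus one void * per page, then points queue->q_pgs at the memory immediately following the header (&queue[1]), so header and page-pointer array share one lifetime. A standalone sketch of that single-allocation layout (hypothetical struct):

    #include <stdlib.h>

    struct pq {
        unsigned int page_count;
        void **q_pgs;           /* points just past the header */
    };

    static struct pq *pq_alloc(unsigned int pgcnt)
    {
        /* header and page-pointer array in one zeroed allocation */
        struct pq *q = calloc(1, sizeof(*q) + pgcnt * sizeof(void *));

        if (!q)
            return NULL;
        q->page_count = pgcnt;
        q->q_pgs = (void **)&q[1];
        return q;
    }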
queue            5698 drivers/scsi/megaraid/megaraid_sas_base.c 	unsigned int queue, cpu, low_latency_index_start;
queue            5702 drivers/scsi/megaraid/megaraid_sas_base.c 	for (queue = low_latency_index_start; queue < instance->msix_vectors; queue++) {
queue            5703 drivers/scsi/megaraid/megaraid_sas_base.c 		mask = pci_irq_get_affinity(instance->pdev, queue);
queue            5708 drivers/scsi/megaraid/megaraid_sas_base.c 			instance->reply_map[cpu] = queue;
queue            5713 drivers/scsi/megaraid/megaraid_sas_base.c 	queue = low_latency_index_start;
queue            5715 drivers/scsi/megaraid/megaraid_sas_base.c 		instance->reply_map[cpu] = queue;
queue            5716 drivers/scsi/megaraid/megaraid_sas_base.c 		if (queue == (instance->msix_vectors - 1))
queue            5717 drivers/scsi/megaraid/megaraid_sas_base.c 			queue = low_latency_index_start;
queue            5719 drivers/scsi/megaraid/megaraid_sas_base.c 			queue++;
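-- Note: megaraid_sas_base.c above (like hpsa.c and hisi_sas_v3_hw.c earlier) builds a per-CPU reply-queue map from pci_irq_get_affinity(), then falls back to plain round-robin assignment when affinity masks are unavailable. The fallback in isolation, with hypothetical parameters:

    /* Assign reply queues [start, nvecs) to CPUs round-robin. */
    static void build_fallback_reply_map(unsigned int *reply_map,
                                         unsigned int ncpus,
                                         unsigned int nvecs,
                                         unsigned int start)
    {
        unsigned int queue = start;

        for (unsigned int cpu = 0; cpu < ncpus; cpu++) {
            reply_map[cpu] = queue;
            queue = (queue == nvecs - 1) ? start : queue + 1;
        }
    }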
queue             156 drivers/scsi/pmcraid.c 	list_for_each_entry(temp, &pinstance->used_res_q, queue) {
queue            1576 drivers/scsi/pmcraid.c 	list_for_each_entry(res, &pinstance->used_res_q, queue) {
queue            1612 drivers/scsi/pmcraid.c 				 struct pmcraid_resource_entry, queue);
queue            1614 drivers/scsi/pmcraid.c 		list_del(&res->queue);
queue            1617 drivers/scsi/pmcraid.c 		list_add_tail(&res->queue, &pinstance->used_res_q);
queue            1635 drivers/scsi/pmcraid.c 			list_move_tail(&res->queue, &pinstance->free_res_q);
queue            4308 drivers/scsi/pmcraid.c 	list_for_each_entry_safe(res, temp, &pinstance->used_res_q, queue) {
queue            4324 drivers/scsi/pmcraid.c 				list_move_tail(&res->queue,
queue            4342 drivers/scsi/pmcraid.c 	list_for_each_entry(res, &pinstance->used_res_q, queue) {
queue            4825 drivers/scsi/pmcraid.c 			list_del(&pinstance->res_entries[i].queue);
queue            4855 drivers/scsi/pmcraid.c 		list_add_tail(&pinstance->res_entries[i].queue,
queue            5506 drivers/scsi/pmcraid.c 	list_for_each_entry_safe(res, temp, &pinstance->used_res_q, queue)
queue            5507 drivers/scsi/pmcraid.c 		list_move_tail(&res->queue, &old_res);
queue            5523 drivers/scsi/pmcraid.c 		list_for_each_entry_safe(res, temp, &old_res, queue) {
queue            5529 drivers/scsi/pmcraid.c 				list_move_tail(&res->queue,
queue            5546 drivers/scsi/pmcraid.c 					 struct pmcraid_resource_entry, queue);
queue            5551 drivers/scsi/pmcraid.c 			list_move_tail(&res->queue, &pinstance->used_res_q);
queue            5570 drivers/scsi/pmcraid.c 	list_for_each_entry_safe(res, temp, &old_res, queue) {
queue            5576 drivers/scsi/pmcraid.c 			list_move_tail(&res->queue, &pinstance->used_res_q);
queue            5578 drivers/scsi/pmcraid.c 			list_move_tail(&res->queue, &pinstance->free_res_q);
queue             795 drivers/scsi/pmcraid.h 	struct list_head queue;	/* link to "to be exposed" resources */
queue             577 drivers/scsi/qla2xxx/qla_dbg.c 		qh->queue = htonl(TYPE_ATIO_QUEUE);
queue             621 drivers/scsi/qla2xxx/qla_dbg.c 		qh->queue = htonl(TYPE_REQUEST_QUEUE);
queue             649 drivers/scsi/qla2xxx/qla_dbg.c 		qh->queue = htonl(TYPE_RESPONSE_QUEUE);
queue             253 drivers/scsi/qla2xxx/qla_dbg.h 	uint32_t queue;
queue            1625 drivers/scsi/scsi_lib.c 	struct request_queue *q = hctx->queue;
queue            1633 drivers/scsi/scsi_lib.c 	struct request_queue *q = hctx->queue;
queue            1845 drivers/scsi/scsi_lib.c 	struct request_queue *q = hctx->queue;
queue             154 drivers/scsi/sd.c 	blk_queue_write_cache(sdkp->disk->queue, wc, fua);
queue             772 drivers/scsi/sd.c 	struct request_queue *q = sdkp->disk->queue;
queue             948 drivers/scsi/sd.c 	struct request_queue *q = sdkp->disk->queue;
queue            2873 drivers/scsi/sd.c 	blk_queue_io_min(sdkp->disk->queue,
queue            2928 drivers/scsi/sd.c 	struct request_queue *q = sdkp->disk->queue;
queue            3102 drivers/scsi/sd.c 	struct request_queue *q = sdkp->disk->queue;
queue            3362 drivers/scsi/sd.c 	gd->queue = sdkp->device->request_queue;
queue            3466 drivers/scsi/sd.c 	struct request_queue *q = disk->queue;
queue             128 drivers/scsi/sd_zbc.c 	struct request_queue *q = sdkp->disk->queue;
queue             484 drivers/scsi/sd_zbc.c 	blk_queue_chunk_sectors(sdkp->disk->queue,
queue             486 drivers/scsi/sd_zbc.c 	blk_queue_flag_set(QUEUE_FLAG_ZONE_RESETALL, sdkp->disk->queue);
queue             487 drivers/scsi/sd_zbc.c 	blk_queue_required_elevator_features(sdkp->disk->queue,
queue             508 drivers/scsi/sd_zbc.c 	    disk->queue->nr_zones != nr_zones) {
queue             749 drivers/scsi/sr.c 	disk->queue = sdev->request_queue;
queue             787 drivers/scsi/sr.c 	struct request_queue *queue;
queue             856 drivers/scsi/sr.c 	queue = cd->device->request_queue;
queue             857 drivers/scsi/sr.c 	blk_queue_logical_block_size(queue, sector_size);
queue            3841 drivers/scsi/st.c 				i = scsi_cmd_ioctl(STp->disk->queue, STp->disk,
queue            4308 drivers/scsi/st.c 	disk->queue = SDp->request_queue;
queue            4417 drivers/scsi/st.c 	blk_put_queue(disk->queue);
queue             198 drivers/soc/fsl/qbman/qman_test_stash.c static DECLARE_WAIT_QUEUE_HEAD(queue);
queue             309 drivers/soc/fsl/qbman/qman_test_stash.c 		wake_up(&queue);
queue             613 drivers/soc/fsl/qbman/qman_test_stash.c 	wait_event(queue, loop_counter == HP_LOOPS);
queue              28 drivers/soc/ixp4xx/ixp4xx-qmgr.c void qmgr_put_entry(unsigned int queue, u32 val)
queue              31 drivers/soc/ixp4xx/ixp4xx-qmgr.c 	BUG_ON(!qmgr_queue_descs[queue]); /* not yet requested */
queue              34 drivers/soc/ixp4xx/ixp4xx-qmgr.c 	       qmgr_queue_descs[queue], queue, val);
queue              36 drivers/soc/ixp4xx/ixp4xx-qmgr.c 	__raw_writel(val, &qmgr_regs->acc[queue][0]);
queue              39 drivers/soc/ixp4xx/ixp4xx-qmgr.c u32 qmgr_get_entry(unsigned int queue)
queue              42 drivers/soc/ixp4xx/ixp4xx-qmgr.c 	val = __raw_readl(&qmgr_regs->acc[queue][0]);
queue              44 drivers/soc/ixp4xx/ixp4xx-qmgr.c 	BUG_ON(!qmgr_queue_descs[queue]); /* not yet requested */
queue              47 drivers/soc/ixp4xx/ixp4xx-qmgr.c 	       qmgr_queue_descs[queue], queue, val);
queue              52 drivers/soc/ixp4xx/ixp4xx-qmgr.c static int __qmgr_get_stat1(unsigned int queue)
queue              54 drivers/soc/ixp4xx/ixp4xx-qmgr.c 	return (__raw_readl(&qmgr_regs->stat1[queue >> 3])
queue              55 drivers/soc/ixp4xx/ixp4xx-qmgr.c 		>> ((queue & 7) << 2)) & 0xF;
queue              58 drivers/soc/ixp4xx/ixp4xx-qmgr.c static int __qmgr_get_stat2(unsigned int queue)
queue              60 drivers/soc/ixp4xx/ixp4xx-qmgr.c 	BUG_ON(queue >= HALF_QUEUES);
queue              61 drivers/soc/ixp4xx/ixp4xx-qmgr.c 	return (__raw_readl(&qmgr_regs->stat2[queue >> 4])
queue              62 drivers/soc/ixp4xx/ixp4xx-qmgr.c 		>> ((queue & 0xF) << 1)) & 0x3;
queue              71 drivers/soc/ixp4xx/ixp4xx-qmgr.c int qmgr_stat_empty(unsigned int queue)
queue              73 drivers/soc/ixp4xx/ixp4xx-qmgr.c 	BUG_ON(queue >= HALF_QUEUES);
queue              74 drivers/soc/ixp4xx/ixp4xx-qmgr.c 	return __qmgr_get_stat1(queue) & QUEUE_STAT1_EMPTY;
queue              83 drivers/soc/ixp4xx/ixp4xx-qmgr.c int qmgr_stat_below_low_watermark(unsigned int queue)
queue              85 drivers/soc/ixp4xx/ixp4xx-qmgr.c 	if (queue >= HALF_QUEUES)
queue              87 drivers/soc/ixp4xx/ixp4xx-qmgr.c 			(queue - HALF_QUEUES)) & 0x01;
queue              88 drivers/soc/ixp4xx/ixp4xx-qmgr.c 	return __qmgr_get_stat1(queue) & QUEUE_STAT1_NEARLY_EMPTY;
queue              97 drivers/soc/ixp4xx/ixp4xx-qmgr.c int qmgr_stat_full(unsigned int queue)
queue              99 drivers/soc/ixp4xx/ixp4xx-qmgr.c 	if (queue >= HALF_QUEUES)
queue             101 drivers/soc/ixp4xx/ixp4xx-qmgr.c 			(queue - HALF_QUEUES)) & 0x01;
queue             102 drivers/soc/ixp4xx/ixp4xx-qmgr.c 	return __qmgr_get_stat1(queue) & QUEUE_STAT1_FULL;
queue             111 drivers/soc/ixp4xx/ixp4xx-qmgr.c int qmgr_stat_overflow(unsigned int queue)
queue             113 drivers/soc/ixp4xx/ixp4xx-qmgr.c 	return __qmgr_get_stat2(queue) & QUEUE_STAT2_OVERFLOW;
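-- Note: the __qmgr_get_stat1()/__qmgr_get_stat2() helpers above unpack per-queue status from packed registers: stat1 holds a 4-bit field per queue (eight queues per 32-bit word), stat2 a 2-bit field (sixteen per word). The shift arithmetic on its own, lifted out of the register I/O:

    #include <stdint.h>

    /* 4-bit status per queue, 8 queues per 32-bit stat1 word */
    static unsigned int get_stat1(const uint32_t *stat1, unsigned int queue)
    {
        return (stat1[queue >> 3] >> ((queue & 7) << 2)) & 0xF;
    }

    /* 2-bit status per queue, 16 queues per 32-bit stat2 word */
    static unsigned int get_stat2(const uint32_t *stat2, unsigned int queue)
    {
        return (stat2[queue >> 4] >> ((queue & 0xF) << 1)) & 0x3;
    }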
queue             116 drivers/soc/ixp4xx/ixp4xx-qmgr.c void qmgr_set_irq(unsigned int queue, int src,
queue             122 drivers/soc/ixp4xx/ixp4xx-qmgr.c 	if (queue < HALF_QUEUES) {
queue             126 drivers/soc/ixp4xx/ixp4xx-qmgr.c 		reg = &qmgr_regs->irqsrc[queue >> 3]; /* 8 queues per u32 */
queue             127 drivers/soc/ixp4xx/ixp4xx-qmgr.c 		bit = (queue % 8) * 4; /* 3 bits + 1 reserved bit per queue */
queue             134 drivers/soc/ixp4xx/ixp4xx-qmgr.c 	irq_handlers[queue] = handler;
queue             135 drivers/soc/ixp4xx/ixp4xx-qmgr.c 	irq_pdevs[queue] = pdev;
queue             203 drivers/soc/ixp4xx/ixp4xx-qmgr.c void qmgr_enable_irq(unsigned int queue)
queue             206 drivers/soc/ixp4xx/ixp4xx-qmgr.c 	int half = queue / 32;
queue             207 drivers/soc/ixp4xx/ixp4xx-qmgr.c 	u32 mask = 1 << (queue & (HALF_QUEUES - 1));
queue             215 drivers/soc/ixp4xx/ixp4xx-qmgr.c void qmgr_disable_irq(unsigned int queue)
queue             218 drivers/soc/ixp4xx/ixp4xx-qmgr.c 	int half = queue / 32;
queue             219 drivers/soc/ixp4xx/ixp4xx-qmgr.c 	u32 mask = 1 << (queue & (HALF_QUEUES - 1));
queue             237 drivers/soc/ixp4xx/ixp4xx-qmgr.c int qmgr_request_queue(unsigned int queue, unsigned int len /* dwords */,
queue             242 drivers/soc/ixp4xx/ixp4xx-qmgr.c int __qmgr_request_queue(unsigned int queue, unsigned int len /* dwords */,
queue             250 drivers/soc/ixp4xx/ixp4xx-qmgr.c 	BUG_ON(queue >= QUEUES);
queue             285 drivers/soc/ixp4xx/ixp4xx-qmgr.c 	if (__raw_readl(&qmgr_regs->sram[queue])) {
queue             301 drivers/soc/ixp4xx/ixp4xx-qmgr.c 			       " queue %i\n", queue);
queue             311 drivers/soc/ixp4xx/ixp4xx-qmgr.c 	__raw_writel(cfg | (addr << 14), &qmgr_regs->sram[queue]);
queue             313 drivers/soc/ixp4xx/ixp4xx-qmgr.c 	snprintf(qmgr_queue_descs[queue], sizeof(qmgr_queue_descs[0]),
queue             316 drivers/soc/ixp4xx/ixp4xx-qmgr.c 	       qmgr_queue_descs[queue], queue, addr);
queue             327 drivers/soc/ixp4xx/ixp4xx-qmgr.c void qmgr_release_queue(unsigned int queue)
queue             331 drivers/soc/ixp4xx/ixp4xx-qmgr.c 	BUG_ON(queue >= QUEUES); /* not in valid range */
queue             334 drivers/soc/ixp4xx/ixp4xx-qmgr.c 	cfg = __raw_readl(&qmgr_regs->sram[queue]);
queue             353 drivers/soc/ixp4xx/ixp4xx-qmgr.c 	       qmgr_queue_descs[queue], queue);
queue             354 drivers/soc/ixp4xx/ixp4xx-qmgr.c 	qmgr_queue_descs[queue][0] = '\x0';
queue             357 drivers/soc/ixp4xx/ixp4xx-qmgr.c 	while ((addr = qmgr_get_entry(queue)))
queue             359 drivers/soc/ixp4xx/ixp4xx-qmgr.c 		       queue, addr);
queue             361 drivers/soc/ixp4xx/ixp4xx-qmgr.c 	__raw_writel(0, &qmgr_regs->sram[queue]);
queue             367 drivers/soc/ixp4xx/ixp4xx-qmgr.c 	irq_handlers[queue] = NULL; /* catch IRQ bugs */
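
The qmgr_enable_irq()/qmgr_disable_irq() entries above decompose a queue number into an IRQ-enable register index ("half") and a bit mask within that register. A minimal standalone sketch of just that arithmetic; HALF_QUEUES = 32 is assumed from the `queue / 32` division, and the sample queue numbers are arbitrary:

```c
#include <stdio.h>
#include <stdint.h>

#define QUEUES		64	/* assumed: two banks of 32 queues */
#define HALF_QUEUES	32

int main(void)
{
	unsigned int queue;

	for (queue = 0; queue < QUEUES; queue += 31) {
		int half = queue / 32;	/* which irqen register */
		uint32_t mask = 1u << (queue & (HALF_QUEUES - 1)); /* bit inside it */

		printf("queue %2u -> half %d, mask 0x%08x\n", queue, half, mask);
	}
	return 0;
}
```
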
queue             223 drivers/soc/ti/knav_qmss.h 	struct knav_queue		*queue;
queue              28 drivers/soc/ti/knav_qmss_acc.c 	int range_base, queue;
queue              33 drivers/soc/ti/knav_qmss_acc.c 		for (queue = 0; queue < range->num_queues; queue++) {
queue              35 drivers/soc/ti/knav_qmss_acc.c 								queue);
queue              39 drivers/soc/ti/knav_qmss_acc.c 					range_base + queue);
queue              44 drivers/soc/ti/knav_qmss_acc.c 		queue = acc->channel - range->acc_info.start_channel;
queue              45 drivers/soc/ti/knav_qmss_acc.c 		inst = knav_range_offset_to_inst(kdev, range, queue);
queue              47 drivers/soc/ti/knav_qmss_acc.c 			range_base + queue);
queue              87 drivers/soc/ti/knav_qmss_acc.c 	int range_base, channel, queue = 0;
queue              98 drivers/soc/ti/knav_qmss_acc.c 		for (queue = 0; queue < range->num_irqs; queue++)
queue              99 drivers/soc/ti/knav_qmss_acc.c 			if (range->irqs[queue].irq == irq)
queue             101 drivers/soc/ti/knav_qmss_acc.c 		kq = knav_range_offset_to_inst(kdev, range, queue);
queue             102 drivers/soc/ti/knav_qmss_acc.c 		acc += queue;
queue             148 drivers/soc/ti/knav_qmss_acc.c 			queue = list[ACC_LIST_ENTRY_QUEUE_IDX] >> 16;
queue             149 drivers/soc/ti/knav_qmss_acc.c 			if (queue < range_base ||
queue             150 drivers/soc/ti/knav_qmss_acc.c 			    queue >= range_base + range->num_queues) {
queue             153 drivers/soc/ti/knav_qmss_acc.c 					queue, range_base,
queue             157 drivers/soc/ti/knav_qmss_acc.c 			queue -= range_base;
queue             159 drivers/soc/ti/knav_qmss_acc.c 								queue);
queue             166 drivers/soc/ti/knav_qmss_acc.c 				queue + range_base);
queue             174 drivers/soc/ti/knav_qmss_acc.c 			val, idx, queue + range_base);
queue             196 drivers/soc/ti/knav_qmss_acc.c 				int queue, bool enabled)
queue             209 drivers/soc/ti/knav_qmss_acc.c 		acc = range->acc + queue;
queue             210 drivers/soc/ti/knav_qmss_acc.c 		irq = range->irqs[queue].irq;
queue             211 drivers/soc/ti/knav_qmss_acc.c 		cpu_mask = range->irqs[queue].cpu_mask;
queue             216 drivers/soc/ti/knav_qmss_acc.c 		new = old | BIT(queue);
queue             218 drivers/soc/ti/knav_qmss_acc.c 		new = old & ~BIT(queue);
queue             303 drivers/soc/ti/knav_qmss_acc.c 				int queue)
queue             315 drivers/soc/ti/knav_qmss_acc.c 		acc = range->acc + queue;
queue             316 drivers/soc/ti/knav_qmss_acc.c 		queue_base = range->queue_base + queue;
queue             336 drivers/soc/ti/knav_qmss_acc.c 				int queue)
queue             342 drivers/soc/ti/knav_qmss_acc.c 	acc = range->acc + queue;
queue             344 drivers/soc/ti/knav_qmss_acc.c 	knav_acc_setup_cmd(kdev, range, &cmd, queue);
queue             354 drivers/soc/ti/knav_qmss_acc.c 						int queue)
queue             360 drivers/soc/ti/knav_qmss_acc.c 	acc = range->acc + queue;
queue             362 drivers/soc/ti/knav_qmss_acc.c 	knav_acc_setup_cmd(kdev, range, &cmd, queue);
queue             377 drivers/soc/ti/knav_qmss_acc.c 	int queue;
queue             379 drivers/soc/ti/knav_qmss_acc.c 	for (queue = 0; queue < range->num_queues; queue++) {
queue             380 drivers/soc/ti/knav_qmss_acc.c 		acc = range->acc + queue;
queue             382 drivers/soc/ti/knav_qmss_acc.c 		knav_acc_stop(kdev, range, queue);
queue             384 drivers/soc/ti/knav_qmss_acc.c 		result = knav_acc_start(kdev, range, queue);
queue             112 drivers/soc/ti/knav_qmss_queue.c 	unsigned queue = inst->id - range->queue_base;
queue             116 drivers/soc/ti/knav_qmss_queue.c 		irq = range->irqs[queue].irq;
queue             122 drivers/soc/ti/knav_qmss_queue.c 		if (range->irqs[queue].cpu_mask) {
queue             123 drivers/soc/ti/knav_qmss_queue.c 			ret = irq_set_affinity_hint(irq, range->irqs[queue].cpu_mask);
queue             137 drivers/soc/ti/knav_qmss_queue.c 	unsigned queue = inst->id - inst->range->queue_base;
queue             141 drivers/soc/ti/knav_qmss_queue.c 		irq = range->irqs[queue].irq;
queue             384 drivers/soc/ti/knav_qmss_queue.c 	unsigned queue;
queue             387 drivers/soc/ti/knav_qmss_queue.c 		queue = inst->id - range->queue_base;
queue             389 drivers/soc/ti/knav_qmss_queue.c 			enable_irq(range->irqs[queue].irq);
queue             391 drivers/soc/ti/knav_qmss_queue.c 			disable_irq_nosync(range->irqs[queue].irq);
queue             708 drivers/soc/ti/knav_qmss_queue.c 		knav_queue_push(pool->queue, dma_addr, dma_size, 0);
queue             720 drivers/soc/ti/knav_qmss_queue.c 	if (!pool->queue)
queue             724 drivers/soc/ti/knav_qmss_queue.c 		dma = knav_queue_pop(pool->queue, &size);
queue             735 drivers/soc/ti/knav_qmss_queue.c 	knav_queue_close(pool->queue);
queue             799 drivers/soc/ti/knav_qmss_queue.c 	pool->queue = knav_queue_open(name, KNAV_QUEUE_GP, 0);
queue             800 drivers/soc/ti/knav_qmss_queue.c 	if (IS_ERR_OR_NULL(pool->queue)) {
queue             803 drivers/soc/ti/knav_qmss_queue.c 			name, PTR_ERR(pool->queue));
queue             804 drivers/soc/ti/knav_qmss_queue.c 		ret = PTR_ERR(pool->queue);
queue             905 drivers/soc/ti/knav_qmss_queue.c 	dma = knav_queue_pop(pool->queue, &size);
queue             922 drivers/soc/ti/knav_qmss_queue.c 	knav_queue_push(pool->queue, dma, pool->region->desc_size, 0);
queue             984 drivers/soc/ti/knav_qmss_queue.c 	return knav_queue_get_count(pool->queue);
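
The knav_qmss_queue.c entries above implement a descriptor pool as a hardware FIFO of descriptor addresses: pool creation pushes every address onto a general-purpose queue, allocation pops one, and freeing pushes it back. A standalone sketch of the same idea, with a small software ring standing in for the QMSS queue; every name here is hypothetical, not the driver's API:

```c
#include <stdio.h>
#include <stdint.h>

#define POOL_DESCS 4

struct fake_queue {			/* stand-in for the hardware queue */
	uint32_t slots[POOL_DESCS];
	unsigned int head, count;
};

static void queue_push(struct fake_queue *q, uint32_t dma)
{
	q->slots[(q->head + q->count) % POOL_DESCS] = dma;
	q->count++;
}

static int queue_pop(struct fake_queue *q, uint32_t *dma)
{
	if (!q->count)
		return -1;		/* pool exhausted */
	*dma = q->slots[q->head];
	q->head = (q->head + 1) % POOL_DESCS;
	q->count--;
	return 0;
}

int main(void)
{
	struct fake_queue pool = { { 0 }, 0, 0 };
	uint32_t desc;
	unsigned int i;

	/* pool creation: seed the queue with every descriptor address */
	for (i = 0; i < POOL_DESCS; i++)
		queue_push(&pool, 0x80000000u + i * 64);

	while (!queue_pop(&pool, &desc))	/* "desc_get" until empty */
		printf("got descriptor at 0x%08x\n", desc);
	return 0;
}
```
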
queue              43 drivers/spi/spi-mpc52xx-psc.c 	struct list_head queue;
queue             207 drivers/spi/spi-mpc52xx-psc.c 	while (!list_empty(&mps->queue)) {
queue             214 drivers/spi/spi-mpc52xx-psc.c 		m = container_of(mps->queue.next, struct spi_message, queue);
queue             215 drivers/spi/spi-mpc52xx-psc.c 		list_del_init(&m->queue);
queue             296 drivers/spi/spi-mpc52xx-psc.c 	list_add_tail(&m->queue, &mps->queue);
queue             421 drivers/spi/spi-mpc52xx-psc.c 	INIT_LIST_HEAD(&mps->queue);
queue              75 drivers/spi/spi-mpc52xx.c 	struct list_head queue;		/* queue of pending messages */
queue             154 drivers/spi/spi-mpc52xx.c 	if (list_empty(&ms->queue))
queue             158 drivers/spi/spi-mpc52xx.c 	ms->message = list_first_entry(&ms->queue, struct spi_message, queue);
queue             159 drivers/spi/spi-mpc52xx.c 	list_del_init(&ms->message->queue);
queue             369 drivers/spi/spi-mpc52xx.c 	list_add_tail(&m->queue, &ms->queue);
queue             470 drivers/spi/spi-mpc52xx.c 	INIT_LIST_HEAD(&ms->queue);
queue              76 drivers/spi/spi-sh.c 	struct list_head queue;
queue             285 drivers/spi/spi-sh.c 	while (!list_empty(&ss->queue)) {
queue             286 drivers/spi/spi-sh.c 		mesg = list_entry(ss->queue.next, struct spi_message, queue);
queue             287 drivers/spi/spi-sh.c 		list_del_init(&mesg->queue);
queue             373 drivers/spi/spi-sh.c 	list_add_tail(&mesg->queue, &ss->queue);
queue             472 drivers/spi/spi-sh.c 	INIT_LIST_HEAD(&ss->queue);
queue             165 drivers/spi/spi-topcliff-pch.c 	struct list_head queue;
queue             503 drivers/spi/spi-topcliff-pch.c 	list_add_tail(&pmsg->queue, &data->queue);
queue             585 drivers/spi/spi-topcliff-pch.c 		list_for_each_entry_safe(pmsg, tmp, data->queue.next, queue) {
queue             592 drivers/spi/spi-topcliff-pch.c 			list_del_init(&pmsg->queue);
queue             656 drivers/spi/spi-topcliff-pch.c 	if ((list_empty(&data->queue) == 0) &&
queue             670 drivers/spi/spi-topcliff-pch.c 		list_for_each_entry_safe(pmsg, tmp, data->queue.next, queue) {
queue             677 drivers/spi/spi-topcliff-pch.c 			list_del_init(&pmsg->queue);
queue            1133 drivers/spi/spi-topcliff-pch.c 		list_for_each_entry_safe(pmsg, tmp, data->queue.next, queue) {
queue            1143 drivers/spi/spi-topcliff-pch.c 			list_del_init(&pmsg->queue);
queue            1155 drivers/spi/spi-topcliff-pch.c 	data->current_msg = list_entry(data->queue.next, struct spi_message,
queue            1156 drivers/spi/spi-topcliff-pch.c 					queue);
queue            1158 drivers/spi/spi-topcliff-pch.c 	list_del_init(&data->current_msg->queue);
queue            1367 drivers/spi/spi-topcliff-pch.c 	INIT_LIST_HEAD(&data->queue);
queue            1436 drivers/spi/spi-topcliff-pch.c 	while ((list_empty(&data->queue) == 0) && --count) {
queue              77 drivers/spi/spi-txx9.c 	struct list_head queue;
queue             286 drivers/spi/spi-txx9.c 	while (!list_empty(&c->queue)) {
queue             289 drivers/spi/spi-txx9.c 		m = container_of(c->queue.next, struct spi_message, queue);
queue             290 drivers/spi/spi-txx9.c 		list_del_init(&m->queue);
queue             316 drivers/spi/spi-txx9.c 	list_add_tail(&m->queue, &c->queue);
queue             340 drivers/spi/spi-txx9.c 	INIT_LIST_HEAD(&c->queue);
queue            1290 drivers/spi/spi.c 	if (list_empty(&ctlr->queue) || !ctlr->running) {
queue            1329 drivers/spi/spi.c 	msg = list_first_entry(&ctlr->queue, struct spi_message, queue);
queue            1332 drivers/spi/spi.c 	list_del_init(&msg->queue);
queue            1489 drivers/spi/spi.c 	next = list_first_entry_or_null(&ctlr->queue, struct spi_message,
queue            1490 drivers/spi/spi.c 					queue);
queue            1572 drivers/spi/spi.c 	while ((!list_empty(&ctlr->queue) || ctlr->busy) && limit--) {
queue            1578 drivers/spi/spi.c 	if (!list_empty(&ctlr->queue) || ctlr->busy)
queue            1631 drivers/spi/spi.c 	list_add_tail(&msg->queue, &ctlr->queue);
queue            2436 drivers/spi/spi.c 	INIT_LIST_HEAD(&ctlr->queue);
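
The SPI entries above all share one pattern: spi_message embeds a list_head named queue, submitters list_add_tail() it onto the controller's pending list, and a worker repeatedly takes the head with container_of()/list_del_init() until the list is empty. A userspace re-implementation of just that loop (the drivers' spinlocks are deliberately omitted; the helpers are a minimal subset of linux/list.h):

```c
#include <stdio.h>
#include <stddef.h>

struct list_head { struct list_head *next, *prev; };
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

static void INIT_LIST_HEAD(struct list_head *h) { h->next = h->prev = h; }
static int list_empty(const struct list_head *h) { return h->next == h; }
static void list_add_tail(struct list_head *n, struct list_head *h)
{
	n->prev = h->prev; n->next = h;
	h->prev->next = n; h->prev = n;
}
static void list_del_init(struct list_head *n)
{
	n->prev->next = n->next; n->next->prev = n->prev;
	INIT_LIST_HEAD(n);
}

struct spi_message { int id; struct list_head queue; };

int main(void)
{
	struct list_head pending;		/* the controller's queue */
	struct spi_message m1 = { .id = 1 }, m2 = { .id = 2 };

	INIT_LIST_HEAD(&pending);
	list_add_tail(&m1.queue, &pending);	/* transfer() side */
	list_add_tail(&m2.queue, &pending);

	while (!list_empty(&pending)) {		/* worker side */
		struct spi_message *m =
			container_of(pending.next, struct spi_message, queue);
		list_del_init(&m->queue);
		printf("processing message %d\n", m->id);
	}
	return 0;
}
```
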
queue            1222 drivers/staging/emxx_udc/emxx_udc.c 	req = list_first_entry_or_null(&ep->queue, struct nbu2ss_req, queue);
queue            1681 drivers/staging/emxx_udc/emxx_udc.c 	req = list_first_entry_or_null(&ep->queue, struct nbu2ss_req, queue);
queue            1704 drivers/staging/emxx_udc/emxx_udc.c 	req = list_first_entry_or_null(&ep->queue, struct nbu2ss_req, queue);
queue            1727 drivers/staging/emxx_udc/emxx_udc.c 	req = list_first_entry_or_null(&ep->queue, struct nbu2ss_req, queue);
queue            1831 drivers/staging/emxx_udc/emxx_udc.c 	list_del_init(&req->queue);
queue            1842 drivers/staging/emxx_udc/emxx_udc.c 		if (!list_empty(&ep->queue))
queue            2023 drivers/staging/emxx_udc/emxx_udc.c 	req = list_first_entry_or_null(&ep->queue, struct nbu2ss_req, queue);
queue            2075 drivers/staging/emxx_udc/emxx_udc.c 	if (list_empty(&ep->queue))
queue            2079 drivers/staging/emxx_udc/emxx_udc.c 	list_for_each_entry(req, &ep->queue, queue) {
queue            2517 drivers/staging/emxx_udc/emxx_udc.c 	INIT_LIST_HEAD(&req->queue);
queue            2560 drivers/staging/emxx_udc/emxx_udc.c 		     !list_empty(&req->queue))) {
queue            2567 drivers/staging/emxx_udc/emxx_udc.c 		if (!list_empty(&req->queue))
queue            2615 drivers/staging/emxx_udc/emxx_udc.c 	bflag = list_empty(&ep->queue);
queue            2616 drivers/staging/emxx_udc/emxx_udc.c 	list_add_tail(&req->queue, &ep->queue);
queue            2623 drivers/staging/emxx_udc/emxx_udc.c 			list_del(&req->queue);
queue            2663 drivers/staging/emxx_udc/emxx_udc.c 	list_for_each_entry(req, &ep->queue, queue) {
queue            2709 drivers/staging/emxx_udc/emxx_udc.c 		if (list_empty(&ep->queue))
queue            2809 drivers/staging/emxx_udc/emxx_udc.c 	.queue		= nbu2ss_ep_queue,
queue            3034 drivers/staging/emxx_udc/emxx_udc.c 		INIT_LIST_HEAD(&ep->queue);
queue             523 drivers/staging/emxx_udc/emxx_udc.h 	struct list_head		queue;
queue             536 drivers/staging/emxx_udc/emxx_udc.h 	struct list_head		queue;
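
In the emxx_udc entries above, INIT_LIST_HEAD(&req->queue) makes the node point at itself, so list_empty() applied to the *node* doubles as an "is this request already queued?" test; the driver rejects requests whose queue member is non-empty. A tiny standalone demonstration of that property:

```c
#include <stdio.h>

struct list_head { struct list_head *next, *prev; };

static void INIT_LIST_HEAD(struct list_head *h) { h->next = h->prev = h; }
static int list_empty(const struct list_head *h) { return h->next == h; }
static void list_add_tail(struct list_head *n, struct list_head *h)
{
	n->prev = h->prev; n->next = h;
	h->prev->next = n; h->prev = n;
}

int main(void)
{
	struct list_head ep_queue, req_link;

	INIT_LIST_HEAD(&ep_queue);
	INIT_LIST_HEAD(&req_link);	/* node initialised to point at itself */

	/* list_empty() on the node means "not linked anywhere yet" */
	printf("queued before add: %s\n", list_empty(&req_link) ? "no" : "yes");
	list_add_tail(&req_link, &ep_queue);
	printf("queued after add:  %s\n", list_empty(&req_link) ? "no" : "yes");
	return 0;
}
```
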
queue             860 drivers/staging/media/imx/imx-media-capture.c 	vfd->queue = &priv->q;
queue             812 drivers/staging/media/imx/imx-media-utils.c 				if (buftype == vfd->queue->type)
queue             845 drivers/staging/media/imx/imx-media-utils.c 		if (buftype == vfd->queue->type)
queue             277 drivers/staging/media/ipu3/ipu3-abi.h #define IMGU_ABI_EVENT_BUFFER_ENQUEUED(thread, queue)	\
queue             278 drivers/staging/media/ipu3/ipu3-abi.h 				(0 << 24 | (thread) << 16 | (queue) << 8)
queue             279 drivers/staging/media/ipu3/ipu3-abi.h #define IMGU_ABI_EVENT_BUFFER_DEQUEUED(queue)	(1 << 24 | (queue) << 8)
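
The IMGU_ABI event macros above pack an event type, thread and queue into one 32-bit word at fixed shifts. A sketch of encoding plus the matching decode; the 8-bit field widths are inferred from the shift distances, not stated in the header:

```c
#include <stdio.h>
#include <stdint.h>

#define IMGU_ABI_EVENT_BUFFER_ENQUEUED(thread, queue) \
	(0 << 24 | (thread) << 16 | (queue) << 8)
#define IMGU_ABI_EVENT_BUFFER_DEQUEUED(queue)	(1 << 24 | (queue) << 8)

int main(void)
{
	uint32_t ev = IMGU_ABI_EVENT_BUFFER_DEQUEUED(3);

	/* decoding just reverses the shifts (field widths assumed) */
	printf("type=%u queue=%u\n", (ev >> 24) & 0xff, (ev >> 8) & 0xff);
	return 0;
}
```
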
queue             379 drivers/staging/media/ipu3/ipu3-css-params.c 	target_width = css_pipe->queue[IPU3_CSS_QUEUE_VF].fmt.mpix.width;
queue             380 drivers/staging/media/ipu3/ipu3-css-params.c 	target_height = css_pipe->queue[IPU3_CSS_QUEUE_VF].fmt.mpix.height;
queue             391 drivers/staging/media/ipu3/ipu3-css-params.c 		css_pipe->queue[IPU3_CSS_QUEUE_OUT].css_fmt->frame_format;
queue             394 drivers/staging/media/ipu3/ipu3-css-params.c 		css_pipe->queue[IPU3_CSS_QUEUE_OUT].fmt.mpix.width;
queue             396 drivers/staging/media/ipu3/ipu3-css-params.c 		css_pipe->queue[IPU3_CSS_QUEUE_OUT].fmt.mpix.height;
queue             398 drivers/staging/media/ipu3/ipu3-css-params.c 		css_pipe->queue[IPU3_CSS_QUEUE_OUT].width_pad;
queue             400 drivers/staging/media/ipu3/ipu3-css-params.c 		css_pipe->queue[IPU3_CSS_QUEUE_OUT].css_fmt->frame_format;
queue             403 drivers/staging/media/ipu3/ipu3-css-params.c 		css_pipe->queue[IPU3_CSS_QUEUE_VF].fmt.mpix.width;
queue             405 drivers/staging/media/ipu3/ipu3-css-params.c 		css_pipe->queue[IPU3_CSS_QUEUE_VF].fmt.mpix.height;
queue             407 drivers/staging/media/ipu3/ipu3-css-params.c 		css_pipe->queue[IPU3_CSS_QUEUE_VF].width_pad;
queue             409 drivers/staging/media/ipu3/ipu3-css-params.c 		css_pipe->queue[IPU3_CSS_QUEUE_VF].css_fmt->frame_format;
queue             414 drivers/staging/media/ipu3/ipu3-css-params.c 		css_pipe->queue[IPU3_CSS_QUEUE_OUT].fmt.mpix.width;
queue             416 drivers/staging/media/ipu3/ipu3-css-params.c 		css_pipe->queue[IPU3_CSS_QUEUE_OUT].fmt.mpix.height;
queue             418 drivers/staging/media/ipu3/ipu3-css-params.c 		css_pipe->queue[IPU3_CSS_QUEUE_VF].fmt.mpix.width;
queue             420 drivers/staging/media/ipu3/ipu3-css-params.c 		css_pipe->queue[IPU3_CSS_QUEUE_VF].fmt.mpix.height;
queue            1764 drivers/staging/media/ipu3/ipu3-css-params.c 		css_pipe->queue[IPU3_CSS_QUEUE_IN].fmt.mpix.width;
queue            1766 drivers/staging/media/ipu3/ipu3-css-params.c 		css_pipe->queue[IPU3_CSS_QUEUE_IN].fmt.mpix.height;
queue            1768 drivers/staging/media/ipu3/ipu3-css-params.c 		css_pipe->queue[IPU3_CSS_QUEUE_IN].css_fmt->bayer_order;
queue            1807 drivers/staging/media/ipu3/ipu3-css-params.c 				css_pipe->queue[IPU3_CSS_QUEUE_OUT].fmt.mpix.height;
queue            1822 drivers/staging/media/ipu3/ipu3-css-params.c 			css_pipe->queue[IPU3_CSS_QUEUE_OUT].fmt.mpix.width;
queue            1863 drivers/staging/media/ipu3/ipu3-css-params.c 			css_pipe->queue[IPU3_CSS_QUEUE_OUT].fmt.mpix.height;
queue            1877 drivers/staging/media/ipu3/ipu3-css-params.c 		css_pipe->queue[IPU3_CSS_QUEUE_OUT].fmt.mpix.width;
queue            1879 drivers/staging/media/ipu3/ipu3-css-params.c 		css_pipe->queue[IPU3_CSS_QUEUE_OUT].fmt.mpix.height;
queue            1888 drivers/staging/media/ipu3/ipu3-css-params.c 		css_pipe->queue[IPU3_CSS_QUEUE_VF].fmt.mpix.width;
queue            1890 drivers/staging/media/ipu3/ipu3-css-params.c 		css_pipe->queue[IPU3_CSS_QUEUE_VF].fmt.mpix.height;
queue            1962 drivers/staging/media/ipu3/ipu3-css-params.c 		&css_pipe->queue[IPU3_CSS_QUEUE_IN].fmt.mpix;
queue            1978 drivers/staging/media/ipu3/ipu3-css-params.c 	ofs_x += css_pipe->queue[IPU3_CSS_QUEUE_IN].css_fmt->bayer_order ==
queue            1980 drivers/staging/media/ipu3/ipu3-css-params.c 		css_pipe->queue[IPU3_CSS_QUEUE_IN].css_fmt->bayer_order ==
queue            1984 drivers/staging/media/ipu3/ipu3-css-params.c 	ofs_y += css_pipe->queue[IPU3_CSS_QUEUE_IN].css_fmt->bayer_order ==
queue            1986 drivers/staging/media/ipu3/ipu3-css-params.c 		css_pipe->queue[IPU3_CSS_QUEUE_IN].css_fmt->bayer_order ==
queue             123 drivers/staging/media/ipu3/ipu3-css.c static int imgu_css_queue_init(struct imgu_css_queue *queue,
queue             126 drivers/staging/media/ipu3/ipu3-css.c 	struct v4l2_pix_format_mplane *const f = &queue->fmt.mpix;
queue             130 drivers/staging/media/ipu3/ipu3-css.c 	INIT_LIST_HEAD(&queue->bufs);
queue             132 drivers/staging/media/ipu3/ipu3-css.c 	queue->css_fmt = NULL;	/* Disable */
queue             139 drivers/staging/media/ipu3/ipu3-css.c 		queue->css_fmt = &imgu_css_formats[i];
queue             143 drivers/staging/media/ipu3/ipu3-css.c 	if (!queue->css_fmt)
queue             146 drivers/staging/media/ipu3/ipu3-css.c 	queue->fmt.mpix = *fmt;
queue             152 drivers/staging/media/ipu3/ipu3-css.c 	queue->width_pad = ALIGN(f->width, queue->css_fmt->width_align);
queue             153 drivers/staging/media/ipu3/ipu3-css.c 	if (queue->css_fmt->frame_format != IMGU_ABI_FRAME_FORMAT_RAW_PACKED)
queue             154 drivers/staging/media/ipu3/ipu3-css.c 		f->plane_fmt[0].bytesperline = DIV_ROUND_UP(queue->width_pad *
queue             155 drivers/staging/media/ipu3/ipu3-css.c 					queue->css_fmt->bytesperpixel_num,
queue             162 drivers/staging/media/ipu3/ipu3-css.c 					     queue->css_fmt->bytesperpixel_num;
queue             165 drivers/staging/media/ipu3/ipu3-css.c 	if (queue->css_fmt->chroma_decim)
queue             166 drivers/staging/media/ipu3/ipu3-css.c 		sizeimage += 2 * sizeimage / queue->css_fmt->chroma_decim;
queue             171 drivers/staging/media/ipu3/ipu3-css.c 	f->colorspace = queue->css_fmt->colorspace;
queue             735 drivers/staging/media/ipu3/ipu3-css.c 				css_pipe->queue[IPU3_CSS_QUEUE_IN].fmt.mpix.width;
queue             737 drivers/staging/media/ipu3/ipu3-css.c 				css_pipe->queue[IPU3_CSS_QUEUE_IN].fmt.mpix.height;
queue             739 drivers/staging/media/ipu3/ipu3-css.c 				css_pipe->queue[IPU3_CSS_QUEUE_IN].width_pad;
queue             741 drivers/staging/media/ipu3/ipu3-css.c 			css_pipe->queue[IPU3_CSS_QUEUE_IN].css_fmt->frame_format;
queue             743 drivers/staging/media/ipu3/ipu3-css.c 			css_pipe->queue[IPU3_CSS_QUEUE_IN].css_fmt->bit_depth;
queue             745 drivers/staging/media/ipu3/ipu3-css.c 			css_pipe->queue[IPU3_CSS_QUEUE_IN].css_fmt->bayer_order;
queue             753 drivers/staging/media/ipu3/ipu3-css.c 			css_pipe->queue[IPU3_CSS_QUEUE_OUT].css_fmt->frame_format;
queue             755 drivers/staging/media/ipu3/ipu3-css.c 			css_pipe->queue[IPU3_CSS_QUEUE_OUT].css_fmt->bit_depth;
queue             757 drivers/staging/media/ipu3/ipu3-css.c 			css_pipe->queue[IPU3_CSS_QUEUE_OUT].css_fmt->bayer_order;
queue             761 drivers/staging/media/ipu3/ipu3-css.c 				css_pipe->queue[IPU3_CSS_QUEUE_OUT].fmt.mpix.width;
queue             763 drivers/staging/media/ipu3/ipu3-css.c 				css_pipe->queue[IPU3_CSS_QUEUE_OUT].fmt.mpix.height;
queue             765 drivers/staging/media/ipu3/ipu3-css.c 				css_pipe->queue[IPU3_CSS_QUEUE_OUT].width_pad;
queue             767 drivers/staging/media/ipu3/ipu3-css.c 			css_pipe->queue[IPU3_CSS_QUEUE_OUT].css_fmt->frame_format;
queue             769 drivers/staging/media/ipu3/ipu3-css.c 			css_pipe->queue[IPU3_CSS_QUEUE_OUT].css_fmt->bit_depth;
queue             771 drivers/staging/media/ipu3/ipu3-css.c 			css_pipe->queue[IPU3_CSS_QUEUE_OUT].css_fmt->bayer_order;
queue             775 drivers/staging/media/ipu3/ipu3-css.c 			css_pipe->queue[IPU3_CSS_QUEUE_VF].fmt.mpix.width;
queue             777 drivers/staging/media/ipu3/ipu3-css.c 			css_pipe->queue[IPU3_CSS_QUEUE_VF].fmt.mpix.height;
queue             779 drivers/staging/media/ipu3/ipu3-css.c 			css_pipe->queue[IPU3_CSS_QUEUE_VF].width_pad;
queue             781 drivers/staging/media/ipu3/ipu3-css.c 			css_pipe->queue[IPU3_CSS_QUEUE_VF].css_fmt->frame_format;
queue             783 drivers/staging/media/ipu3/ipu3-css.c 			css_pipe->queue[IPU3_CSS_QUEUE_VF].css_fmt->bit_depth;
queue             785 drivers/staging/media/ipu3/ipu3-css.c 			css_pipe->queue[IPU3_CSS_QUEUE_VF].css_fmt->bayer_order;
queue             939 drivers/staging/media/ipu3/ipu3-css.c 				css_pipe->queue[IPU3_CSS_QUEUE_IN].fmt.mpix.width;
queue             941 drivers/staging/media/ipu3/ipu3-css.c 				css_pipe->queue[IPU3_CSS_QUEUE_IN].fmt.mpix.height;
queue             943 drivers/staging/media/ipu3/ipu3-css.c 					css_pipe->queue[IPU3_CSS_QUEUE_IN].width_pad;
queue             945 drivers/staging/media/ipu3/ipu3-css.c 			css_pipe->queue[IPU3_CSS_QUEUE_IN].css_fmt->frame_format;
queue             947 drivers/staging/media/ipu3/ipu3-css.c 			css_pipe->queue[IPU3_CSS_QUEUE_IN].css_fmt->bit_depth;
queue             949 drivers/staging/media/ipu3/ipu3-css.c 			css_pipe->queue[IPU3_CSS_QUEUE_IN].css_fmt->bayer_order;
queue             956 drivers/staging/media/ipu3/ipu3-css.c 				css_pipe->queue[IPU3_CSS_QUEUE_OUT].fmt.mpix.width;
queue             958 drivers/staging/media/ipu3/ipu3-css.c 				css_pipe->queue[IPU3_CSS_QUEUE_OUT].fmt.mpix.height;
queue             960 drivers/staging/media/ipu3/ipu3-css.c 				css_pipe->queue[IPU3_CSS_QUEUE_OUT].width_pad;
queue             962 drivers/staging/media/ipu3/ipu3-css.c 			css_pipe->queue[IPU3_CSS_QUEUE_OUT].css_fmt->frame_format;
queue             964 drivers/staging/media/ipu3/ipu3-css.c 			css_pipe->queue[IPU3_CSS_QUEUE_OUT].css_fmt->bit_depth;
queue             966 drivers/staging/media/ipu3/ipu3-css.c 			css_pipe->queue[IPU3_CSS_QUEUE_OUT].css_fmt->bayer_order;
queue             969 drivers/staging/media/ipu3/ipu3-css.c 				css_pipe->queue[IPU3_CSS_QUEUE_OUT].width_pad *
queue             970 drivers/staging/media/ipu3/ipu3-css.c 				css_pipe->queue[IPU3_CSS_QUEUE_OUT].fmt.mpix.height;
queue             985 drivers/staging/media/ipu3/ipu3-css.c 			css_pipe->queue[IPU3_CSS_QUEUE_OUT].css_fmt->frame_format;
queue             987 drivers/staging/media/ipu3/ipu3-css.c 			css_pipe->queue[IPU3_CSS_QUEUE_OUT].css_fmt->bit_depth;
queue             989 drivers/staging/media/ipu3/ipu3-css.c 			css_pipe->queue[IPU3_CSS_QUEUE_OUT].css_fmt->bayer_order;
queue             993 drivers/staging/media/ipu3/ipu3-css.c 				css_pipe->queue[IPU3_CSS_QUEUE_VF].fmt.mpix.width;
queue             995 drivers/staging/media/ipu3/ipu3-css.c 				css_pipe->queue[IPU3_CSS_QUEUE_VF].fmt.mpix.height;
queue             997 drivers/staging/media/ipu3/ipu3-css.c 					css_pipe->queue[IPU3_CSS_QUEUE_VF].width_pad;
queue             999 drivers/staging/media/ipu3/ipu3-css.c 			css_pipe->queue[IPU3_CSS_QUEUE_VF].css_fmt->frame_format;
queue            1001 drivers/staging/media/ipu3/ipu3-css.c 			css_pipe->queue[IPU3_CSS_QUEUE_VF].css_fmt->bit_depth;
queue            1003 drivers/staging/media/ipu3/ipu3-css.c 			css_pipe->queue[IPU3_CSS_QUEUE_VF].css_fmt->bayer_order;
queue            1006 drivers/staging/media/ipu3/ipu3-css.c 				css_pipe->queue[IPU3_CSS_QUEUE_VF].width_pad *
queue            1007 drivers/staging/media/ipu3/ipu3-css.c 				css_pipe->queue[IPU3_CSS_QUEUE_VF].fmt.mpix.height;
queue            1009 drivers/staging/media/ipu3/ipu3-css.c 			css_pipe->queue[IPU3_CSS_QUEUE_VF].width_pad *
queue            1010 drivers/staging/media/ipu3/ipu3-css.c 			css_pipe->queue[IPU3_CSS_QUEUE_VF].fmt.mpix.height * 5 / 4;
queue            1102 drivers/staging/media/ipu3/ipu3-css.c static u8 imgu_css_queue_pos(struct imgu_css *css, int queue, int thread)
queue            1110 drivers/staging/media/ipu3/ipu3-css.c 	return queue >= 0 ? readb(&q->host2sp_bufq_info[thread][queue].end) :
queue            1116 drivers/staging/media/ipu3/ipu3-css.c 			       int queue, int thread, u32 data)
queue            1125 drivers/staging/media/ipu3/ipu3-css.c 	if (queue >= 0) {
queue            1126 drivers/staging/media/ipu3/ipu3-css.c 		size = readb(&q->host2sp_bufq_info[thread][queue].size);
queue            1127 drivers/staging/media/ipu3/ipu3-css.c 		start = readb(&q->host2sp_bufq_info[thread][queue].start);
queue            1128 drivers/staging/media/ipu3/ipu3-css.c 		end = readb(&q->host2sp_bufq_info[thread][queue].end);
queue            1142 drivers/staging/media/ipu3/ipu3-css.c 	if (queue >= 0) {
queue            1143 drivers/staging/media/ipu3/ipu3-css.c 		writel(data, &q->host2sp_bufq[thread][queue][end]);
queue            1144 drivers/staging/media/ipu3/ipu3-css.c 		writeb(end2, &q->host2sp_bufq_info[thread][queue].end);
queue            1154 drivers/staging/media/ipu3/ipu3-css.c static int imgu_css_dequeue_data(struct imgu_css *css, int queue, u32 *data)
queue            1163 drivers/staging/media/ipu3/ipu3-css.c 	if (queue >= 0) {
queue            1164 drivers/staging/media/ipu3/ipu3-css.c 		size = readb(&q->sp2host_bufq_info[queue].size);
queue            1165 drivers/staging/media/ipu3/ipu3-css.c 		start = readb(&q->sp2host_bufq_info[queue].start);
queue            1166 drivers/staging/media/ipu3/ipu3-css.c 		end = readb(&q->sp2host_bufq_info[queue].end);
queue            1181 drivers/staging/media/ipu3/ipu3-css.c 	if (queue >= 0) {
queue            1182 drivers/staging/media/ipu3/ipu3-css.c 		*data = readl(&q->sp2host_bufq[queue][start]);
queue            1183 drivers/staging/media/ipu3/ipu3-css.c 		writeb(start2, &q->sp2host_bufq_info[queue].start);
queue            1191 drivers/staging/media/ipu3/ipu3-css.c 		r = imgu_css_queue_data(css, queue, 0,
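
imgu_css_queue_data()/imgu_css_dequeue_data() above operate a shared-memory ring described by size/start/end cursors: the producer advances end, the consumer advances start. A standalone ring with the conventional one-slot-free full test; the exact full/empty convention and RING_SIZE are assumptions, since the index lines do not show them:

```c
#include <stdio.h>
#include <stdint.h>

#define RING_SIZE 8	/* plays the per-queue "size" field; value assumed */

struct bufq {
	uint32_t data[RING_SIZE];
	uint8_t start, end;	/* consumer and producer cursors */
};

static int bufq_queue(struct bufq *q, uint32_t v)	/* host -> SP side */
{
	uint8_t end2 = (q->end + 1) % RING_SIZE;

	if (end2 == q->start)
		return -1;	/* full: one slot kept free so empty != full */
	q->data[q->end] = v;
	q->end = end2;
	return 0;
}

static int bufq_dequeue(struct bufq *q, uint32_t *v)	/* SP -> host side */
{
	if (q->start == q->end)
		return -1;	/* empty */
	*v = q->data[q->start];
	q->start = (q->start + 1) % RING_SIZE;
	return 0;
}

int main(void)
{
	struct bufq q = { .start = 0, .end = 0 };
	uint32_t v;

	bufq_queue(&q, 0xdead0000);
	bufq_queue(&q, 0xdead0001);
	while (!bufq_dequeue(&q, &v))
		printf("popped 0x%08x\n", v);
	return 0;
}
```
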
queue            1426 drivers/staging/media/ipu3/ipu3-css.c 						 &css_pipe->queue[q].bufs,
queue            1444 drivers/staging/media/ipu3/ipu3-css.c 		if (!list_empty(&css_pipe->queue[q].bufs))
queue            1566 drivers/staging/media/ipu3/ipu3-css.c 			r = imgu_css_queue_init(&css_pipe->queue[q], NULL, 0);
queue            1597 drivers/staging/media/ipu3/ipu3-css.c 				struct imgu_css_queue queue[IPU3_CSS_QUEUES],
queue            1605 drivers/staging/media/ipu3/ipu3-css.c 					&queue[IPU3_CSS_QUEUE_IN].fmt.mpix;
queue            1607 drivers/staging/media/ipu3/ipu3-css.c 					&queue[IPU3_CSS_QUEUE_OUT].fmt.mpix;
queue            1609 drivers/staging/media/ipu3/ipu3-css.c 					&queue[IPU3_CSS_QUEUE_VF].fmt.mpix;
queue            1614 drivers/staging/media/ipu3/ipu3-css.c 	if (!imgu_css_queue_enabled(&queue[IPU3_CSS_QUEUE_IN]))
queue            1662 drivers/staging/media/ipu3/ipu3-css.c 		if (imgu_css_queue_enabled(&queue[IPU3_CSS_QUEUE_OUT])) {
queue            1666 drivers/staging/media/ipu3/ipu3-css.c 			q_fmt = queue[IPU3_CSS_QUEUE_OUT].css_fmt->frame_format;
queue            1684 drivers/staging/media/ipu3/ipu3-css.c 		if (imgu_css_queue_enabled(&queue[IPU3_CSS_QUEUE_VF])) {
queue            1688 drivers/staging/media/ipu3/ipu3-css.c 			q_fmt = queue[IPU3_CSS_QUEUE_VF].css_fmt->frame_format;
queue            1897 drivers/staging/media/ipu3/ipu3-css.c 		if (imgu_css_queue_init(&css_pipe->queue[i], fmts[i],
queue            1943 drivers/staging/media/ipu3/ipu3-css.c 	if (b->queue >= IPU3_CSS_QUEUES || !imgu_css_queues[b->queue].qid)
queue            1946 drivers/staging/media/ipu3/ipu3-css.c 	b->queue_pos = imgu_css_queue_pos(css, imgu_css_queues[b->queue].qid,
queue            1949 drivers/staging/media/ipu3/ipu3-css.c 	if (b->queue_pos >= ARRAY_SIZE(css->pipes[pipe].abi_buffers[b->queue]))
queue            1951 drivers/staging/media/ipu3/ipu3-css.c 	abi_buf = css->pipes[pipe].abi_buffers[b->queue][b->queue_pos].vaddr;
queue            1956 drivers/staging/media/ipu3/ipu3-css.c 	buf_addr = (void *)abi_buf + imgu_css_queues[b->queue].ptr_ofs;
queue            1959 drivers/staging/media/ipu3/ipu3-css.c 	if (b->queue == IPU3_CSS_QUEUE_STAT_3A)
queue            1962 drivers/staging/media/ipu3/ipu3-css.c 	if (b->queue == IPU3_CSS_QUEUE_OUT)
queue            1964 drivers/staging/media/ipu3/ipu3-css.c 				css_pipe->queue[IPU3_CSS_QUEUE_OUT].width_pad;
queue            1966 drivers/staging/media/ipu3/ipu3-css.c 	if (b->queue == IPU3_CSS_QUEUE_VF)
queue            1968 drivers/staging/media/ipu3/ipu3-css.c 					css_pipe->queue[IPU3_CSS_QUEUE_VF].width_pad;
queue            1971 drivers/staging/media/ipu3/ipu3-css.c 	list_add_tail(&b->list, &css_pipe->queue[b->queue].bufs);
queue            1975 drivers/staging/media/ipu3/ipu3-css.c 	data = css->pipes[pipe].abi_buffers[b->queue][b->queue_pos].daddr;
queue            1976 drivers/staging/media/ipu3/ipu3-css.c 	r = imgu_css_queue_data(css, imgu_css_queues[b->queue].qid,
queue            1982 drivers/staging/media/ipu3/ipu3-css.c 					      imgu_css_queues[b->queue].qid);
queue            1988 drivers/staging/media/ipu3/ipu3-css.c 		b, b->queue, pipe);
queue            2015 drivers/staging/media/ipu3/ipu3-css.c 	int evtype, pipe, pipeid, queue, qid, r;
queue            2037 drivers/staging/media/ipu3/ipu3-css.c 		queue = evtype_to_queue[evtype];
queue            2038 drivers/staging/media/ipu3/ipu3-css.c 		qid = imgu_css_queues[queue].qid;
queue            2052 drivers/staging/media/ipu3/ipu3-css.c 			event, queue, pipe, pipeid);
queue            2069 drivers/staging/media/ipu3/ipu3-css.c 		if (list_empty(&css_pipe->queue[queue].bufs)) {
queue            2074 drivers/staging/media/ipu3/ipu3-css.c 		b = list_first_entry(&css_pipe->queue[queue].bufs,
queue            2076 drivers/staging/media/ipu3/ipu3-css.c 		if (queue != b->queue ||
queue            2078 drivers/staging/media/ipu3/ipu3-css.c 			[b->queue][b->queue_pos].daddr) {
queue              71 drivers/staging/media/ipu3/ipu3-css.h 	unsigned int queue;
queue             107 drivers/staging/media/ipu3/ipu3-css.h 	struct imgu_css_queue queue[IPU3_CSS_QUEUES];
queue             207 drivers/staging/media/ipu3/ipu3-css.h 				     unsigned int queue, dma_addr_t daddr)
queue             210 drivers/staging/media/ipu3/ipu3-css.h 	b->queue = queue;
queue             313 drivers/staging/media/ipu3/ipu3-v4l2.c 	unsigned int queue = imgu_node_to_queue(node->id);
queue             315 drivers/staging/media/ipu3/ipu3-v4l2.c 	if (queue == IPU3_CSS_QUEUE_PARAMS)
queue             329 drivers/staging/media/ipu3/ipu3-v4l2.c 	unsigned int queue = imgu_node_to_queue(node->id);
queue             331 drivers/staging/media/ipu3/ipu3-v4l2.c 	if (queue == IPU3_CSS_QUEUE_PARAMS)
queue             343 drivers/staging/media/ipu3/ipu3-v4l2.c 	unsigned int queue = imgu_node_to_queue(node->id);
queue             355 drivers/staging/media/ipu3/ipu3-v4l2.c 	if (queue == IPU3_CSS_QUEUE_PARAMS && payload && payload < need_bytes) {
queue             362 drivers/staging/media/ipu3/ipu3-v4l2.c 	if (queue != IPU3_CSS_QUEUE_PARAMS)
queue             363 drivers/staging/media/ipu3/ipu3-v4l2.c 		imgu_css_buf_init(&buf->css_buf, queue, buf->map.daddr);
queue            1245 drivers/staging/media/ipu3/ipu3-v4l2.c 	vdev->queue = &node->vbq;
queue             153 drivers/staging/media/ipu3/ipu3.c 						   int queue, unsigned int pipe)
queue             159 drivers/staging/media/ipu3/ipu3.c 	if (queue == IPU3_CSS_QUEUE_IN)
queue             162 drivers/staging/media/ipu3/ipu3.c 	if (WARN_ON(!imgu_pipe->queues[queue].dmap.vaddr))
queue             167 drivers/staging/media/ipu3/ipu3.c 		if (imgu_css_buf_state(&imgu_pipe->queues[queue].dummybufs[i]) !=
queue             174 drivers/staging/media/ipu3/ipu3.c 	imgu_css_buf_init(&imgu_pipe->queues[queue].dummybufs[i], queue,
queue             175 drivers/staging/media/ipu3/ipu3.c 			  imgu_pipe->queues[queue].dmap.daddr);
queue             177 drivers/staging/media/ipu3/ipu3.c 	return &imgu_pipe->queues[queue].dummybufs[i];
queue             189 drivers/staging/media/ipu3/ipu3.c 		if (buf == &imgu_pipe->queues[buf->queue].dummybufs[i])
queue             529 drivers/staging/media/ipu3/ipu3.c 		node = imgu_map_node(imgu, b->queue);
queue             819 drivers/staging/media/omap4iss/iss_csi2.c 	.queue = csi2_queue,
queue             288 drivers/staging/media/omap4iss/iss_ipipeif.c 	.queue = ipipeif_video_queue,
queue             338 drivers/staging/media/omap4iss/iss_resizer.c 	.queue = resizer_video_queue,
queue             392 drivers/staging/media/omap4iss/iss_video.c 		video->ops->queue(video, buffer);
queue             518 drivers/staging/media/omap4iss/iss_video.c 	vb2_queue_error(video->queue);
queue             783 drivers/staging/media/omap4iss/iss_video.c 	return vb2_reqbufs(&vfh->queue, rb);
queue             791 drivers/staging/media/omap4iss/iss_video.c 	return vb2_querybuf(&vfh->queue, b);
queue             800 drivers/staging/media/omap4iss/iss_video.c 	return vb2_qbuf(&vfh->queue, video->video.v4l2_dev->mdev, b);
queue             808 drivers/staging/media/omap4iss/iss_video.c 	return vb2_expbuf(&vfh->queue, e);
queue             816 drivers/staging/media/omap4iss/iss_video.c 	return vb2_dqbuf(&vfh->queue, b, file->f_flags & O_NONBLOCK);
queue             942 drivers/staging/media/omap4iss/iss_video.c 	video->queue = &vfh->queue;
queue             947 drivers/staging/media/omap4iss/iss_video.c 	ret = vb2_streamon(&vfh->queue, type);
queue             976 drivers/staging/media/omap4iss/iss_video.c 	vb2_streamoff(&vfh->queue, type);
queue             982 drivers/staging/media/omap4iss/iss_video.c 	video->queue = NULL;
queue            1008 drivers/staging/media/omap4iss/iss_video.c 	if (!vb2_is_streaming(&vfh->queue))
queue            1025 drivers/staging/media/omap4iss/iss_video.c 	vb2_streamoff(&vfh->queue, type);
queue            1026 drivers/staging/media/omap4iss/iss_video.c 	video->queue = NULL;
queue            1120 drivers/staging/media/omap4iss/iss_video.c 	q = &handle->queue;
queue            1166 drivers/staging/media/omap4iss/iss_video.c 	vb2_queue_release(&handle->queue);
queue            1182 drivers/staging/media/omap4iss/iss_video.c 	return vb2_poll(&vfh->queue, file, wait);
queue            1189 drivers/staging/media/omap4iss/iss_video.c 	return vb2_mmap(&vfh->queue, vma);
queue             137 drivers/staging/media/omap4iss/iss_video.h 	int (*queue)(struct iss_video *video, struct iss_buffer *buffer);
queue             163 drivers/staging/media/omap4iss/iss_video.h 	struct vb2_queue *queue;
queue             176 drivers/staging/media/omap4iss/iss_video.h 	struct vb2_queue queue;
queue             183 drivers/staging/media/omap4iss/iss_video.h 				container_of(q, struct iss_video_fh, queue)
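
The last iss_video.h entry above recovers the enclosing iss_video_fh from a pointer to its embedded vb2_queue via container_of(); that is how the vb2 callbacks, which only receive the queue pointer, reach the per-file-handle state. A standalone illustration of the cast; struct fake_vb2_queue and the surrounding field layout are invented for the demo:

```c
#include <stdio.h>
#include <stddef.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct fake_vb2_queue { int dummy; };	/* stands in for struct vb2_queue */

struct iss_video_fh {			/* layout invented for the demo */
	int something_before;
	struct fake_vb2_queue queue;
};

int main(void)
{
	struct iss_video_fh fh = { .something_before = 42 };
	struct fake_vb2_queue *q = &fh.queue;	/* what a callback receives */

	/* recover the enclosing file handle from the embedded queue */
	struct iss_video_fh *back = container_of(q, struct iss_video_fh, queue);

	printf("recovered field: %d\n", back->something_before);
	return 0;
}
```
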
queue              74 drivers/staging/octeon/ethernet-mdio.c 				      priv->port, priv->queue);
queue             428 drivers/staging/octeon/ethernet-tx.c 	cvmx_pko_send_packet_prepare(priv->port, priv->queue + qos,
queue             433 drivers/staging/octeon/ethernet-tx.c 						 priv->queue + qos,
queue             425 drivers/staging/octeon/ethernet.c 		priv->queue = -1;
queue             427 drivers/staging/octeon/ethernet.c 	if (priv->queue != -1)
queue             786 drivers/staging/octeon/ethernet.c 			priv->queue = -1;
queue             839 drivers/staging/octeon/ethernet.c 			priv->queue = cvmx_pko_get_base_queue(priv->port);
queue              53 drivers/staging/octeon/octeon-ethernet.h 	int queue;
queue            1380 drivers/staging/octeon/octeon-stubs.h static inline void cvmx_pko_send_packet_prepare(uint64_t port, uint64_t queue,
queue            1385 drivers/staging/octeon/octeon-stubs.h 		uint64_t queue, cvmx_pko_command_word0_t pko_command,
queue            1697 drivers/staging/rtl8188eu/core/rtw_ap.c 	INIT_LIST_HEAD(&pacl_list->acl_node_q.queue);
queue              38 drivers/staging/rtl8188eu/core/rtw_cmd.c static int _rtw_enqueue_cmd(struct __queue *queue, struct cmd_obj *obj)
queue              45 drivers/staging/rtl8188eu/core/rtw_cmd.c 	spin_lock_irqsave(&queue->lock, irqL);
queue              47 drivers/staging/rtl8188eu/core/rtw_cmd.c 	list_add_tail(&obj->list, &queue->queue);
queue              49 drivers/staging/rtl8188eu/core/rtw_cmd.c 	spin_unlock_irqrestore(&queue->lock, irqL);
queue              56 drivers/staging/rtl8188eu/core/rtw_cmd.c struct cmd_obj *rtw_dequeue_cmd(struct __queue *queue)
queue              61 drivers/staging/rtl8188eu/core/rtw_cmd.c 	spin_lock_irqsave(&queue->lock, irqL);
queue              62 drivers/staging/rtl8188eu/core/rtw_cmd.c 	obj = list_first_entry_or_null(&queue->queue, struct cmd_obj, list);
queue              65 drivers/staging/rtl8188eu/core/rtw_cmd.c 	spin_unlock_irqrestore(&queue->lock, irqL);
queue            1215 drivers/staging/rtl8188eu/core/rtw_cmd.c 				      &pmlmepriv->scanned_queue.queue);
queue              21 drivers/staging/rtl8188eu/core/rtw_ioctl_set.c 	struct __queue *queue = &pmlmepriv->scanned_queue;
queue              25 drivers/staging/rtl8188eu/core/rtw_ioctl_set.c 	phead = get_list_head(queue);
queue              38 drivers/staging/rtl8188eu/core/rtw_ioctl_set.c 	if (list_empty(&queue->queue)) {
queue              61 drivers/staging/rtl8188eu/core/rtw_mlme.c 		list_add_tail(&pnetwork->list, &pmlmepriv->free_bss_pool.queue);
queue             114 drivers/staging/rtl8188eu/core/rtw_mlme.c 	pnetwork = list_first_entry_or_null(&free_queue->queue,
queue             158 drivers/staging/rtl8188eu/core/rtw_mlme.c 	list_add_tail(&pnetwork->list, &free_queue->queue);
queue             397 drivers/staging/rtl8188eu/core/rtw_mlme.c 	struct __queue *queue = &pmlmepriv->scanned_queue;
queue             401 drivers/staging/rtl8188eu/core/rtw_mlme.c 	spin_lock_bh(&queue->lock);
queue             402 drivers/staging/rtl8188eu/core/rtw_mlme.c 	phead = get_list_head(queue);
queue             419 drivers/staging/rtl8188eu/core/rtw_mlme.c 		if (list_empty(&pmlmepriv->free_bss_pool.queue)) {
queue             460 drivers/staging/rtl8188eu/core/rtw_mlme.c 			list_add_tail(&pnetwork->list, &queue->queue);
queue             479 drivers/staging/rtl8188eu/core/rtw_mlme.c 	spin_unlock_bh(&queue->lock);
queue             701 drivers/staging/rtl8188eu/core/rtw_mlme.c 		list_add_tail(plist, &free_queue->queue);
queue            1460 drivers/staging/rtl8188eu/core/rtw_mlme.c 	struct __queue *queue = &pmlmepriv->scanned_queue;
queue            1466 drivers/staging/rtl8188eu/core/rtw_mlme.c 	phead = get_list_head(queue);
queue            1706 drivers/staging/rtl8188eu/core/rtw_mlme_ext.c 	struct __queue *queue = &pmlmepriv->scanned_queue;
queue            1768 drivers/staging/rtl8188eu/core/rtw_mlme_ext.c 		phead = get_list_head(queue);
queue              70 drivers/staging/rtl8188eu/core/rtw_recv.c 				     &precvpriv->free_recv_queue.queue);
queue             104 drivers/staging/rtl8188eu/core/rtw_recv.c 	hdr = list_first_entry_or_null(&pfree_recv_queue->queue,
queue             146 drivers/staging/rtl8188eu/core/rtw_recv.c int _rtw_enqueue_recvframe(struct recv_frame *precvframe, struct __queue *queue)
queue             149 drivers/staging/rtl8188eu/core/rtw_recv.c 	list_add_tail(&precvframe->list, get_list_head(queue));
queue             154 drivers/staging/rtl8188eu/core/rtw_recv.c int rtw_enqueue_recvframe(struct recv_frame *precvframe, struct __queue *queue)
queue             158 drivers/staging/rtl8188eu/core/rtw_recv.c 	spin_lock_bh(&queue->lock);
queue             159 drivers/staging/rtl8188eu/core/rtw_recv.c 	ret = _rtw_enqueue_recvframe(precvframe, queue);
queue             160 drivers/staging/rtl8188eu/core/rtw_recv.c 	spin_unlock_bh(&queue->lock);
queue            1438 drivers/staging/rtl8188eu/core/rtw_recv.c 				if (!list_empty(&pdefrag_q->queue))
queue             179 drivers/staging/rtl8188eu/core/rtw_sta_mgt.c 	psta = list_first_entry_or_null(&pfree_sta_queue->queue,
queue              94 drivers/staging/rtl8188eu/core/rtw_xmit.c 		list_add_tail(&pxframe->list, &pxmitpriv->free_xmit_queue.queue);
queue             137 drivers/staging/rtl8188eu/core/rtw_xmit.c 		list_add_tail(&pxmitbuf->list, &pxmitpriv->free_xmitbuf_queue.queue);
queue             171 drivers/staging/rtl8188eu/core/rtw_xmit.c 		list_add_tail(&pxmitbuf->list, &pxmitpriv->free_xmit_extbuf_queue.queue);
queue             858 drivers/staging/rtl8188eu/core/rtw_xmit.c 	return (!list_empty(&pxmitpriv->be_pending.queue) ||
queue             859 drivers/staging/rtl8188eu/core/rtw_xmit.c 		!list_empty(&pxmitpriv->bk_pending.queue) ||
queue             860 drivers/staging/rtl8188eu/core/rtw_xmit.c 		!list_empty(&pxmitpriv->vi_pending.queue) ||
queue             861 drivers/staging/rtl8188eu/core/rtw_xmit.c 		!list_empty(&pxmitpriv->vo_pending.queue));
queue            1147 drivers/staging/rtl8188eu/core/rtw_xmit.c 	pxmitbuf = list_first_entry_or_null(&pfree_queue->queue,
queue            1190 drivers/staging/rtl8188eu/core/rtw_xmit.c 	pxmitbuf = list_first_entry_or_null(&pfree_xmitbuf_queue->queue,
queue            1259 drivers/staging/rtl8188eu/core/rtw_xmit.c 	pxframe = list_first_entry_or_null(&pfree_xmit_queue->queue,
queue            1417 drivers/staging/rtl8188eu/core/rtw_xmit.c 				if (list_empty(&pframe_queue->queue)) /* must be done after get_next and before break */
queue             562 drivers/staging/rtl8188eu/hal/rtl8188eu_xmit.c 	if (list_empty(&ptxservq->sta_pending.queue))
queue              47 drivers/staging/rtl8188eu/include/osdep_service.h 	struct	list_head	queue;
queue              51 drivers/staging/rtl8188eu/include/osdep_service.h static inline struct list_head *get_list_head(struct __queue *queue)
queue              53 drivers/staging/rtl8188eu/include/osdep_service.h 	return &(queue->queue);
queue              52 drivers/staging/rtl8188eu/include/rtw_cmd.h struct cmd_obj *rtw_dequeue_cmd(struct __queue *queue);
queue             236 drivers/staging/rtl8188eu/include/rtw_recv.h #define rtw_dequeue_recvframe(queue) rtw_alloc_recvframe(queue)
queue             238 drivers/staging/rtl8188eu/include/rtw_recv.h 			   struct __queue *queue);
queue             239 drivers/staging/rtl8188eu/include/rtw_recv.h int rtw_enqueue_recvframe(struct recv_frame *precvframe, struct __queue *queue);
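
The rtl8188eu entries above build every FIFO from the same struct __queue wrapper: an embedded list_head plus a lock, with get_list_head() exposing the list and enqueue/dequeue done under spin_lock_irqsave() (the irqsave variant suggests enqueue may run in IRQ context). A userspace sketch of the wrapper, with a pthread mutex standing in for the spinlock and the list helpers again a minimal linux/list.h subset:

```c
#include <stdio.h>
#include <stddef.h>
#include <pthread.h>

struct list_head { struct list_head *next, *prev; };
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

static void INIT_LIST_HEAD(struct list_head *h) { h->next = h->prev = h; }
static int list_empty(const struct list_head *h) { return h->next == h; }
static void list_add_tail(struct list_head *n, struct list_head *h)
{
	n->prev = h->prev; n->next = h;
	h->prev->next = n; h->prev = n;
}
static void list_del(struct list_head *n)
{
	n->prev->next = n->next; n->next->prev = n->prev;
}

struct __queue {			/* mirrors the rtl8188eu wrapper */
	struct list_head queue;
	pthread_mutex_t lock;		/* spinlock in the driver */
};

struct cmd_obj { int id; struct list_head list; };

static void enqueue_cmd(struct __queue *q, struct cmd_obj *obj)
{
	pthread_mutex_lock(&q->lock);
	list_add_tail(&obj->list, &q->queue);
	pthread_mutex_unlock(&q->lock);
}

static struct cmd_obj *dequeue_cmd(struct __queue *q)
{
	struct cmd_obj *obj = NULL;

	pthread_mutex_lock(&q->lock);
	if (!list_empty(&q->queue)) {	/* first-entry-or-null idiom */
		obj = container_of(q->queue.next, struct cmd_obj, list);
		list_del(&obj->list);
	}
	pthread_mutex_unlock(&q->lock);
	return obj;
}

int main(void)
{
	struct __queue q = { .lock = PTHREAD_MUTEX_INITIALIZER };
	struct cmd_obj a = { .id = 1 }, b = { .id = 2 }, *o;

	INIT_LIST_HEAD(&q.queue);
	enqueue_cmd(&q, &a);
	enqueue_cmd(&q, &b);
	while ((o = dequeue_cmd(&q)))
		printf("cmd %d\n", o->id);
	return 0;
}
```

Build with -lpthread; the loop drains the FIFO in insertion order, matching the driver's dequeue-until-NULL usage.
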
queue             960 drivers/staging/rtl8188eu/os_dep/ioctl_linux.c 	struct __queue *queue	= &(pmlmepriv->scanned_queue);
queue             980 drivers/staging/rtl8188eu/os_dep/ioctl_linux.c 	spin_lock_bh(&queue->lock);
queue             981 drivers/staging/rtl8188eu/os_dep/ioctl_linux.c 	phead = get_list_head(queue);
queue             996 drivers/staging/rtl8188eu/os_dep/ioctl_linux.c 				spin_unlock_bh(&queue->lock);
queue            1003 drivers/staging/rtl8188eu/os_dep/ioctl_linux.c 	spin_unlock_bh(&queue->lock);
queue            1208 drivers/staging/rtl8188eu/os_dep/ioctl_linux.c 	struct __queue *queue	= &(pmlmepriv->scanned_queue);
queue            1238 drivers/staging/rtl8188eu/os_dep/ioctl_linux.c 	phead = get_list_head(queue);
queue            1276 drivers/staging/rtl8188eu/os_dep/ioctl_linux.c 	struct __queue *queue = &pmlmepriv->scanned_queue;
queue            1321 drivers/staging/rtl8188eu/os_dep/ioctl_linux.c 		spin_lock_bh(&queue->lock);
queue            1322 drivers/staging/rtl8188eu/os_dep/ioctl_linux.c 		phead = get_list_head(queue);
queue            1348 drivers/staging/rtl8188eu/os_dep/ioctl_linux.c 					spin_unlock_bh(&queue->lock);
queue            1355 drivers/staging/rtl8188eu/os_dep/ioctl_linux.c 		spin_unlock_bh(&queue->lock);
queue              23 drivers/staging/rtl8188eu/os_dep/osdep_service.c 	INIT_LIST_HEAD(&pqueue->queue);
queue              53 drivers/staging/rtl8188eu/os_dep/xmit_linux.c 	u16 queue;
queue              56 drivers/staging/rtl8188eu/os_dep/xmit_linux.c 	queue = skb_get_queue_mapping(pkt);
queue              58 drivers/staging/rtl8188eu/os_dep/xmit_linux.c 		if (__netif_subqueue_stopped(padapter->pnetdev, queue) &&
queue              59 drivers/staging/rtl8188eu/os_dep/xmit_linux.c 		    (pxmitpriv->hwxmits[queue].accnt < WMM_XMIT_THRESHOLD))
queue              60 drivers/staging/rtl8188eu/os_dep/xmit_linux.c 			netif_wake_subqueue(padapter->pnetdev, queue);
queue              62 drivers/staging/rtl8188eu/os_dep/xmit_linux.c 		if (__netif_subqueue_stopped(padapter->pnetdev, queue))
queue              63 drivers/staging/rtl8188eu/os_dep/xmit_linux.c 			netif_wake_subqueue(padapter->pnetdev, queue);
queue              97 drivers/staging/rtl8188eu/os_dep/xmit_linux.c 	u16 queue;
queue              99 drivers/staging/rtl8188eu/os_dep/xmit_linux.c 	queue = skb_get_queue_mapping(pkt);
queue             102 drivers/staging/rtl8188eu/os_dep/xmit_linux.c 		if (pxmitpriv->hwxmits[queue].accnt > WMM_XMIT_THRESHOLD)
queue             103 drivers/staging/rtl8188eu/os_dep/xmit_linux.c 			netif_stop_subqueue(padapter->pnetdev, queue);
queue             106 drivers/staging/rtl8188eu/os_dep/xmit_linux.c 			if (!netif_tx_queue_stopped(netdev_get_tx_queue(padapter->pnetdev, queue)))
queue             107 drivers/staging/rtl8188eu/os_dep/xmit_linux.c 				netif_stop_subqueue(padapter->pnetdev, queue);
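
The xmit_linux.c entries above stop a TX subqueue once its accounting counter passes WMM_XMIT_THRESHOLD and wake it again when completions bring the counter back under. A standalone sketch of that flow-control hysteresis; the threshold value and the single-queue setup are assumptions made for the demo:

```c
#include <stdio.h>

#define WMM_XMIT_THRESHOLD 8	/* value assumed; acts as the watermark */

static int accnt, stopped;

static void tx_submit(void)
{
	accnt++;
	if (accnt > WMM_XMIT_THRESHOLD && !stopped) {
		stopped = 1;		/* netif_stop_subqueue() in the driver */
		printf("stop  (accnt=%d)\n", accnt);
	}
}

static void tx_complete(void)
{
	accnt--;
	if (stopped && accnt < WMM_XMIT_THRESHOLD) {
		stopped = 0;		/* netif_wake_subqueue() in the driver */
		printf("wake  (accnt=%d)\n", accnt);
	}
}

int main(void)
{
	int i;

	for (i = 0; i < 10; i++)
		tx_submit();
	for (i = 0; i < 10; i++)
		tx_complete();
	return 0;
}
```

Using strict > for stop and < for wake leaves a small dead band, so the queue does not flap on every packet.
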
queue            1481 drivers/staging/rtl8192e/rtl8192e/r8192E_phy.c 				if (skb_queue_len(&ring->queue) == 0) {
queue            1510 drivers/staging/rtl8192e/rtl8192e/r8192E_phy.c 				if (skb_queue_len(&ring->queue) == 0) {
queue             265 drivers/staging/rtl8192e/rtl8192e/rtl_core.c 	if (ring->entries - skb_queue_len(&ring->queue) >= 2)
queue             528 drivers/staging/rtl8192e/rtl8192e/rtl_core.c 	pskb = __skb_dequeue(&ring->queue);
queue             545 drivers/staging/rtl8192e/rtl8192e/rtl_core.c 	__skb_queue_tail(&ring->queue, pnewskb);
queue            1101 drivers/staging/rtl8192e/rtl8192e/rtl_core.c 		if (skb_queue_len(&(&priv->tx_ring[i])->queue) > 0) {
queue            1103 drivers/staging/rtl8192e/rtl8192e/rtl_core.c 			       i, skb_queue_len(&(&priv->tx_ring[i])->queue));
queue            1138 drivers/staging/rtl8192e/rtl8192e/rtl_core.c 		if (skb_queue_len(&ring->queue) == 0) {
queue            1141 drivers/staging/rtl8192e/rtl8192e/rtl_core.c 			skb = __skb_peek(&ring->queue);
queue            1582 drivers/staging/rtl8192e/rtl8192e/rtl_core.c 	while (skb_queue_len(&ring->queue)) {
queue            1584 drivers/staging/rtl8192e/rtl8192e/rtl_core.c 		struct sk_buff *skb = __skb_dequeue(&ring->queue);
queue            1669 drivers/staging/rtl8192e/rtl8192e/rtl_core.c 	while (skb_queue_len(&ring->queue)) {
queue            1679 drivers/staging/rtl8192e/rtl8192e/rtl_core.c 		skb = __skb_dequeue(&ring->queue);
queue            1701 drivers/staging/rtl8192e/rtl8192e/rtl_core.c 	idx = (ring->idx + skb_queue_len(&ring->queue)) % ring->entries;
queue            1708 drivers/staging/rtl8192e/rtl8192e/rtl_core.c 	__skb_queue_tail(&ring->queue, skb);
queue            1751 drivers/staging/rtl8192e/rtl8192e/rtl_core.c 		idx = (ring->idx + skb_queue_len(&ring->queue)) % ring->entries;
queue            1760 drivers/staging/rtl8192e/rtl8192e/rtl_core.c 			    skb_queue_len(&ring->queue));
queue            1770 drivers/staging/rtl8192e/rtl8192e/rtl_core.c 	__skb_queue_tail(&ring->queue, skb);
queue            1845 drivers/staging/rtl8192e/rtl8192e/rtl_core.c 	skb_queue_head_init(&priv->tx_ring[prio].queue);
queue            1904 drivers/staging/rtl8192e/rtl8192e/rtl_core.c 			while (skb_queue_len(&ring->queue)) {
queue            1907 drivers/staging/rtl8192e/rtl8192e/rtl_core.c 						 __skb_dequeue(&ring->queue);
queue             266 drivers/staging/rtl8192e/rtl8192e/rtl_core.h 	struct sk_buff_head queue;
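
rtl_core.c above computes the next free TX descriptor as the ring's head index plus the number of queued skbs, modulo the ring size: everything between ring->idx and that slot is in flight. The arithmetic in isolation, with sample values:

```c
#include <stdio.h>

#define RING_ENTRIES 8

int main(void)
{
	unsigned int ring_idx = 5;	/* oldest in-flight descriptor */
	unsigned int queued   = 4;	/* skb_queue_len(&ring->queue) */

	/* next free descriptor sits 'queued' slots past the head, wrapping */
	unsigned int idx = (ring_idx + queued) % RING_ENTRIES;

	printf("next free descriptor: %u\n", idx);	/* (5 + 4) % 8 = 1 */
	return 0;
}
```
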
queue              34 drivers/staging/rtl8712/osdep_service.h 	struct	list_head	queue;
queue              44 drivers/staging/rtl8712/osdep_service.h 		INIT_LIST_HEAD(&((pqueue)->queue));	\
queue             377 drivers/staging/rtl8712/rtl8712_cmd.c 			if (list_empty(&pcmdpriv->cmd_queue.queue)) {
queue              64 drivers/staging/rtl8712/rtl8712_recv.c 				 &(precvpriv->free_recv_buf_queue.queue));
queue             134 drivers/staging/rtl8712/rtl8712_recv.c 	list_add_tail(&(precvframe->u.hdr.list), &pfree_recv_queue->queue);
queue             190 drivers/staging/rtl8712/rtl8712_recv.c 	phead = &defrag_q->queue;
queue             205 drivers/staging/rtl8712/rtl8712_recv.c 	plist = &defrag_q->queue;
queue             275 drivers/staging/rtl8712/rtl8712_recv.c 				if (!list_empty(&pdefrag_q->queue)) {
queue             282 drivers/staging/rtl8712/rtl8712_recv.c 			phead = &pdefrag_q->queue;
queue             299 drivers/staging/rtl8712/rtl8712_recv.c 			phead = &pdefrag_q->queue;
queue             484 drivers/staging/rtl8712/rtl8712_recv.c 	phead = &ppending_recvframe_queue->queue;
queue             512 drivers/staging/rtl8712/rtl8712_recv.c 	phead = &ppending_recvframe_queue->queue;
queue             157 drivers/staging/rtl8712/rtl8712_xmit.c 	xmitframe_phead = &pframe_queue->queue;
queue             197 drivers/staging/rtl8712/rtl8712_xmit.c 		sta_phead = &phwxmit->sta_queue->queue;
queue             211 drivers/staging/rtl8712/rtl8712_xmit.c 			if (list_empty(&pframe_queue->queue)) {
queue             115 drivers/staging/rtl8712/rtl871x_cmd.c 	struct __queue *queue;
queue             122 drivers/staging/rtl8712/rtl871x_cmd.c 	queue = &pcmdpriv->cmd_queue;
queue             123 drivers/staging/rtl8712/rtl871x_cmd.c 	spin_lock_irqsave(&queue->lock, irqL);
queue             124 drivers/staging/rtl8712/rtl871x_cmd.c 	list_add_tail(&obj->list, &queue->queue);
queue             125 drivers/staging/rtl8712/rtl871x_cmd.c 	spin_unlock_irqrestore(&queue->lock, irqL);
queue             129 drivers/staging/rtl8712/rtl871x_cmd.c struct cmd_obj *r8712_dequeue_cmd(struct  __queue *queue)
queue             134 drivers/staging/rtl8712/rtl871x_cmd.c 	spin_lock_irqsave(&queue->lock, irqL);
queue             135 drivers/staging/rtl8712/rtl871x_cmd.c 	obj = list_first_entry_or_null(&queue->queue,
queue             139 drivers/staging/rtl8712/rtl871x_cmd.c 	spin_unlock_irqrestore(&queue->lock, irqL);
queue             146 drivers/staging/rtl8712/rtl871x_cmd.c 	struct  __queue *queue;
queue             152 drivers/staging/rtl8712/rtl871x_cmd.c 	queue = &pcmdpriv->cmd_queue;
queue             153 drivers/staging/rtl8712/rtl871x_cmd.c 	spin_lock_irqsave(&queue->lock, irqL);
queue             154 drivers/staging/rtl8712/rtl871x_cmd.c 	list_add_tail(&obj->list, &queue->queue);
queue             155 drivers/staging/rtl8712/rtl871x_cmd.c 	spin_unlock_irqrestore(&queue->lock, irqL);
queue             715 drivers/staging/rtl8712/rtl871x_cmd.c 					 &pmlmepriv->scanned_queue.queue);
queue              84 drivers/staging/rtl8712/rtl871x_cmd.h struct cmd_obj *r8712_dequeue_cmd(struct  __queue *queue);
queue            1038 drivers/staging/rtl8712/rtl871x_ioctl_linux.c 	struct  __queue *queue = &pmlmepriv->scanned_queue;
queue            1053 drivers/staging/rtl8712/rtl871x_ioctl_linux.c 	spin_lock_irqsave(&queue->lock, irqL);
queue            1054 drivers/staging/rtl8712/rtl871x_ioctl_linux.c 	phead = &queue->queue;
queue            1069 drivers/staging/rtl8712/rtl871x_ioctl_linux.c 	spin_unlock_irqrestore(&queue->lock, irqL);
queue            1188 drivers/staging/rtl8712/rtl871x_ioctl_linux.c 	struct  __queue *queue = &pmlmepriv->scanned_queue;
queue            1205 drivers/staging/rtl8712/rtl871x_ioctl_linux.c 	spin_lock_irqsave(&queue->lock, irqL);
queue            1206 drivers/staging/rtl8712/rtl871x_ioctl_linux.c 	phead = &queue->queue;
queue            1219 drivers/staging/rtl8712/rtl871x_ioctl_linux.c 	spin_unlock_irqrestore(&queue->lock, irqL);
queue            1243 drivers/staging/rtl8712/rtl871x_ioctl_linux.c 	struct  __queue *queue = &pmlmepriv->scanned_queue;
queue            1265 drivers/staging/rtl8712/rtl871x_ioctl_linux.c 		phead = &queue->queue;
queue            1947 drivers/staging/rtl8712/rtl871x_ioctl_linux.c 	struct  __queue *queue = &pmlmepriv->scanned_queue;
queue            1974 drivers/staging/rtl8712/rtl871x_ioctl_linux.c 	phead = &queue->queue;
queue              45 drivers/staging/rtl8712/rtl871x_ioctl_set.c 	struct  __queue	*queue	= &(pmlmepriv->scanned_queue);
queue              48 drivers/staging/rtl8712/rtl871x_ioctl_set.c 	phead = &queue->queue;
queue              57 drivers/staging/rtl8712/rtl871x_ioctl_set.c 	    list_empty(&queue->queue)) {
queue              62 drivers/staging/rtl8712/rtl871x_mlme.c 				 &(pmlmepriv->free_bss_pool.queue));
queue              80 drivers/staging/rtl8712/rtl871x_mlme.c 	pnetwork = list_first_entry_or_null(&free_queue->queue,
queue             108 drivers/staging/rtl8712/rtl871x_mlme.c 	list_add_tail(&pnetwork->list, &free_queue->queue);
queue             123 drivers/staging/rtl8712/rtl871x_mlme.c 	list_add_tail(&pnetwork->list, &free_queue->queue);
queue             142 drivers/staging/rtl8712/rtl871x_mlme.c 	phead = &scanned_queue->queue;
queue             163 drivers/staging/rtl8712/rtl871x_mlme.c 	phead = &scanned_queue->queue;
queue             264 drivers/staging/rtl8712/rtl871x_mlme.c 	phead = &scanned_queue->queue;
queue             339 drivers/staging/rtl8712/rtl871x_mlme.c 	struct  __queue *queue = &pmlmepriv->scanned_queue;
queue             343 drivers/staging/rtl8712/rtl871x_mlme.c 	phead = &queue->queue;
queue             366 drivers/staging/rtl8712/rtl871x_mlme.c 		if (list_empty(&pmlmepriv->free_bss_pool.queue)) {
queue             383 drivers/staging/rtl8712/rtl871x_mlme.c 			list_add_tail(&pnetwork->list, &queue->queue);
queue             401 drivers/staging/rtl8712/rtl871x_mlme.c 	struct  __queue *queue = &pmlmepriv->scanned_queue;
queue             403 drivers/staging/rtl8712/rtl871x_mlme.c 	spin_lock_irqsave(&queue->lock, irqL);
queue             406 drivers/staging/rtl8712/rtl871x_mlme.c 	spin_unlock_irqrestore(&queue->lock, irqL);
queue            1073 drivers/staging/rtl8712/rtl871x_mlme.c 	struct  __queue *queue = NULL;
queue            1078 drivers/staging/rtl8712/rtl871x_mlme.c 	queue = &pmlmepriv->scanned_queue;
queue            1079 drivers/staging/rtl8712/rtl871x_mlme.c 	phead = &queue->queue;
queue              57 drivers/staging/rtl8712/rtl871x_mp.c 				 &(pmp_priv->free_mp_xmitqueue.queue));
queue              77 drivers/staging/rtl8712/rtl871x_recv.c 				 &(precvpriv->free_recv_queue.queue));
queue             100 drivers/staging/rtl8712/rtl871x_recv.c 	precvframe = list_first_entry_or_null(&pfree_recv_queue->queue,
queue             127 drivers/staging/rtl8712/rtl871x_recv.c 	phead = &pframequeue->queue;
queue              57 drivers/staging/rtl8712/rtl871x_sta_mgt.c 		list_add_tail(&psta->list, &pstapriv->free_sta_queue.queue);
queue              72 drivers/staging/rtl8712/rtl871x_sta_mgt.c 	phead = &pstapriv->free_sta_queue.queue;
queue             102 drivers/staging/rtl8712/rtl871x_sta_mgt.c 	psta = list_first_entry_or_null(&pfree_sta_queue->queue,
queue             186 drivers/staging/rtl8712/rtl871x_sta_mgt.c 	list_add_tail(&psta->list, &pfree_sta_queue->queue);
queue              95 drivers/staging/rtl8712/rtl871x_xmit.c 				 &(pxmitpriv->free_xmit_queue.queue));
queue             139 drivers/staging/rtl8712/rtl871x_xmit.c 				 &(pxmitpriv->free_xmitbuf_queue.queue));
queue             746 drivers/staging/rtl8712/rtl871x_xmit.c 	pxmitbuf = list_first_entry_or_null(&pfree_xmitbuf_queue->queue,
queue             765 drivers/staging/rtl8712/rtl871x_xmit.c 	list_add_tail(&(pxmitbuf->list), &pfree_xmitbuf_queue->queue);
queue             793 drivers/staging/rtl8712/rtl871x_xmit.c 	pxframe = list_first_entry_or_null(&pfree_xmit_queue->queue,
queue             820 drivers/staging/rtl8712/rtl871x_xmit.c 	list_add_tail(&pxmitframe->list, &pfree_xmit_queue->queue);
queue             844 drivers/staging/rtl8712/rtl871x_xmit.c 	phead = &pframequeue->queue;
queue             927 drivers/staging/rtl8712/rtl871x_xmit.c 		list_add_tail(&ptxservq->tx_pending, &pstapending->queue);
queue             928 drivers/staging/rtl8712/rtl871x_xmit.c 	list_add_tail(&pxmitframe->list, &ptxservq->sta_pending.queue);
queue            2379 drivers/staging/rtl8723bs/core/rtw_ap.c 	INIT_LIST_HEAD(&(pacl_list->acl_node_q.queue));
queue             260 drivers/staging/rtl8723bs/core/rtw_cmd.c int _rtw_enqueue_cmd(struct __queue *queue, struct cmd_obj *obj)
queue             268 drivers/staging/rtl8723bs/core/rtw_cmd.c 	spin_lock_irqsave(&queue->lock, irqL);
queue             270 drivers/staging/rtl8723bs/core/rtw_cmd.c 	list_add_tail(&obj->list, &queue->queue);
queue             273 drivers/staging/rtl8723bs/core/rtw_cmd.c 	spin_unlock_irqrestore(&queue->lock, irqL);
queue             279 drivers/staging/rtl8723bs/core/rtw_cmd.c struct	cmd_obj	*_rtw_dequeue_cmd(struct __queue *queue)
queue             285 drivers/staging/rtl8723bs/core/rtw_cmd.c 	spin_lock_irqsave(&queue->lock, irqL);
queue             286 drivers/staging/rtl8723bs/core/rtw_cmd.c 	if (list_empty(&(queue->queue)))
queue             289 drivers/staging/rtl8723bs/core/rtw_cmd.c 		obj = LIST_CONTAINOR(get_next(&(queue->queue)), struct cmd_obj, list);
queue             294 drivers/staging/rtl8723bs/core/rtw_cmd.c 	spin_unlock_irqrestore(&queue->lock, irqL);
queue             441 drivers/staging/rtl8723bs/core/rtw_cmd.c 		if (list_empty(&(pcmdpriv->cmd_queue.queue))) {
queue            2092 drivers/staging/rtl8723bs/core/rtw_cmd.c 			list_add_tail(&(pwlan->list), &pmlmepriv->scanned_queue.queue);
queue              57 drivers/staging/rtl8723bs/core/rtw_ioctl_set.c 	struct __queue	*queue	= &(pmlmepriv->scanned_queue);
queue              61 drivers/staging/rtl8723bs/core/rtw_ioctl_set.c 	phead = get_list_head(queue);
queue              74 drivers/staging/rtl8723bs/core/rtw_ioctl_set.c 	if (list_empty(&queue->queue)) {
queue              54 drivers/staging/rtl8723bs/core/rtw_mlme.c 		list_add_tail(&pnetwork->list, &pmlmepriv->free_bss_pool.queue);
queue             155 drivers/staging/rtl8723bs/core/rtw_mlme.c 	if (list_empty(&free_queue->queue)) {
queue             159 drivers/staging/rtl8723bs/core/rtw_mlme.c 	plist = get_next(&(free_queue->queue));
queue             208 drivers/staging/rtl8723bs/core/rtw_mlme.c 	list_add_tail(&(pnetwork->list), &(free_queue->queue));
queue             594 drivers/staging/rtl8723bs/core/rtw_mlme.c 	struct __queue	*queue	= &(pmlmepriv->scanned_queue);
queue             600 drivers/staging/rtl8723bs/core/rtw_mlme.c 	spin_lock_bh(&queue->lock);
queue             601 drivers/staging/rtl8723bs/core/rtw_mlme.c 	phead = get_list_head(queue);
queue             633 drivers/staging/rtl8723bs/core/rtw_mlme.c 		if (list_empty(&pmlmepriv->free_bss_pool.queue)) {
queue             673 drivers/staging/rtl8723bs/core/rtw_mlme.c 			list_add_tail(&(pnetwork->list), &(queue->queue));
queue             702 drivers/staging/rtl8723bs/core/rtw_mlme.c 	spin_unlock_bh(&queue->lock);
queue             982 drivers/staging/rtl8723bs/core/rtw_mlme.c 		list_add_tail(plist, &free_queue->queue);
queue            1998 drivers/staging/rtl8723bs/core/rtw_mlme.c 	struct __queue	*queue	= &(mlme->scanned_queue);
queue            2008 drivers/staging/rtl8723bs/core/rtw_mlme.c 	phead = get_list_head(queue);
queue            2131 drivers/staging/rtl8723bs/core/rtw_mlme.c 	struct __queue	*queue	= &(pmlmepriv->scanned_queue);
queue            2145 drivers/staging/rtl8723bs/core/rtw_mlme.c 	phead = get_list_head(queue);
queue            4045 drivers/staging/rtl8723bs/core/rtw_mlme_ext.c 	struct __queue		*queue	= &(pmlmepriv->scanned_queue);
queue            4112 drivers/staging/rtl8723bs/core/rtw_mlme_ext.c 		phead = get_list_head(queue);
queue              68 drivers/staging/rtl8723bs/core/rtw_recv.c 		list_add_tail(&(precvframe->u.list), &(precvpriv->free_recv_queue.queue));
queue             114 drivers/staging/rtl8723bs/core/rtw_recv.c 	if (list_empty(&pfree_recv_queue->queue))
queue             174 drivers/staging/rtl8723bs/core/rtw_recv.c sint _rtw_enqueue_recvframe(union recv_frame *precvframe, struct __queue *queue)
queue             184 drivers/staging/rtl8723bs/core/rtw_recv.c 	list_add_tail(&(precvframe->u.hdr.list), get_list_head(queue));
queue             187 drivers/staging/rtl8723bs/core/rtw_recv.c 		if (queue == &precvpriv->free_recv_queue)
queue             193 drivers/staging/rtl8723bs/core/rtw_recv.c sint rtw_enqueue_recvframe(union recv_frame *precvframe, struct __queue *queue)
queue             198 drivers/staging/rtl8723bs/core/rtw_recv.c 	spin_lock_bh(&queue->lock);
queue             199 drivers/staging/rtl8723bs/core/rtw_recv.c 	ret = _rtw_enqueue_recvframe(precvframe, queue);
queue             201 drivers/staging/rtl8723bs/core/rtw_recv.c 	spin_unlock_bh(&queue->lock);
queue             261 drivers/staging/rtl8723bs/core/rtw_recv.c sint rtw_enqueue_recvbuf_to_head(struct recv_buf *precvbuf, struct __queue *queue)
queue             263 drivers/staging/rtl8723bs/core/rtw_recv.c 	spin_lock_bh(&queue->lock);
queue             266 drivers/staging/rtl8723bs/core/rtw_recv.c 	list_add(&precvbuf->list, get_list_head(queue));
queue             268 drivers/staging/rtl8723bs/core/rtw_recv.c 	spin_unlock_bh(&queue->lock);
queue             273 drivers/staging/rtl8723bs/core/rtw_recv.c sint rtw_enqueue_recvbuf(struct recv_buf *precvbuf, struct __queue *queue)
queue             275 drivers/staging/rtl8723bs/core/rtw_recv.c 	spin_lock_bh(&queue->lock);
queue             279 drivers/staging/rtl8723bs/core/rtw_recv.c 	list_add_tail(&precvbuf->list, get_list_head(queue));
queue             280 drivers/staging/rtl8723bs/core/rtw_recv.c 	spin_unlock_bh(&queue->lock);
queue             285 drivers/staging/rtl8723bs/core/rtw_recv.c struct recv_buf *rtw_dequeue_recvbuf(struct __queue *queue)
queue             290 drivers/staging/rtl8723bs/core/rtw_recv.c 	spin_lock_bh(&queue->lock);
queue             292 drivers/staging/rtl8723bs/core/rtw_recv.c 	if (list_empty(&queue->queue))
queue             295 drivers/staging/rtl8723bs/core/rtw_recv.c 		phead = get_list_head(queue);
queue             305 drivers/staging/rtl8723bs/core/rtw_recv.c 	spin_unlock_bh(&queue->lock);
queue            1833 drivers/staging/rtl8723bs/core/rtw_recv.c 				if (!list_empty(&pdefrag_q->queue))
queue             203 drivers/staging/rtl8723bs/core/rtw_sta_mgt.c 	if (list_empty(&pfree_sta_queue->queue)) {
queue             208 drivers/staging/rtl8723bs/core/rtw_sta_mgt.c 		psta = LIST_CONTAINOR(get_next(&pfree_sta_queue->queue), struct sta_info, list);
queue             102 drivers/staging/rtl8723bs/core/rtw_xmit.c 		list_add_tail(&(pxframe->list), &(pxmitpriv->free_xmit_queue.queue));
queue             153 drivers/staging/rtl8723bs/core/rtw_xmit.c 		list_add_tail(&pxmitbuf->list, &(pxmitpriv->free_xmitbuf_queue.queue));
queue             191 drivers/staging/rtl8723bs/core/rtw_xmit.c 		list_add_tail(&(pxframe->list), &(pxmitpriv->free_xframe_ext_queue.queue));
queue             230 drivers/staging/rtl8723bs/core/rtw_xmit.c 		list_add_tail(&pxmitbuf->list, &(pxmitpriv->free_xmit_extbuf_queue.queue));
queue            1159 drivers/staging/rtl8723bs/core/rtw_xmit.c 	return ((!list_empty(&pxmitpriv->be_pending.queue)) ||
queue            1160 drivers/staging/rtl8723bs/core/rtw_xmit.c 			 (!list_empty(&pxmitpriv->bk_pending.queue)) ||
queue            1161 drivers/staging/rtl8723bs/core/rtw_xmit.c 			 (!list_empty(&pxmitpriv->vi_pending.queue)) ||
queue            1162 drivers/staging/rtl8723bs/core/rtw_xmit.c 			 (!list_empty(&pxmitpriv->vo_pending.queue)));
queue            1681 drivers/staging/rtl8723bs/core/rtw_xmit.c 	if (list_empty(&pfree_queue->queue)) {
queue            1753 drivers/staging/rtl8723bs/core/rtw_xmit.c 	if (list_empty(&pfree_xmitbuf_queue->queue)) {
queue            1872 drivers/staging/rtl8723bs/core/rtw_xmit.c 	if (list_empty(&pfree_xmit_queue->queue)) {
queue            1897 drivers/staging/rtl8723bs/core/rtw_xmit.c 	struct __queue *queue = &pxmitpriv->free_xframe_ext_queue;
queue            1899 drivers/staging/rtl8723bs/core/rtw_xmit.c 	spin_lock_bh(&queue->lock);
queue            1901 drivers/staging/rtl8723bs/core/rtw_xmit.c 	if (list_empty(&queue->queue)) {
queue            1905 drivers/staging/rtl8723bs/core/rtw_xmit.c 		phead = get_list_head(queue);
queue            1914 drivers/staging/rtl8723bs/core/rtw_xmit.c 	spin_unlock_bh(&queue->lock);
queue            1952 drivers/staging/rtl8723bs/core/rtw_xmit.c 	struct __queue *queue = NULL;
queue            1973 drivers/staging/rtl8723bs/core/rtw_xmit.c 		queue = &pxmitpriv->free_xmit_queue;
queue            1975 drivers/staging/rtl8723bs/core/rtw_xmit.c 		queue = &pxmitpriv->free_xframe_ext_queue;
queue            1980 drivers/staging/rtl8723bs/core/rtw_xmit.c 	spin_lock_bh(&queue->lock);
queue            1983 drivers/staging/rtl8723bs/core/rtw_xmit.c 	list_add_tail(&pxmitframe->list, get_list_head(queue));
queue            1993 drivers/staging/rtl8723bs/core/rtw_xmit.c 	spin_unlock_bh(&queue->lock);
queue            2907 drivers/staging/rtl8723bs/core/rtw_xmit.c 	if (!list_empty(&pqueue->queue)) {
queue            2933 drivers/staging/rtl8723bs/core/rtw_xmit.c 	if (!list_empty(&pqueue->queue)) {
queue            2973 drivers/staging/rtl8723bs/core/rtw_xmit.c 	if (!list_empty(&pqueue->queue))
queue             457 drivers/staging/rtl8723bs/hal/rtl8723bs_recv.c 		list_add_tail(&precvbuf->list, &precvpriv->free_recv_buf_queue.queue);
queue             371 drivers/staging/rtl8723bs/hal/rtl8723bs_xmit.c 			if (list_empty(&pframe_queue->queue))
queue             651 drivers/staging/rtl8723bs/hal/rtl8723bs_xmit.c 	if (!list_empty(&pqueue->queue)) {
queue              15 drivers/staging/rtl8723bs/include/cmd_osdep.h int _rtw_enqueue_cmd(struct __queue *queue, struct cmd_obj *obj);
queue              16 drivers/staging/rtl8723bs/include/cmd_osdep.h extern struct	cmd_obj	*_rtw_dequeue_cmd(struct __queue *queue);
queue              48 drivers/staging/rtl8723bs/include/osdep_service_linux.h 		struct	list_head	queue;
queue              72 drivers/staging/rtl8723bs/include/osdep_service_linux.h static inline struct list_head	*get_list_head(struct __queue	*queue)
queue              74 drivers/staging/rtl8723bs/include/osdep_service_linux.h 	return (&(queue->queue));
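
The osdep_service_linux.h lines above define the struct __queue that every rtl8712/rtl8723bs excerpt in this listing keeps dereferencing: a list_head that is itself named queue, paired with a lock, which is why the driver code reads queue->queue. A minimal sketch of the same pattern follows; the names demo_queue and demo_item are illustrative, not the driver's own.

	#include <linux/list.h>
	#include <linux/spinlock.h>

	struct demo_queue {
		struct list_head queue;	/* the list itself, hence queue->queue */
		spinlock_t lock;	/* guards the list */
	};

	struct demo_item {
		struct list_head list;	/* links the item into a demo_queue */
	};

	static void demo_queue_init(struct demo_queue *q)
	{
		INIT_LIST_HEAD(&q->queue);
		spin_lock_init(&q->lock);
	}

	static void demo_enqueue(struct demo_queue *q, struct demo_item *it)
	{
		spin_lock_bh(&q->lock);
		list_add_tail(&it->list, &q->queue);
		spin_unlock_bh(&q->lock);
	}

	static struct demo_item *demo_dequeue(struct demo_queue *q)
	{
		struct demo_item *it;

		spin_lock_bh(&q->lock);
		it = list_first_entry_or_null(&q->queue, struct demo_item, list);
		if (it)
			list_del_init(&it->list);
		spin_unlock_bh(&q->lock);
		return it;
	}

The rtl8712 copy of the pattern takes the lock with spin_lock_irqsave() rather than the _bh variant seen in rtw_recv.c; the shape is otherwise identical.
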
queue             395 drivers/staging/rtl8723bs/include/rtw_recv.h #define rtw_dequeue_recvframe(queue) rtw_alloc_recvframe(queue)
queue             396 drivers/staging/rtl8723bs/include/rtw_recv.h extern int _rtw_enqueue_recvframe(union recv_frame *precvframe, struct __queue *queue);
queue             397 drivers/staging/rtl8723bs/include/rtw_recv.h extern int rtw_enqueue_recvframe(union recv_frame *precvframe, struct __queue *queue);
queue             402 drivers/staging/rtl8723bs/include/rtw_recv.h sint rtw_enqueue_recvbuf_to_head(struct recv_buf *precvbuf, struct __queue *queue);
queue             403 drivers/staging/rtl8723bs/include/rtw_recv.h sint rtw_enqueue_recvbuf(struct recv_buf *precvbuf, struct __queue *queue);
queue             404 drivers/staging/rtl8723bs/include/rtw_recv.h struct recv_buf *rtw_dequeue_recvbuf(struct __queue *queue);
queue            1432 drivers/staging/rtl8723bs/os_dep/ioctl_cfg80211.c 	struct __queue *queue	= &(pmlmepriv->scanned_queue);
queue            1441 drivers/staging/rtl8723bs/os_dep/ioctl_cfg80211.c 	phead = get_list_head(queue);
queue            1134 drivers/staging/rtl8723bs/os_dep/ioctl_linux.c 	struct __queue	*queue	= &(pmlmepriv->scanned_queue);
queue            1156 drivers/staging/rtl8723bs/os_dep/ioctl_linux.c 	spin_lock_bh(&queue->lock);
queue            1157 drivers/staging/rtl8723bs/os_dep/ioctl_linux.c 	phead = get_list_head(queue);
queue            1175 drivers/staging/rtl8723bs/os_dep/ioctl_linux.c 				spin_unlock_bh(&queue->lock);
queue            1182 drivers/staging/rtl8723bs/os_dep/ioctl_linux.c 	spin_unlock_bh(&queue->lock);
queue            1421 drivers/staging/rtl8723bs/os_dep/ioctl_linux.c 	struct __queue				*queue	= &(pmlmepriv->scanned_queue);
queue            1447 drivers/staging/rtl8723bs/os_dep/ioctl_linux.c 	phead = get_list_head(queue);
queue            1499 drivers/staging/rtl8723bs/os_dep/ioctl_linux.c 	struct __queue *queue = &pmlmepriv->scanned_queue;
queue            1550 drivers/staging/rtl8723bs/os_dep/ioctl_linux.c 		spin_lock_bh(&queue->lock);
queue            1551 drivers/staging/rtl8723bs/os_dep/ioctl_linux.c 		phead = get_list_head(queue);
queue            1584 drivers/staging/rtl8723bs/os_dep/ioctl_linux.c 					spin_unlock_bh(&queue->lock);
queue            1591 drivers/staging/rtl8723bs/os_dep/ioctl_linux.c 		spin_unlock_bh(&queue->lock);
queue            2448 drivers/staging/rtl8723bs/os_dep/ioctl_linux.c 	struct __queue *queue = &(pmlmepriv->scanned_queue);
queue            2480 drivers/staging/rtl8723bs/os_dep/ioctl_linux.c 	phead = get_list_head(queue);
queue              63 drivers/staging/rtl8723bs/os_dep/osdep_service.c 	INIT_LIST_HEAD(&(pqueue->queue));
queue              72 drivers/staging/rtl8723bs/os_dep/xmit_linux.c 	u16 queue;
queue              75 drivers/staging/rtl8723bs/os_dep/xmit_linux.c 	queue = skb_get_queue_mapping(pkt);
queue              77 drivers/staging/rtl8723bs/os_dep/xmit_linux.c 		if (__netif_subqueue_stopped(padapter->pnetdev, queue) &&
queue              78 drivers/staging/rtl8723bs/os_dep/xmit_linux.c 		    (pxmitpriv->hwxmits[queue].accnt < WMM_XMIT_THRESHOLD))
queue              79 drivers/staging/rtl8723bs/os_dep/xmit_linux.c 			netif_wake_subqueue(padapter->pnetdev, queue);
queue              81 drivers/staging/rtl8723bs/os_dep/xmit_linux.c 		if (__netif_subqueue_stopped(padapter->pnetdev, queue))
queue              82 drivers/staging/rtl8723bs/os_dep/xmit_linux.c 			netif_wake_subqueue(padapter->pnetdev, queue);
queue             103 drivers/staging/rtl8723bs/os_dep/xmit_linux.c 	if (!list_empty(&padapter->xmitpriv.pending_xmitbuf_queue.queue))
queue             110 drivers/staging/rtl8723bs/os_dep/xmit_linux.c 	u16 queue;
queue             112 drivers/staging/rtl8723bs/os_dep/xmit_linux.c 	queue = skb_get_queue_mapping(pkt);
queue             115 drivers/staging/rtl8723bs/os_dep/xmit_linux.c 		if (pxmitpriv->hwxmits[queue].accnt > WMM_XMIT_THRESHOLD) {
queue             117 drivers/staging/rtl8723bs/os_dep/xmit_linux.c 			netif_stop_subqueue(padapter->pnetdev, queue);
queue             121 drivers/staging/rtl8723bs/os_dep/xmit_linux.c 			if (!netif_tx_queue_stopped(netdev_get_tx_queue(padapter->pnetdev, queue)))
queue             122 drivers/staging/rtl8723bs/os_dep/xmit_linux.c 				netif_stop_subqueue(padapter->pnetdev, queue);
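
The xmit_linux.c lines implement per-queue flow control: map each skb to its hardware queue with skb_get_queue_mapping(), stop that subqueue once the in-flight count crosses WMM_XMIT_THRESHOLD, and wake it again when it drains. A hedged sketch of the same shape; the accounting field and the threshold value here are stand-ins, not the driver's.

	#include <linux/netdevice.h>
	#include <linux/skbuff.h>

	#define DEMO_XMIT_THRESHOLD 8	/* stand-in for WMM_XMIT_THRESHOLD */

	struct demo_priv {
		struct net_device *ndev;
		int accnt[4];		/* frames in flight per hardware queue */
	};

	/* On transmit: account the frame and throttle its subqueue if needed. */
	static void demo_tx_throttle(struct demo_priv *priv, struct sk_buff *skb)
	{
		u16 queue = skb_get_queue_mapping(skb);

		if (++priv->accnt[queue] > DEMO_XMIT_THRESHOLD)
			netif_stop_subqueue(priv->ndev, queue);
	}

	/* On completion: release the accounting and wake the subqueue. */
	static void demo_tx_complete(struct demo_priv *priv, u16 queue)
	{
		if (--priv->accnt[queue] < DEMO_XMIT_THRESHOLD &&
		    __netif_subqueue_stopped(priv->ndev, queue))
			netif_wake_subqueue(priv->ndev, queue);
	}
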
queue            1770 drivers/staging/vc04_services/bcm2835-camera/bcm2835-camera.c 	vfd->queue = &dev->capture.vb_vidq;
queue            1182 drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.c notify_bulks(struct vchiq_service *service, struct vchiq_bulk_queue *queue,
queue            1190 drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.c 		(queue == &service->bulk_tx) ? 't' : 'r',
queue            1191 drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.c 		queue->process, queue->remote_notify, queue->remove);
queue            1193 drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.c 	queue->remote_notify = queue->process;
queue            1196 drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.c 		while (queue->remove != queue->remote_notify) {
queue            1198 drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.c 				&queue->bulks[BULK_INDEX(queue->remove)];
queue            1250 drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.c 			queue->remove++;
queue            1259 drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.c 			(queue == &service->bulk_tx) ?
queue            1334 drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.c 			struct vchiq_bulk_queue *queue)
queue            1336 drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.c 	int is_tx = (queue == &service->bulk_tx);
queue            1341 drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.c 		queue->local_insert, queue->remote_insert, queue->process);
queue            1343 drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.c 	WARN_ON(!((int)(queue->local_insert - queue->process) >= 0));
queue            1344 drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.c 	WARN_ON(!((int)(queue->remote_insert - queue->process) >= 0));
queue            1346 drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.c 	while ((queue->process != queue->local_insert) ||
queue            1347 drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.c 		(queue->process != queue->remote_insert)) {
queue            1349 drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.c 				&queue->bulks[BULK_INDEX(queue->process)];
queue            1351 drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.c 		if (queue->process == queue->remote_insert) {
queue            1355 drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.c 			queue->remote_insert++;
queue            1358 drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.c 		if (queue->process != queue->local_insert) {
queue            1376 drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.c 			queue->local_insert++;
queue            1379 drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.c 		queue->process++;
queue            1714 drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.c 				struct vchiq_bulk_queue *queue;
queue            1717 drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.c 				queue = (type == VCHIQ_MSG_BULK_RX_DONE) ?
queue            1725 drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.c 				if ((int)(queue->remote_insert -
queue            1726 drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.c 					queue->local_insert) >= 0) {
queue            1732 drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.c 						queue->remote_insert,
queue            1733 drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.c 						queue->local_insert);
queue            1737 drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.c 				if (queue->process != queue->remote_insert) {
queue            1740 drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.c 					       queue->process,
queue            1741 drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.c 					       queue->remote_insert);
queue            1746 drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.c 				bulk = &queue->bulks[
queue            1747 drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.c 					BULK_INDEX(queue->remote_insert)];
queue            1749 drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.c 				queue->remote_insert++;
queue            1762 drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.c 					queue->local_insert,
queue            1763 drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.c 					queue->remote_insert, queue->process);
queue            1766 drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.c 				WARN_ON(queue->process == queue->local_insert);
queue            1768 drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.c 				queue->process++;
queue            1771 drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.c 				notify_bulks(service, queue, 1/*retry_poll*/);
queue            2071 drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.c init_bulk_queue(struct vchiq_bulk_queue *queue)
queue            2073 drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.c 	queue->local_insert = 0;
queue            2074 drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.c 	queue->remote_insert = 0;
queue            2075 drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.c 	queue->process = 0;
queue            2076 drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.c 	queue->remote_notify = 0;
queue            2077 drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.c 	queue->remove = 0;
queue            2964 drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.c 	struct vchiq_bulk_queue *queue;
queue            2998 drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.c 	queue = (dir == VCHIQ_BULK_TRANSMIT) ?
queue            3006 drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.c 	if (queue->local_insert == queue->remove + VCHIQ_NUM_SERVICE_BULKS) {
queue            3019 drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.c 		} while (queue->local_insert == queue->remove +
queue            3023 drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.c 	bulk = &queue->bulks[BULK_INDEX(queue->local_insert)];
queue            3067 drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.c 	queue->local_insert++;
queue            3076 drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.c 		queue->local_insert, queue->remote_insert, queue->process);
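
vchiq's bulk queues never wrap their cursors: local_insert, remote_insert, process, remote_notify and remove run freely, and BULK_INDEX() masks them down to a slot. Fullness and ordering then reduce to equality tests and signed subtraction, which stay correct across integer wraparound. A small sketch of those invariants under assumed names, with DEMO_NUM_BULKS standing in for VCHIQ_NUM_SERVICE_BULKS (a power of two in the real driver):

	#include <linux/types.h>

	#define DEMO_NUM_BULKS	4
	#define DEMO_INDEX(x)	((x) & (DEMO_NUM_BULKS - 1))

	struct demo_bulk_queue {
		u32 local_insert;	/* bumped when this side queues a transfer */
		u32 remote_insert;	/* bumped when the remote completes one */
		u32 process;		/* next completion to reap */
		u32 remove;		/* next slot handed back to the caller */
	};

	/* Full when the producer laps the consumer by the ring size. */
	static bool demo_queue_full(const struct demo_bulk_queue *q)
	{
		return q->local_insert == q->remove + DEMO_NUM_BULKS;
	}

	/*
	 * Signed difference tolerates wraparound; this is the same idiom
	 * as the (int)(queue->remote_insert - queue->local_insert) checks
	 * in vchiq_core.c.
	 */
	static bool demo_completion_pending(const struct demo_bulk_queue *q)
	{
		return (int)(q->remote_insert - q->process) > 0;
	}

	static u32 demo_slot(const struct demo_bulk_queue *q)
	{
		return DEMO_INDEX(q->process);	/* slot of the next completion */
	}
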
queue              17 drivers/staging/vc04_services/interface/vchiq_arm/vchiq_shim.c 	struct vchiu_queue queue;
queue              51 drivers/staging/vc04_services/interface/vchiq_arm/vchiq_shim.c 		if (vchiu_queue_is_empty(&service->queue))
queue              54 drivers/staging/vc04_services/interface/vchiq_arm/vchiq_shim.c 	header = vchiu_queue_peek(&service->queue);
queue              79 drivers/staging/vc04_services/interface/vchiq_arm/vchiq_shim.c 	header = vchiu_queue_pop(&service->queue);
queue             336 drivers/staging/vc04_services/interface/vchiq_arm/vchiq_shim.c 		if (vchiu_queue_is_empty(&service->queue))
queue             339 drivers/staging/vc04_services/interface/vchiq_arm/vchiq_shim.c 	header = vchiu_queue_pop(&service->queue);
queue             410 drivers/staging/vc04_services/interface/vchiq_arm/vchiq_shim.c 		if (vchiu_queue_is_empty(&service->queue))
queue             413 drivers/staging/vc04_services/interface/vchiq_arm/vchiq_shim.c 	header = vchiu_queue_pop(&service->queue);
queue             525 drivers/staging/vc04_services/interface/vchiq_arm/vchiq_shim.c 		vchiu_queue_push(&service->queue, header);
queue             582 drivers/staging/vc04_services/interface/vchiq_arm/vchiq_shim.c 		if (vchiu_queue_init(&service->queue, 64)) {
queue             597 drivers/staging/vc04_services/interface/vchiq_arm/vchiq_shim.c 		vchiu_queue_delete(&service->queue);
queue              11 drivers/staging/vc04_services/interface/vchiq_arm/vchiq_util.c int vchiu_queue_init(struct vchiu_queue *queue, int size)
queue              15 drivers/staging/vc04_services/interface/vchiq_arm/vchiq_util.c 	queue->size = size;
queue              16 drivers/staging/vc04_services/interface/vchiq_arm/vchiq_util.c 	queue->read = 0;
queue              17 drivers/staging/vc04_services/interface/vchiq_arm/vchiq_util.c 	queue->write = 0;
queue              18 drivers/staging/vc04_services/interface/vchiq_arm/vchiq_util.c 	queue->initialized = 1;
queue              20 drivers/staging/vc04_services/interface/vchiq_arm/vchiq_util.c 	init_completion(&queue->pop);
queue              21 drivers/staging/vc04_services/interface/vchiq_arm/vchiq_util.c 	init_completion(&queue->push);
queue              23 drivers/staging/vc04_services/interface/vchiq_arm/vchiq_util.c 	queue->storage = kcalloc(size, sizeof(struct vchiq_header *),
queue              25 drivers/staging/vc04_services/interface/vchiq_arm/vchiq_util.c 	if (!queue->storage) {
queue              26 drivers/staging/vc04_services/interface/vchiq_arm/vchiq_util.c 		vchiu_queue_delete(queue);
queue              32 drivers/staging/vc04_services/interface/vchiq_arm/vchiq_util.c void vchiu_queue_delete(struct vchiu_queue *queue)
queue              34 drivers/staging/vc04_services/interface/vchiq_arm/vchiq_util.c 	kfree(queue->storage);
queue              37 drivers/staging/vc04_services/interface/vchiq_arm/vchiq_util.c int vchiu_queue_is_empty(struct vchiu_queue *queue)
queue              39 drivers/staging/vc04_services/interface/vchiq_arm/vchiq_util.c 	return queue->read == queue->write;
queue              42 drivers/staging/vc04_services/interface/vchiq_arm/vchiq_util.c void vchiu_queue_push(struct vchiu_queue *queue, struct vchiq_header *header)
queue              44 drivers/staging/vc04_services/interface/vchiq_arm/vchiq_util.c 	if (!queue->initialized)
queue              47 drivers/staging/vc04_services/interface/vchiq_arm/vchiq_util.c 	while (queue->write == queue->read + queue->size) {
queue              48 drivers/staging/vc04_services/interface/vchiq_arm/vchiq_util.c 		if (wait_for_completion_interruptible(&queue->pop))
queue              52 drivers/staging/vc04_services/interface/vchiq_arm/vchiq_util.c 	queue->storage[queue->write & (queue->size - 1)] = header;
queue              53 drivers/staging/vc04_services/interface/vchiq_arm/vchiq_util.c 	queue->write++;
queue              55 drivers/staging/vc04_services/interface/vchiq_arm/vchiq_util.c 	complete(&queue->push);
queue              58 drivers/staging/vc04_services/interface/vchiq_arm/vchiq_util.c struct vchiq_header *vchiu_queue_peek(struct vchiu_queue *queue)
queue              60 drivers/staging/vc04_services/interface/vchiq_arm/vchiq_util.c 	while (queue->write == queue->read) {
queue              61 drivers/staging/vc04_services/interface/vchiq_arm/vchiq_util.c 		if (wait_for_completion_interruptible(&queue->push))
queue              65 drivers/staging/vc04_services/interface/vchiq_arm/vchiq_util.c 	complete(&queue->push); // We haven't removed anything from the queue.
queue              67 drivers/staging/vc04_services/interface/vchiq_arm/vchiq_util.c 	return queue->storage[queue->read & (queue->size - 1)];
queue              70 drivers/staging/vc04_services/interface/vchiq_arm/vchiq_util.c struct vchiq_header *vchiu_queue_pop(struct vchiu_queue *queue)
queue              74 drivers/staging/vc04_services/interface/vchiq_arm/vchiq_util.c 	while (queue->write == queue->read) {
queue              75 drivers/staging/vc04_services/interface/vchiq_arm/vchiq_util.c 		if (wait_for_completion_interruptible(&queue->push))
queue              79 drivers/staging/vc04_services/interface/vchiq_arm/vchiq_util.c 	header = queue->storage[queue->read & (queue->size - 1)];
queue              80 drivers/staging/vc04_services/interface/vchiq_arm/vchiq_util.c 	queue->read++;
queue              82 drivers/staging/vc04_services/interface/vchiq_arm/vchiq_util.c 	complete(&queue->pop);
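
Taken together, the vchiq_util.c lines implement a bounded ring guarded by two completions: push blocks while write == read + size, peek and pop block while write == read, and each side completes the other's waiter after moving its cursor (peek re-completes push because it removed nothing). A usage sketch, assuming vchiq_util.h is available and that the size passed to vchiu_queue_init() is a power of two, as the (size - 1) masking requires; 64 matches the vchiq_shim.c call:

	#include "vchiq_util.h"	/* assumed local driver header */

	static int demo_roundtrip(struct vchiq_header *hdr_in)
	{
		struct vchiu_queue q;
		struct vchiq_header *hdr;

		if (vchiu_queue_init(&q, 64))	/* size must be a power of two */
			return -ENOMEM;

		vchiu_queue_push(&q, hdr_in);	/* blocks while the ring is full */
		hdr = vchiu_queue_peek(&q);	/* leaves the header in the ring */
		hdr = vchiu_queue_pop(&q);	/* removes it, waking any pusher */

		vchiu_queue_delete(&q);
		return hdr == hdr_in ? 0 : -EIO;
	}
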
queue              39 drivers/staging/vc04_services/interface/vchiq_arm/vchiq_util.h extern int  vchiu_queue_init(struct vchiu_queue *queue, int size);
queue              40 drivers/staging/vc04_services/interface/vchiq_arm/vchiq_util.h extern void vchiu_queue_delete(struct vchiu_queue *queue);
queue              42 drivers/staging/vc04_services/interface/vchiq_arm/vchiq_util.h extern int vchiu_queue_is_empty(struct vchiu_queue *queue);
queue              44 drivers/staging/vc04_services/interface/vchiq_arm/vchiq_util.h extern void vchiu_queue_push(struct vchiu_queue *queue,
queue              47 drivers/staging/vc04_services/interface/vchiq_arm/vchiq_util.h extern struct vchiq_header *vchiu_queue_peek(struct vchiu_queue *queue);
queue              48 drivers/staging/vc04_services/interface/vchiq_arm/vchiq_util.h extern struct vchiq_header *vchiu_queue_pop(struct vchiu_queue *queue);
queue             996 drivers/target/target_core_user.c 		goto queue;
queue            1016 drivers/target/target_core_user.c 		goto queue;
queue            1100 drivers/target/target_core_user.c queue:
queue            1198 drivers/target/target_core_user.c static void tcmu_set_next_deadline(struct list_head *queue,
queue            1204 drivers/target/target_core_user.c 	list_for_each_entry_safe(tcmu_cmd, tmp_cmd, queue, queue_entry) {
queue             198 drivers/thunderbolt/nhi.c 	list_for_each_entry_safe(frame, n, &ring->queue, list) {
queue             241 drivers/thunderbolt/nhi.c 		list_splice_tail_init(&ring->queue, &done);
queue             285 drivers/thunderbolt/nhi.c 		list_add_tail(&frame->list, &ring->queue);
queue             508 drivers/thunderbolt/nhi.c 	INIT_LIST_HEAD(&ring->queue);
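
The nhi.c lines show the splice-then-complete idiom: completed frames are moved off the ring's list in one list_splice_tail_init() under the lock, so their callbacks can run with the lock dropped. A minimal sketch with stand-in frame and ring types, assuming process context for the plain _irq lock variant:

	#include <linux/list.h>
	#include <linux/spinlock.h>

	struct demo_frame {
		struct list_head list;
		void (*callback)(struct demo_frame *f);
	};

	struct demo_ring {
		spinlock_t lock;
		struct list_head queue;	/* frames posted to the hardware */
	};

	/* Move all completed frames off the ring, then complete them unlocked. */
	static void demo_ring_complete_all(struct demo_ring *ring)
	{
		struct demo_frame *frame, *n;
		LIST_HEAD(done);

		spin_lock_irq(&ring->lock);
		list_splice_tail_init(&ring->queue, &done);
		spin_unlock_irq(&ring->lock);

		list_for_each_entry_safe(frame, n, &done, list) {
			list_del_init(&frame->list);
			frame->callback(frame);
		}
	}
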
queue             295 drivers/tty/ipwireless/hardware.c 	struct list_head queue;
queue             336 drivers/tty/ipwireless/hardware.c 	struct list_head queue;
queue             510 drivers/tty/ipwireless/hardware.c 		list_add(&packet->queue, &hw->tx_queue[0]);
queue             566 drivers/tty/ipwireless/hardware.c 					struct ipw_rx_packet, queue);
queue             569 drivers/tty/ipwireless/hardware.c 			list_del(&packet->queue);
queue             615 drivers/tty/ipwireless/hardware.c 		list_add(&packet->queue, &hw->rx_pool);
queue             690 drivers/tty/ipwireless/hardware.c 		list_add_tail(&packet->queue, &hw->rx_queue);
queue             713 drivers/tty/ipwireless/hardware.c 					struct ipw_rx_packet, queue);
queue             717 drivers/tty/ipwireless/hardware.c 		list_del(&packet->queue);
queue             971 drivers/tty/ipwireless/hardware.c 						queue);
queue             974 drivers/tty/ipwireless/hardware.c 				list_del(&packet->queue);
queue            1243 drivers/tty/ipwireless/hardware.c 	list_add_tail(&packet->queue, &hw->tx_queue[priority]);
queue            1262 drivers/tty/ipwireless/hardware.c 	INIT_LIST_HEAD(&packet->queue);
queue            1285 drivers/tty/ipwireless/hardware.c 	INIT_LIST_HEAD(&packet->header.queue);
queue            1745 drivers/tty/ipwireless/hardware.c 		list_for_each_entry_safe(tp, tq, &hw->tx_queue[i], queue) {
queue            1746 drivers/tty/ipwireless/hardware.c 			list_del(&tp->queue);
queue            1750 drivers/tty/ipwireless/hardware.c 	list_for_each_entry_safe(rp, rq, &hw->rx_queue, queue) {
queue            1751 drivers/tty/ipwireless/hardware.c 		list_del(&rp->queue);
queue            1755 drivers/tty/ipwireless/hardware.c 	list_for_each_entry_safe(rp, rq, &hw->rx_pool, queue) {
queue            1756 drivers/tty/ipwireless/hardware.c 		list_del(&rp->queue);
queue             742 drivers/tty/serial/ifx6x60.c 		INIT_LIST_HEAD(&ifx_dev->spi_msg.queue);
queue              30 drivers/usb/c67x00/c67x00-sched.c 	struct list_head queue;
queue             253 drivers/usb/c67x00/c67x00-sched.c 	INIT_LIST_HEAD(&ep_data->queue);
queue             292 drivers/usb/c67x00/c67x00-sched.c 	if (!list_empty(&ep_data->queue))
queue             296 drivers/usb/c67x00/c67x00-sched.c 	list_del(&ep_data->queue);
queue             401 drivers/usb/c67x00/c67x00-sched.c 		if (list_empty(&urbp->ep_data->queue))
queue             407 drivers/usb/c67x00/c67x00-sched.c 			last_urb = list_entry(urbp->ep_data->queue.prev,
queue             420 drivers/usb/c67x00/c67x00-sched.c 	list_add_tail(&urbp->hep_node, &urbp->ep_data->queue);
queue             784 drivers/usb/c67x00/c67x00-sched.c 		if (!list_empty(&ep_data->queue)) {
queue             787 drivers/usb/c67x00/c67x00-sched.c 			urb = list_entry(ep_data->queue.next,
queue             800 drivers/usb/cdns3/ep0.c 	.queue = cdns3_gadget_ep0_queue,
queue            2230 drivers/usb/cdns3/gadget.c 	.queue = cdns3_gadget_ep_queue,
queue              93 drivers/usb/chipidea/ci.h 		struct list_head	queue;
queue             168 drivers/usb/chipidea/debug.c 		list_for_each_entry(req, &ci->ci_hw_ep[i].qh.queue, queue) {
queue             470 drivers/usb/chipidea/udc.c 	if (!list_empty(&hwep->qh.queue)) {
queue             477 drivers/usb/chipidea/udc.c 		hwreqprev = list_entry(hwep->qh.queue.prev,
queue             478 drivers/usb/chipidea/udc.c 				struct ci_hw_req, queue);
queue             633 drivers/usb/chipidea/udc.c 	while (!list_empty(&hwep->qh.queue)) {
queue             636 drivers/usb/chipidea/udc.c 		struct ci_hw_req *hwreq = list_entry(hwep->qh.queue.next,
queue             637 drivers/usb/chipidea/udc.c 						     struct ci_hw_req, queue);
queue             646 drivers/usb/chipidea/udc.c 		list_del_init(&hwreq->queue);
queue             677 drivers/usb/chipidea/udc.c 		!list_empty(&hwep->qh.queue) &&
queue             816 drivers/usb/chipidea/udc.c 		if (!list_empty(&hwep->qh.queue)) {
queue             830 drivers/usb/chipidea/udc.c 	if (!list_empty(&hwreq->queue)) {
queue             844 drivers/usb/chipidea/udc.c 		list_add_tail(&hwreq->queue, &hwep->qh.queue);
queue             978 drivers/usb/chipidea/udc.c 	list_for_each_entry_safe(hwreq, hwreqtemp, &hwep->qh.queue,
queue             979 drivers/usb/chipidea/udc.c 			queue) {
queue             983 drivers/usb/chipidea/udc.c 		list_del_init(&hwreq->queue);
queue            1241 drivers/usb/chipidea/udc.c 	if (!list_empty(&hwep->qh.queue)) {
queue            1343 drivers/usb/chipidea/udc.c 		INIT_LIST_HEAD(&hwreq->queue);
queue            1364 drivers/usb/chipidea/udc.c 	} else if (!list_empty(&hwreq->queue)) {
queue            1421 drivers/usb/chipidea/udc.c 		hwep->ep.desc == NULL || list_empty(&hwreq->queue) ||
queue            1422 drivers/usb/chipidea/udc.c 		list_empty(&hwep->qh.queue))
queue            1436 drivers/usb/chipidea/udc.c 	list_del_init(&hwreq->queue);
queue            1517 drivers/usb/chipidea/udc.c 	.queue	       = ep_queue,
queue            1715 drivers/usb/chipidea/udc.c 			INIT_LIST_HEAD(&hwep->qh.queue);
queue              77 drivers/usb/chipidea/udc.h 	struct list_head	queue;
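
The chipidea excerpts add a request to hardware in two ways: if the endpoint queue is non-empty, the new request's transfer descriptors are chained behind the previous tail (the list_entry(hwep->qh.queue.prev, ...) lines above); only an idle endpoint is primed directly. A sketch of that decision with hypothetical prime/chain helpers, not the controller's real programming sequence:

	#include <linux/list.h>

	struct demo_hwreq {
		struct list_head queue;
		void *td;		/* first transfer descriptor of this request */
	};

	struct demo_hwep {
		struct list_head queue;	/* requests handed to the controller */
	};

	static void demo_prime(struct demo_hwep *ep, struct demo_hwreq *req)
	{
		/* write req->td to the endpoint registers and prime it */
	}

	static void demo_chain(struct demo_hwreq *prev, struct demo_hwreq *req)
	{
		/* point prev's last TD at req->td so the DMA walks straight on */
	}

	/* Add to hardware: chain behind the tail if the controller is busy. */
	static void demo_hw_queue(struct demo_hwep *ep, struct demo_hwreq *req)
	{
		if (!list_empty(&ep->queue)) {
			struct demo_hwreq *prev = list_entry(ep->queue.prev,
							     struct demo_hwreq,
							     queue);
			demo_chain(prev, req);
		} else {
			demo_prime(ep, req);
		}
		list_add_tail(&req->queue, &ep->queue);
	}
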
queue             151 drivers/usb/dwc2/core.h 	struct list_head        queue;
queue             194 drivers/usb/dwc2/core.h 	struct list_head        queue;
queue             259 drivers/usb/dwc2/debugfs.c 		   ep->queue.next, ep->queue.prev);
queue             263 drivers/usb/dwc2/debugfs.c 	list_for_each_entry(req, &ep->queue, queue) {
queue             392 drivers/usb/dwc2/gadget.c 	INIT_LIST_HEAD(&req->queue);
queue             963 drivers/usb/dwc2/gadget.c 	if (list_empty(&hs_ep->queue)) {
queue             978 drivers/usb/dwc2/gadget.c 	list_for_each_entry_safe(hs_req, treq, &hs_ep->queue, queue) {
queue            1385 drivers/usb/dwc2/gadget.c 	INIT_LIST_HEAD(&hs_req->queue);
queue            1431 drivers/usb/dwc2/gadget.c 	first = list_empty(&hs_ep->queue);
queue            1432 drivers/usb/dwc2/gadget.c 	list_add_tail(&hs_req->queue, &hs_ep->queue);
queue            1694 drivers/usb/dwc2/gadget.c 	return list_first_entry_or_null(&hs_ep->queue, struct dwc2_hsotg_req,
queue            1695 drivers/usb/dwc2/gadget.c 					queue);
queue            1714 drivers/usb/dwc2/gadget.c 	if (!list_empty(&hs_ep->queue)) {
queue            1823 drivers/usb/dwc2/gadget.c 					list_del_init(&hs_req->queue);
queue            2015 drivers/usb/dwc2/gadget.c 	if (!list_empty(&hs_req->queue)) {
queue            2109 drivers/usb/dwc2/gadget.c 	list_del_init(&hs_req->queue);
queue            2826 drivers/usb/dwc2/gadget.c 	if (list_empty(&hs_ep->queue)) {
queue            3237 drivers/usb/dwc2/gadget.c 	while (!list_empty(&ep->queue)) {
queue            4219 drivers/usb/dwc2/gadget.c 	list_for_each_entry_safe(req, treq, &ep->queue, queue) {
queue            4293 drivers/usb/dwc2/gadget.c 	if (!now && value && !list_empty(&hs_ep->queue)) {
queue            4360 drivers/usb/dwc2/gadget.c 	.queue		= dwc2_hsotg_ep_queue_lock,
queue            4646 drivers/usb/dwc2/gadget.c 	INIT_LIST_HEAD(&hs_ep->queue);
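
dwc2_hsotg_ep_queue records whether the endpoint list was empty before list_add_tail and only then starts the hardware; later requests are picked up by the completion path, which pulls the new head with list_first_entry_or_null. A sketch of that kick-only-on-first idiom (the types and the start helper are illustrative):

	#include <linux/list.h>
	#include <linux/spinlock.h>
	#include <linux/types.h>

	struct demo_req {
		struct list_head queue;
	};

	struct demo_ep {
		spinlock_t lock;
		struct list_head queue;
	};

	static void demo_start_transfer(struct demo_ep *ep, struct demo_req *req)
	{
		/* program the hardware for this request */
	}

	static void demo_ep_queue(struct demo_ep *ep, struct demo_req *req)
	{
		bool first;

		spin_lock(&ep->lock);
		first = list_empty(&ep->queue);
		list_add_tail(&req->queue, &ep->queue);
		spin_unlock(&ep->lock);

		/* The completion handler restarts the next queued request. */
		if (first)
			demo_start_transfer(ep, req);
	}
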
queue            1694 drivers/usb/dwc3/gadget.c 	.queue		= dwc3_gadget_ep0_queue,
queue            1705 drivers/usb/dwc3/gadget.c 	.queue		= dwc3_gadget_ep_queue,
queue             360 drivers/usb/gadget/function/u_serial.c 	struct list_head	*queue = &port->read_queue;
queue             367 drivers/usb/gadget/function/u_serial.c 	while (!list_empty(queue)) {
queue             370 drivers/usb/gadget/function/u_serial.c 		req = list_first_entry(queue, struct usb_request, list);
queue             438 drivers/usb/gadget/function/u_serial.c 	if (!list_empty(queue) && !tty_throttled(tty))
queue             102 drivers/usb/gadget/function/uvc.h 	struct uvc_video_queue queue;
queue              44 drivers/usb/gadget/function/uvc_queue.c 	struct uvc_video_queue *queue = vb2_get_drv_priv(vq);
queue              45 drivers/usb/gadget/function/uvc_queue.c 	struct uvc_video *video = container_of(queue, struct uvc_video, queue);
queue              59 drivers/usb/gadget/function/uvc_queue.c 	struct uvc_video_queue *queue = vb2_get_drv_priv(vb->vb2_queue);
queue              69 drivers/usb/gadget/function/uvc_queue.c 	if (unlikely(queue->flags & UVC_QUEUE_DISCONNECTED))
queue              85 drivers/usb/gadget/function/uvc_queue.c 	struct uvc_video_queue *queue = vb2_get_drv_priv(vb->vb2_queue);
queue              90 drivers/usb/gadget/function/uvc_queue.c 	spin_lock_irqsave(&queue->irqlock, flags);
queue              92 drivers/usb/gadget/function/uvc_queue.c 	if (likely(!(queue->flags & UVC_QUEUE_DISCONNECTED))) {
queue              93 drivers/usb/gadget/function/uvc_queue.c 		list_add_tail(&buf->queue, &queue->irqqueue);
queue             102 drivers/usb/gadget/function/uvc_queue.c 	spin_unlock_irqrestore(&queue->irqlock, flags);
queue             113 drivers/usb/gadget/function/uvc_queue.c int uvcg_queue_init(struct uvc_video_queue *queue, enum v4l2_buf_type type,
queue             118 drivers/usb/gadget/function/uvc_queue.c 	queue->queue.type = type;
queue             119 drivers/usb/gadget/function/uvc_queue.c 	queue->queue.io_modes = VB2_MMAP | VB2_USERPTR | VB2_DMABUF;
queue             120 drivers/usb/gadget/function/uvc_queue.c 	queue->queue.drv_priv = queue;
queue             121 drivers/usb/gadget/function/uvc_queue.c 	queue->queue.buf_struct_size = sizeof(struct uvc_buffer);
queue             122 drivers/usb/gadget/function/uvc_queue.c 	queue->queue.ops = &uvc_queue_qops;
queue             123 drivers/usb/gadget/function/uvc_queue.c 	queue->queue.lock = lock;
queue             124 drivers/usb/gadget/function/uvc_queue.c 	queue->queue.mem_ops = &vb2_vmalloc_memops;
queue             125 drivers/usb/gadget/function/uvc_queue.c 	queue->queue.timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC
queue             127 drivers/usb/gadget/function/uvc_queue.c 	ret = vb2_queue_init(&queue->queue);
queue             131 drivers/usb/gadget/function/uvc_queue.c 	spin_lock_init(&queue->irqlock);
queue             132 drivers/usb/gadget/function/uvc_queue.c 	INIT_LIST_HEAD(&queue->irqqueue);
queue             133 drivers/usb/gadget/function/uvc_queue.c 	queue->flags = 0;
queue             141 drivers/usb/gadget/function/uvc_queue.c void uvcg_free_buffers(struct uvc_video_queue *queue)
queue             143 drivers/usb/gadget/function/uvc_queue.c 	vb2_queue_release(&queue->queue);
queue             149 drivers/usb/gadget/function/uvc_queue.c int uvcg_alloc_buffers(struct uvc_video_queue *queue,
queue             154 drivers/usb/gadget/function/uvc_queue.c 	ret = vb2_reqbufs(&queue->queue, rb);
queue             159 drivers/usb/gadget/function/uvc_queue.c int uvcg_query_buffer(struct uvc_video_queue *queue, struct v4l2_buffer *buf)
queue             161 drivers/usb/gadget/function/uvc_queue.c 	return vb2_querybuf(&queue->queue, buf);
queue             164 drivers/usb/gadget/function/uvc_queue.c int uvcg_queue_buffer(struct uvc_video_queue *queue, struct v4l2_buffer *buf)
queue             169 drivers/usb/gadget/function/uvc_queue.c 	ret = vb2_qbuf(&queue->queue, NULL, buf);
queue             173 drivers/usb/gadget/function/uvc_queue.c 	spin_lock_irqsave(&queue->irqlock, flags);
queue             174 drivers/usb/gadget/function/uvc_queue.c 	ret = (queue->flags & UVC_QUEUE_PAUSED) != 0;
queue             175 drivers/usb/gadget/function/uvc_queue.c 	queue->flags &= ~UVC_QUEUE_PAUSED;
queue             176 drivers/usb/gadget/function/uvc_queue.c 	spin_unlock_irqrestore(&queue->irqlock, flags);
queue             184 drivers/usb/gadget/function/uvc_queue.c int uvcg_dequeue_buffer(struct uvc_video_queue *queue, struct v4l2_buffer *buf,
queue             187 drivers/usb/gadget/function/uvc_queue.c 	return vb2_dqbuf(&queue->queue, buf, nonblocking);
queue             196 drivers/usb/gadget/function/uvc_queue.c __poll_t uvcg_queue_poll(struct uvc_video_queue *queue, struct file *file,
queue             199 drivers/usb/gadget/function/uvc_queue.c 	return vb2_poll(&queue->queue, file, wait);
queue             202 drivers/usb/gadget/function/uvc_queue.c int uvcg_queue_mmap(struct uvc_video_queue *queue, struct vm_area_struct *vma)
queue             204 drivers/usb/gadget/function/uvc_queue.c 	return vb2_mmap(&queue->queue, vma);
queue             213 drivers/usb/gadget/function/uvc_queue.c unsigned long uvcg_queue_get_unmapped_area(struct uvc_video_queue *queue,
queue             216 drivers/usb/gadget/function/uvc_queue.c 	return vb2_get_unmapped_area(&queue->queue, 0, 0, pgoff, 0);
queue             232 drivers/usb/gadget/function/uvc_queue.c void uvcg_queue_cancel(struct uvc_video_queue *queue, int disconnect)
queue             237 drivers/usb/gadget/function/uvc_queue.c 	spin_lock_irqsave(&queue->irqlock, flags);
queue             238 drivers/usb/gadget/function/uvc_queue.c 	while (!list_empty(&queue->irqqueue)) {
queue             239 drivers/usb/gadget/function/uvc_queue.c 		buf = list_first_entry(&queue->irqqueue, struct uvc_buffer,
queue             240 drivers/usb/gadget/function/uvc_queue.c 				       queue);
queue             241 drivers/usb/gadget/function/uvc_queue.c 		list_del(&buf->queue);
queue             252 drivers/usb/gadget/function/uvc_queue.c 		queue->flags |= UVC_QUEUE_DISCONNECTED;
queue             253 drivers/usb/gadget/function/uvc_queue.c 	spin_unlock_irqrestore(&queue->irqlock, flags);
queue             273 drivers/usb/gadget/function/uvc_queue.c int uvcg_queue_enable(struct uvc_video_queue *queue, int enable)
queue             279 drivers/usb/gadget/function/uvc_queue.c 		ret = vb2_streamon(&queue->queue, queue->queue.type);
queue             283 drivers/usb/gadget/function/uvc_queue.c 		queue->sequence = 0;
queue             284 drivers/usb/gadget/function/uvc_queue.c 		queue->buf_used = 0;
queue             286 drivers/usb/gadget/function/uvc_queue.c 		ret = vb2_streamoff(&queue->queue, queue->queue.type);
queue             290 drivers/usb/gadget/function/uvc_queue.c 		spin_lock_irqsave(&queue->irqlock, flags);
queue             291 drivers/usb/gadget/function/uvc_queue.c 		INIT_LIST_HEAD(&queue->irqqueue);
queue             299 drivers/usb/gadget/function/uvc_queue.c 		queue->flags &= ~UVC_QUEUE_DISCONNECTED;
queue             300 drivers/usb/gadget/function/uvc_queue.c 		spin_unlock_irqrestore(&queue->irqlock, flags);
queue             307 drivers/usb/gadget/function/uvc_queue.c struct uvc_buffer *uvcg_queue_next_buffer(struct uvc_video_queue *queue,
queue             312 drivers/usb/gadget/function/uvc_queue.c 	if ((queue->flags & UVC_QUEUE_DROP_INCOMPLETE) &&
queue             319 drivers/usb/gadget/function/uvc_queue.c 	list_del(&buf->queue);
queue             320 drivers/usb/gadget/function/uvc_queue.c 	if (!list_empty(&queue->irqqueue))
queue             321 drivers/usb/gadget/function/uvc_queue.c 		nextbuf = list_first_entry(&queue->irqqueue, struct uvc_buffer,
queue             322 drivers/usb/gadget/function/uvc_queue.c 					   queue);
queue             327 drivers/usb/gadget/function/uvc_queue.c 	buf->buf.sequence = queue->sequence++;
queue             336 drivers/usb/gadget/function/uvc_queue.c struct uvc_buffer *uvcg_queue_head(struct uvc_video_queue *queue)
queue             340 drivers/usb/gadget/function/uvc_queue.c 	if (!list_empty(&queue->irqqueue))
queue             341 drivers/usb/gadget/function/uvc_queue.c 		buf = list_first_entry(&queue->irqqueue, struct uvc_buffer,
queue             342 drivers/usb/gadget/function/uvc_queue.c 				       queue);
queue             344 drivers/usb/gadget/function/uvc_queue.c 		queue->flags |= UVC_QUEUE_PAUSED;
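
uvcg_queue_next_buffer() is the hand-back half of the gadget's streaming loop: the drained buffer is unlinked from irqqueue, stamped with the next sequence number, returned to videobuf2 with vb2_buffer_done(), and the following buffer, if any, becomes the new head. A hedged sketch with an abridged buffer layout (the real struct uvc_buffer carries more state):

	#include <linux/list.h>
	#include <media/videobuf2-v4l2.h>

	struct demo_buffer {
		struct vb2_v4l2_buffer buf;
		struct list_head queue;
	};

	struct demo_vqueue {
		struct list_head irqqueue;
		unsigned int sequence;
	};

	/* Complete the head buffer and return the next one, or NULL. */
	static struct demo_buffer *demo_next_buffer(struct demo_vqueue *q,
						    struct demo_buffer *buf)
	{
		struct demo_buffer *next = NULL;

		list_del(&buf->queue);
		if (!list_empty(&q->irqqueue))
			next = list_first_entry(&q->irqqueue,
						struct demo_buffer, queue);

		buf->buf.sequence = q->sequence++;
		vb2_buffer_done(&buf->buf.vb2_buf, VB2_BUF_STATE_DONE);
		return next;
	}
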
queue              33 drivers/usb/gadget/function/uvc_queue.h 	struct list_head queue;
queue              46 drivers/usb/gadget/function/uvc_queue.h 	struct vb2_queue queue;
queue              57 drivers/usb/gadget/function/uvc_queue.h static inline int uvc_queue_streaming(struct uvc_video_queue *queue)
queue              59 drivers/usb/gadget/function/uvc_queue.h 	return vb2_is_streaming(&queue->queue);
queue              62 drivers/usb/gadget/function/uvc_queue.h int uvcg_queue_init(struct uvc_video_queue *queue, enum v4l2_buf_type type,
queue              65 drivers/usb/gadget/function/uvc_queue.h void uvcg_free_buffers(struct uvc_video_queue *queue);
queue              67 drivers/usb/gadget/function/uvc_queue.h int uvcg_alloc_buffers(struct uvc_video_queue *queue,
queue              70 drivers/usb/gadget/function/uvc_queue.h int uvcg_query_buffer(struct uvc_video_queue *queue, struct v4l2_buffer *buf);
queue              72 drivers/usb/gadget/function/uvc_queue.h int uvcg_queue_buffer(struct uvc_video_queue *queue, struct v4l2_buffer *buf);
queue              74 drivers/usb/gadget/function/uvc_queue.h int uvcg_dequeue_buffer(struct uvc_video_queue *queue,
queue              77 drivers/usb/gadget/function/uvc_queue.h __poll_t uvcg_queue_poll(struct uvc_video_queue *queue,
queue              80 drivers/usb/gadget/function/uvc_queue.h int uvcg_queue_mmap(struct uvc_video_queue *queue, struct vm_area_struct *vma);
queue              83 drivers/usb/gadget/function/uvc_queue.h unsigned long uvcg_queue_get_unmapped_area(struct uvc_video_queue *queue,
queue              87 drivers/usb/gadget/function/uvc_queue.h void uvcg_queue_cancel(struct uvc_video_queue *queue, int disconnect);
queue              89 drivers/usb/gadget/function/uvc_queue.h int uvcg_queue_enable(struct uvc_video_queue *queue, int enable);
queue              91 drivers/usb/gadget/function/uvc_queue.h struct uvc_buffer *uvcg_queue_next_buffer(struct uvc_video_queue *queue,
queue              94 drivers/usb/gadget/function/uvc_queue.h struct uvc_buffer *uvcg_queue_head(struct uvc_video_queue *queue);
queue             144 drivers/usb/gadget/function/uvc_v4l2.c 	if (b->type != video->queue.queue.type)
queue             147 drivers/usb/gadget/function/uvc_v4l2.c 	return uvcg_alloc_buffers(&video->queue, b);
queue             157 drivers/usb/gadget/function/uvc_v4l2.c 	return uvcg_query_buffer(&video->queue, b);
queue             168 drivers/usb/gadget/function/uvc_v4l2.c 	ret = uvcg_queue_buffer(&video->queue, b);
queue             182 drivers/usb/gadget/function/uvc_v4l2.c 	return uvcg_dequeue_buffer(&video->queue, b, file->f_flags & O_NONBLOCK);
queue             193 drivers/usb/gadget/function/uvc_v4l2.c 	if (type != video->queue.queue.type)
queue             218 drivers/usb/gadget/function/uvc_v4l2.c 	if (type != video->queue.queue.type)
queue             309 drivers/usb/gadget/function/uvc_v4l2.c 	uvcg_free_buffers(&video->queue);
queue             326 drivers/usb/gadget/function/uvc_v4l2.c 	return uvcg_queue_mmap(&uvc->video.queue, vma);
queue             335 drivers/usb/gadget/function/uvc_v4l2.c 	return uvcg_queue_poll(&uvc->video.queue, file, wait);
queue             346 drivers/usb/gadget/function/uvc_v4l2.c 	return uvcg_queue_get_unmapped_area(&uvc->video.queue, pgoff);
queue              33 drivers/usb/gadget/function/uvc_video.c 	if (buf->bytesused - video->queue.buf_used <= len - 2)
queue              43 drivers/usb/gadget/function/uvc_video.c 	struct uvc_video_queue *queue = &video->queue;
queue              48 drivers/usb/gadget/function/uvc_video.c 	mem = buf->mem + queue->buf_used;
queue              49 drivers/usb/gadget/function/uvc_video.c 	nbytes = min((unsigned int)len, buf->bytesused - queue->buf_used);
queue              52 drivers/usb/gadget/function/uvc_video.c 	queue->buf_used += nbytes;
queue              83 drivers/usb/gadget/function/uvc_video.c 	if (buf->bytesused == video->queue.buf_used) {
queue              84 drivers/usb/gadget/function/uvc_video.c 		video->queue.buf_used = 0;
queue              86 drivers/usb/gadget/function/uvc_video.c 		uvcg_queue_next_buffer(&video->queue, buf);
queue              93 drivers/usb/gadget/function/uvc_video.c 	    buf->bytesused == video->queue.buf_used)
queue             116 drivers/usb/gadget/function/uvc_video.c 	if (buf->bytesused == video->queue.buf_used) {
queue             117 drivers/usb/gadget/function/uvc_video.c 		video->queue.buf_used = 0;
queue             119 drivers/usb/gadget/function/uvc_video.c 		uvcg_queue_next_buffer(&video->queue, buf);
queue             179 drivers/usb/gadget/function/uvc_video.c 	struct uvc_video_queue *queue = &video->queue;
queue             190 drivers/usb/gadget/function/uvc_video.c 		uvcg_queue_cancel(queue, 1);
queue             197 drivers/usb/gadget/function/uvc_video.c 		uvcg_queue_cancel(queue, 0);
queue             201 drivers/usb/gadget/function/uvc_video.c 	spin_lock_irqsave(&video->queue.irqlock, flags);
queue             202 drivers/usb/gadget/function/uvc_video.c 	buf = uvcg_queue_head(&video->queue);
queue             204 drivers/usb/gadget/function/uvc_video.c 		spin_unlock_irqrestore(&video->queue.irqlock, flags);
queue             211 drivers/usb/gadget/function/uvc_video.c 	spin_unlock_irqrestore(&video->queue.irqlock, flags);
queue             214 drivers/usb/gadget/function/uvc_video.c 		uvcg_queue_cancel(queue, 0);
queue             299 drivers/usb/gadget/function/uvc_video.c 	struct uvc_video_queue *queue = &video->queue;
queue             326 drivers/usb/gadget/function/uvc_video.c 		spin_lock_irqsave(&queue->irqlock, flags);
queue             327 drivers/usb/gadget/function/uvc_video.c 		buf = uvcg_queue_head(queue);
queue             329 drivers/usb/gadget/function/uvc_video.c 			spin_unlock_irqrestore(&queue->irqlock, flags);
queue             337 drivers/usb/gadget/function/uvc_video.c 		spin_unlock_irqrestore(&queue->irqlock, flags);
queue             340 drivers/usb/gadget/function/uvc_video.c 			uvcg_queue_cancel(queue, 0);
queue             371 drivers/usb/gadget/function/uvc_video.c 		uvcg_queue_enable(&video->queue, 0);
queue             375 drivers/usb/gadget/function/uvc_video.c 	if ((ret = uvcg_queue_enable(&video->queue, 1)) < 0)
queue             406 drivers/usb/gadget/function/uvc_video.c 	uvcg_queue_init(&video->queue, V4L2_BUF_TYPE_VIDEO_OUTPUT,
queue             489 drivers/usb/gadget/udc/amd5536udc.h 	struct list_head		queue;
queue             520 drivers/usb/gadget/udc/amd5536udc.h 	struct list_head		queue;
queue              42 drivers/usb/gadget/udc/aspeed-vhub/core.c 	list_del_init(&req->queue);
queue              71 drivers/usb/gadget/udc/aspeed-vhub/core.c 	while (!list_empty(&ep->queue)) {
queue              72 drivers/usb/gadget/udc/aspeed-vhub/core.c 		req = list_first_entry(&ep->queue, struct ast_vhub_req, queue);
queue              60 drivers/usb/gadget/udc/aspeed-vhub/ep0.c 	if (ep->ep.ops->queue(&ep->ep, req, GFP_ATOMIC))
queue             284 drivers/usb/gadget/udc/aspeed-vhub/ep0.c 	req = list_first_entry_or_null(&ep->queue, struct ast_vhub_req, queue);
queue             405 drivers/usb/gadget/udc/aspeed-vhub/ep0.c 	if (!list_empty(&ep->queue) ||
queue             410 drivers/usb/gadget/udc/aspeed-vhub/ep0.c 		       list_empty(&ep->queue), ep->ep0.state);
queue             416 drivers/usb/gadget/udc/aspeed-vhub/ep0.c 	list_add_tail(&req->queue, &ep->queue);
queue             448 drivers/usb/gadget/udc/aspeed-vhub/ep0.c 	req = list_first_entry_or_null(&ep->queue, struct ast_vhub_req, queue);
queue             472 drivers/usb/gadget/udc/aspeed-vhub/ep0.c 	.queue		= ast_vhub_ep0_queue,
queue             493 drivers/usb/gadget/udc/aspeed-vhub/ep0.c 	INIT_LIST_HEAD(&ep->queue);
queue             502 drivers/usb/gadget/udc/aspeed-vhub/ep0.c 	INIT_LIST_HEAD(&ep->ep0.req.queue);
queue              98 drivers/usb/gadget/udc/aspeed-vhub/epn.c 	req = list_first_entry_or_null(&ep->queue, struct ast_vhub_req, queue);
queue             140 drivers/usb/gadget/udc/aspeed-vhub/epn.c 		req = list_first_entry_or_null(&ep->queue, struct ast_vhub_req,
queue             141 drivers/usb/gadget/udc/aspeed-vhub/epn.c 					       queue);
queue             262 drivers/usb/gadget/udc/aspeed-vhub/epn.c 	req = list_first_entry_or_null(&ep->queue, struct ast_vhub_req, queue);
queue             313 drivers/usb/gadget/udc/aspeed-vhub/epn.c 			req = list_first_entry_or_null(&ep->queue,
queue             315 drivers/usb/gadget/udc/aspeed-vhub/epn.c 						       queue);
queue             402 drivers/usb/gadget/udc/aspeed-vhub/epn.c 	empty = list_empty(&ep->queue);
queue             405 drivers/usb/gadget/udc/aspeed-vhub/epn.c 	list_add_tail(&req->queue, &ep->queue);
queue             478 drivers/usb/gadget/udc/aspeed-vhub/epn.c 	list_for_each_entry(req, &ep->queue, queue) {
queue             533 drivers/usb/gadget/udc/aspeed-vhub/epn.c 	if (halt && ep->epn.is_in && !list_empty(&ep->queue)) {
queue             786 drivers/usb/gadget/udc/aspeed-vhub/epn.c 	.queue		= ast_vhub_epn_queue,
queue             817 drivers/usb/gadget/udc/aspeed-vhub/epn.c 	INIT_LIST_HEAD(&ep->queue);
queue             236 drivers/usb/gadget/udc/aspeed-vhub/vhub.h 	struct list_head	queue;
queue             272 drivers/usb/gadget/udc/aspeed-vhub/vhub.h 	struct list_head	queue;
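
The aspeed-vhub core.c lines show the standard gadget completion pair: a done helper that unlinks the request and gives it back, and a nuke loop that flushes whatever is still queued on disable or disconnect. A sketch using the stock usb_gadget_giveback_request() with abridged types:

	#include <linux/errno.h>
	#include <linux/list.h>
	#include <linux/usb/gadget.h>

	struct demo_vreq {
		struct usb_request req;
		struct list_head queue;
	};

	struct demo_vep {
		struct usb_ep ep;
		struct list_head queue;
	};

	/* Complete one request: unlink it, set status, give it back. */
	static void demo_done(struct demo_vep *ep, struct demo_vreq *req,
			      int status)
	{
		list_del_init(&req->queue);
		if (req->req.status == -EINPROGRESS)
			req->req.status = status;
		usb_gadget_giveback_request(&ep->ep, &req->req);
	}

	/* Flush everything still queued, e.g. on disable or disconnect. */
	static void demo_nuke(struct demo_vep *ep, int status)
	{
		struct demo_vreq *req;

		while (!list_empty(&ep->queue)) {
			req = list_first_entry(&ep->queue, struct demo_vreq,
					       queue);
			demo_done(ep, req, status);
		}
	}
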
queue             150 drivers/usb/gadget/udc/at91_udc.c 	if (list_empty(&ep->queue))
queue             153 drivers/usb/gadget/udc/at91_udc.c 	else list_for_each_entry(req, &ep->queue, queue) {
queue             264 drivers/usb/gadget/udc/at91_udc.c 	list_del_init(&req->queue);
queue             279 drivers/usb/gadget/udc/at91_udc.c 	if (list_empty(&ep->queue) && ep->int_mask != (1 << 0))
queue             459 drivers/usb/gadget/udc/at91_udc.c 	if (list_empty(&ep->queue))
queue             463 drivers/usb/gadget/udc/at91_udc.c 	while (!list_empty(&ep->queue)) {
queue             464 drivers/usb/gadget/udc/at91_udc.c 		req = list_entry(ep->queue.next, struct at91_request, queue);
queue             591 drivers/usb/gadget/udc/at91_udc.c 	INIT_LIST_HEAD(&req->queue);
queue             600 drivers/usb/gadget/udc/at91_udc.c 	BUG_ON(!list_empty(&req->queue));
queue             617 drivers/usb/gadget/udc/at91_udc.c 			|| !_req->buf || !list_empty(&req->queue)) {
queue             640 drivers/usb/gadget/udc/at91_udc.c 	if (list_empty(&ep->queue) && !ep->stopped) {
queue             696 drivers/usb/gadget/udc/at91_udc.c 		list_add_tail(&req->queue, &ep->queue);
queue             720 drivers/usb/gadget/udc/at91_udc.c 	list_for_each_entry(req, &ep->queue, queue) {
queue             756 drivers/usb/gadget/udc/at91_udc.c 	if (ep->is_in && (!list_empty(&ep->queue) || (csr >> 16) != 0))
queue             781 drivers/usb/gadget/udc/at91_udc.c 	.queue		= at91_ep_queue,
queue             843 drivers/usb/gadget/udc/at91_udc.c 		INIT_LIST_HEAD(&ep->queue);
queue            1009 drivers/usb/gadget/udc/at91_udc.c 	if (!list_empty(&ep->queue))
queue            1010 drivers/usb/gadget/udc/at91_udc.c 		req = list_entry(ep->queue.next,
queue            1011 drivers/usb/gadget/udc/at91_udc.c 			struct at91_request, queue);
queue            1235 drivers/usb/gadget/udc/at91_udc.c 		if (!list_empty(&ep->queue))
queue            1294 drivers/usb/gadget/udc/at91_udc.c 	if (list_empty(&ep0->queue))
queue            1297 drivers/usb/gadget/udc/at91_udc.c 		req = list_entry(ep0->queue.next, struct at91_request, queue);
queue              92 drivers/usb/gadget/udc/at91_udc.h 	struct list_head		queue;
queue             156 drivers/usb/gadget/udc/at91_udc.h 	struct list_head		queue;
queue              48 drivers/usb/gadget/udc/atmel_usba_udc.c 	list_for_each_entry(req, &ep->queue, queue) {
queue              52 drivers/usb/gadget/udc/atmel_usba_udc.c 		list_add_tail(&req_copy->queue, queue_data);
queue              61 drivers/usb/gadget/udc/atmel_usba_udc.c 	list_for_each_entry_safe(req, req_copy, queue_data, queue) {
queue              62 drivers/usb/gadget/udc/atmel_usba_udc.c 		list_del(&req->queue);
queue              86 drivers/usb/gadget/udc/atmel_usba_udc.c 	struct list_head *queue = file->private_data;
queue              95 drivers/usb/gadget/udc/atmel_usba_udc.c 	list_for_each_entry_safe(req, tmp_req, queue, queue) {
queue             110 drivers/usb/gadget/udc/atmel_usba_udc.c 		list_del(&req->queue);
queue             131 drivers/usb/gadget/udc/atmel_usba_udc.c 	list_for_each_entry_safe(req, tmp_req, queue_data, queue) {
queue             132 drivers/usb/gadget/udc/atmel_usba_udc.c 		list_del(&req->queue);
queue             466 drivers/usb/gadget/udc/atmel_usba_udc.c 	if (list_empty(&ep->queue)) {
queue             471 drivers/usb/gadget/udc/atmel_usba_udc.c 	req = list_entry(ep->queue.next, struct usba_request, queue);
queue             497 drivers/usb/gadget/udc/atmel_usba_udc.c 		if (list_empty(&ep->queue)) {
queue             501 drivers/usb/gadget/udc/atmel_usba_udc.c 		req = list_entry(ep->queue.next,
queue             502 drivers/usb/gadget/udc/atmel_usba_udc.c 				 struct usba_request, queue);
queue             522 drivers/usb/gadget/udc/atmel_usba_udc.c 			list_del_init(&req->queue);
queue             544 drivers/usb/gadget/udc/atmel_usba_udc.c 	WARN_ON(!list_empty(&req->queue));
queue             566 drivers/usb/gadget/udc/atmel_usba_udc.c 	list_for_each_entry_safe(req, tmp_req, list, queue) {
queue             567 drivers/usb/gadget/udc/atmel_usba_udc.c 		list_del_init(&req->queue);
queue             689 drivers/usb/gadget/udc/atmel_usba_udc.c 	list_splice_init(&ep->queue, &req_list);
queue             716 drivers/usb/gadget/udc/atmel_usba_udc.c 	INIT_LIST_HEAD(&req->queue);
queue             769 drivers/usb/gadget/udc/atmel_usba_udc.c 		if (list_empty(&ep->queue))
queue             772 drivers/usb/gadget/udc/atmel_usba_udc.c 		list_add_tail(&req->queue, &ep->queue);
queue             810 drivers/usb/gadget/udc/atmel_usba_udc.c 		list_add_tail(&req->queue, &ep->queue);
queue             877 drivers/usb/gadget/udc/atmel_usba_udc.c 	list_for_each_entry(req, &ep->queue, queue) {
queue             892 drivers/usb/gadget/udc/atmel_usba_udc.c 		if (ep->queue.next == &req->queue) {
queue             911 drivers/usb/gadget/udc/atmel_usba_udc.c 	list_del_init(&req->queue);
queue             949 drivers/usb/gadget/udc/atmel_usba_udc.c 	if (!list_empty(&ep->queue)
queue             987 drivers/usb/gadget/udc/atmel_usba_udc.c 	.queue		= usba_ep_queue,
queue            1141 drivers/usb/gadget/udc/atmel_usba_udc.c 	list_for_each_entry_safe(req, tmp_req, &ep->queue, queue) {
queue            1142 drivers/usb/gadget/udc/atmel_usba_udc.c 		list_del_init(&req->queue);
queue            1442 drivers/usb/gadget/udc/atmel_usba_udc.c 	if (!list_empty(&ep->queue))
queue            1443 drivers/usb/gadget/udc/atmel_usba_udc.c 		req = list_entry(ep->queue.next,
queue            1444 drivers/usb/gadget/udc/atmel_usba_udc.c 				 struct usba_request, queue);
queue            1476 drivers/usb/gadget/udc/atmel_usba_udc.c 				list_del_init(&req->queue);
queue            1506 drivers/usb/gadget/udc/atmel_usba_udc.c 				list_del_init(&req->queue);
queue            1556 drivers/usb/gadget/udc/atmel_usba_udc.c 				list_del_init(&req->queue);
queue            1630 drivers/usb/gadget/udc/atmel_usba_udc.c 		if (list_empty(&ep->queue)) {
queue            1636 drivers/usb/gadget/udc/atmel_usba_udc.c 		req = list_entry(ep->queue.next, struct usba_request, queue);
queue            1644 drivers/usb/gadget/udc/atmel_usba_udc.c 			list_del_init(&req->queue);
queue            1654 drivers/usb/gadget/udc/atmel_usba_udc.c 				list_del_init(&req->queue);
queue            1695 drivers/usb/gadget/udc/atmel_usba_udc.c 	if (list_empty(&ep->queue))
queue            1700 drivers/usb/gadget/udc/atmel_usba_udc.c 		req = list_entry(ep->queue.next, struct usba_request, queue);
queue            1703 drivers/usb/gadget/udc/atmel_usba_udc.c 		list_del_init(&req->queue);
queue            2158 drivers/usb/gadget/udc/atmel_usba_udc.c 		INIT_LIST_HEAD(&ep->queue);
queue             277 drivers/usb/gadget/udc/atmel_usba_udc.h 	struct list_head			queue;
queue             295 drivers/usb/gadget/udc/atmel_usba_udc.h 	struct list_head			queue;
queue             242 drivers/usb/gadget/udc/bcm63xx_udc.c 	struct list_head		queue;
queue             255 drivers/usb/gadget/udc/bcm63xx_udc.c 	struct list_head		queue;		/* ep's requests */
queue             773 drivers/usb/gadget/udc/bcm63xx_udc.c 		INIT_LIST_HEAD(&bep->queue);
queue             966 drivers/usb/gadget/udc/bcm63xx_udc.c 		INIT_LIST_HEAD(&bep->queue);
queue            1053 drivers/usb/gadget/udc/bcm63xx_udc.c 	BUG_ON(!list_empty(&bep->queue));
queue            1092 drivers/usb/gadget/udc/bcm63xx_udc.c 	if (!list_empty(&bep->queue)) {
queue            1093 drivers/usb/gadget/udc/bcm63xx_udc.c 		list_for_each_entry_safe(breq, n, &bep->queue, queue) {
queue            1096 drivers/usb/gadget/udc/bcm63xx_udc.c 			list_del(&breq->queue);
queue            1186 drivers/usb/gadget/udc/bcm63xx_udc.c 		list_add_tail(&breq->queue, &bep->queue);
queue            1187 drivers/usb/gadget/udc/bcm63xx_udc.c 		if (list_is_singular(&bep->queue))
queue            1214 drivers/usb/gadget/udc/bcm63xx_udc.c 	if (list_empty(&bep->queue)) {
queue            1219 drivers/usb/gadget/udc/bcm63xx_udc.c 	cur = list_first_entry(&bep->queue, struct bcm63xx_req, queue);
queue            1224 drivers/usb/gadget/udc/bcm63xx_udc.c 		list_del(&breq->queue);
queue            1226 drivers/usb/gadget/udc/bcm63xx_udc.c 		if (!list_empty(&bep->queue)) {
queue            1229 drivers/usb/gadget/udc/bcm63xx_udc.c 			next = list_first_entry(&bep->queue,
queue            1230 drivers/usb/gadget/udc/bcm63xx_udc.c 				struct bcm63xx_req, queue);
queue            1234 drivers/usb/gadget/udc/bcm63xx_udc.c 		list_del(&breq->queue);
queue            1294 drivers/usb/gadget/udc/bcm63xx_udc.c 	.queue		= bcm63xx_udc_queue,
queue            2080 drivers/usb/gadget/udc/bcm63xx_udc.c 	} else if (!list_empty(&bep->queue)) {
queue            2081 drivers/usb/gadget/udc/bcm63xx_udc.c 		breq = list_first_entry(&bep->queue, struct bcm63xx_req, queue);
queue            2089 drivers/usb/gadget/udc/bcm63xx_udc.c 				list_del(&breq->queue);
queue            2093 drivers/usb/gadget/udc/bcm63xx_udc.c 				if (!list_empty(&bep->queue)) {
queue            2096 drivers/usb/gadget/udc/bcm63xx_udc.c 					next = list_first_entry(&bep->queue,
queue            2097 drivers/usb/gadget/udc/bcm63xx_udc.c 						struct bcm63xx_req, queue);
queue            2212 drivers/usb/gadget/udc/bcm63xx_udc.c 			list_for_each(pos, &iudma->bep->queue)
queue             335 drivers/usb/gadget/udc/bdc/bdc.h 	struct list_head	queue;
queue             352 drivers/usb/gadget/udc/bdc/bdc.h 	struct list_head queue;
queue             530 drivers/usb/gadget/udc/bdc/bdc_ep.c 	list_add_tail(&req->queue, &ep->queue);
queue             547 drivers/usb/gadget/udc/bdc/bdc_ep.c 	list_del(&req->queue);
queue             575 drivers/usb/gadget/udc/bdc/bdc_ep.c 	while (!list_empty(&ep->queue)) {
queue             576 drivers/usb/gadget/udc/bdc/bdc_ep.c 		req = list_entry(ep->queue.next, struct bdc_req,
queue             577 drivers/usb/gadget/udc/bdc/bdc_ep.c 				queue);
queue             826 drivers/usb/gadget/udc/bdc/bdc_ep.c 	first_req = list_first_entry(&ep->queue, struct bdc_req,
queue             827 drivers/usb/gadget/udc/bdc/bdc_ep.c 			queue);
queue             972 drivers/usb/gadget/udc/bdc/bdc_ep.c 	if (unlikely(list_empty(&ep->queue))) {
queue             976 drivers/usb/gadget/udc/bdc/bdc_ep.c 	req = list_entry(ep->queue.next, struct bdc_req,
queue             977 drivers/usb/gadget/udc/bdc/bdc_ep.c 			queue);
queue            1096 drivers/usb/gadget/udc/bdc/bdc_ep.c 	while (!list_empty(&ep->queue)) {
queue            1097 drivers/usb/gadget/udc/bdc/bdc_ep.c 		req = list_entry(ep->queue.next, struct bdc_req,
queue            1098 drivers/usb/gadget/udc/bdc/bdc_ep.c 				queue);
queue            1773 drivers/usb/gadget/udc/bdc/bdc_ep.c 	list_for_each_entry(req, &ep->queue, queue) {
queue            1809 drivers/usb/gadget/udc/bdc/bdc_ep.c 	else if (!list_empty(&ep->queue))
queue            1929 drivers/usb/gadget/udc/bdc/bdc_ep.c 	.queue = bdc_gadget_ep_queue,
queue            1985 drivers/usb/gadget/udc/bdc/bdc_ep.c 	INIT_LIST_HEAD(&ep->queue);
queue             281 drivers/usb/gadget/udc/core.c 	ret = ep->ops->queue(ep, req, gfp_flags);
queue              83 drivers/usb/gadget/udc/dummy_hcd.c 	struct list_head		queue;
queue              96 drivers/usb/gadget/udc/dummy_hcd.c 	struct list_head		queue;		/* ep's requests */
queue             331 drivers/usb/gadget/udc/dummy_hcd.c 	while (!list_empty(&ep->queue)) {
queue             334 drivers/usb/gadget/udc/dummy_hcd.c 		req = list_entry(ep->queue.next, struct dummy_request, queue);
queue             335 drivers/usb/gadget/udc/dummy_hcd.c 		list_del_init(&req->queue);
queue             665 drivers/usb/gadget/udc/dummy_hcd.c 	INIT_LIST_HEAD(&req->queue);
queue             679 drivers/usb/gadget/udc/dummy_hcd.c 	WARN_ON(!list_empty(&req->queue));
queue             697 drivers/usb/gadget/udc/dummy_hcd.c 	if (!_req || !list_empty(&req->queue) || !_req->complete)
queue             719 drivers/usb/gadget/udc/dummy_hcd.c 			list_empty(&dum->fifo_req.queue) &&
queue             720 drivers/usb/gadget/udc/dummy_hcd.c 			list_empty(&ep->queue) &&
queue             729 drivers/usb/gadget/udc/dummy_hcd.c 		list_add_tail(&req->queue, &ep->queue);
queue             736 drivers/usb/gadget/udc/dummy_hcd.c 		list_add_tail(&req->queue, &ep->queue);
queue             763 drivers/usb/gadget/udc/dummy_hcd.c 	list_for_each_entry(req, &ep->queue, queue) {
queue             765 drivers/usb/gadget/udc/dummy_hcd.c 			list_del_init(&req->queue);
queue             798 drivers/usb/gadget/udc/dummy_hcd.c 			!list_empty(&ep->queue))
queue             829 drivers/usb/gadget/udc/dummy_hcd.c 	.queue		= dummy_queue,
queue            1043 drivers/usb/gadget/udc/dummy_hcd.c 		INIT_LIST_HEAD(&ep->queue);
queue            1048 drivers/usb/gadget/udc/dummy_hcd.c 	INIT_LIST_HEAD(&dum->fifo_req.queue);
queue            1389 drivers/usb/gadget/udc/dummy_hcd.c 	list_for_each_entry(req, &ep->queue, queue) {
queue            1493 drivers/usb/gadget/udc/dummy_hcd.c 			list_del_init(&req->queue);
queue            1868 drivers/usb/gadget/udc/dummy_hcd.c 			list_for_each_entry(req, &ep->queue, queue) {
queue            1869 drivers/usb/gadget/udc/dummy_hcd.c 				list_del_init(&req->queue);
queue              61 drivers/usb/gadget/udc/fotg210-udc.c 	list_del_init(&req->queue);
queue              74 drivers/usb/gadget/udc/fotg210-udc.c 		if (list_empty(&ep->queue))
queue             219 drivers/usb/gadget/udc/fotg210-udc.c 	while (!list_empty(&ep->queue)) {
queue             220 drivers/usb/gadget/udc/fotg210-udc.c 		req = list_entry(ep->queue.next,
queue             221 drivers/usb/gadget/udc/fotg210-udc.c 			struct fotg210_request, queue);
queue             239 drivers/usb/gadget/udc/fotg210-udc.c 	INIT_LIST_HEAD(&req->queue);
queue             409 drivers/usb/gadget/udc/fotg210-udc.c 	if (list_empty(&ep->queue))
queue             412 drivers/usb/gadget/udc/fotg210-udc.c 	list_add_tail(&req->queue, &ep->queue);
queue             437 drivers/usb/gadget/udc/fotg210-udc.c 	if (!list_empty(&ep->queue))
queue             501 drivers/usb/gadget/udc/fotg210-udc.c 		if (!list_empty(&ep->queue))
queue             530 drivers/usb/gadget/udc/fotg210-udc.c 	.queue		= fotg210_ep_queue,
queue             794 drivers/usb/gadget/udc/fotg210-udc.c 	if (!list_empty(&ep->queue) && !ep->dir_in) {
queue             797 drivers/usb/gadget/udc/fotg210-udc.c 		req = list_first_entry(&ep->queue,
queue             798 drivers/usb/gadget/udc/fotg210-udc.c 			struct fotg210_request, queue);
queue             814 drivers/usb/gadget/udc/fotg210-udc.c 	if ((!list_empty(&ep->queue)) && (ep->dir_in)) {
queue             817 drivers/usb/gadget/udc/fotg210-udc.c 		req = list_entry(ep->queue.next,
queue             818 drivers/usb/gadget/udc/fotg210-udc.c 				struct fotg210_request, queue);
queue             840 drivers/usb/gadget/udc/fotg210-udc.c 	struct fotg210_request *req = list_entry(ep->queue.next,
queue             841 drivers/usb/gadget/udc/fotg210-udc.c 					struct fotg210_request, queue);
queue             850 drivers/usb/gadget/udc/fotg210-udc.c 	struct fotg210_request *req = list_entry(ep->queue.next,
queue             851 drivers/usb/gadget/udc/fotg210-udc.c 						 struct fotg210_request, queue);
queue            1138 drivers/usb/gadget/udc/fotg210-udc.c 		INIT_LIST_HEAD(&ep->queue);
queue             212 drivers/usb/gadget/udc/fotg210.h 	struct list_head	queue;
queue             219 drivers/usb/gadget/udc/fotg210.h 	struct list_head	queue;
queue              85 drivers/usb/gadget/udc/fsl_qe_udc.c 	list_del_init(&req->queue);
queue             130 drivers/usb/gadget/udc/fsl_qe_udc.c 	while (!list_empty(&ep->queue)) {
queue             132 drivers/usb/gadget/udc/fsl_qe_udc.c 		req = list_entry(ep->queue.next, struct qe_req, queue);
queue             773 drivers/usb/gadget/udc/fsl_qe_udc.c 	if (ep->has_data <= 0 && (!list_empty(&ep->queue)))
queue             900 drivers/usb/gadget/udc/fsl_qe_udc.c 	if (list_empty(&ep->queue)) {
queue             903 drivers/usb/gadget/udc/fsl_qe_udc.c 		req = list_entry(ep->queue.next, struct qe_req, queue);
queue             915 drivers/usb/gadget/udc/fsl_qe_udc.c 				if (list_empty(&ep->queue) && ep->epnum != 0)
queue             953 drivers/usb/gadget/udc/fsl_qe_udc.c 			if (list_empty(&ep->queue)) {
queue            1046 drivers/usb/gadget/udc/fsl_qe_udc.c 	if (list_empty(&ep->queue)) {
queue            1170 drivers/usb/gadget/udc/fsl_qe_udc.c 		if (!list_empty(&ep->queue)) {
queue            1171 drivers/usb/gadget/udc/fsl_qe_udc.c 			ep->tx_req = list_entry(ep->queue.next,	struct qe_req,
queue            1172 drivers/usb/gadget/udc/fsl_qe_udc.c 							queue);
queue            1474 drivers/usb/gadget/udc/fsl_qe_udc.c 	if (list_empty(&ep->queue)) {
queue            1521 drivers/usb/gadget/udc/fsl_qe_udc.c 						if (list_empty(&ep->queue))
queue            1672 drivers/usb/gadget/udc/fsl_qe_udc.c 	INIT_LIST_HEAD(&req->queue);
queue            1697 drivers/usb/gadget/udc/fsl_qe_udc.c 			|| !list_empty(&req->queue)) {
queue            1732 drivers/usb/gadget/udc/fsl_qe_udc.c 	list_add_tail(&req->queue, &ep->queue);
queue            1782 drivers/usb/gadget/udc/fsl_qe_udc.c 	list_for_each_entry(req, &ep->queue, queue) {
queue            1820 drivers/usb/gadget/udc/fsl_qe_udc.c 	if (value && ep_is_in(ep) && !list_empty(&ep->queue)) {
queue            1853 drivers/usb/gadget/udc/fsl_qe_udc.c 	.queue = qe_ep_queue,
queue            2432 drivers/usb/gadget/udc/fsl_qe_udc.c 	INIT_LIST_HEAD(&ep->queue);
queue             254 drivers/usb/gadget/udc/fsl_qe_udc.h 	struct list_head queue;
queue             263 drivers/usb/gadget/udc/fsl_qe_udc.h 	struct list_head queue;
queue             168 drivers/usb/gadget/udc/fsl_udc_core.c 	list_del_init(&req->queue);
queue             215 drivers/usb/gadget/udc/fsl_udc_core.c 	while (!list_empty(&ep->queue)) {
queue             218 drivers/usb/gadget/udc/fsl_udc_core.c 		req = list_entry(ep->queue.next, struct fsl_req, queue);
queue             686 drivers/usb/gadget/udc/fsl_udc_core.c 	INIT_LIST_HEAD(&req->queue);
queue             735 drivers/usb/gadget/udc/fsl_udc_core.c 	if (!(list_empty(&ep->queue)) && !(ep_index(ep) == 0)) {
queue             738 drivers/usb/gadget/udc/fsl_udc_core.c 		lastreq = list_entry(ep->queue.prev, struct fsl_req, queue);
queue             878 drivers/usb/gadget/udc/fsl_udc_core.c 			|| !list_empty(&req->queue)) {
queue             915 drivers/usb/gadget/udc/fsl_udc_core.c 		list_add_tail(&req->queue, &ep->queue);
queue             947 drivers/usb/gadget/udc/fsl_udc_core.c 	list_for_each_entry(req, &ep->queue, queue) {
queue             957 drivers/usb/gadget/udc/fsl_udc_core.c 	if (ep->queue.next == &req->queue) {
queue             962 drivers/usb/gadget/udc/fsl_udc_core.c 		if (req->queue.next != &ep->queue) {
queue             965 drivers/usb/gadget/udc/fsl_udc_core.c 			next_req = list_entry(req->queue.next, struct fsl_req,
queue             966 drivers/usb/gadget/udc/fsl_udc_core.c 					queue);
queue             975 drivers/usb/gadget/udc/fsl_udc_core.c 		prev_req = list_entry(req->queue.prev, struct fsl_req, queue);
queue            1024 drivers/usb/gadget/udc/fsl_udc_core.c 	if (value && ep_is_in(ep) && !list_empty(&ep->queue)) {
queue            1125 drivers/usb/gadget/udc/fsl_udc_core.c 	.queue = fsl_ep_queue,
queue            1300 drivers/usb/gadget/udc/fsl_udc_core.c 	list_add_tail(&req->queue, &ep->queue);
queue            1384 drivers/usb/gadget/udc/fsl_udc_core.c 	list_add_tail(&req->queue, &ep->queue);
queue            1703 drivers/usb/gadget/udc/fsl_udc_core.c 		list_for_each_entry_safe(curr_req, temp_req, &curr_ep->queue,
queue            1704 drivers/usb/gadget/udc/fsl_udc_core.c 				queue) {
queue            2174 drivers/usb/gadget/udc/fsl_udc_core.c 	if (list_empty(&ep->queue)) {
queue            2177 drivers/usb/gadget/udc/fsl_udc_core.c 		list_for_each_entry(req, &ep->queue, queue) {
queue            2193 drivers/usb/gadget/udc/fsl_udc_core.c 			if (list_empty(&ep->queue)) {
queue            2196 drivers/usb/gadget/udc/fsl_udc_core.c 				list_for_each_entry(req, &ep->queue, queue) {
queue            2347 drivers/usb/gadget/udc/fsl_udc_core.c 	INIT_LIST_HEAD(&ep->queue);
queue             445 drivers/usb/gadget/udc/fsl_usb2_udc.h 	struct list_head queue;
queue             460 drivers/usb/gadget/udc/fsl_usb2_udc.h 	struct list_head queue;
queue             258 drivers/usb/gadget/udc/fusb300_udc.c 	while (!list_empty(&ep->queue)) {
queue             259 drivers/usb/gadget/udc/fusb300_udc.c 		req = list_entry(ep->queue.next, struct fusb300_request, queue);
queue             276 drivers/usb/gadget/udc/fusb300_udc.c 	INIT_LIST_HEAD(&req->queue);
queue             435 drivers/usb/gadget/udc/fusb300_udc.c 	if (list_empty(&ep->queue))
queue             438 drivers/usb/gadget/udc/fusb300_udc.c 	list_add_tail(&req->queue, &ep->queue);
queue             463 drivers/usb/gadget/udc/fusb300_udc.c 	if (!list_empty(&ep->queue))
queue             483 drivers/usb/gadget/udc/fusb300_udc.c 	if (!list_empty(&ep->queue)) {
queue             525 drivers/usb/gadget/udc/fusb300_udc.c 	.queue		= fusb300_queue,
queue             779 drivers/usb/gadget/udc/fusb300_udc.c 				if (!list_empty(&ep->queue))
queue             867 drivers/usb/gadget/udc/fusb300_udc.c 	list_del_init(&req->queue);
queue             881 drivers/usb/gadget/udc/fusb300_udc.c 		if (!list_empty(&ep->queue))
queue             961 drivers/usb/gadget/udc/fusb300_udc.c 	struct fusb300_request *req = list_entry(ep->queue.next,
queue             962 drivers/usb/gadget/udc/fusb300_udc.c 					struct fusb300_request, queue);
queue             972 drivers/usb/gadget/udc/fusb300_udc.c 	struct fusb300_request *req = list_entry(ep->queue.next,
queue             973 drivers/usb/gadget/udc/fusb300_udc.c 						 struct fusb300_request, queue);
queue            1011 drivers/usb/gadget/udc/fusb300_udc.c 	if (!list_empty(&ep->queue)) {
queue            1014 drivers/usb/gadget/udc/fusb300_udc.c 		req = list_first_entry(&ep->queue,
queue            1015 drivers/usb/gadget/udc/fusb300_udc.c 			struct fusb300_request, queue);
queue            1032 drivers/usb/gadget/udc/fusb300_udc.c 	if ((!list_empty(&ep->queue)) && (fusb300->ep0_dir)) {
queue            1033 drivers/usb/gadget/udc/fusb300_udc.c 		req = list_entry(ep->queue.next,
queue            1034 drivers/usb/gadget/udc/fusb300_udc.c 				struct fusb300_request, queue);
queue            1449 drivers/usb/gadget/udc/fusb300_udc.c 		INIT_LIST_HEAD(&ep->queue);
queue             635 drivers/usb/gadget/udc/fusb300_udc.h 	struct list_head	queue;
queue             643 drivers/usb/gadget/udc/fusb300_udc.h 	struct list_head	queue;
queue             277 drivers/usb/gadget/udc/goku_udc.c 	INIT_LIST_HEAD(&req->queue);
queue             290 drivers/usb/gadget/udc/goku_udc.c 	WARN_ON(!list_empty(&req->queue));
queue             302 drivers/usb/gadget/udc/goku_udc.c 	list_del_init(&req->queue);
queue             488 drivers/usb/gadget/udc/goku_udc.c 			if (dbuff && !list_empty(&ep->queue)) {
queue             489 drivers/usb/gadget/udc/goku_udc.c 				req = list_entry(ep->queue.next,
queue             490 drivers/usb/gadget/udc/goku_udc.c 						struct goku_request, queue);
queue             522 drivers/usb/gadget/udc/goku_udc.c 	if (unlikely(list_empty (&ep->queue)))
queue             524 drivers/usb/gadget/udc/goku_udc.c 	req = list_entry(ep->queue.next, struct goku_request, queue);
queue             595 drivers/usb/gadget/udc/goku_udc.c 	if (unlikely(list_empty(&ep->queue))) {
queue             604 drivers/usb/gadget/udc/goku_udc.c 	req = list_entry(ep->queue.next, struct goku_request, queue);
queue             629 drivers/usb/gadget/udc/goku_udc.c 	if (list_empty(&ep->queue))
queue             631 drivers/usb/gadget/udc/goku_udc.c 	req = list_entry(ep->queue.next, struct goku_request, queue);
queue             649 drivers/usb/gadget/udc/goku_udc.c 	req = list_entry(ep->queue.next, struct goku_request, queue);
queue             719 drivers/usb/gadget/udc/goku_udc.c 			|| !_req->buf || !list_empty(&req->queue)))
queue             758 drivers/usb/gadget/udc/goku_udc.c 	if (list_empty(&ep->queue) && likely(!ep->stopped)) {
queue             776 drivers/usb/gadget/udc/goku_udc.c 		list_add_tail(&req->queue, &ep->queue);
queue             778 drivers/usb/gadget/udc/goku_udc.c 	if (likely(!list_empty(&ep->queue))
queue             796 drivers/usb/gadget/udc/goku_udc.c 	if (list_empty(&ep->queue))
queue             800 drivers/usb/gadget/udc/goku_udc.c 	while (!list_empty(&ep->queue)) {
queue             801 drivers/usb/gadget/udc/goku_udc.c 		req = list_entry(ep->queue.next, struct goku_request, queue);
queue             833 drivers/usb/gadget/udc/goku_udc.c 	list_for_each_entry (req, &ep->queue, queue) {
queue             842 drivers/usb/gadget/udc/goku_udc.c 	if (ep->dma && ep->queue.next == &req->queue && !ep->stopped) {
queue             846 drivers/usb/gadget/udc/goku_udc.c 	} else if (!list_empty(&req->queue))
queue             868 drivers/usb/gadget/udc/goku_udc.c 			if (list_empty(&ep->queue))
queue             870 drivers/usb/gadget/udc/goku_udc.c 			req = list_entry(ep->queue.next, struct goku_request,
queue             871 drivers/usb/gadget/udc/goku_udc.c 						queue);
queue             902 drivers/usb/gadget/udc/goku_udc.c 	if (!list_empty(&ep->queue))
queue             979 drivers/usb/gadget/udc/goku_udc.c 	.queue		= goku_queue,
queue            1215 drivers/usb/gadget/udc/goku_udc.c 		if (list_empty(&ep->queue)) {
queue            1221 drivers/usb/gadget/udc/goku_udc.c 		list_for_each_entry(req, &ep->queue, queue) {
queue            1222 drivers/usb/gadget/udc/goku_udc.c 			if (ep->dma && req->queue.prev == &ep->queue) {
queue            1272 drivers/usb/gadget/udc/goku_udc.c 		INIT_LIST_HEAD (&ep->queue);
queue            1674 drivers/usb/gadget/udc/goku_udc.c 		if (list_empty (&ep->queue))
queue             215 drivers/usb/gadget/udc/goku_udc.h 	struct list_head			queue;
queue             224 drivers/usb/gadget/udc/goku_udc.h 	struct list_head		queue;
queue             157 drivers/usb/gadget/udc/gr_udc.c 	if (list_empty(&ep->queue)) {
queue             163 drivers/usb/gadget/udc/gr_udc.c 	list_for_each_entry(req, &ep->queue, queue) {
queue             289 drivers/usb/gadget/udc/gr_udc.c 	list_del_init(&req->queue);
queue             354 drivers/usb/gadget/udc/gr_udc.c 	INIT_LIST_HEAD(&req->queue);
queue             369 drivers/usb/gadget/udc/gr_udc.c 	if (list_empty(&ep->queue)) {
queue             374 drivers/usb/gadget/udc/gr_udc.c 	req = list_first_entry(&ep->queue, struct gr_request, queue);
queue             410 drivers/usb/gadget/udc/gr_udc.c 	req = list_first_entry(&ep->queue, struct gr_request, queue);
queue             586 drivers/usb/gadget/udc/gr_udc.c 	if (unlikely(!req->req.buf || !list_empty(&req->queue))) {
queue             589 drivers/usb/gadget/udc/gr_udc.c 			ep->ep.name, req->req.buf, list_empty(&req->queue));
queue             620 drivers/usb/gadget/udc/gr_udc.c 	list_add_tail(&req->queue, &ep->queue);
queue             659 drivers/usb/gadget/udc/gr_udc.c 	while (!list_empty(&ep->queue)) {
queue             660 drivers/usb/gadget/udc/gr_udc.c 		req = list_first_entry(&ep->queue, struct gr_request, queue);
queue            1240 drivers/usb/gadget/udc/gr_udc.c 	req = list_first_entry(&ep->queue, struct gr_request, queue);
queue            1269 drivers/usb/gadget/udc/gr_udc.c 	req = list_first_entry(&ep->queue, struct gr_request, queue);
queue            1415 drivers/usb/gadget/udc/gr_udc.c 		if (!ep->stopped && !ep->callback && !list_empty(&ep->queue))
queue            1422 drivers/usb/gadget/udc/gr_udc.c 		if (!ep->stopped && !ep->callback && !list_empty(&ep->queue))
queue            1644 drivers/usb/gadget/udc/gr_udc.c 	WARN(!list_empty(&req->queue),
queue            1712 drivers/usb/gadget/udc/gr_udc.c 	list_for_each_entry(req, &ep->queue, queue) {
queue            1721 drivers/usb/gadget/udc/gr_udc.c 	if (list_first_entry(&ep->queue, struct gr_request, queue) == req) {
queue            1728 drivers/usb/gadget/udc/gr_udc.c 	} else if (!list_empty(&req->queue)) {
queue            1754 drivers/usb/gadget/udc/gr_udc.c 	if (halt && ep->is_in && !list_empty(&ep->queue)) {
queue            1831 drivers/usb/gadget/udc/gr_udc.c 	.queue		= gr_queue_ext,
queue            1979 drivers/usb/gadget/udc/gr_udc.c 	INIT_LIST_HEAD(&ep->queue);
queue             152 drivers/usb/gadget/udc/gr_udc.h 	struct list_head queue;
queue             163 drivers/usb/gadget/udc/gr_udc.h 	struct list_head queue;
queue             100 drivers/usb/gadget/udc/lpc32xx_udc.c 	struct list_head	queue;
queue             177 drivers/usb/gadget/udc/lpc32xx_udc.c 	struct list_head	queue;
queue             485 drivers/usb/gadget/udc/lpc32xx_udc.c 	if (list_empty(&ep->queue))
queue             488 drivers/usb/gadget/udc/lpc32xx_udc.c 		list_for_each_entry(req, &ep->queue, queue) {
queue             995 drivers/usb/gadget/udc/lpc32xx_udc.c 	req = list_entry(ep->queue.next, struct lpc32xx_request, queue);
queue            1021 drivers/usb/gadget/udc/lpc32xx_udc.c 	req = list_entry(ep->queue.next, struct lpc32xx_request, queue);
queue            1401 drivers/usb/gadget/udc/lpc32xx_udc.c 		INIT_LIST_HEAD(&ep->queue);
queue            1413 drivers/usb/gadget/udc/lpc32xx_udc.c 	list_del_init(&req->queue);
queue            1440 drivers/usb/gadget/udc/lpc32xx_udc.c 	while (!list_empty(&ep->queue)) {
queue            1441 drivers/usb/gadget/udc/lpc32xx_udc.c 		req = list_entry(ep->queue.next, struct lpc32xx_request, queue);
queue            1458 drivers/usb/gadget/udc/lpc32xx_udc.c 	if (list_empty(&ep0->queue))
queue            1462 drivers/usb/gadget/udc/lpc32xx_udc.c 		req = list_entry(ep0->queue.next, struct lpc32xx_request,
queue            1463 drivers/usb/gadget/udc/lpc32xx_udc.c 				 queue);
queue            1496 drivers/usb/gadget/udc/lpc32xx_udc.c 	if (list_empty(&ep0->queue))
queue            1499 drivers/usb/gadget/udc/lpc32xx_udc.c 		req = list_entry(ep0->queue.next, struct lpc32xx_request,
queue            1500 drivers/usb/gadget/udc/lpc32xx_udc.c 				 queue);
queue            1726 drivers/usb/gadget/udc/lpc32xx_udc.c 	INIT_LIST_HEAD(&req->queue);
queue            1740 drivers/usb/gadget/udc/lpc32xx_udc.c 	BUG_ON(!list_empty(&req->queue));
queue            1758 drivers/usb/gadget/udc/lpc32xx_udc.c 	    !list_empty(&req->queue))
queue            1811 drivers/usb/gadget/udc/lpc32xx_udc.c 	if (list_empty(&ep->queue)) {
queue            1812 drivers/usb/gadget/udc/lpc32xx_udc.c 		list_add_tail(&req->queue, &ep->queue);
queue            1834 drivers/usb/gadget/udc/lpc32xx_udc.c 		list_add_tail(&req->queue, &ep->queue);
queue            1855 drivers/usb/gadget/udc/lpc32xx_udc.c 	list_for_each_entry(req, &ep->queue, queue) {
queue            1921 drivers/usb/gadget/udc/lpc32xx_udc.c 	.queue		= lpc32xx_ep_queue,
queue            1973 drivers/usb/gadget/udc/lpc32xx_udc.c 	req = list_entry(ep->queue.next, struct lpc32xx_request, queue);
queue            1978 drivers/usb/gadget/udc/lpc32xx_udc.c 		if (!list_empty(&ep->queue)) {
queue            2000 drivers/usb/gadget/udc/lpc32xx_udc.c 	req = list_entry(ep->queue.next, struct lpc32xx_request, queue);
queue            2099 drivers/usb/gadget/udc/lpc32xx_udc.c 	if (!list_empty((&ep->queue))) {
queue              99 drivers/usb/gadget/udc/m66592-udc.c 	INIT_LIST_HEAD(&m66592->ep[0].queue);
queue             363 drivers/usb/gadget/udc/m66592-udc.c 	INIT_LIST_HEAD(&ep->queue);
queue             719 drivers/usb/gadget/udc/m66592-udc.c 	list_del_init(&req->queue);
queue             725 drivers/usb/gadget/udc/m66592-udc.c 	if (!list_empty(&ep->queue))
queue             733 drivers/usb/gadget/udc/m66592-udc.c 		req = list_entry(ep->queue.next, struct m66592_request, queue);
queue             907 drivers/usb/gadget/udc/m66592-udc.c 		req = list_entry(ep->queue.next, struct m66592_request, queue);
queue             915 drivers/usb/gadget/udc/m66592-udc.c 				req = list_entry(ep->queue.next,
queue             916 drivers/usb/gadget/udc/m66592-udc.c 						 struct m66592_request, queue);
queue             938 drivers/usb/gadget/udc/m66592-udc.c 		req = list_entry(ep->queue.next, struct m66592_request, queue);
queue             951 drivers/usb/gadget/udc/m66592-udc.c 					req = list_entry(ep->queue.next,
queue             953 drivers/usb/gadget/udc/m66592-udc.c 							 queue);
queue             954 drivers/usb/gadget/udc/m66592-udc.c 					if (!list_empty(&ep->queue))
queue            1020 drivers/usb/gadget/udc/m66592-udc.c 		req = list_entry(ep->queue.next,
queue            1021 drivers/usb/gadget/udc/m66592-udc.c 		struct m66592_request, queue);
queue            1024 drivers/usb/gadget/udc/m66592-udc.c 			if (list_empty(&ep->queue))
queue            1027 drivers/usb/gadget/udc/m66592-udc.c 		} else if (!list_empty(&ep->queue))
queue            1169 drivers/usb/gadget/udc/m66592-udc.c 		req = list_entry(ep->queue.next, struct m66592_request, queue);
queue            1317 drivers/usb/gadget/udc/m66592-udc.c 	while (!list_empty(&ep->queue)) {
queue            1318 drivers/usb/gadget/udc/m66592-udc.c 		req = list_entry(ep->queue.next, struct m66592_request, queue);
queue            1337 drivers/usb/gadget/udc/m66592-udc.c 	INIT_LIST_HEAD(&req->queue);
queue            1366 drivers/usb/gadget/udc/m66592-udc.c 	if (list_empty(&ep->queue))
queue            1369 drivers/usb/gadget/udc/m66592-udc.c 	list_add_tail(&req->queue, &ep->queue);
queue            1395 drivers/usb/gadget/udc/m66592-udc.c 	if (!list_empty(&ep->queue))
queue            1409 drivers/usb/gadget/udc/m66592-udc.c 	if (!list_empty(&ep->queue)) {
queue            1429 drivers/usb/gadget/udc/m66592-udc.c 	if (list_empty(&ep->queue) && !ep->busy) {
queue            1443 drivers/usb/gadget/udc/m66592-udc.c 	.queue		= m66592_queue,
queue            1626 drivers/usb/gadget/udc/m66592-udc.c 		INIT_LIST_HEAD(&ep->queue);
queue             438 drivers/usb/gadget/udc/m66592-udc.h 	struct list_head	queue;
queue             445 drivers/usb/gadget/udc/m66592-udc.h 	struct list_head	queue;
queue             290 drivers/usb/gadget/udc/mv_u3d.h 	struct list_head	queue;	/* ep request queued on hardware */
queue             308 drivers/usb/gadget/udc/mv_u3d.h 	struct list_head	queue;	/* ep request queued on hardware */
queue             183 drivers/usb/gadget/udc/mv_u3d_core.c 	list_del_init(&req->queue);
queue             236 drivers/usb/gadget/udc/mv_u3d_core.c 	if (!list_empty(&ep->queue)) {
queue             512 drivers/usb/gadget/udc/mv_u3d_core.c 	list_add_tail(&req->queue, &ep->queue);
queue             675 drivers/usb/gadget/udc/mv_u3d_core.c 	INIT_LIST_HEAD(&req->queue);
queue             800 drivers/usb/gadget/udc/mv_u3d_core.c 			|| !list_empty(&req->queue)) {
queue             807 drivers/usb/gadget/udc/mv_u3d_core.c 			list_empty(&req->queue));
queue             865 drivers/usb/gadget/udc/mv_u3d_core.c 	list_for_each_entry(req, &ep->queue, queue) {
queue             875 drivers/usb/gadget/udc/mv_u3d_core.c 	if (ep->queue.next == &req->queue) {
queue             880 drivers/usb/gadget/udc/mv_u3d_core.c 		if (req->queue.next != &ep->queue) {
queue             884 drivers/usb/gadget/udc/mv_u3d_core.c 			next_req = list_entry(req->queue.next,
queue             885 drivers/usb/gadget/udc/mv_u3d_core.c 					struct mv_u3d_req, queue);
queue             966 drivers/usb/gadget/udc/mv_u3d_core.c 			&& !list_empty(&ep->queue)) {
queue            1002 drivers/usb/gadget/udc/mv_u3d_core.c 	.queue		= mv_u3d_ep_queue,
queue            1320 drivers/usb/gadget/udc/mv_u3d_core.c 	INIT_LIST_HEAD(&ep->queue);
queue            1351 drivers/usb/gadget/udc/mv_u3d_core.c 		INIT_LIST_HEAD(&ep->queue);
queue            1368 drivers/usb/gadget/udc/mv_u3d_core.c 	while (!list_empty(&ep->queue)) {
queue            1370 drivers/usb/gadget/udc/mv_u3d_core.c 		req = list_entry(ep->queue.next, struct mv_u3d_req, queue);
queue            1654 drivers/usb/gadget/udc/mv_u3d_core.c 			&curr_ep->queue, queue) {
queue            1906 drivers/usb/gadget/udc/mv_u3d_core.c 	INIT_LIST_HEAD(&u3d->status_req->queue);
queue             228 drivers/usb/gadget/udc/mv_udc.h 	struct list_head	queue;
queue             243 drivers/usb/gadget/udc/mv_udc.h 	struct list_head	queue;
queue             218 drivers/usb/gadget/udc/mv_udc_core.c 	list_del_init(&req->queue);
queue             267 drivers/usb/gadget/udc/mv_udc_core.c 	if (!(list_empty(&ep->queue))) {
queue             269 drivers/usb/gadget/udc/mv_udc_core.c 		lastreq = list_entry(ep->queue.prev, struct mv_req, queue);
queue             606 drivers/usb/gadget/udc/mv_udc_core.c 	INIT_LIST_HEAD(&req->queue);
queue             693 drivers/usb/gadget/udc/mv_udc_core.c 			|| !list_empty(&req->queue)) {
queue             739 drivers/usb/gadget/udc/mv_udc_core.c 	list_add_tail(&req->queue, &ep->queue);
queue             797 drivers/usb/gadget/udc/mv_udc_core.c 	list_for_each_entry(req, &ep->queue, queue) {
queue             807 drivers/usb/gadget/udc/mv_udc_core.c 	if (ep->queue.next == &req->queue) {
queue             812 drivers/usb/gadget/udc/mv_udc_core.c 		if (req->queue.next != &ep->queue) {
queue             815 drivers/usb/gadget/udc/mv_udc_core.c 			next_req = list_entry(req->queue.next,
queue             816 drivers/usb/gadget/udc/mv_udc_core.c 				struct mv_req, queue);
queue             832 drivers/usb/gadget/udc/mv_udc_core.c 		prev_req = list_entry(req->queue.prev, struct mv_req, queue);
queue             912 drivers/usb/gadget/udc/mv_udc_core.c 	if (halt && (ep_dir(ep) == EP_DIR_IN) && !list_empty(&ep->queue)) {
queue             950 drivers/usb/gadget/udc/mv_udc_core.c 	.queue		= mv_ep_queue,
queue            1256 drivers/usb/gadget/udc/mv_udc_core.c 	INIT_LIST_HEAD(&ep->queue);
queue            1285 drivers/usb/gadget/udc/mv_udc_core.c 		INIT_LIST_HEAD(&ep->queue);
queue            1303 drivers/usb/gadget/udc/mv_udc_core.c 	while (!list_empty(&ep->queue)) {
queue            1305 drivers/usb/gadget/udc/mv_udc_core.c 		req = list_entry(ep->queue.next, struct mv_req, queue);
queue            1494 drivers/usb/gadget/udc/mv_udc_core.c 	list_add_tail(&req->queue, &ep->queue);
queue            1837 drivers/usb/gadget/udc/mv_udc_core.c 			&curr_ep->queue, queue) {
queue            2231 drivers/usb/gadget/udc/mv_udc_core.c 	INIT_LIST_HEAD(&udc->status_req->queue);
queue             254 drivers/usb/gadget/udc/net2272.c 	INIT_LIST_HEAD(&ep->queue);
queue             328 drivers/usb/gadget/udc/net2272.c 	INIT_LIST_HEAD(&req->queue);
queue             342 drivers/usb/gadget/udc/net2272.c 	WARN_ON(!list_empty(&req->queue));
queue             360 drivers/usb/gadget/udc/net2272.c 	list_del_init(&req->queue);
queue             465 drivers/usb/gadget/udc/net2272.c 			if (!list_empty(&ep->queue)) {
queue             466 drivers/usb/gadget/udc/net2272.c 				req = list_entry(ep->queue.next,
queue             468 drivers/usb/gadget/udc/net2272.c 						queue);
queue             594 drivers/usb/gadget/udc/net2272.c 			if (!list_empty(&ep->queue)) {
queue             595 drivers/usb/gadget/udc/net2272.c 				req = list_entry(ep->queue.next,
queue             596 drivers/usb/gadget/udc/net2272.c 					struct net2272_request, queue);
queue             614 drivers/usb/gadget/udc/net2272.c 	if (unlikely(list_empty(&ep->queue)))
queue             617 drivers/usb/gadget/udc/net2272.c 	req = list_entry(ep->queue.next, struct net2272_request, queue);
queue             819 drivers/usb/gadget/udc/net2272.c 			|| !list_empty(&req->queue))
queue             846 drivers/usb/gadget/udc/net2272.c 	if (list_empty(&ep->queue) && !ep->stopped) {
queue             897 drivers/usb/gadget/udc/net2272.c 		list_add_tail(&req->queue, &ep->queue);
queue             899 drivers/usb/gadget/udc/net2272.c 	if (likely(!list_empty(&ep->queue)))
queue             916 drivers/usb/gadget/udc/net2272.c 	while (!list_empty(&ep->queue)) {
queue             917 drivers/usb/gadget/udc/net2272.c 		req = list_entry(ep->queue.next,
queue             919 drivers/usb/gadget/udc/net2272.c 				queue);
queue             942 drivers/usb/gadget/udc/net2272.c 	list_for_each_entry(req, &ep->queue, queue) {
queue             953 drivers/usb/gadget/udc/net2272.c 	if (ep->queue.next == &req->queue) {
queue             982 drivers/usb/gadget/udc/net2272.c 	if (!list_empty(&ep->queue))
queue            1064 drivers/usb/gadget/udc/net2272.c 	.queue         = net2272_queue,
queue            1513 drivers/usb/gadget/udc/net2272.c 	if (!list_empty(&ep->queue))
queue            1514 drivers/usb/gadget/udc/net2272.c 		req = list_entry(ep->queue.next,
queue            1515 drivers/usb/gadget/udc/net2272.c 				struct net2272_request, queue);
queue            1546 drivers/usb/gadget/udc/net2272.c 		if (!list_empty(&ep->queue)) {
queue            1547 drivers/usb/gadget/udc/net2272.c 			req = list_entry(ep->queue.next,
queue            1548 drivers/usb/gadget/udc/net2272.c 					struct net2272_request, queue);
queue            1590 drivers/usb/gadget/udc/net2272.c 	if (!list_empty(&ep->queue))
queue            1591 drivers/usb/gadget/udc/net2272.c 		req = list_entry(ep->queue.next,
queue            1592 drivers/usb/gadget/udc/net2272.c 			struct net2272_request, queue);
queue            1733 drivers/usb/gadget/udc/net2272.c 		while (!list_empty(&ep->queue)) {
queue            1734 drivers/usb/gadget/udc/net2272.c 			req = list_entry(ep->queue.next,
queue            1735 drivers/usb/gadget/udc/net2272.c 				struct net2272_request, queue);
queue             425 drivers/usb/gadget/udc/net2272.h 	struct list_head queue;
queue             582 drivers/usb/gadget/udc/net2272.h 	struct list_head queue;
queue             385 drivers/usb/gadget/udc/net2280.c 	INIT_LIST_HEAD(&ep->queue);
queue             461 drivers/usb/gadget/udc/net2280.c 	INIT_LIST_HEAD(&ep->queue);
queue             562 drivers/usb/gadget/udc/net2280.c 	INIT_LIST_HEAD(&req->queue);
queue             594 drivers/usb/gadget/udc/net2280.c 	WARN_ON(!list_empty(&req->queue));
queue             959 drivers/usb/gadget/udc/net2280.c 	list_del_init(&req->queue);
queue            1004 drivers/usb/gadget/udc/net2280.c 				!list_empty(&req->queue)) {
queue            1041 drivers/usb/gadget/udc/net2280.c 	if  (list_empty(&ep->queue) && !ep->stopped &&
queue            1113 drivers/usb/gadget/udc/net2280.c 		list_add_tail(&req->queue, &ep->queue);
queue            1140 drivers/usb/gadget/udc/net2280.c 	while (!list_empty(&ep->queue)) {
queue            1144 drivers/usb/gadget/udc/net2280.c 		req = list_entry(ep->queue.next,
queue            1145 drivers/usb/gadget/udc/net2280.c 				struct net2280_request, queue);
queue            1206 drivers/usb/gadget/udc/net2280.c 	req = list_entry(ep->queue.next, struct net2280_request, queue);
queue            1214 drivers/usb/gadget/udc/net2280.c 	if (likely(!list_empty(&ep->queue))) {
queue            1232 drivers/usb/gadget/udc/net2280.c 	while (!list_empty(&ep->queue)) {
queue            1233 drivers/usb/gadget/udc/net2280.c 		req = list_entry(ep->queue.next,
queue            1235 drivers/usb/gadget/udc/net2280.c 				queue);
queue            1270 drivers/usb/gadget/udc/net2280.c 	list_for_each_entry(req, &ep->queue, queue) {
queue            1282 drivers/usb/gadget/udc/net2280.c 	if (ep->queue.next == &req->queue) {
queue            1287 drivers/usb/gadget/udc/net2280.c 			if (likely(ep->queue.next == &req->queue)) {
queue            1307 drivers/usb/gadget/udc/net2280.c 		if (list_empty(&ep->queue))
queue            1314 drivers/usb/gadget/udc/net2280.c 				start_dma(ep, list_entry(ep->queue.next,
queue            1315 drivers/usb/gadget/udc/net2280.c 					struct net2280_request, queue));
queue            1350 drivers/usb/gadget/udc/net2280.c 	if (!list_empty(&ep->queue)) {
queue            1371 drivers/usb/gadget/udc/net2280.c 				!list_empty(&ep->queue) && ep->td_dma)
queue            1456 drivers/usb/gadget/udc/net2280.c 	.queue		= net2280_queue,
queue            1847 drivers/usb/gadget/udc/net2280.c 		if (list_empty(&ep->queue)) {
queue            1855 drivers/usb/gadget/udc/net2280.c 		list_for_each_entry(req, &ep->queue, queue) {
queue            2519 drivers/usb/gadget/udc/net2280.c 	if (!list_empty(&ep->queue))
queue            2520 drivers/usb/gadget/udc/net2280.c 		req = list_entry(ep->queue.next,
queue            2521 drivers/usb/gadget/udc/net2280.c 			struct net2280_request, queue);
queue            2615 drivers/usb/gadget/udc/net2280.c 				if (unlikely(list_empty(&ep->queue) ||
queue            2620 drivers/usb/gadget/udc/net2280.c 				req = list_entry(ep->queue.next,
queue            2621 drivers/usb/gadget/udc/net2280.c 					struct net2280_request, queue);
queue            2685 drivers/usb/gadget/udc/net2280.c 			if (!list_empty(&ep->queue))
queue            2731 drivers/usb/gadget/udc/net2280.c 			if (!list_empty(&ep->queue) && !ep->stopped)
queue            2732 drivers/usb/gadget/udc/net2280.c 				req = list_entry(ep->queue.next,
queue            2733 drivers/usb/gadget/udc/net2280.c 					struct net2280_request, queue);
queue            2965 drivers/usb/gadget/udc/net2280.c 			if (!list_empty(&e->queue) && e->td_dma)
queue            3134 drivers/usb/gadget/udc/net2280.c 		while (!list_empty(&ep->queue)) {
queue            3135 drivers/usb/gadget/udc/net2280.c 			req = list_entry(ep->queue.next,
queue            3136 drivers/usb/gadget/udc/net2280.c 					struct net2280_request, queue);
queue            3253 drivers/usb/gadget/udc/net2280.c 					!list_empty(&e->queue) && e->td_dma)
queue            3507 drivers/usb/gadget/udc/net2280.c 		if (!list_empty(&ep->queue)) {
queue             101 drivers/usb/gadget/udc/net2280.h 	struct list_head			queue;
queue             143 drivers/usb/gadget/udc/net2280.h 	struct list_head		queue;
queue             272 drivers/usb/gadget/udc/omap_udc.c 	INIT_LIST_HEAD(&req->queue);
queue             293 drivers/usb/gadget/udc/omap_udc.c 	list_del_init(&req->queue);
queue             644 drivers/usb/gadget/udc/omap_udc.c 		if (!list_empty(&ep->queue)) {
queue             645 drivers/usb/gadget/udc/omap_udc.c 			req = container_of(ep->queue.next,
queue             646 drivers/usb/gadget/udc/omap_udc.c 						struct omap_req, queue);
queue             651 drivers/usb/gadget/udc/omap_udc.c 		if (!list_empty(&ep->queue)) {
queue             652 drivers/usb/gadget/udc/omap_udc.c 			req = container_of(ep->queue.next,
queue             653 drivers/usb/gadget/udc/omap_udc.c 					struct omap_req, queue);
queue             663 drivers/usb/gadget/udc/omap_udc.c 		if (!list_empty(&ep->queue)) {
queue             664 drivers/usb/gadget/udc/omap_udc.c 			req = container_of(ep->queue.next,
queue             665 drivers/usb/gadget/udc/omap_udc.c 					struct omap_req, queue);
queue             670 drivers/usb/gadget/udc/omap_udc.c 		if (!list_empty(&ep->queue)) {
queue             671 drivers/usb/gadget/udc/omap_udc.c 			req = container_of(ep->queue.next,
queue             672 drivers/usb/gadget/udc/omap_udc.c 					struct omap_req, queue);
queue             775 drivers/usb/gadget/udc/omap_udc.c 	restart = !ep->stopped && !list_empty(&ep->queue);
queue             788 drivers/usb/gadget/udc/omap_udc.c 		req = container_of(ep->queue.next, struct omap_req, queue);
queue             812 drivers/usb/gadget/udc/omap_udc.c 	if (!list_empty(&ep->queue))
queue             813 drivers/usb/gadget/udc/omap_udc.c 		req = container_of(ep->queue.next, struct omap_req, queue);
queue             873 drivers/usb/gadget/udc/omap_udc.c 			|| !list_empty(&req->queue)) {
queue             922 drivers/usb/gadget/udc/omap_udc.c 	} else if (list_empty(&ep->queue) && !ep->stopped && !ep->ackwait) {
queue             926 drivers/usb/gadget/udc/omap_udc.c 			if (!udc->ep0_pending || !list_empty(&ep->queue)) {
queue             997 drivers/usb/gadget/udc/omap_udc.c 		list_add_tail(&req->queue, &ep->queue);
queue            1015 drivers/usb/gadget/udc/omap_udc.c 	list_for_each_entry(req, &ep->queue, queue) {
queue            1024 drivers/usb/gadget/udc/omap_udc.c 	if (use_dma && ep->dma_channel && ep->queue.next == &req->queue) {
queue            1068 drivers/usb/gadget/udc/omap_udc.c 				&& !list_empty(&ep->queue)) {
queue            1077 drivers/usb/gadget/udc/omap_udc.c 					&& !list_empty(&ep->queue)) {
queue            1118 drivers/usb/gadget/udc/omap_udc.c 	.queue		= omap_ep_queue,
queue            1341 drivers/usb/gadget/udc/omap_udc.c 	while (!list_empty(&ep->queue)) {
queue            1342 drivers/usb/gadget/udc/omap_udc.c 		req = list_entry(ep->queue.next, struct omap_req, queue);
queue            1417 drivers/usb/gadget/udc/omap_udc.c 	if (!list_empty(&ep0->queue))
queue            1418 drivers/usb/gadget/udc/omap_udc.c 		req = container_of(ep0->queue.next, struct omap_req, queue);
queue            1864 drivers/usb/gadget/udc/omap_udc.c 	if (!list_empty(&ep->queue) && ep->ackwait) {
queue            1873 drivers/usb/gadget/udc/omap_udc.c 			req = container_of(ep->queue.next,
queue            1874 drivers/usb/gadget/udc/omap_udc.c 					struct omap_req, queue);
queue            1912 drivers/usb/gadget/udc/omap_udc.c 			if (!list_empty(&ep->queue)) {
queue            1914 drivers/usb/gadget/udc/omap_udc.c 				req = container_of(ep->queue.next,
queue            1915 drivers/usb/gadget/udc/omap_udc.c 						struct omap_req, queue);
queue            1947 drivers/usb/gadget/udc/omap_udc.c 			if (!list_empty(&ep->queue)) {
queue            1948 drivers/usb/gadget/udc/omap_udc.c 				req = container_of(ep->queue.next,
queue            1949 drivers/usb/gadget/udc/omap_udc.c 						struct omap_req, queue);
queue            1979 drivers/usb/gadget/udc/omap_udc.c 		if (ep->has_dma || list_empty(&ep->queue))
queue            1981 drivers/usb/gadget/udc/omap_udc.c 		req = list_entry(ep->queue.next, struct omap_req, queue);
queue            2013 drivers/usb/gadget/udc/omap_udc.c 		if (!list_empty(&ep->queue))
queue            2191 drivers/usb/gadget/udc/omap_udc.c 	if (list_empty(&ep->queue))
queue            2194 drivers/usb/gadget/udc/omap_udc.c 		list_for_each_entry(req, &ep->queue, queue) {
queue            2557 drivers/usb/gadget/udc/omap_udc.c 	INIT_LIST_HEAD(&ep->queue);
queue             134 drivers/usb/gadget/udc/omap_udc.h 	struct list_head		queue;
queue             141 drivers/usb/gadget/udc/omap_udc.h 	struct list_head		queue;
queue             295 drivers/usb/gadget/udc/pch_udc.c 	struct list_head		queue;
queue             399 drivers/usb/gadget/udc/pch_udc.c 	struct list_head		queue;
queue            1439 drivers/usb/gadget/udc/pch_udc.c 	list_del_init(&req->queue);
queue            1493 drivers/usb/gadget/udc/pch_udc.c 	while (!list_empty(&ep->queue)) {
queue            1494 drivers/usb/gadget/udc/pch_udc.c 		req = list_entry(ep->queue.next, struct pch_udc_request, queue);
queue            1734 drivers/usb/gadget/udc/pch_udc.c 	INIT_LIST_HEAD(&ep->queue);
queue            1764 drivers/usb/gadget/udc/pch_udc.c 	INIT_LIST_HEAD(&req->queue);
queue            1801 drivers/usb/gadget/udc/pch_udc.c 	if (!list_empty(&req->queue))
queue            1840 drivers/usb/gadget/udc/pch_udc.c 	if (!list_empty(&req->queue))
queue            1887 drivers/usb/gadget/udc/pch_udc.c 	if (list_empty(&ep->queue) && !ep->halted) {
queue            1909 drivers/usb/gadget/udc/pch_udc.c 		list_add_tail(&req->queue, &ep->queue);
queue            1940 drivers/usb/gadget/udc/pch_udc.c 	list_for_each_entry(req, &ep->queue, queue) {
queue            1943 drivers/usb/gadget/udc/pch_udc.c 			if (!list_empty(&req->queue))
queue            1977 drivers/usb/gadget/udc/pch_udc.c 	if (list_empty(&ep->queue)) {
queue            2019 drivers/usb/gadget/udc/pch_udc.c 	if (!list_empty(&ep->queue)) {
queue            2055 drivers/usb/gadget/udc/pch_udc.c 	.queue		= pch_udc_pcd_queue,
queue            2091 drivers/usb/gadget/udc/pch_udc.c 	if (list_empty(&ep->queue))
queue            2095 drivers/usb/gadget/udc/pch_udc.c 	req = list_entry(ep->queue.next, struct pch_udc_request, queue);
queue            2127 drivers/usb/gadget/udc/pch_udc.c 	if (list_empty(&ep->queue))
queue            2129 drivers/usb/gadget/udc/pch_udc.c 	req = list_entry(ep->queue.next, struct pch_udc_request, queue);
queue            2147 drivers/usb/gadget/udc/pch_udc.c 	if (!list_empty(&ep->queue)) {
queue            2170 drivers/usb/gadget/udc/pch_udc.c 	if (list_empty(&ep->queue))
queue            2173 drivers/usb/gadget/udc/pch_udc.c 	req = list_entry(ep->queue.next, struct pch_udc_request, queue);
queue            2212 drivers/usb/gadget/udc/pch_udc.c 	if (!list_empty(&ep->queue)) {
queue            2213 drivers/usb/gadget/udc/pch_udc.c 		req = list_entry(ep->queue.next, struct pch_udc_request, queue);
queue            2278 drivers/usb/gadget/udc/pch_udc.c 	if ((epsts & UDC_EPSTS_BNA) && (!list_empty(&ep->queue))) {
queue            2280 drivers/usb/gadget/udc/pch_udc.c 		req = list_entry(ep->queue.next, struct pch_udc_request,
queue            2281 drivers/usb/gadget/udc/pch_udc.c 				 queue);
queue            2315 drivers/usb/gadget/udc/pch_udc.c 	if (list_empty(&ep->queue))
queue            2430 drivers/usb/gadget/udc/pch_udc.c 		if (!list_empty(&ep->queue)) {
queue            2449 drivers/usb/gadget/udc/pch_udc.c 	if (list_empty(&ep->queue))
queue            2873 drivers/usb/gadget/udc/pch_udc.c 		INIT_LIST_HEAD(&ep->queue);
queue             519 drivers/usb/gadget/udc/pxa25x_udc.c 	INIT_LIST_HEAD (&req->queue);
queue             533 drivers/usb/gadget/udc/pxa25x_udc.c 	WARN_ON(!list_empty (&req->queue));
queue             546 drivers/usb/gadget/udc/pxa25x_udc.c 	list_del_init(&req->queue);
queue             636 drivers/usb/gadget/udc/pxa25x_udc.c 			if (list_empty(&ep->queue))
queue             784 drivers/usb/gadget/udc/pxa25x_udc.c 			if (list_empty(&ep->queue))
queue             849 drivers/usb/gadget/udc/pxa25x_udc.c 			|| !list_empty(&req->queue))) {
queue             883 drivers/usb/gadget/udc/pxa25x_udc.c 	if (list_empty(&ep->queue) && !ep->stopped) {
queue             939 drivers/usb/gadget/udc/pxa25x_udc.c 		list_add_tail(&req->queue, &ep->queue);
queue             954 drivers/usb/gadget/udc/pxa25x_udc.c 	while (!list_empty(&ep->queue)) {
queue             955 drivers/usb/gadget/udc/pxa25x_udc.c 		req = list_entry(ep->queue.next,
queue             957 drivers/usb/gadget/udc/pxa25x_udc.c 				queue);
queue             979 drivers/usb/gadget/udc/pxa25x_udc.c 	list_for_each_entry (req, &ep->queue, queue) {
queue            1022 drivers/usb/gadget/udc/pxa25x_udc.c 			   || !list_empty(&ep->queue))) {
queue            1075 drivers/usb/gadget/udc/pxa25x_udc.c 	if (!_ep || ep->ep.name == ep0name || !list_empty(&ep->queue)) {
queue            1103 drivers/usb/gadget/udc/pxa25x_udc.c 	.queue		= pxa25x_ep_queue,
queue            1321 drivers/usb/gadget/udc/pxa25x_udc.c 		if (list_empty(&ep->queue)) {
queue            1325 drivers/usb/gadget/udc/pxa25x_udc.c 		list_for_each_entry(req, &ep->queue, queue) {
queue            1398 drivers/usb/gadget/udc/pxa25x_udc.c 		INIT_LIST_HEAD (&ep->queue);
queue            1639 drivers/usb/gadget/udc/pxa25x_udc.c 	if (list_empty(&ep->queue))
queue            1642 drivers/usb/gadget/udc/pxa25x_udc.c 		req = list_entry(ep->queue.next, struct pxa25x_request, queue);
queue            1861 drivers/usb/gadget/udc/pxa25x_udc.c 		if (likely (!list_empty(&ep->queue)))
queue            1862 drivers/usb/gadget/udc/pxa25x_udc.c 			req = list_entry(ep->queue.next,
queue            1863 drivers/usb/gadget/udc/pxa25x_udc.c 					struct pxa25x_request, queue);
queue              39 drivers/usb/gadget/udc/pxa25x_udc.h 	struct list_head			queue;
queue              61 drivers/usb/gadget/udc/pxa25x_udc.h 	struct list_head			queue;
queue             153 drivers/usb/gadget/udc/pxa27x_udc.c 		if (list_empty(&ep->queue)) {
queue             158 drivers/usb/gadget/udc/pxa27x_udc.c 		list_for_each_entry(req, &ep->queue, queue) {
queue             583 drivers/usb/gadget/udc/pxa27x_udc.c 	INIT_LIST_HEAD(&req->queue);
queue             602 drivers/usb/gadget/udc/pxa27x_udc.c 	WARN_ON(!list_empty(&req->queue));
queue             624 drivers/usb/gadget/udc/pxa27x_udc.c 	list_add_tail(&req->queue, &ep->queue);
queue             646 drivers/usb/gadget/udc/pxa27x_udc.c 	list_del_init(&req->queue);
queue             648 drivers/usb/gadget/udc/pxa27x_udc.c 	if (!is_ep0(ep) && list_empty(&ep->queue))
queue             775 drivers/usb/gadget/udc/pxa27x_udc.c 	while (!list_empty(&ep->queue)) {
queue             776 drivers/usb/gadget/udc/pxa27x_udc.c 		req = list_entry(ep->queue.next, struct pxa27x_request, queue);
queue            1088 drivers/usb/gadget/udc/pxa27x_udc.c 	is_first_req = list_empty(&ep->queue);
queue            1179 drivers/usb/gadget/udc/pxa27x_udc.c 	list_for_each_entry(req, &ep->queue, queue) {
queue            1228 drivers/usb/gadget/udc/pxa27x_udc.c 	if (ep->dir_in	&& (ep_is_full(ep) || !list_empty(&ep->queue)))
queue            1289 drivers/usb/gadget/udc/pxa27x_udc.c 	if (unlikely(!list_empty(&ep->queue)))
queue            1389 drivers/usb/gadget/udc/pxa27x_udc.c 	if (!ep || is_ep0(ep) || !list_empty(&ep->queue))
queue            1409 drivers/usb/gadget/udc/pxa27x_udc.c 	.queue		= pxa_ep_queue,
queue            1676 drivers/usb/gadget/udc/pxa27x_udc.c 		INIT_LIST_HEAD(&ep->queue);
queue            1941 drivers/usb/gadget/udc/pxa27x_udc.c 	if (!list_empty(&ep->queue))
queue            1942 drivers/usb/gadget/udc/pxa27x_udc.c 		req = list_entry(ep->queue.next, struct pxa27x_request, queue);
queue            2037 drivers/usb/gadget/udc/pxa27x_udc.c 		if (likely(!list_empty(&ep->queue)))
queue            2038 drivers/usb/gadget/udc/pxa27x_udc.c 			req = list_entry(ep->queue.next,
queue            2039 drivers/usb/gadget/udc/pxa27x_udc.c 					struct pxa27x_request, queue);
queue             347 drivers/usb/gadget/udc/pxa27x_udc.h 	struct list_head	queue;
queue             385 drivers/usb/gadget/udc/pxa27x_udc.h 	struct list_head			queue;
queue              98 drivers/usb/gadget/udc/r8a66597-udc.c 	INIT_LIST_HEAD(&r8a66597->ep[0].queue);
queue             432 drivers/usb/gadget/udc/r8a66597-udc.c 	INIT_LIST_HEAD(&ep->queue);
queue             894 drivers/usb/gadget/udc/r8a66597-udc.c 	return list_entry(ep->queue.next, struct r8a66597_request, queue);
queue             912 drivers/usb/gadget/udc/r8a66597-udc.c 	list_del_init(&req->queue);
queue             918 drivers/usb/gadget/udc/r8a66597-udc.c 	if (!list_empty(&ep->queue))
queue            1151 drivers/usb/gadget/udc/r8a66597-udc.c 					if (!list_empty(&ep->queue))
queue            1226 drivers/usb/gadget/udc/r8a66597-udc.c 			if (list_empty(&ep->queue))
queue            1229 drivers/usb/gadget/udc/r8a66597-udc.c 		} else if (!list_empty(&ep->queue))
queue            1567 drivers/usb/gadget/udc/r8a66597-udc.c 	while (!list_empty(&ep->queue)) {
queue            1587 drivers/usb/gadget/udc/r8a66597-udc.c 	INIT_LIST_HEAD(&req->queue);
queue            1616 drivers/usb/gadget/udc/r8a66597-udc.c 	if (list_empty(&ep->queue))
queue            1619 drivers/usb/gadget/udc/r8a66597-udc.c 	list_add_tail(&req->queue, &ep->queue);
queue            1645 drivers/usb/gadget/udc/r8a66597-udc.c 	if (!list_empty(&ep->queue))
queue            1659 drivers/usb/gadget/udc/r8a66597-udc.c 	if (!list_empty(&ep->queue)) {
queue            1697 drivers/usb/gadget/udc/r8a66597-udc.c 	if (list_empty(&ep->queue) && !ep->busy) {
queue            1713 drivers/usb/gadget/udc/r8a66597-udc.c 	.queue		= r8a66597_queue,
queue            1917 drivers/usb/gadget/udc/r8a66597-udc.c 		INIT_LIST_HEAD(&ep->queue);
queue              52 drivers/usb/gadget/udc/r8a66597-udc.h 	struct list_head	queue;
queue              60 drivers/usb/gadget/udc/r8a66597-udc.h 	struct list_head	queue;
queue             306 drivers/usb/gadget/udc/renesas_usb3.c 	struct list_head	queue;
queue             316 drivers/usb/gadget/udc/renesas_usb3.c 	struct list_head queue;
queue             873 drivers/usb/gadget/udc/renesas_usb3.c 	return list_first_entry_or_null(&usb3_ep->queue,
queue             874 drivers/usb/gadget/udc/renesas_usb3.c 					struct renesas_usb3_request, queue);
queue             902 drivers/usb/gadget/udc/renesas_usb3.c 	list_del_init(&usb3_req->queue);
queue            1527 drivers/usb/gadget/udc/renesas_usb3.c 	list_add_tail(&usb3_req->queue, &usb3_ep->queue);
queue            2187 drivers/usb/gadget/udc/renesas_usb3.c 	INIT_LIST_HEAD(&usb3_req->queue);
queue            2264 drivers/usb/gadget/udc/renesas_usb3.c 	.queue		= renesas_usb3_ep_queue,
queue            2595 drivers/usb/gadget/udc/renesas_usb3.c 		INIT_LIST_HEAD(&usb3_ep->queue);
queue             111 drivers/usb/gadget/udc/s3c-hsudc.c 	struct list_head queue;
queue             125 drivers/usb/gadget/udc/s3c-hsudc.c 	struct list_head queue;
queue             248 drivers/usb/gadget/udc/s3c-hsudc.c 	list_del_init(&hsreq->queue);
queue             272 drivers/usb/gadget/udc/s3c-hsudc.c 	while (!list_empty(&hsep->queue)) {
queue             273 drivers/usb/gadget/udc/s3c-hsudc.c 		hsreq = list_entry(hsep->queue.next,
queue             274 drivers/usb/gadget/udc/s3c-hsudc.c 				struct s3c_hsudc_req, queue);
queue             441 drivers/usb/gadget/udc/s3c-hsudc.c 		if (list_empty(&hsep->queue))
queue             444 drivers/usb/gadget/udc/s3c-hsudc.c 		hsreq = list_entry(hsep->queue.next,
queue             445 drivers/usb/gadget/udc/s3c-hsudc.c 				struct s3c_hsudc_req, queue);
queue             478 drivers/usb/gadget/udc/s3c-hsudc.c 		if (list_empty(&hsep->queue))
queue             481 drivers/usb/gadget/udc/s3c-hsudc.c 		hsreq = list_entry(hsep->queue.next,
queue             482 drivers/usb/gadget/udc/s3c-hsudc.c 				struct s3c_hsudc_req, queue);
queue             506 drivers/usb/gadget/udc/s3c-hsudc.c 	if (value && ep_is_in(hsep) && !list_empty(&hsep->queue))
queue             525 drivers/usb/gadget/udc/s3c-hsudc.c 	if (ep_is_in(hsep) && !list_empty(&hsep->queue) && !value) {
queue             526 drivers/usb/gadget/udc/s3c-hsudc.c 		hsreq = list_entry(hsep->queue.next,
queue             527 drivers/usb/gadget/udc/s3c-hsudc.c 			struct s3c_hsudc_req, queue);
queue             610 drivers/usb/gadget/udc/s3c-hsudc.c 	INIT_LIST_HEAD(&hsreq.queue);
queue             715 drivers/usb/gadget/udc/s3c-hsudc.c 			if (list_empty(&hsep->queue))
queue             718 drivers/usb/gadget/udc/s3c-hsudc.c 			hsreq = list_entry(hsep->queue.next,
queue             719 drivers/usb/gadget/udc/s3c-hsudc.c 					struct s3c_hsudc_req, queue);
queue             729 drivers/usb/gadget/udc/s3c-hsudc.c 				if (list_empty(&hsep->queue))
queue             731 drivers/usb/gadget/udc/s3c-hsudc.c 				hsreq = list_entry(hsep->queue.next,
queue             732 drivers/usb/gadget/udc/s3c-hsudc.c 					struct s3c_hsudc_req, queue);
queue             835 drivers/usb/gadget/udc/s3c-hsudc.c 	INIT_LIST_HEAD(&hsreq->queue);
queue             851 drivers/usb/gadget/udc/s3c-hsudc.c 	WARN_ON(!list_empty(&hsreq->queue));
queue             875 drivers/usb/gadget/udc/s3c-hsudc.c 		!list_empty(&hsreq->queue)))
queue             896 drivers/usb/gadget/udc/s3c-hsudc.c 	if (list_empty(&hsep->queue) && !hsep->stopped) {
queue             912 drivers/usb/gadget/udc/s3c-hsudc.c 		list_add_tail(&hsreq->queue, &hsep->queue);
queue             938 drivers/usb/gadget/udc/s3c-hsudc.c 	list_for_each_entry(hsreq, &hsep->queue, queue) {
queue             959 drivers/usb/gadget/udc/s3c-hsudc.c 	.queue = s3c_hsudc_queue,
queue             991 drivers/usb/gadget/udc/s3c-hsudc.c 	INIT_LIST_HEAD(&hsep->queue);
queue             239 drivers/usb/gadget/udc/s3c2410_udc.c 	list_del_init(&req->queue);
queue             255 drivers/usb/gadget/udc/s3c2410_udc.c 	if (&ep->queue == NULL)
queue             258 drivers/usb/gadget/udc/s3c2410_udc.c 	while (!list_empty(&ep->queue)) {
queue             260 drivers/usb/gadget/udc/s3c2410_udc.c 		req = list_entry(ep->queue.next, struct s3c2410_request,
queue             261 drivers/usb/gadget/udc/s3c2410_udc.c 				queue);
queue             730 drivers/usb/gadget/udc/s3c2410_udc.c 	if (list_empty(&ep->queue))
queue             733 drivers/usb/gadget/udc/s3c2410_udc.c 		req = list_entry(ep->queue.next, struct s3c2410_request, queue);
queue             801 drivers/usb/gadget/udc/s3c2410_udc.c 	if (likely(!list_empty(&ep->queue)))
queue             802 drivers/usb/gadget/udc/s3c2410_udc.c 		req = list_entry(ep->queue.next,
queue             803 drivers/usb/gadget/udc/s3c2410_udc.c 				struct s3c2410_request, queue);
queue            1142 drivers/usb/gadget/udc/s3c2410_udc.c 	INIT_LIST_HEAD(&req->queue);
queue            1160 drivers/usb/gadget/udc/s3c2410_udc.c 	WARN_ON(!list_empty(&req->queue));
queue            1191 drivers/usb/gadget/udc/s3c2410_udc.c 			|| !_req->buf || !list_empty(&req->queue))) {
queue            1197 drivers/usb/gadget/udc/s3c2410_udc.c 				!list_empty(&req->queue));
queue            1224 drivers/usb/gadget/udc/s3c2410_udc.c 	if (list_empty(&ep->queue) && !ep->halted) {
queue            1263 drivers/usb/gadget/udc/s3c2410_udc.c 		list_add_tail(&req->queue, &ep->queue);
queue            1294 drivers/usb/gadget/udc/s3c2410_udc.c 	list_for_each_entry(req, &ep->queue, queue) {
queue            1296 drivers/usb/gadget/udc/s3c2410_udc.c 			list_del_init(&req->queue);
queue            1379 drivers/usb/gadget/udc/s3c2410_udc.c 	.queue		= s3c2410_udc_queue,
queue            1594 drivers/usb/gadget/udc/s3c2410_udc.c 		INIT_LIST_HEAD(&ep->queue);
queue              14 drivers/usb/gadget/udc/s3c2410_udc.h 	struct list_head		queue;
queue              52 drivers/usb/gadget/udc/s3c2410_udc.h 	struct list_head		queue;		/* ep's requests */
queue             460 drivers/usb/gadget/udc/snps_udc_core.c 	INIT_LIST_HEAD(&ep->queue);
queue             537 drivers/usb/gadget/udc/snps_udc_core.c 	INIT_LIST_HEAD(&req->queue);
queue             598 drivers/usb/gadget/udc/snps_udc_core.c 	BUG_ON(!list_empty(&req->queue));
queue             988 drivers/usb/gadget/udc/snps_udc_core.c 	list_del_init(&req->queue);
queue            1068 drivers/usb/gadget/udc/snps_udc_core.c 			|| !list_empty(&req->queue))
queue            1099 drivers/usb/gadget/udc/snps_udc_core.c 	if (list_empty(&ep->queue)) {
queue            1206 drivers/usb/gadget/udc/snps_udc_core.c 		list_add_tail(&req->queue, &ep->queue);
queue            1246 drivers/usb/gadget/udc/snps_udc_core.c 	while (!list_empty(&ep->queue)) {
queue            1247 drivers/usb/gadget/udc/snps_udc_core.c 		req = list_entry(ep->queue.next,
queue            1249 drivers/usb/gadget/udc/snps_udc_core.c 			queue);
queue            1274 drivers/usb/gadget/udc/snps_udc_core.c 	if (ep->queue.next == &req->queue) {
queue            1380 drivers/usb/gadget/udc/snps_udc_core.c 	.queue		= udc_queue,
queue            2122 drivers/usb/gadget/udc/snps_udc_core.c 	if (!list_empty(&ep->queue)) {
queue            2125 drivers/usb/gadget/udc/snps_udc_core.c 		req = list_entry(ep->queue.next,
queue            2126 drivers/usb/gadget/udc/snps_udc_core.c 			struct udc_request, queue);
queue            2142 drivers/usb/gadget/udc/snps_udc_core.c 			if (!list_empty(&ep->queue) && !ep->halted) {
queue            2143 drivers/usb/gadget/udc/snps_udc_core.c 				req = list_entry(ep->queue.next,
queue            2144 drivers/usb/gadget/udc/snps_udc_core.c 					struct udc_request, queue);
queue            2219 drivers/usb/gadget/udc/snps_udc_core.c 			if (!list_empty(&ep->queue) && !ep->halted) {
queue            2220 drivers/usb/gadget/udc/snps_udc_core.c 				req = list_entry(ep->queue.next,
queue            2222 drivers/usb/gadget/udc/snps_udc_core.c 					queue);
queue            2340 drivers/usb/gadget/udc/snps_udc_core.c 		if (!ep->cancel_transfer && !list_empty(&ep->queue)) {
queue            2341 drivers/usb/gadget/udc/snps_udc_core.c 			req = list_entry(ep->queue.next,
queue            2342 drivers/usb/gadget/udc/snps_udc_core.c 					struct udc_request, queue);
queue            2361 drivers/usb/gadget/udc/snps_udc_core.c 				if (list_empty(&ep->queue)) {
queue            2379 drivers/usb/gadget/udc/snps_udc_core.c 		if (!list_empty(&ep->queue)) {
queue            2381 drivers/usb/gadget/udc/snps_udc_core.c 			req = list_entry(ep->queue.next,
queue            2382 drivers/usb/gadget/udc/snps_udc_core.c 					struct udc_request, queue);
queue            2613 drivers/usb/gadget/udc/snps_udc_core.c 			if (list_empty(&dev->ep[UDC_EP0OUT_IX].queue)) {
queue            2705 drivers/usb/gadget/udc/snps_udc_core.c 			if (!list_empty(&ep->queue)) {
queue            2707 drivers/usb/gadget/udc/snps_udc_core.c 				req = list_entry(ep->queue.next,
queue            2708 drivers/usb/gadget/udc/snps_udc_core.c 						struct udc_request, queue);
queue             117 drivers/usb/gadget/udc/udc-xilinx.c 	struct list_head queue;
queue             142 drivers/usb/gadget/udc/udc-xilinx.c 	struct list_head queue;
queue             550 drivers/usb/gadget/udc/udc-xilinx.c 	list_del_init(&req->queue);
queue             733 drivers/usb/gadget/udc/udc-xilinx.c 	while (!list_empty(&ep->queue)) {
queue             734 drivers/usb/gadget/udc/udc-xilinx.c 		req = list_first_entry(&ep->queue, struct xusb_req, queue);
queue             759 drivers/usb/gadget/udc/udc-xilinx.c 	if (ep->is_in && (!list_empty(&ep->queue)) && value) {
queue             973 drivers/usb/gadget/udc/udc-xilinx.c 	INIT_LIST_HEAD(&req->queue);
queue            1006 drivers/usb/gadget/udc/udc-xilinx.c 	if (!list_empty(&ep0->queue)) {
queue            1014 drivers/usb/gadget/udc/udc-xilinx.c 	list_add_tail(&req->queue, &ep0->queue);
queue            1107 drivers/usb/gadget/udc/udc-xilinx.c 	if (list_empty(&ep->queue)) {
queue            1120 drivers/usb/gadget/udc/udc-xilinx.c 		list_add_tail(&req->queue, &ep->queue);
queue            1142 drivers/usb/gadget/udc/udc-xilinx.c 	list_for_each_entry(req, &ep->queue, queue) {
queue            1189 drivers/usb/gadget/udc/udc-xilinx.c 	.queue		= xudc_ep0_queue,
queue            1199 drivers/usb/gadget/udc/udc-xilinx.c 	.queue		= xudc_ep_queue,
queue            1340 drivers/usb/gadget/udc/udc-xilinx.c 		INIT_LIST_HEAD(&ep->queue);
queue            1809 drivers/usb/gadget/udc/udc-xilinx.c 	req = list_first_entry(&ep0->queue, struct xusb_req, queue);
queue            1862 drivers/usb/gadget/udc/udc-xilinx.c 	req = list_first_entry(&ep0->queue, struct xusb_req, queue);
queue            1959 drivers/usb/gadget/udc/udc-xilinx.c 	if (list_empty(&ep->queue))
queue            1962 drivers/usb/gadget/udc/udc-xilinx.c 	req = list_first_entry(&ep->queue, struct xusb_req, queue);
queue             220 drivers/usb/host/imx21-dbg.c 	list_for_each_entry(ep_priv, &imx21->queue_for_etd, queue)
queue             223 drivers/usb/host/imx21-dbg.c 	list_for_each_entry(etd, &imx21->queue_for_dmem, queue)
queue             439 drivers/usb/host/imx21-hcd.c 	list_for_each_entry_safe(etd, tmp, &imx21->queue_for_dmem, queue) {
queue             442 drivers/usb/host/imx21-hcd.c 			list_del(&etd->queue);
queue             492 drivers/usb/host/imx21-hcd.c 			struct ep_priv, queue);
queue             493 drivers/usb/host/imx21-hcd.c 		list_del(&ep_priv->queue);
queue            1013 drivers/usb/host/imx21-hcd.c 		list_add_tail(&etd->queue, &imx21->queue_for_dmem);
queue            1223 drivers/usb/host/imx21-hcd.c 			list_add_tail(&ep_priv->queue, &imx21->queue_for_etd);
queue             331 drivers/usb/host/imx21-hcd.h 	struct list_head queue;
queue             353 drivers/usb/host/imx21-hcd.h 	struct list_head queue;
queue             844 drivers/usb/host/r8a66597-hcd.c 	list_for_each_entry_safe(td, next, list, queue) {
queue             849 drivers/usb/host/r8a66597-hcd.c 		list_del(&td->queue);
queue            1283 drivers/usb/host/r8a66597-hcd.c 		list_del(&td->queue);
queue            1780 drivers/usb/host/r8a66597-hcd.c 			list_move_tail(&new_td->queue,
queue            1880 drivers/usb/host/r8a66597-hcd.c 	INIT_LIST_HEAD(&td->queue);
queue            1928 drivers/usb/host/r8a66597-hcd.c 	list_add_tail(&td->queue, &r8a66597->pipe_queue[td->pipenum]);
queue            1940 drivers/usb/host/r8a66597-hcd.c 				list_del(&td->queue);
queue              58 drivers/usb/host/r8a66597.h 	struct list_head queue;
queue             148 drivers/usb/host/r8a66597.h 			  struct r8a66597_td, queue);
queue             211 drivers/usb/host/uhci-debug.c 	if (list_empty(&qh->queue)) {
queue             220 drivers/usb/host/uhci-debug.c 		struct urb_priv *urbp = list_entry(qh->queue.next,
queue             229 drivers/usb/host/uhci-debug.c 		list_for_each_entry(urbp, &qh->queue, node) {
queue             162 drivers/usb/host/uhci-hcd.h 	struct list_head queue;		/* Queue of urbps for this QH */
queue             260 drivers/usb/host/uhci-q.c 	INIT_LIST_HEAD(&qh->queue);
queue             295 drivers/usb/host/uhci-q.c 	if (!list_empty(&qh->queue))
queue             334 drivers/usb/host/uhci-q.c 	if (qh->queue.next != &urbp->node) {
queue             383 drivers/usb/host/uhci-q.c 		urbp = list_entry(qh->queue.next, struct urb_priv, node);
queue             393 drivers/usb/host/uhci-q.c 	urbp = list_prepare_entry(urbp, &qh->queue, node);
queue             394 drivers/usb/host/uhci-q.c 	list_for_each_entry_continue(urbp, &qh->queue, node) {
queue             415 drivers/usb/host/uhci-q.c 	pipe = list_entry(qh->queue.next, struct urb_priv, node)->urb->pipe;
queue             482 drivers/usb/host/uhci-q.c 	WARN_ON(list_empty(&qh->queue));
queue             487 drivers/usb/host/uhci-q.c 		struct urb_priv *urbp = list_entry(qh->queue.next,
queue            1292 drivers/usb/host/uhci-q.c 		if (list_empty(&qh->queue)) {
queue            1297 drivers/usb/host/uhci-q.c 			lurb = list_entry(qh->queue.prev,
queue            1358 drivers/usb/host/uhci-q.c 	if (list_empty(&qh->queue)) {
queue            1456 drivers/usb/host/uhci-q.c 	list_add_tail(&urbp->node, &qh->queue);
queue            1462 drivers/usb/host/uhci-q.c 	if (qh->queue.next == &urbp->node && !qh->is_stopped) {
queue            1534 drivers/usb/host/uhci-q.c 			urbp->node.prev == &qh->queue &&
queue            1535 drivers/usb/host/uhci-q.c 			urbp->node.next != &qh->queue) {
queue            1546 drivers/usb/host/uhci-q.c 	if (list_empty(&qh->queue) && qh->needs_fixup) {
queue            1561 drivers/usb/host/uhci-q.c 	if (list_empty(&qh->queue)) {
queue            1581 drivers/usb/host/uhci-q.c 	while (!list_empty(&qh->queue)) {
queue            1582 drivers/usb/host/uhci-q.c 		urbp = list_entry(qh->queue.next, struct urb_priv, node);
queue            1615 drivers/usb/host/uhci-q.c 	list_for_each_entry(urbp, &qh->queue, node) {
queue            1634 drivers/usb/host/uhci-q.c 	if (!list_empty(&qh->queue)) {
queue            1641 drivers/usb/host/uhci-q.c 		urbp = list_entry(qh->queue.next, struct urb_priv, node);
queue            1691 drivers/usb/host/uhci-q.c 		urbp = list_entry(qh->queue.next, struct urb_priv, node);
queue            1772 drivers/usb/host/uhci-q.c 	list_entry(qh->queue.next, struct urb_priv, node));
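
Lines 393-394 of uhci-q.c above show a less common list idiom: list_prepare_entry() plus list_for_each_entry_continue() resume a scan of qh->queue from a known urbp, or from the head when the cursor is NULL. A hedged sketch with invented types:

    #include <linux/list.h>

    struct item {
            struct list_head node;
            bool seen;
    };

    /* visit only the entries after *cursor; cursor == NULL means all */
    static void scan_from(struct list_head *queue, struct item *cursor)
    {
            struct item *it = list_prepare_entry(cursor, queue, node);

            list_for_each_entry_continue(it, queue, node)
                    it->seen = true;
    }
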
queue             339 drivers/usb/host/xhci-dbgtty.c 	struct list_head	*queue = &port->read_queue;
queue             343 drivers/usb/host/xhci-dbgtty.c 	while (!list_empty(queue)) {
queue             344 drivers/usb/host/xhci-dbgtty.c 		req = list_first_entry(queue, struct dbc_request, list_pool);
queue             389 drivers/usb/host/xhci-dbgtty.c 	if (!list_empty(queue) && tty) {
queue              28 drivers/usb/isp1760/isp1760-udc.c 	struct list_head queue;
queue             235 drivers/usb/isp1760/isp1760-udc.c 		list_del(&req->queue);
queue             300 drivers/usb/isp1760/isp1760-udc.c 	if (list_empty(&ep->queue)) {
queue             308 drivers/usb/isp1760/isp1760-udc.c 	req = list_first_entry(&ep->queue, struct isp1760_request,
queue             309 drivers/usb/isp1760/isp1760-udc.c 			       queue);
queue             334 drivers/usb/isp1760/isp1760-udc.c 	if (list_empty(&ep->queue)) {
queue             352 drivers/usb/isp1760/isp1760-udc.c 	req = list_first_entry(&ep->queue, struct isp1760_request,
queue             353 drivers/usb/isp1760/isp1760-udc.c 			       queue);
queue             371 drivers/usb/isp1760/isp1760-udc.c 		list_del(&req->queue);
queue             376 drivers/usb/isp1760/isp1760-udc.c 		if (!list_empty(&ep->queue))
queue             377 drivers/usb/isp1760/isp1760-udc.c 			req = list_first_entry(&ep->queue,
queue             378 drivers/usb/isp1760/isp1760-udc.c 					       struct isp1760_request, queue);
queue             432 drivers/usb/isp1760/isp1760-udc.c 		if ((ep->addr & USB_DIR_IN) && !list_empty(&ep->queue)) {
queue             435 drivers/usb/isp1760/isp1760-udc.c 			req = list_first_entry(&ep->queue,
queue             436 drivers/usb/isp1760/isp1760-udc.c 					       struct isp1760_request, queue);
queue             794 drivers/usb/isp1760/isp1760-udc.c 	list_splice_init(&uep->queue, &req_list);
queue             798 drivers/usb/isp1760/isp1760-udc.c 	list_for_each_entry_safe(req, nreq, &req_list, queue) {
queue             799 drivers/usb/isp1760/isp1760-udc.c 		list_del(&req->queue);
queue             861 drivers/usb/isp1760/isp1760-udc.c 			list_add_tail(&req->queue, &uep->queue);
queue             866 drivers/usb/isp1760/isp1760-udc.c 			list_add_tail(&req->queue, &uep->queue);
queue             882 drivers/usb/isp1760/isp1760-udc.c 		bool empty = list_empty(&uep->queue);
queue             884 drivers/usb/isp1760/isp1760-udc.c 		list_add_tail(&req->queue, &uep->queue);
queue             922 drivers/usb/isp1760/isp1760-udc.c 		list_del(&req->queue);
queue             959 drivers/usb/isp1760/isp1760-udc.c 		if (!list_empty(&uep->queue)) {
queue            1045 drivers/usb/isp1760/isp1760-udc.c 	.queue = isp1760_ep_queue,
queue            1364 drivers/usb/isp1760/isp1760-udc.c 		INIT_LIST_HEAD(&ep->queue);
queue              34 drivers/usb/isp1760/isp1760-udc.h 	struct list_head queue;
queue             440 drivers/usb/mtu3/mtu3_gadget.c 	.queue = mtu3_gadget_queue,
queue             904 drivers/usb/mtu3/mtu3_gadget_ep0.c 	.queue = mtu3_ep0_queue,
queue            1380 drivers/usb/musb/cppi_dma.c 	struct cppi_descriptor	*queue;
queue            1407 drivers/usb/musb/cppi_dma.c 	queue = cppi_ch->head;
queue            1527 drivers/usb/musb/cppi_dma.c 		while (queue) {
queue            1528 drivers/usb/musb/cppi_dma.c 			struct cppi_descriptor	*tmp = queue->next;
queue            1530 drivers/usb/musb/cppi_dma.c 			cppi_bd_free(cppi_ch, queue);
queue            1531 drivers/usb/musb/cppi_dma.c 			queue = tmp;
queue            1494 drivers/usb/musb/musb_gadget.c 	.queue		= musb_gadget_queue,
queue             103 drivers/usb/musb/musb_gadget.h 	struct list_head	*queue = &ep->req_list;
queue             105 drivers/usb/musb/musb_gadget.h 	if (list_empty(queue))
queue             107 drivers/usb/musb/musb_gadget.h 	return container_of(queue->next, struct musb_request, list);
queue            1059 drivers/usb/musb/musb_gadget_ep0.c 	.queue		= musb_g_ep0_queue,
queue             119 drivers/usb/musb/musb_host.h 	struct list_head	*queue;
queue             123 drivers/usb/musb/musb_host.h 	queue = &qh->hep->urb_list;
queue             124 drivers/usb/musb/musb_host.h 	if (list_empty(queue))
queue             126 drivers/usb/musb/musb_host.h 	return list_entry(queue->next, struct urb, urb_list);
queue             785 drivers/usb/renesas_usbhs/mod_gadget.c 	.queue		= usbhsg_ep_queue,
queue             419 drivers/usb/usbip/vudc_dev.c 	.queue		= vep_queue,
queue             102 drivers/vhost/net.c 	void **queue;
queue             154 drivers/vhost/net.c 		return rxq->queue[rxq->head];
queue             181 drivers/vhost/net.c 	rxq->tail = ptr_ring_consume_batched(nvq->rx_ring, rxq->queue,
queue             191 drivers/vhost/net.c 		ptr_ring_unconsume(nvq->rx_ring, rxq->queue + rxq->head,
queue            1278 drivers/vhost/net.c 	void **queue;
queue            1291 drivers/vhost/net.c 	queue = kmalloc_array(VHOST_NET_BATCH, sizeof(void *),
queue            1293 drivers/vhost/net.c 	if (!queue) {
queue            1298 drivers/vhost/net.c 	n->vqs[VHOST_NET_VQ_RX].rxq.queue = queue;
queue            1304 drivers/vhost/net.c 		kfree(queue);
queue            1406 drivers/vhost/net.c 	kfree(n->vqs[VHOST_NET_VQ_RX].rxq.queue);
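
vhost/net above drains its rx ring in batches and, on error paths, returns unconsumed entries (ptr_ring_consume_batched() at line 181, ptr_ring_unconsume() at line 191). A sketch of the batched-consume half only; BATCH and the processing loop are placeholders:

    #include <linux/ptr_ring.h>

    #define BATCH 64

    static int drain_batch(struct ptr_ring *ring)
    {
            void *batch[BATCH];
            int n, i;

            n = ptr_ring_consume_batched(ring, batch, BATCH);
            for (i = 0; i < n; i++)
                    ;               /* process batch[i] here */
            return n;               /* number of entries consumed */
    }
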
queue             390 drivers/video/fbdev/core/fbcon.c 	struct fb_info *info = container_of(work, struct fb_info, queue);
queue             427 drivers/video/fbdev/core/fbcon.c 	queue_work(system_power_efficient_wq, &info->queue);
queue             435 drivers/video/fbdev/core/fbcon.c 	if ((!info->queue.func || info->queue.func == fb_flashcursor) &&
queue             438 drivers/video/fbdev/core/fbcon.c 		if (!info->queue.func)
queue             439 drivers/video/fbdev/core/fbcon.c 			INIT_WORK(&info->queue, fb_flashcursor);
queue             451 drivers/video/fbdev/core/fbcon.c 	if (info->queue.func == fb_flashcursor &&
queue            3651 drivers/video/fbdev/core/fbcon.c 		if (info->queue.func)
queue            3652 drivers/video/fbdev/core/fbcon.c 			pending = cancel_work_sync(&info->queue);
queue            3678 drivers/video/fbdev/core/fbcon.c 			if (info->queue.func == fb_flashcursor)
queue            3679 drivers/video/fbdev/core/fbcon.c 				info->queue.func = NULL;
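
fbcon's queue (above) is not a list but a work_struct embedded in fb_info; the handler recovers the owner with container_of() (line 390) and callers re-arm it with queue_work() (line 427). A minimal sketch of that shape, my_dev being invented:

    #include <linux/workqueue.h>

    struct my_dev {
            struct work_struct queue;
            unsigned long flashes;
    };

    static void my_work_fn(struct work_struct *work)
    {
            struct my_dev *dev = container_of(work, struct my_dev, queue);

            dev->flashes++;                 /* runs in process context */
    }

    static void my_dev_init(struct my_dev *dev)
    {
            INIT_WORK(&dev->queue, my_work_fn);
            queue_work(system_power_efficient_wq, &dev->queue);
    }
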
queue             415 drivers/video/fbdev/mx3fb.c 		       mx3_fbi->txd, cookie, list_empty(&ichan->queue) ? '-' : '+');
queue             427 drivers/video/fbdev/mx3fb.c 		       mx3_fbi->txd, cookie, list_empty(&ichan->queue) ? '-' : '+');
queue             909 drivers/video/fbdev/mx3fb.c 	dev_dbg(mx3fb->dev, "%s [%c]\n", __func__, list_empty(&ichan->queue) ? '-' : '+');
queue            1147 drivers/video/fbdev/mx3fb.c 		list_empty(&mx3_fbi->idmac_channel->queue) ? '-' : '+');
queue             279 drivers/virtio/virtio_ring.c 		void *queue = alloc_pages_exact(PAGE_ALIGN(size), flag);
queue             281 drivers/virtio/virtio_ring.c 		if (queue) {
queue             282 drivers/virtio/virtio_ring.c 			phys_addr_t phys_addr = virt_to_phys(queue);
queue             297 drivers/virtio/virtio_ring.c 				free_pages_exact(queue, PAGE_ALIGN(size));
queue             301 drivers/virtio/virtio_ring.c 		return queue;
queue             306 drivers/virtio/virtio_ring.c 			     void *queue, dma_addr_t dma_handle)
queue             309 drivers/virtio/virtio_ring.c 		dma_free_coherent(vdev->dev.parent, size, queue, dma_handle);
queue             311 drivers/virtio/virtio_ring.c 		free_pages_exact(queue, PAGE_ALIGN(size));
queue             858 drivers/virtio/virtio_ring.c 	void *queue = NULL;
queue             871 drivers/virtio/virtio_ring.c 		queue = vring_alloc_queue(vdev, vring_size(num, vring_align),
queue             874 drivers/virtio/virtio_ring.c 		if (queue)
queue             883 drivers/virtio/virtio_ring.c 	if (!queue) {
queue             885 drivers/virtio/virtio_ring.c 		queue = vring_alloc_queue(vdev, vring_size(num, vring_align),
queue             888 drivers/virtio/virtio_ring.c 	if (!queue)
queue             892 drivers/virtio/virtio_ring.c 	vring_init(&vring, num, queue, vring_align);
queue             897 drivers/virtio/virtio_ring.c 		vring_free_queue(vdev, queue_size_in_bytes, queue,
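
The virtio_ring allocation path above (lines 858-888) retries at progressively smaller ring sizes before giving up. A loose sketch of just that descending-retry loop; alloc_ring() and the element size are assumptions, not the vring API:

    #include <linux/mm.h>

    static void *alloc_ring(unsigned int *num, size_t elem_size)
    {
            void *queue = NULL;

            for (; *num; *num /= 2) {
                    queue = alloc_pages_exact(PAGE_ALIGN(*num * elem_size),
                                              GFP_KERNEL | __GFP_NOWARN);
                    if (queue)
                            break;          /* *num holds the size that fit */
            }
            return queue;                   /* NULL if nothing fit */
    }
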
queue             169 drivers/visorbus/visorchannel.c #define SIG_WRITE_FIELD(channel, queue, sig_hdr, FIELD) \
queue             171 drivers/visorbus/visorchannel.c 			   sig_queue_offset(&channel->chan_hdr, queue) + \
queue             176 drivers/visorbus/visorchannel.c static int sig_read_header(struct visorchannel *channel, u32 queue,
queue             184 drivers/visorbus/visorchannel.c 				 sig_queue_offset(&channel->chan_hdr, queue),
queue             188 drivers/visorbus/visorchannel.c static int sig_read_data(struct visorchannel *channel, u32 queue,
queue             192 drivers/visorbus/visorchannel.c 	int signal_data_offset = sig_data_offset(&channel->chan_hdr, queue,
queue             199 drivers/visorbus/visorchannel.c static int sig_write_data(struct visorchannel *channel, u32 queue,
queue             203 drivers/visorbus/visorchannel.c 	int signal_data_offset = sig_data_offset(&channel->chan_hdr, queue,
queue             210 drivers/visorbus/visorchannel.c static int signalremove_inner(struct visorchannel *channel, u32 queue,
queue             216 drivers/visorbus/visorchannel.c 	error = sig_read_header(channel, queue, &sig_hdr);
queue             223 drivers/visorbus/visorchannel.c 	error = sig_read_data(channel, queue, &sig_hdr, sig_hdr.tail, msg);
queue             232 drivers/visorbus/visorchannel.c 	error = SIG_WRITE_FIELD(channel, queue, &sig_hdr, tail);
queue             235 drivers/visorbus/visorchannel.c 	error = SIG_WRITE_FIELD(channel, queue, &sig_hdr, num_received);
queue             250 drivers/visorbus/visorchannel.c int visorchannel_signalremove(struct visorchannel *channel, u32 queue,
queue             258 drivers/visorbus/visorchannel.c 		rc = signalremove_inner(channel, queue, msg);
queue             261 drivers/visorbus/visorchannel.c 		rc = signalremove_inner(channel, queue, msg);
queue             268 drivers/visorbus/visorchannel.c static bool queue_empty(struct visorchannel *channel, u32 queue)
queue             272 drivers/visorbus/visorchannel.c 	if (sig_read_header(channel, queue, &sig_hdr))
queue             286 drivers/visorbus/visorchannel.c bool visorchannel_signalempty(struct visorchannel *channel, u32 queue)
queue             292 drivers/visorbus/visorchannel.c 		return queue_empty(channel, queue);
queue             294 drivers/visorbus/visorchannel.c 	rc = queue_empty(channel, queue);
queue             300 drivers/visorbus/visorchannel.c static int signalinsert_inner(struct visorchannel *channel, u32 queue,
queue             306 drivers/visorbus/visorchannel.c 	err = sig_read_header(channel, queue, &sig_hdr);
queue             312 drivers/visorbus/visorchannel.c 		err = SIG_WRITE_FIELD(channel, queue, &sig_hdr, num_overflows);
queue             317 drivers/visorbus/visorchannel.c 	err = sig_write_data(channel, queue, &sig_hdr, sig_hdr.head, msg);
queue             326 drivers/visorbus/visorchannel.c 	err = SIG_WRITE_FIELD(channel, queue, &sig_hdr, head);
queue             329 drivers/visorbus/visorchannel.c 	err = SIG_WRITE_FIELD(channel, queue, &sig_hdr, num_sent);
queue             418 drivers/visorbus/visorchannel.c int visorchannel_signalinsert(struct visorchannel *channel, u32 queue,
queue             426 drivers/visorbus/visorchannel.c 		rc = signalinsert_inner(channel, queue, msg);
queue             429 drivers/visorbus/visorchannel.c 		rc = signalinsert_inner(channel, queue, msg);
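
The visorchannel signal queue above is a ring whose head and tail indices live in a header in channel memory: insert advances head and records an overflow when it would collide with tail (lines 300-329), remove advances tail (lines 210-235), and head == tail means empty. An in-memory analogue with invented types, omitting the shared-memory reads and writes:

    struct sig_ring {
            unsigned int head, tail, max_slots;
            void **slot;
    };

    static int ring_insert(struct sig_ring *r, void *msg)
    {
            unsigned int head = (r->head + 1) % r->max_slots;

            if (head == r->tail)
                    return -1;              /* full: source bumps num_overflows */
            r->slot[head] = msg;
            r->head = head;
            return 0;
    }

    static void *ring_remove(struct sig_ring *r)
    {
            if (r->head == r->tail)
                    return NULL;            /* empty */
            r->tail = (r->tail + 1) % r->max_slots;
            return r->slot[r->tail];
    }
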
queue              51 drivers/watchdog/cpu5wdt.c 	int queue;
queue              71 drivers/watchdog/cpu5wdt.c 	if (cpu5wdt_device.queue && ticks)
queue              95 drivers/watchdog/cpu5wdt.c 	if (!cpu5wdt_device.queue) {
queue              96 drivers/watchdog/cpu5wdt.c 		cpu5wdt_device.queue = 1;
queue             212 drivers/watchdog/cpu5wdt.c 	cpu5wdt_device.queue = 0;
queue             251 drivers/watchdog/cpu5wdt.c 	if (cpu5wdt_device.queue) {
queue             252 drivers/watchdog/cpu5wdt.c 		cpu5wdt_device.queue = 0;
queue              55 drivers/watchdog/mtx-1_wdt.c 	int queue;
queue              72 drivers/watchdog/mtx-1_wdt.c 	if (mtx1_wdt_device.queue && ticks)
queue              90 drivers/watchdog/mtx-1_wdt.c 	if (!mtx1_wdt_device.queue) {
queue              91 drivers/watchdog/mtx-1_wdt.c 		mtx1_wdt_device.queue = 1;
queue             105 drivers/watchdog/mtx-1_wdt.c 	if (mtx1_wdt_device.queue) {
queue             106 drivers/watchdog/mtx-1_wdt.c 		mtx1_wdt_device.queue = 0;
queue             210 drivers/watchdog/mtx-1_wdt.c 	mtx1_wdt_device.queue = 0;
queue             228 drivers/watchdog/mtx-1_wdt.c 	if (mtx1_wdt_device.queue) {
queue             229 drivers/watchdog/mtx-1_wdt.c 		mtx1_wdt_device.queue = 0;
queue              46 drivers/watchdog/rdc321x_wdt.c 	int queue;
queue              74 drivers/watchdog/rdc321x_wdt.c 	if (rdc321x_wdt_device.queue && ticks)
queue              93 drivers/watchdog/rdc321x_wdt.c 	if (!rdc321x_wdt_device.queue) {
queue              94 drivers/watchdog/rdc321x_wdt.c 		rdc321x_wdt_device.queue = 1;
queue             247 drivers/watchdog/rdc321x_wdt.c 	rdc321x_wdt_device.queue = 0;
queue             262 drivers/watchdog/rdc321x_wdt.c 	if (rdc321x_wdt_device.queue) {
queue             263 drivers/watchdog/rdc321x_wdt.c 		rdc321x_wdt_device.queue = 0;
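
cpu5wdt, mtx-1_wdt and rdc321x_wdt above share the same trick: queue is a plain int flag, and the timer callback only re-arms itself while the flag is set, so clearing it lets the timer cycle die out. A sketch with an invented device; timer_setup() at probe time is implied:

    #include <linux/timer.h>
    #include <linux/jiffies.h>

    static struct {
            int queue;                      /* 1 while re-arming is allowed */
            struct timer_list timer;
    } wdt;

    static void wdt_trigger(struct timer_list *t)
    {
            /* ... ping the hardware here ... */
            if (wdt.queue)
                    mod_timer(&wdt.timer, jiffies + HZ / 10);
    }

    static void wdt_start(void)
    {
            if (!wdt.queue) {
                    wdt.queue = 1;          /* first ping starts the cycle */
                    mod_timer(&wdt.timer, jiffies + HZ / 10);
            }
    }
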
queue             751 drivers/xen/pvcalls-back.c 	struct request_sock_queue *queue;
queue             777 drivers/xen/pvcalls-back.c 	queue = &icsk->icsk_accept_queue;
queue             778 drivers/xen/pvcalls-back.c 	data = READ_ONCE(queue->rskq_accept_head) != NULL;
queue             189 drivers/xen/xenbus/xenbus_dev_frontend.c static int queue_reply(struct list_head *queue, const void *data, size_t len)
queue             207 drivers/xen/xenbus/xenbus_dev_frontend.c 	list_add_tail(&rb->list, queue);
queue              85 fs/autofs/autofs_i.h 	wait_queue_head_t queue;
queue              36 fs/autofs/waitq.c 		wake_up_interruptible(&wq->queue);
queue             431 fs/autofs/waitq.c 		init_waitqueue_head(&wq->queue);
queue             479 fs/autofs/waitq.c 	wait_event_killable(wq->queue, wq->name.name == NULL);
queue             546 fs/autofs/waitq.c 	wake_up(&wq->queue);
queue            1567 fs/block_dev.c 		bdev->bd_queue = disk->queue;
queue            1635 fs/block_dev.c 			bdev->bd_bdi = bdi_get(disk->queue->backing_dev_info);
queue             521 fs/direct-io.c 		    !blk_poll(dio->bio_disk->queue, dio->bio_cookie, true))
queue             401 fs/dlm/recover.c static void set_lock_master(struct list_head *queue, int nodeid)
queue             405 fs/dlm/recover.c 	list_for_each_entry(lkb, queue, lkb_statequeue) {
queue             231 fs/gfs2/trace_gfs2.h 	TP_PROTO(const struct gfs2_holder *gh, int queue),
queue             233 fs/gfs2/trace_gfs2.h 	TP_ARGS(gh, queue),
queue             239 fs/gfs2/trace_gfs2.h 		__field(	int,	queue			)
queue             247 fs/gfs2/trace_gfs2.h 		__entry->queue	= queue;
queue             254 fs/gfs2/trace_gfs2.h 		  __entry->queue ? "" : "de",
queue             241 fs/lockd/clntproc.c static int nlm_wait_on_grace(wait_queue_head_t *queue)
queue             246 fs/lockd/clntproc.c 	prepare_to_wait(queue, &wait, TASK_INTERRUPTIBLE);
queue             253 fs/lockd/clntproc.c 	finish_wait(queue, &wait);
queue              27 fs/nfs/nfs4session.c static void nfs4_init_slot_table(struct nfs4_slot_table *tbl, const char *queue)
queue              31 fs/nfs/nfs4session.c 	rpc_init_priority_wait_queue(&tbl->slot_tbl_waitq, queue);
queue             351 fs/nfs/nfs4session.c 		const char *queue)
queue             353 fs/nfs/nfs4session.c 	nfs4_init_slot_table(tbl, queue);
queue              83 fs/nfs/nfs4session.h 		unsigned int max_reqs, const char *queue);
queue             217 fs/nfsd/blocklayout.c 	struct request_queue *q = bdev->bd_disk->queue;
queue             149 fs/nfsd/nfs4layouts.c 		blk_queue_scsi_passthrough(sb->s_bdev->bd_disk->queue))
queue            6120 fs/nfsd/nfs4state.c 	bool queue = false;
queue            6127 fs/nfsd/nfs4state.c 		queue = true;
queue            6131 fs/nfsd/nfs4state.c 	if (queue)
queue             110 fs/notify/notification.c 		goto queue;
queue             121 fs/notify/notification.c queue:
queue            2489 fs/ocfs2/dlm/dlmmaster.c 	struct list_head *queue;
queue            2508 fs/ocfs2/dlm/dlmmaster.c 		queue = dlm_list_idx_to_ptr(res, idx);
queue            2509 fs/ocfs2/dlm/dlmmaster.c 		list_for_each_entry(lock, queue, list) {
queue            2930 fs/ocfs2/dlm/dlmmaster.c 	struct list_head *queue = &res->granted;
queue            2939 fs/ocfs2/dlm/dlmmaster.c 		list_for_each_entry_safe(lock, next, queue, list) {
queue            2957 fs/ocfs2/dlm/dlmmaster.c 		queue++;
queue            2985 fs/ocfs2/dlm/dlmmaster.c 	struct list_head *queue = &res->granted;
queue            2995 fs/ocfs2/dlm/dlmmaster.c 		queue = dlm_list_idx_to_ptr(res, idx);
queue            2996 fs/ocfs2/dlm/dlmmaster.c 		list_for_each_entry(lock, queue, list) {
queue            1092 fs/ocfs2/dlm/dlmrecovery.c 	struct list_head *iter, *queue = &res->granted;
queue            1096 fs/ocfs2/dlm/dlmrecovery.c 		list_for_each(iter, queue)
queue            1098 fs/ocfs2/dlm/dlmrecovery.c 		queue++;
queue            1182 fs/ocfs2/dlm/dlmrecovery.c 					  int queue)
queue            1188 fs/ocfs2/dlm/dlmrecovery.c 	if (queue == DLM_BLOCKED_LIST)
queue            1217 fs/ocfs2/dlm/dlmrecovery.c 				 struct dlm_migratable_lockres *mres, int queue)
queue            1227 fs/ocfs2/dlm/dlmrecovery.c 	ml->list = queue;
queue            1230 fs/ocfs2/dlm/dlmrecovery.c 		dlm_prepare_lvb_for_migration(lock, mres, queue);
queue            1273 fs/ocfs2/dlm/dlmrecovery.c 	struct list_head *queue;
queue            1298 fs/ocfs2/dlm/dlmrecovery.c 		queue = dlm_list_idx_to_ptr(res, i);
queue            1299 fs/ocfs2/dlm/dlmrecovery.c 		list_for_each_entry(lock, queue, list) {
queue            1801 fs/ocfs2/dlm/dlmrecovery.c 	struct list_head *queue, *iter;
queue            1830 fs/ocfs2/dlm/dlmrecovery.c 		queue = dlm_list_num_to_pointer(res, ml->list);
queue            1891 fs/ocfs2/dlm/dlmrecovery.c 			if (tmpq != queue) {
queue            1909 fs/ocfs2/dlm/dlmrecovery.c 			list_move_tail(&lock->list, queue);
queue            1927 fs/ocfs2/dlm/dlmrecovery.c 			BUG_ON(queue != &res->converting);
queue            2000 fs/ocfs2/dlm/dlmrecovery.c 		list_for_each_entry(lock, queue, list) {
queue            2029 fs/ocfs2/dlm/dlmrecovery.c 				list_add(&newlock->list, queue);
queue            2031 fs/ocfs2/dlm/dlmrecovery.c 				list_add_tail(&newlock->list, queue);
queue            2057 fs/ocfs2/dlm/dlmrecovery.c 	struct list_head *queue;
queue            2076 fs/ocfs2/dlm/dlmrecovery.c 		queue = dlm_list_idx_to_ptr(res, i);
queue            2077 fs/ocfs2/dlm/dlmrecovery.c 		list_for_each_entry_safe(lock, next, queue, list) {
queue            2221 fs/ocfs2/dlm/dlmrecovery.c 	struct list_head *queue;
queue            2242 fs/ocfs2/dlm/dlmrecovery.c 		queue = dlm_list_idx_to_ptr(res, i);
queue            2243 fs/ocfs2/dlm/dlmrecovery.c 		list_for_each_entry(lock, queue, list) {
queue             403 fs/ocfs2/dlm/dlmunlock.c 	struct list_head *queue;
queue             441 fs/ocfs2/dlm/dlmunlock.c 	queue=&res->granted;
queue             466 fs/ocfs2/dlm/dlmunlock.c 		list_for_each_entry(lock, queue, list) {
queue             477 fs/ocfs2/dlm/dlmunlock.c 		queue++;
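
The ocfs2 dlm code above (dlmmaster.c line 2957, dlmrecovery.c line 1098, dlmunlock.c line 477) walks the three lock lists by literally incrementing the list_head pointer, which only works because granted, converting and blocked are adjacent members of the resource. A hedged sketch of that layout-dependent idiom, with an invented resource type:

    #include <linux/list.h>

    struct my_res {
            struct list_head granted;       /* queue starts here ... */
            struct list_head converting;    /* ... queue++ lands here ... */
            struct list_head blocked;       /* ... and finally here */
    };

    static void walk_all_lists(struct my_res *res)
    {
            struct list_head *queue = &res->granted;
            struct list_head *iter;
            int i;

            for (i = 0; i < 3; i++, queue++)
                    list_for_each(iter, queue)
                            ;               /* each lock on this list */
    }
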
queue             179 fs/ocfs2/dlmfs/userdlm.c 	int queue = 0;
queue             187 fs/ocfs2/dlmfs/userdlm.c 			queue = 1;
queue             191 fs/ocfs2/dlmfs/userdlm.c 			queue = 1;
queue             197 fs/ocfs2/dlmfs/userdlm.c 	if (queue)
queue             356 fs/xfs/xfs_buf.h 	return queue_dma_alignment(bt->bt_bdev->bd_disk->queue);
queue             188 include/crypto/algapi.h void crypto_init_queue(struct crypto_queue *queue, unsigned int max_qlen);
queue             189 include/crypto/algapi.h int crypto_enqueue_request(struct crypto_queue *queue,
queue             191 include/crypto/algapi.h struct crypto_async_request *crypto_dequeue_request(struct crypto_queue *queue);
queue             192 include/crypto/algapi.h static inline unsigned int crypto_queue_len(struct crypto_queue *queue)
queue             194 include/crypto/algapi.h 	return queue->qlen;
queue             350 include/crypto/algapi.h 	struct crypto_queue *queue)
queue             352 include/crypto/algapi.h 	return queue->backlog == &queue->list ? NULL :
queue             353 include/crypto/algapi.h 	       container_of(queue->backlog, struct crypto_async_request, list);
queue             356 include/crypto/algapi.h static inline int ablkcipher_enqueue_request(struct crypto_queue *queue,
queue             359 include/crypto/algapi.h 	return crypto_enqueue_request(queue, &request->base);
queue             363 include/crypto/algapi.h 	struct crypto_queue *queue)
queue             365 include/crypto/algapi.h 	return ablkcipher_request_cast(crypto_dequeue_request(queue));
queue              52 include/crypto/engine.h 	struct crypto_queue	queue;
queue             126 include/crypto/internal/aead.h static inline void aead_init_queue(struct aead_queue *queue,
queue             129 include/crypto/internal/aead.h 	crypto_init_queue(&queue->base, max_qlen);
queue             132 include/crypto/internal/aead.h static inline int aead_enqueue_request(struct aead_queue *queue,
queue             135 include/crypto/internal/aead.h 	return crypto_enqueue_request(&queue->base, &request->base);
queue             139 include/crypto/internal/aead.h 	struct aead_queue *queue)
queue             143 include/crypto/internal/aead.h 	req = crypto_dequeue_request(&queue->base);
queue             148 include/crypto/internal/aead.h static inline struct aead_request *aead_get_backlog(struct aead_queue *queue)
queue             152 include/crypto/internal/aead.h 	req = crypto_get_backlog(&queue->base);
queue             187 include/crypto/internal/hash.h static inline int ahash_enqueue_request(struct crypto_queue *queue,
queue             190 include/crypto/internal/hash.h 	return crypto_enqueue_request(queue, &request->base);
queue             194 include/crypto/internal/hash.h 	struct crypto_queue *queue)
queue             196 include/crypto/internal/hash.h 	return ahash_request_cast(crypto_dequeue_request(queue));
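
A usage sketch for the crypto_queue API above; the device struct and the locking are assumptions, while crypto_init_queue(), crypto_enqueue_request() and crypto_dequeue_request() are the declarations shown from include/crypto/algapi.h:

    #include <crypto/algapi.h>
    #include <linux/spinlock.h>

    struct my_crypto_dev {
            struct crypto_queue queue;
            spinlock_t lock;
    };

    static void my_dev_init(struct my_crypto_dev *dev)
    {
            spin_lock_init(&dev->lock);
            crypto_init_queue(&dev->queue, 32);     /* max_qlen = 32 */
    }

    static int my_enqueue(struct my_crypto_dev *dev,
                          struct crypto_async_request *req)
    {
            int ret;

            spin_lock_bh(&dev->lock);
            ret = crypto_enqueue_request(&dev->queue, req);
            spin_unlock_bh(&dev->lock);
            return ret;     /* -EINPROGRESS normally; -EBUSY/-ENOSPC when full */
    }

    static struct crypto_async_request *my_dequeue(struct my_crypto_dev *dev)
    {
            struct crypto_async_request *req;

            spin_lock_bh(&dev->lock);
            req = crypto_dequeue_request(&dev->queue);
            spin_unlock_bh(&dev->lock);
            return req;                     /* NULL when the queue is empty */
    }
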
queue              33 include/drm/drm_os_linux.h #define DRM_WAIT_ON( ret, queue, timeout, condition )		\
queue              37 include/drm/drm_os_linux.h 	add_wait_queue(&(queue), &entry);			\
queue              54 include/drm/drm_os_linux.h 	remove_wait_queue(&(queue), &entry);			\
queue              97 include/drm/drm_vblank.h 	wait_queue_head_t queue;
queue              48 include/drm/spsc_queue.h static inline void spsc_queue_init(struct spsc_queue *queue)
queue              50 include/drm/spsc_queue.h 	queue->head = NULL;
queue              51 include/drm/spsc_queue.h 	atomic_long_set(&queue->tail, (long)&queue->head);
queue              52 include/drm/spsc_queue.h 	atomic_set(&queue->job_count, 0);
queue              55 include/drm/spsc_queue.h static inline struct spsc_node *spsc_queue_peek(struct spsc_queue *queue)
queue              57 include/drm/spsc_queue.h 	return queue->head;
queue              60 include/drm/spsc_queue.h static inline int spsc_queue_count(struct spsc_queue *queue)
queue              62 include/drm/spsc_queue.h 	return atomic_read(&queue->job_count);
queue              65 include/drm/spsc_queue.h static inline bool spsc_queue_push(struct spsc_queue *queue, struct spsc_node *node)
queue              73 include/drm/spsc_queue.h 	tail = (struct spsc_node **)atomic_long_xchg(&queue->tail, (long)&node->next);
queue              75 include/drm/spsc_queue.h 	atomic_inc(&queue->job_count);
queue              85 include/drm/spsc_queue.h 	return tail == &queue->head;
queue              89 include/drm/spsc_queue.h static inline struct spsc_node *spsc_queue_pop(struct spsc_queue *queue)
queue              96 include/drm/spsc_queue.h 	node = READ_ONCE(queue->head);
queue             102 include/drm/spsc_queue.h 	WRITE_ONCE(queue->head, next);
queue             107 include/drm/spsc_queue.h 		if (atomic_long_cmpxchg(&queue->tail,
queue             108 include/drm/spsc_queue.h 				(long)&node->next, (long) &queue->head) != (long)&node->next) {
queue             112 include/drm/spsc_queue.h 			} while (unlikely(!(queue->head = READ_ONCE(node->next))));
queue             116 include/drm/spsc_queue.h 	atomic_dec(&queue->job_count);
queue              30 include/linux/blk-mq.h 	struct request_queue	*queue;
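
The drm spsc_queue above is a lock-free single-producer/single-consumer queue: push xchg's the tail (line 73), pop cmpxchg's it back to the head when the last node is removed (lines 107-108). A usage sketch; my_job is invented:

    #include <drm/spsc_queue.h>

    struct my_job {
            struct spsc_node node;
            int id;
    };

    static struct spsc_queue jobs;          /* spsc_queue_init(&jobs) once */

    /* producer side; returns true when the queue was empty before the push */
    static bool submit(struct my_job *job)
    {
            return spsc_queue_push(&jobs, &job->node);
    }

    /* consumer side; NULL when the queue is empty */
    static struct my_job *next_job(void)
    {
            struct spsc_node *node = spsc_queue_pop(&jobs);

            return node ? container_of(node, struct my_job, node) : NULL;
    }
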
queue             900 include/linux/blkdev.h 	return bdev->bd_disk->queue;	/* this is never NULL */
queue            1551 include/linux/blkdev.h 	struct blk_integrity *bi = &disk->queue->integrity;
queue              21 include/linux/bsg.h 	struct request_queue *queue;
queue             162 include/linux/dma/ipu-dma.h 	struct list_head	queue;		/* queued tx-descriptors	   */
queue             453 include/linux/fb.h 	struct work_struct queue;	/* Framebuffer event queue */
queue             206 include/linux/genhd.h 	struct request_queue *queue;
queue             531 include/linux/ide.h 	struct request_queue	*queue;	/* request queue */
queue              43 include/linux/if_eql.h 	slave_queue_t		queue;
queue              64 include/linux/iio/buffer-dma.h 	struct iio_dma_buffer_queue *queue;
queue             126 include/linux/iio/buffer-dma.h 	int (*submit)(struct iio_dma_buffer_queue *queue,
queue             128 include/linux/iio/buffer-dma.h 	void (*abort)(struct iio_dma_buffer_queue *queue);
queue             132 include/linux/iio/buffer-dma.h void iio_dma_buffer_block_list_abort(struct iio_dma_buffer_queue *queue,
queue             146 include/linux/iio/buffer-dma.h int iio_dma_buffer_init(struct iio_dma_buffer_queue *queue,
queue             148 include/linux/iio/buffer-dma.h void iio_dma_buffer_exit(struct iio_dma_buffer_queue *queue);
queue             149 include/linux/iio/buffer-dma.h void iio_dma_buffer_release(struct iio_dma_buffer_queue *queue);
queue             204 include/linux/kvm_host.h 	struct list_head queue;
queue             302 include/linux/kvm_host.h 		struct list_head queue;
queue             118 include/linux/mfd/ipaq-micro.h 	struct list_head queue;
queue             756 include/linux/netdevice.h 	ssize_t (*show)(struct netdev_rx_queue *queue, char *buf);
queue             757 include/linux/netdevice.h 	ssize_t (*store)(struct netdev_rx_queue *queue,
queue            1297 include/linux/netdevice.h 						  int queue, u8 *mac);
queue            1299 include/linux/netdevice.h 						   int queue, u16 vlan,
queue            3568 include/linux/netdevice.h 		struct netdev_rx_queue *queue)
queue            3570 include/linux/netdevice.h 	struct net_device *dev = queue->dev;
queue            3571 include/linux/netdevice.h 	int index = queue - dev->_rx;
queue              55 include/linux/pps_kernel.h 	wait_queue_head_t queue;		/* PPS event queue */
queue              39 include/linux/ptr_ring.h 	void **queue;
queue              50 include/linux/ptr_ring.h 	return r->queue[r->producer];
queue             105 include/linux/ptr_ring.h 	if (unlikely(!r->size) || r->queue[r->producer])
queue             112 include/linux/ptr_ring.h 	WRITE_ONCE(r->queue[r->producer++], ptr);
queue             171 include/linux/ptr_ring.h 		return READ_ONCE(r->queue[r->consumer_head]);
queue             196 include/linux/ptr_ring.h 		return !r->queue[READ_ONCE(r->consumer_head)];
queue             279 include/linux/ptr_ring.h 			r->queue[head--] = NULL;
queue             476 include/linux/ptr_ring.h 	r->batch = SMP_CACHE_BYTES * 2 / sizeof(*(r->queue));
queue             488 include/linux/ptr_ring.h 	r->queue = __ptr_ring_init_queue_alloc(size, gfp);
queue             489 include/linux/ptr_ring.h 	if (!r->queue)
queue             528 include/linux/ptr_ring.h 		r->queue[head--] = NULL;
queue             539 include/linux/ptr_ring.h 		if (r->queue[head]) {
queue             543 include/linux/ptr_ring.h 		r->queue[head] = batch[--n];
queue             557 include/linux/ptr_ring.h static inline void **__ptr_ring_swap_queue(struct ptr_ring *r, void **queue,
queue             567 include/linux/ptr_ring.h 			queue[producer++] = ptr;
queue             577 include/linux/ptr_ring.h 	old = r->queue;
queue             578 include/linux/ptr_ring.h 	r->queue = queue;
queue             593 include/linux/ptr_ring.h 	void **queue = __ptr_ring_init_queue_alloc(size, gfp);
queue             596 include/linux/ptr_ring.h 	if (!queue)
queue             602 include/linux/ptr_ring.h 	old = __ptr_ring_swap_queue(r, queue, size, gfp, destroy);
queue             670 include/linux/ptr_ring.h 	kvfree(r->queue);
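
Basic lifecycle for the ptr_ring above; note that NULL can never be queued, since a NULL slot is how the ring marks "empty" (lines 105 and 196). The ring size and payload are placeholders:

    #include <linux/ptr_ring.h>

    static int ptr_ring_demo(void *payload) /* payload must be non-NULL */
    {
            struct ptr_ring ring;
            int err;

            err = ptr_ring_init(&ring, 64, GFP_KERNEL);     /* 64 slots */
            if (err)
                    return err;

            if (ptr_ring_produce(&ring, payload) == 0)      /* -ENOSPC if full */
                    payload = ptr_ring_consume(&ring);      /* NULL if empty */

            ptr_ring_cleanup(&ring, NULL);  /* NULL: no per-entry destructor */
            return 0;
    }
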
queue             257 include/linux/signal.h extern void flush_sigqueue(struct sigpending *queue);
queue            3404 include/linux/skbuff.h #define skb_queue_walk(queue, skb) \
queue            3405 include/linux/skbuff.h 		for (skb = (queue)->next;					\
queue            3406 include/linux/skbuff.h 		     skb != (struct sk_buff *)(queue);				\
queue            3409 include/linux/skbuff.h #define skb_queue_walk_safe(queue, skb, tmp)					\
queue            3410 include/linux/skbuff.h 		for (skb = (queue)->next, tmp = skb->next;			\
queue            3411 include/linux/skbuff.h 		     skb != (struct sk_buff *)(queue);				\
queue            3414 include/linux/skbuff.h #define skb_queue_walk_from(queue, skb)						\
queue            3415 include/linux/skbuff.h 		for (; skb != (struct sk_buff *)(queue);			\
queue            3430 include/linux/skbuff.h #define skb_queue_walk_from_safe(queue, skb, tmp)				\
queue            3432 include/linux/skbuff.h 		     skb != (struct sk_buff *)(queue);				\
queue            3435 include/linux/skbuff.h #define skb_queue_reverse_walk(queue, skb) \
queue            3436 include/linux/skbuff.h 		for (skb = (queue)->prev;					\
queue            3437 include/linux/skbuff.h 		     skb != (struct sk_buff *)(queue);				\
queue            3440 include/linux/skbuff.h #define skb_queue_reverse_walk_safe(queue, skb, tmp)				\
queue            3441 include/linux/skbuff.h 		for (skb = (queue)->prev, tmp = skb->prev;			\
queue            3442 include/linux/skbuff.h 		     skb != (struct sk_buff *)(queue);				\
queue            3445 include/linux/skbuff.h #define skb_queue_reverse_walk_from_safe(queue, skb, tmp)			\
queue            3447 include/linux/skbuff.h 		     skb != (struct sk_buff *)(queue);				\
queue            3467 include/linux/skbuff.h 					  struct sk_buff_head *queue,
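
Of the walkers above, the _safe variants (lines 3409-3412) are the ones to use when entries may be unlinked mid-walk. A hedged sketch; the caller is assumed to hold the queue lock, as the double-underscore helpers require:

    #include <linux/skbuff.h>

    /* caller holds queue->lock; drops every skb shorter than min_len */
    static void drop_short(struct sk_buff_head *queue, unsigned int min_len)
    {
            struct sk_buff *skb, *tmp;

            skb_queue_walk_safe(queue, skb, tmp) {
                    if (skb->len < min_len) {
                            __skb_unlink(skb, queue);
                            kfree_skb(skb);
                    }
            }
    }
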
queue              57 include/linux/soc/ixp4xx/qmgr.h void qmgr_put_entry(unsigned int queue, u32 val);
queue              58 include/linux/soc/ixp4xx/qmgr.h u32 qmgr_get_entry(unsigned int queue);
queue              59 include/linux/soc/ixp4xx/qmgr.h int qmgr_stat_empty(unsigned int queue);
queue              60 include/linux/soc/ixp4xx/qmgr.h int qmgr_stat_below_low_watermark(unsigned int queue);
queue              61 include/linux/soc/ixp4xx/qmgr.h int qmgr_stat_full(unsigned int queue);
queue              62 include/linux/soc/ixp4xx/qmgr.h int qmgr_stat_overflow(unsigned int queue);
queue              63 include/linux/soc/ixp4xx/qmgr.h void qmgr_release_queue(unsigned int queue);
queue              64 include/linux/soc/ixp4xx/qmgr.h void qmgr_set_irq(unsigned int queue, int src,
queue              66 include/linux/soc/ixp4xx/qmgr.h void qmgr_enable_irq(unsigned int queue);
queue              67 include/linux/soc/ixp4xx/qmgr.h void qmgr_disable_irq(unsigned int queue);
queue              74 include/linux/soc/ixp4xx/qmgr.h int qmgr_request_queue(unsigned int queue, unsigned int len /* dwords */,
queue              79 include/linux/soc/ixp4xx/qmgr.h int __qmgr_request_queue(unsigned int queue, unsigned int len /* dwords */,
queue              82 include/linux/soc/ixp4xx/qmgr.h #define qmgr_request_queue(queue, len, nearly_empty_watermark,		\
queue              84 include/linux/soc/ixp4xx/qmgr.h 	__qmgr_request_queue(queue, len, nearly_empty_watermark,	\
queue             555 include/linux/spi/spi.h 	struct list_head		queue;
queue             907 include/linux/spi/spi.h 	struct list_head	queue;
queue             109 include/linux/sunrpc/cache.h 	struct list_head	queue;
queue             232 include/linux/sunrpc/sched.h void		rpc_sleep_on_timeout(struct rpc_wait_queue *queue,
queue             238 include/linux/sunrpc/sched.h void		rpc_sleep_on_priority_timeout(struct rpc_wait_queue *queue,
queue             456 include/linux/tcp.h 	struct request_sock_queue *queue = &inet_csk(sk)->icsk_accept_queue;
queue             459 include/linux/tcp.h 	queue->fastopenq.max_qlen = min_t(unsigned int, backlog, somaxconn);
queue             489 include/linux/thunderbolt.h 	struct list_head queue;
queue             142 include/linux/usb/gadget.h 	int (*queue) (struct usb_ep *ep, struct usb_request *req,
queue             334 include/linux/visorbus.h int visorchannel_signalremove(struct visorchannel *channel, u32 queue,
queue             336 include/linux/visorbus.h int visorchannel_signalinsert(struct visorchannel *channel, u32 queue,
queue             338 include/linux/visorbus.h bool visorchannel_signalempty(struct visorchannel *channel, u32 queue);
queue              75 include/media/drv-intf/saa7146_vv.h 	struct list_head	queue;
queue              48 include/media/dvb_ringbuffer.h 	wait_queue_head_t queue;
queue             250 include/media/media-request.h 	void (*queue)(struct media_request_object *object);
queue             276 include/media/v4l2-dev.h 	struct vb2_queue *queue;
queue             100 include/media/v4l2-mem2mem.h 	struct list_head		queue;
queue              77 include/media/videobuf-core.h 	struct list_head        queue;
queue             185 include/net/flow_offload.h 		} queue;
queue              33 include/net/fq.h 	struct sk_buff_head queue;
queue              52 include/net/fq_impl.h 	skb = __skb_dequeue(&flow->queue);
queue             184 include/net/fq_impl.h 	__skb_queue_tail(&flow->queue, skb);
queue             219 include/net/fq_impl.h 	skb_queue_walk_safe(&flow->queue, skb, tmp) {
queue             223 include/net/fq_impl.h 		__skb_unlink(skb, &flow->queue);
queue             294 include/net/fq_impl.h 	__skb_queue_head_init(&flow->queue);
queue             105 include/net/garp.h 	struct sk_buff_head	queue;
queue             222 include/net/ip.h 			      struct sk_buff_head *queue,
queue            1001 include/net/ipv6.h struct sk_buff *__ip6_make_skb(struct sock *sk, struct sk_buff_head *queue,
queue            5145 include/net/mac80211.h void ieee80211_wake_queue(struct ieee80211_hw *hw, int queue);
queue            5154 include/net/mac80211.h void ieee80211_stop_queue(struct ieee80211_hw *hw, int queue);
queue            5166 include/net/mac80211.h int ieee80211_queue_stopped(struct ieee80211_hw *hw, int queue);
queue             119 include/net/mrp.h 	struct sk_buff_head	queue;
queue             103 include/net/netfilter/nf_queue.h nfqueue_hash(const struct sk_buff *skb, u16 queue, u16 queues_total, u8 family,
queue             108 include/net/netfilter/nf_queue.h 		queue += reciprocal_scale(hash_v4(ip_hdr(skb), initval),
queue             112 include/net/netfilter/nf_queue.h 		queue += reciprocal_scale(hash_v6(ipv6_hdr(skb), initval),
queue             116 include/net/netfilter/nf_queue.h 		queue += reciprocal_scale(hash_bridge(skb, initval),
queue             121 include/net/netfilter/nf_queue.h 	return queue;
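
nfqueue_hash() above fans flows out over a contiguous block of queues by adding reciprocal_scale(hash, queues_total) to the base queue number, so the result always stays inside [queue, queue + queues_total). A one-function analogue with an invented hash input:

    #include <linux/kernel.h>

    static u16 pick_queue(u32 flow_hash, u16 base, u16 total)
    {
            return base + reciprocal_scale(flow_hash, total);
    }
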
queue             702 include/net/pkt_cls.h 	unsigned long queue;
queue             157 include/net/pkt_sched.h 	s32 queue;
queue             166 include/net/pkt_sched.h 	s32 queue;
queue             181 include/net/request_sock.h void reqsk_queue_alloc(struct request_sock_queue *queue);
queue             186 include/net/request_sock.h static inline bool reqsk_queue_empty(const struct request_sock_queue *queue)
queue             188 include/net/request_sock.h 	return READ_ONCE(queue->rskq_accept_head) == NULL;
queue             191 include/net/request_sock.h static inline struct request_sock *reqsk_queue_remove(struct request_sock_queue *queue,
queue             196 include/net/request_sock.h 	spin_lock_bh(&queue->rskq_lock);
queue             197 include/net/request_sock.h 	req = queue->rskq_accept_head;
queue             200 include/net/request_sock.h 		WRITE_ONCE(queue->rskq_accept_head, req->dl_next);
queue             201 include/net/request_sock.h 		if (queue->rskq_accept_head == NULL)
queue             202 include/net/request_sock.h 			queue->rskq_accept_tail = NULL;
queue             204 include/net/request_sock.h 	spin_unlock_bh(&queue->rskq_lock);
queue             208 include/net/request_sock.h static inline void reqsk_queue_removed(struct request_sock_queue *queue,
queue             212 include/net/request_sock.h 		atomic_dec(&queue->young);
queue             213 include/net/request_sock.h 	atomic_dec(&queue->qlen);
queue             216 include/net/request_sock.h static inline void reqsk_queue_added(struct request_sock_queue *queue)
queue             218 include/net/request_sock.h 	atomic_inc(&queue->young);
queue             219 include/net/request_sock.h 	atomic_inc(&queue->qlen);
queue             222 include/net/request_sock.h static inline int reqsk_queue_len(const struct request_sock_queue *queue)
queue             224 include/net/request_sock.h 	return atomic_read(&queue->qlen);
queue             227 include/net/request_sock.h static inline int reqsk_queue_len_young(const struct request_sock_queue *queue)
queue             229 include/net/request_sock.h 	return atomic_read(&queue->young);
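
For the accept-queue helpers above: emptiness is checked locklessly (the READ_ONCE at line 188) while removal takes rskq_lock (lines 196-204). A short poll-side check, assumed typical and mirroring the pvcalls-back.c entry earlier in this listing:

    #include <net/inet_connection_sock.h>
    #include <net/request_sock.h>

    static bool listener_has_pending(struct sock *sk)
    {
            return !reqsk_queue_empty(&inet_csk(sk)->icsk_accept_queue);
    }
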
queue             103 include/net/rose.h 	struct sk_buff_head	queue;
queue             140 include/net/sctp/ulpevent.h 	struct net *net, struct sk_buff_head *queue,
queue             142 include/net/x25.h 	struct sk_buff_head	queue;
queue             100 include/rdma/rdmavt_cq.h 	struct rvt_cq_wc *queue;
queue             941 include/rdma/rdmavt_qp.h 	       RDMA_READ_UAPI_ATOMIC(cq->queue->tail) :
queue             957 include/rdma/rdmavt_qp.h 	       RDMA_READ_UAPI_ATOMIC(cq->queue->head) :
queue             246 include/scsi/libiscsi.h 	struct kfifo		queue;		/* FIFO Queue */
queue              44 include/sound/asequencer.h #define snd_seq_ev_is_direct(ev)	((ev)->queue == SNDRV_SEQ_QUEUE_DIRECT)
queue             252 include/uapi/sound/asequencer.h 	unsigned char queue;			/* affected queue */
queue             278 include/uapi/sound/asequencer.h 	unsigned char queue;		/* schedule queue */
queue             291 include/uapi/sound/asequencer.h 		struct snd_seq_ev_queue_control queue;
queue             397 include/uapi/sound/asequencer.h 	unsigned char queue;	/* Queue for REMOVE_DEST */
queue             476 include/uapi/sound/asequencer.h 	int queue;		/* queue id */
queue             492 include/uapi/sound/asequencer.h 	int queue;			/* queue id */
queue             504 include/uapi/sound/asequencer.h 	int queue;			/* sequencer queue */
queue             520 include/uapi/sound/asequencer.h 	int queue;			/* sequencer queue */
queue             533 include/uapi/sound/asequencer.h 	int queue;		/* sequencer queue */
queue             551 include/uapi/sound/asequencer.h 	unsigned char queue;		/* input time-stamp queue (optional) */
queue             566 include/uapi/sound/asequencer.h 	unsigned char queue;	/* R/O: result */
queue             769 include/uapi/sound/asound.h 	unsigned int queue;		/* used queue size */
queue            1976 ipc/sem.c      	struct sem_queue queue;
queue            2088 ipc/sem.c      	queue.sops = sops;
queue            2089 ipc/sem.c      	queue.nsops = nsops;
queue            2090 ipc/sem.c      	queue.undo = un;
queue            2091 ipc/sem.c      	queue.pid = task_tgid(current);
queue            2092 ipc/sem.c      	queue.alter = alter;
queue            2093 ipc/sem.c      	queue.dupsop = dupsop;
queue            2095 ipc/sem.c      	error = perform_atomic_semop(sma, &queue);
queue            2128 ipc/sem.c      				list_add_tail(&queue.list,
queue            2132 ipc/sem.c      				list_add_tail(&queue.list,
queue            2136 ipc/sem.c      			list_add_tail(&queue.list, &curr->pending_const);
queue            2143 ipc/sem.c      			list_add_tail(&queue.list, &sma->pending_alter);
queue            2145 ipc/sem.c      			list_add_tail(&queue.list, &sma->pending_const);
queue            2151 ipc/sem.c      		WRITE_ONCE(queue.status, -EINTR);
queue            2152 ipc/sem.c      		queue.sleeper = current;
queue            2174 ipc/sem.c      		error = READ_ONCE(queue.status);
queue            2192 ipc/sem.c      		error = READ_ONCE(queue.status);
queue            2208 ipc/sem.c      	unlink_queue(sma, &queue);
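
ipc/sem.c above allocates the wait node on the sleeper's own stack (line 1976), and the waker hands the result back through queue.status, read with READ_ONCE after wakeup (lines 2151, 2174). A schematic sketch of that on-stack waiter pattern, with invented types and the lock/schedule dance elided:

    #include <linux/list.h>
    #include <linux/sched.h>

    struct waiter {
            struct list_head list;
            int status;             /* -EINTR until the waker writes it */
            struct task_struct *sleeper;
    };

    static int wait_for_grant(struct list_head *pending)
    {
            struct waiter w = { .status = -EINTR, .sleeper = current };

            INIT_LIST_HEAD(&w.list);
            list_add_tail(&w.list, pending);        /* under the object's lock */
            /* ... unlock, schedule(), relock, list_del(&w.list) ... */
            return READ_ONCE(w.status);
    }
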
queue             708 kernel/audit.c 			      struct sk_buff_head *queue,
queue             720 kernel/audit.c 	while ((skb = skb_dequeue(queue))) {
queue             749 kernel/audit.c 				skb_queue_head(queue, skb);
queue              63 kernel/bpf/cpumap.c 	struct ptr_ring *queue;
queue             240 kernel/bpf/cpumap.c 		__cpu_map_ring_cleanup(rcpu->queue);
queue             241 kernel/bpf/cpumap.c 		ptr_ring_cleanup(rcpu->queue, NULL);
queue             242 kernel/bpf/cpumap.c 		kfree(rcpu->queue);
queue             260 kernel/bpf/cpumap.c 	while (!kthread_should_stop() || !__ptr_ring_empty(rcpu->queue)) {
queue             268 kernel/bpf/cpumap.c 		if (__ptr_ring_empty(rcpu->queue)) {
queue             271 kernel/bpf/cpumap.c 			if (__ptr_ring_empty(rcpu->queue)) {
queue             286 kernel/bpf/cpumap.c 		n = ptr_ring_consume_batched(rcpu->queue, frames, CPUMAP_BATCH);
queue             361 kernel/bpf/cpumap.c 	rcpu->queue = kzalloc_node(sizeof(*rcpu->queue), gfp, numa);
queue             362 kernel/bpf/cpumap.c 	if (!rcpu->queue)
queue             365 kernel/bpf/cpumap.c 	err = ptr_ring_init(rcpu->queue, qsize, gfp);
queue             389 kernel/bpf/cpumap.c 	ptr_ring_cleanup(rcpu->queue, NULL);
queue             391 kernel/bpf/cpumap.c 	kfree(rcpu->queue);
queue             613 kernel/bpf/cpumap.c 	q = rcpu->queue;
queue             344 kernel/locking/qspinlock.c 		goto queue;
queue             366 kernel/locking/qspinlock.c 		goto queue;
queue             396 kernel/locking/qspinlock.c queue:
queue            1011 kernel/locking/rwsem.c 		goto queue;
queue            1038 kernel/locking/rwsem.c queue:
queue             108 kernel/padata.c 	struct padata_parallel_queue *queue;
queue             148 kernel/padata.c 	queue = per_cpu_ptr(pd->pqueue, target_cpu);
queue             150 kernel/padata.c 	spin_lock(&queue->parallel.lock);
queue             151 kernel/padata.c 	list_add_tail(&padata->list, &queue->parallel.list);
queue             152 kernel/padata.c 	spin_unlock(&queue->parallel.lock);
queue             154 kernel/padata.c 	queue_work(pinst->parallel_wq, &queue->work);
queue              83 kernel/sched/rt.c 		INIT_LIST_HEAD(array->queue + i);
queue            1225 kernel/sched/rt.c 	if (list_empty(array->queue + rt_se_prio(rt_se)))
queue            1236 kernel/sched/rt.c 	struct list_head *queue = array->queue + rt_se_prio(rt_se);
queue            1253 kernel/sched/rt.c 			list_add(&rt_se->run_list, queue);
queue            1255 kernel/sched/rt.c 			list_add_tail(&rt_se->run_list, queue);
queue            1361 kernel/sched/rt.c 		struct list_head *queue = array->queue + rt_se_prio(rt_se);
queue            1364 kernel/sched/rt.c 			list_move(&rt_se->run_list, queue);
queue            1366 kernel/sched/rt.c 			list_move_tail(&rt_se->run_list, queue);
queue            1544 kernel/sched/rt.c 	struct list_head *queue;
queue            1550 kernel/sched/rt.c 	queue = array->queue + idx;
queue            1551 kernel/sched/rt.c 	next = list_entry(queue->next, struct sched_rt_entity, run_list);
queue             236 kernel/sched/sched.h 	struct list_head queue[MAX_RT_PRIO];
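
The kernel/sched/rt.c hits index into an array of list heads, one FIFO per priority, and pick the next entity from the lowest-numbered non-empty level; the kernel pairs this array with a bitmap of non-empty levels (not visible in these hits). A self-contained sketch of that O(1) pick, assuming a single-word bitmap (so at most 32 levels here, unlike MAX_RT_PRIO) and the GCC/Clang __builtin_ctzl builtin:

	#include <stdio.h>
	#include <stddef.h>

	#define MAX_PRIO 32	/* single-word bitmap; the kernel uses more levels */

	struct list_head { struct list_head *next, *prev; };

	static void INIT_LIST_HEAD(struct list_head *h) { h->next = h->prev = h; }

	static void list_add_tail(struct list_head *n, struct list_head *h)
	{
		n->prev = h->prev;
		n->next = h;
		h->prev->next = n;
		h->prev = n;
	}

	static void list_del(struct list_head *n)
	{
		n->prev->next = n->next;
		n->next->prev = n->prev;
		n->next = n->prev = n;
	}

	static int list_empty(const struct list_head *h) { return h->next == h; }

	#define container_of(ptr, type, member) \
		((type *)((char *)(ptr) - offsetof(type, member)))

	struct prio_array {
		unsigned long bitmap;			/* bit set => level non-empty */
		struct list_head queue[MAX_PRIO];	/* one FIFO per priority */
	};

	struct task { struct list_head run_list; int prio; const char *name; };

	static void enqueue(struct prio_array *a, struct task *t)
	{
		list_add_tail(&t->run_list, &a->queue[t->prio]);
		a->bitmap |= 1UL << t->prio;
	}

	static struct task *pick_next(struct prio_array *a)
	{
		if (!a->bitmap)
			return NULL;
		int idx = __builtin_ctzl(a->bitmap);	/* lowest index = highest prio */
		struct task *t = container_of(a->queue[idx].next, struct task, run_list);
		list_del(&t->run_list);
		if (list_empty(&a->queue[idx]))
			a->bitmap &= ~(1UL << idx);
		return t;
	}

	int main(void)
	{
		struct prio_array a = { 0 };
		for (int i = 0; i < MAX_PRIO; i++)
			INIT_LIST_HEAD(&a.queue[i]);

		struct task t1 = { .prio = 10, .name = "low" };
		struct task t2 = { .prio = 3, .name = "high" };
		enqueue(&a, &t1);
		enqueue(&a, &t2);

		struct task *t;
		while ((t = pick_next(&a)))
			printf("run %s (prio %d)\n", t->name, t->prio);
		return 0;
	}
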
queue             460 kernel/signal.c void flush_sigqueue(struct sigpending *queue)
queue             464 kernel/signal.c 	sigemptyset(&queue->signal);
queue             465 kernel/signal.c 	while (!list_empty(&queue->list)) {
queue             466 kernel/signal.c 	q = list_entry(queue->list.next, struct sigqueue, list);
queue             414 mm/page_io.c   		if (!blk_poll(disk->queue, qc, true))
queue             250 net/802/garp.c 	skb_queue_tail(&app->queue, app->pdu);
queue             258 net/802/garp.c 	while ((skb = skb_dequeue(&app->queue)))
queue             291 net/802/garp.c 			goto queue;
queue             293 net/802/garp.c 			goto queue;
queue             298 net/802/garp.c 		goto queue;
queue             305 net/802/garp.c queue:
queue             582 net/802/garp.c 	skb_queue_head_init(&app->queue);
queue             342 net/802/mrp.c  	skb_queue_tail(&app->queue, app->pdu);
queue             350 net/802/mrp.c  	while ((skb = skb_dequeue(&app->queue)))
queue             412 net/802/mrp.c  			goto queue;
queue             422 net/802/mrp.c  			goto queue;
queue             433 net/802/mrp.c  			goto queue;
queue             462 net/802/mrp.c  queue:
queue             863 net/802/mrp.c  	skb_queue_head_init(&app->queue);
queue             227 net/atm/common.c 	struct sk_buff_head queue, *rq;
queue             231 net/atm/common.c 	__skb_queue_head_init(&queue);
queue             235 net/atm/common.c 	skb_queue_splice_init(rq, &queue);
queue             238 net/atm/common.c 	skb_queue_walk_safe(&queue, skb, tmp) {
queue             239 net/atm/common.c 		__skb_unlink(skb, &queue);
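
The net/atm/common.c fragment shows a pattern worth calling out: a private queue head is initialized on the stack, the shared receive queue is spliced into it in O(1) while the lock is held, and the entries are then walked and freed with no lock at all. A minimal pthread sketch of splice-under-lock, process-unlocked; the node and locked_q types are illustrative:

	#include <pthread.h>
	#include <stdio.h>
	#include <stdlib.h>

	struct node { struct node *next; int v; };

	struct locked_q {
		pthread_mutex_t lock;
		struct node *head, *tail;
	};

	static void enqueue(struct locked_q *q, int v)
	{
		struct node *n = malloc(sizeof(*n));

		n->next = NULL;
		n->v = v;
		pthread_mutex_lock(&q->lock);
		if (q->tail)
			q->tail->next = n;
		else
			q->head = n;
		q->tail = n;
		pthread_mutex_unlock(&q->lock);
	}

	/* Move the whole list out in O(1) while holding the lock. */
	static struct node *splice_init(struct locked_q *q)
	{
		pthread_mutex_lock(&q->lock);
		struct node *all = q->head;
		q->head = q->tail = NULL;
		pthread_mutex_unlock(&q->lock);
		return all;
	}

	static void process_all(struct locked_q *q)
	{
		struct node *n = splice_init(q), *next;

		/* Lock no longer held: each node can be handled (and freed)
		 * without blocking concurrent producers. */
		for (; n; n = next) {
			next = n->next;
			printf("got %d\n", n->v);
			free(n);
		}
	}

	int main(void)
	{
		struct locked_q q = { PTHREAD_MUTEX_INITIALIZER, NULL, NULL };

		for (int i = 0; i < 3; i++)
			enqueue(&q, i);
		process_all(&q);
		return 0;
	}
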
queue            3720 net/bluetooth/hci_core.c static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
queue            3749 net/bluetooth/hci_core.c 		skb_queue_tail(queue, skb);
queue            3761 net/bluetooth/hci_core.c 		spin_lock_bh(&queue->lock);
queue            3763 net/bluetooth/hci_core.c 		__skb_queue_tail(queue, skb);
queue            3775 net/bluetooth/hci_core.c 			__skb_queue_tail(queue, skb);
queue            3778 net/bluetooth/hci_core.c 		spin_unlock_bh(&queue->lock);
queue             167 net/core/datagram.c 					  struct sk_buff_head *queue,
queue             183 net/core/datagram.c 	*last = queue->prev;
queue             184 net/core/datagram.c 	skb_queue_walk(queue, skb) {
queue             200 net/core/datagram.c 			__skb_unlink(skb, queue);
queue             251 net/core/datagram.c 	struct sk_buff_head *queue = &sk->sk_receive_queue;
queue             269 net/core/datagram.c 		spin_lock_irqsave(&queue->lock, cpu_flags);
queue             270 net/core/datagram.c 		skb = __skb_try_recv_from_queue(sk, queue, flags, destructor,
queue             272 net/core/datagram.c 		spin_unlock_irqrestore(&queue->lock, cpu_flags);
queue            8895 net/core/dev.c 				  struct netdev_queue *queue, void *_unused)
queue            8898 net/core/dev.c 	spin_lock_init(&queue->_xmit_lock);
queue            8899 net/core/dev.c 	lockdep_set_class(&queue->_xmit_lock, &dev->qdisc_xmit_lock_key);
queue            8900 net/core/dev.c 	queue->xmit_lock_owner = -1;
queue            8901 net/core/dev.c 	netdev_queue_numa_node_write(queue, NUMA_NO_NODE);
queue            8902 net/core/dev.c 	queue->dev = dev;
queue            8904 net/core/dev.c 	dql_init(&queue->dql, HZ);
queue            9422 net/core/dev.c 	struct netdev_queue *queue = dev_ingress_queue(dev);
queue            9425 net/core/dev.c 	if (queue)
queue            9426 net/core/dev.c 		return queue;
queue            9427 net/core/dev.c 	queue = kzalloc(sizeof(*queue), GFP_KERNEL);
queue            9428 net/core/dev.c 	if (!queue)
queue            9430 net/core/dev.c 	netdev_init_one_queue(dev, queue, NULL);
queue            9431 net/core/dev.c 	RCU_INIT_POINTER(queue->qdisc, &noop_qdisc);
queue            9432 net/core/dev.c 	queue->qdisc_sleeping = &noop_qdisc;
queue            9433 net/core/dev.c 	rcu_assign_pointer(dev->ingress_queue, queue);
queue            9435 net/core/dev.c 	return queue;
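
The dev_ingress_queue_create() hits above show lazy creation of a singleton: return the queue if it already exists, otherwise allocate, fully initialize, and only then publish with rcu_assign_pointer so lockless readers never observe a half-built object (creators are serialized by the rtnl lock in the kernel). A userspace analogue using C11 acquire/release atomics in place of rcu_dereference()/rcu_assign_pointer(), and a mutex in place of rtnl; all names here are illustrative:

	#include <pthread.h>
	#include <stdatomic.h>
	#include <stdio.h>
	#include <stdlib.h>

	struct ingress_q { int qlen; };

	static _Atomic(struct ingress_q *) ingress;	/* read locklessly */
	static pthread_mutex_t create_lock = PTHREAD_MUTEX_INITIALIZER;

	static struct ingress_q *ingress_get(void)
	{
		/* acquire-load pairs with the release-store below */
		return atomic_load_explicit(&ingress, memory_order_acquire);
	}

	static struct ingress_q *ingress_get_or_create(void)
	{
		struct ingress_q *q = ingress_get();

		if (q)
			return q;

		pthread_mutex_lock(&create_lock);	/* serialize creators */
		q = atomic_load_explicit(&ingress, memory_order_relaxed);
		if (!q) {
			q = calloc(1, sizeof(*q));
			if (q) {
				q->qlen = 0;	/* fully initialize first... */
				atomic_store_explicit(&ingress, q,
						      memory_order_release);	/* ...then publish */
			}
		}
		pthread_mutex_unlock(&create_lock);
		return q;
	}

	int main(void)
	{
		printf("%p\n", (void *)ingress_get_or_create());
		printf("%p\n", (void *)ingress_get_or_create());	/* same pointer */
		return 0;
	}
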
queue            3100 net/core/ethtool.c 			act->queue.ctx = input->rss_ctx;
queue            3102 net/core/ethtool.c 		act->queue.vf = ethtool_get_flow_spec_ring_vf(fs->ring_cookie);
queue            3103 net/core/ethtool.c 		act->queue.index = ethtool_get_flow_spec_ring(fs->ring_cookie);
queue             660 net/core/net-sysfs.c 	struct netdev_rx_queue *queue = to_rx_queue(kobj);
queue             665 net/core/net-sysfs.c 	return attribute->show(queue, buf);
queue             672 net/core/net-sysfs.c 	struct netdev_rx_queue *queue = to_rx_queue(kobj);
queue             677 net/core/net-sysfs.c 	return attribute->store(queue, buf, count);
queue             686 net/core/net-sysfs.c static ssize_t show_rps_map(struct netdev_rx_queue *queue, char *buf)
queue             696 net/core/net-sysfs.c 	map = rcu_dereference(queue->rps_map);
queue             708 net/core/net-sysfs.c static ssize_t store_rps_map(struct netdev_rx_queue *queue,
queue             748 net/core/net-sysfs.c 	old_map = rcu_dereference_protected(queue->rps_map,
queue             750 net/core/net-sysfs.c 	rcu_assign_pointer(queue->rps_map, map);
queue             766 net/core/net-sysfs.c static ssize_t show_rps_dev_flow_table_cnt(struct netdev_rx_queue *queue,
queue             773 net/core/net-sysfs.c 	flow_table = rcu_dereference(queue->rps_flow_table);
queue             788 net/core/net-sysfs.c static ssize_t store_rps_dev_flow_table_cnt(struct netdev_rx_queue *queue,
queue             836 net/core/net-sysfs.c 	old_table = rcu_dereference_protected(queue->rps_flow_table,
queue             838 net/core/net-sysfs.c 	rcu_assign_pointer(queue->rps_flow_table, table);
queue             866 net/core/net-sysfs.c 	struct netdev_rx_queue *queue = to_rx_queue(kobj);
queue             871 net/core/net-sysfs.c 	map = rcu_dereference_protected(queue->rps_map, 1);
queue             873 net/core/net-sysfs.c 		RCU_INIT_POINTER(queue->rps_map, NULL);
queue             877 net/core/net-sysfs.c 	flow_table = rcu_dereference_protected(queue->rps_flow_table, 1);
queue             879 net/core/net-sysfs.c 		RCU_INIT_POINTER(queue->rps_flow_table, NULL);
queue             885 net/core/net-sysfs.c 	dev_put(queue->dev);
queue             890 net/core/net-sysfs.c 	struct netdev_rx_queue *queue = to_rx_queue(kobj);
queue             891 net/core/net-sysfs.c 	struct device *dev = &queue->dev->dev;
queue             918 net/core/net-sysfs.c 	struct netdev_rx_queue *queue = dev->_rx + index;
queue             919 net/core/net-sysfs.c 	struct kobject *kobj = &queue->kobj;
queue             925 net/core/net-sysfs.c 	dev_hold(queue->dev);
queue             990 net/core/net-sysfs.c 	ssize_t (*show)(struct netdev_queue *queue, char *buf);
queue             991 net/core/net-sysfs.c 	ssize_t (*store)(struct netdev_queue *queue,
queue            1004 net/core/net-sysfs.c 	struct netdev_queue *queue = to_netdev_queue(kobj);
queue            1009 net/core/net-sysfs.c 	return attribute->show(queue, buf);
queue            1018 net/core/net-sysfs.c 	struct netdev_queue *queue = to_netdev_queue(kobj);
queue            1023 net/core/net-sysfs.c 	return attribute->store(queue, buf, count);
queue            1031 net/core/net-sysfs.c static ssize_t tx_timeout_show(struct netdev_queue *queue, char *buf)
queue            1035 net/core/net-sysfs.c 	spin_lock_irq(&queue->_xmit_lock);
queue            1036 net/core/net-sysfs.c 	trans_timeout = queue->trans_timeout;
queue            1037 net/core/net-sysfs.c 	spin_unlock_irq(&queue->_xmit_lock);
queue            1042 net/core/net-sysfs.c static unsigned int get_netdev_queue_index(struct netdev_queue *queue)
queue            1044 net/core/net-sysfs.c 	struct net_device *dev = queue->dev;
queue            1047 net/core/net-sysfs.c 	i = queue - dev->_tx;
queue            1053 net/core/net-sysfs.c static ssize_t traffic_class_show(struct netdev_queue *queue,
queue            1056 net/core/net-sysfs.c 	struct net_device *dev = queue->dev;
queue            1063 net/core/net-sysfs.c 	index = get_netdev_queue_index(queue);
queue            1084 net/core/net-sysfs.c static ssize_t tx_maxrate_show(struct netdev_queue *queue,
queue            1087 net/core/net-sysfs.c 	return sprintf(buf, "%lu\n", queue->tx_maxrate);
queue            1090 net/core/net-sysfs.c static ssize_t tx_maxrate_store(struct netdev_queue *queue,
queue            1093 net/core/net-sysfs.c 	struct net_device *dev = queue->dev;
queue            1094 net/core/net-sysfs.c 	int err, index = get_netdev_queue_index(queue);
queue            1113 net/core/net-sysfs.c 		queue->tx_maxrate = rate;
queue            1159 net/core/net-sysfs.c static ssize_t bql_show_hold_time(struct netdev_queue *queue,
queue            1162 net/core/net-sysfs.c 	struct dql *dql = &queue->dql;
queue            1167 net/core/net-sysfs.c static ssize_t bql_set_hold_time(struct netdev_queue *queue,
queue            1170 net/core/net-sysfs.c 	struct dql *dql = &queue->dql;
queue            1187 net/core/net-sysfs.c static ssize_t bql_show_inflight(struct netdev_queue *queue,
queue            1190 net/core/net-sysfs.c 	struct dql *dql = &queue->dql;
queue            1199 net/core/net-sysfs.c static ssize_t bql_show_ ## NAME(struct netdev_queue *queue,		\
queue            1202 net/core/net-sysfs.c 	return bql_show(buf, queue->dql.FIELD);				\
queue            1205 net/core/net-sysfs.c static ssize_t bql_set_ ## NAME(struct netdev_queue *queue,		\
queue            1208 net/core/net-sysfs.c 	return bql_set(buf, len, &queue->dql.FIELD);			\
queue            1235 net/core/net-sysfs.c static ssize_t xps_cpus_show(struct netdev_queue *queue,
queue            1238 net/core/net-sysfs.c 	struct net_device *dev = queue->dev;
queue            1247 net/core/net-sysfs.c 	index = get_netdev_queue_index(queue);
queue            1292 net/core/net-sysfs.c static ssize_t xps_cpus_store(struct netdev_queue *queue,
queue            1295 net/core/net-sysfs.c 	struct net_device *dev = queue->dev;
queue            1309 net/core/net-sysfs.c 	index = get_netdev_queue_index(queue);
queue            1327 net/core/net-sysfs.c static ssize_t xps_rxqs_show(struct netdev_queue *queue, char *buf)
queue            1329 net/core/net-sysfs.c 	struct net_device *dev = queue->dev;
queue            1334 net/core/net-sysfs.c 	index = get_netdev_queue_index(queue);
queue            1376 net/core/net-sysfs.c static ssize_t xps_rxqs_store(struct netdev_queue *queue, const char *buf,
queue            1379 net/core/net-sysfs.c 	struct net_device *dev = queue->dev;
queue            1391 net/core/net-sysfs.c 	index = get_netdev_queue_index(queue);
queue            1425 net/core/net-sysfs.c 	struct netdev_queue *queue = to_netdev_queue(kobj);
queue            1428 net/core/net-sysfs.c 	dev_put(queue->dev);
queue            1433 net/core/net-sysfs.c 	struct netdev_queue *queue = to_netdev_queue(kobj);
queue            1434 net/core/net-sysfs.c 	struct device *dev = &queue->dev->dev;
queue            1461 net/core/net-sysfs.c 	struct netdev_queue *queue = dev->_tx + index;
queue            1462 net/core/net-sysfs.c 	struct kobject *kobj = &queue->kobj;
queue            1468 net/core/net-sysfs.c 	dev_hold(queue->dev);
queue            1507 net/core/net-sysfs.c 		struct netdev_queue *queue = dev->_tx + i;
queue            1510 net/core/net-sysfs.c 			queue->kobj.uevent_suppress = 1;
queue            1512 net/core/net-sysfs.c 		sysfs_remove_group(&queue->kobj, &dql_group);
queue            1514 net/core/net-sysfs.c 		kobject_put(&queue->kobj);
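
Both the rx-queue and tx-queue sysfs attributes above use the same dispatch trick: a single generic show/store entry point recovers the typed queue and the typed attribute from the embedded kobject and attribute via container_of, then calls the per-attribute callback. A compact sketch of that two-layer dispatch; the netq types and the tx_maxrate attribute are simplified stand-ins:

	#include <stddef.h>
	#include <stdio.h>

	#define container_of(ptr, type, member) \
		((type *)((char *)(ptr) - offsetof(type, member)))

	/* Generic layer: knows nothing about queues. */
	struct kobject { const char *name; };
	struct attribute { const char *name; };

	/* Typed layer: embeds the generic structs. */
	struct netq { struct kobject kobj; int tx_maxrate; };

	struct netq_attribute {
		struct attribute attr;
		int (*show)(struct netq *q, char *buf, size_t len);
	};

	/* The one generic entry point the "sysfs core" would call. */
	static int generic_show(struct kobject *kobj, struct attribute *attr,
				char *buf, size_t len)
	{
		struct netq *q = container_of(kobj, struct netq, kobj);
		struct netq_attribute *na =
			container_of(attr, struct netq_attribute, attr);

		return na->show(q, buf, len);
	}

	static int tx_maxrate_show(struct netq *q, char *buf, size_t len)
	{
		return snprintf(buf, len, "%d\n", q->tx_maxrate);
	}

	static struct netq_attribute tx_maxrate_attr = {
		.attr = { .name = "tx_maxrate" },
		.show = tx_maxrate_show,
	};

	int main(void)
	{
		struct netq q = { .kobj = { .name = "tx-0" }, .tx_maxrate = 1000 };
		char buf[32];

		generic_show(&q.kobj, &tx_maxrate_attr.attr, buf, sizeof(buf));
		printf("%s", buf);
		return 0;
	}
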
queue             451 net/core/pktgen.c 	wait_queue_head_t queue;
queue            3470 net/core/pktgen.c 	init_waitqueue_head(&t->queue);
queue            3483 net/core/pktgen.c 			wait_event_interruptible_timeout(t->queue,
queue              34 net/core/request_sock.c void reqsk_queue_alloc(struct request_sock_queue *queue)
queue              36 net/core/request_sock.c 	spin_lock_init(&queue->rskq_lock);
queue              38 net/core/request_sock.c 	spin_lock_init(&queue->fastopenq.lock);
queue              39 net/core/request_sock.c 	queue->fastopenq.rskq_rst_head = NULL;
queue              40 net/core/request_sock.c 	queue->fastopenq.rskq_rst_tail = NULL;
queue              41 net/core/request_sock.c 	queue->fastopenq.qlen = 0;
queue              43 net/core/request_sock.c 	queue->rskq_accept_head = NULL;
queue            1675 net/decnet/af_decnet.c 	struct sk_buff_head *queue = &sk->sk_receive_queue;
queue            1706 net/decnet/af_decnet.c 		queue = &scp->other_receive_queue;
queue            1739 net/decnet/af_decnet.c 		if (dn_data_ready(sk, queue, flags, target))
queue            1749 net/decnet/af_decnet.c 		sk_wait_event(sk, &timeo, dn_data_ready(sk, queue, flags, target), &wait);
queue            1754 net/decnet/af_decnet.c 	skb_queue_walk_safe(queue, skb, n) {
queue            1773 net/decnet/af_decnet.c 			skb_unlink(skb, queue);
queue            1821 net/decnet/af_decnet.c static inline int dn_queue_too_long(struct dn_scp *scp, struct sk_buff_head *queue, int flags)
queue            1824 net/decnet/af_decnet.c 	if (skb_queue_len(queue) >= scp->snd_window)
queue            1911 net/decnet/af_decnet.c 	struct sk_buff_head *queue = &scp->data_xmit_queue;
queue            1965 net/decnet/af_decnet.c 		queue = &scp->other_xmit_queue;
queue            1996 net/decnet/af_decnet.c 		if (dn_queue_too_long(scp, queue, flags)) {
queue            2007 net/decnet/af_decnet.c 				      !dn_queue_too_long(scp, queue, flags), &wait);
queue             578 net/decnet/dn_nsp_in.c static __inline__ int dn_queue_skb(struct sock *sk, struct sk_buff *skb, int sig, struct sk_buff_head *queue)
queue             596 net/decnet/dn_nsp_in.c 	skb_queue_tail(queue, skb);
queue              66 net/dsa/tag_brcm.c 	u16 queue = skb_get_queue_mapping(skb);
queue              95 net/dsa/tag_brcm.c 		       ((queue & BRCM_IG_TC_MASK) << BRCM_IG_TC_SHIFT);
queue             105 net/dsa/tag_brcm.c 	skb_set_queue_mapping(skb, BRCM_TAG_SET_PORT_QUEUE(dp->index, queue));
queue             445 net/ipv4/inet_connection_sock.c 	struct request_sock_queue *queue = &icsk->icsk_accept_queue;
queue             460 net/ipv4/inet_connection_sock.c 	if (reqsk_queue_empty(queue)) {
queue             472 net/ipv4/inet_connection_sock.c 	req = reqsk_queue_remove(queue, sk);
queue             477 net/ipv4/inet_connection_sock.c 		spin_lock_bh(&queue->fastopenq.lock);
queue             488 net/ipv4/inet_connection_sock.c 		spin_unlock_bh(&queue->fastopenq.lock);
queue             716 net/ipv4/inet_connection_sock.c 	struct request_sock_queue *queue = &icsk->icsk_accept_queue;
queue             743 net/ipv4/inet_connection_sock.c 	qlen = reqsk_queue_len(queue);
queue             745 net/ipv4/inet_connection_sock.c 		int young = reqsk_queue_len_young(queue) << 1;
queue             754 net/ipv4/inet_connection_sock.c 	defer_accept = READ_ONCE(queue->rskq_defer_accept);
queue             767 net/ipv4/inet_connection_sock.c 			atomic_dec(&queue->young);
queue             952 net/ipv4/inet_connection_sock.c 	struct request_sock_queue *queue = &inet_csk(sk)->icsk_accept_queue;
queue             954 net/ipv4/inet_connection_sock.c 	spin_lock(&queue->rskq_lock);
queue             961 net/ipv4/inet_connection_sock.c 		if (queue->rskq_accept_head == NULL)
queue             962 net/ipv4/inet_connection_sock.c 			WRITE_ONCE(queue->rskq_accept_head, req);
queue             964 net/ipv4/inet_connection_sock.c 			queue->rskq_accept_tail->dl_next = req;
queue             965 net/ipv4/inet_connection_sock.c 		queue->rskq_accept_tail = req;
queue             968 net/ipv4/inet_connection_sock.c 	spin_unlock(&queue->rskq_lock);
queue             996 net/ipv4/inet_connection_sock.c 	struct request_sock_queue *queue = &icsk->icsk_accept_queue;
queue            1007 net/ipv4/inet_connection_sock.c 	while ((req = reqsk_queue_remove(queue, sk)) != NULL) {
queue            1023 net/ipv4/inet_connection_sock.c 	if (queue->fastopenq.rskq_rst_head) {
queue            1025 net/ipv4/inet_connection_sock.c 		spin_lock_bh(&queue->fastopenq.lock);
queue            1026 net/ipv4/inet_connection_sock.c 		req = queue->fastopenq.rskq_rst_head;
queue            1027 net/ipv4/inet_connection_sock.c 		queue->fastopenq.rskq_rst_head = NULL;
queue            1028 net/ipv4/inet_connection_sock.c 		spin_unlock_bh(&queue->fastopenq.lock);
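
The accept-queue hits above append a request at the tail via dl_next (first entry becomes the head) and always remove from the head, so accept() hands sockets back in arrival order. Stripped of locking and socket state, the FIFO itself reduces to the sketch below; request/accept_queue here are toy types:

	#include <stdio.h>
	#include <stdlib.h>

	struct request { struct request *dl_next; int id; };
	struct accept_queue { struct request *head, *tail; };

	static void rskq_add(struct accept_queue *q, struct request *req)
	{
		req->dl_next = NULL;
		if (!q->head)
			q->head = req;		/* first entry: becomes the head */
		else
			q->tail->dl_next = req;	/* otherwise chain after the tail */
		q->tail = req;
	}

	static struct request *rskq_remove(struct accept_queue *q)
	{
		struct request *req = q->head;

		if (req) {
			q->head = req->dl_next;
			if (!q->head)
				q->tail = NULL;
		}
		return req;
	}

	int main(void)
	{
		struct accept_queue q = { NULL, NULL };

		for (int i = 0; i < 3; i++) {
			struct request *r = malloc(sizeof(*r));
			r->id = i;
			rskq_add(&q, r);
		}

		struct request *r;
		while ((r = rskq_remove(&q))) {	/* accept() order == arrival order */
			printf("accept %d\n", r->id);
			free(r);
		}
		return 0;
	}
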
queue             955 net/ipv4/ip_output.c 			    struct sk_buff_head *queue,
queue             981 net/ipv4/ip_output.c 	skb = skb_peek_tail(queue);
queue            1163 net/ipv4/ip_output.c 			__skb_queue_tail(queue, skb);
queue            1467 net/ipv4/ip_output.c 			      struct sk_buff_head *queue,
queue            1480 net/ipv4/ip_output.c 	skb = __skb_dequeue(queue);
queue            1488 net/ipv4/ip_output.c 	while ((tmp_skb = __skb_dequeue(queue)) != NULL) {
queue            1589 net/ipv4/ip_output.c 				      struct sk_buff_head *queue,
queue            1594 net/ipv4/ip_output.c 	while ((skb = __skb_dequeue_tail(queue)) != NULL)
queue            1613 net/ipv4/ip_output.c 	struct sk_buff_head queue;
queue            1619 net/ipv4/ip_output.c 	__skb_queue_head_init(&queue);
queue            1628 net/ipv4/ip_output.c 	err = __ip_append_data(sk, fl4, &queue, cork,
queue            1632 net/ipv4/ip_output.c 		__ip_flush_pending_frames(sk, &queue, cork);
queue            1636 net/ipv4/ip_output.c 	return __ip_make_skb(sk, fl4, &queue, cork);
queue             237 net/ipv4/tcp_fastopen.c 	struct request_sock_queue *queue = &inet_csk(sk)->icsk_accept_queue;
queue             246 net/ipv4/tcp_fastopen.c 	spin_lock(&queue->fastopenq.lock);
queue             247 net/ipv4/tcp_fastopen.c 	queue->fastopenq.qlen++;
queue             248 net/ipv4/tcp_fastopen.c 	spin_unlock(&queue->fastopenq.lock);
queue            6488 net/ipv4/tcp_input.c 	struct request_sock_queue *queue = &inet_csk(sk)->icsk_accept_queue;
queue            6502 net/ipv4/tcp_input.c 	if (!queue->synflood_warned &&
queue            6504 net/ipv4/tcp_input.c 	    xchg(&queue->synflood_warned, 1) == 0)
queue             133 net/ipv4/tcp_yeah.c 			u32 rtt, queue;
queue             155 net/ipv4/tcp_yeah.c 			queue = bw;
queue             157 net/ipv4/tcp_yeah.c 			if (queue > TCP_YEAH_ALPHA ||
queue             159 net/ipv4/tcp_yeah.c 				if (queue > TCP_YEAH_ALPHA &&
queue             161 net/ipv4/tcp_yeah.c 	u32 reduction = min(queue / TCP_YEAH_GAMMA,
queue             190 net/ipv4/tcp_yeah.c 			yeah->lastQ = queue;
queue            1656 net/ipv4/udp.c 	struct sk_buff_head *queue;
queue            1661 net/ipv4/udp.c 	queue = &udp_sk(sk)->reader_queue;
queue            1673 net/ipv4/udp.c 			spin_lock_bh(&queue->lock);
queue            1674 net/ipv4/udp.c 			skb = __skb_try_recv_from_queue(sk, queue, flags,
queue            1678 net/ipv4/udp.c 				spin_unlock_bh(&queue->lock);
queue            1683 net/ipv4/udp.c 				spin_unlock_bh(&queue->lock);
queue            1693 net/ipv4/udp.c 			skb_queue_splice_tail_init(sk_queue, queue);
queue            1695 net/ipv4/udp.c 			skb = __skb_try_recv_from_queue(sk, queue, flags,
queue            1699 net/ipv4/udp.c 			spin_unlock_bh(&queue->lock);
queue            1314 net/ipv6/ip6_output.c 			     struct sk_buff_head *queue,
queue            1340 net/ipv6/ip6_output.c 	skb = skb_peek_tail(queue);
queue            1584 net/ipv6/ip6_output.c 			__skb_queue_tail(queue, skb);
queue            1714 net/ipv6/ip6_output.c 			       struct sk_buff_head *queue,
queue            1729 net/ipv6/ip6_output.c 	skb = __skb_dequeue(queue);
queue            1737 net/ipv6/ip6_output.c 	while ((tmp_skb = __skb_dequeue(queue)) != NULL) {
queue            1820 net/ipv6/ip6_output.c 				       struct sk_buff_head *queue,
queue            1826 net/ipv6/ip6_output.c 	while ((skb = __skb_dequeue_tail(queue)) != NULL) {
queue            1852 net/ipv6/ip6_output.c 	struct sk_buff_head queue;
queue            1859 net/ipv6/ip6_output.c 	__skb_queue_head_init(&queue);
queue            1874 net/ipv6/ip6_output.c 	err = __ip6_append_data(sk, fl6, &queue, &cork->base, &v6_cork,
queue            1879 net/ipv6/ip6_output.c 		__ip6_flush_pending_frames(sk, &queue, cork, &v6_cork);
queue            1883 net/ipv6/ip6_output.c 	return __ip6_make_skb(sk, &queue, cork, &v6_cork);
queue             164 net/mac80211/agg-tx.c 	int queue = sdata->vif.hw_queue[ieee80211_ac_from_tid(tid)];
queue             168 net/mac80211/agg-tx.c 	if (atomic_inc_return(&sdata->local->agg_queue_stop[queue]) == 1)
queue             170 net/mac80211/agg-tx.c 			&sdata->local->hw, queue,
queue             179 net/mac80211/agg-tx.c 	int queue = sdata->vif.hw_queue[ieee80211_ac_from_tid(tid)];
queue             181 net/mac80211/agg-tx.c 	if (atomic_dec_return(&sdata->local->agg_queue_stop[queue]) == 0)
queue             183 net/mac80211/agg-tx.c 			&sdata->local->hw, queue,
queue             243 net/mac80211/agg-tx.c 	int queue = sdata->vif.hw_queue[ieee80211_ac_from_tid(tid)];
queue             257 net/mac80211/agg-tx.c 					   &local->pending[queue]);
queue            2029 net/mac80211/ieee80211_i.h void ieee80211_wake_queue_by_reason(struct ieee80211_hw *hw, int queue,
queue            2032 net/mac80211/ieee80211_i.h void ieee80211_stop_queue_by_reason(struct ieee80211_hw *hw, int queue,
queue            2035 net/mac80211/ieee80211_i.h void ieee80211_propagate_queue_wake(struct ieee80211_local *local, int queue);
queue            2206 net/mac80211/rx.c 			int queue = rx->security_idx;
queue            2213 net/mac80211/rx.c 			       rx->key->u.ccmp.rx_pn[queue],
queue            2219 net/mac80211/rx.c 			BUILD_BUG_ON(sizeof(rx->key->u.ccmp.rx_pn[queue]) !=
queue            2220 net/mac80211/rx.c 				     sizeof(rx->key->u.gcmp.rx_pn[queue]));
queue            2245 net/mac80211/rx.c 		int queue;
queue            2259 net/mac80211/rx.c 		queue = rx->security_idx;
queue            2260 net/mac80211/rx.c 		rpn = rx->key->u.ccmp.rx_pn[queue];
queue            3175 net/mac80211/rx.c 		goto queue;
queue            3193 net/mac80211/rx.c 			goto queue;
queue            3198 net/mac80211/rx.c 			goto queue;
queue            3236 net/mac80211/rx.c 		goto queue;
queue            3279 net/mac80211/rx.c 			goto queue;
queue            3310 net/mac80211/rx.c 			goto queue;
queue            3328 net/mac80211/rx.c 		goto queue;
queue            3344 net/mac80211/rx.c  queue:
queue             242 net/mac80211/tkip.c 				u8 *ra, int only_iv, int queue,
queue             250 net/mac80211/tkip.c 	struct tkip_ctx_rx *rx_ctx = &key->u.tkip.rx[queue];
queue              27 net/mac80211/tkip.h 				u8 *ra, int only_iv, int queue,
queue            2331 net/mac80211/trace.h 	TP_PROTO(struct ieee80211_local *local, u16 queue,
queue            2334 net/mac80211/trace.h 	TP_ARGS(local, queue, reason),
queue            2338 net/mac80211/trace.h 		__field(u16, queue)
queue            2344 net/mac80211/trace.h 		__entry->queue = queue;
queue            2350 net/mac80211/trace.h 		LOCAL_PR_ARG, __entry->queue, __entry->reason
queue            2355 net/mac80211/trace.h 	TP_PROTO(struct ieee80211_local *local, u16 queue,
queue            2358 net/mac80211/trace.h 	TP_ARGS(local, queue, reason),
queue            2362 net/mac80211/trace.h 		__field(u16, queue)
queue            2368 net/mac80211/trace.h 		__entry->queue = queue;
queue            2374 net/mac80211/trace.h 		LOCAL_PR_ARG, __entry->queue, __entry->reason
queue            3284 net/mac80211/tx.c 	head = skb_peek_tail(&flow->queue);
queue            3863 net/mac80211/tx.c 		u16 queue = __ieee80211_select_queue(sdata, sta, skb);
queue            3864 net/mac80211/tx.c 		skb_set_queue_mapping(skb, queue);
queue            3992 net/mac80211/tx.c 			     struct sk_buff_head *queue)
queue            4020 net/mac80211/tx.c 		__skb_queue_tail(queue, cloned_skb);
queue            4026 net/mac80211/tx.c 		__skb_queue_tail(queue, skb);
queue            4035 net/mac80211/tx.c 	__skb_queue_purge(queue);
queue            4036 net/mac80211/tx.c 	__skb_queue_tail(queue, skb);
queue            4052 net/mac80211/tx.c 		struct sk_buff_head queue;
queue            4054 net/mac80211/tx.c 		__skb_queue_head_init(&queue);
queue            4055 net/mac80211/tx.c 		ieee80211_convert_to_unicast(skb, dev, &queue);
queue            4056 net/mac80211/tx.c 		while ((skb = __skb_dequeue(&queue)))
queue             348 net/mac80211/util.c void ieee80211_propagate_queue_wake(struct ieee80211_local *local, int queue)
queue             372 net/mac80211/util.c 			if (ac_queue == queue ||
queue             373 net/mac80211/util.c 			    (sdata->vif.cab_queue == queue &&
queue             381 net/mac80211/util.c static void __ieee80211_wake_queue(struct ieee80211_hw *hw, int queue,
queue             388 net/mac80211/util.c 	trace_wake_queue(local, queue, reason);
queue             390 net/mac80211/util.c 	if (WARN_ON(queue >= hw->queues))
queue             393 net/mac80211/util.c 	if (!test_bit(reason, &local->queue_stop_reasons[queue]))
queue             397 net/mac80211/util.c 		local->q_stop_reasons[queue][reason] = 0;
queue             399 net/mac80211/util.c 		local->q_stop_reasons[queue][reason]--;
queue             400 net/mac80211/util.c 		if (WARN_ON(local->q_stop_reasons[queue][reason] < 0))
queue             401 net/mac80211/util.c 			local->q_stop_reasons[queue][reason] = 0;
queue             404 net/mac80211/util.c 	if (local->q_stop_reasons[queue][reason] == 0)
queue             405 net/mac80211/util.c 		__clear_bit(reason, &local->queue_stop_reasons[queue]);
queue             407 net/mac80211/util.c 	if (local->queue_stop_reasons[queue] != 0)
queue             411 net/mac80211/util.c 	if (skb_queue_empty(&local->pending[queue])) {
queue             413 net/mac80211/util.c 		ieee80211_propagate_queue_wake(local, queue);
queue             433 net/mac80211/util.c void ieee80211_wake_queue_by_reason(struct ieee80211_hw *hw, int queue,
queue             441 net/mac80211/util.c 	__ieee80211_wake_queue(hw, queue, reason, refcounted, &flags);
queue             445 net/mac80211/util.c void ieee80211_wake_queue(struct ieee80211_hw *hw, int queue)
queue             447 net/mac80211/util.c 	ieee80211_wake_queue_by_reason(hw, queue,
queue             453 net/mac80211/util.c static void __ieee80211_stop_queue(struct ieee80211_hw *hw, int queue,
queue             461 net/mac80211/util.c 	trace_stop_queue(local, queue, reason);
queue             463 net/mac80211/util.c 	if (WARN_ON(queue >= hw->queues))
queue             467 net/mac80211/util.c 		local->q_stop_reasons[queue][reason] = 1;
queue             469 net/mac80211/util.c 		local->q_stop_reasons[queue][reason]++;
queue             471 net/mac80211/util.c 	if (__test_and_set_bit(reason, &local->queue_stop_reasons[queue]))
queue             485 net/mac80211/util.c 			if (sdata->vif.hw_queue[ac] == queue ||
queue             486 net/mac80211/util.c 			    sdata->vif.cab_queue == queue) {
queue             500 net/mac80211/util.c void ieee80211_stop_queue_by_reason(struct ieee80211_hw *hw, int queue,
queue             508 net/mac80211/util.c 	__ieee80211_stop_queue(hw, queue, reason, refcounted);
queue             512 net/mac80211/util.c void ieee80211_stop_queue(struct ieee80211_hw *hw, int queue)
queue             514 net/mac80211/util.c 	ieee80211_stop_queue_by_reason(hw, queue,
queue             526 net/mac80211/util.c 	int queue = info->hw_queue;
queue             534 net/mac80211/util.c 	__ieee80211_stop_queue(hw, queue, IEEE80211_QUEUE_STOP_REASON_SKB_ADD,
queue             536 net/mac80211/util.c 	__skb_queue_tail(&local->pending[queue], skb);
queue             537 net/mac80211/util.c 	__ieee80211_wake_queue(hw, queue, IEEE80211_QUEUE_STOP_REASON_SKB_ADD,
queue             548 net/mac80211/util.c 	int queue, i;
queue             559 net/mac80211/util.c 		queue = info->hw_queue;
queue             561 net/mac80211/util.c 		__ieee80211_stop_queue(hw, queue,
queue             565 net/mac80211/util.c 		__skb_queue_tail(&local->pending[queue], skb);
queue             600 net/mac80211/util.c int ieee80211_queue_stopped(struct ieee80211_hw *hw, int queue)
queue             606 net/mac80211/util.c 	if (WARN_ON(queue >= hw->queues))
queue             611 net/mac80211/util.c 		       &local->queue_stop_reasons[queue]);
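
The mac80211/util.c hits encode a per-queue stop that is both multi-reason and refcounted: each queue carries a bitmask of active stop reasons plus a per-reason counter, a refcounted wake only clears a reason once its count drops to zero, and the queue may transmit again only when the whole mask is empty. The core bookkeeping, minus locking and the pending-frame flush, looks roughly like this (array sizes and the printf are illustrative):

	#include <stdio.h>
	#include <stdbool.h>

	#define NQUEUES  4
	#define NREASONS 8

	static unsigned long stop_mask[NQUEUES];	/* bit r set => reason r active */
	static int stop_refs[NQUEUES][NREASONS];

	static void stop_queue(int q, int r, bool refcounted)
	{
		if (refcounted)
			stop_refs[q][r]++;
		else
			stop_refs[q][r] = 1;
		stop_mask[q] |= 1UL << r;
	}

	static void wake_queue(int q, int r, bool refcounted)
	{
		if (!(stop_mask[q] & (1UL << r)))
			return;			/* reason not active: nothing to do */
		if (refcounted && stop_refs[q][r] > 0)
			stop_refs[q][r]--;
		else
			stop_refs[q][r] = 0;
		if (stop_refs[q][r] == 0)
			stop_mask[q] &= ~(1UL << r);
		/* transmit resumes only once *all* reasons are cleared */
		if (stop_mask[q] == 0)
			printf("queue %d running\n", q);
	}

	int main(void)
	{
		stop_queue(0, 2, true);
		stop_queue(0, 2, true);		/* two stops under the same reason */
		stop_queue(0, 5, false);
		wake_queue(0, 2, true);		/* still stopped: ref left, reason 5 set */
		wake_queue(0, 5, false);	/* still stopped: reason 2 refcount is 1 */
		wake_queue(0, 2, true);		/* last reference gone: prints */
		return 0;
	}
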
queue             504 net/mac80211/wpa.c 	int queue;
queue             531 net/mac80211/wpa.c 		queue = rx->security_idx;
queue             533 net/mac80211/wpa.c 		res = memcmp(pn, key->u.ccmp.rx_pn[queue],
queue             555 net/mac80211/wpa.c 		memcpy(key->u.ccmp.rx_pn[queue], pn, IEEE80211_CCMP_PN_LEN);
queue             731 net/mac80211/wpa.c 	int data_len, queue, mic_len = IEEE80211_GCMP_MIC_LEN;
queue             758 net/mac80211/wpa.c 		queue = rx->security_idx;
queue             760 net/mac80211/wpa.c 		res = memcmp(pn, key->u.gcmp.rx_pn[queue],
queue             783 net/mac80211/wpa.c 		memcpy(key->u.gcmp.rx_pn[queue], pn, IEEE80211_GCMP_PN_LEN);
queue             161 net/netfilter/nfnetlink_queue.c static void nfqnl_flush(struct nfqnl_instance *queue, nfqnl_cmpfn cmpfn,
queue             191 net/netfilter/nfnetlink_queue.c __enqueue_entry(struct nfqnl_instance *queue, struct nf_queue_entry *entry)
queue             193 net/netfilter/nfnetlink_queue.c        list_add_tail(&entry->list, &queue->queue_list);
queue             194 net/netfilter/nfnetlink_queue.c        queue->queue_total++;
queue             198 net/netfilter/nfnetlink_queue.c __dequeue_entry(struct nfqnl_instance *queue, struct nf_queue_entry *entry)
queue             201 net/netfilter/nfnetlink_queue.c 	queue->queue_total--;
queue             205 net/netfilter/nfnetlink_queue.c find_dequeue_entry(struct nfqnl_instance *queue, unsigned int id)
queue             209 net/netfilter/nfnetlink_queue.c 	spin_lock_bh(&queue->lock);
queue             211 net/netfilter/nfnetlink_queue.c 	list_for_each_entry(i, &queue->queue_list, list) {
queue             219 net/netfilter/nfnetlink_queue.c 		__dequeue_entry(queue, entry);
queue             221 net/netfilter/nfnetlink_queue.c 	spin_unlock_bh(&queue->lock);
queue             247 net/netfilter/nfnetlink_queue.c nfqnl_flush(struct nfqnl_instance *queue, nfqnl_cmpfn cmpfn, unsigned long data)
queue             251 net/netfilter/nfnetlink_queue.c 	spin_lock_bh(&queue->lock);
queue             252 net/netfilter/nfnetlink_queue.c 	list_for_each_entry_safe(entry, next, &queue->queue_list, list) {
queue             255 net/netfilter/nfnetlink_queue.c 			queue->queue_total--;
queue             259 net/netfilter/nfnetlink_queue.c 	spin_unlock_bh(&queue->lock);
queue             375 net/netfilter/nfnetlink_queue.c nfqnl_build_packet_message(struct net *net, struct nfqnl_instance *queue,
queue             423 net/netfilter/nfnetlink_queue.c 	switch ((enum nfqnl_config_mode)READ_ONCE(queue->copy_mode)) {
queue             429 net/netfilter/nfnetlink_queue.c 		if (!(queue->flags & NFQA_CFG_F_GSO) &&
queue             434 net/netfilter/nfnetlink_queue.c 		data_len = READ_ONCE(queue->copy_range);
queue             447 net/netfilter/nfnetlink_queue.c 	if (queue->flags & NFQA_CFG_F_CONNTRACK) {
queue             455 net/netfilter/nfnetlink_queue.c 	if (queue->flags & NFQA_CFG_F_UID_GID) {
queue             460 net/netfilter/nfnetlink_queue.c 	if ((queue->flags & NFQA_CFG_F_SECCTX) && entskb->sk) {
queue             483 net/netfilter/nfnetlink_queue.c 	nfmsg->res_id = htons(queue->queue_num);
queue             592 net/netfilter/nfnetlink_queue.c 	if ((queue->flags & NFQA_CFG_F_UID_GID) && entskb->sk &&
queue             651 net/netfilter/nfnetlink_queue.c __nfqnl_enqueue_packet(struct net *net, struct nfqnl_instance *queue,
queue             659 net/netfilter/nfnetlink_queue.c 	nskb = nfqnl_build_packet_message(net, queue, entry, &packet_id_ptr);
queue             664 net/netfilter/nfnetlink_queue.c 	spin_lock_bh(&queue->lock);
queue             669 net/netfilter/nfnetlink_queue.c 	if (queue->queue_total >= queue->queue_maxlen) {
queue             670 net/netfilter/nfnetlink_queue.c 		if (queue->flags & NFQA_CFG_F_FAIL_OPEN) {
queue             674 net/netfilter/nfnetlink_queue.c 			queue->queue_dropped++;
queue             676 net/netfilter/nfnetlink_queue.c 					     queue->queue_total);
queue             680 net/netfilter/nfnetlink_queue.c 	entry->id = ++queue->id_sequence;
queue             684 net/netfilter/nfnetlink_queue.c 	err = nfnetlink_unicast(nskb, net, queue->peer_portid, MSG_DONTWAIT);
queue             686 net/netfilter/nfnetlink_queue.c 		if (queue->flags & NFQA_CFG_F_FAIL_OPEN) {
queue             690 net/netfilter/nfnetlink_queue.c 			queue->queue_user_dropped++;
queue             695 net/netfilter/nfnetlink_queue.c 	__enqueue_entry(queue, entry);
queue             697 net/netfilter/nfnetlink_queue.c 	spin_unlock_bh(&queue->lock);
queue             703 net/netfilter/nfnetlink_queue.c 	spin_unlock_bh(&queue->lock);
queue             747 net/netfilter/nfnetlink_queue.c __nfqnl_enqueue_packet_gso(struct net *net, struct nfqnl_instance *queue,
queue             758 net/netfilter/nfnetlink_queue.c 		ret = __nfqnl_enqueue_packet(net, queue, entry);
queue             769 net/netfilter/nfnetlink_queue.c 		ret = __nfqnl_enqueue_packet(net, queue, entry_seg);
queue             780 net/netfilter/nfnetlink_queue.c 	struct nfqnl_instance *queue;
queue             787 net/netfilter/nfnetlink_queue.c 	queue = instance_lookup(q, queuenum);
queue             788 net/netfilter/nfnetlink_queue.c 	if (!queue)
queue             791 net/netfilter/nfnetlink_queue.c 	if (queue->copy_mode == NFQNL_COPY_NONE)
queue             805 net/netfilter/nfnetlink_queue.c 	if ((queue->flags & NFQA_CFG_F_GSO) || !skb_is_gso(skb))
queue             806 net/netfilter/nfnetlink_queue.c 		return __nfqnl_enqueue_packet(net, queue, entry);
queue             821 net/netfilter/nfnetlink_queue.c 			err = __nfqnl_enqueue_packet_gso(net, queue,
queue             870 net/netfilter/nfnetlink_queue.c nfqnl_set_mode(struct nfqnl_instance *queue,
queue             875 net/netfilter/nfnetlink_queue.c 	spin_lock_bh(&queue->lock);
queue             879 net/netfilter/nfnetlink_queue.c 		queue->copy_mode = mode;
queue             880 net/netfilter/nfnetlink_queue.c 		queue->copy_range = 0;
queue             884 net/netfilter/nfnetlink_queue.c 		queue->copy_mode = mode;
queue             886 net/netfilter/nfnetlink_queue.c 			queue->copy_range = NFQNL_MAX_COPY_RANGE;
queue             888 net/netfilter/nfnetlink_queue.c 			queue->copy_range = range;
queue             895 net/netfilter/nfnetlink_queue.c 	spin_unlock_bh(&queue->lock);
queue            1026 net/netfilter/nfnetlink_queue.c 	struct nfqnl_instance *queue;
queue            1028 net/netfilter/nfnetlink_queue.c 	queue = instance_lookup(q, queue_num);
queue            1029 net/netfilter/nfnetlink_queue.c 	if (!queue)
queue            1032 net/netfilter/nfnetlink_queue.c 	if (queue->peer_portid != nlportid)
queue            1035 net/netfilter/nfnetlink_queue.c 	return queue;
queue            1069 net/netfilter/nfnetlink_queue.c 	struct nfqnl_instance *queue;
queue            1074 net/netfilter/nfnetlink_queue.c 	queue = verdict_instance_lookup(q, queue_num,
queue            1076 net/netfilter/nfnetlink_queue.c 	if (IS_ERR(queue))
queue            1077 net/netfilter/nfnetlink_queue.c 		return PTR_ERR(queue);
queue            1086 net/netfilter/nfnetlink_queue.c 	spin_lock_bh(&queue->lock);
queue            1088 net/netfilter/nfnetlink_queue.c 	list_for_each_entry_safe(entry, tmp, &queue->queue_list, list) {
queue            1091 net/netfilter/nfnetlink_queue.c 		__dequeue_entry(queue, entry);
queue            1095 net/netfilter/nfnetlink_queue.c 	spin_unlock_bh(&queue->lock);
queue            1176 net/netfilter/nfnetlink_queue.c 	struct nfqnl_instance *queue;
queue            1185 net/netfilter/nfnetlink_queue.c 	queue = verdict_instance_lookup(q, queue_num,
queue            1187 net/netfilter/nfnetlink_queue.c 	if (IS_ERR(queue))
queue            1188 net/netfilter/nfnetlink_queue.c 		return PTR_ERR(queue);
queue            1196 net/netfilter/nfnetlink_queue.c 	entry = find_dequeue_entry(queue, ntohl(vhdr->id));
queue            1261 net/netfilter/nfnetlink_queue.c 	struct nfqnl_instance *queue;
queue            1312 net/netfilter/nfnetlink_queue.c 	queue = instance_lookup(q, queue_num);
queue            1313 net/netfilter/nfnetlink_queue.c 	if (queue && queue->peer_portid != NETLINK_CB(skb).portid) {
queue            1321 net/netfilter/nfnetlink_queue.c 			if (queue) {
queue            1325 net/netfilter/nfnetlink_queue.c 			queue = instance_create(q, queue_num,
queue            1327 net/netfilter/nfnetlink_queue.c 			if (IS_ERR(queue)) {
queue            1328 net/netfilter/nfnetlink_queue.c 				ret = PTR_ERR(queue);
queue            1333 net/netfilter/nfnetlink_queue.c 			if (!queue) {
queue            1337 net/netfilter/nfnetlink_queue.c 			instance_destroy(q, queue);
queue            1348 net/netfilter/nfnetlink_queue.c 	if (!queue) {
queue            1357 net/netfilter/nfnetlink_queue.c 		nfqnl_set_mode(queue, params->copy_mode,
queue            1364 net/netfilter/nfnetlink_queue.c 		spin_lock_bh(&queue->lock);
queue            1365 net/netfilter/nfnetlink_queue.c 		queue->queue_maxlen = ntohl(*queue_maxlen);
queue            1366 net/netfilter/nfnetlink_queue.c 		spin_unlock_bh(&queue->lock);
queue            1370 net/netfilter/nfnetlink_queue.c 		spin_lock_bh(&queue->lock);
queue            1371 net/netfilter/nfnetlink_queue.c 		queue->flags &= ~mask;
queue            1372 net/netfilter/nfnetlink_queue.c 		queue->flags |= flags & mask;
queue            1373 net/netfilter/nfnetlink_queue.c 		spin_unlock_bh(&queue->lock);
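
The nfnetlink_queue enqueue path above is a bounded, counted FIFO: under the instance lock it checks queue_total against queue_maxlen, either drops (bumping a drop counter) or passes the packet through when NFQA_CFG_F_FAIL_OPEN is set, and otherwise assigns the next id from id_sequence and links the entry in. A minimal sketch of that accounting, with a toy entry type and return-code convention standing in for the verdict machinery:

	#include <pthread.h>
	#include <stdbool.h>
	#include <stdio.h>

	struct entry { struct entry *next; unsigned int id; };

	struct instance {
		pthread_mutex_t lock;
		struct entry *head, *tail;
		unsigned int total, maxlen, id_sequence;
		unsigned long dropped;
		bool fail_open;		/* on overflow: accept instead of queueing */
	};

	/* Returns 0 if queued, 1 if passed through (fail-open), -1 if dropped. */
	static int enqueue(struct instance *q, struct entry *e)
	{
		int ret;

		pthread_mutex_lock(&q->lock);
		if (q->total >= q->maxlen) {
			if (q->fail_open) {
				ret = 1;	/* let the packet through */
			} else {
				q->dropped++;
				ret = -1;
			}
			pthread_mutex_unlock(&q->lock);
			return ret;
		}
		e->id = ++q->id_sequence;	/* id assigned under the lock */
		e->next = NULL;
		if (q->tail)
			q->tail->next = e;
		else
			q->head = e;
		q->tail = e;
		q->total++;
		pthread_mutex_unlock(&q->lock);
		return 0;
	}

	int main(void)
	{
		struct instance q = { .lock = PTHREAD_MUTEX_INITIALIZER, .maxlen = 2 };
		struct entry e[3];

		for (int i = 0; i < 3; i++)
			printf("enqueue -> %d\n", enqueue(&q, &e[i]));
		printf("total=%u dropped=%lu\n", q.total, q.dropped);
		return 0;
	}
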
queue              33 net/netfilter/nft_queue.c 	u32 queue = priv->queuenum;
queue              40 net/netfilter/nft_queue.c 			queue = priv->queuenum + cpu % priv->queues_total;
queue              42 net/netfilter/nft_queue.c 			queue = nfqueue_hash(pkt->skb, queue,
queue              48 net/netfilter/nft_queue.c 	ret = NF_QUEUE_NR(queue);
queue              60 net/netfilter/nft_queue.c 	u32 queue, ret;
queue              62 net/netfilter/nft_queue.c 	queue = regs->data[priv->sreg_qnum];
queue              64 net/netfilter/nft_queue.c 	ret = NF_QUEUE_NR(queue);
queue              40 net/netfilter/xt_NFQUEUE.c 	u32 queue = info->queuenum;
queue              43 net/netfilter/xt_NFQUEUE.c 		queue = nfqueue_hash(skb, queue, info->queues_total,
queue              46 net/netfilter/xt_NFQUEUE.c 	return NF_QUEUE_NR(queue);
queue              89 net/netfilter/xt_NFQUEUE.c 	u32 queue = info->queuenum;
queue              96 net/netfilter/xt_NFQUEUE.c 			queue = info->queuenum + cpu % info->queues_total;
queue              98 net/netfilter/xt_NFQUEUE.c 			queue = nfqueue_hash(skb, queue, info->queues_total,
queue             103 net/netfilter/xt_NFQUEUE.c 	ret = NF_QUEUE_NR(queue);
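
Both nft_queue and xt_NFQUEUE spread packets over a block of queue numbers: either base + cpu % total (CPU fanout), or a flow-hash mapped into the range; the kernel's nfqueue_hash uses the skb flow hash with reciprocal scaling. The arithmetic of both variants, with made-up function names:

	#include <stdio.h>

	/* Spread flows over [base, base + total). */
	static unsigned int pick_queue_cpu(unsigned int base, unsigned int total,
					   unsigned int cpu)
	{
		return base + cpu % total;
	}

	static unsigned int pick_queue_hash(unsigned int base, unsigned int total,
					    unsigned int flow_hash)
	{
		/* reciprocal scaling keeps a 32-bit hash in range without
		 * a modulo: (hash * total) >> 32 */
		return base + (unsigned int)
			(((unsigned long long)flow_hash * total) >> 32);
	}

	int main(void)
	{
		for (unsigned int cpu = 0; cpu < 6; cpu++)
			printf("cpu %u -> queue %u\n", cpu, pick_queue_cpu(8, 4, cpu));
		printf("hash 0xdeadbeef -> queue %u\n",
		       pick_queue_hash(8, 4, 0xdeadbeefu));
		return 0;
	}
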
queue              28 net/nfc/digital_core.c 	struct list_head queue;
queue             120 net/nfc/digital_core.c 				       queue);
queue             126 net/nfc/digital_core.c 	list_del(&cmd->queue);
queue             164 net/nfc/digital_core.c 				       queue);
queue             217 net/nfc/digital_core.c 	list_del(&cmd->queue);
queue             244 net/nfc/digital_core.c 	INIT_LIST_HEAD(&cmd->queue);
queue             247 net/nfc/digital_core.c 	list_add_tail(&cmd->queue, &ddev->cmd_queue);
queue             837 net/nfc/digital_core.c 	list_for_each_entry_safe(cmd, n, &ddev->cmd_queue, queue) {
queue             838 net/nfc/digital_core.c 		list_del(&cmd->queue);
queue             337 net/phonet/pep.c 	struct sk_buff_head *queue;
queue             383 net/phonet/pep.c 		queue = &pn->ctrlreq_queue;
queue             384 net/phonet/pep.c 		goto queue;
queue             405 net/phonet/pep.c 		queue = &sk->sk_receive_queue;
queue             406 net/phonet/pep.c 		goto queue;
queue             450 net/phonet/pep.c queue:
queue             453 net/phonet/pep.c 	skb_queue_tail(queue, skb);
queue              15 net/qrtr/tun.c 	struct sk_buff_head queue;
queue              23 net/qrtr/tun.c 	skb_queue_tail(&tun->queue, skb);
queue              39 net/qrtr/tun.c 	skb_queue_head_init(&tun->queue);
queue              56 net/qrtr/tun.c 	while (!(skb = skb_dequeue(&tun->queue))) {
queue              62 net/qrtr/tun.c 					     !skb_queue_empty(&tun->queue)))
queue             105 net/qrtr/tun.c 	if (!skb_queue_empty(&tun->queue))
queue             119 net/qrtr/tun.c 	while (!skb_queue_empty(&tun->queue)) {
queue             120 net/qrtr/tun.c 		skb = skb_dequeue(&tun->queue);
queue             167 net/rose/rose_link.c 		while ((skbn = skb_dequeue(&neigh->queue)) != NULL)
queue             278 net/rose/rose_link.c 		skb_queue_tail(&neigh->queue, skb);
queue             102 net/rose/rose_route.c 		skb_queue_head_init(&rose_neigh->queue);
queue             233 net/rose/rose_route.c 	skb_queue_purge(&rose_neigh->queue);
queue             389 net/rose/rose_route.c 	skb_queue_head_init(&sn->queue);
queue             772 net/rose/rose_route.c 	skb_queue_purge(&rose_neigh->queue);
queue              71 net/sched/sch_cbs.c 	int queue;
queue             266 net/sched/sch_cbs.c 	cbs.queue = q->queue;
queue             272 net/sched/sch_cbs.c 			cbs.queue);
queue             288 net/sched/sch_cbs.c 	cbs.queue = q->queue;
queue             422 net/sched/sch_cbs.c 	q->queue = sch->dev_queue - netdev_get_tx_queue(dev, 0);
queue              32 net/sched/sch_etf.c 	int queue;
queue             310 net/sched/sch_etf.c 	etf.queue = q->queue;
queue             316 net/sched/sch_etf.c 			etf.queue);
queue             334 net/sched/sch_etf.c 	etf.queue = q->queue;
queue             382 net/sched/sch_etf.c 	q->queue = sch->dev_queue - netdev_get_tx_queue(dev, 0);
queue             693 net/sched/sch_generic.c 		if (!q->ring.queue)
queue             760 net/sched/sch_generic.c 		if (!q->ring.queue)
queue             207 net/sched/sch_mq.c 	graft_offload.graft_params.queue = cl - 1;
queue             418 net/sched/sch_taprio.c 	int queue;
queue             420 net/sched/sch_taprio.c 	queue = skb_get_queue_mapping(skb);
queue             422 net/sched/sch_taprio.c 	child = q->qdiscs[queue];
queue              32 net/sctp/inqueue.c void sctp_inq_init(struct sctp_inq *queue)
queue              34 net/sctp/inqueue.c 	INIT_LIST_HEAD(&queue->in_chunk_list);
queue              35 net/sctp/inqueue.c 	queue->in_progress = NULL;
queue              38 net/sctp/inqueue.c 	INIT_WORK(&queue->immediate, NULL);
queue              42 net/sctp/inqueue.c void sctp_inq_free(struct sctp_inq *queue)
queue              47 net/sctp/inqueue.c 	list_for_each_entry_safe(chunk, tmp, &queue->in_chunk_list, list) {
queue              55 net/sctp/inqueue.c 	if (queue->in_progress) {
queue              56 net/sctp/inqueue.c 		sctp_chunk_free(queue->in_progress);
queue              57 net/sctp/inqueue.c 		queue->in_progress = NULL;
queue              84 net/sctp/inqueue.c struct sctp_chunkhdr *sctp_inq_peek(struct sctp_inq *queue)
queue              89 net/sctp/inqueue.c 	chunk = queue->in_progress;
queue             107 net/sctp/inqueue.c struct sctp_chunk *sctp_inq_pop(struct sctp_inq *queue)
queue             116 net/sctp/inqueue.c 	chunk = queue->in_progress;
queue             136 net/sctp/inqueue.c 			chunk = queue->in_progress = NULL;
queue             152 net/sctp/inqueue.c 		entry = sctp_list_dequeue(&queue->in_chunk_list);
queue             179 net/sctp/inqueue.c 		queue->in_progress = chunk;
queue             343 net/sctp/outqueue.c 				  struct list_head *queue, int msg_len)
queue             347 net/sctp/outqueue.c 	list_for_each_entry_safe(chk, temp, queue, transmitted_list) {
queue             365 net/sctp/outqueue.c 		if (queue != &asoc->outqueue.retransmit &&
queue            9433 net/sctp/socket.c 		struct sk_buff_head *queue;
queue            9437 net/sctp/socket.c 			queue = &newsp->pd_lobby;
queue            9439 net/sctp/socket.c 			queue = &newsk->sk_receive_queue;
queue            9448 net/sctp/socket.c 				__skb_queue_tail(queue, skb);
queue             187 net/sctp/ulpqueue.c 	struct sk_buff_head *queue;
queue             216 net/sctp/ulpqueue.c 		queue = &sk->sk_receive_queue;
queue             227 net/sctp/ulpqueue.c 				queue = &sp->pd_lobby;
queue             230 net/sctp/ulpqueue.c 				queue = &sk->sk_receive_queue;
queue             239 net/sctp/ulpqueue.c 				queue = &sk->sk_receive_queue;
queue             241 net/sctp/ulpqueue.c 				queue = &sp->pd_lobby;
queue             245 net/sctp/ulpqueue.c 	skb_queue_splice_tail_init(skb_list, queue);
queue             254 net/sctp/ulpqueue.c 	if (queue == &sk->sk_receive_queue && !sp->data_ready_signalled) {
queue             319 net/sctp/ulpqueue.c 						  struct sk_buff_head *queue,
queue             363 net/sctp/ulpqueue.c 	__skb_unlink(f_frag, queue);
queue             380 net/sctp/ulpqueue.c 		__skb_unlink(pos, queue);
queue             366 net/sunrpc/cache.c 	INIT_LIST_HEAD(&cd->queue);
queue             812 net/sunrpc/cache.c 	while (rp->q.list.next != &cd->queue &&
queue             818 net/sunrpc/cache.c 	if (rp->q.list.next == &cd->queue) {
queue             971 net/sunrpc/cache.c 	for (cq= &rp->q; &cq->list != &cd->queue;
queue             997 net/sunrpc/cache.c 	for (cq= &rp->q; &cq->list != &cd->queue;
queue            1028 net/sunrpc/cache.c 		list_add(&rp->q.list, &cd->queue);
queue            1046 net/sunrpc/cache.c 			for (cq= &rp->q; &cq->list != &cd->queue;
queue            1080 net/sunrpc/cache.c 	list_for_each_entry_safe(cq, tmp, &detail->queue, list)
queue            1228 net/sunrpc/cache.c 		list_add_tail(&crq->q.list, &detail->queue);
queue              84 net/sunrpc/sched.c __rpc_disable_timer(struct rpc_wait_queue *queue, struct rpc_task *task)
queue              91 net/sunrpc/sched.c 	if (list_empty(&queue->timer_list.list))
queue              92 net/sunrpc/sched.c 		cancel_delayed_work(&queue->timer_list.dwork);
queue              96 net/sunrpc/sched.c rpc_set_queue_timer(struct rpc_wait_queue *queue, unsigned long expires)
queue              99 net/sunrpc/sched.c 	queue->timer_list.expires = expires;
queue             104 net/sunrpc/sched.c 	mod_delayed_work(rpciod_workqueue, &queue->timer_list.dwork, expires);
queue             111 net/sunrpc/sched.c __rpc_add_timer(struct rpc_wait_queue *queue, struct rpc_task *task,
queue             118 net/sunrpc/sched.c 	if (list_empty(&queue->timer_list.list) || time_before(timeout, queue->timer_list.expires))
queue             119 net/sunrpc/sched.c 		rpc_set_queue_timer(queue, timeout);
queue             120 net/sunrpc/sched.c 	list_add(&task->u.tk_wait.timer_list, &queue->timer_list.list);
queue             123 net/sunrpc/sched.c static void rpc_set_waitqueue_priority(struct rpc_wait_queue *queue, int priority)
queue             125 net/sunrpc/sched.c 	if (queue->priority != priority) {
queue             126 net/sunrpc/sched.c 		queue->priority = priority;
queue             127 net/sunrpc/sched.c 		queue->nr = 1U << priority;
queue             131 net/sunrpc/sched.c static void rpc_reset_waitqueue_priority(struct rpc_wait_queue *queue)
queue             133 net/sunrpc/sched.c 	rpc_set_waitqueue_priority(queue, queue->maxpriority);
queue             186 net/sunrpc/sched.c static void __rpc_add_wait_queue_priority(struct rpc_wait_queue *queue,
queue             190 net/sunrpc/sched.c 	if (unlikely(queue_priority > queue->maxpriority))
queue             191 net/sunrpc/sched.c 		queue_priority = queue->maxpriority;
queue             192 net/sunrpc/sched.c 	__rpc_list_enqueue_task(&queue->tasks[queue_priority], task);
queue             203 net/sunrpc/sched.c static void __rpc_add_wait_queue(struct rpc_wait_queue *queue,
queue             212 net/sunrpc/sched.c 	if (RPC_IS_PRIORITY(queue))
queue             213 net/sunrpc/sched.c 		__rpc_add_wait_queue_priority(queue, task, queue_priority);
queue             215 net/sunrpc/sched.c 		list_add(&task->u.tk_wait.list, &queue->tasks[0]);
queue             217 net/sunrpc/sched.c 		list_add_tail(&task->u.tk_wait.list, &queue->tasks[0]);
queue             218 net/sunrpc/sched.c 	task->tk_waitqueue = queue;
queue             219 net/sunrpc/sched.c 	queue->qlen++;
queue             225 net/sunrpc/sched.c 			task->tk_pid, queue, rpc_qname(queue));
queue             240 net/sunrpc/sched.c static void __rpc_remove_wait_queue(struct rpc_wait_queue *queue, struct rpc_task *task)
queue             242 net/sunrpc/sched.c 	__rpc_disable_timer(queue, task);
queue             243 net/sunrpc/sched.c 	if (RPC_IS_PRIORITY(queue))
queue             247 net/sunrpc/sched.c 	queue->qlen--;
queue             249 net/sunrpc/sched.c 			task->tk_pid, queue, rpc_qname(queue));
queue             252 net/sunrpc/sched.c static void __rpc_init_priority_wait_queue(struct rpc_wait_queue *queue, const char *qname, unsigned char nr_queues)
queue             256 net/sunrpc/sched.c 	spin_lock_init(&queue->lock);
queue             257 net/sunrpc/sched.c 	for (i = 0; i < ARRAY_SIZE(queue->tasks); i++)
queue             258 net/sunrpc/sched.c 		INIT_LIST_HEAD(&queue->tasks[i]);
queue             259 net/sunrpc/sched.c 	queue->maxpriority = nr_queues - 1;
queue             260 net/sunrpc/sched.c 	rpc_reset_waitqueue_priority(queue);
queue             261 net/sunrpc/sched.c 	queue->qlen = 0;
queue             262 net/sunrpc/sched.c 	queue->timer_list.expires = 0;
queue             263 net/sunrpc/sched.c 	INIT_DELAYED_WORK(&queue->timer_list.dwork, __rpc_queue_timer_fn);
queue             264 net/sunrpc/sched.c 	INIT_LIST_HEAD(&queue->timer_list.list);
queue             265 net/sunrpc/sched.c 	rpc_assign_waitqueue_name(queue, qname);
queue             268 net/sunrpc/sched.c void rpc_init_priority_wait_queue(struct rpc_wait_queue *queue, const char *qname)
queue             270 net/sunrpc/sched.c 	__rpc_init_priority_wait_queue(queue, qname, RPC_NR_PRIORITY);
queue             274 net/sunrpc/sched.c void rpc_init_wait_queue(struct rpc_wait_queue *queue, const char *qname)
queue             276 net/sunrpc/sched.c 	__rpc_init_priority_wait_queue(queue, qname, 1);
queue             280 net/sunrpc/sched.c void rpc_destroy_wait_queue(struct rpc_wait_queue *queue)
queue             282 net/sunrpc/sched.c 	cancel_delayed_work_sync(&queue->timer_list.dwork);
queue             503 net/sunrpc/sched.c 		struct rpc_wait_queue *queue,
queue             515 net/sunrpc/sched.c 	trace_rpc_task_wakeup(task, queue);
queue             517 net/sunrpc/sched.c 	__rpc_remove_wait_queue(queue, task);
queue             529 net/sunrpc/sched.c 		struct rpc_wait_queue *queue, struct rpc_task *task,
queue             534 net/sunrpc/sched.c 		if (task->tk_waitqueue == queue) {
queue             536 net/sunrpc/sched.c 				__rpc_do_wake_up_task_on_wq(wq, queue, task);
queue             547 net/sunrpc/sched.c static void rpc_wake_up_task_queue_locked(struct rpc_wait_queue *queue,
queue             550 net/sunrpc/sched.c 	rpc_wake_up_task_on_wq_queue_action_locked(rpciod_workqueue, queue,
queue             557 net/sunrpc/sched.c void rpc_wake_up_queued_task(struct rpc_wait_queue *queue, struct rpc_task *task)
queue             561 net/sunrpc/sched.c 	spin_lock(&queue->lock);
queue             562 net/sunrpc/sched.c 	rpc_wake_up_task_queue_locked(queue, task);
queue             563 net/sunrpc/sched.c 	spin_unlock(&queue->lock);
queue             574 net/sunrpc/sched.c rpc_wake_up_task_queue_set_status_locked(struct rpc_wait_queue *queue,
queue             577 net/sunrpc/sched.c 	rpc_wake_up_task_on_wq_queue_action_locked(rpciod_workqueue, queue,
queue             591 net/sunrpc/sched.c rpc_wake_up_queued_task_set_status(struct rpc_wait_queue *queue,
queue             596 net/sunrpc/sched.c 	spin_lock(&queue->lock);
queue             597 net/sunrpc/sched.c 	rpc_wake_up_task_queue_set_status_locked(queue, task, status);
queue             598 net/sunrpc/sched.c 	spin_unlock(&queue->lock);
queue             604 net/sunrpc/sched.c static struct rpc_task *__rpc_find_next_queued_priority(struct rpc_wait_queue *queue)
queue             612 net/sunrpc/sched.c 	q = &queue->tasks[queue->priority];
queue             613 net/sunrpc/sched.c 	if (!list_empty(q) && --queue->nr) {
queue             622 net/sunrpc/sched.c 		if (q == &queue->tasks[0])
queue             623 net/sunrpc/sched.c 			q = &queue->tasks[queue->maxpriority];
queue             630 net/sunrpc/sched.c 	} while (q != &queue->tasks[queue->priority]);
queue             632 net/sunrpc/sched.c 	rpc_reset_waitqueue_priority(queue);
queue             636 net/sunrpc/sched.c 	rpc_set_waitqueue_priority(queue, (unsigned int)(q - &queue->tasks[0]));
queue             641 net/sunrpc/sched.c static struct rpc_task *__rpc_find_next_queued(struct rpc_wait_queue *queue)
queue             643 net/sunrpc/sched.c 	if (RPC_IS_PRIORITY(queue))
queue             644 net/sunrpc/sched.c 		return __rpc_find_next_queued_priority(queue);
queue             645 net/sunrpc/sched.c 	if (!list_empty(&queue->tasks[0]))
queue             646 net/sunrpc/sched.c 		return list_first_entry(&queue->tasks[0], struct rpc_task, u.tk_wait.list);
queue             654 net/sunrpc/sched.c 		struct rpc_wait_queue *queue,
queue             660 net/sunrpc/sched.c 			queue, rpc_qname(queue));
queue             661 net/sunrpc/sched.c 	spin_lock(&queue->lock);
queue             662 net/sunrpc/sched.c 	task = __rpc_find_next_queued(queue);
queue             664 net/sunrpc/sched.c 		task = rpc_wake_up_task_on_wq_queue_action_locked(wq, queue,
queue             666 net/sunrpc/sched.c 	spin_unlock(&queue->lock);
queue             674 net/sunrpc/sched.c struct rpc_task *rpc_wake_up_first(struct rpc_wait_queue *queue,
queue             677 net/sunrpc/sched.c 	return rpc_wake_up_first_on_wq(rpciod_workqueue, queue, func, data);
queue             689 net/sunrpc/sched.c struct rpc_task *rpc_wake_up_next(struct rpc_wait_queue *queue)
queue             691 net/sunrpc/sched.c 	return rpc_wake_up_first(queue, rpc_wake_up_next_func, NULL);
queue             701 net/sunrpc/sched.c void rpc_wake_up(struct rpc_wait_queue *queue)
queue             705 net/sunrpc/sched.c 	spin_lock(&queue->lock);
queue             706 net/sunrpc/sched.c 	head = &queue->tasks[queue->maxpriority];
queue             713 net/sunrpc/sched.c 			rpc_wake_up_task_queue_locked(queue, task);
queue             715 net/sunrpc/sched.c 		if (head == &queue->tasks[0])
queue             719 net/sunrpc/sched.c 	spin_unlock(&queue->lock);
queue             730 net/sunrpc/sched.c void rpc_wake_up_status(struct rpc_wait_queue *queue, int status)
queue             734 net/sunrpc/sched.c 	spin_lock(&queue->lock);
queue             735 net/sunrpc/sched.c 	head = &queue->tasks[queue->maxpriority];
queue             743 net/sunrpc/sched.c 			rpc_wake_up_task_queue_locked(queue, task);
queue             745 net/sunrpc/sched.c 		if (head == &queue->tasks[0])
queue             749 net/sunrpc/sched.c 	spin_unlock(&queue->lock);
queue             755 net/sunrpc/sched.c 	struct rpc_wait_queue *queue = container_of(work,
queue             761 net/sunrpc/sched.c 	spin_lock(&queue->lock);
queue             763 net/sunrpc/sched.c 	list_for_each_entry_safe(task, n, &queue->timer_list.list, u.tk_wait.timer_list) {
queue             768 net/sunrpc/sched.c 			rpc_wake_up_task_queue_locked(queue, task);
queue             774 net/sunrpc/sched.c 	if (!list_empty(&queue->timer_list.list))
queue             775 net/sunrpc/sched.c 		rpc_set_queue_timer(queue, expires);
queue             776 net/sunrpc/sched.c 	spin_unlock(&queue->lock);
queue             844 net/sunrpc/sched.c 	struct rpc_wait_queue *queue;
queue             850 net/sunrpc/sched.c 	queue = READ_ONCE(task->tk_waitqueue);
queue             851 net/sunrpc/sched.c 	if (queue)
queue             852 net/sunrpc/sched.c 		rpc_wake_up_queued_task_set_status(queue, task, -ERESTARTSYS);
queue             874 net/sunrpc/sched.c 	struct rpc_wait_queue *queue;
queue             928 net/sunrpc/sched.c 		queue = task->tk_waitqueue;
queue             929 net/sunrpc/sched.c 		spin_lock(&queue->lock);
queue             931 net/sunrpc/sched.c 			spin_unlock(&queue->lock);
queue             935 net/sunrpc/sched.c 		spin_unlock(&queue->lock);
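
The sunrpc wait queue above serves priorities with a quota: the level being served gets nr = 1U << priority wakeups, and once the quota is spent (or the level empties) the scan rotates to the next lower index, wrapping to the top, and resets the quota at whatever level it lands on. Here is that pick logic reduced to plain singly linked FIFOs, without the locking, timers, or owner-affinity refinements of the real __rpc_find_next_queued_priority():

	#include <stdio.h>

	#define NR_PRIO 4

	struct task { struct task *next; int id; };

	struct wait_queue {
		struct task *tasks[NR_PRIO];	/* FIFO head per priority level */
		unsigned int priority;		/* level currently being served */
		unsigned int nr;		/* remaining quota at this level */
	};

	static void set_priority(struct wait_queue *q, unsigned int prio)
	{
		q->priority = prio;
		q->nr = 1U << prio;	/* higher levels get a bigger batch */
	}

	static struct task *pop(struct task **head)
	{
		struct task *t = *head;

		if (t)
			*head = t->next;
		return t;
	}

	static struct task *find_next(struct wait_queue *q)
	{
		unsigned int prio = q->priority;

		/* Stay on the current level while it has tasks and quota left. */
		if (q->tasks[prio] && --q->nr)
			return pop(&q->tasks[prio]);

		/* Quota spent (or level empty): rotate through the levels. */
		do {
			prio = prio ? prio - 1 : NR_PRIO - 1;
			if (q->tasks[prio]) {
				set_priority(q, prio);	/* fresh quota here */
				return pop(&q->tasks[prio]);
			}
		} while (prio != q->priority);

		return NULL;
	}

	int main(void)
	{
		struct wait_queue q = { 0 };
		struct task pool[5];

		set_priority(&q, NR_PRIO - 1);
		/* three tasks at level 2, two at level 0 (id encodes arrival) */
		for (int i = 0; i < 5; i++) {
			struct task *t = &pool[i], **pp;

			t->id = i;
			t->next = NULL;
			pp = &q.tasks[i < 3 ? 2 : 0];
			while (*pp)
				pp = &(*pp)->next;
			*pp = t;
		}

		struct task *t;
		while ((t = find_next(&q)))
			printf("task %d (served at level %u)\n", t->id, q.priority);
		return 0;
	}
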
queue            1720 net/tipc/link.c 	struct sk_buff_head *queue = &l->transmq;
queue            1783 net/tipc/link.c 	skb_queue_walk(queue, skb) {
queue            1785 net/tipc/link.c 		if (queue == &l->backlogq)
queue            1831 net/tipc/link.c 	if (queue != &l->backlogq) {
queue            1832 net/tipc/link.c 		queue = &l->backlogq;
queue             249 net/tipc/topsrv.c 	struct list_head *queue = &con->outqueue;
queue             260 net/tipc/topsrv.c 	while (!list_empty(queue)) {
queue             261 net/tipc/topsrv.c 		e = list_first_entry(queue, struct outqueue_entry, list);
queue             105 net/x25/x25_link.c 		while ((skbn = skb_dequeue(&nb->queue)) != NULL)
queue             196 net/x25/x25_link.c 		skb_queue_tail(&nb->queue, skb);
queue             202 net/x25/x25_link.c 		skb_queue_tail(&nb->queue, skb);
queue             249 net/x25/x25_link.c 	skb_queue_head_init(&nb->queue);
queue             280 net/x25/x25_link.c 	skb_queue_purge(&nb->queue);
queue             460 net/xdp/xsk.c  static int xsk_init_queue(u32 entries, struct xsk_queue **queue,
queue             465 net/xdp/xsk.c  	if (entries == 0 || *queue || !is_power_of_2(entries))
queue             474 net/xdp/xsk.c  	WRITE_ONCE(*queue, q);
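
xsk_init_queue above rejects a ring size that is zero, already set, or not a power of two. A power-of-two size lets the ring map an ever-growing cursor onto a slot with a cheap mask instead of a modulo; a standalone sketch of that check and the masking (hypothetical names, not the xsk implementation):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static bool is_power_of_2(uint32_t n)
{
	return n != 0 && (n & (n - 1)) == 0;
}

int main(void)
{
	uint32_t entries = 8;            /* hypothetical ring size */

	if (!is_power_of_2(entries)) {
		fprintf(stderr, "ring size must be a power of two\n");
		return 1;
	}

	/*
	 * Producer/consumer cursors can grow without bound; the mask
	 * folds them back into the ring.
	 */
	uint32_t mask = entries - 1;

	for (uint32_t cursor = 5; cursor < 5 + 4; cursor++)
		printf("cursor %u -> slot %u\n", cursor, cursor & mask);
	return 0;
}
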
queue              26 net/xdp/xsk_diag.c static int xsk_diag_put_ring(const struct xsk_queue *queue, int nl_type,
queue              31 net/xdp/xsk_diag.c 	dr.entries = queue->nentries;
queue              28 net/xfrm/xfrm_input.c 	struct sk_buff_head queue;
queue             762 net/xfrm/xfrm_input.c 	struct sk_buff_head queue;
queue             765 net/xfrm/xfrm_input.c 	__skb_queue_head_init(&queue);
queue             766 net/xfrm/xfrm_input.c 	skb_queue_splice_init(&trans->queue, &queue);
queue             768 net/xfrm/xfrm_input.c 	while ((skb = __skb_dequeue(&queue)))
queue             780 net/xfrm/xfrm_input.c 	if (skb_queue_len(&trans->queue) >= netdev_max_backlog)
queue             784 net/xfrm/xfrm_input.c 	__skb_queue_tail(&trans->queue, skb);
queue             804 net/xfrm/xfrm_input.c 		__skb_queue_head_init(&trans->queue);
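
The xfrm_input.c entries show the splice-and-drain idiom: the shared trans->queue is moved wholesale onto a stack-local queue under the lock, drained without the lock held, and producers are refused once skb_queue_len() reaches netdev_max_backlog. A userspace approximation with pthreads; the names and the MAX_BACKLOG value are stand-ins, not the kernel code:

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

#define MAX_BACKLOG 1000              /* stand-in for netdev_max_backlog */

struct pkt { int id; struct pkt *next; };

struct backlog {
	pthread_mutex_t lock;
	struct pkt *head, *tail;
	int len;
};

/* Enqueue, refusing new work past the cap (as the length check above does). */
static int backlog_add(struct backlog *b, int id)
{
	pthread_mutex_lock(&b->lock);
	if (b->len >= MAX_BACKLOG) {
		pthread_mutex_unlock(&b->lock);
		return -1;
	}
	struct pkt *p = malloc(sizeof(*p));

	p->id = id;
	p->next = NULL;
	if (b->tail)
		b->tail->next = p;
	else
		b->head = p;
	b->tail = p;
	b->len++;
	pthread_mutex_unlock(&b->lock);
	return 0;
}

/* Splice the shared list onto a private head, then drain it unlocked. */
static void backlog_drain(struct backlog *b)
{
	pthread_mutex_lock(&b->lock);
	struct pkt *local = b->head;

	b->head = b->tail = NULL;
	b->len = 0;
	pthread_mutex_unlock(&b->lock);

	while (local) {
		struct pkt *p = local;

		local = p->next;
		printf("processing packet %d\n", p->id);
		free(p);
	}
}

int main(void)
{
	struct backlog b = { PTHREAD_MUTEX_INITIALIZER, NULL, NULL, 0 };

	for (int i = 0; i < 3; i++)
		backlog_add(&b, i);
	backlog_drain(&b);
	return 0;
}
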
queue              75 samples/v4l/v4l2-pci-skeleton.c 	struct vb2_queue queue;
queue             383 samples/v4l/v4l2-pci-skeleton.c 	if (vb2_is_busy(&skel->queue))
queue             430 samples/v4l/v4l2-pci-skeleton.c 	if (vb2_is_busy(&skel->queue))
queue             514 samples/v4l/v4l2-pci-skeleton.c 	if (vb2_is_busy(&skel->queue))
queue             633 samples/v4l/v4l2-pci-skeleton.c 	if (vb2_is_busy(&skel->queue))
queue             821 samples/v4l/v4l2-pci-skeleton.c 	q = &skel->queue;
queue             876 samples/v4l/v4l2-pci-skeleton.c 	vdev->queue = q;
queue             174 security/integrity/ima/ima.h 	struct hlist_head queue[IMA_MEASURE_HTABLE_SIZE];
queue              40 security/integrity/ima/ima_queue.c 	.queue[0 ... IMA_MEASURE_HTABLE_SIZE - 1] = HLIST_HEAD_INIT
queue              59 security/integrity/ima/ima_queue.c 	hlist_for_each_entry_rcu(qe, &ima_htable.queue[key], hnext) {
queue             113 security/integrity/ima/ima_queue.c 		hlist_add_head_rcu(&qe->hnext, &ima_htable.queue[key]);
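
ima_htable.queue[] above is a fixed array of hlist heads: entries are chained into the bucket selected by a key derived from the measurement digest, inserted at the chain head, and looked up by walking the chain. A minimal sketch of the same shape, with hypothetical names and sizes and the kernel's RCU protection omitted:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define HTABLE_SIZE 512               /* stand-in for IMA_MEASURE_HTABLE_SIZE */

struct entry {
	unsigned char digest[20];
	struct entry *next;
};

static struct entry *htable[HTABLE_SIZE];

static unsigned int hash_key(const unsigned char *digest)
{
	/* A digest is already uniform, so one byte makes a usable key. */
	return digest[0] % HTABLE_SIZE;
}

static void htable_add(struct entry *e)
{
	unsigned int key = hash_key(e->digest);

	e->next = htable[key];            /* insert at chain head */
	htable[key] = e;
}

static struct entry *htable_lookup(const unsigned char *digest)
{
	for (struct entry *e = htable[hash_key(digest)]; e; e = e->next)
		if (memcmp(e->digest, digest, sizeof(e->digest)) == 0)
			return e;
	return NULL;
}

int main(void)
{
	struct entry *e = calloc(1, sizeof(*e));

	memset(e->digest, 0xab, sizeof(e->digest));
	htable_add(e);
	printf("found: %s\n", htable_lookup(e->digest) ? "yes" : "no");
	return 0;
}
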
queue              75 sound/core/seq/oss/seq_oss_device.h 	int queue;	/* sequencer queue number */
queue             158 sound/core/seq/oss/seq_oss_device.h 	ev->queue = dp->queue;
queue              45 sound/core/seq/oss/seq_oss_init.c static int delete_seq_queue(int queue);
queue             183 sound/core/seq/oss/seq_oss_init.c 	dp->queue = -1;
queue             273 sound/core/seq/oss/seq_oss_init.c 	delete_seq_queue(dp->queue);
queue             359 sound/core/seq/oss/seq_oss_init.c 	dp->queue = qinfo.queue;
queue             367 sound/core/seq/oss/seq_oss_init.c delete_seq_queue(int queue)
queue             372 sound/core/seq/oss/seq_oss_init.c 	if (queue < 0)
queue             375 sound/core/seq/oss/seq_oss_init.c 	qinfo.queue = queue;
queue             378 sound/core/seq/oss/seq_oss_init.c 		pr_err("ALSA: seq_oss: unable to delete queue %d (%d)\n", queue, rc);
queue             407 sound/core/seq/oss/seq_oss_init.c 	int queue;
queue             418 sound/core/seq/oss/seq_oss_init.c 	queue = dp->queue;
queue             421 sound/core/seq/oss/seq_oss_init.c 	delete_seq_queue(queue);
queue             492 sound/core/seq/oss/seq_oss_init.c 		snd_iprintf(buf, "port %d : queue %d\n", dp->port, dp->queue);
queue             363 sound/core/seq/oss/seq_oss_midi.c 		subs.queue = dp->queue;		/* queue for timestamps */
queue             460 sound/core/seq/oss/seq_oss_midi.c 		ev.queue = dp->queue;
queue             137 sound/core/seq/oss/seq_oss_timer.c 	ev.queue = dp->queue;
queue             138 sound/core/seq/oss/seq_oss_timer.c 	ev.data.queue.queue = dp->queue;
queue             139 sound/core/seq/oss/seq_oss_timer.c 	ev.data.queue.param.value = value;
queue             156 sound/core/seq/oss/seq_oss_timer.c 	tmprec.queue = dp->queue;
queue             549 sound/core/seq/seq_clientmgr.c 	bounce_ev.queue = SNDRV_SEQ_QUEUE_DIRECT;
queue             573 sound/core/seq/seq_clientmgr.c 				     int queue, int real_time)
queue             577 sound/core/seq/seq_clientmgr.c 	q = queueptr(queue);
queue             580 sound/core/seq/seq_clientmgr.c 	event->queue = queue;
queue             691 sound/core/seq/seq_clientmgr.c 			update_timestamp_of_queue(event, subs->info.queue,
queue             826 sound/core/seq/seq_clientmgr.c 	if (event->queue == SNDRV_SEQ_ADDRESS_SUBSCRIBERS ||
queue             830 sound/core/seq/seq_clientmgr.c 	else if (event->queue == SNDRV_SEQ_ADDRESS_BROADCAST ||
queue             936 sound/core/seq/seq_clientmgr.c 	if (event->queue == SNDRV_SEQ_ADDRESS_SUBSCRIBERS) {
queue             938 sound/core/seq/seq_clientmgr.c 		event->queue = SNDRV_SEQ_QUEUE_DIRECT;
queue             941 sound/core/seq/seq_clientmgr.c 		if (event->queue == SNDRV_SEQ_ADDRESS_BROADCAST) {
queue             943 sound/core/seq/seq_clientmgr.c 			event->queue = SNDRV_SEQ_QUEUE_DIRECT;
queue             962 sound/core/seq/seq_clientmgr.c 	if (snd_seq_queue_is_used(event->queue, client->number) <= 0)
queue            1552 sound/core/seq/seq_clientmgr.c 	info->queue = q->queue;
queue            1558 sound/core/seq/seq_clientmgr.c 		snprintf(info->name, sizeof(info->name), "Queue-%d", q->queue);
queue            1570 sound/core/seq/seq_clientmgr.c 	return snd_seq_queue_delete(client->number, info->queue);
queue            1580 sound/core/seq/seq_clientmgr.c 	q = queueptr(info->queue);
queue            1585 sound/core/seq/seq_clientmgr.c 	info->queue = q->queue;
queue            1605 sound/core/seq/seq_clientmgr.c 	if (snd_seq_queue_check_access(info->queue, client->number)) {
queue            1606 sound/core/seq/seq_clientmgr.c 		if (snd_seq_queue_set_owner(info->queue, client->number, info->locked) < 0)
queue            1609 sound/core/seq/seq_clientmgr.c 			snd_seq_queue_use(info->queue, client->number, 1);
queue            1614 sound/core/seq/seq_clientmgr.c 	q = queueptr(info->queue);
queue            1637 sound/core/seq/seq_clientmgr.c 	info->queue = q->queue;
queue            1650 sound/core/seq/seq_clientmgr.c 	struct snd_seq_queue *queue;
queue            1653 sound/core/seq/seq_clientmgr.c 	queue = queueptr(status->queue);
queue            1654 sound/core/seq/seq_clientmgr.c 	if (queue == NULL)
queue            1657 sound/core/seq/seq_clientmgr.c 	status->queue = queue->queue;
queue            1659 sound/core/seq/seq_clientmgr.c 	tmr = queue->timer;
queue            1660 sound/core/seq/seq_clientmgr.c 	status->events = queue->tickq->cells + queue->timeq->cells;
queue            1667 sound/core/seq/seq_clientmgr.c 	status->flags = queue->flags;
queue            1668 sound/core/seq/seq_clientmgr.c 	queuefree(queue);
queue            1679 sound/core/seq/seq_clientmgr.c 	struct snd_seq_queue *queue;
queue            1682 sound/core/seq/seq_clientmgr.c 	queue = queueptr(tempo->queue);
queue            1683 sound/core/seq/seq_clientmgr.c 	if (queue == NULL)
queue            1686 sound/core/seq/seq_clientmgr.c 	tempo->queue = queue->queue;
queue            1688 sound/core/seq/seq_clientmgr.c 	tmr = queue->timer;
queue            1694 sound/core/seq/seq_clientmgr.c 	queuefree(queue);
queue            1703 sound/core/seq/seq_clientmgr.c 	if (!snd_seq_queue_check_access(tempo->queue, client))
queue            1705 sound/core/seq/seq_clientmgr.c 	return snd_seq_queue_timer_set_tempo(tempo->queue, client, tempo);
queue            1725 sound/core/seq/seq_clientmgr.c 	struct snd_seq_queue *queue;
queue            1728 sound/core/seq/seq_clientmgr.c 	queue = queueptr(timer->queue);
queue            1729 sound/core/seq/seq_clientmgr.c 	if (queue == NULL)
queue            1732 sound/core/seq/seq_clientmgr.c 	mutex_lock(&queue->timer_mutex);
queue            1733 sound/core/seq/seq_clientmgr.c 	tmr = queue->timer;
queue            1735 sound/core/seq/seq_clientmgr.c 	timer->queue = queue->queue;
queue            1742 sound/core/seq/seq_clientmgr.c 	mutex_unlock(&queue->timer_mutex);
queue            1743 sound/core/seq/seq_clientmgr.c 	queuefree(queue);
queue            1759 sound/core/seq/seq_clientmgr.c 	if (snd_seq_queue_check_access(timer->queue, client->number)) {
queue            1763 sound/core/seq/seq_clientmgr.c 		q = queueptr(timer->queue);
queue            1768 sound/core/seq/seq_clientmgr.c 		snd_seq_queue_timer_close(timer->queue);
queue            1774 sound/core/seq/seq_clientmgr.c 		result = snd_seq_queue_timer_open(timer->queue);
queue            1792 sound/core/seq/seq_clientmgr.c 	used = snd_seq_queue_is_used(info->queue, client->number);
queue            1810 sound/core/seq/seq_clientmgr.c 		err = snd_seq_queue_use(info->queue, client->number, info->used);
queue            1992 sound/core/seq/seq_clientmgr.c 			subs->queue = s->info.queue;
queue            2310 sound/core/seq/seq_clientmgr.c 	ev->queue = SNDRV_SEQ_QUEUE_DIRECT;
queue            2410 sound/core/seq/seq_clientmgr.c 			snd_iprintf(buffer, "[%c:%d]", ((s->info.flags & SNDRV_SEQ_PORT_SUBS_TIME_REAL) ? 'r' : 't'), s->info.queue);
queue             461 sound/core/seq/seq_ports.c 			return r->queue == s->queue;
queue              59 sound/core/seq/seq_queue.c 			q->queue = i;
queue             108 sound/core/seq/seq_queue.c 	q->queue = -1;
queue             161 sound/core/seq/seq_queue.c static void queue_use(struct snd_seq_queue *queue, int client, int use);
queue             296 sound/core/seq/seq_queue.c 	dest = cell->event.queue;	/* destination queue */
queue             428 sound/core/seq/seq_queue.c 	struct snd_seq_queue *queue;
queue             431 sound/core/seq/seq_queue.c 	queue = queueptr(queueid);
queue             432 sound/core/seq/seq_queue.c 	if (queue == NULL)
queue             434 sound/core/seq/seq_queue.c 	tmr = queue->timer;
queue             435 sound/core/seq/seq_queue.c 	if ((result = snd_seq_timer_open(queue)) < 0) {
queue             437 sound/core/seq/seq_queue.c 		result = snd_seq_timer_open(queue);
queue             439 sound/core/seq/seq_queue.c 	queuefree(queue);
queue             448 sound/core/seq/seq_queue.c 	struct snd_seq_queue *queue;
queue             451 sound/core/seq/seq_queue.c 	queue = queueptr(queueid);
queue             452 sound/core/seq/seq_queue.c 	if (queue == NULL)
queue             454 sound/core/seq/seq_queue.c 	snd_seq_timer_close(queue);
queue             455 sound/core/seq/seq_queue.c 	queuefree(queue);
queue             483 sound/core/seq/seq_queue.c static void queue_use(struct snd_seq_queue *queue, int client, int use)
queue             486 sound/core/seq/seq_queue.c 		if (!test_and_set_bit(client, queue->clients_bitmap))
queue             487 sound/core/seq/seq_queue.c 			queue->clients++;
queue             489 sound/core/seq/seq_queue.c 		if (test_and_clear_bit(client, queue->clients_bitmap))
queue             490 sound/core/seq/seq_queue.c 			queue->clients--;
queue             492 sound/core/seq/seq_queue.c 	if (queue->clients) {
queue             493 sound/core/seq/seq_queue.c 		if (use && queue->clients == 1)
queue             494 sound/core/seq/seq_queue.c 			snd_seq_timer_defaults(queue->timer);
queue             495 sound/core/seq/seq_queue.c 		snd_seq_timer_open(queue);
queue             497 sound/core/seq/seq_queue.c 		snd_seq_timer_close(queue);
queue             507 sound/core/seq/seq_queue.c 	struct snd_seq_queue *queue;
queue             509 sound/core/seq/seq_queue.c 	queue = queueptr(queueid);
queue             510 sound/core/seq/seq_queue.c 	if (queue == NULL)
queue             512 sound/core/seq/seq_queue.c 	mutex_lock(&queue->timer_mutex);
queue             513 sound/core/seq/seq_queue.c 	queue_use(queue, client, use);
queue             514 sound/core/seq/seq_queue.c 	mutex_unlock(&queue->timer_mutex);
queue             515 sound/core/seq/seq_queue.c 	queuefree(queue);
queue             591 sound/core/seq/seq_queue.c 			snd_seq_queue_use(q->queue, client, 0);
queue             627 sound/core/seq/seq_queue.c 		     q->queue == info->queue)) {
queue             649 sound/core/seq/seq_queue.c 	sev.queue = q->queue;
queue             650 sound/core/seq/seq_queue.c 	sev.data.queue.queue = q->queue;
queue             686 sound/core/seq/seq_queue.c 		snd_seq_timer_set_tempo(q->timer, ev->data.queue.param.value);
queue             691 sound/core/seq/seq_queue.c 		if (snd_seq_timer_set_position_tick(q->timer, ev->data.queue.param.time.tick) == 0) {
queue             697 sound/core/seq/seq_queue.c 		if (snd_seq_timer_set_position_time(q->timer, ev->data.queue.param.time.time) == 0) {
queue             703 sound/core/seq/seq_queue.c 					   ev->data.queue.param.skew.value,
queue             704 sound/core/seq/seq_queue.c 					   ev->data.queue.param.skew.base) == 0) {
queue             722 sound/core/seq/seq_queue.c 	q = queueptr(ev->data.queue.queue);
queue             768 sound/core/seq/seq_queue.c 		snd_iprintf(buffer, "queue %d: [%s]\n", q->queue, q->name);
queue              20 sound/core/seq/seq_queue.h 	int queue;		/* queue number */
queue             270 sound/core/seq/seq_timer.c 	sprintf(str, "sequencer queue %i", q->queue);
queue             275 sound/core/seq/seq_timer.c 	err = snd_timer_open(&t, str, &tmr->alsa_id, q->queue);
queue             285 sound/core/seq/seq_timer.c 			err = snd_timer_open(&t, str, &tid, q->queue);
queue             482 sound/core/seq/seq_timer.c 		snd_iprintf(buffer, "Timer for queue %i : %s\n", q->queue, ti->timer->name);
queue              57 sound/core/timer.c 	struct snd_timer_read *queue;
queue            1303 sound/core/timer.c 		r = &tu->queue[prev];
queue            1312 sound/core/timer.c 		r = &tu->queue[tu->qtail++];
queue            1430 sound/core/timer.c 	struct snd_timer_read *queue = NULL;
queue            1438 sound/core/timer.c 		queue = kcalloc(size, sizeof(*queue), GFP_KERNEL);
queue            1439 sound/core/timer.c 		if (!queue)
queue            1444 sound/core/timer.c 	kfree(tu->queue);
queue            1447 sound/core/timer.c 	tu->queue = queue;
queue            1490 sound/core/timer.c 		kfree(tu->queue);
queue            1867 sound/core/timer.c 			struct snd_timer_read *r = &tu->queue[0];
queue            1899 sound/core/timer.c 	status.queue = tu->qused;
queue            2109 sound/core/timer.c 			if (copy_to_user(buffer, &tu->queue[qhead],
queue              77 sound/core/timer_compat.c 	u32 queue;
queue              97 sound/core/timer_compat.c 	status.queue = tu->qused;
queue             159 sound/firewire/tascam/amdtp-tascam.c 						&tscm->queue[tscm->push_pos];
queue              42 sound/firewire/tascam/tascam-hwdep.c 	struct snd_firewire_tascam_change *entries = tscm->queue;
queue              97 sound/firewire/tascam/tascam.h 	struct snd_firewire_tascam_change queue[SND_TSCM_QUEUE_COUNT];
queue             243 sound/oss/dmasound/dmasound.h #define WAKE_UP(queue)		(wake_up_interruptible(&queue))
queue             769 tools/include/uapi/sound/asound.h 	unsigned int queue;		/* used queue size */
queue            1369 tools/perf/util/annotate.c 		       int max_lines, struct annotation_line *queue, int addr_fmt_width,
queue            1401 tools/perf/util/annotate.c 		if (queue != NULL) {
queue            1402 tools/perf/util/annotate.c 			list_for_each_entry_from(queue, &notes->src->source, node) {
queue            1403 tools/perf/util/annotate.c 				if (queue == al)
queue            1405 tools/perf/util/annotate.c 				annotation_line__print(queue, sym, start, evsel, len,
queue            1453 tools/perf/util/annotate.c 		if (queue)
queue            2276 tools/perf/util/annotate.c 	struct annotation_line *pos, *queue = NULL;
queue            2321 tools/perf/util/annotate.c 		if (context && queue == NULL) {
queue            2322 tools/perf/util/annotate.c 			queue = pos;
queue            2328 tools/perf/util/annotate.c 					     queue, addr_fmt_width, opts->percent_type);
queue            2335 tools/perf/util/annotate.c 				queue = NULL;
queue            2352 tools/perf/util/annotate.c 				queue = list_entry(queue->node.next, typeof(*queue), node);
queue             240 tools/perf/util/auxtrace.c 	struct auxtrace_queue *queue;
queue             249 tools/perf/util/auxtrace.c 	queue = &queues->queue_array[idx];
queue             251 tools/perf/util/auxtrace.c 	if (!queue->set) {
queue             252 tools/perf/util/auxtrace.c 		queue->set = true;
queue             253 tools/perf/util/auxtrace.c 		queue->tid = buffer->tid;
queue             254 tools/perf/util/auxtrace.c 		queue->cpu = buffer->cpu;
queue             255 tools/perf/util/auxtrace.c 	} else if (buffer->cpu != queue->cpu || buffer->tid != queue->tid) {
queue             257 tools/perf/util/auxtrace.c 		       queue->cpu, queue->tid, buffer->cpu, buffer->tid);
queue             263 tools/perf/util/auxtrace.c 	list_add_tail(&buffer->list, &queue->head);
queue             798 tools/perf/util/auxtrace.c struct auxtrace_buffer *auxtrace_buffer__next(struct auxtrace_queue *queue,
queue             802 tools/perf/util/auxtrace.c 		if (list_is_last(&buffer->list, &queue->head))
queue             807 tools/perf/util/auxtrace.c 		if (list_empty(&queue->head))
queue             809 tools/perf/util/auxtrace.c 		return list_entry(queue->head.next, struct auxtrace_buffer,
queue             468 tools/perf/util/auxtrace.h struct auxtrace_buffer *auxtrace_buffer__next(struct auxtrace_queue *queue,
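
auxtrace_buffer__next above implements a cursor over a per-queue buffer list: passed NULL it returns the first buffer (or NULL if the list is empty), otherwise the entry after the given one, with NULL marking the end. A self-contained sketch of that contract using stand-in list helpers, not the perf implementation:

#include <stddef.h>
#include <stdio.h>

struct list_head { struct list_head *next, *prev; };

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct buffer { int id; struct list_head list; };

static struct buffer *buffer_next(struct list_head *head, struct buffer *b)
{
	if (b) {
		if (b->list.next == head)       /* b was the last entry */
			return NULL;
		return container_of(b->list.next, struct buffer, list);
	}
	if (head->next == head)                 /* empty list */
		return NULL;
	return container_of(head->next, struct buffer, list);
}

int main(void)
{
	struct list_head head = { &head, &head };
	struct buffer a = { 1 }, b = { 2 };

	/* Link a then b onto the circular list by hand. */
	a.list.prev = &head;	a.list.next = &b.list;
	b.list.prev = &a.list;	b.list.next = &head;
	head.next = &a.list;	head.prev = &b.list;

	for (struct buffer *p = NULL; (p = buffer_next(&head, p)); )
		printf("buffer %d\n", p->id);
	return 0;
}
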
queue             459 tools/perf/util/cs-etm-decoder/cs-etm-decoder.c cs_etm_decoder__buffer_discontinuity(struct cs_etm_packet_queue *queue,
queue             466 tools/perf/util/cs-etm-decoder/cs-etm-decoder.c 	cs_etm_decoder__reset_timestamp(queue);
queue             467 tools/perf/util/cs-etm-decoder/cs-etm-decoder.c 	return cs_etm_decoder__buffer_packet(queue, trace_chan_id,
queue             472 tools/perf/util/cs-etm-decoder/cs-etm-decoder.c cs_etm_decoder__buffer_exception(struct cs_etm_packet_queue *queue,
queue             478 tools/perf/util/cs-etm-decoder/cs-etm-decoder.c 	ret = cs_etm_decoder__buffer_packet(queue, trace_chan_id,
queue             483 tools/perf/util/cs-etm-decoder/cs-etm-decoder.c 	packet = &queue->packet_buffer[queue->tail];
queue             490 tools/perf/util/cs-etm-decoder/cs-etm-decoder.c cs_etm_decoder__buffer_exception_ret(struct cs_etm_packet_queue *queue,
queue             493 tools/perf/util/cs-etm-decoder/cs-etm-decoder.c 	return cs_etm_decoder__buffer_packet(queue, trace_chan_id,
queue             192 tools/perf/util/cs-etm.c static void cs_etm__clear_packet_queue(struct cs_etm_packet_queue *queue)
queue             196 tools/perf/util/cs-etm.c 	queue->head = 0;
queue             197 tools/perf/util/cs-etm.c 	queue->tail = 0;
queue             198 tools/perf/util/cs-etm.c 	queue->packet_count = 0;
queue             200 tools/perf/util/cs-etm.c 		queue->packet_buffer[i].isa = CS_ETM_ISA_UNKNOWN;
queue             201 tools/perf/util/cs-etm.c 		queue->packet_buffer[i].start_addr = CS_ETM_INVAL_ADDR;
queue             202 tools/perf/util/cs-etm.c 		queue->packet_buffer[i].end_addr = CS_ETM_INVAL_ADDR;
queue             203 tools/perf/util/cs-etm.c 		queue->packet_buffer[i].instr_count = 0;
queue             204 tools/perf/util/cs-etm.c 		queue->packet_buffer[i].last_instr_taken_branch = false;
queue             205 tools/perf/util/cs-etm.c 		queue->packet_buffer[i].last_instr_size = 0;
queue             206 tools/perf/util/cs-etm.c 		queue->packet_buffer[i].last_instr_type = 0;
queue             207 tools/perf/util/cs-etm.c 		queue->packet_buffer[i].last_instr_subtype = 0;
queue             208 tools/perf/util/cs-etm.c 		queue->packet_buffer[i].last_instr_cond = 0;
queue             209 tools/perf/util/cs-etm.c 		queue->packet_buffer[i].flags = 0;
queue             210 tools/perf/util/cs-etm.c 		queue->packet_buffer[i].exception_number = UINT32_MAX;
queue             211 tools/perf/util/cs-etm.c 		queue->packet_buffer[i].trace_chan_id = UINT8_MAX;
queue             212 tools/perf/util/cs-etm.c 		queue->packet_buffer[i].cpu = INT_MIN;
queue             235 tools/perf/util/cs-etm.c 	struct auxtrace_queue *queue;
queue             240 tools/perf/util/cs-etm.c 	queue = &etmq->etm->queues.queue_array[etmq->queue_nr];
queue             241 tools/perf/util/cs-etm.c 	tidq->tid = queue->tid;
queue             739 tools/perf/util/cs-etm.c 			       struct auxtrace_queue *queue,
queue             746 tools/perf/util/cs-etm.c 	struct cs_etm_queue *etmq = queue->priv;
queue             748 tools/perf/util/cs-etm.c 	if (list_empty(&queue->head) || etmq)
queue             758 tools/perf/util/cs-etm.c 	queue->priv = etmq;
queue            1005 tools/perf/util/cs-etm.c 	struct auxtrace_queue *queue;
queue            1007 tools/perf/util/cs-etm.c 	queue = &etmq->etm->queues.queue_array[etmq->queue_nr];
queue            1009 tools/perf/util/cs-etm.c 	aux_buffer = auxtrace_buffer__next(queue, aux_buffer);
queue            2068 tools/perf/util/cs-etm.c 		struct auxtrace_queue *queue = &etm->queues.queue_array[i];
queue            2069 tools/perf/util/cs-etm.c 		struct cs_etm_queue *etmq = queue->priv;
queue            2096 tools/perf/util/cs-etm.c 	struct auxtrace_queue *queue;
queue            2108 tools/perf/util/cs-etm.c 		queue = &etm->queues.queue_array[queue_nr];
queue            2109 tools/perf/util/cs-etm.c 		etmq = queue->priv;
queue             167 tools/perf/util/intel-bts.c 				 struct auxtrace_queue *queue,
queue             170 tools/perf/util/intel-bts.c 	struct intel_bts_queue *btsq = queue->priv;
queue             172 tools/perf/util/intel-bts.c 	if (list_empty(&queue->head))
queue             179 tools/perf/util/intel-bts.c 		queue->priv = btsq;
queue             181 tools/perf/util/intel-bts.c 		if (queue->cpu != -1)
queue             182 tools/perf/util/intel-bts.c 			btsq->cpu = queue->cpu;
queue             183 tools/perf/util/intel-bts.c 		btsq->tid = queue->tid;
queue             192 tools/perf/util/intel-bts.c 		btsq->buffer = auxtrace_buffer__next(queue, NULL);
queue             248 tools/perf/util/intel-bts.c static int intel_bts_do_fix_overlap(struct auxtrace_queue *queue,
queue             254 tools/perf/util/intel-bts.c 	if (b->list.prev == &queue->head)
queue             448 tools/perf/util/intel-bts.c 	struct auxtrace_queue *queue;
queue             465 tools/perf/util/intel-bts.c 	queue = &btsq->bts->queues.queue_array[btsq->queue_nr];
queue             468 tools/perf/util/intel-bts.c 		buffer = auxtrace_buffer__next(queue, NULL);
queue             494 tools/perf/util/intel-bts.c 	    intel_bts_do_fix_overlap(queue, buffer)) {
queue             509 tools/perf/util/intel-bts.c 	btsq->buffer = auxtrace_buffer__next(queue, buffer);
queue             543 tools/perf/util/intel-bts.c 		struct auxtrace_queue *queue = &bts->queues.queue_array[i];
queue             544 tools/perf/util/intel-bts.c 		struct intel_bts_queue *btsq = queue->priv;
queue             556 tools/perf/util/intel-bts.c 		struct auxtrace_queue *queue;
queue             568 tools/perf/util/intel-bts.c 		queue = &bts->queues.queue_array[queue_nr];
queue             569 tools/perf/util/intel-bts.c 		btsq = queue->priv;
queue             309 tools/perf/util/intel-pt.c 	struct auxtrace_queue *queue;
queue             312 tools/perf/util/intel-pt.c 	queue = &ptq->pt->queues.queue_array[ptq->queue_nr];
queue             317 tools/perf/util/intel-pt.c 		buffer = auxtrace_buffer__next(queue, buffer);
queue             354 tools/perf/util/intel-pt.c 	struct auxtrace_queue *queue;
queue             362 tools/perf/util/intel-pt.c 	queue = &ptq->pt->queues.queue_array[ptq->queue_nr];
queue             364 tools/perf/util/intel-pt.c 	buffer = auxtrace_buffer__next(queue, buffer);
queue             971 tools/perf/util/intel-pt.c 				     struct auxtrace_queue *queue)
queue             973 tools/perf/util/intel-pt.c 	struct intel_pt_queue *ptq = queue->priv;
queue             975 tools/perf/util/intel-pt.c 	if (queue->tid == -1 || pt->have_sched_switch) {
queue             985 tools/perf/util/intel-pt.c 		if (queue->cpu == -1)
queue            1039 tools/perf/util/intel-pt.c 				struct auxtrace_queue *queue,
queue            1042 tools/perf/util/intel-pt.c 	struct intel_pt_queue *ptq = queue->priv;
queue            1044 tools/perf/util/intel-pt.c 	if (list_empty(&queue->head))
queue            1051 tools/perf/util/intel-pt.c 		queue->priv = ptq;
queue            1053 tools/perf/util/intel-pt.c 		if (queue->cpu != -1)
queue            1054 tools/perf/util/intel-pt.c 			ptq->cpu = queue->cpu;
queue            1055 tools/perf/util/intel-pt.c 		ptq->tid = queue->tid;
queue            1858 tools/perf/util/intel-pt.c 	struct auxtrace_queue *queue;
queue            1869 tools/perf/util/intel-pt.c 	queue = &pt->queues.queue_array[ptq->queue_nr];
queue            1870 tools/perf/util/intel-pt.c 	intel_pt_set_pid_tid_cpu(pt, queue);
queue            2076 tools/perf/util/intel-pt.c 		struct auxtrace_queue *queue = &pt->queues.queue_array[i];
queue            2077 tools/perf/util/intel-pt.c 		struct intel_pt_queue *ptq = queue->priv;
queue            2255 tools/perf/util/intel-pt.c 		struct auxtrace_queue *queue;
queue            2265 tools/perf/util/intel-pt.c 		queue = &pt->queues.queue_array[queue_nr];
queue            2266 tools/perf/util/intel-pt.c 		ptq = queue->priv;
queue            2282 tools/perf/util/intel-pt.c 		intel_pt_set_pid_tid_cpu(pt, queue);
queue            2311 tools/perf/util/intel-pt.c 		struct auxtrace_queue *queue = &pt->queues.queue_array[i];
queue            2312 tools/perf/util/intel-pt.c 		struct intel_pt_queue *ptq = queue->priv;
queue            2316 tools/perf/util/intel-pt.c 			intel_pt_set_pid_tid_cpu(pt, queue);
queue             697 tools/perf/util/s390-cpumsf.c 	struct auxtrace_queue *queue;
queue             700 tools/perf/util/s390-cpumsf.c 	queue = &sfq->sf->queues.queue_array[sfq->queue_nr];
queue             709 tools/perf/util/s390-cpumsf.c 		sfq->buffer = buffer = auxtrace_buffer__next(queue,
queue             799 tools/perf/util/s390-cpumsf.c 				   struct auxtrace_queue *queue,
queue             802 tools/perf/util/s390-cpumsf.c 	struct s390_cpumsf_queue *sfq = queue->priv;
queue             804 tools/perf/util/s390-cpumsf.c 	if (list_empty(&queue->head))
queue             811 tools/perf/util/s390-cpumsf.c 		queue->priv = sfq;
queue             813 tools/perf/util/s390-cpumsf.c 		if (queue->cpu != -1)
queue             814 tools/perf/util/s390-cpumsf.c 			sfq->cpu = queue->cpu;
queue             849 tools/perf/util/s390-cpumsf.c 		struct auxtrace_queue *queue;
queue             859 tools/perf/util/s390-cpumsf.c 		queue = &sf->queues.queue_array[queue_nr];
queue             860 tools/perf/util/s390-cpumsf.c 		sfq = queue->priv;
queue              59 tools/testing/selftests/mqueue/mq_open_tests.c mqd_t queue = -1;
queue              90 tools/testing/selftests/mqueue/mq_open_tests.c 	if (queue != -1)
queue              91 tools/testing/selftests/mqueue/mq_open_tests.c 		if (mq_close(queue))
queue             205 tools/testing/selftests/mqueue/mq_open_tests.c 	if ((queue = mq_open(queue_path, flags, perms, attr)) == -1)
queue             207 tools/testing/selftests/mqueue/mq_open_tests.c 	if (mq_getattr(queue, result))
queue             209 tools/testing/selftests/mqueue/mq_open_tests.c 	if (mq_close(queue))
queue             211 tools/testing/selftests/mqueue/mq_open_tests.c 	queue = -1;
queue             227 tools/testing/selftests/mqueue/mq_open_tests.c 	if ((queue = mq_open(queue_path, flags, perms, attr)) == -1)
queue             229 tools/testing/selftests/mqueue/mq_open_tests.c 	if (mq_getattr(queue, result))
queue             231 tools/testing/selftests/mqueue/mq_open_tests.c 	if (mq_close(queue))
queue             233 tools/testing/selftests/mqueue/mq_open_tests.c 	queue = -1;
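
mq_open_tests repeats one cycle: mq_open with the flags, permissions, and attributes under test, mq_getattr to capture the resulting attributes, mq_close, then reset the global descriptor to -1 so the cleanup path can tell whether a queue is still open. A compilable distillation of that cycle; the queue name is hypothetical, and older glibc needs -lrt:

#include <fcntl.h>
#include <mqueue.h>
#include <stdio.h>

int main(void)
{
	struct mq_attr result;
	mqd_t queue = mq_open("/editor_demo", O_RDWR | O_CREAT, 0600, NULL);

	if (queue == (mqd_t)-1) {
		perror("mq_open");
		return 1;
	}
	if (mq_getattr(queue, &result) == 0)
		printf("maxmsg=%ld msgsize=%ld\n",
		       result.mq_maxmsg, result.mq_msgsize);
	mq_close(queue);
	queue = (mqd_t)-1;               /* sentinel: cleanup can skip it */
	mq_unlink("/editor_demo");
	return 0;
}
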
queue              99 tools/testing/selftests/mqueue/mq_perf_tests.c mqd_t queue = -1;
queue             189 tools/testing/selftests/mqueue/mq_perf_tests.c 	if (queue != -1)
queue             190 tools/testing/selftests/mqueue/mq_perf_tests.c 		if (mq_close(queue))
queue             294 tools/testing/selftests/mqueue/mq_perf_tests.c 	queue = mq_open(queue_path, flags, perms, attr);
queue             295 tools/testing/selftests/mqueue/mq_perf_tests.c 	if (queue == -1)
queue             297 tools/testing/selftests/mqueue/mq_perf_tests.c 	if (mq_getattr(queue, &result))
queue             331 tools/testing/selftests/mqueue/mq_perf_tests.c 		while (mq_send(queue, buff, sizeof(buff), 0) == 0)
queue             333 tools/testing/selftests/mqueue/mq_perf_tests.c 		mq_receive(queue, buff, sizeof(buff), &priority);
queue             338 tools/testing/selftests/mqueue/mq_perf_tests.c 	while (mq_receive(queue, buff, MSG_SIZE, &prio_in) == MSG_SIZE)
queue             342 tools/testing/selftests/mqueue/mq_perf_tests.c 		if (mq_send(queue, buff, MSG_SIZE, prio_out)) \
queue             349 tools/testing/selftests/mqueue/mq_perf_tests.c 		if (mq_send(queue, buff, MSG_SIZE, prio_out)) \
queue             352 tools/testing/selftests/mqueue/mq_perf_tests.c 		if (mq_receive(queue, buff, MSG_SIZE, &prio_in) != MSG_SIZE) \
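
The mq_perf_tests loops above depend on a non-blocking descriptor: mq_send in a loop fills the queue until it fails, a single mq_receive frees one slot, and the drain loop calls mq_receive until the queue is empty. A standalone version of the fill/drain pattern; the name and attributes are stand-ins, and older glibc needs -lrt:

#include <fcntl.h>
#include <mqueue.h>
#include <stdio.h>
#include <string.h>

#define MSG_SIZE 16

int main(void)
{
	char buff[MSG_SIZE];
	unsigned int priority;
	struct mq_attr attr = { .mq_maxmsg = 10, .mq_msgsize = MSG_SIZE };
	mqd_t queue = mq_open("/editor_perf_demo",
			      O_RDWR | O_CREAT | O_NONBLOCK, 0600, &attr);

	if (queue == (mqd_t)-1) {
		perror("mq_open");
		return 1;
	}
	memset(buff, 0, sizeof(buff));

	/* Fill to capacity: non-blocking sends fail with EAGAIN when full. */
	int sent = 0;

	while (mq_send(queue, buff, sizeof(buff), 0) == 0)
		sent++;
	printf("queue full after %d messages\n", sent);

	/* ...then drain everything back out. */
	int got = 0;

	while (mq_receive(queue, buff, sizeof(buff), &priority) == MSG_SIZE)
		got++;
	printf("drained %d messages\n", got);

	mq_close(queue);
	mq_unlink("/editor_perf_demo");
	return 0;
}
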
queue              56 virt/kvm/async_pf.c 	INIT_LIST_HEAD(&vcpu->async_pf.queue);
queue             109 virt/kvm/async_pf.c 	while (!list_empty(&vcpu->async_pf.queue)) {
queue             111 virt/kvm/async_pf.c 			list_first_entry(&vcpu->async_pf.queue,
queue             112 virt/kvm/async_pf.c 					 typeof(*work), queue);
queue             113 virt/kvm/async_pf.c 		list_del(&work->queue);
queue             162 virt/kvm/async_pf.c 		list_del(&work->queue);
queue             204 virt/kvm/async_pf.c 	list_add_tail(&work->queue, &vcpu->async_pf.queue);
queue             227 virt/kvm/async_pf.c 	INIT_LIST_HEAD(&work->queue); /* for list_del to work */
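
The final async_pf line carries its own explanation: INIT_LIST_HEAD makes the node point at itself, so a later list_del is safe even if the work item was never queued. A tiny demonstration with stand-in versions of the list helpers (note the real kernel list_del poisons the pointers rather than re-initializing them):

#include <stdio.h>

struct list_head { struct list_head *next, *prev; };

static void INIT_LIST_HEAD(struct list_head *h) { h->next = h->prev = h; }

static void list_add_tail(struct list_head *n, struct list_head *h)
{
	n->prev = h->prev;
	n->next = h;
	h->prev->next = n;
	h->prev = n;
}

static void list_del(struct list_head *n)
{
	n->next->prev = n->prev;   /* a self-linked node unlinks to a no-op */
	n->prev->next = n->next;
	INIT_LIST_HEAD(n);
}

int main(void)
{
	struct list_head queue, work;

	INIT_LIST_HEAD(&queue);
	INIT_LIST_HEAD(&work);     /* "for list_del to work" */
	list_del(&work);           /* never queued: still safe */

	list_add_tail(&work, &queue);
	list_del(&work);           /* queued: unlinks normally */
	printf("queue empty: %s\n", queue.next == &queue ? "yes" : "no");
	return 0;
}
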