Lines Matching refs:q

123 struct request_queue *q = cmd->request->q; in scsi_mq_requeue_cmd() local
126 blk_mq_kick_requeue_list(q); in scsi_mq_requeue_cmd()
145 struct request_queue *q = device->request_queue; in __scsi_queue_insert() local
167 if (q->mq_ops) { in __scsi_queue_insert()
171 spin_lock_irqsave(q->queue_lock, flags); in __scsi_queue_insert()
172 blk_requeue_request(q, cmd->request); in __scsi_queue_insert()
174 spin_unlock_irqrestore(q->queue_lock, flags); in __scsi_queue_insert()
245 blk_execute_rq(req->q, NULL, req, 1); in scsi_execute()
329 static void scsi_kick_queue(struct request_queue *q) in scsi_kick_queue() argument
331 if (q->mq_ops) in scsi_kick_queue()
332 blk_mq_start_hw_queues(q); in scsi_kick_queue()
334 blk_run_queue(q); in scsi_kick_queue()
486 static void scsi_run_queue(struct request_queue *q) in scsi_run_queue() argument
488 struct scsi_device *sdev = q->queuedata; in scsi_run_queue()
495 if (q->mq_ops) in scsi_run_queue()
496 blk_mq_start_stopped_hw_queues(q, false); in scsi_run_queue()
498 blk_run_queue(q); in scsi_run_queue()
504 struct request_queue *q; in scsi_requeue_run_queue() local
507 q = sdev->request_queue; in scsi_requeue_run_queue()
508 scsi_run_queue(q); in scsi_requeue_run_queue()
529 static void scsi_requeue_command(struct request_queue *q, struct scsi_cmnd *cmd) in scsi_requeue_command() argument
535 spin_lock_irqsave(q->queue_lock, flags); in scsi_requeue_command()
539 blk_requeue_request(q, req); in scsi_requeue_command()
540 spin_unlock_irqrestore(q->queue_lock, flags); in scsi_requeue_command()
542 scsi_run_queue(q); in scsi_requeue_command()
693 struct request_queue *q = sdev->request_queue; in scsi_end_request() local
703 if (blk_queue_add_random(q)) in scsi_end_request()
722 blk_mq_start_stopped_hw_queues(q, true); in scsi_end_request()
729 spin_lock_irqsave(q->queue_lock, flags); in scsi_end_request()
731 spin_unlock_irqrestore(q->queue_lock, flags); in scsi_end_request()
736 scsi_run_queue(q); in scsi_end_request()
821 struct request_queue *q = cmd->device->request_queue; in scsi_io_completion() local
1063 if (q->mq_ops) { in scsi_io_completion()
1069 scsi_requeue_command(q, cmd); in scsi_io_completion()
1098 count = blk_rq_map_sg(req->q, req, sdb->table.sgl); in scsi_init_sgtable()
1130 if (!rq->q->mq_ops) { in scsi_init_io()
1161 ivecs = blk_rq_count_integrity_sg(rq->q, rq->bio); in scsi_init_io()
1168 count = blk_rq_map_integrity_sg(rq->q, rq->bio, in scsi_init_io()
1171 BUG_ON(unlikely(count > queue_max_integrity_segments(rq->q))); in scsi_init_io()
1343 scsi_prep_return(struct request_queue *q, struct request *req, int ret) in scsi_prep_return() argument
1345 struct scsi_device *sdev = q->queuedata; in scsi_prep_return()
1366 blk_delay_queue(q, SCSI_QUEUE_DELAY); in scsi_prep_return()
1375 static int scsi_prep_fn(struct request_queue *q, struct request *req) in scsi_prep_fn() argument
1377 struct scsi_device *sdev = q->queuedata; in scsi_prep_fn()
1393 return scsi_prep_return(q, req, ret); in scsi_prep_fn()
1396 static void scsi_unprep_fn(struct request_queue *q, struct request *req) in scsi_unprep_fn() argument
1407 static inline int scsi_dev_queue_ready(struct request_queue *q, in scsi_dev_queue_ready() argument
1424 if (!q->mq_ops) in scsi_dev_queue_ready()
1425 blk_delay_queue(q, SCSI_QUEUE_DELAY); in scsi_dev_queue_ready()
1500 static inline int scsi_host_queue_ready(struct request_queue *q, in scsi_host_queue_ready() argument
1562 static int scsi_lld_busy(struct request_queue *q) in scsi_lld_busy() argument
1564 struct scsi_device *sdev = q->queuedata; in scsi_lld_busy()
1567 if (blk_queue_dying(q)) in scsi_lld_busy()
1587 static void scsi_kill_request(struct request *req, struct request_queue *q) in scsi_kill_request() argument
1766 static void scsi_request_fn(struct request_queue *q) in scsi_request_fn() argument
1767 __releases(q->queue_lock) in scsi_request_fn()
1768 __acquires(q->queue_lock) in scsi_request_fn()
1770 struct scsi_device *sdev = q->queuedata; in scsi_request_fn()
1787 req = blk_peek_request(q); in scsi_request_fn()
1794 scsi_kill_request(req, q); in scsi_request_fn()
1798 if (!scsi_dev_queue_ready(q, sdev)) in scsi_request_fn()
1804 if (!(blk_queue_tagged(q) && !blk_queue_start_tag(q, req))) in scsi_request_fn()
1807 spin_unlock_irq(q->queue_lock); in scsi_request_fn()
1826 if (blk_queue_tagged(q) && !(req->cmd_flags & REQ_QUEUED)) { in scsi_request_fn()
1838 if (!scsi_host_queue_ready(q, shost, sdev)) in scsi_request_fn()
1859 spin_lock_irq(q->queue_lock); in scsi_request_fn()
1862 spin_lock_irq(q->queue_lock); in scsi_request_fn()
1879 spin_lock_irq(q->queue_lock); in scsi_request_fn()
1880 blk_requeue_request(q, req); in scsi_request_fn()
1884 blk_delay_queue(q, SCSI_QUEUE_DELAY); in scsi_request_fn()
1902 struct scsi_device *sdev = req->q->queuedata; in scsi_mq_prep_fn()
1970 struct request_queue *q = req->q; in scsi_queue_rq() local
1971 struct scsi_device *sdev = q->queuedata; in scsi_queue_rq()
1985 if (!scsi_dev_queue_ready(q, sdev)) in scsi_queue_rq()
1989 if (!scsi_host_queue_ready(q, shost, sdev)) in scsi_queue_rq()
2101 static void __scsi_init_queue(struct Scsi_Host *shost, struct request_queue *q) in __scsi_init_queue() argument
2108 blk_queue_max_segments(q, min_t(unsigned short, shost->sg_tablesize, in __scsi_init_queue()
2116 blk_queue_max_integrity_segments(q, shost->sg_prot_tablesize); in __scsi_init_queue()
2119 blk_queue_max_hw_sectors(q, shost->max_sectors); in __scsi_init_queue()
2120 blk_queue_bounce_limit(q, scsi_calculate_bounce_limit(shost)); in __scsi_init_queue()
2121 blk_queue_segment_boundary(q, shost->dma_boundary); in __scsi_init_queue()
2124 blk_queue_max_segment_size(q, dma_get_max_seg_size(dev)); in __scsi_init_queue()
2127 q->limits.cluster = 0; in __scsi_init_queue()
2134 blk_queue_dma_alignment(q, 0x03); in __scsi_init_queue()
2140 struct request_queue *q; in __scsi_alloc_queue() local
2142 q = blk_init_queue(request_fn, NULL); in __scsi_alloc_queue()
2143 if (!q) in __scsi_alloc_queue()
2145 __scsi_init_queue(shost, q); in __scsi_alloc_queue()
2146 return q; in __scsi_alloc_queue()
2152 struct request_queue *q; in scsi_alloc_queue() local
2154 q = __scsi_alloc_queue(sdev->host, scsi_request_fn); in scsi_alloc_queue()
2155 if (!q) in scsi_alloc_queue()
2158 blk_queue_prep_rq(q, scsi_prep_fn); in scsi_alloc_queue()
2159 blk_queue_unprep_rq(q, scsi_unprep_fn); in scsi_alloc_queue()
2160 blk_queue_softirq_done(q, scsi_softirq_done); in scsi_alloc_queue()
2161 blk_queue_rq_timed_out(q, scsi_times_out); in scsi_alloc_queue()
2162 blk_queue_lld_busy(q, scsi_lld_busy); in scsi_alloc_queue()
2163 return q; in scsi_alloc_queue()
2954 struct request_queue *q = sdev->request_queue; in scsi_internal_device_block() local
2971 if (q->mq_ops) { in scsi_internal_device_block()
2972 blk_mq_stop_hw_queues(q); in scsi_internal_device_block()
2974 spin_lock_irqsave(q->queue_lock, flags); in scsi_internal_device_block()
2975 blk_stop_queue(q); in scsi_internal_device_block()
2976 spin_unlock_irqrestore(q->queue_lock, flags); in scsi_internal_device_block()
3003 struct request_queue *q = sdev->request_queue; in scsi_internal_device_unblock() local
3023 if (q->mq_ops) { in scsi_internal_device_unblock()
3024 blk_mq_start_stopped_hw_queues(q, false); in scsi_internal_device_unblock()
3026 spin_lock_irqsave(q->queue_lock, flags); in scsi_internal_device_unblock()
3027 blk_start_queue(q); in scsi_internal_device_unblock()
3028 spin_unlock_irqrestore(q->queue_lock, flags); in scsi_internal_device_unblock()
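
Many of the matches above share the same legacy-versus-blk-mq branching pattern: when q->mq_ops is set, the queue is driven through the blk_mq_* helpers; otherwise the legacy blk_* call is made under q->queue_lock. A minimal sketch of that pattern, condensed from the scsi_internal_device_block() lines above; the function name example_stop_queue is hypothetical, and the snippet assumes the usual <linux/blkdev.h> and <linux/blk-mq.h> declarations for this kernel era:

	/* Hypothetical helper illustrating the q->mq_ops branch seen in the matches above. */
	static void example_stop_queue(struct request_queue *q)
	{
		unsigned long flags;

		if (q->mq_ops) {
			/* blk-mq path: hardware queues are stopped without taking queue_lock. */
			blk_mq_stop_hw_queues(q);
		} else {
			/* Legacy path: blk_stop_queue() must be called under q->queue_lock. */
			spin_lock_irqsave(q->queue_lock, flags);
			blk_stop_queue(q);
			spin_unlock_irqrestore(q->queue_lock, flags);
		}
	}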