/*
 * Copyright (C) 1999 Eric Youngdale
 * Copyright (C) 2014 Christoph Hellwig
 *
 * SCSI queueing library.
 * Initial versions: Eric Youngdale (eric@andante.org).
 * Based upon conversations with large numbers
 * of people at Linux Expo.
 */

#include <linux/bio.h>
#include <linux/bitops.h>
#include <linux/blkdev.h>
#include <linux/completion.h>
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/mempool.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/hardirq.h>
#include <linux/scatterlist.h>
#include <linux/blk-mq.h>
#include <linux/ratelimit.h>

#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_dbg.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_driver.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_host.h>

#include <trace/events/scsi.h>

#include "scsi_priv.h"
#include "scsi_logging.h"


#define SG_MEMPOOL_NR		ARRAY_SIZE(scsi_sg_pools)
#define SG_MEMPOOL_SIZE		2

struct scsi_host_sg_pool {
	size_t		size;
	char		*name;
	struct kmem_cache	*slab;
	mempool_t	*pool;
};

#define SP(x) { .size = x, "sgpool-" __stringify(x) }
#if (SCSI_MAX_SG_SEGMENTS < 32)
#error SCSI_MAX_SG_SEGMENTS is too small (must be 32 or greater)
#endif
static struct scsi_host_sg_pool scsi_sg_pools[] = {
	SP(8),
	SP(16),
#if (SCSI_MAX_SG_SEGMENTS > 32)
	SP(32),
#if (SCSI_MAX_SG_SEGMENTS > 64)
	SP(64),
#if (SCSI_MAX_SG_SEGMENTS > 128)
	SP(128),
#if (SCSI_MAX_SG_SEGMENTS > 256)
#error SCSI_MAX_SG_SEGMENTS is too large (256 MAX)
#endif
#endif
#endif
#endif
	SP(SCSI_MAX_SG_SEGMENTS)
};
#undef SP

struct kmem_cache *scsi_sdb_cache;

/*
 * When to reinvoke queueing after a resource shortage. It's 3 msecs so as
 * not to change behaviour from the previous unplug mechanism; experimentation
 * may prove this needs changing.
 */
#define SCSI_QUEUE_DELAY	3

static void
scsi_set_blocked(struct scsi_cmnd *cmd, int reason)
{
	struct Scsi_Host *host = cmd->device->host;
	struct scsi_device *device = cmd->device;
	struct scsi_target *starget = scsi_target(device);

	/*
	 * Set the appropriate busy bit for the device/host.
	 *
	 * If the host/device isn't busy, assume that something actually
	 * completed, and that we should be able to queue a command now.
	 *
	 * Note that the prior mid-layer assumption that any host could
	 * always queue at least one command is now broken. The mid-layer
	 * will implement a user specifiable stall (see
	 * scsi_host.max_host_blocked and scsi_device.max_device_blocked)
	 * if a command is requeued with no other commands outstanding
	 * either for the device or for the host.
	 */
	switch (reason) {
	case SCSI_MLQUEUE_HOST_BUSY:
		atomic_set(&host->host_blocked, host->max_host_blocked);
		break;
	case SCSI_MLQUEUE_DEVICE_BUSY:
	case SCSI_MLQUEUE_EH_RETRY:
		atomic_set(&device->device_blocked,
			   device->max_device_blocked);
		break;
	case SCSI_MLQUEUE_TARGET_BUSY:
		atomic_set(&starget->target_blocked,
			   starget->max_target_blocked);
		break;
	}
}

static void scsi_mq_requeue_cmd(struct scsi_cmnd *cmd)
{
	struct scsi_device *sdev = cmd->device;
	struct request_queue *q = cmd->request->q;

	blk_mq_requeue_request(cmd->request);
	blk_mq_kick_requeue_list(q);
	put_device(&sdev->sdev_gendev);
}

/**
 * __scsi_queue_insert - private queue insertion
 * @cmd: The SCSI command being requeued
 * @reason: The reason for the requeue
 * @unbusy: Whether the queue should be unbusied
 *
 * This is a private queue insertion.  The public interface
 * scsi_queue_insert() always assumes the queue should be unbusied
 * because it's always called before the completion.  This function is
 * for a requeue after completion, which should only occur in this
 * file.
 */
static void __scsi_queue_insert(struct scsi_cmnd *cmd, int reason, int unbusy)
{
	struct scsi_device *device = cmd->device;
	struct request_queue *q = device->request_queue;
	unsigned long flags;

	SCSI_LOG_MLQUEUE(1, scmd_printk(KERN_INFO, cmd,
		"Inserting command %p into mlqueue\n", cmd));

	scsi_set_blocked(cmd, reason);

	/*
	 * Decrement the counters, since these commands are no longer
	 * active on the host/device.
	 */
	if (unbusy)
		scsi_device_unbusy(device);

	/*
	 * Requeue this command.  It will go before all other commands
	 * that are already in the queue.  Schedule requeue work under
	 * lock such that the kblockd_schedule_work() call happens
	 * before blk_cleanup_queue() finishes.
	 */
	cmd->result = 0;
	if (q->mq_ops) {
		scsi_mq_requeue_cmd(cmd);
		return;
	}
	spin_lock_irqsave(q->queue_lock, flags);
	blk_requeue_request(q, cmd->request);
	kblockd_schedule_work(&device->requeue_work);
	spin_unlock_irqrestore(q->queue_lock, flags);
}

/*
 * Function:    scsi_queue_insert()
 *
 * Purpose:     Insert a command in the midlevel queue.
 *
 * Arguments:   cmd    - command that we are adding to queue.
 *              reason - why we are inserting command to queue.
 *
 * Lock status: Assumed that lock is not held upon entry.
 *
 * Returns:     Nothing.
 *
 * Notes:       We do this for one of two cases.  Either the host is busy
 *              and it cannot accept any more commands for the time being,
 *              or the device returned QUEUE_FULL and can accept no more
 *              commands.
 * Notes:       This could be called either from an interrupt context or a
 *              normal process context.
 */
void scsi_queue_insert(struct scsi_cmnd *cmd, int reason)
{
	__scsi_queue_insert(cmd, reason, 1);
}

/**
 * scsi_execute - insert request and wait for the result
 * @sdev:	scsi device
 * @cmd:	scsi command
 * @data_direction: data direction
 * @buffer:	data buffer
 * @bufflen:	len of buffer
 * @sense:	optional sense buffer
 * @timeout:	request timeout in seconds
 * @retries:	number of times to retry request
 * @flags:	flags to OR into the request flags
 * @resid:	optional residual length
 *
 * Returns the req->errors value, which is the scsi_cmnd result
 * field.
 */
int scsi_execute(struct scsi_device *sdev, const unsigned char *cmd,
		 int data_direction, void *buffer, unsigned bufflen,
		 unsigned char *sense, int timeout, int retries, u64 flags,
		 int *resid)
{
	struct request *req;
	int write = (data_direction == DMA_TO_DEVICE);
	int ret = DRIVER_ERROR << 24;

	req = blk_get_request(sdev->request_queue, write, __GFP_WAIT);
	if (IS_ERR(req))
		return ret;
	blk_rq_set_block_pc(req);

	if (bufflen && blk_rq_map_kern(sdev->request_queue, req,
				       buffer, bufflen, __GFP_WAIT))
		goto out;

	req->cmd_len = COMMAND_SIZE(cmd[0]);
	memcpy(req->cmd, cmd, req->cmd_len);
	req->sense = sense;
	req->sense_len = 0;
	req->retries = retries;
	req->timeout = timeout;
	req->cmd_flags |= flags | REQ_QUIET | REQ_PREEMPT;

	/*
	 * head injection *required* here otherwise quiesce won't work
	 */
	blk_execute_rq(req->q, NULL, req, 1);

	/*
	 * Some devices (USB mass-storage in particular) may transfer
	 * garbage data together with a residue indicating that the data
	 * is invalid.  Prevent the garbage from being misinterpreted
	 * and prevent security leaks by zeroing out the excess data.
	 */
	if (unlikely(req->resid_len > 0 && req->resid_len <= bufflen))
		memset(buffer + (bufflen - req->resid_len), 0, req->resid_len);

	if (resid)
		*resid = req->resid_len;
	ret = req->errors;
 out:
	blk_put_request(req);

	return ret;
}
EXPORT_SYMBOL(scsi_execute);

int scsi_execute_req_flags(struct scsi_device *sdev, const unsigned char *cmd,
		     int data_direction, void *buffer, unsigned bufflen,
		     struct scsi_sense_hdr *sshdr, int timeout, int retries,
		     int *resid, u64 flags)
{
	char *sense = NULL;
	int result;

	if (sshdr) {
		sense = kzalloc(SCSI_SENSE_BUFFERSIZE, GFP_NOIO);
		if (!sense)
			return DRIVER_ERROR << 24;
	}
	result = scsi_execute(sdev, cmd, data_direction, buffer, bufflen,
			      sense, timeout, retries, flags, resid);
	if (sshdr)
		scsi_normalize_sense(sense, SCSI_SENSE_BUFFERSIZE, sshdr);

	kfree(sense);
	return result;
}
EXPORT_SYMBOL(scsi_execute_req_flags);

/*
 * Function:    scsi_init_cmd_errh()
 *
 * Purpose:     Initialize cmd fields related to error handling.
 *
 * Arguments:   cmd	- command that is ready to be queued.
 *
 * Notes:       This function has the job of initializing a number of
 *              fields related to error handling.   Typically this will
 *              be called once for each command, as required.
 */
static void scsi_init_cmd_errh(struct scsi_cmnd *cmd)
{
	cmd->serial_number = 0;
	scsi_set_resid(cmd, 0);
	memset(cmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
	if (cmd->cmd_len == 0)
		cmd->cmd_len = scsi_command_size(cmd->cmnd);
}

/*
 * Drop the per-host, per-target and per-device busy counters after a command
 * completes, and wake up the error handler if it is waiting for outstanding
 * commands to complete.
 */
void scsi_device_unbusy(struct scsi_device *sdev)
{
	struct Scsi_Host *shost = sdev->host;
	struct scsi_target *starget = scsi_target(sdev);
	unsigned long flags;

	atomic_dec(&shost->host_busy);
	if (starget->can_queue > 0)
		atomic_dec(&starget->target_busy);

	if (unlikely(scsi_host_in_recovery(shost) &&
		     (shost->host_failed || shost->host_eh_scheduled))) {
		spin_lock_irqsave(shost->host_lock, flags);
		scsi_eh_wakeup(shost);
		spin_unlock_irqrestore(shost->host_lock, flags);
	}

	atomic_dec(&sdev->device_busy);
}

static void scsi_kick_queue(struct request_queue *q)
{
	if (q->mq_ops)
		blk_mq_start_hw_queues(q);
	else
		blk_run_queue(q);
}

/*
 * Called for single_lun devices on IO completion. Clear starget_sdev_user,
 * and call blk_run_queue for all the scsi_devices on the target -
 * including current_sdev first.
 *
 * Called with *no* scsi locks held.
 */
static void scsi_single_lun_run(struct scsi_device *current_sdev)
{
	struct Scsi_Host *shost = current_sdev->host;
	struct scsi_device *sdev, *tmp;
	struct scsi_target *starget = scsi_target(current_sdev);
	unsigned long flags;

	spin_lock_irqsave(shost->host_lock, flags);
	starget->starget_sdev_user = NULL;
	spin_unlock_irqrestore(shost->host_lock, flags);

	/*
	 * Call blk_run_queue for all LUNs on the target, starting with
	 * current_sdev. We race with others (to set starget_sdev_user),
	 * but in most cases, we will be first. Ideally, each LU on the
	 * target would get some limited time or requests on the target.
	 */
	scsi_kick_queue(current_sdev->request_queue);

	spin_lock_irqsave(shost->host_lock, flags);
	if (starget->starget_sdev_user)
		goto out;
	list_for_each_entry_safe(sdev, tmp, &starget->devices,
			same_target_siblings) {
		if (sdev == current_sdev)
			continue;
		if (scsi_device_get(sdev))
			continue;

		spin_unlock_irqrestore(shost->host_lock, flags);
		scsi_kick_queue(sdev->request_queue);
		spin_lock_irqsave(shost->host_lock, flags);

		scsi_device_put(sdev);
	}
 out:
	spin_unlock_irqrestore(shost->host_lock, flags);
}

static inline bool scsi_device_is_busy(struct scsi_device *sdev)
{
	if (atomic_read(&sdev->device_busy) >= sdev->queue_depth)
		return true;
	if (atomic_read(&sdev->device_blocked) > 0)
		return true;
	return false;
}

static inline bool scsi_target_is_busy(struct scsi_target *starget)
{
	if (starget->can_queue > 0) {
		if (atomic_read(&starget->target_busy) >= starget->can_queue)
			return true;
		if (atomic_read(&starget->target_blocked) > 0)
			return true;
	}
	return false;
}

static inline bool scsi_host_is_busy(struct Scsi_Host *shost)
{
	if (shost->can_queue > 0 &&
	    atomic_read(&shost->host_busy) >= shost->can_queue)
		return true;
	if (atomic_read(&shost->host_blocked) > 0)
		return true;
	if (shost->host_self_blocked)
		return true;
	return false;
}

static void scsi_starved_list_run(struct Scsi_Host *shost)
{
	LIST_HEAD(starved_list);
	struct scsi_device *sdev;
	unsigned long flags;

	spin_lock_irqsave(shost->host_lock, flags);
	list_splice_init(&shost->starved_list, &starved_list);

	while (!list_empty(&starved_list)) {
		struct request_queue *slq;

		/*
		 * As long as shost is accepting commands and we have
		 * starved queues, call blk_run_queue. scsi_request_fn
		 * drops the queue_lock and can add us back to the
		 * starved_list.
		 *
		 * host_lock protects the starved_list and starved_entry.
		 * scsi_request_fn must get the host_lock before checking
		 * or modifying starved_list or starved_entry.
		 */
		if (scsi_host_is_busy(shost))
			break;

		sdev = list_entry(starved_list.next,
				  struct scsi_device, starved_entry);
		list_del_init(&sdev->starved_entry);
		if (scsi_target_is_busy(scsi_target(sdev))) {
			list_move_tail(&sdev->starved_entry,
				       &shost->starved_list);
			continue;
		}

		/*
		 * Once we drop the host lock, a racing scsi_remove_device()
		 * call may remove the sdev from the starved list and destroy
		 * it and the queue.  Mitigate by taking a reference to the
		 * queue and never touching the sdev again after we drop the
		 * host lock.  Note: if __scsi_remove_device() invokes
		 * blk_cleanup_queue() before the queue is run from this
		 * function then blk_run_queue() will return immediately since
		 * blk_cleanup_queue() marks the queue with QUEUE_FLAG_DYING.
		 */
		slq = sdev->request_queue;
		if (!blk_get_queue(slq))
			continue;
		spin_unlock_irqrestore(shost->host_lock, flags);

		scsi_kick_queue(slq);
		blk_put_queue(slq);

		spin_lock_irqsave(shost->host_lock, flags);
	}
	/* put any unprocessed entries back */
	list_splice(&starved_list, &shost->starved_list);
	spin_unlock_irqrestore(shost->host_lock, flags);
}

/*
 * Function:   scsi_run_queue()
 *
 * Purpose:    Select a proper request queue to serve next
 *
 * Arguments:  q       - last request's queue
 *
 * Returns:    Nothing
 *
 * Notes:      The previous command was completely finished, start
 *             a new one if possible.
 */
static void scsi_run_queue(struct request_queue *q)
{
	struct scsi_device *sdev = q->queuedata;

	if (scsi_target(sdev)->single_lun)
		scsi_single_lun_run(sdev);
	if (!list_empty(&sdev->host->starved_list))
		scsi_starved_list_run(sdev->host);

	if (q->mq_ops)
		blk_mq_start_stopped_hw_queues(q, false);
	else
		blk_run_queue(q);
}

void scsi_requeue_run_queue(struct work_struct *work)
{
	struct scsi_device *sdev;
	struct request_queue *q;

	sdev = container_of(work, struct scsi_device, requeue_work);
	q = sdev->request_queue;
	scsi_run_queue(q);
}

/*
 * Function:	scsi_requeue_command()
 *
 * Purpose:	Handle post-processing of completed commands.
 *
 * Arguments:	q	- queue to operate on
 *		cmd	- command that may need to be requeued.
 *
 * Returns:	Nothing
 *
 * Notes:	After command completion, there may be blocks left
 *		over which weren't finished by the previous command;
 *		this can be for a number of reasons - the main one is
 *		I/O errors in the middle of the request, in which case
 *		we need to request the blocks that come after the bad
 *		sector.
 * Notes:	Upon return, cmd is a stale pointer.
 */
static void scsi_requeue_command(struct request_queue *q, struct scsi_cmnd *cmd)
{
	struct scsi_device *sdev = cmd->device;
	struct request *req = cmd->request;
	unsigned long flags;

	spin_lock_irqsave(q->queue_lock, flags);
	blk_unprep_request(req);
	req->special = NULL;
	scsi_put_command(cmd);
	blk_requeue_request(q, req);
	spin_unlock_irqrestore(q->queue_lock, flags);

	scsi_run_queue(q);

	put_device(&sdev->sdev_gendev);
}

void scsi_run_host_queues(struct Scsi_Host *shost)
{
	struct scsi_device *sdev;

	shost_for_each_device(sdev, shost)
		scsi_run_queue(sdev->request_queue);
}

static inline unsigned int scsi_sgtable_index(unsigned short nents)
{
	unsigned int index;

	BUG_ON(nents > SCSI_MAX_SG_SEGMENTS);

	if (nents <= 8)
		index = 0;
	else
		index = get_count_order(nents) - 3;

	return index;
}

static void scsi_sg_free(struct scatterlist *sgl, unsigned int nents)
{
	struct scsi_host_sg_pool *sgp;

	sgp = scsi_sg_pools + scsi_sgtable_index(nents);
	mempool_free(sgl, sgp->pool);
}

static struct scatterlist *scsi_sg_alloc(unsigned int nents, gfp_t gfp_mask)
{
	struct scsi_host_sg_pool *sgp;

	sgp = scsi_sg_pools + scsi_sgtable_index(nents);
	return mempool_alloc(sgp->pool, gfp_mask);
}

static void scsi_free_sgtable(struct scsi_data_buffer *sdb, bool mq)
{
	if (mq && sdb->table.orig_nents <= SCSI_MAX_SG_SEGMENTS)
		return;
	__sg_free_table(&sdb->table, SCSI_MAX_SG_SEGMENTS, mq, scsi_sg_free);
}

static int scsi_alloc_sgtable(struct scsi_data_buffer *sdb, int nents, bool mq)
{
	struct scatterlist *first_chunk = NULL;
	int ret;

	BUG_ON(!nents);

	if (mq) {
		if (nents <= SCSI_MAX_SG_SEGMENTS) {
			sdb->table.nents = sdb->table.orig_nents = nents;
			sg_init_table(sdb->table.sgl, nents);
			return 0;
		}
		first_chunk = sdb->table.sgl;
	}

	ret = __sg_alloc_table(&sdb->table, nents, SCSI_MAX_SG_SEGMENTS,
			       first_chunk, GFP_ATOMIC, scsi_sg_alloc);
	if (unlikely(ret))
		scsi_free_sgtable(sdb, mq);
	return ret;
}

static void scsi_uninit_cmd(struct scsi_cmnd *cmd)
{
	if (cmd->request->cmd_type == REQ_TYPE_FS) {
		struct scsi_driver *drv = scsi_cmd_to_driver(cmd);

		if (drv->uninit_command)
			drv->uninit_command(cmd);
	}
}

static void scsi_mq_free_sgtables(struct scsi_cmnd *cmd)
{
	if (cmd->sdb.table.nents)
		scsi_free_sgtable(&cmd->sdb, true);
	if (cmd->request->next_rq && cmd->request->next_rq->special)
		scsi_free_sgtable(cmd->request->next_rq->special, true);
	if (scsi_prot_sg_count(cmd))
		scsi_free_sgtable(cmd->prot_sdb, true);
}

static void scsi_mq_uninit_cmd(struct scsi_cmnd *cmd)
{
	struct scsi_device *sdev = cmd->device;
	struct Scsi_Host *shost = sdev->host;
	unsigned long flags;

	scsi_mq_free_sgtables(cmd);
	scsi_uninit_cmd(cmd);

	if (shost->use_cmd_list) {
		BUG_ON(list_empty(&cmd->list));
		spin_lock_irqsave(&sdev->list_lock, flags);
		list_del_init(&cmd->list);
		spin_unlock_irqrestore(&sdev->list_lock, flags);
	}
}

/*
 * Function:    scsi_release_buffers()
 *
 * Purpose:     Free resources allocated for a scsi_command.
 *
 * Arguments:   cmd	- command that we are bailing.
 *
 * Lock status: Assumed that no lock is held upon entry.
 *
 * Returns:     Nothing
 *
 * Notes:       In the event that an upper level driver rejects a
 *		command, we must release resources allocated during
 *		the __init_io() function.  Primarily this would involve
 *		the scatter-gather table.
 */
static void scsi_release_buffers(struct scsi_cmnd *cmd)
{
	if (cmd->sdb.table.nents)
		scsi_free_sgtable(&cmd->sdb, false);

	memset(&cmd->sdb, 0, sizeof(cmd->sdb));

	if (scsi_prot_sg_count(cmd))
		scsi_free_sgtable(cmd->prot_sdb, false);
}

static void scsi_release_bidi_buffers(struct scsi_cmnd *cmd)
{
	struct scsi_data_buffer *bidi_sdb = cmd->request->next_rq->special;

	scsi_free_sgtable(bidi_sdb, false);
	kmem_cache_free(scsi_sdb_cache, bidi_sdb);
	cmd->request->next_rq->special = NULL;
}

/*
 * Complete @bytes of @req (and @bidi_bytes of its paired bidi request) with
 * @error.  Returns true if more work remains on the request, or false once
 * the request has been completed and all resources released.
 */
static bool scsi_end_request(struct request *req, int error,
		unsigned int bytes, unsigned int bidi_bytes)
{
	struct scsi_cmnd *cmd = req->special;
	struct scsi_device *sdev = cmd->device;
	struct request_queue *q = sdev->request_queue;

	if (blk_update_request(req, error, bytes))
		return true;

	/* Bidi request must be completed as a whole */
	if (unlikely(bidi_bytes) &&
	    blk_update_request(req->next_rq, error, bidi_bytes))
		return true;

	if (blk_queue_add_random(q))
		add_disk_randomness(req->rq_disk);

	if (req->mq_ctx) {
		/*
		 * In the MQ case the command gets freed by __blk_mq_end_request,
		 * so we have to do all cleanup that depends on it earlier.
		 *
		 * We also can't kick the queues from irq context, so we
		 * will have to defer it to a workqueue.
		 */
		scsi_mq_uninit_cmd(cmd);

		__blk_mq_end_request(req, error);

		if (scsi_target(sdev)->single_lun ||
		    !list_empty(&sdev->host->starved_list))
			kblockd_schedule_work(&sdev->requeue_work);
		else
			blk_mq_start_stopped_hw_queues(q, true);
	} else {
		unsigned long flags;

		if (bidi_bytes)
			scsi_release_bidi_buffers(cmd);

		spin_lock_irqsave(q->queue_lock, flags);
		blk_finish_request(req, error);
		spin_unlock_irqrestore(q->queue_lock, flags);

		scsi_release_buffers(cmd);

		scsi_put_command(cmd);
		scsi_run_queue(q);
	}

	put_device(&sdev->sdev_gendev);
	return false;
}

/**
 * __scsi_error_from_host_byte - translate SCSI error code into errno
 * @cmd:	SCSI command (unused)
 * @result:	scsi error code
 *
 * Translate SCSI error code into standard UNIX errno.
 * Return values:
 * -ENOLINK	temporary transport failure
 * -EREMOTEIO	permanent target failure, do not retry
 * -EBADE	permanent nexus failure, retry on other path
 * -ENOSPC	No write space available
 * -ENODATA	Medium error
 * -EIO		unspecified I/O error
 */
static int __scsi_error_from_host_byte(struct scsi_cmnd *cmd, int result)
{
	int error = 0;

	switch(host_byte(result)) {
	case DID_TRANSPORT_FAILFAST:
		error = -ENOLINK;
		break;
	case DID_TARGET_FAILURE:
		set_host_byte(cmd, DID_OK);
		error = -EREMOTEIO;
		break;
	case DID_NEXUS_FAILURE:
		set_host_byte(cmd, DID_OK);
		error = -EBADE;
		break;
	case DID_ALLOC_FAILURE:
		set_host_byte(cmd, DID_OK);
		error = -ENOSPC;
		break;
	case DID_MEDIUM_ERROR:
		set_host_byte(cmd, DID_OK);
		error = -ENODATA;
		break;
	default:
		error = -EIO;
		break;
	}

	return error;
}

/*
 * Function:    scsi_io_completion()
 *
 * Purpose:     Completion processing for block device I/O requests.
 *
 * Arguments:   cmd   - command that is finished.
 *
 * Lock status: Assumed that no lock is held upon entry.
 *
 * Returns:     Nothing
 *
 * Notes:       We will finish off the specified number of sectors.  If we
 *		are done, the command block will be released and the queue
 *		function will be goosed.  If we are not done then we have to
 *		figure out what to do next:
 *
 *		a) We can call scsi_requeue_command().  The request
 *		   will be unprepared and put back on the queue.  Then
 *		   a new command will be created for it.  This should
 *		   be used if we made forward progress, or if we want
 *		   to switch from READ(10) to READ(6) for example.
 *
 *		b) We can call __scsi_queue_insert().  The request will
 *		   be put back on the queue and retried using the same
 *		   command as before, possibly after a delay.
 *
 *		c) We can call scsi_end_request() with -EIO to fail
 *		   the remainder of the request.
 */
void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
{
	int result = cmd->result;
	struct request_queue *q = cmd->device->request_queue;
	struct request *req = cmd->request;
	int error = 0;
	struct scsi_sense_hdr sshdr;
	bool sense_valid = false;
	int sense_deferred = 0, level = 0;
	enum {ACTION_FAIL, ACTION_REPREP, ACTION_RETRY,
	      ACTION_DELAYED_RETRY} action;
	unsigned long wait_for = (cmd->allowed + 1) * req->timeout;

	if (result) {
		sense_valid = scsi_command_normalize_sense(cmd, &sshdr);
		if (sense_valid)
			sense_deferred = scsi_sense_is_deferred(&sshdr);
	}

	if (req->cmd_type == REQ_TYPE_BLOCK_PC) { /* SG_IO ioctl from block level */
		if (result) {
			if (sense_valid && req->sense) {
				/*
				 * SG_IO wants current and deferred errors
				 */
				int len = 8 + cmd->sense_buffer[7];

				if (len > SCSI_SENSE_BUFFERSIZE)
					len = SCSI_SENSE_BUFFERSIZE;
				memcpy(req->sense, cmd->sense_buffer, len);
				req->sense_len = len;
			}
			if (!sense_deferred)
				error = __scsi_error_from_host_byte(cmd, result);
		}
		/*
		 * __scsi_error_from_host_byte may have reset the host_byte
		 */
		req->errors = cmd->result;

		req->resid_len = scsi_get_resid(cmd);

		if (scsi_bidi_cmnd(cmd)) {
			/*
			 * Bidi commands must be completed as a whole,
			 * both sides at once.
			 */
			req->next_rq->resid_len = scsi_in(cmd)->resid;
			if (scsi_end_request(req, 0, blk_rq_bytes(req),
					blk_rq_bytes(req->next_rq)))
				BUG();
			return;
		}
	} else if (blk_rq_bytes(req) == 0 && result && !sense_deferred) {
		/*
		 * Certain non BLOCK_PC requests are commands that don't
		 * actually transfer anything (FLUSH), so cannot use
		 * good_bytes != blk_rq_bytes(req) as the signal for an error.
		 * This sets the error explicitly for the problem case.
		 */
		error = __scsi_error_from_host_byte(cmd, result);
	}

	/* no bidi support for !REQ_TYPE_BLOCK_PC yet */
	BUG_ON(blk_bidi_rq(req));

	/*
	 * Next deal with any sectors which we were able to correctly
	 * handle.
	 */
	SCSI_LOG_HLCOMPLETE(1, scmd_printk(KERN_INFO, cmd,
		"%u sectors total, %d bytes done.\n",
		blk_rq_sectors(req), good_bytes));

	/*
	 * Recovered errors need reporting, but they're always treated
	 * as success, so fiddle the result code here.  For BLOCK_PC
	 * we already took a copy of the original into rq->errors which
	 * is what gets returned to the user
	 */
	if (sense_valid && (sshdr.sense_key == RECOVERED_ERROR)) {
		/* if ATA PASS-THROUGH INFORMATION AVAILABLE skip
		 * print since caller wants ATA registers. Only occurs on
		 * SCSI ATA PASS_THROUGH commands when CK_COND=1
		 */
		if ((sshdr.asc == 0x0) && (sshdr.ascq == 0x1d))
			;
		else if (!(req->cmd_flags & REQ_QUIET))
			scsi_print_sense(cmd);
		result = 0;
		/* BLOCK_PC may have set error */
		error = 0;
	}

	/*
	 * special case: failed zero length commands always need to
	 * drop down into the retry code. Otherwise, if we finished
	 * all bytes in the request we are done now.
	 */
	if (!(blk_rq_bytes(req) == 0 && error) &&
	    !scsi_end_request(req, error, good_bytes, 0))
		return;

	/*
	 * Kill remainder if no retries.
	 */
	if (error && scsi_noretry_cmd(cmd)) {
		if (scsi_end_request(req, error, blk_rq_bytes(req), 0))
			BUG();
		return;
	}

	/*
	 * If there had been no error, but we have leftover bytes in the
	 * request, just queue the command up again.
	 */
	if (result == 0)
		goto requeue;

	error = __scsi_error_from_host_byte(cmd, result);

	if (host_byte(result) == DID_RESET) {
		/* Third party bus reset or reset for error recovery
		 * reasons.  Just retry the command and see what
		 * happens.
		 */
		action = ACTION_RETRY;
	} else if (sense_valid && !sense_deferred) {
		switch (sshdr.sense_key) {
		case UNIT_ATTENTION:
			if (cmd->device->removable) {
				/* Detected disc change.  Set a bit
				 * and quietly refuse further access.
				 */
				cmd->device->changed = 1;
				action = ACTION_FAIL;
			} else {
				/* Must have been a power glitch, or a
				 * bus reset.  Could not have been a
				 * media change, so we just retry the
				 * command and see what happens.
				 */
				action = ACTION_RETRY;
			}
			break;
		case ILLEGAL_REQUEST:
			/* If we had an ILLEGAL REQUEST returned, then
			 * we may have performed an unsupported
			 * command.  The only thing this should be
			 * would be a ten byte read where only a six
			 * byte read was supported.  Also, on a system
			 * where READ CAPACITY failed, we may have
			 * read past the end of the disk.
			 */
			if ((cmd->device->use_10_for_rw &&
			    sshdr.asc == 0x20 && sshdr.ascq == 0x00) &&
			    (cmd->cmnd[0] == READ_10 ||
			     cmd->cmnd[0] == WRITE_10)) {
				/* This will issue a new 6-byte command. */
				cmd->device->use_10_for_rw = 0;
				action = ACTION_REPREP;
			} else if (sshdr.asc == 0x10) /* DIX */ {
				action = ACTION_FAIL;
				error = -EILSEQ;
			/* INVALID COMMAND OPCODE or INVALID FIELD IN CDB */
			} else if (sshdr.asc == 0x20 || sshdr.asc == 0x24) {
				action = ACTION_FAIL;
				error = -EREMOTEIO;
			} else
				action = ACTION_FAIL;
			break;
		case ABORTED_COMMAND:
			action = ACTION_FAIL;
			if (sshdr.asc == 0x10) /* DIF */
				error = -EILSEQ;
			break;
		case NOT_READY:
			/* If the device is in the process of becoming
			 * ready, or has a temporary blockage, retry.
			 */
			if (sshdr.asc == 0x04) {
				switch (sshdr.ascq) {
				case 0x01: /* becoming ready */
				case 0x04: /* format in progress */
				case 0x05: /* rebuild in progress */
				case 0x06: /* recalculation in progress */
				case 0x07: /* operation in progress */
				case 0x08: /* Long write in progress */
				case 0x09: /* self test in progress */
				case 0x14: /* space allocation in progress */
					action = ACTION_DELAYED_RETRY;
					break;
				default:
					action = ACTION_FAIL;
					break;
				}
			} else
				action = ACTION_FAIL;
			break;
		case VOLUME_OVERFLOW:
			/* See SSC3rXX or current. */
			action = ACTION_FAIL;
			break;
		default:
			action = ACTION_FAIL;
			break;
		}
	} else
		action = ACTION_FAIL;

	if (action != ACTION_FAIL &&
	    time_before(cmd->jiffies_at_alloc + wait_for, jiffies))
		action = ACTION_FAIL;

	switch (action) {
	case ACTION_FAIL:
		/* Give up and fail the remainder of the request */
		if (!(req->cmd_flags & REQ_QUIET)) {
			static DEFINE_RATELIMIT_STATE(_rs,
					DEFAULT_RATELIMIT_INTERVAL,
					DEFAULT_RATELIMIT_BURST);

			if (unlikely(scsi_logging_level))
				level = SCSI_LOG_LEVEL(SCSI_LOG_MLCOMPLETE_SHIFT,
						       SCSI_LOG_MLCOMPLETE_BITS);

			/*
			 * if logging is enabled the failure will be printed
			 * in scsi_log_completion(), so avoid duplicate messages
			 */
			if (!level && __ratelimit(&_rs)) {
				scsi_print_result(cmd, NULL, FAILED);
				if (driver_byte(result) & DRIVER_SENSE)
					scsi_print_sense(cmd);
				scsi_print_command(cmd);
			}
		}
		if (!scsi_end_request(req, error, blk_rq_err_bytes(req), 0))
			return;
		/*FALLTHRU*/
	case ACTION_REPREP:
	requeue:
		/* Unprep the request and put it back at the head of the queue.
		 * A new command will be prepared and issued.
		 */
		if (q->mq_ops) {
			cmd->request->cmd_flags &= ~REQ_DONTPREP;
			scsi_mq_uninit_cmd(cmd);
			scsi_mq_requeue_cmd(cmd);
		} else {
			scsi_release_buffers(cmd);
			scsi_requeue_command(q, cmd);
		}
		break;
	case ACTION_RETRY:
		/* Retry the same command immediately */
		__scsi_queue_insert(cmd, SCSI_MLQUEUE_EH_RETRY, 0);
		break;
	case ACTION_DELAYED_RETRY:
		/* Retry the same command after a delay */
		__scsi_queue_insert(cmd, SCSI_MLQUEUE_DEVICE_BUSY, 0);
		break;
	}
}

static int scsi_init_sgtable(struct request *req, struct scsi_data_buffer *sdb)
{
	int count;

	/*
	 * If sg table allocation fails, requeue request later.
	 */
	if (unlikely(scsi_alloc_sgtable(sdb, req->nr_phys_segments,
					req->mq_ctx != NULL)))
		return BLKPREP_DEFER;

	/*
	 * Next, walk the list, and fill in the addresses and sizes of
	 * each segment.
	 */
	count = blk_rq_map_sg(req->q, req, sdb->table.sgl);
	BUG_ON(count > sdb->table.nents);
	sdb->table.nents = count;
	sdb->length = blk_rq_bytes(req);
	return BLKPREP_OK;
}

/*
 * Function:    scsi_init_io()
 *
 * Purpose:     SCSI I/O initialize function.
 *
 * Arguments:   cmd   - Command descriptor we wish to initialize
 *
 * Returns:     0 on success
 *		BLKPREP_DEFER if the failure is retryable
 *		BLKPREP_KILL if the failure is fatal
 */
int scsi_init_io(struct scsi_cmnd *cmd)
{
	struct scsi_device *sdev = cmd->device;
	struct request *rq = cmd->request;
	bool is_mq = (rq->mq_ctx != NULL);
	int error;

	BUG_ON(!rq->nr_phys_segments);

	error = scsi_init_sgtable(rq, &cmd->sdb);
	if (error)
		goto err_exit;

	if (blk_bidi_rq(rq)) {
		if (!rq->q->mq_ops) {
			struct scsi_data_buffer *bidi_sdb =
				kmem_cache_zalloc(scsi_sdb_cache, GFP_ATOMIC);
			if (!bidi_sdb) {
				error = BLKPREP_DEFER;
				goto err_exit;
			}

			rq->next_rq->special = bidi_sdb;
		}

		error = scsi_init_sgtable(rq->next_rq, rq->next_rq->special);
		if (error)
			goto err_exit;
	}

	if (blk_integrity_rq(rq)) {
		struct scsi_data_buffer *prot_sdb = cmd->prot_sdb;
		int ivecs, count;

		if (prot_sdb == NULL) {
			/*
			 * This can happen if someone (e.g. multipath)
			 * queues a command to a device on an adapter
			 * that does not support DIX.
			 */
			WARN_ON_ONCE(1);
			error = BLKPREP_KILL;
			goto err_exit;
		}

		ivecs = blk_rq_count_integrity_sg(rq->q, rq->bio);

		if (scsi_alloc_sgtable(prot_sdb, ivecs, is_mq)) {
			error = BLKPREP_DEFER;
			goto err_exit;
		}

		count = blk_rq_map_integrity_sg(rq->q, rq->bio,
						prot_sdb->table.sgl);
		BUG_ON(unlikely(count > ivecs));
		BUG_ON(unlikely(count > queue_max_integrity_segments(rq->q)));

		cmd->prot_sdb = prot_sdb;
		cmd->prot_sdb->table.nents = count;
	}

	return BLKPREP_OK;
err_exit:
	if (is_mq) {
		scsi_mq_free_sgtables(cmd);
	} else {
		scsi_release_buffers(cmd);
		cmd->request->special = NULL;
		scsi_put_command(cmd);
		put_device(&sdev->sdev_gendev);
	}
	return error;
}
EXPORT_SYMBOL(scsi_init_io);

static struct scsi_cmnd *scsi_get_cmd_from_req(struct scsi_device *sdev,
		struct request *req)
{
	struct scsi_cmnd *cmd;

	if (!req->special) {
		/* Bail if we can't get a reference to the device */
		if (!get_device(&sdev->sdev_gendev))
			return NULL;

		cmd = scsi_get_command(sdev, GFP_ATOMIC);
		if (unlikely(!cmd)) {
			put_device(&sdev->sdev_gendev);
			return NULL;
		}
		req->special = cmd;
	} else {
		cmd = req->special;
	}

	/* pull a tag out of the request if we have one */
	cmd->tag = req->tag;
	cmd->request = req;

	cmd->cmnd = req->cmd;
	cmd->prot_op = SCSI_PROT_NORMAL;

	return cmd;
}

static int scsi_setup_blk_pc_cmnd(struct scsi_device *sdev, struct request *req)
{
	struct scsi_cmnd *cmd = req->special;

	/*
	 * BLOCK_PC requests may transfer data, in which case they must
	 * have a bio attached to them.  Or they might contain a SCSI command
	 * that does not transfer data, in which case they may optionally
	 * submit a request without an attached bio.
	 */
	if (req->bio) {
		int ret = scsi_init_io(cmd);
		if (unlikely(ret))
			return ret;
	} else {
		BUG_ON(blk_rq_bytes(req));

		memset(&cmd->sdb, 0, sizeof(cmd->sdb));
	}

	cmd->cmd_len = req->cmd_len;
	cmd->transfersize = blk_rq_bytes(req);
	cmd->allowed = req->retries;
	return BLKPREP_OK;
}

/*
 * Setup a REQ_TYPE_FS command.  These are simple requests from filesystems
 * that still need to be translated to SCSI CDBs from the ULD.
 */
static int scsi_setup_fs_cmnd(struct scsi_device *sdev, struct request *req)
{
	struct scsi_cmnd *cmd = req->special;

	if (unlikely(sdev->scsi_dh_data && sdev->scsi_dh_data->scsi_dh
			 && sdev->scsi_dh_data->scsi_dh->prep_fn)) {
		int ret = sdev->scsi_dh_data->scsi_dh->prep_fn(sdev, req);
		if (ret != BLKPREP_OK)
			return ret;
	}

	memset(cmd->cmnd, 0, BLK_MAX_CDB);
	return scsi_cmd_to_driver(cmd)->init_command(cmd);
}

static int scsi_setup_cmnd(struct scsi_device *sdev, struct request *req)
{
	struct scsi_cmnd *cmd = req->special;

	if (!blk_rq_bytes(req))
		cmd->sc_data_direction = DMA_NONE;
	else if (rq_data_dir(req) == WRITE)
		cmd->sc_data_direction = DMA_TO_DEVICE;
	else
		cmd->sc_data_direction = DMA_FROM_DEVICE;

	switch (req->cmd_type) {
	case REQ_TYPE_FS:
		return scsi_setup_fs_cmnd(sdev, req);
	case REQ_TYPE_BLOCK_PC:
		return scsi_setup_blk_pc_cmnd(sdev, req);
	default:
		return BLKPREP_KILL;
	}
}

static int
scsi_prep_state_check(struct scsi_device *sdev, struct request *req)
{
	int ret = BLKPREP_OK;

	/*
	 * If the device is not in running state we will reject some
	 * or all commands.
	 */
	if (unlikely(sdev->sdev_state != SDEV_RUNNING)) {
		switch (sdev->sdev_state) {
		case SDEV_OFFLINE:
		case SDEV_TRANSPORT_OFFLINE:
			/*
			 * If the device is offline we refuse to process any
			 * commands.  The device must be brought online
			 * before trying any recovery commands.
			 */
			sdev_printk(KERN_ERR, sdev,
				    "rejecting I/O to offline device\n");
			ret = BLKPREP_KILL;
			break;
		case SDEV_DEL:
			/*
			 * If the device is fully deleted, we refuse to
			 * process any commands as well.
			 */
			sdev_printk(KERN_ERR, sdev,
				    "rejecting I/O to dead device\n");
			ret = BLKPREP_KILL;
			break;
		case SDEV_BLOCK:
		case SDEV_CREATED_BLOCK:
			ret = BLKPREP_DEFER;
			break;
		case SDEV_QUIESCE:
			/*
			 * If the device is blocked we defer normal commands.
			 */
			if (!(req->cmd_flags & REQ_PREEMPT))
				ret = BLKPREP_DEFER;
			break;
		default:
			/*
			 * For any other not fully online state we only allow
			 * special commands.  In particular any user initiated
			 * command is not allowed.
			 */
			if (!(req->cmd_flags & REQ_PREEMPT))
				ret = BLKPREP_KILL;
			break;
		}
	}
	return ret;
}

static int
scsi_prep_return(struct request_queue *q, struct request *req, int ret)
{
	struct scsi_device *sdev = q->queuedata;

	switch (ret) {
	case BLKPREP_KILL:
		req->errors = DID_NO_CONNECT << 16;
		/* release the command and kill it */
		if (req->special) {
			struct scsi_cmnd *cmd = req->special;
			scsi_release_buffers(cmd);
			scsi_put_command(cmd);
			put_device(&sdev->sdev_gendev);
			req->special = NULL;
		}
		break;
	case BLKPREP_DEFER:
		/*
		 * If we defer, blk_peek_request() returns NULL, but the
		 * queue must be restarted, so we schedule a callback to happen
		 * shortly.
		 */
		if (atomic_read(&sdev->device_busy) == 0)
			blk_delay_queue(q, SCSI_QUEUE_DELAY);
		break;
	default:
		req->cmd_flags |= REQ_DONTPREP;
	}

	return ret;
}

static int scsi_prep_fn(struct request_queue *q, struct request *req)
{
	struct scsi_device *sdev = q->queuedata;
	struct scsi_cmnd *cmd;
	int ret;

	ret = scsi_prep_state_check(sdev, req);
	if (ret != BLKPREP_OK)
		goto out;

	cmd = scsi_get_cmd_from_req(sdev, req);
	if (unlikely(!cmd)) {
		ret = BLKPREP_DEFER;
		goto out;
	}

	ret = scsi_setup_cmnd(sdev, req);
out:
	return scsi_prep_return(q, req, ret);
}

static void scsi_unprep_fn(struct request_queue *q, struct request *req)
{
	scsi_uninit_cmd(req->special);
}

/*
 * scsi_dev_queue_ready: if we can send requests to sdev, return 1 else
 * return 0.
 *
 * Called with the queue_lock held.
 */
static inline int scsi_dev_queue_ready(struct request_queue *q,
				  struct scsi_device *sdev)
{
	unsigned int busy;

	busy = atomic_inc_return(&sdev->device_busy) - 1;
	if (atomic_read(&sdev->device_blocked)) {
		if (busy)
			goto out_dec;

		/*
		 * unblock after device_blocked iterates to zero
		 */
		if (atomic_dec_return(&sdev->device_blocked) > 0) {
			/*
			 * For the MQ case we take care of this in the caller.
			 */
			if (!q->mq_ops)
				blk_delay_queue(q, SCSI_QUEUE_DELAY);
			goto out_dec;
		}
		SCSI_LOG_MLQUEUE(3, sdev_printk(KERN_INFO, sdev,
				   "unblocking device at zero depth\n"));
	}

	if (busy >= sdev->queue_depth)
		goto out_dec;

	return 1;
out_dec:
	atomic_dec(&sdev->device_busy);
	return 0;
}

/*
 * scsi_target_queue_ready: checks whether we can send commands to the target
 * @sdev: scsi device on starget to check.
 */
static inline int scsi_target_queue_ready(struct Scsi_Host *shost,
					   struct scsi_device *sdev)
{
	struct scsi_target *starget = scsi_target(sdev);
	unsigned int busy;

	if (starget->single_lun) {
		spin_lock_irq(shost->host_lock);
		if (starget->starget_sdev_user &&
		    starget->starget_sdev_user != sdev) {
			spin_unlock_irq(shost->host_lock);
			return 0;
		}
		starget->starget_sdev_user = sdev;
		spin_unlock_irq(shost->host_lock);
	}

	if (starget->can_queue <= 0)
		return 1;

	busy = atomic_inc_return(&starget->target_busy) - 1;
	if (atomic_read(&starget->target_blocked) > 0) {
		if (busy)
			goto starved;

		/*
		 * unblock after target_blocked iterates to zero
		 */
		if (atomic_dec_return(&starget->target_blocked) > 0)
			goto out_dec;

		SCSI_LOG_MLQUEUE(3, starget_printk(KERN_INFO, starget,
				 "unblocking target at zero depth\n"));
	}

	if (busy >= starget->can_queue)
		goto starved;

	return 1;

starved:
	spin_lock_irq(shost->host_lock);
	list_move_tail(&sdev->starved_entry, &shost->starved_list);
	spin_unlock_irq(shost->host_lock);
out_dec:
	if (starget->can_queue > 0)
		atomic_dec(&starget->target_busy);
	return 0;
}

/*
 * scsi_host_queue_ready: if we can send requests to shost, return 1 else
 * return 0. We must end up running the queue again whenever 0 is
 * returned, else IO can hang.
 */
static inline int scsi_host_queue_ready(struct request_queue *q,
				   struct Scsi_Host *shost,
				   struct scsi_device *sdev)
{
	unsigned int busy;

	if (scsi_host_in_recovery(shost))
		return 0;

	busy = atomic_inc_return(&shost->host_busy) - 1;
	if (atomic_read(&shost->host_blocked) > 0) {
		if (busy)
			goto starved;

		/*
		 * unblock after host_blocked iterates to zero
		 */
		if (atomic_dec_return(&shost->host_blocked) > 0)
			goto out_dec;

		SCSI_LOG_MLQUEUE(3,
			shost_printk(KERN_INFO, shost,
				     "unblocking host at zero depth\n"));
	}

	if (shost->can_queue > 0 && busy >= shost->can_queue)
		goto starved;
	if (shost->host_self_blocked)
		goto starved;

	/* We're OK to process the command, so we can't be starved */
	if (!list_empty(&sdev->starved_entry)) {
		spin_lock_irq(shost->host_lock);
		if (!list_empty(&sdev->starved_entry))
			list_del_init(&sdev->starved_entry);
		spin_unlock_irq(shost->host_lock);
	}

	return 1;

starved:
	spin_lock_irq(shost->host_lock);
	if (list_empty(&sdev->starved_entry))
		list_add_tail(&sdev->starved_entry, &shost->starved_list);
	spin_unlock_irq(shost->host_lock);
out_dec:
	atomic_dec(&shost->host_busy);
	return 0;
}

/*
 * Busy state exporting function for request stacking drivers.
 *
 * For efficiency, no lock is taken to check the busy state of
 * shost/starget/sdev, since the returned value is not guaranteed and
 * may be changed after request stacking drivers call the function,
 * regardless of taking lock or not.
 *
 * When scsi can't dispatch I/Os anymore and needs to kill I/Os scsi
 * needs to return 'not busy'. Otherwise, request stacking drivers
 * may hold requests forever.
 */
static int scsi_lld_busy(struct request_queue *q)
{
	struct scsi_device *sdev = q->queuedata;
	struct Scsi_Host *shost;

	if (blk_queue_dying(q))
		return 0;

	shost = sdev->host;

	/*
	 * Ignore host/starget busy state.
	 * Since block layer does not have a concept of fairness across
	 * multiple queues, congestion of host/starget needs to be handled
	 * in SCSI layer.
	 */
	if (scsi_host_in_recovery(shost) || scsi_device_is_busy(sdev))
		return 1;

	return 0;
}

/*
 * Kill a request for a dead device
 */
static void scsi_kill_request(struct request *req, struct request_queue *q)
{
	struct scsi_cmnd *cmd = req->special;
	struct scsi_device *sdev;
	struct scsi_target *starget;
	struct Scsi_Host *shost;

	blk_start_request(req);

	scmd_printk(KERN_INFO, cmd, "killing request\n");

	sdev = cmd->device;
	starget = scsi_target(sdev);
	shost = sdev->host;
	scsi_init_cmd_errh(cmd);
	cmd->result = DID_NO_CONNECT << 16;
	atomic_inc(&cmd->device->iorequest_cnt);

	/*
	 * SCSI request completion path will do scsi_device_unbusy(),
	 * bump busy counts.  To bump the counters, we need to dance
	 * with the locks as normal issue path does.
	 */
	atomic_inc(&sdev->device_busy);
	atomic_inc(&shost->host_busy);
	if (starget->can_queue > 0)
		atomic_inc(&starget->target_busy);

	blk_complete_request(req);
}

static void scsi_softirq_done(struct request *rq)
{
	struct scsi_cmnd *cmd = rq->special;
	unsigned long wait_for = (cmd->allowed + 1) * rq->timeout;
	int disposition;

	INIT_LIST_HEAD(&cmd->eh_entry);

	atomic_inc(&cmd->device->iodone_cnt);
	if (cmd->result)
		atomic_inc(&cmd->device->ioerr_cnt);

	disposition = scsi_decide_disposition(cmd);
	if (disposition != SUCCESS &&
	    time_before(cmd->jiffies_at_alloc + wait_for, jiffies)) {
		sdev_printk(KERN_ERR, cmd->device,
			    "timing out command, waited %lus\n",
			    wait_for/HZ);
		disposition = SUCCESS;
	}

	scsi_log_completion(cmd, disposition);

	switch (disposition) {
	case SUCCESS:
		scsi_finish_command(cmd);
		break;
	case NEEDS_RETRY:
		scsi_queue_insert(cmd, SCSI_MLQUEUE_EH_RETRY);
		break;
	case ADD_TO_MLQUEUE:
		scsi_queue_insert(cmd, SCSI_MLQUEUE_DEVICE_BUSY);
		break;
	default:
		if (!scsi_eh_scmd_add(cmd, 0))
			scsi_finish_command(cmd);
	}
}

/**
 * scsi_dispatch_command - Dispatch a command to the low-level driver.
 * @cmd: command block we are dispatching.
 *
 * Return: nonzero if the request was rejected and the device's queue needs
 * to be plugged.
 */
static int scsi_dispatch_cmd(struct scsi_cmnd *cmd)
{
	struct Scsi_Host *host = cmd->device->host;
	int rtn = 0;

	atomic_inc(&cmd->device->iorequest_cnt);

	/* check if the device is still usable */
	if (unlikely(cmd->device->sdev_state == SDEV_DEL)) {
		/* in SDEV_DEL we error all commands. DID_NO_CONNECT
		 * returns an immediate error upwards, and signals
		 * that the device is no longer present */
		cmd->result = DID_NO_CONNECT << 16;
		goto done;
	}

	/* Check to see if the scsi lld made this device blocked. */
	if (unlikely(scsi_device_blocked(cmd->device))) {
		/*
		 * in blocked state, the command is just put back on
		 * the device queue.  The suspend state has already
		 * blocked the queue so future requests should not
		 * occur until the device transitions out of the
		 * suspend state.
		 */
		SCSI_LOG_MLQUEUE(3, scmd_printk(KERN_INFO, cmd,
			"queuecommand : device blocked\n"));
		return SCSI_MLQUEUE_DEVICE_BUSY;
	}

	/* Store the LUN value in cmnd, if needed. */
	if (cmd->device->lun_in_cdb)
		cmd->cmnd[1] = (cmd->cmnd[1] & 0x1f) |
			       (cmd->device->lun << 5 & 0xe0);

	scsi_log_send(cmd);

	/*
	 * Before we queue this command, check if the command
	 * length exceeds what the host adapter can handle.
	 */
	if (cmd->cmd_len > cmd->device->host->max_cmd_len) {
		SCSI_LOG_MLQUEUE(3, scmd_printk(KERN_INFO, cmd,
			       "queuecommand : command too long. "
			       "cdb_size=%d host->max_cmd_len=%d\n",
			       cmd->cmd_len, cmd->device->host->max_cmd_len));
		cmd->result = (DID_ABORT << 16);
		goto done;
	}

	if (unlikely(host->shost_state == SHOST_DEL)) {
		cmd->result = (DID_NO_CONNECT << 16);
		goto done;

	}

	trace_scsi_dispatch_cmd_start(cmd);
	rtn = host->hostt->queuecommand(host, cmd);
	if (rtn) {
		trace_scsi_dispatch_cmd_error(cmd, rtn);
		if (rtn != SCSI_MLQUEUE_DEVICE_BUSY &&
		    rtn != SCSI_MLQUEUE_TARGET_BUSY)
			rtn = SCSI_MLQUEUE_HOST_BUSY;

		SCSI_LOG_MLQUEUE(3, scmd_printk(KERN_INFO, cmd,
			"queuecommand : request rejected\n"));
	}

	return rtn;
 done:
	cmd->scsi_done(cmd);
	return 0;
}

/**
 * scsi_done - Invoke completion on finished SCSI command.
 * @cmd: The SCSI Command for which a low-level device driver (LLDD) gives
 * ownership back to SCSI Core -- i.e. the LLDD has finished with it.
 *
 * Description: This function is the mid-level's (SCSI Core) interrupt routine,
 * which regains ownership of the SCSI command (de facto) from a LLDD, and
 * calls blk_complete_request() for further processing.
 *
 * This function is interrupt context safe.
 */
static void scsi_done(struct scsi_cmnd *cmd)
{
	trace_scsi_dispatch_cmd_done(cmd);
	blk_complete_request(cmd->request);
}

/*
 * Function:    scsi_request_fn()
 *
 * Purpose:     Main strategy routine for SCSI.
 *
 * Arguments:   q       - Pointer to actual queue.
 *
 * Returns:     Nothing
 *
 * Lock status: IO request lock assumed to be held when called.
 */
static void scsi_request_fn(struct request_queue *q)
	__releases(q->queue_lock)
	__acquires(q->queue_lock)
{
	struct scsi_device *sdev = q->queuedata;
	struct Scsi_Host *shost;
	struct scsi_cmnd *cmd;
	struct request *req;

	/*
	 * To start with, we keep looping until the queue is empty, or until
	 * the host is no longer able to accept any more requests.
	 */
	shost = sdev->host;
	for (;;) {
		int rtn;
		/*
		 * get next queueable request.  We do this early to make sure
		 * that the request is fully prepared even if we cannot
		 * accept it.
		 */
		req = blk_peek_request(q);
		if (!req)
			break;

		if (unlikely(!scsi_device_online(sdev))) {
			sdev_printk(KERN_ERR, sdev,
				    "rejecting I/O to offline device\n");
			scsi_kill_request(req, q);
			continue;
		}

		if (!scsi_dev_queue_ready(q, sdev))
			break;

		/*
		 * Remove the request from the request list.
		 */
		if (!(blk_queue_tagged(q) && !blk_queue_start_tag(q, req)))
			blk_start_request(req);

		spin_unlock_irq(q->queue_lock);
		cmd = req->special;
		if (unlikely(cmd == NULL)) {
			printk(KERN_CRIT "impossible request in %s.\n"
					 "please mail a stack trace to "
					 "linux-scsi@vger.kernel.org\n",
					 __func__);
			blk_dump_rq_flags(req, "foo");
			BUG();
		}

		/*
		 * We hit this when the driver is using a host wide
		 * tag map.  For device level tag maps the queue_depth check
		 * in the device ready fn would prevent us from trying
		 * to allocate a tag.  Since the map is a shared host resource
		 * we add the dev to the starved list so it eventually gets
		 * a run when a tag is freed.
		 */
		if (blk_queue_tagged(q) && !(req->cmd_flags & REQ_QUEUED)) {
			spin_lock_irq(shost->host_lock);
			if (list_empty(&sdev->starved_entry))
				list_add_tail(&sdev->starved_entry,
					      &shost->starved_list);
			spin_unlock_irq(shost->host_lock);
			goto not_ready;
		}

		if (!scsi_target_queue_ready(shost, sdev))
			goto not_ready;

		if (!scsi_host_queue_ready(q, shost, sdev))
			goto host_not_ready;

		if (sdev->simple_tags)
			cmd->flags |= SCMD_TAGGED;
		else
			cmd->flags &= ~SCMD_TAGGED;

		/*
		 * Finally, initialize any error handling parameters, and set up
		 * the timers for timeouts.
		 */
		scsi_init_cmd_errh(cmd);

		/*
		 * Dispatch the command to the low-level driver.
		 */
		cmd->scsi_done = scsi_done;
		rtn = scsi_dispatch_cmd(cmd);
		if (rtn) {
			scsi_queue_insert(cmd, rtn);
			spin_lock_irq(q->queue_lock);
			goto out_delay;
		}
		spin_lock_irq(q->queue_lock);
	}

	return;

 host_not_ready:
	if (scsi_target(sdev)->can_queue > 0)
		atomic_dec(&scsi_target(sdev)->target_busy);
 not_ready:
	/*
	 * lock q, handle tag, requeue req, and decrement device_busy. We
	 * must return with queue_lock held.
	 *
	 * Decrementing device_busy without checking it is OK, as all such
	 * cases (host limits or settings) should run the queue at some
	 * later time.
	 */
	spin_lock_irq(q->queue_lock);
	blk_requeue_request(q, req);
	atomic_dec(&sdev->device_busy);
out_delay:
	if (!atomic_read(&sdev->device_busy) && !scsi_device_blocked(sdev))
		blk_delay_queue(q, SCSI_QUEUE_DELAY);
}

static inline int prep_to_mq(int ret)
{
	switch (ret) {
	case BLKPREP_OK:
		return 0;
	case BLKPREP_DEFER:
		return BLK_MQ_RQ_QUEUE_BUSY;
	default:
		return BLK_MQ_RQ_QUEUE_ERROR;
	}
}

static int scsi_mq_prep_fn(struct request *req)
{
	struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(req);
	struct scsi_device *sdev = req->q->queuedata;
	struct Scsi_Host *shost = sdev->host;
	unsigned char *sense_buf = cmd->sense_buffer;
	struct scatterlist *sg;

	memset(cmd, 0, sizeof(struct scsi_cmnd));

	req->special = cmd;

	cmd->request = req;
	cmd->device = sdev;
	cmd->sense_buffer = sense_buf;

	cmd->tag = req->tag;

	cmd->cmnd = req->cmd;
	cmd->prot_op = SCSI_PROT_NORMAL;

	INIT_LIST_HEAD(&cmd->list);
	INIT_DELAYED_WORK(&cmd->abort_work, scmd_eh_abort_handler);
	cmd->jiffies_at_alloc = jiffies;

	if (shost->use_cmd_list) {
		spin_lock_irq(&sdev->list_lock);
		list_add_tail(&cmd->list, &sdev->cmd_list);
		spin_unlock_irq(&sdev->list_lock);
	}

	sg = (void *)cmd + sizeof(struct scsi_cmnd) + shost->hostt->cmd_size;
	cmd->sdb.table.sgl = sg;

	if (scsi_host_get_prot(shost)) {
		cmd->prot_sdb = (void *)sg +
			min_t(unsigned int,
			      shost->sg_tablesize, SCSI_MAX_SG_SEGMENTS) *
			sizeof(struct scatterlist);
		memset(cmd->prot_sdb, 0, sizeof(struct scsi_data_buffer));

		cmd->prot_sdb->table.sgl =
			(struct scatterlist *)(cmd->prot_sdb + 1);
	}

	if (blk_bidi_rq(req)) {
		struct request *next_rq = req->next_rq;
		struct scsi_data_buffer *bidi_sdb = blk_mq_rq_to_pdu(next_rq);

		memset(bidi_sdb, 0, sizeof(struct scsi_data_buffer));
		bidi_sdb->table.sgl =
			(struct scatterlist *)(bidi_sdb + 1);

		next_rq->special = bidi_sdb;
	}

	blk_mq_start_request(req);

	return scsi_setup_cmnd(sdev, req);
}

static void scsi_mq_done(struct scsi_cmnd *cmd)
{
	trace_scsi_dispatch_cmd_done(cmd);
	blk_mq_complete_request(cmd->request);
}

static int scsi_queue_rq(struct blk_mq_hw_ctx *hctx,
			 const struct blk_mq_queue_data *bd)
{
	struct request *req = bd->rq;
	struct request_queue *q = req->q;
	struct scsi_device *sdev = q->queuedata;
	struct Scsi_Host *shost = sdev->host;
	struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(req);
	int ret;
	int reason;

	ret = prep_to_mq(scsi_prep_state_check(sdev, req));
	if (ret)
		goto out;

	ret = BLK_MQ_RQ_QUEUE_BUSY;
	if (!get_device(&sdev->sdev_gendev))
		goto out;

	if (!scsi_dev_queue_ready(q, sdev))
		goto out_put_device;
	if (!scsi_target_queue_ready(shost, sdev))
		goto out_dec_device_busy;
	if (!scsi_host_queue_ready(q, shost, sdev))
		goto out_dec_target_busy;

	if (!(req->cmd_flags & REQ_DONTPREP)) {
		ret = prep_to_mq(scsi_mq_prep_fn(req));
		if (ret)
			goto out_dec_host_busy;
		req->cmd_flags |= REQ_DONTPREP;
	} else {
		blk_mq_start_request(req);
	}

	if (sdev->simple_tags)
		cmd->flags |= SCMD_TAGGED;
	else
		cmd->flags &= ~SCMD_TAGGED;

	scsi_init_cmd_errh(cmd);
	cmd->scsi_done = scsi_mq_done;

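	/*
	 * Dispatch the command to the low-level driver.  A nonzero return
	 * means the LLD rejected it for now: mark the device/target/host
	 * blocked and report BUSY so blk-mq will retry the request later.
	 */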
scsi_mq_done;

	reason = scsi_dispatch_cmd(cmd);
	if (reason) {
		scsi_set_blocked(cmd, reason);
		ret = BLK_MQ_RQ_QUEUE_BUSY;
		goto out_dec_host_busy;
	}

	return BLK_MQ_RQ_QUEUE_OK;

out_dec_host_busy:
	atomic_dec(&shost->host_busy);
out_dec_target_busy:
	if (scsi_target(sdev)->can_queue > 0)
		atomic_dec(&scsi_target(sdev)->target_busy);
out_dec_device_busy:
	atomic_dec(&sdev->device_busy);
out_put_device:
	put_device(&sdev->sdev_gendev);
out:
	switch (ret) {
	case BLK_MQ_RQ_QUEUE_BUSY:
		blk_mq_stop_hw_queue(hctx);
		if (atomic_read(&sdev->device_busy) == 0 &&
		    !scsi_device_blocked(sdev))
			blk_mq_delay_queue(hctx, SCSI_QUEUE_DELAY);
		break;
	case BLK_MQ_RQ_QUEUE_ERROR:
		/*
		 * Make sure to release all allocated resources when
		 * we hit an error, as we will never see this command
		 * again.
		 */
		if (req->cmd_flags & REQ_DONTPREP)
			scsi_mq_uninit_cmd(cmd);
		break;
	default:
		break;
	}
	return ret;
}

static enum blk_eh_timer_return scsi_timeout(struct request *req,
		bool reserved)
{
	if (reserved)
		return BLK_EH_RESET_TIMER;
	return scsi_times_out(req);
}

static int scsi_init_request(void *data, struct request *rq,
		unsigned int hctx_idx, unsigned int request_idx,
		unsigned int numa_node)
{
	struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(rq);

	cmd->sense_buffer = kzalloc_node(SCSI_SENSE_BUFFERSIZE, GFP_KERNEL,
			numa_node);
	if (!cmd->sense_buffer)
		return -ENOMEM;
	return 0;
}

static void scsi_exit_request(void *data, struct request *rq,
		unsigned int hctx_idx, unsigned int request_idx)
{
	struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(rq);

	kfree(cmd->sense_buffer);
}

static u64 scsi_calculate_bounce_limit(struct Scsi_Host *shost)
{
	struct device *host_dev;
	u64 bounce_limit = 0xffffffff;

	if (shost->unchecked_isa_dma)
		return BLK_BOUNCE_ISA;
	/*
	 * Platforms with virtual-DMA translation
	 * hardware have no practical limit.
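	 * (When PCI_DMA_BUS_IS_PHYS is false an IOMMU remaps pages for the
	 * device, so every page is DMA-addressable and BLK_BOUNCE_ANY
	 * disables bounce buffering altogether.)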
2090 */ 2091 if (!PCI_DMA_BUS_IS_PHYS) 2092 return BLK_BOUNCE_ANY; 2093 2094 host_dev = scsi_get_device(shost); 2095 if (host_dev && host_dev->dma_mask) 2096 bounce_limit = (u64)dma_max_pfn(host_dev) << PAGE_SHIFT; 2097 2098 return bounce_limit; 2099} 2100 2101static void __scsi_init_queue(struct Scsi_Host *shost, struct request_queue *q) 2102{ 2103 struct device *dev = shost->dma_dev; 2104 2105 /* 2106 * this limit is imposed by hardware restrictions 2107 */ 2108 blk_queue_max_segments(q, min_t(unsigned short, shost->sg_tablesize, 2109 SCSI_MAX_SG_CHAIN_SEGMENTS)); 2110 2111 if (scsi_host_prot_dma(shost)) { 2112 shost->sg_prot_tablesize = 2113 min_not_zero(shost->sg_prot_tablesize, 2114 (unsigned short)SCSI_MAX_PROT_SG_SEGMENTS); 2115 BUG_ON(shost->sg_prot_tablesize < shost->sg_tablesize); 2116 blk_queue_max_integrity_segments(q, shost->sg_prot_tablesize); 2117 } 2118 2119 blk_queue_max_hw_sectors(q, shost->max_sectors); 2120 blk_queue_bounce_limit(q, scsi_calculate_bounce_limit(shost)); 2121 blk_queue_segment_boundary(q, shost->dma_boundary); 2122 dma_set_seg_boundary(dev, shost->dma_boundary); 2123 2124 blk_queue_max_segment_size(q, dma_get_max_seg_size(dev)); 2125 2126 if (!shost->use_clustering) 2127 q->limits.cluster = 0; 2128 2129 /* 2130 * set a reasonable default alignment on word boundaries: the 2131 * host and device may alter it using 2132 * blk_queue_update_dma_alignment() later. 2133 */ 2134 blk_queue_dma_alignment(q, 0x03); 2135} 2136 2137struct request_queue *__scsi_alloc_queue(struct Scsi_Host *shost, 2138 request_fn_proc *request_fn) 2139{ 2140 struct request_queue *q; 2141 2142 q = blk_init_queue(request_fn, NULL); 2143 if (!q) 2144 return NULL; 2145 __scsi_init_queue(shost, q); 2146 return q; 2147} 2148EXPORT_SYMBOL(__scsi_alloc_queue); 2149 2150struct request_queue *scsi_alloc_queue(struct scsi_device *sdev) 2151{ 2152 struct request_queue *q; 2153 2154 q = __scsi_alloc_queue(sdev->host, scsi_request_fn); 2155 if (!q) 2156 return NULL; 2157 2158 blk_queue_prep_rq(q, scsi_prep_fn); 2159 blk_queue_unprep_rq(q, scsi_unprep_fn); 2160 blk_queue_softirq_done(q, scsi_softirq_done); 2161 blk_queue_rq_timed_out(q, scsi_times_out); 2162 blk_queue_lld_busy(q, scsi_lld_busy); 2163 return q; 2164} 2165 2166static struct blk_mq_ops scsi_mq_ops = { 2167 .map_queue = blk_mq_map_queue, 2168 .queue_rq = scsi_queue_rq, 2169 .complete = scsi_softirq_done, 2170 .timeout = scsi_timeout, 2171 .init_request = scsi_init_request, 2172 .exit_request = scsi_exit_request, 2173}; 2174 2175struct request_queue *scsi_mq_alloc_queue(struct scsi_device *sdev) 2176{ 2177 sdev->request_queue = blk_mq_init_queue(&sdev->host->tag_set); 2178 if (IS_ERR(sdev->request_queue)) 2179 return NULL; 2180 2181 sdev->request_queue->queuedata = sdev; 2182 __scsi_init_queue(sdev->host, sdev->request_queue); 2183 return sdev->request_queue; 2184} 2185 2186int scsi_mq_setup_tags(struct Scsi_Host *shost) 2187{ 2188 unsigned int cmd_size, sgl_size, tbl_size; 2189 2190 tbl_size = shost->sg_tablesize; 2191 if (tbl_size > SCSI_MAX_SG_SEGMENTS) 2192 tbl_size = SCSI_MAX_SG_SEGMENTS; 2193 sgl_size = tbl_size * sizeof(struct scatterlist); 2194 cmd_size = sizeof(struct scsi_cmnd) + shost->hostt->cmd_size + sgl_size; 2195 if (scsi_host_get_prot(shost)) 2196 cmd_size += sizeof(struct scsi_data_buffer) + sgl_size; 2197 2198 memset(&shost->tag_set, 0, sizeof(shost->tag_set)); 2199 shost->tag_set.ops = &scsi_mq_ops; 2200 shost->tag_set.nr_hw_queues = shost->nr_hw_queues ? 
: 1;
	shost->tag_set.queue_depth = shost->can_queue;
	shost->tag_set.cmd_size = cmd_size;
	shost->tag_set.numa_node = NUMA_NO_NODE;
	shost->tag_set.flags = BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_SG_MERGE;
	shost->tag_set.flags |=
		BLK_ALLOC_POLICY_TO_MQ_FLAG(shost->hostt->tag_alloc_policy);
	shost->tag_set.driver_data = shost;

	return blk_mq_alloc_tag_set(&shost->tag_set);
}

void scsi_mq_destroy_tags(struct Scsi_Host *shost)
{
	blk_mq_free_tag_set(&shost->tag_set);
}

/*
 * Function:    scsi_block_requests()
 *
 * Purpose:     Utility function used by low-level drivers to prevent further
 *		commands from being queued to the device.
 *
 * Arguments:   shost       - Host in question
 *
 * Returns:     Nothing
 *
 * Lock status: No locks are assumed held.
 *
 * Notes:       There is no timer nor any other means by which the requests
 *		get unblocked other than the low-level driver calling
 *		scsi_unblock_requests().
 */
void scsi_block_requests(struct Scsi_Host *shost)
{
	shost->host_self_blocked = 1;
}
EXPORT_SYMBOL(scsi_block_requests);

/*
 * Function:    scsi_unblock_requests()
 *
 * Purpose:     Utility function used by low-level drivers to allow further
 *		commands to be queued to the device.
 *
 * Arguments:   shost       - Host in question
 *
 * Returns:     Nothing
 *
 * Lock status: No locks are assumed held.
 *
 * Notes:       There is no timer nor any other means by which the requests
 *		get unblocked other than the low-level driver calling
 *		scsi_unblock_requests().
 *
 *		This is done as an API function so that changes to the
 *		internals of the scsi mid-layer won't require wholesale
 *		changes to drivers that use this feature.
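 *
 *		A minimal usage sketch (hypothetical LLD, not taken from
 *		this file): a driver that must hold off the midlayer while
 *		it reloads firmware could bracket the operation as
 *
 *			scsi_block_requests(shost);
 *			example_reload_firmware(hba);	(hypothetical helper)
 *			scsi_unblock_requests(shost);
 *
 *		Since nothing times out the blocked state, every call to
 *		scsi_block_requests() must eventually be paired with a
 *		scsi_unblock_requests().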
2258 */ 2259void scsi_unblock_requests(struct Scsi_Host *shost) 2260{ 2261 shost->host_self_blocked = 0; 2262 scsi_run_host_queues(shost); 2263} 2264EXPORT_SYMBOL(scsi_unblock_requests); 2265 2266int __init scsi_init_queue(void) 2267{ 2268 int i; 2269 2270 scsi_sdb_cache = kmem_cache_create("scsi_data_buffer", 2271 sizeof(struct scsi_data_buffer), 2272 0, 0, NULL); 2273 if (!scsi_sdb_cache) { 2274 printk(KERN_ERR "SCSI: can't init scsi sdb cache\n"); 2275 return -ENOMEM; 2276 } 2277 2278 for (i = 0; i < SG_MEMPOOL_NR; i++) { 2279 struct scsi_host_sg_pool *sgp = scsi_sg_pools + i; 2280 int size = sgp->size * sizeof(struct scatterlist); 2281 2282 sgp->slab = kmem_cache_create(sgp->name, size, 0, 2283 SLAB_HWCACHE_ALIGN, NULL); 2284 if (!sgp->slab) { 2285 printk(KERN_ERR "SCSI: can't init sg slab %s\n", 2286 sgp->name); 2287 goto cleanup_sdb; 2288 } 2289 2290 sgp->pool = mempool_create_slab_pool(SG_MEMPOOL_SIZE, 2291 sgp->slab); 2292 if (!sgp->pool) { 2293 printk(KERN_ERR "SCSI: can't init sg mempool %s\n", 2294 sgp->name); 2295 goto cleanup_sdb; 2296 } 2297 } 2298 2299 return 0; 2300 2301cleanup_sdb: 2302 for (i = 0; i < SG_MEMPOOL_NR; i++) { 2303 struct scsi_host_sg_pool *sgp = scsi_sg_pools + i; 2304 if (sgp->pool) 2305 mempool_destroy(sgp->pool); 2306 if (sgp->slab) 2307 kmem_cache_destroy(sgp->slab); 2308 } 2309 kmem_cache_destroy(scsi_sdb_cache); 2310 2311 return -ENOMEM; 2312} 2313 2314void scsi_exit_queue(void) 2315{ 2316 int i; 2317 2318 kmem_cache_destroy(scsi_sdb_cache); 2319 2320 for (i = 0; i < SG_MEMPOOL_NR; i++) { 2321 struct scsi_host_sg_pool *sgp = scsi_sg_pools + i; 2322 mempool_destroy(sgp->pool); 2323 kmem_cache_destroy(sgp->slab); 2324 } 2325} 2326 2327/** 2328 * scsi_mode_select - issue a mode select 2329 * @sdev: SCSI device to be queried 2330 * @pf: Page format bit (1 == standard, 0 == vendor specific) 2331 * @sp: Save page bit (0 == don't save, 1 == save) 2332 * @modepage: mode page being requested 2333 * @buffer: request buffer (may not be smaller than eight bytes) 2334 * @len: length of request buffer. 2335 * @timeout: command timeout 2336 * @retries: number of retries before failing 2337 * @data: returns a structure abstracting the mode header data 2338 * @sshdr: place to put sense data (or NULL if no sense to be collected). 2339 * must be SCSI_SENSE_BUFFERSIZE big. 2340 * 2341 * Returns zero if successful; negative error number or scsi 2342 * status on error 2343 * 2344 */ 2345int 2346scsi_mode_select(struct scsi_device *sdev, int pf, int sp, int modepage, 2347 unsigned char *buffer, int len, int timeout, int retries, 2348 struct scsi_mode_data *data, struct scsi_sense_hdr *sshdr) 2349{ 2350 unsigned char cmd[10]; 2351 unsigned char *real_buffer; 2352 int ret; 2353 2354 memset(cmd, 0, sizeof(cmd)); 2355 cmd[1] = (pf ? 0x10 : 0) | (sp ? 0x01 : 0); 2356 2357 if (sdev->use_10_for_ms) { 2358 if (len > 65535) 2359 return -EINVAL; 2360 real_buffer = kmalloc(8 + len, GFP_KERNEL); 2361 if (!real_buffer) 2362 return -ENOMEM; 2363 memcpy(real_buffer + 8, buffer, len); 2364 len += 8; 2365 real_buffer[0] = 0; 2366 real_buffer[1] = 0; 2367 real_buffer[2] = data->medium_type; 2368 real_buffer[3] = data->device_specific; 2369 real_buffer[4] = data->longlba ? 
0x01 : 0; 2370 real_buffer[5] = 0; 2371 real_buffer[6] = data->block_descriptor_length >> 8; 2372 real_buffer[7] = data->block_descriptor_length; 2373 2374 cmd[0] = MODE_SELECT_10; 2375 cmd[7] = len >> 8; 2376 cmd[8] = len; 2377 } else { 2378 if (len > 255 || data->block_descriptor_length > 255 || 2379 data->longlba) 2380 return -EINVAL; 2381 2382 real_buffer = kmalloc(4 + len, GFP_KERNEL); 2383 if (!real_buffer) 2384 return -ENOMEM; 2385 memcpy(real_buffer + 4, buffer, len); 2386 len += 4; 2387 real_buffer[0] = 0; 2388 real_buffer[1] = data->medium_type; 2389 real_buffer[2] = data->device_specific; 2390 real_buffer[3] = data->block_descriptor_length; 2391 2392 2393 cmd[0] = MODE_SELECT; 2394 cmd[4] = len; 2395 } 2396 2397 ret = scsi_execute_req(sdev, cmd, DMA_TO_DEVICE, real_buffer, len, 2398 sshdr, timeout, retries, NULL); 2399 kfree(real_buffer); 2400 return ret; 2401} 2402EXPORT_SYMBOL_GPL(scsi_mode_select); 2403 2404/** 2405 * scsi_mode_sense - issue a mode sense, falling back from 10 to six bytes if necessary. 2406 * @sdev: SCSI device to be queried 2407 * @dbd: set if mode sense will allow block descriptors to be returned 2408 * @modepage: mode page being requested 2409 * @buffer: request buffer (may not be smaller than eight bytes) 2410 * @len: length of request buffer. 2411 * @timeout: command timeout 2412 * @retries: number of retries before failing 2413 * @data: returns a structure abstracting the mode header data 2414 * @sshdr: place to put sense data (or NULL if no sense to be collected). 2415 * must be SCSI_SENSE_BUFFERSIZE big. 2416 * 2417 * Returns zero if unsuccessful, or the header offset (either 4 2418 * or 8 depending on whether a six or ten byte command was 2419 * issued) if successful. 2420 */ 2421int 2422scsi_mode_sense(struct scsi_device *sdev, int dbd, int modepage, 2423 unsigned char *buffer, int len, int timeout, int retries, 2424 struct scsi_mode_data *data, struct scsi_sense_hdr *sshdr) 2425{ 2426 unsigned char cmd[12]; 2427 int use_10_for_ms; 2428 int header_length; 2429 int result; 2430 struct scsi_sense_hdr my_sshdr; 2431 2432 memset(data, 0, sizeof(*data)); 2433 memset(&cmd[0], 0, 12); 2434 cmd[1] = dbd & 0x18; /* allows DBD and LLBA bits */ 2435 cmd[2] = modepage; 2436 2437 /* caller might not be interested in sense, but we need it */ 2438 if (!sshdr) 2439 sshdr = &my_sshdr; 2440 2441 retry: 2442 use_10_for_ms = sdev->use_10_for_ms; 2443 2444 if (use_10_for_ms) { 2445 if (len < 8) 2446 len = 8; 2447 2448 cmd[0] = MODE_SENSE_10; 2449 cmd[8] = len; 2450 header_length = 8; 2451 } else { 2452 if (len < 4) 2453 len = 4; 2454 2455 cmd[0] = MODE_SENSE; 2456 cmd[4] = len; 2457 header_length = 4; 2458 } 2459 2460 memset(buffer, 0, len); 2461 2462 result = scsi_execute_req(sdev, cmd, DMA_FROM_DEVICE, buffer, len, 2463 sshdr, timeout, retries, NULL); 2464 2465 /* This code looks awful: what it's doing is making sure an 2466 * ILLEGAL REQUEST sense return identifies the actual command 2467 * byte as the problem. 
MODE_SENSE commands can return
	 * ILLEGAL REQUEST if the code page isn't supported */

	if (use_10_for_ms && !scsi_status_is_good(result) &&
	    (driver_byte(result) & DRIVER_SENSE)) {
		if (scsi_sense_valid(sshdr)) {
			if ((sshdr->sense_key == ILLEGAL_REQUEST) &&
			    (sshdr->asc == 0x20) && (sshdr->ascq == 0)) {
				/*
				 * Invalid command operation code
				 */
				sdev->use_10_for_ms = 0;
				goto retry;
			}
		}
	}

	if (scsi_status_is_good(result)) {
		if (unlikely(buffer[0] == 0x86 && buffer[1] == 0x0b &&
			     (modepage == 6 || modepage == 8))) {
			/* Initio breakage? */
			header_length = 0;
			data->length = 13;
			data->medium_type = 0;
			data->device_specific = 0;
			data->longlba = 0;
			data->block_descriptor_length = 0;
		} else if (use_10_for_ms) {
			data->length = buffer[0]*256 + buffer[1] + 2;
			data->medium_type = buffer[2];
			data->device_specific = buffer[3];
			data->longlba = buffer[4] & 0x01;
			data->block_descriptor_length = buffer[6]*256
				+ buffer[7];
		} else {
			data->length = buffer[0] + 1;
			data->medium_type = buffer[1];
			data->device_specific = buffer[2];
			data->block_descriptor_length = buffer[3];
		}
		data->header_length = header_length;
	}

	return result;
}
EXPORT_SYMBOL(scsi_mode_sense);

/**
 *	scsi_test_unit_ready - test if unit is ready
 *	@sdev:	scsi device to change the state of.
 *	@timeout: command timeout
 *	@retries: number of retries before failing
 *	@sshdr_external: Optional pointer to struct scsi_sense_hdr for
 *		returning sense. Make sure that this is cleared before passing
 *		in.
 *
 *	Returns zero if successful or an error if TUR failed.  For
 *	removable media, UNIT_ATTENTION sets ->changed flag.
 **/
int
scsi_test_unit_ready(struct scsi_device *sdev, int timeout, int retries,
		     struct scsi_sense_hdr *sshdr_external)
{
	char cmd[] = {
		TEST_UNIT_READY, 0, 0, 0, 0, 0,
	};
	struct scsi_sense_hdr *sshdr;
	int result;

	if (!sshdr_external)
		sshdr = kzalloc(sizeof(*sshdr), GFP_KERNEL);
	else
		sshdr = sshdr_external;

	/* try to eat the UNIT_ATTENTION if there are enough retries */
	do {
		result = scsi_execute_req(sdev, cmd, DMA_NONE, NULL, 0, sshdr,
					  timeout, retries, NULL);
		if (sdev->removable && scsi_sense_valid(sshdr) &&
		    sshdr->sense_key == UNIT_ATTENTION)
			sdev->changed = 1;
	} while (scsi_sense_valid(sshdr) &&
		 sshdr->sense_key == UNIT_ATTENTION && --retries);

	if (!sshdr_external)
		kfree(sshdr);
	return result;
}
EXPORT_SYMBOL(scsi_test_unit_ready);

/**
 *	scsi_device_set_state - Take the given device through the device state model.
 *	@sdev:	scsi device to change the state of.
 *	@state:	state to change to.
 *
 *	Returns zero if successful or an error if the requested
 *	transition is illegal.
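 *
 *	A minimal caller sketch (illustrative, not from this file):
 *
 *		if (scsi_device_set_state(sdev, SDEV_OFFLINE))
 *			sdev_printk(KERN_WARNING, sdev,
 *				    "refusing illegal transition to OFFLINE\n");
 *
 *	Any transition not listed in the switch below falls through to the
 *	"illegal" label and returns -EINVAL.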
2564 */ 2565int 2566scsi_device_set_state(struct scsi_device *sdev, enum scsi_device_state state) 2567{ 2568 enum scsi_device_state oldstate = sdev->sdev_state; 2569 2570 if (state == oldstate) 2571 return 0; 2572 2573 switch (state) { 2574 case SDEV_CREATED: 2575 switch (oldstate) { 2576 case SDEV_CREATED_BLOCK: 2577 break; 2578 default: 2579 goto illegal; 2580 } 2581 break; 2582 2583 case SDEV_RUNNING: 2584 switch (oldstate) { 2585 case SDEV_CREATED: 2586 case SDEV_OFFLINE: 2587 case SDEV_TRANSPORT_OFFLINE: 2588 case SDEV_QUIESCE: 2589 case SDEV_BLOCK: 2590 break; 2591 default: 2592 goto illegal; 2593 } 2594 break; 2595 2596 case SDEV_QUIESCE: 2597 switch (oldstate) { 2598 case SDEV_RUNNING: 2599 case SDEV_OFFLINE: 2600 case SDEV_TRANSPORT_OFFLINE: 2601 break; 2602 default: 2603 goto illegal; 2604 } 2605 break; 2606 2607 case SDEV_OFFLINE: 2608 case SDEV_TRANSPORT_OFFLINE: 2609 switch (oldstate) { 2610 case SDEV_CREATED: 2611 case SDEV_RUNNING: 2612 case SDEV_QUIESCE: 2613 case SDEV_BLOCK: 2614 break; 2615 default: 2616 goto illegal; 2617 } 2618 break; 2619 2620 case SDEV_BLOCK: 2621 switch (oldstate) { 2622 case SDEV_RUNNING: 2623 case SDEV_CREATED_BLOCK: 2624 break; 2625 default: 2626 goto illegal; 2627 } 2628 break; 2629 2630 case SDEV_CREATED_BLOCK: 2631 switch (oldstate) { 2632 case SDEV_CREATED: 2633 break; 2634 default: 2635 goto illegal; 2636 } 2637 break; 2638 2639 case SDEV_CANCEL: 2640 switch (oldstate) { 2641 case SDEV_CREATED: 2642 case SDEV_RUNNING: 2643 case SDEV_QUIESCE: 2644 case SDEV_OFFLINE: 2645 case SDEV_TRANSPORT_OFFLINE: 2646 case SDEV_BLOCK: 2647 break; 2648 default: 2649 goto illegal; 2650 } 2651 break; 2652 2653 case SDEV_DEL: 2654 switch (oldstate) { 2655 case SDEV_CREATED: 2656 case SDEV_RUNNING: 2657 case SDEV_OFFLINE: 2658 case SDEV_TRANSPORT_OFFLINE: 2659 case SDEV_CANCEL: 2660 case SDEV_CREATED_BLOCK: 2661 break; 2662 default: 2663 goto illegal; 2664 } 2665 break; 2666 2667 } 2668 sdev->sdev_state = state; 2669 return 0; 2670 2671 illegal: 2672 SCSI_LOG_ERROR_RECOVERY(1, 2673 sdev_printk(KERN_ERR, sdev, 2674 "Illegal state transition %s->%s", 2675 scsi_device_state_name(oldstate), 2676 scsi_device_state_name(state)) 2677 ); 2678 return -EINVAL; 2679} 2680EXPORT_SYMBOL(scsi_device_set_state); 2681 2682/** 2683 * sdev_evt_emit - emit a single SCSI device uevent 2684 * @sdev: associated SCSI device 2685 * @evt: event to emit 2686 * 2687 * Send a single uevent (scsi_event) to the associated scsi_device. 
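 *
 * Each event type is translated into at most one "SDEV_*" environment
 * string; the resulting KOBJ_CHANGE uevent is what userspace (e.g. udev
 * rules) matches on.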
2688 */ 2689static void scsi_evt_emit(struct scsi_device *sdev, struct scsi_event *evt) 2690{ 2691 int idx = 0; 2692 char *envp[3]; 2693 2694 switch (evt->evt_type) { 2695 case SDEV_EVT_MEDIA_CHANGE: 2696 envp[idx++] = "SDEV_MEDIA_CHANGE=1"; 2697 break; 2698 case SDEV_EVT_INQUIRY_CHANGE_REPORTED: 2699 envp[idx++] = "SDEV_UA=INQUIRY_DATA_HAS_CHANGED"; 2700 break; 2701 case SDEV_EVT_CAPACITY_CHANGE_REPORTED: 2702 envp[idx++] = "SDEV_UA=CAPACITY_DATA_HAS_CHANGED"; 2703 break; 2704 case SDEV_EVT_SOFT_THRESHOLD_REACHED_REPORTED: 2705 envp[idx++] = "SDEV_UA=THIN_PROVISIONING_SOFT_THRESHOLD_REACHED"; 2706 break; 2707 case SDEV_EVT_MODE_PARAMETER_CHANGE_REPORTED: 2708 envp[idx++] = "SDEV_UA=MODE_PARAMETERS_CHANGED"; 2709 break; 2710 case SDEV_EVT_LUN_CHANGE_REPORTED: 2711 envp[idx++] = "SDEV_UA=REPORTED_LUNS_DATA_HAS_CHANGED"; 2712 break; 2713 default: 2714 /* do nothing */ 2715 break; 2716 } 2717 2718 envp[idx++] = NULL; 2719 2720 kobject_uevent_env(&sdev->sdev_gendev.kobj, KOBJ_CHANGE, envp); 2721} 2722 2723/** 2724 * sdev_evt_thread - send a uevent for each scsi event 2725 * @work: work struct for scsi_device 2726 * 2727 * Dispatch queued events to their associated scsi_device kobjects 2728 * as uevents. 2729 */ 2730void scsi_evt_thread(struct work_struct *work) 2731{ 2732 struct scsi_device *sdev; 2733 enum scsi_device_event evt_type; 2734 LIST_HEAD(event_list); 2735 2736 sdev = container_of(work, struct scsi_device, event_work); 2737 2738 for (evt_type = SDEV_EVT_FIRST; evt_type <= SDEV_EVT_LAST; evt_type++) 2739 if (test_and_clear_bit(evt_type, sdev->pending_events)) 2740 sdev_evt_send_simple(sdev, evt_type, GFP_KERNEL); 2741 2742 while (1) { 2743 struct scsi_event *evt; 2744 struct list_head *this, *tmp; 2745 unsigned long flags; 2746 2747 spin_lock_irqsave(&sdev->list_lock, flags); 2748 list_splice_init(&sdev->event_list, &event_list); 2749 spin_unlock_irqrestore(&sdev->list_lock, flags); 2750 2751 if (list_empty(&event_list)) 2752 break; 2753 2754 list_for_each_safe(this, tmp, &event_list) { 2755 evt = list_entry(this, struct scsi_event, node); 2756 list_del(&evt->node); 2757 scsi_evt_emit(sdev, evt); 2758 kfree(evt); 2759 } 2760 } 2761} 2762 2763/** 2764 * sdev_evt_send - send asserted event to uevent thread 2765 * @sdev: scsi_device event occurred on 2766 * @evt: event to send 2767 * 2768 * Assert scsi device event asynchronously. 2769 */ 2770void sdev_evt_send(struct scsi_device *sdev, struct scsi_event *evt) 2771{ 2772 unsigned long flags; 2773 2774#if 0 2775 /* FIXME: currently this check eliminates all media change events 2776 * for polled devices. Need to update to discriminate between AN 2777 * and polled events */ 2778 if (!test_bit(evt->evt_type, sdev->supported_events)) { 2779 kfree(evt); 2780 return; 2781 } 2782#endif 2783 2784 spin_lock_irqsave(&sdev->list_lock, flags); 2785 list_add_tail(&evt->node, &sdev->event_list); 2786 schedule_work(&sdev->event_work); 2787 spin_unlock_irqrestore(&sdev->list_lock, flags); 2788} 2789EXPORT_SYMBOL_GPL(sdev_evt_send); 2790 2791/** 2792 * sdev_evt_alloc - allocate a new scsi event 2793 * @evt_type: type of event to allocate 2794 * @gfpflags: GFP flags for allocation 2795 * 2796 * Allocates and returns a new scsi_event. 
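 *
 * A minimal usage sketch (illustrative caller, not from this file); most
 * callers use sdev_evt_send_simple(), but the two-step form is:
 *
 *	struct scsi_event *evt;
 *
 *	evt = sdev_evt_alloc(SDEV_EVT_MEDIA_CHANGE, GFP_ATOMIC);
 *	if (evt)
 *		sdev_evt_send(sdev, evt);
 *
 * sdev_evt_send() takes ownership of the event, and it is freed from the
 * event work thread after the uevent has been emitted.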
 */
struct scsi_event *sdev_evt_alloc(enum scsi_device_event evt_type,
				  gfp_t gfpflags)
{
	struct scsi_event *evt = kzalloc(sizeof(struct scsi_event), gfpflags);
	if (!evt)
		return NULL;

	evt->evt_type = evt_type;
	INIT_LIST_HEAD(&evt->node);

	/* evt_type-specific initialization, if any */
	switch (evt_type) {
	case SDEV_EVT_MEDIA_CHANGE:
	case SDEV_EVT_INQUIRY_CHANGE_REPORTED:
	case SDEV_EVT_CAPACITY_CHANGE_REPORTED:
	case SDEV_EVT_SOFT_THRESHOLD_REACHED_REPORTED:
	case SDEV_EVT_MODE_PARAMETER_CHANGE_REPORTED:
	case SDEV_EVT_LUN_CHANGE_REPORTED:
	default:
		/* do nothing */
		break;
	}

	return evt;
}
EXPORT_SYMBOL_GPL(sdev_evt_alloc);

/**
 * sdev_evt_send_simple - send asserted event to uevent thread
 * @sdev: scsi_device event occurred on
 * @evt_type: type of event to send
 * @gfpflags: GFP flags for allocation
 *
 * Assert scsi device event asynchronously, given an event type.
 */
void sdev_evt_send_simple(struct scsi_device *sdev,
			  enum scsi_device_event evt_type, gfp_t gfpflags)
{
	struct scsi_event *evt = sdev_evt_alloc(evt_type, gfpflags);
	if (!evt) {
		sdev_printk(KERN_ERR, sdev, "event %d eaten due to OOM\n",
			    evt_type);
		return;
	}

	sdev_evt_send(sdev, evt);
}
EXPORT_SYMBOL_GPL(sdev_evt_send_simple);

/**
 *	scsi_device_quiesce - Block user issued commands.
 *	@sdev:	scsi device to quiesce.
 *
 *	This works by trying to transition to the SDEV_QUIESCE state
 *	(which must be a legal transition).  When the device is in this
 *	state, only special requests will be accepted, all others will
 *	be deferred.  Since special requests may also be requeued requests,
 *	a successful return doesn't guarantee the device will be
 *	totally quiescent.
 *
 *	Must be called with user context, may sleep.
 *
 *	Returns zero if successful or an error if not.
 */
int
scsi_device_quiesce(struct scsi_device *sdev)
{
	int err = scsi_device_set_state(sdev, SDEV_QUIESCE);
	if (err)
		return err;

	scsi_run_queue(sdev->request_queue);
	while (atomic_read(&sdev->device_busy)) {
		msleep_interruptible(200);
		scsi_run_queue(sdev->request_queue);
	}
	return 0;
}
EXPORT_SYMBOL(scsi_device_quiesce);

/**
 *	scsi_device_resume - Restart user issued commands to a quiesced device.
 *	@sdev:	scsi device to resume.
 *
 *	Moves the device from quiesced back to running and restarts the
 *	queues.
 *
 *	Must be called with user context, may sleep.
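 *
 *	A minimal sketch (assumed power-management style caller, not from
 *	this file):
 *
 *		if (scsi_device_quiesce(sdev) == 0) {
 *			example_save_state(sdev);	(hypothetical helper)
 *			scsi_device_resume(sdev);
 *		}
 *
 *	Resume is a no-op if the device was moved out of SDEV_QUIESCE in
 *	the meantime, e.g. because it was deleted while suspended.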
2886 */ 2887void scsi_device_resume(struct scsi_device *sdev) 2888{ 2889 /* check if the device state was mutated prior to resume, and if 2890 * so assume the state is being managed elsewhere (for example 2891 * device deleted during suspend) 2892 */ 2893 if (sdev->sdev_state != SDEV_QUIESCE || 2894 scsi_device_set_state(sdev, SDEV_RUNNING)) 2895 return; 2896 scsi_run_queue(sdev->request_queue); 2897} 2898EXPORT_SYMBOL(scsi_device_resume); 2899 2900static void 2901device_quiesce_fn(struct scsi_device *sdev, void *data) 2902{ 2903 scsi_device_quiesce(sdev); 2904} 2905 2906void 2907scsi_target_quiesce(struct scsi_target *starget) 2908{ 2909 starget_for_each_device(starget, NULL, device_quiesce_fn); 2910} 2911EXPORT_SYMBOL(scsi_target_quiesce); 2912 2913static void 2914device_resume_fn(struct scsi_device *sdev, void *data) 2915{ 2916 scsi_device_resume(sdev); 2917} 2918 2919void 2920scsi_target_resume(struct scsi_target *starget) 2921{ 2922 starget_for_each_device(starget, NULL, device_resume_fn); 2923} 2924EXPORT_SYMBOL(scsi_target_resume); 2925 2926/** 2927 * scsi_internal_device_block - internal function to put a device temporarily into the SDEV_BLOCK state 2928 * @sdev: device to block 2929 * 2930 * Block request made by scsi lld's to temporarily stop all 2931 * scsi commands on the specified device. Called from interrupt 2932 * or normal process context. 2933 * 2934 * Returns zero if successful or error if not 2935 * 2936 * Notes: 2937 * This routine transitions the device to the SDEV_BLOCK state 2938 * (which must be a legal transition). When the device is in this 2939 * state, all commands are deferred until the scsi lld reenables 2940 * the device with scsi_device_unblock or device_block_tmo fires. 2941 */ 2942int 2943scsi_internal_device_block(struct scsi_device *sdev) 2944{ 2945 struct request_queue *q = sdev->request_queue; 2946 unsigned long flags; 2947 int err = 0; 2948 2949 err = scsi_device_set_state(sdev, SDEV_BLOCK); 2950 if (err) { 2951 err = scsi_device_set_state(sdev, SDEV_CREATED_BLOCK); 2952 2953 if (err) 2954 return err; 2955 } 2956 2957 /* 2958 * The device has transitioned to SDEV_BLOCK. Stop the 2959 * block layer from calling the midlayer with this device's 2960 * request queue. 2961 */ 2962 if (q->mq_ops) { 2963 blk_mq_stop_hw_queues(q); 2964 } else { 2965 spin_lock_irqsave(q->queue_lock, flags); 2966 blk_stop_queue(q); 2967 spin_unlock_irqrestore(q->queue_lock, flags); 2968 } 2969 2970 return 0; 2971} 2972EXPORT_SYMBOL_GPL(scsi_internal_device_block); 2973 2974/** 2975 * scsi_internal_device_unblock - resume a device after a block request 2976 * @sdev: device to resume 2977 * @new_state: state to set devices to after unblocking 2978 * 2979 * Called by scsi lld's or the midlayer to restart the device queue 2980 * for the previously suspended scsi device. Called from interrupt or 2981 * normal process context. 2982 * 2983 * Returns zero if successful or error if not. 2984 * 2985 * Notes: 2986 * This routine transitions the device to the SDEV_RUNNING state 2987 * or to one of the offline states (which must be a legal transition) 2988 * allowing the midlayer to goose the queue for this device. 2989 */ 2990int 2991scsi_internal_device_unblock(struct scsi_device *sdev, 2992 enum scsi_device_state new_state) 2993{ 2994 struct request_queue *q = sdev->request_queue; 2995 unsigned long flags; 2996 2997 /* 2998 * Try to transition the scsi device to SDEV_RUNNING or one of the 2999 * offlined states and goose the device queue if successful. 
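	 *
	 * Allowed transitions, mirroring the checks below:
	 *   SDEV_BLOCK / SDEV_TRANSPORT_OFFLINE -> new_state
	 *   SDEV_CREATED_BLOCK -> new_state if it is an offline state,
	 *                         otherwise SDEV_CREATED
	 *   SDEV_CANCEL / SDEV_OFFLINE -> state left alone, queue restarted
	 *   anything else -> -EINVAL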
 */
	if ((sdev->sdev_state == SDEV_BLOCK) ||
	    (sdev->sdev_state == SDEV_TRANSPORT_OFFLINE))
		sdev->sdev_state = new_state;
	else if (sdev->sdev_state == SDEV_CREATED_BLOCK) {
		if (new_state == SDEV_TRANSPORT_OFFLINE ||
		    new_state == SDEV_OFFLINE)
			sdev->sdev_state = new_state;
		else
			sdev->sdev_state = SDEV_CREATED;
	} else if (sdev->sdev_state != SDEV_CANCEL &&
		   sdev->sdev_state != SDEV_OFFLINE)
		return -EINVAL;

	if (q->mq_ops) {
		blk_mq_start_stopped_hw_queues(q, false);
	} else {
		spin_lock_irqsave(q->queue_lock, flags);
		blk_start_queue(q);
		spin_unlock_irqrestore(q->queue_lock, flags);
	}

	return 0;
}
EXPORT_SYMBOL_GPL(scsi_internal_device_unblock);

static void
device_block(struct scsi_device *sdev, void *data)
{
	scsi_internal_device_block(sdev);
}

static int
target_block(struct device *dev, void *data)
{
	if (scsi_is_target_device(dev))
		starget_for_each_device(to_scsi_target(dev), NULL,
					device_block);
	return 0;
}

void
scsi_target_block(struct device *dev)
{
	if (scsi_is_target_device(dev))
		starget_for_each_device(to_scsi_target(dev), NULL,
					device_block);
	else
		device_for_each_child(dev, NULL, target_block);
}
EXPORT_SYMBOL_GPL(scsi_target_block);

static void
device_unblock(struct scsi_device *sdev, void *data)
{
	scsi_internal_device_unblock(sdev, *(enum scsi_device_state *)data);
}

static int
target_unblock(struct device *dev, void *data)
{
	if (scsi_is_target_device(dev))
		starget_for_each_device(to_scsi_target(dev), data,
					device_unblock);
	return 0;
}

void
scsi_target_unblock(struct device *dev, enum scsi_device_state new_state)
{
	if (scsi_is_target_device(dev))
		starget_for_each_device(to_scsi_target(dev), &new_state,
					device_unblock);
	else
		device_for_each_child(dev, &new_state, target_unblock);
}
EXPORT_SYMBOL_GPL(scsi_target_unblock);

/**
 * scsi_kmap_atomic_sg - find and atomically map an sg-element
 * @sgl:	scatter-gather list
 * @sg_count:	number of segments in sg
 * @offset:	offset in bytes into sg, on return offset into the mapped area
 * @len:	bytes to map, on return number of bytes mapped
 *
 * Returns virtual address of the start of the mapped page
 */
void *scsi_kmap_atomic_sg(struct scatterlist *sgl, int sg_count,
			  size_t *offset, size_t *len)
{
	int i;
	size_t sg_len = 0, len_complete = 0;
	struct scatterlist *sg;
	struct page *page;

	WARN_ON(!irqs_disabled());

	for_each_sg(sgl, sg, sg_count, i) {
		len_complete = sg_len; /* Complete sg-entries */
		sg_len += sg->length;
		if (sg_len > *offset)
			break;
	}

	if (unlikely(i == sg_count)) {
		printk(KERN_ERR "%s: Bytes in sg: %zu, requested offset %zu, "
			"elements %d\n",
		       __func__, sg_len, *offset, sg_count);
		WARN_ON(1);
		return NULL;
	}

	/* Offset starting from the beginning of first page in this sg-entry */
	*offset = *offset - len_complete + sg->offset;

	/* Assumption: contiguous pages can be accessed as "page + i" */
	page = nth_page(sg_page(sg), (*offset >> PAGE_SHIFT));
	*offset &= ~PAGE_MASK;

	/* Bytes in this sg-entry from *offset to the end of the page */
	sg_len = PAGE_SIZE - *offset;
	if (*len > sg_len)
*len = sg_len; 3123 3124 return kmap_atomic(page); 3125} 3126EXPORT_SYMBOL(scsi_kmap_atomic_sg); 3127 3128/** 3129 * scsi_kunmap_atomic_sg - atomically unmap a virtual address, previously mapped with scsi_kmap_atomic_sg 3130 * @virt: virtual address to be unmapped 3131 */ 3132void scsi_kunmap_atomic_sg(void *virt) 3133{ 3134 kunmap_atomic(virt); 3135} 3136EXPORT_SYMBOL(scsi_kunmap_atomic_sg); 3137 3138void sdev_disable_disk_events(struct scsi_device *sdev) 3139{ 3140 atomic_inc(&sdev->disk_events_disable_depth); 3141} 3142EXPORT_SYMBOL(sdev_disable_disk_events); 3143 3144void sdev_enable_disk_events(struct scsi_device *sdev) 3145{ 3146 if (WARN_ON_ONCE(atomic_read(&sdev->disk_events_disable_depth) <= 0)) 3147 return; 3148 atomic_dec(&sdev->disk_events_disable_depth); 3149} 3150EXPORT_SYMBOL(sdev_enable_disk_events); 3151
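
/*
 * Usage note (illustrative, not part of the original file): the disk_events
 * disable count nests, so callers that need block layer media polling held
 * off around a sensitive operation pair the calls:
 *
 *	sdev_disable_disk_events(sdev);
 *	example_touch_the_media(sdev);		(hypothetical helper)
 *	sdev_enable_disk_events(sdev);
 *
 * An unbalanced enable trips the WARN_ON_ONCE() in sdev_enable_disk_events().
 */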