/linux-4.1.27/include/trace/events/
libata.h

  144  TP_PROTO(struct ata_queued_cmd *qc),
  146  TP_ARGS(qc),
  170  __entry->ata_port = qc->ap->print_id;
  171  __entry->ata_dev = qc->dev->link->pmp + qc->dev->devno;
  172  __entry->tag = qc->tag;
  173  __entry->proto = qc->tf.protocol;
  174  __entry->cmd = qc->tf.command;
  175  __entry->dev = qc->tf.device;
  176  __entry->lbal = qc->tf.lbal;
  177  __entry->lbam = qc->tf.lbam;
  178  __entry->lbah = qc->tf.lbah;
  179  __entry->hob_lbal = qc->tf.hob_lbal;
  180  __entry->hob_lbam = qc->tf.hob_lbam;
  181  __entry->hob_lbah = qc->tf.hob_lbah;
  182  __entry->feature = qc->tf.feature;
  183  __entry->hob_feature = qc->tf.hob_feature;
  184  __entry->nsect = qc->tf.nsect;
  185  __entry->hob_nsect = qc->tf.hob_nsect;

  202  TP_PROTO(struct ata_queued_cmd *qc),
  204  TP_ARGS(qc),
  227  __entry->ata_port = qc->ap->print_id;
  228  __entry->ata_dev = qc->dev->link->pmp + qc->dev->devno;
  229  __entry->tag = qc->tag;
  230  __entry->status = qc->result_tf.command;
  231  __entry->dev = qc->result_tf.device;
  232  __entry->lbal = qc->result_tf.lbal;
  233  __entry->lbam = qc->result_tf.lbam;
  234  __entry->lbah = qc->result_tf.lbah;
  235  __entry->hob_lbal = qc->result_tf.hob_lbal;
  236  __entry->hob_lbam = qc->result_tf.hob_lbam;
  237  __entry->hob_lbah = qc->result_tf.hob_lbah;
  238  __entry->error = qc->result_tf.feature;
  239  __entry->hob_feature = qc->result_tf.hob_feature;
  240  __entry->nsect = qc->result_tf.nsect;
  241  __entry->hob_nsect = qc->result_tf.hob_nsect;

  257  TP_PROTO(struct ata_queued_cmd *qc),
  258  TP_ARGS(qc));
  261  TP_PROTO(struct ata_queued_cmd *qc),
  262  TP_ARGS(qc));
  265  TP_PROTO(struct ata_queued_cmd *qc),
  266  TP_ARGS(qc));

  296  TP_PROTO(struct ata_queued_cmd *qc),
  298  TP_ARGS(qc),
  309  __entry->ata_port = qc->ap->print_id;
  310  __entry->ata_dev = qc->dev->link->pmp + qc->dev->devno;
  311  __entry->tag = qc->tag;
  312  __entry->qc_flags = qc->flags;
  313  __entry->eh_err_mask = qc->err_mask;
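These fragments are the TP_fast_assign bodies of the libata queued-command tracepoints; the search view strips the surrounding TRACE_EVENT() scaffolding. For orientation, a minimal sketch of how one such event is assembled with the standard tracepoint macros follows. The event name, field list, and TP_printk format below are an illustrative reconstruction, not a verbatim copy of this header:

/* Sketch only -- a real trace header also needs the TRACE_SYSTEM /
 * CREATE_TRACE_POINTS boilerplate; see include/trace/events/libata.h
 * for the authoritative definitions. */
#include <linux/tracepoint.h>

TRACE_EVENT(ata_qc_issue_sketch,

	TP_PROTO(struct ata_queued_cmd *qc),

	TP_ARGS(qc),

	TP_STRUCT__entry(
		__field(unsigned int,  ata_port)
		__field(unsigned int,  ata_dev)
		__field(unsigned int,  tag)
		__field(unsigned char, cmd)
		__field(unsigned char, proto)
	),

	TP_fast_assign(
		/* Same shape as the assignments in the excerpt above:
		 * snapshot the qc and its taskfile into the ring buffer. */
		__entry->ata_port = qc->ap->print_id;
		__entry->ata_dev  = qc->dev->link->pmp + qc->dev->devno;
		__entry->tag      = qc->tag;
		__entry->cmd      = qc->tf.command;
		__entry->proto    = qc->tf.protocol;
	),

	TP_printk("ata_port=%u ata_dev=%u tag=%u cmd=0x%02x proto=%u",
		  __entry->ata_port, __entry->ata_dev, __entry->tag,
		  __entry->cmd, __entry->proto)
);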
/linux-4.1.27/drivers/ata/
pdc_adma.c

  135  static void adma_qc_prep(struct ata_queued_cmd *qc);
  136  static unsigned int adma_qc_issue(struct ata_queued_cmd *qc);
  137  static int adma_check_atapi_dma(struct ata_queued_cmd *qc);
  188  static int adma_check_atapi_dma(struct ata_queued_cmd *qc)

  272  static int adma_fill_sg(struct ata_queued_cmd *qc)
  275  	struct ata_port *ap = qc->ap;
  279  	u8 pFLAGS = pORD | ((qc->tf.flags & ATA_TFLAG_WRITE) ? pDIRO : 0);
  282  	for_each_sg(qc->sg, sg, qc->n_elem, si) {
  296  	buf[i++] = qc->dev->dma_mode & 0xf;

  314  static void adma_qc_prep(struct ata_queued_cmd *qc)
  316  	struct adma_port_priv *pp = qc->ap->private_data;
  323  	adma_enter_reg_mode(qc->ap);
  324  	if (qc->tf.protocol != ATA_PROT_DMA)
  342  	buf[i++] = qc->tf.device;
  344  	if ((qc->tf.flags & ATA_TFLAG_LBA48)) {
  345  	buf[i++] = qc->tf.hob_nsect;
  347  	buf[i++] = qc->tf.hob_lbal;
  349  	buf[i++] = qc->tf.hob_lbam;
  351  	buf[i++] = qc->tf.hob_lbah;
  354  	buf[i++] = qc->tf.nsect;
  356  	buf[i++] = qc->tf.lbal;
  358  	buf[i++] = qc->tf.lbam;
  360  	buf[i++] = qc->tf.lbah;
  366  	buf[i++] = qc->tf.command;
  372  	i = adma_fill_sg(qc);

  392  static inline void adma_packet_start(struct ata_queued_cmd *qc)
  394  	struct ata_port *ap = qc->ap;

  403  static unsigned int adma_qc_issue(struct ata_queued_cmd *qc)
  405  	struct adma_port_priv *pp = qc->ap->private_data;
  407  	switch (qc->tf.protocol) {
  410  	adma_packet_start(qc);
  422  	return ata_sff_qc_issue(qc);

  432  	struct ata_queued_cmd *qc;		(in adma_intr_pkt())
  443  	qc = ata_qc_from_tag(ap, ap->link.active_tag);
  444  	if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING))) {
  446  	qc->err_mask |= AC_ERR_HOST_BUS;
  448  	qc->err_mask |= AC_ERR_OTHER;
  451  	qc->err_mask |= AC_ERR_DEV;
  453  	qc->err_mask |= AC_ERR_OTHER;
  455  	if (!qc->err_mask)
  456  	ata_qc_complete(qc);
  465  	if (qc->err_mask == AC_ERR_DEV)

  482  	struct ata_queued_cmd *qc;		(in adma_intr_mmio())
  486  	qc = ata_qc_from_tag(ap, ap->link.active_tag);
  487  	if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING))) {
  494  	ap->print_id, qc->tf.protocol, status);
  498  	qc->err_mask |= ac_err_mask(status);
  499  	if (!qc->err_mask)
  500  	ata_qc_complete(qc);
  506  	if (qc->err_mask == AC_ERR_DEV)
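The adma_fill_sg() loop above follows the pattern nearly every driver in this directory uses to build its DMA scatter/gather table: walk the mapped scatterlist and emit one hardware descriptor per segment. A generic, self-contained sketch of that pattern (struct my_prd and MY_PRD_EOT are made-up names; for_each_sg(), sg_dma_address(), and sg_dma_len() are the real scatterlist helpers):

#include <linux/scatterlist.h>
#include <linux/libata.h>

struct my_prd {				/* hypothetical hardware descriptor */
	__le32 addr;
	__le32 len_flags;
};
#define MY_PRD_EOT	(1u << 31)	/* hypothetical end-of-table flag */

static unsigned int my_fill_sg(struct ata_queued_cmd *qc, struct my_prd *prd)
{
	struct scatterlist *sg;
	unsigned int si, idx = 0;

	/* One descriptor per DMA-mapped segment of the qc's sglist. */
	for_each_sg(qc->sg, sg, qc->n_elem, si) {
		prd[idx].addr      = cpu_to_le32(sg_dma_address(sg));
		prd[idx].len_flags = cpu_to_le32(sg_dma_len(sg));
		idx++;
	}
	/* Controllers typically want the last entry flagged. */
	prd[idx - 1].len_flags |= cpu_to_le32(MY_PRD_EOT);
	return idx;
}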
sata_dwc_460ex.c

  177  #define HSDEV_FROM_QC(qc) ((struct sata_dwc_device *)\
  178  	(qc)->ap->host->private_data)
  211  static void sata_dwc_bmdma_start_by_tag(struct ata_queued_cmd *qc, u8 tag);
  212  static int sata_dwc_qc_complete(struct ata_port *ap, struct ata_queued_cmd *qc,

  303  static struct dma_async_tx_descriptor *dma_dwc_xfer_setup(struct ata_queued_cmd *qc)
  305  	struct ata_port *ap = qc->ap;
  312  	if (qc->dma_dir == DMA_DEV_TO_MEM) {
  320  	sconf.direction = qc->dma_dir;
  329  	desc = dmaengine_prep_slave_sg(hsdevp->chan, qc->sg, qc->n_elem,
  330  		qc->dma_dir,
  340  	__func__, qc->sg, qc->n_elem, &addr);

  409  	struct ata_queued_cmd *qc;		(in sata_dwc_error_intr())
  438  	qc = ata_qc_from_tag(ap, tag);
  439  	if (qc)
  440  	qc->err_mask |= err_mask;

  459  	struct ata_queued_cmd *qc;		(in sata_dwc_isr())
  496  	qc = ata_qc_from_tag(ap, tag);
  502  	qc->ap->link.active_tag = tag;
  503  	sata_dwc_bmdma_start_by_tag(qc, tag);
  517  	qc = ata_qc_from_tag(ap, tag);
  519  	/* DEV interrupt w/ no active qc? */
  520  	if (unlikely(!qc || (qc->tf.flags & ATA_TFLAG_POLLING))) {
  522  	"%s interrupt with no active qc qc=%p\n",
  523  	__func__, qc);
  530  	qc->ap->link.active_tag = tag;
  535  	sata_dwc_qc_complete(ap, qc, 1);
  541  	__func__, get_prot_descript(qc->tf.protocol));
  543  	if (ata_is_dma(qc->tf.protocol)) {
  561  	} else if (ata_is_pio(qc->tf.protocol)) {
  562  	ata_sff_hsm_move(ap, qc, status, 0);
  566  	if (unlikely(sata_dwc_qc_complete(ap, qc, 1)))
  614  	qc = ata_qc_from_tag(ap, tag);
  617  	qc->ap->link.active_tag = tag;
  624  	sata_dwc_qc_complete(ap, qc, 1);
  631  	get_prot_descript(qc->tf.protocol));
  632  	if (ata_is_dma(qc->tf.protocol)) {
  641  	if (unlikely(sata_dwc_qc_complete(ap, qc, 1)))

  700  	struct ata_queued_cmd *qc;		(in sata_dwc_dma_xfer_complete())
  706  	qc = ata_qc_from_tag(ap, tag);
  707  	if (!qc) {
  708  	dev_err(ap->dev, "failed to get qc");
  716  	__func__, qc->tag, qc->tf.command,
  717  	get_dma_dir_descript(qc->dma_dir),
  718  	get_prot_descript(qc->tf.protocol),
  723  	if (ata_is_dma(qc->tf.protocol)) {
  732  	sata_dwc_qc_complete(ap, qc, check_status);
  735  	sata_dwc_qc_complete(ap, qc, check_status);

  739  static int sata_dwc_qc_complete(struct ata_port *ap, struct ata_queued_cmd *qc,
  744  	u8 tag = qc->tag;
  755  	qc->tf.command, status, ap->print_id, qc->tf.protocol);
  763  	ata_qc_complete(qc);

  946  static void sata_dwc_bmdma_setup_by_tag(struct ata_queued_cmd *qc, u8 tag)
  948  	sata_dwc_exec_command_by_tag(qc->ap, &qc->tf, tag,

  952  static void sata_dwc_bmdma_setup(struct ata_queued_cmd *qc)
  954  	u8 tag = qc->tag;
  956  	if (ata_is_ncq(qc->tf.protocol)) {
  957  	dev_dbg(qc->ap->dev, "%s: ap->link.sactive=0x%08x tag=%d\n",
  958  		__func__, qc->ap->link.sactive, tag);
  962  	sata_dwc_bmdma_setup_by_tag(qc, tag);

  965  static void sata_dwc_bmdma_start_by_tag(struct ata_queued_cmd *qc, u8 tag)
  969  	struct sata_dwc_device *hsdev = HSDEV_FROM_QC(qc);
  970  	struct ata_port *ap = qc->ap;
  973  	int dir = qc->dma_dir;
  989  	"%s qc=%p tag: %x cmd: 0x%02x dma_dir: %s start_dma? %x\n",
  990  	__func__, qc, tag, qc->tf.command,
  991  	get_dma_dir_descript(qc->dma_dir), start_dma);
  992  	sata_dwc_tf_dump(ap, &qc->tf);

 1014  static void sata_dwc_bmdma_start(struct ata_queued_cmd *qc)
 1016  	u8 tag = qc->tag;
 1018  	if (ata_is_ncq(qc->tf.protocol)) {
 1019  	dev_dbg(qc->ap->dev, "%s: ap->link.sactive=0x%08x tag=%d\n",
 1020  		__func__, qc->ap->link.sactive, tag);
 1024  	dev_dbg(qc->ap->dev, "%s\n", __func__);
 1025  	sata_dwc_bmdma_start_by_tag(qc, tag);

 1030  * arguments : ata_queued_cmd *qc, u8 tag
 1034  static void sata_dwc_qc_prep_by_tag(struct ata_queued_cmd *qc, u8 tag)
 1037  	struct ata_port *ap = qc->ap;
 1041  	__func__, ap->port_no, get_dma_dir_descript(qc->dma_dir),
 1042  	qc->n_elem);
 1044  	desc = dma_dwc_xfer_setup(qc);

 1053  static unsigned int sata_dwc_qc_issue(struct ata_queued_cmd *qc)
 1056  	u8 tag = qc->tag;
 1057  	struct ata_port *ap = qc->ap;
 1060  	if (qc->tag > 0 || ap->link.sactive > 1)
 1062  	"%s ap id=%d cmd(0x%02x)=%s qc tag=%d prot=%s ap active_tag=0x%08x ap sactive=0x%08x\n",
 1063  	__func__, ap->print_id, qc->tf.command,
 1064  	ata_get_cmd_descript(qc->tf.command),
 1065  	qc->tag, get_prot_descript(qc->tf.protocol),
 1069  	if (!ata_is_ncq(qc->tf.protocol))
 1071  	sata_dwc_qc_prep_by_tag(qc, tag);
 1073  	if (ata_is_ncq(qc->tf.protocol)) {
 1078  	dev_dbg(qc->ap->dev,
 1080  	__func__, tag, qc->ap->link.sactive, sactive);
 1082  	ap->ops->sff_tf_load(ap, &qc->tf);
 1083  	sata_dwc_exec_command_by_tag(ap, &qc->tf, qc->tag,
 1086  	ata_sff_qc_issue(qc);

 1093  * arguments : ata_queued_cmd *qc
 1098  static void sata_dwc_qc_prep(struct ata_queued_cmd *qc)
 1100  	if ((qc->dma_dir == DMA_NONE) || (qc->tf.protocol == ATA_PROT_PIO))
 1104  	if (qc->tag > 0)
 1105  	dev_info(qc->ap->dev, "%s: qc->tag=%d ap->active_tag=0x%08x\n",
 1106  		__func__, qc->tag, qc->ap->link.active_tag);
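dma_dwc_xfer_setup() above hands the qc's scatterlist to a generic dmaengine channel instead of programming a private DMA engine. A minimal sketch of that slave-DMA pattern, under the assumption that the caller owns a channel and knows the controller's FIFO address (both placeholders here); dmaengine_slave_config() and dmaengine_prep_slave_sg() are the standard dmaengine API:

#include <linux/dmaengine.h>
#include <linux/libata.h>

static struct dma_async_tx_descriptor *
my_xfer_setup(struct dma_chan *chan, struct ata_queued_cmd *qc,
	      dma_addr_t fifo_addr, enum dma_transfer_direction dir)
{
	struct dma_slave_config sconf = { };

	/* Point the device side of the transfer at the data FIFO. */
	if (dir == DMA_DEV_TO_MEM)
		sconf.src_addr = fifo_addr;
	else
		sconf.dst_addr = fifo_addr;
	sconf.direction = dir;

	if (dmaengine_slave_config(chan, &sconf))
		return NULL;

	/* Map the qc's scatterlist onto the channel; the caller later
	 * sets a completion callback, submits the descriptor, and
	 * issues it when the ATA command is started. */
	return dmaengine_prep_slave_sg(chan, qc->sg, qc->n_elem, dir,
				       DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
}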
libata-sff.c

  691  * @qc: Command on going
  693  * Transfer qc->sect_size bytes of data from/to the ATA device.
  698  static void ata_pio_sector(struct ata_queued_cmd *qc)
  700  	int do_write = (qc->tf.flags & ATA_TFLAG_WRITE);
  701  	struct ata_port *ap = qc->ap;
  706  	if (qc->curbytes == qc->nbytes - qc->sect_size)
  709  	page = sg_page(qc->cursg);
  710  	offset = qc->cursg->offset + qc->cursg_ofs;
  716  	DPRINTK("data %s\n", qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");
  726  	ap->ops->sff_data_xfer(qc->dev, buf + offset, qc->sect_size,
  733  	ap->ops->sff_data_xfer(qc->dev, buf + offset, qc->sect_size,
  740  	qc->curbytes += qc->sect_size;
  741  	qc->cursg_ofs += qc->sect_size;
  743  	if (qc->cursg_ofs == qc->cursg->length) {
  744  	qc->cursg = sg_next(qc->cursg);
  745  	qc->cursg_ofs = 0;

  751  * @qc: Command on going
  759  static void ata_pio_sectors(struct ata_queued_cmd *qc)
  761  	if (is_multi_taskfile(&qc->tf)) {
  765  	WARN_ON_ONCE(qc->dev->multi_count == 0);
  767  	nsect = min((qc->nbytes - qc->curbytes) / qc->sect_size,
  768  		qc->dev->multi_count);
  770  	ata_pio_sector(qc);
  772  	ata_pio_sector(qc);
  774  	ata_sff_sync(qc->ap); /* flush */

  780  * @qc: Taskfile currently active
  788  static void atapi_send_cdb(struct ata_port *ap, struct ata_queued_cmd *qc)
  792  	WARN_ON_ONCE(qc->dev->cdb_len < 12);
  794  	ap->ops->sff_data_xfer(qc->dev, qc->cdb, qc->dev->cdb_len, 1);
  798  	switch (qc->tf.protocol) {
  809  	ap->ops->bmdma_start(qc);

  819  * @qc: Command on going
  828  static int __atapi_pio_bytes(struct ata_queued_cmd *qc, unsigned int bytes)
  830  	int rw = (qc->tf.flags & ATA_TFLAG_WRITE) ? WRITE : READ;
  831  	struct ata_port *ap = qc->ap;
  832  	struct ata_device *dev = qc->dev;
  840  	sg = qc->cursg;
  844  	qc->nbytes, qc->curbytes, bytes);
  849  	offset = sg->offset + qc->cursg_ofs;
  856  	count = min(sg->length - qc->cursg_ofs, bytes);
  861  	DPRINTK("data %s\n", qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");
  883  	qc->curbytes += count;
  884  	qc->cursg_ofs += count;
  886  	if (qc->cursg_ofs == sg->length) {
  887  	qc->cursg = sg_next(qc->cursg);
  888  	qc->cursg_ofs = 0;
  892  	* There used to be a WARN_ON_ONCE(qc->cursg && count != consumed);

  904  * @qc: Command on going
  911  static void atapi_pio_bytes(struct ata_queued_cmd *qc)
  913  	struct ata_port *ap = qc->ap;
  914  	struct ata_device *dev = qc->dev;
  917  	int i_write, do_write = (qc->tf.flags & ATA_TFLAG_WRITE) ? 1 : 0;
  919  	/* Abuse qc->result_tf for temp storage of intermediate TF
  921  	* For normal completion, qc->result_tf is not relevant. For
  922  	* error, qc->result_tf is later overwritten by ata_qc_complete().
  923  	* So, the correctness of qc->result_tf is not affected.
  925  	ap->ops->sff_tf_read(ap, &qc->result_tf);
  926  	ireason = qc->result_tf.nsect;
  927  	bc_lo = qc->result_tf.lbam;
  928  	bc_hi = qc->result_tf.lbah;
  945  	if (unlikely(__atapi_pio_bytes(qc, bytes)))
  955  	qc->err_mask |= AC_ERR_HSM;

  960  * ata_hsm_ok_in_wq - Check if the qc can be handled in the workqueue.
  962  * @qc: qc on going
  967  ata_hsm_ok_in_wq(struct ata_port *ap,
  968  	struct ata_queued_cmd *qc)
  970  	if (qc->tf.flags & ATA_TFLAG_POLLING)
  974  	if (qc->tf.protocol == ATA_PROT_PIO &&
  975  	(qc->tf.flags & ATA_TFLAG_WRITE))
  978  	if (ata_is_atapi(qc->tf.protocol) &&
  979  	!(qc->dev->flags & ATA_DFLAG_CDB_INTR))

  987  * ata_hsm_qc_complete - finish a qc running on standard HSM
  988  * @qc: Command to complete
  991  * Finish @qc which is running on standard HSM.
  997  static void ata_hsm_qc_complete(struct ata_queued_cmd *qc, int in_wq)
  999  	struct ata_port *ap = qc->ap;
 1006  	qc = ata_qc_from_tag(ap, qc->tag);
 1007  	if (qc) {
 1008  	if (likely(!(qc->err_mask & AC_ERR_HSM))) {
 1010  	ata_qc_complete(qc);
 1015  	if (likely(!(qc->err_mask & AC_ERR_HSM)))
 1016  	ata_qc_complete(qc);
 1023  	ata_qc_complete(qc);
 1025  	ata_qc_complete(qc);

 1032  * @qc: qc on going
 1039  int ata_sff_hsm_move(struct ata_port *ap, struct ata_queued_cmd *qc,
 1042  	struct ata_link *link = qc->dev->link;
 1048  	WARN_ON_ONCE((qc->flags & ATA_QCFLAG_ACTIVE) == 0);
 1052  	* in_wq is not equivalent to (qc->tf.flags & ATA_TFLAG_POLLING).
 1054  	WARN_ON_ONCE(in_wq != ata_hsm_ok_in_wq(ap, qc));
 1058  	ap->print_id, qc->tf.protocol, ap->hsm_task_state, status);
 1068  	poll_next = (qc->tf.flags & ATA_TFLAG_POLLING);
 1075  	qc->err_mask |= AC_ERR_DEV;
 1080  	qc->err_mask |= AC_ERR_HSM;
 1099  	if (!(qc->dev->horkage & ATA_HORKAGE_STUCK_ERR)) {
 1103  	qc->err_mask |= AC_ERR_HSM;
 1109  	if (qc->tf.protocol == ATA_PROT_PIO) {
 1119  	ata_pio_sectors(qc);
 1122  	atapi_send_cdb(ap, qc);
 1131  	if (qc->tf.protocol == ATAPI_PROT_PIO) {
 1151  	qc->err_mask |= AC_ERR_HSM;
 1156  	atapi_pio_bytes(qc);
 1168  	qc->err_mask |= AC_ERR_DEV;
 1174  	if (qc->dev->horkage &
 1176  	qc->err_mask |=
 1186  	qc->err_mask |= AC_ERR_HSM |
 1206  	qc->err_mask |= AC_ERR_DEV;
 1208  	if (!(qc->tf.flags & ATA_TFLAG_WRITE)) {
 1209  	ata_pio_sectors(qc);
 1217  	qc->err_mask |= AC_ERR_HSM;
 1228  	qc->err_mask |= AC_ERR_NODEV_HINT;
 1238  	ata_pio_sectors(qc);
 1241  	(!(qc->tf.flags & ATA_TFLAG_WRITE))) {
 1253  	qc->err_mask |= __ac_err_mask(status);
 1260  	ap->print_id, qc->dev->devno, status);
 1262  	WARN_ON_ONCE(qc->err_mask & (AC_ERR_DEV | AC_ERR_HSM));
 1267  	ata_hsm_qc_complete(qc, in_wq);
 1276  	ata_hsm_qc_complete(qc, in_wq);

 1343  	struct ata_queued_cmd *qc;		(in ata_sff_pio_task())
 1350  	/* qc can be NULL if timeout occurred */
 1351  	qc = ata_qc_from_tag(ap, link->active_tag);
 1352  	if (!qc) {
 1386  	poll_next = ata_sff_hsm_move(ap, qc, status, 1);

 1399  * @qc: command to issue to device
 1410  unsigned int ata_sff_qc_issue(struct ata_queued_cmd *qc)
 1412  	struct ata_port *ap = qc->ap;
 1413  	struct ata_link *link = qc->dev->link;
 1419  	qc->tf.flags |= ATA_TFLAG_POLLING;
 1422  	ata_dev_select(ap, qc->dev->devno, 1, 0);
 1425  	switch (qc->tf.protocol) {
 1427  	if (qc->tf.flags & ATA_TFLAG_POLLING)
 1428  	ata_qc_set_polling(qc);
 1430  	ata_tf_to_host(ap, &qc->tf);
 1433  	if (qc->tf.flags & ATA_TFLAG_POLLING)
 1439  	if (qc->tf.flags & ATA_TFLAG_POLLING)
 1440  	ata_qc_set_polling(qc);
 1442  	ata_tf_to_host(ap, &qc->tf);
 1444  	if (qc->tf.flags & ATA_TFLAG_WRITE) {
 1456  	if (qc->tf.flags & ATA_TFLAG_POLLING)
 1469  	if (qc->tf.flags & ATA_TFLAG_POLLING)
 1470  	ata_qc_set_polling(qc);
 1472  	ata_tf_to_host(ap, &qc->tf);
 1477  	if ((!(qc->dev->flags & ATA_DFLAG_CDB_INTR)) ||
 1478  	(qc->tf.flags & ATA_TFLAG_POLLING))

 1493  * @qc: qc to fill result TF for
 1495  * @qc is finished and result TF needs to be filled. Fill it
 1504  bool ata_sff_qc_fill_rtf(struct ata_queued_cmd *qc)
 1506  	qc->ap->ops->sff_tf_read(qc->ap, &qc->result_tf);

 1527  __ata_sff_port_intr(struct ata_port *ap,
 1528  	struct ata_queued_cmd *qc, bool hsmv_on_idle)
 1534  	ap->print_id, qc->tf.protocol, ap->hsm_task_state);
 1545  	* need to check ata_is_atapi(qc->tf.protocol) again.
 1547  	if (!(qc->dev->flags & ATA_DFLAG_CDB_INTR))
 1561  	qc->err_mask |= AC_ERR_HSM;
 1571  	ata_sff_hsm_move(ap, qc, status, 0);

 1579  * @qc: Taskfile currently active in engine
 1589  unsigned int ata_sff_port_intr(struct ata_port *ap, struct ata_queued_cmd *qc)
 1591  	return __ata_sff_port_intr(ap, qc, false);

 1611  	struct ata_queued_cmd *qc;		(in __ata_sff_interrupt())
 1613  	qc = ata_qc_from_tag(ap, ap->link.active_tag);
 1614  	if (qc) {
 1615  	if (!(qc->tf.flags & ATA_TFLAG_POLLING))
 1616  	handled |= port_intr(ap, qc);

 1703  	struct ata_queued_cmd *qc;		(in ata_sff_lost_interrupt())
 1706  	qc = ata_qc_from_tag(ap, ap->link.active_tag);
 1708  	if (!qc || qc->tf.flags & ATA_TFLAG_POLLING)
 1722  	ata_sff_port_intr(ap, qc);

 2155  * @qc: command
 2163  void ata_sff_drain_fifo(struct ata_queued_cmd *qc)
 2169  	if (qc == NULL || qc->dma_dir == DMA_TO_DEVICE)
 2172  	ap = qc->ap;

 2201  	struct ata_queued_cmd *qc;		(in ata_sff_error_handler())
 2204  	qc = __ata_qc_from_tag(ap, ap->link.active_tag);
 2205  	if (qc && !(qc->flags & ATA_QCFLAG_FAILED))
 2206  	qc = NULL;
 2214  	* qc in case anyone wants to do different PIO/DMA recovery or
 2218  	ap->ops->sff_drain_fifo(qc);

 2623  * @qc: Metadata associated with taskfile to be transferred
 2632  static void ata_bmdma_fill_sg(struct ata_queued_cmd *qc)
 2634  	struct ata_port *ap = qc->ap;
 2640  	for_each_sg(qc->sg, sg, qc->n_elem, si) {

 2672  * @qc: Metadata associated with taskfile to be transferred
 2683  static void ata_bmdma_fill_sg_dumb(struct ata_queued_cmd *qc)
 2685  	struct ata_port *ap = qc->ap;
 2691  	for_each_sg(qc->sg, sg, qc->n_elem, si) {

 2732  * @qc: Metadata associated with taskfile to be prepared
 2739  void ata_bmdma_qc_prep(struct ata_queued_cmd *qc)
 2741  	if (!(qc->flags & ATA_QCFLAG_DMAMAP))
 2744  	ata_bmdma_fill_sg(qc);

 2750  * @qc: Metadata associated with taskfile to be prepared
 2757  void ata_bmdma_dumb_qc_prep(struct ata_queued_cmd *qc)
 2759  	if (!(qc->flags & ATA_QCFLAG_DMAMAP))
 2762  	ata_bmdma_fill_sg_dumb(qc);

 2768  * @qc: command to issue to device
 2780  unsigned int ata_bmdma_qc_issue(struct ata_queued_cmd *qc)
 2782  	struct ata_port *ap = qc->ap;
 2783  	struct ata_link *link = qc->dev->link;
 2786  	if (!ata_is_dma(qc->tf.protocol))
 2787  	return ata_sff_qc_issue(qc);
 2790  	ata_dev_select(ap, qc->dev->devno, 1, 0);
 2793  	switch (qc->tf.protocol) {
 2795  	WARN_ON_ONCE(qc->tf.flags & ATA_TFLAG_POLLING);
 2797  	ap->ops->sff_tf_load(ap, &qc->tf);	/* load tf registers */
 2798  	ap->ops->bmdma_setup(qc);		/* set up bmdma */
 2799  	ap->ops->bmdma_start(qc);		/* initiate bmdma */
 2804  	WARN_ON_ONCE(qc->tf.flags & ATA_TFLAG_POLLING);
 2806  	ap->ops->sff_tf_load(ap, &qc->tf);	/* load tf registers */
 2807  	ap->ops->bmdma_setup(qc);		/* set up bmdma */
 2811  	if (!(qc->dev->flags & ATA_DFLAG_CDB_INTR))

 2827  * @qc: Taskfile currently active in engine
 2837  unsigned int ata_bmdma_port_intr(struct ata_port *ap, struct ata_queued_cmd *qc)
 2844  	if (ap->hsm_task_state == HSM_ST_LAST && ata_is_dma(qc->tf.protocol)) {
 2854  	ap->ops->bmdma_stop(qc);
 2859  	qc->err_mask |= AC_ERR_HOST_BUS;
 2864  	handled = __ata_sff_port_intr(ap, qc, bmdma_stopped);
 2866  	if (unlikely(qc->err_mask) && ata_is_dma(qc->tf.protocol))

 2907  	struct ata_queued_cmd *qc;		(in ata_bmdma_error_handler())
 2911  	qc = __ata_qc_from_tag(ap, ap->link.active_tag);
 2912  	if (qc && !(qc->flags & ATA_QCFLAG_FAILED))
 2913  	qc = NULL;
 2918  	if (qc && ata_is_dma(qc->tf.protocol)) {
 2928  	if (qc->err_mask == AC_ERR_TIMEOUT && (host_stat & ATA_DMA_ERR)) {
 2929  	qc->err_mask = AC_ERR_HOST_BUS;
 2933  	ap->ops->bmdma_stop(qc);

 2954  * @qc: internal command to clean up
 2959  void ata_bmdma_post_internal_cmd(struct ata_queued_cmd *qc)
 2961  	struct ata_port *ap = qc->ap;
 2964  	if (ata_is_dma(qc->tf.protocol)) {
 2966  	ap->ops->bmdma_stop(qc);

 2996  * @qc: Info associated with this ATA transaction.
 3001  void ata_bmdma_setup(struct ata_queued_cmd *qc)
 3003  	struct ata_port *ap = qc->ap;
 3004  	unsigned int rw = (qc->tf.flags & ATA_TFLAG_WRITE);
 3019  	ap->ops->sff_exec_command(ap, &qc->tf);

 3025  * @qc: Info associated with this ATA transaction.
 3030  void ata_bmdma_start(struct ata_queued_cmd *qc)
 3032  	struct ata_port *ap = qc->ap;

 3058  * @qc: Command we are ending DMA for
 3067  void ata_bmdma_stop(struct ata_queued_cmd *qc)
 3069  	struct ata_port *ap = qc->ap;
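ata_pio_sector() above is the heart of SFF PIO: map the current scatterlist page, push or pull one sector through the data register, then advance the transfer cursor. A condensed sketch of that bookkeeping (highmem page-splitting, the write flush, and error handling are elided here; see the full function for the real thing):

static void pio_one_sector_sketch(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	int do_write = (qc->tf.flags & ATA_TFLAG_WRITE);
	struct page *page = sg_page(qc->cursg);
	unsigned int offset = qc->cursg->offset + qc->cursg_ofs;
	unsigned char *buf;

	/* Map the page and move one sector through the data register. */
	buf = kmap_atomic(page);
	ap->ops->sff_data_xfer(qc->dev, buf + offset, qc->sect_size,
			       do_write);
	kunmap_atomic(buf);

	/* Advance the cursor; step to the next sg entry once the
	 * current one is exhausted. */
	qc->curbytes  += qc->sect_size;
	qc->cursg_ofs += qc->sect_size;
	if (qc->cursg_ofs == qc->cursg->length) {
		qc->cursg = sg_next(qc->cursg);
		qc->cursg_ofs = 0;
	}
}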
sata_sx4.c

  209  	struct ata_queued_cmd *qc;		(member of an anonymous struct in pdc_host_priv)
  221  static void pdc20621_qc_prep(struct ata_queued_cmd *qc);
  237  static unsigned int pdc20621_qc_issue(struct ata_queued_cmd *qc);
  240  static void pdc_post_internal_cmd(struct ata_queued_cmd *qc);
  241  static int pdc_check_atapi_dma(struct ata_queued_cmd *qc);

  453  static void pdc20621_dma_prep(struct ata_queued_cmd *qc)
  456  	struct ata_port *ap = qc->ap;
  464  	WARN_ON(!(qc->flags & ATA_QCFLAG_DMAMAP));
  475  	for_each_sg(qc->sg, sg, qc->n_elem, si) {
  487  	pdc20621_host_pkt(&qc->tf, &pp->dimm_buf[0], portno);
  490  	i = pdc20621_ata_pkt(&qc->tf, qc->dev->devno, &pp->dimm_buf[0], portno);
  492  	if (qc->tf.flags & ATA_TFLAG_LBA48)
  493  	i = pdc_prep_lba48(&qc->tf, &pp->dimm_buf[0], i);
  495  	i = pdc_prep_lba28(&qc->tf, &pp->dimm_buf[0], i);
  497  	pdc_pkt_footer(&qc->tf, &pp->dimm_buf[0], i);

  514  static void pdc20621_nodata_prep(struct ata_queued_cmd *qc)
  516  	struct ata_port *ap = qc->ap;
  528  	i = pdc20621_ata_pkt(&qc->tf, qc->dev->devno, &pp->dimm_buf[0], portno);
  530  	if (qc->tf.flags & ATA_TFLAG_LBA48)
  531  	i = pdc_prep_lba48(&qc->tf, &pp->dimm_buf[0], i);
  533  	i = pdc_prep_lba28(&qc->tf, &pp->dimm_buf[0], i);
  535  	pdc_pkt_footer(&qc->tf, &pp->dimm_buf[0], i);

  549  static void pdc20621_qc_prep(struct ata_queued_cmd *qc)
  551  	switch (qc->tf.protocol) {
  553  	pdc20621_dma_prep(qc);
  556  	pdc20621_nodata_prep(qc);

  563  static void __pdc20621_push_hdma(struct ata_queued_cmd *qc,
  567  	struct ata_port *ap = qc->ap;

  581  static void pdc20621_push_hdma(struct ata_queued_cmd *qc,
  585  	struct ata_port *ap = qc->ap;
  590  	__pdc20621_push_hdma(qc, seq, pkt_ofs);
  595  	pp->hdma[idx].qc = qc;

  601  static void pdc20621_pop_hdma(struct ata_queued_cmd *qc)
  603  	struct ata_port *ap = qc->ap;
  613  	__pdc20621_push_hdma(pp->hdma[idx].qc, pp->hdma[idx].seq,

  619  static void pdc20621_dump_hdma(struct ata_queued_cmd *qc)
  621  	struct ata_port *ap = qc->ap;
  634  static inline void pdc20621_dump_hdma(struct ata_queued_cmd *qc) { }

  637  static void pdc20621_packet_start(struct ata_queued_cmd *qc)
  639  	struct ata_port *ap = qc->ap;
  643  	unsigned int rw = (qc->tf.flags & ATA_TFLAG_WRITE);
  657  	if (rw && qc->tf.protocol == ATA_PROT_DMA) {
  660  	pdc20621_dump_hdma(qc);
  661  	pdc20621_push_hdma(qc, seq, port_ofs + PDC_DIMM_HOST_PKT);

  680  static unsigned int pdc20621_qc_issue(struct ata_queued_cmd *qc)
  682  	switch (qc->tf.protocol) {
  684  	if (qc->tf.flags & ATA_TFLAG_POLLING)
  688  	pdc20621_packet_start(qc);
  699  	return ata_sff_qc_issue(qc);

  702  pdc20621_host_intr(struct ata_port *ap, struct ata_queued_cmd *qc,
  703  	unsigned int doing_hdma, void __iomem *mmio)
  715  	if ((qc->tf.protocol == ATA_PROT_DMA) &&	/* read */
  716  	(!(qc->tf.flags & ATA_TFLAG_WRITE))) {
  723  	qc->err_mask |= ac_err_mask(ata_wait_idle(ap));
  724  	ata_qc_complete(qc);
  725  	pdc20621_pop_hdma(qc);
  735  	pdc20621_dump_hdma(qc);
  736  	pdc20621_push_hdma(qc, seq,
  741  	} else if (qc->tf.protocol == ATA_PROT_DMA) {	/* write */
  762  	qc->err_mask |= ac_err_mask(ata_wait_idle(ap));
  763  	ata_qc_complete(qc);
  764  	pdc20621_pop_hdma(qc);
  769  	} else if (qc->tf.protocol == ATA_PROT_NODATA) {
  773  	qc->err_mask |= ac_err_mask(status);
  774  	ata_qc_complete(qc);

  835  	struct ata_queued_cmd *qc;		(in pdc20621_interrupt())
  837  	qc = ata_qc_from_tag(ap, ap->link.active_tag);
  838  	if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING)))
  839  	handled += pdc20621_host_intr(ap, qc, (i > 4),

  923  static void pdc_post_internal_cmd(struct ata_queued_cmd *qc)
  925  	struct ata_port *ap = qc->ap;
  928  	if (qc->flags & ATA_QCFLAG_FAILED)

  932  static int pdc_check_atapi_dma(struct ata_queued_cmd *qc)
  934  	u8 *scsicmd = qc->scsicmd->cmnd;
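pdc20621_push_hdma()/pdc20621_pop_hdma() above implement a small ring buffer of deferred host-DMA packets: push queues a (qc, seq, pkt_ofs) triple when the engine is busy, and pop replays the oldest one when it goes idle. A self-contained model of that queue (the names and the fixed capacity here are illustrative, not the driver's):

#define HDMA_RING	32		/* illustrative capacity */

struct hdma_slot {
	void *qc;			/* stands in for the queued command */
	unsigned int seq, pkt_ofs;
};

struct hdma_ring {
	struct hdma_slot slot[HDMA_RING];
	unsigned int head, tail;	/* free-running indices */
};

static void hdma_push(struct hdma_ring *r, void *qc,
		      unsigned int seq, unsigned int pkt_ofs)
{
	/* Bounded in practice by the number of in-flight commands. */
	unsigned int idx = r->tail++ % HDMA_RING;

	r->slot[idx].qc = qc;
	r->slot[idx].seq = seq;
	r->slot[idx].pkt_ofs = pkt_ofs;
}

static struct hdma_slot *hdma_pop(struct hdma_ring *r)
{
	if (r->head == r->tail)
		return 0;		/* queue empty, engine stays idle */
	return &r->slot[r->head++ % HDMA_RING];
}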
sata_inic162x.c

  379  	struct ata_queued_cmd *qc = ata_qc_from_tag(ap, ap->link.active_tag);	(in inic_host_intr())
  391  	if (unlikely(!qc))
  402  	qc->err_mask |= AC_ERR_DEV;
  404  	ata_qc_complete(qc);
  410  	qc ? qc->tf.command : 0xff, irq_stat, idma_stat);

  439  static int inic_check_atapi_dma(struct ata_queued_cmd *qc)
  447  	if (atapi_cmd_type(qc->cdb[0]) == READ)

  452  static void inic_fill_sg(struct inic_prd *prd, struct ata_queued_cmd *qc)
  458  	if (qc->tf.flags & ATA_TFLAG_WRITE)
  461  	if (ata_is_dma(qc->tf.protocol))
  464  	for_each_sg(qc->sg, sg, qc->n_elem, si) {

  475  static void inic_qc_prep(struct ata_queued_cmd *qc)
  477  	struct inic_port_priv *pp = qc->ap->private_data;
  481  	bool is_atapi = ata_is_atapi(qc->tf.protocol);
  482  	bool is_data = ata_is_data(qc->tf.protocol);
  488  	cdb_len = qc->dev->cdb_len;
  497  	cpb->len = cpu_to_le32(qc->nbytes + cdb_len);
  500  	cpb->device = qc->tf.device;
  501  	cpb->feature = qc->tf.feature;
  502  	cpb->nsect = qc->tf.nsect;
  503  	cpb->lbal = qc->tf.lbal;
  504  	cpb->lbam = qc->tf.lbam;
  505  	cpb->lbah = qc->tf.lbah;
  507  	if (qc->tf.flags & ATA_TFLAG_LBA48) {
  508  	cpb->hob_feature = qc->tf.hob_feature;
  509  	cpb->hob_nsect = qc->tf.hob_nsect;
  510  	cpb->hob_lbal = qc->tf.hob_lbal;
  511  	cpb->hob_lbam = qc->tf.hob_lbam;
  512  	cpb->hob_lbah = qc->tf.hob_lbah;
  515  	cpb->command = qc->tf.command;
  520  	memcpy(pkt->cdb, qc->cdb, ATAPI_CDB_LEN);
  532  	inic_fill_sg(prd, qc);

  537  static unsigned int inic_qc_issue(struct ata_queued_cmd *qc)
  539  	struct ata_port *ap = qc->ap;

  563  static bool inic_qc_fill_rtf(struct ata_queued_cmd *qc)
  565  	struct ata_taskfile *rtf = &qc->result_tf;
  575  	inic_tf_read(qc->ap, &tf);

  666  static void inic_post_internal_cmd(struct ata_queued_cmd *qc)
  669  	if (qc->flags & ATA_QCFLAG_FAILED)
  670  	inic_reset_port(inic_port_base(qc->ap));
sata_qstor.c

  119  static void qs_qc_prep(struct ata_queued_cmd *qc);
  120  static unsigned int qs_qc_issue(struct ata_queued_cmd *qc);
  121  static int qs_check_atapi_dma(struct ata_queued_cmd *qc);
  182  static int qs_check_atapi_dma(struct ata_queued_cmd *qc)

  252  static unsigned int qs_fill_sg(struct ata_queued_cmd *qc)
  255  	struct ata_port *ap = qc->ap;
  260  	for_each_sg(qc->sg, sg, qc->n_elem, si) {

  279  static void qs_qc_prep(struct ata_queued_cmd *qc)
  281  	struct qs_port_priv *pp = qc->ap->private_data;
  289  	qs_enter_reg_mode(qc->ap);
  290  	if (qc->tf.protocol != ATA_PROT_DMA)
  293  	nelem = qs_fill_sg(qc);
  295  	if ((qc->tf.flags & ATA_TFLAG_WRITE))
  297  	if ((qc->tf.flags & ATA_TFLAG_LBA48))
  303  	*(__le32 *)(&buf[ 4]) = cpu_to_le32(qc->nbytes);
  313  	ata_tf_to_fis(&qc->tf, 0, 1, &buf[32]);

  316  static inline void qs_packet_start(struct ata_queued_cmd *qc)
  318  	struct ata_port *ap = qc->ap;

  329  static unsigned int qs_qc_issue(struct ata_queued_cmd *qc)
  331  	struct qs_port_priv *pp = qc->ap->private_data;
  333  	switch (qc->tf.protocol) {
  336  	qs_packet_start(qc);
  348  	return ata_sff_qc_issue(qc);

  351  static void qs_do_or_die(struct ata_queued_cmd *qc, u8 status)
  353  	qc->err_mask |= ac_err_mask(status);
  355  	if (!qc->err_mask) {
  356  	ata_qc_complete(qc);
  358  	struct ata_port *ap = qc->ap;
  364  	if (qc->err_mask == AC_ERR_DEV)

  389  	struct ata_queued_cmd *qc;		(in qs_intr_pkt())
  396  	qc = ata_qc_from_tag(ap, ap->link.active_tag);
  397  	if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING))) {
  401  	qs_enter_reg_mode(qc->ap);
  402  	qs_do_or_die(qc, sDST);

  420  	struct ata_queued_cmd *qc;		(in qs_intr_mmio())
  422  	qc = ata_qc_from_tag(ap, ap->link.active_tag);
  423  	if (!qc) {
  440  	if (!(qc->tf.flags & ATA_TFLAG_POLLING))
  441  	handled |= ata_sff_port_intr(ap, qc);
sata_promise.c

  158  static void pdc_qc_prep(struct ata_queued_cmd *qc);
  161  static int pdc_check_atapi_dma(struct ata_queued_cmd *qc);
  162  static int pdc_old_sata_check_atapi_dma(struct ata_queued_cmd *qc);
  164  static unsigned int pdc_qc_issue(struct ata_queued_cmd *qc);
  174  static void pdc_post_internal_cmd(struct ata_queued_cmd *qc);

  501  static void pdc_atapi_pkt(struct ata_queued_cmd *qc)
  503  	struct ata_port *ap = qc->ap;
  505  	unsigned int cdb_len = qc->dev->cdb_len;
  506  	u8 *cdb = qc->cdb;
  515  	switch (qc->tf.protocol) {
  517  	if (!(qc->tf.flags & ATA_TFLAG_WRITE))
  536  	dev_sel = qc->tf.device;
  544  	buf[17] = qc->tf.nsect;
  546  	buf[19] = qc->tf.lbal;
  549  	if (qc->tf.protocol != ATAPI_PROT_DMA)
  557  	buf[23] = qc->tf.lbam;
  559  	buf[25] = qc->tf.lbah;
  563  	buf[27] = qc->tf.command;

  579  * @qc: Metadata associated with taskfile to be transferred
  589  static void pdc_fill_sg(struct ata_queued_cmd *qc)
  591  	struct ata_port *ap = qc->ap;
  598  	if (!(qc->flags & ATA_QCFLAG_DMAMAP))
  602  	for_each_sg(qc->sg, sg, qc->n_elem, si) {

  652  static void pdc_qc_prep(struct ata_queued_cmd *qc)
  654  	struct pdc_port_priv *pp = qc->ap->private_data;
  659  	switch (qc->tf.protocol) {
  661  	pdc_fill_sg(qc);
  664  	i = pdc_pkt_header(&qc->tf, qc->ap->bmdma_prd_dma,
  665  		qc->dev->devno, pp->pkt);
  666  	if (qc->tf.flags & ATA_TFLAG_LBA48)
  667  	i = pdc_prep_lba48(&qc->tf, pp->pkt, i);
  669  	i = pdc_prep_lba28(&qc->tf, pp->pkt, i);
  670  	pdc_pkt_footer(&qc->tf, pp->pkt, i);
  673  	pdc_fill_sg(qc);
  676  	pdc_fill_sg(qc);
  679  	pdc_atapi_pkt(qc);

  848  static void pdc_post_internal_cmd(struct ata_queued_cmd *qc)
  850  	struct ata_port *ap = qc->ap;
  853  	if (qc->flags & ATA_QCFLAG_FAILED)

  857  static void pdc_error_intr(struct ata_port *ap, struct ata_queued_cmd *qc,
  884  	qc->err_mask |= ac_err_mask;

  891  pdc_host_intr(struct ata_port *ap,
  892  	struct ata_queued_cmd *qc)
  905  	pdc_error_intr(ap, qc, port_status, err_mask);
  909  	switch (qc->tf.protocol) {
  914  	qc->err_mask |= ac_err_mask(ata_wait_idle(ap));
  915  	ata_qc_complete(qc);

 1004  	struct ata_queued_cmd *qc;		(in pdc_interrupt())
 1006  	qc = ata_qc_from_tag(ap, ap->link.active_tag);
 1007  	if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING)))
 1008  	handled += pdc_host_intr(ap, qc);

 1019  static void pdc_packet_start(struct ata_queued_cmd *qc)
 1021  	struct ata_port *ap = qc->ap;

 1039  static unsigned int pdc_qc_issue(struct ata_queued_cmd *qc)
 1041  	switch (qc->tf.protocol) {
 1043  	if (qc->dev->flags & ATA_DFLAG_CDB_INTR)
 1047  	if (qc->tf.flags & ATA_TFLAG_POLLING)
 1052  	pdc_packet_start(qc);
 1057  	return ata_sff_qc_issue(qc);

 1073  static int pdc_check_atapi_dma(struct ata_queued_cmd *qc)
 1075  	u8 *scsicmd = qc->scsicmd->cmnd;

 1103  static int pdc_old_sata_check_atapi_dma(struct ata_queued_cmd *qc)
sata_nv.c

  315  static int nv_adma_check_atapi_dma(struct ata_queued_cmd *qc);
  316  static void nv_adma_qc_prep(struct ata_queued_cmd *qc);
  317  static unsigned int nv_adma_qc_issue(struct ata_queued_cmd *qc);
  330  static void nv_adma_post_internal_cmd(struct ata_queued_cmd *qc);
  338  static void nv_swncq_qc_prep(struct ata_queued_cmd *qc);
  339  static void nv_swncq_fill_sg(struct ata_queued_cmd *qc);
  340  static unsigned int nv_swncq_qc_issue(struct ata_queued_cmd *qc);

  783  static int nv_adma_check_atapi_dma(struct ata_queued_cmd *qc)
  785  	struct nv_adma_port_priv *pp = qc->ap->private_data;

  882  	struct ata_queued_cmd *qc = ata_qc_from_tag(ap, ap->link.active_tag);	(in nv_host_intr())
  894  	/* DEV interrupt w/ no active qc? */
  895  	if (unlikely(!qc || (qc->tf.flags & ATA_TFLAG_POLLING))) {
  901  	return ata_bmdma_port_intr(ap, qc);

 1113  static void nv_adma_post_internal_cmd(struct ata_queued_cmd *qc)
 1115  	struct nv_adma_port_priv *pp = qc->ap->private_data;
 1118  	ata_bmdma_post_internal_cmd(qc);

 1329  static void nv_adma_fill_aprd(struct ata_queued_cmd *qc,
 1335  	if (qc->tf.flags & ATA_TFLAG_WRITE)
 1337  	if (idx == qc->n_elem - 1)

 1348  static void nv_adma_fill_sg(struct ata_queued_cmd *qc, struct nv_adma_cpb *cpb)
 1350  	struct nv_adma_port_priv *pp = qc->ap->private_data;
 1357  	for_each_sg(qc->sg, sg, qc->n_elem, si) {
 1359  	&pp->aprd[NV_ADMA_SGTBL_LEN * qc->tag + (si-5)];
 1360  	nv_adma_fill_aprd(qc, sg, si, aprd);
 1363  	cpb->next_aprd = cpu_to_le64(((u64)(pp->aprd_dma + NV_ADMA_SGTBL_SZ * qc->tag)));

 1368  static int nv_adma_use_reg_mode(struct ata_queued_cmd *qc)
 1370  	struct nv_adma_port_priv *pp = qc->ap->private_data;
 1375  	(qc->tf.flags & ATA_TFLAG_POLLING))
 1378  	if ((qc->flags & ATA_QCFLAG_DMAMAP) ||
 1379  	(qc->tf.protocol == ATA_PROT_NODATA))

 1385  static void nv_adma_qc_prep(struct ata_queued_cmd *qc)
 1387  	struct nv_adma_port_priv *pp = qc->ap->private_data;
 1388  	struct nv_adma_cpb *cpb = &pp->cpb[qc->tag];
 1392  	if (nv_adma_use_reg_mode(qc)) {
 1394  	(qc->flags & ATA_QCFLAG_DMAMAP));
 1395  	nv_adma_register_mode(qc->ap);
 1396  	ata_bmdma_qc_prep(qc);
 1406  	cpb->tag = qc->tag;
 1410  	if (qc->tf.protocol == ATA_PROT_NCQ)
 1413  	VPRINTK("qc->flags = 0x%lx\n", qc->flags);
 1415  	nv_adma_tf_to_cpb(&qc->tf, cpb->tf);
 1417  	if (qc->flags & ATA_QCFLAG_DMAMAP) {
 1418  	nv_adma_fill_sg(qc, cpb);

 1431  static unsigned int nv_adma_qc_issue(struct ata_queued_cmd *qc)
 1433  	struct nv_adma_port_priv *pp = qc->ap->private_data;
 1435  	int curr_ncq = (qc->tf.protocol == ATA_PROT_NCQ);
 1442  	if (unlikely(qc->tf.protocol == ATA_PROT_NCQ &&
 1443  	(qc->flags & ATA_QCFLAG_RESULT_TF))) {
 1444  	ata_dev_err(qc->dev, "NCQ w/ RESULT_TF not allowed\n");
 1448  	if (nv_adma_use_reg_mode(qc)) {
 1450  	VPRINTK("using ATA register mode: 0x%lx\n", qc->flags);
 1452  	(qc->flags & ATA_QCFLAG_DMAMAP));
 1453  	nv_adma_register_mode(qc->ap);
 1454  	return ata_bmdma_qc_issue(qc);
 1456  	nv_adma_mode(qc->ap);
 1469  	writew(qc->tag, mmio + NV_ADMA_APPEND);
 1471  	DPRINTK("Issued tag %u\n", qc->tag);

 1487  	struct ata_queued_cmd *qc;		(in nv_generic_interrupt())
 1489  	qc = ata_qc_from_tag(ap, ap->link.active_tag);
 1490  	if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING))) {
 1491  	handled += ata_bmdma_port_intr(ap, qc);

 1726  static void nv_swncq_qc_to_dq(struct ata_port *ap, struct ata_queued_cmd *qc)
 1733  	dq->defer_bits |= (1 << qc->tag);
 1734  	dq->tag[dq->tail++ & (ATA_MAX_QUEUE - 1)] = qc->tag;

 1786  	struct ata_queued_cmd qc;		(in __ata_bmdma_stop())
 1788  	qc.ap = ap;
 1789  	ata_bmdma_stop(&qc);

 1992  static void nv_swncq_qc_prep(struct ata_queued_cmd *qc)
 1994  	if (qc->tf.protocol != ATA_PROT_NCQ) {
 1995  	ata_bmdma_qc_prep(qc);
 1999  	if (!(qc->flags & ATA_QCFLAG_DMAMAP))
 2002  	nv_swncq_fill_sg(qc);

 2005  static void nv_swncq_fill_sg(struct ata_queued_cmd *qc)
 2007  	struct ata_port *ap = qc->ap;
 2013  	prd = pp->prd + ATA_MAX_PRD * qc->tag;
 2016  	for_each_sg(qc->sg, sg, qc->n_elem, si) {

 2041  nv_swncq_issue_atacmd(struct ata_port *ap,
 2042  	struct ata_queued_cmd *qc)
 2046  	if (qc == NULL)
 2051  	writel((1 << qc->tag), pp->sactive_block);
 2052  	pp->last_issue_tag = qc->tag;
 2053  	pp->dhfis_bits &= ~(1 << qc->tag);
 2054  	pp->dmafis_bits &= ~(1 << qc->tag);
 2055  	pp->qc_active |= (0x1 << qc->tag);
 2057  	ap->ops->sff_tf_load(ap, &qc->tf);	/* load tf registers */
 2058  	ap->ops->sff_exec_command(ap, &qc->tf);
 2060  	DPRINTK("Issued tag %u\n", qc->tag);

 2065  static unsigned int nv_swncq_qc_issue(struct ata_queued_cmd *qc)
 2067  	struct ata_port *ap = qc->ap;
 2070  	if (qc->tf.protocol != ATA_PROT_NCQ)
 2071  	return ata_bmdma_qc_issue(qc);
 2076  	nv_swncq_issue_atacmd(ap, qc);
 2078  	nv_swncq_qc_to_dq(ap, qc);	/* add qc to defer queue */

 2110  	struct ata_queued_cmd *qc;		(in nv_swncq_sdbfis())
 2166  	qc = ata_qc_from_tag(ap, pp->last_issue_tag);
 2167  	nv_swncq_issue_atacmd(ap, qc);
 2173  	qc = nv_swncq_qc_from_dq(ap);
 2174  	WARN_ON(qc == NULL);
 2175  	nv_swncq_issue_atacmd(ap, qc);

 2192  	struct ata_queued_cmd *qc;		(in nv_swncq_dmafis())
 2202  	qc = ata_qc_from_tag(ap, tag);
 2204  	if (unlikely(!qc))
 2207  	rw = qc->tf.flags & ATA_TFLAG_WRITE;
 2210  	iowrite32(pp->prd_dma + ATA_PRD_TBL_SZ * qc->tag,

 2225  	struct ata_queued_cmd *qc;		(in nv_swncq_host_interrupt())
 2298  	qc = nv_swncq_qc_from_dq(ap);
 2299  	nv_swncq_issue_atacmd(ap, qc);
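nv_swncq_qc_to_dq() above defers an NCQ command by recording its tag twice: once in a bitmap (defer_bits) for O(1) membership tests, and once in a ring (tag[]) so deferred commands are reissued in order. A self-contained model of that double bookkeeping (ATA_MAX_QUEUE is 32 in this kernel; the struct and function names here are illustrative):

#include <stdint.h>

#define MAX_QUEUE 32			/* matches ATA_MAX_QUEUE here */

struct defer_queue {
	uint32_t defer_bits;		/* which tags are queued */
	uint8_t  tag[MAX_QUEUE];	/* FIFO of queued tags */
	unsigned int head, tail;
};

static void dq_push(struct defer_queue *dq, unsigned int tag)
{
	dq->defer_bits |= 1u << tag;
	dq->tag[dq->tail++ & (MAX_QUEUE - 1)] = tag;
}

static int dq_pop(struct defer_queue *dq)
{
	unsigned int tag;

	if (dq->head == dq->tail)
		return -1;		/* nothing deferred */
	tag = dq->tag[dq->head++ & (MAX_QUEUE - 1)];
	dq->defer_bits &= ~(1u << tag);
	return tag;
}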
acard-ahci.c

   75  static void acard_ahci_qc_prep(struct ata_queued_cmd *qc);
   76  static bool acard_ahci_qc_fill_rtf(struct ata_queued_cmd *qc);

  229  static unsigned int acard_ahci_fill_sg(struct ata_queued_cmd *qc, void *cmd_tbl)
  240  	for_each_sg(qc->sg, sg, qc->n_elem, si) {

  260  static void acard_ahci_qc_prep(struct ata_queued_cmd *qc)
  262  	struct ata_port *ap = qc->ap;
  264  	int is_atapi = ata_is_atapi(qc->tf.protocol);
  274  	cmd_tbl = pp->cmd_tbl + qc->tag * AHCI_CMD_TBL_SZ;
  276  	ata_tf_to_fis(&qc->tf, qc->dev->link->pmp, 1, cmd_tbl);
  279  	memcpy(cmd_tbl + AHCI_CMD_TBL_CDB, qc->cdb, qc->dev->cdb_len);
  283  	if (qc->flags & ATA_QCFLAG_DMAMAP)
  284  	n_elem = acard_ahci_fill_sg(qc, cmd_tbl);
  291  	opts = cmd_fis_len | (qc->dev->link->pmp << 12);
  292  	if (qc->tf.flags & ATA_TFLAG_WRITE)
  297  	ahci_fill_cmd_slot(pp, qc->tag, opts);

  300  static bool acard_ahci_qc_fill_rtf(struct ata_queued_cmd *qc)
  302  	struct ahci_port_priv *pp = qc->ap->private_data;
  306  	rx_fis += qc->dev->link->pmp * ACARD_AHCI_RX_FIS_SZ;
  314  	if (qc->tf.protocol == ATA_PROT_PIO && qc->dma_dir == DMA_FROM_DEVICE &&
  315  	!(qc->flags & ATA_QCFLAG_FAILED)) {
  316  	ata_tf_from_fis(rx_fis + RX_FIS_PIO_SETUP, &qc->result_tf);
  317  	qc->result_tf.command = (rx_fis + RX_FIS_PIO_SETUP)[15];
  319  	ata_tf_from_fis(rx_fis + RX_FIS_D2H_REG, &qc->result_tf);
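acard_ahci_qc_prep() above shows the AHCI command-slot recipe: serialize the taskfile into a command FIS at the head of the per-tag command table, append the CDB for ATAPI, fill the PRD table, then publish the slot's options word. A condensed sketch of assembling that options word, following the AHCI command-header layout (the flag macros come from the driver's ahci.h; treat the exact bit positions as a sketch and check the spec):

/* DW0 of an AHCI command header: FIS length in DWORDs (a register
 * H2D FIS is 5), port-multiplier port at bits 12-15, PRDT length at
 * bits 16-31, plus the ATAPI/write/prefetch flag bits. */
static u32 build_cmd_opts(struct ata_queued_cmd *qc, unsigned int n_elem,
			  int is_atapi)
{
	u32 opts = 5 | (qc->dev->link->pmp << 12);

	if (is_atapi)
		opts |= AHCI_CMD_ATAPI | AHCI_CMD_PREFETCH;
	if (qc->tf.flags & ATA_TFLAG_WRITE)
		opts |= AHCI_CMD_WRITE;

	return opts | (n_elem << 16);	/* PRDT entry count */
}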
pata_ns87415.c

  114  * @qc: Command block
  120  static void ns87415_bmdma_setup(struct ata_queued_cmd *qc)
  122  	struct ata_port *ap = qc->ap;
  123  	unsigned int rw = (qc->tf.flags & ATA_TFLAG_WRITE);
  140  	ap->ops->sff_exec_command(ap, &qc->tf);

  145  * @qc: Command block
  154  static void ns87415_bmdma_start(struct ata_queued_cmd *qc)
  156  	ns87415_set_mode(qc->ap, qc->dev, qc->dev->dma_mode);
  157  	ata_bmdma_start(qc);

  162  * @qc: Command block
  167  static void ns87415_bmdma_stop(struct ata_queued_cmd *qc)
  169  	ata_bmdma_stop(qc);
  170  	ns87415_set_mode(qc->ap, qc->dev, qc->dev->pio_mode);

  193  * @qc: Command block
  199  static int ns87415_check_atapi_dma(struct ata_queued_cmd *qc)
pata_pxa.c

   60  static void pxa_load_dmac(struct scatterlist *sg, struct ata_queued_cmd *qc)
   62  	struct pata_pxa_data *pd = qc->ap->private_data;
   79  	if (qc->tf.flags & ATA_TFLAG_WRITE) {

  105  static void pxa_qc_prep(struct ata_queued_cmd *qc)
  107  	struct pata_pxa_data *pd = qc->ap->private_data;
  111  	if (!(qc->flags & ATA_QCFLAG_DMAMAP))
  119  	for_each_sg(qc->sg, sg, qc->n_elem, si)
  120  	pxa_load_dmac(sg, qc);

  136  static void pxa_bmdma_setup(struct ata_queued_cmd *qc)
  138  	qc->ap->ops->sff_exec_command(qc->ap, &qc->tf);

  144  static void pxa_bmdma_start(struct ata_queued_cmd *qc)
  146  	struct pata_pxa_data *pd = qc->ap->private_data;

  154  static void pxa_bmdma_stop(struct ata_queued_cmd *qc)
  156  	struct pata_pxa_data *pd = qc->ap->private_data;
  160  	dev_err(qc->ap->dev, "Timeout waiting for DMA completion!");

  191  static int pxa_check_atapi_dma(struct ata_queued_cmd *qc)
pata_pdc202xx_old.c

  167  * @qc: ATA command
  176  static void pdc2026x_bmdma_start(struct ata_queued_cmd *qc)
  178  	struct ata_port *ap = qc->ap;
  179  	struct ata_device *adev = qc->dev;
  180  	struct ata_taskfile *tf = &qc->tf;
  197  	pdc202xx_set_dmamode(ap, qc->dev);
  201  	len = qc->nbytes / 2;
  212  	ata_bmdma_start(qc);

  217  * @qc: ATA command
  226  static void pdc2026x_bmdma_stop(struct ata_queued_cmd *qc)
  228  	struct ata_port *ap = qc->ap;
  229  	struct ata_device *adev = qc->dev;
  230  	struct ata_taskfile *tf = &qc->tf;
  246  	ata_bmdma_stop(qc);

  277  * @qc: Metadata associated with taskfile to check
  288  static int pdc2026x_check_atapi_dma(struct ata_queued_cmd *qc)
sata_sil.c

  122  static void sil_qc_prep(struct ata_queued_cmd *qc);
  123  static void sil_bmdma_setup(struct ata_queued_cmd *qc);
  124  static void sil_bmdma_start(struct ata_queued_cmd *qc);
  125  static void sil_bmdma_stop(struct ata_queued_cmd *qc);

  267  static void sil_bmdma_stop(struct ata_queued_cmd *qc)
  269  	struct ata_port *ap = qc->ap;

  280  static void sil_bmdma_setup(struct ata_queued_cmd *qc)
  282  	struct ata_port *ap = qc->ap;
  289  	ap->ops->sff_exec_command(ap, &qc->tf);

  292  static void sil_bmdma_start(struct ata_queued_cmd *qc)
  294  	unsigned int rw = (qc->tf.flags & ATA_TFLAG_WRITE);
  295  	struct ata_port *ap = qc->ap;

  309  static void sil_fill_sg(struct ata_queued_cmd *qc)
  312  	struct ata_port *ap = qc->ap;
  317  	for_each_sg(qc->sg, sg, qc->n_elem, si) {

  336  static void sil_qc_prep(struct ata_queued_cmd *qc)
  338  	if (!(qc->flags & ATA_QCFLAG_DMAMAP))
  341  	sil_fill_sg(qc);

  437  	struct ata_queued_cmd *qc = ata_qc_from_tag(ap, ap->link.active_tag);	(in sil_host_intr())
  462  	if (unlikely(!qc || (qc->tf.flags & ATA_TFLAG_POLLING))) {
  477  	* need to check ata_is_atapi(qc->tf.protocol) again.
  479  	if (!(qc->dev->flags & ATA_DFLAG_CDB_INTR))
  483  	if (ata_is_dma(qc->tf.protocol)) {
  485  	ap->ops->bmdma_stop(qc);
  488  	qc->err_mask |= AC_ERR_HOST_BUS;
  508  	ata_sff_hsm_move(ap, qc, status, 0);
  510  	if (unlikely(qc->err_mask) && ata_is_dma(qc->tf.protocol))
  516  	qc->err_mask |= AC_ERR_HSM;
sata_sil24.c

  338  static int sil24_qc_defer(struct ata_queued_cmd *qc);
  339  static void sil24_qc_prep(struct ata_queued_cmd *qc);
  340  static unsigned int sil24_qc_issue(struct ata_queued_cmd *qc);
  341  static bool sil24_qc_fill_rtf(struct ata_queued_cmd *qc);
  353  static void sil24_post_internal_cmd(struct ata_queued_cmd *qc);

  779  static inline void sil24_fill_sg(struct ata_queued_cmd *qc,
  786  	for_each_sg(qc->sg, sg, qc->n_elem, si) {

  798  static int sil24_qc_defer(struct ata_queued_cmd *qc)
  800  	struct ata_link *link = qc->dev->link;
  802  	u8 prot = qc->tf.protocol;
  824  	(qc->flags & ATA_QCFLAG_RESULT_TF));
  830  	qc->flags |= ATA_QCFLAG_CLEAR_EXCL;
  837  	qc->flags |= ATA_QCFLAG_CLEAR_EXCL;
  840  	return ata_std_qc_defer(qc);

  843  static void sil24_qc_prep(struct ata_queued_cmd *qc)
  845  	struct ata_port *ap = qc->ap;
  852  	cb = &pp->cmd_block[sil24_tag(qc->tag)];
  854  	if (!ata_is_atapi(qc->tf.protocol)) {
  857  	if (ata_is_data(qc->tf.protocol)) {
  860  	if (ata_is_ncq(qc->tf.protocol))
  862  	if (qc->tf.flags & ATA_TFLAG_WRITE)
  872  	memcpy(cb->atapi.cdb, qc->cdb, qc->dev->cdb_len);
  874  	if (ata_is_data(qc->tf.protocol)) {
  875  	if (qc->tf.flags & ATA_TFLAG_WRITE)
  883  	ata_tf_to_fis(&qc->tf, qc->dev->link->pmp, 1, prb->fis);
  885  	if (qc->flags & ATA_QCFLAG_DMAMAP)
  886  	sil24_fill_sg(qc, sge);

  889  static unsigned int sil24_qc_issue(struct ata_queued_cmd *qc)
  891  	struct ata_port *ap = qc->ap;
  894  	unsigned int tag = sil24_tag(qc->tag);

  912  static bool sil24_qc_fill_rtf(struct ata_queued_cmd *qc)
  914  	sil24_read_tf(qc->ap, qc->tag, &qc->result_tf);

  982  	struct ata_queued_cmd *qc = NULL;	(in sil24_error_intr())
 1041  	/* find out the offending link and qc */
 1049  	qc = ata_qc_from_tag(ap, link->active_tag);
 1060  	qc = ata_qc_from_tag(ap, link->active_tag);
 1082  	if (qc)
 1083  	qc->err_mask |= err_mask;
 1098  	if (qc)
 1099  	ata_link_abort(qc->dev->link);

 1191  static void sil24_post_internal_cmd(struct ata_queued_cmd *qc)
 1193  	struct ata_port *ap = qc->ap;
 1196  	if ((qc->flags & ATA_QCFLAG_FAILED) && sil24_init_port(ap))
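sil24_qc_defer() above gates commands that need the link to themselves (for example ATAPI, or anything wanting a result taskfile) behind an exclusive-access flag, then falls through to the generic ata_std_qc_defer(). A stripped-down model of that gate, with made-up names standing in for the driver's link flags and counters:

/* Illustrative model of an exclusive-access gate for a qc_defer hook. */
enum { LINK_EXCL_BUSY = 1 };

static int my_qc_defer(unsigned int link_flags, int qc_is_excl,
		       int link_has_active_cmds)
{
	/* An exclusive command must wait until the link drains... */
	if (qc_is_excl && link_has_active_cmds)
		return 1;			/* defer */
	/* ...and while one is in flight, everything else waits. */
	if (link_flags & LINK_EXCL_BUSY)
		return 1;			/* defer */
	return 0;				/* ok to issue */
}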
|
H A D | pata_sl82c105.c | 151 * @qc: ATA command 160 static void sl82c105_bmdma_start(struct ata_queued_cmd *qc) sl82c105_bmdma_start() argument 162 struct ata_port *ap = qc->ap; sl82c105_bmdma_start() 169 sl82c105_configure_dmamode(ap, qc->dev); sl82c105_bmdma_start() 171 ata_bmdma_start(qc); sl82c105_bmdma_start() 176 * @qc: ATA command 189 static void sl82c105_bmdma_stop(struct ata_queued_cmd *qc) sl82c105_bmdma_stop() argument 191 struct ata_port *ap = qc->ap; sl82c105_bmdma_stop() 193 ata_bmdma_stop(qc); sl82c105_bmdma_stop() 199 sl82c105_set_piomode(ap, qc->dev); sl82c105_bmdma_stop() 204 * @qc: command 212 static int sl82c105_qc_defer(struct ata_queued_cmd *qc) sl82c105_qc_defer() argument 214 struct ata_host *host = qc->ap->host; sl82c105_qc_defer() 215 struct ata_port *alt = host->ports[1 ^ qc->ap->port_no]; sl82c105_qc_defer() 219 rc = ata_std_qc_defer(qc); sl82c105_qc_defer()
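
sl82c105_bmdma_start()/_stop() illustrate a controller with a single timing register per channel: DMA timings are programmed immediately before ata_bmdma_start(), and PIO timings are restored as soon as the transfer stops, so taskfile traffic never runs on DMA clocks. A sketch of the wrapper pair; configure_dmamode() and restore_piomode() stand in for the chip-specific helpers:

static void example_bmdma_start(struct ata_queued_cmd *qc)
{
        struct ata_port *ap = qc->ap;

        configure_dmamode(ap, qc->dev);         /* hypothetical helper */
        ata_bmdma_start(qc);
}

static void example_bmdma_stop(struct ata_queued_cmd *qc)
{
        struct ata_port *ap = qc->ap;

        ata_bmdma_stop(qc);
        restore_piomode(ap, qc->dev);           /* hypothetical helper */
}

pata_triflex.c and pata_atiixp.c below wrap ata_bmdma_start()/_stop() in exactly the same way.
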
|
H A D | pata_triflex.c | 145 * @qc: Command in progress 154 static void triflex_bmdma_start(struct ata_queued_cmd *qc) triflex_bmdma_start() argument 156 triflex_load_timing(qc->ap, qc->dev, qc->dev->dma_mode); triflex_bmdma_start() 157 ata_bmdma_start(qc); triflex_bmdma_start() 170 static void triflex_bmdma_stop(struct ata_queued_cmd *qc) triflex_bmdma_stop() argument 172 ata_bmdma_stop(qc); triflex_bmdma_stop() 173 triflex_load_timing(qc->ap, qc->dev, qc->dev->pio_mode); triflex_bmdma_stop()
|
H A D | libata-scsi.c | 62 typedef unsigned int (*ata_xlat_func_t)(struct ata_queued_cmd *qc); 756 struct ata_queued_cmd *qc; ata_scsi_qc_new() local 758 qc = ata_qc_new_init(dev, cmd->request->tag); ata_scsi_qc_new() 759 if (qc) { ata_scsi_qc_new() 760 qc->scsicmd = cmd; ata_scsi_qc_new() 761 qc->scsidone = cmd->scsi_done; ata_scsi_qc_new() 763 qc->sg = scsi_sglist(cmd); ata_scsi_qc_new() 764 qc->n_elem = scsi_sg_count(cmd); ata_scsi_qc_new() 770 return qc; ata_scsi_qc_new() 773 static void ata_qc_set_pc_nbytes(struct ata_queued_cmd *qc) ata_qc_set_pc_nbytes() argument 775 struct scsi_cmnd *scmd = qc->scsicmd; ata_qc_set_pc_nbytes() 777 qc->extrabytes = scmd->request->extra_len; ata_qc_set_pc_nbytes() 778 qc->nbytes = scsi_bufflen(scmd) + qc->extrabytes; ata_qc_set_pc_nbytes() 961 * @qc: Command that completed. 976 static void ata_gen_passthru_sense(struct ata_queued_cmd *qc) ata_gen_passthru_sense() argument 978 struct scsi_cmnd *cmd = qc->scsicmd; ata_gen_passthru_sense() 979 struct ata_taskfile *tf = &qc->result_tf; ata_gen_passthru_sense() 982 int verbose = qc->ap->ops->error_handler == NULL; ata_gen_passthru_sense() 992 if (qc->err_mask || ata_gen_passthru_sense() 994 ata_to_sense_error(qc->ap->print_id, tf->command, tf->feature, ata_gen_passthru_sense() 1041 * @qc: Command that we are erroring out 1043 * Generate sense block for a failed ATA command @qc. Descriptor 1049 static void ata_gen_ata_sense(struct ata_queued_cmd *qc) ata_gen_ata_sense() argument 1051 struct ata_device *dev = qc->dev; ata_gen_ata_sense() 1052 struct scsi_cmnd *cmd = qc->scsicmd; ata_gen_ata_sense() 1053 struct ata_taskfile *tf = &qc->result_tf; ata_gen_ata_sense() 1056 int verbose = qc->ap->ops->error_handler == NULL; ata_gen_ata_sense() 1069 if (qc->err_mask || ata_gen_ata_sense() 1071 ata_to_sense_error(qc->ap->print_id, tf->command, tf->feature, ata_gen_ata_sense() 1076 block = ata_tf_read_block(&qc->result_tf, dev); ata_gen_ata_sense() 1100 * it needs to see every deferred qc. 
Set dev_blocked to 1 to ata_scsi_sdev_config() 1328 * @qc: Storage for translated ATA taskfile 1341 static unsigned int ata_scsi_start_stop_xlat(struct ata_queued_cmd *qc) ata_scsi_start_stop_xlat() argument 1343 struct scsi_cmnd *scmd = qc->scsicmd; ata_scsi_start_stop_xlat() 1344 struct ata_taskfile *tf = &qc->tf; ata_scsi_start_stop_xlat() 1363 if (qc->dev->flags & ATA_DFLAG_LBA) { ata_scsi_start_stop_xlat() 1382 if ((qc->ap->flags & ATA_FLAG_NO_POWEROFF_SPINDOWN) && ata_scsi_start_stop_xlat() 1386 if ((qc->ap->flags & ATA_FLAG_NO_HIBERNATE_SPINDOWN) && ata_scsi_start_stop_xlat() 1415 * @qc: Storage for translated ATA taskfile 1426 static unsigned int ata_scsi_flush_xlat(struct ata_queued_cmd *qc) ata_scsi_flush_xlat() argument 1428 struct ata_taskfile *tf = &qc->tf; ata_scsi_flush_xlat() 1433 if (qc->dev->flags & ATA_DFLAG_FLUSH_EXT) ata_scsi_flush_xlat() 1439 qc->flags |= ATA_QCFLAG_IO; ata_scsi_flush_xlat() 1537 * @qc: Storage for translated ATA taskfile 1547 static unsigned int ata_scsi_verify_xlat(struct ata_queued_cmd *qc) ata_scsi_verify_xlat() argument 1549 struct scsi_cmnd *scmd = qc->scsicmd; ata_scsi_verify_xlat() 1550 struct ata_taskfile *tf = &qc->tf; ata_scsi_verify_xlat() 1551 struct ata_device *dev = qc->dev; ata_scsi_verify_xlat() 1552 u64 dev_sectors = qc->dev->n_sectors; ata_scsi_verify_xlat() 1659 * @qc: Storage for translated ATA taskfile 1675 static unsigned int ata_scsi_rw_xlat(struct ata_queued_cmd *qc) ata_scsi_rw_xlat() argument 1677 struct scsi_cmnd *scmd = qc->scsicmd; ata_scsi_rw_xlat() 1733 qc->flags |= ATA_QCFLAG_IO; ata_scsi_rw_xlat() 1734 qc->nbytes = n_block * scmd->device->sector_size; ata_scsi_rw_xlat() 1736 rc = ata_build_rw_tf(&qc->tf, qc->dev, block, n_block, tf_flags, ata_scsi_rw_xlat() 1737 qc->tag); ata_scsi_rw_xlat() 1759 static void ata_scsi_qc_complete(struct ata_queued_cmd *qc) ata_scsi_qc_complete() argument 1761 struct ata_port *ap = qc->ap; ata_scsi_qc_complete() 1762 struct scsi_cmnd *cmd = qc->scsicmd; ata_scsi_qc_complete() 1764 int need_sense = (qc->err_mask != 0); ata_scsi_qc_complete() 1777 ata_gen_passthru_sense(qc); ata_scsi_qc_complete() 1788 ata_gen_ata_sense(qc); ata_scsi_qc_complete() 1793 ata_dump_status(ap->print_id, &qc->result_tf); ata_scsi_qc_complete() 1795 qc->scsidone(cmd); ata_scsi_qc_complete() 1797 ata_qc_free(qc); ata_scsi_qc_complete() 1830 struct ata_queued_cmd *qc; ata_scsi_translate() local 1835 qc = ata_scsi_qc_new(dev, cmd); ata_scsi_translate() 1836 if (!qc) ata_scsi_translate() 1847 ata_sg_init(qc, scsi_sglist(cmd), scsi_sg_count(cmd)); ata_scsi_translate() 1849 qc->dma_dir = cmd->sc_data_direction; ata_scsi_translate() 1852 qc->complete_fn = ata_scsi_qc_complete; ata_scsi_translate() 1854 if (xlat_func(qc)) ata_scsi_translate() 1858 if ((rc = ap->ops->qc_defer(qc))) ata_scsi_translate() 1863 ata_qc_issue(qc); ata_scsi_translate() 1869 ata_qc_free(qc); ata_scsi_translate() 1875 ata_qc_free(qc); ata_scsi_translate() 1883 ata_qc_free(qc); ata_scsi_translate() 2585 static void atapi_sense_complete(struct ata_queued_cmd *qc) atapi_sense_complete() argument 2587 if (qc->err_mask && ((qc->err_mask & AC_ERR_DEV) == 0)) { atapi_sense_complete() 2593 ata_gen_passthru_sense(qc); atapi_sense_complete() 2596 qc->scsidone(qc->scsicmd); atapi_sense_complete() 2597 ata_qc_free(qc); atapi_sense_complete() 2606 static void atapi_request_sense(struct ata_queued_cmd *qc) atapi_request_sense() argument 2608 struct ata_port *ap = qc->ap; atapi_request_sense() 2609 struct scsi_cmnd *cmd = qc->scsicmd; atapi_request_sense() 2617 
ap->ops->sff_tf_read(ap, &qc->tf); atapi_request_sense() 2622 cmd->sense_buffer[2] = qc->tf.feature >> 4; atapi_request_sense() 2624 ata_qc_reinit(qc); atapi_request_sense() 2627 sg_init_one(&qc->sgent, cmd->sense_buffer, SCSI_SENSE_BUFFERSIZE); atapi_request_sense() 2628 ata_sg_init(qc, &qc->sgent, 1); atapi_request_sense() 2629 qc->dma_dir = DMA_FROM_DEVICE; atapi_request_sense() 2631 memset(&qc->cdb, 0, qc->dev->cdb_len); atapi_request_sense() 2632 qc->cdb[0] = REQUEST_SENSE; atapi_request_sense() 2633 qc->cdb[4] = SCSI_SENSE_BUFFERSIZE; atapi_request_sense() 2635 qc->tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE; atapi_request_sense() 2636 qc->tf.command = ATA_CMD_PACKET; atapi_request_sense() 2639 qc->tf.protocol = ATAPI_PROT_DMA; atapi_request_sense() 2640 qc->tf.feature |= ATAPI_PKT_DMA; atapi_request_sense() 2642 qc->tf.protocol = ATAPI_PROT_PIO; atapi_request_sense() 2643 qc->tf.lbam = SCSI_SENSE_BUFFERSIZE; atapi_request_sense() 2644 qc->tf.lbah = 0; atapi_request_sense() 2646 qc->nbytes = SCSI_SENSE_BUFFERSIZE; atapi_request_sense() 2648 qc->complete_fn = atapi_sense_complete; atapi_request_sense() 2650 ata_qc_issue(qc); atapi_request_sense() 2655 static void atapi_qc_complete(struct ata_queued_cmd *qc) atapi_qc_complete() argument 2657 struct scsi_cmnd *cmd = qc->scsicmd; atapi_qc_complete() 2658 unsigned int err_mask = qc->err_mask; atapi_qc_complete() 2663 if (unlikely(qc->ap->ops->error_handler && atapi_qc_complete() 2664 (err_mask || qc->flags & ATA_QCFLAG_SENSE_VALID))) { atapi_qc_complete() 2666 if (!(qc->flags & ATA_QCFLAG_SENSE_VALID)) { atapi_qc_complete() 2672 ata_gen_passthru_sense(qc); atapi_qc_complete() 2686 * sure qc->dev->sdev isn't NULL before dereferencing. atapi_qc_complete() 2688 if (qc->cdb[0] == ALLOW_MEDIUM_REMOVAL && qc->dev->sdev) atapi_qc_complete() 2689 qc->dev->sdev->locked = 0; atapi_qc_complete() 2691 qc->scsicmd->result = SAM_STAT_CHECK_CONDITION; atapi_qc_complete() 2692 qc->scsidone(cmd); atapi_qc_complete() 2693 ata_qc_free(qc); atapi_qc_complete() 2700 atapi_request_sense(qc); atapi_qc_complete() 2708 ata_gen_passthru_sense(qc); atapi_qc_complete() 2737 qc->scsidone(cmd); atapi_qc_complete() 2738 ata_qc_free(qc); atapi_qc_complete() 2742 * @qc: command structure to be initialized 2750 static unsigned int atapi_xlat(struct ata_queued_cmd *qc) atapi_xlat() argument 2752 struct scsi_cmnd *scmd = qc->scsicmd; atapi_xlat() 2753 struct ata_device *dev = qc->dev; atapi_xlat() 2758 memset(qc->cdb, 0, dev->cdb_len); atapi_xlat() 2759 memcpy(qc->cdb, scmd->cmnd, scmd->cmd_len); atapi_xlat() 2761 qc->complete_fn = atapi_qc_complete; atapi_xlat() 2763 qc->tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE; atapi_xlat() 2765 qc->tf.flags |= ATA_TFLAG_WRITE; atapi_xlat() 2769 qc->tf.command = ATA_CMD_PACKET; atapi_xlat() 2770 ata_qc_set_pc_nbytes(qc); atapi_xlat() 2773 if (!nodata && !using_pio && atapi_check_dma(qc)) atapi_xlat() 2781 nbytes = min(ata_qc_raw_nbytes(qc), (unsigned int)63 * 1024); atapi_xlat() 2810 qc->tf.lbam = (nbytes & 0xFF); atapi_xlat() 2811 qc->tf.lbah = (nbytes >> 8); atapi_xlat() 2814 qc->tf.protocol = ATAPI_PROT_NODATA; atapi_xlat() 2816 qc->tf.protocol = ATAPI_PROT_PIO; atapi_xlat() 2819 qc->tf.protocol = ATAPI_PROT_DMA; atapi_xlat() 2820 qc->tf.feature |= ATAPI_PKT_DMA; atapi_xlat() 2825 qc->tf.feature |= ATAPI_DMADIR; atapi_xlat() 2932 * @qc: command structure to be initialized 2939 static unsigned int ata_scsi_pass_thru(struct ata_queued_cmd *qc) ata_scsi_pass_thru() argument 2941 struct ata_taskfile *tf = &(qc->tf); 
ata_scsi_pass_thru() 2942 struct scsi_cmnd *scmd = qc->scsicmd; ata_scsi_pass_thru() 2943 struct ata_device *dev = qc->dev; ata_scsi_pass_thru() 3006 qc->sect_size = scsi_bufflen(scmd); ata_scsi_pass_thru() 3040 qc->sect_size = scmd->device->sector_size; ata_scsi_pass_thru() 3045 qc->sect_size = ATA_SECT_SIZE; ata_scsi_pass_thru() 3057 qc->flags |= ATA_QCFLAG_RESULT_TF | ATA_QCFLAG_QUIET; ata_scsi_pass_thru() 3065 ata_qc_set_pc_nbytes(qc); ata_scsi_pass_thru() 3123 static unsigned int ata_scsi_write_same_xlat(struct ata_queued_cmd *qc) ata_scsi_write_same_xlat() argument 3125 struct ata_taskfile *tf = &qc->tf; ata_scsi_write_same_xlat() 3126 struct scsi_cmnd *scmd = qc->scsicmd; ata_scsi_write_same_xlat() 3127 struct ata_device *dev = qc->dev; ata_scsi_write_same_xlat() 3161 tf->nsect = qc->tag << 3; ata_scsi_write_same_xlat() 3178 ata_qc_set_pc_nbytes(qc); ata_scsi_write_same_xlat() 3190 * @qc: Storage for translated ATA taskfile 3199 static int ata_mselect_caching(struct ata_queued_cmd *qc, ata_mselect_caching() argument 3202 struct ata_taskfile *tf = &qc->tf; ata_mselect_caching() 3203 struct ata_device *dev = qc->dev; ata_mselect_caching() 3236 * @qc: Storage for translated ATA taskfile 3245 static unsigned int ata_scsi_mode_select_xlat(struct ata_queued_cmd *qc) ata_scsi_mode_select_xlat() argument 3247 struct scsi_cmnd *scmd = qc->scsicmd; ata_scsi_mode_select_xlat() 3333 if (ata_mselect_caching(qc, p, pg_len) < 0) ata_scsi_mode_select_xlat() 3685 * callback and it needs to see every deferred qc. ata_scsi_add_hosts()
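
The ata_xlat_func_t table above is the heart of SCSI-to-ATA translation: ata_scsi_translate() allocates a qc, attaches the scatterlist, and calls the per-opcode xlat function, which fills qc->tf and returns nonzero only when the CDB cannot be translated. The flush translator is the smallest useful example; this sketch is reconstructed from the fragments above plus the standard libata taskfile flags:

static unsigned int example_flush_xlat(struct ata_queued_cmd *qc)
{
        struct ata_taskfile *tf = &qc->tf;

        tf->flags |= ATA_TFLAG_DEVICE;
        tf->protocol = ATA_PROT_NODATA;

        /* prefer the 48-bit flush when the device reports support */
        if (qc->dev->flags & ATA_DFLAG_FLUSH_EXT)
                tf->command = ATA_CMD_FLUSH_EXT;
        else
                tf->command = ATA_CMD_FLUSH;

        /* flush matters for data integrity: treat it as an IO command */
        qc->flags |= ATA_QCFLAG_IO;

        return 0;       /* zero means "translated, go issue it" */
}
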
|
H A D | libata-eh.c | 508 * the qc for @cmd. If the qc is already gone, we lose and let 509 * the scsi command finish (EH_HANDLED). Otherwise, the qc has 527 struct ata_queued_cmd *qc; ata_scsi_timed_out() local 539 qc = ata_qc_from_tag(ap, ap->link.active_tag); ata_scsi_timed_out() 540 if (qc) { ata_scsi_timed_out() 541 WARN_ON(qc->scsicmd != cmd); ata_scsi_timed_out() 542 qc->flags |= ATA_QCFLAG_EH_SCHEDULED; ata_scsi_timed_out() 543 qc->err_mask |= AC_ERR_TIMEOUT; ata_scsi_timed_out() 638 * completion wins, the qc never reaches EH. When error ata_scsi_cmd_error_handler() 639 * completion wins, the qc has ATA_QCFLAG_FAILED set. ata_scsi_cmd_error_handler() 645 * timed out iff its associated qc is active and not failed. ata_scsi_cmd_error_handler() 666 struct ata_queued_cmd *qc; list_for_each_entry_safe() local 669 qc = __ata_qc_from_tag(ap, i); list_for_each_entry_safe() 670 if (qc->flags & ATA_QCFLAG_ACTIVE && list_for_each_entry_safe() 671 qc->scsicmd == scmd) list_for_each_entry_safe() 676 /* the scmd has an associated qc */ list_for_each_entry_safe() 677 if (!(qc->flags & ATA_QCFLAG_FAILED)) { list_for_each_entry_safe() 679 qc->err_mask |= AC_ERR_TIMEOUT; list_for_each_entry_safe() 680 qc->flags |= ATA_QCFLAG_FAILED; list_for_each_entry_safe() 904 struct ata_queued_cmd *qc = ata_qc_from_tag(ap, tag); ata_eh_fastdrain_timerfn() local 905 if (qc) ata_eh_fastdrain_timerfn() 906 qc->err_mask |= AC_ERR_TIMEOUT; ata_eh_fastdrain_timerfn() 960 * ata_qc_schedule_eh - schedule qc for error handling 961 * @qc: command to schedule error handling for 963 * Schedule error handling for @qc. EH will kick in as soon as 969 void ata_qc_schedule_eh(struct ata_queued_cmd *qc) ata_qc_schedule_eh() argument 971 struct ata_port *ap = qc->ap; ata_qc_schedule_eh() 972 struct request_queue *q = qc->scsicmd->device->request_queue; ata_qc_schedule_eh() 977 qc->flags |= ATA_QCFLAG_FAILED; ata_qc_schedule_eh() 986 blk_abort_request(qc->scsicmd->request); ata_qc_schedule_eh() 1033 * ata_port_schedule_eh - schedule error handling without a qc 1058 struct ata_queued_cmd *qc = ata_qc_from_tag(ap, tag); ata_do_link_abort() local 1060 if (qc && (!link || qc->dev->link == link)) { ata_do_link_abort() 1061 qc->flags |= ATA_QCFLAG_FAILED; ata_do_link_abort() 1062 ata_qc_complete(qc); ata_do_link_abort() 1074 * ata_link_abort - abort all qc's on the link 1075 * @link: ATA link to abort qc's for 1077 * Abort all active qc's active on @link and schedule EH. 1083 * Number of aborted qc's. 1091 * ata_port_abort - abort all qc's on the port 1092 * @ap: ATA port to abort qc's for 1094 * Abort all active qc's of @ap and schedule EH. 1100 * Number of aborted qc's. 1290 static void __ata_eh_qc_complete(struct ata_queued_cmd *qc) __ata_eh_qc_complete() argument 1292 struct ata_port *ap = qc->ap; __ata_eh_qc_complete() 1293 struct scsi_cmnd *scmd = qc->scsicmd; __ata_eh_qc_complete() 1297 qc->scsidone = ata_eh_scsidone; __ata_eh_qc_complete() 1298 __ata_qc_complete(qc); __ata_eh_qc_complete() 1299 WARN_ON(ata_tag_valid(qc->tag)); __ata_eh_qc_complete() 1307 * @qc: Command to complete 1312 void ata_eh_qc_complete(struct ata_queued_cmd *qc) ata_eh_qc_complete() argument 1314 struct scsi_cmnd *scmd = qc->scsicmd; ata_eh_qc_complete() 1316 __ata_eh_qc_complete(qc); ata_eh_qc_complete() 1321 * @qc: Command to retry 1328 * due to unrelated failures (qc->err_mask is zero). 
1330 void ata_eh_qc_retry(struct ata_queued_cmd *qc) ata_eh_qc_retry() argument 1332 struct scsi_cmnd *scmd = qc->scsicmd; ata_eh_qc_retry() 1333 if (!qc->err_mask) ata_eh_qc_retry() 1335 __ata_eh_qc_complete(qc); ata_eh_qc_retry() 1736 * Read log page 10h, determine the offending qc and acquire 1749 struct ata_queued_cmd *qc; ata_eh_analyze_ncq_error() local 1763 qc = __ata_qc_from_tag(ap, tag); ata_eh_analyze_ncq_error() 1765 if (!(qc->flags & ATA_QCFLAG_FAILED)) ata_eh_analyze_ncq_error() 1768 if (qc->err_mask) ata_eh_analyze_ncq_error() 1788 qc = __ata_qc_from_tag(ap, tag); ata_eh_analyze_ncq_error() 1789 memcpy(&qc->result_tf, &tf, sizeof(tf)); ata_eh_analyze_ncq_error() 1790 qc->result_tf.flags = ATA_TFLAG_ISADDR | ATA_TFLAG_LBA | ATA_TFLAG_LBA48; ata_eh_analyze_ncq_error() 1791 qc->err_mask |= AC_ERR_DEV | AC_ERR_NCQ; ata_eh_analyze_ncq_error() 1796 * ata_eh_analyze_tf - analyze taskfile of a failed qc 1797 * @qc: qc to analyze 1800 * Analyze taskfile of @qc and further determine cause of 1810 static unsigned int ata_eh_analyze_tf(struct ata_queued_cmd *qc, ata_eh_analyze_tf() argument 1817 qc->err_mask |= AC_ERR_HSM; ata_eh_analyze_tf() 1822 qc->err_mask |= AC_ERR_DEV; ata_eh_analyze_tf() 1826 switch (qc->dev->class) { ata_eh_analyze_tf() 1830 qc->err_mask |= AC_ERR_ATA_BUS; ata_eh_analyze_tf() 1832 qc->err_mask |= AC_ERR_MEDIA; ata_eh_analyze_tf() 1834 qc->err_mask |= AC_ERR_INVALID; ata_eh_analyze_tf() 1838 if (!(qc->ap->pflags & ATA_PFLAG_FROZEN)) { ata_eh_analyze_tf() 1839 tmp = atapi_eh_request_sense(qc->dev, ata_eh_analyze_tf() 1840 qc->scsicmd->sense_buffer, ata_eh_analyze_tf() 1841 qc->result_tf.feature >> 4); ata_eh_analyze_tf() 1850 qc->flags |= ATA_QCFLAG_SENSE_VALID; ata_eh_analyze_tf() 1852 qc->err_mask |= tmp; ata_eh_analyze_tf() 1856 if (qc->err_mask & (AC_ERR_HSM | AC_ERR_TIMEOUT | AC_ERR_ATA_BUS)) ata_eh_analyze_tf() 2106 * @qc: qc to possibly retry 2113 static inline int ata_eh_worth_retry(struct ata_queued_cmd *qc) ata_eh_worth_retry() argument 2115 if (qc->err_mask & AC_ERR_MEDIA) ata_eh_worth_retry() 2117 if (qc->flags & ATA_QCFLAG_IO) ata_eh_worth_retry() 2119 if (qc->err_mask & AC_ERR_INVALID) ata_eh_worth_retry() 2121 return qc->err_mask != AC_ERR_DEV; /* retry if not dev error */ ata_eh_worth_retry() 2172 struct ata_queued_cmd *qc = __ata_qc_from_tag(ap, tag); ata_eh_link_autopsy() local 2174 if (!(qc->flags & ATA_QCFLAG_FAILED) || ata_eh_link_autopsy() 2175 ata_dev_phys_link(qc->dev) != link) ata_eh_link_autopsy() 2179 qc->err_mask |= ehc->i.err_mask; ata_eh_link_autopsy() 2182 ehc->i.action |= ata_eh_analyze_tf(qc, &qc->result_tf); ata_eh_link_autopsy() 2185 if (qc->err_mask & AC_ERR_ATA_BUS) ata_eh_link_autopsy() 2186 qc->err_mask &= ~(AC_ERR_DEV | AC_ERR_MEDIA | ata_eh_link_autopsy() 2190 if (qc->err_mask & ~AC_ERR_OTHER) ata_eh_link_autopsy() 2191 qc->err_mask &= ~AC_ERR_OTHER; ata_eh_link_autopsy() 2194 if (qc->flags & ATA_QCFLAG_SENSE_VALID) ata_eh_link_autopsy() 2195 qc->err_mask &= ~(AC_ERR_DEV | AC_ERR_OTHER); ata_eh_link_autopsy() 2198 if (ata_eh_worth_retry(qc)) ata_eh_link_autopsy() 2199 qc->flags |= ATA_QCFLAG_RETRY; ata_eh_link_autopsy() 2202 ehc->i.dev = qc->dev; ata_eh_link_autopsy() 2203 all_err_mask |= qc->err_mask; ata_eh_link_autopsy() 2204 if (qc->flags & ATA_QCFLAG_IO) ata_eh_link_autopsy() 2206 trace_ata_eh_link_autopsy_qc(qc); ata_eh_link_autopsy() 2436 struct ata_queued_cmd *qc = __ata_qc_from_tag(ap, tag); ata_eh_link_report() local 2438 if (!(qc->flags & ATA_QCFLAG_FAILED) || ata_eh_link_report() 2439 ata_dev_phys_link(qc->dev) 
!= link || ata_eh_link_report() 2440 ((qc->flags & ATA_QCFLAG_QUIET) && ata_eh_link_report() 2441 qc->err_mask == AC_ERR_DEV)) ata_eh_link_report() 2443 if (qc->flags & ATA_QCFLAG_SENSE_VALID && !qc->err_mask) ata_eh_link_report() 2500 struct ata_queued_cmd *qc = __ata_qc_from_tag(ap, tag); ata_eh_link_report() local 2501 struct ata_taskfile *cmd = &qc->tf, *res = &qc->result_tf; ata_eh_link_report() 2505 if (!(qc->flags & ATA_QCFLAG_FAILED) || ata_eh_link_report() 2506 ata_dev_phys_link(qc->dev) != link || !qc->err_mask) ata_eh_link_report() 2509 if (qc->dma_dir != DMA_NONE) { ata_eh_link_report() 2524 prot_str[qc->tf.protocol], qc->nbytes, ata_eh_link_report() 2525 dma_str[qc->dma_dir]); ata_eh_link_report() 2528 if (ata_is_atapi(qc->tf.protocol)) { ata_eh_link_report() 2529 const u8 *cdb = qc->cdb; ata_eh_link_report() 2530 size_t cdb_len = qc->dev->cdb_len; ata_eh_link_report() 2532 if (qc->scsicmd) { ata_eh_link_report() 2533 cdb = qc->scsicmd->cmnd; ata_eh_link_report() 2534 cdb_len = qc->scsicmd->cmd_len; ata_eh_link_report() 2541 ata_dev_err(qc->dev, "failed command: %s\n", ata_eh_link_report() 2545 ata_dev_err(qc->dev, ata_eh_link_report() 2554 cmd->device, qc->tag, data_buf, cdb_buf, ata_eh_link_report() 2559 res->device, qc->err_mask, ata_err_string(qc->err_mask), ata_eh_link_report() 2560 qc->err_mask & AC_ERR_NCQ ? " <F>" : ""); ata_eh_link_report() 2566 ata_dev_err(qc->dev, "status: { Busy }\n"); ata_eh_link_report() 2568 ata_dev_err(qc->dev, "status: { %s%s%s%s}\n", ata_eh_link_report() 2578 ata_dev_err(qc->dev, "error: { %s%s%s%s%s}\n", ata_eh_link_report() 3343 struct ata_queued_cmd *qc; ata_eh_maybe_retry_flush() local 3352 qc = __ata_qc_from_tag(ap, link->active_tag); ata_eh_maybe_retry_flush() 3353 if (qc->dev != dev || (qc->tf.command != ATA_CMD_FLUSH_EXT && ata_eh_maybe_retry_flush() 3354 qc->tf.command != ATA_CMD_FLUSH)) ata_eh_maybe_retry_flush() 3358 if (qc->err_mask & AC_ERR_DEV) ata_eh_maybe_retry_flush() 3364 tf.command = qc->tf.command; ata_eh_maybe_retry_flush() 3369 tf.command, qc->err_mask); ata_eh_maybe_retry_flush() 3381 qc->scsicmd->allowed = max(qc->scsicmd->allowed, 1); ata_eh_maybe_retry_flush() 3389 qc->err_mask |= AC_ERR_DEV; ata_eh_maybe_retry_flush() 3390 qc->result_tf = tf; ata_eh_maybe_retry_flush() 3959 struct ata_queued_cmd *qc = __ata_qc_from_tag(ap, tag); ata_eh_finish() local 3961 if (!(qc->flags & ATA_QCFLAG_FAILED)) ata_eh_finish() 3964 if (qc->err_mask) { ata_eh_finish() 3969 if (qc->flags & ATA_QCFLAG_RETRY) ata_eh_finish() 3970 ata_eh_qc_retry(qc); ata_eh_finish() 3972 ata_eh_qc_complete(qc); ata_eh_finish() 3974 if (qc->flags & ATA_QCFLAG_SENSE_VALID) { ata_eh_finish() 3975 ata_eh_qc_complete(qc); ata_eh_finish() 3978 memset(&qc->result_tf, 0, sizeof(qc->result_tf)); ata_eh_finish() 3979 ata_eh_qc_retry(qc); ata_eh_finish()
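
ata_eh_worth_retry() condenses libata's retry policy into four tests: media errors never improve on retry, filesystem IO is always worth another attempt, invalid commands stay invalid, and everything else is retried unless the device itself rejected it. Cleaned up from the fragments above:

static inline int example_worth_retry(struct ata_queued_cmd *qc)
{
        if (qc->err_mask & AC_ERR_MEDIA)
                return 0;       /* don't retry media errors */
        if (qc->flags & ATA_QCFLAG_IO)
                return 1;       /* otherwise retry anything from fs */
        if (qc->err_mask & AC_ERR_INVALID)
                return 0;       /* don't retry these */
        return qc->err_mask != AC_ERR_DEV; /* retry if not dev error */
}

ata_eh_finish() then routes each failed qc through ata_eh_qc_retry() or ata_eh_qc_complete() based on the resulting ATA_QCFLAG_RETRY bit.
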
|
H A D | pata_atiixp.c | 201 * @qc: Command in progress 210 static void atiixp_bmdma_start(struct ata_queued_cmd *qc) atiixp_bmdma_start() argument 212 struct ata_port *ap = qc->ap; atiixp_bmdma_start() 213 struct ata_device *adev = qc->dev; atiixp_bmdma_start() 225 ata_bmdma_start(qc); atiixp_bmdma_start() 230 * @qc: Command in progress 239 static void atiixp_bmdma_stop(struct ata_queued_cmd *qc) atiixp_bmdma_stop() argument 241 struct ata_port *ap = qc->ap; atiixp_bmdma_stop() 243 int dn = (2 * ap->port_no) + qc->dev->devno; atiixp_bmdma_stop() 249 ata_bmdma_stop(qc); atiixp_bmdma_stop()
|
H A D | pata_octeon_cf.c | 549 static void octeon_cf_dma_setup(struct ata_queued_cmd *qc) octeon_cf_dma_setup() argument 551 struct ata_port *ap = qc->ap; octeon_cf_dma_setup() 557 qc->cursg = qc->sg; octeon_cf_dma_setup() 559 ap->ops->sff_exec_command(ap, &qc->tf); octeon_cf_dma_setup() 566 * @qc: Information about the DMA 568 static void octeon_cf_dma_start(struct ata_queued_cmd *qc) octeon_cf_dma_start() argument 570 struct octeon_cf_port *cf_port = qc->ap->private_data; octeon_cf_dma_start() 575 VPRINTK("%d scatterlists\n", qc->n_elem); octeon_cf_dma_start() 578 sg = qc->cursg; octeon_cf_dma_start() 597 mio_boot_dma_cfg.s.rw = ((qc->tf.flags & ATA_TFLAG_WRITE) != 0); octeon_cf_dma_start() 631 struct ata_queued_cmd *qc) octeon_cf_dma_finished() 640 ap->print_id, qc->tf.protocol, ap->hsm_task_state); octeon_cf_dma_finished() 649 qc->err_mask |= AC_ERR_HOST_BUS; octeon_cf_dma_finished() 668 ata_sff_hsm_move(ap, qc, status, 0); octeon_cf_dma_finished() 670 if (unlikely(qc->err_mask) && (qc->tf.protocol == ATA_PROT_DMA)) octeon_cf_dma_finished() 694 struct ata_queued_cmd *qc; octeon_cf_interrupt() local 704 qc = ata_qc_from_tag(ap, ap->link.active_tag); octeon_cf_interrupt() 706 if (!qc || (qc->tf.flags & ATA_TFLAG_POLLING)) octeon_cf_interrupt() 710 if (!sg_is_last(qc->cursg)) { octeon_cf_interrupt() 711 qc->cursg = sg_next(qc->cursg); octeon_cf_interrupt() 713 octeon_cf_dma_start(qc); octeon_cf_interrupt() 740 handled |= octeon_cf_dma_finished(ap, qc); octeon_cf_interrupt() 755 struct ata_queued_cmd *qc; octeon_cf_delayed_finish() local 778 qc = ata_qc_from_tag(ap, ap->link.active_tag); octeon_cf_delayed_finish() 779 if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING))) octeon_cf_delayed_finish() 780 octeon_cf_dma_finished(ap, qc); octeon_cf_delayed_finish() 799 static int octeon_cf_check_atapi_dma(struct ata_queued_cmd *qc) octeon_cf_check_atapi_dma() argument 804 static unsigned int octeon_cf_qc_issue(struct ata_queued_cmd *qc) octeon_cf_qc_issue() argument 806 struct ata_port *ap = qc->ap; octeon_cf_qc_issue() 808 switch (qc->tf.protocol) { octeon_cf_qc_issue() 810 WARN_ON(qc->tf.flags & ATA_TFLAG_POLLING); octeon_cf_qc_issue() 812 ap->ops->sff_tf_load(ap, &qc->tf); /* load tf registers */ octeon_cf_qc_issue() 813 octeon_cf_dma_setup(qc); /* set up dma */ octeon_cf_qc_issue() 814 octeon_cf_dma_start(qc); /* initiate dma */ octeon_cf_qc_issue() 823 return ata_sff_qc_issue(qc); octeon_cf_qc_issue() 630 octeon_cf_dma_finished(struct ata_port *ap, struct ata_queued_cmd *qc) octeon_cf_dma_finished() argument
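
octeon_cf_qc_issue() shows the usual shape of a driver-private issue hook: DMA protocols are steered to the private engine, everything else falls through to the generic SFF path. A sketch under that assumption; example_dma_setup()/_start() are illustrative stand-ins:

static unsigned int example_qc_issue(struct ata_queued_cmd *qc)
{
        struct ata_port *ap = qc->ap;

        switch (qc->tf.protocol) {
        case ATA_PROT_DMA:
                /* polling and a private DMA engine don't mix */
                WARN_ON(qc->tf.flags & ATA_TFLAG_POLLING);
                ap->ops->sff_tf_load(ap, &qc->tf); /* load tf registers */
                example_dma_setup(qc);             /* hypothetical */
                example_dma_start(qc);             /* hypothetical */
                break;
        default:
                return ata_sff_qc_issue(qc);
        }
        return 0;
}
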
|
H A D | pata_sc1200.c | 157 * @qc: command pending 165 static unsigned int sc1200_qc_issue(struct ata_queued_cmd *qc) sc1200_qc_issue() argument 167 struct ata_port *ap = qc->ap; sc1200_qc_issue() 168 struct ata_device *adev = qc->dev; sc1200_qc_issue() 180 return ata_bmdma_qc_issue(qc); sc1200_qc_issue() 185 * @qc: command 190 static int sc1200_qc_defer(struct ata_queued_cmd *qc) sc1200_qc_defer() argument 192 struct ata_host *host = qc->ap->host; sc1200_qc_defer() 193 struct ata_port *alt = host->ports[1 ^ qc->ap->port_no]; sc1200_qc_defer() 197 rc = ata_std_qc_defer(qc); sc1200_qc_defer()
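
sc1200_qc_defer() (like sl82c105_qc_defer() above) serializes two channels that share one DMA engine: the standard defer rules run first, then the command is held with ATA_DEFER_PORT while the sibling port has anything in flight. Reconstructed from the fragments:

static int example_qc_defer(struct ata_queued_cmd *qc)
{
        struct ata_host *host = qc->ap->host;
        struct ata_port *alt = host->ports[1 ^ qc->ap->port_no];
        int rc;

        /* first apply the usual per-link rules */
        rc = ata_std_qc_defer(qc);
        if (rc != 0)
                return rc;

        /* only issue while the other channel's state machine is idle */
        if (alt && alt->qc_active)
                return ATA_DEFER_PORT;

        return 0;
}
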
|
H A D | pata_arasan_cf.c | 216 /* qc to be transferred using DMA */ 217 struct ata_queued_cmd *qc; member in struct:arasan_cf_dev 366 struct ata_queued_cmd *qc = acdev->qc; dma_complete() local 369 acdev->qc = NULL; dma_complete() 373 if (unlikely(qc->err_mask) && ata_is_dma(qc->tf.protocol)) dma_complete() 374 ata_ehi_push_desc(&qc->ap->link.eh_info, "DMA Failed: Timeout"); dma_complete() 381 u32 rw = acdev->qc->tf.flags & ATA_TFLAG_WRITE; wait4buf() 435 u32 write = acdev->qc->tf.flags & ATA_TFLAG_WRITE; sg_xfer() 522 struct ata_queued_cmd *qc = acdev->qc; data_xfer() local 536 for_each_sg(qc->sg, sg, qc->n_elem, temp) { data_xfer() 549 status = ioread8(qc->ap->ioaddr.altstatus_addr); data_xfer() 564 qc->err_mask |= AC_ERR_HOST_BUS; data_xfer() 565 qc->ap->hsm_task_state = HSM_ST_ERR; data_xfer() 568 spin_unlock_irqrestore(qc->ap->lock, flags); data_xfer() 577 struct ata_queued_cmd *qc = acdev->qc; delayed_finish() local 582 status = ioread8(qc->ap->ioaddr.altstatus_addr); delayed_finish() 633 struct ata_queued_cmd *qc = acdev->qc; arasan_cf_interrupt() local 636 if (qc->tf.flags & ATA_TFLAG_WRITE) arasan_cf_interrupt() 673 struct ata_queued_cmd *qc = acdev->qc; arasan_cf_dma_start() local 674 struct ata_port *ap = qc->ap; arasan_cf_dma_start() 675 struct ata_taskfile *tf = &qc->tf; arasan_cf_dma_start() 686 static unsigned int arasan_cf_qc_issue(struct ata_queued_cmd *qc) arasan_cf_qc_issue() argument 688 struct ata_port *ap = qc->ap; arasan_cf_qc_issue() 692 if (!ata_is_dma(qc->tf.protocol)) arasan_cf_qc_issue() 693 return ata_sff_qc_issue(qc); arasan_cf_qc_issue() 697 ata_sff_dev_select(ap, qc->dev->devno); arasan_cf_qc_issue() 701 switch (qc->tf.protocol) { arasan_cf_qc_issue() 703 WARN_ON_ONCE(qc->tf.flags & ATA_TFLAG_POLLING); arasan_cf_qc_issue() 705 ap->ops->sff_tf_load(ap, &qc->tf); arasan_cf_qc_issue() 707 acdev->qc = qc; arasan_cf_qc_issue()
|
H A D | sata_fsl.c | 441 static unsigned int sata_fsl_fill_sg(struct ata_queued_cmd *qc, void *cmd_desc, sata_fsl_fill_sg() argument 465 for_each_sg(qc->sg, sg, qc->n_elem, si) { sata_fsl_fill_sg() 474 ata_port_err(qc->ap, "s/g addr unaligned : 0x%llx\n", sata_fsl_fill_sg() 477 ata_port_err(qc->ap, "s/g len unaligned : 0x%x\n", sata_fsl_fill_sg() 515 static void sata_fsl_qc_prep(struct ata_queued_cmd *qc) sata_fsl_qc_prep() argument 517 struct ata_port *ap = qc->ap; sata_fsl_qc_prep() 521 unsigned int tag = sata_fsl_tag(qc->tag, hcr_base); sata_fsl_qc_prep() 531 ata_tf_to_fis(&qc->tf, qc->dev->link->pmp, 1, (u8 *) &cd->cfis); sata_fsl_qc_prep() 536 if (qc->tf.protocol == ATA_PROT_NCQ) { sata_fsl_qc_prep() 542 if (ata_is_atapi(qc->tf.protocol)) { sata_fsl_qc_prep() 545 memcpy((void *)&cd->acmd, qc->cdb, qc->dev->cdb_len); sata_fsl_qc_prep() 548 if (qc->flags & ATA_QCFLAG_DMAMAP) sata_fsl_qc_prep() 549 num_prde = sata_fsl_fill_sg(qc, (void *)cd, sata_fsl_qc_prep() 553 if (qc->tf.protocol == ATA_PROT_NCQ) sata_fsl_qc_prep() 563 static unsigned int sata_fsl_qc_issue(struct ata_queued_cmd *qc) sata_fsl_qc_issue() argument 565 struct ata_port *ap = qc->ap; sata_fsl_qc_issue() 568 unsigned int tag = sata_fsl_tag(qc->tag, hcr_base); sata_fsl_qc_issue() 575 iowrite32(qc->dev->link->pmp, CQPMP + hcr_base); sata_fsl_qc_issue() 592 static bool sata_fsl_qc_fill_rtf(struct ata_queued_cmd *qc) sata_fsl_qc_fill_rtf() argument 594 struct sata_fsl_port_priv *pp = qc->ap->private_data; sata_fsl_qc_fill_rtf() 595 struct sata_fsl_host_priv *host_priv = qc->ap->host->private_data; sata_fsl_qc_fill_rtf() 597 unsigned int tag = sata_fsl_tag(qc->tag, hcr_base); sata_fsl_qc_fill_rtf() 602 ata_tf_from_fis(cd->sfis, &qc->result_tf); sata_fsl_qc_fill_rtf() 1075 static void sata_fsl_post_internal_cmd(struct ata_queued_cmd *qc) sata_fsl_post_internal_cmd() argument 1077 if (qc->flags & ATA_QCFLAG_FAILED) sata_fsl_post_internal_cmd() 1078 qc->err_mask |= AC_ERR_OTHER; sata_fsl_post_internal_cmd() 1080 if (qc->err_mask) { sata_fsl_post_internal_cmd() 1094 struct ata_queued_cmd *qc = NULL; sata_fsl_error_intr() local 1154 /* find out the offending link and qc */ sata_fsl_error_intr() 1166 qc = ata_qc_from_tag(ap, link->active_tag); sata_fsl_error_intr() 1184 qc = ata_qc_from_tag(ap, link->active_tag); sata_fsl_error_intr() 1194 if (qc) sata_fsl_error_intr() 1195 qc->err_mask |= err_mask; sata_fsl_error_intr() 1205 if (qc) sata_fsl_error_intr() 1206 ata_link_abort(qc->dev->link); sata_fsl_error_intr() 1217 struct ata_queued_cmd *qc; sata_fsl_host_intr() local 1232 qc = ata_qc_from_tag(ap, tag); sata_fsl_host_intr() 1233 if (qc && ata_is_atapi(qc->tf.protocol)) { sata_fsl_host_intr() 1297 qc = ata_qc_from_tag(ap, ATA_TAG_INTERNAL); sata_fsl_host_intr() 1302 if (qc) { sata_fsl_host_intr() 1303 ata_qc_complete(qc); sata_fsl_host_intr()
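
sata_fsl_qc_fill_rtf() demonstrates the qc_fill_rtf contract for FIS-based hardware: the controller wrote a D2H register FIS into the per-tag command descriptor, and the hook simply decodes it into qc->result_tf with ata_tf_from_fis(). A sketch; my_port_priv and the cmd_desc/sfis fields are illustrative stand-ins for the descriptor layout:

static bool example_qc_fill_rtf(struct ata_queued_cmd *qc)
{
        struct my_port_priv *pp = qc->ap->private_data; /* hypothetical */
        u8 *fis = pp->cmd_desc[qc->tag].sfis;           /* hypothetical */

        /* decode the stored D2H FIS into the result taskfile */
        ata_tf_from_fis(fis, &qc->result_tf);
        return true;    /* result_tf is valid */
}
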
|
H A D | libata-core.c | 1524 static void ata_qc_complete_internal(struct ata_queued_cmd *qc) ata_qc_complete_internal() argument 1526 struct completion *waiting = qc->private_data; ata_qc_complete_internal() 1562 struct ata_queued_cmd *qc; ata_exec_internal_sg() local 1579 /* initialize internal qc */ ata_exec_internal_sg() 1591 qc = __ata_qc_from_tag(ap, tag); ata_exec_internal_sg() 1593 qc->tag = tag; ata_exec_internal_sg() 1594 qc->scsicmd = NULL; ata_exec_internal_sg() 1595 qc->ap = ap; ata_exec_internal_sg() 1596 qc->dev = dev; ata_exec_internal_sg() 1597 ata_qc_reinit(qc); ata_exec_internal_sg() 1608 /* prepare & issue qc */ ata_exec_internal_sg() 1609 qc->tf = *tf; ata_exec_internal_sg() 1611 memcpy(qc->cdb, cdb, ATAPI_CDB_LEN); ata_exec_internal_sg() 1616 qc->tf.feature |= ATAPI_DMADIR; ata_exec_internal_sg() 1618 qc->flags |= ATA_QCFLAG_RESULT_TF; ata_exec_internal_sg() 1619 qc->dma_dir = dma_dir; ata_exec_internal_sg() 1627 ata_sg_init(qc, sgl, n_elem); ata_exec_internal_sg() 1628 qc->nbytes = buflen; ata_exec_internal_sg() 1631 qc->private_data = &wait; ata_exec_internal_sg() 1632 qc->complete_fn = ata_qc_complete_internal; ata_exec_internal_sg() 1634 ata_qc_issue(qc); ata_exec_internal_sg() 1661 * following test prevents us from completing the qc ata_exec_internal_sg() 1665 if (qc->flags & ATA_QCFLAG_ACTIVE) { ata_exec_internal_sg() 1666 qc->err_mask |= AC_ERR_TIMEOUT; ata_exec_internal_sg() 1671 ata_qc_complete(qc); ata_exec_internal_sg() 1674 ata_dev_warn(dev, "qc timeout (cmd 0x%x)\n", ata_exec_internal_sg() 1683 ap->ops->post_internal_cmd(qc); ata_exec_internal_sg() 1686 if (qc->flags & ATA_QCFLAG_FAILED) { ata_exec_internal_sg() 1687 if (qc->result_tf.command & (ATA_ERR | ATA_DF)) ata_exec_internal_sg() 1688 qc->err_mask |= AC_ERR_DEV; ata_exec_internal_sg() 1690 if (!qc->err_mask) ata_exec_internal_sg() 1691 qc->err_mask |= AC_ERR_OTHER; ata_exec_internal_sg() 1693 if (qc->err_mask & ~AC_ERR_OTHER) ata_exec_internal_sg() 1694 qc->err_mask &= ~AC_ERR_OTHER; ata_exec_internal_sg() 1700 *tf = qc->result_tf; ata_exec_internal_sg() 1701 err_mask = qc->err_mask; ata_exec_internal_sg() 1703 ata_qc_free(qc); ata_exec_internal_sg() 4591 * @qc: Command containing DMA memory to be released 4598 void ata_sg_clean(struct ata_queued_cmd *qc) ata_sg_clean() argument 4600 struct ata_port *ap = qc->ap; ata_sg_clean() 4601 struct scatterlist *sg = qc->sg; ata_sg_clean() 4602 int dir = qc->dma_dir; ata_sg_clean() 4606 VPRINTK("unmapping %u sg elements\n", qc->n_elem); ata_sg_clean() 4608 if (qc->n_elem) ata_sg_clean() 4609 dma_unmap_sg(ap->dev, sg, qc->orig_n_elem, dir); ata_sg_clean() 4611 qc->flags &= ~ATA_QCFLAG_DMAMAP; ata_sg_clean() 4612 qc->sg = NULL; ata_sg_clean() 4617 * @qc: Metadata associated with taskfile to check 4629 int atapi_check_dma(struct ata_queued_cmd *qc) atapi_check_dma() argument 4631 struct ata_port *ap = qc->ap; atapi_check_dma() 4636 if (!(qc->dev->horkage & ATA_HORKAGE_ATAPI_MOD16_DMA) && atapi_check_dma() 4637 unlikely(qc->nbytes & 15)) atapi_check_dma() 4641 return ap->ops->check_atapi_dma(qc); atapi_check_dma() 4647 * ata_std_qc_defer - Check whether a qc needs to be deferred 4648 * @qc: ATA command in question 4653 * whether a new command @qc can be issued. 
4661 int ata_std_qc_defer(struct ata_queued_cmd *qc) ata_std_qc_defer() argument 4663 struct ata_link *link = qc->dev->link; ata_std_qc_defer() 4665 if (qc->tf.protocol == ATA_PROT_NCQ) { ata_std_qc_defer() 4676 void ata_noop_qc_prep(struct ata_queued_cmd *qc) { } ata_noop_qc_prep() argument 4680 * @qc: Command to be associated ata_noop_qc_prep() 4684 * Initialize the data-related elements of queued_cmd @qc ata_noop_qc_prep() 4691 void ata_sg_init(struct ata_queued_cmd *qc, struct scatterlist *sg, ata_sg_init() argument 4694 qc->sg = sg; ata_sg_init() 4695 qc->n_elem = n_elem; ata_sg_init() 4696 qc->cursg = qc->sg; ata_sg_init() 4701 * @qc: Command with scatter-gather table to be mapped. 4703 * DMA-map the scatter-gather table associated with queued_cmd @qc. 4712 static int ata_sg_setup(struct ata_queued_cmd *qc) ata_sg_setup() argument 4714 struct ata_port *ap = qc->ap; ata_sg_setup() 4719 n_elem = dma_map_sg(ap->dev, qc->sg, qc->n_elem, qc->dma_dir); ata_sg_setup() 4724 qc->orig_n_elem = qc->n_elem; ata_sg_setup() 4725 qc->n_elem = n_elem; ata_sg_setup() 4726 qc->flags |= ATA_QCFLAG_DMAMAP; ata_sg_setup() 4764 struct ata_queued_cmd *qc; ata_qc_new_init() local 4777 qc = __ata_qc_from_tag(ap, tag); ata_qc_new_init() 4778 qc->tag = tag; ata_qc_new_init() 4779 qc->scsicmd = NULL; ata_qc_new_init() 4780 qc->ap = ap; ata_qc_new_init() 4781 qc->dev = dev; ata_qc_new_init() 4783 ata_qc_reinit(qc); ata_qc_new_init() 4785 return qc; ata_qc_new_init() 4790 * @qc: Command to complete 4798 void ata_qc_free(struct ata_queued_cmd *qc) ata_qc_free() argument 4803 WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */ ata_qc_free() 4804 ap = qc->ap; ata_qc_free() 4806 qc->flags = 0; ata_qc_free() 4807 tag = qc->tag; ata_qc_free() 4809 qc->tag = ATA_TAG_POISON; ata_qc_free() 4815 void __ata_qc_complete(struct ata_queued_cmd *qc) __ata_qc_complete() argument 4820 WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */ __ata_qc_complete() 4821 WARN_ON_ONCE(!(qc->flags & ATA_QCFLAG_ACTIVE)); __ata_qc_complete() 4822 ap = qc->ap; __ata_qc_complete() 4823 link = qc->dev->link; __ata_qc_complete() 4825 if (likely(qc->flags & ATA_QCFLAG_DMAMAP)) __ata_qc_complete() 4826 ata_sg_clean(qc); __ata_qc_complete() 4828 /* command should be marked inactive atomically with qc completion */ __ata_qc_complete() 4829 if (qc->tf.protocol == ATA_PROT_NCQ) { __ata_qc_complete() 4830 link->sactive &= ~(1 << qc->tag); __ata_qc_complete() 4839 if (unlikely(qc->flags & ATA_QCFLAG_CLEAR_EXCL && __ata_qc_complete() 4843 /* atapi: mark qc as inactive to prevent the interrupt handler __ata_qc_complete() 4847 qc->flags &= ~ATA_QCFLAG_ACTIVE; __ata_qc_complete() 4848 ap->qc_active &= ~(1 << qc->tag); __ata_qc_complete() 4851 qc->complete_fn(qc); __ata_qc_complete() 4854 static void fill_result_tf(struct ata_queued_cmd *qc) fill_result_tf() argument 4856 struct ata_port *ap = qc->ap; fill_result_tf() 4858 qc->result_tf.flags = qc->tf.flags; fill_result_tf() 4859 ap->ops->qc_fill_rtf(qc); fill_result_tf() 4862 static void ata_verify_xfer(struct ata_queued_cmd *qc) ata_verify_xfer() argument 4864 struct ata_device *dev = qc->dev; ata_verify_xfer() 4866 if (ata_is_nodata(qc->tf.protocol)) ata_verify_xfer() 4869 if ((dev->mwdma_mask || dev->udma_mask) && ata_is_pio(qc->tf.protocol)) ata_verify_xfer() 4877 * @qc: Command to complete 4890 void ata_qc_complete(struct ata_queued_cmd *qc) ata_qc_complete() argument 4892 struct ata_port *ap = qc->ap; ata_qc_complete() 4897 * In new EH, a failed qc is marked with 
ATA_QCFLAG_FAILED. ata_qc_complete() 4899 * failed qc. libata core enforces the rule by returning NULL ata_qc_complete() 4908 struct ata_device *dev = qc->dev; ata_qc_complete() 4911 if (unlikely(qc->err_mask)) ata_qc_complete() 4912 qc->flags |= ATA_QCFLAG_FAILED; ata_qc_complete() 4918 if (unlikely(ata_tag_internal(qc->tag))) { ata_qc_complete() 4919 fill_result_tf(qc); ata_qc_complete() 4920 trace_ata_qc_complete_internal(qc); ata_qc_complete() 4921 __ata_qc_complete(qc); ata_qc_complete() 4926 * Non-internal qc has failed. Fill the result TF and ata_qc_complete() 4929 if (unlikely(qc->flags & ATA_QCFLAG_FAILED)) { ata_qc_complete() 4930 fill_result_tf(qc); ata_qc_complete() 4931 trace_ata_qc_complete_failed(qc); ata_qc_complete() 4932 ata_qc_schedule_eh(qc); ata_qc_complete() 4939 if (qc->flags & ATA_QCFLAG_RESULT_TF) ata_qc_complete() 4940 fill_result_tf(qc); ata_qc_complete() 4942 trace_ata_qc_complete_done(qc); ata_qc_complete() 4946 switch (qc->tf.command) { ata_qc_complete() 4948 if (qc->tf.feature != SETFEATURES_WC_ON && ata_qc_complete() 4949 qc->tf.feature != SETFEATURES_WC_OFF) ata_qc_complete() 4965 ata_verify_xfer(qc); ata_qc_complete() 4967 __ata_qc_complete(qc); ata_qc_complete() 4969 if (qc->flags & ATA_QCFLAG_EH_SCHEDULED) ata_qc_complete() 4973 if (qc->err_mask || qc->flags & ATA_QCFLAG_RESULT_TF) ata_qc_complete() 4974 fill_result_tf(qc); ata_qc_complete() 4976 __ata_qc_complete(qc); ata_qc_complete() 5014 struct ata_queued_cmd *qc; ata_qc_complete_multiple() local 5017 qc = ata_qc_from_tag(ap, tag); ata_qc_complete_multiple() 5018 if (qc) { ata_qc_complete_multiple() 5019 ata_qc_complete(qc); ata_qc_complete_multiple() 5030 * @qc: command to issue to device 5040 void ata_qc_issue(struct ata_queued_cmd *qc) ata_qc_issue() argument 5042 struct ata_port *ap = qc->ap; ata_qc_issue() 5043 struct ata_link *link = qc->dev->link; ata_qc_issue() 5044 u8 prot = qc->tf.protocol; ata_qc_issue() 5047 * check is skipped for old EH because it reuses active qc to ata_qc_issue() 5053 WARN_ON_ONCE(link->sactive & (1 << qc->tag)); ata_qc_issue() 5057 link->sactive |= 1 << qc->tag; ata_qc_issue() 5062 link->active_tag = qc->tag; ata_qc_issue() 5065 qc->flags |= ATA_QCFLAG_ACTIVE; ata_qc_issue() 5066 ap->qc_active |= 1 << qc->tag; ata_qc_issue() 5073 (!qc->sg || !qc->n_elem || !qc->nbytes))) ata_qc_issue() 5078 if (ata_sg_setup(qc)) ata_qc_issue() 5082 if (unlikely(qc->dev->flags & ATA_DFLAG_SLEEPING)) { ata_qc_issue() 5089 ap->ops->qc_prep(qc); ata_qc_issue() 5090 trace_ata_qc_issue(qc); ata_qc_issue() 5091 qc->err_mask |= ap->ops->qc_issue(qc); ata_qc_issue() 5092 if (unlikely(qc->err_mask)) ata_qc_issue() 5097 qc->err_mask |= AC_ERR_SYSTEM; ata_qc_issue() 5099 ata_qc_complete(qc); ata_qc_issue() 6789 static unsigned int ata_dummy_qc_issue(struct ata_queued_cmd *qc) ata_dummy_qc_issue() argument
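
ata_std_qc_defer() encodes the rule that NCQ and non-NCQ commands may not overlap on one link: an NCQ command can go out only when no exclusive (non-NCQ) command owns active_tag, and a non-NCQ command only when both active_tag and the NCQ sactive mask are clear. Reconstructed from the fragment above:

int example_std_qc_defer(struct ata_queued_cmd *qc)
{
        struct ata_link *link = qc->dev->link;

        if (qc->tf.protocol == ATA_PROT_NCQ) {
                if (!ata_tag_valid(link->active_tag))
                        return 0;       /* no exclusive command active */
        } else {
                if (!ata_tag_valid(link->active_tag) && !link->sactive)
                        return 0;       /* link fully idle */
        }

        return ATA_DEFER_LINK;
}
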
|
H A D | sata_svw.c | 98 static int k2_sata_check_atapi_dma(struct ata_queued_cmd *qc) k2_sata_check_atapi_dma() argument 100 u8 cmnd = qc->scsicmd->cmnd[0]; k2_sata_check_atapi_dma() 102 if (qc->ap->flags & K2_FLAG_NO_ATAPI_DMA) k2_sata_check_atapi_dma() 240 * @qc: Info associated with this ATA transaction. 246 static void k2_bmdma_setup_mmio(struct ata_queued_cmd *qc) k2_bmdma_setup_mmio() argument 248 struct ata_port *ap = qc->ap; k2_bmdma_setup_mmio() 249 unsigned int rw = (qc->tf.flags & ATA_TFLAG_WRITE); k2_bmdma_setup_mmio() 265 if (qc->tf.protocol != ATA_PROT_DMA) k2_bmdma_setup_mmio() 266 ap->ops->sff_exec_command(ap, &qc->tf); k2_bmdma_setup_mmio() 271 * @qc: Info associated with this ATA transaction. 277 static void k2_bmdma_start_mmio(struct ata_queued_cmd *qc) k2_bmdma_start_mmio() argument 279 struct ata_port *ap = qc->ap; k2_bmdma_start_mmio() 309 if (qc->tf.protocol == ATA_PROT_DMA) k2_bmdma_start_mmio() 310 ap->ops->sff_exec_command(ap, &qc->tf); k2_bmdma_start_mmio()
|
H A D | pata_bf54x.c | 827 * @qc: Info associated with this ATA transaction. 832 static void bfin_bmdma_setup(struct ata_queued_cmd *qc) bfin_bmdma_setup() argument 834 struct ata_port *ap = qc->ap; bfin_bmdma_setup() 844 dev_dbg(qc->ap->dev, "in atapi dma setup\n"); bfin_bmdma_setup() 846 if (qc->tf.flags & ATA_TFLAG_WRITE) { bfin_bmdma_setup() 855 dma_map_sg(ap->dev, qc->sg, qc->n_elem, dir); bfin_bmdma_setup() 858 for_each_sg(qc->sg, sg, qc->n_elem, si) { bfin_bmdma_setup() 867 dma_desc_cpu[qc->n_elem - 1].cfg &= ~(DMAFLOW | NDSIZE); bfin_bmdma_setup() 871 qc->n_elem * sizeof(struct dma_desc_array)); bfin_bmdma_setup() 882 bfin_exec_command(ap, &qc->tf); bfin_bmdma_setup() 884 if (qc->tf.flags & ATA_TFLAG_WRITE) { bfin_bmdma_setup() 906 * @qc: Info associated with this ATA transaction. 911 static void bfin_bmdma_start(struct ata_queued_cmd *qc) bfin_bmdma_start() argument 913 struct ata_port *ap = qc->ap; bfin_bmdma_start() 916 dev_dbg(qc->ap->dev, "in atapi dma start\n"); bfin_bmdma_start() 932 * @qc: Command we are ending DMA for 935 static void bfin_bmdma_stop(struct ata_queued_cmd *qc) bfin_bmdma_stop() argument 937 struct ata_port *ap = qc->ap; bfin_bmdma_stop() 940 dev_dbg(qc->ap->dev, "in atapi dma stop\n"); bfin_bmdma_stop() 946 if (qc->tf.flags & ATA_TFLAG_WRITE) { bfin_bmdma_stop() 954 dma_unmap_sg(ap->dev, qc->sg, qc->n_elem, dir); bfin_bmdma_stop() 1300 struct ata_queued_cmd *qc) bfin_ata_host_intr() 1306 ap->print_id, qc->tf.protocol, ap->hsm_task_state); bfin_ata_host_intr() 1317 * No need to check is_atapi_taskfile(&qc->tf) again. bfin_ata_host_intr() 1319 if (!(qc->dev->flags & ATA_DFLAG_CDB_INTR)) bfin_ata_host_intr() 1323 if (qc->tf.protocol == ATA_PROT_DMA || bfin_ata_host_intr() 1324 qc->tf.protocol == ATAPI_PROT_DMA) { bfin_ata_host_intr() 1335 ap->ops->bmdma_stop(qc); bfin_ata_host_intr() 1339 qc->err_mask |= AC_ERR_HOST_BUS; bfin_ata_host_intr() 1363 ata_sff_hsm_move(ap, qc, status, 0); bfin_ata_host_intr() 1365 if (unlikely(qc->err_mask) && (qc->tf.protocol == ATA_PROT_DMA || bfin_ata_host_intr() 1366 qc->tf.protocol == ATAPI_PROT_DMA)) bfin_ata_host_intr() 1397 struct ata_queued_cmd *qc; bfin_ata_interrupt() local 1399 qc = ata_qc_from_tag(ap, ap->link.active_tag); bfin_ata_interrupt() 1400 if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING))) bfin_ata_interrupt() 1401 handled |= bfin_ata_host_intr(ap, qc); bfin_ata_interrupt() 1299 bfin_ata_host_intr(struct ata_port *ap, struct ata_queued_cmd *qc) bfin_ata_host_intr() argument
|
H A D | pata_ns87410.c | 107 * @qc: command pending 114 static unsigned int ns87410_qc_issue(struct ata_queued_cmd *qc) ns87410_qc_issue() argument 116 struct ata_port *ap = qc->ap; ns87410_qc_issue() 117 struct ata_device *adev = qc->dev; ns87410_qc_issue() 127 return ata_sff_qc_issue(qc); ns87410_qc_issue()
|
H A D | libahci.c | 71 static bool ahci_qc_fill_rtf(struct ata_queued_cmd *qc); 74 static void ahci_qc_prep(struct ata_queued_cmd *qc); 75 static int ahci_pmp_qc_defer(struct ata_queued_cmd *qc); 90 static void ahci_post_internal_cmd(struct ata_queued_cmd *qc); 1490 static unsigned int ahci_fill_sg(struct ata_queued_cmd *qc, void *cmd_tbl) ahci_fill_sg() argument 1501 for_each_sg(qc->sg, sg, qc->n_elem, si) { ahci_fill_sg() 1513 static int ahci_pmp_qc_defer(struct ata_queued_cmd *qc) ahci_pmp_qc_defer() argument 1515 struct ata_port *ap = qc->ap; ahci_pmp_qc_defer() 1519 return ata_std_qc_defer(qc); ahci_pmp_qc_defer() 1521 return sata_pmp_qc_defer_cmd_switch(qc); ahci_pmp_qc_defer() 1524 static void ahci_qc_prep(struct ata_queued_cmd *qc) ahci_qc_prep() argument 1526 struct ata_port *ap = qc->ap; ahci_qc_prep() 1528 int is_atapi = ata_is_atapi(qc->tf.protocol); ahci_qc_prep() 1538 cmd_tbl = pp->cmd_tbl + qc->tag * AHCI_CMD_TBL_SZ; ahci_qc_prep() 1540 ata_tf_to_fis(&qc->tf, qc->dev->link->pmp, 1, cmd_tbl); ahci_qc_prep() 1543 memcpy(cmd_tbl + AHCI_CMD_TBL_CDB, qc->cdb, qc->dev->cdb_len); ahci_qc_prep() 1547 if (qc->flags & ATA_QCFLAG_DMAMAP) ahci_qc_prep() 1548 n_elem = ahci_fill_sg(qc, cmd_tbl); ahci_qc_prep() 1553 opts = cmd_fis_len | n_elem << 16 | (qc->dev->link->pmp << 12); ahci_qc_prep() 1554 if (qc->tf.flags & ATA_TFLAG_WRITE) ahci_qc_prep() 1559 ahci_fill_cmd_slot(pp, qc->tag, opts); ahci_qc_prep() 1633 /* If qc is active, charge it; otherwise, the active ahci_error_intr() 1634 * link. There's no active qc on NCQ errors. It will ahci_error_intr() 1889 unsigned int ahci_qc_issue(struct ata_queued_cmd *qc) ahci_qc_issue() argument 1891 struct ata_port *ap = qc->ap; ahci_qc_issue() 1899 pp->active_link = qc->dev->link; ahci_qc_issue() 1901 if (qc->tf.protocol == ATA_PROT_NCQ) ahci_qc_issue() 1902 writel(1 << qc->tag, port_mmio + PORT_SCR_ACT); ahci_qc_issue() 1904 if (pp->fbs_enabled && pp->fbs_last_dev != qc->dev->link->pmp) { ahci_qc_issue() 1907 fbs |= qc->dev->link->pmp << PORT_FBS_DEV_OFFSET; ahci_qc_issue() 1909 pp->fbs_last_dev = qc->dev->link->pmp; ahci_qc_issue() 1912 writel(1 << qc->tag, port_mmio + PORT_CMD_ISSUE); ahci_qc_issue() 1914 ahci_sw_activity(qc->dev->link); ahci_qc_issue() 1920 static bool ahci_qc_fill_rtf(struct ata_queued_cmd *qc) ahci_qc_fill_rtf() argument 1922 struct ahci_port_priv *pp = qc->ap->private_data; ahci_qc_fill_rtf() 1926 rx_fis += qc->dev->link->pmp * AHCI_RX_FIS_SZ; ahci_qc_fill_rtf() 1934 if (qc->tf.protocol == ATA_PROT_PIO && qc->dma_dir == DMA_FROM_DEVICE && ahci_qc_fill_rtf() 1935 !(qc->flags & ATA_QCFLAG_FAILED)) { ahci_qc_fill_rtf() 1936 ata_tf_from_fis(rx_fis + RX_FIS_PIO_SETUP, &qc->result_tf); ahci_qc_fill_rtf() 1937 qc->result_tf.command = (rx_fis + RX_FIS_PIO_SETUP)[15]; ahci_qc_fill_rtf() 1939 ata_tf_from_fis(rx_fis + RX_FIS_D2H_REG, &qc->result_tf); ahci_qc_fill_rtf() 1986 static void ahci_post_internal_cmd(struct ata_queued_cmd *qc) ahci_post_internal_cmd() argument 1988 struct ata_port *ap = qc->ap; ahci_post_internal_cmd() 1991 if (qc->flags & ATA_QCFLAG_FAILED) ahci_post_internal_cmd()
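
ahci_qc_prep() is nearly complete in the fragments above: each tag owns a command table holding the command FIS, the optional ATAPI CDB, and the SG entries, while the matching command-list slot encodes FIS length, PRDT count, PMP number and direction. Reassembled here as a sketch (cmd_fis_len is five dwords for an H2D register FIS):

static void example_ahci_qc_prep(struct ata_queued_cmd *qc)
{
        struct ahci_port_priv *pp = qc->ap->private_data;
        int is_atapi = ata_is_atapi(qc->tf.protocol);
        const u32 cmd_fis_len = 5;      /* five dwords */
        unsigned int n_elem = 0;
        void *cmd_tbl;
        u32 opts;

        /* each tag owns one command table: CFIS + ACMD + SG table */
        cmd_tbl = pp->cmd_tbl + qc->tag * AHCI_CMD_TBL_SZ;

        ata_tf_to_fis(&qc->tf, qc->dev->link->pmp, 1, cmd_tbl);
        if (is_atapi)
                memcpy(cmd_tbl + AHCI_CMD_TBL_CDB, qc->cdb,
                       qc->dev->cdb_len);

        if (qc->flags & ATA_QCFLAG_DMAMAP)
                n_elem = ahci_fill_sg(qc, cmd_tbl);

        /* slot options: FIS length, PRDT count, PMP and direction */
        opts = cmd_fis_len | n_elem << 16 | (qc->dev->link->pmp << 12);
        if (qc->tf.flags & ATA_TFLAG_WRITE)
                opts |= AHCI_CMD_WRITE;
        if (is_atapi)
                opts |= AHCI_CMD_ATAPI | AHCI_CMD_PREFETCH;

        ahci_fill_cmd_slot(pp, qc->tag, opts);
}
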
|
H A D | pata_hpt3x2n.c | 240 * @qc: ATA command 245 static void hpt3x2n_bmdma_stop(struct ata_queued_cmd *qc) hpt3x2n_bmdma_stop() argument 247 struct ata_port *ap = qc->ap; hpt3x2n_bmdma_stop() 256 ata_bmdma_stop(qc); hpt3x2n_bmdma_stop() 311 static int hpt3x2n_qc_defer(struct ata_queued_cmd *qc) hpt3x2n_qc_defer() argument 313 struct ata_port *ap = qc->ap; hpt3x2n_qc_defer() 316 int dpll = hpt3x2n_use_dpll(ap, qc->tf.flags & ATA_TFLAG_WRITE); hpt3x2n_qc_defer() 319 rc = ata_std_qc_defer(qc); hpt3x2n_qc_defer() 328 static unsigned int hpt3x2n_qc_issue(struct ata_queued_cmd *qc) hpt3x2n_qc_issue() argument 330 struct ata_port *ap = qc->ap; hpt3x2n_qc_issue() 332 int dpll = hpt3x2n_use_dpll(ap, qc->tf.flags & ATA_TFLAG_WRITE); hpt3x2n_qc_issue() 341 return ata_bmdma_qc_issue(qc); hpt3x2n_qc_issue()
|
H A D | sata_mv.c | 607 static int mv_qc_defer(struct ata_queued_cmd *qc); 608 static void mv_qc_prep(struct ata_queued_cmd *qc); 609 static void mv_qc_prep_iie(struct ata_queued_cmd *qc); 610 static unsigned int mv_qc_issue(struct ata_queued_cmd *qc); 663 static int mv_check_atapi_dma(struct ata_queued_cmd *qc); 664 static void mv_bmdma_setup(struct ata_queued_cmd *qc); 665 static void mv_bmdma_start(struct ata_queued_cmd *qc); 666 static void mv_bmdma_stop(struct ata_queued_cmd *qc); 1435 static int mv_qc_defer(struct ata_queued_cmd *qc) mv_qc_defer() argument 1437 struct ata_link *link = qc->dev->link; mv_qc_defer() 1460 qc->flags |= ATA_QCFLAG_CLEAR_EXCL; mv_qc_defer() 1467 * If the port is completely idle, then allow the new qc. mv_qc_defer() 1480 if (ata_is_ncq(qc->tf.protocol)) mv_qc_defer() 1793 * @qc: queued command whose SG list to source from 1800 static void mv_fill_sg(struct ata_queued_cmd *qc) mv_fill_sg() argument 1802 struct mv_port_priv *pp = qc->ap->private_data; mv_fill_sg() 1807 mv_sg = pp->sg_tbl[qc->tag]; mv_fill_sg() 1808 for_each_sg(qc->sg, sg, qc->n_elem, si) { mv_fill_sg() 1859 * @qc: queued command to check for chipset/DMA compatibility. 1868 static int mv_check_atapi_dma(struct ata_queued_cmd *qc) mv_check_atapi_dma() argument 1870 struct scsi_cmnd *scmd = qc->scsicmd; mv_check_atapi_dma() 1891 * @qc: queued command to prepare DMA for. 1896 static void mv_bmdma_setup(struct ata_queued_cmd *qc) mv_bmdma_setup() argument 1898 struct ata_port *ap = qc->ap; mv_bmdma_setup() 1902 mv_fill_sg(qc); mv_bmdma_setup() 1908 writel((pp->sg_tbl_dma[qc->tag] >> 16) >> 16, mv_bmdma_setup() 1910 writelfl(pp->sg_tbl_dma[qc->tag], mv_bmdma_setup() 1914 ap->ops->sff_exec_command(ap, &qc->tf); mv_bmdma_setup() 1919 * @qc: queued command to start DMA on. 1924 static void mv_bmdma_start(struct ata_queued_cmd *qc) mv_bmdma_start() argument 1926 struct ata_port *ap = qc->ap; mv_bmdma_start() 1928 unsigned int rw = (qc->tf.flags & ATA_TFLAG_WRITE); mv_bmdma_start() 1937 * @qc: queued command to stop DMA on. 
1960 static void mv_bmdma_stop(struct ata_queued_cmd *qc) mv_bmdma_stop() argument 1962 mv_bmdma_stop_ap(qc->ap); mv_bmdma_stop() 2004 static void mv_rw_multi_errata_sata24(struct ata_queued_cmd *qc) mv_rw_multi_errata_sata24() argument 2006 struct ata_taskfile *tf = &qc->tf; mv_rw_multi_errata_sata24() 2021 if (qc->dev->multi_count > 7) { mv_rw_multi_errata_sata24() 2039 * @qc: queued command to prepare 2049 static void mv_qc_prep(struct ata_queued_cmd *qc) mv_qc_prep() argument 2051 struct ata_port *ap = qc->ap; mv_qc_prep() 2054 struct ata_taskfile *tf = &qc->tf; mv_qc_prep() 2066 mv_rw_multi_errata_sata24(qc); mv_qc_prep() 2076 WARN_ON(MV_MAX_Q_DEPTH <= qc->tag); mv_qc_prep() 2077 flags |= qc->tag << CRQB_TAG_SHIFT; mv_qc_prep() 2078 flags |= (qc->dev->link->pmp & 0xf) << CRQB_PMP_SHIFT; mv_qc_prep() 2084 cpu_to_le32(pp->sg_tbl_dma[qc->tag] & 0xffffffff); mv_qc_prep() 2086 cpu_to_le32((pp->sg_tbl_dma[qc->tag] >> 16) >> 16); mv_qc_prep() 2133 if (!(qc->flags & ATA_QCFLAG_DMAMAP)) mv_qc_prep() 2135 mv_fill_sg(qc); mv_qc_prep() 2140 * @qc: queued command to prepare 2150 static void mv_qc_prep_iie(struct ata_queued_cmd *qc) mv_qc_prep_iie() argument 2152 struct ata_port *ap = qc->ap; mv_qc_prep_iie() 2155 struct ata_taskfile *tf = &qc->tf; mv_qc_prep_iie() 2169 WARN_ON(MV_MAX_Q_DEPTH <= qc->tag); mv_qc_prep_iie() 2170 flags |= qc->tag << CRQB_TAG_SHIFT; mv_qc_prep_iie() 2171 flags |= qc->tag << CRQB_HOSTQ_SHIFT; mv_qc_prep_iie() 2172 flags |= (qc->dev->link->pmp & 0xf) << CRQB_PMP_SHIFT; mv_qc_prep_iie() 2178 crqb->addr = cpu_to_le32(pp->sg_tbl_dma[qc->tag] & 0xffffffff); mv_qc_prep_iie() 2179 crqb->addr_hi = cpu_to_le32((pp->sg_tbl_dma[qc->tag] >> 16) >> 16); mv_qc_prep_iie() 2203 if (!(qc->flags & ATA_QCFLAG_DMAMAP)) mv_qc_prep_iie() 2205 mv_fill_sg(qc); mv_qc_prep_iie() 2281 * @qc: queued command to start 2296 static unsigned int mv_qc_issue_fis(struct ata_queued_cmd *qc) mv_qc_issue_fis() argument 2298 struct ata_port *ap = qc->ap; mv_qc_issue_fis() 2300 struct ata_link *link = qc->dev->link; mv_qc_issue_fis() 2304 ata_tf_to_fis(&qc->tf, link->pmp, 1, (void *)fis); mv_qc_issue_fis() 2309 switch (qc->tf.protocol) { mv_qc_issue_fis() 2318 if (qc->tf.flags & ATA_TFLAG_WRITE) mv_qc_issue_fis() 2328 if (qc->tf.flags & ATA_TFLAG_POLLING) mv_qc_issue_fis() 2335 * @qc: queued command to start 2345 static unsigned int mv_qc_issue(struct ata_queued_cmd *qc) mv_qc_issue() argument 2348 struct ata_port *ap = qc->ap; mv_qc_issue() 2356 switch (qc->tf.protocol) { mv_qc_issue() 2358 if (qc->tf.command == ATA_CMD_DSM) { mv_qc_issue() 2365 mv_start_edma(ap, port_mmio, pp, qc->tf.protocol); mv_qc_issue() 2386 if (limit_warnings > 0 && (qc->nbytes / qc->sect_size) > 1) { mv_qc_issue() 2388 ata_link_warn(qc->dev->link, DRV_NAME mv_qc_issue() 2397 qc->tf.flags |= ATA_TFLAG_POLLING; mv_qc_issue() 2401 if (qc->tf.flags & ATA_TFLAG_POLLING) mv_qc_issue() 2413 mv_pmp_select(ap, qc->dev->link->pmp); mv_qc_issue() 2415 if (qc->tf.command == ATA_CMD_READ_LOG_EXT) { mv_qc_issue() 2429 return mv_qc_issue_fis(qc); mv_qc_issue() 2431 return ata_bmdma_qc_issue(qc); mv_qc_issue() 2437 struct ata_queued_cmd *qc; mv_get_active_qc() local 2441 qc = ata_qc_from_tag(ap, ap->link.active_tag); mv_get_active_qc() 2442 if (qc && !(qc->tf.flags & ATA_TFLAG_POLLING)) mv_get_active_qc() 2443 return qc; mv_get_active_qc() 2630 struct ata_queued_cmd *qc = ata_qc_from_tag(ap, ap->link.active_tag); mv_unexpected_intr() local 2631 if (qc && (qc->tf.flags & ATA_TFLAG_POLLING)) mv_unexpected_intr() 2660 struct ata_queued_cmd *qc; 
mv_err_intr() local 2687 qc = mv_get_active_qc(ap); mv_err_intr() 2756 if (qc) mv_err_intr() 2757 qc->err_mask |= err_mask; mv_err_intr() 2779 if (qc) mv_err_intr() 2780 ata_link_abort(qc->dev->link); mv_err_intr() 2882 struct ata_queued_cmd *qc = mv_get_active_qc(ap); mv_port_intr() local 2883 if (qc) mv_port_intr() 2884 ata_bmdma_port_intr(ap, qc); mv_port_intr() 2967 struct ata_queued_cmd *qc; mv_pci_error() local 2991 qc = ata_qc_from_tag(ap, ap->link.active_tag); mv_pci_error() 2992 if (qc) mv_pci_error() 2993 qc->err_mask |= err_mask; mv_pci_error()
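
The mv_err_intr()/mv_pci_error() fragments show the standard error-interrupt bookkeeping: find the active non-polling qc, fold the decoded error bits into qc->err_mask, then hand the mess to EH via a link (or, with no owning qc, a port) abort. A condensed sketch of that flow:

static void example_error_intr(struct ata_port *ap, unsigned int err_mask)
{
        struct ata_queued_cmd *qc =
                ata_qc_from_tag(ap, ap->link.active_tag);

        if (qc && !(qc->tf.flags & ATA_TFLAG_POLLING)) {
                qc->err_mask |= err_mask;       /* charge the owner */
                ata_link_abort(qc->dev->link);  /* abort qcs, run EH */
        } else {
                ata_port_abort(ap);     /* no owning qc: abort the port */
        }
}
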
|
H A D | pata_it821x.c | 338 * @qc: Command in progress 345 static void it821x_passthru_bmdma_start(struct ata_queued_cmd *qc) it821x_passthru_bmdma_start() argument 347 struct ata_port *ap = qc->ap; it821x_passthru_bmdma_start() 348 struct ata_device *adev = qc->dev; it821x_passthru_bmdma_start() 356 ata_bmdma_start(qc); it821x_passthru_bmdma_start() 361 * @qc: ATA command 368 static void it821x_passthru_bmdma_stop(struct ata_queued_cmd *qc) it821x_passthru_bmdma_stop() argument 370 struct ata_port *ap = qc->ap; it821x_passthru_bmdma_stop() 371 struct ata_device *adev = qc->dev; it821x_passthru_bmdma_stop() 375 ata_bmdma_stop(qc); it821x_passthru_bmdma_stop() 402 * it821x_smart_qc_issue - wrap qc issue prot 403 * @qc: command 410 static unsigned int it821x_smart_qc_issue(struct ata_queued_cmd *qc) it821x_smart_qc_issue() argument 412 switch(qc->tf.command) it821x_smart_qc_issue() 432 return ata_bmdma_qc_issue(qc); it821x_smart_qc_issue() 434 printk(KERN_DEBUG "it821x: can't process command 0x%02X\n", qc->tf.command); it821x_smart_qc_issue() 439 * it821x_passthru_qc_issue - wrap qc issue prot 440 * @qc: command 447 static unsigned int it821x_passthru_qc_issue(struct ata_queued_cmd *qc) it821x_passthru_qc_issue() argument 449 it821x_passthru_dev_select(qc->ap, qc->dev->devno); it821x_passthru_qc_issue() 450 return ata_bmdma_qc_issue(qc); it821x_passthru_qc_issue() 569 * @qc: Command we are about to issue 575 static int it821x_check_atapi_dma(struct ata_queued_cmd *qc) it821x_check_atapi_dma() argument 577 struct ata_port *ap = qc->ap; it821x_check_atapi_dma() 581 if (ata_qc_raw_nbytes(qc) < 2048) it821x_check_atapi_dma()
|
H A D | pata_icside.c | 219 static void pata_icside_bmdma_setup(struct ata_queued_cmd *qc) pata_icside_bmdma_setup() argument 221 struct ata_port *ap = qc->ap; pata_icside_bmdma_setup() 223 unsigned int write = qc->tf.flags & ATA_TFLAG_WRITE; pata_icside_bmdma_setup() 236 set_dma_speed(state->dma, state->port[ap->port_no].speed[qc->dev->devno]); pata_icside_bmdma_setup() 237 set_dma_sg(state->dma, qc->sg, qc->n_elem); pata_icside_bmdma_setup() 241 ap->ops->sff_exec_command(ap, &qc->tf); pata_icside_bmdma_setup() 244 static void pata_icside_bmdma_start(struct ata_queued_cmd *qc) pata_icside_bmdma_start() argument 246 struct ata_port *ap = qc->ap; pata_icside_bmdma_start() 253 static void pata_icside_bmdma_stop(struct ata_queued_cmd *qc) pata_icside_bmdma_stop() argument 255 struct ata_port *ap = qc->ap; pata_icside_bmdma_stop()
|
H A D | pata_acpi.c | 159 * @qc: command pending 166 static unsigned int pacpi_qc_issue(struct ata_queued_cmd *qc) pacpi_qc_issue() argument 168 struct ata_port *ap = qc->ap; pacpi_qc_issue() 169 struct ata_device *adev = qc->dev; pacpi_qc_issue() 173 return ata_bmdma_qc_issue(qc); pacpi_qc_issue() 181 return ata_bmdma_qc_issue(qc); pacpi_qc_issue()
|
H A D | pata_hpt3x3.c | 110 * @qc: Queued command 116 static void hpt3x3_bmdma_setup(struct ata_queued_cmd *qc) hpt3x3_bmdma_setup() argument 118 struct ata_port *ap = qc->ap; hpt3x3_bmdma_setup() 122 return ata_bmdma_setup(qc); hpt3x3_bmdma_setup() 127 * @qc: Queued command 132 static int hpt3x3_atapi_dma(struct ata_queued_cmd *qc) hpt3x3_atapi_dma() argument
|
H A D | pata_mpc52xx.c | 443 mpc52xx_ata_build_dmatable(struct ata_queued_cmd *qc) mpc52xx_ata_build_dmatable() argument 445 struct ata_port *ap = qc->ap; mpc52xx_ata_build_dmatable() 448 unsigned int read = !(qc->tf.flags & ATA_TFLAG_WRITE), si; mpc52xx_ata_build_dmatable() 457 for_each_sg(qc->sg, sg, qc->n_elem, si) { mpc52xx_ata_build_dmatable() 499 mpc52xx_bmdma_setup(struct ata_queued_cmd *qc) mpc52xx_bmdma_setup() argument 501 struct ata_port *ap = qc->ap; mpc52xx_bmdma_setup() 505 unsigned int read = !(qc->tf.flags & ATA_TFLAG_WRITE); mpc52xx_bmdma_setup() 508 if (!mpc52xx_ata_build_dmatable(qc)) mpc52xx_bmdma_setup() 545 if (priv->timings[qc->dev->devno].using_udma) mpc52xx_bmdma_setup() 552 ap->ops->sff_exec_command(ap, &qc->tf); mpc52xx_bmdma_setup() 556 mpc52xx_bmdma_start(struct ata_queued_cmd *qc) mpc52xx_bmdma_start() argument 558 struct ata_port *ap = qc->ap; mpc52xx_bmdma_start() 566 mpc52xx_bmdma_stop(struct ata_queued_cmd *qc) mpc52xx_bmdma_stop() argument 568 struct ata_port *ap = qc->ap; mpc52xx_bmdma_stop()
|
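mpc52xx_ata_build_dmatable() above walks the command's scatterlist with for_each_sg() and emits one engine descriptor per segment. A condensed sketch of that loop, assuming a hypothetical my_push_desc() descriptor writer:

    #include <linux/libata.h>
    #include <linux/scatterlist.h>

    static void my_push_desc(struct ata_port *ap, dma_addr_t addr, u32 len,
                             unsigned int read)
    {
            /* Stub: a real driver writes a controller-specific descriptor. */
    }

    static unsigned int my_build_dmatable(struct ata_queued_cmd *qc)
    {
            unsigned int read = !(qc->tf.flags & ATA_TFLAG_WRITE), si;
            struct scatterlist *sg;

            for_each_sg(qc->sg, sg, qc->n_elem, si)
                    my_push_desc(qc->ap, sg_dma_address(sg), sg_dma_len(sg),
                                 read);

            return qc->n_elem;      /* number of descriptors built */
    }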
H A D | sata_rcar.c | 488 static void sata_rcar_drain_fifo(struct ata_queued_cmd *qc) sata_rcar_drain_fifo() argument 494 if (qc == NULL || qc->dma_dir == DMA_TO_DEVICE) sata_rcar_drain_fifo() 497 ap = qc->ap; sata_rcar_drain_fifo() 528 static void sata_rcar_bmdma_fill_sg(struct ata_queued_cmd *qc) sata_rcar_bmdma_fill_sg() argument 530 struct ata_port *ap = qc->ap; sata_rcar_bmdma_fill_sg() 535 for_each_sg(qc->sg, sg, qc->n_elem, si) { sata_rcar_bmdma_fill_sg() 554 static void sata_rcar_qc_prep(struct ata_queued_cmd *qc) sata_rcar_qc_prep() argument 556 if (!(qc->flags & ATA_QCFLAG_DMAMAP)) sata_rcar_qc_prep() 559 sata_rcar_bmdma_fill_sg(qc); sata_rcar_qc_prep() 562 static void sata_rcar_bmdma_setup(struct ata_queued_cmd *qc) sata_rcar_bmdma_setup() argument 564 struct ata_port *ap = qc->ap; sata_rcar_bmdma_setup() 565 unsigned int rw = qc->tf.flags & ATA_TFLAG_WRITE; sata_rcar_bmdma_setup() 586 ap->ops->sff_exec_command(ap, &qc->tf); sata_rcar_bmdma_setup() 589 static void sata_rcar_bmdma_start(struct ata_queued_cmd *qc) sata_rcar_bmdma_start() argument 591 struct ata_port *ap = qc->ap; sata_rcar_bmdma_start() 603 static void sata_rcar_bmdma_stop(struct ata_queued_cmd *qc) sata_rcar_bmdma_stop() argument 605 struct ata_port *ap = qc->ap; sata_rcar_bmdma_stop() 709 struct ata_queued_cmd *qc; sata_rcar_ata_interrupt() local 712 qc = ata_qc_from_tag(ap, ap->link.active_tag); sata_rcar_ata_interrupt() 713 if (qc) sata_rcar_ata_interrupt() 714 handled |= ata_bmdma_port_intr(ap, qc); sata_rcar_ata_interrupt()
|
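sata_rcar_qc_prep() above illustrates the standard guard in a qc_prep hook: commands without a DMA mapping (PIO or non-data) have nothing to prepare. A minimal sketch, with my_fill_sg() standing in for the driver's table builder:

    #include <linux/libata.h>

    static void my_fill_sg(struct ata_queued_cmd *qc)
    {
            /* Stub: program the BMDMA scatter/gather table here. */
    }

    static void my_qc_prep(struct ata_queued_cmd *qc)
    {
            if (!(qc->flags & ATA_QCFLAG_DMAMAP))
                    return;         /* PIO or non-data command */

            my_fill_sg(qc);
    }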
H A D | pata_ep93xx.c | 701 static void ep93xx_pata_dma_start(struct ata_queued_cmd *qc) ep93xx_pata_dma_start() argument 704 struct ep93xx_pata_data *drv_data = qc->ap->host->private_data; ep93xx_pata_dma_start() 706 struct ata_device *adev = qc->dev; ep93xx_pata_dma_start() 707 u32 v = qc->dma_dir == DMA_TO_DEVICE ? IDEUDMAOP_RWOP : 0; ep93xx_pata_dma_start() 708 struct dma_chan *channel = qc->dma_dir == DMA_TO_DEVICE ep93xx_pata_dma_start() 711 txd = dmaengine_prep_slave_sg(channel, qc->sg, qc->n_elem, qc->dma_dir, ep93xx_pata_dma_start() 714 dev_err(qc->ap->dev, "failed to prepare slave for sg dma\n"); ep93xx_pata_dma_start() 721 dev_err(qc->ap->dev, "failed to submit dma transfer\n"); ep93xx_pata_dma_start() 742 static void ep93xx_pata_dma_stop(struct ata_queued_cmd *qc) ep93xx_pata_dma_stop() argument 744 struct ep93xx_pata_data *drv_data = qc->ap->host->private_data; ep93xx_pata_dma_stop() 760 qc->dev->pio_mode - XFER_PIO_0); ep93xx_pata_dma_stop() 762 ata_sff_dma_pause(qc->ap); ep93xx_pata_dma_stop() 765 static void ep93xx_pata_dma_setup(struct ata_queued_cmd *qc) ep93xx_pata_dma_setup() argument 767 qc->ap->ops->sff_exec_command(qc->ap, &qc->tf); ep93xx_pata_dma_setup() 840 static void ep93xx_pata_drain_fifo(struct ata_queued_cmd *qc) ep93xx_pata_drain_fifo() argument 847 if (qc == NULL || qc->dma_dir == DMA_TO_DEVICE) ep93xx_pata_drain_fifo() 850 ap = qc->ap; ep93xx_pata_drain_fifo()
|
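ep93xx_pata_dma_start() above drives the transfer through the generic dmaengine API rather than BMDMA registers. A sketch of that sequence under the same assumptions (the channel would come from driver private data; the explicit conversion avoids passing qc->dma_dir, a dma_data_direction, where a dma_transfer_direction is expected):

    #include <linux/dmaengine.h>
    #include <linux/libata.h>

    static void my_dma_start(struct ata_queued_cmd *qc, struct dma_chan *chan)
    {
            struct dma_async_tx_descriptor *txd;
            enum dma_transfer_direction dir =
                    qc->dma_dir == DMA_TO_DEVICE ? DMA_MEM_TO_DEV
                                                 : DMA_DEV_TO_MEM;

            txd = dmaengine_prep_slave_sg(chan, qc->sg, qc->n_elem, dir,
                                          DMA_CTRL_ACK);
            if (!txd) {
                    dev_err(qc->ap->dev, "failed to prepare slave sg\n");
                    return;
            }

            if (dma_submit_error(dmaengine_submit(txd))) {
                    dev_err(qc->ap->dev, "failed to submit dma transfer\n");
                    return;
            }

            dma_async_issue_pending(chan);  /* actually start the engine */
    }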
H A D | pata_cmd640.c | 122 * @qc: Command to be issued 128 static unsigned int cmd640_qc_issue(struct ata_queued_cmd *qc) cmd640_qc_issue() argument 130 struct ata_port *ap = qc->ap; cmd640_qc_issue() 131 struct ata_device *adev = qc->dev; cmd640_qc_issue() 139 return ata_sff_qc_issue(qc); cmd640_qc_issue()
|
H A D | pata_mpiix.c | 113 * @qc: command pending 122 static unsigned int mpiix_qc_issue(struct ata_queued_cmd *qc) mpiix_qc_issue() argument 124 struct ata_port *ap = qc->ap; mpiix_qc_issue() 125 struct ata_device *adev = qc->dev; mpiix_qc_issue() 135 return ata_sff_qc_issue(qc); mpiix_qc_issue()
|
H A D | pata_oldpiix.c | 183 * @qc: command pending 192 static unsigned int oldpiix_qc_issue(struct ata_queued_cmd *qc) oldpiix_qc_issue() argument 194 struct ata_port *ap = qc->ap; oldpiix_qc_issue() 195 struct ata_device *adev = qc->dev; oldpiix_qc_issue() 202 return ata_bmdma_qc_issue(qc); oldpiix_qc_issue()
|
H A D | pata_radisys.c | 158 * @qc: command pending 167 static unsigned int radisys_qc_issue(struct ata_queued_cmd *qc) radisys_qc_issue() argument 169 struct ata_port *ap = qc->ap; radisys_qc_issue() 170 struct ata_device *adev = qc->dev; radisys_qc_issue() 181 return ata_bmdma_qc_issue(qc); radisys_qc_issue()
|
H A D | pata_macio.c | 510 static void pata_macio_qc_prep(struct ata_queued_cmd *qc) pata_macio_qc_prep() argument 512 unsigned int write = (qc->tf.flags & ATA_TFLAG_WRITE); pata_macio_qc_prep() 513 struct ata_port *ap = qc->ap; pata_macio_qc_prep() 519 dev_dbgdma(priv->dev, "%s: qc %p flags %lx, write %d dev %d\n", pata_macio_qc_prep() 520 __func__, qc, qc->flags, write, qc->dev->devno); pata_macio_qc_prep() 522 if (!(qc->flags & ATA_QCFLAG_DMAMAP)) pata_macio_qc_prep() 528 for_each_sg(qc->sg, sg, qc->n_elem, si) { pata_macio_qc_prep() 588 static void pata_macio_bmdma_setup(struct ata_queued_cmd *qc) pata_macio_bmdma_setup() argument 590 struct ata_port *ap = qc->ap; pata_macio_bmdma_setup() 593 int dev = qc->dev->devno; pata_macio_bmdma_setup() 595 dev_dbgdma(priv->dev, "%s: qc %p\n", __func__, qc); pata_macio_bmdma_setup() 608 if (!(qc->tf.flags & ATA_TFLAG_WRITE)) pata_macio_bmdma_setup() 614 ap->ops->sff_exec_command(ap, &qc->tf); pata_macio_bmdma_setup() 617 static void pata_macio_bmdma_start(struct ata_queued_cmd *qc) pata_macio_bmdma_start() argument 619 struct ata_port *ap = qc->ap; pata_macio_bmdma_start() 623 dev_dbgdma(priv->dev, "%s: qc %p\n", __func__, qc); pata_macio_bmdma_start() 630 static void pata_macio_bmdma_stop(struct ata_queued_cmd *qc) pata_macio_bmdma_stop() argument 632 struct ata_port *ap = qc->ap; pata_macio_bmdma_stop() 637 dev_dbgdma(priv->dev, "%s: qc %p\n", __func__, qc); pata_macio_bmdma_stop()
|
H A D | libata.h | 91 extern void ata_sg_clean(struct ata_queued_cmd *qc); 92 extern void ata_qc_free(struct ata_queued_cmd *qc); 93 extern void ata_qc_issue(struct ata_queued_cmd *qc); 94 extern void __ata_qc_complete(struct ata_queued_cmd *qc); 95 extern int atapi_check_dma(struct ata_queued_cmd *qc); 159 extern void ata_qc_schedule_eh(struct ata_queued_cmd *qc); ata_acpi_bind_dev()
|
H A D | pata_cs5530.c | 135 * @qc: command pending 143 static unsigned int cs5530_qc_issue(struct ata_queued_cmd *qc) cs5530_qc_issue() argument 145 struct ata_port *ap = qc->ap; cs5530_qc_issue() 146 struct ata_device *adev = qc->dev; cs5530_qc_issue() 158 return ata_bmdma_qc_issue(qc); cs5530_qc_issue()
|
H A D | sata_vsc.c | 237 struct ata_queued_cmd *qc; vsc_port_intr() local 245 qc = ata_qc_from_tag(ap, ap->link.active_tag); vsc_port_intr() 246 if (qc && likely(!(qc->tf.flags & ATA_TFLAG_POLLING))) vsc_port_intr() 247 handled = ata_bmdma_port_intr(ap, qc); vsc_port_intr()
|
H A D | pata_artop.c | 271 * @qc: command 276 static int artop6210_qc_defer(struct ata_queued_cmd *qc) artop6210_qc_defer() argument 278 struct ata_host *host = qc->ap->host; artop6210_qc_defer() 279 struct ata_port *alt = host->ports[1 ^ qc->ap->port_no]; artop6210_qc_defer() 283 rc = ata_std_qc_defer(qc); artop6210_qc_defer()
|
H A D | pata_pcmcia.c | 119 * @qc: command 127 static void pcmcia_8bit_drain_fifo(struct ata_queued_cmd *qc) pcmcia_8bit_drain_fifo() argument 133 if (qc == NULL || qc->dma_dir == DMA_TO_DEVICE) pcmcia_8bit_drain_fifo() 136 ap = qc->ap; pcmcia_8bit_drain_fifo()
|
H A D | pata_legacy.c | 605 * @qc: command pending 618 static unsigned int opti82c46x_qc_issue(struct ata_queued_cmd *qc) opti82c46x_qc_issue() argument 620 struct ata_port *ap = qc->ap; opti82c46x_qc_issue() 621 struct ata_device *adev = qc->dev; opti82c46x_qc_issue() 629 return ata_sff_qc_issue(qc); opti82c46x_qc_issue() 683 * @qc: command pending 689 static unsigned int qdi_qc_issue(struct ata_queued_cmd *qc) qdi_qc_issue() argument 691 struct ata_port *ap = qc->ap; qdi_qc_issue() 692 struct ata_device *adev = qc->dev; qdi_qc_issue() 702 return ata_sff_qc_issue(qc); qdi_qc_issue()
|
H A D | sata_via.c | 84 static void vt6420_bmdma_start(struct ata_queued_cmd *qc); 382 static void vt6420_bmdma_start(struct ata_queued_cmd *qc) vt6420_bmdma_start() argument 384 struct ata_port *ap = qc->ap; vt6420_bmdma_start() 385 if ((qc->tf.command == ATA_CMD_PACKET) && vt6420_bmdma_start() 386 (qc->scsicmd->sc_data_direction == DMA_TO_DEVICE)) { vt6420_bmdma_start() 390 ata_bmdma_start(qc); vt6420_bmdma_start()
|
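vt6420_bmdma_start() above special-cases ATAPI packet writes before falling back to ata_bmdma_start(). The same quirk shape as a sketch, with my_slow_path() as a hypothetical placeholder for the workaround:

    #include <linux/libata.h>
    #include <scsi/scsi_cmnd.h>

    static void my_slow_path(struct ata_queued_cmd *qc)
    {
            /* Stub: e.g. fall back to PIO for this transfer. */
    }

    static void my_bmdma_start(struct ata_queued_cmd *qc)
    {
            if (qc->tf.command == ATA_CMD_PACKET &&
                qc->scsicmd->sc_data_direction == DMA_TO_DEVICE) {
                    my_slow_path(qc);       /* quirky ATAPI write */
                    return;
            }
            ata_bmdma_start(qc);
    }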
H A D | pata_cmd64x.c | 311 * @qc: Command in progress 316 static void cmd646r1_bmdma_stop(struct ata_queued_cmd *qc) cmd646r1_bmdma_stop() argument 318 ata_bmdma_stop(qc); cmd646r1_bmdma_stop()
|
H A D | pata_hpt37x.c | 469 * @qc: ATA command 474 static void hpt370_bmdma_stop(struct ata_queued_cmd *qc) hpt370_bmdma_stop() argument 476 struct ata_port *ap = qc->ap; hpt370_bmdma_stop() 501 ata_bmdma_stop(qc); hpt370_bmdma_stop() 563 * @qc: ATA command 568 static void hpt37x_bmdma_stop(struct ata_queued_cmd *qc) hpt37x_bmdma_stop() argument 570 struct ata_port *ap = qc->ap; hpt37x_bmdma_stop() 579 ata_bmdma_stop(qc); hpt37x_bmdma_stop()
|
H A D | pata_pdc2027x.c | 72 static int pdc2027x_check_atapi_dma(struct ata_queued_cmd *qc); 433 * @qc: Metadata associated with taskfile to check 441 static int pdc2027x_check_atapi_dma(struct ata_queued_cmd *qc) pdc2027x_check_atapi_dma() argument 443 struct scsi_cmnd *cmd = qc->scsicmd; pdc2027x_check_atapi_dma()
|
H A D | ahci_xgene.c | 182 * @qc: Command to issue 196 static unsigned int xgene_ahci_qc_issue(struct ata_queued_cmd *qc) xgene_ahci_qc_issue() argument 198 struct ata_port *ap = qc->ap; xgene_ahci_qc_issue() 212 port_fbs |= qc->dev->link->pmp << PORT_FBS_DEV_OFFSET; xgene_ahci_qc_issue() 221 rc = ahci_qc_issue(qc); xgene_ahci_qc_issue() 224 ctx->last_cmd[ap->port_no] = qc->tf.command; xgene_ahci_qc_issue()
|
H A D | libata-pmp.c | 99 * @qc: ATA command in question 110 int sata_pmp_qc_defer_cmd_switch(struct ata_queued_cmd *qc) sata_pmp_qc_defer_cmd_switch() argument 112 struct ata_link *link = qc->dev->link; sata_pmp_qc_defer_cmd_switch() 117 qc->flags |= ATA_QCFLAG_CLEAR_EXCL; sata_pmp_qc_defer_cmd_switch() 118 return ata_std_qc_defer(qc); sata_pmp_qc_defer_cmd_switch()
|
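sata_pmp_qc_defer_cmd_switch() above layers PMP-aware exclusion on top of ata_std_qc_defer(). A deliberately simplified sketch of that shape (the real helper also tracks active links and can return ATA_DEFER_PORT to claim the link exclusively):

    #include <linux/libata.h>

    static int my_qc_defer(struct ata_queued_cmd *qc)
    {
            struct ata_link *link = qc->dev->link;
            struct ata_port *ap = link->ap;

            /* Drop any exclusive-link claim once this command completes. */
            if (ap->excl_link == NULL || ap->excl_link == link)
                    qc->flags |= ATA_QCFLAG_CLEAR_EXCL;

            return ata_std_qc_defer(qc);
    }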
H A D | pata_ali.c | 321 static int ali_check_atapi_dma(struct ata_queued_cmd *qc) ali_check_atapi_dma() argument 336 if (atapi_cmd_type(qc->cdb[0]) == ATAPI_MISC) ali_check_atapi_dma()
|
H A D | ahci.h | 385 unsigned int ahci_qc_issue(struct ata_queued_cmd *qc);
|
/linux-4.1.27/arch/arm64/kernel/ |
H A D | io.c | 86 u64 qc = (u8)c; __memset_io() local 88 qc |= qc << 8; __memset_io() 89 qc |= qc << 16; __memset_io() 90 qc |= qc << 32; __memset_io() 99 __raw_writeq(qc, dst); __memset_io()
|
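__memset_io() above widens the fill byte to 64 bits by repeated doubling so each 8-byte store paints eight bytes at once. The trick is self-contained enough to demonstrate in plain C:

    #include <stdint.h>
    #include <stdio.h>

    static uint64_t replicate_byte(uint8_t c)
    {
            uint64_t qc = c;

            qc |= qc << 8;          /* 2 copies */
            qc |= qc << 16;         /* 4 copies */
            qc |= qc << 32;         /* 8 copies */
            return qc;
    }

    int main(void)
    {
            /* prints abababababababab */
            printf("%016llx\n", (unsigned long long)replicate_byte(0xab));
            return 0;
    }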
/linux-4.1.27/drivers/scsi/libsas/ |
H A D | sas_ata.c | 97 struct ata_queued_cmd *qc = task->uldd_task; sas_ata_task_done() local 110 else if (qc && qc->scsicmd) sas_ata_task_done() 111 ASSIGN_SAS_TASK(qc->scsicmd, NULL); sas_ata_task_done() 118 if (!qc) sas_ata_task_done() 121 ap = qc->ap; sas_ata_task_done() 128 if (qc->scsicmd) sas_ata_task_done() 145 qc->err_mask |= ac_err_mask(dev->sata_dev.fis[2]); sas_ata_task_done() 149 qc->flags |= ATA_QCFLAG_FAILED; sas_ata_task_done() 158 qc->err_mask = ac; sas_ata_task_done() 161 qc->flags |= ATA_QCFLAG_FAILED; sas_ata_task_done() 169 qc->lldd_task = NULL; sas_ata_task_done() 170 ata_qc_complete(qc); sas_ata_task_done() 177 static unsigned int sas_ata_qc_issue(struct ata_queued_cmd *qc) sas_ata_qc_issue() argument 184 struct ata_port *ap = qc->ap; sas_ata_qc_issue() 207 if (qc->tf.command == ATA_CMD_FPDMA_WRITE || sas_ata_qc_issue() 208 qc->tf.command == ATA_CMD_FPDMA_READ) { sas_ata_qc_issue() 210 qc->tf.nsect = 0; sas_ata_qc_issue() 213 ata_tf_to_fis(&qc->tf, qc->dev->link->pmp, 1, (u8 *)&task->ata_task.fis); sas_ata_qc_issue() 214 task->uldd_task = qc; sas_ata_qc_issue() 215 if (ata_is_atapi(qc->tf.protocol)) { sas_ata_qc_issue() 216 memcpy(task->ata_task.atapi_packet, qc->cdb, qc->dev->cdb_len); sas_ata_qc_issue() 217 task->total_xfer_len = qc->nbytes; sas_ata_qc_issue() 218 task->num_scatter = qc->n_elem; sas_ata_qc_issue() 220 for_each_sg(qc->sg, sg, qc->n_elem, si) sas_ata_qc_issue() 227 task->data_dir = qc->dma_dir; sas_ata_qc_issue() 228 task->scatter = qc->sg; sas_ata_qc_issue() 231 qc->lldd_task = task; sas_ata_qc_issue() 233 switch (qc->tf.protocol) { sas_ata_qc_issue() 243 if (qc->scsicmd) sas_ata_qc_issue() 244 ASSIGN_SAS_TASK(qc->scsicmd, task); sas_ata_qc_issue() 250 if (qc->scsicmd) sas_ata_qc_issue() 251 ASSIGN_SAS_TASK(qc->scsicmd, NULL); sas_ata_qc_issue() 262 static bool sas_ata_qc_fill_rtf(struct ata_queued_cmd *qc) sas_ata_qc_fill_rtf() argument 264 struct domain_device *dev = qc->ap->private_data; sas_ata_qc_fill_rtf() 266 ata_tf_from_fis(dev->sata_dev.fis, &qc->result_tf); sas_ata_qc_fill_rtf() 473 static void sas_ata_post_internal(struct ata_queued_cmd *qc) sas_ata_post_internal() argument 475 if (qc->flags & ATA_QCFLAG_FAILED) sas_ata_post_internal() 476 qc->err_mask |= AC_ERR_OTHER; sas_ata_post_internal() 478 if (qc->err_mask) { sas_ata_post_internal() 481 * has decided to kill the qc and has frozen the port. sas_ata_post_internal() 487 struct sas_task *task = qc->lldd_task; sas_ata_post_internal() 489 qc->lldd_task = NULL; sas_ata_post_internal() 589 struct ata_queued_cmd *qc = task->uldd_task; sas_ata_task_abort() local 593 if (qc->scsicmd) { sas_ata_task_abort() 594 struct request_queue *q = qc->scsicmd->device->request_queue; sas_ata_task_abort() 598 blk_abort_request(qc->scsicmd->request); sas_ata_task_abort() 604 qc->flags &= ~ATA_QCFLAG_ACTIVE; sas_ata_task_abort() 605 qc->flags |= ATA_QCFLAG_FAILED; sas_ata_task_abort() 606 qc->err_mask |= AC_ERR_TIMEOUT; sas_ata_task_abort() 607 waiting = qc->private_data; sas_ata_task_abort()
|
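sas_ata_qc_issue() above translates the qc's taskfile into a Register Host-to-Device FIS with ata_tf_to_fis() before handing the task to the SAS transport, first clearing tf.nsect for FPDMA commands since the NCQ tag travels in the SAS task instead. The conversion step in isolation, as a sketch:

    #include <linux/libata.h>

    /* fis must point at a 20-byte H2D FIS buffer, as in sas_ata_qc_issue(). */
    static void my_fill_fis(struct ata_queued_cmd *qc, u8 *fis)
    {
            /* pmp from the link; is_cmd=1 marks a command FIS */
            ata_tf_to_fis(&qc->tf, qc->dev->link->pmp, 1, fis);
    }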
/linux-4.1.27/net/mac80211/ |
H A D | mesh_ps.c | 64 u8 *qc = ieee80211_get_qos_ctl((void *) skb->data); mps_qos_null_tx() local 66 qc[0] |= IEEE80211_QOS_CTL_EOSP; mps_qos_null_tx() 191 u8 *qc; ieee80211_mps_set_frame_flags() local 213 qc = ieee80211_get_qos_ctl(hdr); ieee80211_mps_set_frame_flags() 219 qc[1] |= (IEEE80211_QOS_CTL_MESH_PS_LEVEL >> 8); ieee80211_mps_set_frame_flags() 221 qc[1] &= ~(IEEE80211_QOS_CTL_MESH_PS_LEVEL >> 8); ieee80211_mps_set_frame_flags() 278 u8 *qc = ieee80211_get_qos_ctl(hdr); mps_set_sta_peer_pm() local 291 if (qc[1] & (IEEE80211_QOS_CTL_MESH_PS_LEVEL >> 8)) mps_set_sta_peer_pm() 369 u8 *qc; mpsp_trigger_send() local 387 qc = ieee80211_get_qos_ctl(nullfunc); mpsp_trigger_send() 389 qc[1] |= (IEEE80211_QOS_CTL_RSPI >> 8); mpsp_trigger_send() 391 qc[0] |= IEEE80211_QOS_CTL_EOSP; mpsp_trigger_send() 529 * @qc: QoS Control field 536 void ieee80211_mpsp_trigger_process(u8 *qc, struct sta_info *sta, ieee80211_mpsp_trigger_process() argument 539 u8 rspi = qc[1] & (IEEE80211_QOS_CTL_RSPI >> 8); ieee80211_mpsp_trigger_process() 540 u8 eosp = qc[0] & IEEE80211_QOS_CTL_EOSP; ieee80211_mpsp_trigger_process()
|
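The mesh power-save code above manipulates individual bits of the 16-bit QoS Control field through the byte pointer returned by ieee80211_get_qos_ctl(): qc[0] holds the TID and EOSP bits, qc[1] the mesh PS level and RSPI bits (16-bit masks, hence the >> 8). A minimal sketch of the same idiom:

    #include <linux/ieee80211.h>

    static void my_mark_eosp_deep_sleep(struct ieee80211_hdr *hdr)
    {
            u8 *qc = ieee80211_get_qos_ctl(hdr);

            qc[0] |= IEEE80211_QOS_CTL_EOSP;                /* end of SP */
            qc[1] |= IEEE80211_QOS_CTL_MESH_PS_LEVEL >> 8;  /* deep mode */
    }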
H A D | status.c | 189 u8 *qc = ieee80211_get_qos_ctl(hdr); ieee80211_frame_acked() local 190 u16 tid = qc[0] & 0xf; ieee80211_frame_acked() 714 u8 *qc; ieee80211_tx_status() local 716 qc = ieee80211_get_qos_ctl(hdr); ieee80211_tx_status() 717 tid = qc[0] & 0xf; ieee80211_tx_status() 723 u8 *qc = ieee80211_get_qos_ctl(hdr); ieee80211_tx_status() local 725 tid = qc[0] & 0xf; ieee80211_tx_status()
|
H A D | tx.c | 783 u8 *qc; ieee80211_tx_h_sequence() local 831 qc = ieee80211_get_qos_ctl(hdr); ieee80211_tx_h_sequence() 832 tid = *qc & IEEE80211_QOS_CTL_TID_MASK; ieee80211_tx_h_sequence() 1140 u8 *qc; ieee80211_tx_prepare() local 1180 qc = ieee80211_get_qos_ctl(hdr); ieee80211_tx_prepare() 1181 tid = *qc & IEEE80211_QOS_CTL_TID_MASK; ieee80211_tx_prepare()
|
H A D | mesh.h | 254 void ieee80211_mpsp_trigger_process(u8 *qc, struct sta_info *sta,
|
H A D | rx.c | 553 u8 *qc = ieee80211_get_qos_ctl(hdr); ieee80211_parse_qos() local 555 tid = *qc & IEEE80211_QOS_CTL_TID_MASK; ieee80211_parse_qos() 556 if (*qc & IEEE80211_QOS_CTL_A_MSDU_PRESENT) ieee80211_parse_qos()
|
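status.c, tx.c and rx.c above all repeat the same two-line idiom: the low nibble of the first QoS-control byte is the TID, and bit 7 flags an A-MSDU. Factored out as a sketch:

    #include <linux/ieee80211.h>

    static u8 my_get_tid(struct ieee80211_hdr *hdr, bool *amsdu)
    {
            u8 *qc = ieee80211_get_qos_ctl(hdr);

            *amsdu = *qc & IEEE80211_QOS_CTL_A_MSDU_PRESENT;
            return *qc & IEEE80211_QOS_CTL_TID_MASK;        /* 0..15 */
    }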
/linux-4.1.27/include/linux/ |
H A D | libata.h | 510 AC_ERR_NCQ = (1 << 10), /* marker for offending NCQ qc */ 537 typedef void (*ata_qc_cb_t) (struct ata_queued_cmd *qc); 848 struct ata_link *excl_link; /* for PMP qc exclusion */ 894 int (*qc_defer)(struct ata_queued_cmd *qc); 895 int (*check_atapi_dma)(struct ata_queued_cmd *qc); 896 void (*qc_prep)(struct ata_queued_cmd *qc); 897 unsigned int (*qc_issue)(struct ata_queued_cmd *qc); 898 bool (*qc_fill_rtf)(struct ata_queued_cmd *qc); 924 void (*post_internal_cmd)(struct ata_queued_cmd *qc); 964 void (*sff_drain_fifo)(struct ata_queued_cmd *qc); 967 void (*bmdma_setup)(struct ata_queued_cmd *qc); 968 void (*bmdma_start)(struct ata_queued_cmd *qc); 969 void (*bmdma_stop)(struct ata_queued_cmd *qc); 1187 extern int ata_std_qc_defer(struct ata_queued_cmd *qc); 1188 extern void ata_noop_qc_prep(struct ata_queued_cmd *qc); 1189 extern void ata_sg_init(struct ata_queued_cmd *qc, struct scatterlist *sg, 1199 extern void ata_qc_complete(struct ata_queued_cmd *qc); 1319 extern void ata_eh_qc_complete(struct ata_queued_cmd *qc); 1320 extern void ata_eh_qc_retry(struct ata_queued_cmd *qc); 1642 static inline void ata_qc_set_polling(struct ata_queued_cmd *qc) ata_qc_set_polling() argument 1644 qc->tf.ctl |= ATA_NIEN; ata_qc_set_polling() 1658 struct ata_queued_cmd *qc = __ata_qc_from_tag(ap, tag); ata_qc_from_tag() local 1660 if (unlikely(!qc) || !ap->ops->error_handler) ata_qc_from_tag() 1661 return qc; ata_qc_from_tag() 1663 if ((qc->flags & (ATA_QCFLAG_ACTIVE | ata_qc_from_tag() 1665 return qc; ata_qc_from_tag() 1670 static inline unsigned int ata_qc_raw_nbytes(struct ata_queued_cmd *qc) ata_qc_raw_nbytes() argument 1672 return qc->nbytes - min(qc->extrabytes, qc->nbytes); ata_qc_raw_nbytes() 1690 static inline void ata_qc_reinit(struct ata_queued_cmd *qc) ata_qc_reinit() argument 1692 qc->dma_dir = DMA_NONE; ata_qc_reinit() 1693 qc->sg = NULL; ata_qc_reinit() 1694 qc->flags = 0; ata_qc_reinit() 1695 qc->cursg = NULL; ata_qc_reinit() 1696 qc->cursg_ofs = 0; ata_qc_reinit() 1697 qc->nbytes = qc->extrabytes = qc->curbytes = 0; ata_qc_reinit() 1698 qc->n_elem = 0; ata_qc_reinit() 1699 qc->err_mask = 0; ata_qc_reinit() 1700 qc->sect_size = ATA_SECT_SIZE; ata_qc_reinit() 1702 ata_tf_init(qc->dev, &qc->tf); ata_qc_reinit() 1705 qc->result_tf.command = ATA_DRDY; ata_qc_reinit() 1706 qc->result_tf.feature = 0; ata_qc_reinit() 1786 extern int sata_pmp_qc_defer_cmd_switch(struct ata_queued_cmd *qc); 1831 extern int ata_sff_hsm_move(struct ata_port *ap, struct ata_queued_cmd *qc, 1837 extern unsigned int ata_sff_qc_issue(struct ata_queued_cmd *qc); 1838 extern bool ata_sff_qc_fill_rtf(struct ata_queued_cmd *qc); 1840 struct ata_queued_cmd *qc); 1855 extern void ata_sff_drain_fifo(struct ata_queued_cmd *qc); 1880 extern void ata_bmdma_qc_prep(struct ata_queued_cmd *qc); 1881 extern unsigned int ata_bmdma_qc_issue(struct ata_queued_cmd *qc); 1882 extern void ata_bmdma_dumb_qc_prep(struct ata_queued_cmd *qc); 1884 struct ata_queued_cmd *qc); 1887 extern void ata_bmdma_post_internal_cmd(struct ata_queued_cmd *qc); 1889 extern void ata_bmdma_setup(struct ata_queued_cmd *qc); 1890 extern void ata_bmdma_start(struct ata_queued_cmd *qc); 1891 extern void ata_bmdma_stop(struct ata_queued_cmd *qc);
|
/linux-4.1.27/drivers/media/v4l2-core/ |
H A D | v4l2-ctrls.c | 2478 int v4l2_query_ext_ctrl(struct v4l2_ctrl_handler *hdl, struct v4l2_query_ext_ctrl *qc) v4l2_query_ext_ctrl() argument 2481 u32 id = qc->id & V4L2_CTRL_ID_MASK; v4l2_query_ext_ctrl() 2493 if ((qc->id & next_flags) && !list_empty(&hdl->ctrl_refs)) { v4l2_query_ext_ctrl() 2499 if ((qc->id & next_flags) == V4L2_CTRL_FLAG_NEXT_COMPOUND) { v4l2_query_ext_ctrl() 2502 } else if ((qc->id & next_flags) == next_flags) { v4l2_query_ext_ctrl() 2507 /* Find the next control with ID > qc->id */ v4l2_query_ext_ctrl() 2546 memset(qc, 0, sizeof(*qc)); v4l2_query_ext_ctrl() 2548 qc->id = id; v4l2_query_ext_ctrl() 2550 qc->id = ctrl->id; v4l2_query_ext_ctrl() 2551 strlcpy(qc->name, ctrl->name, sizeof(qc->name)); v4l2_query_ext_ctrl() 2552 qc->flags = ctrl->flags; v4l2_query_ext_ctrl() 2553 qc->type = ctrl->type; v4l2_query_ext_ctrl() 2555 qc->flags |= V4L2_CTRL_FLAG_HAS_PAYLOAD; v4l2_query_ext_ctrl() 2556 qc->elem_size = ctrl->elem_size; v4l2_query_ext_ctrl() 2557 qc->elems = ctrl->elems; v4l2_query_ext_ctrl() 2558 qc->nr_of_dims = ctrl->nr_of_dims; v4l2_query_ext_ctrl() 2559 memcpy(qc->dims, ctrl->dims, qc->nr_of_dims * sizeof(qc->dims[0])); v4l2_query_ext_ctrl() 2560 qc->minimum = ctrl->minimum; v4l2_query_ext_ctrl() 2561 qc->maximum = ctrl->maximum; v4l2_query_ext_ctrl() 2562 qc->default_value = ctrl->default_value; v4l2_query_ext_ctrl() 2565 qc->step = 1; v4l2_query_ext_ctrl() 2567 qc->step = ctrl->step; v4l2_query_ext_ctrl() 2573 int v4l2_queryctrl(struct v4l2_ctrl_handler *hdl, struct v4l2_queryctrl *qc) v4l2_queryctrl() argument 2575 struct v4l2_query_ext_ctrl qec = { qc->id }; v4l2_queryctrl() 2582 qc->id = qec.id; v4l2_queryctrl() 2583 qc->type = qec.type; v4l2_queryctrl() 2584 qc->flags = qec.flags; v4l2_queryctrl() 2585 strlcpy(qc->name, qec.name, sizeof(qc->name)); v4l2_queryctrl() 2586 switch (qc->type) { v4l2_queryctrl() 2593 qc->minimum = qec.minimum; v4l2_queryctrl() 2594 qc->maximum = qec.maximum; v4l2_queryctrl() 2595 qc->step = qec.step; v4l2_queryctrl() 2596 qc->default_value = qec.default_value; v4l2_queryctrl() 2599 qc->minimum = 0; v4l2_queryctrl() 2600 qc->maximum = 0; v4l2_queryctrl() 2601 qc->step = 0; v4l2_queryctrl() 2602 qc->default_value = 0; v4l2_queryctrl() 2609 int v4l2_subdev_queryctrl(struct v4l2_subdev *sd, struct v4l2_queryctrl *qc) v4l2_subdev_queryctrl() argument 2611 if (qc->id & (V4L2_CTRL_FLAG_NEXT_CTRL | V4L2_CTRL_FLAG_NEXT_COMPOUND)) v4l2_subdev_queryctrl() 2613 return v4l2_queryctrl(sd->ctrl_handler, qc); v4l2_subdev_queryctrl()
|
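v4l2_queryctrl() above is a compatibility wrapper that fills a v4l2_queryctrl from the richer v4l2_query_ext_ctrl. A hypothetical caller's-eye view (the handler would come from the driver; the print is illustrative):

    #include <media/v4l2-ctrls.h>

    static int my_query_brightness(struct v4l2_ctrl_handler *hdl)
    {
            struct v4l2_queryctrl qc = { .id = V4L2_CID_BRIGHTNESS };
            int ret = v4l2_queryctrl(hdl, &qc);

            if (ret)
                    return ret;     /* control does not exist */

            pr_info("%s: [%d..%d] step %d default %d\n", qc.name,
                    qc.minimum, qc.maximum, qc.step, qc.default_value);
            return 0;
    }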
/linux-4.1.27/drivers/media/usb/gspca/stv06xx/ |
H A D | stv06xx_st6422.h | 8 * Strongly based on qc-usb-messenger, which is:
|
H A D | stv06xx_st6422.c | 8 * Strongly based on qc-usb-messenger, which is:
|
/linux-4.1.27/drivers/staging/rtl8192e/ |
H A D | rtllib_crypt_ccmp.c | 114 u8 *pos, qc = 0; ccmp_init_blocks() local 133 qc = *pos & 0x0f; ccmp_init_blocks() 143 b0[1] = qc; ccmp_init_blocks() 169 aad[a4_included ? 30 : 24] = qc; ccmp_init_blocks()
|
/linux-4.1.27/drivers/staging/rtl8192u/ieee80211/ |
H A D | ieee80211_crypt_ccmp.c | 119 u8 *pos, qc = 0; ccmp_init_blocks() local 142 qc = *pos & 0x0f; ccmp_init_blocks() 151 b0[1] = qc; ccmp_init_blocks() 177 aad[a4_included ? 30 : 24] = qc; ccmp_init_blocks()
|
/linux-4.1.27/net/wireless/ |
H A D | lib80211_crypt_ccmp.c | 115 u8 *pos, qc = 0; ccmp_init_blocks() local 130 qc = *pos & 0x0f; ccmp_init_blocks() 140 b0[1] = qc; ccmp_init_blocks() 166 aad[a4_included ? 30 : 24] = qc; ccmp_init_blocks()
|
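All three CCMP implementations above (rtl8192e, rtl8192u, lib80211) fold the QoS TID nibble into byte 1 of the CCM b0 block and into the AAD, whose offset depends on whether a fourth address is present. Condensed into one sketch:

    #include <linux/types.h>

    static void my_ccmp_fold_qos(u8 qos_ctl_byte0, bool a4_included,
                                 u8 *b0, u8 *aad)
    {
            u8 qc = qos_ctl_byte0 & 0x0f;   /* TID nibble only */

            b0[1] = qc;                     /* nonce priority octet */
            aad[a4_included ? 30 : 24] = qc;
    }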
/linux-4.1.27/drivers/media/usb/uvc/ |
H A D | uvc_v4l2.c | 877 struct v4l2_queryctrl *qc) uvc_ioctl_queryctrl() 882 return uvc_query_v4l2_ctrl(chain, qc); uvc_ioctl_queryctrl() 890 struct v4l2_queryctrl qc = { qec->id }; uvc_ioctl_query_ext_ctrl() local 893 ret = uvc_query_v4l2_ctrl(chain, &qc); uvc_ioctl_query_ext_ctrl() 897 qec->id = qc.id; uvc_ioctl_query_ext_ctrl() 898 qec->type = qc.type; uvc_ioctl_query_ext_ctrl() 899 strlcpy(qec->name, qc.name, sizeof(qec->name)); uvc_ioctl_query_ext_ctrl() 900 qec->minimum = qc.minimum; uvc_ioctl_query_ext_ctrl() 901 qec->maximum = qc.maximum; uvc_ioctl_query_ext_ctrl() 902 qec->step = qc.step; uvc_ioctl_query_ext_ctrl() 903 qec->default_value = qc.default_value; uvc_ioctl_query_ext_ctrl() 904 qec->flags = qc.flags; uvc_ioctl_query_ext_ctrl() 876 uvc_ioctl_queryctrl(struct file *file, void *fh, struct v4l2_queryctrl *qc) uvc_ioctl_queryctrl() argument
|
/linux-4.1.27/drivers/net/wireless/ath/wcn36xx/ |
H A D | txrx.c | 121 u8 *qc, tid; wcn36xx_tx_start_ampdu() local 134 qc = ieee80211_get_qos_ctl(hdr); wcn36xx_tx_start_ampdu() 135 tid = qc[0] & IEEE80211_QOS_CTL_TID_MASK; wcn36xx_tx_start_ampdu()
|
/linux-4.1.27/fs/gfs2/ |
H A D | quota.c | 619 struct gfs2_quota_change *qc = qd->qd_bh_qc; do_qc() local 626 qc->qc_change = 0; do_qc() 627 qc->qc_flags = 0; do_qc() 629 qc->qc_flags = cpu_to_be32(GFS2_QCF_USER); do_qc() 630 qc->qc_id = cpu_to_be32(from_kqid(&init_user_ns, qd->qd_id)); do_qc() 633 x = be64_to_cpu(qc->qc_change) + change; do_qc() 634 qc->qc_change = cpu_to_be64(x); do_qc() 643 qc->qc_flags = 0; do_qc() 644 qc->qc_id = 0; do_qc() 1285 const struct gfs2_quota_change *qc; gfs2_quota_init() local 1303 qc = (const struct gfs2_quota_change *)(bh->b_data + sizeof(struct gfs2_meta_header)); gfs2_quota_init() 1307 s64 qc_change = be64_to_cpu(qc->qc_change); gfs2_quota_init() 1308 u32 qc_flags = be32_to_cpu(qc->qc_flags); gfs2_quota_init() 1312 be32_to_cpu(qc->qc_id)); gfs2_quota_init() 1313 qc++; gfs2_quota_init()
|
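do_qc() above accumulates into an on-disk counter that is stored big-endian, so every update is a be64_to_cpu()/cpu_to_be64() round trip. The core of that read-modify-write as a sketch:

    #include <linux/types.h>
    #include <asm/byteorder.h>

    static void my_qc_add(__be64 *qc_change, s64 change)
    {
            s64 x = be64_to_cpu(*qc_change) + change;

            *qc_change = cpu_to_be64(x);    /* back to disk endianness */
    }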
H A D | ops_fstype.c | 878 fs_err(sdp, "can't find local \"qc\" file: %d\n", error); init_per_node() 897 fs_err(sdp, "can't lock local \"qc\" file: %d\n", error); init_per_node()
|
/linux-4.1.27/drivers/scsi/ |
H A D | ipr.c | 676 ipr_cmd->qc = NULL; ipr_reinit_ipr_cmnd() 833 struct ata_queued_cmd *qc = ipr_cmd->qc; ipr_sata_eh_done() local 834 struct ipr_sata_port *sata_port = qc->ap->private_data; ipr_sata_eh_done() 836 qc->err_mask |= AC_ERR_OTHER; ipr_sata_eh_done() 839 ata_qc_complete(qc); ipr_sata_eh_done() 893 else if (ipr_cmd->qc) for_each_hrrq() 5157 if (ipr_cmd->qc) for_each_hrrq() 5159 if (ipr_cmd->qc && for_each_hrrq() 5160 !(ipr_cmd->qc->flags & ATA_QCFLAG_FAILED)) { for_each_hrrq() 5161 ipr_cmd->qc->err_mask |= AC_ERR_TIMEOUT; for_each_hrrq() 5162 ipr_cmd->qc->flags |= ATA_QCFLAG_FAILED; for_each_hrrq() 6545 * @qc: ATA queued command 6550 static void ipr_ata_post_internal(struct ata_queued_cmd *qc) ipr_ata_post_internal() argument 6552 struct ipr_sata_port *sata_port = qc->ap->private_data; ipr_ata_post_internal() 6568 if (ipr_cmd->qc == qc) { for_each_hrrq() 6617 struct ata_queued_cmd *qc = ipr_cmd->qc; ipr_sata_done() local 6618 struct ipr_sata_port *sata_port = qc->ap->private_data; ipr_sata_done() 6635 qc->err_mask |= __ac_err_mask(sata_port->ioasa.status); ipr_sata_done() 6637 qc->err_mask |= ac_err_mask(sata_port->ioasa.status); ipr_sata_done() 6640 ata_qc_complete(qc); ipr_sata_done() 6646 * @qc: ATA queued command 6650 struct ata_queued_cmd *qc) ipr_build_ata_ioadl64() 6656 int len = qc->nbytes; ipr_build_ata_ioadl64() 6664 if (qc->dma_dir == DMA_TO_DEVICE) { ipr_build_ata_ioadl64() 6667 } else if (qc->dma_dir == DMA_FROM_DEVICE) ipr_build_ata_ioadl64() 6676 for_each_sg(qc->sg, sg, qc->n_elem, si) { ipr_build_ata_ioadl64() 6692 * @qc: ATA queued command 6696 struct ata_queued_cmd *qc) ipr_build_ata_ioadl() 6702 int len = qc->nbytes; ipr_build_ata_ioadl() 6709 if (qc->dma_dir == DMA_TO_DEVICE) { ipr_build_ata_ioadl() 6715 } else if (qc->dma_dir == DMA_FROM_DEVICE) { ipr_build_ata_ioadl() 6722 for_each_sg(qc->sg, sg, qc->n_elem, si) { ipr_build_ata_ioadl() 6736 * @qc: queued command 6741 static int ipr_qc_defer(struct ata_queued_cmd *qc) ipr_qc_defer() argument 6743 struct ata_port *ap = qc->ap; ipr_qc_defer() 6753 qc->lldd_task = NULL; ipr_qc_defer() 6771 qc->lldd_task = ipr_cmd; ipr_qc_defer() 6777 * ipr_qc_issue - Issue a SATA qc to a device 6778 * @qc: queued command 6783 static unsigned int ipr_qc_issue(struct ata_queued_cmd *qc) ipr_qc_issue() argument 6785 struct ata_port *ap = qc->ap; ipr_qc_issue() 6793 if (qc->lldd_task == NULL) ipr_qc_issue() 6794 ipr_qc_defer(qc); ipr_qc_issue() 6796 ipr_cmd = qc->lldd_task; ipr_qc_issue() 6800 qc->lldd_task = NULL; ipr_qc_issue() 6822 ipr_cmd->qc = qc; ipr_qc_issue() 6828 ipr_cmd->dma_use_sg = qc->n_elem; ipr_qc_issue() 6831 ipr_build_ata_ioadl64(ipr_cmd, qc); ipr_qc_issue() 6833 ipr_build_ata_ioadl(ipr_cmd, qc); ipr_qc_issue() 6836 ipr_copy_sata_tf(regs, &qc->tf); ipr_qc_issue() 6837 memcpy(ioarcb->cmd_pkt.cdb, qc->cdb, IPR_MAX_CDB_LEN); ipr_qc_issue() 6840 switch (qc->tf.protocol) { ipr_qc_issue() 6873 * @qc: ATA queued command 6878 static bool ipr_qc_fill_rtf(struct ata_queued_cmd *qc) ipr_qc_fill_rtf() argument 6880 struct ipr_sata_port *sata_port = qc->ap->private_data; ipr_qc_fill_rtf() 6882 struct ata_taskfile *tf = &qc->result_tf; ipr_qc_fill_rtf() 6649 ipr_build_ata_ioadl64(struct ipr_cmnd *ipr_cmd, struct ata_queued_cmd *qc) ipr_build_ata_ioadl64() argument 6695 ipr_build_ata_ioadl(struct ipr_cmnd *ipr_cmd, struct ata_queued_cmd *qc) ipr_build_ata_ioadl() argument
|
H A D | ipr.h | 1596 struct ata_queued_cmd *qc; member in struct:ipr_cmnd
|
/linux-4.1.27/drivers/net/wireless/rtlwifi/ |
H A D | usb.c | 596 u8 *qc = ieee80211_get_qos_ctl(hdr); _rtl_rx_get_padding() local 603 if ((unsigned long)qc - (unsigned long)hdr < len && _rtl_rx_get_padding() 604 *qc & IEEE80211_QOS_CTL_A_MSDU_PRESENT) _rtl_rx_get_padding() 971 u8 *qc = NULL; _rtl_usb_tx_preprocess() local 995 qc = ieee80211_get_qos_ctl(hdr); _rtl_usb_tx_preprocess() 996 tid = qc[0] & IEEE80211_QOS_CTL_TID_MASK; _rtl_usb_tx_preprocess() 1005 if (qc) _rtl_usb_tx_preprocess()
|
/linux-4.1.27/drivers/staging/rtl8712/ |
H A D | ieee80211.h | 172 u16 qc; member in struct:ieee80211_hdr_qos 182 u16 qc; member in struct:ieee80211_hdr_3addr_qos 296 #define WLAN_QC_GET_TID(qc) ((qc) & 0x0f)
|
H A D | rtl871x_xmit.c | 476 u16 *qc; make_wlanhdr() local 518 qc = (unsigned short *)(hdr + pattrib->hdrlen - 2); make_wlanhdr() 520 SetPriority(qc, pattrib->priority); make_wlanhdr() 521 SetAckpolicy(qc, pattrib->ack_policy); make_wlanhdr()
|
/linux-4.1.27/drivers/net/wireless/ath/ath9k/ |
H A D | htc_drv_txrx.c | 271 u8 *qc, *tx_fhdr; ath9k_htc_tx_data() local 301 qc = ieee80211_get_qos_ctl(hdr); ath9k_htc_tx_data() 302 tx_hdr.tidno = qc[0] & IEEE80211_QOS_CTL_TID_MASK; ath9k_htc_tx_data() 422 u8 *qc, tid; ath9k_htc_check_tx_aggr() local 425 qc = ieee80211_get_qos_ctl(hdr); ath9k_htc_check_tx_aggr() 426 tid = qc[0] & 0xf; ath9k_htc_check_tx_aggr()
|
/linux-4.1.27/drivers/scsi/pm8001/ |
H A D | pm8001_sas.c | 280 struct ata_queued_cmd *qc = task->uldd_task; pm8001_get_ncq_tag() local 281 if (qc) { pm8001_get_ncq_tag() 282 if (qc->tf.command == ATA_CMD_FPDMA_WRITE || pm8001_get_ncq_tag() 283 qc->tf.command == ATA_CMD_FPDMA_READ) { pm8001_get_ncq_tag() 284 *tag = qc->tag; pm8001_get_ncq_tag()
|
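pm8001_get_ncq_tag() above, mvs_get_ncq_tag() and the isci construct code further down all extract the NCQ tag the same way: only FPDMA read/write commands carry one (isci then shifts it into the FIS sector-count field as qc->tag << 3). The shared test as a sketch:

    #include <linux/libata.h>

    static bool my_get_ncq_tag(struct ata_queued_cmd *qc, u32 *tag)
    {
            if (qc && (qc->tf.command == ATA_CMD_FPDMA_WRITE ||
                       qc->tf.command == ATA_CMD_FPDMA_READ)) {
                    *tag = qc->tag;
                    return true;
            }
            return false;
    }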
/linux-4.1.27/drivers/net/wireless/iwlwifi/dvm/ |
H A D | tx.c | 98 u8 *qc = ieee80211_get_qos_ctl(hdr); iwlagn_tx_cmd_build_basic() local 99 tx_cmd->tid_tspec = qc[0] & 0xf; iwlagn_tx_cmd_build_basic() 394 u8 *qc = NULL; iwlagn_tx_skb() local 396 qc = ieee80211_get_qos_ctl(hdr); iwlagn_tx_skb() 397 tid = qc[0] & IEEE80211_QOS_CTL_TID_MASK; iwlagn_tx_skb()
|
H A D | rs.c | 294 u8 *qc = ieee80211_get_qos_ctl(hdr); rs_tl_add_packet() local 295 tid = qc[0] & 0xf; rs_tl_add_packet()
|
/linux-4.1.27/drivers/net/wireless/iwlwifi/mvm/ |
H A D | tx.c | 98 u8 *qc = ieee80211_get_qos_ctl(hdr); iwl_mvm_set_tx_cmd() local 99 tx_cmd->tid_tspec = qc[0] & 0xf; iwl_mvm_set_tx_cmd() 428 u8 *qc = NULL; iwl_mvm_tx_skb() local 429 qc = ieee80211_get_qos_ctl(hdr); iwl_mvm_tx_skb() 430 tid = qc[0] & IEEE80211_QOS_CTL_TID_MASK; iwl_mvm_tx_skb()
|
H A D | rs.c | 1107 u8 *qc = ieee80211_get_qos_ctl(hdr); rs_get_tid() local 1108 tid = qc[0] & 0xf; rs_get_tid()
|
/linux-4.1.27/drivers/staging/rtl8188eu/include/ |
H A D | ieee80211.h | 334 u16 qc; member in struct:rtw_ieee80211_hdr_qos 344 u16 qc; member in struct:rtw_ieee80211_hdr_3addr_qos 475 #define WLAN_QC_GET_TID(qc) ((qc) & 0x0f)
|
/linux-4.1.27/drivers/char/ |
H A D | misc.c | 26 * Idea by Jacques Gelinas <jack@solucorp.qc.ca>,
|
/linux-4.1.27/drivers/media/platform/ |
H A D | fsl-viu.c | 1014 struct v4l2_queryctrl *qc) vidioc_queryctrl() 1019 if (qc->id && qc->id == viu_qctrl[i].id) { vidioc_queryctrl() 1020 memcpy(qc, &(viu_qctrl[i]), sizeof(*qc)); vidioc_queryctrl() 1013 vidioc_queryctrl(struct file *file, void *priv, struct v4l2_queryctrl *qc) vidioc_queryctrl() argument
|
/linux-4.1.27/drivers/net/wireless/rtlwifi/rtl8192cu/ |
H A D | trx.c | 507 u8 *qc = ieee80211_get_qos_ctl(hdr); rtl92cu_tx_fill_desc() local 508 u8 tid = qc[0] & IEEE80211_QOS_CTL_TID_MASK; rtl92cu_tx_fill_desc()
|
/linux-4.1.27/drivers/staging/rtl8188eu/hal/ |
H A D | rtl8188e_cmd.c | 412 SetPriority(&pwlanqoshdr->qc, AC); ConstructNullFunctionData() 413 SetEOSP(&pwlanqoshdr->qc, bEosp); ConstructNullFunctionData()
|
/linux-4.1.27/drivers/scsi/mvsas/ |
H A D | mv_sas.c | 428 struct ata_queued_cmd *qc = task->uldd_task; mvs_get_ncq_tag() local 430 if (qc) { mvs_get_ncq_tag() 431 if (qc->tf.command == ATA_CMD_FPDMA_WRITE || mvs_get_ncq_tag() 432 qc->tf.command == ATA_CMD_FPDMA_READ) { mvs_get_ncq_tag() 433 *tag = qc->tag; mvs_get_ncq_tag()
|
/linux-4.1.27/drivers/staging/rtl8188eu/core/ |
H A D | rtw_xmit.c | 777 u16 *qc; rtw_make_wlanhdr() local 847 qc = (unsigned short *)(hdr + pattrib->hdrlen - 2); rtw_make_wlanhdr() 850 SetPriority(qc, pattrib->priority); rtw_make_wlanhdr() 852 SetEOSP(qc, pattrib->eosp); rtw_make_wlanhdr() 854 SetAckpolicy(qc, pattrib->ack_policy); rtw_make_wlanhdr()
|
H A D | rtw_mlme_ext.c | 2991 unsigned short *qc; _issue_qos_nulldata() local 3029 qc = (unsigned short *)(pframe + pattrib->hdrlen - 2); _issue_qos_nulldata() 3031 SetPriority(qc, tid); _issue_qos_nulldata() 3033 SetEOSP(qc, pattrib->eosp); _issue_qos_nulldata() 3035 SetAckpolicy(qc, pattrib->ack_policy); _issue_qos_nulldata()
|
/linux-4.1.27/drivers/scsi/isci/ |
H A D | request.c | 3156 struct ata_queued_cmd *qc = task->uldd_task; isci_request_stp_request_construct() local 3171 if (qc && (qc->tf.command == ATA_CMD_FPDMA_WRITE || isci_request_stp_request_construct() 3172 qc->tf.command == ATA_CMD_FPDMA_READ)) { isci_request_stp_request_construct() 3173 fis->sector_count = qc->tag << 3; isci_request_stp_request_construct() 3174 ireq->tc->type.stp.ncq_tag = qc->tag; isci_request_stp_request_construct()
|
/linux-4.1.27/drivers/net/wireless/iwlegacy/ |
H A D | 4965-mac.c | 1528 u8 *qc = ieee80211_get_qos_ctl(hdr); il4965_tx_cmd_build_basic() local 1529 tx_cmd->tid_tspec = qc[0] & 0xf; il4965_tx_cmd_build_basic() 1681 u8 *qc = NULL; il4965_tx_skb() local 1746 qc = ieee80211_get_qos_ctl(hdr); il4965_tx_skb() 1747 tid = qc[0] & IEEE80211_QOS_CTL_TID_MASK; il4965_tx_skb() 2791 u8 *qc = NULL; il4965_hdl_tx() local 2809 qc = ieee80211_get_qos_ctl(hdr); il4965_hdl_tx() 2810 tid = qc[0] & 0xf; il4965_hdl_tx() 2836 WARN_ON(!qc); il4965_hdl_tx() 2852 if (qc) il4965_hdl_tx() 2875 if (qc && likely(sta_id != IL_INVALID_STATION)) il4965_hdl_tx() 2884 if (qc && likely(sta_id != IL_INVALID_STATION)) il4965_hdl_tx()
|
H A D | 3945-mac.c | 435 u8 *qc = ieee80211_get_qos_ctl(hdr); il3945_build_tx_cmd_basic() local 436 tx_cmd->tid_tspec = qc[0] & 0xf; il3945_build_tx_cmd_basic() 527 u8 *qc = ieee80211_get_qos_ctl(hdr); il3945_tx_skb() local 528 tid = qc[0] & IEEE80211_QOS_CTL_TID_MASK; il3945_tx_skb()
|
H A D | 4965-rs.c | 280 u8 *qc = ieee80211_get_qos_ctl(hdr); il4965_rs_tl_add_packet() local 281 tid = qc[0] & 0xf; il4965_rs_tl_add_packet()
|
/linux-4.1.27/include/media/ |
H A D | v4l2-ctrls.h | 809 int v4l2_queryctrl(struct v4l2_ctrl_handler *hdl, struct v4l2_queryctrl *qc); 810 int v4l2_query_ext_ctrl(struct v4l2_ctrl_handler *hdl, struct v4l2_query_ext_ctrl *qc); 822 int v4l2_subdev_queryctrl(struct v4l2_subdev *sd, struct v4l2_queryctrl *qc);
|
H A D | v4l2-subdev.h | 155 int (*queryctrl)(struct v4l2_subdev *sd, struct v4l2_queryctrl *qc);
|
/linux-4.1.27/drivers/staging/media/bcm2048/ |
H A D | radio-bcm2048.c | 2353 struct v4l2_queryctrl *qc) bcm2048_vidioc_queryctrl() 2358 if (qc->id && qc->id == bcm2048_v4l2_queryctrl[i].id) { bcm2048_vidioc_queryctrl() 2359 *qc = bcm2048_v4l2_queryctrl[i]; bcm2048_vidioc_queryctrl() 2352 bcm2048_vidioc_queryctrl(struct file *file, void *priv, struct v4l2_queryctrl *qc) bcm2048_vidioc_queryctrl() argument
|
/linux-4.1.27/drivers/net/wireless/ath/carl9170/ |
H A D | rx.c | 469 u8 *qc = ieee80211_get_qos_ctl(hdr); carl9170_rx_copy_data() local 472 if (*qc & IEEE80211_QOS_CTL_A_MSDU_PRESENT) carl9170_rx_copy_data()
|
/linux-4.1.27/drivers/net/wireless/ath/ath10k/ |
H A D | htt_rx.c | 875 u8 *qc; ath10k_get_tid() local 881 qc = ieee80211_get_qos_ctl(hdr); ath10k_get_tid() 882 tid = *qc & IEEE80211_QOS_CTL_TID_MASK; ath10k_get_tid()
|