Lines matching references to hrrq in drivers/scsi/ipr.c (the IBM Power RAID SCSI/SATA "ipr" driver). Each entry gives the source line number, the matching source line, and the enclosing function; a trailing "argument" or "local" marks the line that declares the identifier.
707 struct ipr_cmnd *__ipr_get_free_ipr_cmnd(struct ipr_hrr_queue *hrrq) in __ipr_get_free_ipr_cmnd() argument
711 if (likely(!list_empty(&hrrq->hrrq_free_q))) { in __ipr_get_free_ipr_cmnd()
712 ipr_cmd = list_entry(hrrq->hrrq_free_q.next, in __ipr_get_free_ipr_cmnd()
732 __ipr_get_free_ipr_cmnd(&ioa_cfg->hrrq[IPR_INIT_HRRQ]); in ipr_get_free_ipr_cmnd()
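The four entries above are the command-block allocator: while the caller holds the queue lock, the first entry on the per-queue free list is popped and returned, and ipr_get_free_ipr_cmnd() is the wrapper that always draws from hrrq[IPR_INIT_HRRQ]. A minimal kernel-style sketch of that pattern follows; the struct bodies are reduced to what the sketch needs, and the list_del() dequeue step is an assumption (the listing only shows the lookup):

        #include <linux/list.h>

        /* Reduced structs for illustration; the real ones carry far more state. */
        struct ipr_cmnd {
                struct list_head queue;          /* links into free/pending lists */
        };

        struct ipr_hrr_queue {
                struct list_head hrrq_free_q;    /* unused command blocks */
                struct list_head hrrq_pending_q; /* commands issued to the adapter */
        };

        /* Caller must hold the queue lock. Returns NULL when the pool is empty. */
        static struct ipr_cmnd *get_free_cmnd(struct ipr_hrr_queue *hrrq)
        {
                struct ipr_cmnd *ipr_cmd = NULL;

                if (likely(!list_empty(&hrrq->hrrq_free_q))) {
                        ipr_cmd = list_entry(hrrq->hrrq_free_q.next,
                                             struct ipr_cmnd, queue);
                        list_del(&ipr_cmd->queue);  /* assumed dequeue step */
                }
                return ipr_cmd;
        }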
756 spin_lock(&ioa_cfg->hrrq[i]._lock); in ipr_mask_and_clear_interrupts()
757 ioa_cfg->hrrq[i].allow_interrupts = 0; in ipr_mask_and_clear_interrupts()
758 spin_unlock(&ioa_cfg->hrrq[i]._lock); in ipr_mask_and_clear_interrupts()
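Lines 756-758 show the quiesce pattern used throughout the driver: walk every queue, take that queue's private _lock, and flip a flag that the interrupt handlers test before draining completions (see lines 5621 and 5687 below). A sketch, assuming a hrrq_num-bounded loop and the reduced structs above plus _lock and allow_interrupts fields:

        #include <linux/spinlock.h>

        /* Stop ISRs from processing completions; each flag write is fenced
         * by that queue's own lock, so a concurrently running handler
         * either sees the old value and finishes, or the new value and
         * bails out early.
         */
        static void quiesce_interrupts(struct ipr_ioa_cfg *ioa_cfg)
        {
                int i;

                for (i = 0; i < ioa_cfg->hrrq_num; i++) {
                        spin_lock(&ioa_cfg->hrrq[i]._lock);
                        ioa_cfg->hrrq[i].allow_interrupts = 0;
                        spin_unlock(&ioa_cfg->hrrq[i]._lock);
                }
        }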
838 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q); in ipr_sata_eh_done()
862 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q); in ipr_scsi_eh_done()
877 struct ipr_hrr_queue *hrrq; in ipr_fail_all_ops() local
880 for_each_hrrq(hrrq, ioa_cfg) { in ipr_fail_all_ops()
881 spin_lock(&hrrq->_lock); in ipr_fail_all_ops()
883 temp, &hrrq->hrrq_pending_q, queue) { in ipr_fail_all_ops()
901 spin_unlock(&hrrq->_lock); in ipr_fail_all_ops()
952 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q); in ipr_do_req()
1055 unsigned int hrrq; in ipr_get_hrrq_index() local
1058 hrrq = 0; in ipr_get_hrrq_index()
1060 hrrq = atomic_add_return(1, &ioa_cfg->hrrq_index); in ipr_get_hrrq_index()
1061 hrrq = (hrrq % (ioa_cfg->hrrq_num - 1)) + 1; in ipr_get_hrrq_index()
1063 return hrrq; in ipr_get_hrrq_index()
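Lines 1055-1063 are the queue-selection helper: queue 0 (IPR_INIT_HRRQ) is reserved for internal and initialization commands, so I/O is spread round-robin across queues 1 .. hrrq_num-1 using an atomic counter. A standalone C11 model of that arithmetic; atomic_fetch_add plus one stands in for the kernel's atomic_add_return(), which returns the post-increment value:

        #include <stdatomic.h>
        #include <stdio.h>

        static atomic_uint next_index;  /* stands in for ioa_cfg->hrrq_index */

        static unsigned int pick_hrrq(unsigned int num_queues)
        {
                unsigned int h;

                if (num_queues == 1)
                        return 0;  /* single-queue mode: everything on queue 0 */

                h = atomic_fetch_add(&next_index, 1) + 1; /* post-increment value */
                return (h % (num_queues - 1)) + 1;        /* cycle 1..num_queues-1 */
        }

        int main(void)
        {
                for (int i = 0; i < 8; i++)
                        printf("%u ", pick_hrrq(4));      /* 2 3 1 2 3 1 2 3 */
                printf("\n");
                return 0;
        }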
1085 if (ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds) { in ipr_send_hcam()
1087 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q); in ipr_send_hcam()
1475 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q); in ipr_process_ccn()
2577 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q); in ipr_process_error()
3292 if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds) { in ipr_worker_thread()
3541 if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead) in ipr_show_adapter_state()
3573 if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead && in ipr_store_adapter_state()
3576 spin_lock(&ioa_cfg->hrrq[i]._lock); in ipr_store_adapter_state()
3577 ioa_cfg->hrrq[i].ioa_is_dead = 0; in ipr_store_adapter_state()
3578 spin_unlock(&ioa_cfg->hrrq[i]._lock); in ipr_store_adapter_state()
3701 blk_iopoll_disable(&ioa_cfg->hrrq[i].iopoll); in ipr_store_iopoll_weight()
3708 blk_iopoll_init(&ioa_cfg->hrrq[i].iopoll, in ipr_store_iopoll_weight()
3710 blk_iopoll_enable(&ioa_cfg->hrrq[i].iopoll); in ipr_store_iopoll_weight()
4249 if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead && !ioa_cfg->dump_taken) { in ipr_alloc_dump()
4934 struct ipr_hrr_queue *hrrq; in ipr_wait_for_ops() local
4942 for_each_hrrq(hrrq, ioa_cfg) { in ipr_wait_for_ops()
4943 spin_lock_irqsave(hrrq->lock, flags); in ipr_wait_for_ops()
4944 list_for_each_entry(ipr_cmd, &hrrq->hrrq_pending_q, queue) { in ipr_wait_for_ops()
4950 spin_unlock_irqrestore(hrrq->lock, flags); in ipr_wait_for_ops()
4959 for_each_hrrq(hrrq, ioa_cfg) { in ipr_wait_for_ops()
4960 spin_lock_irqsave(hrrq->lock, flags); in ipr_wait_for_ops()
4961 list_for_each_entry(ipr_cmd, &hrrq->hrrq_pending_q, queue) { in ipr_wait_for_ops()
4967 spin_unlock_irqrestore(hrrq->lock, flags); in ipr_wait_for_ops()
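The two loops above are the drain pattern used while waiting for outstanding ops: scan every queue's pending list under hrrq->lock with interrupts disabled (the completion side runs from the ISR), and repeat until no command for the target device remains. A sketch of one scan pass; for_each_hrrq is the driver's own iterator visible in the listing, while ops_pending and the match() predicate are hypothetical names introduced for illustration:

        #include <linux/list.h>
        #include <linux/spinlock.h>

        static int ops_pending(struct ipr_ioa_cfg *ioa_cfg,
                               struct scsi_device *sdev)
        {
                struct ipr_hrr_queue *hrrq;
                struct ipr_cmnd *ipr_cmd;
                unsigned long flags;
                int wait = 0;

                for_each_hrrq(hrrq, ioa_cfg) {
                        spin_lock_irqsave(hrrq->lock, flags);
                        list_for_each_entry(ipr_cmd, &hrrq->hrrq_pending_q, queue) {
                                if (match(ipr_cmd, sdev)) /* hypothetical predicate */
                                        wait++;
                        }
                        spin_unlock_irqrestore(hrrq->lock, flags);
                }
                return wait;
        }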
4992 if (!ioa_cfg->in_reset_reload && !ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead) { in ipr_eh_host_reset()
5007 if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead) { in ipr_eh_host_reset()
5062 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q); in ipr_device_reset()
5132 struct ipr_hrr_queue *hrrq; in __ipr_eh_dev_reset() local
5148 if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead) in __ipr_eh_dev_reset()
5151 for_each_hrrq(hrrq, ioa_cfg) { in __ipr_eh_dev_reset()
5152 spin_lock(&hrrq->_lock); in __ipr_eh_dev_reset()
5153 list_for_each_entry(ipr_cmd, &hrrq->hrrq_pending_q, queue) { in __ipr_eh_dev_reset()
5166 spin_unlock(&hrrq->_lock); in __ipr_eh_dev_reset()
5177 for_each_hrrq(hrrq, ioa_cfg) { in __ipr_eh_dev_reset()
5178 spin_lock(&hrrq->_lock); in __ipr_eh_dev_reset()
5180 &hrrq->hrrq_pending_q, queue) { in __ipr_eh_dev_reset()
5187 spin_unlock(&hrrq->_lock); in __ipr_eh_dev_reset()
5247 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q); in ipr_bus_reset_done()
5308 struct ipr_hrr_queue *hrrq; in ipr_cancel_op() local
5319 ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead) in ipr_cancel_op()
5334 for_each_hrrq(hrrq, ioa_cfg) { in ipr_cancel_op()
5335 spin_lock(&hrrq->_lock); in ipr_cancel_op()
5336 list_for_each_entry(ipr_cmd, &hrrq->hrrq_pending_q, queue) { in ipr_cancel_op()
5343 spin_unlock(&hrrq->_lock); in ipr_cancel_op()
5370 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q); in ipr_cancel_op()
5392 if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead || ioa_cfg->scan_done) in ipr_scan_finished()
5574 struct ipr_hrr_queue *hrrq; in ipr_iopoll() local
5580 hrrq = container_of(iop, struct ipr_hrr_queue, iopoll); in ipr_iopoll()
5581 ioa_cfg = hrrq->ioa_cfg; in ipr_iopoll()
5583 spin_lock_irqsave(hrrq->lock, hrrq_flags); in ipr_iopoll()
5584 completed_ops = ipr_process_hrrq(hrrq, budget, &doneq); in ipr_iopoll()
5588 spin_unlock_irqrestore(hrrq->lock, hrrq_flags); in ipr_iopoll()
5609 struct ipr_hrr_queue *hrrq = (struct ipr_hrr_queue *)devp; in ipr_isr() local
5610 struct ipr_ioa_cfg *ioa_cfg = hrrq->ioa_cfg; in ipr_isr()
5619 spin_lock_irqsave(hrrq->lock, hrrq_flags); in ipr_isr()
5621 if (!hrrq->allow_interrupts) { in ipr_isr()
5622 spin_unlock_irqrestore(hrrq->lock, hrrq_flags); in ipr_isr()
5627 if (ipr_process_hrrq(hrrq, -1, &doneq)) { in ipr_isr()
5658 spin_unlock_irqrestore(hrrq->lock, hrrq_flags); in ipr_isr()
5677 struct ipr_hrr_queue *hrrq = (struct ipr_hrr_queue *)devp; in ipr_isr_mhrrq() local
5678 struct ipr_ioa_cfg *ioa_cfg = hrrq->ioa_cfg; in ipr_isr_mhrrq()
5684 spin_lock_irqsave(hrrq->lock, hrrq_flags); in ipr_isr_mhrrq()
5687 if (!hrrq->allow_interrupts) { in ipr_isr_mhrrq()
5688 spin_unlock_irqrestore(hrrq->lock, hrrq_flags); in ipr_isr_mhrrq()
5693 if ((be32_to_cpu(*hrrq->hrrq_curr) & IPR_HRRQ_TOGGLE_BIT) == in ipr_isr_mhrrq()
5694 hrrq->toggle_bit) { in ipr_isr_mhrrq()
5695 if (!blk_iopoll_sched_prep(&hrrq->iopoll)) in ipr_isr_mhrrq()
5696 blk_iopoll_sched(&hrrq->iopoll); in ipr_isr_mhrrq()
5697 spin_unlock_irqrestore(hrrq->lock, hrrq_flags); in ipr_isr_mhrrq()
5701 if ((be32_to_cpu(*hrrq->hrrq_curr) & IPR_HRRQ_TOGGLE_BIT) == in ipr_isr_mhrrq()
5702 hrrq->toggle_bit) in ipr_isr_mhrrq()
5704 if (ipr_process_hrrq(hrrq, -1, &doneq)) in ipr_isr_mhrrq()
5708 spin_unlock_irqrestore(hrrq->lock, hrrq_flags); in ipr_isr_mhrrq()
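The interrupt paths above hinge on the toggle bit: the adapter posts 32-bit completion entries into the host RRQ ring and flips IPR_HRRQ_TOGGLE_BIT each time the ring wraps, so the host can tell fresh entries from stale ones without a shared producer index. ipr_isr_mhrrq() only peeks at *hrrq_curr and, in iopoll mode, defers the actual draining to ipr_iopoll() under a budget. A standalone C model of the consumer side; the ring size, bit position, and payload are illustrative:

        #include <stdint.h>
        #include <stdio.h>

        #define HRRQ_SIZE  8
        #define TOGGLE     1u          /* stand-in for IPR_HRRQ_TOGGLE_BIT */

        static uint32_t ring[HRRQ_SIZE];
        static uint32_t *curr = ring;  /* hrrq_curr */
        static uint32_t toggle = 1;    /* expected toggle value after init */

        /* Drain entries whose toggle bit matches; flip the expectation on wrap. */
        static int process_hrrq(void)
        {
                int done = 0;

                while ((*curr & TOGGLE) == toggle) {
                        /* ...look up and complete the command encoded in *curr... */
                        done++;
                        if (curr == &ring[HRRQ_SIZE - 1]) { /* wrapped: flip */
                                curr = ring;
                                toggle ^= 1u;
                        } else {
                                curr++;
                        }
                }
                return done;
        }

        int main(void)
        {
                for (int i = 0; i < 3; i++)            /* adapter posts 3 entries */
                        ring[i] = (uint32_t)(i << 1) | TOGGLE;
                printf("completed %d ops\n", process_hrrq()); /* completed 3 ops */
                return 0;
        }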
5862 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q); in ipr_erp_done()
6253 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q); in ipr_erp_start()
6279 spin_lock_irqsave(ipr_cmd->hrrq->lock, lock_flags); in ipr_scsi_done()
6280 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q); in ipr_scsi_done()
6282 spin_unlock_irqrestore(ipr_cmd->hrrq->lock, lock_flags); in ipr_scsi_done()
6285 spin_lock(&ipr_cmd->hrrq->_lock); in ipr_scsi_done()
6287 spin_unlock(&ipr_cmd->hrrq->_lock); in ipr_scsi_done()
6313 struct ipr_hrr_queue *hrrq; in ipr_queuecommand() local
6329 hrrq = &ioa_cfg->hrrq[hrrq_id]; in ipr_queuecommand()
6331 spin_lock_irqsave(hrrq->lock, hrrq_flags); in ipr_queuecommand()
6337 if (unlikely(!hrrq->allow_cmds && !hrrq->ioa_is_dead && !hrrq->removing_ioa)) { in ipr_queuecommand()
6338 spin_unlock_irqrestore(hrrq->lock, hrrq_flags); in ipr_queuecommand()
6346 if (unlikely(hrrq->ioa_is_dead || hrrq->removing_ioa || !res)) { in ipr_queuecommand()
6347 spin_unlock_irqrestore(hrrq->lock, hrrq_flags); in ipr_queuecommand()
6351 ipr_cmd = __ipr_get_free_ipr_cmnd(hrrq); in ipr_queuecommand()
6353 spin_unlock_irqrestore(hrrq->lock, hrrq_flags); in ipr_queuecommand()
6356 spin_unlock_irqrestore(hrrq->lock, hrrq_flags); in ipr_queuecommand()
6393 spin_lock_irqsave(hrrq->lock, hrrq_flags); in ipr_queuecommand()
6394 if (unlikely(rc || (!hrrq->allow_cmds && !hrrq->ioa_is_dead))) { in ipr_queuecommand()
6395 list_add_tail(&ipr_cmd->queue, &hrrq->hrrq_free_q); in ipr_queuecommand()
6396 spin_unlock_irqrestore(hrrq->lock, hrrq_flags); in ipr_queuecommand()
6402 if (unlikely(hrrq->ioa_is_dead)) { in ipr_queuecommand()
6403 list_add_tail(&ipr_cmd->queue, &hrrq->hrrq_free_q); in ipr_queuecommand()
6404 spin_unlock_irqrestore(hrrq->lock, hrrq_flags); in ipr_queuecommand()
6414 list_add_tail(&ipr_cmd->queue, &hrrq->hrrq_pending_q); in ipr_queuecommand()
6417 spin_unlock_irqrestore(hrrq->lock, hrrq_flags); in ipr_queuecommand()
6421 spin_lock_irqsave(hrrq->lock, hrrq_flags); in ipr_queuecommand()
6425 spin_unlock_irqrestore(hrrq->lock, hrrq_flags); in ipr_queuecommand()
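The ipr_queuecommand() entries above show the full submission protocol: take the queue lock, refuse or defer if allow_cmds is clear or the adapter is dead or being removed, pop a free command block, and only then link it onto the pending list before handing it to the adapter. A compressed sketch of that flow, reusing the structs and get_free_cmnd() sketched earlier and assuming the flag fields shown in the listing (allow_cmds, ioa_is_dead, removing_ioa) plus the hrrq->lock pointer; the return codes and the doorbell write are simplifications (the real function returns SCSI midlayer codes such as SCSI_MLQUEUE_HOST_BUSY):

        static int queue_cmd(struct ipr_hrr_queue *hrrq)
        {
                struct ipr_cmnd *ipr_cmd;
                unsigned long flags;

                spin_lock_irqsave(hrrq->lock, flags);

                if (unlikely(!hrrq->allow_cmds || hrrq->ioa_is_dead ||
                             hrrq->removing_ioa)) {
                        spin_unlock_irqrestore(hrrq->lock, flags);
                        return -ENODEV;    /* simplified; may mean "retry later" */
                }

                ipr_cmd = get_free_cmnd(hrrq);
                if (!ipr_cmd) {
                        spin_unlock_irqrestore(hrrq->lock, flags);
                        return -EBUSY;
                }

                list_add_tail(&ipr_cmd->queue, &hrrq->hrrq_pending_q);
                /* ...hand the command's DMA address to the adapter... */
                spin_unlock_irqrestore(hrrq->lock, flags);
                return 0;
        }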
6524 if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds) in ipr_ata_phy_reset()
6555 struct ipr_hrr_queue *hrrq; in ipr_ata_post_internal() local
6565 for_each_hrrq(hrrq, ioa_cfg) { in ipr_ata_post_internal()
6566 spin_lock(&hrrq->_lock); in ipr_ata_post_internal()
6567 list_for_each_entry(ipr_cmd, &hrrq->hrrq_pending_q, queue) { in ipr_ata_post_internal()
6573 spin_unlock(&hrrq->_lock); in ipr_ata_post_internal()
6622 spin_lock(&ipr_cmd->hrrq->_lock); in ipr_sata_done()
6638 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q); in ipr_sata_done()
6639 spin_unlock(&ipr_cmd->hrrq->_lock); in ipr_sata_done()
6747 struct ipr_hrr_queue *hrrq; in ipr_qc_defer() local
6751 hrrq = &ioa_cfg->hrrq[hrrq_id]; in ipr_qc_defer()
6754 spin_lock(&hrrq->_lock); in ipr_qc_defer()
6755 if (unlikely(hrrq->ioa_is_dead)) { in ipr_qc_defer()
6756 spin_unlock(&hrrq->_lock); in ipr_qc_defer()
6760 if (unlikely(!hrrq->allow_cmds)) { in ipr_qc_defer()
6761 spin_unlock(&hrrq->_lock); in ipr_qc_defer()
6765 ipr_cmd = __ipr_get_free_ipr_cmnd(hrrq); in ipr_qc_defer()
6767 spin_unlock(&hrrq->_lock); in ipr_qc_defer()
6772 spin_unlock(&hrrq->_lock); in ipr_qc_defer()
6801 spin_lock(&ipr_cmd->hrrq->_lock); in ipr_qc_issue()
6802 if (unlikely(!ipr_cmd->hrrq->allow_cmds || in ipr_qc_issue()
6803 ipr_cmd->hrrq->ioa_is_dead)) { in ipr_qc_issue()
6804 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q); in ipr_qc_issue()
6805 spin_unlock(&ipr_cmd->hrrq->_lock); in ipr_qc_issue()
6821 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q); in ipr_qc_issue()
6861 spin_unlock(&ipr_cmd->hrrq->_lock); in ipr_qc_issue()
6866 spin_unlock(&ipr_cmd->hrrq->_lock); in ipr_qc_issue()
6975 if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].removing_ioa) { in ipr_ioa_bringdown_done()
6985 spin_lock(&ioa_cfg->hrrq[i]._lock); in ipr_ioa_bringdown_done()
6986 ioa_cfg->hrrq[i].ioa_is_dead = 1; in ipr_ioa_bringdown_done()
6987 spin_unlock(&ioa_cfg->hrrq[i]._lock); in ipr_ioa_bringdown_done()
6991 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q); in ipr_ioa_bringdown_done()
7019 spin_lock(&ioa_cfg->hrrq[j]._lock); in ipr_ioa_reset_done()
7020 ioa_cfg->hrrq[j].allow_cmds = 1; in ipr_ioa_reset_done()
7021 spin_unlock(&ioa_cfg->hrrq[j]._lock); in ipr_ioa_reset_done()
7047 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q); in ipr_ioa_reset_done()
7054 if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds) in ipr_ioa_reset_done()
7388 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q); in ipr_reset_cmd_failed()
7806 &ioa_cfg->hrrq->hrrq_free_q); in ipr_ioafp_page0_inquiry()
7859 struct ipr_hrr_queue *hrrq; in ipr_ioafp_identify_hrrq() local
7866 hrrq = &ioa_cfg->hrrq[ioa_cfg->identify_hrrq_index]; in ipr_ioafp_identify_hrrq()
7881 ((u64) hrrq->host_rrq_dma >> 24) & 0xff; in ipr_ioafp_identify_hrrq()
7883 ((u64) hrrq->host_rrq_dma >> 16) & 0xff; in ipr_ioafp_identify_hrrq()
7885 ((u64) hrrq->host_rrq_dma >> 8) & 0xff; in ipr_ioafp_identify_hrrq()
7887 ((u64) hrrq->host_rrq_dma) & 0xff; in ipr_ioafp_identify_hrrq()
7889 ((sizeof(u32) * hrrq->size) >> 8) & 0xff; in ipr_ioafp_identify_hrrq()
7891 (sizeof(u32) * hrrq->size) & 0xff; in ipr_ioafp_identify_hrrq()
7899 ((u64) hrrq->host_rrq_dma >> 56) & 0xff; in ipr_ioafp_identify_hrrq()
7901 ((u64) hrrq->host_rrq_dma >> 48) & 0xff; in ipr_ioafp_identify_hrrq()
7903 ((u64) hrrq->host_rrq_dma >> 40) & 0xff; in ipr_ioafp_identify_hrrq()
7905 ((u64) hrrq->host_rrq_dma >> 32) & 0xff; in ipr_ioafp_identify_hrrq()
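Lines 7881-7905 build the IDENTIFY HRRQ command: the 64-bit bus address of the ring (host_rrq_dma) is stored one byte at a time, most-significant byte first, into u8 fields of the command block, and the ring length is given as size entries times sizeof(u32). Note the split: the >>24 .. >>0 shifts cover the low four bytes, and the >>56 .. >>32 shifts appear on a separate path, presumably for 64-bit-capable adapters. A standalone sketch of the byte packing; the destination array is illustrative, not the real IOARCB layout:

        #include <stdint.h>
        #include <stdio.h>

        /* Pack a 64-bit DMA address big-endian, one byte per field, the way
         * the shifts above do. A real IOARCB has named u8 fields instead of
         * an array; the loop is equivalent to the eight explicit shifts.
         */
        static void pack_be64(uint8_t dest[8], uint64_t dma)
        {
                for (int i = 0; i < 8; i++)
                        dest[i] = (uint8_t)((dma >> (56 - 8 * i)) & 0xff);
        }

        int main(void)
        {
                uint8_t b[8];

                pack_be64(b, 0x0123456789abcdefULL);
                for (int i = 0; i < 8; i++)
                        printf("%02x ", b[i]); /* 01 23 45 67 89 ab cd ef */
                printf("\n");
                return 0;
        }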
7973 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q); in ipr_reset_start_timer()
7991 struct ipr_hrr_queue *hrrq; in ipr_init_ioa_mem() local
7993 for_each_hrrq(hrrq, ioa_cfg) { in ipr_init_ioa_mem()
7994 spin_lock(&hrrq->_lock); in ipr_init_ioa_mem()
7995 memset(hrrq->host_rrq, 0, sizeof(u32) * hrrq->size); in ipr_init_ioa_mem()
7998 hrrq->hrrq_start = hrrq->host_rrq; in ipr_init_ioa_mem()
7999 hrrq->hrrq_end = &hrrq->host_rrq[hrrq->size - 1]; in ipr_init_ioa_mem()
8000 hrrq->hrrq_curr = hrrq->hrrq_start; in ipr_init_ioa_mem()
8001 hrrq->toggle_bit = 1; in ipr_init_ioa_mem()
8002 spin_unlock(&hrrq->_lock); in ipr_init_ioa_mem()
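ipr_init_ioa_mem() (lines 7993-8002) resets each ring before the adapter is (re)initialized: zero the ring memory, point start and current at the base, mark the last slot as end, and set the expected toggle bit back to 1 so the first entries the adapter posts after reset are recognized as new. A sketch, assuming the ring fields shown above:

        #include <linux/spinlock.h>
        #include <linux/string.h>

        static void reset_hrrq_ring(struct ipr_hrr_queue *hrrq)
        {
                spin_lock(&hrrq->_lock);
                memset(hrrq->host_rrq, 0, sizeof(u32) * hrrq->size);
                hrrq->hrrq_start = hrrq->host_rrq;
                hrrq->hrrq_end = &hrrq->host_rrq[hrrq->size - 1];
                hrrq->hrrq_curr = hrrq->hrrq_start;
                hrrq->toggle_bit = 1;  /* adapter posts with toggle = 1 first */
                spin_unlock(&hrrq->_lock);
        }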
8068 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q); in ipr_reset_next_stage()
8095 spin_lock(&ioa_cfg->hrrq[i]._lock); in ipr_reset_enable_ioa()
8096 ioa_cfg->hrrq[i].allow_interrupts = 1; in ipr_reset_enable_ioa()
8097 spin_unlock(&ioa_cfg->hrrq[i]._lock); in ipr_reset_enable_ioa()
8139 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q); in ipr_reset_enable_ioa()
8634 struct ipr_hrr_queue *hrrq; in ipr_reset_cancel_hcam_done() local
8641 for_each_hrrq(hrrq, ioa_cfg) { in ipr_reset_cancel_hcam_done()
8642 spin_lock(&hrrq->_lock); in ipr_reset_cancel_hcam_done()
8643 list_for_each_entry(loop_cmd, &hrrq->hrrq_pending_q, queue) { in ipr_reset_cancel_hcam_done()
8646 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q); in ipr_reset_cancel_hcam_done()
8650 spin_unlock(&hrrq->_lock); in ipr_reset_cancel_hcam_done()
8675 struct ipr_hrr_queue *hrrq = &ioa_cfg->hrrq[IPR_INIT_HRRQ]; in ipr_reset_cancel_hcam() local
8680 if (!hrrq->ioa_is_dead) { in ipr_reset_cancel_hcam()
8682 list_for_each_entry(hcam_cmd, &hrrq->hrrq_pending_q, queue) { in ipr_reset_cancel_hcam()
8801 !ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead) { in ipr_reset_shutdown_ioa()
8850 &ipr_cmd->hrrq->hrrq_free_q); in ipr_reset_ioa_job()
8889 spin_lock(&ioa_cfg->hrrq[i]._lock); in _ipr_initiate_ioa_reset()
8890 ioa_cfg->hrrq[i].allow_cmds = 0; in _ipr_initiate_ioa_reset()
8891 spin_unlock(&ioa_cfg->hrrq[i]._lock); in _ipr_initiate_ioa_reset()
8894 if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].removing_ioa) in _ipr_initiate_ioa_reset()
8922 if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead) in ipr_initiate_ioa_reset()
8938 spin_lock(&ioa_cfg->hrrq[i]._lock); in ipr_initiate_ioa_reset()
8939 ioa_cfg->hrrq[i].ioa_is_dead = 1; in ipr_initiate_ioa_reset()
8940 spin_unlock(&ioa_cfg->hrrq[i]._lock); in ipr_initiate_ioa_reset()
8950 if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].removing_ioa) { in ipr_initiate_ioa_reset()
8981 spin_lock(&ioa_cfg->hrrq[i]._lock); in ipr_reset_freeze()
8982 ioa_cfg->hrrq[i].allow_interrupts = 0; in ipr_reset_freeze()
8983 spin_unlock(&ioa_cfg->hrrq[i]._lock); in ipr_reset_freeze()
8986 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q); in ipr_reset_freeze()
9075 spin_lock(&ioa_cfg->hrrq[i]._lock); in ipr_pci_perm_failure()
9076 ioa_cfg->hrrq[i].allow_cmds = 0; in ipr_pci_perm_failure()
9077 spin_unlock(&ioa_cfg->hrrq[i]._lock); in ipr_pci_perm_failure()
9195 sizeof(u32) * ioa_cfg->hrrq[i].size, in ipr_free_mem()
9196 ioa_cfg->hrrq[i].host_rrq, in ipr_free_mem()
9197 ioa_cfg->hrrq[i].host_rrq_dma); in ipr_free_mem()
9232 &ioa_cfg->hrrq[i]); in ipr_free_irqs()
9234 free_irq(pdev->irq, &ioa_cfg->hrrq[0]); in ipr_free_irqs()
9303 ioa_cfg->hrrq[i].min_cmd_id = 0; in ipr_alloc_cmd_blks()
9304 ioa_cfg->hrrq[i].max_cmd_id = in ipr_alloc_cmd_blks()
9310 ioa_cfg->hrrq[i].min_cmd_id = in ipr_alloc_cmd_blks()
9313 ioa_cfg->hrrq[i].max_cmd_id = in ipr_alloc_cmd_blks()
9319 ioa_cfg->hrrq[i].min_cmd_id = 0; in ipr_alloc_cmd_blks()
9320 ioa_cfg->hrrq[i].max_cmd_id = (entries_each_hrrq - 1); in ipr_alloc_cmd_blks()
9322 ioa_cfg->hrrq[i].size = entries_each_hrrq; in ipr_alloc_cmd_blks()
9328 ioa_cfg->hrrq[ioa_cfg->hrrq_num - 1].max_cmd_id - 1; in ipr_alloc_cmd_blks()
9330 ioa_cfg->hrrq[ioa_cfg->hrrq_num - 1].size += i; in ipr_alloc_cmd_blks()
9331 ioa_cfg->hrrq[ioa_cfg->hrrq_num - 1].max_cmd_id += i; in ipr_alloc_cmd_blks()
9373 ipr_cmd->hrrq = &ioa_cfg->hrrq[hrrq_id]; in ipr_alloc_cmd_blks()
9374 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q); in ipr_alloc_cmd_blks()
9375 if (i >= ioa_cfg->hrrq[hrrq_id].max_cmd_id) in ipr_alloc_cmd_blks()
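The ipr_alloc_cmd_blks() entries above carve the global pool of command blocks into contiguous [min_cmd_id, max_cmd_id] ranges, one per queue, fold the division remainder into the last queue (lines 9328-9331), and seed each block onto its queue's free list. A standalone model of the partitioning arithmetic; the reserved internal-command range that the real driver gives queue 0 is omitted for brevity:

        #include <stdio.h>

        int main(void)
        {
                unsigned int total = 100, num_queues = 3;
                unsigned int per_q = total / num_queues;
                unsigned int rem = total % num_queues;
                unsigned int min = 0;

                for (unsigned int q = 0; q < num_queues; q++) {
                        /* last queue absorbs the remainder, as in the listing */
                        unsigned int size = per_q +
                                (q == num_queues - 1 ? rem : 0);

                        printf("hrrq[%u]: cmd ids %u..%u (%u entries)\n",
                               q, min, min + size - 1, size);
                        min += size;
                }
                return 0;
        }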
9418 ioa_cfg->hrrq[i].host_rrq = dma_alloc_coherent(&pdev->dev, in ipr_alloc_mem()
9419 sizeof(u32) * ioa_cfg->hrrq[i].size, in ipr_alloc_mem()
9420 &ioa_cfg->hrrq[i].host_rrq_dma, in ipr_alloc_mem()
9423 if (!ioa_cfg->hrrq[i].host_rrq) { in ipr_alloc_mem()
9426 sizeof(u32) * ioa_cfg->hrrq[i].size, in ipr_alloc_mem()
9427 ioa_cfg->hrrq[i].host_rrq, in ipr_alloc_mem()
9428 ioa_cfg->hrrq[i].host_rrq_dma); in ipr_alloc_mem()
9431 ioa_cfg->hrrq[i].ioa_cfg = ioa_cfg; in ipr_alloc_mem()
9479 sizeof(u32) * ioa_cfg->hrrq[i].size, in ipr_alloc_mem()
9480 ioa_cfg->hrrq[i].host_rrq, in ipr_alloc_mem()
9481 ioa_cfg->hrrq[i].host_rrq_dma); in ipr_alloc_mem()
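ipr_alloc_mem() allocates one coherent, DMA-visible ring per queue and, if any allocation fails, walks back and frees the rings already allocated (the dma_free_coherent() calls at 9425-9428 and again in ipr_free_mem() at 9194-9197). A sketch of that allocate-or-unwind shape; the function name and error handling are simplified:

        #include <linux/dma-mapping.h>

        static int alloc_hrrq_rings(struct ipr_ioa_cfg *ioa_cfg,
                                    struct device *dev)
        {
                int i;

                for (i = 0; i < ioa_cfg->hrrq_num; i++) {
                        ioa_cfg->hrrq[i].host_rrq =
                                dma_alloc_coherent(dev,
                                        sizeof(u32) * ioa_cfg->hrrq[i].size,
                                        &ioa_cfg->hrrq[i].host_rrq_dma,
                                        GFP_KERNEL);
                        if (!ioa_cfg->hrrq[i].host_rrq)
                                goto unwind;
                        ioa_cfg->hrrq[i].ioa_cfg = ioa_cfg;
                }
                return 0;

        unwind:
                while (--i >= 0)
                        dma_free_coherent(dev,
                                sizeof(u32) * ioa_cfg->hrrq[i].size,
                                ioa_cfg->hrrq[i].host_rrq,
                                ioa_cfg->hrrq[i].host_rrq_dma);
                return -ENOMEM;
        }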
9619 for (i = 0; i < ARRAY_SIZE(ioa_cfg->hrrq); i++) { in ipr_init_ioa_cfg()
9620 INIT_LIST_HEAD(&ioa_cfg->hrrq[i].hrrq_free_q); in ipr_init_ioa_cfg()
9621 INIT_LIST_HEAD(&ioa_cfg->hrrq[i].hrrq_pending_q); in ipr_init_ioa_cfg()
9622 spin_lock_init(&ioa_cfg->hrrq[i]._lock); in ipr_init_ioa_cfg()
9624 ioa_cfg->hrrq[i].lock = ioa_cfg->host->host_lock; in ipr_init_ioa_cfg()
9626 ioa_cfg->hrrq[i].lock = &ioa_cfg->hrrq[i]._lock; in ipr_init_ioa_cfg()
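ipr_init_ioa_cfg() (lines 9619-9626) shows why the driver has both hrrq[i]._lock and hrrq->lock: every queue owns a real spinlock, but hrrq->lock is a pointer aliased either to the Scsi_Host's host_lock or to the queue's private _lock, so hot paths can lock through hrrq->lock without caring which mode the adapter is in. The selection condition is not visible in the listing; the sketch below assumes the initial queue shares host_lock and the rest use their own:

        /* Per-queue init loop sketched from lines 9619-9626. */
        for (i = 0; i < ARRAY_SIZE(ioa_cfg->hrrq); i++) {
                INIT_LIST_HEAD(&ioa_cfg->hrrq[i].hrrq_free_q);
                INIT_LIST_HEAD(&ioa_cfg->hrrq[i].hrrq_pending_q);
                spin_lock_init(&ioa_cfg->hrrq[i]._lock);
                if (i == 0)    /* assumed condition */
                        ioa_cfg->hrrq[i].lock = ioa_cfg->host->host_lock;
                else
                        ioa_cfg->hrrq[i].lock = &ioa_cfg->hrrq[i]._lock;
        }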
9729 &ioa_cfg->hrrq[i]); in ipr_request_other_msi_irqs()
9733 &ioa_cfg->hrrq[i]); in ipr_request_other_msi_irqs()
10064 &ioa_cfg->hrrq[0]); in ipr_probe_ioa()
10070 IPR_NAME, &ioa_cfg->hrrq[0]); in ipr_probe_ioa()
10173 spin_lock(&ioa_cfg->hrrq[i]._lock); in __ipr_remove()
10174 ioa_cfg->hrrq[i].removing_ioa = 1; in __ipr_remove()
10175 spin_unlock(&ioa_cfg->hrrq[i]._lock); in __ipr_remove()
10283 blk_iopoll_init(&ioa_cfg->hrrq[i].iopoll, in ipr_probe()
10285 blk_iopoll_enable(&ioa_cfg->hrrq[i].iopoll); in ipr_probe()
10314 blk_iopoll_disable(&ioa_cfg->hrrq[i].iopoll); in ipr_shutdown()
10468 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q); in ipr_halt_done()
10490 if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds || in ipr_halt()