Lines matching refs:hrrq (drivers/scsi/ipr.c)

707 struct ipr_cmnd *__ipr_get_free_ipr_cmnd(struct ipr_hrr_queue *hrrq)  in __ipr_get_free_ipr_cmnd()  argument
711 if (likely(!list_empty(&hrrq->hrrq_free_q))) { in __ipr_get_free_ipr_cmnd()
712 ipr_cmd = list_entry(hrrq->hrrq_free_q.next, in __ipr_get_free_ipr_cmnd()
732 __ipr_get_free_ipr_cmnd(&ioa_cfg->hrrq[IPR_INIT_HRRQ]); in ipr_get_free_ipr_cmnd()
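The two entries above are the free-list fast path: a command block is popped off hrrq_free_q while the queue's lock is held, and ipr_get_free_ipr_cmnd() at line 732 is evidently a wrapper that always draws from the initialization queue, hrrq[IPR_INIT_HRRQ]. A minimal reconstruction of the pop, assuming the caller holds the queue lock and that the elided lines unlink and re-initialize the command:

    /* Sketch reconstructed from lines 707-712; caller must hold the
     * queue lock. The list_del() is assumed -- the listing elides it. */
    static struct ipr_cmnd *__ipr_get_free_ipr_cmnd(struct ipr_hrr_queue *hrrq)
    {
        struct ipr_cmnd *ipr_cmd = NULL;

        if (likely(!list_empty(&hrrq->hrrq_free_q))) {
            ipr_cmd = list_entry(hrrq->hrrq_free_q.next,
                                 struct ipr_cmnd, queue);
            list_del(&ipr_cmd->queue);
        }

        return ipr_cmd;
    }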
756 spin_lock(&ioa_cfg->hrrq[i]._lock); in ipr_mask_and_clear_interrupts()
757 ioa_cfg->hrrq[i].allow_interrupts = 0; in ipr_mask_and_clear_interrupts()
758 spin_unlock(&ioa_cfg->hrrq[i]._lock); in ipr_mask_and_clear_interrupts()
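Lines 756-758 are one iteration of a per-queue loop; the same lock/flag/unlock shape recurs across the listing for allow_cmds (9019), ioa_is_dead (9068), and removing_ioa (10303). Presumed surrounding loop:

    /* Presumed context for lines 756-758: every queue carries its own
     * copy of the state flags, each guarded by that queue's _lock. */
    for (i = 0; i < ioa_cfg->hrrq_num; i++) {
        spin_lock(&ioa_cfg->hrrq[i]._lock);
        ioa_cfg->hrrq[i].allow_interrupts = 0;
        spin_unlock(&ioa_cfg->hrrq[i]._lock);
    }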
838 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q); in ipr_sata_eh_done()
862 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q); in ipr_scsi_eh_done()
877 struct ipr_hrr_queue *hrrq; in ipr_fail_all_ops() local
880 for_each_hrrq(hrrq, ioa_cfg) { in ipr_fail_all_ops()
881 spin_lock(&hrrq->_lock); in ipr_fail_all_ops()
883 temp, &hrrq->hrrq_pending_q, queue) { in ipr_fail_all_ops()
901 spin_unlock(&hrrq->_lock); in ipr_fail_all_ops()
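ipr_fail_all_ops() walks every queue's pending list with the _safe iterator (note the temp cursor at line 883) because each command is unlinked inside the loop body. A sketch under that assumption:

    /* Sketch of lines 877-901; failing/completing each op is elided --
     * only the unlink-while-iterating shape is shown here. */
    struct ipr_cmnd *ipr_cmd, *temp;

    for_each_hrrq(hrrq, ioa_cfg) {
        spin_lock(&hrrq->_lock);
        list_for_each_entry_safe(ipr_cmd, temp,
                                 &hrrq->hrrq_pending_q, queue) {
            list_del(&ipr_cmd->queue);
            /* ... mark the op failed and invoke its done handler ... */
        }
        spin_unlock(&hrrq->_lock);
    }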
952 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q); in ipr_do_req()
1055 unsigned int hrrq; in ipr_get_hrrq_index() local
1058 hrrq = 0; in ipr_get_hrrq_index()
1060 hrrq = atomic_add_return(1, &ioa_cfg->hrrq_index); in ipr_get_hrrq_index()
1061 hrrq = (hrrq % (ioa_cfg->hrrq_num - 1)) + 1; in ipr_get_hrrq_index()
1063 return hrrq; in ipr_get_hrrq_index()
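Lines 1055-1063 carry the entire queue-selection algorithm: queue 0 is held back (it is IPR_INIT_HRRQ, used for internal commands throughout the listing), and I/O is spread round-robin over queues 1..hrrq_num-1 with an atomic counter, so no lock is needed to pick a queue. Reassembled, with the single-queue branch as an assumption consistent with the modulo arithmetic:

    static unsigned int ipr_get_hrrq_index(struct ipr_ioa_cfg *ioa_cfg)
    {
        unsigned int hrrq;

        if (ioa_cfg->hrrq_num == 1)
            hrrq = 0;            /* assumed: a lone queue takes everything */
        else {
            hrrq = atomic_add_return(1, &ioa_cfg->hrrq_index);
            hrrq = (hrrq % (ioa_cfg->hrrq_num - 1)) + 1;
        }
        return hrrq;
    }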
1085 if (ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds) { in ipr_send_hcam()
1087 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q); in ipr_send_hcam()
1476 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q); in ipr_process_ccn()
2578 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q); in ipr_process_error()
3293 if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds) { in ipr_worker_thread()
3542 if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead) in ipr_show_adapter_state()
3574 if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead && in ipr_store_adapter_state()
3577 spin_lock(&ioa_cfg->hrrq[i]._lock); in ipr_store_adapter_state()
3578 ioa_cfg->hrrq[i].ioa_is_dead = 0; in ipr_store_adapter_state()
3579 spin_unlock(&ioa_cfg->hrrq[i]._lock); in ipr_store_adapter_state()
3702 blk_iopoll_disable(&ioa_cfg->hrrq[i].iopoll); in ipr_store_iopoll_weight()
3709 blk_iopoll_init(&ioa_cfg->hrrq[i].iopoll, in ipr_store_iopoll_weight()
3711 blk_iopoll_enable(&ioa_cfg->hrrq[i].iopoll); in ipr_store_iopoll_weight()
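Changing the polling weight at runtime (lines 3702-3711) cannot happen in place: blk_iopoll has no resize operation, so each queue's instance is disabled, re-initialized with the new weight, and re-enabled. Presumed shape (blk_iopoll_* are the pre-4.5 kernel names, later renamed irq_poll; starting at queue 1 matches queue 0 being reserved):

    for (i = 1; i < ioa_cfg->hrrq_num; i++)
        blk_iopoll_disable(&ioa_cfg->hrrq[i].iopoll);
    /* ... store the new weight (field name below is assumed) ... */
    for (i = 1; i < ioa_cfg->hrrq_num; i++) {
        blk_iopoll_init(&ioa_cfg->hrrq[i].iopoll,
                        ioa_cfg->iopoll_weight, ipr_iopoll);
        blk_iopoll_enable(&ioa_cfg->hrrq[i].iopoll);
    }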
4254 if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead && !ioa_cfg->dump_taken) { in ipr_alloc_dump()
4939 struct ipr_hrr_queue *hrrq; in ipr_wait_for_ops() local
4947 for_each_hrrq(hrrq, ioa_cfg) { in ipr_wait_for_ops()
4948 spin_lock_irqsave(hrrq->lock, flags); in ipr_wait_for_ops()
4949 list_for_each_entry(ipr_cmd, &hrrq->hrrq_pending_q, queue) { in ipr_wait_for_ops()
4955 spin_unlock_irqrestore(hrrq->lock, flags); in ipr_wait_for_ops()
4964 for_each_hrrq(hrrq, ioa_cfg) { in ipr_wait_for_ops()
4965 spin_lock_irqsave(hrrq->lock, flags); in ipr_wait_for_ops()
4966 list_for_each_entry(ipr_cmd, &hrrq->hrrq_pending_q, queue) { in ipr_wait_for_ops()
4972 spin_unlock_irqrestore(hrrq->lock, flags); in ipr_wait_for_ops()
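ipr_wait_for_ops() scans every pending list twice, sleeping in between: once to note outstanding ops of interest and once to check whether any remain. It takes hrrq->lock with irqsave rather than _lock directly because, for queue 0, that pointer aliases the SCSI host lock (lines 9753-9755 below). A hedged sketch of one scan; the match predicate is hypothetical, as the listing shows only the walk:

    int wait = 0;

    for_each_hrrq(hrrq, ioa_cfg) {
        spin_lock_irqsave(hrrq->lock, flags);
        list_for_each_entry(ipr_cmd, &hrrq->hrrq_pending_q, queue) {
            if (op_matches(ipr_cmd))    /* hypothetical predicate */
                wait = 1;
        }
        spin_unlock_irqrestore(hrrq->lock, flags);
    }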
4997 if (!ioa_cfg->in_reset_reload && !ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead) { in ipr_eh_host_reset()
5012 if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead) { in ipr_eh_host_reset()
5067 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q); in ipr_device_reset()
5137 struct ipr_hrr_queue *hrrq; in __ipr_eh_dev_reset() local
5153 if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead) in __ipr_eh_dev_reset()
5156 for_each_hrrq(hrrq, ioa_cfg) { in __ipr_eh_dev_reset()
5157 spin_lock(&hrrq->_lock); in __ipr_eh_dev_reset()
5158 list_for_each_entry(ipr_cmd, &hrrq->hrrq_pending_q, queue) { in __ipr_eh_dev_reset()
5171 spin_unlock(&hrrq->_lock); in __ipr_eh_dev_reset()
5182 for_each_hrrq(hrrq, ioa_cfg) { in __ipr_eh_dev_reset()
5183 spin_lock(&hrrq->_lock); in __ipr_eh_dev_reset()
5185 &hrrq->hrrq_pending_q, queue) { in __ipr_eh_dev_reset()
5192 spin_unlock(&hrrq->_lock); in __ipr_eh_dev_reset()
5252 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q); in ipr_bus_reset_done()
5313 struct ipr_hrr_queue *hrrq; in ipr_cancel_op() local
5324 ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead) in ipr_cancel_op()
5339 for_each_hrrq(hrrq, ioa_cfg) { in ipr_cancel_op()
5340 spin_lock(&hrrq->_lock); in ipr_cancel_op()
5341 list_for_each_entry(ipr_cmd, &hrrq->hrrq_pending_q, queue) { in ipr_cancel_op()
5348 spin_unlock(&hrrq->_lock); in ipr_cancel_op()
5375 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q); in ipr_cancel_op()
5397 if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead || ioa_cfg->scan_done) in ipr_scan_finished()
5579 struct ipr_hrr_queue *hrrq; in ipr_iopoll() local
5585 hrrq = container_of(iop, struct ipr_hrr_queue, iopoll); in ipr_iopoll()
5586 ioa_cfg = hrrq->ioa_cfg; in ipr_iopoll()
5588 spin_lock_irqsave(hrrq->lock, hrrq_flags); in ipr_iopoll()
5589 completed_ops = ipr_process_hrrq(hrrq, budget, &doneq); in ipr_iopoll()
5593 spin_unlock_irqrestore(hrrq->lock, hrrq_flags); in ipr_iopoll()
5614 struct ipr_hrr_queue *hrrq = (struct ipr_hrr_queue *)devp; in ipr_isr() local
5615 struct ipr_ioa_cfg *ioa_cfg = hrrq->ioa_cfg; in ipr_isr()
5624 spin_lock_irqsave(hrrq->lock, hrrq_flags); in ipr_isr()
5626 if (!hrrq->allow_interrupts) { in ipr_isr()
5627 spin_unlock_irqrestore(hrrq->lock, hrrq_flags); in ipr_isr()
5632 if (ipr_process_hrrq(hrrq, -1, &doneq)) { in ipr_isr()
5663 spin_unlock_irqrestore(hrrq->lock, hrrq_flags); in ipr_isr()
5682 struct ipr_hrr_queue *hrrq = (struct ipr_hrr_queue *)devp; in ipr_isr_mhrrq() local
5683 struct ipr_ioa_cfg *ioa_cfg = hrrq->ioa_cfg; in ipr_isr_mhrrq()
5689 spin_lock_irqsave(hrrq->lock, hrrq_flags); in ipr_isr_mhrrq()
5692 if (!hrrq->allow_interrupts) { in ipr_isr_mhrrq()
5693 spin_unlock_irqrestore(hrrq->lock, hrrq_flags); in ipr_isr_mhrrq()
5698 if ((be32_to_cpu(*hrrq->hrrq_curr) & IPR_HRRQ_TOGGLE_BIT) == in ipr_isr_mhrrq()
5699 hrrq->toggle_bit) { in ipr_isr_mhrrq()
5700 if (!blk_iopoll_sched_prep(&hrrq->iopoll)) in ipr_isr_mhrrq()
5701 blk_iopoll_sched(&hrrq->iopoll); in ipr_isr_mhrrq()
5702 spin_unlock_irqrestore(hrrq->lock, hrrq_flags); in ipr_isr_mhrrq()
5706 if ((be32_to_cpu(*hrrq->hrrq_curr) & IPR_HRRQ_TOGGLE_BIT) == in ipr_isr_mhrrq()
5707 hrrq->toggle_bit) in ipr_isr_mhrrq()
5709 if (ipr_process_hrrq(hrrq, -1, &doneq)) in ipr_isr_mhrrq()
5713 spin_unlock_irqrestore(hrrq->lock, hrrq_flags); in ipr_isr_mhrrq()
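Everything in the interrupt paths above (lines 5614-5713) hinges on the toggle bit: the adapter writes each completion entry into host_rrq with IPR_HRRQ_TOGGLE_BIT set to the current pass's polarity, so an entry is valid exactly when its toggle bit matches hrrq->toggle_bit. Since ipr_init_ioa_mem() (lines 8095-8104 below) zeroes the ring and starts toggle_bit at 1, a freshly reset ring contains no falsely valid entries. A reconstruction of the consume-and-advance loop inside ipr_process_hrrq(), with entry decoding elided:

    /* Caller holds hrrq->lock; the body completing each op is elided. */
    while ((be32_to_cpu(*hrrq->hrrq_curr) & IPR_HRRQ_TOGGLE_BIT) ==
           hrrq->toggle_bit) {
        /* ... look up and complete the command in *hrrq->hrrq_curr ... */
        if (hrrq->hrrq_curr < hrrq->hrrq_end) {
            hrrq->hrrq_curr++;
        } else {
            /* Wrap: flip the expected polarity so the previous
             * pass's entries are no longer treated as valid. */
            hrrq->hrrq_curr = hrrq->hrrq_start;
            hrrq->toggle_bit ^= 1u;
        }
    }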
5867 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q); in ipr_erp_done()
6258 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q); in ipr_erp_start()
6284 spin_lock_irqsave(ipr_cmd->hrrq->lock, lock_flags); in ipr_scsi_done()
6285 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q); in ipr_scsi_done()
6287 spin_unlock_irqrestore(ipr_cmd->hrrq->lock, lock_flags); in ipr_scsi_done()
6290 spin_lock(&ipr_cmd->hrrq->_lock); in ipr_scsi_done()
6292 spin_unlock(&ipr_cmd->hrrq->_lock); in ipr_scsi_done()
6318 struct ipr_hrr_queue *hrrq; in ipr_queuecommand() local
6334 hrrq = &ioa_cfg->hrrq[hrrq_id]; in ipr_queuecommand()
6336 spin_lock_irqsave(hrrq->lock, hrrq_flags); in ipr_queuecommand()
6342 if (unlikely(!hrrq->allow_cmds && !hrrq->ioa_is_dead && !hrrq->removing_ioa)) { in ipr_queuecommand()
6343 spin_unlock_irqrestore(hrrq->lock, hrrq_flags); in ipr_queuecommand()
6351 if (unlikely(hrrq->ioa_is_dead || hrrq->removing_ioa || !res)) { in ipr_queuecommand()
6352 spin_unlock_irqrestore(hrrq->lock, hrrq_flags); in ipr_queuecommand()
6356 ipr_cmd = __ipr_get_free_ipr_cmnd(hrrq); in ipr_queuecommand()
6358 spin_unlock_irqrestore(hrrq->lock, hrrq_flags); in ipr_queuecommand()
6361 spin_unlock_irqrestore(hrrq->lock, hrrq_flags); in ipr_queuecommand()
6406 spin_lock_irqsave(hrrq->lock, hrrq_flags); in ipr_queuecommand()
6407 if (unlikely(rc || (!hrrq->allow_cmds && !hrrq->ioa_is_dead))) { in ipr_queuecommand()
6408 list_add_tail(&ipr_cmd->queue, &hrrq->hrrq_free_q); in ipr_queuecommand()
6409 spin_unlock_irqrestore(hrrq->lock, hrrq_flags); in ipr_queuecommand()
6415 if (unlikely(hrrq->ioa_is_dead)) { in ipr_queuecommand()
6416 list_add_tail(&ipr_cmd->queue, &hrrq->hrrq_free_q); in ipr_queuecommand()
6417 spin_unlock_irqrestore(hrrq->lock, hrrq_flags); in ipr_queuecommand()
6427 list_add_tail(&ipr_cmd->queue, &hrrq->hrrq_pending_q); in ipr_queuecommand()
6430 spin_unlock_irqrestore(hrrq->lock, hrrq_flags); in ipr_queuecommand()
6434 spin_lock_irqsave(hrrq->lock, hrrq_flags); in ipr_queuecommand()
6438 spin_unlock_irqrestore(hrrq->lock, hrrq_flags); in ipr_queuecommand()
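ipr_queuecommand() (lines 6318-6438) is the per-queue fast path: pick a queue via the round-robin index, then gate on three flags under that queue's lock. The first check (6342) catches an adapter mid-reset, where the natural response is a busy return so the midlayer retries; the second (6351) catches a dead or departing adapter, or a missing resource, where retrying is pointless. A sketch of the gating; the return paths are assumptions consistent with standard midlayer behaviour:

    spin_lock_irqsave(hrrq->lock, hrrq_flags);
    if (unlikely(!hrrq->allow_cmds && !hrrq->ioa_is_dead &&
                 !hrrq->removing_ioa)) {
        spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
        return SCSI_MLQUEUE_HOST_BUSY;   /* assumed: retry after reset */
    }
    if (unlikely(hrrq->ioa_is_dead || hrrq->removing_ioa || !res)) {
        spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
        goto err_nodev;                  /* assumed: DID_NO_CONNECT */
    }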
6536 if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds) in ipr_ata_phy_reset()
6567 struct ipr_hrr_queue *hrrq; in ipr_ata_post_internal() local
6577 for_each_hrrq(hrrq, ioa_cfg) { in ipr_ata_post_internal()
6578 spin_lock(&hrrq->_lock); in ipr_ata_post_internal()
6579 list_for_each_entry(ipr_cmd, &hrrq->hrrq_pending_q, queue) { in ipr_ata_post_internal()
6585 spin_unlock(&hrrq->_lock); in ipr_ata_post_internal()
6634 spin_lock(&ipr_cmd->hrrq->_lock); in ipr_sata_done()
6650 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q); in ipr_sata_done()
6651 spin_unlock(&ipr_cmd->hrrq->_lock); in ipr_sata_done()
6759 struct ipr_hrr_queue *hrrq; in ipr_qc_defer() local
6763 hrrq = &ioa_cfg->hrrq[hrrq_id]; in ipr_qc_defer()
6766 spin_lock(&hrrq->_lock); in ipr_qc_defer()
6767 if (unlikely(hrrq->ioa_is_dead)) { in ipr_qc_defer()
6768 spin_unlock(&hrrq->_lock); in ipr_qc_defer()
6772 if (unlikely(!hrrq->allow_cmds)) { in ipr_qc_defer()
6773 spin_unlock(&hrrq->_lock); in ipr_qc_defer()
6777 ipr_cmd = __ipr_get_free_ipr_cmnd(hrrq); in ipr_qc_defer()
6779 spin_unlock(&hrrq->_lock); in ipr_qc_defer()
6784 spin_unlock(&hrrq->_lock); in ipr_qc_defer()
6813 spin_lock(&ipr_cmd->hrrq->_lock); in ipr_qc_issue()
6814 if (unlikely(!ipr_cmd->hrrq->allow_cmds || in ipr_qc_issue()
6815 ipr_cmd->hrrq->ioa_is_dead)) { in ipr_qc_issue()
6816 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q); in ipr_qc_issue()
6817 spin_unlock(&ipr_cmd->hrrq->_lock); in ipr_qc_issue()
6833 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q); in ipr_qc_issue()
6873 spin_unlock(&ipr_cmd->hrrq->_lock); in ipr_qc_issue()
6878 spin_unlock(&ipr_cmd->hrrq->_lock); in ipr_qc_issue()
6987 if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].removing_ioa) { in ipr_ioa_bringdown_done()
6997 spin_lock(&ioa_cfg->hrrq[i]._lock); in ipr_ioa_bringdown_done()
6998 ioa_cfg->hrrq[i].ioa_is_dead = 1; in ipr_ioa_bringdown_done()
6999 spin_unlock(&ioa_cfg->hrrq[i]._lock); in ipr_ioa_bringdown_done()
7003 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q); in ipr_ioa_bringdown_done()
7031 spin_lock(&ioa_cfg->hrrq[j]._lock); in ipr_ioa_reset_done()
7032 ioa_cfg->hrrq[j].allow_cmds = 1; in ipr_ioa_reset_done()
7033 spin_unlock(&ioa_cfg->hrrq[j]._lock); in ipr_ioa_reset_done()
7059 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q); in ipr_ioa_reset_done()
7066 if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds) in ipr_ioa_reset_done()
7400 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q); in ipr_reset_cmd_failed()
7908 &ioa_cfg->hrrq->hrrq_free_q); in ipr_ioafp_page0_inquiry()
7961 struct ipr_hrr_queue *hrrq; in ipr_ioafp_identify_hrrq() local
7968 hrrq = &ioa_cfg->hrrq[ioa_cfg->identify_hrrq_index]; in ipr_ioafp_identify_hrrq()
7983 ((u64) hrrq->host_rrq_dma >> 24) & 0xff; in ipr_ioafp_identify_hrrq()
7985 ((u64) hrrq->host_rrq_dma >> 16) & 0xff; in ipr_ioafp_identify_hrrq()
7987 ((u64) hrrq->host_rrq_dma >> 8) & 0xff; in ipr_ioafp_identify_hrrq()
7989 ((u64) hrrq->host_rrq_dma) & 0xff; in ipr_ioafp_identify_hrrq()
7991 ((sizeof(u32) * hrrq->size) >> 8) & 0xff; in ipr_ioafp_identify_hrrq()
7993 (sizeof(u32) * hrrq->size) & 0xff; in ipr_ioafp_identify_hrrq()
8001 ((u64) hrrq->host_rrq_dma >> 56) & 0xff; in ipr_ioafp_identify_hrrq()
8003 ((u64) hrrq->host_rrq_dma >> 48) & 0xff; in ipr_ioafp_identify_hrrq()
8005 ((u64) hrrq->host_rrq_dma >> 40) & 0xff; in ipr_ioafp_identify_hrrq()
8007 ((u64) hrrq->host_rrq_dma >> 32) & 0xff; in ipr_ioafp_identify_hrrq()
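ipr_ioafp_identify_hrrq() (lines 7961-8007) hands each ring to the adapter by packing the ring's DMA address and byte length into the command's CDB one byte at a time, big-endian. Lines 7983-7989 carry address bits 31..0, lines 7991-7993 the length (each ring entry is a u32, hence sizeof(u32) * hrrq->size bytes), and lines 8001-8007 add bits 63..32 on 64-bit-capable adapters. The shift-and-mask idiom, with the cdb[] offsets and the 64-bit predicate as hypotheticals:

    cdb[2] = ((u64) hrrq->host_rrq_dma >> 24) & 0xff;
    cdb[3] = ((u64) hrrq->host_rrq_dma >> 16) & 0xff;
    cdb[4] = ((u64) hrrq->host_rrq_dma >> 8) & 0xff;
    cdb[5] = ((u64) hrrq->host_rrq_dma) & 0xff;
    cdb[7] = ((sizeof(u32) * hrrq->size) >> 8) & 0xff;
    cdb[8] = (sizeof(u32) * hrrq->size) & 0xff;
    if (adapter_is_64bit) {              /* hypothetical predicate */
        cdb[10] = ((u64) hrrq->host_rrq_dma >> 56) & 0xff;
        cdb[11] = ((u64) hrrq->host_rrq_dma >> 48) & 0xff;
        cdb[12] = ((u64) hrrq->host_rrq_dma >> 40) & 0xff;
        cdb[13] = ((u64) hrrq->host_rrq_dma >> 32) & 0xff;
    }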
8075 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q); in ipr_reset_start_timer()
8093 struct ipr_hrr_queue *hrrq; in ipr_init_ioa_mem() local
8095 for_each_hrrq(hrrq, ioa_cfg) { in ipr_init_ioa_mem()
8096 spin_lock(&hrrq->_lock); in ipr_init_ioa_mem()
8097 memset(hrrq->host_rrq, 0, sizeof(u32) * hrrq->size); in ipr_init_ioa_mem()
8100 hrrq->hrrq_start = hrrq->host_rrq; in ipr_init_ioa_mem()
8101 hrrq->hrrq_end = &hrrq->host_rrq[hrrq->size - 1]; in ipr_init_ioa_mem()
8102 hrrq->hrrq_curr = hrrq->hrrq_start; in ipr_init_ioa_mem()
8103 hrrq->toggle_bit = 1; in ipr_init_ioa_mem()
8104 spin_unlock(&hrrq->_lock); in ipr_init_ioa_mem()
8170 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q); in ipr_reset_next_stage()
8197 spin_lock(&ioa_cfg->hrrq[i]._lock); in ipr_reset_enable_ioa()
8198 ioa_cfg->hrrq[i].allow_interrupts = 1; in ipr_reset_enable_ioa()
8199 spin_unlock(&ioa_cfg->hrrq[i]._lock); in ipr_reset_enable_ioa()
8241 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q); in ipr_reset_enable_ioa()
8763 struct ipr_hrr_queue *hrrq; in ipr_reset_cancel_hcam_done() local
8770 for_each_hrrq(hrrq, ioa_cfg) { in ipr_reset_cancel_hcam_done()
8771 spin_lock(&hrrq->_lock); in ipr_reset_cancel_hcam_done()
8772 list_for_each_entry(loop_cmd, &hrrq->hrrq_pending_q, queue) { in ipr_reset_cancel_hcam_done()
8775 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q); in ipr_reset_cancel_hcam_done()
8779 spin_unlock(&hrrq->_lock); in ipr_reset_cancel_hcam_done()
8804 struct ipr_hrr_queue *hrrq = &ioa_cfg->hrrq[IPR_INIT_HRRQ]; in ipr_reset_cancel_hcam() local
8809 if (!hrrq->ioa_is_dead) { in ipr_reset_cancel_hcam()
8811 list_for_each_entry(hcam_cmd, &hrrq->hrrq_pending_q, queue) { in ipr_reset_cancel_hcam()
8930 !ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead) { in ipr_reset_shutdown_ioa()
8979 &ipr_cmd->hrrq->hrrq_free_q); in ipr_reset_ioa_job()
9018 spin_lock(&ioa_cfg->hrrq[i]._lock); in _ipr_initiate_ioa_reset()
9019 ioa_cfg->hrrq[i].allow_cmds = 0; in _ipr_initiate_ioa_reset()
9020 spin_unlock(&ioa_cfg->hrrq[i]._lock); in _ipr_initiate_ioa_reset()
9023 if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].removing_ioa) in _ipr_initiate_ioa_reset()
9051 if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead) in ipr_initiate_ioa_reset()
9067 spin_lock(&ioa_cfg->hrrq[i]._lock); in ipr_initiate_ioa_reset()
9068 ioa_cfg->hrrq[i].ioa_is_dead = 1; in ipr_initiate_ioa_reset()
9069 spin_unlock(&ioa_cfg->hrrq[i]._lock); in ipr_initiate_ioa_reset()
9079 if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].removing_ioa) { in ipr_initiate_ioa_reset()
9110 spin_lock(&ioa_cfg->hrrq[i]._lock); in ipr_reset_freeze()
9111 ioa_cfg->hrrq[i].allow_interrupts = 0; in ipr_reset_freeze()
9112 spin_unlock(&ioa_cfg->hrrq[i]._lock); in ipr_reset_freeze()
9115 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q); in ipr_reset_freeze()
9204 spin_lock(&ioa_cfg->hrrq[i]._lock); in ipr_pci_perm_failure()
9205 ioa_cfg->hrrq[i].allow_cmds = 0; in ipr_pci_perm_failure()
9206 spin_unlock(&ioa_cfg->hrrq[i]._lock); in ipr_pci_perm_failure()
9324 sizeof(u32) * ioa_cfg->hrrq[i].size, in ipr_free_mem()
9325 ioa_cfg->hrrq[i].host_rrq, in ipr_free_mem()
9326 ioa_cfg->hrrq[i].host_rrq_dma); in ipr_free_mem()
9361 &ioa_cfg->hrrq[i]); in ipr_free_irqs()
9363 free_irq(pdev->irq, &ioa_cfg->hrrq[0]); in ipr_free_irqs()
9432 ioa_cfg->hrrq[i].min_cmd_id = 0; in ipr_alloc_cmd_blks()
9433 ioa_cfg->hrrq[i].max_cmd_id = in ipr_alloc_cmd_blks()
9439 ioa_cfg->hrrq[i].min_cmd_id = in ipr_alloc_cmd_blks()
9442 ioa_cfg->hrrq[i].max_cmd_id = in ipr_alloc_cmd_blks()
9448 ioa_cfg->hrrq[i].min_cmd_id = 0; in ipr_alloc_cmd_blks()
9449 ioa_cfg->hrrq[i].max_cmd_id = (entries_each_hrrq - 1); in ipr_alloc_cmd_blks()
9451 ioa_cfg->hrrq[i].size = entries_each_hrrq; in ipr_alloc_cmd_blks()
9457 ioa_cfg->hrrq[ioa_cfg->hrrq_num - 1].max_cmd_id - 1; in ipr_alloc_cmd_blks()
9459 ioa_cfg->hrrq[ioa_cfg->hrrq_num - 1].size += i; in ipr_alloc_cmd_blks()
9460 ioa_cfg->hrrq[ioa_cfg->hrrq_num - 1].max_cmd_id += i; in ipr_alloc_cmd_blks()
9502 ipr_cmd->hrrq = &ioa_cfg->hrrq[hrrq_id]; in ipr_alloc_cmd_blks()
9503 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q); in ipr_alloc_cmd_blks()
9504 if (i >= ioa_cfg->hrrq[hrrq_id].max_cmd_id) in ipr_alloc_cmd_blks()
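ipr_alloc_cmd_blks() (lines 9432-9504) carves one global pool of command blocks into contiguous [min_cmd_id, max_cmd_id] ranges, one range per queue; lines 9457-9460 then hand any integer-division remainder to the last queue so no blocks go unused, and the loop at 9502-9504 tags each block with its owning queue, pushes it onto that queue's free list, and advances hrrq_id once the block index passes the queue's max_cmd_id. The remainder arithmetic, reassembled with the pool-total name as an assumption:

    /* i = blocks left over after the last queue's range was assigned;
     * IPR_NUM_CMD_BLKS as the pool total is an assumption. */
    i = IPR_NUM_CMD_BLKS -
        ioa_cfg->hrrq[ioa_cfg->hrrq_num - 1].max_cmd_id - 1;
    if (i > 0) {
        ioa_cfg->hrrq[ioa_cfg->hrrq_num - 1].size += i;
        ioa_cfg->hrrq[ioa_cfg->hrrq_num - 1].max_cmd_id += i;
    }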
9547 ioa_cfg->hrrq[i].host_rrq = dma_alloc_coherent(&pdev->dev, in ipr_alloc_mem()
9548 sizeof(u32) * ioa_cfg->hrrq[i].size, in ipr_alloc_mem()
9549 &ioa_cfg->hrrq[i].host_rrq_dma, in ipr_alloc_mem()
9552 if (!ioa_cfg->hrrq[i].host_rrq) { in ipr_alloc_mem()
9555 sizeof(u32) * ioa_cfg->hrrq[i].size, in ipr_alloc_mem()
9556 ioa_cfg->hrrq[i].host_rrq, in ipr_alloc_mem()
9557 ioa_cfg->hrrq[i].host_rrq_dma); in ipr_alloc_mem()
9560 ioa_cfg->hrrq[i].ioa_cfg = ioa_cfg; in ipr_alloc_mem()
9608 sizeof(u32) * ioa_cfg->hrrq[i].size, in ipr_alloc_mem()
9609 ioa_cfg->hrrq[i].host_rrq, in ipr_alloc_mem()
9610 ioa_cfg->hrrq[i].host_rrq_dma); in ipr_alloc_mem()
9748 for (i = 0; i < ARRAY_SIZE(ioa_cfg->hrrq); i++) { in ipr_init_ioa_cfg()
9749 INIT_LIST_HEAD(&ioa_cfg->hrrq[i].hrrq_free_q); in ipr_init_ioa_cfg()
9750 INIT_LIST_HEAD(&ioa_cfg->hrrq[i].hrrq_pending_q); in ipr_init_ioa_cfg()
9751 spin_lock_init(&ioa_cfg->hrrq[i]._lock); in ipr_init_ioa_cfg()
9753 ioa_cfg->hrrq[i].lock = ioa_cfg->host->host_lock; in ipr_init_ioa_cfg()
9755 ioa_cfg->hrrq[i].lock = &ioa_cfg->hrrq[i]._lock; in ipr_init_ioa_cfg()
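Lines 9748-9755 explain the two locks seen throughout the listing: every queue owns a _lock, but hrrq->lock is a pointer, and in one case (the branch condition is elided from the listing; the first queue is the natural reading) it is aimed at the SCSI midlayer's host_lock instead. Presumed shape:

    for (i = 0; i < ARRAY_SIZE(ioa_cfg->hrrq); i++) {
        INIT_LIST_HEAD(&ioa_cfg->hrrq[i].hrrq_free_q);
        INIT_LIST_HEAD(&ioa_cfg->hrrq[i].hrrq_pending_q);
        spin_lock_init(&ioa_cfg->hrrq[i]._lock);
        if (i == 0)    /* assumed: init queue shares the host lock */
            ioa_cfg->hrrq[i].lock = ioa_cfg->host->host_lock;
        else
            ioa_cfg->hrrq[i].lock = &ioa_cfg->hrrq[i]._lock;
    }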
9858 &ioa_cfg->hrrq[i]); in ipr_request_other_msi_irqs()
9862 &ioa_cfg->hrrq[i]); in ipr_request_other_msi_irqs()
10193 &ioa_cfg->hrrq[0]); in ipr_probe_ioa()
10199 IPR_NAME, &ioa_cfg->hrrq[0]); in ipr_probe_ioa()
10302 spin_lock(&ioa_cfg->hrrq[i]._lock); in __ipr_remove()
10303 ioa_cfg->hrrq[i].removing_ioa = 1; in __ipr_remove()
10304 spin_unlock(&ioa_cfg->hrrq[i]._lock); in __ipr_remove()
10412 blk_iopoll_init(&ioa_cfg->hrrq[i].iopoll, in ipr_probe()
10414 blk_iopoll_enable(&ioa_cfg->hrrq[i].iopoll); in ipr_probe()
10443 blk_iopoll_disable(&ioa_cfg->hrrq[i].iopoll); in ipr_shutdown()
10597 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q); in ipr_halt_done()
10619 if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds || in ipr_halt()