Lines Matching refs: ioa_cfg
601 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; in ipr_trc_hook() local
604 trace_index = atomic_add_return(1, &ioa_cfg->trace_index) & IPR_TRACE_INDEX_MASK; in ipr_trc_hook()
605 trace_entry = &ioa_cfg->trace[trace_index]; in ipr_trc_hook()
609 if (ipr_cmd->ioa_cfg->sis64) in ipr_trc_hook()
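
ipr_trc_hook() above claims a trace slot with atomic_add_return() and a mask, a lock-free ring-buffer index that only wraps cleanly when the entry count is a power of two (IPR_TRACE_INDEX_MASK is presumably the entry count minus one). A minimal sketch of the idiom, with illustrative names (TRACE_ENTRIES, next_trace_slot()):

/* Sketch of the lock-free trace-ring index used by ipr_trc_hook(). */
#define TRACE_ENTRIES   8192                    /* must be a power of two */
#define TRACE_MASK      (TRACE_ENTRIES - 1)

static struct ipr_trace_entry trace[TRACE_ENTRIES];
static atomic_t trace_index = ATOMIC_INIT(-1);

static struct ipr_trace_entry *next_trace_slot(void)
{
        /* atomic_add_return() hands each writer a unique counter value;
         * the mask folds it into the array without taking a lock. */
        u32 idx = atomic_add_return(1, &trace_index) & TRACE_MASK;

        return &trace[idx];
}

An entry can still be overwritten by a faster writer before a slower one finishes filling its slot; for a debug trace that trade-off is acceptable.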
632 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; in ipr_lock_and_done() local
634 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); in ipr_lock_and_done()
636 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_lock_and_done()
662 if (ipr_cmd->ioa_cfg->sis64) { in ipr_reinit_ipr_cmnd()
729 struct ipr_cmnd *ipr_get_free_ipr_cmnd(struct ipr_ioa_cfg *ioa_cfg) in ipr_get_free_ipr_cmnd() argument
732 __ipr_get_free_ipr_cmnd(&ioa_cfg->hrrq[IPR_INIT_HRRQ]); in ipr_get_free_ipr_cmnd()
748 static void ipr_mask_and_clear_interrupts(struct ipr_ioa_cfg *ioa_cfg, in ipr_mask_and_clear_interrupts() argument
755 for (i = 0; i < ioa_cfg->hrrq_num; i++) { in ipr_mask_and_clear_interrupts()
756 spin_lock(&ioa_cfg->hrrq[i]._lock); in ipr_mask_and_clear_interrupts()
757 ioa_cfg->hrrq[i].allow_interrupts = 0; in ipr_mask_and_clear_interrupts()
758 spin_unlock(&ioa_cfg->hrrq[i]._lock); in ipr_mask_and_clear_interrupts()
763 if (ioa_cfg->sis64) in ipr_mask_and_clear_interrupts()
764 writeq(~0, ioa_cfg->regs.set_interrupt_mask_reg); in ipr_mask_and_clear_interrupts()
766 writel(~0, ioa_cfg->regs.set_interrupt_mask_reg); in ipr_mask_and_clear_interrupts()
769 if (ioa_cfg->sis64) in ipr_mask_and_clear_interrupts()
770 writel(~0, ioa_cfg->regs.clr_interrupt_reg); in ipr_mask_and_clear_interrupts()
771 writel(clr_ints, ioa_cfg->regs.clr_interrupt_reg32); in ipr_mask_and_clear_interrupts()
772 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg); in ipr_mask_and_clear_interrupts()
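
The tail of ipr_mask_and_clear_interrupts() shows the two hardware generations side by side: SIS-64 adapters get a 64-bit writeq() to the full-width mask register, older ones a 32-bit writel(), and the closing readl() of the sense register flushes the posted MMIO writes before the caller continues. Reassembled from the lines above into one sketch (the comments paraphrase the skipped context):

/* Mask every interrupt source. */
if (ioa_cfg->sis64)
        writeq(~0, ioa_cfg->regs.set_interrupt_mask_reg);
else
        writel(~0, ioa_cfg->regs.set_interrupt_mask_reg);

/* Clear anything already pending. */
if (ioa_cfg->sis64)
        writel(~0, ioa_cfg->regs.clr_interrupt_reg);
writel(clr_ints, ioa_cfg->regs.clr_interrupt_reg32);

/* Read back so the posted writes reach the adapter before we return. */
int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);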
782 static int ipr_save_pcix_cmd_reg(struct ipr_ioa_cfg *ioa_cfg) in ipr_save_pcix_cmd_reg() argument
784 int pcix_cmd_reg = pci_find_capability(ioa_cfg->pdev, PCI_CAP_ID_PCIX); in ipr_save_pcix_cmd_reg()
789 if (pci_read_config_word(ioa_cfg->pdev, pcix_cmd_reg + PCI_X_CMD, in ipr_save_pcix_cmd_reg()
790 &ioa_cfg->saved_pcix_cmd_reg) != PCIBIOS_SUCCESSFUL) { in ipr_save_pcix_cmd_reg()
791 dev_err(&ioa_cfg->pdev->dev, "Failed to save PCI-X command register\n"); in ipr_save_pcix_cmd_reg()
795 ioa_cfg->saved_pcix_cmd_reg |= PCI_X_CMD_DPERR_E | PCI_X_CMD_ERO; in ipr_save_pcix_cmd_reg()
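
ipr_save_pcix_cmd_reg() (lines 782-795) is a standard capability walk: locate the PCI-X capability, read its command word, and stash it with data-parity-error recovery and relaxed ordering forced on, so ipr_set_pcix_cmd_reg() below can write it back after a chip reset. A condensed sketch; the early-return value for the no-capability case is an assumption:

/* Sketch: save (and sanitize) the PCI-X command register. */
int pcix_cmd_reg = pci_find_capability(ioa_cfg->pdev, PCI_CAP_ID_PCIX);

if (pcix_cmd_reg == 0)
        return 0;                       /* no PCI-X capability: nothing to save */

if (pci_read_config_word(ioa_cfg->pdev, pcix_cmd_reg + PCI_X_CMD,
                         &ioa_cfg->saved_pcix_cmd_reg) != PCIBIOS_SUCCESSFUL) {
        dev_err(&ioa_cfg->pdev->dev, "Failed to save PCI-X command register\n");
        return -EIO;
}

ioa_cfg->saved_pcix_cmd_reg |= PCI_X_CMD_DPERR_E | PCI_X_CMD_ERO;
return 0;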
806 static int ipr_set_pcix_cmd_reg(struct ipr_ioa_cfg *ioa_cfg) in ipr_set_pcix_cmd_reg() argument
808 int pcix_cmd_reg = pci_find_capability(ioa_cfg->pdev, PCI_CAP_ID_PCIX); in ipr_set_pcix_cmd_reg()
811 if (pci_write_config_word(ioa_cfg->pdev, pcix_cmd_reg + PCI_X_CMD, in ipr_set_pcix_cmd_reg()
812 ioa_cfg->saved_pcix_cmd_reg) != PCIBIOS_SUCCESSFUL) { in ipr_set_pcix_cmd_reg()
813 dev_err(&ioa_cfg->pdev->dev, "Failed to setup PCI-X command register\n"); in ipr_set_pcix_cmd_reg()
874 static void ipr_fail_all_ops(struct ipr_ioa_cfg *ioa_cfg) in ipr_fail_all_ops() argument
880 for_each_hrrq(hrrq, ioa_cfg) { in ipr_fail_all_ops()
919 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; in ipr_send_command() local
922 if (ioa_cfg->sis64) { in ipr_send_command()
930 writeq(send_dma_addr, ioa_cfg->regs.ioarrin_reg); in ipr_send_command()
932 writel(send_dma_addr, ioa_cfg->regs.ioarrin_reg); in ipr_send_command()
1006 if (ipr_cmd->ioa_cfg->sis64) { in ipr_init_ioadl()
1043 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; in ipr_send_blocking_cmd() local
1048 spin_unlock_irq(ioa_cfg->host->host_lock); in ipr_send_blocking_cmd()
1050 spin_lock_irq(ioa_cfg->host->host_lock); in ipr_send_blocking_cmd()
1053 static int ipr_get_hrrq_index(struct ipr_ioa_cfg *ioa_cfg) in ipr_get_hrrq_index() argument
1057 if (ioa_cfg->hrrq_num == 1) in ipr_get_hrrq_index()
1060 hrrq = atomic_add_return(1, &ioa_cfg->hrrq_index); in ipr_get_hrrq_index()
1061 hrrq = (hrrq % (ioa_cfg->hrrq_num - 1)) + 1; in ipr_get_hrrq_index()
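
ipr_get_hrrq_index() reserves queue 0 (IPR_INIT_HRRQ) for internal commands and round-robins ordinary I/O across the rest; the modulo-plus-one on line 1061 is what pins the result to the range 1..hrrq_num-1. As a self-contained sketch (pick_hrrq_index() is an illustrative name):

/* Sketch: round-robin HRRQ selection with queue 0 reserved. */
static int pick_hrrq_index(struct ipr_ioa_cfg *ioa_cfg)
{
        unsigned int hrrq;

        if (ioa_cfg->hrrq_num == 1)
                return 0;               /* only the init queue exists */

        hrrq = atomic_add_return(1, &ioa_cfg->hrrq_index);
        return (hrrq % (ioa_cfg->hrrq_num - 1)) + 1;
}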
1079 static void ipr_send_hcam(struct ipr_ioa_cfg *ioa_cfg, u8 type, in ipr_send_hcam() argument
1085 if (ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds) { in ipr_send_hcam()
1086 ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg); in ipr_send_hcam()
1088 list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_pending_q); in ipr_send_hcam()
1112 list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_free_q); in ipr_send_hcam()
1154 struct ipr_ioa_cfg *ioa_cfg = res->ioa_cfg; in ipr_init_res_entry() local
1166 if (ioa_cfg->sis64) { in ipr_init_res_entry()
1182 list_for_each_entry(gscsi_res, &ioa_cfg->used_res_q, queue) { in ipr_init_res_entry()
1190 res->target = find_first_zero_bit(ioa_cfg->target_ids, in ipr_init_res_entry()
1191 ioa_cfg->max_devs_supported); in ipr_init_res_entry()
1192 set_bit(res->target, ioa_cfg->target_ids); in ipr_init_res_entry()
1199 res->target = find_first_zero_bit(ioa_cfg->array_ids, in ipr_init_res_entry()
1200 ioa_cfg->max_devs_supported); in ipr_init_res_entry()
1201 set_bit(res->target, ioa_cfg->array_ids); in ipr_init_res_entry()
1204 res->target = find_first_zero_bit(ioa_cfg->vset_ids, in ipr_init_res_entry()
1205 ioa_cfg->max_devs_supported); in ipr_init_res_entry()
1206 set_bit(res->target, ioa_cfg->vset_ids); in ipr_init_res_entry()
1208 res->target = find_first_zero_bit(ioa_cfg->target_ids, in ipr_init_res_entry()
1209 ioa_cfg->max_devs_supported); in ipr_init_res_entry()
1210 set_bit(res->target, ioa_cfg->target_ids); in ipr_init_res_entry()
1241 if (res->ioa_cfg->sis64) { in ipr_is_same_device()
1290 static char *ipr_format_res_path(struct ipr_ioa_cfg *ioa_cfg, in ipr_format_res_path() argument
1296 p += snprintf(p, buffer + len - p, "%d/", ioa_cfg->host->host_no); in ipr_format_res_path()
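
ipr_format_res_path() appends fields by bumping p by the snprintf() return value and recomputing the space left as buffer + len - p. One caveat: snprintf() returns the length it would have written, so on truncation p can step past the end of the buffer and the next size computation goes negative; the kernel's scnprintf() returns the bytes actually stored and is the safe variant for this idiom. A sketch using scnprintf(), with illustrative parameters:

/* Sketch: append-into-fixed-buffer formatting of a resource path. */
static char *format_path(int host_no, const u8 *res_path, int path_len,
                         char *buffer, int len)
{
        char *p = buffer;
        int i;

        p += scnprintf(p, buffer + len - p, "%d/", host_no);
        for (i = 0; i < path_len; i++)
                p += scnprintf(p, buffer + len - p, "%02X-", res_path[i]);
        if (p > buffer)
                p[-1] = '\0';           /* drop the trailing separator */
        return buffer;
}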
1316 if (res->ioa_cfg->sis64) { in ipr_update_res_entry()
1341 ipr_format_res_path(res->ioa_cfg, in ipr_update_res_entry()
1373 struct ipr_ioa_cfg *ioa_cfg = res->ioa_cfg; in ipr_clear_res_target() local
1375 if (!ioa_cfg->sis64) in ipr_clear_res_target()
1379 clear_bit(res->target, ioa_cfg->array_ids); in ipr_clear_res_target()
1381 clear_bit(res->target, ioa_cfg->vset_ids); in ipr_clear_res_target()
1383 list_for_each_entry(gscsi_res, &ioa_cfg->used_res_q, queue) in ipr_clear_res_target()
1386 clear_bit(res->target, ioa_cfg->target_ids); in ipr_clear_res_target()
1389 clear_bit(res->target, ioa_cfg->target_ids); in ipr_clear_res_target()
1400 static void ipr_handle_config_change(struct ipr_ioa_cfg *ioa_cfg, in ipr_handle_config_change() argument
1409 if (ioa_cfg->sis64) { in ipr_handle_config_change()
1417 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) { in ipr_handle_config_change()
1425 if (list_empty(&ioa_cfg->free_res_q)) { in ipr_handle_config_change()
1426 ipr_send_hcam(ioa_cfg, in ipr_handle_config_change()
1432 res = list_entry(ioa_cfg->free_res_q.next, in ipr_handle_config_change()
1437 list_add_tail(&res->queue, &ioa_cfg->used_res_q); in ipr_handle_config_change()
1446 schedule_work(&ioa_cfg->work_q); in ipr_handle_config_change()
1449 list_move_tail(&res->queue, &ioa_cfg->free_res_q); in ipr_handle_config_change()
1453 schedule_work(&ioa_cfg->work_q); in ipr_handle_config_change()
1456 ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE, hostrcb); in ipr_handle_config_change()
1471 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; in ipr_process_ccn() local
1481 dev_err(&ioa_cfg->pdev->dev, in ipr_process_ccn()
1484 ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE, hostrcb); in ipr_process_ccn()
1486 ipr_handle_config_change(ioa_cfg, hostrcb); in ipr_process_ccn()
1599 static void ipr_log_enhanced_cache_error(struct ipr_ioa_cfg *ioa_cfg, in ipr_log_enhanced_cache_error() argument
1604 if (ioa_cfg->sis64) in ipr_log_enhanced_cache_error()
1635 static void ipr_log_cache_error(struct ipr_ioa_cfg *ioa_cfg, in ipr_log_cache_error() argument
1667 static void ipr_log_enhanced_config_error(struct ipr_ioa_cfg *ioa_cfg, in ipr_log_enhanced_config_error() argument
1685 ipr_phys_res_err(ioa_cfg, dev_entry->dev_res_addr, "Device %d", i + 1); in ipr_log_enhanced_config_error()
1707 static void ipr_log_sis64_config_error(struct ipr_ioa_cfg *ioa_cfg, in ipr_log_sis64_config_error() argument
1750 static void ipr_log_config_error(struct ipr_ioa_cfg *ioa_cfg, in ipr_log_config_error() argument
1768 ipr_phys_res_err(ioa_cfg, dev_entry->dev_res_addr, "Device %d", i + 1); in ipr_log_config_error()
1797 static void ipr_log_enhanced_array_error(struct ipr_ioa_cfg *ioa_cfg, in ipr_log_enhanced_array_error() argument
1811 ioa_cfg->host->host_no, in ipr_log_enhanced_array_error()
1832 ipr_phys_res_err(ioa_cfg, array_entry->dev_res_addr, "Current Location"); in ipr_log_enhanced_array_error()
1833 ipr_phys_res_err(ioa_cfg, array_entry->expected_dev_res_addr, in ipr_log_enhanced_array_error()
1848 static void ipr_log_array_error(struct ipr_ioa_cfg *ioa_cfg, in ipr_log_array_error() argument
1862 ioa_cfg->host->host_no, in ipr_log_array_error()
1882 ipr_phys_res_err(ioa_cfg, array_entry->dev_res_addr, "Current Location"); in ipr_log_array_error()
1883 ipr_phys_res_err(ioa_cfg, array_entry->expected_dev_res_addr, in ipr_log_array_error()
1904 static void ipr_log_hex_data(struct ipr_ioa_cfg *ioa_cfg, __be32 *data, int len) in ipr_log_hex_data() argument
1911 if (ioa_cfg->log_level <= IPR_DEFAULT_LOG_LEVEL) in ipr_log_hex_data()
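
ipr_log_hex_data() gates its output on the adapter's log_level: at the default level the dump is clamped to a maximum length, and only higher levels emit the whole buffer. A sketch of the dump loop, printing four big-endian words per line; the clamp constant IPR_DEFAULT_MAX_ERROR_DUMP and the ipr_err() macro are taken on trust from the driver:

/* Sketch: hex-dump an error buffer of __be32 words, four per line. */
static void log_hex_data(struct ipr_ioa_cfg *ioa_cfg, __be32 *data, int len)
{
        int i;

        if (len == 0)
                return;

        /* At the default log level, keep the dump short. */
        if (ioa_cfg->log_level <= IPR_DEFAULT_LOG_LEVEL)
                len = min_t(int, len, IPR_DEFAULT_MAX_ERROR_DUMP);

        for (i = 0; i < len / 4; i += 4)
                ipr_err("%08X: %08X %08X %08X %08X\n", i * 4,
                        be32_to_cpu(data[i]), be32_to_cpu(data[i + 1]),
                        be32_to_cpu(data[i + 2]), be32_to_cpu(data[i + 3]));
}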
1931 static void ipr_log_enhanced_dual_ioa_error(struct ipr_ioa_cfg *ioa_cfg, in ipr_log_enhanced_dual_ioa_error() argument
1936 if (ioa_cfg->sis64) in ipr_log_enhanced_dual_ioa_error()
1947 ipr_log_hex_data(ioa_cfg, error->data, in ipr_log_enhanced_dual_ioa_error()
1961 static void ipr_log_dual_ioa_error(struct ipr_ioa_cfg *ioa_cfg, in ipr_log_dual_ioa_error() argument
1973 ipr_log_hex_data(ioa_cfg, error->data, in ipr_log_dual_ioa_error()
2074 ipr_format_res_path(hostrcb->ioa_cfg, in ipr_log64_fabric_path()
2082 ipr_format_res_path(hostrcb->ioa_cfg, fabric->res_path, in ipr_log64_fabric_path()
2224 ipr_format_res_path(hostrcb->ioa_cfg, in ipr_log64_path_elem()
2234 ipr_format_res_path(hostrcb->ioa_cfg, in ipr_log64_path_elem()
2248 static void ipr_log_fabric_error(struct ipr_ioa_cfg *ioa_cfg, in ipr_log_fabric_error() argument
2274 ipr_log_hex_data(ioa_cfg, (__be32 *)fabric, add_len); in ipr_log_fabric_error()
2285 static void ipr_log_sis64_array_error(struct ipr_ioa_cfg *ioa_cfg, in ipr_log_sis64_array_error() argument
2300 ipr_format_res_path(ioa_cfg, error->last_res_path, in ipr_log_sis64_array_error()
2322 ipr_format_res_path(ioa_cfg, array_entry->res_path, in ipr_log_sis64_array_error()
2325 ipr_format_res_path(ioa_cfg, in ipr_log_sis64_array_error()
2341 static void ipr_log_sis64_fabric_error(struct ipr_ioa_cfg *ioa_cfg, in ipr_log_sis64_fabric_error() argument
2368 ipr_log_hex_data(ioa_cfg, (__be32 *)fabric, add_len); in ipr_log_sis64_fabric_error()
2379 static void ipr_log_generic_error(struct ipr_ioa_cfg *ioa_cfg, in ipr_log_generic_error() argument
2382 ipr_log_hex_data(ioa_cfg, hostrcb->hcam.u.raw.data, in ipr_log_generic_error()
2394 static void ipr_log_sis64_device_error(struct ipr_ioa_cfg *ioa_cfg, in ipr_log_sis64_device_error() argument
2414 ipr_log_hex_data(ioa_cfg, error->sense_data, sizeof(error->sense_data)); in ipr_log_sis64_device_error()
2416 ipr_log_hex_data(ioa_cfg, error->cdb, sizeof(error->cdb)); in ipr_log_sis64_device_error()
2419 ipr_log_hex_data(ioa_cfg, error->ioa_data, be32_to_cpu(error->length_of_error)); in ipr_log_sis64_device_error()
2454 static void ipr_handle_log_data(struct ipr_ioa_cfg *ioa_cfg, in ipr_handle_log_data() argument
2465 dev_err(&ioa_cfg->pdev->dev, "Error notifications lost\n"); in ipr_handle_log_data()
2467 if (ioa_cfg->sis64) in ipr_handle_log_data()
2472 if (!ioa_cfg->sis64 && (ioasc == IPR_IOASC_BUS_WAS_RESET || in ipr_handle_log_data()
2475 scsi_report_bus_reset(ioa_cfg->host, in ipr_handle_log_data()
2489 ioa_cfg->log_level <= IPR_DEFAULT_LOG_LEVEL) in ipr_handle_log_data()
2496 ioa_cfg->errors_logged++; in ipr_handle_log_data()
2498 if (ioa_cfg->log_level < ipr_error_table[error_index].log_hcam) in ipr_handle_log_data()
2505 ipr_log_cache_error(ioa_cfg, hostrcb); in ipr_handle_log_data()
2508 ipr_log_config_error(ioa_cfg, hostrcb); in ipr_handle_log_data()
2512 ipr_log_array_error(ioa_cfg, hostrcb); in ipr_handle_log_data()
2515 ipr_log_dual_ioa_error(ioa_cfg, hostrcb); in ipr_handle_log_data()
2518 ipr_log_enhanced_cache_error(ioa_cfg, hostrcb); in ipr_handle_log_data()
2521 ipr_log_enhanced_config_error(ioa_cfg, hostrcb); in ipr_handle_log_data()
2525 ipr_log_enhanced_array_error(ioa_cfg, hostrcb); in ipr_handle_log_data()
2528 ipr_log_enhanced_dual_ioa_error(ioa_cfg, hostrcb); in ipr_handle_log_data()
2531 ipr_log_fabric_error(ioa_cfg, hostrcb); in ipr_handle_log_data()
2534 ipr_log_sis64_device_error(ioa_cfg, hostrcb); in ipr_handle_log_data()
2537 ipr_log_sis64_config_error(ioa_cfg, hostrcb); in ipr_handle_log_data()
2541 ipr_log_sis64_array_error(ioa_cfg, hostrcb); in ipr_handle_log_data()
2544 ipr_log_sis64_fabric_error(ioa_cfg, hostrcb); in ipr_handle_log_data()
2549 ipr_log_generic_error(ioa_cfg, hostrcb); in ipr_handle_log_data()
2567 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; in ipr_process_error() local
2572 if (ioa_cfg->sis64) in ipr_process_error()
2581 ipr_handle_log_data(ioa_cfg, hostrcb); in ipr_process_error()
2583 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_ABBREV); in ipr_process_error()
2586 dev_err(&ioa_cfg->pdev->dev, in ipr_process_error()
2590 ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_LOG_DATA, hostrcb); in ipr_process_error()
2606 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; in ipr_timeout() local
2609 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); in ipr_timeout()
2611 ioa_cfg->errors_logged++; in ipr_timeout()
2612 dev_err(&ioa_cfg->pdev->dev, in ipr_timeout()
2615 if (WAIT_FOR_DUMP == ioa_cfg->sdt_state) in ipr_timeout()
2616 ioa_cfg->sdt_state = GET_DUMP; in ipr_timeout()
2618 if (!ioa_cfg->in_reset_reload || ioa_cfg->reset_cmd == ipr_cmd) in ipr_timeout()
2619 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE); in ipr_timeout()
2621 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_timeout()
2638 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; in ipr_oper_timeout() local
2641 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); in ipr_oper_timeout()
2643 ioa_cfg->errors_logged++; in ipr_oper_timeout()
2644 dev_err(&ioa_cfg->pdev->dev, in ipr_oper_timeout()
2647 if (WAIT_FOR_DUMP == ioa_cfg->sdt_state) in ipr_oper_timeout()
2648 ioa_cfg->sdt_state = GET_DUMP; in ipr_oper_timeout()
2650 if (!ioa_cfg->in_reset_reload || ioa_cfg->reset_cmd == ipr_cmd) { in ipr_oper_timeout()
2652 ioa_cfg->reset_retries += IPR_NUM_RESET_RELOAD_RETRIES; in ipr_oper_timeout()
2653 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE); in ipr_oper_timeout()
2656 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_oper_timeout()
2705 static u32 ipr_get_max_scsi_speed(struct ipr_ioa_cfg *ioa_cfg, u8 bus, u8 bus_width) in ipr_get_max_scsi_speed() argument
2712 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) { in ipr_get_max_scsi_speed()
2738 static int ipr_wait_iodbg_ack(struct ipr_ioa_cfg *ioa_cfg, int max_delay) in ipr_wait_iodbg_ack() argument
2745 pcii_reg = readl(ioa_cfg->regs.sense_interrupt_reg); in ipr_wait_iodbg_ack()
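
ipr_wait_iodbg_ack() polls the sense interrupt register for the IO-debug-acknowledge handshake with an exponential backoff, since the dump paths below issue this wait repeatedly. A sketch of the loop; the IPR_PCII_IO_DEBUG_ACKNOWLEDGE bit name and the udelay()/mdelay() crossover are assumptions modeled on the driver's style:

/* Sketch: poll for IO debug acknowledge, doubling the wait each pass. */
static int wait_iodbg_ack(struct ipr_ioa_cfg *ioa_cfg, int max_delay)
{
        volatile u32 pcii_reg;
        int delay = 1;                          /* microseconds */

        while (delay < max_delay) {
                pcii_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
                if (pcii_reg & IPR_PCII_IO_DEBUG_ACKNOWLEDGE)
                        return 0;

                /* udelay() busy-waits; switch to mdelay() for long waits. */
                if ((delay / 1000) > MAX_UDELAY_MS)
                        mdelay(delay / 1000);
                else
                        udelay(delay);

                delay += delay;
        }
        return -EIO;
}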
2771 static int ipr_get_sis64_dump_data_section(struct ipr_ioa_cfg *ioa_cfg, in ipr_get_sis64_dump_data_section() argument
2778 writel(start_addr+(i*4), ioa_cfg->regs.dump_addr_reg); in ipr_get_sis64_dump_data_section()
2779 *dest = cpu_to_be32(readl(ioa_cfg->regs.dump_data_reg)); in ipr_get_sis64_dump_data_section()
2796 static int ipr_get_ldump_data_section(struct ipr_ioa_cfg *ioa_cfg, in ipr_get_ldump_data_section() argument
2803 if (ioa_cfg->sis64) in ipr_get_ldump_data_section()
2804 return ipr_get_sis64_dump_data_section(ioa_cfg, start_addr, in ipr_get_ldump_data_section()
2809 ioa_cfg->regs.set_uproc_interrupt_reg32); in ipr_get_ldump_data_section()
2812 if (ipr_wait_iodbg_ack(ioa_cfg, in ipr_get_ldump_data_section()
2814 dev_err(&ioa_cfg->pdev->dev, in ipr_get_ldump_data_section()
2821 ioa_cfg->regs.clr_interrupt_reg); in ipr_get_ldump_data_section()
2824 writel(start_addr, ioa_cfg->ioa_mailbox); in ipr_get_ldump_data_section()
2828 ioa_cfg->regs.clr_uproc_interrupt_reg32); in ipr_get_ldump_data_section()
2832 if (ipr_wait_iodbg_ack(ioa_cfg, in ipr_get_ldump_data_section()
2834 dev_err(&ioa_cfg->pdev->dev, in ipr_get_ldump_data_section()
2840 *dest = cpu_to_be32(readl(ioa_cfg->ioa_mailbox)); in ipr_get_ldump_data_section()
2847 ioa_cfg->regs.clr_interrupt_reg); in ipr_get_ldump_data_section()
2853 ioa_cfg->regs.set_uproc_interrupt_reg32); in ipr_get_ldump_data_section()
2856 ioa_cfg->regs.clr_uproc_interrupt_reg32); in ipr_get_ldump_data_section()
2860 ioa_cfg->regs.clr_interrupt_reg); in ipr_get_ldump_data_section()
2865 readl(ioa_cfg->regs.sense_uproc_interrupt_reg32); in ipr_get_ldump_data_section()
2889 static int ipr_sdt_copy(struct ipr_ioa_cfg *ioa_cfg, in ipr_sdt_copy() argument
2896 struct ipr_ioa_dump *ioa_dump = &ioa_cfg->dump->ioa_dump; in ipr_sdt_copy()
2898 if (ioa_cfg->sis64) in ipr_sdt_copy()
2924 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); in ipr_sdt_copy()
2925 if (ioa_cfg->sdt_state == ABORT_DUMP) { in ipr_sdt_copy()
2928 rc = ipr_get_ldump_data_section(ioa_cfg, in ipr_sdt_copy()
2933 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_sdt_copy()
2971 static void ipr_dump_ioa_type_data(struct ipr_ioa_cfg *ioa_cfg, in ipr_dump_ioa_type_data() argument
2974 struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data; in ipr_dump_ioa_type_data()
2982 driver_dump->ioa_type_entry.type = ioa_cfg->type; in ipr_dump_ioa_type_data()
2997 static void ipr_dump_version_data(struct ipr_ioa_cfg *ioa_cfg, in ipr_dump_version_data() argument
3018 static void ipr_dump_trace_data(struct ipr_ioa_cfg *ioa_cfg, in ipr_dump_trace_data() argument
3027 memcpy(driver_dump->trace_entry.trace, ioa_cfg->trace, IPR_TRACE_SIZE); in ipr_dump_trace_data()
3039 static void ipr_dump_location_data(struct ipr_ioa_cfg *ioa_cfg, in ipr_dump_location_data() argument
3048 strcpy(driver_dump->location_entry.location, dev_name(&ioa_cfg->pdev->dev)); in ipr_dump_location_data()
3060 static void ipr_get_ioa_dump(struct ipr_ioa_cfg *ioa_cfg, struct ipr_dump *dump) in ipr_get_ioa_dump() argument
3074 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); in ipr_get_ioa_dump()
3076 if (ioa_cfg->sdt_state != READ_DUMP) { in ipr_get_ioa_dump()
3077 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_get_ioa_dump()
3081 if (ioa_cfg->sis64) { in ipr_get_ioa_dump()
3082 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_get_ioa_dump()
3084 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); in ipr_get_ioa_dump()
3087 start_addr = readl(ioa_cfg->ioa_mailbox); in ipr_get_ioa_dump()
3089 if (!ioa_cfg->sis64 && !ipr_sdt_is_fmt2(start_addr)) { in ipr_get_ioa_dump()
3090 dev_err(&ioa_cfg->pdev->dev, in ipr_get_ioa_dump()
3092 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_get_ioa_dump()
3096 dev_err(&ioa_cfg->pdev->dev, "Dump of IOA initiated\n"); in ipr_get_ioa_dump()
3108 ipr_dump_version_data(ioa_cfg, driver_dump); in ipr_get_ioa_dump()
3109 ipr_dump_location_data(ioa_cfg, driver_dump); in ipr_get_ioa_dump()
3110 ipr_dump_ioa_type_data(ioa_cfg, driver_dump); in ipr_get_ioa_dump()
3111 ipr_dump_trace_data(ioa_cfg, driver_dump); in ipr_get_ioa_dump()
3128 if (ioa_cfg->sis64) { in ipr_get_ioa_dump()
3138 rc = ipr_get_ldump_data_section(ioa_cfg, start_addr, (__be32 *)sdt, in ipr_get_ioa_dump()
3144 dev_err(&ioa_cfg->pdev->dev, in ipr_get_ioa_dump()
3148 ioa_cfg->sdt_state = DUMP_OBTAINED; in ipr_get_ioa_dump()
3149 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_get_ioa_dump()
3160 if (ioa_cfg->sis64) in ipr_get_ioa_dump()
3165 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_get_ioa_dump()
3175 if (ioa_cfg->sis64) in ipr_get_ioa_dump()
3193 bytes_copied = ipr_sdt_copy(ioa_cfg, sdt_word, in ipr_get_ioa_dump()
3206 dev_err(&ioa_cfg->pdev->dev, "Dump of IOA completed.\n"); in ipr_get_ioa_dump()
3211 ioa_cfg->sdt_state = DUMP_OBTAINED; in ipr_get_ioa_dump()
3216 #define ipr_get_ioa_dump(ioa_cfg, dump) do { } while (0) argument
3229 struct ipr_ioa_cfg *ioa_cfg = dump->ioa_cfg; in ipr_release_dump() local
3234 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); in ipr_release_dump()
3235 ioa_cfg->dump = NULL; in ipr_release_dump()
3236 ioa_cfg->sdt_state = INACTIVE; in ipr_release_dump()
3237 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_release_dump()
3264 struct ipr_ioa_cfg *ioa_cfg = in ipr_worker_thread() local
3270 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); in ipr_worker_thread()
3272 if (ioa_cfg->sdt_state == READ_DUMP) { in ipr_worker_thread()
3273 dump = ioa_cfg->dump; in ipr_worker_thread()
3275 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_worker_thread()
3279 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_worker_thread()
3280 ipr_get_ioa_dump(ioa_cfg, dump); in ipr_worker_thread()
3283 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); in ipr_worker_thread()
3284 if (ioa_cfg->sdt_state == DUMP_OBTAINED && !ioa_cfg->dump_timeout) in ipr_worker_thread()
3285 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE); in ipr_worker_thread()
3286 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_worker_thread()
3293 if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds) { in ipr_worker_thread()
3294 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_worker_thread()
3298 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) { in ipr_worker_thread()
3304 list_move_tail(&res->queue, &ioa_cfg->free_res_q); in ipr_worker_thread()
3307 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_worker_thread()
3310 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); in ipr_worker_thread()
3317 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) { in ipr_worker_thread()
3323 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_worker_thread()
3324 scsi_add_device(ioa_cfg->host, bus, target, lun); in ipr_worker_thread()
3325 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); in ipr_worker_thread()
3330 ioa_cfg->scan_done = 1; in ipr_worker_thread()
3331 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_worker_thread()
3332 kobject_uevent(&ioa_cfg->host->shost_dev.kobj, KOBJ_CHANGE); in ipr_worker_thread()
3355 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata; in ipr_read_trace() local
3359 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); in ipr_read_trace()
3360 ret = memory_read_from_buffer(buf, count, &off, ioa_cfg->trace, in ipr_read_trace()
3362 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_read_trace()
3389 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata; in ipr_show_fw_version() local
3390 struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data; in ipr_show_fw_version()
3394 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); in ipr_show_fw_version()
3399 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_show_fw_version()
3423 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata; in ipr_show_log_level() local
3427 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); in ipr_show_log_level()
3428 len = snprintf(buf, PAGE_SIZE, "%d\n", ioa_cfg->log_level); in ipr_show_log_level()
3429 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_show_log_level()
3446 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata; in ipr_store_log_level() local
3449 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); in ipr_store_log_level()
3450 ioa_cfg->log_level = simple_strtoul(buf, NULL, 10); in ipr_store_log_level()
3451 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_store_log_level()
3481 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata; in ipr_store_diagnostics() local
3488 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); in ipr_store_diagnostics()
3489 while (ioa_cfg->in_reset_reload) { in ipr_store_diagnostics()
3490 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_store_diagnostics()
3491 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload); in ipr_store_diagnostics()
3492 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); in ipr_store_diagnostics()
3495 ioa_cfg->errors_logged = 0; in ipr_store_diagnostics()
3496 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL); in ipr_store_diagnostics()
3498 if (ioa_cfg->in_reset_reload) { in ipr_store_diagnostics()
3499 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_store_diagnostics()
3500 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload); in ipr_store_diagnostics()
3505 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_store_diagnostics()
3509 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); in ipr_store_diagnostics()
3510 if (ioa_cfg->in_reset_reload || ioa_cfg->errors_logged) in ipr_store_diagnostics()
3512 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_store_diagnostics()
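
ipr_store_diagnostics() opens with a loop that recurs throughout the rest of the listing (see ipr_update_ioa_ucode(), ipr_sata_reset(), ipr_ata_phy_reset(), ipr_ata_post_internal()): a spinlock cannot be held across a sleep, so the waiter drops the host lock, blocks on reset_wait_q until in_reset_reload clears, and retakes the lock to re-check. The pattern in isolation:

/* Sketch: wait out an in-flight reset/reload before touching state. */
spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
while (ioa_cfg->in_reset_reload) {
        /* Cannot sleep under a spinlock: drop it, wait, retake, re-check. */
        spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
        wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
        spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
}
/* Here in_reset_reload is false and the host lock is held. */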
3537 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata; in ipr_show_adapter_state() local
3541 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); in ipr_show_adapter_state()
3542 if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead) in ipr_show_adapter_state()
3546 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_show_adapter_state()
3566 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata; in ipr_store_adapter_state() local
3573 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); in ipr_store_adapter_state()
3574 if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead && in ipr_store_adapter_state()
3576 for (i = 0; i < ioa_cfg->hrrq_num; i++) { in ipr_store_adapter_state()
3577 spin_lock(&ioa_cfg->hrrq[i]._lock); in ipr_store_adapter_state()
3578 ioa_cfg->hrrq[i].ioa_is_dead = 0; in ipr_store_adapter_state()
3579 spin_unlock(&ioa_cfg->hrrq[i]._lock); in ipr_store_adapter_state()
3582 ioa_cfg->reset_retries = 0; in ipr_store_adapter_state()
3583 ioa_cfg->in_ioa_bringdown = 0; in ipr_store_adapter_state()
3584 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE); in ipr_store_adapter_state()
3586 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_store_adapter_state()
3587 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload); in ipr_store_adapter_state()
3617 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata; in ipr_store_reset_adapter() local
3624 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); in ipr_store_reset_adapter()
3625 if (!ioa_cfg->in_reset_reload) in ipr_store_reset_adapter()
3626 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL); in ipr_store_reset_adapter()
3627 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_store_reset_adapter()
3628 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload); in ipr_store_reset_adapter()
3654 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata; in ipr_show_iopoll_weight() local
3659 len = snprintf(buf, PAGE_SIZE, "%d\n", ioa_cfg->iopoll_weight); in ipr_show_iopoll_weight()
3678 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata; in ipr_store_iopoll_weight() local
3683 if (!ioa_cfg->sis64) { in ipr_store_iopoll_weight()
3684 dev_info(&ioa_cfg->pdev->dev, "blk-iopoll not supported on this adapter\n"); in ipr_store_iopoll_weight()
3691 dev_info(&ioa_cfg->pdev->dev, "Invalid blk-iopoll weight. It must be less than 256\n"); in ipr_store_iopoll_weight()
3695 if (user_iopoll_weight == ioa_cfg->iopoll_weight) { in ipr_store_iopoll_weight()
3696 dev_info(&ioa_cfg->pdev->dev, "Current blk-iopoll weight has the same weight\n"); in ipr_store_iopoll_weight()
3700 if (ioa_cfg->iopoll_weight && ioa_cfg->sis64 && ioa_cfg->nvectors > 1) { in ipr_store_iopoll_weight()
3701 for (i = 1; i < ioa_cfg->hrrq_num; i++) in ipr_store_iopoll_weight()
3702 blk_iopoll_disable(&ioa_cfg->hrrq[i].iopoll); in ipr_store_iopoll_weight()
3706 ioa_cfg->iopoll_weight = user_iopoll_weight; in ipr_store_iopoll_weight()
3707 if (ioa_cfg->iopoll_weight && ioa_cfg->sis64 && ioa_cfg->nvectors > 1) { in ipr_store_iopoll_weight()
3708 for (i = 1; i < ioa_cfg->hrrq_num; i++) { in ipr_store_iopoll_weight()
3709 blk_iopoll_init(&ioa_cfg->hrrq[i].iopoll, in ipr_store_iopoll_weight()
3710 ioa_cfg->iopoll_weight, ipr_iopoll); in ipr_store_iopoll_weight()
3711 blk_iopoll_enable(&ioa_cfg->hrrq[i].iopoll); in ipr_store_iopoll_weight()
3943 static int ipr_update_ioa_ucode(struct ipr_ioa_cfg *ioa_cfg, in ipr_update_ioa_ucode() argument
3948 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); in ipr_update_ioa_ucode()
3949 while (ioa_cfg->in_reset_reload) { in ipr_update_ioa_ucode()
3950 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_update_ioa_ucode()
3951 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload); in ipr_update_ioa_ucode()
3952 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); in ipr_update_ioa_ucode()
3955 if (ioa_cfg->ucode_sglist) { in ipr_update_ioa_ucode()
3956 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_update_ioa_ucode()
3957 dev_err(&ioa_cfg->pdev->dev, in ipr_update_ioa_ucode()
3962 sglist->num_dma_sg = dma_map_sg(&ioa_cfg->pdev->dev, in ipr_update_ioa_ucode()
3967 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_update_ioa_ucode()
3968 dev_err(&ioa_cfg->pdev->dev, in ipr_update_ioa_ucode()
3973 ioa_cfg->ucode_sglist = sglist; in ipr_update_ioa_ucode()
3974 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL); in ipr_update_ioa_ucode()
3975 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_update_ioa_ucode()
3976 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload); in ipr_update_ioa_ucode()
3978 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); in ipr_update_ioa_ucode()
3979 ioa_cfg->ucode_sglist = NULL; in ipr_update_ioa_ucode()
3980 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_update_ioa_ucode()
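
Pieced together, ipr_update_ioa_ucode() maps the microcode scatterlist for DMA, publishes it through ioa_cfg->ucode_sglist under the host lock, and lets a normal-shutdown reset deliver it to the adapter; the update is finished only once the reset/reload completes and the pointer is cleared again. A condensed sketch of that handoff (error-message text paraphrased):

/* Sketch: hand a DMA-mapped microcode sglist to the reset job. */
sglist->num_dma_sg = dma_map_sg(&ioa_cfg->pdev->dev, sglist->scatterlist,
                                sglist->num_sg, DMA_TO_DEVICE);
if (!sglist->num_dma_sg) {
        spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
        dev_err(&ioa_cfg->pdev->dev, "Failed to map microcode buffer\n");
        return -EIO;
}

ioa_cfg->ucode_sglist = sglist;         /* consumed during the reset */
ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);

spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
ioa_cfg->ucode_sglist = NULL;           /* reset path is done with it */
spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);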
4000 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata; in ipr_store_update_fw() local
4018 if (request_firmware(&fw_entry, fname, &ioa_cfg->pdev->dev)) { in ipr_store_update_fw()
4019 dev_err(&ioa_cfg->pdev->dev, "Firmware file %s not found\n", fname); in ipr_store_update_fw()
4030 dev_err(&ioa_cfg->pdev->dev, "Microcode buffer allocation failed\n"); in ipr_store_update_fw()
4038 dev_err(&ioa_cfg->pdev->dev, in ipr_store_update_fw()
4045 result = ipr_update_ioa_ucode(ioa_cfg, sglist); in ipr_store_update_fw()
4075 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata; in ipr_show_fw_type() local
4079 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); in ipr_show_fw_type()
4080 len = snprintf(buf, PAGE_SIZE, "%d\n", ioa_cfg->sis64); in ipr_show_fw_type()
4081 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_show_fw_type()
4124 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata; in ipr_read_dump() local
4134 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); in ipr_read_dump()
4135 dump = ioa_cfg->dump; in ipr_read_dump()
4137 if (ioa_cfg->sdt_state != DUMP_OBTAINED || !dump) { in ipr_read_dump()
4138 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_read_dump()
4142 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_read_dump()
4168 if (ioa_cfg->sis64) in ipr_read_dump()
4214 static int ipr_alloc_dump(struct ipr_ioa_cfg *ioa_cfg) in ipr_alloc_dump() argument
4227 if (ioa_cfg->sis64) in ipr_alloc_dump()
4241 dump->ioa_cfg = ioa_cfg; in ipr_alloc_dump()
4243 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); in ipr_alloc_dump()
4245 if (INACTIVE != ioa_cfg->sdt_state) { in ipr_alloc_dump()
4246 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_alloc_dump()
4252 ioa_cfg->dump = dump; in ipr_alloc_dump()
4253 ioa_cfg->sdt_state = WAIT_FOR_DUMP; in ipr_alloc_dump()
4254 if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead && !ioa_cfg->dump_taken) { in ipr_alloc_dump()
4255 ioa_cfg->dump_taken = 1; in ipr_alloc_dump()
4256 schedule_work(&ioa_cfg->work_q); in ipr_alloc_dump()
4258 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_alloc_dump()
4270 static int ipr_free_dump(struct ipr_ioa_cfg *ioa_cfg) in ipr_free_dump() argument
4277 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); in ipr_free_dump()
4278 dump = ioa_cfg->dump; in ipr_free_dump()
4280 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_free_dump()
4284 ioa_cfg->dump = NULL; in ipr_free_dump()
4285 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_free_dump()
4311 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata; in ipr_write_dump() local
4318 rc = ipr_alloc_dump(ioa_cfg); in ipr_write_dump()
4320 rc = ipr_free_dump(ioa_cfg); in ipr_write_dump()
4340 static int ipr_free_dump(struct ipr_ioa_cfg *ioa_cfg) { return 0; }; in ipr_free_dump() argument
4354 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata; in ipr_change_queue_depth() local
4358 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); in ipr_change_queue_depth()
4363 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_change_queue_depth()
4381 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata; in ipr_show_adapter_handle() local
4386 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); in ipr_show_adapter_handle()
4390 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_show_adapter_handle()
4415 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata; in ipr_show_resource_path() local
4421 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); in ipr_show_resource_path()
4423 if (res && ioa_cfg->sis64) in ipr_show_resource_path()
4428 len = snprintf(buf, PAGE_SIZE, "%d:%d:%d:%d\n", ioa_cfg->host->host_no, in ipr_show_resource_path()
4431 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_show_resource_path()
4455 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata; in ipr_show_device_id() local
4460 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); in ipr_show_device_id()
4462 if (res && ioa_cfg->sis64) in ipr_show_device_id()
4467 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_show_device_id()
4491 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata; in ipr_show_resource_type() local
4496 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); in ipr_show_resource_type()
4502 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_show_resource_type()
4526 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata; in ipr_show_raw_mode() local
4531 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); in ipr_show_raw_mode()
4537 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_show_raw_mode()
4554 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata; in ipr_store_raw_mode() local
4559 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); in ipr_store_raw_mode()
4572 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_store_raw_mode()
4639 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata; in ipr_find_starget() local
4642 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) { in ipr_find_starget()
4667 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata; in ipr_target_alloc() local
4673 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); in ipr_target_alloc()
4678 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_target_alloc()
4683 ap = ata_sas_port_alloc(&ioa_cfg->ata_host, &sata_port_info, shost); in ipr_target_alloc()
4685 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); in ipr_target_alloc()
4686 sata_port->ioa_cfg = ioa_cfg; in ipr_target_alloc()
4698 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_target_alloc()
4715 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata; in ipr_target_destroy() local
4717 if (ioa_cfg->sis64) { in ipr_target_destroy()
4720 clear_bit(starget->id, ioa_cfg->array_ids); in ipr_target_destroy()
4722 clear_bit(starget->id, ioa_cfg->vset_ids); in ipr_target_destroy()
4724 clear_bit(starget->id, ioa_cfg->target_ids); in ipr_target_destroy()
4744 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata; in ipr_find_sdev() local
4747 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) { in ipr_find_sdev()
4767 struct ipr_ioa_cfg *ioa_cfg; in ipr_slave_destroy() local
4770 ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata; in ipr_slave_destroy()
4772 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); in ipr_slave_destroy()
4781 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_slave_destroy()
4795 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata; in ipr_slave_configure() local
4801 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); in ipr_slave_configure()
4818 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_slave_configure()
4825 if (ioa_cfg->sis64) in ipr_slave_configure()
4827 ipr_format_res_path(ioa_cfg, in ipr_slave_configure()
4831 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_slave_configure()
4880 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata; in ipr_slave_alloc() local
4887 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); in ipr_slave_alloc()
4899 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_slave_alloc()
4904 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_slave_alloc()
4933 static int ipr_wait_for_ops(struct ipr_ioa_cfg *ioa_cfg, void *device, in ipr_wait_for_ops() argument
4947 for_each_hrrq(hrrq, ioa_cfg) { in ipr_wait_for_ops()
4964 for_each_hrrq(hrrq, ioa_cfg) { in ipr_wait_for_ops()
4976 dev_err(&ioa_cfg->pdev->dev, "Timed out waiting for aborted commands\n"); in ipr_wait_for_ops()
4989 struct ipr_ioa_cfg *ioa_cfg; in ipr_eh_host_reset() local
4994 ioa_cfg = (struct ipr_ioa_cfg *) cmd->device->host->hostdata; in ipr_eh_host_reset()
4995 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); in ipr_eh_host_reset()
4997 if (!ioa_cfg->in_reset_reload && !ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead) { in ipr_eh_host_reset()
4998 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_ABBREV); in ipr_eh_host_reset()
4999 dev_err(&ioa_cfg->pdev->dev, in ipr_eh_host_reset()
5002 if (WAIT_FOR_DUMP == ioa_cfg->sdt_state) in ipr_eh_host_reset()
5003 ioa_cfg->sdt_state = GET_DUMP; in ipr_eh_host_reset()
5006 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_eh_host_reset()
5007 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload); in ipr_eh_host_reset()
5008 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); in ipr_eh_host_reset()
5012 if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead) { in ipr_eh_host_reset()
5017 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_eh_host_reset()
5036 static int ipr_device_reset(struct ipr_ioa_cfg *ioa_cfg, in ipr_device_reset() argument
5046 ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg); in ipr_device_reset()
5050 if (ipr_cmd->ioa_cfg->sis64) { in ipr_device_reset()
5069 if (ipr_cmd->ioa_cfg->sis64) in ipr_device_reset()
5095 struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg; in ipr_sata_reset() local
5101 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); in ipr_sata_reset()
5102 while (ioa_cfg->in_reset_reload) { in ipr_sata_reset()
5103 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_sata_reset()
5104 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload); in ipr_sata_reset()
5105 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); in ipr_sata_reset()
5110 rc = ipr_device_reset(ioa_cfg, res); in ipr_sata_reset()
5114 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_sata_reset()
5133 struct ipr_ioa_cfg *ioa_cfg; in __ipr_eh_dev_reset() local
5140 ioa_cfg = (struct ipr_ioa_cfg *) scsi_cmd->device->host->hostdata; in __ipr_eh_dev_reset()
5151 if (ioa_cfg->in_reset_reload) in __ipr_eh_dev_reset()
5153 if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead) in __ipr_eh_dev_reset()
5156 for_each_hrrq(hrrq, ioa_cfg) { in __ipr_eh_dev_reset()
5182 for_each_hrrq(hrrq, ioa_cfg) { in __ipr_eh_dev_reset()
5195 rc = ipr_device_reset(ioa_cfg, res); in __ipr_eh_dev_reset()
5206 struct ipr_ioa_cfg *ioa_cfg; in ipr_eh_dev_reset() local
5208 ioa_cfg = (struct ipr_ioa_cfg *) cmd->device->host->hostdata; in ipr_eh_dev_reset()
5215 rc = ipr_wait_for_ops(ioa_cfg, cmd->device, ipr_match_lun); in ipr_eh_dev_reset()
5231 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; in ipr_bus_reset_done() local
5235 if (!ioa_cfg->sis64) in ipr_bus_reset_done()
5236 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) { in ipr_bus_reset_done()
5238 scsi_report_bus_reset(ioa_cfg->host, res->bus); in ipr_bus_reset_done()
5270 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; in ipr_abort_timeout() local
5275 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); in ipr_abort_timeout()
5276 if (ipr_cmd->completion.done || ioa_cfg->in_reset_reload) { in ipr_abort_timeout()
5277 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_abort_timeout()
5282 reset_cmd = ipr_get_free_ipr_cmnd(ioa_cfg); in ipr_abort_timeout()
5292 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_abort_timeout()
5308 struct ipr_ioa_cfg *ioa_cfg; in ipr_cancel_op() local
5316 ioa_cfg = (struct ipr_ioa_cfg *)scsi_cmd->device->host->hostdata; in ipr_cancel_op()
5323 if (ioa_cfg->in_reset_reload || in ipr_cancel_op()
5324 ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead) in ipr_cancel_op()
5334 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg); in ipr_cancel_op()
5339 for_each_hrrq(hrrq, ioa_cfg) { in ipr_cancel_op()
5354 ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg); in ipr_cancel_op()
5393 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata; in ipr_scan_finished() local
5397 if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead || ioa_cfg->scan_done) in ipr_scan_finished()
5399 if ((elapsed_time/HZ) > (ioa_cfg->transop_timeout * 2)) in ipr_scan_finished()
5416 struct ipr_ioa_cfg *ioa_cfg; in ipr_eh_abort() local
5420 ioa_cfg = (struct ipr_ioa_cfg *) scsi_cmd->device->host->hostdata; in ipr_eh_abort()
5427 rc = ipr_wait_for_ops(ioa_cfg, scsi_cmd->device, ipr_match_lun); in ipr_eh_abort()
5440 static irqreturn_t ipr_handle_other_interrupt(struct ipr_ioa_cfg *ioa_cfg, in ipr_handle_other_interrupt() argument
5446 int_mask_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg32); in ipr_handle_other_interrupt()
5453 if (ioa_cfg->sis64) { in ipr_handle_other_interrupt()
5454 int_mask_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg); in ipr_handle_other_interrupt()
5455 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg) & ~int_mask_reg; in ipr_handle_other_interrupt()
5459 writel(IPR_PCII_IPL_STAGE_CHANGE, ioa_cfg->regs.clr_interrupt_reg); in ipr_handle_other_interrupt()
5460 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg) & ~int_mask_reg; in ipr_handle_other_interrupt()
5461 list_del(&ioa_cfg->reset_cmd->queue); in ipr_handle_other_interrupt()
5462 del_timer(&ioa_cfg->reset_cmd->timer); in ipr_handle_other_interrupt()
5463 ipr_reset_ioa_job(ioa_cfg->reset_cmd); in ipr_handle_other_interrupt()
5473 writel(IPR_PCII_IOA_TRANS_TO_OPER, ioa_cfg->regs.set_interrupt_mask_reg); in ipr_handle_other_interrupt()
5474 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg); in ipr_handle_other_interrupt()
5476 list_del(&ioa_cfg->reset_cmd->queue); in ipr_handle_other_interrupt()
5477 del_timer(&ioa_cfg->reset_cmd->timer); in ipr_handle_other_interrupt()
5478 ipr_reset_ioa_job(ioa_cfg->reset_cmd); in ipr_handle_other_interrupt()
5480 if (ioa_cfg->clear_isr) { in ipr_handle_other_interrupt()
5482 dev_err(&ioa_cfg->pdev->dev, in ipr_handle_other_interrupt()
5484 writel(IPR_PCII_HRRQ_UPDATED, ioa_cfg->regs.clr_interrupt_reg32); in ipr_handle_other_interrupt()
5485 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32); in ipr_handle_other_interrupt()
5490 ioa_cfg->ioa_unit_checked = 1; in ipr_handle_other_interrupt()
5492 dev_err(&ioa_cfg->pdev->dev, in ipr_handle_other_interrupt()
5495 dev_err(&ioa_cfg->pdev->dev, in ipr_handle_other_interrupt()
5498 if (WAIT_FOR_DUMP == ioa_cfg->sdt_state) in ipr_handle_other_interrupt()
5499 ioa_cfg->sdt_state = GET_DUMP; in ipr_handle_other_interrupt()
5501 ipr_mask_and_clear_interrupts(ioa_cfg, ~0); in ipr_handle_other_interrupt()
5502 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE); in ipr_handle_other_interrupt()
5516 static void ipr_isr_eh(struct ipr_ioa_cfg *ioa_cfg, char *msg, u16 number) in ipr_isr_eh() argument
5518 ioa_cfg->errors_logged++; in ipr_isr_eh()
5519 dev_err(&ioa_cfg->pdev->dev, "%s %d\n", msg, number); in ipr_isr_eh()
5521 if (WAIT_FOR_DUMP == ioa_cfg->sdt_state) in ipr_isr_eh()
5522 ioa_cfg->sdt_state = GET_DUMP; in ipr_isr_eh()
5524 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE); in ipr_isr_eh()
5533 struct ipr_ioa_cfg *ioa_cfg = hrr_queue->ioa_cfg; in ipr_process_hrrq() local
5549 ipr_isr_eh(ioa_cfg, in ipr_process_hrrq()
5555 ipr_cmd = ioa_cfg->ipr_cmnd_list[cmd_index]; in ipr_process_hrrq()
5578 struct ipr_ioa_cfg *ioa_cfg; in ipr_iopoll() local
5586 ioa_cfg = hrrq->ioa_cfg; in ipr_iopoll()
5615 struct ipr_ioa_cfg *ioa_cfg = hrrq->ioa_cfg; in ipr_isr() local
5635 if (!ioa_cfg->clear_isr) in ipr_isr()
5642 ioa_cfg->regs.clr_interrupt_reg32); in ipr_isr()
5643 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32); in ipr_isr()
5648 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32); in ipr_isr()
5652 ipr_isr_eh(ioa_cfg, in ipr_isr()
5661 rc = ipr_handle_other_interrupt(ioa_cfg, int_reg); in ipr_isr()
5683 struct ipr_ioa_cfg *ioa_cfg = hrrq->ioa_cfg; in ipr_isr_mhrrq() local
5697 if (ioa_cfg->iopoll_weight && ioa_cfg->sis64 && ioa_cfg->nvectors > 1) { in ipr_isr_mhrrq()
5731 static int ipr_build_ioadl64(struct ipr_ioa_cfg *ioa_cfg, in ipr_build_ioadl64() argument
5749 dev_err(&ioa_cfg->pdev->dev, "scsi_dma_map failed!\n"); in ipr_build_ioadl64()
5783 static int ipr_build_ioadl(struct ipr_ioa_cfg *ioa_cfg, in ipr_build_ioadl() argument
5800 dev_err(&ioa_cfg->pdev->dev, "scsi_dma_map failed!\n"); in ipr_build_ioadl()
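
Both builders (ipr_build_ioadl64() for SIS-64, ipr_build_ioadl() for older adapters) start from scsi_dma_map() and fail the command when mapping fails, then walk the mapped scatterlist into IOA data descriptors. A sketch of the 32-bit fill loop; the flags_and_data_len/address fields and the IPR_IOADL_FLAGS_LAST end marker follow struct ipr_ioadl_desc and should be read as assumptions here:

/* Sketch: translate a scsi_cmnd's scatterlist into 32-bit IOADL entries. */
struct scatterlist *sg;
int i, nseg;

if (!scsi_sg_count(scsi_cmd))
        return 0;                       /* no data phase, no descriptors */

nseg = scsi_dma_map(scsi_cmd);
if (nseg < 0) {
        dev_err(&ioa_cfg->pdev->dev, "scsi_dma_map failed!\n");
        return -1;
}

scsi_for_each_sg(scsi_cmd, sg, nseg, i) {
        ioadl[i].flags_and_data_len = cpu_to_be32(ioadl_flags | sg_dma_len(sg));
        ioadl[i].address = cpu_to_be32(sg_dma_address(sg));
}
/* Mark the final descriptor so the IOA knows where the list ends. */
ioadl[i - 1].flags_and_data_len |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
return 0;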
5892 if (ipr_cmd->ioa_cfg->sis64) in ipr_reinit_ipr_cmnd_for_erp()
5986 static void ipr_dump_ioasa(struct ipr_ioa_cfg *ioa_cfg, in ipr_dump_ioasa() argument
6002 if (ioa_cfg->log_level < IPR_DEFAULT_LOG_LEVEL) in ipr_dump_ioasa()
6010 if (ioa_cfg->log_level < IPR_MAX_LOG_LEVEL) { in ipr_dump_ioasa()
6022 ipr_res_err(ioa_cfg, res, "%s\n", ipr_error_table[error_index].error); in ipr_dump_ioasa()
6025 if (ioa_cfg->sis64 && sizeof(struct ipr_ioasa64) < data_len) in ipr_dump_ioasa()
6027 else if (!ioa_cfg->sis64 && sizeof(struct ipr_ioasa) < data_len) in ipr_dump_ioasa()
6150 if (ipr_cmd->ioa_cfg->sis64) in ipr_get_autosense()
6172 static void ipr_erp_start(struct ipr_ioa_cfg *ioa_cfg, in ipr_erp_start() argument
6188 ipr_dump_ioasa(ioa_cfg, ipr_cmd, res); in ipr_erp_start()
6222 scsi_report_bus_reset(ioa_cfg->host, scsi_cmd->device->channel); in ipr_erp_start()
6274 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; in ipr_scsi_done() local
6289 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); in ipr_scsi_done()
6291 ipr_erp_start(ioa_cfg, ipr_cmd); in ipr_scsi_done()
6293 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_scsi_done()
6312 struct ipr_ioa_cfg *ioa_cfg; in ipr_queuecommand() local
6321 ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata; in ipr_queuecommand()
6327 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); in ipr_queuecommand()
6329 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_queuecommand()
6333 hrrq_id = ipr_get_hrrq_index(ioa_cfg); in ipr_queuecommand()
6334 hrrq = &ioa_cfg->hrrq[hrrq_id]; in ipr_queuecommand()
6401 if (ioa_cfg->sis64) in ipr_queuecommand()
6402 rc = ipr_build_ioadl64(ioa_cfg, ipr_cmd); in ipr_queuecommand()
6404 rc = ipr_build_ioadl(ioa_cfg, ipr_cmd); in ipr_queuecommand()
6475 struct ipr_ioa_cfg *ioa_cfg; in ipr_ioa_info() local
6478 ioa_cfg = (struct ipr_ioa_cfg *) host->hostdata; in ipr_ioa_info()
6481 sprintf(buffer, "IBM %X Storage Adapter", ioa_cfg->type); in ipr_ioa_info()
6525 struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg; in ipr_ata_phy_reset() local
6529 spin_lock_irqsave(ioa_cfg->host->host_lock, flags); in ipr_ata_phy_reset()
6530 while (ioa_cfg->in_reset_reload) { in ipr_ata_phy_reset()
6531 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags); in ipr_ata_phy_reset()
6532 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload); in ipr_ata_phy_reset()
6533 spin_lock_irqsave(ioa_cfg->host->host_lock, flags); in ipr_ata_phy_reset()
6536 if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds) in ipr_ata_phy_reset()
6539 rc = ipr_device_reset(ioa_cfg, res); in ipr_ata_phy_reset()
6551 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags); in ipr_ata_phy_reset()
6565 struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg; in ipr_ata_post_internal() local
6570 spin_lock_irqsave(ioa_cfg->host->host_lock, flags); in ipr_ata_post_internal()
6571 while (ioa_cfg->in_reset_reload) { in ipr_ata_post_internal()
6572 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags); in ipr_ata_post_internal()
6573 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload); in ipr_ata_post_internal()
6574 spin_lock_irqsave(ioa_cfg->host->host_lock, flags); in ipr_ata_post_internal()
6577 for_each_hrrq(hrrq, ioa_cfg) { in ipr_ata_post_internal()
6581 ipr_device_reset(ioa_cfg, sata_port->res); in ipr_ata_post_internal()
6587 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags); in ipr_ata_post_internal()
6628 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; in ipr_sata_done() local
6635 if (ipr_cmd->ioa_cfg->sis64) in ipr_sata_done()
6641 ipr_dump_ioasa(ioa_cfg, ipr_cmd, res); in ipr_sata_done()
6644 scsi_report_device_reset(ioa_cfg->host, res->bus, res->target); in ipr_sata_done()
6757 struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg; in ipr_qc_defer() local
6762 hrrq_id = ipr_get_hrrq_index(ioa_cfg); in ipr_qc_defer()
6763 hrrq = &ioa_cfg->hrrq[hrrq_id]; in ipr_qc_defer()
6800 struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg; in ipr_qc_issue() local
6824 if (ioa_cfg->sis64) { in ipr_qc_issue()
6842 if (ioa_cfg->sis64) in ipr_qc_issue()
6955 static int ipr_invalid_adapter(struct ipr_ioa_cfg *ioa_cfg) in ipr_invalid_adapter() argument
6959 if ((ioa_cfg->type == 0x5702) && (ioa_cfg->pdev->revision < 4)) { in ipr_invalid_adapter()
6968 #define ipr_invalid_adapter(ioa_cfg) 0 argument
6983 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; in ipr_ioa_bringdown_done() local
6987 if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].removing_ioa) { in ipr_ioa_bringdown_done()
6989 spin_unlock_irq(ioa_cfg->host->host_lock); in ipr_ioa_bringdown_done()
6990 scsi_unblock_requests(ioa_cfg->host); in ipr_ioa_bringdown_done()
6991 spin_lock_irq(ioa_cfg->host->host_lock); in ipr_ioa_bringdown_done()
6994 ioa_cfg->in_reset_reload = 0; in ipr_ioa_bringdown_done()
6995 ioa_cfg->reset_retries = 0; in ipr_ioa_bringdown_done()
6996 for (i = 0; i < ioa_cfg->hrrq_num; i++) { in ipr_ioa_bringdown_done()
6997 spin_lock(&ioa_cfg->hrrq[i]._lock); in ipr_ioa_bringdown_done()
6998 ioa_cfg->hrrq[i].ioa_is_dead = 1; in ipr_ioa_bringdown_done()
6999 spin_unlock(&ioa_cfg->hrrq[i]._lock); in ipr_ioa_bringdown_done()
7004 wake_up_all(&ioa_cfg->reset_wait_q); in ipr_ioa_bringdown_done()
7023 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; in ipr_ioa_reset_done() local
7029 ioa_cfg->in_reset_reload = 0; in ipr_ioa_reset_done()
7030 for (j = 0; j < ioa_cfg->hrrq_num; j++) { in ipr_ioa_reset_done()
7031 spin_lock(&ioa_cfg->hrrq[j]._lock); in ipr_ioa_reset_done()
7032 ioa_cfg->hrrq[j].allow_cmds = 1; in ipr_ioa_reset_done()
7033 spin_unlock(&ioa_cfg->hrrq[j]._lock); in ipr_ioa_reset_done()
7036 ioa_cfg->reset_cmd = NULL; in ipr_ioa_reset_done()
7037 ioa_cfg->doorbell |= IPR_RUNTIME_RESET; in ipr_ioa_reset_done()
7039 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) { in ipr_ioa_reset_done()
7045 schedule_work(&ioa_cfg->work_q); in ipr_ioa_reset_done()
7047 list_for_each_entry_safe(hostrcb, temp, &ioa_cfg->hostrcb_free_q, queue) { in ipr_ioa_reset_done()
7050 ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_LOG_DATA, hostrcb); in ipr_ioa_reset_done()
7052 ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE, hostrcb); in ipr_ioa_reset_done()
7055 scsi_report_bus_reset(ioa_cfg->host, IPR_VSET_BUS); in ipr_ioa_reset_done()
7056 dev_info(&ioa_cfg->pdev->dev, "IOA initialized.\n"); in ipr_ioa_reset_done()
7058 ioa_cfg->reset_retries = 0; in ipr_ioa_reset_done()
7060 wake_up_all(&ioa_cfg->reset_wait_q); in ipr_ioa_reset_done()
7062 spin_unlock(ioa_cfg->host->host_lock); in ipr_ioa_reset_done()
7063 scsi_unblock_requests(ioa_cfg->host); in ipr_ioa_reset_done()
7064 spin_lock(ioa_cfg->host->host_lock); in ipr_ioa_reset_done()
7066 if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds) in ipr_ioa_reset_done()
7067 scsi_block_requests(ioa_cfg->host); in ipr_ioa_reset_done()
7069 schedule_work(&ioa_cfg->work_q); in ipr_ioa_reset_done()
7104 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; in ipr_set_supported_devs() local
7105 struct ipr_supported_device *supp_dev = &ioa_cfg->vpd_cbs->supp_dev; in ipr_set_supported_devs()
7111 list_for_each_entry_continue(res, &ioa_cfg->used_res_q, queue) { in ipr_set_supported_devs()
7128 ioa_cfg->vpd_cbs_dma + in ipr_set_supported_devs()
7136 if (!ioa_cfg->sis64) in ipr_set_supported_devs()
7195 static void ipr_check_term_power(struct ipr_ioa_cfg *ioa_cfg, in ipr_check_term_power() argument
7212 dev_err(&ioa_cfg->pdev->dev, in ipr_check_term_power()
7232 static void ipr_scsi_bus_speed_limit(struct ipr_ioa_cfg *ioa_cfg) in ipr_scsi_bus_speed_limit() argument
7238 max_xfer_rate = ipr_get_max_scsi_speed(ioa_cfg, i, in ipr_scsi_bus_speed_limit()
7239 ioa_cfg->bus_attr[i].bus_width); in ipr_scsi_bus_speed_limit()
7241 if (max_xfer_rate < ioa_cfg->bus_attr[i].max_xfer_rate) in ipr_scsi_bus_speed_limit()
7242 ioa_cfg->bus_attr[i].max_xfer_rate = max_xfer_rate; in ipr_scsi_bus_speed_limit()
7256 static void ipr_modify_ioafp_mode_page_28(struct ipr_ioa_cfg *ioa_cfg, in ipr_modify_ioafp_mode_page_28() argument
7274 dev_err(&ioa_cfg->pdev->dev, in ipr_modify_ioafp_mode_page_28()
7280 bus_attr = &ioa_cfg->bus_attr[i]; in ipr_modify_ioafp_mode_page_28()
7331 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; in ipr_ioafp_mode_select_page28() local
7332 struct ipr_mode_pages *mode_pages = &ioa_cfg->vpd_cbs->mode_pages; in ipr_ioafp_mode_select_page28()
7336 ipr_scsi_bus_speed_limit(ioa_cfg); in ipr_ioafp_mode_select_page28()
7337 ipr_check_term_power(ioa_cfg, mode_pages); in ipr_ioafp_mode_select_page28()
7338 ipr_modify_ioafp_mode_page_28(ioa_cfg, mode_pages); in ipr_ioafp_mode_select_page28()
7343 ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, mode_pages), in ipr_ioafp_mode_select_page28()
7347 ipr_cmd->u.res = list_entry(ioa_cfg->used_res_q.next, in ipr_ioafp_mode_select_page28()
7392 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; in ipr_reset_cmd_failed() local
7395 dev_err(&ioa_cfg->pdev->dev, in ipr_reset_cmd_failed()
7399 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE); in ipr_reset_cmd_failed()
7416 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; in ipr_reset_mode_sense_failed() local
7421 ipr_cmd->u.res = list_entry(ioa_cfg->used_res_q.next, in ipr_reset_mode_sense_failed()
7441 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; in ipr_ioafp_mode_sense_page28() local
7445 0x28, ioa_cfg->vpd_cbs_dma + in ipr_ioafp_mode_sense_page28()
7469 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; in ipr_ioafp_mode_select_page24() local
7470 struct ipr_mode_pages *mode_pages = &ioa_cfg->vpd_cbs->mode_pages; in ipr_ioafp_mode_select_page24()
7485 ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, mode_pages), in ipr_ioafp_mode_select_page24()
7529 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; in ipr_ioafp_mode_sense_page24() local
7533 0x24, ioa_cfg->vpd_cbs_dma + in ipr_ioafp_mode_sense_page24()
7560 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; in ipr_init_res_table() local
7567 if (ioa_cfg->sis64) in ipr_init_res_table()
7568 flag = ioa_cfg->u.cfg_table64->hdr64.flags; in ipr_init_res_table()
7570 flag = ioa_cfg->u.cfg_table->hdr.flags; in ipr_init_res_table()
7573 dev_err(&ioa_cfg->pdev->dev, "Microcode download required\n"); in ipr_init_res_table()
7575 list_for_each_entry_safe(res, temp, &ioa_cfg->used_res_q, queue) in ipr_init_res_table()
7578 if (ioa_cfg->sis64) in ipr_init_res_table()
7579 entries = be16_to_cpu(ioa_cfg->u.cfg_table64->hdr64.num_entries); in ipr_init_res_table()
7581 entries = ioa_cfg->u.cfg_table->hdr.num_entries; in ipr_init_res_table()
7584 if (ioa_cfg->sis64) in ipr_init_res_table()
7585 cfgtew.u.cfgte64 = &ioa_cfg->u.cfg_table64->dev[i]; in ipr_init_res_table()
7587 cfgtew.u.cfgte = &ioa_cfg->u.cfg_table->dev[i]; in ipr_init_res_table()
7592 list_move_tail(&res->queue, &ioa_cfg->used_res_q); in ipr_init_res_table()
7599 if (list_empty(&ioa_cfg->free_res_q)) { in ipr_init_res_table()
7600 dev_err(&ioa_cfg->pdev->dev, "Too many devices attached\n"); in ipr_init_res_table()
7605 res = list_entry(ioa_cfg->free_res_q.next, in ipr_init_res_table()
7607 list_move_tail(&res->queue, &ioa_cfg->used_res_q); in ipr_init_res_table()
7621 list_move_tail(&res->queue, &ioa_cfg->used_res_q); in ipr_init_res_table()
7627 list_move_tail(&res->queue, &ioa_cfg->free_res_q); in ipr_init_res_table()
7630 if (ioa_cfg->dual_raid && ipr_dual_ioa_raid) in ipr_init_res_table()
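The ipr_init_res_table() fragments sketch a reconcile loop: entries on used_res_q are set aside, each device in the freshly fetched config table is matched against them, matches move back to used_res_q, new devices pull an entry from free_res_q ("Too many devices attached" if that list is empty), and leftovers return to free_res_q. A minimal user-space analogue of the same bookkeeping (simplified types; matching on a plain integer id is an assumption for illustration, and an in_use flag stands in for the two kernel lists):

    #include <stdio.h>

    #define MAX_RES 8

    struct res { int id; int in_use; };

    int main(void)
    {
            /* Entries 1 and 2 were in use before the reset. */
            struct res pool[MAX_RES] = { {1, 1}, {2, 1} };
            int nalloc = 2;

            /* Freshly fetched config table: device 2 survived, 5 is new. */
            int cfg_table[] = { 2, 5 };
            int entries = 2;

            /* Pass 1: mark everything stale, like setting used_res_q aside. */
            for (int i = 0; i < nalloc; i++)
                    pool[i].in_use = 0;

            /* Pass 2: re-claim matches, allocate entries for new devices. */
            for (int e = 0; e < entries; e++) {
                    int found = 0;

                    for (int i = 0; i < nalloc; i++)
                            if (pool[i].id == cfg_table[e]) {
                                    pool[i].in_use = 1; /* back onto used_res_q */
                                    found = 1;
                            }
                    if (!found) {
                            if (nalloc == MAX_RES) { /* free_res_q empty */
                                    fprintf(stderr, "Too many devices attached\n");
                                    return 1;
                            }
                            pool[nalloc].id = cfg_table[e];
                            pool[nalloc++].in_use = 1;
                    }
            }

            /* Anything still stale goes back to the free list. */
            for (int i = 0; i < nalloc; i++)
                    printf("id %d -> %s\n", pool[i].id,
                           pool[i].in_use ? "used_res_q" : "free_res_q");
            return 0;
    }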
7651 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; in ipr_ioafp_query_ioa_cfg() local
7653 struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data; in ipr_ioafp_query_ioa_cfg()
7654 struct ipr_inquiry_cap *cap = &ioa_cfg->vpd_cbs->cap; in ipr_ioafp_query_ioa_cfg()
7658 ioa_cfg->dual_raid = 1; in ipr_ioafp_query_ioa_cfg()
7659 dev_info(&ioa_cfg->pdev->dev, "Adapter firmware version: %02X%02X%02X%02X\n", in ipr_ioafp_query_ioa_cfg()
7666 ioarcb->cmd_pkt.cdb[6] = (ioa_cfg->cfg_table_size >> 16) & 0xff; in ipr_ioafp_query_ioa_cfg()
7667 ioarcb->cmd_pkt.cdb[7] = (ioa_cfg->cfg_table_size >> 8) & 0xff; in ipr_ioafp_query_ioa_cfg()
7668 ioarcb->cmd_pkt.cdb[8] = ioa_cfg->cfg_table_size & 0xff; in ipr_ioafp_query_ioa_cfg()
7670 ipr_init_ioadl(ipr_cmd, ioa_cfg->cfg_table_dma, ioa_cfg->cfg_table_size, in ipr_ioafp_query_ioa_cfg()
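The cdb[6..8] assignments in ipr_ioafp_query_ioa_cfg() store cfg_table_size as a 24-bit allocation length in big-endian byte order, the usual SCSI CDB convention. The packing in isolation:

    #include <stdio.h>

    int main(void)
    {
            unsigned char cdb[16] = {0};
            unsigned int cfg_table_size = 0x012345; /* example size */

            /* 24-bit big-endian allocation length in bytes 6..8 */
            cdb[6] = (cfg_table_size >> 16) & 0xff;
            cdb[7] = (cfg_table_size >> 8) & 0xff;
            cdb[8] = cfg_table_size & 0xff;

            printf("%02x %02x %02x\n", cdb[6], cdb[7], cdb[8]); /* 01 23 45 */
            return 0;
    }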
7712 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; in ipr_ioafp_set_caching_parameters() local
7713 struct ipr_inquiry_pageC4 *pageC4 = &ioa_cfg->vpd_cbs->pageC4_data; in ipr_ioafp_set_caching_parameters()
7800 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; in ipr_ioafp_pageC4_inquiry() local
7801 struct ipr_inquiry_page0 *page0 = &ioa_cfg->vpd_cbs->page0_data; in ipr_ioafp_pageC4_inquiry()
7802 struct ipr_inquiry_pageC4 *pageC4 = &ioa_cfg->vpd_cbs->pageC4_data; in ipr_ioafp_pageC4_inquiry()
7810 (ioa_cfg->vpd_cbs_dma in ipr_ioafp_pageC4_inquiry()
7833 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; in ipr_ioafp_cap_inquiry() local
7834 struct ipr_inquiry_page0 *page0 = &ioa_cfg->vpd_cbs->page0_data; in ipr_ioafp_cap_inquiry()
7835 struct ipr_inquiry_cap *cap = &ioa_cfg->vpd_cbs->cap; in ipr_ioafp_cap_inquiry()
7843 ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, cap), in ipr_ioafp_cap_inquiry()
7864 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; in ipr_ioafp_page3_inquiry() local
7871 ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, page3_data), in ipr_ioafp_page3_inquiry()
7890 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; in ipr_ioafp_page0_inquiry() local
7896 memcpy(type, ioa_cfg->vpd_cbs->ioa_vpd.std_inq_data.vpids.product_id, 4); in ipr_ioafp_page0_inquiry()
7898 ioa_cfg->type = simple_strtoul((char *)type, NULL, 16); in ipr_ioafp_page0_inquiry()
7900 if (ipr_invalid_adapter(ioa_cfg)) { in ipr_ioafp_page0_inquiry()
7901 dev_err(&ioa_cfg->pdev->dev, in ipr_ioafp_page0_inquiry()
7905 ioa_cfg->reset_retries += IPR_NUM_RESET_RELOAD_RETRIES; in ipr_ioafp_page0_inquiry()
7906 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE); in ipr_ioafp_page0_inquiry()
7908 &ioa_cfg->hrrq->hrrq_free_q); in ipr_ioafp_page0_inquiry()
7916 ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, page0_data), in ipr_ioafp_page0_inquiry()
7934 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; in ipr_ioafp_std_inquiry() local
7940 ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, ioa_vpd), in ipr_ioafp_std_inquiry()
7959 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; in ipr_ioafp_identify_hrrq() local
7965 dev_info(&ioa_cfg->pdev->dev, "Starting IOA initialization sequence.\n"); in ipr_ioafp_identify_hrrq()
7967 if (ioa_cfg->identify_hrrq_index < ioa_cfg->hrrq_num) { in ipr_ioafp_identify_hrrq()
7968 hrrq = &ioa_cfg->hrrq[ioa_cfg->identify_hrrq_index]; in ipr_ioafp_identify_hrrq()
7974 if (ioa_cfg->sis64) in ipr_ioafp_identify_hrrq()
7977 if (ioa_cfg->nvectors == 1) in ipr_ioafp_identify_hrrq()
7997 ioa_cfg->identify_hrrq_index; in ipr_ioafp_identify_hrrq()
7999 if (ioa_cfg->sis64) { in ipr_ioafp_identify_hrrq()
8012 ioa_cfg->identify_hrrq_index; in ipr_ioafp_identify_hrrq()
8017 if (++ioa_cfg->identify_hrrq_index < ioa_cfg->hrrq_num) in ipr_ioafp_identify_hrrq()
8043 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; in ipr_reset_timer_done() local
8046 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); in ipr_reset_timer_done()
8048 if (ioa_cfg->reset_cmd == ipr_cmd) { in ipr_reset_timer_done()
8053 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_reset_timer_done()
8091 static void ipr_init_ioa_mem(struct ipr_ioa_cfg *ioa_cfg) in ipr_init_ioa_mem() argument
8095 for_each_hrrq(hrrq, ioa_cfg) { in ipr_init_ioa_mem()
8108 ioa_cfg->identify_hrrq_index = 0; in ipr_init_ioa_mem()
8109 if (ioa_cfg->hrrq_num == 1) in ipr_init_ioa_mem()
8110 atomic_set(&ioa_cfg->hrrq_index, 0); in ipr_init_ioa_mem()
8112 atomic_set(&ioa_cfg->hrrq_index, 1); in ipr_init_ioa_mem()
8115 memset(ioa_cfg->u.cfg_table, 0, ioa_cfg->cfg_table_size); in ipr_init_ioa_mem()
8130 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; in ipr_reset_next_stage() local
8133 feedback = readl(ioa_cfg->regs.init_feedback_reg); in ipr_reset_next_stage()
8148 writel(IPR_PCII_IPL_STAGE_CHANGE, ioa_cfg->regs.set_interrupt_mask_reg); in ipr_reset_next_stage()
8149 int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg); in ipr_reset_next_stage()
8150 stage_time = ioa_cfg->transop_timeout; in ipr_reset_next_stage()
8153 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32); in ipr_reset_next_stage()
8158 writeq(maskval, ioa_cfg->regs.set_interrupt_mask_reg); in ipr_reset_next_stage()
8159 int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg); in ipr_reset_next_stage()
8187 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; in ipr_reset_enable_ioa() local
8194 ipr_init_ioa_mem(ioa_cfg); in ipr_reset_enable_ioa()
8196 for (i = 0; i < ioa_cfg->hrrq_num; i++) { in ipr_reset_enable_ioa()
8197 spin_lock(&ioa_cfg->hrrq[i]._lock); in ipr_reset_enable_ioa()
8198 ioa_cfg->hrrq[i].allow_interrupts = 1; in ipr_reset_enable_ioa()
8199 spin_unlock(&ioa_cfg->hrrq[i]._lock); in ipr_reset_enable_ioa()
8202 if (ioa_cfg->sis64) { in ipr_reset_enable_ioa()
8204 writel(IPR_ENDIAN_SWAP_KEY, ioa_cfg->regs.endian_swap_reg); in ipr_reset_enable_ioa()
8205 int_reg = readl(ioa_cfg->regs.endian_swap_reg); in ipr_reset_enable_ioa()
8208 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32); in ipr_reset_enable_ioa()
8212 ioa_cfg->regs.clr_interrupt_mask_reg32); in ipr_reset_enable_ioa()
8213 int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg); in ipr_reset_enable_ioa()
8218 writel(ioa_cfg->doorbell, ioa_cfg->regs.set_uproc_interrupt_reg32); in ipr_reset_enable_ioa()
8220 if (ioa_cfg->sis64) { in ipr_reset_enable_ioa()
8223 writeq(maskval, ioa_cfg->regs.clr_interrupt_mask_reg); in ipr_reset_enable_ioa()
8225 writel(IPR_PCII_OPER_INTERRUPTS, ioa_cfg->regs.clr_interrupt_mask_reg32); in ipr_reset_enable_ioa()
8227 int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg); in ipr_reset_enable_ioa()
8229 dev_info(&ioa_cfg->pdev->dev, "Initializing IOA.\n"); in ipr_reset_enable_ioa()
8231 if (ioa_cfg->sis64) { in ipr_reset_enable_ioa()
8237 ipr_cmd->timer.expires = jiffies + (ioa_cfg->transop_timeout * HZ); in ipr_reset_enable_ioa()
8259 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; in ipr_reset_wait_for_dump() local
8261 if (ioa_cfg->sdt_state == GET_DUMP) in ipr_reset_wait_for_dump()
8262 ioa_cfg->sdt_state = WAIT_FOR_DUMP; in ipr_reset_wait_for_dump()
8263 else if (ioa_cfg->sdt_state == READ_DUMP) in ipr_reset_wait_for_dump()
8264 ioa_cfg->sdt_state = ABORT_DUMP; in ipr_reset_wait_for_dump()
8266 ioa_cfg->dump_timeout = 1; in ipr_reset_wait_for_dump()
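ipr_reset_wait_for_dump() demotes an in-progress dump when the reset path can no longer wait for it: a dump not yet started (GET_DUMP) is deferred to WAIT_FOR_DUMP, one already reading (READ_DUMP) is aborted, and dump_timeout is flagged either way. The state transition as a compact sketch (only the state names appear in the listing; the enum values are illustrative):

    #include <stdio.h>

    enum sdt_state { INACTIVE, GET_DUMP, READ_DUMP, WAIT_FOR_DUMP, ABORT_DUMP };

    static void on_dump_timeout(enum sdt_state *state, int *dump_timeout)
    {
            if (*state == GET_DUMP)
                    *state = WAIT_FOR_DUMP;   /* defer until after the reset */
            else if (*state == READ_DUMP)
                    *state = ABORT_DUMP;      /* give up on the partial dump */
            *dump_timeout = 1;
    }

    int main(void)
    {
            enum sdt_state s = READ_DUMP;
            int timed_out = 0;

            on_dump_timeout(&s, &timed_out);
            printf("state=%d timeout=%d\n", s, timed_out); /* ABORT_DUMP, 1 */
            return 0;
    }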
8282 static void ipr_unit_check_no_data(struct ipr_ioa_cfg *ioa_cfg) in ipr_unit_check_no_data() argument
8284 ioa_cfg->errors_logged++; in ipr_unit_check_no_data()
8285 dev_err(&ioa_cfg->pdev->dev, "IOA unit check with no data\n"); in ipr_unit_check_no_data()
8298 static void ipr_get_unit_check_buffer(struct ipr_ioa_cfg *ioa_cfg) in ipr_get_unit_check_buffer() argument
8306 mailbox = readl(ioa_cfg->ioa_mailbox); in ipr_get_unit_check_buffer()
8308 if (!ioa_cfg->sis64 && !ipr_sdt_is_fmt2(mailbox)) { in ipr_get_unit_check_buffer()
8309 ipr_unit_check_no_data(ioa_cfg); in ipr_get_unit_check_buffer()
8314 rc = ipr_get_ldump_data_section(ioa_cfg, mailbox, (__be32 *) &sdt, in ipr_get_unit_check_buffer()
8320 ipr_unit_check_no_data(ioa_cfg); in ipr_get_unit_check_buffer()
8332 hostrcb = list_entry(ioa_cfg->hostrcb_free_q.next, in ipr_get_unit_check_buffer()
8337 rc = ipr_get_ldump_data_section(ioa_cfg, in ipr_get_unit_check_buffer()
8343 ipr_handle_log_data(ioa_cfg, hostrcb); in ipr_get_unit_check_buffer()
8346 ioa_cfg->sdt_state == GET_DUMP) in ipr_get_unit_check_buffer()
8347 ioa_cfg->sdt_state = WAIT_FOR_DUMP; in ipr_get_unit_check_buffer()
8349 ipr_unit_check_no_data(ioa_cfg); in ipr_get_unit_check_buffer()
8351 list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_free_q); in ipr_get_unit_check_buffer()
8365 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; in ipr_reset_get_unit_check_job() local
8368 ioa_cfg->ioa_unit_checked = 0; in ipr_reset_get_unit_check_job()
8369 ipr_get_unit_check_buffer(ioa_cfg); in ipr_reset_get_unit_check_job()
8379 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; in ipr_dump_mailbox_wait() local
8383 if (ioa_cfg->sdt_state != GET_DUMP) in ipr_dump_mailbox_wait()
8386 if (!ioa_cfg->sis64 || !ipr_cmd->u.time_left || in ipr_dump_mailbox_wait()
8387 (readl(ioa_cfg->regs.sense_interrupt_reg) & in ipr_dump_mailbox_wait()
8391 dev_err(&ioa_cfg->pdev->dev, in ipr_dump_mailbox_wait()
8394 ioa_cfg->sdt_state = READ_DUMP; in ipr_dump_mailbox_wait()
8395 ioa_cfg->dump_timeout = 0; in ipr_dump_mailbox_wait()
8396 if (ioa_cfg->sis64) in ipr_dump_mailbox_wait()
8401 schedule_work(&ioa_cfg->work_q); in ipr_dump_mailbox_wait()
8426 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; in ipr_reset_restore_cfg_space() local
8430 ioa_cfg->pdev->state_saved = true; in ipr_reset_restore_cfg_space()
8431 pci_restore_state(ioa_cfg->pdev); in ipr_reset_restore_cfg_space()
8433 if (ipr_set_pcix_cmd_reg(ioa_cfg)) { in ipr_reset_restore_cfg_space()
8438 ipr_fail_all_ops(ioa_cfg); in ipr_reset_restore_cfg_space()
8440 if (ioa_cfg->sis64) { in ipr_reset_restore_cfg_space()
8442 writel(IPR_ENDIAN_SWAP_KEY, ioa_cfg->regs.endian_swap_reg); in ipr_reset_restore_cfg_space()
8443 int_reg = readl(ioa_cfg->regs.endian_swap_reg); in ipr_reset_restore_cfg_space()
8446 if (ioa_cfg->ioa_unit_checked) { in ipr_reset_restore_cfg_space()
8447 if (ioa_cfg->sis64) { in ipr_reset_restore_cfg_space()
8452 ioa_cfg->ioa_unit_checked = 0; in ipr_reset_restore_cfg_space()
8453 ipr_get_unit_check_buffer(ioa_cfg); in ipr_reset_restore_cfg_space()
8460 if (ioa_cfg->in_ioa_bringdown) { in ipr_reset_restore_cfg_space()
8462 } else if (ioa_cfg->sdt_state == GET_DUMP) { in ipr_reset_restore_cfg_space()
8484 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; in ipr_reset_bist_done() local
8487 if (ioa_cfg->cfg_locked) in ipr_reset_bist_done()
8488 pci_cfg_access_unlock(ioa_cfg->pdev); in ipr_reset_bist_done()
8489 ioa_cfg->cfg_locked = 0; in ipr_reset_bist_done()
8506 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; in ipr_reset_start_bist() local
8510 if (ioa_cfg->ipr_chip->bist_method == IPR_MMIO) in ipr_reset_start_bist()
8512 ioa_cfg->regs.set_uproc_interrupt_reg32); in ipr_reset_start_bist()
8514 rc = pci_write_config_byte(ioa_cfg->pdev, PCI_BIST, PCI_BIST_START); in ipr_reset_start_bist()
8521 if (ioa_cfg->cfg_locked) in ipr_reset_start_bist()
8522 pci_cfg_access_unlock(ipr_cmd->ioa_cfg->pdev); in ipr_reset_start_bist()
8523 ioa_cfg->cfg_locked = 0; in ipr_reset_start_bist()
8560 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; in ipr_reset_reset_work() local
8561 struct pci_dev *pdev = ioa_cfg->pdev; in ipr_reset_reset_work()
8569 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); in ipr_reset_reset_work()
8570 if (ioa_cfg->reset_cmd == ipr_cmd) in ipr_reset_reset_work()
8572 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_reset_reset_work()
8587 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; in ipr_reset_slot_reset() local
8591 queue_work(ioa_cfg->reset_work_q, &ipr_cmd->work); in ipr_reset_slot_reset()
8608 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; in ipr_reset_block_config_access_wait() local
8611 if (pci_cfg_access_trylock(ioa_cfg->pdev)) { in ipr_reset_block_config_access_wait()
8612 ioa_cfg->cfg_locked = 1; in ipr_reset_block_config_access_wait()
8613 ipr_cmd->job_step = ioa_cfg->reset; in ipr_reset_block_config_access_wait()
8621 ipr_cmd->job_step = ioa_cfg->reset; in ipr_reset_block_config_access_wait()
8622 dev_err(&ioa_cfg->pdev->dev, in ipr_reset_block_config_access_wait()
8641 ipr_cmd->ioa_cfg->cfg_locked = 0; in ipr_reset_block_config_access()
8654 static int ipr_reset_allowed(struct ipr_ioa_cfg *ioa_cfg) in ipr_reset_allowed() argument
8658 temp_reg = readl(ioa_cfg->regs.sense_interrupt_reg); in ipr_reset_allowed()
8679 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; in ipr_reset_wait_to_start_bist() local
8682 if (!ipr_reset_allowed(ioa_cfg) && ipr_cmd->u.time_left) { in ipr_reset_wait_to_start_bist()
8707 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; in ipr_reset_alert() local
8712 rc = pci_read_config_word(ioa_cfg->pdev, PCI_COMMAND, &cmd_reg); in ipr_reset_alert()
8715 ipr_mask_and_clear_interrupts(ioa_cfg, ~0); in ipr_reset_alert()
8716 writel(IPR_UPROCI_RESET_ALERT, ioa_cfg->regs.set_uproc_interrupt_reg32); in ipr_reset_alert()
8740 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; in ipr_reset_quiesce_done() local
8744 ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER); in ipr_reset_quiesce_done()
8761 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; in ipr_reset_cancel_hcam_done() local
8770 for_each_hrrq(hrrq, ioa_cfg) { in ipr_reset_cancel_hcam_done()
8774 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE); in ipr_reset_cancel_hcam_done()
8800 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; in ipr_reset_cancel_hcam() local
8804 struct ipr_hrr_queue *hrrq = &ioa_cfg->hrrq[IPR_INIT_HRRQ]; in ipr_reset_cancel_hcam()
8810 if (!list_empty(&ioa_cfg->hostrcb_pending_q)) { in ipr_reset_cancel_hcam()
8856 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; in ipr_reset_ucode_download_done() local
8857 struct ipr_sglist *sglist = ioa_cfg->ucode_sglist; in ipr_reset_ucode_download_done()
8859 dma_unmap_sg(&ioa_cfg->pdev->dev, sglist->scatterlist, in ipr_reset_ucode_download_done()
8878 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; in ipr_reset_ucode_download() local
8879 struct ipr_sglist *sglist = ioa_cfg->ucode_sglist; in ipr_reset_ucode_download()
8895 if (ioa_cfg->sis64) in ipr_reset_ucode_download()
8921 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; in ipr_reset_shutdown_ioa() local
8930 !ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead) { in ipr_reset_shutdown_ioa()
8940 else if (ioa_cfg->dual_raid && ipr_dual_ioa_raid) in ipr_reset_shutdown_ioa()
8968 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; in ipr_reset_ioa_job() local
8973 if (ioa_cfg->reset_cmd != ipr_cmd) { in ipr_reset_ioa_job()
9009 static void _ipr_initiate_ioa_reset(struct ipr_ioa_cfg *ioa_cfg, in _ipr_initiate_ioa_reset() argument
9016 ioa_cfg->in_reset_reload = 1; in _ipr_initiate_ioa_reset()
9017 for (i = 0; i < ioa_cfg->hrrq_num; i++) { in _ipr_initiate_ioa_reset()
9018 spin_lock(&ioa_cfg->hrrq[i]._lock); in _ipr_initiate_ioa_reset()
9019 ioa_cfg->hrrq[i].allow_cmds = 0; in _ipr_initiate_ioa_reset()
9020 spin_unlock(&ioa_cfg->hrrq[i]._lock); in _ipr_initiate_ioa_reset()
9023 if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].removing_ioa) in _ipr_initiate_ioa_reset()
9024 scsi_block_requests(ioa_cfg->host); in _ipr_initiate_ioa_reset()
9026 ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg); in _ipr_initiate_ioa_reset()
9027 ioa_cfg->reset_cmd = ipr_cmd; in _ipr_initiate_ioa_reset()
9046 static void ipr_initiate_ioa_reset(struct ipr_ioa_cfg *ioa_cfg, in ipr_initiate_ioa_reset() argument
9051 if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead) in ipr_initiate_ioa_reset()
9054 if (ioa_cfg->in_reset_reload) { in ipr_initiate_ioa_reset()
9055 if (ioa_cfg->sdt_state == GET_DUMP) in ipr_initiate_ioa_reset()
9056 ioa_cfg->sdt_state = WAIT_FOR_DUMP; in ipr_initiate_ioa_reset()
9057 else if (ioa_cfg->sdt_state == READ_DUMP) in ipr_initiate_ioa_reset()
9058 ioa_cfg->sdt_state = ABORT_DUMP; in ipr_initiate_ioa_reset()
9061 if (ioa_cfg->reset_retries++ >= IPR_NUM_RESET_RELOAD_RETRIES) { in ipr_initiate_ioa_reset()
9062 dev_err(&ioa_cfg->pdev->dev, in ipr_initiate_ioa_reset()
9065 ioa_cfg->reset_retries = 0; in ipr_initiate_ioa_reset()
9066 for (i = 0; i < ioa_cfg->hrrq_num; i++) { in ipr_initiate_ioa_reset()
9067 spin_lock(&ioa_cfg->hrrq[i]._lock); in ipr_initiate_ioa_reset()
9068 ioa_cfg->hrrq[i].ioa_is_dead = 1; in ipr_initiate_ioa_reset()
9069 spin_unlock(&ioa_cfg->hrrq[i]._lock); in ipr_initiate_ioa_reset()
9073 if (ioa_cfg->in_ioa_bringdown) { in ipr_initiate_ioa_reset()
9074 ioa_cfg->reset_cmd = NULL; in ipr_initiate_ioa_reset()
9075 ioa_cfg->in_reset_reload = 0; in ipr_initiate_ioa_reset()
9076 ipr_fail_all_ops(ioa_cfg); in ipr_initiate_ioa_reset()
9077 wake_up_all(&ioa_cfg->reset_wait_q); in ipr_initiate_ioa_reset()
9079 if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].removing_ioa) { in ipr_initiate_ioa_reset()
9080 spin_unlock_irq(ioa_cfg->host->host_lock); in ipr_initiate_ioa_reset()
9081 scsi_unblock_requests(ioa_cfg->host); in ipr_initiate_ioa_reset()
9082 spin_lock_irq(ioa_cfg->host->host_lock); in ipr_initiate_ioa_reset()
9086 ioa_cfg->in_ioa_bringdown = 1; in ipr_initiate_ioa_reset()
9091 _ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_shutdown_ioa, in ipr_initiate_ioa_reset()
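ipr_initiate_ioa_reset() gives up after IPR_NUM_RESET_RELOAD_RETRIES consecutive attempts: the counter is zeroed, every HRRQ is marked ioa_is_dead under its per-queue lock, and outstanding ops are failed while waiters on reset_wait_q are woken. The retry gate by itself, as a standalone sketch (the threshold value is illustrative):

    #include <stdio.h>

    #define NUM_RESET_RELOAD_RETRIES 3

    struct adapter { int reset_retries; int ioa_is_dead; };

    /* Returns 0 if another reset attempt may proceed, -1 if the
     * adapter must instead be declared dead. */
    static int reset_gate(struct adapter *a)
    {
            if (a->reset_retries++ >= NUM_RESET_RELOAD_RETRIES) {
                    a->reset_retries = 0;
                    a->ioa_is_dead = 1; /* applied per HRRQ in the driver */
                    return -1;
            }
            return 0;
    }

    int main(void)
    {
            struct adapter a = {0, 0};

            for (int i = 0; i < 5; i++)
                    printf("attempt %d: %s\n", i,
                           reset_gate(&a) ? "declare dead" : "retry");
            return 0;
    }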
9105 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; in ipr_reset_freeze() local
9109 for (i = 0; i < ioa_cfg->hrrq_num; i++) { in ipr_reset_freeze()
9110 spin_lock(&ioa_cfg->hrrq[i]._lock); in ipr_reset_freeze()
9111 ioa_cfg->hrrq[i].allow_interrupts = 0; in ipr_reset_freeze()
9112 spin_unlock(&ioa_cfg->hrrq[i]._lock); in ipr_reset_freeze()
9130 struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev); in ipr_pci_mmio_enabled() local
9132 spin_lock_irqsave(ioa_cfg->host->host_lock, flags); in ipr_pci_mmio_enabled()
9133 if (!ioa_cfg->probe_done) in ipr_pci_mmio_enabled()
9135 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags); in ipr_pci_mmio_enabled()
9150 struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev); in ipr_pci_frozen() local
9152 spin_lock_irqsave(ioa_cfg->host->host_lock, flags); in ipr_pci_frozen()
9153 if (ioa_cfg->probe_done) in ipr_pci_frozen()
9154 _ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_freeze, IPR_SHUTDOWN_NONE); in ipr_pci_frozen()
9155 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags); in ipr_pci_frozen()
9169 struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev); in ipr_pci_slot_reset() local
9171 spin_lock_irqsave(ioa_cfg->host->host_lock, flags); in ipr_pci_slot_reset()
9172 if (ioa_cfg->probe_done) { in ipr_pci_slot_reset()
9173 if (ioa_cfg->needs_warm_reset) in ipr_pci_slot_reset()
9174 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE); in ipr_pci_slot_reset()
9176 _ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_restore_cfg_space, in ipr_pci_slot_reset()
9179 wake_up_all(&ioa_cfg->eeh_wait_q); in ipr_pci_slot_reset()
9180 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags); in ipr_pci_slot_reset()
9194 struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev); in ipr_pci_perm_failure() local
9197 spin_lock_irqsave(ioa_cfg->host->host_lock, flags); in ipr_pci_perm_failure()
9198 if (ioa_cfg->probe_done) { in ipr_pci_perm_failure()
9199 if (ioa_cfg->sdt_state == WAIT_FOR_DUMP) in ipr_pci_perm_failure()
9200 ioa_cfg->sdt_state = ABORT_DUMP; in ipr_pci_perm_failure()
9201 ioa_cfg->reset_retries = IPR_NUM_RESET_RELOAD_RETRIES - 1; in ipr_pci_perm_failure()
9202 ioa_cfg->in_ioa_bringdown = 1; in ipr_pci_perm_failure()
9203 for (i = 0; i < ioa_cfg->hrrq_num; i++) { in ipr_pci_perm_failure()
9204 spin_lock(&ioa_cfg->hrrq[i]._lock); in ipr_pci_perm_failure()
9205 ioa_cfg->hrrq[i].allow_cmds = 0; in ipr_pci_perm_failure()
9206 spin_unlock(&ioa_cfg->hrrq[i]._lock); in ipr_pci_perm_failure()
9209 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE); in ipr_pci_perm_failure()
9211 wake_up_all(&ioa_cfg->eeh_wait_q); in ipr_pci_perm_failure()
9212 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags); in ipr_pci_perm_failure()
9253 static int ipr_probe_ioa_part2(struct ipr_ioa_cfg *ioa_cfg) in ipr_probe_ioa_part2() argument
9259 spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags); in ipr_probe_ioa_part2()
9260 dev_dbg(&ioa_cfg->pdev->dev, "ioa_cfg adx: 0x%p\n", ioa_cfg); in ipr_probe_ioa_part2()
9261 ioa_cfg->probe_done = 1; in ipr_probe_ioa_part2()
9262 if (ioa_cfg->needs_hard_reset) { in ipr_probe_ioa_part2()
9263 ioa_cfg->needs_hard_reset = 0; in ipr_probe_ioa_part2()
9264 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE); in ipr_probe_ioa_part2()
9266 _ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_enable_ioa, in ipr_probe_ioa_part2()
9268 spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags); in ipr_probe_ioa_part2()
9281 static void ipr_free_cmd_blks(struct ipr_ioa_cfg *ioa_cfg) in ipr_free_cmd_blks() argument
9285 if (ioa_cfg->ipr_cmnd_list) { in ipr_free_cmd_blks()
9287 if (ioa_cfg->ipr_cmnd_list[i]) in ipr_free_cmd_blks()
9288 dma_pool_free(ioa_cfg->ipr_cmd_pool, in ipr_free_cmd_blks()
9289 ioa_cfg->ipr_cmnd_list[i], in ipr_free_cmd_blks()
9290 ioa_cfg->ipr_cmnd_list_dma[i]); in ipr_free_cmd_blks()
9292 ioa_cfg->ipr_cmnd_list[i] = NULL; in ipr_free_cmd_blks()
9296 if (ioa_cfg->ipr_cmd_pool) in ipr_free_cmd_blks()
9297 dma_pool_destroy(ioa_cfg->ipr_cmd_pool); in ipr_free_cmd_blks()
9299 kfree(ioa_cfg->ipr_cmnd_list); in ipr_free_cmd_blks()
9300 kfree(ioa_cfg->ipr_cmnd_list_dma); in ipr_free_cmd_blks()
9301 ioa_cfg->ipr_cmnd_list = NULL; in ipr_free_cmd_blks()
9302 ioa_cfg->ipr_cmnd_list_dma = NULL; in ipr_free_cmd_blks()
9303 ioa_cfg->ipr_cmd_pool = NULL; in ipr_free_cmd_blks()
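ipr_free_cmd_blks() follows the usual idempotent-teardown discipline: free each element only if the containing list exists, NULL each slot as it goes, then free the containers and NULL all three pointers so a second call (for instance from an allocation error path that tears down partially built state) is harmless. The shape of that pattern in plain C, with malloc/free standing in for the DMA pool:

    #include <stdlib.h>

    struct ctx { void **list; int n; };

    static void free_blocks(struct ctx *c)
    {
            if (c->list) {
                    for (int i = 0; i < c->n; i++) {
                            free(c->list[i]);   /* free(NULL) is a no-op */
                            c->list[i] = NULL;
                    }
            }
            free(c->list);
            c->list = NULL;   /* safe to call free_blocks() again */
    }

    int main(void)
    {
            struct ctx c = { calloc(4, sizeof(void *)), 4 };

            c.list[0] = malloc(32); /* pretend allocation failed after one block */
            free_blocks(&c);
            free_blocks(&c);        /* double teardown must be harmless */
            return 0;
    }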
9313 static void ipr_free_mem(struct ipr_ioa_cfg *ioa_cfg) in ipr_free_mem() argument
9317 kfree(ioa_cfg->res_entries); in ipr_free_mem()
9318 dma_free_coherent(&ioa_cfg->pdev->dev, sizeof(struct ipr_misc_cbs), in ipr_free_mem()
9319 ioa_cfg->vpd_cbs, ioa_cfg->vpd_cbs_dma); in ipr_free_mem()
9320 ipr_free_cmd_blks(ioa_cfg); in ipr_free_mem()
9322 for (i = 0; i < ioa_cfg->hrrq_num; i++) in ipr_free_mem()
9323 dma_free_coherent(&ioa_cfg->pdev->dev, in ipr_free_mem()
9324 sizeof(u32) * ioa_cfg->hrrq[i].size, in ipr_free_mem()
9325 ioa_cfg->hrrq[i].host_rrq, in ipr_free_mem()
9326 ioa_cfg->hrrq[i].host_rrq_dma); in ipr_free_mem()
9328 dma_free_coherent(&ioa_cfg->pdev->dev, ioa_cfg->cfg_table_size, in ipr_free_mem()
9329 ioa_cfg->u.cfg_table, ioa_cfg->cfg_table_dma); in ipr_free_mem()
9332 dma_free_coherent(&ioa_cfg->pdev->dev, in ipr_free_mem()
9334 ioa_cfg->hostrcb[i], in ipr_free_mem()
9335 ioa_cfg->hostrcb_dma[i]); in ipr_free_mem()
9338 ipr_free_dump(ioa_cfg); in ipr_free_mem()
9339 kfree(ioa_cfg->trace); in ipr_free_mem()
9352 static void ipr_free_irqs(struct ipr_ioa_cfg *ioa_cfg) in ipr_free_irqs() argument
9354 struct pci_dev *pdev = ioa_cfg->pdev; in ipr_free_irqs()
9356 if (ioa_cfg->intr_flag == IPR_USE_MSI || in ipr_free_irqs()
9357 ioa_cfg->intr_flag == IPR_USE_MSIX) { in ipr_free_irqs()
9359 for (i = 0; i < ioa_cfg->nvectors; i++) in ipr_free_irqs()
9360 free_irq(ioa_cfg->vectors_info[i].vec, in ipr_free_irqs()
9361 &ioa_cfg->hrrq[i]); in ipr_free_irqs()
9363 free_irq(pdev->irq, &ioa_cfg->hrrq[0]); in ipr_free_irqs()
9365 if (ioa_cfg->intr_flag == IPR_USE_MSI) { in ipr_free_irqs()
9367 ioa_cfg->intr_flag &= ~IPR_USE_MSI; in ipr_free_irqs()
9368 } else if (ioa_cfg->intr_flag == IPR_USE_MSIX) { in ipr_free_irqs()
9370 ioa_cfg->intr_flag &= ~IPR_USE_MSIX; in ipr_free_irqs()
9384 static void ipr_free_all_resources(struct ipr_ioa_cfg *ioa_cfg) in ipr_free_all_resources() argument
9386 struct pci_dev *pdev = ioa_cfg->pdev; in ipr_free_all_resources()
9389 ipr_free_irqs(ioa_cfg); in ipr_free_all_resources()
9390 if (ioa_cfg->reset_work_q) in ipr_free_all_resources()
9391 destroy_workqueue(ioa_cfg->reset_work_q); in ipr_free_all_resources()
9392 iounmap(ioa_cfg->hdw_dma_regs); in ipr_free_all_resources()
9394 ipr_free_mem(ioa_cfg); in ipr_free_all_resources()
9395 scsi_host_put(ioa_cfg->host); in ipr_free_all_resources()
9407 static int ipr_alloc_cmd_blks(struct ipr_ioa_cfg *ioa_cfg) in ipr_alloc_cmd_blks() argument
9414 ioa_cfg->ipr_cmd_pool = dma_pool_create(IPR_NAME, &ioa_cfg->pdev->dev, in ipr_alloc_cmd_blks()
9417 if (!ioa_cfg->ipr_cmd_pool) in ipr_alloc_cmd_blks()
9420 ioa_cfg->ipr_cmnd_list = kcalloc(IPR_NUM_CMD_BLKS, sizeof(struct ipr_cmnd *), GFP_KERNEL); in ipr_alloc_cmd_blks()
9421 ioa_cfg->ipr_cmnd_list_dma = kcalloc(IPR_NUM_CMD_BLKS, sizeof(dma_addr_t), GFP_KERNEL); in ipr_alloc_cmd_blks()
9423 if (!ioa_cfg->ipr_cmnd_list || !ioa_cfg->ipr_cmnd_list_dma) { in ipr_alloc_cmd_blks()
9424 ipr_free_cmd_blks(ioa_cfg); in ipr_alloc_cmd_blks()
9428 for (i = 0; i < ioa_cfg->hrrq_num; i++) { in ipr_alloc_cmd_blks()
9429 if (ioa_cfg->hrrq_num > 1) { in ipr_alloc_cmd_blks()
9432 ioa_cfg->hrrq[i].min_cmd_id = 0; in ipr_alloc_cmd_blks()
9433 ioa_cfg->hrrq[i].max_cmd_id = in ipr_alloc_cmd_blks()
9438 (ioa_cfg->hrrq_num - 1); in ipr_alloc_cmd_blks()
9439 ioa_cfg->hrrq[i].min_cmd_id = in ipr_alloc_cmd_blks()
9442 ioa_cfg->hrrq[i].max_cmd_id = in ipr_alloc_cmd_blks()
9448 ioa_cfg->hrrq[i].min_cmd_id = 0; in ipr_alloc_cmd_blks()
9449 ioa_cfg->hrrq[i].max_cmd_id = (entries_each_hrrq - 1); in ipr_alloc_cmd_blks()
9451 ioa_cfg->hrrq[i].size = entries_each_hrrq; in ipr_alloc_cmd_blks()
9454 BUG_ON(ioa_cfg->hrrq_num == 0); in ipr_alloc_cmd_blks()
9457 ioa_cfg->hrrq[ioa_cfg->hrrq_num - 1].max_cmd_id - 1; in ipr_alloc_cmd_blks()
9459 ioa_cfg->hrrq[ioa_cfg->hrrq_num - 1].size += i; in ipr_alloc_cmd_blks()
9460 ioa_cfg->hrrq[ioa_cfg->hrrq_num - 1].max_cmd_id += i; in ipr_alloc_cmd_blks()
9464 ipr_cmd = dma_pool_alloc(ioa_cfg->ipr_cmd_pool, GFP_KERNEL, &dma_addr); in ipr_alloc_cmd_blks()
9467 ipr_free_cmd_blks(ioa_cfg); in ipr_alloc_cmd_blks()
9472 ioa_cfg->ipr_cmnd_list[i] = ipr_cmd; in ipr_alloc_cmd_blks()
9473 ioa_cfg->ipr_cmnd_list_dma[i] = dma_addr; in ipr_alloc_cmd_blks()
9477 if (ioa_cfg->sis64) in ipr_alloc_cmd_blks()
9483 if (ioa_cfg->sis64) { in ipr_alloc_cmd_blks()
9497 ipr_cmd->ioa_cfg = ioa_cfg; in ipr_alloc_cmd_blks()
9502 ipr_cmd->hrrq = &ioa_cfg->hrrq[hrrq_id]; in ipr_alloc_cmd_blks()
9504 if (i >= ioa_cfg->hrrq[hrrq_id].max_cmd_id) in ipr_alloc_cmd_blks()
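The min_cmd_id/max_cmd_id arithmetic in ipr_alloc_cmd_blks() splits the command-block pool across the response queues, with queue 0 reserved for internal commands when there is more than one queue, and the division remainder folded into the last queue (the BUG_ON guards hrrq_num before the remainder math). A standalone sketch of one plausible split; the pool size and the internal reservation are assumed constants, not values from the listing:

    #include <stdio.h>

    #define NUM_CMD_BLKS      100
    #define NUM_INTERNAL_BLKS 5   /* assumed reservation for queue 0 */

    int main(void)
    {
            int hrrq_num = 4, min_id[8], max_id[8];

            /* Queue 0 gets the internal reservation, the rest split evenly. */
            int share = (NUM_CMD_BLKS - NUM_INTERNAL_BLKS) / (hrrq_num - 1);

            min_id[0] = 0;
            max_id[0] = NUM_INTERNAL_BLKS - 1;
            for (int i = 1; i < hrrq_num; i++) {
                    min_id[i] = max_id[i - 1] + 1;
                    max_id[i] = min_id[i] + share - 1;
            }

            /* Fold the division remainder into the last queue. */
            max_id[hrrq_num - 1] = NUM_CMD_BLKS - 1;

            for (int i = 0; i < hrrq_num; i++)
                    printf("hrrq %d: ids %d..%d\n", i, min_id[i], max_id[i]);
            return 0;
    }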
9518 static int ipr_alloc_mem(struct ipr_ioa_cfg *ioa_cfg) in ipr_alloc_mem() argument
9520 struct pci_dev *pdev = ioa_cfg->pdev; in ipr_alloc_mem()
9524 ioa_cfg->res_entries = kzalloc(sizeof(struct ipr_resource_entry) * in ipr_alloc_mem()
9525 ioa_cfg->max_devs_supported, GFP_KERNEL); in ipr_alloc_mem()
9527 if (!ioa_cfg->res_entries) in ipr_alloc_mem()
9530 for (i = 0; i < ioa_cfg->max_devs_supported; i++) { in ipr_alloc_mem()
9531 list_add_tail(&ioa_cfg->res_entries[i].queue, &ioa_cfg->free_res_q); in ipr_alloc_mem()
9532 ioa_cfg->res_entries[i].ioa_cfg = ioa_cfg; in ipr_alloc_mem()
9535 ioa_cfg->vpd_cbs = dma_alloc_coherent(&pdev->dev, in ipr_alloc_mem()
9537 &ioa_cfg->vpd_cbs_dma, in ipr_alloc_mem()
9540 if (!ioa_cfg->vpd_cbs) in ipr_alloc_mem()
9543 if (ipr_alloc_cmd_blks(ioa_cfg)) in ipr_alloc_mem()
9546 for (i = 0; i < ioa_cfg->hrrq_num; i++) { in ipr_alloc_mem()
9547 ioa_cfg->hrrq[i].host_rrq = dma_alloc_coherent(&pdev->dev, in ipr_alloc_mem()
9548 sizeof(u32) * ioa_cfg->hrrq[i].size, in ipr_alloc_mem()
9549 &ioa_cfg->hrrq[i].host_rrq_dma, in ipr_alloc_mem()
9552 if (!ioa_cfg->hrrq[i].host_rrq) { in ipr_alloc_mem()
9555 sizeof(u32) * ioa_cfg->hrrq[i].size, in ipr_alloc_mem()
9556 ioa_cfg->hrrq[i].host_rrq, in ipr_alloc_mem()
9557 ioa_cfg->hrrq[i].host_rrq_dma); in ipr_alloc_mem()
9560 ioa_cfg->hrrq[i].ioa_cfg = ioa_cfg; in ipr_alloc_mem()
9563 ioa_cfg->u.cfg_table = dma_alloc_coherent(&pdev->dev, in ipr_alloc_mem()
9564 ioa_cfg->cfg_table_size, in ipr_alloc_mem()
9565 &ioa_cfg->cfg_table_dma, in ipr_alloc_mem()
9568 if (!ioa_cfg->u.cfg_table) in ipr_alloc_mem()
9572 ioa_cfg->hostrcb[i] = dma_alloc_coherent(&pdev->dev, in ipr_alloc_mem()
9574 &ioa_cfg->hostrcb_dma[i], in ipr_alloc_mem()
9577 if (!ioa_cfg->hostrcb[i]) in ipr_alloc_mem()
9580 ioa_cfg->hostrcb[i]->hostrcb_dma = in ipr_alloc_mem()
9581 ioa_cfg->hostrcb_dma[i] + offsetof(struct ipr_hostrcb, hcam); in ipr_alloc_mem()
9582 ioa_cfg->hostrcb[i]->ioa_cfg = ioa_cfg; in ipr_alloc_mem()
9583 list_add_tail(&ioa_cfg->hostrcb[i]->queue, &ioa_cfg->hostrcb_free_q); in ipr_alloc_mem()
9586 ioa_cfg->trace = kzalloc(sizeof(struct ipr_trace_entry) * in ipr_alloc_mem()
9589 if (!ioa_cfg->trace) in ipr_alloc_mem()
9600 ioa_cfg->hostrcb[i], in ipr_alloc_mem()
9601 ioa_cfg->hostrcb_dma[i]); in ipr_alloc_mem()
9603 dma_free_coherent(&pdev->dev, ioa_cfg->cfg_table_size, in ipr_alloc_mem()
9604 ioa_cfg->u.cfg_table, ioa_cfg->cfg_table_dma); in ipr_alloc_mem()
9606 for (i = 0; i < ioa_cfg->hrrq_num; i++) { in ipr_alloc_mem()
9608 sizeof(u32) * ioa_cfg->hrrq[i].size, in ipr_alloc_mem()
9609 ioa_cfg->hrrq[i].host_rrq, in ipr_alloc_mem()
9610 ioa_cfg->hrrq[i].host_rrq_dma); in ipr_alloc_mem()
9613 ipr_free_cmd_blks(ioa_cfg); in ipr_alloc_mem()
9616 ioa_cfg->vpd_cbs, ioa_cfg->vpd_cbs_dma); in ipr_alloc_mem()
9618 kfree(ioa_cfg->res_entries); in ipr_alloc_mem()
9629 static void ipr_initialize_bus_attr(struct ipr_ioa_cfg *ioa_cfg) in ipr_initialize_bus_attr() argument
9634 ioa_cfg->bus_attr[i].bus = i; in ipr_initialize_bus_attr()
9635 ioa_cfg->bus_attr[i].qas_enabled = 0; in ipr_initialize_bus_attr()
9636 ioa_cfg->bus_attr[i].bus_width = IPR_DEFAULT_BUS_WIDTH; in ipr_initialize_bus_attr()
9638 ioa_cfg->bus_attr[i].max_xfer_rate = ipr_max_bus_speeds[ipr_max_speed]; in ipr_initialize_bus_attr()
9640 ioa_cfg->bus_attr[i].max_xfer_rate = IPR_U160_SCSI_RATE; in ipr_initialize_bus_attr()
9651 static void ipr_init_regs(struct ipr_ioa_cfg *ioa_cfg) in ipr_init_regs() argument
9657 p = &ioa_cfg->chip_cfg->regs; in ipr_init_regs()
9658 t = &ioa_cfg->regs; in ipr_init_regs()
9659 base = ioa_cfg->hdw_dma_regs; in ipr_init_regs()
9678 if (ioa_cfg->sis64) { in ipr_init_regs()
9695 static void ipr_init_ioa_cfg(struct ipr_ioa_cfg *ioa_cfg, in ipr_init_ioa_cfg() argument
9700 ioa_cfg->host = host; in ipr_init_ioa_cfg()
9701 ioa_cfg->pdev = pdev; in ipr_init_ioa_cfg()
9702 ioa_cfg->log_level = ipr_log_level; in ipr_init_ioa_cfg()
9703 ioa_cfg->doorbell = IPR_DOORBELL; in ipr_init_ioa_cfg()
9704 sprintf(ioa_cfg->eye_catcher, IPR_EYECATCHER); in ipr_init_ioa_cfg()
9705 sprintf(ioa_cfg->trace_start, IPR_TRACE_START_LABEL); in ipr_init_ioa_cfg()
9706 sprintf(ioa_cfg->cfg_table_start, IPR_CFG_TBL_START); in ipr_init_ioa_cfg()
9707 sprintf(ioa_cfg->resource_table_label, IPR_RES_TABLE_LABEL); in ipr_init_ioa_cfg()
9708 sprintf(ioa_cfg->ipr_hcam_label, IPR_HCAM_LABEL); in ipr_init_ioa_cfg()
9709 sprintf(ioa_cfg->ipr_cmd_label, IPR_CMD_LABEL); in ipr_init_ioa_cfg()
9711 INIT_LIST_HEAD(&ioa_cfg->hostrcb_free_q); in ipr_init_ioa_cfg()
9712 INIT_LIST_HEAD(&ioa_cfg->hostrcb_pending_q); in ipr_init_ioa_cfg()
9713 INIT_LIST_HEAD(&ioa_cfg->free_res_q); in ipr_init_ioa_cfg()
9714 INIT_LIST_HEAD(&ioa_cfg->used_res_q); in ipr_init_ioa_cfg()
9715 INIT_WORK(&ioa_cfg->work_q, ipr_worker_thread); in ipr_init_ioa_cfg()
9716 init_waitqueue_head(&ioa_cfg->reset_wait_q); in ipr_init_ioa_cfg()
9717 init_waitqueue_head(&ioa_cfg->msi_wait_q); in ipr_init_ioa_cfg()
9718 init_waitqueue_head(&ioa_cfg->eeh_wait_q); in ipr_init_ioa_cfg()
9719 ioa_cfg->sdt_state = INACTIVE; in ipr_init_ioa_cfg()
9721 ipr_initialize_bus_attr(ioa_cfg); in ipr_init_ioa_cfg()
9722 ioa_cfg->max_devs_supported = ipr_max_devs; in ipr_init_ioa_cfg()
9724 if (ioa_cfg->sis64) { in ipr_init_ioa_cfg()
9728 ioa_cfg->max_devs_supported = IPR_MAX_SIS64_DEVS; in ipr_init_ioa_cfg()
9729 ioa_cfg->cfg_table_size = (sizeof(struct ipr_config_table_hdr64) in ipr_init_ioa_cfg()
9731 * ioa_cfg->max_devs_supported))); in ipr_init_ioa_cfg()
9736 ioa_cfg->max_devs_supported = IPR_MAX_PHYSICAL_DEVS; in ipr_init_ioa_cfg()
9737 ioa_cfg->cfg_table_size = (sizeof(struct ipr_config_table_hdr) in ipr_init_ioa_cfg()
9739 * ioa_cfg->max_devs_supported))); in ipr_init_ioa_cfg()
9745 host->can_queue = ioa_cfg->max_cmds; in ipr_init_ioa_cfg()
9746 pci_set_drvdata(pdev, ioa_cfg); in ipr_init_ioa_cfg()
9748 for (i = 0; i < ARRAY_SIZE(ioa_cfg->hrrq); i++) { in ipr_init_ioa_cfg()
9749 INIT_LIST_HEAD(&ioa_cfg->hrrq[i].hrrq_free_q); in ipr_init_ioa_cfg()
9750 INIT_LIST_HEAD(&ioa_cfg->hrrq[i].hrrq_pending_q); in ipr_init_ioa_cfg()
9751 spin_lock_init(&ioa_cfg->hrrq[i]._lock); in ipr_init_ioa_cfg()
9753 ioa_cfg->hrrq[i].lock = ioa_cfg->host->host_lock; in ipr_init_ioa_cfg()
9755 ioa_cfg->hrrq[i].lock = &ioa_cfg->hrrq[i]._lock; in ipr_init_ioa_cfg()
9786 static void ipr_wait_for_pci_err_recovery(struct ipr_ioa_cfg *ioa_cfg) in ipr_wait_for_pci_err_recovery() argument
9788 struct pci_dev *pdev = ioa_cfg->pdev; in ipr_wait_for_pci_err_recovery()
9791 wait_event_timeout(ioa_cfg->eeh_wait_q, in ipr_wait_for_pci_err_recovery()
9798 static int ipr_enable_msix(struct ipr_ioa_cfg *ioa_cfg) in ipr_enable_msix() argument
9806 vectors = pci_enable_msix_range(ioa_cfg->pdev, in ipr_enable_msix()
9809 ipr_wait_for_pci_err_recovery(ioa_cfg); in ipr_enable_msix()
9814 ioa_cfg->vectors_info[i].vec = entries[i].vector; in ipr_enable_msix()
9815 ioa_cfg->nvectors = vectors; in ipr_enable_msix()
9820 static int ipr_enable_msi(struct ipr_ioa_cfg *ioa_cfg) in ipr_enable_msi() argument
9824 vectors = pci_enable_msi_range(ioa_cfg->pdev, 1, ipr_number_of_msix); in ipr_enable_msi()
9826 ipr_wait_for_pci_err_recovery(ioa_cfg); in ipr_enable_msi()
9831 ioa_cfg->vectors_info[i].vec = ioa_cfg->pdev->irq + i; in ipr_enable_msi()
9832 ioa_cfg->nvectors = vectors; in ipr_enable_msi()
9837 static void name_msi_vectors(struct ipr_ioa_cfg *ioa_cfg) in name_msi_vectors() argument
9839 int vec_idx, n = sizeof(ioa_cfg->vectors_info[0].desc) - 1; in name_msi_vectors()
9841 for (vec_idx = 0; vec_idx < ioa_cfg->nvectors; vec_idx++) { in name_msi_vectors()
9842 snprintf(ioa_cfg->vectors_info[vec_idx].desc, n, in name_msi_vectors()
9843 "host%d-%d", ioa_cfg->host->host_no, vec_idx); in name_msi_vectors()
9844 ioa_cfg->vectors_info[vec_idx]. in name_msi_vectors()
9845 desc[strlen(ioa_cfg->vectors_info[vec_idx].desc)] = 0; in name_msi_vectors()
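name_msi_vectors() builds a per-vector IRQ name of the form "host%d-%d" with snprintf(), sized one short of the buffer, then explicitly stores a terminator. In isolation:

    #include <stdio.h>
    #include <string.h>

    struct vec_info { char desc[16]; };

    int main(void)
    {
            struct vec_info info[4];
            int host_no = 2, n = sizeof(info[0].desc) - 1;

            for (int v = 0; v < 4; v++) {
                    snprintf(info[v].desc, n, "host%d-%d", host_no, v);
                    /* Re-stamps the NUL snprintf() already wrote:
                     * redundant but harmless belt-and-braces. */
                    info[v].desc[strlen(info[v].desc)] = 0;
                    printf("%s\n", info[v].desc);
            }
            return 0;
    }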
9849 static int ipr_request_other_msi_irqs(struct ipr_ioa_cfg *ioa_cfg) in ipr_request_other_msi_irqs() argument
9853 for (i = 1; i < ioa_cfg->nvectors; i++) { in ipr_request_other_msi_irqs()
9854 rc = request_irq(ioa_cfg->vectors_info[i].vec, in ipr_request_other_msi_irqs()
9857 ioa_cfg->vectors_info[i].desc, in ipr_request_other_msi_irqs()
9858 &ioa_cfg->hrrq[i]); in ipr_request_other_msi_irqs()
9861 free_irq(ioa_cfg->vectors_info[i].vec, in ipr_request_other_msi_irqs()
9862 &ioa_cfg->hrrq[i]); in ipr_request_other_msi_irqs()
9881 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)devp; in ipr_test_intr() local
9885 dev_info(&ioa_cfg->pdev->dev, "Received IRQ : %d\n", irq); in ipr_test_intr()
9886 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); in ipr_test_intr()
9888 ioa_cfg->msi_received = 1; in ipr_test_intr()
9889 wake_up(&ioa_cfg->msi_wait_q); in ipr_test_intr()
9891 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_test_intr()
9907 static int ipr_test_msi(struct ipr_ioa_cfg *ioa_cfg, struct pci_dev *pdev) in ipr_test_msi() argument
9915 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); in ipr_test_msi()
9916 init_waitqueue_head(&ioa_cfg->msi_wait_q); in ipr_test_msi()
9917 ioa_cfg->msi_received = 0; in ipr_test_msi()
9918 ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER); in ipr_test_msi()
9919 writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE, ioa_cfg->regs.clr_interrupt_mask_reg32); in ipr_test_msi()
9920 int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg); in ipr_test_msi()
9921 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_test_msi()
9923 if (ioa_cfg->intr_flag == IPR_USE_MSIX) in ipr_test_msi()
9924 rc = request_irq(ioa_cfg->vectors_info[0].vec, ipr_test_intr, 0, IPR_NAME, ioa_cfg); in ipr_test_msi()
9926 rc = request_irq(pdev->irq, ipr_test_intr, 0, IPR_NAME, ioa_cfg); in ipr_test_msi()
9933 writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE, ioa_cfg->regs.sense_interrupt_reg32); in ipr_test_msi()
9934 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg); in ipr_test_msi()
9935 wait_event_timeout(ioa_cfg->msi_wait_q, ioa_cfg->msi_received, HZ); in ipr_test_msi()
9936 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); in ipr_test_msi()
9937 ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER); in ipr_test_msi()
9939 if (!ioa_cfg->msi_received) { in ipr_test_msi()
9946 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_test_msi()
9948 if (ioa_cfg->intr_flag == IPR_USE_MSIX) in ipr_test_msi()
9949 free_irq(ioa_cfg->vectors_info[0].vec, ioa_cfg); in ipr_test_msi()
9951 free_irq(pdev->irq, ioa_cfg); in ipr_test_msi()
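ipr_test_msi() validates interrupt delivery end to end: it clears msi_received, unmasks only the IO debug-acknowledge interrupt, writes that bit to trigger a test interrupt, then sleeps up to one second (HZ) on msi_wait_q; if the handler never sets msi_received, the probe path falls back to LSI. A user-space analogue of that wait-with-timeout handshake using pthreads, where the condition variable stands in for the kernel waitqueue and a delayed thread stands in for the test ISR:

    #include <pthread.h>
    #include <stdio.h>
    #include <time.h>
    #include <unistd.h>

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static pthread_cond_t  wq   = PTHREAD_COND_INITIALIZER;
    static int msi_received;

    /* Stands in for ipr_test_intr(): fires shortly after the trigger. */
    static void *test_intr(void *arg)
    {
            (void)arg;
            usleep(100 * 1000);
            pthread_mutex_lock(&lock);
            msi_received = 1;
            pthread_cond_signal(&wq);   /* wake_up(&msi_wait_q) */
            pthread_mutex_unlock(&lock);
            return NULL;
    }

    int main(void)
    {
            pthread_t isr;
            struct timespec deadline;

            clock_gettime(CLOCK_REALTIME, &deadline);
            deadline.tv_sec += 1;       /* wait at most one second */

            pthread_create(&isr, NULL, test_intr, NULL); /* "trigger" the IRQ */

            pthread_mutex_lock(&lock);
            while (!msi_received &&
                   pthread_cond_timedwait(&wq, &lock, &deadline) == 0)
                    ;                   /* loop absorbs spurious wakeups */
            printf(msi_received ? "MSI test succeeded\n"
                                : "MSI not supported, falling back to LSI\n");
            pthread_mutex_unlock(&lock);
            pthread_join(isr, NULL);
            return 0;
    }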
9968 struct ipr_ioa_cfg *ioa_cfg; in ipr_probe_ioa() local
9979 host = scsi_host_alloc(&driver_template, sizeof(*ioa_cfg)); in ipr_probe_ioa()
9987 ioa_cfg = (struct ipr_ioa_cfg *)host->hostdata; in ipr_probe_ioa()
9988 memset(ioa_cfg, 0, sizeof(struct ipr_ioa_cfg)); in ipr_probe_ioa()
9989 ata_host_init(&ioa_cfg->ata_host, &pdev->dev, &ipr_sata_ops); in ipr_probe_ioa()
9991 ioa_cfg->ipr_chip = ipr_get_chip_info(dev_id); in ipr_probe_ioa()
9993 if (!ioa_cfg->ipr_chip) { in ipr_probe_ioa()
10000 ioa_cfg->sis64 = ioa_cfg->ipr_chip->sis_type == IPR_SIS64 ? 1 : 0; in ipr_probe_ioa()
10001 ioa_cfg->chip_cfg = ioa_cfg->ipr_chip->cfg; in ipr_probe_ioa()
10002 ioa_cfg->clear_isr = ioa_cfg->chip_cfg->clear_isr; in ipr_probe_ioa()
10003 ioa_cfg->max_cmds = ioa_cfg->chip_cfg->max_cmds; in ipr_probe_ioa()
10006 ioa_cfg->transop_timeout = ipr_transop_timeout; in ipr_probe_ioa()
10008 ioa_cfg->transop_timeout = IPR_LONG_OPERATIONAL_TIMEOUT; in ipr_probe_ioa()
10010 ioa_cfg->transop_timeout = IPR_OPERATIONAL_TIMEOUT; in ipr_probe_ioa()
10012 ioa_cfg->revid = pdev->revision; in ipr_probe_ioa()
10014 ipr_init_ioa_cfg(ioa_cfg, host, pdev); in ipr_probe_ioa()
10029 ipr_wait_for_pci_err_recovery(ioa_cfg); in ipr_probe_ioa()
10035 ipr_wait_for_pci_err_recovery(ioa_cfg); in ipr_probe_ioa()
10049 ioa_cfg->hdw_dma_regs = ipr_regs; in ipr_probe_ioa()
10050 ioa_cfg->hdw_dma_regs_pci = ipr_regs_pci; in ipr_probe_ioa()
10051 ioa_cfg->ioa_mailbox = ioa_cfg->chip_cfg->mailbox + ipr_regs; in ipr_probe_ioa()
10053 ipr_init_regs(ioa_cfg); in ipr_probe_ioa()
10055 if (ioa_cfg->sis64) { in ipr_probe_ioa()
10071 ioa_cfg->chip_cfg->cache_line_size); in ipr_probe_ioa()
10075 ipr_wait_for_pci_err_recovery(ioa_cfg); in ipr_probe_ioa()
10081 interrupts = readl(ioa_cfg->regs.sense_interrupt_reg); in ipr_probe_ioa()
10082 ipr_wait_for_pci_err_recovery(ioa_cfg); in ipr_probe_ioa()
10090 if (ioa_cfg->ipr_chip->intr_type == IPR_USE_MSI && in ipr_probe_ioa()
10091 ipr_enable_msix(ioa_cfg) == 0) in ipr_probe_ioa()
10092 ioa_cfg->intr_flag = IPR_USE_MSIX; in ipr_probe_ioa()
10093 else if (ioa_cfg->ipr_chip->intr_type == IPR_USE_MSI && in ipr_probe_ioa()
10094 ipr_enable_msi(ioa_cfg) == 0) in ipr_probe_ioa()
10095 ioa_cfg->intr_flag = IPR_USE_MSI; in ipr_probe_ioa()
10097 ioa_cfg->intr_flag = IPR_USE_LSI; in ipr_probe_ioa()
10098 ioa_cfg->nvectors = 1; in ipr_probe_ioa()
10105 ipr_wait_for_pci_err_recovery(ioa_cfg); in ipr_probe_ioa()
10113 if (ioa_cfg->intr_flag == IPR_USE_MSI || in ipr_probe_ioa()
10114 ioa_cfg->intr_flag == IPR_USE_MSIX) { in ipr_probe_ioa()
10115 rc = ipr_test_msi(ioa_cfg, pdev); in ipr_probe_ioa()
10117 ipr_wait_for_pci_err_recovery(ioa_cfg); in ipr_probe_ioa()
10118 if (ioa_cfg->intr_flag == IPR_USE_MSI) { in ipr_probe_ioa()
10119 ioa_cfg->intr_flag &= ~IPR_USE_MSI; in ipr_probe_ioa()
10121 } else if (ioa_cfg->intr_flag == IPR_USE_MSIX) { in ipr_probe_ioa()
10122 ioa_cfg->intr_flag &= ~IPR_USE_MSIX; in ipr_probe_ioa()
10126 ioa_cfg->intr_flag = IPR_USE_LSI; in ipr_probe_ioa()
10127 ioa_cfg->nvectors = 1; in ipr_probe_ioa()
10132 if (ioa_cfg->intr_flag == IPR_USE_MSI) in ipr_probe_ioa()
10135 ioa_cfg->nvectors, pdev->irq); in ipr_probe_ioa()
10136 else if (ioa_cfg->intr_flag == IPR_USE_MSIX) in ipr_probe_ioa()
10139 ioa_cfg->nvectors); in ipr_probe_ioa()
10143 ioa_cfg->hrrq_num = min3(ioa_cfg->nvectors, in ipr_probe_ioa()
10147 if ((rc = ipr_save_pcix_cmd_reg(ioa_cfg))) in ipr_probe_ioa()
10150 if ((rc = ipr_set_pcix_cmd_reg(ioa_cfg))) in ipr_probe_ioa()
10153 rc = ipr_alloc_mem(ioa_cfg); in ipr_probe_ioa()
10173 mask = readl(ioa_cfg->regs.sense_interrupt_mask_reg32); in ipr_probe_ioa()
10174 interrupts = readl(ioa_cfg->regs.sense_interrupt_reg32); in ipr_probe_ioa()
10175 uproc = readl(ioa_cfg->regs.sense_uproc_interrupt_reg32); in ipr_probe_ioa()
10177 ioa_cfg->needs_hard_reset = 1; in ipr_probe_ioa()
10179 ioa_cfg->needs_hard_reset = 1; in ipr_probe_ioa()
10181 ioa_cfg->ioa_unit_checked = 1; in ipr_probe_ioa()
10183 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); in ipr_probe_ioa()
10184 ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER); in ipr_probe_ioa()
10185 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_probe_ioa()
10187 if (ioa_cfg->intr_flag == IPR_USE_MSI in ipr_probe_ioa()
10188 || ioa_cfg->intr_flag == IPR_USE_MSIX) { in ipr_probe_ioa()
10189 name_msi_vectors(ioa_cfg); in ipr_probe_ioa()
10190 rc = request_irq(ioa_cfg->vectors_info[0].vec, ipr_isr, in ipr_probe_ioa()
10192 ioa_cfg->vectors_info[0].desc, in ipr_probe_ioa()
10193 &ioa_cfg->hrrq[0]); in ipr_probe_ioa()
10195 rc = ipr_request_other_msi_irqs(ioa_cfg); in ipr_probe_ioa()
10199 IPR_NAME, &ioa_cfg->hrrq[0]); in ipr_probe_ioa()
10208 (dev_id->device == PCI_DEVICE_ID_IBM_OBSIDIAN_E && !ioa_cfg->revid)) { in ipr_probe_ioa()
10209 ioa_cfg->needs_warm_reset = 1; in ipr_probe_ioa()
10210 ioa_cfg->reset = ipr_reset_slot_reset; in ipr_probe_ioa()
10212 ioa_cfg->reset_work_q = alloc_ordered_workqueue("ipr_reset_%d", in ipr_probe_ioa()
10215 if (!ioa_cfg->reset_work_q) { in ipr_probe_ioa()
10220 ioa_cfg->reset = ipr_reset_start_bist; in ipr_probe_ioa()
10223 list_add_tail(&ioa_cfg->queue, &ipr_ioa_head); in ipr_probe_ioa()
10231 ipr_free_irqs(ioa_cfg); in ipr_probe_ioa()
10233 ipr_free_mem(ioa_cfg); in ipr_probe_ioa()
10235 ipr_wait_for_pci_err_recovery(ioa_cfg); in ipr_probe_ioa()
10236 if (ioa_cfg->intr_flag == IPR_USE_MSI) in ipr_probe_ioa()
10238 else if (ioa_cfg->intr_flag == IPR_USE_MSIX) in ipr_probe_ioa()
10265 static void ipr_initiate_ioa_bringdown(struct ipr_ioa_cfg *ioa_cfg, in ipr_initiate_ioa_bringdown() argument
10269 if (ioa_cfg->sdt_state == WAIT_FOR_DUMP) in ipr_initiate_ioa_bringdown()
10270 ioa_cfg->sdt_state = ABORT_DUMP; in ipr_initiate_ioa_bringdown()
10271 ioa_cfg->reset_retries = 0; in ipr_initiate_ioa_bringdown()
10272 ioa_cfg->in_ioa_bringdown = 1; in ipr_initiate_ioa_bringdown()
10273 ipr_initiate_ioa_reset(ioa_cfg, shutdown_type); in ipr_initiate_ioa_bringdown()
10289 struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev); in __ipr_remove() local
10294 spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags); in __ipr_remove()
10295 while (ioa_cfg->in_reset_reload) { in __ipr_remove()
10296 spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags); in __ipr_remove()
10297 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload); in __ipr_remove()
10298 spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags); in __ipr_remove()
10301 for (i = 0; i < ioa_cfg->hrrq_num; i++) { in __ipr_remove()
10302 spin_lock(&ioa_cfg->hrrq[i]._lock); in __ipr_remove()
10303 ioa_cfg->hrrq[i].removing_ioa = 1; in __ipr_remove()
10304 spin_unlock(&ioa_cfg->hrrq[i]._lock); in __ipr_remove()
10307 ipr_initiate_ioa_bringdown(ioa_cfg, IPR_SHUTDOWN_NORMAL); in __ipr_remove()
10309 spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags); in __ipr_remove()
10310 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload); in __ipr_remove()
10311 flush_work(&ioa_cfg->work_q); in __ipr_remove()
10312 if (ioa_cfg->reset_work_q) in __ipr_remove()
10313 flush_workqueue(ioa_cfg->reset_work_q); in __ipr_remove()
10314 INIT_LIST_HEAD(&ioa_cfg->used_res_q); in __ipr_remove()
10315 spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags); in __ipr_remove()
10318 list_del(&ioa_cfg->queue); in __ipr_remove()
10321 if (ioa_cfg->sdt_state == ABORT_DUMP) in __ipr_remove()
10322 ioa_cfg->sdt_state = WAIT_FOR_DUMP; in __ipr_remove()
10323 spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags); in __ipr_remove()
10325 ipr_free_all_resources(ioa_cfg); in __ipr_remove()
10341 struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev); in ipr_remove() local
10345 ipr_remove_trace_file(&ioa_cfg->host->shost_dev.kobj, in ipr_remove()
10347 ipr_remove_dump_file(&ioa_cfg->host->shost_dev.kobj, in ipr_remove()
10349 scsi_remove_host(ioa_cfg->host); in ipr_remove()
10364 struct ipr_ioa_cfg *ioa_cfg; in ipr_probe() local
10372 ioa_cfg = pci_get_drvdata(pdev); in ipr_probe()
10373 rc = ipr_probe_ioa_part2(ioa_cfg); in ipr_probe()
10380 rc = scsi_add_host(ioa_cfg->host, &pdev->dev); in ipr_probe()
10387 rc = ipr_create_trace_file(&ioa_cfg->host->shost_dev.kobj, in ipr_probe()
10391 scsi_remove_host(ioa_cfg->host); in ipr_probe()
10396 rc = ipr_create_dump_file(&ioa_cfg->host->shost_dev.kobj, in ipr_probe()
10400 ipr_remove_trace_file(&ioa_cfg->host->shost_dev.kobj, in ipr_probe()
10402 scsi_remove_host(ioa_cfg->host); in ipr_probe()
10407 scsi_scan_host(ioa_cfg->host); in ipr_probe()
10408 ioa_cfg->iopoll_weight = ioa_cfg->chip_cfg->iopoll_weight; in ipr_probe()
10410 if (ioa_cfg->iopoll_weight && ioa_cfg->sis64 && ioa_cfg->nvectors > 1) { in ipr_probe()
10411 for (i = 1; i < ioa_cfg->hrrq_num; i++) { in ipr_probe()
10412 blk_iopoll_init(&ioa_cfg->hrrq[i].iopoll, in ipr_probe()
10413 ioa_cfg->iopoll_weight, ipr_iopoll); in ipr_probe()
10414 blk_iopoll_enable(&ioa_cfg->hrrq[i].iopoll); in ipr_probe()
10418 schedule_work(&ioa_cfg->work_q); in ipr_probe()
10434 struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev); in ipr_shutdown() local
10439 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); in ipr_shutdown()
10440 if (ioa_cfg->iopoll_weight && ioa_cfg->sis64 && ioa_cfg->nvectors > 1) { in ipr_shutdown()
10441 ioa_cfg->iopoll_weight = 0; in ipr_shutdown()
10442 for (i = 1; i < ioa_cfg->hrrq_num; i++) in ipr_shutdown()
10443 blk_iopoll_disable(&ioa_cfg->hrrq[i].iopoll); in ipr_shutdown()
10446 while (ioa_cfg->in_reset_reload) { in ipr_shutdown()
10447 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_shutdown()
10448 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload); in ipr_shutdown()
10449 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); in ipr_shutdown()
10452 if (ipr_fast_reboot && system_state == SYSTEM_RESTART && ioa_cfg->sis64) in ipr_shutdown()
10455 ipr_initiate_ioa_bringdown(ioa_cfg, shutdown_type); in ipr_shutdown()
10456 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_shutdown()
10457 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload); in ipr_shutdown()
10458 if (ipr_fast_reboot && system_state == SYSTEM_RESTART && ioa_cfg->sis64) { in ipr_shutdown()
10459 ipr_free_irqs(ioa_cfg); in ipr_shutdown()
10460 pci_disable_device(ioa_cfg->pdev); in ipr_shutdown()
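Both __ipr_remove() and ipr_shutdown() use the same drain idiom before tearing anything down: while in_reset_reload is set, drop the host lock, sleep on reset_wait_q until the flag clears, retake the lock, and re-check, so the shutdown path never races an in-flight reset. The idiom as a pthread sketch, where the mutex plays host_lock and the condition variable plays reset_wait_q (pthread_cond_wait performs the unlock/relock that the kernel code spells out explicitly):

    #include <pthread.h>
    #include <stdio.h>
    #include <unistd.h>

    static pthread_mutex_t host_lock = PTHREAD_MUTEX_INITIALIZER;
    static pthread_cond_t  reset_wait_q = PTHREAD_COND_INITIALIZER;
    static int in_reset_reload = 1;

    static void *reset_thread(void *arg)
    {
            (void)arg;
            sleep(1);                   /* pretend a reset is running */
            pthread_mutex_lock(&host_lock);
            in_reset_reload = 0;
            pthread_cond_broadcast(&reset_wait_q); /* wake_up_all() */
            pthread_mutex_unlock(&host_lock);
            return NULL;
    }

    int main(void)
    {
            pthread_t t;

            pthread_create(&t, NULL, reset_thread, NULL);

            pthread_mutex_lock(&host_lock);
            /* Re-check the flag every time the lock is reacquired. */
            while (in_reset_reload)
                    pthread_cond_wait(&reset_wait_q, &host_lock);
            /* No reset in flight: safe to proceed with bringdown. */
            pthread_mutex_unlock(&host_lock);

            printf("reset drained, shutting down\n");
            pthread_join(t, NULL);
            return 0;
    }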
10609 struct ipr_ioa_cfg *ioa_cfg; in ipr_halt() local
10617 list_for_each_entry(ioa_cfg, &ipr_ioa_head, queue) { in ipr_halt()
10618 spin_lock_irqsave(ioa_cfg->host->host_lock, flags); in ipr_halt()
10619 if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds || in ipr_halt()
10620 (ipr_fast_reboot && event == SYS_RESTART && ioa_cfg->sis64)) { in ipr_halt()
10621 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags); in ipr_halt()
10625 ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg); in ipr_halt()
10632 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags); in ipr_halt()