Lines matching refs: ioa_cfg — references to struct ipr_ioa_cfg in the IBM Power RAID (ipr) SCSI driver, drivers/scsi/ipr.c. Each entry gives the source line number, the line itself, and the enclosing function; "local" marks a local variable declaration, "argument" a function (or macro) parameter.
601 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; in ipr_trc_hook() local
604 trace_index = atomic_add_return(1, &ioa_cfg->trace_index) & IPR_TRACE_INDEX_MASK; in ipr_trc_hook()
605 trace_entry = &ioa_cfg->trace[trace_index]; in ipr_trc_hook()
609 if (ipr_cmd->ioa_cfg->sis64) in ipr_trc_hook()
632 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; in ipr_lock_and_done() local
634 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); in ipr_lock_and_done()
636 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_lock_and_done()
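
The ipr_lock_and_done() lines above capture the driver's completion-under-lock idiom: take the Scsi_Host's host_lock with spin_lock_irqsave(), invoke the real done routine, then release. A minimal sketch of that shape, with the command and config structs pared down to the fields the listing shows (everything named demo_* is a simplification, not the driver's real layout):

#include <linux/spinlock.h>
#include <scsi/scsi_host.h>

struct demo_ioa_cfg {
        struct Scsi_Host *host;         /* host_lock lives behind this */
};

struct demo_cmnd {
        struct demo_ioa_cfg *ioa_cfg;   /* back-pointer, as at line 632 */
        void (*done)(struct demo_cmnd *);
};

static void demo_lock_and_done(struct demo_cmnd *cmd)
{
        unsigned long lock_flags;
        struct demo_ioa_cfg *ioa_cfg = cmd->ioa_cfg;

        /* irqsave form: this wrapper can run in interrupt context, and
         * host_lock serializes against the SCSI midlayer. */
        spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
        cmd->done(cmd);
        spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
}
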
662 if (ipr_cmd->ioa_cfg->sis64) { in ipr_reinit_ipr_cmnd()
729 struct ipr_cmnd *ipr_get_free_ipr_cmnd(struct ipr_ioa_cfg *ioa_cfg) in ipr_get_free_ipr_cmnd() argument
732 __ipr_get_free_ipr_cmnd(&ioa_cfg->hrrq[IPR_INIT_HRRQ]); in ipr_get_free_ipr_cmnd()
748 static void ipr_mask_and_clear_interrupts(struct ipr_ioa_cfg *ioa_cfg, in ipr_mask_and_clear_interrupts() argument
755 for (i = 0; i < ioa_cfg->hrrq_num; i++) { in ipr_mask_and_clear_interrupts()
756 spin_lock(&ioa_cfg->hrrq[i]._lock); in ipr_mask_and_clear_interrupts()
757 ioa_cfg->hrrq[i].allow_interrupts = 0; in ipr_mask_and_clear_interrupts()
758 spin_unlock(&ioa_cfg->hrrq[i]._lock); in ipr_mask_and_clear_interrupts()
763 if (ioa_cfg->sis64) in ipr_mask_and_clear_interrupts()
764 writeq(~0, ioa_cfg->regs.set_interrupt_mask_reg); in ipr_mask_and_clear_interrupts()
766 writel(~0, ioa_cfg->regs.set_interrupt_mask_reg); in ipr_mask_and_clear_interrupts()
769 if (ioa_cfg->sis64) in ipr_mask_and_clear_interrupts()
770 writel(~0, ioa_cfg->regs.clr_interrupt_reg); in ipr_mask_and_clear_interrupts()
771 writel(clr_ints, ioa_cfg->regs.clr_interrupt_reg32); in ipr_mask_and_clear_interrupts()
772 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg); in ipr_mask_and_clear_interrupts()
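
ipr_mask_and_clear_interrupts() (lines 748-772) shows the sis64 split that recurs throughout this listing: the 64-bit SIS interface gets a writeq() to the full-width register, the legacy interface a writel(), and a trailing readl() of the sense register flushes the posted MMIO writes. A hedged sketch of that shape, with the register block reduced to three fields (the real struct has more, and writeq assumes a 64-bit-capable build):

#include <linux/io.h>
#include <linux/types.h>

struct demo_regs {
        void __iomem *set_interrupt_mask_reg;
        void __iomem *clr_interrupt_reg32;
        void __iomem *sense_interrupt_reg;
};

static void demo_mask_and_clear(struct demo_regs *regs, bool sis64,
                                u32 clr_ints)
{
        /* Mask everything: full-width write on SIS-64, 32-bit otherwise. */
        if (sis64)
                writeq(~0, regs->set_interrupt_mask_reg);
        else
                writel(~0, regs->set_interrupt_mask_reg);

        /* Clear pending sources, then read back so the MMIO writes are
         * flushed before the caller proceeds (cf. line 772). */
        writel(clr_ints, regs->clr_interrupt_reg32);
        readl(regs->sense_interrupt_reg);
}
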
782 static int ipr_save_pcix_cmd_reg(struct ipr_ioa_cfg *ioa_cfg) in ipr_save_pcix_cmd_reg() argument
784 int pcix_cmd_reg = pci_find_capability(ioa_cfg->pdev, PCI_CAP_ID_PCIX); in ipr_save_pcix_cmd_reg()
789 if (pci_read_config_word(ioa_cfg->pdev, pcix_cmd_reg + PCI_X_CMD, in ipr_save_pcix_cmd_reg()
790 &ioa_cfg->saved_pcix_cmd_reg) != PCIBIOS_SUCCESSFUL) { in ipr_save_pcix_cmd_reg()
791 dev_err(&ioa_cfg->pdev->dev, "Failed to save PCI-X command register\n"); in ipr_save_pcix_cmd_reg()
795 ioa_cfg->saved_pcix_cmd_reg |= PCI_X_CMD_DPERR_E | PCI_X_CMD_ERO; in ipr_save_pcix_cmd_reg()
806 static int ipr_set_pcix_cmd_reg(struct ipr_ioa_cfg *ioa_cfg) in ipr_set_pcix_cmd_reg() argument
808 int pcix_cmd_reg = pci_find_capability(ioa_cfg->pdev, PCI_CAP_ID_PCIX); in ipr_set_pcix_cmd_reg()
811 if (pci_write_config_word(ioa_cfg->pdev, pcix_cmd_reg + PCI_X_CMD, in ipr_set_pcix_cmd_reg()
812 ioa_cfg->saved_pcix_cmd_reg) != PCIBIOS_SUCCESSFUL) { in ipr_set_pcix_cmd_reg()
813 dev_err(&ioa_cfg->pdev->dev, "Failed to setup PCI-X command register\n"); in ipr_set_pcix_cmd_reg()
874 static void ipr_fail_all_ops(struct ipr_ioa_cfg *ioa_cfg) in ipr_fail_all_ops() argument
880 for_each_hrrq(hrrq, ioa_cfg) { in ipr_fail_all_ops()
919 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; in ipr_send_command() local
922 if (ioa_cfg->sis64) { in ipr_send_command()
930 writeq(send_dma_addr, ioa_cfg->regs.ioarrin_reg); in ipr_send_command()
932 writel(send_dma_addr, ioa_cfg->regs.ioarrin_reg); in ipr_send_command()
1006 if (ipr_cmd->ioa_cfg->sis64) { in ipr_init_ioadl()
1043 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; in ipr_send_blocking_cmd() local
1048 spin_unlock_irq(ioa_cfg->host->host_lock); in ipr_send_blocking_cmd()
1050 spin_lock_irq(ioa_cfg->host->host_lock); in ipr_send_blocking_cmd()
1053 static int ipr_get_hrrq_index(struct ipr_ioa_cfg *ioa_cfg) in ipr_get_hrrq_index() argument
1057 if (ioa_cfg->hrrq_num == 1) in ipr_get_hrrq_index()
1060 hrrq = atomic_add_return(1, &ioa_cfg->hrrq_index); in ipr_get_hrrq_index()
1061 hrrq = (hrrq % (ioa_cfg->hrrq_num - 1)) + 1; in ipr_get_hrrq_index()
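
ipr_get_hrrq_index() (lines 1053-1061) spreads I/O across the host request/response queues while reserving index 0 (IPR_INIT_HRRQ) for internal commands: an atomic counter round-robins over indices 1..hrrq_num-1 with no lock. The same selection logic in isolation (demo_* names are mine):

#include <linux/atomic.h>

static int demo_get_hrrq_index(atomic_t *hrrq_index, unsigned int hrrq_num)
{
        unsigned int hrrq;

        if (hrrq_num == 1)
                return 0;       /* single queue: everything shares it */

        /* atomic_add_return() is a lock-free round-robin counter; the
         * modulo over (hrrq_num - 1) plus 1 skips the reserved queue 0. */
        hrrq = atomic_add_return(1, hrrq_index);
        hrrq = (hrrq % (hrrq_num - 1)) + 1;
        return hrrq;
}
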
1079 static void ipr_send_hcam(struct ipr_ioa_cfg *ioa_cfg, u8 type, in ipr_send_hcam() argument
1085 if (ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds) { in ipr_send_hcam()
1086 ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg); in ipr_send_hcam()
1088 list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_pending_q); in ipr_send_hcam()
1112 list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_free_q); in ipr_send_hcam()
1154 struct ipr_ioa_cfg *ioa_cfg = res->ioa_cfg; in ipr_init_res_entry() local
1166 if (ioa_cfg->sis64) { in ipr_init_res_entry()
1181 list_for_each_entry(gscsi_res, &ioa_cfg->used_res_q, queue) { in ipr_init_res_entry()
1189 res->target = find_first_zero_bit(ioa_cfg->target_ids, in ipr_init_res_entry()
1190 ioa_cfg->max_devs_supported); in ipr_init_res_entry()
1191 set_bit(res->target, ioa_cfg->target_ids); in ipr_init_res_entry()
1198 res->target = find_first_zero_bit(ioa_cfg->array_ids, in ipr_init_res_entry()
1199 ioa_cfg->max_devs_supported); in ipr_init_res_entry()
1200 set_bit(res->target, ioa_cfg->array_ids); in ipr_init_res_entry()
1203 res->target = find_first_zero_bit(ioa_cfg->vset_ids, in ipr_init_res_entry()
1204 ioa_cfg->max_devs_supported); in ipr_init_res_entry()
1205 set_bit(res->target, ioa_cfg->vset_ids); in ipr_init_res_entry()
1207 res->target = find_first_zero_bit(ioa_cfg->target_ids, in ipr_init_res_entry()
1208 ioa_cfg->max_devs_supported); in ipr_init_res_entry()
1209 set_bit(res->target, ioa_cfg->target_ids); in ipr_init_res_entry()
1240 if (res->ioa_cfg->sis64) { in ipr_is_same_device()
1289 static char *ipr_format_res_path(struct ipr_ioa_cfg *ioa_cfg, in ipr_format_res_path() argument
1295 p += snprintf(p, buffer + len - p, "%d/", ioa_cfg->host->host_no); in ipr_format_res_path()
1315 if (res->ioa_cfg->sis64) { in ipr_update_res_entry()
1340 ipr_format_res_path(res->ioa_cfg, in ipr_update_res_entry()
1372 struct ipr_ioa_cfg *ioa_cfg = res->ioa_cfg; in ipr_clear_res_target() local
1374 if (!ioa_cfg->sis64) in ipr_clear_res_target()
1378 clear_bit(res->target, ioa_cfg->array_ids); in ipr_clear_res_target()
1380 clear_bit(res->target, ioa_cfg->vset_ids); in ipr_clear_res_target()
1382 list_for_each_entry(gscsi_res, &ioa_cfg->used_res_q, queue) in ipr_clear_res_target()
1385 clear_bit(res->target, ioa_cfg->target_ids); in ipr_clear_res_target()
1388 clear_bit(res->target, ioa_cfg->target_ids); in ipr_clear_res_target()
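
Lines 1189-1209 and 1374-1388 are the two halves of target-ID management: allocation takes the first clear bit in the class-appropriate bitmap (target_ids, array_ids, or vset_ids) via find_first_zero_bit()/set_bit(), and ipr_clear_res_target() hands it back with clear_bit() once no other resource on used_res_q still shares it. A condensed allocate/free pair over a single bitmap (size and locking elided; the real code runs under host_lock):

#include <linux/bitops.h>

#define DEMO_MAX_DEVS 1024

static unsigned long demo_target_ids[BITS_TO_LONGS(DEMO_MAX_DEVS)];

/* First free ID in the bitmap becomes the SCSI target number. */
static int demo_alloc_target_id(void)
{
        int target = find_first_zero_bit(demo_target_ids, DEMO_MAX_DEVS);

        if (target < DEMO_MAX_DEVS)
                set_bit(target, demo_target_ids);
        return target;          /* DEMO_MAX_DEVS means none free */
}

static void demo_free_target_id(int target)
{
        clear_bit(target, demo_target_ids);
}
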
1399 static void ipr_handle_config_change(struct ipr_ioa_cfg *ioa_cfg, in ipr_handle_config_change() argument
1408 if (ioa_cfg->sis64) { in ipr_handle_config_change()
1416 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) { in ipr_handle_config_change()
1424 if (list_empty(&ioa_cfg->free_res_q)) { in ipr_handle_config_change()
1425 ipr_send_hcam(ioa_cfg, in ipr_handle_config_change()
1431 res = list_entry(ioa_cfg->free_res_q.next, in ipr_handle_config_change()
1436 list_add_tail(&res->queue, &ioa_cfg->used_res_q); in ipr_handle_config_change()
1445 schedule_work(&ioa_cfg->work_q); in ipr_handle_config_change()
1448 list_move_tail(&res->queue, &ioa_cfg->free_res_q); in ipr_handle_config_change()
1452 schedule_work(&ioa_cfg->work_q); in ipr_handle_config_change()
1455 ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE, hostrcb); in ipr_handle_config_change()
1470 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; in ipr_process_ccn() local
1480 dev_err(&ioa_cfg->pdev->dev, in ipr_process_ccn()
1483 ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE, hostrcb); in ipr_process_ccn()
1485 ipr_handle_config_change(ioa_cfg, hostrcb); in ipr_process_ccn()
1598 static void ipr_log_enhanced_cache_error(struct ipr_ioa_cfg *ioa_cfg, in ipr_log_enhanced_cache_error() argument
1603 if (ioa_cfg->sis64) in ipr_log_enhanced_cache_error()
1634 static void ipr_log_cache_error(struct ipr_ioa_cfg *ioa_cfg, in ipr_log_cache_error() argument
1666 static void ipr_log_enhanced_config_error(struct ipr_ioa_cfg *ioa_cfg, in ipr_log_enhanced_config_error() argument
1684 ipr_phys_res_err(ioa_cfg, dev_entry->dev_res_addr, "Device %d", i + 1); in ipr_log_enhanced_config_error()
1706 static void ipr_log_sis64_config_error(struct ipr_ioa_cfg *ioa_cfg, in ipr_log_sis64_config_error() argument
1749 static void ipr_log_config_error(struct ipr_ioa_cfg *ioa_cfg, in ipr_log_config_error() argument
1767 ipr_phys_res_err(ioa_cfg, dev_entry->dev_res_addr, "Device %d", i + 1); in ipr_log_config_error()
1796 static void ipr_log_enhanced_array_error(struct ipr_ioa_cfg *ioa_cfg, in ipr_log_enhanced_array_error() argument
1810 ioa_cfg->host->host_no, in ipr_log_enhanced_array_error()
1831 ipr_phys_res_err(ioa_cfg, array_entry->dev_res_addr, "Current Location"); in ipr_log_enhanced_array_error()
1832 ipr_phys_res_err(ioa_cfg, array_entry->expected_dev_res_addr, in ipr_log_enhanced_array_error()
1847 static void ipr_log_array_error(struct ipr_ioa_cfg *ioa_cfg, in ipr_log_array_error() argument
1861 ioa_cfg->host->host_no, in ipr_log_array_error()
1881 ipr_phys_res_err(ioa_cfg, array_entry->dev_res_addr, "Current Location"); in ipr_log_array_error()
1882 ipr_phys_res_err(ioa_cfg, array_entry->expected_dev_res_addr, in ipr_log_array_error()
1903 static void ipr_log_hex_data(struct ipr_ioa_cfg *ioa_cfg, u32 *data, int len) in ipr_log_hex_data() argument
1910 if (ioa_cfg->log_level <= IPR_DEFAULT_LOG_LEVEL) in ipr_log_hex_data()
1930 static void ipr_log_enhanced_dual_ioa_error(struct ipr_ioa_cfg *ioa_cfg, in ipr_log_enhanced_dual_ioa_error() argument
1935 if (ioa_cfg->sis64) in ipr_log_enhanced_dual_ioa_error()
1946 ipr_log_hex_data(ioa_cfg, error->data, in ipr_log_enhanced_dual_ioa_error()
1960 static void ipr_log_dual_ioa_error(struct ipr_ioa_cfg *ioa_cfg, in ipr_log_dual_ioa_error() argument
1972 ipr_log_hex_data(ioa_cfg, error->data, in ipr_log_dual_ioa_error()
2073 ipr_format_res_path(hostrcb->ioa_cfg, in ipr_log64_fabric_path()
2081 ipr_format_res_path(hostrcb->ioa_cfg, fabric->res_path, in ipr_log64_fabric_path()
2223 ipr_format_res_path(hostrcb->ioa_cfg, in ipr_log64_path_elem()
2233 ipr_format_res_path(hostrcb->ioa_cfg, in ipr_log64_path_elem()
2247 static void ipr_log_fabric_error(struct ipr_ioa_cfg *ioa_cfg, in ipr_log_fabric_error() argument
2273 ipr_log_hex_data(ioa_cfg, (u32 *)fabric, add_len); in ipr_log_fabric_error()
2284 static void ipr_log_sis64_array_error(struct ipr_ioa_cfg *ioa_cfg, in ipr_log_sis64_array_error() argument
2299 ipr_format_res_path(ioa_cfg, error->last_res_path, in ipr_log_sis64_array_error()
2321 ipr_format_res_path(ioa_cfg, array_entry->res_path, in ipr_log_sis64_array_error()
2324 ipr_format_res_path(ioa_cfg, in ipr_log_sis64_array_error()
2340 static void ipr_log_sis64_fabric_error(struct ipr_ioa_cfg *ioa_cfg, in ipr_log_sis64_fabric_error() argument
2367 ipr_log_hex_data(ioa_cfg, (u32 *)fabric, add_len); in ipr_log_sis64_fabric_error()
2378 static void ipr_log_generic_error(struct ipr_ioa_cfg *ioa_cfg, in ipr_log_generic_error() argument
2381 ipr_log_hex_data(ioa_cfg, hostrcb->hcam.u.raw.data, in ipr_log_generic_error()
2393 static void ipr_log_sis64_device_error(struct ipr_ioa_cfg *ioa_cfg, in ipr_log_sis64_device_error() argument
2413 ipr_log_hex_data(ioa_cfg, error->sense_data, sizeof(error->sense_data)); in ipr_log_sis64_device_error()
2415 ipr_log_hex_data(ioa_cfg, error->cdb, sizeof(error->cdb)); in ipr_log_sis64_device_error()
2418 ipr_log_hex_data(ioa_cfg, error->ioa_data, be32_to_cpu(error->length_of_error)); in ipr_log_sis64_device_error()
2453 static void ipr_handle_log_data(struct ipr_ioa_cfg *ioa_cfg, in ipr_handle_log_data() argument
2464 dev_err(&ioa_cfg->pdev->dev, "Error notifications lost\n"); in ipr_handle_log_data()
2466 if (ioa_cfg->sis64) in ipr_handle_log_data()
2471 if (!ioa_cfg->sis64 && (ioasc == IPR_IOASC_BUS_WAS_RESET || in ipr_handle_log_data()
2474 scsi_report_bus_reset(ioa_cfg->host, in ipr_handle_log_data()
2488 ioa_cfg->log_level <= IPR_DEFAULT_LOG_LEVEL) in ipr_handle_log_data()
2495 ioa_cfg->errors_logged++; in ipr_handle_log_data()
2497 if (ioa_cfg->log_level < ipr_error_table[error_index].log_hcam) in ipr_handle_log_data()
2504 ipr_log_cache_error(ioa_cfg, hostrcb); in ipr_handle_log_data()
2507 ipr_log_config_error(ioa_cfg, hostrcb); in ipr_handle_log_data()
2511 ipr_log_array_error(ioa_cfg, hostrcb); in ipr_handle_log_data()
2514 ipr_log_dual_ioa_error(ioa_cfg, hostrcb); in ipr_handle_log_data()
2517 ipr_log_enhanced_cache_error(ioa_cfg, hostrcb); in ipr_handle_log_data()
2520 ipr_log_enhanced_config_error(ioa_cfg, hostrcb); in ipr_handle_log_data()
2524 ipr_log_enhanced_array_error(ioa_cfg, hostrcb); in ipr_handle_log_data()
2527 ipr_log_enhanced_dual_ioa_error(ioa_cfg, hostrcb); in ipr_handle_log_data()
2530 ipr_log_fabric_error(ioa_cfg, hostrcb); in ipr_handle_log_data()
2533 ipr_log_sis64_device_error(ioa_cfg, hostrcb); in ipr_handle_log_data()
2536 ipr_log_sis64_config_error(ioa_cfg, hostrcb); in ipr_handle_log_data()
2540 ipr_log_sis64_array_error(ioa_cfg, hostrcb); in ipr_handle_log_data()
2543 ipr_log_sis64_fabric_error(ioa_cfg, hostrcb); in ipr_handle_log_data()
2548 ipr_log_generic_error(ioa_cfg, hostrcb); in ipr_handle_log_data()
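
ipr_handle_log_data() (lines 2453-2548) is the single funnel for adapter error HCAMs: it bumps errors_logged, drops the entry when the adapter's log_level is below the per-error threshold from the driver's error table, then dispatches on the error's overlay ID to the matching ipr_log_*_error() helper, with ipr_log_generic_error() as the hex-dump fallback. Shape of the gate step only (table entries here are illustrative placeholders, not real ipr_error_table values, and the real lookup masks the IOASC first):

#include <linux/kernel.h>
#include <linux/types.h>

struct demo_error_entry {
        u32 ioasc;
        u8 log_hcam;            /* minimum log_level that reports this */
        const char *text;
};

static const struct demo_error_entry demo_error_table[] = {
        { 0x00000000, 1, "unknown error" },     /* index 0 = fallback */
        /* ... one row per known IOASC ... */
};

static int demo_should_log(u32 ioasc, u8 log_level)
{
        size_t i, idx = 0;

        for (i = 1; i < ARRAY_SIZE(demo_error_table); i++)
                if (demo_error_table[i].ioasc == ioasc)
                        idx = i;

        /* Quiet errors are dropped when the table demands a higher
         * verbosity than the adapter is configured for (cf. 2497). */
        return log_level >= demo_error_table[idx].log_hcam;
}
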
2566 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; in ipr_process_error() local
2571 if (ioa_cfg->sis64) in ipr_process_error()
2580 ipr_handle_log_data(ioa_cfg, hostrcb); in ipr_process_error()
2582 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_ABBREV); in ipr_process_error()
2585 dev_err(&ioa_cfg->pdev->dev, in ipr_process_error()
2589 ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_LOG_DATA, hostrcb); in ipr_process_error()
2605 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; in ipr_timeout() local
2608 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); in ipr_timeout()
2610 ioa_cfg->errors_logged++; in ipr_timeout()
2611 dev_err(&ioa_cfg->pdev->dev, in ipr_timeout()
2614 if (WAIT_FOR_DUMP == ioa_cfg->sdt_state) in ipr_timeout()
2615 ioa_cfg->sdt_state = GET_DUMP; in ipr_timeout()
2617 if (!ioa_cfg->in_reset_reload || ioa_cfg->reset_cmd == ipr_cmd) in ipr_timeout()
2618 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE); in ipr_timeout()
2620 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_timeout()
2637 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; in ipr_oper_timeout() local
2640 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); in ipr_oper_timeout()
2642 ioa_cfg->errors_logged++; in ipr_oper_timeout()
2643 dev_err(&ioa_cfg->pdev->dev, in ipr_oper_timeout()
2646 if (WAIT_FOR_DUMP == ioa_cfg->sdt_state) in ipr_oper_timeout()
2647 ioa_cfg->sdt_state = GET_DUMP; in ipr_oper_timeout()
2649 if (!ioa_cfg->in_reset_reload || ioa_cfg->reset_cmd == ipr_cmd) { in ipr_oper_timeout()
2651 ioa_cfg->reset_retries += IPR_NUM_RESET_RELOAD_RETRIES; in ipr_oper_timeout()
2652 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE); in ipr_oper_timeout()
2655 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_oper_timeout()
2704 static u32 ipr_get_max_scsi_speed(struct ipr_ioa_cfg *ioa_cfg, u8 bus, u8 bus_width) in ipr_get_max_scsi_speed() argument
2711 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) { in ipr_get_max_scsi_speed()
2737 static int ipr_wait_iodbg_ack(struct ipr_ioa_cfg *ioa_cfg, int max_delay) in ipr_wait_iodbg_ack() argument
2744 pcii_reg = readl(ioa_cfg->regs.sense_interrupt_reg); in ipr_wait_iodbg_ack()
2770 static int ipr_get_sis64_dump_data_section(struct ipr_ioa_cfg *ioa_cfg, in ipr_get_sis64_dump_data_section() argument
2777 writel(start_addr+(i*4), ioa_cfg->regs.dump_addr_reg); in ipr_get_sis64_dump_data_section()
2778 *dest = cpu_to_be32(readl(ioa_cfg->regs.dump_data_reg)); in ipr_get_sis64_dump_data_section()
2795 static int ipr_get_ldump_data_section(struct ipr_ioa_cfg *ioa_cfg, in ipr_get_ldump_data_section() argument
2802 if (ioa_cfg->sis64) in ipr_get_ldump_data_section()
2803 return ipr_get_sis64_dump_data_section(ioa_cfg, start_addr, in ipr_get_ldump_data_section()
2808 ioa_cfg->regs.set_uproc_interrupt_reg32); in ipr_get_ldump_data_section()
2811 if (ipr_wait_iodbg_ack(ioa_cfg, in ipr_get_ldump_data_section()
2813 dev_err(&ioa_cfg->pdev->dev, in ipr_get_ldump_data_section()
2820 ioa_cfg->regs.clr_interrupt_reg); in ipr_get_ldump_data_section()
2823 writel(start_addr, ioa_cfg->ioa_mailbox); in ipr_get_ldump_data_section()
2827 ioa_cfg->regs.clr_uproc_interrupt_reg32); in ipr_get_ldump_data_section()
2831 if (ipr_wait_iodbg_ack(ioa_cfg, in ipr_get_ldump_data_section()
2833 dev_err(&ioa_cfg->pdev->dev, in ipr_get_ldump_data_section()
2839 *dest = cpu_to_be32(readl(ioa_cfg->ioa_mailbox)); in ipr_get_ldump_data_section()
2846 ioa_cfg->regs.clr_interrupt_reg); in ipr_get_ldump_data_section()
2852 ioa_cfg->regs.set_uproc_interrupt_reg32); in ipr_get_ldump_data_section()
2855 ioa_cfg->regs.clr_uproc_interrupt_reg32); in ipr_get_ldump_data_section()
2859 ioa_cfg->regs.clr_interrupt_reg); in ipr_get_ldump_data_section()
2864 readl(ioa_cfg->regs.sense_uproc_interrupt_reg32); in ipr_get_ldump_data_section()
2888 static int ipr_sdt_copy(struct ipr_ioa_cfg *ioa_cfg, in ipr_sdt_copy() argument
2895 struct ipr_ioa_dump *ioa_dump = &ioa_cfg->dump->ioa_dump; in ipr_sdt_copy()
2897 if (ioa_cfg->sis64) in ipr_sdt_copy()
2923 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); in ipr_sdt_copy()
2924 if (ioa_cfg->sdt_state == ABORT_DUMP) { in ipr_sdt_copy()
2927 rc = ipr_get_ldump_data_section(ioa_cfg, in ipr_sdt_copy()
2932 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_sdt_copy()
2970 static void ipr_dump_ioa_type_data(struct ipr_ioa_cfg *ioa_cfg, in ipr_dump_ioa_type_data() argument
2973 struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data; in ipr_dump_ioa_type_data()
2981 driver_dump->ioa_type_entry.type = ioa_cfg->type; in ipr_dump_ioa_type_data()
2996 static void ipr_dump_version_data(struct ipr_ioa_cfg *ioa_cfg, in ipr_dump_version_data() argument
3017 static void ipr_dump_trace_data(struct ipr_ioa_cfg *ioa_cfg, in ipr_dump_trace_data() argument
3026 memcpy(driver_dump->trace_entry.trace, ioa_cfg->trace, IPR_TRACE_SIZE); in ipr_dump_trace_data()
3038 static void ipr_dump_location_data(struct ipr_ioa_cfg *ioa_cfg, in ipr_dump_location_data() argument
3047 strcpy(driver_dump->location_entry.location, dev_name(&ioa_cfg->pdev->dev)); in ipr_dump_location_data()
3059 static void ipr_get_ioa_dump(struct ipr_ioa_cfg *ioa_cfg, struct ipr_dump *dump) in ipr_get_ioa_dump() argument
3073 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); in ipr_get_ioa_dump()
3075 if (ioa_cfg->sdt_state != READ_DUMP) { in ipr_get_ioa_dump()
3076 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_get_ioa_dump()
3080 if (ioa_cfg->sis64) { in ipr_get_ioa_dump()
3081 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_get_ioa_dump()
3083 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); in ipr_get_ioa_dump()
3086 start_addr = readl(ioa_cfg->ioa_mailbox); in ipr_get_ioa_dump()
3088 if (!ioa_cfg->sis64 && !ipr_sdt_is_fmt2(start_addr)) { in ipr_get_ioa_dump()
3089 dev_err(&ioa_cfg->pdev->dev, in ipr_get_ioa_dump()
3091 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_get_ioa_dump()
3095 dev_err(&ioa_cfg->pdev->dev, "Dump of IOA initiated\n"); in ipr_get_ioa_dump()
3107 ipr_dump_version_data(ioa_cfg, driver_dump); in ipr_get_ioa_dump()
3108 ipr_dump_location_data(ioa_cfg, driver_dump); in ipr_get_ioa_dump()
3109 ipr_dump_ioa_type_data(ioa_cfg, driver_dump); in ipr_get_ioa_dump()
3110 ipr_dump_trace_data(ioa_cfg, driver_dump); in ipr_get_ioa_dump()
3127 if (ioa_cfg->sis64) { in ipr_get_ioa_dump()
3137 rc = ipr_get_ldump_data_section(ioa_cfg, start_addr, (__be32 *)sdt, in ipr_get_ioa_dump()
3143 dev_err(&ioa_cfg->pdev->dev, in ipr_get_ioa_dump()
3147 ioa_cfg->sdt_state = DUMP_OBTAINED; in ipr_get_ioa_dump()
3148 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_get_ioa_dump()
3159 if (ioa_cfg->sis64) in ipr_get_ioa_dump()
3164 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_get_ioa_dump()
3174 if (ioa_cfg->sis64) in ipr_get_ioa_dump()
3192 bytes_copied = ipr_sdt_copy(ioa_cfg, sdt_word, in ipr_get_ioa_dump()
3205 dev_err(&ioa_cfg->pdev->dev, "Dump of IOA completed.\n"); in ipr_get_ioa_dump()
3210 ioa_cfg->sdt_state = DUMP_OBTAINED; in ipr_get_ioa_dump()
3215 #define ipr_get_ioa_dump(ioa_cfg, dump) do { } while (0) argument
3228 struct ipr_ioa_cfg *ioa_cfg = dump->ioa_cfg; in ipr_release_dump() local
3233 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); in ipr_release_dump()
3234 ioa_cfg->dump = NULL; in ipr_release_dump()
3235 ioa_cfg->sdt_state = INACTIVE; in ipr_release_dump()
3236 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_release_dump()
3263 struct ipr_ioa_cfg *ioa_cfg = in ipr_worker_thread() local
3269 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); in ipr_worker_thread()
3271 if (ioa_cfg->sdt_state == READ_DUMP) { in ipr_worker_thread()
3272 dump = ioa_cfg->dump; in ipr_worker_thread()
3274 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_worker_thread()
3278 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_worker_thread()
3279 ipr_get_ioa_dump(ioa_cfg, dump); in ipr_worker_thread()
3282 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); in ipr_worker_thread()
3283 if (ioa_cfg->sdt_state == DUMP_OBTAINED && !ioa_cfg->dump_timeout) in ipr_worker_thread()
3284 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE); in ipr_worker_thread()
3285 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_worker_thread()
3292 if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds) { in ipr_worker_thread()
3293 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_worker_thread()
3297 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) { in ipr_worker_thread()
3303 list_move_tail(&res->queue, &ioa_cfg->free_res_q); in ipr_worker_thread()
3306 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_worker_thread()
3309 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); in ipr_worker_thread()
3316 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) { in ipr_worker_thread()
3322 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_worker_thread()
3323 scsi_add_device(ioa_cfg->host, bus, target, lun); in ipr_worker_thread()
3324 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); in ipr_worker_thread()
3329 ioa_cfg->scan_done = 1; in ipr_worker_thread()
3330 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_worker_thread()
3331 kobject_uevent(&ioa_cfg->host->shost_dev.kobj, KOBJ_CHANGE); in ipr_worker_thread()
3354 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata; in ipr_read_trace() local
3358 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); in ipr_read_trace()
3359 ret = memory_read_from_buffer(buf, count, &off, ioa_cfg->trace, in ipr_read_trace()
3361 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_read_trace()
3388 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata; in ipr_show_fw_version() local
3389 struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data; in ipr_show_fw_version()
3393 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); in ipr_show_fw_version()
3398 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_show_fw_version()
3422 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata; in ipr_show_log_level() local
3426 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); in ipr_show_log_level()
3427 len = snprintf(buf, PAGE_SIZE, "%d\n", ioa_cfg->log_level); in ipr_show_log_level()
3428 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_show_log_level()
3445 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata; in ipr_store_log_level() local
3448 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); in ipr_store_log_level()
3449 ioa_cfg->log_level = simple_strtoul(buf, NULL, 10); in ipr_store_log_level()
3450 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_store_log_level()
3480 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata; in ipr_store_diagnostics() local
3487 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); in ipr_store_diagnostics()
3488 while (ioa_cfg->in_reset_reload) { in ipr_store_diagnostics()
3489 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_store_diagnostics()
3490 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload); in ipr_store_diagnostics()
3491 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); in ipr_store_diagnostics()
3494 ioa_cfg->errors_logged = 0; in ipr_store_diagnostics()
3495 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL); in ipr_store_diagnostics()
3497 if (ioa_cfg->in_reset_reload) { in ipr_store_diagnostics()
3498 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_store_diagnostics()
3499 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload); in ipr_store_diagnostics()
3504 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_store_diagnostics()
3508 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); in ipr_store_diagnostics()
3509 if (ioa_cfg->in_reset_reload || ioa_cfg->errors_logged) in ipr_store_diagnostics()
3511 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_store_diagnostics()
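
The while (ioa_cfg->in_reset_reload) loop here is an idiom repeated at lines 3948-3951, 5097-5100, 6518-6521, and 6559-6562: wait_event() sleeps, and sleeping under a spinlock is forbidden, so the code drops host_lock, waits for the reset to finish, and retakes the lock before re-checking the flag. The reset paths wake these sleepers with wake_up_all(&ioa_cfg->reset_wait_q) (lines 6992 and 7048). The loop in isolation, with the flag and waitqueue assumed initialized elsewhere:

#include <linux/spinlock.h>
#include <linux/wait.h>
#include <scsi/scsi_host.h>

struct demo_ioa_cfg {
        struct Scsi_Host *host;
        wait_queue_head_t reset_wait_q;
        int in_reset_reload;
};

static void demo_wait_out_reset(struct demo_ioa_cfg *ioa_cfg)
{
        unsigned long lock_flags;

        spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
        while (ioa_cfg->in_reset_reload) {
                /* drop the lock before sleeping; retake it so the
                 * condition is re-evaluated under protection */
                spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
                wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
                spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
        }
        /* ... proceed with no reset in flight ... */
        spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
}
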
3536 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata; in ipr_show_adapter_state() local
3540 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); in ipr_show_adapter_state()
3541 if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead) in ipr_show_adapter_state()
3545 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_show_adapter_state()
3565 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata; in ipr_store_adapter_state() local
3572 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); in ipr_store_adapter_state()
3573 if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead && in ipr_store_adapter_state()
3575 for (i = 0; i < ioa_cfg->hrrq_num; i++) { in ipr_store_adapter_state()
3576 spin_lock(&ioa_cfg->hrrq[i]._lock); in ipr_store_adapter_state()
3577 ioa_cfg->hrrq[i].ioa_is_dead = 0; in ipr_store_adapter_state()
3578 spin_unlock(&ioa_cfg->hrrq[i]._lock); in ipr_store_adapter_state()
3581 ioa_cfg->reset_retries = 0; in ipr_store_adapter_state()
3582 ioa_cfg->in_ioa_bringdown = 0; in ipr_store_adapter_state()
3583 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE); in ipr_store_adapter_state()
3585 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_store_adapter_state()
3586 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload); in ipr_store_adapter_state()
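
Lines 3575-3578 (and the matching loops at 755-758 and 6984-6987) flip per-queue state under each HRRQ's own _lock rather than one global lock, so an interrupt handler servicing another queue only ever contends on its own lock. The loop reduced to its essentials:

#include <linux/spinlock.h>

struct demo_hrrq {
        spinlock_t _lock;
        int ioa_is_dead;
};

static void demo_mark_hrrqs_alive(struct demo_hrrq *hrrq, int hrrq_num)
{
        int i;

        for (i = 0; i < hrrq_num; i++) {
                spin_lock(&hrrq[i]._lock);
                hrrq[i].ioa_is_dead = 0;        /* seen by that queue's ISR */
                spin_unlock(&hrrq[i]._lock);
        }
}
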
3616 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata; in ipr_store_reset_adapter() local
3623 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); in ipr_store_reset_adapter()
3624 if (!ioa_cfg->in_reset_reload) in ipr_store_reset_adapter()
3625 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL); in ipr_store_reset_adapter()
3626 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_store_reset_adapter()
3627 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload); in ipr_store_reset_adapter()
3653 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata; in ipr_show_iopoll_weight() local
3658 len = snprintf(buf, PAGE_SIZE, "%d\n", ioa_cfg->iopoll_weight); in ipr_show_iopoll_weight()
3677 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata; in ipr_store_iopoll_weight() local
3682 if (!ioa_cfg->sis64) { in ipr_store_iopoll_weight()
3683 dev_info(&ioa_cfg->pdev->dev, "blk-iopoll not supported on this adapter\n"); in ipr_store_iopoll_weight()
3690 dev_info(&ioa_cfg->pdev->dev, "Invalid blk-iopoll weight. It must be less than 256\n"); in ipr_store_iopoll_weight()
3694 if (user_iopoll_weight == ioa_cfg->iopoll_weight) { in ipr_store_iopoll_weight()
3695 dev_info(&ioa_cfg->pdev->dev, "Current blk-iopoll weight has the same weight\n"); in ipr_store_iopoll_weight()
3699 if (ioa_cfg->iopoll_weight && ioa_cfg->sis64 && ioa_cfg->nvectors > 1) { in ipr_store_iopoll_weight()
3700 for (i = 1; i < ioa_cfg->hrrq_num; i++) in ipr_store_iopoll_weight()
3701 blk_iopoll_disable(&ioa_cfg->hrrq[i].iopoll); in ipr_store_iopoll_weight()
3705 ioa_cfg->iopoll_weight = user_iopoll_weight; in ipr_store_iopoll_weight()
3706 if (ioa_cfg->iopoll_weight && ioa_cfg->sis64 && ioa_cfg->nvectors > 1) { in ipr_store_iopoll_weight()
3707 for (i = 1; i < ioa_cfg->hrrq_num; i++) { in ipr_store_iopoll_weight()
3708 blk_iopoll_init(&ioa_cfg->hrrq[i].iopoll, in ipr_store_iopoll_weight()
3709 ioa_cfg->iopoll_weight, ipr_iopoll); in ipr_store_iopoll_weight()
3710 blk_iopoll_enable(&ioa_cfg->hrrq[i].iopoll); in ipr_store_iopoll_weight()
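
ipr_store_iopoll_weight() (lines 3677-3710) changes the blk-iopoll budget safely: every per-HRRQ poller (queues 1..hrrq_num-1; queue 0 stays interrupt driven) is disabled first, the weight is stored, then each poller is re-initialized and re-enabled. A sketch against the old blk_iopoll API this driver version uses (since removed from the kernel in favour of irq_poll); demo_poll() is a stand-in for ipr_iopoll:

#include <linux/blk-iopoll.h>

static int demo_poll(struct blk_iopoll *iop, int budget)
{
        int done = 0;
        /* consume up to "budget" completions from the HRRQ here */
        blk_iopoll_complete(iop);       /* finished: re-enable interrupts */
        return done;
}

static void demo_set_weight(struct blk_iopoll *iopoll, int nr_hrrq,
                            int *cur_weight, int new_weight)
{
        int i;

        if (*cur_weight)                        /* quiesce old pollers */
                for (i = 1; i < nr_hrrq; i++)
                        blk_iopoll_disable(&iopoll[i]);

        *cur_weight = new_weight;

        if (*cur_weight)                        /* re-arm with new budget */
                for (i = 1; i < nr_hrrq; i++) {
                        blk_iopoll_init(&iopoll[i], *cur_weight, demo_poll);
                        blk_iopoll_enable(&iopoll[i]);
                }
}
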
3942 static int ipr_update_ioa_ucode(struct ipr_ioa_cfg *ioa_cfg, in ipr_update_ioa_ucode() argument
3947 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); in ipr_update_ioa_ucode()
3948 while (ioa_cfg->in_reset_reload) { in ipr_update_ioa_ucode()
3949 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_update_ioa_ucode()
3950 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload); in ipr_update_ioa_ucode()
3951 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); in ipr_update_ioa_ucode()
3954 if (ioa_cfg->ucode_sglist) { in ipr_update_ioa_ucode()
3955 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_update_ioa_ucode()
3956 dev_err(&ioa_cfg->pdev->dev, in ipr_update_ioa_ucode()
3961 sglist->num_dma_sg = dma_map_sg(&ioa_cfg->pdev->dev, in ipr_update_ioa_ucode()
3966 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_update_ioa_ucode()
3967 dev_err(&ioa_cfg->pdev->dev, in ipr_update_ioa_ucode()
3972 ioa_cfg->ucode_sglist = sglist; in ipr_update_ioa_ucode()
3973 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL); in ipr_update_ioa_ucode()
3974 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_update_ioa_ucode()
3975 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload); in ipr_update_ioa_ucode()
3977 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); in ipr_update_ioa_ucode()
3978 ioa_cfg->ucode_sglist = NULL; in ipr_update_ioa_ucode()
3979 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_update_ioa_ucode()
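
ipr_update_ioa_ucode() (lines 3942-3979) waits out any reset in flight, maps the prepared microcode scatterlist with dma_map_sg(), publishes it through ioa_cfg->ucode_sglist, and triggers a NORMAL-shutdown reset during which the download happens; a zero return from dma_map_sg() is the failure case. The mapping step alone, assuming the scatterlist is already built:

#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>

/* Returns the mapped entry count, 0 on failure (cf. lines 3961-3967);
 * DMA_TO_DEVICE because microcode flows host -> adapter. */
static int demo_map_ucode(struct device *dev, struct scatterlist *sg,
                          int num_sg)
{
        int num_dma_sg = dma_map_sg(dev, sg, num_sg, DMA_TO_DEVICE);

        if (!num_dma_sg)
                dev_err(dev, "Failed to map microcode download buffer\n");
        return num_dma_sg;
}
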
3999 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata; in ipr_store_update_fw() local
4013 if (request_firmware(&fw_entry, fname, &ioa_cfg->pdev->dev)) { in ipr_store_update_fw()
4014 dev_err(&ioa_cfg->pdev->dev, "Firmware file %s not found\n", fname); in ipr_store_update_fw()
4025 dev_err(&ioa_cfg->pdev->dev, "Microcode buffer allocation failed\n"); in ipr_store_update_fw()
4033 dev_err(&ioa_cfg->pdev->dev, in ipr_store_update_fw()
4040 result = ipr_update_ioa_ucode(ioa_cfg, sglist); in ipr_store_update_fw()
4070 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata; in ipr_show_fw_type() local
4074 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); in ipr_show_fw_type()
4075 len = snprintf(buf, PAGE_SIZE, "%d\n", ioa_cfg->sis64); in ipr_show_fw_type()
4076 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_show_fw_type()
4119 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata; in ipr_read_dump() local
4129 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); in ipr_read_dump()
4130 dump = ioa_cfg->dump; in ipr_read_dump()
4132 if (ioa_cfg->sdt_state != DUMP_OBTAINED || !dump) { in ipr_read_dump()
4133 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_read_dump()
4137 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_read_dump()
4163 if (ioa_cfg->sis64) in ipr_read_dump()
4209 static int ipr_alloc_dump(struct ipr_ioa_cfg *ioa_cfg) in ipr_alloc_dump() argument
4222 if (ioa_cfg->sis64) in ipr_alloc_dump()
4236 dump->ioa_cfg = ioa_cfg; in ipr_alloc_dump()
4238 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); in ipr_alloc_dump()
4240 if (INACTIVE != ioa_cfg->sdt_state) { in ipr_alloc_dump()
4241 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_alloc_dump()
4247 ioa_cfg->dump = dump; in ipr_alloc_dump()
4248 ioa_cfg->sdt_state = WAIT_FOR_DUMP; in ipr_alloc_dump()
4249 if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead && !ioa_cfg->dump_taken) { in ipr_alloc_dump()
4250 ioa_cfg->dump_taken = 1; in ipr_alloc_dump()
4251 schedule_work(&ioa_cfg->work_q); in ipr_alloc_dump()
4253 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_alloc_dump()
4265 static int ipr_free_dump(struct ipr_ioa_cfg *ioa_cfg) in ipr_free_dump() argument
4272 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); in ipr_free_dump()
4273 dump = ioa_cfg->dump; in ipr_free_dump()
4275 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_free_dump()
4279 ioa_cfg->dump = NULL; in ipr_free_dump()
4280 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_free_dump()
4306 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata; in ipr_write_dump() local
4313 rc = ipr_alloc_dump(ioa_cfg); in ipr_write_dump()
4315 rc = ipr_free_dump(ioa_cfg); in ipr_write_dump()
4335 static int ipr_free_dump(struct ipr_ioa_cfg *ioa_cfg) { return 0; }; in ipr_free_dump() argument
4349 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata; in ipr_change_queue_depth() local
4353 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); in ipr_change_queue_depth()
4358 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_change_queue_depth()
4376 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata; in ipr_show_adapter_handle() local
4381 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); in ipr_show_adapter_handle()
4385 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_show_adapter_handle()
4410 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata; in ipr_show_resource_path() local
4416 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); in ipr_show_resource_path()
4418 if (res && ioa_cfg->sis64) in ipr_show_resource_path()
4423 len = snprintf(buf, PAGE_SIZE, "%d:%d:%d:%d\n", ioa_cfg->host->host_no, in ipr_show_resource_path()
4426 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_show_resource_path()
4450 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata; in ipr_show_device_id() local
4455 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); in ipr_show_device_id()
4457 if (res && ioa_cfg->sis64) in ipr_show_device_id()
4462 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_show_device_id()
4486 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata; in ipr_show_resource_type() local
4491 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); in ipr_show_resource_type()
4497 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_show_resource_type()
4521 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata; in ipr_show_raw_mode() local
4526 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); in ipr_show_raw_mode()
4532 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_show_raw_mode()
4549 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata; in ipr_store_raw_mode() local
4554 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); in ipr_store_raw_mode()
4567 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_store_raw_mode()
4634 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata; in ipr_find_starget() local
4637 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) { in ipr_find_starget()
4662 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata; in ipr_target_alloc() local
4668 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); in ipr_target_alloc()
4673 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_target_alloc()
4678 ap = ata_sas_port_alloc(&ioa_cfg->ata_host, &sata_port_info, shost); in ipr_target_alloc()
4680 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); in ipr_target_alloc()
4681 sata_port->ioa_cfg = ioa_cfg; in ipr_target_alloc()
4693 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_target_alloc()
4710 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata; in ipr_target_destroy() local
4712 if (ioa_cfg->sis64) { in ipr_target_destroy()
4715 clear_bit(starget->id, ioa_cfg->array_ids); in ipr_target_destroy()
4717 clear_bit(starget->id, ioa_cfg->vset_ids); in ipr_target_destroy()
4719 clear_bit(starget->id, ioa_cfg->target_ids); in ipr_target_destroy()
4739 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata; in ipr_find_sdev() local
4742 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) { in ipr_find_sdev()
4762 struct ipr_ioa_cfg *ioa_cfg; in ipr_slave_destroy() local
4765 ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata; in ipr_slave_destroy()
4767 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); in ipr_slave_destroy()
4776 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_slave_destroy()
4790 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata; in ipr_slave_configure() local
4796 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); in ipr_slave_configure()
4813 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_slave_configure()
4820 if (ioa_cfg->sis64) in ipr_slave_configure()
4822 ipr_format_res_path(ioa_cfg, in ipr_slave_configure()
4826 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_slave_configure()
4875 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata; in ipr_slave_alloc() local
4882 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); in ipr_slave_alloc()
4894 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_slave_alloc()
4899 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_slave_alloc()
4928 static int ipr_wait_for_ops(struct ipr_ioa_cfg *ioa_cfg, void *device, in ipr_wait_for_ops() argument
4942 for_each_hrrq(hrrq, ioa_cfg) { in ipr_wait_for_ops()
4959 for_each_hrrq(hrrq, ioa_cfg) { in ipr_wait_for_ops()
4971 dev_err(&ioa_cfg->pdev->dev, "Timed out waiting for aborted commands\n"); in ipr_wait_for_ops()
4984 struct ipr_ioa_cfg *ioa_cfg; in ipr_eh_host_reset() local
4989 ioa_cfg = (struct ipr_ioa_cfg *) cmd->device->host->hostdata; in ipr_eh_host_reset()
4990 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); in ipr_eh_host_reset()
4992 if (!ioa_cfg->in_reset_reload && !ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead) { in ipr_eh_host_reset()
4993 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_ABBREV); in ipr_eh_host_reset()
4994 dev_err(&ioa_cfg->pdev->dev, in ipr_eh_host_reset()
4997 if (WAIT_FOR_DUMP == ioa_cfg->sdt_state) in ipr_eh_host_reset()
4998 ioa_cfg->sdt_state = GET_DUMP; in ipr_eh_host_reset()
5001 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_eh_host_reset()
5002 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload); in ipr_eh_host_reset()
5003 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); in ipr_eh_host_reset()
5007 if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead) { in ipr_eh_host_reset()
5012 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_eh_host_reset()
5031 static int ipr_device_reset(struct ipr_ioa_cfg *ioa_cfg, in ipr_device_reset() argument
5041 ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg); in ipr_device_reset()
5045 if (ipr_cmd->ioa_cfg->sis64) { in ipr_device_reset()
5064 if (ipr_cmd->ioa_cfg->sis64) in ipr_device_reset()
5090 struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg; in ipr_sata_reset() local
5096 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); in ipr_sata_reset()
5097 while (ioa_cfg->in_reset_reload) { in ipr_sata_reset()
5098 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_sata_reset()
5099 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload); in ipr_sata_reset()
5100 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); in ipr_sata_reset()
5105 rc = ipr_device_reset(ioa_cfg, res); in ipr_sata_reset()
5109 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_sata_reset()
5128 struct ipr_ioa_cfg *ioa_cfg; in __ipr_eh_dev_reset() local
5135 ioa_cfg = (struct ipr_ioa_cfg *) scsi_cmd->device->host->hostdata; in __ipr_eh_dev_reset()
5146 if (ioa_cfg->in_reset_reload) in __ipr_eh_dev_reset()
5148 if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead) in __ipr_eh_dev_reset()
5151 for_each_hrrq(hrrq, ioa_cfg) { in __ipr_eh_dev_reset()
5177 for_each_hrrq(hrrq, ioa_cfg) { in __ipr_eh_dev_reset()
5190 rc = ipr_device_reset(ioa_cfg, res); in __ipr_eh_dev_reset()
5201 struct ipr_ioa_cfg *ioa_cfg; in ipr_eh_dev_reset() local
5203 ioa_cfg = (struct ipr_ioa_cfg *) cmd->device->host->hostdata; in ipr_eh_dev_reset()
5210 rc = ipr_wait_for_ops(ioa_cfg, cmd->device, ipr_match_lun); in ipr_eh_dev_reset()
5226 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; in ipr_bus_reset_done() local
5230 if (!ioa_cfg->sis64) in ipr_bus_reset_done()
5231 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) { in ipr_bus_reset_done()
5233 scsi_report_bus_reset(ioa_cfg->host, res->bus); in ipr_bus_reset_done()
5265 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; in ipr_abort_timeout() local
5270 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); in ipr_abort_timeout()
5271 if (ipr_cmd->completion.done || ioa_cfg->in_reset_reload) { in ipr_abort_timeout()
5272 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_abort_timeout()
5277 reset_cmd = ipr_get_free_ipr_cmnd(ioa_cfg); in ipr_abort_timeout()
5287 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_abort_timeout()
5303 struct ipr_ioa_cfg *ioa_cfg; in ipr_cancel_op() local
5311 ioa_cfg = (struct ipr_ioa_cfg *)scsi_cmd->device->host->hostdata; in ipr_cancel_op()
5318 if (ioa_cfg->in_reset_reload || in ipr_cancel_op()
5319 ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead) in ipr_cancel_op()
5329 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg); in ipr_cancel_op()
5334 for_each_hrrq(hrrq, ioa_cfg) { in ipr_cancel_op()
5349 ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg); in ipr_cancel_op()
5388 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata; in ipr_scan_finished() local
5392 if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead || ioa_cfg->scan_done) in ipr_scan_finished()
5394 if ((elapsed_time/HZ) > (ioa_cfg->transop_timeout * 2)) in ipr_scan_finished()
5411 struct ipr_ioa_cfg *ioa_cfg; in ipr_eh_abort() local
5415 ioa_cfg = (struct ipr_ioa_cfg *) scsi_cmd->device->host->hostdata; in ipr_eh_abort()
5422 rc = ipr_wait_for_ops(ioa_cfg, scsi_cmd->device, ipr_match_lun); in ipr_eh_abort()
5435 static irqreturn_t ipr_handle_other_interrupt(struct ipr_ioa_cfg *ioa_cfg, in ipr_handle_other_interrupt() argument
5441 int_mask_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg32); in ipr_handle_other_interrupt()
5448 if (ioa_cfg->sis64) { in ipr_handle_other_interrupt()
5449 int_mask_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg); in ipr_handle_other_interrupt()
5450 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg) & ~int_mask_reg; in ipr_handle_other_interrupt()
5454 writel(IPR_PCII_IPL_STAGE_CHANGE, ioa_cfg->regs.clr_interrupt_reg); in ipr_handle_other_interrupt()
5455 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg) & ~int_mask_reg; in ipr_handle_other_interrupt()
5456 list_del(&ioa_cfg->reset_cmd->queue); in ipr_handle_other_interrupt()
5457 del_timer(&ioa_cfg->reset_cmd->timer); in ipr_handle_other_interrupt()
5458 ipr_reset_ioa_job(ioa_cfg->reset_cmd); in ipr_handle_other_interrupt()
5468 writel(IPR_PCII_IOA_TRANS_TO_OPER, ioa_cfg->regs.set_interrupt_mask_reg); in ipr_handle_other_interrupt()
5469 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg); in ipr_handle_other_interrupt()
5471 list_del(&ioa_cfg->reset_cmd->queue); in ipr_handle_other_interrupt()
5472 del_timer(&ioa_cfg->reset_cmd->timer); in ipr_handle_other_interrupt()
5473 ipr_reset_ioa_job(ioa_cfg->reset_cmd); in ipr_handle_other_interrupt()
5475 if (ioa_cfg->clear_isr) { in ipr_handle_other_interrupt()
5477 dev_err(&ioa_cfg->pdev->dev, in ipr_handle_other_interrupt()
5479 writel(IPR_PCII_HRRQ_UPDATED, ioa_cfg->regs.clr_interrupt_reg32); in ipr_handle_other_interrupt()
5480 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32); in ipr_handle_other_interrupt()
5485 ioa_cfg->ioa_unit_checked = 1; in ipr_handle_other_interrupt()
5487 dev_err(&ioa_cfg->pdev->dev, in ipr_handle_other_interrupt()
5490 dev_err(&ioa_cfg->pdev->dev, in ipr_handle_other_interrupt()
5493 if (WAIT_FOR_DUMP == ioa_cfg->sdt_state) in ipr_handle_other_interrupt()
5494 ioa_cfg->sdt_state = GET_DUMP; in ipr_handle_other_interrupt()
5496 ipr_mask_and_clear_interrupts(ioa_cfg, ~0); in ipr_handle_other_interrupt()
5497 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE); in ipr_handle_other_interrupt()
5511 static void ipr_isr_eh(struct ipr_ioa_cfg *ioa_cfg, char *msg, u16 number) in ipr_isr_eh() argument
5513 ioa_cfg->errors_logged++; in ipr_isr_eh()
5514 dev_err(&ioa_cfg->pdev->dev, "%s %d\n", msg, number); in ipr_isr_eh()
5516 if (WAIT_FOR_DUMP == ioa_cfg->sdt_state) in ipr_isr_eh()
5517 ioa_cfg->sdt_state = GET_DUMP; in ipr_isr_eh()
5519 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE); in ipr_isr_eh()
5528 struct ipr_ioa_cfg *ioa_cfg = hrr_queue->ioa_cfg; in ipr_process_hrrq() local
5544 ipr_isr_eh(ioa_cfg, in ipr_process_hrrq()
5550 ipr_cmd = ioa_cfg->ipr_cmnd_list[cmd_index]; in ipr_process_hrrq()
5573 struct ipr_ioa_cfg *ioa_cfg; in ipr_iopoll() local
5581 ioa_cfg = hrrq->ioa_cfg; in ipr_iopoll()
5610 struct ipr_ioa_cfg *ioa_cfg = hrrq->ioa_cfg; in ipr_isr() local
5630 if (!ioa_cfg->clear_isr) in ipr_isr()
5637 ioa_cfg->regs.clr_interrupt_reg32); in ipr_isr()
5638 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32); in ipr_isr()
5643 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32); in ipr_isr()
5647 ipr_isr_eh(ioa_cfg, in ipr_isr()
5656 rc = ipr_handle_other_interrupt(ioa_cfg, int_reg); in ipr_isr()
5678 struct ipr_ioa_cfg *ioa_cfg = hrrq->ioa_cfg; in ipr_isr_mhrrq() local
5692 if (ioa_cfg->iopoll_weight && ioa_cfg->sis64 && ioa_cfg->nvectors > 1) { in ipr_isr_mhrrq()
5726 static int ipr_build_ioadl64(struct ipr_ioa_cfg *ioa_cfg, in ipr_build_ioadl64() argument
5744 dev_err(&ioa_cfg->pdev->dev, "scsi_dma_map failed!\n"); in ipr_build_ioadl64()
5778 static int ipr_build_ioadl(struct ipr_ioa_cfg *ioa_cfg, in ipr_build_ioadl() argument
5795 dev_err(&ioa_cfg->pdev->dev, "scsi_dma_map failed!\n"); in ipr_build_ioadl()
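
Both IOADL builders (ipr_build_ioadl64 at 5726 and ipr_build_ioadl at 5778) begin identically: scsi_dma_map() maps the command's scatterlist, a negative return is fatal, and each mapped segment then becomes one IOADL descriptor. The entry check in isolation; the descriptor-filling loop is only indicated:

#include <linux/errno.h>
#include <scsi/scsi_cmnd.h>

static int demo_map_scsi_data(struct scsi_cmnd *scsi_cmd)
{
        int nseg = scsi_dma_map(scsi_cmd);

        if (nseg < 0)
                return -EIO;    /* real code logs via dev_err first */
        if (nseg == 0)
                return 0;       /* no data phase for this command */

        /* one descriptor per segment:
         * scsi_for_each_sg(scsi_cmd, sg, nseg, i) { fill IOADL entry } */
        return nseg;
}
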
5887 if (ipr_cmd->ioa_cfg->sis64) in ipr_reinit_ipr_cmnd_for_erp()
5981 static void ipr_dump_ioasa(struct ipr_ioa_cfg *ioa_cfg, in ipr_dump_ioasa() argument
5997 if (ioa_cfg->log_level < IPR_DEFAULT_LOG_LEVEL) in ipr_dump_ioasa()
6005 if (ioa_cfg->log_level < IPR_MAX_LOG_LEVEL) { in ipr_dump_ioasa()
6017 ipr_res_err(ioa_cfg, res, "%s\n", ipr_error_table[error_index].error); in ipr_dump_ioasa()
6020 if (ioa_cfg->sis64 && sizeof(struct ipr_ioasa64) < data_len) in ipr_dump_ioasa()
6022 else if (!ioa_cfg->sis64 && sizeof(struct ipr_ioasa) < data_len) in ipr_dump_ioasa()
6145 if (ipr_cmd->ioa_cfg->sis64) in ipr_get_autosense()
6167 static void ipr_erp_start(struct ipr_ioa_cfg *ioa_cfg, in ipr_erp_start() argument
6183 ipr_dump_ioasa(ioa_cfg, ipr_cmd, res); in ipr_erp_start()
6217 scsi_report_bus_reset(ioa_cfg->host, scsi_cmd->device->channel); in ipr_erp_start()
6269 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; in ipr_scsi_done() local
6284 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); in ipr_scsi_done()
6286 ipr_erp_start(ioa_cfg, ipr_cmd); in ipr_scsi_done()
6288 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_scsi_done()
6307 struct ipr_ioa_cfg *ioa_cfg; in ipr_queuecommand() local
6316 ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata; in ipr_queuecommand()
6322 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); in ipr_queuecommand()
6324 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_queuecommand()
6328 hrrq_id = ipr_get_hrrq_index(ioa_cfg); in ipr_queuecommand()
6329 hrrq = &ioa_cfg->hrrq[hrrq_id]; in ipr_queuecommand()
6388 if (ioa_cfg->sis64) in ipr_queuecommand()
6389 rc = ipr_build_ioadl64(ioa_cfg, ipr_cmd); in ipr_queuecommand()
6391 rc = ipr_build_ioadl(ioa_cfg, ipr_cmd); in ipr_queuecommand()
6462 struct ipr_ioa_cfg *ioa_cfg; in ipr_ioa_info() local
6465 ioa_cfg = (struct ipr_ioa_cfg *) host->hostdata; in ipr_ioa_info()
6468 sprintf(buffer, "IBM %X Storage Adapter", ioa_cfg->type); in ipr_ioa_info()
6513 struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg; in ipr_ata_phy_reset() local
6517 spin_lock_irqsave(ioa_cfg->host->host_lock, flags); in ipr_ata_phy_reset()
6518 while (ioa_cfg->in_reset_reload) { in ipr_ata_phy_reset()
6519 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags); in ipr_ata_phy_reset()
6520 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload); in ipr_ata_phy_reset()
6521 spin_lock_irqsave(ioa_cfg->host->host_lock, flags); in ipr_ata_phy_reset()
6524 if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds) in ipr_ata_phy_reset()
6527 rc = ipr_device_reset(ioa_cfg, res); in ipr_ata_phy_reset()
6539 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags); in ipr_ata_phy_reset()
6553 struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg; in ipr_ata_post_internal() local
6558 spin_lock_irqsave(ioa_cfg->host->host_lock, flags); in ipr_ata_post_internal()
6559 while (ioa_cfg->in_reset_reload) { in ipr_ata_post_internal()
6560 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags); in ipr_ata_post_internal()
6561 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload); in ipr_ata_post_internal()
6562 spin_lock_irqsave(ioa_cfg->host->host_lock, flags); in ipr_ata_post_internal()
6565 for_each_hrrq(hrrq, ioa_cfg) { in ipr_ata_post_internal()
6569 ipr_device_reset(ioa_cfg, sata_port->res); in ipr_ata_post_internal()
6575 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags); in ipr_ata_post_internal()
6616 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; in ipr_sata_done() local
6623 if (ipr_cmd->ioa_cfg->sis64) in ipr_sata_done()
6629 ipr_dump_ioasa(ioa_cfg, ipr_cmd, res); in ipr_sata_done()
6632 scsi_report_device_reset(ioa_cfg->host, res->bus, res->target); in ipr_sata_done()
6745 struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg; in ipr_qc_defer() local
6750 hrrq_id = ipr_get_hrrq_index(ioa_cfg); in ipr_qc_defer()
6751 hrrq = &ioa_cfg->hrrq[hrrq_id]; in ipr_qc_defer()
6788 struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg; in ipr_qc_issue() local
6812 if (ioa_cfg->sis64) { in ipr_qc_issue()
6830 if (ioa_cfg->sis64) in ipr_qc_issue()
6943 static int ipr_invalid_adapter(struct ipr_ioa_cfg *ioa_cfg) in ipr_invalid_adapter() argument
6947 if ((ioa_cfg->type == 0x5702) && (ioa_cfg->pdev->revision < 4)) { in ipr_invalid_adapter()
6956 #define ipr_invalid_adapter(ioa_cfg) 0 argument
6971 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; in ipr_ioa_bringdown_done() local
6975 if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].removing_ioa) { in ipr_ioa_bringdown_done()
6977 spin_unlock_irq(ioa_cfg->host->host_lock); in ipr_ioa_bringdown_done()
6978 scsi_unblock_requests(ioa_cfg->host); in ipr_ioa_bringdown_done()
6979 spin_lock_irq(ioa_cfg->host->host_lock); in ipr_ioa_bringdown_done()
6982 ioa_cfg->in_reset_reload = 0; in ipr_ioa_bringdown_done()
6983 ioa_cfg->reset_retries = 0; in ipr_ioa_bringdown_done()
6984 for (i = 0; i < ioa_cfg->hrrq_num; i++) { in ipr_ioa_bringdown_done()
6985 spin_lock(&ioa_cfg->hrrq[i]._lock); in ipr_ioa_bringdown_done()
6986 ioa_cfg->hrrq[i].ioa_is_dead = 1; in ipr_ioa_bringdown_done()
6987 spin_unlock(&ioa_cfg->hrrq[i]._lock); in ipr_ioa_bringdown_done()
6992 wake_up_all(&ioa_cfg->reset_wait_q); in ipr_ioa_bringdown_done()
7011 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; in ipr_ioa_reset_done() local
7017 ioa_cfg->in_reset_reload = 0; in ipr_ioa_reset_done()
7018 for (j = 0; j < ioa_cfg->hrrq_num; j++) { in ipr_ioa_reset_done()
7019 spin_lock(&ioa_cfg->hrrq[j]._lock); in ipr_ioa_reset_done()
7020 ioa_cfg->hrrq[j].allow_cmds = 1; in ipr_ioa_reset_done()
7021 spin_unlock(&ioa_cfg->hrrq[j]._lock); in ipr_ioa_reset_done()
7024 ioa_cfg->reset_cmd = NULL; in ipr_ioa_reset_done()
7025 ioa_cfg->doorbell |= IPR_RUNTIME_RESET; in ipr_ioa_reset_done()
7027 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) { in ipr_ioa_reset_done()
7033 schedule_work(&ioa_cfg->work_q); in ipr_ioa_reset_done()
7035 list_for_each_entry_safe(hostrcb, temp, &ioa_cfg->hostrcb_free_q, queue) { in ipr_ioa_reset_done()
7038 ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_LOG_DATA, hostrcb); in ipr_ioa_reset_done()
7040 ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE, hostrcb); in ipr_ioa_reset_done()
7043 scsi_report_bus_reset(ioa_cfg->host, IPR_VSET_BUS); in ipr_ioa_reset_done()
7044 dev_info(&ioa_cfg->pdev->dev, "IOA initialized.\n"); in ipr_ioa_reset_done()
7046 ioa_cfg->reset_retries = 0; in ipr_ioa_reset_done()
7048 wake_up_all(&ioa_cfg->reset_wait_q); in ipr_ioa_reset_done()
7050 spin_unlock(ioa_cfg->host->host_lock); in ipr_ioa_reset_done()
7051 scsi_unblock_requests(ioa_cfg->host); in ipr_ioa_reset_done()
7052 spin_lock(ioa_cfg->host->host_lock); in ipr_ioa_reset_done()
7054 if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds) in ipr_ioa_reset_done()
7055 scsi_block_requests(ioa_cfg->host); in ipr_ioa_reset_done()
7057 schedule_work(&ioa_cfg->work_q); in ipr_ioa_reset_done()
7092 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; in ipr_set_supported_devs() local
7093 struct ipr_supported_device *supp_dev = &ioa_cfg->vpd_cbs->supp_dev; in ipr_set_supported_devs()
7099 list_for_each_entry_continue(res, &ioa_cfg->used_res_q, queue) { in ipr_set_supported_devs()
7116 ioa_cfg->vpd_cbs_dma + in ipr_set_supported_devs()
7124 if (!ioa_cfg->sis64) in ipr_set_supported_devs()
7183 static void ipr_check_term_power(struct ipr_ioa_cfg *ioa_cfg, in ipr_check_term_power() argument
7200 dev_err(&ioa_cfg->pdev->dev, in ipr_check_term_power()
7220 static void ipr_scsi_bus_speed_limit(struct ipr_ioa_cfg *ioa_cfg) in ipr_scsi_bus_speed_limit() argument
7226 max_xfer_rate = ipr_get_max_scsi_speed(ioa_cfg, i, in ipr_scsi_bus_speed_limit()
7227 ioa_cfg->bus_attr[i].bus_width); in ipr_scsi_bus_speed_limit()
7229 if (max_xfer_rate < ioa_cfg->bus_attr[i].max_xfer_rate) in ipr_scsi_bus_speed_limit()
7230 ioa_cfg->bus_attr[i].max_xfer_rate = max_xfer_rate; in ipr_scsi_bus_speed_limit()
7244 static void ipr_modify_ioafp_mode_page_28(struct ipr_ioa_cfg *ioa_cfg, in ipr_modify_ioafp_mode_page_28() argument
7262 dev_err(&ioa_cfg->pdev->dev, in ipr_modify_ioafp_mode_page_28()
7268 bus_attr = &ioa_cfg->bus_attr[i]; in ipr_modify_ioafp_mode_page_28()
7319 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; in ipr_ioafp_mode_select_page28() local
7320 struct ipr_mode_pages *mode_pages = &ioa_cfg->vpd_cbs->mode_pages; in ipr_ioafp_mode_select_page28()
7324 ipr_scsi_bus_speed_limit(ioa_cfg); in ipr_ioafp_mode_select_page28()
7325 ipr_check_term_power(ioa_cfg, mode_pages); in ipr_ioafp_mode_select_page28()
7326 ipr_modify_ioafp_mode_page_28(ioa_cfg, mode_pages); in ipr_ioafp_mode_select_page28()
7331 ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, mode_pages), in ipr_ioafp_mode_select_page28()
7335 ipr_cmd->u.res = list_entry(ioa_cfg->used_res_q.next, in ipr_ioafp_mode_select_page28()
7380 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; in ipr_reset_cmd_failed() local
7383 dev_err(&ioa_cfg->pdev->dev, in ipr_reset_cmd_failed()
7387 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE); in ipr_reset_cmd_failed()
7404 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; in ipr_reset_mode_sense_failed() local
7409 ipr_cmd->u.res = list_entry(ioa_cfg->used_res_q.next, in ipr_reset_mode_sense_failed()
7429 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; in ipr_ioafp_mode_sense_page28() local
7433 0x28, ioa_cfg->vpd_cbs_dma + in ipr_ioafp_mode_sense_page28()
7457 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; in ipr_ioafp_mode_select_page24() local
7458 struct ipr_mode_pages *mode_pages = &ioa_cfg->vpd_cbs->mode_pages; in ipr_ioafp_mode_select_page24()
7473 ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, mode_pages), in ipr_ioafp_mode_select_page24()
7517 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; in ipr_ioafp_mode_sense_page24() local
7521 0x24, ioa_cfg->vpd_cbs_dma + in ipr_ioafp_mode_sense_page24()
7548 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; in ipr_init_res_table() local
7555 if (ioa_cfg->sis64) in ipr_init_res_table()
7556 flag = ioa_cfg->u.cfg_table64->hdr64.flags; in ipr_init_res_table()
7558 flag = ioa_cfg->u.cfg_table->hdr.flags; in ipr_init_res_table()
7561 dev_err(&ioa_cfg->pdev->dev, "Microcode download required\n"); in ipr_init_res_table()
7563 list_for_each_entry_safe(res, temp, &ioa_cfg->used_res_q, queue) in ipr_init_res_table()
7566 if (ioa_cfg->sis64) in ipr_init_res_table()
7567 entries = be16_to_cpu(ioa_cfg->u.cfg_table64->hdr64.num_entries); in ipr_init_res_table()
7569 entries = ioa_cfg->u.cfg_table->hdr.num_entries; in ipr_init_res_table()
7572 if (ioa_cfg->sis64) in ipr_init_res_table()
7573 cfgtew.u.cfgte64 = &ioa_cfg->u.cfg_table64->dev[i]; in ipr_init_res_table()
7575 cfgtew.u.cfgte = &ioa_cfg->u.cfg_table->dev[i]; in ipr_init_res_table()
7580 list_move_tail(&res->queue, &ioa_cfg->used_res_q); in ipr_init_res_table()
7587 if (list_empty(&ioa_cfg->free_res_q)) { in ipr_init_res_table()
7588 dev_err(&ioa_cfg->pdev->dev, "Too many devices attached\n"); in ipr_init_res_table()
7593 res = list_entry(ioa_cfg->free_res_q.next, in ipr_init_res_table()
7595 list_move_tail(&res->queue, &ioa_cfg->used_res_q); in ipr_init_res_table()
7609 list_move_tail(&res->queue, &ioa_cfg->used_res_q); in ipr_init_res_table()
7615 list_move_tail(&res->queue, &ioa_cfg->free_res_q); in ipr_init_res_table()
7618 if (ioa_cfg->dual_raid && ipr_dual_ioa_raid) in ipr_init_res_table()
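
Note: ipr_init_res_table() reconciles the freshly fetched config table against the driver's resource lists: entries still present stay on used_res_q, new devices are pulled from free_res_q (with a "Too many devices attached" error if it is empty), and vanished devices go back to the free list. A simplified user-space model of that reconciliation, assuming a fixed pool of slots instead of kernel list_heads:

    #include <stdio.h>

    #define POOL 4

    struct res { int handle; int in_use; };

    /* Reconcile: keep entries found in the new table, claim free slots
       for new handles, and recycle slots whose handle vanished. */
    static void reconcile(struct res pool[], int n, const int *table, int tlen)
    {
        int found[POOL] = { 0 };
        for (int t = 0; t < tlen; t++) {
            int hit = 0;
            for (int i = 0; i < n; i++)
                if (pool[i].in_use && pool[i].handle == table[t]) {
                    found[i] = 1; hit = 1; break;
                }
            if (!hit)                       /* new device: take a free slot */
                for (int i = 0; i < n; i++)
                    if (!pool[i].in_use) {
                        pool[i].handle = table[t];
                        pool[i].in_use = found[i] = 1;
                        break;
                    }
        }
        for (int i = 0; i < n; i++)         /* vanished device: free slot */
            if (pool[i].in_use && !found[i])
                pool[i].in_use = 0;
    }

    int main(void)
    {
        struct res pool[POOL] = { {10, 1}, {11, 1} };
        int table[] = { 11, 12 };           /* 10 is gone, 12 is new */
        reconcile(pool, POOL, table, 2);
        for (int i = 0; i < POOL; i++)
            if (pool[i].in_use) printf("%d ", pool[i].handle);
        printf("\n");                       /* prints: 11 12 */
        return 0;
    }
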
7639 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; in ipr_ioafp_query_ioa_cfg() local
7641 struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data; in ipr_ioafp_query_ioa_cfg()
7642 struct ipr_inquiry_cap *cap = &ioa_cfg->vpd_cbs->cap; in ipr_ioafp_query_ioa_cfg()
7646 ioa_cfg->dual_raid = 1; in ipr_ioafp_query_ioa_cfg()
7647 dev_info(&ioa_cfg->pdev->dev, "Adapter firmware version: %02X%02X%02X%02X\n", in ipr_ioafp_query_ioa_cfg()
7654 ioarcb->cmd_pkt.cdb[6] = (ioa_cfg->cfg_table_size >> 16) & 0xff; in ipr_ioafp_query_ioa_cfg()
7655 ioarcb->cmd_pkt.cdb[7] = (ioa_cfg->cfg_table_size >> 8) & 0xff; in ipr_ioafp_query_ioa_cfg()
7656 ioarcb->cmd_pkt.cdb[8] = ioa_cfg->cfg_table_size & 0xff; in ipr_ioafp_query_ioa_cfg()
7658 ipr_init_ioadl(ipr_cmd, ioa_cfg->cfg_table_dma, ioa_cfg->cfg_table_size, in ipr_ioafp_query_ioa_cfg()
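
Note: the cdb[6..8] stores above encode cfg_table_size as a 24-bit big-endian allocation length inside the CDB, the usual SCSI convention for multi-byte lengths. A tiny sketch of the pack/unpack arithmetic:

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
        uint8_t cdb[16] = { 0 };
        uint32_t len = 0x012345;            /* example length */

        cdb[6] = (len >> 16) & 0xff;        /* most significant byte */
        cdb[7] = (len >> 8) & 0xff;
        cdb[8] = len & 0xff;                /* least significant byte */

        uint32_t back = (cdb[6] << 16) | (cdb[7] << 8) | cdb[8];
        printf("0x%06x\n", back);           /* prints 0x012345 */
        return 0;
    }
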
7731 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; in ipr_ioafp_cap_inquiry() local
7732 struct ipr_inquiry_page0 *page0 = &ioa_cfg->vpd_cbs->page0_data; in ipr_ioafp_cap_inquiry()
7733 struct ipr_inquiry_cap *cap = &ioa_cfg->vpd_cbs->cap; in ipr_ioafp_cap_inquiry()
7741 ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, cap), in ipr_ioafp_cap_inquiry()
7762 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; in ipr_ioafp_page3_inquiry() local
7769 ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, page3_data), in ipr_ioafp_page3_inquiry()
7788 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; in ipr_ioafp_page0_inquiry() local
7794 memcpy(type, ioa_cfg->vpd_cbs->ioa_vpd.std_inq_data.vpids.product_id, 4); in ipr_ioafp_page0_inquiry()
7796 ioa_cfg->type = simple_strtoul((char *)type, NULL, 16); in ipr_ioafp_page0_inquiry()
7798 if (ipr_invalid_adapter(ioa_cfg)) { in ipr_ioafp_page0_inquiry()
7799 dev_err(&ioa_cfg->pdev->dev, in ipr_ioafp_page0_inquiry()
7803 ioa_cfg->reset_retries += IPR_NUM_RESET_RELOAD_RETRIES; in ipr_ioafp_page0_inquiry()
7804 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE); in ipr_ioafp_page0_inquiry()
7806 &ioa_cfg->hrrq->hrrq_free_q); in ipr_ioafp_page0_inquiry()
7814 ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, page0_data), in ipr_ioafp_page0_inquiry()
7832 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; in ipr_ioafp_std_inquiry() local
7838 ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, ioa_vpd), in ipr_ioafp_std_inquiry()
7857 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; in ipr_ioafp_identify_hrrq() local
7863 dev_info(&ioa_cfg->pdev->dev, "Starting IOA initialization sequence.\n"); in ipr_ioafp_identify_hrrq()
7865 if (ioa_cfg->identify_hrrq_index < ioa_cfg->hrrq_num) { in ipr_ioafp_identify_hrrq()
7866 hrrq = &ioa_cfg->hrrq[ioa_cfg->identify_hrrq_index]; in ipr_ioafp_identify_hrrq()
7872 if (ioa_cfg->sis64) in ipr_ioafp_identify_hrrq()
7875 if (ioa_cfg->nvectors == 1) in ipr_ioafp_identify_hrrq()
7895 ioa_cfg->identify_hrrq_index; in ipr_ioafp_identify_hrrq()
7897 if (ioa_cfg->sis64) { in ipr_ioafp_identify_hrrq()
7910 ioa_cfg->identify_hrrq_index; in ipr_ioafp_identify_hrrq()
7915 if (++ioa_cfg->identify_hrrq_index < ioa_cfg->hrrq_num) in ipr_ioafp_identify_hrrq()
7941 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; in ipr_reset_timer_done() local
7944 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); in ipr_reset_timer_done()
7946 if (ioa_cfg->reset_cmd == ipr_cmd) { in ipr_reset_timer_done()
7951 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_reset_timer_done()
7989 static void ipr_init_ioa_mem(struct ipr_ioa_cfg *ioa_cfg) in ipr_init_ioa_mem() argument
7993 for_each_hrrq(hrrq, ioa_cfg) { in ipr_init_ioa_mem()
8006 ioa_cfg->identify_hrrq_index = 0; in ipr_init_ioa_mem()
8007 if (ioa_cfg->hrrq_num == 1) in ipr_init_ioa_mem()
8008 atomic_set(&ioa_cfg->hrrq_index, 0); in ipr_init_ioa_mem()
8010 atomic_set(&ioa_cfg->hrrq_index, 1); in ipr_init_ioa_mem()
8013 memset(ioa_cfg->u.cfg_table, 0, ioa_cfg->cfg_table_size); in ipr_init_ioa_mem()
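
Note: ipr_init_ioa_mem() starts hrrq_index at 0 when there is a single queue but at 1 otherwise, because queue 0 is reserved for internal commands when multiple HRRQs exist. A simplified, non-atomic model of the resulting round-robin selection (the driver uses an atomic counter; this is illustration only):

    #include <stdio.h>

    static unsigned int hrrq_index;

    /* Queue 0 is reserved for internal commands when more than one
       queue exists, so I/O selection wraps over queues 1..n-1. */
    static unsigned int next_hrrq(unsigned int hrrq_num)
    {
        if (hrrq_num == 1)
            return 0;
        return (hrrq_index++ % (hrrq_num - 1)) + 1;
    }

    int main(void)
    {
        for (int i = 0; i < 6; i++)
            printf("%u ", next_hrrq(4));    /* prints 1 2 3 1 2 3 */
        printf("\n");
        return 0;
    }
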
8028 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; in ipr_reset_next_stage() local
8031 feedback = readl(ioa_cfg->regs.init_feedback_reg); in ipr_reset_next_stage()
8046 writel(IPR_PCII_IPL_STAGE_CHANGE, ioa_cfg->regs.set_interrupt_mask_reg); in ipr_reset_next_stage()
8047 int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg); in ipr_reset_next_stage()
8048 stage_time = ioa_cfg->transop_timeout; in ipr_reset_next_stage()
8051 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32); in ipr_reset_next_stage()
8056 writeq(maskval, ioa_cfg->regs.set_interrupt_mask_reg); in ipr_reset_next_stage()
8057 int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg); in ipr_reset_next_stage()
8085 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; in ipr_reset_enable_ioa() local
8092 ipr_init_ioa_mem(ioa_cfg); in ipr_reset_enable_ioa()
8094 for (i = 0; i < ioa_cfg->hrrq_num; i++) { in ipr_reset_enable_ioa()
8095 spin_lock(&ioa_cfg->hrrq[i]._lock); in ipr_reset_enable_ioa()
8096 ioa_cfg->hrrq[i].allow_interrupts = 1; in ipr_reset_enable_ioa()
8097 spin_unlock(&ioa_cfg->hrrq[i]._lock); in ipr_reset_enable_ioa()
8100 if (ioa_cfg->sis64) { in ipr_reset_enable_ioa()
8102 writel(IPR_ENDIAN_SWAP_KEY, ioa_cfg->regs.endian_swap_reg); in ipr_reset_enable_ioa()
8103 int_reg = readl(ioa_cfg->regs.endian_swap_reg); in ipr_reset_enable_ioa()
8106 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32); in ipr_reset_enable_ioa()
8110 ioa_cfg->regs.clr_interrupt_mask_reg32); in ipr_reset_enable_ioa()
8111 int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg); in ipr_reset_enable_ioa()
8116 writel(ioa_cfg->doorbell, ioa_cfg->regs.set_uproc_interrupt_reg32); in ipr_reset_enable_ioa()
8118 if (ioa_cfg->sis64) { in ipr_reset_enable_ioa()
8121 writeq(maskval, ioa_cfg->regs.clr_interrupt_mask_reg); in ipr_reset_enable_ioa()
8123 writel(IPR_PCII_OPER_INTERRUPTS, ioa_cfg->regs.clr_interrupt_mask_reg32); in ipr_reset_enable_ioa()
8125 int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg); in ipr_reset_enable_ioa()
8127 dev_info(&ioa_cfg->pdev->dev, "Initializing IOA.\n"); in ipr_reset_enable_ioa()
8129 if (ioa_cfg->sis64) { in ipr_reset_enable_ioa()
8135 ipr_cmd->timer.expires = jiffies + (ioa_cfg->transop_timeout * HZ); in ipr_reset_enable_ioa()
8157 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; in ipr_reset_wait_for_dump() local
8159 if (ioa_cfg->sdt_state == GET_DUMP) in ipr_reset_wait_for_dump()
8160 ioa_cfg->sdt_state = WAIT_FOR_DUMP; in ipr_reset_wait_for_dump()
8161 else if (ioa_cfg->sdt_state == READ_DUMP) in ipr_reset_wait_for_dump()
8162 ioa_cfg->sdt_state = ABORT_DUMP; in ipr_reset_wait_for_dump()
8164 ioa_cfg->dump_timeout = 1; in ipr_reset_wait_for_dump()
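
Note: ipr_reset_wait_for_dump() above is a small state transition: a dump that was merely requested (GET_DUMP) is deferred until after the reset (WAIT_FOR_DUMP), while one already being read (READ_DUMP) is aborted (ABORT_DUMP). Sketched as a plain function over an enum:

    #include <stdio.h>

    enum sdt_state { INACTIVE, GET_DUMP, WAIT_FOR_DUMP, READ_DUMP, ABORT_DUMP };

    /* Defer a requested dump until after the reset; abort one that is
       already in progress. Any other state is left untouched. */
    static enum sdt_state dump_state_on_timeout(enum sdt_state s)
    {
        if (s == GET_DUMP)
            return WAIT_FOR_DUMP;
        if (s == READ_DUMP)
            return ABORT_DUMP;
        return s;
    }

    int main(void)
    {
        printf("%d %d\n", dump_state_on_timeout(GET_DUMP),
               dump_state_on_timeout(READ_DUMP));   /* prints 2 4 */
        return 0;
    }
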
8180 static void ipr_unit_check_no_data(struct ipr_ioa_cfg *ioa_cfg) in ipr_unit_check_no_data() argument
8182 ioa_cfg->errors_logged++; in ipr_unit_check_no_data()
8183 dev_err(&ioa_cfg->pdev->dev, "IOA unit check with no data\n"); in ipr_unit_check_no_data()
8196 static void ipr_get_unit_check_buffer(struct ipr_ioa_cfg *ioa_cfg) in ipr_get_unit_check_buffer() argument
8204 mailbox = readl(ioa_cfg->ioa_mailbox); in ipr_get_unit_check_buffer()
8206 if (!ioa_cfg->sis64 && !ipr_sdt_is_fmt2(mailbox)) { in ipr_get_unit_check_buffer()
8207 ipr_unit_check_no_data(ioa_cfg); in ipr_get_unit_check_buffer()
8212 rc = ipr_get_ldump_data_section(ioa_cfg, mailbox, (__be32 *) &sdt, in ipr_get_unit_check_buffer()
8218 ipr_unit_check_no_data(ioa_cfg); in ipr_get_unit_check_buffer()
8230 hostrcb = list_entry(ioa_cfg->hostrcb_free_q.next, in ipr_get_unit_check_buffer()
8235 rc = ipr_get_ldump_data_section(ioa_cfg, in ipr_get_unit_check_buffer()
8241 ipr_handle_log_data(ioa_cfg, hostrcb); in ipr_get_unit_check_buffer()
8244 ioa_cfg->sdt_state == GET_DUMP) in ipr_get_unit_check_buffer()
8245 ioa_cfg->sdt_state = WAIT_FOR_DUMP; in ipr_get_unit_check_buffer()
8247 ipr_unit_check_no_data(ioa_cfg); in ipr_get_unit_check_buffer()
8249 list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_free_q); in ipr_get_unit_check_buffer()
8263 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; in ipr_reset_get_unit_check_job() local
8266 ioa_cfg->ioa_unit_checked = 0; in ipr_reset_get_unit_check_job()
8267 ipr_get_unit_check_buffer(ioa_cfg); in ipr_reset_get_unit_check_job()
8288 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; in ipr_reset_restore_cfg_space() local
8292 ioa_cfg->pdev->state_saved = true; in ipr_reset_restore_cfg_space()
8293 pci_restore_state(ioa_cfg->pdev); in ipr_reset_restore_cfg_space()
8295 if (ipr_set_pcix_cmd_reg(ioa_cfg)) { in ipr_reset_restore_cfg_space()
8300 ipr_fail_all_ops(ioa_cfg); in ipr_reset_restore_cfg_space()
8302 if (ioa_cfg->sis64) { in ipr_reset_restore_cfg_space()
8304 writel(IPR_ENDIAN_SWAP_KEY, ioa_cfg->regs.endian_swap_reg); in ipr_reset_restore_cfg_space()
8305 int_reg = readl(ioa_cfg->regs.endian_swap_reg); in ipr_reset_restore_cfg_space()
8308 if (ioa_cfg->ioa_unit_checked) { in ipr_reset_restore_cfg_space()
8309 if (ioa_cfg->sis64) { in ipr_reset_restore_cfg_space()
8314 ioa_cfg->ioa_unit_checked = 0; in ipr_reset_restore_cfg_space()
8315 ipr_get_unit_check_buffer(ioa_cfg); in ipr_reset_restore_cfg_space()
8322 if (ioa_cfg->in_ioa_bringdown) { in ipr_reset_restore_cfg_space()
8327 if (GET_DUMP == ioa_cfg->sdt_state) { in ipr_reset_restore_cfg_space()
8328 ioa_cfg->sdt_state = READ_DUMP; in ipr_reset_restore_cfg_space()
8329 ioa_cfg->dump_timeout = 0; in ipr_reset_restore_cfg_space()
8330 if (ioa_cfg->sis64) in ipr_reset_restore_cfg_space()
8335 schedule_work(&ioa_cfg->work_q); in ipr_reset_restore_cfg_space()
8355 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; in ipr_reset_bist_done() local
8358 if (ioa_cfg->cfg_locked) in ipr_reset_bist_done()
8359 pci_cfg_access_unlock(ioa_cfg->pdev); in ipr_reset_bist_done()
8360 ioa_cfg->cfg_locked = 0; in ipr_reset_bist_done()
8377 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; in ipr_reset_start_bist() local
8381 if (ioa_cfg->ipr_chip->bist_method == IPR_MMIO) in ipr_reset_start_bist()
8383 ioa_cfg->regs.set_uproc_interrupt_reg32); in ipr_reset_start_bist()
8385 rc = pci_write_config_byte(ioa_cfg->pdev, PCI_BIST, PCI_BIST_START); in ipr_reset_start_bist()
8392 if (ioa_cfg->cfg_locked) in ipr_reset_start_bist()
8393 pci_cfg_access_unlock(ipr_cmd->ioa_cfg->pdev); in ipr_reset_start_bist()
8394 ioa_cfg->cfg_locked = 0; in ipr_reset_start_bist()
8431 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; in ipr_reset_reset_work() local
8432 struct pci_dev *pdev = ioa_cfg->pdev; in ipr_reset_reset_work()
8440 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); in ipr_reset_reset_work()
8441 if (ioa_cfg->reset_cmd == ipr_cmd) in ipr_reset_reset_work()
8443 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_reset_reset_work()
8458 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; in ipr_reset_slot_reset() local
8462 queue_work(ioa_cfg->reset_work_q, &ipr_cmd->work); in ipr_reset_slot_reset()
8479 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; in ipr_reset_block_config_access_wait() local
8482 if (pci_cfg_access_trylock(ioa_cfg->pdev)) { in ipr_reset_block_config_access_wait()
8483 ioa_cfg->cfg_locked = 1; in ipr_reset_block_config_access_wait()
8484 ipr_cmd->job_step = ioa_cfg->reset; in ipr_reset_block_config_access_wait()
8492 ipr_cmd->job_step = ioa_cfg->reset; in ipr_reset_block_config_access_wait()
8493 dev_err(&ioa_cfg->pdev->dev, in ipr_reset_block_config_access_wait()
8512 ipr_cmd->ioa_cfg->cfg_locked = 0; in ipr_reset_block_config_access()
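
Note: ipr_reset_block_config_access_wait() tries pci_cfg_access_trylock() and, if PCI config space is busy, reschedules itself until a time budget runs out, then logs an error and proceeds with the reset anyway. A loose user-space analog of that retry-then-give-up shape, using pthread_mutex_trylock in place of the PCI config lock (an assumption for illustration only):

    #include <stdio.h>
    #include <pthread.h>

    static pthread_mutex_t cfg_lock = PTHREAD_MUTEX_INITIALIZER;

    /* Try to lock config access; give up after 'retries' attempts and
       proceed anyway, as the listing above does when the wait expires.
       Returns 1 if the lock was taken. */
    static int block_config_access(int retries)
    {
        while (retries-- > 0) {
            if (pthread_mutex_trylock(&cfg_lock) == 0)
                return 1;
            /* in the driver this is a timed job-step reschedule */
        }
        fprintf(stderr, "timed out waiting to lock config access\n");
        return 0;
    }

    int main(void)
    {
        if (block_config_access(10))
            pthread_mutex_unlock(&cfg_lock);
        return 0;
    }
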
8525 static int ipr_reset_allowed(struct ipr_ioa_cfg *ioa_cfg) in ipr_reset_allowed() argument
8529 temp_reg = readl(ioa_cfg->regs.sense_interrupt_reg); in ipr_reset_allowed()
8550 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; in ipr_reset_wait_to_start_bist() local
8553 if (!ipr_reset_allowed(ioa_cfg) && ipr_cmd->u.time_left) { in ipr_reset_wait_to_start_bist()
8578 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; in ipr_reset_alert() local
8583 rc = pci_read_config_word(ioa_cfg->pdev, PCI_COMMAND, &cmd_reg); in ipr_reset_alert()
8586 ipr_mask_and_clear_interrupts(ioa_cfg, ~0); in ipr_reset_alert()
8587 writel(IPR_UPROCI_RESET_ALERT, ioa_cfg->regs.set_uproc_interrupt_reg32); in ipr_reset_alert()
8611 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; in ipr_reset_quiesce_done() local
8615 ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER); in ipr_reset_quiesce_done()
8632 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; in ipr_reset_cancel_hcam_done() local
8641 for_each_hrrq(hrrq, ioa_cfg) { in ipr_reset_cancel_hcam_done()
8645 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE); in ipr_reset_cancel_hcam_done()
8671 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; in ipr_reset_cancel_hcam() local
8675 struct ipr_hrr_queue *hrrq = &ioa_cfg->hrrq[IPR_INIT_HRRQ]; in ipr_reset_cancel_hcam()
8681 if (!list_empty(&ioa_cfg->hostrcb_pending_q)) { in ipr_reset_cancel_hcam()
8727 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; in ipr_reset_ucode_download_done() local
8728 struct ipr_sglist *sglist = ioa_cfg->ucode_sglist; in ipr_reset_ucode_download_done()
8730 dma_unmap_sg(&ioa_cfg->pdev->dev, sglist->scatterlist, in ipr_reset_ucode_download_done()
8749 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; in ipr_reset_ucode_download() local
8750 struct ipr_sglist *sglist = ioa_cfg->ucode_sglist; in ipr_reset_ucode_download()
8766 if (ioa_cfg->sis64) in ipr_reset_ucode_download()
8792 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; in ipr_reset_shutdown_ioa() local
8801 !ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead) { in ipr_reset_shutdown_ioa()
8811 else if (ioa_cfg->dual_raid && ipr_dual_ioa_raid) in ipr_reset_shutdown_ioa()
8839 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; in ipr_reset_ioa_job() local
8844 if (ioa_cfg->reset_cmd != ipr_cmd) { in ipr_reset_ioa_job()
8880 static void _ipr_initiate_ioa_reset(struct ipr_ioa_cfg *ioa_cfg, in _ipr_initiate_ioa_reset() argument
8887 ioa_cfg->in_reset_reload = 1; in _ipr_initiate_ioa_reset()
8888 for (i = 0; i < ioa_cfg->hrrq_num; i++) { in _ipr_initiate_ioa_reset()
8889 spin_lock(&ioa_cfg->hrrq[i]._lock); in _ipr_initiate_ioa_reset()
8890 ioa_cfg->hrrq[i].allow_cmds = 0; in _ipr_initiate_ioa_reset()
8891 spin_unlock(&ioa_cfg->hrrq[i]._lock); in _ipr_initiate_ioa_reset()
8894 if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].removing_ioa) in _ipr_initiate_ioa_reset()
8895 scsi_block_requests(ioa_cfg->host); in _ipr_initiate_ioa_reset()
8897 ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg); in _ipr_initiate_ioa_reset()
8898 ioa_cfg->reset_cmd = ipr_cmd; in _ipr_initiate_ioa_reset()
8917 static void ipr_initiate_ioa_reset(struct ipr_ioa_cfg *ioa_cfg, in ipr_initiate_ioa_reset() argument
8922 if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead) in ipr_initiate_ioa_reset()
8925 if (ioa_cfg->in_reset_reload) { in ipr_initiate_ioa_reset()
8926 if (ioa_cfg->sdt_state == GET_DUMP) in ipr_initiate_ioa_reset()
8927 ioa_cfg->sdt_state = WAIT_FOR_DUMP; in ipr_initiate_ioa_reset()
8928 else if (ioa_cfg->sdt_state == READ_DUMP) in ipr_initiate_ioa_reset()
8929 ioa_cfg->sdt_state = ABORT_DUMP; in ipr_initiate_ioa_reset()
8932 if (ioa_cfg->reset_retries++ >= IPR_NUM_RESET_RELOAD_RETRIES) { in ipr_initiate_ioa_reset()
8933 dev_err(&ioa_cfg->pdev->dev, in ipr_initiate_ioa_reset()
8936 ioa_cfg->reset_retries = 0; in ipr_initiate_ioa_reset()
8937 for (i = 0; i < ioa_cfg->hrrq_num; i++) { in ipr_initiate_ioa_reset()
8938 spin_lock(&ioa_cfg->hrrq[i]._lock); in ipr_initiate_ioa_reset()
8939 ioa_cfg->hrrq[i].ioa_is_dead = 1; in ipr_initiate_ioa_reset()
8940 spin_unlock(&ioa_cfg->hrrq[i]._lock); in ipr_initiate_ioa_reset()
8944 if (ioa_cfg->in_ioa_bringdown) { in ipr_initiate_ioa_reset()
8945 ioa_cfg->reset_cmd = NULL; in ipr_initiate_ioa_reset()
8946 ioa_cfg->in_reset_reload = 0; in ipr_initiate_ioa_reset()
8947 ipr_fail_all_ops(ioa_cfg); in ipr_initiate_ioa_reset()
8948 wake_up_all(&ioa_cfg->reset_wait_q); in ipr_initiate_ioa_reset()
8950 if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].removing_ioa) { in ipr_initiate_ioa_reset()
8951 spin_unlock_irq(ioa_cfg->host->host_lock); in ipr_initiate_ioa_reset()
8952 scsi_unblock_requests(ioa_cfg->host); in ipr_initiate_ioa_reset()
8953 spin_lock_irq(ioa_cfg->host->host_lock); in ipr_initiate_ioa_reset()
8957 ioa_cfg->in_ioa_bringdown = 1; in ipr_initiate_ioa_reset()
8962 _ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_shutdown_ioa, in ipr_initiate_ioa_reset()
8976 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; in ipr_reset_freeze() local
8980 for (i = 0; i < ioa_cfg->hrrq_num; i++) { in ipr_reset_freeze()
8981 spin_lock(&ioa_cfg->hrrq[i]._lock); in ipr_reset_freeze()
8982 ioa_cfg->hrrq[i].allow_interrupts = 0; in ipr_reset_freeze()
8983 spin_unlock(&ioa_cfg->hrrq[i]._lock); in ipr_reset_freeze()
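
Note: ipr_reset_freeze(), like the other for-each-hrrq loops throughout this listing, flips a per-queue flag while holding that queue's own _lock so in-flight completion paths see a consistent value. A minimal pthread model of the same pattern:

    #include <stdio.h>
    #include <pthread.h>

    #define NQ 4

    struct hrrq { pthread_mutex_t lock; int allow_interrupts; };

    /* Flip each queue's flag under that queue's own lock. */
    static void freeze_all(struct hrrq q[], int n)
    {
        for (int i = 0; i < n; i++) {
            pthread_mutex_lock(&q[i].lock);
            q[i].allow_interrupts = 0;
            pthread_mutex_unlock(&q[i].lock);
        }
    }

    int main(void)
    {
        struct hrrq q[NQ];
        for (int i = 0; i < NQ; i++) {
            pthread_mutex_init(&q[i].lock, NULL);
            q[i].allow_interrupts = 1;
        }
        freeze_all(q, NQ);
        printf("%d\n", q[0].allow_interrupts);  /* prints 0 */
        return 0;
    }
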
9001 struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev); in ipr_pci_mmio_enabled() local
9003 spin_lock_irqsave(ioa_cfg->host->host_lock, flags); in ipr_pci_mmio_enabled()
9004 if (!ioa_cfg->probe_done) in ipr_pci_mmio_enabled()
9006 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags); in ipr_pci_mmio_enabled()
9021 struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev); in ipr_pci_frozen() local
9023 spin_lock_irqsave(ioa_cfg->host->host_lock, flags); in ipr_pci_frozen()
9024 if (ioa_cfg->probe_done) in ipr_pci_frozen()
9025 _ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_freeze, IPR_SHUTDOWN_NONE); in ipr_pci_frozen()
9026 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags); in ipr_pci_frozen()
9040 struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev); in ipr_pci_slot_reset() local
9042 spin_lock_irqsave(ioa_cfg->host->host_lock, flags); in ipr_pci_slot_reset()
9043 if (ioa_cfg->probe_done) { in ipr_pci_slot_reset()
9044 if (ioa_cfg->needs_warm_reset) in ipr_pci_slot_reset()
9045 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE); in ipr_pci_slot_reset()
9047 _ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_restore_cfg_space, in ipr_pci_slot_reset()
9050 wake_up_all(&ioa_cfg->eeh_wait_q); in ipr_pci_slot_reset()
9051 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags); in ipr_pci_slot_reset()
9065 struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev); in ipr_pci_perm_failure() local
9068 spin_lock_irqsave(ioa_cfg->host->host_lock, flags); in ipr_pci_perm_failure()
9069 if (ioa_cfg->probe_done) { in ipr_pci_perm_failure()
9070 if (ioa_cfg->sdt_state == WAIT_FOR_DUMP) in ipr_pci_perm_failure()
9071 ioa_cfg->sdt_state = ABORT_DUMP; in ipr_pci_perm_failure()
9072 ioa_cfg->reset_retries = IPR_NUM_RESET_RELOAD_RETRIES - 1; in ipr_pci_perm_failure()
9073 ioa_cfg->in_ioa_bringdown = 1; in ipr_pci_perm_failure()
9074 for (i = 0; i < ioa_cfg->hrrq_num; i++) { in ipr_pci_perm_failure()
9075 spin_lock(&ioa_cfg->hrrq[i]._lock); in ipr_pci_perm_failure()
9076 ioa_cfg->hrrq[i].allow_cmds = 0; in ipr_pci_perm_failure()
9077 spin_unlock(&ioa_cfg->hrrq[i]._lock); in ipr_pci_perm_failure()
9080 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE); in ipr_pci_perm_failure()
9082 wake_up_all(&ioa_cfg->eeh_wait_q); in ipr_pci_perm_failure()
9083 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags); in ipr_pci_perm_failure()
9124 static int ipr_probe_ioa_part2(struct ipr_ioa_cfg *ioa_cfg) in ipr_probe_ioa_part2() argument
9130 spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags); in ipr_probe_ioa_part2()
9131 dev_dbg(&ioa_cfg->pdev->dev, "ioa_cfg adx: 0x%p\n", ioa_cfg); in ipr_probe_ioa_part2()
9132 ioa_cfg->probe_done = 1; in ipr_probe_ioa_part2()
9133 if (ioa_cfg->needs_hard_reset) { in ipr_probe_ioa_part2()
9134 ioa_cfg->needs_hard_reset = 0; in ipr_probe_ioa_part2()
9135 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE); in ipr_probe_ioa_part2()
9137 _ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_enable_ioa, in ipr_probe_ioa_part2()
9139 spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags); in ipr_probe_ioa_part2()
9152 static void ipr_free_cmd_blks(struct ipr_ioa_cfg *ioa_cfg) in ipr_free_cmd_blks() argument
9156 if (ioa_cfg->ipr_cmnd_list) { in ipr_free_cmd_blks()
9158 if (ioa_cfg->ipr_cmnd_list[i]) in ipr_free_cmd_blks()
9159 dma_pool_free(ioa_cfg->ipr_cmd_pool, in ipr_free_cmd_blks()
9160 ioa_cfg->ipr_cmnd_list[i], in ipr_free_cmd_blks()
9161 ioa_cfg->ipr_cmnd_list_dma[i]); in ipr_free_cmd_blks()
9163 ioa_cfg->ipr_cmnd_list[i] = NULL; in ipr_free_cmd_blks()
9167 if (ioa_cfg->ipr_cmd_pool) in ipr_free_cmd_blks()
9168 dma_pool_destroy(ioa_cfg->ipr_cmd_pool); in ipr_free_cmd_blks()
9170 kfree(ioa_cfg->ipr_cmnd_list); in ipr_free_cmd_blks()
9171 kfree(ioa_cfg->ipr_cmnd_list_dma); in ipr_free_cmd_blks()
9172 ioa_cfg->ipr_cmnd_list = NULL; in ipr_free_cmd_blks()
9173 ioa_cfg->ipr_cmnd_list_dma = NULL; in ipr_free_cmd_blks()
9174 ioa_cfg->ipr_cmd_pool = NULL; in ipr_free_cmd_blks()
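
Note: ipr_free_cmd_blks() frees every element, destroys the pool, frees the tracking arrays, and NULLs all the pointers, so a repeated call from an error path is a harmless no-op. A generic sketch of that idempotent-teardown pattern (the struct and names here are illustrative):

    #include <stdio.h>
    #include <stdlib.h>

    struct ctx { int **items; int nitems; };

    /* Free everything and NULL every pointer so a second call is safe. */
    static void free_blocks(struct ctx *c)
    {
        if (c->items) {
            for (int i = 0; i < c->nitems; i++) {
                free(c->items[i]);
                c->items[i] = NULL;
            }
        }
        free(c->items);
        c->items = NULL;
        c->nitems = 0;
    }

    int main(void)
    {
        struct ctx c = { calloc(2, sizeof(int *)), 2 };
        c.items[0] = malloc(4);
        c.items[1] = malloc(4);
        free_blocks(&c);
        free_blocks(&c);   /* safe: everything was NULLed */
        return 0;
    }
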
9184 static void ipr_free_mem(struct ipr_ioa_cfg *ioa_cfg) in ipr_free_mem() argument
9188 kfree(ioa_cfg->res_entries); in ipr_free_mem()
9189 dma_free_coherent(&ioa_cfg->pdev->dev, sizeof(struct ipr_misc_cbs), in ipr_free_mem()
9190 ioa_cfg->vpd_cbs, ioa_cfg->vpd_cbs_dma); in ipr_free_mem()
9191 ipr_free_cmd_blks(ioa_cfg); in ipr_free_mem()
9193 for (i = 0; i < ioa_cfg->hrrq_num; i++) in ipr_free_mem()
9194 dma_free_coherent(&ioa_cfg->pdev->dev, in ipr_free_mem()
9195 sizeof(u32) * ioa_cfg->hrrq[i].size, in ipr_free_mem()
9196 ioa_cfg->hrrq[i].host_rrq, in ipr_free_mem()
9197 ioa_cfg->hrrq[i].host_rrq_dma); in ipr_free_mem()
9199 dma_free_coherent(&ioa_cfg->pdev->dev, ioa_cfg->cfg_table_size, in ipr_free_mem()
9200 ioa_cfg->u.cfg_table, ioa_cfg->cfg_table_dma); in ipr_free_mem()
9203 dma_free_coherent(&ioa_cfg->pdev->dev, in ipr_free_mem()
9205 ioa_cfg->hostrcb[i], in ipr_free_mem()
9206 ioa_cfg->hostrcb_dma[i]); in ipr_free_mem()
9209 ipr_free_dump(ioa_cfg); in ipr_free_mem()
9210 kfree(ioa_cfg->trace); in ipr_free_mem()
9223 static void ipr_free_irqs(struct ipr_ioa_cfg *ioa_cfg) in ipr_free_irqs() argument
9225 struct pci_dev *pdev = ioa_cfg->pdev; in ipr_free_irqs()
9227 if (ioa_cfg->intr_flag == IPR_USE_MSI || in ipr_free_irqs()
9228 ioa_cfg->intr_flag == IPR_USE_MSIX) { in ipr_free_irqs()
9230 for (i = 0; i < ioa_cfg->nvectors; i++) in ipr_free_irqs()
9231 free_irq(ioa_cfg->vectors_info[i].vec, in ipr_free_irqs()
9232 &ioa_cfg->hrrq[i]); in ipr_free_irqs()
9234 free_irq(pdev->irq, &ioa_cfg->hrrq[0]); in ipr_free_irqs()
9236 if (ioa_cfg->intr_flag == IPR_USE_MSI) { in ipr_free_irqs()
9238 ioa_cfg->intr_flag &= ~IPR_USE_MSI; in ipr_free_irqs()
9239 } else if (ioa_cfg->intr_flag == IPR_USE_MSIX) { in ipr_free_irqs()
9241 ioa_cfg->intr_flag &= ~IPR_USE_MSIX; in ipr_free_irqs()
9255 static void ipr_free_all_resources(struct ipr_ioa_cfg *ioa_cfg) in ipr_free_all_resources() argument
9257 struct pci_dev *pdev = ioa_cfg->pdev; in ipr_free_all_resources()
9260 ipr_free_irqs(ioa_cfg); in ipr_free_all_resources()
9261 if (ioa_cfg->reset_work_q) in ipr_free_all_resources()
9262 destroy_workqueue(ioa_cfg->reset_work_q); in ipr_free_all_resources()
9263 iounmap(ioa_cfg->hdw_dma_regs); in ipr_free_all_resources()
9265 ipr_free_mem(ioa_cfg); in ipr_free_all_resources()
9266 scsi_host_put(ioa_cfg->host); in ipr_free_all_resources()
9278 static int ipr_alloc_cmd_blks(struct ipr_ioa_cfg *ioa_cfg) in ipr_alloc_cmd_blks() argument
9285 ioa_cfg->ipr_cmd_pool = dma_pool_create(IPR_NAME, &ioa_cfg->pdev->dev, in ipr_alloc_cmd_blks()
9288 if (!ioa_cfg->ipr_cmd_pool) in ipr_alloc_cmd_blks()
9291 ioa_cfg->ipr_cmnd_list = kcalloc(IPR_NUM_CMD_BLKS, sizeof(struct ipr_cmnd *), GFP_KERNEL); in ipr_alloc_cmd_blks()
9292 ioa_cfg->ipr_cmnd_list_dma = kcalloc(IPR_NUM_CMD_BLKS, sizeof(dma_addr_t), GFP_KERNEL); in ipr_alloc_cmd_blks()
9294 if (!ioa_cfg->ipr_cmnd_list || !ioa_cfg->ipr_cmnd_list_dma) { in ipr_alloc_cmd_blks()
9295 ipr_free_cmd_blks(ioa_cfg); in ipr_alloc_cmd_blks()
9299 for (i = 0; i < ioa_cfg->hrrq_num; i++) { in ipr_alloc_cmd_blks()
9300 if (ioa_cfg->hrrq_num > 1) { in ipr_alloc_cmd_blks()
9303 ioa_cfg->hrrq[i].min_cmd_id = 0; in ipr_alloc_cmd_blks()
9304 ioa_cfg->hrrq[i].max_cmd_id = in ipr_alloc_cmd_blks()
9309 (ioa_cfg->hrrq_num - 1); in ipr_alloc_cmd_blks()
9310 ioa_cfg->hrrq[i].min_cmd_id = in ipr_alloc_cmd_blks()
9313 ioa_cfg->hrrq[i].max_cmd_id = in ipr_alloc_cmd_blks()
9319 ioa_cfg->hrrq[i].min_cmd_id = 0; in ipr_alloc_cmd_blks()
9320 ioa_cfg->hrrq[i].max_cmd_id = (entries_each_hrrq - 1); in ipr_alloc_cmd_blks()
9322 ioa_cfg->hrrq[i].size = entries_each_hrrq; in ipr_alloc_cmd_blks()
9325 BUG_ON(ioa_cfg->hrrq_num == 0); in ipr_alloc_cmd_blks()
9328 ioa_cfg->hrrq[ioa_cfg->hrrq_num - 1].max_cmd_id - 1; in ipr_alloc_cmd_blks()
9330 ioa_cfg->hrrq[ioa_cfg->hrrq_num - 1].size += i; in ipr_alloc_cmd_blks()
9331 ioa_cfg->hrrq[ioa_cfg->hrrq_num - 1].max_cmd_id += i; in ipr_alloc_cmd_blks()
9335 ipr_cmd = dma_pool_alloc(ioa_cfg->ipr_cmd_pool, GFP_KERNEL, &dma_addr); in ipr_alloc_cmd_blks()
9338 ipr_free_cmd_blks(ioa_cfg); in ipr_alloc_cmd_blks()
9343 ioa_cfg->ipr_cmnd_list[i] = ipr_cmd; in ipr_alloc_cmd_blks()
9344 ioa_cfg->ipr_cmnd_list_dma[i] = dma_addr; in ipr_alloc_cmd_blks()
9348 if (ioa_cfg->sis64) in ipr_alloc_cmd_blks()
9354 if (ioa_cfg->sis64) { in ipr_alloc_cmd_blks()
9368 ipr_cmd->ioa_cfg = ioa_cfg; in ipr_alloc_cmd_blks()
9373 ipr_cmd->hrrq = &ioa_cfg->hrrq[hrrq_id]; in ipr_alloc_cmd_blks()
9375 if (i >= ioa_cfg->hrrq[hrrq_id].max_cmd_id) in ipr_alloc_cmd_blks()
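
Note: the ipr_alloc_cmd_blks() lines above carve the command-id space into per-HRRQ [min_cmd_id, max_cmd_id] ranges, with queue 0 keeping a reserved range for internal commands and the division remainder folded into the last queue. A simplified version of that arithmetic (NCMDS and RESERVED are illustrative constants, not the driver's):

    #include <stdio.h>

    #define NCMDS    100   /* total command blocks */
    #define RESERVED 8     /* ids kept on queue 0 for internal commands */

    int main(void)
    {
        int nq = 4;
        int per = (NCMDS - RESERVED) / (nq - 1);
        int min = 0, max = RESERVED - 1;

        printf("q0: %d-%d\n", min, max);
        for (int i = 1; i < nq; i++) {
            min = max + 1;
            max = min + per - 1;
            if (i == nq - 1)                 /* remainder to last queue */
                max += (NCMDS - RESERVED) % (nq - 1);
            printf("q%d: %d-%d\n", i, min, max);
        }
        return 0;   /* q0: 0-7, q1: 8-37, q2: 38-67, q3: 68-99 */
    }
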
9389 static int ipr_alloc_mem(struct ipr_ioa_cfg *ioa_cfg) in ipr_alloc_mem() argument
9391 struct pci_dev *pdev = ioa_cfg->pdev; in ipr_alloc_mem()
9395 ioa_cfg->res_entries = kzalloc(sizeof(struct ipr_resource_entry) * in ipr_alloc_mem()
9396 ioa_cfg->max_devs_supported, GFP_KERNEL); in ipr_alloc_mem()
9398 if (!ioa_cfg->res_entries) in ipr_alloc_mem()
9401 for (i = 0; i < ioa_cfg->max_devs_supported; i++) { in ipr_alloc_mem()
9402 list_add_tail(&ioa_cfg->res_entries[i].queue, &ioa_cfg->free_res_q); in ipr_alloc_mem()
9403 ioa_cfg->res_entries[i].ioa_cfg = ioa_cfg; in ipr_alloc_mem()
9406 ioa_cfg->vpd_cbs = dma_alloc_coherent(&pdev->dev, in ipr_alloc_mem()
9408 &ioa_cfg->vpd_cbs_dma, in ipr_alloc_mem()
9411 if (!ioa_cfg->vpd_cbs) in ipr_alloc_mem()
9414 if (ipr_alloc_cmd_blks(ioa_cfg)) in ipr_alloc_mem()
9417 for (i = 0; i < ioa_cfg->hrrq_num; i++) { in ipr_alloc_mem()
9418 ioa_cfg->hrrq[i].host_rrq = dma_alloc_coherent(&pdev->dev, in ipr_alloc_mem()
9419 sizeof(u32) * ioa_cfg->hrrq[i].size, in ipr_alloc_mem()
9420 &ioa_cfg->hrrq[i].host_rrq_dma, in ipr_alloc_mem()
9423 if (!ioa_cfg->hrrq[i].host_rrq) { in ipr_alloc_mem()
9426 sizeof(u32) * ioa_cfg->hrrq[i].size, in ipr_alloc_mem()
9427 ioa_cfg->hrrq[i].host_rrq, in ipr_alloc_mem()
9428 ioa_cfg->hrrq[i].host_rrq_dma); in ipr_alloc_mem()
9431 ioa_cfg->hrrq[i].ioa_cfg = ioa_cfg; in ipr_alloc_mem()
9434 ioa_cfg->u.cfg_table = dma_alloc_coherent(&pdev->dev, in ipr_alloc_mem()
9435 ioa_cfg->cfg_table_size, in ipr_alloc_mem()
9436 &ioa_cfg->cfg_table_dma, in ipr_alloc_mem()
9439 if (!ioa_cfg->u.cfg_table) in ipr_alloc_mem()
9443 ioa_cfg->hostrcb[i] = dma_alloc_coherent(&pdev->dev, in ipr_alloc_mem()
9445 &ioa_cfg->hostrcb_dma[i], in ipr_alloc_mem()
9448 if (!ioa_cfg->hostrcb[i]) in ipr_alloc_mem()
9451 ioa_cfg->hostrcb[i]->hostrcb_dma = in ipr_alloc_mem()
9452 ioa_cfg->hostrcb_dma[i] + offsetof(struct ipr_hostrcb, hcam); in ipr_alloc_mem()
9453 ioa_cfg->hostrcb[i]->ioa_cfg = ioa_cfg; in ipr_alloc_mem()
9454 list_add_tail(&ioa_cfg->hostrcb[i]->queue, &ioa_cfg->hostrcb_free_q); in ipr_alloc_mem()
9457 ioa_cfg->trace = kzalloc(sizeof(struct ipr_trace_entry) * in ipr_alloc_mem()
9460 if (!ioa_cfg->trace) in ipr_alloc_mem()
9471 ioa_cfg->hostrcb[i], in ipr_alloc_mem()
9472 ioa_cfg->hostrcb_dma[i]); in ipr_alloc_mem()
9474 dma_free_coherent(&pdev->dev, ioa_cfg->cfg_table_size, in ipr_alloc_mem()
9475 ioa_cfg->u.cfg_table, ioa_cfg->cfg_table_dma); in ipr_alloc_mem()
9477 for (i = 0; i < ioa_cfg->hrrq_num; i++) { in ipr_alloc_mem()
9479 sizeof(u32) * ioa_cfg->hrrq[i].size, in ipr_alloc_mem()
9480 ioa_cfg->hrrq[i].host_rrq, in ipr_alloc_mem()
9481 ioa_cfg->hrrq[i].host_rrq_dma); in ipr_alloc_mem()
9484 ipr_free_cmd_blks(ioa_cfg); in ipr_alloc_mem()
9487 ioa_cfg->vpd_cbs, ioa_cfg->vpd_cbs_dma); in ipr_alloc_mem()
9489 kfree(ioa_cfg->res_entries); in ipr_alloc_mem()
9500 static void ipr_initialize_bus_attr(struct ipr_ioa_cfg *ioa_cfg) in ipr_initialize_bus_attr() argument
9505 ioa_cfg->bus_attr[i].bus = i; in ipr_initialize_bus_attr()
9506 ioa_cfg->bus_attr[i].qas_enabled = 0; in ipr_initialize_bus_attr()
9507 ioa_cfg->bus_attr[i].bus_width = IPR_DEFAULT_BUS_WIDTH; in ipr_initialize_bus_attr()
9509 ioa_cfg->bus_attr[i].max_xfer_rate = ipr_max_bus_speeds[ipr_max_speed]; in ipr_initialize_bus_attr()
9511 ioa_cfg->bus_attr[i].max_xfer_rate = IPR_U160_SCSI_RATE; in ipr_initialize_bus_attr()
9522 static void ipr_init_regs(struct ipr_ioa_cfg *ioa_cfg) in ipr_init_regs() argument
9528 p = &ioa_cfg->chip_cfg->regs; in ipr_init_regs()
9529 t = &ioa_cfg->regs; in ipr_init_regs()
9530 base = ioa_cfg->hdw_dma_regs; in ipr_init_regs()
9549 if (ioa_cfg->sis64) { in ipr_init_regs()
9566 static void ipr_init_ioa_cfg(struct ipr_ioa_cfg *ioa_cfg, in ipr_init_ioa_cfg() argument
9571 ioa_cfg->host = host; in ipr_init_ioa_cfg()
9572 ioa_cfg->pdev = pdev; in ipr_init_ioa_cfg()
9573 ioa_cfg->log_level = ipr_log_level; in ipr_init_ioa_cfg()
9574 ioa_cfg->doorbell = IPR_DOORBELL; in ipr_init_ioa_cfg()
9575 sprintf(ioa_cfg->eye_catcher, IPR_EYECATCHER); in ipr_init_ioa_cfg()
9576 sprintf(ioa_cfg->trace_start, IPR_TRACE_START_LABEL); in ipr_init_ioa_cfg()
9577 sprintf(ioa_cfg->cfg_table_start, IPR_CFG_TBL_START); in ipr_init_ioa_cfg()
9578 sprintf(ioa_cfg->resource_table_label, IPR_RES_TABLE_LABEL); in ipr_init_ioa_cfg()
9579 sprintf(ioa_cfg->ipr_hcam_label, IPR_HCAM_LABEL); in ipr_init_ioa_cfg()
9580 sprintf(ioa_cfg->ipr_cmd_label, IPR_CMD_LABEL); in ipr_init_ioa_cfg()
9582 INIT_LIST_HEAD(&ioa_cfg->hostrcb_free_q); in ipr_init_ioa_cfg()
9583 INIT_LIST_HEAD(&ioa_cfg->hostrcb_pending_q); in ipr_init_ioa_cfg()
9584 INIT_LIST_HEAD(&ioa_cfg->free_res_q); in ipr_init_ioa_cfg()
9585 INIT_LIST_HEAD(&ioa_cfg->used_res_q); in ipr_init_ioa_cfg()
9586 INIT_WORK(&ioa_cfg->work_q, ipr_worker_thread); in ipr_init_ioa_cfg()
9587 init_waitqueue_head(&ioa_cfg->reset_wait_q); in ipr_init_ioa_cfg()
9588 init_waitqueue_head(&ioa_cfg->msi_wait_q); in ipr_init_ioa_cfg()
9589 init_waitqueue_head(&ioa_cfg->eeh_wait_q); in ipr_init_ioa_cfg()
9590 ioa_cfg->sdt_state = INACTIVE; in ipr_init_ioa_cfg()
9592 ipr_initialize_bus_attr(ioa_cfg); in ipr_init_ioa_cfg()
9593 ioa_cfg->max_devs_supported = ipr_max_devs; in ipr_init_ioa_cfg()
9595 if (ioa_cfg->sis64) { in ipr_init_ioa_cfg()
9599 ioa_cfg->max_devs_supported = IPR_MAX_SIS64_DEVS; in ipr_init_ioa_cfg()
9600 ioa_cfg->cfg_table_size = (sizeof(struct ipr_config_table_hdr64) in ipr_init_ioa_cfg()
9602 * ioa_cfg->max_devs_supported))); in ipr_init_ioa_cfg()
9607 ioa_cfg->max_devs_supported = IPR_MAX_PHYSICAL_DEVS; in ipr_init_ioa_cfg()
9608 ioa_cfg->cfg_table_size = (sizeof(struct ipr_config_table_hdr) in ipr_init_ioa_cfg()
9610 * ioa_cfg->max_devs_supported))); in ipr_init_ioa_cfg()
9616 host->can_queue = ioa_cfg->max_cmds; in ipr_init_ioa_cfg()
9617 pci_set_drvdata(pdev, ioa_cfg); in ipr_init_ioa_cfg()
9619 for (i = 0; i < ARRAY_SIZE(ioa_cfg->hrrq); i++) { in ipr_init_ioa_cfg()
9620 INIT_LIST_HEAD(&ioa_cfg->hrrq[i].hrrq_free_q); in ipr_init_ioa_cfg()
9621 INIT_LIST_HEAD(&ioa_cfg->hrrq[i].hrrq_pending_q); in ipr_init_ioa_cfg()
9622 spin_lock_init(&ioa_cfg->hrrq[i]._lock); in ipr_init_ioa_cfg()
9624 ioa_cfg->hrrq[i].lock = ioa_cfg->host->host_lock; in ipr_init_ioa_cfg()
9626 ioa_cfg->hrrq[i].lock = &ioa_cfg->hrrq[i]._lock; in ipr_init_ioa_cfg()
9657 static void ipr_wait_for_pci_err_recovery(struct ipr_ioa_cfg *ioa_cfg) in ipr_wait_for_pci_err_recovery() argument
9659 struct pci_dev *pdev = ioa_cfg->pdev; in ipr_wait_for_pci_err_recovery()
9662 wait_event_timeout(ioa_cfg->eeh_wait_q, in ipr_wait_for_pci_err_recovery()
9669 static int ipr_enable_msix(struct ipr_ioa_cfg *ioa_cfg) in ipr_enable_msix() argument
9677 vectors = pci_enable_msix_range(ioa_cfg->pdev, in ipr_enable_msix()
9680 ipr_wait_for_pci_err_recovery(ioa_cfg); in ipr_enable_msix()
9685 ioa_cfg->vectors_info[i].vec = entries[i].vector; in ipr_enable_msix()
9686 ioa_cfg->nvectors = vectors; in ipr_enable_msix()
9691 static int ipr_enable_msi(struct ipr_ioa_cfg *ioa_cfg) in ipr_enable_msi() argument
9695 vectors = pci_enable_msi_range(ioa_cfg->pdev, 1, ipr_number_of_msix); in ipr_enable_msi()
9697 ipr_wait_for_pci_err_recovery(ioa_cfg); in ipr_enable_msi()
9702 ioa_cfg->vectors_info[i].vec = ioa_cfg->pdev->irq + i; in ipr_enable_msi()
9703 ioa_cfg->nvectors = vectors; in ipr_enable_msi()
9708 static void name_msi_vectors(struct ipr_ioa_cfg *ioa_cfg) in name_msi_vectors() argument
9710 int vec_idx, n = sizeof(ioa_cfg->vectors_info[0].desc) - 1; in name_msi_vectors()
9712 for (vec_idx = 0; vec_idx < ioa_cfg->nvectors; vec_idx++) { in name_msi_vectors()
9713 snprintf(ioa_cfg->vectors_info[vec_idx].desc, n, in name_msi_vectors()
9714 "host%d-%d", ioa_cfg->host->host_no, vec_idx); in name_msi_vectors()
9715 ioa_cfg->vectors_info[vec_idx]. in name_msi_vectors()
9716 desc[strlen(ioa_cfg->vectors_info[vec_idx].desc)] = 0; in name_msi_vectors()
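
Note: name_msi_vectors() builds one human-readable IRQ name per vector of the form host<N>-<idx>, with snprintf bounded by the desc buffer size so it can never overrun. A standalone sketch:

    #include <stdio.h>

    #define NVEC 3

    struct vec_info { char desc[16]; };

    int main(void)
    {
        struct vec_info v[NVEC];
        int host_no = 2;                     /* illustrative host number */

        /* One bounded, human-readable name per interrupt vector. */
        for (int i = 0; i < NVEC; i++)
            snprintf(v[i].desc, sizeof(v[i].desc), "host%d-%d", host_no, i);

        printf("%s %s %s\n", v[0].desc, v[1].desc, v[2].desc);
        /* prints: host2-0 host2-1 host2-2 */
        return 0;
    }
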
9720 static int ipr_request_other_msi_irqs(struct ipr_ioa_cfg *ioa_cfg) in ipr_request_other_msi_irqs() argument
9724 for (i = 1; i < ioa_cfg->nvectors; i++) { in ipr_request_other_msi_irqs()
9725 rc = request_irq(ioa_cfg->vectors_info[i].vec, in ipr_request_other_msi_irqs()
9728 ioa_cfg->vectors_info[i].desc, in ipr_request_other_msi_irqs()
9729 &ioa_cfg->hrrq[i]); in ipr_request_other_msi_irqs()
9732 free_irq(ioa_cfg->vectors_info[i].vec, in ipr_request_other_msi_irqs()
9733 &ioa_cfg->hrrq[i]); in ipr_request_other_msi_irqs()
9752 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)devp; in ipr_test_intr() local
9756 dev_info(&ioa_cfg->pdev->dev, "Received IRQ : %d\n", irq); in ipr_test_intr()
9757 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); in ipr_test_intr()
9759 ioa_cfg->msi_received = 1; in ipr_test_intr()
9760 wake_up(&ioa_cfg->msi_wait_q); in ipr_test_intr()
9762 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_test_intr()
9778 static int ipr_test_msi(struct ipr_ioa_cfg *ioa_cfg, struct pci_dev *pdev) in ipr_test_msi() argument
9786 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); in ipr_test_msi()
9787 init_waitqueue_head(&ioa_cfg->msi_wait_q); in ipr_test_msi()
9788 ioa_cfg->msi_received = 0; in ipr_test_msi()
9789 ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER); in ipr_test_msi()
9790 writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE, ioa_cfg->regs.clr_interrupt_mask_reg32); in ipr_test_msi()
9791 int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg); in ipr_test_msi()
9792 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_test_msi()
9794 if (ioa_cfg->intr_flag == IPR_USE_MSIX) in ipr_test_msi()
9795 rc = request_irq(ioa_cfg->vectors_info[0].vec, ipr_test_intr, 0, IPR_NAME, ioa_cfg); in ipr_test_msi()
9797 rc = request_irq(pdev->irq, ipr_test_intr, 0, IPR_NAME, ioa_cfg); in ipr_test_msi()
9804 writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE, ioa_cfg->regs.sense_interrupt_reg32); in ipr_test_msi()
9805 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg); in ipr_test_msi()
9806 wait_event_timeout(ioa_cfg->msi_wait_q, ioa_cfg->msi_received, HZ); in ipr_test_msi()
9807 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); in ipr_test_msi()
9808 ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER); in ipr_test_msi()
9810 if (!ioa_cfg->msi_received) { in ipr_test_msi()
9817 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_test_msi()
9819 if (ioa_cfg->intr_flag == IPR_USE_MSIX) in ipr_test_msi()
9820 free_irq(ioa_cfg->vectors_info[0].vec, ioa_cfg); in ipr_test_msi()
9822 free_irq(pdev->irq, ioa_cfg); in ipr_test_msi()
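
Note: ipr_test_msi() fires a test interrupt and then blocks in wait_event_timeout() on msi_received for about one HZ, falling back to LSI if nothing arrives. A rough user-space analog of that bounded wait, using a condition variable with a deadline in place of the kernel waitqueue (illustration only, not the driver's mechanism):

    #include <stdio.h>
    #include <pthread.h>
    #include <time.h>

    static pthread_mutex_t m = PTHREAD_MUTEX_INITIALIZER;
    static pthread_cond_t cv = PTHREAD_COND_INITIALIZER;
    static int received;

    static void *fake_irq(void *arg)
    {
        (void)arg;
        pthread_mutex_lock(&m);
        received = 1;                        /* the "interrupt" fired */
        pthread_cond_signal(&cv);
        pthread_mutex_unlock(&m);
        return NULL;
    }

    int main(void)
    {
        pthread_t t;
        struct timespec deadline;

        clock_gettime(CLOCK_REALTIME, &deadline);
        deadline.tv_sec += 1;                /* roughly HZ worth of waiting */

        pthread_create(&t, NULL, fake_irq, NULL);
        pthread_mutex_lock(&m);
        while (!received &&
               pthread_cond_timedwait(&cv, &m, &deadline) == 0)
            ;                                /* recheck after each wakeup */
        printf(received ? "got it\n" : "fell back to LSI\n");
        pthread_mutex_unlock(&m);
        pthread_join(t, NULL);
        return 0;
    }
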
9839 struct ipr_ioa_cfg *ioa_cfg; in ipr_probe_ioa() local
9850 host = scsi_host_alloc(&driver_template, sizeof(*ioa_cfg)); in ipr_probe_ioa()
9858 ioa_cfg = (struct ipr_ioa_cfg *)host->hostdata; in ipr_probe_ioa()
9859 memset(ioa_cfg, 0, sizeof(struct ipr_ioa_cfg)); in ipr_probe_ioa()
9860 ata_host_init(&ioa_cfg->ata_host, &pdev->dev, &ipr_sata_ops); in ipr_probe_ioa()
9862 ioa_cfg->ipr_chip = ipr_get_chip_info(dev_id); in ipr_probe_ioa()
9864 if (!ioa_cfg->ipr_chip) { in ipr_probe_ioa()
9871 ioa_cfg->sis64 = ioa_cfg->ipr_chip->sis_type == IPR_SIS64 ? 1 : 0; in ipr_probe_ioa()
9872 ioa_cfg->chip_cfg = ioa_cfg->ipr_chip->cfg; in ipr_probe_ioa()
9873 ioa_cfg->clear_isr = ioa_cfg->chip_cfg->clear_isr; in ipr_probe_ioa()
9874 ioa_cfg->max_cmds = ioa_cfg->chip_cfg->max_cmds; in ipr_probe_ioa()
9877 ioa_cfg->transop_timeout = ipr_transop_timeout; in ipr_probe_ioa()
9879 ioa_cfg->transop_timeout = IPR_LONG_OPERATIONAL_TIMEOUT; in ipr_probe_ioa()
9881 ioa_cfg->transop_timeout = IPR_OPERATIONAL_TIMEOUT; in ipr_probe_ioa()
9883 ioa_cfg->revid = pdev->revision; in ipr_probe_ioa()
9885 ipr_init_ioa_cfg(ioa_cfg, host, pdev); in ipr_probe_ioa()
9900 ipr_wait_for_pci_err_recovery(ioa_cfg); in ipr_probe_ioa()
9906 ipr_wait_for_pci_err_recovery(ioa_cfg); in ipr_probe_ioa()
9920 ioa_cfg->hdw_dma_regs = ipr_regs; in ipr_probe_ioa()
9921 ioa_cfg->hdw_dma_regs_pci = ipr_regs_pci; in ipr_probe_ioa()
9922 ioa_cfg->ioa_mailbox = ioa_cfg->chip_cfg->mailbox + ipr_regs; in ipr_probe_ioa()
9924 ipr_init_regs(ioa_cfg); in ipr_probe_ioa()
9926 if (ioa_cfg->sis64) { in ipr_probe_ioa()
9942 ioa_cfg->chip_cfg->cache_line_size); in ipr_probe_ioa()
9946 ipr_wait_for_pci_err_recovery(ioa_cfg); in ipr_probe_ioa()
9952 interrupts = readl(ioa_cfg->regs.sense_interrupt_reg); in ipr_probe_ioa()
9953 ipr_wait_for_pci_err_recovery(ioa_cfg); in ipr_probe_ioa()
9961 if (ioa_cfg->ipr_chip->intr_type == IPR_USE_MSI && in ipr_probe_ioa()
9962 ipr_enable_msix(ioa_cfg) == 0) in ipr_probe_ioa()
9963 ioa_cfg->intr_flag = IPR_USE_MSIX; in ipr_probe_ioa()
9964 else if (ioa_cfg->ipr_chip->intr_type == IPR_USE_MSI && in ipr_probe_ioa()
9965 ipr_enable_msi(ioa_cfg) == 0) in ipr_probe_ioa()
9966 ioa_cfg->intr_flag = IPR_USE_MSI; in ipr_probe_ioa()
9968 ioa_cfg->intr_flag = IPR_USE_LSI; in ipr_probe_ioa()
9969 ioa_cfg->nvectors = 1; in ipr_probe_ioa()
9976 ipr_wait_for_pci_err_recovery(ioa_cfg); in ipr_probe_ioa()
9984 if (ioa_cfg->intr_flag == IPR_USE_MSI || in ipr_probe_ioa()
9985 ioa_cfg->intr_flag == IPR_USE_MSIX) { in ipr_probe_ioa()
9986 rc = ipr_test_msi(ioa_cfg, pdev); in ipr_probe_ioa()
9988 ipr_wait_for_pci_err_recovery(ioa_cfg); in ipr_probe_ioa()
9989 if (ioa_cfg->intr_flag == IPR_USE_MSI) { in ipr_probe_ioa()
9990 ioa_cfg->intr_flag &= ~IPR_USE_MSI; in ipr_probe_ioa()
9992 } else if (ioa_cfg->intr_flag == IPR_USE_MSIX) { in ipr_probe_ioa()
9993 ioa_cfg->intr_flag &= ~IPR_USE_MSIX; in ipr_probe_ioa()
9997 ioa_cfg->intr_flag = IPR_USE_LSI; in ipr_probe_ioa()
9998 ioa_cfg->nvectors = 1; in ipr_probe_ioa()
10003 if (ioa_cfg->intr_flag == IPR_USE_MSI) in ipr_probe_ioa()
10006 ioa_cfg->nvectors, pdev->irq); in ipr_probe_ioa()
10007 else if (ioa_cfg->intr_flag == IPR_USE_MSIX) in ipr_probe_ioa()
10010 ioa_cfg->nvectors); in ipr_probe_ioa()
10014 ioa_cfg->hrrq_num = min3(ioa_cfg->nvectors, in ipr_probe_ioa()
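
Note: the min3() line above sizes hrrq_num from the number of allocated vectors together with the other limits on that call, so the driver never creates more queues than it has interrupts to service. For reference, min3 is just a nested minimum (the values below are illustrative):

    #include <stdio.h>

    #define min(a, b)     ((a) < (b) ? (a) : (b))
    #define min3(a, b, c) min(min(a, b), (c))

    int main(void)
    {
        int nvectors = 16, max_hrrq = 8, cpus = 4;      /* example limits */
        printf("%d\n", min3(nvectors, max_hrrq, cpus)); /* prints 4 */
        return 0;
    }
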
10018 if ((rc = ipr_save_pcix_cmd_reg(ioa_cfg))) in ipr_probe_ioa()
10021 if ((rc = ipr_set_pcix_cmd_reg(ioa_cfg))) in ipr_probe_ioa()
10024 rc = ipr_alloc_mem(ioa_cfg); in ipr_probe_ioa()
10044 mask = readl(ioa_cfg->regs.sense_interrupt_mask_reg32); in ipr_probe_ioa()
10045 interrupts = readl(ioa_cfg->regs.sense_interrupt_reg32); in ipr_probe_ioa()
10046 uproc = readl(ioa_cfg->regs.sense_uproc_interrupt_reg32); in ipr_probe_ioa()
10048 ioa_cfg->needs_hard_reset = 1; in ipr_probe_ioa()
10050 ioa_cfg->needs_hard_reset = 1; in ipr_probe_ioa()
10052 ioa_cfg->ioa_unit_checked = 1; in ipr_probe_ioa()
10054 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); in ipr_probe_ioa()
10055 ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER); in ipr_probe_ioa()
10056 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_probe_ioa()
10058 if (ioa_cfg->intr_flag == IPR_USE_MSI in ipr_probe_ioa()
10059 || ioa_cfg->intr_flag == IPR_USE_MSIX) { in ipr_probe_ioa()
10060 name_msi_vectors(ioa_cfg); in ipr_probe_ioa()
10061 rc = request_irq(ioa_cfg->vectors_info[0].vec, ipr_isr, in ipr_probe_ioa()
10063 ioa_cfg->vectors_info[0].desc, in ipr_probe_ioa()
10064 &ioa_cfg->hrrq[0]); in ipr_probe_ioa()
10066 rc = ipr_request_other_msi_irqs(ioa_cfg); in ipr_probe_ioa()
10070 IPR_NAME, &ioa_cfg->hrrq[0]); in ipr_probe_ioa()
10079 (dev_id->device == PCI_DEVICE_ID_IBM_OBSIDIAN_E && !ioa_cfg->revid)) { in ipr_probe_ioa()
10080 ioa_cfg->needs_warm_reset = 1; in ipr_probe_ioa()
10081 ioa_cfg->reset = ipr_reset_slot_reset; in ipr_probe_ioa()
10083 ioa_cfg->reset_work_q = alloc_ordered_workqueue("ipr_reset_%d", in ipr_probe_ioa()
10086 if (!ioa_cfg->reset_work_q) { in ipr_probe_ioa()
10091 ioa_cfg->reset = ipr_reset_start_bist; in ipr_probe_ioa()
10094 list_add_tail(&ioa_cfg->queue, &ipr_ioa_head); in ipr_probe_ioa()
10102 ipr_free_irqs(ioa_cfg); in ipr_probe_ioa()
10104 ipr_free_mem(ioa_cfg); in ipr_probe_ioa()
10106 ipr_wait_for_pci_err_recovery(ioa_cfg); in ipr_probe_ioa()
10107 if (ioa_cfg->intr_flag == IPR_USE_MSI) in ipr_probe_ioa()
10109 else if (ioa_cfg->intr_flag == IPR_USE_MSIX) in ipr_probe_ioa()
10136 static void ipr_initiate_ioa_bringdown(struct ipr_ioa_cfg *ioa_cfg, in ipr_initiate_ioa_bringdown() argument
10140 if (ioa_cfg->sdt_state == WAIT_FOR_DUMP) in ipr_initiate_ioa_bringdown()
10141 ioa_cfg->sdt_state = ABORT_DUMP; in ipr_initiate_ioa_bringdown()
10142 ioa_cfg->reset_retries = 0; in ipr_initiate_ioa_bringdown()
10143 ioa_cfg->in_ioa_bringdown = 1; in ipr_initiate_ioa_bringdown()
10144 ipr_initiate_ioa_reset(ioa_cfg, shutdown_type); in ipr_initiate_ioa_bringdown()
10160 struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev); in __ipr_remove() local
10165 spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags); in __ipr_remove()
10166 while (ioa_cfg->in_reset_reload) { in __ipr_remove()
10167 spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags); in __ipr_remove()
10168 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload); in __ipr_remove()
10169 spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags); in __ipr_remove()
10172 for (i = 0; i < ioa_cfg->hrrq_num; i++) { in __ipr_remove()
10173 spin_lock(&ioa_cfg->hrrq[i]._lock); in __ipr_remove()
10174 ioa_cfg->hrrq[i].removing_ioa = 1; in __ipr_remove()
10175 spin_unlock(&ioa_cfg->hrrq[i]._lock); in __ipr_remove()
10178 ipr_initiate_ioa_bringdown(ioa_cfg, IPR_SHUTDOWN_NORMAL); in __ipr_remove()
10180 spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags); in __ipr_remove()
10181 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload); in __ipr_remove()
10182 flush_work(&ioa_cfg->work_q); in __ipr_remove()
10183 if (ioa_cfg->reset_work_q) in __ipr_remove()
10184 flush_workqueue(ioa_cfg->reset_work_q); in __ipr_remove()
10185 INIT_LIST_HEAD(&ioa_cfg->used_res_q); in __ipr_remove()
10186 spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags); in __ipr_remove()
10189 list_del(&ioa_cfg->queue); in __ipr_remove()
10192 if (ioa_cfg->sdt_state == ABORT_DUMP) in __ipr_remove()
10193 ioa_cfg->sdt_state = WAIT_FOR_DUMP; in __ipr_remove()
10194 spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags); in __ipr_remove()
10196 ipr_free_all_resources(ioa_cfg); in __ipr_remove()
10212 struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev); in ipr_remove() local
10216 ipr_remove_trace_file(&ioa_cfg->host->shost_dev.kobj, in ipr_remove()
10218 ipr_remove_dump_file(&ioa_cfg->host->shost_dev.kobj, in ipr_remove()
10220 scsi_remove_host(ioa_cfg->host); in ipr_remove()
10235 struct ipr_ioa_cfg *ioa_cfg; in ipr_probe() local
10243 ioa_cfg = pci_get_drvdata(pdev); in ipr_probe()
10244 rc = ipr_probe_ioa_part2(ioa_cfg); in ipr_probe()
10251 rc = scsi_add_host(ioa_cfg->host, &pdev->dev); in ipr_probe()
10258 rc = ipr_create_trace_file(&ioa_cfg->host->shost_dev.kobj, in ipr_probe()
10262 scsi_remove_host(ioa_cfg->host); in ipr_probe()
10267 rc = ipr_create_dump_file(&ioa_cfg->host->shost_dev.kobj, in ipr_probe()
10271 ipr_remove_trace_file(&ioa_cfg->host->shost_dev.kobj, in ipr_probe()
10273 scsi_remove_host(ioa_cfg->host); in ipr_probe()
10278 scsi_scan_host(ioa_cfg->host); in ipr_probe()
10279 ioa_cfg->iopoll_weight = ioa_cfg->chip_cfg->iopoll_weight; in ipr_probe()
10281 if (ioa_cfg->iopoll_weight && ioa_cfg->sis64 && ioa_cfg->nvectors > 1) { in ipr_probe()
10282 for (i = 1; i < ioa_cfg->hrrq_num; i++) { in ipr_probe()
10283 blk_iopoll_init(&ioa_cfg->hrrq[i].iopoll, in ipr_probe()
10284 ioa_cfg->iopoll_weight, ipr_iopoll); in ipr_probe()
10285 blk_iopoll_enable(&ioa_cfg->hrrq[i].iopoll); in ipr_probe()
10289 schedule_work(&ioa_cfg->work_q); in ipr_probe()
10305 struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev); in ipr_shutdown() local
10310 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); in ipr_shutdown()
10311 if (ioa_cfg->iopoll_weight && ioa_cfg->sis64 && ioa_cfg->nvectors > 1) { in ipr_shutdown()
10312 ioa_cfg->iopoll_weight = 0; in ipr_shutdown()
10313 for (i = 1; i < ioa_cfg->hrrq_num; i++) in ipr_shutdown()
10314 blk_iopoll_disable(&ioa_cfg->hrrq[i].iopoll); in ipr_shutdown()
10317 while (ioa_cfg->in_reset_reload) { in ipr_shutdown()
10318 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_shutdown()
10319 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload); in ipr_shutdown()
10320 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); in ipr_shutdown()
10323 if (ipr_fast_reboot && system_state == SYSTEM_RESTART && ioa_cfg->sis64) in ipr_shutdown()
10326 ipr_initiate_ioa_bringdown(ioa_cfg, shutdown_type); in ipr_shutdown()
10327 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_shutdown()
10328 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload); in ipr_shutdown()
10329 if (ipr_fast_reboot && system_state == SYSTEM_RESTART && ioa_cfg->sis64) { in ipr_shutdown()
10330 ipr_free_irqs(ioa_cfg); in ipr_shutdown()
10331 pci_disable_device(ioa_cfg->pdev); in ipr_shutdown()
10480 struct ipr_ioa_cfg *ioa_cfg; in ipr_halt() local
10488 list_for_each_entry(ioa_cfg, &ipr_ioa_head, queue) { in ipr_halt()
10489 spin_lock_irqsave(ioa_cfg->host->host_lock, flags); in ipr_halt()
10490 if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds || in ipr_halt()
10491 (ipr_fast_reboot && event == SYS_RESTART && ioa_cfg->sis64)) { in ipr_halt()
10492 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags); in ipr_halt()
10496 ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg); in ipr_halt()
10503 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags); in ipr_halt()