Lines Matching defs:ioa_cfg (search scoped to /netgear-R7000-V1.0.7.12_1.2.5/components/opensource/linux/linux-2.6.36/drivers/scsi/)
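
Note on the matches: nearly every hit below follows the same adapter-access pattern: take the SCSI host lock through ioa_cfg (spin_lock_irqsave on ioa_cfg->host->host_lock), check the adapter state flags (allow_cmds, in_reset_reload, ioa_is_dead), pull a preallocated command block off ioa_cfg->free_q with ipr_get_free_ipr_cmnd(), and track it on ioa_cfg->pending_q until the IOA completes it. The sketch below is a minimal illustration of that pattern, assuming the 2.6.36-era ipr types and helpers named in the listing (struct ipr_ioa_cfg, struct ipr_cmnd, ipr_get_free_ipr_cmnd); ipr_example_queue_cmd() itself is a hypothetical function written for this note, not code from the driver.

/*
 * Illustrative sketch only (not from ipr.c): the ioa_cfg locking and
 * queueing pattern that repeats throughout the matches below. Assumes
 * the driver's own header for the ipr structure definitions.
 */
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include "ipr.h"

static int ipr_example_queue_cmd(struct ipr_ioa_cfg *ioa_cfg)
{
	struct ipr_cmnd *ipr_cmd;
	unsigned long lock_flags;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);

	/* The adapter must be accepting commands and not mid-reset. */
	if (!ioa_cfg->allow_cmds || ioa_cfg->in_reset_reload) {
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
		return -EAGAIN;
	}

	/* Command blocks are preallocated; take one off the free list. */
	ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);

	/*
	 * Track it on the pending queue until the IOA responds; the
	 * completion paths in the listing move it back to ioa_cfg->free_q.
	 */
	list_add_tail(&ipr_cmd->queue, &ioa_cfg->pending_q);

	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	return 0;
}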

543 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
545 trace_entry = &ioa_cfg->trace[ioa_cfg->trace_index++];
549 if (ipr_cmd->ioa_cfg->sis64)
581 if (ipr_cmd->ioa_cfg->sis64) {
617 * @ioa_cfg: ioa config struct
623 struct ipr_cmnd *ipr_get_free_ipr_cmnd(struct ipr_ioa_cfg *ioa_cfg)
627 ipr_cmd = list_entry(ioa_cfg->free_q.next, struct ipr_cmnd, queue);
636 * @ioa_cfg: ioa config struct
645 static void ipr_mask_and_clear_interrupts(struct ipr_ioa_cfg *ioa_cfg,
651 ioa_cfg->allow_interrupts = 0;
654 if (ioa_cfg->sis64)
655 writeq(~0, ioa_cfg->regs.set_interrupt_mask_reg);
657 writel(~0, ioa_cfg->regs.set_interrupt_mask_reg);
660 if (ioa_cfg->sis64)
661 writel(~0, ioa_cfg->regs.clr_interrupt_reg);
662 writel(clr_ints, ioa_cfg->regs.clr_interrupt_reg32);
663 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
668 * @ioa_cfg: ioa config struct
673 static int ipr_save_pcix_cmd_reg(struct ipr_ioa_cfg *ioa_cfg)
675 int pcix_cmd_reg = pci_find_capability(ioa_cfg->pdev, PCI_CAP_ID_PCIX);
680 if (pci_read_config_word(ioa_cfg->pdev, pcix_cmd_reg + PCI_X_CMD,
681 &ioa_cfg->saved_pcix_cmd_reg) != PCIBIOS_SUCCESSFUL) {
682 dev_err(&ioa_cfg->pdev->dev, "Failed to save PCI-X command register\n");
686 ioa_cfg->saved_pcix_cmd_reg |= PCI_X_CMD_DPERR_E | PCI_X_CMD_ERO;
692 * @ioa_cfg: ioa config struct
697 static int ipr_set_pcix_cmd_reg(struct ipr_ioa_cfg *ioa_cfg)
699 int pcix_cmd_reg = pci_find_capability(ioa_cfg->pdev, PCI_CAP_ID_PCIX);
702 if (pci_write_config_word(ioa_cfg->pdev, pcix_cmd_reg + PCI_X_CMD,
703 ioa_cfg->saved_pcix_cmd_reg) != PCIBIOS_SUCCESSFUL) {
704 dev_err(&ioa_cfg->pdev->dev, "Failed to setup PCI-X command register\n");
724 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
730 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
746 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
753 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
758 * @ioa_cfg: ioa config struct
765 static void ipr_fail_all_ops(struct ipr_ioa_cfg *ioa_cfg)
770 list_for_each_entry_safe(ipr_cmd, temp, &ioa_cfg->pending_q, queue) {
802 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
805 if (ioa_cfg->sis64) {
813 writeq(send_dma_addr, ioa_cfg->regs.ioarrin_reg);
815 writel(send_dma_addr, ioa_cfg->regs.ioarrin_reg);
835 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
837 list_add_tail(&ipr_cmd->queue, &ioa_cfg->pending_q);
893 if (ipr_cmd->ioa_cfg->sis64) {
930 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
935 spin_unlock_irq(ioa_cfg->host->host_lock);
937 spin_lock_irq(ioa_cfg->host->host_lock);
942 * @ioa_cfg: ioa config struct
953 static void ipr_send_hcam(struct ipr_ioa_cfg *ioa_cfg, u8 type,
959 if (ioa_cfg->allow_cmds) {
960 ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
961 list_add_tail(&ipr_cmd->queue, &ioa_cfg->pending_q);
962 list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_pending_q);
988 list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_free_q);
1030 struct ipr_ioa_cfg *ioa_cfg = res->ioa_cfg;
1041 if (ioa_cfg->sis64) {
1054 list_for_each_entry(gscsi_res, &ioa_cfg->used_res_q, queue) {
1062 res->target = find_first_zero_bit(ioa_cfg->target_ids,
1063 ioa_cfg->max_devs_supported);
1064 set_bit(res->target, ioa_cfg->target_ids);
1074 res->target = find_first_zero_bit(ioa_cfg->array_ids,
1075 ioa_cfg->max_devs_supported);
1076 set_bit(res->target, ioa_cfg->array_ids);
1079 res->target = find_first_zero_bit(ioa_cfg->vset_ids,
1080 ioa_cfg->max_devs_supported);
1081 set_bit(res->target, ioa_cfg->vset_ids);
1083 res->target = find_first_zero_bit(ioa_cfg->target_ids,
1084 ioa_cfg->max_devs_supported);
1085 set_bit(res->target, ioa_cfg->target_ids);
1115 if (res->ioa_cfg->sis64) {
1168 if (res->ioa_cfg->sis64) {
1225 struct ipr_ioa_cfg *ioa_cfg = res->ioa_cfg;
1227 if (!ioa_cfg->sis64)
1231 clear_bit(res->target, ioa_cfg->array_ids);
1233 clear_bit(res->target, ioa_cfg->vset_ids);
1235 list_for_each_entry(gscsi_res, &ioa_cfg->used_res_q, queue)
1238 clear_bit(res->target, ioa_cfg->target_ids);
1241 clear_bit(res->target, ioa_cfg->target_ids);
1246 * @ioa_cfg: ioa config struct
1252 static void ipr_handle_config_change(struct ipr_ioa_cfg *ioa_cfg,
1261 if (ioa_cfg->sis64) {
1269 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
1277 if (list_empty(&ioa_cfg->free_res_q)) {
1278 ipr_send_hcam(ioa_cfg,
1284 res = list_entry(ioa_cfg->free_res_q.next,
1289 list_add_tail(&res->queue, &ioa_cfg->used_res_q);
1298 if (ioa_cfg->allow_ml_add_del)
1299 schedule_work(&ioa_cfg->work_q);
1302 list_move_tail(&res->queue, &ioa_cfg->free_res_q);
1306 if (ioa_cfg->allow_ml_add_del)
1307 schedule_work(&ioa_cfg->work_q);
1310 ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE, hostrcb);
1325 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
1330 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
1334 dev_err(&ioa_cfg->pdev->dev,
1337 ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE, hostrcb);
1339 ipr_handle_config_change(ioa_cfg, hostrcb);
1446 * @ioa_cfg: ioa config struct
1452 static void ipr_log_enhanced_cache_error(struct ipr_ioa_cfg *ioa_cfg,
1457 if (ioa_cfg->sis64)
1482 * @ioa_cfg: ioa config struct
1488 static void ipr_log_cache_error(struct ipr_ioa_cfg *ioa_cfg,
1514 * @ioa_cfg: ioa config struct
1520 static void ipr_log_enhanced_config_error(struct ipr_ioa_cfg *ioa_cfg,
1538 ipr_phys_res_err(ioa_cfg, dev_entry->dev_res_addr, "Device %d", i + 1);
1554 * @ioa_cfg: ioa config struct
1560 static void ipr_log_sis64_config_error(struct ipr_ioa_cfg *ioa_cfg,
1597 * @ioa_cfg: ioa config struct
1603 static void ipr_log_config_error(struct ipr_ioa_cfg *ioa_cfg,
1621 ipr_phys_res_err(ioa_cfg, dev_entry->dev_res_addr, "Device %d", i + 1);
1644 * @ioa_cfg: ioa config struct
1650 static void ipr_log_enhanced_array_error(struct ipr_ioa_cfg *ioa_cfg,
1664 ioa_cfg->host->host_no,
1685 ipr_phys_res_err(ioa_cfg, array_entry->dev_res_addr, "Current Location");
1686 ipr_phys_res_err(ioa_cfg, array_entry->expected_dev_res_addr,
1695 * @ioa_cfg: ioa config struct
1701 static void ipr_log_array_error(struct ipr_ioa_cfg *ioa_cfg,
1715 ioa_cfg->host->host_no,
1735 ipr_phys_res_err(ioa_cfg, array_entry->dev_res_addr, "Current Location");
1736 ipr_phys_res_err(ioa_cfg, array_entry->expected_dev_res_addr,
1750 * @ioa_cfg: ioa config struct
1757 static void ipr_log_hex_data(struct ipr_ioa_cfg *ioa_cfg, u32 *data, int len)
1764 if (ioa_cfg->log_level <= IPR_DEFAULT_LOG_LEVEL)
1778 * @ioa_cfg: ioa config struct
1784 static void ipr_log_enhanced_dual_ioa_error(struct ipr_ioa_cfg *ioa_cfg,
1789 if (ioa_cfg->sis64)
1800 ipr_log_hex_data(ioa_cfg, error->data,
1808 * @ioa_cfg: ioa config struct
1814 static void ipr_log_dual_ioa_error(struct ipr_ioa_cfg *ioa_cfg,
1826 ipr_log_hex_data(ioa_cfg, error->data,
2091 * @ioa_cfg: ioa config struct
2097 static void ipr_log_fabric_error(struct ipr_ioa_cfg *ioa_cfg,
2123 ipr_log_hex_data(ioa_cfg, (u32 *)fabric, add_len);
2128 * @ioa_cfg: ioa config struct
2134 static void ipr_log_sis64_array_error(struct ipr_ioa_cfg *ioa_cfg,
2182 * @ioa_cfg: ioa config struct
2188 static void ipr_log_sis64_fabric_error(struct ipr_ioa_cfg *ioa_cfg,
2215 ipr_log_hex_data(ioa_cfg, (u32 *)fabric, add_len);
2220 * @ioa_cfg: ioa config struct
2226 static void ipr_log_generic_error(struct ipr_ioa_cfg *ioa_cfg,
2229 ipr_log_hex_data(ioa_cfg, hostrcb->hcam.u.raw.data,
2257 * @ioa_cfg: ioa config struct
2265 static void ipr_handle_log_data(struct ipr_ioa_cfg *ioa_cfg,
2275 dev_err(&ioa_cfg->pdev->dev, "Error notifications lost\n");
2277 if (ioa_cfg->sis64)
2282 if (!ioa_cfg->sis64 && (ioasc == IPR_IOASC_BUS_WAS_RESET ||
2285 scsi_report_bus_reset(ioa_cfg->host,
2297 ioa_cfg->errors_logged++;
2299 if (ioa_cfg->log_level < ipr_error_table[error_index].log_hcam)
2306 ipr_log_cache_error(ioa_cfg, hostrcb);
2309 ipr_log_config_error(ioa_cfg, hostrcb);
2313 ipr_log_array_error(ioa_cfg, hostrcb);
2316 ipr_log_dual_ioa_error(ioa_cfg, hostrcb);
2319 ipr_log_enhanced_cache_error(ioa_cfg, hostrcb);
2322 ipr_log_enhanced_config_error(ioa_cfg, hostrcb);
2326 ipr_log_enhanced_array_error(ioa_cfg, hostrcb);
2329 ipr_log_enhanced_dual_ioa_error(ioa_cfg, hostrcb);
2332 ipr_log_fabric_error(ioa_cfg, hostrcb);
2335 ipr_log_sis64_config_error(ioa_cfg, hostrcb);
2339 ipr_log_sis64_array_error(ioa_cfg, hostrcb);
2342 ipr_log_sis64_fabric_error(ioa_cfg, hostrcb);
2347 ipr_log_generic_error(ioa_cfg, hostrcb);
2365 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
2370 if (ioa_cfg->sis64)
2376 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
2379 ipr_handle_log_data(ioa_cfg, hostrcb);
2381 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_ABBREV);
2383 dev_err(&ioa_cfg->pdev->dev,
2387 ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_LOG_DATA, hostrcb);
2403 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
2406 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2408 ioa_cfg->errors_logged++;
2409 dev_err(&ioa_cfg->pdev->dev,
2412 if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
2413 ioa_cfg->sdt_state = GET_DUMP;
2415 if (!ioa_cfg->in_reset_reload || ioa_cfg->reset_cmd == ipr_cmd)
2416 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
2418 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2435 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
2438 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2440 ioa_cfg->errors_logged++;
2441 dev_err(&ioa_cfg->pdev->dev,
2444 if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
2445 ioa_cfg->sdt_state = GET_DUMP;
2447 if (!ioa_cfg->in_reset_reload || ioa_cfg->reset_cmd == ipr_cmd) {
2449 ioa_cfg->reset_retries += IPR_NUM_RESET_RELOAD_RETRIES;
2450 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
2453 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2459 * @ioa_cfg: ioa config struct
2467 static int ipr_reset_reload(struct ipr_ioa_cfg *ioa_cfg,
2470 if (!ioa_cfg->in_reset_reload)
2471 ipr_initiate_ioa_reset(ioa_cfg, shutdown_type);
2473 spin_unlock_irq(ioa_cfg->host->host_lock);
2474 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
2475 spin_lock_irq(ioa_cfg->host->host_lock);
2479 if (ioa_cfg->ioa_is_dead) {
2522 * @ioa_cfg: ioa config struct
2532 static u32 ipr_get_max_scsi_speed(struct ipr_ioa_cfg *ioa_cfg, u8 bus, u8 bus_width)
2539 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
2557 * @ioa_cfg: ioa config struct
2565 static int ipr_wait_iodbg_ack(struct ipr_ioa_cfg *ioa_cfg, int max_delay)
2572 pcii_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
2590 * @ioa_cfg: ioa config struct
2598 static int ipr_get_sis64_dump_data_section(struct ipr_ioa_cfg *ioa_cfg,
2605 writel(start_addr+(i*4), ioa_cfg->regs.dump_addr_reg);
2606 *dest = cpu_to_be32(readl(ioa_cfg->regs.dump_data_reg));
2615 * @ioa_cfg: ioa config struct
2623 static int ipr_get_ldump_data_section(struct ipr_ioa_cfg *ioa_cfg,
2630 if (ioa_cfg->sis64)
2631 return ipr_get_sis64_dump_data_section(ioa_cfg, start_addr,
2636 ioa_cfg->regs.set_uproc_interrupt_reg32);
2639 if (ipr_wait_iodbg_ack(ioa_cfg,
2641 dev_err(&ioa_cfg->pdev->dev,
2648 ioa_cfg->regs.clr_interrupt_reg);
2651 writel(start_addr, ioa_cfg->ioa_mailbox);
2655 ioa_cfg->regs.clr_uproc_interrupt_reg32);
2659 if (ipr_wait_iodbg_ack(ioa_cfg,
2661 dev_err(&ioa_cfg->pdev->dev,
2667 *dest = cpu_to_be32(readl(ioa_cfg->ioa_mailbox));
2674 ioa_cfg->regs.clr_interrupt_reg);
2680 ioa_cfg->regs.set_uproc_interrupt_reg32);
2683 ioa_cfg->regs.clr_uproc_interrupt_reg32);
2687 ioa_cfg->regs.clr_interrupt_reg);
2692 readl(ioa_cfg->regs.sense_uproc_interrupt_reg32);
2707 * @ioa_cfg: ioa config struct
2716 static int ipr_sdt_copy(struct ipr_ioa_cfg *ioa_cfg,
2723 struct ipr_ioa_dump *ioa_dump = &ioa_cfg->dump->ioa_dump;
2746 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2747 if (ioa_cfg->sdt_state == ABORT_DUMP) {
2750 rc = ipr_get_ldump_data_section(ioa_cfg,
2755 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2787 * @ioa_cfg: ioa config struct
2793 static void ipr_dump_ioa_type_data(struct ipr_ioa_cfg *ioa_cfg,
2796 struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data;
2804 driver_dump->ioa_type_entry.type = ioa_cfg->type;
2813 * @ioa_cfg: ioa config struct
2819 static void ipr_dump_version_data(struct ipr_ioa_cfg *ioa_cfg,
2834 * @ioa_cfg: ioa config struct
2840 static void ipr_dump_trace_data(struct ipr_ioa_cfg *ioa_cfg,
2849 memcpy(driver_dump->trace_entry.trace, ioa_cfg->trace, IPR_TRACE_SIZE);
2855 * @ioa_cfg: ioa config struct
2861 static void ipr_dump_location_data(struct ipr_ioa_cfg *ioa_cfg,
2870 strcpy(driver_dump->location_entry.location, dev_name(&ioa_cfg->pdev->dev));
2876 * @ioa_cfg: ioa config struct
2882 static void ipr_get_ioa_dump(struct ipr_ioa_cfg *ioa_cfg, struct ipr_dump *dump)
2896 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2898 if (ioa_cfg->sdt_state != GET_DUMP) {
2899 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2903 start_addr = readl(ioa_cfg->ioa_mailbox);
2905 if (!ioa_cfg->sis64 && !ipr_sdt_is_fmt2(start_addr)) {
2906 dev_err(&ioa_cfg->pdev->dev,
2908 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2912 dev_err(&ioa_cfg->pdev->dev, "Dump of IOA initiated\n");
2924 ipr_dump_version_data(ioa_cfg, driver_dump);
2925 ipr_dump_location_data(ioa_cfg, driver_dump);
2926 ipr_dump_ioa_type_data(ioa_cfg, driver_dump);
2927 ipr_dump_trace_data(ioa_cfg, driver_dump);
2944 rc = ipr_get_ldump_data_section(ioa_cfg, start_addr, (__be32 *)sdt,
2950 dev_err(&ioa_cfg->pdev->dev,
2954 ioa_cfg->sdt_state = DUMP_OBTAINED;
2955 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2964 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2974 if (ioa_cfg->sis64)
2992 bytes_copied = ipr_sdt_copy(ioa_cfg, sdt_word,
3005 dev_err(&ioa_cfg->pdev->dev, "Dump of IOA completed.\n");
3010 ioa_cfg->sdt_state = DUMP_OBTAINED;
3015 #define ipr_get_ioa_dump(ioa_cfg, dump) do { } while(0)
3028 struct ipr_ioa_cfg *ioa_cfg = dump->ioa_cfg;
3033 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3034 ioa_cfg->dump = NULL;
3035 ioa_cfg->sdt_state = INACTIVE;
3036 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3062 struct ipr_ioa_cfg *ioa_cfg =
3068 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3070 if (ioa_cfg->sdt_state == GET_DUMP) {
3071 dump = ioa_cfg->dump;
3073 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3077 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3078 ipr_get_ioa_dump(ioa_cfg, dump);
3081 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3082 if (ioa_cfg->sdt_state == DUMP_OBTAINED)
3083 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
3084 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3091 if (!ioa_cfg->allow_cmds || !ioa_cfg->allow_ml_add_del) {
3092 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3096 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
3101 list_move_tail(&res->queue, &ioa_cfg->free_res_q);
3102 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3105 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3112 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
3118 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3119 scsi_add_device(ioa_cfg->host, bus, target, lun);
3120 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3125 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3126 kobject_uevent(&ioa_cfg->host->shost_dev.kobj, KOBJ_CHANGE);
3149 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3153 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3154 ret = memory_read_from_buffer(buf, count, &off, ioa_cfg->trace,
3156 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3183 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3184 struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data;
3188 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3193 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3217 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3221 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3222 len = snprintf(buf, PAGE_SIZE, "%d\n", ioa_cfg->log_level);
3223 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3240 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3243 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3244 ioa_cfg->log_level = simple_strtoul(buf, NULL, 10);
3245 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3275 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3282 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3283 while(ioa_cfg->in_reset_reload) {
3284 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3285 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3286 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3289 ioa_cfg->errors_logged = 0;
3290 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
3292 if (ioa_cfg->in_reset_reload) {
3293 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3294 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3299 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3303 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3304 if (ioa_cfg->in_reset_reload || ioa_cfg->errors_logged)
3306 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3331 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3335 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3336 if (ioa_cfg->ioa_is_dead)
3340 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3360 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3367 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3368 if (ioa_cfg->ioa_is_dead && !strncmp(buf, "online", 6)) {
3369 ioa_cfg->ioa_is_dead = 0;
3370 ioa_cfg->reset_retries = 0;
3371 ioa_cfg->in_ioa_bringdown = 0;
3372 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
3374 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3375 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3405 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3412 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3413 if (!ioa_cfg->in_reset_reload)
3414 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
3415 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3416 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3636 * @ioa_cfg: ioa config struct
3644 static int ipr_update_ioa_ucode(struct ipr_ioa_cfg *ioa_cfg,
3649 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3650 while(ioa_cfg->in_reset_reload) {
3651 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3652 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3653 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3656 if (ioa_cfg->ucode_sglist) {
3657 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3658 dev_err(&ioa_cfg->pdev->dev,
3663 sglist->num_dma_sg = pci_map_sg(ioa_cfg->pdev, sglist->scatterlist,
3667 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3668 dev_err(&ioa_cfg->pdev->dev,
3673 ioa_cfg->ucode_sglist = sglist;
3674 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
3675 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3676 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3678 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3679 ioa_cfg->ucode_sglist = NULL;
3680 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3700 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3714 if(request_firmware(&fw_entry, fname, &ioa_cfg->pdev->dev)) {
3715 dev_err(&ioa_cfg->pdev->dev, "Firmware file %s not found\n", fname);
3722 (ioa_cfg->vpd_cbs->page3_data.card_type &&
3723 ioa_cfg->vpd_cbs->page3_data.card_type != image_hdr->card_type)) {
3724 dev_err(&ioa_cfg->pdev->dev, "Invalid microcode buffer\n");
3734 dev_err(&ioa_cfg->pdev->dev, "Microcode buffer allocation failed\n");
3742 dev_err(&ioa_cfg->pdev->dev,
3747 result = ipr_update_ioa_ucode(ioa_cfg, sglist);
3777 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3781 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3782 len = snprintf(buf, PAGE_SIZE, "%d\n", ioa_cfg->sis64);
3783 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3825 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3835 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3836 dump = ioa_cfg->dump;
3838 if (ioa_cfg->sdt_state != DUMP_OBTAINED || !dump) {
3839 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3843 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3902 * @ioa_cfg: ioa config struct
3907 static int ipr_alloc_dump(struct ipr_ioa_cfg *ioa_cfg)
3920 dump->ioa_cfg = ioa_cfg;
3922 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3924 if (INACTIVE != ioa_cfg->sdt_state) {
3925 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3930 ioa_cfg->dump = dump;
3931 ioa_cfg->sdt_state = WAIT_FOR_DUMP;
3932 if (ioa_cfg->ioa_is_dead && !ioa_cfg->dump_taken) {
3933 ioa_cfg->dump_taken = 1;
3934 schedule_work(&ioa_cfg->work_q);
3936 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3943 * @ioa_cfg: ioa config struct
3948 static int ipr_free_dump(struct ipr_ioa_cfg *ioa_cfg)
3955 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3956 dump = ioa_cfg->dump;
3958 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3962 ioa_cfg->dump = NULL;
3963 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3989 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3996 rc = ipr_alloc_dump(ioa_cfg);
3998 rc = ipr_free_dump(ioa_cfg);
4018 static int ipr_free_dump(struct ipr_ioa_cfg *ioa_cfg) { return 0; };
4033 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4040 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4045 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4061 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4065 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4085 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4100 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4105 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4109 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4133 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4139 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4141 if (res && ioa_cfg->sis64)
4146 len = snprintf(buf, PAGE_SIZE, "%d:%d:%d:%d\n", ioa_cfg->host->host_no,
4149 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4172 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4177 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4183 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4247 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata;
4250 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
4276 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata;
4282 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4287 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4292 ap = ata_sas_port_alloc(&ioa_cfg->ata_host, &sata_port_info, shost);
4294 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4295 sata_port->ioa_cfg = ioa_cfg;
4307 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4324 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata;
4326 if (ioa_cfg->sis64) {
4328 clear_bit(starget->id, ioa_cfg->array_ids);
4330 clear_bit(starget->id, ioa_cfg->vset_ids);
4332 clear_bit(starget->id, ioa_cfg->target_ids);
4351 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
4354 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
4374 struct ipr_ioa_cfg *ioa_cfg;
4377 ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
4379 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4388 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4402 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
4408 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4424 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4431 if (ioa_cfg->sis64)
4437 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4482 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
4489 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4501 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4506 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4520 struct ipr_ioa_cfg *ioa_cfg;
4524 ioa_cfg = (struct ipr_ioa_cfg *) scsi_cmd->device->host->hostdata;
4526 dev_err(&ioa_cfg->pdev->dev,
4529 if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
4530 ioa_cfg->sdt_state = GET_DUMP;
4532 rc = ipr_reset_reload(ioa_cfg, IPR_SHUTDOWN_ABBREV);
4551 * @ioa_cfg: ioa config struct
4563 static int ipr_device_reset(struct ipr_ioa_cfg *ioa_cfg,
4573 ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
4577 if (ipr_cmd->ioa_cfg->sis64) {
4594 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
4596 if (ipr_cmd->ioa_cfg->sis64)
4622 struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
4628 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4629 while(ioa_cfg->in_reset_reload) {
4630 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4631 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
4632 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4637 rc = ipr_device_reset(ioa_cfg, res);
4641 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4660 struct ipr_ioa_cfg *ioa_cfg;
4666 ioa_cfg = (struct ipr_ioa_cfg *) scsi_cmd->device->host->hostdata;
4677 if (ioa_cfg->in_reset_reload)
4679 if (ioa_cfg->ioa_is_dead)
4682 list_for_each_entry(ipr_cmd, &ioa_cfg->pending_q, queue) {
4704 list_for_each_entry(ipr_cmd, &ioa_cfg->pending_q, queue) {
4711 rc = ipr_device_reset(ioa_cfg, res);
4740 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
4744 if (!ioa_cfg->sis64)
4745 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
4747 scsi_report_bus_reset(ioa_cfg->host, res->bus);
4761 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
4779 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
4784 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4785 if (ipr_cmd->completion.done || ioa_cfg->in_reset_reload) {
4786 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4791 reset_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
4801 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4817 struct ipr_ioa_cfg *ioa_cfg;
4824 ioa_cfg = (struct ipr_ioa_cfg *)scsi_cmd->device->host->hostdata;
4831 if (ioa_cfg->in_reset_reload || ioa_cfg->ioa_is_dead)
4836 list_for_each_entry(ipr_cmd, &ioa_cfg->pending_q, queue) {
4847 ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
4868 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
4900 * @ioa_cfg: ioa config struct
4905 static irqreturn_t ipr_handle_other_interrupt(struct ipr_ioa_cfg *ioa_cfg)
4910 int_mask_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg32);
4911 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32) & ~int_mask_reg;
4917 if (ioa_cfg->sis64) {
4918 int_mask_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
4919 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg) & ~int_mask_reg;
4923 writel(IPR_PCII_IPL_STAGE_CHANGE, ioa_cfg->regs.clr_interrupt_reg);
4924 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg) & ~int_mask_reg;
4925 list_del(&ioa_cfg->reset_cmd->queue);
4926 del_timer(&ioa_cfg->reset_cmd->timer);
4927 ipr_reset_ioa_job(ioa_cfg->reset_cmd);
4937 writel(IPR_PCII_IOA_TRANS_TO_OPER, ioa_cfg->regs.set_interrupt_mask_reg);
4940 writel(IPR_PCII_IOA_TRANS_TO_OPER, ioa_cfg->regs.clr_interrupt_reg);
4941 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
4943 list_del(&ioa_cfg->reset_cmd->queue);
4944 del_timer(&ioa_cfg->reset_cmd->timer);
4945 ipr_reset_ioa_job(ioa_cfg->reset_cmd);
4948 ioa_cfg->ioa_unit_checked = 1;
4950 dev_err(&ioa_cfg->pdev->dev,
4953 if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
4954 ioa_cfg->sdt_state = GET_DUMP;
4956 ipr_mask_and_clear_interrupts(ioa_cfg, ~0);
4957 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
4965 * @ioa_cfg: ioa config struct
4971 static void ipr_isr_eh(struct ipr_ioa_cfg *ioa_cfg, char *msg)
4973 ioa_cfg->errors_logged++;
4974 dev_err(&ioa_cfg->pdev->dev, "%s\n", msg);
4976 if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
4977 ioa_cfg->sdt_state = GET_DUMP;
4979 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
4992 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)devp;
5001 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
5004 if (!ioa_cfg->allow_interrupts) {
5005 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
5012 while ((be32_to_cpu(*ioa_cfg->hrrq_curr) & IPR_HRRQ_TOGGLE_BIT) ==
5013 ioa_cfg->toggle_bit) {
5015 cmd_index = (be32_to_cpu(*ioa_cfg->hrrq_curr) &
5019 ipr_isr_eh(ioa_cfg, "Invalid response handle from IOA");
5020 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
5024 ipr_cmd = ioa_cfg->ipr_cmnd_list[cmd_index];
5036 if (ioa_cfg->hrrq_curr < ioa_cfg->hrrq_end) {
5037 ioa_cfg->hrrq_curr++;
5039 ioa_cfg->hrrq_curr = ioa_cfg->hrrq_start;
5040 ioa_cfg->toggle_bit ^= 1u;
5047 writel(IPR_PCII_HRRQ_UPDATED, ioa_cfg->regs.clr_interrupt_reg32);
5048 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
5053 ipr_isr_eh(ioa_cfg, "Error clearing HRRQ");
5054 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
5063 rc = ipr_handle_other_interrupt(ioa_cfg);
5065 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
5071 * @ioa_cfg: ioa config struct
5077 static int ipr_build_ioadl64(struct ipr_ioa_cfg *ioa_cfg,
5094 dev_err(&ioa_cfg->pdev->dev, "pci_map_sg failed!\n");
5122 * @ioa_cfg: ioa config struct
5128 static int ipr_build_ioadl(struct ipr_ioa_cfg *ioa_cfg,
5145 dev_err(&ioa_cfg->pdev->dev, "pci_map_sg failed!\n");
5224 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5242 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
5267 if (ipr_cmd->ioa_cfg->sis64)
5350 * @ioa_cfg: ioa config struct
5361 static void ipr_dump_ioasa(struct ipr_ioa_cfg *ioa_cfg,
5377 if (ioa_cfg->log_level < IPR_DEFAULT_LOG_LEVEL)
5385 if (ioa_cfg->log_level < IPR_MAX_LOG_LEVEL) {
5397 ipr_res_err(ioa_cfg, res, "%s\n", ipr_error_table[error_index].error);
5400 if (ioa_cfg->sis64 && sizeof(struct ipr_ioasa64) < data_len)
5402 else if (!ioa_cfg->sis64 && sizeof(struct ipr_ioasa) < data_len)
5525 if (ipr_cmd->ioa_cfg->sis64)
5538 * @ioa_cfg: ioa config struct
5547 static void ipr_erp_start(struct ipr_ioa_cfg *ioa_cfg,
5563 ipr_dump_ioasa(ioa_cfg, ipr_cmd, res);
5597 scsi_report_bus_reset(ioa_cfg->host, scsi_cmd->device->channel);
5626 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
5642 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5650 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
5653 ipr_erp_start(ioa_cfg, ipr_cmd);
5671 struct ipr_ioa_cfg *ioa_cfg;
5678 ioa_cfg = (struct ipr_ioa_cfg *)scsi_cmd->device->host->hostdata;
5682 if (unlikely(!ioa_cfg->allow_cmds && !ioa_cfg->ioa_is_dead))
5685 if (unlikely(ioa_cfg->ioa_is_dead || !res)) {
5695 ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
5697 list_add_tail(&ipr_cmd->queue, &ioa_cfg->pending_q);
5725 if (ioa_cfg->sis64)
5726 rc = ipr_build_ioadl64(ioa_cfg, ipr_cmd);
5728 rc = ipr_build_ioadl(ioa_cfg, ipr_cmd);
5735 list_move_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
5775 struct ipr_ioa_cfg *ioa_cfg;
5778 ioa_cfg = (struct ipr_ioa_cfg *) host->hostdata;
5781 sprintf(buffer, "IBM %X Storage Adapter", ioa_cfg->type);
5825 struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
5829 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
5830 while(ioa_cfg->in_reset_reload) {
5831 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
5832 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
5833 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
5836 if (!ioa_cfg->allow_cmds)
5839 rc = ipr_device_reset(ioa_cfg, res);
5851 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
5865 struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
5869 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
5870 while(ioa_cfg->in_reset_reload) {
5871 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
5872 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
5873 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
5876 list_for_each_entry(ipr_cmd, &ioa_cfg->pending_q, queue) {
5878 ipr_device_reset(ioa_cfg, sata_port->res);
5882 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
5923 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5929 if (ipr_cmd->ioa_cfg->sis64)
5935 ipr_dump_ioasa(ioa_cfg, ipr_cmd, res);
5938 scsi_report_device_reset(ioa_cfg->host, res->bus, res->target);
5944 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
6051 struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
6056 if (unlikely(!ioa_cfg->allow_cmds || ioa_cfg->ioa_is_dead))
6059 ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
6062 if (ioa_cfg->sis64) {
6071 list_add_tail(&ipr_cmd->queue, &ioa_cfg->pending_q);
6080 if (ioa_cfg->sis64)
6184 * @ioa_cfg: ioa cfg struct
6193 static int ipr_invalid_adapter(struct ipr_ioa_cfg *ioa_cfg)
6197 if ((ioa_cfg->type == 0x5702) && (ioa_cfg->pdev->revision < 4)) {
6206 #define ipr_invalid_adapter(ioa_cfg) 0
6221 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6224 ioa_cfg->in_reset_reload = 0;
6225 ioa_cfg->reset_retries = 0;
6226 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
6227 wake_up_all(&ioa_cfg->reset_wait_q);
6229 spin_unlock_irq(ioa_cfg->host->host_lock);
6230 scsi_unblock_requests(ioa_cfg->host);
6231 spin_lock_irq(ioa_cfg->host->host_lock);
6250 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6256 ioa_cfg->in_reset_reload = 0;
6257 ioa_cfg->allow_cmds = 1;
6258 ioa_cfg->reset_cmd = NULL;
6259 ioa_cfg->doorbell |= IPR_RUNTIME_RESET;
6261 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
6262 if (ioa_cfg->allow_ml_add_del && (res->add_to_ml || res->del_from_ml)) {
6267 schedule_work(&ioa_cfg->work_q);
6269 list_for_each_entry_safe(hostrcb, temp, &ioa_cfg->hostrcb_free_q, queue) {
6272 ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_LOG_DATA, hostrcb);
6274 ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE, hostrcb);
6277 scsi_report_bus_reset(ioa_cfg->host, IPR_VSET_BUS);
6278 dev_info(&ioa_cfg->pdev->dev, "IOA initialized.\n");
6280 ioa_cfg->reset_retries = 0;
6281 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
6282 wake_up_all(&ioa_cfg->reset_wait_q);
6284 spin_unlock(ioa_cfg->host->host_lock);
6285 scsi_unblock_requests(ioa_cfg->host);
6286 spin_lock(ioa_cfg->host->host_lock);
6288 if (!ioa_cfg->allow_cmds)
6289 scsi_block_requests(ioa_cfg->host);
6325 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6326 struct ipr_supported_device *supp_dev = &ioa_cfg->vpd_cbs->supp_dev;
6332 list_for_each_entry_continue(res, &ioa_cfg->used_res_q, queue) {
6349 ioa_cfg->vpd_cbs_dma +
6357 if (!ioa_cfg->sis64)
6406 * @ioa_cfg: ioa config struct
6414 static void ipr_check_term_power(struct ipr_ioa_cfg *ioa_cfg,
6431 dev_err(&ioa_cfg->pdev->dev,
6442 * @ioa_cfg: ioa config struct
6451 static void ipr_scsi_bus_speed_limit(struct ipr_ioa_cfg *ioa_cfg)
6457 max_xfer_rate = ipr_get_max_scsi_speed(ioa_cfg, i,
6458 ioa_cfg->bus_attr[i].bus_width);
6460 if (max_xfer_rate < ioa_cfg->bus_attr[i].max_xfer_rate)
6461 ioa_cfg->bus_attr[i].max_xfer_rate = max_xfer_rate;
6467 * @ioa_cfg: ioa config struct
6475 static void ipr_modify_ioafp_mode_page_28(struct ipr_ioa_cfg *ioa_cfg,
6493 dev_err(&ioa_cfg->pdev->dev,
6499 bus_attr = &ioa_cfg->bus_attr[i];
6550 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6551 struct ipr_mode_pages *mode_pages = &ioa_cfg->vpd_cbs->mode_pages;
6555 ipr_scsi_bus_speed_limit(ioa_cfg);
6556 ipr_check_term_power(ioa_cfg, mode_pages);
6557 ipr_modify_ioafp_mode_page_28(ioa_cfg, mode_pages);
6562 ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, mode_pages),
6566 ipr_cmd->u.res = list_entry(ioa_cfg->used_res_q.next,
6611 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6614 dev_err(&ioa_cfg->pdev->dev,
6618 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
6619 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
6635 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6640 ipr_cmd->u.res = list_entry(ioa_cfg->used_res_q.next,
6660 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6664 0x28, ioa_cfg->vpd_cbs_dma +
6688 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6689 struct ipr_mode_pages *mode_pages = &ioa_cfg->vpd_cbs->mode_pages;
6704 ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, mode_pages),
6748 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6752 0x24, ioa_cfg->vpd_cbs_dma +
6779 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6786 if (ioa_cfg->sis64)
6787 flag = ioa_cfg->u.cfg_table64->hdr64.flags;
6789 flag = ioa_cfg->u.cfg_table->hdr.flags;
6792 dev_err(&ioa_cfg->pdev->dev, "Microcode download required\n");
6794 list_for_each_entry_safe(res, temp, &ioa_cfg->used_res_q, queue)
6797 if (ioa_cfg->sis64)
6798 entries = be16_to_cpu(ioa_cfg->u.cfg_table64->hdr64.num_entries);
6800 entries = ioa_cfg->u.cfg_table->hdr.num_entries;
6803 if (ioa_cfg->sis64)
6804 cfgtew.u.cfgte64 = &ioa_cfg->u.cfg_table64->dev[i];
6806 cfgtew.u.cfgte = &ioa_cfg->u.cfg_table->dev[i];
6811 list_move_tail(&res->queue, &ioa_cfg->used_res_q);
6818 if (list_empty(&ioa_cfg->free_res_q)) {
6819 dev_err(&ioa_cfg->pdev->dev, "Too many devices attached\n");
6824 res = list_entry(ioa_cfg->free_res_q.next,
6826 list_move_tail(&res->queue, &ioa_cfg->used_res_q);
6840 list_move_tail(&res->queue, &ioa_cfg->used_res_q);
6846 list_move_tail(&res->queue, &ioa_cfg->free_res_q);
6849 if (ioa_cfg->dual_raid && ipr_dual_ioa_raid)
6870 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6872 struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data;
6873 struct ipr_inquiry_cap *cap = &ioa_cfg->vpd_cbs->cap;
6877 ioa_cfg->dual_raid = 1;
6878 dev_info(&ioa_cfg->pdev->dev, "Adapter firmware version: %02X%02X%02X%02X\n",
6885 ioarcb->cmd_pkt.cdb[6] = (ioa_cfg->cfg_table_size >> 16) & 0xff;
6886 ioarcb->cmd_pkt.cdb[7] = (ioa_cfg->cfg_table_size >> 8) & 0xff;
6887 ioarcb->cmd_pkt.cdb[8] = ioa_cfg->cfg_table_size & 0xff;
6889 ipr_init_ioadl(ipr_cmd, ioa_cfg->cfg_table_dma, ioa_cfg->cfg_table_size,
6962 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6963 struct ipr_inquiry_page0 *page0 = &ioa_cfg->vpd_cbs->page0_data;
6964 struct ipr_inquiry_cap *cap = &ioa_cfg->vpd_cbs->cap;
6972 ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, cap),
6993 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7000 ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, page3_data),
7019 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7025 memcpy(type, ioa_cfg->vpd_cbs->ioa_vpd.std_inq_data.vpids.product_id, 4);
7027 ioa_cfg->type = simple_strtoul((char *)type, NULL, 16);
7032 ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, page0_data),
7050 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7056 ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, ioa_vpd),
7075 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7079 dev_info(&ioa_cfg->pdev->dev, "Starting IOA initialization sequence.\n");
7085 if (ioa_cfg->sis64)
7088 ((u64) ioa_cfg->host_rrq_dma >> 24) & 0xff;
7090 ((u64) ioa_cfg->host_rrq_dma >> 16) & 0xff;
7092 ((u64) ioa_cfg->host_rrq_dma >> 8) & 0xff;
7094 ((u64) ioa_cfg->host_rrq_dma) & 0xff;
7100 if (ioa_cfg->sis64) {
7102 ((u64) ioa_cfg->host_rrq_dma >> 56) & 0xff;
7104 ((u64) ioa_cfg->host_rrq_dma >> 48) & 0xff;
7106 ((u64) ioa_cfg->host_rrq_dma >> 40) & 0xff;
7108 ((u64) ioa_cfg->host_rrq_dma >> 32) & 0xff;
7134 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7137 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
7139 if (ioa_cfg->reset_cmd == ipr_cmd) {
7144 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
7164 list_add_tail(&ipr_cmd->queue, &ipr_cmd->ioa_cfg->pending_q);
7174 * ipr_init_ioa_mem - Initialize ioa_cfg control block
7175 * @ioa_cfg: ioa cfg struct
7180 static void ipr_init_ioa_mem(struct ipr_ioa_cfg *ioa_cfg)
7182 memset(ioa_cfg->host_rrq, 0, sizeof(u32) * IPR_NUM_CMD_BLKS);
7185 ioa_cfg->hrrq_start = ioa_cfg->host_rrq;
7186 ioa_cfg->hrrq_end = &ioa_cfg->host_rrq[IPR_NUM_CMD_BLKS - 1];
7187 ioa_cfg->hrrq_curr = ioa_cfg->hrrq_start;
7188 ioa_cfg->toggle_bit = 1;
7191 memset(ioa_cfg->u.cfg_table, 0, ioa_cfg->cfg_table_size);
7206 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7209 feedback = readl(ioa_cfg->regs.init_feedback_reg);
7224 writel(IPR_PCII_IPL_STAGE_CHANGE, ioa_cfg->regs.set_interrupt_mask_reg);
7225 int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
7226 stage_time = ioa_cfg->transop_timeout;
7229 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
7234 writeq(maskval, ioa_cfg->regs.set_interrupt_mask_reg);
7235 int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
7245 list_add_tail(&ipr_cmd->queue, &ioa_cfg->pending_q);
7262 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7268 ipr_init_ioa_mem(ioa_cfg);
7270 ioa_cfg->allow_interrupts = 1;
7271 if (ioa_cfg->sis64) {
7273 writel(IPR_ENDIAN_SWAP_KEY, ioa_cfg->regs.endian_swap_reg);
7274 int_reg = readl(ioa_cfg->regs.endian_swap_reg);
7277 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
7281 ioa_cfg->regs.clr_interrupt_mask_reg32);
7282 int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
7287 writel(ioa_cfg->doorbell, ioa_cfg->regs.set_uproc_interrupt_reg32);
7289 if (ioa_cfg->sis64) {
7292 writeq(maskval, ioa_cfg->regs.clr_interrupt_mask_reg);
7294 writel(IPR_PCII_OPER_INTERRUPTS, ioa_cfg->regs.clr_interrupt_mask_reg32);
7296 int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
7298 dev_info(&ioa_cfg->pdev->dev, "Initializing IOA.\n");
7300 if (ioa_cfg->sis64) {
7306 ipr_cmd->timer.expires = jiffies + (ioa_cfg->transop_timeout * HZ);
7310 list_add_tail(&ipr_cmd->queue, &ioa_cfg->pending_q);
7328 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7330 if (ioa_cfg->sdt_state == GET_DUMP)
7331 ioa_cfg->sdt_state = ABORT_DUMP;
7340 * @ioa_cfg: ioa config struct
7348 static void ipr_unit_check_no_data(struct ipr_ioa_cfg *ioa_cfg)
7350 ioa_cfg->errors_logged++;
7351 dev_err(&ioa_cfg->pdev->dev, "IOA unit check with no data\n");
7356 * @ioa_cfg: ioa config struct
7364 static void ipr_get_unit_check_buffer(struct ipr_ioa_cfg *ioa_cfg)
7372 mailbox = readl(ioa_cfg->ioa_mailbox);
7374 if (!ioa_cfg->sis64 && !ipr_sdt_is_fmt2(mailbox)) {
7375 ipr_unit_check_no_data(ioa_cfg);
7380 rc = ipr_get_ldump_data_section(ioa_cfg, mailbox, (__be32 *) &sdt,
7386 ipr_unit_check_no_data(ioa_cfg);
7398 hostrcb = list_entry(ioa_cfg->hostrcb_free_q.next,
7403 rc = ipr_get_ldump_data_section(ioa_cfg,
7409 ipr_handle_log_data(ioa_cfg, hostrcb);
7412 ioa_cfg->sdt_state == GET_DUMP)
7413 ioa_cfg->sdt_state = WAIT_FOR_DUMP;
7415 ipr_unit_check_no_data(ioa_cfg);
7417 list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_free_q);
7433 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7438 ioa_cfg->pdev->state_saved = true;
7439 rc = pci_restore_state(ioa_cfg->pdev);
7446 if (ipr_set_pcix_cmd_reg(ioa_cfg)) {
7451 ipr_fail_all_ops(ioa_cfg);
7453 if (ioa_cfg->sis64) {
7455 writel(IPR_ENDIAN_SWAP_KEY, ioa_cfg->regs.endian_swap_reg);
7456 int_reg = readl(ioa_cfg->regs.endian_swap_reg);
7459 if (ioa_cfg->ioa_unit_checked) {
7460 ioa_cfg->ioa_unit_checked = 0;
7461 ipr_get_unit_check_buffer(ioa_cfg);
7467 if (ioa_cfg->in_ioa_bringdown) {
7472 if (GET_DUMP == ioa_cfg->sdt_state) {
7475 schedule_work(&ioa_cfg->work_q);
7496 pci_unblock_user_cfg_access(ipr_cmd->ioa_cfg->pdev);
7513 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7517 pci_block_user_cfg_access(ioa_cfg->pdev);
7519 if (ioa_cfg->ipr_chip->bist_method == IPR_MMIO)
7521 ioa_cfg->regs.set_uproc_interrupt_reg32);
7523 rc = pci_write_config_byte(ioa_cfg->pdev, PCI_BIST, PCI_BIST_START);
7530 pci_unblock_user_cfg_access(ipr_cmd->ioa_cfg->pdev);
7551 pci_set_pcie_reset_state(ipr_cmd->ioa_cfg->pdev, pcie_deassert_reset);
7569 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7570 struct pci_dev *pdev = ioa_cfg->pdev;
7583 * @ioa_cfg: ioa config struct
7588 static int ipr_reset_allowed(struct ipr_ioa_cfg *ioa_cfg)
7592 temp_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
7613 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7616 if (!ipr_reset_allowed(ioa_cfg) && ipr_cmd->u.time_left) {
7620 ipr_cmd->job_step = ioa_cfg->reset;
7641 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7646 rc = pci_read_config_word(ioa_cfg->pdev, PCI_COMMAND, &cmd_reg);
7649 ipr_mask_and_clear_interrupts(ioa_cfg, ~0);
7650 writel(IPR_UPROCI_RESET_ALERT, ioa_cfg->regs.set_uproc_interrupt_reg32);
7653 ipr_cmd->job_step = ioa_cfg->reset;
7674 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7675 struct ipr_sglist *sglist = ioa_cfg->ucode_sglist;
7677 pci_unmap_sg(ioa_cfg->pdev, sglist->scatterlist,
7696 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7697 struct ipr_sglist *sglist = ioa_cfg->ucode_sglist;
7713 if (ioa_cfg->sis64)
7739 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7745 if (shutdown_type != IPR_SHUTDOWN_NONE && !ioa_cfg->ioa_is_dead) {
7755 else if (ioa_cfg->dual_raid && ipr_dual_ioa_raid)
7783 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7788 if (ioa_cfg->reset_cmd != ipr_cmd) {
7793 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
7811 * @ioa_cfg: ioa config struct
7823 static void _ipr_initiate_ioa_reset(struct ipr_ioa_cfg *ioa_cfg,
7829 ioa_cfg->in_reset_reload = 1;
7830 ioa_cfg->allow_cmds = 0;
7831 scsi_block_requests(ioa_cfg->host);
7833 ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
7834 ioa_cfg->reset_cmd = ipr_cmd;
7843 * @ioa_cfg: ioa config struct
7853 static void ipr_initiate_ioa_reset(struct ipr_ioa_cfg *ioa_cfg,
7856 if (ioa_cfg->ioa_is_dead)
7859 if (ioa_cfg->in_reset_reload && ioa_cfg->sdt_state == GET_DUMP)
7860 ioa_cfg->sdt_state = ABORT_DUMP;
7862 if (ioa_cfg->reset_retries++ >= IPR_NUM_RESET_RELOAD_RETRIES) {
7863 dev_err(&ioa_cfg->pdev->dev,
7866 ioa_cfg->reset_retries = 0;
7867 ioa_cfg->ioa_is_dead = 1;
7869 if (ioa_cfg->in_ioa_bringdown) {
7870 ioa_cfg->reset_cmd = NULL;
7871 ioa_cfg->in_reset_reload = 0;
7872 ipr_fail_all_ops(ioa_cfg);
7873 wake_up_all(&ioa_cfg->reset_wait_q);
7875 spin_unlock_irq(ioa_cfg->host->host_lock);
7876 scsi_unblock_requests(ioa_cfg->host);
7877 spin_lock_irq(ioa_cfg->host->host_lock);
7880 ioa_cfg->in_ioa_bringdown = 1;
7885 _ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_shutdown_ioa,
7900 ipr_cmd->ioa_cfg->allow_interrupts = 0;
7901 list_add_tail(&ipr_cmd->queue, &ipr_cmd->ioa_cfg->pending_q);
7917 struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
7919 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
7920 _ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_freeze, IPR_SHUTDOWN_NONE);
7921 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
7935 struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
7937 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
7938 if (ioa_cfg->needs_warm_reset)
7939 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
7941 _ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_restore_cfg_space,
7943 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
7957 struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
7959 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
7960 if (ioa_cfg->sdt_state == WAIT_FOR_DUMP)
7961 ioa_cfg->sdt_state = ABORT_DUMP;
7962 ioa_cfg->reset_retries = IPR_NUM_RESET_RELOAD_RETRIES;
7963 ioa_cfg->in_ioa_bringdown = 1;
7964 ioa_cfg->allow_cmds = 0;
7965 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
7966 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
7998 * @ioa_cfg: ioa cfg struct
8007 static int __devinit ipr_probe_ioa_part2(struct ipr_ioa_cfg *ioa_cfg)
8013 spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
8014 dev_dbg(&ioa_cfg->pdev->dev, "ioa_cfg adx: 0x%p\n", ioa_cfg);
8015 if (ioa_cfg->needs_hard_reset) {
8016 ioa_cfg->needs_hard_reset = 0;
8017 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
8019 _ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_enable_ioa,
8022 spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
8023 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
8024 spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
8026 if (ioa_cfg->ioa_is_dead) {
8028 } else if (ipr_invalid_adapter(ioa_cfg)) {
8032 dev_err(&ioa_cfg->pdev->dev,
8036 spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
8044 * @ioa_cfg: ioa config struct
8049 static void ipr_free_cmd_blks(struct ipr_ioa_cfg *ioa_cfg)
8054 if (ioa_cfg->ipr_cmnd_list[i])
8055 pci_pool_free(ioa_cfg->ipr_cmd_pool,
8056 ioa_cfg->ipr_cmnd_list[i],
8057 ioa_cfg->ipr_cmnd_list_dma[i]);
8059 ioa_cfg->ipr_cmnd_list[i] = NULL;
8062 if (ioa_cfg->ipr_cmd_pool)
8063 pci_pool_destroy (ioa_cfg->ipr_cmd_pool);
8065 ioa_cfg->ipr_cmd_pool = NULL;
8070 * @ioa_cfg: ioa cfg struct
8075 static void ipr_free_mem(struct ipr_ioa_cfg *ioa_cfg)
8079 kfree(ioa_cfg->res_entries);
8080 pci_free_consistent(ioa_cfg->pdev, sizeof(struct ipr_misc_cbs),
8081 ioa_cfg->vpd_cbs, ioa_cfg->vpd_cbs_dma);
8082 ipr_free_cmd_blks(ioa_cfg);
8083 pci_free_consistent(ioa_cfg->pdev, sizeof(u32) * IPR_NUM_CMD_BLKS,
8084 ioa_cfg->host_rrq, ioa_cfg->host_rrq_dma);
8085 pci_free_consistent(ioa_cfg->pdev, ioa_cfg->cfg_table_size,
8086 ioa_cfg->u.cfg_table,
8087 ioa_cfg->cfg_table_dma);
8090 pci_free_consistent(ioa_cfg->pdev,
8092 ioa_cfg->hostrcb[i],
8093 ioa_cfg->hostrcb_dma[i]);
8096 ipr_free_dump(ioa_cfg);
8097 kfree(ioa_cfg->trace);
8110 static void ipr_free_all_resources(struct ipr_ioa_cfg *ioa_cfg)
8112 struct pci_dev *pdev = ioa_cfg->pdev;
8115 free_irq(pdev->irq, ioa_cfg);
8117 iounmap(ioa_cfg->hdw_dma_regs);
8119 ipr_free_mem(ioa_cfg);
8120 scsi_host_put(ioa_cfg->host);
8127 * @ioa_cfg: ioa config struct
8132 static int __devinit ipr_alloc_cmd_blks(struct ipr_ioa_cfg *ioa_cfg)
8139 ioa_cfg->ipr_cmd_pool = pci_pool_create (IPR_NAME, ioa_cfg->pdev,
8142 if (!ioa_cfg->ipr_cmd_pool)
8146 ipr_cmd = pci_pool_alloc (ioa_cfg->ipr_cmd_pool, GFP_KERNEL, &dma_addr);
8149 ipr_free_cmd_blks(ioa_cfg);
8154 ioa_cfg->ipr_cmnd_list[i] = ipr_cmd;
8155 ioa_cfg->ipr_cmnd_list_dma[i] = dma_addr;
8159 if (ioa_cfg->sis64)
8165 if (ioa_cfg->sis64) {
8179 ipr_cmd->ioa_cfg = ioa_cfg;
8183 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
8191 * @ioa_cfg: ioa config struct
8196 static int __devinit ipr_alloc_mem(struct ipr_ioa_cfg *ioa_cfg)
8198 struct pci_dev *pdev = ioa_cfg->pdev;
8202 ioa_cfg->res_entries = kzalloc(sizeof(struct ipr_resource_entry) *
8203 ioa_cfg->max_devs_supported, GFP_KERNEL);
8205 if (!ioa_cfg->res_entries)
8208 if (ioa_cfg->sis64) {
8209 ioa_cfg->target_ids = kzalloc(sizeof(unsigned long) *
8210 BITS_TO_LONGS(ioa_cfg->max_devs_supported), GFP_KERNEL);
8211 ioa_cfg->array_ids = kzalloc(sizeof(unsigned long) *
8212 BITS_TO_LONGS(ioa_cfg->max_devs_supported), GFP_KERNEL);
8213 ioa_cfg->vset_ids = kzalloc(sizeof(unsigned long) *
8214 BITS_TO_LONGS(ioa_cfg->max_devs_supported), GFP_KERNEL);
8217 for (i = 0; i < ioa_cfg->max_devs_supported; i++) {
8218 list_add_tail(&ioa_cfg->res_entries[i].queue, &ioa_cfg->free_res_q);
8219 ioa_cfg->res_entries[i].ioa_cfg = ioa_cfg;
8222 ioa_cfg->vpd_cbs = pci_alloc_consistent(ioa_cfg->pdev,
8224 &ioa_cfg->vpd_cbs_dma);
8226 if (!ioa_cfg->vpd_cbs)
8229 if (ipr_alloc_cmd_blks(ioa_cfg))
8232 ioa_cfg->host_rrq = pci_alloc_consistent(ioa_cfg->pdev,
8234 &ioa_cfg->host_rrq_dma);
8236 if (!ioa_cfg->host_rrq)
8239 ioa_cfg->u.cfg_table = pci_alloc_consistent(ioa_cfg->pdev,
8240 ioa_cfg->cfg_table_size,
8241 &ioa_cfg->cfg_table_dma);
8243 if (!ioa_cfg->u.cfg_table)
8247 ioa_cfg->hostrcb[i] = pci_alloc_consistent(ioa_cfg->pdev,
8249 &ioa_cfg->hostrcb_dma[i]);
8251 if (!ioa_cfg->hostrcb[i])
8254 ioa_cfg->hostrcb[i]->hostrcb_dma =
8255 ioa_cfg->hostrcb_dma[i] + offsetof(struct ipr_hostrcb, hcam);
8256 ioa_cfg->hostrcb[i]->ioa_cfg = ioa_cfg;
8257 list_add_tail(&ioa_cfg->hostrcb[i]->queue, &ioa_cfg->hostrcb_free_q);
8260 ioa_cfg->trace = kzalloc(sizeof(struct ipr_trace_entry) *
8263 if (!ioa_cfg->trace)
8274 ioa_cfg->hostrcb[i],
8275 ioa_cfg->hostrcb_dma[i]);
8277 pci_free_consistent(pdev, ioa_cfg->cfg_table_size,
8278 ioa_cfg->u.cfg_table,
8279 ioa_cfg->cfg_table_dma);
8282 ioa_cfg->host_rrq, ioa_cfg->host_rrq_dma);
8284 ipr_free_cmd_blks(ioa_cfg);
8287 ioa_cfg->vpd_cbs, ioa_cfg->vpd_cbs_dma);
8289 kfree(ioa_cfg->res_entries);
8295 * @ioa_cfg: ioa config struct
8300 static void __devinit ipr_initialize_bus_attr(struct ipr_ioa_cfg *ioa_cfg)
8305 ioa_cfg->bus_attr[i].bus = i;
8306 ioa_cfg->bus_attr[i].qas_enabled = 0;
8307 ioa_cfg->bus_attr[i].bus_width = IPR_DEFAULT_BUS_WIDTH;
8309 ioa_cfg->bus_attr[i].max_xfer_rate = ipr_max_bus_speeds[ipr_max_speed];
8311 ioa_cfg->bus_attr[i].max_xfer_rate = IPR_U160_SCSI_RATE;
8317 * @ioa_cfg: ioa config struct
8324 static void __devinit ipr_init_ioa_cfg(struct ipr_ioa_cfg *ioa_cfg,
8331 ioa_cfg->host = host;
8332 ioa_cfg->pdev = pdev;
8333 ioa_cfg->log_level = ipr_log_level;
8334 ioa_cfg->doorbell = IPR_DOORBELL;
8335 sprintf(ioa_cfg->eye_catcher, IPR_EYECATCHER);
8336 sprintf(ioa_cfg->trace_start, IPR_TRACE_START_LABEL);
8337 sprintf(ioa_cfg->ipr_free_label, IPR_FREEQ_LABEL);
8338 sprintf(ioa_cfg->ipr_pending_label, IPR_PENDQ_LABEL);
8339 sprintf(ioa_cfg->cfg_table_start, IPR_CFG_TBL_START);
8340 sprintf(ioa_cfg->resource_table_label, IPR_RES_TABLE_LABEL);
8341 sprintf(ioa_cfg->ipr_hcam_label, IPR_HCAM_LABEL);
8342 sprintf(ioa_cfg->ipr_cmd_label, IPR_CMD_LABEL);
8344 INIT_LIST_HEAD(&ioa_cfg->free_q);
8345 INIT_LIST_HEAD(&ioa_cfg->pending_q);
8346 INIT_LIST_HEAD(&ioa_cfg->hostrcb_free_q);
8347 INIT_LIST_HEAD(&ioa_cfg->hostrcb_pending_q);
8348 INIT_LIST_HEAD(&ioa_cfg->free_res_q);
8349 INIT_LIST_HEAD(&ioa_cfg->used_res_q);
8350 INIT_WORK(&ioa_cfg->work_q, ipr_worker_thread);
8351 init_waitqueue_head(&ioa_cfg->reset_wait_q);
8352 init_waitqueue_head(&ioa_cfg->msi_wait_q);
8353 ioa_cfg->sdt_state = INACTIVE;
8355 ipr_initialize_bus_attr(ioa_cfg);
8356 ioa_cfg->max_devs_supported = ipr_max_devs;
8358 if (ioa_cfg->sis64) {
8362 ioa_cfg->max_devs_supported = IPR_MAX_SIS64_DEVS;
8367 ioa_cfg->max_devs_supported = IPR_MAX_PHYSICAL_DEVS;
8372 pci_set_drvdata(pdev, ioa_cfg);
8374 p = &ioa_cfg->chip_cfg->regs;
8375 t = &ioa_cfg->regs;
8376 base = ioa_cfg->hdw_dma_regs;
8395 if (ioa_cfg->sis64) {
8434 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)devp;
8438 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
8440 ioa_cfg->msi_received = 1;
8441 wake_up(&ioa_cfg->msi_wait_q);
8443 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
8459 static int __devinit ipr_test_msi(struct ipr_ioa_cfg *ioa_cfg,
8468 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
8469 init_waitqueue_head(&ioa_cfg->msi_wait_q);
8470 ioa_cfg->msi_received = 0;
8471 ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);
8472 writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE, ioa_cfg->regs.clr_interrupt_mask_reg32);
8473 int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
8474 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
8476 rc = request_irq(pdev->irq, ipr_test_intr, 0, IPR_NAME, ioa_cfg);
8483 writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE, ioa_cfg->regs.sense_interrupt_reg32);
8484 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
8485 wait_event_timeout(ioa_cfg->msi_wait_q, ioa_cfg->msi_received, HZ);
8486 ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);
8488 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
8489 if (!ioa_cfg->msi_received) {
8496 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
8498 free_irq(pdev->irq, ioa_cfg);
8516 struct ipr_ioa_cfg *ioa_cfg;
8532 host = scsi_host_alloc(&driver_template, sizeof(*ioa_cfg));
8540 ioa_cfg = (struct ipr_ioa_cfg *)host->hostdata;
8541 memset(ioa_cfg, 0, sizeof(struct ipr_ioa_cfg));
8542 ata_host_init(&ioa_cfg->ata_host, &pdev->dev,
8545 ioa_cfg->ipr_chip = ipr_get_chip_info(dev_id);
8547 if (!ioa_cfg->ipr_chip) {
8554 ioa_cfg->sis64 = ioa_cfg->ipr_chip->sis_type == IPR_SIS64 ? 1 : 0;
8555 ioa_cfg->chip_cfg = ioa_cfg->ipr_chip->cfg;
8558 ioa_cfg->transop_timeout = ipr_transop_timeout;
8560 ioa_cfg->transop_timeout = IPR_LONG_OPERATIONAL_TIMEOUT;
8562 ioa_cfg->transop_timeout = IPR_OPERATIONAL_TIMEOUT;
8564 ioa_cfg->revid = pdev->revision;
8584 ioa_cfg->hdw_dma_regs = ipr_regs;
8585 ioa_cfg->hdw_dma_regs_pci = ipr_regs_pci;
8586 ioa_cfg->ioa_mailbox = ioa_cfg->chip_cfg->mailbox + ipr_regs;
8588 ipr_init_ioa_cfg(ioa_cfg, host, pdev);
8592 if (ioa_cfg->sis64) {
8608 ioa_cfg->chip_cfg->cache_line_size);
8617 if (ioa_cfg->ipr_chip->intr_type == IPR_USE_MSI && !pci_enable_msi(pdev)) {
8618 rc = ipr_test_msi(ioa_cfg, pdev);
8637 if ((rc = ipr_save_pcix_cmd_reg(ioa_cfg)))
8640 if ((rc = ipr_set_pcix_cmd_reg(ioa_cfg)))
8643 if (ioa_cfg->sis64)
8644 ioa_cfg->cfg_table_size = (sizeof(struct ipr_config_table_hdr64)
8646 * ioa_cfg->max_devs_supported)));
8648 ioa_cfg->cfg_table_size = (sizeof(struct ipr_config_table_hdr)
8650 * ioa_cfg->max_devs_supported)));
8652 rc = ipr_alloc_mem(ioa_cfg);
8663 mask = readl(ioa_cfg->regs.sense_interrupt_mask_reg32);
8664 interrupts = readl(ioa_cfg->regs.sense_interrupt_reg32);
8665 uproc = readl(ioa_cfg->regs.sense_uproc_interrupt_reg32);
8667 ioa_cfg->needs_hard_reset = 1;
8669 ioa_cfg->needs_hard_reset = 1;
8671 ioa_cfg->ioa_unit_checked = 1;
8673 ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);
8675 ioa_cfg->msi_received ? 0 : IRQF_SHARED,
8676 IPR_NAME, ioa_cfg);
8685 (dev_id->device == PCI_DEVICE_ID_IBM_OBSIDIAN_E && !ioa_cfg->revid)) {
8686 ioa_cfg->needs_warm_reset = 1;
8687 ioa_cfg->reset = ipr_reset_slot_reset;
8689 ioa_cfg->reset = ipr_reset_start_bist;
8692 list_add_tail(&ioa_cfg->queue, &ipr_ioa_head);
8700 ipr_free_mem(ioa_cfg);
8716 * @ioa_cfg: ioa config struct
8724 static void ipr_scan_vsets(struct ipr_ioa_cfg *ioa_cfg)
8730 scsi_add_device(ioa_cfg->host, IPR_VSET_BUS, target, lun);
8735 * @ioa_cfg: ioa config struct
8747 static void ipr_initiate_ioa_bringdown(struct ipr_ioa_cfg *ioa_cfg,
8751 if (ioa_cfg->sdt_state == WAIT_FOR_DUMP)
8752 ioa_cfg->sdt_state = ABORT_DUMP;
8753 ioa_cfg->reset_retries = 0;
8754 ioa_cfg->in_ioa_bringdown = 1;
8755 ipr_initiate_ioa_reset(ioa_cfg, shutdown_type);
8771 struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
8774 spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
8775 while(ioa_cfg->in_reset_reload) {
8776 spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
8777 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
8778 spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
8781 ipr_initiate_ioa_bringdown(ioa_cfg, IPR_SHUTDOWN_NORMAL);
8783 spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
8784 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
8786 spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
8789 list_del(&ioa_cfg->queue);
8792 if (ioa_cfg->sdt_state == ABORT_DUMP)
8793 ioa_cfg->sdt_state = WAIT_FOR_DUMP;
8794 spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
8796 ipr_free_all_resources(ioa_cfg);
8812 struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
8816 ipr_remove_trace_file(&ioa_cfg->host->shost_dev.kobj,
8818 ipr_remove_dump_file(&ioa_cfg->host->shost_dev.kobj,
8820 scsi_remove_host(ioa_cfg->host);
8836 struct ipr_ioa_cfg *ioa_cfg;
8844 ioa_cfg = pci_get_drvdata(pdev);
8845 rc = ipr_probe_ioa_part2(ioa_cfg);
8852 rc = scsi_add_host(ioa_cfg->host, &pdev->dev);
8859 rc = ipr_create_trace_file(&ioa_cfg->host->shost_dev.kobj,
8863 scsi_remove_host(ioa_cfg->host);
8868 rc = ipr_create_dump_file(&ioa_cfg->host->shost_dev.kobj,
8872 ipr_remove_trace_file(&ioa_cfg->host->shost_dev.kobj,
8874 scsi_remove_host(ioa_cfg->host);
8879 scsi_scan_host(ioa_cfg->host);
8880 ipr_scan_vsets(ioa_cfg);
8881 scsi_add_device(ioa_cfg->host, IPR_IOA_BUS, IPR_IOA_TARGET, IPR_IOA_LUN);
8882 ioa_cfg->allow_ml_add_del = 1;
8883 ioa_cfg->host->max_channel = IPR_VSET_BUS;
8884 schedule_work(&ioa_cfg->work_q);
8900 struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
8903 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
8904 while(ioa_cfg->in_reset_reload) {
8905 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
8906 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
8907 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
8910 ipr_initiate_ioa_bringdown(ioa_cfg, IPR_SHUTDOWN_NORMAL);
8911 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
8912 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
9011 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
9013 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
9025 struct ipr_ioa_cfg *ioa_cfg;
9033 list_for_each_entry(ioa_cfg, &ipr_ioa_head, queue) {
9034 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
9035 if (!ioa_cfg->allow_cmds) {
9036 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
9040 ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
9047 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);