Lines Matching defs:ioc in /asuswrt-rt-n18u-9.0.0.4.380.2695/release/src-rt-6.x.4708/linux/linux-2.6/drivers/scsi/mpt2sas/

67 static int _ctl_send_release(struct MPT2SAS_ADAPTER *ioc, u8 buffer_type,
85 * @ioc: per adapter object
87 * Context: Calling function should acquire ioc->sas_device_lock
93 _ctl_sas_device_find_by_handle(struct MPT2SAS_ADAPTER *ioc, u16 handle)
98 list_for_each_entry(sas_device, &ioc->sas_device_list, list) {
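
The Context note above says callers must hold ioc->sas_device_lock while this lookup runs (the lines at 237-249 below show exactly that caller pattern). The elided body is a plain list walk; a minimal sketch of its likely shape, reconstructed from the visible signature and list_for_each_entry line:

static struct _sas_device *
_ctl_sas_device_find_by_handle(struct MPT2SAS_ADAPTER *ioc, u16 handle)
{
    struct _sas_device *sas_device;

    /* caller is expected to hold ioc->sas_device_lock */
    list_for_each_entry(sas_device, &ioc->sas_device_list, list) {
        if (sas_device->handle != handle)
            continue;
        return sas_device;      /* matching device handle found */
    }
    return NULL;                /* no device with this handle */
}
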
112 * @ioc: per adapter object
122 _ctl_display_some_debug(struct MPT2SAS_ADAPTER *ioc, u16 smid,
128 if (!(ioc->logging_level & MPT_DEBUG_IOCTL))
131 mpi_request = mpt2sas_base_get_msg_frame(ioc, smid);
138 snprintf(ioc->tmp_string, MPT_STRING_LENGTH,
142 desc = ioc->tmp_string;
159 snprintf(ioc->tmp_string, MPT_STRING_LENGTH,
164 desc = ioc->tmp_string;
190 snprintf(ioc->tmp_string, MPT_STRING_LENGTH,
194 desc = ioc->tmp_string;
218 ioc->name, calling_function_name, desc, smid);
226 ioc->name, le16_to_cpu(mpi_reply->IOCStatus),
237 spin_lock_irqsave(&ioc->sas_device_lock, flags);
238 sas_device = _ctl_sas_device_find_by_handle(ioc,
242 "phy(%d)\n", ioc->name, (unsigned long long)
246 ioc->name, sas_device->enclosure_logical_id,
249 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
253 "(0x%02x)\n", ioc->name,
262 * @ioc: per adapter object
268 * The callback handler when using ioc->ctl_cb_idx.
274 mpt2sas_ctl_done(struct MPT2SAS_ADAPTER *ioc, u16 smid, u8 msix_index,
282 if (ioc->ctl_cmds.status == MPT2_CMD_NOT_USED)
284 if (ioc->ctl_cmds.smid != smid)
286 ioc->ctl_cmds.status |= MPT2_CMD_COMPLETE;
287 mpi_reply = mpt2sas_base_get_reply_virt_addr(ioc, reply);
289 memcpy(ioc->ctl_cmds.reply, mpi_reply, mpi_reply->MsgLength*4);
290 ioc->ctl_cmds.status |= MPT2_CMD_REPLY_VALID;
300 sense_data = mpt2sas_base_get_sense_buffer(ioc,
302 memcpy(ioc->ctl_cmds.sense, sense_data, sz);
307 _ctl_display_some_debug(ioc, smid, "ctl_done", mpi_reply);
309 ioc->ctl_cmds.status &= ~MPT2_CMD_PENDING;
310 complete(&ioc->ctl_cmds.done);
316 * @ioc: per adapter object
319 * The bitmask in ioc->event_type[] indicates which events should be
325 _ctl_check_event_type(struct MPT2SAS_ADAPTER *ioc, u16 event)
330 if (event >= 128 || !event || !ioc->event_log)
337 return desired_event & ioc->event_type[i];
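
The lines at 325-337 imply that ioc->event_type[] is treated as a 128-bit enable mask stored as 32-bit words, indexed by MPI event number. A hedged sketch of the full check, with the word/bit arithmetic filled in between the visible bounds test and return statement:

static int
_ctl_check_event_type(struct MPT2SAS_ADAPTER *ioc, u16 event)
{
    u16 i;
    u32 desired_event;

    /* valid MPI event numbers are 1..127; anything else is not logged */
    if (event >= 128 || !event || !ioc->event_log)
        return 0;

    desired_event = (1 << (event % 32));    /* bit inside the 32-bit word */
    if (!desired_event)
        desired_event = 1;
    i = event / 32;                         /* which word of event_type[] */
    return desired_event & ioc->event_type[i];
}
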
342 * @ioc: per adapter object
348 mpt2sas_ctl_add_to_event_log(struct MPT2SAS_ADAPTER *ioc,
357 if (!ioc->event_log)
362 if (_ctl_check_event_type(ioc, event)) {
365 i = ioc->event_context % MPT2SAS_CTL_EVENT_LOG_SIZE;
366 event_log = ioc->event_log;
368 event_log[i].context = ioc->event_context++;
382 (send_aen && !ioc->aen_event_read_flag)) {
383 ioc->aen_event_read_flag = 1;
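
Lines 365-368 show the event log being filled as a fixed-size ring: the slot index wraps at MPT2SAS_CTL_EVENT_LOG_SIZE while ioc->event_context keeps counting up, so the context value doubles as a monotonic sequence number. A small sketch of that insertion step (the .event field name is an assumption; only .context appears in the listing):

/* inside mpt2sas_ctl_add_to_event_log(), once _ctl_check_event_type() says yes */
i = ioc->event_context % MPT2SAS_CTL_EVENT_LOG_SIZE;    /* ring slot, wraps around */
event_log = ioc->event_log;
event_log[i].event = event;                             /* assumed field name */
event_log[i].context = ioc->event_context++;            /* monotonic sequence id */
/* the MPI event payload is then copied into the slot (details elided here) */
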
392 * @ioc: per adapter object
397 * This function merely adds a new work task into ioc->firmware_event_thread.
404 mpt2sas_ctl_event_callback(struct MPT2SAS_ADAPTER *ioc, u8 msix_index,
409 mpi_reply = mpt2sas_base_get_reply_virt_addr(ioc, reply);
410 mpt2sas_ctl_add_to_event_log(ioc, mpi_reply);
416 * @ioc: per adapter object
417 * @iocpp: The ioc pointer is returned in this.
424 struct MPT2SAS_ADAPTER *ioc;
426 list_for_each_entry(ioc, &mpt2sas_ioc_list, list) {
427 if (ioc->id != ioc_number)
429 *iocpp = ioc;
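
Lines 424-429 outline _ctl_verify_adapter(): walk the global mpt2sas_ioc_list, skip adapters whose id does not match, and hand the match back through iocpp. A plausible reconstruction of the whole helper; the -1 miss return is an assumption, but it is consistent with the "== -1" tests at every call site below:

static int
_ctl_verify_adapter(int ioc_number, struct MPT2SAS_ADAPTER **iocpp)
{
    struct MPT2SAS_ADAPTER *ioc;

    list_for_each_entry(ioc, &mpt2sas_ioc_list, list) {
        if (ioc->id != ioc_number)
            continue;
        *iocpp = ioc;           /* matching adapter found */
        return ioc_number;
    }
    *iocpp = NULL;
    return -1;                  /* no adapter with this id */
}
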
438 * @ioc: per adapter object
447 mpt2sas_ctl_reset_handler(struct MPT2SAS_ADAPTER *ioc, int reset_phase)
454 dtmprintk(ioc, printk(MPT2SAS_INFO_FMT "%s: "
455 "MPT2_IOC_PRE_RESET\n", ioc->name, __func__));
457 if (!(ioc->diag_buffer_status[i] &
460 if ((ioc->diag_buffer_status[i] &
463 _ctl_send_release(ioc, i, &issue_reset);
467 dtmprintk(ioc, printk(MPT2SAS_INFO_FMT "%s: "
468 "MPT2_IOC_AFTER_RESET\n", ioc->name, __func__));
469 if (ioc->ctl_cmds.status & MPT2_CMD_PENDING) {
470 ioc->ctl_cmds.status |= MPT2_CMD_RESET;
471 mpt2sas_base_free_smid(ioc, ioc->ctl_cmds.smid);
472 complete(&ioc->ctl_cmds.done);
476 dtmprintk(ioc, printk(MPT2SAS_INFO_FMT "%s: "
477 "MPT2_IOC_DONE_RESET\n", ioc->name, __func__));
480 if (!(ioc->diag_buffer_status[i] &
483 if ((ioc->diag_buffer_status[i] &
486 ioc->diag_buffer_status[i] |=
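
The dtmprintk messages at 454-486 name the three reset phases, so mpt2sas_ctl_reset_handler() is evidently a switch over reset_phase: release registered (and not yet released) diag buffers before the reset, fail back a pending ctl command after the reset, and re-flag the diag buffers once the reset is done. A skeleton sketch of that flow, with the per-buffer loops condensed to comments:

void
mpt2sas_ctl_reset_handler(struct MPT2SAS_ADAPTER *ioc, int reset_phase)
{
    switch (reset_phase) {
    case MPT2_IOC_PRE_RESET:
        /* for each registered, not-yet-released diag buffer:
         * _ctl_send_release(ioc, i, &issue_reset), as at line 463 */
        break;
    case MPT2_IOC_AFTER_RESET:
        /* a ctl command stuck in flight is failed back to its waiter */
        if (ioc->ctl_cmds.status & MPT2_CMD_PENDING) {
            ioc->ctl_cmds.status |= MPT2_CMD_RESET;
            mpt2sas_base_free_smid(ioc, ioc->ctl_cmds.smid);
            complete(&ioc->ctl_cmds.done);
        }
        break;
    case MPT2_IOC_DONE_RESET:
        /* registered, not-yet-released buffers get their diag_buffer_status[i]
         * flag updated (the |= at line 486) to note the reset released them */
        break;
    }
}
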
529 struct MPT2SAS_ADAPTER *ioc;
533 list_for_each_entry(ioc, &mpt2sas_ioc_list, list) {
534 if (ioc->aen_event_read_flag)
542 * @ioc: per adapter object
550 _ctl_set_task_mid(struct MPT2SAS_ADAPTER *ioc, struct mpt2_ioctl_command *karg,
574 spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
575 for (i = ioc->scsiio_depth; i && !found; i--) {
576 scmd = ioc->scsi_lookup[i - 1].scmd;
587 tm_request->TaskMID = cpu_to_le16(ioc->scsi_lookup[i - 1].smid);
590 spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
593 dctlprintk(ioc, printk(MPT2SAS_INFO_FMT "%s: "
594 "handle(0x%04x), lun(%d), no active mid!!\n", ioc->name,
596 tm_reply = ioc->ctl_cmds.reply;
603 sz = min_t(u32, karg->max_reply_bytes, ioc->reply_sz);
604 if (copy_to_user(karg->reply_frame_buf_ptr, ioc->ctl_cmds.reply,
611 dctlprintk(ioc, printk(MPT2SAS_INFO_FMT "%s: "
612 "handle(0x%04x), lun(%d), task_mid(%d)\n", ioc->name,
620 * @ioc: per adapter object
626 _ctl_do_mpt_command(struct MPT2SAS_ADAPTER *ioc,
650 if (state == NON_BLOCKING && !mutex_trylock(&ioc->ctl_cmds.mutex))
652 else if (mutex_lock_interruptible(&ioc->ctl_cmds.mutex))
655 if (ioc->ctl_cmds.status != MPT2_CMD_NOT_USED) {
657 ioc->name, __func__);
663 ioc_state = mpt2sas_base_get_iocstate(ioc, 1);
667 "%s: failed due to ioc not operational\n",
668 ioc->name, __func__);
673 ioc_state = mpt2sas_base_get_iocstate(ioc, 1);
675 "operational state(count=%d)\n", ioc->name,
679 printk(MPT2SAS_INFO_FMT "%s: ioc is operational\n",
680 ioc->name, __func__);
682 mpi_request = kzalloc(ioc->request_sz, GFP_KERNEL);
685 "mpi_request\n", ioc->name, __func__);
699 smid = mpt2sas_base_get_smid_hpr(ioc, ioc->ctl_cb_idx);
702 ioc->name, __func__);
708 smid = mpt2sas_base_get_smid_scsiio(ioc, ioc->ctl_cb_idx, NULL);
711 ioc->name, __func__);
718 ioc->ctl_cmds.status = MPT2_CMD_PENDING;
719 memset(ioc->ctl_cmds.reply, 0, ioc->reply_sz);
720 request = mpt2sas_base_get_msg_frame(ioc, smid);
722 ioc->ctl_cmds.smid = smid;
730 ioc->facts.MaxDevHandle) {
732 mpt2sas_base_free_smid(ioc, smid);
739 data_out = pci_alloc_consistent(ioc->pdev, data_out_sz,
745 mpt2sas_base_free_smid(ioc, smid);
753 mpt2sas_base_free_smid(ioc, smid);
759 data_in = pci_alloc_consistent(ioc->pdev, data_in_sz,
765 mpt2sas_base_free_smid(ioc, smid);
774 mpt2sas_base_build_zero_len_sge(ioc, psge);
780 ioc->base_add_sg_single(psge, sgl_flags |
784 psge += ioc->sge_size;
791 ioc->base_add_sg_single(psge, sgl_flags |
798 ioc->base_add_sg_single(psge, sgl_flags |
805 ioc->base_add_sg_single(psge, sgl_flags |
811 _ctl_display_some_debug(ioc, smid, "ctl_request", NULL);
822 mpt2sas_base_get_sense_buffer_dma(ioc, smid);
823 memset(ioc->ctl_cmds.sense, 0, SCSI_SENSE_BUFFERSIZE);
825 mpt2sas_base_put_smid_scsi_io(ioc, smid,
828 mpt2sas_base_put_smid_default(ioc, smid);
836 dtmprintk(ioc, printk(MPT2SAS_INFO_FMT "TASK_MGMT: "
837 "handle(0x%04x), task_type(0x%02x)\n", ioc->name,
844 if (_ctl_set_task_mid(ioc, &karg, tm_request)) {
845 mpt2sas_base_free_smid(ioc, smid);
850 mpt2sas_scsih_set_tm_flag(ioc, le16_to_cpu(
852 mpt2sas_base_put_smid_hi_priority(ioc, smid);
861 /* ioc determines which port to use */
870 ioc->ioc_link_reset_in_progress = 1;
871 ioc->ignore_loginfos = 1;
873 mpt2sas_base_put_smid_default(ioc, smid);
884 ioc->ioc_link_reset_in_progress = 1;
885 ioc->ignore_loginfos = 1;
887 mpt2sas_base_put_smid_default(ioc, smid);
891 mpt2sas_base_put_smid_default(ioc, smid);
899 init_completion(&ioc->ctl_cmds.done);
900 timeleft = wait_for_completion_timeout(&ioc->ctl_cmds.done,
905 mpt2sas_scsih_clear_tm_flag(ioc, le16_to_cpu(
909 ioc->ioc_link_reset_in_progress) {
910 ioc->ioc_link_reset_in_progress = 0;
911 ioc->ignore_loginfos = 0;
913 if (!(ioc->ctl_cmds.status & MPT2_CMD_COMPLETE)) {
914 printk(MPT2SAS_ERR_FMT "%s: timeout\n", ioc->name,
917 if (!(ioc->ctl_cmds.status & MPT2_CMD_RESET))
922 mpi_reply = ioc->ctl_cmds.reply;
927 (ioc->logging_level & MPT_DEBUG_TM)) {
933 "TerminationCount(0x%08x)\n", ioc->name,
952 sz = min_t(u32, karg.max_reply_bytes, ioc->reply_sz);
953 if (copy_to_user(karg.reply_frame_buf_ptr, ioc->ctl_cmds.reply,
968 ioc->ctl_cmds.sense, sz)) {
983 "= (0x%04x)\n", ioc->name,
985 mpt2sas_halt_firmware(ioc);
986 mpt2sas_scsih_issue_tm(ioc,
990 ioc->tm_cmds.status = MPT2_CMD_NOT_USED;
992 mpt2sas_base_hard_reset_handler(ioc, CAN_SLEEP,
1000 pci_free_consistent(ioc->pdev, data_in_sz, data_in,
1004 pci_free_consistent(ioc->pdev, data_out_sz, data_out,
1008 ioc->ctl_cmds.status = MPT2_CMD_NOT_USED;
1009 mutex_unlock(&ioc->ctl_cmds.mutex);
1021 struct MPT2SAS_ADAPTER *ioc;
1029 if (_ctl_verify_adapter(karg.hdr.ioc_number, &ioc) == -1 || !ioc)
1032 dctlprintk(ioc, printk(MPT2SAS_INFO_FMT "%s: enter\n", ioc->name,
1037 if (ioc->pfacts)
1038 karg.port_number = ioc->pfacts[0].PortNumber;
1039 pci_read_config_byte(ioc->pdev, PCI_CLASS_REVISION, &revision);
1041 karg.pci_id = ioc->pdev->device;
1042 karg.subsystem_device = ioc->pdev->subsystem_device;
1043 karg.subsystem_vendor = ioc->pdev->subsystem_vendor;
1044 karg.pci_information.u.bits.bus = ioc->pdev->bus->number;
1045 karg.pci_information.u.bits.device = PCI_SLOT(ioc->pdev->devfn);
1046 karg.pci_information.u.bits.function = PCI_FUNC(ioc->pdev->devfn);
1047 karg.pci_information.segment_id = pci_domain_nr(ioc->pdev->bus);
1048 karg.firmware_version = ioc->facts.FWVersion.Word;
1052 karg.bios_version = le32_to_cpu(ioc->bios_pg3.BiosVersion);
1070 struct MPT2SAS_ADAPTER *ioc;
1077 if (_ctl_verify_adapter(karg.hdr.ioc_number, &ioc) == -1 || !ioc)
1080 dctlprintk(ioc, printk(MPT2SAS_INFO_FMT "%s: enter\n", ioc->name,
1084 memcpy(karg.event_types, ioc->event_type,
1103 struct MPT2SAS_ADAPTER *ioc;
1110 if (_ctl_verify_adapter(karg.hdr.ioc_number, &ioc) == -1 || !ioc)
1113 dctlprintk(ioc, printk(MPT2SAS_INFO_FMT "%s: enter\n", ioc->name,
1116 if (ioc->event_log)
1118 memcpy(ioc->event_type, karg.event_types,
1120 mpt2sas_base_validate_event_type(ioc, ioc->event_type);
1123 ioc->event_context = 0;
1124 ioc->aen_event_read_flag = 0;
1125 ioc->event_log = kcalloc(MPT2SAS_CTL_EVENT_LOG_SIZE,
1127 if (!ioc->event_log) {
1143 struct MPT2SAS_ADAPTER *ioc;
1152 if (_ctl_verify_adapter(karg.hdr.ioc_number, &ioc) == -1 || !ioc)
1155 dctlprintk(ioc, printk(MPT2SAS_INFO_FMT "%s: enter\n", ioc->name,
1166 if (!max || !ioc->event_log)
1170 if (copy_to_user(uarg->event_data, ioc->event_log, number_bytes)) {
1177 ioc->aen_event_read_flag = 0;
1189 struct MPT2SAS_ADAPTER *ioc;
1197 if (_ctl_verify_adapter(karg.hdr.ioc_number, &ioc) == -1 || !ioc)
1200 dctlprintk(ioc, printk(MPT2SAS_INFO_FMT "%s: enter\n", ioc->name,
1203 retval = mpt2sas_base_hard_reset_handler(ioc, CAN_SLEEP,
1206 ioc->name, ((!retval) ? "SUCCESS" : "FAILED"));
1212 * @ioc: per adapter object
1216 _ctl_btdh_search_sas_device(struct MPT2SAS_ADAPTER *ioc,
1223 if (list_empty(&ioc->sas_device_list))
1226 spin_lock_irqsave(&ioc->sas_device_lock, flags);
1227 list_for_each_entry(sas_device, &ioc->sas_device_list, list) {
1242 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
1248 * @ioc: per adapter object
1252 _ctl_btdh_search_raid_device(struct MPT2SAS_ADAPTER *ioc,
1259 if (list_empty(&ioc->raid_device_list))
1262 spin_lock_irqsave(&ioc->raid_device_lock, flags);
1263 list_for_each_entry(raid_device, &ioc->raid_device_list, list) {
1278 spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
1290 struct MPT2SAS_ADAPTER *ioc;
1298 if (_ctl_verify_adapter(karg.hdr.ioc_number, &ioc) == -1 || !ioc)
1301 dctlprintk(ioc, printk(MPT2SAS_INFO_FMT "%s\n", ioc->name,
1304 rc = _ctl_btdh_search_sas_device(ioc, &karg);
1306 _ctl_btdh_search_raid_device(ioc, &karg);
1318 * @ioc: per adapter object
1324 _ctl_diag_capability(struct MPT2SAS_ADAPTER *ioc, u8 buffer_type)
1330 if (ioc->facts.IOCCapabilities &
1335 if (ioc->facts.IOCCapabilities &
1340 if (ioc->facts.IOCCapabilities &
1350 * @ioc: per adapter object
1355 _ctl_diag_register_2(struct MPT2SAS_ADAPTER *ioc,
1370 dctlprintk(ioc, printk(MPT2SAS_INFO_FMT "%s\n", ioc->name,
1373 if (ioc->ctl_cmds.status != MPT2_CMD_NOT_USED) {
1375 ioc->name, __func__);
1381 if (!_ctl_diag_capability(ioc, buffer_type)) {
1383 "buffer_type(0x%02x)\n", ioc->name, __func__, buffer_type);
1387 if (ioc->diag_buffer_status[buffer_type] &
1390 "buffer for buffer_type(0x%02x)\n", ioc->name, __func__,
1397 "is not 4 byte aligned\n", ioc->name, __func__);
1401 smid = mpt2sas_base_get_smid(ioc, ioc->ctl_cb_idx);
1404 ioc->name, __func__);
1410 ioc->ctl_cmds.status = MPT2_CMD_PENDING;
1411 memset(ioc->ctl_cmds.reply, 0, ioc->reply_sz);
1412 mpi_request = mpt2sas_base_get_msg_frame(ioc, smid);
1413 ioc->ctl_cmds.smid = smid;
1415 request_data = ioc->diag_buffer[buffer_type];
1417 ioc->unique_id[buffer_type] = diag_register->unique_id;
1418 ioc->diag_buffer_status[buffer_type] = 0;
1419 memcpy(ioc->product_specific[buffer_type],
1421 ioc->diagnostic_flags[buffer_type] = diag_register->diagnostic_flags;
1424 request_data_dma = ioc->diag_buffer_dma[buffer_type];
1425 if (request_data_sz != ioc->diag_buffer_sz[buffer_type]) {
1426 pci_free_consistent(ioc->pdev,
1427 ioc->diag_buffer_sz[buffer_type],
1434 ioc->diag_buffer_sz[buffer_type] = 0;
1435 ioc->diag_buffer_dma[buffer_type] = 0;
1437 ioc->pdev, request_data_sz, &request_data_dma);
1441 ioc->name, __func__, request_data_sz);
1442 mpt2sas_base_free_smid(ioc, smid);
1445 ioc->diag_buffer[buffer_type] = request_data;
1446 ioc->diag_buffer_sz[buffer_type] = request_data_sz;
1447 ioc->diag_buffer_dma[buffer_type] = request_data_dma;
1458 dctlprintk(ioc, printk(MPT2SAS_INFO_FMT "%s: diag_buffer(0x%p), "
1459 "dma(0x%llx), sz(%d)\n", ioc->name, __func__, request_data,
1465 cpu_to_le32(ioc->product_specific[buffer_type][i]);
1467 mpt2sas_base_put_smid_default(ioc, smid);
1468 init_completion(&ioc->ctl_cmds.done);
1469 timeleft = wait_for_completion_timeout(&ioc->ctl_cmds.done,
1472 if (!(ioc->ctl_cmds.status & MPT2_CMD_COMPLETE)) {
1473 printk(MPT2SAS_ERR_FMT "%s: timeout\n", ioc->name,
1477 if (!(ioc->ctl_cmds.status & MPT2_CMD_RESET))
1483 if ((ioc->ctl_cmds.status & MPT2_CMD_REPLY_VALID) == 0) {
1485 ioc->name, __func__);
1490 mpi_reply = ioc->ctl_cmds.reply;
1494 ioc->diag_buffer_status[buffer_type] |=
1496 dctlprintk(ioc, printk(MPT2SAS_INFO_FMT "%s: success\n",
1497 ioc->name, __func__));
1500 "log_info(0x%08x)\n", ioc->name, __func__,
1507 mpt2sas_base_hard_reset_handler(ioc, CAN_SLEEP,
1513 pci_free_consistent(ioc->pdev, request_data_sz,
1516 ioc->ctl_cmds.status = MPT2_CMD_NOT_USED;
1522 * @ioc: per adapter object
1529 mpt2sas_enable_diag_buffer(struct MPT2SAS_ADAPTER *ioc, u8 bits_to_register)
1537 ioc->name);
1542 _ctl_diag_register_2(ioc, &diag_register);
1547 ioc->name);
1552 _ctl_diag_register_2(ioc, &diag_register);
1557 ioc->name);
1562 _ctl_diag_register_2(ioc, &diag_register);
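
Lines 1529-1562 show mpt2sas_enable_diag_buffer() registering up to three firmware diag buffers (trace, snapshot, extended), each gated by a bit in bits_to_register and funneled through _ctl_diag_register_2(). A hedged sketch of one branch; the buffer size and unique_id values are assumptions used only for illustration:

struct mpt2_diag_register diag_register;

memset(&diag_register, 0, sizeof(struct mpt2_diag_register));

if (bits_to_register & 1) {
    printk(MPT2SAS_INFO_FMT "registering trace buffer support\n",
        ioc->name);
    diag_register.buffer_type = MPI2_DIAG_BUF_TYPE_TRACE;
    diag_register.requested_buffer_size = (1024 * 1024);   /* assumed 1 MB request */
    diag_register.unique_id = 0x7075900;                    /* assumed driver-chosen id */
    _ctl_diag_register_2(ioc, &diag_register);
}
/* bits 2 and 4 select the snapshot and extended buffer types the same way */
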
1578 struct MPT2SAS_ADAPTER *ioc;
1586 if (_ctl_verify_adapter(karg.hdr.ioc_number, &ioc) == -1 || !ioc)
1589 if (state == NON_BLOCKING && !mutex_trylock(&ioc->ctl_cmds.mutex))
1591 else if (mutex_lock_interruptible(&ioc->ctl_cmds.mutex))
1593 rc = _ctl_diag_register_2(ioc, &karg);
1594 mutex_unlock(&ioc->ctl_cmds.mutex);
1609 struct MPT2SAS_ADAPTER *ioc;
1620 if (_ctl_verify_adapter(karg.hdr.ioc_number, &ioc) == -1 || !ioc)
1623 dctlprintk(ioc, printk(MPT2SAS_INFO_FMT "%s\n", ioc->name,
1627 if (!_ctl_diag_capability(ioc, buffer_type)) {
1629 "buffer_type(0x%02x)\n", ioc->name, __func__, buffer_type);
1633 if ((ioc->diag_buffer_status[buffer_type] &
1636 "registered\n", ioc->name, __func__, buffer_type);
1639 if ((ioc->diag_buffer_status[buffer_type] &
1642 "released\n", ioc->name, __func__, buffer_type);
1646 if (karg.unique_id != ioc->unique_id[buffer_type]) {
1648 "registered\n", ioc->name, __func__, karg.unique_id);
1652 request_data = ioc->diag_buffer[buffer_type];
1655 "buffer_type(0x%02x)\n", ioc->name, __func__, buffer_type);
1659 request_data_sz = ioc->diag_buffer_sz[buffer_type];
1660 request_data_dma = ioc->diag_buffer_dma[buffer_type];
1661 pci_free_consistent(ioc->pdev, request_data_sz,
1663 ioc->diag_buffer[buffer_type] = NULL;
1664 ioc->diag_buffer_status[buffer_type] = 0;
1680 struct MPT2SAS_ADAPTER *ioc;
1690 if (_ctl_verify_adapter(karg.hdr.ioc_number, &ioc) == -1 || !ioc)
1693 dctlprintk(ioc, printk(MPT2SAS_INFO_FMT "%s\n", ioc->name,
1699 if (!_ctl_diag_capability(ioc, buffer_type)) {
1701 "buffer_type(0x%02x)\n", ioc->name, __func__, buffer_type);
1705 if ((ioc->diag_buffer_status[buffer_type] &
1708 "registered\n", ioc->name, __func__, buffer_type);
1713 if (karg.unique_id != ioc->unique_id[buffer_type]) {
1715 "registered\n", ioc->name, __func__,
1721 request_data = ioc->diag_buffer[buffer_type];
1724 "buffer_type(0x%02x)\n", ioc->name, __func__, buffer_type);
1728 if (ioc->diag_buffer_status[buffer_type] & MPT2_DIAG_BUFFER_IS_RELEASED)
1738 ioc->product_specific[buffer_type][i];
1740 karg.total_buffer_size = ioc->diag_buffer_sz[buffer_type];
1742 karg.unique_id = ioc->unique_id[buffer_type];
1743 karg.diagnostic_flags = ioc->diagnostic_flags[buffer_type];
1747 "data @ %p\n", ioc->name, __func__, arg);
1755 * @ioc: per adapter object
1761 _ctl_send_release(struct MPT2SAS_ADAPTER *ioc, u8 buffer_type, u8 *issue_reset)
1771 dctlprintk(ioc, printk(MPT2SAS_INFO_FMT "%s\n", ioc->name,
1777 ioc_state = mpt2sas_base_get_iocstate(ioc, 1);
1779 dctlprintk(ioc, printk(MPT2SAS_INFO_FMT "%s: "
1780 "skipping due to FAULT state\n", ioc->name,
1786 if (ioc->ctl_cmds.status != MPT2_CMD_NOT_USED) {
1788 ioc->name, __func__);
1793 smid = mpt2sas_base_get_smid(ioc, ioc->ctl_cb_idx);
1796 ioc->name, __func__);
1801 ioc->ctl_cmds.status = MPT2_CMD_PENDING;
1802 memset(ioc->ctl_cmds.reply, 0, ioc->reply_sz);
1803 mpi_request = mpt2sas_base_get_msg_frame(ioc, smid);
1804 ioc->ctl_cmds.smid = smid;
1811 mpt2sas_base_put_smid_default(ioc, smid);
1812 init_completion(&ioc->ctl_cmds.done);
1813 timeleft = wait_for_completion_timeout(&ioc->ctl_cmds.done,
1816 if (!(ioc->ctl_cmds.status & MPT2_CMD_COMPLETE)) {
1817 printk(MPT2SAS_ERR_FMT "%s: timeout\n", ioc->name,
1821 if (!(ioc->ctl_cmds.status & MPT2_CMD_RESET))
1828 if ((ioc->ctl_cmds.status & MPT2_CMD_REPLY_VALID) == 0) {
1830 ioc->name, __func__);
1835 mpi_reply = ioc->ctl_cmds.reply;
1839 ioc->diag_buffer_status[buffer_type] |=
1841 dctlprintk(ioc, printk(MPT2SAS_INFO_FMT "%s: success\n",
1842 ioc->name, __func__));
1845 "log_info(0x%08x)\n", ioc->name, __func__,
1851 ioc->ctl_cmds.status = MPT2_CMD_NOT_USED;
1868 struct MPT2SAS_ADAPTER *ioc;
1879 if (_ctl_verify_adapter(karg.hdr.ioc_number, &ioc) == -1 || !ioc)
1882 dctlprintk(ioc, printk(MPT2SAS_INFO_FMT "%s\n", ioc->name,
1886 if (!_ctl_diag_capability(ioc, buffer_type)) {
1888 "buffer_type(0x%02x)\n", ioc->name, __func__, buffer_type);
1892 if ((ioc->diag_buffer_status[buffer_type] &
1895 "registered\n", ioc->name, __func__, buffer_type);
1899 if (karg.unique_id != ioc->unique_id[buffer_type]) {
1901 "registered\n", ioc->name, __func__, karg.unique_id);
1905 if (ioc->diag_buffer_status[buffer_type] &
1908 "is already released\n", ioc->name, __func__,
1913 request_data = ioc->diag_buffer[buffer_type];
1917 "buffer_type(0x%02x)\n", ioc->name, __func__, buffer_type);
1922 if ((ioc->diag_buffer_status[buffer_type] &
1924 ioc->diag_buffer_status[buffer_type] |=
1926 ioc->diag_buffer_status[buffer_type] &=
1929 "was released due to host reset\n", ioc->name, __func__,
1934 if (state == NON_BLOCKING && !mutex_trylock(&ioc->ctl_cmds.mutex))
1936 else if (mutex_lock_interruptible(&ioc->ctl_cmds.mutex))
1939 rc = _ctl_send_release(ioc, buffer_type, &issue_reset);
1942 mpt2sas_base_hard_reset_handler(ioc, CAN_SLEEP,
1945 mutex_unlock(&ioc->ctl_cmds.mutex);
1959 struct MPT2SAS_ADAPTER *ioc;
1975 if (_ctl_verify_adapter(karg.hdr.ioc_number, &ioc) == -1 || !ioc)
1978 dctlprintk(ioc, printk(MPT2SAS_INFO_FMT "%s\n", ioc->name,
1982 if (!_ctl_diag_capability(ioc, buffer_type)) {
1984 "buffer_type(0x%02x)\n", ioc->name, __func__, buffer_type);
1988 if (karg.unique_id != ioc->unique_id[buffer_type]) {
1990 "registered\n", ioc->name, __func__, karg.unique_id);
1994 request_data = ioc->diag_buffer[buffer_type];
1997 "buffer_type(0x%02x)\n", ioc->name, __func__, buffer_type);
2003 "or bytes_to_read are not 4 byte aligned\n", ioc->name,
2009 dctlprintk(ioc, printk(MPT2SAS_INFO_FMT "%s: diag_buffer(%p), "
2010 "offset(%d), sz(%d)\n", ioc->name, __func__,
2016 "mpt_diag_read_buffer_t data @ %p\n", ioc->name,
2024 dctlprintk(ioc, printk(MPT2SAS_INFO_FMT "%s: Reregister "
2025 "buffer_type(0x%02x)\n", ioc->name, __func__, buffer_type));
2026 if ((ioc->diag_buffer_status[buffer_type] &
2028 dctlprintk(ioc, printk(MPT2SAS_INFO_FMT "%s: "
2029 "buffer_type(0x%02x) is still registered\n", ioc->name,
2035 if (state == NON_BLOCKING && !mutex_trylock(&ioc->ctl_cmds.mutex))
2037 else if (mutex_lock_interruptible(&ioc->ctl_cmds.mutex))
2040 if (ioc->ctl_cmds.status != MPT2_CMD_NOT_USED) {
2042 ioc->name, __func__);
2047 smid = mpt2sas_base_get_smid(ioc, ioc->ctl_cb_idx);
2050 ioc->name, __func__);
2056 ioc->ctl_cmds.status = MPT2_CMD_PENDING;
2057 memset(ioc->ctl_cmds.reply, 0, ioc->reply_sz);
2058 mpi_request = mpt2sas_base_get_msg_frame(ioc, smid);
2059 ioc->ctl_cmds.smid = smid;
2064 cpu_to_le32(ioc->diag_buffer_sz[buffer_type]);
2066 cpu_to_le64(ioc->diag_buffer_dma[buffer_type]);
2069 cpu_to_le32(ioc->product_specific[buffer_type][i]);
2073 mpt2sas_base_put_smid_default(ioc, smid);
2074 init_completion(&ioc->ctl_cmds.done);
2075 timeleft = wait_for_completion_timeout(&ioc->ctl_cmds.done,
2078 if (!(ioc->ctl_cmds.status & MPT2_CMD_COMPLETE)) {
2079 printk(MPT2SAS_ERR_FMT "%s: timeout\n", ioc->name,
2083 if (!(ioc->ctl_cmds.status & MPT2_CMD_RESET))
2089 if ((ioc->ctl_cmds.status & MPT2_CMD_REPLY_VALID) == 0) {
2091 ioc->name, __func__);
2096 mpi_reply = ioc->ctl_cmds.reply;
2100 ioc->diag_buffer_status[buffer_type] |=
2102 dctlprintk(ioc, printk(MPT2SAS_INFO_FMT "%s: success\n",
2103 ioc->name, __func__));
2106 "log_info(0x%08x)\n", ioc->name, __func__,
2113 mpt2sas_base_hard_reset_handler(ioc, CAN_SLEEP,
2118 ioc->ctl_cmds.status = MPT2_CMD_NOT_USED;
2119 mutex_unlock(&ioc->ctl_cmds.mutex);
2147 struct MPT2SAS_ADAPTER *ioc;
2155 if (_ctl_verify_adapter(karg.hdr.ioc_number, &ioc) == -1 ||
2156 !ioc)
2159 if (ioc->shost_recovery || ioc->pci_error_recovery)
2164 ret = _ctl_do_mpt_command(ioc, karg, &uarg->mf, state);
2210 struct MPT2SAS_ADAPTER *ioc;
2218 if (_ctl_verify_adapter(karg.hdr.ioc_number, &ioc) == -1 ||
2219 !ioc)
2222 dctlprintk(ioc, printk(MPT2SAS_INFO_FMT
2223 "unsupported ioctl opcode(0x%08x)\n", ioc->name, cmd));
2262 struct MPT2SAS_ADAPTER *ioc;
2275 if (_ctl_verify_adapter(karg32.hdr.ioc_number, &ioc) == -1 || !ioc)
2278 if (ioc->shost_recovery || ioc->pci_error_recovery)
2296 return _ctl_do_mpt_command(ioc, karg, &uarg->mf, state);
2336 struct MPT2SAS_ADAPTER *ioc = shost_priv(shost);
2339 (ioc->facts.FWVersion.Word & 0xFF000000) >> 24,
2340 (ioc->facts.FWVersion.Word & 0x00FF0000) >> 16,
2341 (ioc->facts.FWVersion.Word & 0x0000FF00) >> 8,
2342 ioc->facts.FWVersion.Word & 0x000000FF);
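
Lines 2339-2342 unpack the packed 32-bit firmware version (ioc->facts.FWVersion.Word) into its four bytes for a sysfs attribute. A sketch of the surrounding show handler, assuming the usual class_to_shost()/shost_priv() sysfs boilerplate and a "%02d.%02d.%02d.%02d" format (the handler name below follows the driver's _ctl_*_show pattern and may differ):

static ssize_t
_ctl_version_fw_show(struct device *cdev, struct device_attribute *attr,
    char *buf)
{
    struct Scsi_Host *shost = class_to_shost(cdev);
    struct MPT2SAS_ADAPTER *ioc = shost_priv(shost);

    return snprintf(buf, PAGE_SIZE, "%02d.%02d.%02d.%02d\n",
        (ioc->facts.FWVersion.Word & 0xFF000000) >> 24,     /* major */
        (ioc->facts.FWVersion.Word & 0x00FF0000) >> 16,     /* minor */
        (ioc->facts.FWVersion.Word & 0x0000FF00) >> 8,      /* unit  */
         ioc->facts.FWVersion.Word & 0x000000FF);           /* dev   */
}
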
2358 struct MPT2SAS_ADAPTER *ioc = shost_priv(shost);
2360 u32 version = le32_to_cpu(ioc->bios_pg3.BiosVersion);
2382 struct MPT2SAS_ADAPTER *ioc = shost_priv(shost);
2385 ioc->facts.MsgVersion, ioc->facts.HeaderVersion >> 8);
2401 struct MPT2SAS_ADAPTER *ioc = shost_priv(shost);
2403 return snprintf(buf, 16, "%s\n", ioc->manu_pg0.ChipName);
2420 struct MPT2SAS_ADAPTER *ioc = shost_priv(shost);
2423 le32_to_cpu(ioc->iounit_pg0.NvdataVersionPersistent.Word));
2440 struct MPT2SAS_ADAPTER *ioc = shost_priv(shost);
2443 le32_to_cpu(ioc->iounit_pg0.NvdataVersionDefault.Word));
2460 struct MPT2SAS_ADAPTER *ioc = shost_priv(shost);
2462 return snprintf(buf, 16, "%s\n", ioc->manu_pg0.BoardName);
2478 struct MPT2SAS_ADAPTER *ioc = shost_priv(shost);
2480 return snprintf(buf, 16, "%s\n", ioc->manu_pg0.BoardAssembly);
2497 struct MPT2SAS_ADAPTER *ioc = shost_priv(shost);
2499 return snprintf(buf, 16, "%s\n", ioc->manu_pg0.BoardTracerNumber);
2519 struct MPT2SAS_ADAPTER *ioc = shost_priv(shost);
2521 return snprintf(buf, PAGE_SIZE, "%02d\n", ioc->io_missing_delay);
2541 struct MPT2SAS_ADAPTER *ioc = shost_priv(shost);
2543 return snprintf(buf, PAGE_SIZE, "%02d\n", ioc->device_missing_delay);
2562 struct MPT2SAS_ADAPTER *ioc = shost_priv(shost);
2564 return snprintf(buf, PAGE_SIZE, "%02d\n", ioc->facts.RequestCredit);
2583 struct MPT2SAS_ADAPTER *ioc = shost_priv(shost);
2586 (unsigned long long)ioc->sas_hba.sas_address);
2603 struct MPT2SAS_ADAPTER *ioc = shost_priv(shost);
2605 return snprintf(buf, PAGE_SIZE, "%08xh\n", ioc->logging_level);
2612 struct MPT2SAS_ADAPTER *ioc = shost_priv(shost);
2618 ioc->logging_level = val;
2619 printk(MPT2SAS_INFO_FMT "logging_level=%08xh\n", ioc->name,
2620 ioc->logging_level);
2640 struct MPT2SAS_ADAPTER *ioc = shost_priv(shost);
2642 return snprintf(buf, PAGE_SIZE, "%d\n", ioc->fwfault_debug);
2649 struct MPT2SAS_ADAPTER *ioc = shost_priv(shost);
2655 ioc->fwfault_debug = val;
2656 printk(MPT2SAS_INFO_FMT "fwfault_debug=%d\n", ioc->name,
2657 ioc->fwfault_debug);
2665 * _ctl_ioc_reset_count_show - ioc reset count
2678 struct MPT2SAS_ADAPTER *ioc = shost_priv(shost);
2680 return snprintf(buf, PAGE_SIZE, "%08d\n", ioc->ioc_reset_count);
2706 struct MPT2SAS_ADAPTER *ioc = shost_priv(shost);
2710 if (!ioc->diag_buffer[MPI2_DIAG_BUF_TYPE_TRACE]) {
2712 "registered\n", ioc->name, __func__);
2716 if ((ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] &
2719 "registered\n", ioc->name, __func__);
2724 ioc->diag_buffer[MPI2_DIAG_BUF_TYPE_TRACE];
2730 ioc->ring_buffer_sz = size;
2752 struct MPT2SAS_ADAPTER *ioc = shost_priv(shost);
2756 if (!ioc->diag_buffer[MPI2_DIAG_BUF_TYPE_TRACE]) {
2758 "registered\n", ioc->name, __func__);
2762 if ((ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] &
2765 "registered\n", ioc->name, __func__);
2769 if (ioc->ring_buffer_offset > ioc->ring_buffer_sz)
2772 size = ioc->ring_buffer_sz - ioc->ring_buffer_offset;
2774 request_data = ioc->diag_buffer[0] + ioc->ring_buffer_offset;
2784 struct MPT2SAS_ADAPTER *ioc = shost_priv(shost);
2790 ioc->ring_buffer_offset = val;
2812 struct MPT2SAS_ADAPTER *ioc = shost_priv(shost);
2814 if ((!ioc->diag_buffer[MPI2_DIAG_BUF_TYPE_TRACE]) ||
2815 ((ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] &
2818 else if ((ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] &
2830 struct MPT2SAS_ADAPTER *ioc = shost_priv(shost);
2840 if ((ioc->diag_buffer[MPI2_DIAG_BUF_TYPE_TRACE]) &&
2841 (ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] &
2843 ((ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] &
2848 ioc->name);
2852 ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] = 0;
2853 _ctl_diag_register_2(ioc, &diag_register);
2856 if (!ioc->diag_buffer[MPI2_DIAG_BUF_TYPE_TRACE])
2858 if ((ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] &
2861 if ((ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] &
2865 ioc->name);
2866 _ctl_send_release(ioc, MPI2_DIAG_BUF_TYPE_TRACE, &issue_reset);
2985 struct MPT2SAS_ADAPTER *ioc;
2988 list_for_each_entry(ioc, &mpt2sas_ioc_list, list) {
2992 if (!ioc->diag_buffer[i])
2994 pci_free_consistent(ioc->pdev, ioc->diag_buffer_sz[i],
2995 ioc->diag_buffer[i], ioc->diag_buffer_dma[i]);
2996 ioc->diag_buffer[i] = NULL;
2997 ioc->diag_buffer_status[i] = 0;
3000 kfree(ioc->event_log);