1/******************************************************************* 2 * This file is part of the Emulex Linux Device Driver for * 3 * Fibre Channel Host Bus Adapters. * 4 * Copyright (C) 2004-2010 Emulex. All rights reserved. * 5 * EMULEX and SLI are trademarks of Emulex. * 6 * www.emulex.com * 7 * Portions Copyright (C) 2004-2005 Christoph Hellwig * 8 * * 9 * This program is free software; you can redistribute it and/or * 10 * modify it under the terms of version 2 of the GNU General * 11 * Public License as published by the Free Software Foundation. * 12 * This program is distributed in the hope that it will be useful. * 13 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND * 14 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, * 15 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE * 16 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD * 17 * TO BE LEGALLY INVALID. See the GNU General Public License for * 18 * more details, a copy of which can be found in the file COPYING * 19 * included with this package. * 20 *******************************************************************/ 21 22#include <linux/blkdev.h> 23#include <linux/delay.h> 24#include <linux/dma-mapping.h> 25#include <linux/idr.h> 26#include <linux/interrupt.h> 27#include <linux/kthread.h> 28#include <linux/pci.h> 29#include <linux/spinlock.h> 30#include <linux/ctype.h> 31#include <linux/aer.h> 32#include <linux/slab.h> 33 34#include <scsi/scsi.h> 35#include <scsi/scsi_device.h> 36#include <scsi/scsi_host.h> 37#include <scsi/scsi_transport_fc.h> 38 39#include "lpfc_hw4.h" 40#include "lpfc_hw.h" 41#include "lpfc_sli.h" 42#include "lpfc_sli4.h" 43#include "lpfc_nl.h" 44#include "lpfc_disc.h" 45#include "lpfc_scsi.h" 46#include "lpfc.h" 47#include "lpfc_logmsg.h" 48#include "lpfc_crtn.h" 49#include "lpfc_vport.h" 50#include "lpfc_version.h" 51 52char *_dump_buf_data; 53unsigned long _dump_buf_data_order; 54char *_dump_buf_dif; 55unsigned long _dump_buf_dif_order; 56spinlock_t _dump_buf_lock; 57 58static void lpfc_get_hba_model_desc(struct lpfc_hba *, uint8_t *, uint8_t *); 59static int lpfc_post_rcv_buf(struct lpfc_hba *); 60static int lpfc_sli4_queue_create(struct lpfc_hba *); 61static void lpfc_sli4_queue_destroy(struct lpfc_hba *); 62static int lpfc_create_bootstrap_mbox(struct lpfc_hba *); 63static int lpfc_setup_endian_order(struct lpfc_hba *); 64static int lpfc_sli4_read_config(struct lpfc_hba *); 65static void lpfc_destroy_bootstrap_mbox(struct lpfc_hba *); 66static void lpfc_free_sgl_list(struct lpfc_hba *); 67static int lpfc_init_sgl_list(struct lpfc_hba *); 68static int lpfc_init_active_sgl_array(struct lpfc_hba *); 69static void lpfc_free_active_sgl(struct lpfc_hba *); 70static int lpfc_hba_down_post_s3(struct lpfc_hba *phba); 71static int lpfc_hba_down_post_s4(struct lpfc_hba *phba); 72static int lpfc_sli4_cq_event_pool_create(struct lpfc_hba *); 73static void lpfc_sli4_cq_event_pool_destroy(struct lpfc_hba *); 74static void lpfc_sli4_cq_event_release_all(struct lpfc_hba *); 75 76static struct scsi_transport_template *lpfc_transport_template = NULL; 77static struct scsi_transport_template *lpfc_vport_transport_template = NULL; 78static DEFINE_IDR(lpfc_hba_index); 79 80/** 81 * lpfc_config_port_prep - Perform lpfc initialization prior to config port 82 * @phba: pointer to lpfc hba data structure. 83 * 84 * This routine will do LPFC initialization prior to issuing the CONFIG_PORT 85 * mailbox command. It retrieves the revision information from the HBA and 86 * collects the Vital Product Data (VPD) about the HBA for preparing the 87 * configuration of the HBA. 88 * 89 * Return codes: 90 * 0 - success. 91 * -ERESTART - requests the SLI layer to reset the HBA and try again. 92 * Any other value - indicates an error. 93 **/ 94int 95lpfc_config_port_prep(struct lpfc_hba *phba) 96{ 97 lpfc_vpd_t *vp = &phba->vpd; 98 int i = 0, rc; 99 LPFC_MBOXQ_t *pmb; 100 MAILBOX_t *mb; 101 char *lpfc_vpd_data = NULL; 102 uint16_t offset = 0; 103 static char licensed[56] = 104 "key unlock for use with gnu public licensed code only\0"; 105 static int init_key = 1; 106 107 pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 108 if (!pmb) { 109 phba->link_state = LPFC_HBA_ERROR; 110 return -ENOMEM; 111 } 112 113 mb = &pmb->u.mb; 114 phba->link_state = LPFC_INIT_MBX_CMDS; 115 116 if (lpfc_is_LC_HBA(phba->pcidev->device)) { 117 if (init_key) { 118 uint32_t *ptext = (uint32_t *) licensed; 119 120 for (i = 0; i < 56; i += sizeof (uint32_t), ptext++) 121 *ptext = cpu_to_be32(*ptext); 122 init_key = 0; 123 } 124 125 lpfc_read_nv(phba, pmb); 126 memset((char*)mb->un.varRDnvp.rsvd3, 0, 127 sizeof (mb->un.varRDnvp.rsvd3)); 128 memcpy((char*)mb->un.varRDnvp.rsvd3, licensed, 129 sizeof (licensed)); 130 131 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL); 132 133 if (rc != MBX_SUCCESS) { 134 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX, 135 "0324 Config Port initialization " 136 "error, mbxCmd x%x READ_NVPARM, " 137 "mbxStatus x%x\n", 138 mb->mbxCommand, mb->mbxStatus); 139 mempool_free(pmb, phba->mbox_mem_pool); 140 return -ERESTART; 141 } 142 memcpy(phba->wwnn, (char *)mb->un.varRDnvp.nodename, 143 sizeof(phba->wwnn)); 144 memcpy(phba->wwpn, (char *)mb->un.varRDnvp.portname, 145 sizeof(phba->wwpn)); 146 } 147 148 phba->sli3_options = 0x0; 149 150 /* Setup and issue mailbox READ REV command */ 151 lpfc_read_rev(phba, pmb); 152 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL); 153 if (rc != MBX_SUCCESS) { 154 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 155 "0439 Adapter failed to init, mbxCmd x%x " 156 "READ_REV, mbxStatus x%x\n", 157 mb->mbxCommand, mb->mbxStatus); 158 mempool_free( pmb, phba->mbox_mem_pool); 159 return -ERESTART; 160 } 161 162 163 /* 164 * The value of rr must be 1 since the driver set the cv field to 1. 165 * This setting requires the FW to set all revision fields. 166 */ 167 if (mb->un.varRdRev.rr == 0) { 168 vp->rev.rBit = 0; 169 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 170 "0440 Adapter failed to init, READ_REV has " 171 "missing revision information.\n"); 172 mempool_free(pmb, phba->mbox_mem_pool); 173 return -ERESTART; 174 } 175 176 if (phba->sli_rev == 3 && !mb->un.varRdRev.v3rsp) { 177 mempool_free(pmb, phba->mbox_mem_pool); 178 return -EINVAL; 179 } 180 181 /* Save information as VPD data */ 182 vp->rev.rBit = 1; 183 memcpy(&vp->sli3Feat, &mb->un.varRdRev.sli3Feat, sizeof(uint32_t)); 184 vp->rev.sli1FwRev = mb->un.varRdRev.sli1FwRev; 185 memcpy(vp->rev.sli1FwName, (char*) mb->un.varRdRev.sli1FwName, 16); 186 vp->rev.sli2FwRev = mb->un.varRdRev.sli2FwRev; 187 memcpy(vp->rev.sli2FwName, (char *) mb->un.varRdRev.sli2FwName, 16); 188 vp->rev.biuRev = mb->un.varRdRev.biuRev; 189 vp->rev.smRev = mb->un.varRdRev.smRev; 190 vp->rev.smFwRev = mb->un.varRdRev.un.smFwRev; 191 vp->rev.endecRev = mb->un.varRdRev.endecRev; 192 vp->rev.fcphHigh = mb->un.varRdRev.fcphHigh; 193 vp->rev.fcphLow = mb->un.varRdRev.fcphLow; 194 vp->rev.feaLevelHigh = mb->un.varRdRev.feaLevelHigh; 195 vp->rev.feaLevelLow = mb->un.varRdRev.feaLevelLow; 196 vp->rev.postKernRev = mb->un.varRdRev.postKernRev; 197 vp->rev.opFwRev = mb->un.varRdRev.opFwRev; 198 199 /* If the sli feature level is less then 9, we must 200 * tear down all RPIs and VPIs on link down if NPIV 201 * is enabled. 202 */ 203 if (vp->rev.feaLevelHigh < 9) 204 phba->sli3_options |= LPFC_SLI3_VPORT_TEARDOWN; 205 206 if (lpfc_is_LC_HBA(phba->pcidev->device)) 207 memcpy(phba->RandomData, (char *)&mb->un.varWords[24], 208 sizeof (phba->RandomData)); 209 210 /* Get adapter VPD information */ 211 lpfc_vpd_data = kmalloc(DMP_VPD_SIZE, GFP_KERNEL); 212 if (!lpfc_vpd_data) 213 goto out_free_mbox; 214 215 do { 216 lpfc_dump_mem(phba, pmb, offset, DMP_REGION_VPD); 217 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL); 218 219 if (rc != MBX_SUCCESS) { 220 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 221 "0441 VPD not present on adapter, " 222 "mbxCmd x%x DUMP VPD, mbxStatus x%x\n", 223 mb->mbxCommand, mb->mbxStatus); 224 mb->un.varDmp.word_cnt = 0; 225 } 226 /* dump mem may return a zero when finished or we got a 227 * mailbox error, either way we are done. 228 */ 229 if (mb->un.varDmp.word_cnt == 0) 230 break; 231 if (mb->un.varDmp.word_cnt > DMP_VPD_SIZE - offset) 232 mb->un.varDmp.word_cnt = DMP_VPD_SIZE - offset; 233 lpfc_sli_pcimem_bcopy(((uint8_t *)mb) + DMP_RSP_OFFSET, 234 lpfc_vpd_data + offset, 235 mb->un.varDmp.word_cnt); 236 offset += mb->un.varDmp.word_cnt; 237 } while (mb->un.varDmp.word_cnt && offset < DMP_VPD_SIZE); 238 lpfc_parse_vpd(phba, lpfc_vpd_data, offset); 239 240 kfree(lpfc_vpd_data); 241out_free_mbox: 242 mempool_free(pmb, phba->mbox_mem_pool); 243 return 0; 244} 245 246/** 247 * lpfc_config_async_cmpl - Completion handler for config async event mbox cmd 248 * @phba: pointer to lpfc hba data structure. 249 * @pmboxq: pointer to the driver internal queue element for mailbox command. 250 * 251 * This is the completion handler for driver's configuring asynchronous event 252 * mailbox command to the device. If the mailbox command returns successfully, 253 * it will set internal async event support flag to 1; otherwise, it will 254 * set internal async event support flag to 0. 255 **/ 256static void 257lpfc_config_async_cmpl(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmboxq) 258{ 259 if (pmboxq->u.mb.mbxStatus == MBX_SUCCESS) 260 phba->temp_sensor_support = 1; 261 else 262 phba->temp_sensor_support = 0; 263 mempool_free(pmboxq, phba->mbox_mem_pool); 264 return; 265} 266 267/** 268 * lpfc_dump_wakeup_param_cmpl - dump memory mailbox command completion handler 269 * @phba: pointer to lpfc hba data structure. 270 * @pmboxq: pointer to the driver internal queue element for mailbox command. 271 * 272 * This is the completion handler for dump mailbox command for getting 273 * wake up parameters. When this command complete, the response contain 274 * Option rom version of the HBA. This function translate the version number 275 * into a human readable string and store it in OptionROMVersion. 276 **/ 277static void 278lpfc_dump_wakeup_param_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq) 279{ 280 struct prog_id *prg; 281 uint32_t prog_id_word; 282 char dist = ' '; 283 /* character array used for decoding dist type. */ 284 char dist_char[] = "nabx"; 285 286 if (pmboxq->u.mb.mbxStatus != MBX_SUCCESS) { 287 mempool_free(pmboxq, phba->mbox_mem_pool); 288 return; 289 } 290 291 prg = (struct prog_id *) &prog_id_word; 292 293 /* word 7 contain option rom version */ 294 prog_id_word = pmboxq->u.mb.un.varWords[7]; 295 296 /* Decode the Option rom version word to a readable string */ 297 if (prg->dist < 4) 298 dist = dist_char[prg->dist]; 299 300 if ((prg->dist == 3) && (prg->num == 0)) 301 sprintf(phba->OptionROMVersion, "%d.%d%d", 302 prg->ver, prg->rev, prg->lev); 303 else 304 sprintf(phba->OptionROMVersion, "%d.%d%d%c%d", 305 prg->ver, prg->rev, prg->lev, 306 dist, prg->num); 307 mempool_free(pmboxq, phba->mbox_mem_pool); 308 return; 309} 310 311/** 312 * lpfc_config_port_post - Perform lpfc initialization after config port 313 * @phba: pointer to lpfc hba data structure. 314 * 315 * This routine will do LPFC initialization after the CONFIG_PORT mailbox 316 * command call. It performs all internal resource and state setups on the 317 * port: post IOCB buffers, enable appropriate host interrupt attentions, 318 * ELS ring timers, etc. 319 * 320 * Return codes 321 * 0 - success. 322 * Any other value - error. 323 **/ 324int 325lpfc_config_port_post(struct lpfc_hba *phba) 326{ 327 struct lpfc_vport *vport = phba->pport; 328 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 329 LPFC_MBOXQ_t *pmb; 330 MAILBOX_t *mb; 331 struct lpfc_dmabuf *mp; 332 struct lpfc_sli *psli = &phba->sli; 333 uint32_t status, timeout; 334 int i, j; 335 int rc; 336 337 spin_lock_irq(&phba->hbalock); 338 /* 339 * If the Config port completed correctly the HBA is not 340 * over heated any more. 341 */ 342 if (phba->over_temp_state == HBA_OVER_TEMP) 343 phba->over_temp_state = HBA_NORMAL_TEMP; 344 spin_unlock_irq(&phba->hbalock); 345 346 pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 347 if (!pmb) { 348 phba->link_state = LPFC_HBA_ERROR; 349 return -ENOMEM; 350 } 351 mb = &pmb->u.mb; 352 353 /* Get login parameters for NID. */ 354 rc = lpfc_read_sparam(phba, pmb, 0); 355 if (rc) { 356 mempool_free(pmb, phba->mbox_mem_pool); 357 return -ENOMEM; 358 } 359 360 pmb->vport = vport; 361 if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) { 362 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 363 "0448 Adapter failed init, mbxCmd x%x " 364 "READ_SPARM mbxStatus x%x\n", 365 mb->mbxCommand, mb->mbxStatus); 366 phba->link_state = LPFC_HBA_ERROR; 367 mp = (struct lpfc_dmabuf *) pmb->context1; 368 mempool_free(pmb, phba->mbox_mem_pool); 369 lpfc_mbuf_free(phba, mp->virt, mp->phys); 370 kfree(mp); 371 return -EIO; 372 } 373 374 mp = (struct lpfc_dmabuf *) pmb->context1; 375 376 memcpy(&vport->fc_sparam, mp->virt, sizeof (struct serv_parm)); 377 lpfc_mbuf_free(phba, mp->virt, mp->phys); 378 kfree(mp); 379 pmb->context1 = NULL; 380 381 if (phba->cfg_soft_wwnn) 382 u64_to_wwn(phba->cfg_soft_wwnn, 383 vport->fc_sparam.nodeName.u.wwn); 384 if (phba->cfg_soft_wwpn) 385 u64_to_wwn(phba->cfg_soft_wwpn, 386 vport->fc_sparam.portName.u.wwn); 387 memcpy(&vport->fc_nodename, &vport->fc_sparam.nodeName, 388 sizeof (struct lpfc_name)); 389 memcpy(&vport->fc_portname, &vport->fc_sparam.portName, 390 sizeof (struct lpfc_name)); 391 392 /* Update the fc_host data structures with new wwn. */ 393 fc_host_node_name(shost) = wwn_to_u64(vport->fc_nodename.u.wwn); 394 fc_host_port_name(shost) = wwn_to_u64(vport->fc_portname.u.wwn); 395 fc_host_max_npiv_vports(shost) = phba->max_vpi; 396 397 /* If no serial number in VPD data, use low 6 bytes of WWNN */ 398 /* This should be consolidated into parse_vpd ? - mr */ 399 if (phba->SerialNumber[0] == 0) { 400 uint8_t *outptr; 401 402 outptr = &vport->fc_nodename.u.s.IEEE[0]; 403 for (i = 0; i < 12; i++) { 404 status = *outptr++; 405 j = ((status & 0xf0) >> 4); 406 if (j <= 9) 407 phba->SerialNumber[i] = 408 (char)((uint8_t) 0x30 + (uint8_t) j); 409 else 410 phba->SerialNumber[i] = 411 (char)((uint8_t) 0x61 + (uint8_t) (j - 10)); 412 i++; 413 j = (status & 0xf); 414 if (j <= 9) 415 phba->SerialNumber[i] = 416 (char)((uint8_t) 0x30 + (uint8_t) j); 417 else 418 phba->SerialNumber[i] = 419 (char)((uint8_t) 0x61 + (uint8_t) (j - 10)); 420 } 421 } 422 423 lpfc_read_config(phba, pmb); 424 pmb->vport = vport; 425 if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) { 426 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 427 "0453 Adapter failed to init, mbxCmd x%x " 428 "READ_CONFIG, mbxStatus x%x\n", 429 mb->mbxCommand, mb->mbxStatus); 430 phba->link_state = LPFC_HBA_ERROR; 431 mempool_free( pmb, phba->mbox_mem_pool); 432 return -EIO; 433 } 434 435 /* Check if the port is disabled */ 436 lpfc_sli_read_link_ste(phba); 437 438 /* Reset the DFT_HBA_Q_DEPTH to the max xri */ 439 if (phba->cfg_hba_queue_depth > (mb->un.varRdConfig.max_xri+1)) 440 phba->cfg_hba_queue_depth = 441 (mb->un.varRdConfig.max_xri + 1) - 442 lpfc_sli4_get_els_iocb_cnt(phba); 443 444 phba->lmt = mb->un.varRdConfig.lmt; 445 446 /* Get the default values for Model Name and Description */ 447 lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc); 448 449 if ((phba->cfg_link_speed > LINK_SPEED_10G) 450 || ((phba->cfg_link_speed == LINK_SPEED_1G) 451 && !(phba->lmt & LMT_1Gb)) 452 || ((phba->cfg_link_speed == LINK_SPEED_2G) 453 && !(phba->lmt & LMT_2Gb)) 454 || ((phba->cfg_link_speed == LINK_SPEED_4G) 455 && !(phba->lmt & LMT_4Gb)) 456 || ((phba->cfg_link_speed == LINK_SPEED_8G) 457 && !(phba->lmt & LMT_8Gb)) 458 || ((phba->cfg_link_speed == LINK_SPEED_10G) 459 && !(phba->lmt & LMT_10Gb))) { 460 /* Reset link speed to auto */ 461 lpfc_printf_log(phba, KERN_WARNING, LOG_LINK_EVENT, 462 "1302 Invalid speed for this board: " 463 "Reset link speed to auto: x%x\n", 464 phba->cfg_link_speed); 465 phba->cfg_link_speed = LINK_SPEED_AUTO; 466 } 467 468 phba->link_state = LPFC_LINK_DOWN; 469 470 /* Only process IOCBs on ELS ring till hba_state is READY */ 471 if (psli->ring[psli->extra_ring].cmdringaddr) 472 psli->ring[psli->extra_ring].flag |= LPFC_STOP_IOCB_EVENT; 473 if (psli->ring[psli->fcp_ring].cmdringaddr) 474 psli->ring[psli->fcp_ring].flag |= LPFC_STOP_IOCB_EVENT; 475 if (psli->ring[psli->next_ring].cmdringaddr) 476 psli->ring[psli->next_ring].flag |= LPFC_STOP_IOCB_EVENT; 477 478 /* Post receive buffers for desired rings */ 479 if (phba->sli_rev != 3) 480 lpfc_post_rcv_buf(phba); 481 482 /* 483 * Configure HBA MSI-X attention conditions to messages if MSI-X mode 484 */ 485 if (phba->intr_type == MSIX) { 486 rc = lpfc_config_msi(phba, pmb); 487 if (rc) { 488 mempool_free(pmb, phba->mbox_mem_pool); 489 return -EIO; 490 } 491 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL); 492 if (rc != MBX_SUCCESS) { 493 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX, 494 "0352 Config MSI mailbox command " 495 "failed, mbxCmd x%x, mbxStatus x%x\n", 496 pmb->u.mb.mbxCommand, 497 pmb->u.mb.mbxStatus); 498 mempool_free(pmb, phba->mbox_mem_pool); 499 return -EIO; 500 } 501 } 502 503 spin_lock_irq(&phba->hbalock); 504 /* Initialize ERATT handling flag */ 505 phba->hba_flag &= ~HBA_ERATT_HANDLED; 506 507 /* Enable appropriate host interrupts */ 508 status = readl(phba->HCregaddr); 509 status |= HC_MBINT_ENA | HC_ERINT_ENA | HC_LAINT_ENA; 510 if (psli->num_rings > 0) 511 status |= HC_R0INT_ENA; 512 if (psli->num_rings > 1) 513 status |= HC_R1INT_ENA; 514 if (psli->num_rings > 2) 515 status |= HC_R2INT_ENA; 516 if (psli->num_rings > 3) 517 status |= HC_R3INT_ENA; 518 519 if ((phba->cfg_poll & ENABLE_FCP_RING_POLLING) && 520 (phba->cfg_poll & DISABLE_FCP_RING_INT)) 521 status &= ~(HC_R0INT_ENA); 522 523 writel(status, phba->HCregaddr); 524 readl(phba->HCregaddr); /* flush */ 525 spin_unlock_irq(&phba->hbalock); 526 527 /* Set up ring-0 (ELS) timer */ 528 timeout = phba->fc_ratov * 2; 529 mod_timer(&vport->els_tmofunc, jiffies + HZ * timeout); 530 /* Set up heart beat (HB) timer */ 531 mod_timer(&phba->hb_tmofunc, jiffies + HZ * LPFC_HB_MBOX_INTERVAL); 532 phba->hb_outstanding = 0; 533 phba->last_completion_time = jiffies; 534 /* Set up error attention (ERATT) polling timer */ 535 mod_timer(&phba->eratt_poll, jiffies + HZ * LPFC_ERATT_POLL_INTERVAL); 536 537 if (phba->hba_flag & LINK_DISABLED) { 538 lpfc_printf_log(phba, 539 KERN_ERR, LOG_INIT, 540 "2598 Adapter Link is disabled.\n"); 541 lpfc_down_link(phba, pmb); 542 pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 543 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT); 544 if ((rc != MBX_SUCCESS) && (rc != MBX_BUSY)) { 545 lpfc_printf_log(phba, 546 KERN_ERR, LOG_INIT, 547 "2599 Adapter failed to issue DOWN_LINK" 548 " mbox command rc 0x%x\n", rc); 549 550 mempool_free(pmb, phba->mbox_mem_pool); 551 return -EIO; 552 } 553 } else if (phba->cfg_suppress_link_up == LPFC_INITIALIZE_LINK) { 554 lpfc_init_link(phba, pmb, phba->cfg_topology, 555 phba->cfg_link_speed); 556 pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 557 lpfc_set_loopback_flag(phba); 558 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT); 559 if (rc != MBX_SUCCESS) { 560 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 561 "0454 Adapter failed to init, mbxCmd x%x " 562 "INIT_LINK, mbxStatus x%x\n", 563 mb->mbxCommand, mb->mbxStatus); 564 565 /* Clear all interrupt enable conditions */ 566 writel(0, phba->HCregaddr); 567 readl(phba->HCregaddr); /* flush */ 568 /* Clear all pending interrupts */ 569 writel(0xffffffff, phba->HAregaddr); 570 readl(phba->HAregaddr); /* flush */ 571 572 phba->link_state = LPFC_HBA_ERROR; 573 if (rc != MBX_BUSY) 574 mempool_free(pmb, phba->mbox_mem_pool); 575 return -EIO; 576 } 577 } 578 /* MBOX buffer will be freed in mbox compl */ 579 pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 580 if (!pmb) { 581 phba->link_state = LPFC_HBA_ERROR; 582 return -ENOMEM; 583 } 584 585 lpfc_config_async(phba, pmb, LPFC_ELS_RING); 586 pmb->mbox_cmpl = lpfc_config_async_cmpl; 587 pmb->vport = phba->pport; 588 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT); 589 590 if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) { 591 lpfc_printf_log(phba, 592 KERN_ERR, 593 LOG_INIT, 594 "0456 Adapter failed to issue " 595 "ASYNCEVT_ENABLE mbox status x%x\n", 596 rc); 597 mempool_free(pmb, phba->mbox_mem_pool); 598 } 599 600 /* Get Option rom version */ 601 pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 602 if (!pmb) { 603 phba->link_state = LPFC_HBA_ERROR; 604 return -ENOMEM; 605 } 606 607 lpfc_dump_wakeup_param(phba, pmb); 608 pmb->mbox_cmpl = lpfc_dump_wakeup_param_cmpl; 609 pmb->vport = phba->pport; 610 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT); 611 612 if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) { 613 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, "0435 Adapter failed " 614 "to get Option ROM version status x%x\n", rc); 615 mempool_free(pmb, phba->mbox_mem_pool); 616 } 617 618 return 0; 619} 620 621/** 622 * lpfc_hba_init_link - Initialize the FC link 623 * @phba: pointer to lpfc hba data structure. 624 * @flag: mailbox command issue mode - either MBX_POLL or MBX_NOWAIT 625 * 626 * This routine will issue the INIT_LINK mailbox command call. 627 * It is available to other drivers through the lpfc_hba data 628 * structure for use as a delayed link up mechanism with the 629 * module parameter lpfc_suppress_link_up. 630 * 631 * Return code 632 * 0 - success 633 * Any other value - error 634 **/ 635int 636lpfc_hba_init_link(struct lpfc_hba *phba, uint32_t flag) 637{ 638 struct lpfc_vport *vport = phba->pport; 639 LPFC_MBOXQ_t *pmb; 640 MAILBOX_t *mb; 641 int rc; 642 643 pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 644 if (!pmb) { 645 phba->link_state = LPFC_HBA_ERROR; 646 return -ENOMEM; 647 } 648 mb = &pmb->u.mb; 649 pmb->vport = vport; 650 651 lpfc_init_link(phba, pmb, phba->cfg_topology, 652 phba->cfg_link_speed); 653 pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 654 lpfc_set_loopback_flag(phba); 655 rc = lpfc_sli_issue_mbox(phba, pmb, flag); 656 if (rc != MBX_SUCCESS) { 657 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 658 "0498 Adapter failed to init, mbxCmd x%x " 659 "INIT_LINK, mbxStatus x%x\n", 660 mb->mbxCommand, mb->mbxStatus); 661 /* Clear all interrupt enable conditions */ 662 writel(0, phba->HCregaddr); 663 readl(phba->HCregaddr); /* flush */ 664 /* Clear all pending interrupts */ 665 writel(0xffffffff, phba->HAregaddr); 666 readl(phba->HAregaddr); /* flush */ 667 phba->link_state = LPFC_HBA_ERROR; 668 if (rc != MBX_BUSY || flag == MBX_POLL) 669 mempool_free(pmb, phba->mbox_mem_pool); 670 return -EIO; 671 } 672 phba->cfg_suppress_link_up = LPFC_INITIALIZE_LINK; 673 if (flag == MBX_POLL) 674 mempool_free(pmb, phba->mbox_mem_pool); 675 676 return 0; 677} 678 679/** 680 * lpfc_hba_down_link - this routine downs the FC link 681 * @phba: pointer to lpfc hba data structure. 682 * @flag: mailbox command issue mode - either MBX_POLL or MBX_NOWAIT 683 * 684 * This routine will issue the DOWN_LINK mailbox command call. 685 * It is available to other drivers through the lpfc_hba data 686 * structure for use to stop the link. 687 * 688 * Return code 689 * 0 - success 690 * Any other value - error 691 **/ 692int 693lpfc_hba_down_link(struct lpfc_hba *phba, uint32_t flag) 694{ 695 LPFC_MBOXQ_t *pmb; 696 int rc; 697 698 pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 699 if (!pmb) { 700 phba->link_state = LPFC_HBA_ERROR; 701 return -ENOMEM; 702 } 703 704 lpfc_printf_log(phba, 705 KERN_ERR, LOG_INIT, 706 "0491 Adapter Link is disabled.\n"); 707 lpfc_down_link(phba, pmb); 708 pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 709 rc = lpfc_sli_issue_mbox(phba, pmb, flag); 710 if ((rc != MBX_SUCCESS) && (rc != MBX_BUSY)) { 711 lpfc_printf_log(phba, 712 KERN_ERR, LOG_INIT, 713 "2522 Adapter failed to issue DOWN_LINK" 714 " mbox command rc 0x%x\n", rc); 715 716 mempool_free(pmb, phba->mbox_mem_pool); 717 return -EIO; 718 } 719 if (flag == MBX_POLL) 720 mempool_free(pmb, phba->mbox_mem_pool); 721 722 return 0; 723} 724 725/** 726 * lpfc_hba_down_prep - Perform lpfc uninitialization prior to HBA reset 727 * @phba: pointer to lpfc HBA data structure. 728 * 729 * This routine will do LPFC uninitialization before the HBA is reset when 730 * bringing down the SLI Layer. 731 * 732 * Return codes 733 * 0 - success. 734 * Any other value - error. 735 **/ 736int 737lpfc_hba_down_prep(struct lpfc_hba *phba) 738{ 739 struct lpfc_vport **vports; 740 int i; 741 742 if (phba->sli_rev <= LPFC_SLI_REV3) { 743 /* Disable interrupts */ 744 writel(0, phba->HCregaddr); 745 readl(phba->HCregaddr); /* flush */ 746 } 747 748 if (phba->pport->load_flag & FC_UNLOADING) 749 lpfc_cleanup_discovery_resources(phba->pport); 750 else { 751 vports = lpfc_create_vport_work_array(phba); 752 if (vports != NULL) 753 for (i = 0; i <= phba->max_vports && 754 vports[i] != NULL; i++) 755 lpfc_cleanup_discovery_resources(vports[i]); 756 lpfc_destroy_vport_work_array(phba, vports); 757 } 758 return 0; 759} 760 761/** 762 * lpfc_hba_down_post_s3 - Perform lpfc uninitialization after HBA reset 763 * @phba: pointer to lpfc HBA data structure. 764 * 765 * This routine will do uninitialization after the HBA is reset when bring 766 * down the SLI Layer. 767 * 768 * Return codes 769 * 0 - success. 770 * Any other value - error. 771 **/ 772static int 773lpfc_hba_down_post_s3(struct lpfc_hba *phba) 774{ 775 struct lpfc_sli *psli = &phba->sli; 776 struct lpfc_sli_ring *pring; 777 struct lpfc_dmabuf *mp, *next_mp; 778 LIST_HEAD(completions); 779 int i; 780 781 if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) 782 lpfc_sli_hbqbuf_free_all(phba); 783 else { 784 /* Cleanup preposted buffers on the ELS ring */ 785 pring = &psli->ring[LPFC_ELS_RING]; 786 list_for_each_entry_safe(mp, next_mp, &pring->postbufq, list) { 787 list_del(&mp->list); 788 pring->postbufq_cnt--; 789 lpfc_mbuf_free(phba, mp->virt, mp->phys); 790 kfree(mp); 791 } 792 } 793 794 spin_lock_irq(&phba->hbalock); 795 for (i = 0; i < psli->num_rings; i++) { 796 pring = &psli->ring[i]; 797 798 /* At this point in time the HBA is either reset or DOA. Either 799 * way, nothing should be on txcmplq as it will NEVER complete. 800 */ 801 list_splice_init(&pring->txcmplq, &completions); 802 pring->txcmplq_cnt = 0; 803 spin_unlock_irq(&phba->hbalock); 804 805 /* Cancel all the IOCBs from the completions list */ 806 lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT, 807 IOERR_SLI_ABORTED); 808 809 lpfc_sli_abort_iocb_ring(phba, pring); 810 spin_lock_irq(&phba->hbalock); 811 } 812 spin_unlock_irq(&phba->hbalock); 813 814 return 0; 815} 816/** 817 * lpfc_hba_down_post_s4 - Perform lpfc uninitialization after HBA reset 818 * @phba: pointer to lpfc HBA data structure. 819 * 820 * This routine will do uninitialization after the HBA is reset when bring 821 * down the SLI Layer. 822 * 823 * Return codes 824 * 0 - success. 825 * Any other value - error. 826 **/ 827static int 828lpfc_hba_down_post_s4(struct lpfc_hba *phba) 829{ 830 struct lpfc_scsi_buf *psb, *psb_next; 831 LIST_HEAD(aborts); 832 int ret; 833 unsigned long iflag = 0; 834 struct lpfc_sglq *sglq_entry = NULL; 835 836 ret = lpfc_hba_down_post_s3(phba); 837 if (ret) 838 return ret; 839 /* At this point in time the HBA is either reset or DOA. Either 840 * way, nothing should be on lpfc_abts_els_sgl_list, it needs to be 841 * on the lpfc_sgl_list so that it can either be freed if the 842 * driver is unloading or reposted if the driver is restarting 843 * the port. 844 */ 845 spin_lock_irq(&phba->hbalock); /* required for lpfc_sgl_list and */ 846 /* scsl_buf_list */ 847 /* abts_sgl_list_lock required because worker thread uses this 848 * list. 849 */ 850 spin_lock(&phba->sli4_hba.abts_sgl_list_lock); 851 list_for_each_entry(sglq_entry, 852 &phba->sli4_hba.lpfc_abts_els_sgl_list, list) 853 sglq_entry->state = SGL_FREED; 854 855 list_splice_init(&phba->sli4_hba.lpfc_abts_els_sgl_list, 856 &phba->sli4_hba.lpfc_sgl_list); 857 spin_unlock(&phba->sli4_hba.abts_sgl_list_lock); 858 /* abts_scsi_buf_list_lock required because worker thread uses this 859 * list. 860 */ 861 spin_lock(&phba->sli4_hba.abts_scsi_buf_list_lock); 862 list_splice_init(&phba->sli4_hba.lpfc_abts_scsi_buf_list, 863 &aborts); 864 spin_unlock(&phba->sli4_hba.abts_scsi_buf_list_lock); 865 spin_unlock_irq(&phba->hbalock); 866 867 list_for_each_entry_safe(psb, psb_next, &aborts, list) { 868 psb->pCmd = NULL; 869 psb->status = IOSTAT_SUCCESS; 870 } 871 spin_lock_irqsave(&phba->scsi_buf_list_lock, iflag); 872 list_splice(&aborts, &phba->lpfc_scsi_buf_list); 873 spin_unlock_irqrestore(&phba->scsi_buf_list_lock, iflag); 874 return 0; 875} 876 877/** 878 * lpfc_hba_down_post - Wrapper func for hba down post routine 879 * @phba: pointer to lpfc HBA data structure. 880 * 881 * This routine wraps the actual SLI3 or SLI4 routine for performing 882 * uninitialization after the HBA is reset when bring down the SLI Layer. 883 * 884 * Return codes 885 * 0 - success. 886 * Any other value - error. 887 **/ 888int 889lpfc_hba_down_post(struct lpfc_hba *phba) 890{ 891 return (*phba->lpfc_hba_down_post)(phba); 892} 893 894/** 895 * lpfc_hb_timeout - The HBA-timer timeout handler 896 * @ptr: unsigned long holds the pointer to lpfc hba data structure. 897 * 898 * This is the HBA-timer timeout handler registered to the lpfc driver. When 899 * this timer fires, a HBA timeout event shall be posted to the lpfc driver 900 * work-port-events bitmap and the worker thread is notified. This timeout 901 * event will be used by the worker thread to invoke the actual timeout 902 * handler routine, lpfc_hb_timeout_handler. Any periodical operations will 903 * be performed in the timeout handler and the HBA timeout event bit shall 904 * be cleared by the worker thread after it has taken the event bitmap out. 905 **/ 906static void 907lpfc_hb_timeout(unsigned long ptr) 908{ 909 struct lpfc_hba *phba; 910 uint32_t tmo_posted; 911 unsigned long iflag; 912 913 phba = (struct lpfc_hba *)ptr; 914 915 /* Check for heart beat timeout conditions */ 916 spin_lock_irqsave(&phba->pport->work_port_lock, iflag); 917 tmo_posted = phba->pport->work_port_events & WORKER_HB_TMO; 918 if (!tmo_posted) 919 phba->pport->work_port_events |= WORKER_HB_TMO; 920 spin_unlock_irqrestore(&phba->pport->work_port_lock, iflag); 921 922 /* Tell the worker thread there is work to do */ 923 if (!tmo_posted) 924 lpfc_worker_wake_up(phba); 925 return; 926} 927 928/** 929 * lpfc_hb_mbox_cmpl - The lpfc heart-beat mailbox command callback function 930 * @phba: pointer to lpfc hba data structure. 931 * @pmboxq: pointer to the driver internal queue element for mailbox command. 932 * 933 * This is the callback function to the lpfc heart-beat mailbox command. 934 * If configured, the lpfc driver issues the heart-beat mailbox command to 935 * the HBA every LPFC_HB_MBOX_INTERVAL (current 5) seconds. At the time the 936 * heart-beat mailbox command is issued, the driver shall set up heart-beat 937 * timeout timer to LPFC_HB_MBOX_TIMEOUT (current 30) seconds and marks 938 * heart-beat outstanding state. Once the mailbox command comes back and 939 * no error conditions detected, the heart-beat mailbox command timer is 940 * reset to LPFC_HB_MBOX_INTERVAL seconds and the heart-beat outstanding 941 * state is cleared for the next heart-beat. If the timer expired with the 942 * heart-beat outstanding state set, the driver will put the HBA offline. 943 **/ 944static void 945lpfc_hb_mbox_cmpl(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmboxq) 946{ 947 unsigned long drvr_flag; 948 949 spin_lock_irqsave(&phba->hbalock, drvr_flag); 950 phba->hb_outstanding = 0; 951 spin_unlock_irqrestore(&phba->hbalock, drvr_flag); 952 953 /* Check and reset heart-beat timer is necessary */ 954 mempool_free(pmboxq, phba->mbox_mem_pool); 955 if (!(phba->pport->fc_flag & FC_OFFLINE_MODE) && 956 !(phba->link_state == LPFC_HBA_ERROR) && 957 !(phba->pport->load_flag & FC_UNLOADING)) 958 mod_timer(&phba->hb_tmofunc, 959 jiffies + HZ * LPFC_HB_MBOX_INTERVAL); 960 return; 961} 962 963/** 964 * lpfc_hb_timeout_handler - The HBA-timer timeout handler 965 * @phba: pointer to lpfc hba data structure. 966 * 967 * This is the actual HBA-timer timeout handler to be invoked by the worker 968 * thread whenever the HBA timer fired and HBA-timeout event posted. This 969 * handler performs any periodic operations needed for the device. If such 970 * periodic event has already been attended to either in the interrupt handler 971 * or by processing slow-ring or fast-ring events within the HBA-timer 972 * timeout window (LPFC_HB_MBOX_INTERVAL), this handler just simply resets 973 * the timer for the next timeout period. If lpfc heart-beat mailbox command 974 * is configured and there is no heart-beat mailbox command outstanding, a 975 * heart-beat mailbox is issued and timer set properly. Otherwise, if there 976 * has been a heart-beat mailbox command outstanding, the HBA shall be put 977 * to offline. 978 **/ 979void 980lpfc_hb_timeout_handler(struct lpfc_hba *phba) 981{ 982 struct lpfc_vport **vports; 983 LPFC_MBOXQ_t *pmboxq; 984 struct lpfc_dmabuf *buf_ptr; 985 int retval, i; 986 struct lpfc_sli *psli = &phba->sli; 987 LIST_HEAD(completions); 988 989 vports = lpfc_create_vport_work_array(phba); 990 if (vports != NULL) 991 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) 992 lpfc_rcv_seq_check_edtov(vports[i]); 993 lpfc_destroy_vport_work_array(phba, vports); 994 995 if ((phba->link_state == LPFC_HBA_ERROR) || 996 (phba->pport->load_flag & FC_UNLOADING) || 997 (phba->pport->fc_flag & FC_OFFLINE_MODE)) 998 return; 999 1000 spin_lock_irq(&phba->pport->work_port_lock); 1001 1002 if (time_after(phba->last_completion_time + LPFC_HB_MBOX_INTERVAL * HZ, 1003 jiffies)) { 1004 spin_unlock_irq(&phba->pport->work_port_lock); 1005 if (!phba->hb_outstanding) 1006 mod_timer(&phba->hb_tmofunc, 1007 jiffies + HZ * LPFC_HB_MBOX_INTERVAL); 1008 else 1009 mod_timer(&phba->hb_tmofunc, 1010 jiffies + HZ * LPFC_HB_MBOX_TIMEOUT); 1011 return; 1012 } 1013 spin_unlock_irq(&phba->pport->work_port_lock); 1014 1015 if (phba->elsbuf_cnt && 1016 (phba->elsbuf_cnt == phba->elsbuf_prev_cnt)) { 1017 spin_lock_irq(&phba->hbalock); 1018 list_splice_init(&phba->elsbuf, &completions); 1019 phba->elsbuf_cnt = 0; 1020 phba->elsbuf_prev_cnt = 0; 1021 spin_unlock_irq(&phba->hbalock); 1022 1023 while (!list_empty(&completions)) { 1024 list_remove_head(&completions, buf_ptr, 1025 struct lpfc_dmabuf, list); 1026 lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys); 1027 kfree(buf_ptr); 1028 } 1029 } 1030 phba->elsbuf_prev_cnt = phba->elsbuf_cnt; 1031 1032 /* If there is no heart beat outstanding, issue a heartbeat command */ 1033 if (phba->cfg_enable_hba_heartbeat) { 1034 if (!phba->hb_outstanding) { 1035 if ((!(psli->sli_flag & LPFC_SLI_MBOX_ACTIVE)) && 1036 (list_empty(&psli->mboxq))) { 1037 pmboxq = mempool_alloc(phba->mbox_mem_pool, 1038 GFP_KERNEL); 1039 if (!pmboxq) { 1040 mod_timer(&phba->hb_tmofunc, 1041 jiffies + 1042 HZ * LPFC_HB_MBOX_INTERVAL); 1043 return; 1044 } 1045 1046 lpfc_heart_beat(phba, pmboxq); 1047 pmboxq->mbox_cmpl = lpfc_hb_mbox_cmpl; 1048 pmboxq->vport = phba->pport; 1049 retval = lpfc_sli_issue_mbox(phba, pmboxq, 1050 MBX_NOWAIT); 1051 1052 if (retval != MBX_BUSY && 1053 retval != MBX_SUCCESS) { 1054 mempool_free(pmboxq, 1055 phba->mbox_mem_pool); 1056 mod_timer(&phba->hb_tmofunc, 1057 jiffies + 1058 HZ * LPFC_HB_MBOX_INTERVAL); 1059 return; 1060 } 1061 phba->skipped_hb = 0; 1062 phba->hb_outstanding = 1; 1063 } else if (time_before_eq(phba->last_completion_time, 1064 phba->skipped_hb)) { 1065 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 1066 "2857 Last completion time not " 1067 " updated in %d ms\n", 1068 jiffies_to_msecs(jiffies 1069 - phba->last_completion_time)); 1070 } else 1071 phba->skipped_hb = jiffies; 1072 1073 mod_timer(&phba->hb_tmofunc, 1074 jiffies + HZ * LPFC_HB_MBOX_TIMEOUT); 1075 return; 1076 } else { 1077 /* 1078 * If heart beat timeout called with hb_outstanding set 1079 * we need to take the HBA offline. 1080 */ 1081 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 1082 "0459 Adapter heartbeat failure, " 1083 "taking this port offline.\n"); 1084 1085 spin_lock_irq(&phba->hbalock); 1086 psli->sli_flag &= ~LPFC_SLI_ACTIVE; 1087 spin_unlock_irq(&phba->hbalock); 1088 1089 lpfc_offline_prep(phba); 1090 lpfc_offline(phba); 1091 lpfc_unblock_mgmt_io(phba); 1092 phba->link_state = LPFC_HBA_ERROR; 1093 lpfc_hba_down_post(phba); 1094 } 1095 } 1096} 1097 1098/** 1099 * lpfc_offline_eratt - Bring lpfc offline on hardware error attention 1100 * @phba: pointer to lpfc hba data structure. 1101 * 1102 * This routine is called to bring the HBA offline when HBA hardware error 1103 * other than Port Error 6 has been detected. 1104 **/ 1105static void 1106lpfc_offline_eratt(struct lpfc_hba *phba) 1107{ 1108 struct lpfc_sli *psli = &phba->sli; 1109 1110 spin_lock_irq(&phba->hbalock); 1111 psli->sli_flag &= ~LPFC_SLI_ACTIVE; 1112 spin_unlock_irq(&phba->hbalock); 1113 lpfc_offline_prep(phba); 1114 1115 lpfc_offline(phba); 1116 lpfc_reset_barrier(phba); 1117 spin_lock_irq(&phba->hbalock); 1118 lpfc_sli_brdreset(phba); 1119 spin_unlock_irq(&phba->hbalock); 1120 lpfc_hba_down_post(phba); 1121 lpfc_sli_brdready(phba, HS_MBRDY); 1122 lpfc_unblock_mgmt_io(phba); 1123 phba->link_state = LPFC_HBA_ERROR; 1124 return; 1125} 1126 1127/** 1128 * lpfc_sli4_offline_eratt - Bring lpfc offline on SLI4 hardware error attention 1129 * @phba: pointer to lpfc hba data structure. 1130 * 1131 * This routine is called to bring a SLI4 HBA offline when HBA hardware error 1132 * other than Port Error 6 has been detected. 1133 **/ 1134static void 1135lpfc_sli4_offline_eratt(struct lpfc_hba *phba) 1136{ 1137 lpfc_offline_prep(phba); 1138 lpfc_offline(phba); 1139 lpfc_sli4_brdreset(phba); 1140 lpfc_hba_down_post(phba); 1141 lpfc_sli4_post_status_check(phba); 1142 lpfc_unblock_mgmt_io(phba); 1143 phba->link_state = LPFC_HBA_ERROR; 1144} 1145 1146/** 1147 * lpfc_handle_deferred_eratt - The HBA hardware deferred error handler 1148 * @phba: pointer to lpfc hba data structure. 1149 * 1150 * This routine is invoked to handle the deferred HBA hardware error 1151 * conditions. This type of error is indicated by HBA by setting ER1 1152 * and another ER bit in the host status register. The driver will 1153 * wait until the ER1 bit clears before handling the error condition. 1154 **/ 1155static void 1156lpfc_handle_deferred_eratt(struct lpfc_hba *phba) 1157{ 1158 uint32_t old_host_status = phba->work_hs; 1159 struct lpfc_sli_ring *pring; 1160 struct lpfc_sli *psli = &phba->sli; 1161 1162 /* If the pci channel is offline, ignore possible errors, 1163 * since we cannot communicate with the pci card anyway. 1164 */ 1165 if (pci_channel_offline(phba->pcidev)) { 1166 spin_lock_irq(&phba->hbalock); 1167 phba->hba_flag &= ~DEFER_ERATT; 1168 spin_unlock_irq(&phba->hbalock); 1169 return; 1170 } 1171 1172 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 1173 "0479 Deferred Adapter Hardware Error " 1174 "Data: x%x x%x x%x\n", 1175 phba->work_hs, 1176 phba->work_status[0], phba->work_status[1]); 1177 1178 spin_lock_irq(&phba->hbalock); 1179 psli->sli_flag &= ~LPFC_SLI_ACTIVE; 1180 spin_unlock_irq(&phba->hbalock); 1181 1182 1183 /* 1184 * Firmware stops when it triggred erratt. That could cause the I/Os 1185 * dropped by the firmware. Error iocb (I/O) on txcmplq and let the 1186 * SCSI layer retry it after re-establishing link. 1187 */ 1188 pring = &psli->ring[psli->fcp_ring]; 1189 lpfc_sli_abort_iocb_ring(phba, pring); 1190 1191 /* 1192 * There was a firmware error. Take the hba offline and then 1193 * attempt to restart it. 1194 */ 1195 lpfc_offline_prep(phba); 1196 lpfc_offline(phba); 1197 1198 /* Wait for the ER1 bit to clear.*/ 1199 while (phba->work_hs & HS_FFER1) { 1200 msleep(100); 1201 phba->work_hs = readl(phba->HSregaddr); 1202 /* If driver is unloading let the worker thread continue */ 1203 if (phba->pport->load_flag & FC_UNLOADING) { 1204 phba->work_hs = 0; 1205 break; 1206 } 1207 } 1208 1209 /* 1210 * This is to ptrotect against a race condition in which 1211 * first write to the host attention register clear the 1212 * host status register. 1213 */ 1214 if ((!phba->work_hs) && (!(phba->pport->load_flag & FC_UNLOADING))) 1215 phba->work_hs = old_host_status & ~HS_FFER1; 1216 1217 spin_lock_irq(&phba->hbalock); 1218 phba->hba_flag &= ~DEFER_ERATT; 1219 spin_unlock_irq(&phba->hbalock); 1220 phba->work_status[0] = readl(phba->MBslimaddr + 0xa8); 1221 phba->work_status[1] = readl(phba->MBslimaddr + 0xac); 1222} 1223 1224static void 1225lpfc_board_errevt_to_mgmt(struct lpfc_hba *phba) 1226{ 1227 struct lpfc_board_event_header board_event; 1228 struct Scsi_Host *shost; 1229 1230 board_event.event_type = FC_REG_BOARD_EVENT; 1231 board_event.subcategory = LPFC_EVENT_PORTINTERR; 1232 shost = lpfc_shost_from_vport(phba->pport); 1233 fc_host_post_vendor_event(shost, fc_get_event_number(), 1234 sizeof(board_event), 1235 (char *) &board_event, 1236 LPFC_NL_VENDOR_ID); 1237} 1238 1239/** 1240 * lpfc_handle_eratt_s3 - The SLI3 HBA hardware error handler 1241 * @phba: pointer to lpfc hba data structure. 1242 * 1243 * This routine is invoked to handle the following HBA hardware error 1244 * conditions: 1245 * 1 - HBA error attention interrupt 1246 * 2 - DMA ring index out of range 1247 * 3 - Mailbox command came back as unknown 1248 **/ 1249static void 1250lpfc_handle_eratt_s3(struct lpfc_hba *phba) 1251{ 1252 struct lpfc_vport *vport = phba->pport; 1253 struct lpfc_sli *psli = &phba->sli; 1254 struct lpfc_sli_ring *pring; 1255 uint32_t event_data; 1256 unsigned long temperature; 1257 struct temp_event temp_event_data; 1258 struct Scsi_Host *shost; 1259 1260 /* If the pci channel is offline, ignore possible errors, 1261 * since we cannot communicate with the pci card anyway. 1262 */ 1263 if (pci_channel_offline(phba->pcidev)) { 1264 spin_lock_irq(&phba->hbalock); 1265 phba->hba_flag &= ~DEFER_ERATT; 1266 spin_unlock_irq(&phba->hbalock); 1267 return; 1268 } 1269 1270 /* If resets are disabled then leave the HBA alone and return */ 1271 if (!phba->cfg_enable_hba_reset) 1272 return; 1273 1274 /* Send an internal error event to mgmt application */ 1275 lpfc_board_errevt_to_mgmt(phba); 1276 1277 if (phba->hba_flag & DEFER_ERATT) 1278 lpfc_handle_deferred_eratt(phba); 1279 1280 if (phba->work_hs & HS_FFER6) { 1281 /* Re-establishing Link */ 1282 lpfc_printf_log(phba, KERN_INFO, LOG_LINK_EVENT, 1283 "1301 Re-establishing Link " 1284 "Data: x%x x%x x%x\n", 1285 phba->work_hs, 1286 phba->work_status[0], phba->work_status[1]); 1287 1288 spin_lock_irq(&phba->hbalock); 1289 psli->sli_flag &= ~LPFC_SLI_ACTIVE; 1290 spin_unlock_irq(&phba->hbalock); 1291 1292 /* 1293 * Firmware stops when it triggled erratt with HS_FFER6. 1294 * That could cause the I/Os dropped by the firmware. 1295 * Error iocb (I/O) on txcmplq and let the SCSI layer 1296 * retry it after re-establishing link. 1297 */ 1298 pring = &psli->ring[psli->fcp_ring]; 1299 lpfc_sli_abort_iocb_ring(phba, pring); 1300 1301 /* 1302 * There was a firmware error. Take the hba offline and then 1303 * attempt to restart it. 1304 */ 1305 lpfc_offline_prep(phba); 1306 lpfc_offline(phba); 1307 lpfc_sli_brdrestart(phba); 1308 if (lpfc_online(phba) == 0) { /* Initialize the HBA */ 1309 lpfc_unblock_mgmt_io(phba); 1310 return; 1311 } 1312 lpfc_unblock_mgmt_io(phba); 1313 } else if (phba->work_hs & HS_CRIT_TEMP) { 1314 temperature = readl(phba->MBslimaddr + TEMPERATURE_OFFSET); 1315 temp_event_data.event_type = FC_REG_TEMPERATURE_EVENT; 1316 temp_event_data.event_code = LPFC_CRIT_TEMP; 1317 temp_event_data.data = (uint32_t)temperature; 1318 1319 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 1320 "0406 Adapter maximum temperature exceeded " 1321 "(%ld), taking this port offline " 1322 "Data: x%x x%x x%x\n", 1323 temperature, phba->work_hs, 1324 phba->work_status[0], phba->work_status[1]); 1325 1326 shost = lpfc_shost_from_vport(phba->pport); 1327 fc_host_post_vendor_event(shost, fc_get_event_number(), 1328 sizeof(temp_event_data), 1329 (char *) &temp_event_data, 1330 SCSI_NL_VID_TYPE_PCI 1331 | PCI_VENDOR_ID_EMULEX); 1332 1333 spin_lock_irq(&phba->hbalock); 1334 phba->over_temp_state = HBA_OVER_TEMP; 1335 spin_unlock_irq(&phba->hbalock); 1336 lpfc_offline_eratt(phba); 1337 1338 } else { 1339 /* The if clause above forces this code path when the status 1340 * failure is a value other than FFER6. Do not call the offline 1341 * twice. This is the adapter hardware error path. 1342 */ 1343 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 1344 "0457 Adapter Hardware Error " 1345 "Data: x%x x%x x%x\n", 1346 phba->work_hs, 1347 phba->work_status[0], phba->work_status[1]); 1348 1349 event_data = FC_REG_DUMP_EVENT; 1350 shost = lpfc_shost_from_vport(vport); 1351 fc_host_post_vendor_event(shost, fc_get_event_number(), 1352 sizeof(event_data), (char *) &event_data, 1353 SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_EMULEX); 1354 1355 lpfc_offline_eratt(phba); 1356 } 1357 return; 1358} 1359 1360/** 1361 * lpfc_handle_eratt_s4 - The SLI4 HBA hardware error handler 1362 * @phba: pointer to lpfc hba data structure. 1363 * 1364 * This routine is invoked to handle the SLI4 HBA hardware error attention 1365 * conditions. 1366 **/ 1367static void 1368lpfc_handle_eratt_s4(struct lpfc_hba *phba) 1369{ 1370 struct lpfc_vport *vport = phba->pport; 1371 uint32_t event_data; 1372 struct Scsi_Host *shost; 1373 1374 /* If the pci channel is offline, ignore possible errors, since 1375 * we cannot communicate with the pci card anyway. 1376 */ 1377 if (pci_channel_offline(phba->pcidev)) 1378 return; 1379 /* If resets are disabled then leave the HBA alone and return */ 1380 if (!phba->cfg_enable_hba_reset) 1381 return; 1382 1383 /* Send an internal error event to mgmt application */ 1384 lpfc_board_errevt_to_mgmt(phba); 1385 1386 /* For now, the actual action for SLI4 device handling is not 1387 * specified yet, just treated it as adaptor hardware failure 1388 */ 1389 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 1390 "0143 SLI4 Adapter Hardware Error Data: x%x x%x\n", 1391 phba->work_status[0], phba->work_status[1]); 1392 1393 event_data = FC_REG_DUMP_EVENT; 1394 shost = lpfc_shost_from_vport(vport); 1395 fc_host_post_vendor_event(shost, fc_get_event_number(), 1396 sizeof(event_data), (char *) &event_data, 1397 SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_EMULEX); 1398 1399 lpfc_sli4_offline_eratt(phba); 1400} 1401 1402/** 1403 * lpfc_handle_eratt - Wrapper func for handling hba error attention 1404 * @phba: pointer to lpfc HBA data structure. 1405 * 1406 * This routine wraps the actual SLI3 or SLI4 hba error attention handling 1407 * routine from the API jump table function pointer from the lpfc_hba struct. 1408 * 1409 * Return codes 1410 * 0 - success. 1411 * Any other value - error. 1412 **/ 1413void 1414lpfc_handle_eratt(struct lpfc_hba *phba) 1415{ 1416 (*phba->lpfc_handle_eratt)(phba); 1417} 1418 1419/** 1420 * lpfc_handle_latt - The HBA link event handler 1421 * @phba: pointer to lpfc hba data structure. 1422 * 1423 * This routine is invoked from the worker thread to handle a HBA host 1424 * attention link event. 1425 **/ 1426void 1427lpfc_handle_latt(struct lpfc_hba *phba) 1428{ 1429 struct lpfc_vport *vport = phba->pport; 1430 struct lpfc_sli *psli = &phba->sli; 1431 LPFC_MBOXQ_t *pmb; 1432 volatile uint32_t control; 1433 struct lpfc_dmabuf *mp; 1434 int rc = 0; 1435 1436 pmb = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 1437 if (!pmb) { 1438 rc = 1; 1439 goto lpfc_handle_latt_err_exit; 1440 } 1441 1442 mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL); 1443 if (!mp) { 1444 rc = 2; 1445 goto lpfc_handle_latt_free_pmb; 1446 } 1447 1448 mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys); 1449 if (!mp->virt) { 1450 rc = 3; 1451 goto lpfc_handle_latt_free_mp; 1452 } 1453 1454 /* Cleanup any outstanding ELS commands */ 1455 lpfc_els_flush_all_cmd(phba); 1456 1457 psli->slistat.link_event++; 1458 lpfc_read_la(phba, pmb, mp); 1459 pmb->mbox_cmpl = lpfc_mbx_cmpl_read_la; 1460 pmb->vport = vport; 1461 /* Block ELS IOCBs until we have processed this mbox command */ 1462 phba->sli.ring[LPFC_ELS_RING].flag |= LPFC_STOP_IOCB_EVENT; 1463 rc = lpfc_sli_issue_mbox (phba, pmb, MBX_NOWAIT); 1464 if (rc == MBX_NOT_FINISHED) { 1465 rc = 4; 1466 goto lpfc_handle_latt_free_mbuf; 1467 } 1468 1469 /* Clear Link Attention in HA REG */ 1470 spin_lock_irq(&phba->hbalock); 1471 writel(HA_LATT, phba->HAregaddr); 1472 readl(phba->HAregaddr); /* flush */ 1473 spin_unlock_irq(&phba->hbalock); 1474 1475 return; 1476 1477lpfc_handle_latt_free_mbuf: 1478 phba->sli.ring[LPFC_ELS_RING].flag &= ~LPFC_STOP_IOCB_EVENT; 1479 lpfc_mbuf_free(phba, mp->virt, mp->phys); 1480lpfc_handle_latt_free_mp: 1481 kfree(mp); 1482lpfc_handle_latt_free_pmb: 1483 mempool_free(pmb, phba->mbox_mem_pool); 1484lpfc_handle_latt_err_exit: 1485 /* Enable Link attention interrupts */ 1486 spin_lock_irq(&phba->hbalock); 1487 psli->sli_flag |= LPFC_PROCESS_LA; 1488 control = readl(phba->HCregaddr); 1489 control |= HC_LAINT_ENA; 1490 writel(control, phba->HCregaddr); 1491 readl(phba->HCregaddr); /* flush */ 1492 1493 /* Clear Link Attention in HA REG */ 1494 writel(HA_LATT, phba->HAregaddr); 1495 readl(phba->HAregaddr); /* flush */ 1496 spin_unlock_irq(&phba->hbalock); 1497 lpfc_linkdown(phba); 1498 phba->link_state = LPFC_HBA_ERROR; 1499 1500 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX, 1501 "0300 LATT: Cannot issue READ_LA: Data:%d\n", rc); 1502 1503 return; 1504} 1505 1506/** 1507 * lpfc_parse_vpd - Parse VPD (Vital Product Data) 1508 * @phba: pointer to lpfc hba data structure. 1509 * @vpd: pointer to the vital product data. 1510 * @len: length of the vital product data in bytes. 1511 * 1512 * This routine parses the Vital Product Data (VPD). The VPD is treated as 1513 * an array of characters. In this routine, the ModelName, ProgramType, and 1514 * ModelDesc, etc. fields of the phba data structure will be populated. 1515 * 1516 * Return codes 1517 * 0 - pointer to the VPD passed in is NULL 1518 * 1 - success 1519 **/ 1520int 1521lpfc_parse_vpd(struct lpfc_hba *phba, uint8_t *vpd, int len) 1522{ 1523 uint8_t lenlo, lenhi; 1524 int Length; 1525 int i, j; 1526 int finished = 0; 1527 int index = 0; 1528 1529 if (!vpd) 1530 return 0; 1531 1532 /* Vital Product */ 1533 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 1534 "0455 Vital Product Data: x%x x%x x%x x%x\n", 1535 (uint32_t) vpd[0], (uint32_t) vpd[1], (uint32_t) vpd[2], 1536 (uint32_t) vpd[3]); 1537 while (!finished && (index < (len - 4))) { 1538 switch (vpd[index]) { 1539 case 0x82: 1540 case 0x91: 1541 index += 1; 1542 lenlo = vpd[index]; 1543 index += 1; 1544 lenhi = vpd[index]; 1545 index += 1; 1546 i = ((((unsigned short)lenhi) << 8) + lenlo); 1547 index += i; 1548 break; 1549 case 0x90: 1550 index += 1; 1551 lenlo = vpd[index]; 1552 index += 1; 1553 lenhi = vpd[index]; 1554 index += 1; 1555 Length = ((((unsigned short)lenhi) << 8) + lenlo); 1556 if (Length > len - index) 1557 Length = len - index; 1558 while (Length > 0) { 1559 /* Look for Serial Number */ 1560 if ((vpd[index] == 'S') && (vpd[index+1] == 'N')) { 1561 index += 2; 1562 i = vpd[index]; 1563 index += 1; 1564 j = 0; 1565 Length -= (3+i); 1566 while(i--) { 1567 phba->SerialNumber[j++] = vpd[index++]; 1568 if (j == 31) 1569 break; 1570 } 1571 phba->SerialNumber[j] = 0; 1572 continue; 1573 } 1574 else if ((vpd[index] == 'V') && (vpd[index+1] == '1')) { 1575 phba->vpd_flag |= VPD_MODEL_DESC; 1576 index += 2; 1577 i = vpd[index]; 1578 index += 1; 1579 j = 0; 1580 Length -= (3+i); 1581 while(i--) { 1582 phba->ModelDesc[j++] = vpd[index++]; 1583 if (j == 255) 1584 break; 1585 } 1586 phba->ModelDesc[j] = 0; 1587 continue; 1588 } 1589 else if ((vpd[index] == 'V') && (vpd[index+1] == '2')) { 1590 phba->vpd_flag |= VPD_MODEL_NAME; 1591 index += 2; 1592 i = vpd[index]; 1593 index += 1; 1594 j = 0; 1595 Length -= (3+i); 1596 while(i--) { 1597 phba->ModelName[j++] = vpd[index++]; 1598 if (j == 79) 1599 break; 1600 } 1601 phba->ModelName[j] = 0; 1602 continue; 1603 } 1604 else if ((vpd[index] == 'V') && (vpd[index+1] == '3')) { 1605 phba->vpd_flag |= VPD_PROGRAM_TYPE; 1606 index += 2; 1607 i = vpd[index]; 1608 index += 1; 1609 j = 0; 1610 Length -= (3+i); 1611 while(i--) { 1612 phba->ProgramType[j++] = vpd[index++]; 1613 if (j == 255) 1614 break; 1615 } 1616 phba->ProgramType[j] = 0; 1617 continue; 1618 } 1619 else if ((vpd[index] == 'V') && (vpd[index+1] == '4')) { 1620 phba->vpd_flag |= VPD_PORT; 1621 index += 2; 1622 i = vpd[index]; 1623 index += 1; 1624 j = 0; 1625 Length -= (3+i); 1626 while(i--) { 1627 phba->Port[j++] = vpd[index++]; 1628 if (j == 19) 1629 break; 1630 } 1631 phba->Port[j] = 0; 1632 continue; 1633 } 1634 else { 1635 index += 2; 1636 i = vpd[index]; 1637 index += 1; 1638 index += i; 1639 Length -= (3 + i); 1640 } 1641 } 1642 finished = 0; 1643 break; 1644 case 0x78: 1645 finished = 1; 1646 break; 1647 default: 1648 index ++; 1649 break; 1650 } 1651 } 1652 1653 return(1); 1654} 1655 1656/** 1657 * lpfc_get_hba_model_desc - Retrieve HBA device model name and description 1658 * @phba: pointer to lpfc hba data structure. 1659 * @mdp: pointer to the data structure to hold the derived model name. 1660 * @descp: pointer to the data structure to hold the derived description. 1661 * 1662 * This routine retrieves HBA's description based on its registered PCI device 1663 * ID. The @descp passed into this function points to an array of 256 chars. It 1664 * shall be returned with the model name, maximum speed, and the host bus type. 1665 * The @mdp passed into this function points to an array of 80 chars. When the 1666 * function returns, the @mdp will be filled with the model name. 1667 **/ 1668static void 1669lpfc_get_hba_model_desc(struct lpfc_hba *phba, uint8_t *mdp, uint8_t *descp) 1670{ 1671 lpfc_vpd_t *vp; 1672 uint16_t dev_id = phba->pcidev->device; 1673 int max_speed; 1674 int GE = 0; 1675 int oneConnect = 0; /* default is not a oneConnect */ 1676 struct { 1677 char *name; 1678 char *bus; 1679 char *function; 1680 } m = {"<Unknown>", "", ""}; 1681 1682 if (mdp && mdp[0] != '\0' 1683 && descp && descp[0] != '\0') 1684 return; 1685 1686 if (phba->lmt & LMT_10Gb) 1687 max_speed = 10; 1688 else if (phba->lmt & LMT_8Gb) 1689 max_speed = 8; 1690 else if (phba->lmt & LMT_4Gb) 1691 max_speed = 4; 1692 else if (phba->lmt & LMT_2Gb) 1693 max_speed = 2; 1694 else 1695 max_speed = 1; 1696 1697 vp = &phba->vpd; 1698 1699 switch (dev_id) { 1700 case PCI_DEVICE_ID_FIREFLY: 1701 m = (typeof(m)){"LP6000", "PCI", "Fibre Channel Adapter"}; 1702 break; 1703 case PCI_DEVICE_ID_SUPERFLY: 1704 if (vp->rev.biuRev >= 1 && vp->rev.biuRev <= 3) 1705 m = (typeof(m)){"LP7000", "PCI", 1706 "Fibre Channel Adapter"}; 1707 else 1708 m = (typeof(m)){"LP7000E", "PCI", 1709 "Fibre Channel Adapter"}; 1710 break; 1711 case PCI_DEVICE_ID_DRAGONFLY: 1712 m = (typeof(m)){"LP8000", "PCI", 1713 "Fibre Channel Adapter"}; 1714 break; 1715 case PCI_DEVICE_ID_CENTAUR: 1716 if (FC_JEDEC_ID(vp->rev.biuRev) == CENTAUR_2G_JEDEC_ID) 1717 m = (typeof(m)){"LP9002", "PCI", 1718 "Fibre Channel Adapter"}; 1719 else 1720 m = (typeof(m)){"LP9000", "PCI", 1721 "Fibre Channel Adapter"}; 1722 break; 1723 case PCI_DEVICE_ID_RFLY: 1724 m = (typeof(m)){"LP952", "PCI", 1725 "Fibre Channel Adapter"}; 1726 break; 1727 case PCI_DEVICE_ID_PEGASUS: 1728 m = (typeof(m)){"LP9802", "PCI-X", 1729 "Fibre Channel Adapter"}; 1730 break; 1731 case PCI_DEVICE_ID_THOR: 1732 m = (typeof(m)){"LP10000", "PCI-X", 1733 "Fibre Channel Adapter"}; 1734 break; 1735 case PCI_DEVICE_ID_VIPER: 1736 m = (typeof(m)){"LPX1000", "PCI-X", 1737 "Fibre Channel Adapter"}; 1738 break; 1739 case PCI_DEVICE_ID_PFLY: 1740 m = (typeof(m)){"LP982", "PCI-X", 1741 "Fibre Channel Adapter"}; 1742 break; 1743 case PCI_DEVICE_ID_TFLY: 1744 m = (typeof(m)){"LP1050", "PCI-X", 1745 "Fibre Channel Adapter"}; 1746 break; 1747 case PCI_DEVICE_ID_HELIOS: 1748 m = (typeof(m)){"LP11000", "PCI-X2", 1749 "Fibre Channel Adapter"}; 1750 break; 1751 case PCI_DEVICE_ID_HELIOS_SCSP: 1752 m = (typeof(m)){"LP11000-SP", "PCI-X2", 1753 "Fibre Channel Adapter"}; 1754 break; 1755 case PCI_DEVICE_ID_HELIOS_DCSP: 1756 m = (typeof(m)){"LP11002-SP", "PCI-X2", 1757 "Fibre Channel Adapter"}; 1758 break; 1759 case PCI_DEVICE_ID_NEPTUNE: 1760 m = (typeof(m)){"LPe1000", "PCIe", "Fibre Channel Adapter"}; 1761 break; 1762 case PCI_DEVICE_ID_NEPTUNE_SCSP: 1763 m = (typeof(m)){"LPe1000-SP", "PCIe", "Fibre Channel Adapter"}; 1764 break; 1765 case PCI_DEVICE_ID_NEPTUNE_DCSP: 1766 m = (typeof(m)){"LPe1002-SP", "PCIe", "Fibre Channel Adapter"}; 1767 break; 1768 case PCI_DEVICE_ID_BMID: 1769 m = (typeof(m)){"LP1150", "PCI-X2", "Fibre Channel Adapter"}; 1770 break; 1771 case PCI_DEVICE_ID_BSMB: 1772 m = (typeof(m)){"LP111", "PCI-X2", "Fibre Channel Adapter"}; 1773 break; 1774 case PCI_DEVICE_ID_ZEPHYR: 1775 m = (typeof(m)){"LPe11000", "PCIe", "Fibre Channel Adapter"}; 1776 break; 1777 case PCI_DEVICE_ID_ZEPHYR_SCSP: 1778 m = (typeof(m)){"LPe11000", "PCIe", "Fibre Channel Adapter"}; 1779 break; 1780 case PCI_DEVICE_ID_ZEPHYR_DCSP: 1781 m = (typeof(m)){"LP2105", "PCIe", "FCoE Adapter"}; 1782 GE = 1; 1783 break; 1784 case PCI_DEVICE_ID_ZMID: 1785 m = (typeof(m)){"LPe1150", "PCIe", "Fibre Channel Adapter"}; 1786 break; 1787 case PCI_DEVICE_ID_ZSMB: 1788 m = (typeof(m)){"LPe111", "PCIe", "Fibre Channel Adapter"}; 1789 break; 1790 case PCI_DEVICE_ID_LP101: 1791 m = (typeof(m)){"LP101", "PCI-X", "Fibre Channel Adapter"}; 1792 break; 1793 case PCI_DEVICE_ID_LP10000S: 1794 m = (typeof(m)){"LP10000-S", "PCI", "Fibre Channel Adapter"}; 1795 break; 1796 case PCI_DEVICE_ID_LP11000S: 1797 m = (typeof(m)){"LP11000-S", "PCI-X2", "Fibre Channel Adapter"}; 1798 break; 1799 case PCI_DEVICE_ID_LPE11000S: 1800 m = (typeof(m)){"LPe11000-S", "PCIe", "Fibre Channel Adapter"}; 1801 break; 1802 case PCI_DEVICE_ID_SAT: 1803 m = (typeof(m)){"LPe12000", "PCIe", "Fibre Channel Adapter"}; 1804 break; 1805 case PCI_DEVICE_ID_SAT_MID: 1806 m = (typeof(m)){"LPe1250", "PCIe", "Fibre Channel Adapter"}; 1807 break; 1808 case PCI_DEVICE_ID_SAT_SMB: 1809 m = (typeof(m)){"LPe121", "PCIe", "Fibre Channel Adapter"}; 1810 break; 1811 case PCI_DEVICE_ID_SAT_DCSP: 1812 m = (typeof(m)){"LPe12002-SP", "PCIe", "Fibre Channel Adapter"}; 1813 break; 1814 case PCI_DEVICE_ID_SAT_SCSP: 1815 m = (typeof(m)){"LPe12000-SP", "PCIe", "Fibre Channel Adapter"}; 1816 break; 1817 case PCI_DEVICE_ID_SAT_S: 1818 m = (typeof(m)){"LPe12000-S", "PCIe", "Fibre Channel Adapter"}; 1819 break; 1820 case PCI_DEVICE_ID_HORNET: 1821 m = (typeof(m)){"LP21000", "PCIe", "FCoE Adapter"}; 1822 GE = 1; 1823 break; 1824 case PCI_DEVICE_ID_PROTEUS_VF: 1825 m = (typeof(m)){"LPev12000", "PCIe IOV", 1826 "Fibre Channel Adapter"}; 1827 break; 1828 case PCI_DEVICE_ID_PROTEUS_PF: 1829 m = (typeof(m)){"LPev12000", "PCIe IOV", 1830 "Fibre Channel Adapter"}; 1831 break; 1832 case PCI_DEVICE_ID_PROTEUS_S: 1833 m = (typeof(m)){"LPemv12002-S", "PCIe IOV", 1834 "Fibre Channel Adapter"}; 1835 break; 1836 case PCI_DEVICE_ID_TIGERSHARK: 1837 oneConnect = 1; 1838 m = (typeof(m)){"OCe10100", "PCIe", "FCoE"}; 1839 break; 1840 case PCI_DEVICE_ID_TOMCAT: 1841 oneConnect = 1; 1842 m = (typeof(m)){"OCe11100", "PCIe", "FCoE"}; 1843 break; 1844 case PCI_DEVICE_ID_FALCON: 1845 m = (typeof(m)){"LPSe12002-ML1-E", "PCIe", 1846 "EmulexSecure Fibre"}; 1847 break; 1848 case PCI_DEVICE_ID_BALIUS: 1849 m = (typeof(m)){"LPVe12002", "PCIe Shared I/O", 1850 "Fibre Channel Adapter"}; 1851 break; 1852 default: 1853 m = (typeof(m)){"Unknown", "", ""}; 1854 break; 1855 } 1856 1857 if (mdp && mdp[0] == '\0') 1858 snprintf(mdp, 79,"%s", m.name); 1859 /* oneConnect hba requires special processing, they are all initiators 1860 * and we put the port number on the end 1861 */ 1862 if (descp && descp[0] == '\0') { 1863 if (oneConnect) 1864 snprintf(descp, 255, 1865 "Emulex OneConnect %s, %s Initiator, Port %s", 1866 m.name, m.function, 1867 phba->Port); 1868 else 1869 snprintf(descp, 255, 1870 "Emulex %s %d%s %s %s", 1871 m.name, max_speed, (GE) ? "GE" : "Gb", 1872 m.bus, m.function); 1873 } 1874} 1875 1876/** 1877 * lpfc_post_buffer - Post IOCB(s) with DMA buffer descriptor(s) to a IOCB ring 1878 * @phba: pointer to lpfc hba data structure. 1879 * @pring: pointer to a IOCB ring. 1880 * @cnt: the number of IOCBs to be posted to the IOCB ring. 1881 * 1882 * This routine posts a given number of IOCBs with the associated DMA buffer 1883 * descriptors specified by the cnt argument to the given IOCB ring. 1884 * 1885 * Return codes 1886 * The number of IOCBs NOT able to be posted to the IOCB ring. 1887 **/ 1888int 1889lpfc_post_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, int cnt) 1890{ 1891 IOCB_t *icmd; 1892 struct lpfc_iocbq *iocb; 1893 struct lpfc_dmabuf *mp1, *mp2; 1894 1895 cnt += pring->missbufcnt; 1896 1897 /* While there are buffers to post */ 1898 while (cnt > 0) { 1899 /* Allocate buffer for command iocb */ 1900 iocb = lpfc_sli_get_iocbq(phba); 1901 if (iocb == NULL) { 1902 pring->missbufcnt = cnt; 1903 return cnt; 1904 } 1905 icmd = &iocb->iocb; 1906 1907 /* 2 buffers can be posted per command */ 1908 /* Allocate buffer to post */ 1909 mp1 = kmalloc(sizeof (struct lpfc_dmabuf), GFP_KERNEL); 1910 if (mp1) 1911 mp1->virt = lpfc_mbuf_alloc(phba, MEM_PRI, &mp1->phys); 1912 if (!mp1 || !mp1->virt) { 1913 kfree(mp1); 1914 lpfc_sli_release_iocbq(phba, iocb); 1915 pring->missbufcnt = cnt; 1916 return cnt; 1917 } 1918 1919 INIT_LIST_HEAD(&mp1->list); 1920 /* Allocate buffer to post */ 1921 if (cnt > 1) { 1922 mp2 = kmalloc(sizeof (struct lpfc_dmabuf), GFP_KERNEL); 1923 if (mp2) 1924 mp2->virt = lpfc_mbuf_alloc(phba, MEM_PRI, 1925 &mp2->phys); 1926 if (!mp2 || !mp2->virt) { 1927 kfree(mp2); 1928 lpfc_mbuf_free(phba, mp1->virt, mp1->phys); 1929 kfree(mp1); 1930 lpfc_sli_release_iocbq(phba, iocb); 1931 pring->missbufcnt = cnt; 1932 return cnt; 1933 } 1934 1935 INIT_LIST_HEAD(&mp2->list); 1936 } else { 1937 mp2 = NULL; 1938 } 1939 1940 icmd->un.cont64[0].addrHigh = putPaddrHigh(mp1->phys); 1941 icmd->un.cont64[0].addrLow = putPaddrLow(mp1->phys); 1942 icmd->un.cont64[0].tus.f.bdeSize = FCELSSIZE; 1943 icmd->ulpBdeCount = 1; 1944 cnt--; 1945 if (mp2) { 1946 icmd->un.cont64[1].addrHigh = putPaddrHigh(mp2->phys); 1947 icmd->un.cont64[1].addrLow = putPaddrLow(mp2->phys); 1948 icmd->un.cont64[1].tus.f.bdeSize = FCELSSIZE; 1949 cnt--; 1950 icmd->ulpBdeCount = 2; 1951 } 1952 1953 icmd->ulpCommand = CMD_QUE_RING_BUF64_CN; 1954 icmd->ulpLe = 1; 1955 1956 if (lpfc_sli_issue_iocb(phba, pring->ringno, iocb, 0) == 1957 IOCB_ERROR) { 1958 lpfc_mbuf_free(phba, mp1->virt, mp1->phys); 1959 kfree(mp1); 1960 cnt++; 1961 if (mp2) { 1962 lpfc_mbuf_free(phba, mp2->virt, mp2->phys); 1963 kfree(mp2); 1964 cnt++; 1965 } 1966 lpfc_sli_release_iocbq(phba, iocb); 1967 pring->missbufcnt = cnt; 1968 return cnt; 1969 } 1970 lpfc_sli_ringpostbuf_put(phba, pring, mp1); 1971 if (mp2) 1972 lpfc_sli_ringpostbuf_put(phba, pring, mp2); 1973 } 1974 pring->missbufcnt = 0; 1975 return 0; 1976} 1977 1978/** 1979 * lpfc_post_rcv_buf - Post the initial receive IOCB buffers to ELS ring 1980 * @phba: pointer to lpfc hba data structure. 1981 * 1982 * This routine posts initial receive IOCB buffers to the ELS ring. The 1983 * current number of initial IOCB buffers specified by LPFC_BUF_RING0 is 1984 * set to 64 IOCBs. 1985 * 1986 * Return codes 1987 * 0 - success (currently always success) 1988 **/ 1989static int 1990lpfc_post_rcv_buf(struct lpfc_hba *phba) 1991{ 1992 struct lpfc_sli *psli = &phba->sli; 1993 1994 /* Ring 0, ELS / CT buffers */ 1995 lpfc_post_buffer(phba, &psli->ring[LPFC_ELS_RING], LPFC_BUF_RING0); 1996 /* Ring 2 - FCP no buffers needed */ 1997 1998 return 0; 1999} 2000 2001#define S(N,V) (((V)<<(N))|((V)>>(32-(N)))) 2002 2003/** 2004 * lpfc_sha_init - Set up initial array of hash table entries 2005 * @HashResultPointer: pointer to an array as hash table. 2006 * 2007 * This routine sets up the initial values to the array of hash table entries 2008 * for the LC HBAs. 2009 **/ 2010static void 2011lpfc_sha_init(uint32_t * HashResultPointer) 2012{ 2013 HashResultPointer[0] = 0x67452301; 2014 HashResultPointer[1] = 0xEFCDAB89; 2015 HashResultPointer[2] = 0x98BADCFE; 2016 HashResultPointer[3] = 0x10325476; 2017 HashResultPointer[4] = 0xC3D2E1F0; 2018} 2019 2020/** 2021 * lpfc_sha_iterate - Iterate initial hash table with the working hash table 2022 * @HashResultPointer: pointer to an initial/result hash table. 2023 * @HashWorkingPointer: pointer to an working hash table. 2024 * 2025 * This routine iterates an initial hash table pointed by @HashResultPointer 2026 * with the values from the working hash table pointeed by @HashWorkingPointer. 2027 * The results are putting back to the initial hash table, returned through 2028 * the @HashResultPointer as the result hash table. 2029 **/ 2030static void 2031lpfc_sha_iterate(uint32_t * HashResultPointer, uint32_t * HashWorkingPointer) 2032{ 2033 int t; 2034 uint32_t TEMP; 2035 uint32_t A, B, C, D, E; 2036 t = 16; 2037 do { 2038 HashWorkingPointer[t] = 2039 S(1, 2040 HashWorkingPointer[t - 3] ^ HashWorkingPointer[t - 2041 8] ^ 2042 HashWorkingPointer[t - 14] ^ HashWorkingPointer[t - 16]); 2043 } while (++t <= 79); 2044 t = 0; 2045 A = HashResultPointer[0]; 2046 B = HashResultPointer[1]; 2047 C = HashResultPointer[2]; 2048 D = HashResultPointer[3]; 2049 E = HashResultPointer[4]; 2050 2051 do { 2052 if (t < 20) { 2053 TEMP = ((B & C) | ((~B) & D)) + 0x5A827999; 2054 } else if (t < 40) { 2055 TEMP = (B ^ C ^ D) + 0x6ED9EBA1; 2056 } else if (t < 60) { 2057 TEMP = ((B & C) | (B & D) | (C & D)) + 0x8F1BBCDC; 2058 } else { 2059 TEMP = (B ^ C ^ D) + 0xCA62C1D6; 2060 } 2061 TEMP += S(5, A) + E + HashWorkingPointer[t]; 2062 E = D; 2063 D = C; 2064 C = S(30, B); 2065 B = A; 2066 A = TEMP; 2067 } while (++t <= 79); 2068 2069 HashResultPointer[0] += A; 2070 HashResultPointer[1] += B; 2071 HashResultPointer[2] += C; 2072 HashResultPointer[3] += D; 2073 HashResultPointer[4] += E; 2074 2075} 2076 2077/** 2078 * lpfc_challenge_key - Create challenge key based on WWPN of the HBA 2079 * @RandomChallenge: pointer to the entry of host challenge random number array. 2080 * @HashWorking: pointer to the entry of the working hash array. 2081 * 2082 * This routine calculates the working hash array referred by @HashWorking 2083 * from the challenge random numbers associated with the host, referred by 2084 * @RandomChallenge. The result is put into the entry of the working hash 2085 * array and returned by reference through @HashWorking. 2086 **/ 2087static void 2088lpfc_challenge_key(uint32_t * RandomChallenge, uint32_t * HashWorking) 2089{ 2090 *HashWorking = (*RandomChallenge ^ *HashWorking); 2091} 2092 2093/** 2094 * lpfc_hba_init - Perform special handling for LC HBA initialization 2095 * @phba: pointer to lpfc hba data structure. 2096 * @hbainit: pointer to an array of unsigned 32-bit integers. 2097 * 2098 * This routine performs the special handling for LC HBA initialization. 2099 **/ 2100void 2101lpfc_hba_init(struct lpfc_hba *phba, uint32_t *hbainit) 2102{ 2103 int t; 2104 uint32_t *HashWorking; 2105 uint32_t *pwwnn = (uint32_t *) phba->wwnn; 2106 2107 HashWorking = kcalloc(80, sizeof(uint32_t), GFP_KERNEL); 2108 if (!HashWorking) 2109 return; 2110 2111 HashWorking[0] = HashWorking[78] = *pwwnn++; 2112 HashWorking[1] = HashWorking[79] = *pwwnn; 2113 2114 for (t = 0; t < 7; t++) 2115 lpfc_challenge_key(phba->RandomData + t, HashWorking + t); 2116 2117 lpfc_sha_init(hbainit); 2118 lpfc_sha_iterate(hbainit, HashWorking); 2119 kfree(HashWorking); 2120} 2121 2122/** 2123 * lpfc_cleanup - Performs vport cleanups before deleting a vport 2124 * @vport: pointer to a virtual N_Port data structure. 2125 * 2126 * This routine performs the necessary cleanups before deleting the @vport. 2127 * It invokes the discovery state machine to perform necessary state 2128 * transitions and to release the ndlps associated with the @vport. Note, 2129 * the physical port is treated as @vport 0. 2130 **/ 2131void 2132lpfc_cleanup(struct lpfc_vport *vport) 2133{ 2134 struct lpfc_hba *phba = vport->phba; 2135 struct lpfc_nodelist *ndlp, *next_ndlp; 2136 int i = 0; 2137 2138 if (phba->link_state > LPFC_LINK_DOWN) 2139 lpfc_port_link_failure(vport); 2140 2141 list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) { 2142 if (!NLP_CHK_NODE_ACT(ndlp)) { 2143 ndlp = lpfc_enable_node(vport, ndlp, 2144 NLP_STE_UNUSED_NODE); 2145 if (!ndlp) 2146 continue; 2147 spin_lock_irq(&phba->ndlp_lock); 2148 NLP_SET_FREE_REQ(ndlp); 2149 spin_unlock_irq(&phba->ndlp_lock); 2150 /* Trigger the release of the ndlp memory */ 2151 lpfc_nlp_put(ndlp); 2152 continue; 2153 } 2154 spin_lock_irq(&phba->ndlp_lock); 2155 if (NLP_CHK_FREE_REQ(ndlp)) { 2156 /* The ndlp should not be in memory free mode already */ 2157 spin_unlock_irq(&phba->ndlp_lock); 2158 continue; 2159 } else 2160 /* Indicate request for freeing ndlp memory */ 2161 NLP_SET_FREE_REQ(ndlp); 2162 spin_unlock_irq(&phba->ndlp_lock); 2163 2164 if (vport->port_type != LPFC_PHYSICAL_PORT && 2165 ndlp->nlp_DID == Fabric_DID) { 2166 /* Just free up ndlp with Fabric_DID for vports */ 2167 lpfc_nlp_put(ndlp); 2168 continue; 2169 } 2170 2171 if (ndlp->nlp_type & NLP_FABRIC) 2172 lpfc_disc_state_machine(vport, ndlp, NULL, 2173 NLP_EVT_DEVICE_RECOVERY); 2174 2175 lpfc_disc_state_machine(vport, ndlp, NULL, 2176 NLP_EVT_DEVICE_RM); 2177 2178 } 2179 2180 /* At this point, ALL ndlp's should be gone 2181 * because of the previous NLP_EVT_DEVICE_RM. 2182 * Lets wait for this to happen, if needed. 2183 */ 2184 while (!list_empty(&vport->fc_nodes)) { 2185 if (i++ > 3000) { 2186 lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY, 2187 "0233 Nodelist not empty\n"); 2188 list_for_each_entry_safe(ndlp, next_ndlp, 2189 &vport->fc_nodes, nlp_listp) { 2190 lpfc_printf_vlog(ndlp->vport, KERN_ERR, 2191 LOG_NODE, 2192 "0282 did:x%x ndlp:x%p " 2193 "usgmap:x%x refcnt:%d\n", 2194 ndlp->nlp_DID, (void *)ndlp, 2195 ndlp->nlp_usg_map, 2196 atomic_read( 2197 &ndlp->kref.refcount)); 2198 } 2199 break; 2200 } 2201 2202 /* Wait for any activity on ndlps to settle */ 2203 msleep(10); 2204 } 2205} 2206 2207/** 2208 * lpfc_stop_vport_timers - Stop all the timers associated with a vport 2209 * @vport: pointer to a virtual N_Port data structure. 2210 * 2211 * This routine stops all the timers associated with a @vport. This function 2212 * is invoked before disabling or deleting a @vport. Note that the physical 2213 * port is treated as @vport 0. 2214 **/ 2215void 2216lpfc_stop_vport_timers(struct lpfc_vport *vport) 2217{ 2218 del_timer_sync(&vport->els_tmofunc); 2219 del_timer_sync(&vport->fc_fdmitmo); 2220 lpfc_can_disctmo(vport); 2221 return; 2222} 2223 2224/** 2225 * __lpfc_sli4_stop_fcf_redisc_wait_timer - Stop FCF rediscovery wait timer 2226 * @phba: pointer to lpfc hba data structure. 2227 * 2228 * This routine stops the SLI4 FCF rediscover wait timer if it's on. The 2229 * caller of this routine should already hold the host lock. 2230 **/ 2231void 2232__lpfc_sli4_stop_fcf_redisc_wait_timer(struct lpfc_hba *phba) 2233{ 2234 /* Clear pending FCF rediscovery wait and failover in progress flags */ 2235 phba->fcf.fcf_flag &= ~(FCF_REDISC_PEND | 2236 FCF_DEAD_DISC | 2237 FCF_ACVL_DISC); 2238 /* Now, try to stop the timer */ 2239 del_timer(&phba->fcf.redisc_wait); 2240} 2241 2242/** 2243 * lpfc_sli4_stop_fcf_redisc_wait_timer - Stop FCF rediscovery wait timer 2244 * @phba: pointer to lpfc hba data structure. 2245 * 2246 * This routine stops the SLI4 FCF rediscover wait timer if it's on. It 2247 * checks whether the FCF rediscovery wait timer is pending with the host 2248 * lock held before proceeding with disabling the timer and clearing the 2249 * wait timer pendig flag. 2250 **/ 2251void 2252lpfc_sli4_stop_fcf_redisc_wait_timer(struct lpfc_hba *phba) 2253{ 2254 spin_lock_irq(&phba->hbalock); 2255 if (!(phba->fcf.fcf_flag & FCF_REDISC_PEND)) { 2256 /* FCF rediscovery timer already fired or stopped */ 2257 spin_unlock_irq(&phba->hbalock); 2258 return; 2259 } 2260 __lpfc_sli4_stop_fcf_redisc_wait_timer(phba); 2261 spin_unlock_irq(&phba->hbalock); 2262} 2263 2264/** 2265 * lpfc_stop_hba_timers - Stop all the timers associated with an HBA 2266 * @phba: pointer to lpfc hba data structure. 2267 * 2268 * This routine stops all the timers associated with a HBA. This function is 2269 * invoked before either putting a HBA offline or unloading the driver. 2270 **/ 2271void 2272lpfc_stop_hba_timers(struct lpfc_hba *phba) 2273{ 2274 lpfc_stop_vport_timers(phba->pport); 2275 del_timer_sync(&phba->sli.mbox_tmo); 2276 del_timer_sync(&phba->fabric_block_timer); 2277 del_timer_sync(&phba->eratt_poll); 2278 del_timer_sync(&phba->hb_tmofunc); 2279 phba->hb_outstanding = 0; 2280 2281 switch (phba->pci_dev_grp) { 2282 case LPFC_PCI_DEV_LP: 2283 /* Stop any LightPulse device specific driver timers */ 2284 del_timer_sync(&phba->fcp_poll_timer); 2285 break; 2286 case LPFC_PCI_DEV_OC: 2287 /* Stop any OneConnect device sepcific driver timers */ 2288 lpfc_sli4_stop_fcf_redisc_wait_timer(phba); 2289 break; 2290 default: 2291 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 2292 "0297 Invalid device group (x%x)\n", 2293 phba->pci_dev_grp); 2294 break; 2295 } 2296 return; 2297} 2298 2299/** 2300 * lpfc_block_mgmt_io - Mark a HBA's management interface as blocked 2301 * @phba: pointer to lpfc hba data structure. 2302 * 2303 * This routine marks a HBA's management interface as blocked. Once the HBA's 2304 * management interface is marked as blocked, all the user space access to 2305 * the HBA, whether they are from sysfs interface or libdfc interface will 2306 * all be blocked. The HBA is set to block the management interface when the 2307 * driver prepares the HBA interface for online or offline. 2308 **/ 2309static void 2310lpfc_block_mgmt_io(struct lpfc_hba * phba) 2311{ 2312 unsigned long iflag; 2313 uint8_t actcmd = MBX_HEARTBEAT; 2314 unsigned long timeout; 2315 2316 2317 spin_lock_irqsave(&phba->hbalock, iflag); 2318 phba->sli.sli_flag |= LPFC_BLOCK_MGMT_IO; 2319 if (phba->sli.mbox_active) 2320 actcmd = phba->sli.mbox_active->u.mb.mbxCommand; 2321 spin_unlock_irqrestore(&phba->hbalock, iflag); 2322 /* Determine how long we might wait for the active mailbox 2323 * command to be gracefully completed by firmware. 2324 */ 2325 timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, actcmd) * 1000) + 2326 jiffies; 2327 /* Wait for the outstnading mailbox command to complete */ 2328 while (phba->sli.mbox_active) { 2329 /* Check active mailbox complete status every 2ms */ 2330 msleep(2); 2331 if (time_after(jiffies, timeout)) { 2332 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 2333 "2813 Mgmt IO is Blocked %x " 2334 "- mbox cmd %x still active\n", 2335 phba->sli.sli_flag, actcmd); 2336 break; 2337 } 2338 } 2339} 2340 2341/** 2342 * lpfc_online - Initialize and bring a HBA online 2343 * @phba: pointer to lpfc hba data structure. 2344 * 2345 * This routine initializes the HBA and brings a HBA online. During this 2346 * process, the management interface is blocked to prevent user space access 2347 * to the HBA interfering with the driver initialization. 2348 * 2349 * Return codes 2350 * 0 - successful 2351 * 1 - failed 2352 **/ 2353int 2354lpfc_online(struct lpfc_hba *phba) 2355{ 2356 struct lpfc_vport *vport; 2357 struct lpfc_vport **vports; 2358 int i; 2359 2360 if (!phba) 2361 return 0; 2362 vport = phba->pport; 2363 2364 if (!(vport->fc_flag & FC_OFFLINE_MODE)) 2365 return 0; 2366 2367 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 2368 "0458 Bring Adapter online\n"); 2369 2370 lpfc_block_mgmt_io(phba); 2371 2372 if (!lpfc_sli_queue_setup(phba)) { 2373 lpfc_unblock_mgmt_io(phba); 2374 return 1; 2375 } 2376 2377 if (phba->sli_rev == LPFC_SLI_REV4) { 2378 if (lpfc_sli4_hba_setup(phba)) { /* Initialize SLI4 HBA */ 2379 lpfc_unblock_mgmt_io(phba); 2380 return 1; 2381 } 2382 } else { 2383 if (lpfc_sli_hba_setup(phba)) { /* Initialize SLI2/SLI3 HBA */ 2384 lpfc_unblock_mgmt_io(phba); 2385 return 1; 2386 } 2387 } 2388 2389 vports = lpfc_create_vport_work_array(phba); 2390 if (vports != NULL) 2391 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) { 2392 struct Scsi_Host *shost; 2393 shost = lpfc_shost_from_vport(vports[i]); 2394 spin_lock_irq(shost->host_lock); 2395 vports[i]->fc_flag &= ~FC_OFFLINE_MODE; 2396 if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) 2397 vports[i]->fc_flag |= FC_VPORT_NEEDS_REG_VPI; 2398 if (phba->sli_rev == LPFC_SLI_REV4) 2399 vports[i]->fc_flag |= FC_VPORT_NEEDS_INIT_VPI; 2400 spin_unlock_irq(shost->host_lock); 2401 } 2402 lpfc_destroy_vport_work_array(phba, vports); 2403 2404 lpfc_unblock_mgmt_io(phba); 2405 return 0; 2406} 2407 2408/** 2409 * lpfc_unblock_mgmt_io - Mark a HBA's management interface to be not blocked 2410 * @phba: pointer to lpfc hba data structure. 2411 * 2412 * This routine marks a HBA's management interface as not blocked. Once the 2413 * HBA's management interface is marked as not blocked, all the user space 2414 * access to the HBA, whether they are from sysfs interface or libdfc 2415 * interface will be allowed. The HBA is set to block the management interface 2416 * when the driver prepares the HBA interface for online or offline and then 2417 * set to unblock the management interface afterwards. 2418 **/ 2419void 2420lpfc_unblock_mgmt_io(struct lpfc_hba * phba) 2421{ 2422 unsigned long iflag; 2423 2424 spin_lock_irqsave(&phba->hbalock, iflag); 2425 phba->sli.sli_flag &= ~LPFC_BLOCK_MGMT_IO; 2426 spin_unlock_irqrestore(&phba->hbalock, iflag); 2427} 2428 2429/** 2430 * lpfc_offline_prep - Prepare a HBA to be brought offline 2431 * @phba: pointer to lpfc hba data structure. 2432 * 2433 * This routine is invoked to prepare a HBA to be brought offline. It performs 2434 * unregistration login to all the nodes on all vports and flushes the mailbox 2435 * queue to make it ready to be brought offline. 2436 **/ 2437void 2438lpfc_offline_prep(struct lpfc_hba * phba) 2439{ 2440 struct lpfc_vport *vport = phba->pport; 2441 struct lpfc_nodelist *ndlp, *next_ndlp; 2442 struct lpfc_vport **vports; 2443 struct Scsi_Host *shost; 2444 int i; 2445 2446 if (vport->fc_flag & FC_OFFLINE_MODE) 2447 return; 2448 2449 lpfc_block_mgmt_io(phba); 2450 2451 lpfc_linkdown(phba); 2452 2453 /* Issue an unreg_login to all nodes on all vports */ 2454 vports = lpfc_create_vport_work_array(phba); 2455 if (vports != NULL) { 2456 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) { 2457 if (vports[i]->load_flag & FC_UNLOADING) 2458 continue; 2459 shost = lpfc_shost_from_vport(vports[i]); 2460 spin_lock_irq(shost->host_lock); 2461 vports[i]->vpi_state &= ~LPFC_VPI_REGISTERED; 2462 vports[i]->fc_flag |= FC_VPORT_NEEDS_REG_VPI; 2463 vports[i]->fc_flag &= ~FC_VFI_REGISTERED; 2464 spin_unlock_irq(shost->host_lock); 2465 2466 shost = lpfc_shost_from_vport(vports[i]); 2467 list_for_each_entry_safe(ndlp, next_ndlp, 2468 &vports[i]->fc_nodes, 2469 nlp_listp) { 2470 if (!NLP_CHK_NODE_ACT(ndlp)) 2471 continue; 2472 if (ndlp->nlp_state == NLP_STE_UNUSED_NODE) 2473 continue; 2474 if (ndlp->nlp_type & NLP_FABRIC) { 2475 lpfc_disc_state_machine(vports[i], ndlp, 2476 NULL, NLP_EVT_DEVICE_RECOVERY); 2477 lpfc_disc_state_machine(vports[i], ndlp, 2478 NULL, NLP_EVT_DEVICE_RM); 2479 } 2480 spin_lock_irq(shost->host_lock); 2481 ndlp->nlp_flag &= ~NLP_NPR_ADISC; 2482 spin_unlock_irq(shost->host_lock); 2483 lpfc_unreg_rpi(vports[i], ndlp); 2484 } 2485 } 2486 } 2487 lpfc_destroy_vport_work_array(phba, vports); 2488 2489 lpfc_sli_mbox_sys_shutdown(phba); 2490} 2491 2492/** 2493 * lpfc_offline - Bring a HBA offline 2494 * @phba: pointer to lpfc hba data structure. 2495 * 2496 * This routine actually brings a HBA offline. It stops all the timers 2497 * associated with the HBA, brings down the SLI layer, and eventually 2498 * marks the HBA as in offline state for the upper layer protocol. 2499 **/ 2500void 2501lpfc_offline(struct lpfc_hba *phba) 2502{ 2503 struct Scsi_Host *shost; 2504 struct lpfc_vport **vports; 2505 int i; 2506 2507 if (phba->pport->fc_flag & FC_OFFLINE_MODE) 2508 return; 2509 2510 /* stop port and all timers associated with this hba */ 2511 lpfc_stop_port(phba); 2512 vports = lpfc_create_vport_work_array(phba); 2513 if (vports != NULL) 2514 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) 2515 lpfc_stop_vport_timers(vports[i]); 2516 lpfc_destroy_vport_work_array(phba, vports); 2517 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 2518 "0460 Bring Adapter offline\n"); 2519 /* Bring down the SLI Layer and cleanup. The HBA is offline 2520 now. */ 2521 lpfc_sli_hba_down(phba); 2522 spin_lock_irq(&phba->hbalock); 2523 phba->work_ha = 0; 2524 spin_unlock_irq(&phba->hbalock); 2525 vports = lpfc_create_vport_work_array(phba); 2526 if (vports != NULL) 2527 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) { 2528 shost = lpfc_shost_from_vport(vports[i]); 2529 spin_lock_irq(shost->host_lock); 2530 vports[i]->work_port_events = 0; 2531 vports[i]->fc_flag |= FC_OFFLINE_MODE; 2532 spin_unlock_irq(shost->host_lock); 2533 } 2534 lpfc_destroy_vport_work_array(phba, vports); 2535} 2536 2537/** 2538 * lpfc_scsi_free - Free all the SCSI buffers and IOCBs from driver lists 2539 * @phba: pointer to lpfc hba data structure. 2540 * 2541 * This routine is to free all the SCSI buffers and IOCBs from the driver 2542 * list back to kernel. It is called from lpfc_pci_remove_one to free 2543 * the internal resources before the device is removed from the system. 2544 * 2545 * Return codes 2546 * 0 - successful (for now, it always returns 0) 2547 **/ 2548static int 2549lpfc_scsi_free(struct lpfc_hba *phba) 2550{ 2551 struct lpfc_scsi_buf *sb, *sb_next; 2552 struct lpfc_iocbq *io, *io_next; 2553 2554 spin_lock_irq(&phba->hbalock); 2555 /* Release all the lpfc_scsi_bufs maintained by this host. */ 2556 spin_lock(&phba->scsi_buf_list_lock); 2557 list_for_each_entry_safe(sb, sb_next, &phba->lpfc_scsi_buf_list, list) { 2558 list_del(&sb->list); 2559 pci_pool_free(phba->lpfc_scsi_dma_buf_pool, sb->data, 2560 sb->dma_handle); 2561 kfree(sb); 2562 phba->total_scsi_bufs--; 2563 } 2564 spin_unlock(&phba->scsi_buf_list_lock); 2565 2566 /* Release all the lpfc_iocbq entries maintained by this host. */ 2567 list_for_each_entry_safe(io, io_next, &phba->lpfc_iocb_list, list) { 2568 list_del(&io->list); 2569 kfree(io); 2570 phba->total_iocbq_bufs--; 2571 } 2572 spin_unlock_irq(&phba->hbalock); 2573 return 0; 2574} 2575 2576/** 2577 * lpfc_create_port - Create an FC port 2578 * @phba: pointer to lpfc hba data structure. 2579 * @instance: a unique integer ID to this FC port. 2580 * @dev: pointer to the device data structure. 2581 * 2582 * This routine creates a FC port for the upper layer protocol. The FC port 2583 * can be created on top of either a physical port or a virtual port provided 2584 * by the HBA. This routine also allocates a SCSI host data structure (shost) 2585 * and associates the FC port created before adding the shost into the SCSI 2586 * layer. 2587 * 2588 * Return codes 2589 * @vport - pointer to the virtual N_Port data structure. 2590 * NULL - port create failed. 2591 **/ 2592struct lpfc_vport * 2593lpfc_create_port(struct lpfc_hba *phba, int instance, struct device *dev) 2594{ 2595 struct lpfc_vport *vport; 2596 struct Scsi_Host *shost; 2597 int error = 0; 2598 2599 if (dev != &phba->pcidev->dev) 2600 shost = scsi_host_alloc(&lpfc_vport_template, 2601 sizeof(struct lpfc_vport)); 2602 else 2603 shost = scsi_host_alloc(&lpfc_template, 2604 sizeof(struct lpfc_vport)); 2605 if (!shost) 2606 goto out; 2607 2608 vport = (struct lpfc_vport *) shost->hostdata; 2609 vport->phba = phba; 2610 vport->load_flag |= FC_LOADING; 2611 vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI; 2612 vport->fc_rscn_flush = 0; 2613 2614 lpfc_get_vport_cfgparam(vport); 2615 shost->unique_id = instance; 2616 shost->max_id = LPFC_MAX_TARGET; 2617 shost->max_lun = vport->cfg_max_luns; 2618 shost->this_id = -1; 2619 shost->max_cmd_len = 16; 2620 if (phba->sli_rev == LPFC_SLI_REV4) { 2621 shost->dma_boundary = 2622 phba->sli4_hba.pc_sli4_params.sge_supp_len-1; 2623 shost->sg_tablesize = phba->cfg_sg_seg_cnt; 2624 } 2625 2626 /* 2627 * Set initial can_queue value since 0 is no longer supported and 2628 * scsi_add_host will fail. This will be adjusted later based on the 2629 * max xri value determined in hba setup. 2630 */ 2631 shost->can_queue = phba->cfg_hba_queue_depth - 10; 2632 if (dev != &phba->pcidev->dev) { 2633 shost->transportt = lpfc_vport_transport_template; 2634 vport->port_type = LPFC_NPIV_PORT; 2635 } else { 2636 shost->transportt = lpfc_transport_template; 2637 vport->port_type = LPFC_PHYSICAL_PORT; 2638 } 2639 2640 /* Initialize all internally managed lists. */ 2641 INIT_LIST_HEAD(&vport->fc_nodes); 2642 INIT_LIST_HEAD(&vport->rcv_buffer_list); 2643 spin_lock_init(&vport->work_port_lock); 2644 2645 init_timer(&vport->fc_disctmo); 2646 vport->fc_disctmo.function = lpfc_disc_timeout; 2647 vport->fc_disctmo.data = (unsigned long)vport; 2648 2649 init_timer(&vport->fc_fdmitmo); 2650 vport->fc_fdmitmo.function = lpfc_fdmi_tmo; 2651 vport->fc_fdmitmo.data = (unsigned long)vport; 2652 2653 init_timer(&vport->els_tmofunc); 2654 vport->els_tmofunc.function = lpfc_els_timeout; 2655 vport->els_tmofunc.data = (unsigned long)vport; 2656 error = scsi_add_host_with_dma(shost, dev, &phba->pcidev->dev); 2657 if (error) 2658 goto out_put_shost; 2659 2660 spin_lock_irq(&phba->hbalock); 2661 list_add_tail(&vport->listentry, &phba->port_list); 2662 spin_unlock_irq(&phba->hbalock); 2663 return vport; 2664 2665out_put_shost: 2666 scsi_host_put(shost); 2667out: 2668 return NULL; 2669} 2670 2671/** 2672 * destroy_port - destroy an FC port 2673 * @vport: pointer to an lpfc virtual N_Port data structure. 2674 * 2675 * This routine destroys a FC port from the upper layer protocol. All the 2676 * resources associated with the port are released. 2677 **/ 2678void 2679destroy_port(struct lpfc_vport *vport) 2680{ 2681 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 2682 struct lpfc_hba *phba = vport->phba; 2683 2684 lpfc_debugfs_terminate(vport); 2685 fc_remove_host(shost); 2686 scsi_remove_host(shost); 2687 2688 spin_lock_irq(&phba->hbalock); 2689 list_del_init(&vport->listentry); 2690 spin_unlock_irq(&phba->hbalock); 2691 2692 lpfc_cleanup(vport); 2693 return; 2694} 2695 2696/** 2697 * lpfc_get_instance - Get a unique integer ID 2698 * 2699 * This routine allocates a unique integer ID from lpfc_hba_index pool. It 2700 * uses the kernel idr facility to perform the task. 2701 * 2702 * Return codes: 2703 * instance - a unique integer ID allocated as the new instance. 2704 * -1 - lpfc get instance failed. 2705 **/ 2706int 2707lpfc_get_instance(void) 2708{ 2709 int instance = 0; 2710 2711 /* Assign an unused number */ 2712 if (!idr_pre_get(&lpfc_hba_index, GFP_KERNEL)) 2713 return -1; 2714 if (idr_get_new(&lpfc_hba_index, NULL, &instance)) 2715 return -1; 2716 return instance; 2717} 2718 2719/** 2720 * lpfc_scan_finished - method for SCSI layer to detect whether scan is done 2721 * @shost: pointer to SCSI host data structure. 2722 * @time: elapsed time of the scan in jiffies. 2723 * 2724 * This routine is called by the SCSI layer with a SCSI host to determine 2725 * whether the scan host is finished. 2726 * 2727 * Note: there is no scan_start function as adapter initialization will have 2728 * asynchronously kicked off the link initialization. 2729 * 2730 * Return codes 2731 * 0 - SCSI host scan is not over yet. 2732 * 1 - SCSI host scan is over. 2733 **/ 2734int lpfc_scan_finished(struct Scsi_Host *shost, unsigned long time) 2735{ 2736 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; 2737 struct lpfc_hba *phba = vport->phba; 2738 int stat = 0; 2739 2740 spin_lock_irq(shost->host_lock); 2741 2742 if (vport->load_flag & FC_UNLOADING) { 2743 stat = 1; 2744 goto finished; 2745 } 2746 if (time >= 30 * HZ) { 2747 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 2748 "0461 Scanning longer than 30 " 2749 "seconds. Continuing initialization\n"); 2750 stat = 1; 2751 goto finished; 2752 } 2753 if (time >= 15 * HZ && phba->link_state <= LPFC_LINK_DOWN) { 2754 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 2755 "0465 Link down longer than 15 " 2756 "seconds. Continuing initialization\n"); 2757 stat = 1; 2758 goto finished; 2759 } 2760 2761 if (vport->port_state != LPFC_VPORT_READY) 2762 goto finished; 2763 if (vport->num_disc_nodes || vport->fc_prli_sent) 2764 goto finished; 2765 if (vport->fc_map_cnt == 0 && time < 2 * HZ) 2766 goto finished; 2767 if ((phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) != 0) 2768 goto finished; 2769 2770 stat = 1; 2771 2772finished: 2773 spin_unlock_irq(shost->host_lock); 2774 return stat; 2775} 2776 2777/** 2778 * lpfc_host_attrib_init - Initialize SCSI host attributes on a FC port 2779 * @shost: pointer to SCSI host data structure. 2780 * 2781 * This routine initializes a given SCSI host attributes on a FC port. The 2782 * SCSI host can be either on top of a physical port or a virtual port. 2783 **/ 2784void lpfc_host_attrib_init(struct Scsi_Host *shost) 2785{ 2786 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; 2787 struct lpfc_hba *phba = vport->phba; 2788 /* 2789 * Set fixed host attributes. Must done after lpfc_sli_hba_setup(). 2790 */ 2791 2792 fc_host_node_name(shost) = wwn_to_u64(vport->fc_nodename.u.wwn); 2793 fc_host_port_name(shost) = wwn_to_u64(vport->fc_portname.u.wwn); 2794 fc_host_supported_classes(shost) = FC_COS_CLASS3; 2795 2796 memset(fc_host_supported_fc4s(shost), 0, 2797 sizeof(fc_host_supported_fc4s(shost))); 2798 fc_host_supported_fc4s(shost)[2] = 1; 2799 fc_host_supported_fc4s(shost)[7] = 1; 2800 2801 lpfc_vport_symbolic_node_name(vport, fc_host_symbolic_name(shost), 2802 sizeof fc_host_symbolic_name(shost)); 2803 2804 fc_host_supported_speeds(shost) = 0; 2805 if (phba->lmt & LMT_10Gb) 2806 fc_host_supported_speeds(shost) |= FC_PORTSPEED_10GBIT; 2807 if (phba->lmt & LMT_8Gb) 2808 fc_host_supported_speeds(shost) |= FC_PORTSPEED_8GBIT; 2809 if (phba->lmt & LMT_4Gb) 2810 fc_host_supported_speeds(shost) |= FC_PORTSPEED_4GBIT; 2811 if (phba->lmt & LMT_2Gb) 2812 fc_host_supported_speeds(shost) |= FC_PORTSPEED_2GBIT; 2813 if (phba->lmt & LMT_1Gb) 2814 fc_host_supported_speeds(shost) |= FC_PORTSPEED_1GBIT; 2815 2816 fc_host_maxframe_size(shost) = 2817 (((uint32_t) vport->fc_sparam.cmn.bbRcvSizeMsb & 0x0F) << 8) | 2818 (uint32_t) vport->fc_sparam.cmn.bbRcvSizeLsb; 2819 2820 /* This value is also unchanging */ 2821 memset(fc_host_active_fc4s(shost), 0, 2822 sizeof(fc_host_active_fc4s(shost))); 2823 fc_host_active_fc4s(shost)[2] = 1; 2824 fc_host_active_fc4s(shost)[7] = 1; 2825 2826 fc_host_max_npiv_vports(shost) = phba->max_vpi; 2827 spin_lock_irq(shost->host_lock); 2828 vport->load_flag &= ~FC_LOADING; 2829 spin_unlock_irq(shost->host_lock); 2830} 2831 2832/** 2833 * lpfc_stop_port_s3 - Stop SLI3 device port 2834 * @phba: pointer to lpfc hba data structure. 2835 * 2836 * This routine is invoked to stop an SLI3 device port, it stops the device 2837 * from generating interrupts and stops the device driver's timers for the 2838 * device. 2839 **/ 2840static void 2841lpfc_stop_port_s3(struct lpfc_hba *phba) 2842{ 2843 /* Clear all interrupt enable conditions */ 2844 writel(0, phba->HCregaddr); 2845 readl(phba->HCregaddr); /* flush */ 2846 /* Clear all pending interrupts */ 2847 writel(0xffffffff, phba->HAregaddr); 2848 readl(phba->HAregaddr); /* flush */ 2849 2850 /* Reset some HBA SLI setup states */ 2851 lpfc_stop_hba_timers(phba); 2852 phba->pport->work_port_events = 0; 2853} 2854 2855/** 2856 * lpfc_stop_port_s4 - Stop SLI4 device port 2857 * @phba: pointer to lpfc hba data structure. 2858 * 2859 * This routine is invoked to stop an SLI4 device port, it stops the device 2860 * from generating interrupts and stops the device driver's timers for the 2861 * device. 2862 **/ 2863static void 2864lpfc_stop_port_s4(struct lpfc_hba *phba) 2865{ 2866 /* Reset some HBA SLI4 setup states */ 2867 lpfc_stop_hba_timers(phba); 2868 phba->pport->work_port_events = 0; 2869 phba->sli4_hba.intr_enable = 0; 2870} 2871 2872/** 2873 * lpfc_stop_port - Wrapper function for stopping hba port 2874 * @phba: Pointer to HBA context object. 2875 * 2876 * This routine wraps the actual SLI3 or SLI4 hba stop port routine from 2877 * the API jump table function pointer from the lpfc_hba struct. 2878 **/ 2879void 2880lpfc_stop_port(struct lpfc_hba *phba) 2881{ 2882 phba->lpfc_stop_port(phba); 2883} 2884 2885/** 2886 * lpfc_sli4_remove_dflt_fcf - Remove the driver default fcf record from the port. 2887 * @phba: pointer to lpfc hba data structure. 2888 * 2889 * This routine is invoked to remove the driver default fcf record from 2890 * the port. This routine currently acts on FCF Index 0. 2891 * 2892 **/ 2893void 2894lpfc_sli_remove_dflt_fcf(struct lpfc_hba *phba) 2895{ 2896 int rc = 0; 2897 LPFC_MBOXQ_t *mboxq; 2898 struct lpfc_mbx_del_fcf_tbl_entry *del_fcf_record; 2899 uint32_t mbox_tmo, req_len; 2900 uint32_t shdr_status, shdr_add_status; 2901 2902 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 2903 if (!mboxq) { 2904 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 2905 "2020 Failed to allocate mbox for ADD_FCF cmd\n"); 2906 return; 2907 } 2908 2909 req_len = sizeof(struct lpfc_mbx_del_fcf_tbl_entry) - 2910 sizeof(struct lpfc_sli4_cfg_mhdr); 2911 rc = lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_FCOE, 2912 LPFC_MBOX_OPCODE_FCOE_DELETE_FCF, 2913 req_len, LPFC_SLI4_MBX_EMBED); 2914 /* 2915 * In phase 1, there is a single FCF index, 0. In phase2, the driver 2916 * supports multiple FCF indices. 2917 */ 2918 del_fcf_record = &mboxq->u.mqe.un.del_fcf_entry; 2919 bf_set(lpfc_mbx_del_fcf_tbl_count, del_fcf_record, 1); 2920 bf_set(lpfc_mbx_del_fcf_tbl_index, del_fcf_record, 2921 phba->fcf.current_rec.fcf_indx); 2922 2923 if (!phba->sli4_hba.intr_enable) 2924 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); 2925 else { 2926 mbox_tmo = lpfc_mbox_tmo_val(phba, MBX_SLI4_CONFIG); 2927 rc = lpfc_sli_issue_mbox_wait(phba, mboxq, mbox_tmo); 2928 } 2929 /* The IOCTL status is embedded in the mailbox subheader. */ 2930 shdr_status = bf_get(lpfc_mbox_hdr_status, 2931 &del_fcf_record->header.cfg_shdr.response); 2932 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, 2933 &del_fcf_record->header.cfg_shdr.response); 2934 if (shdr_status || shdr_add_status || rc != MBX_SUCCESS) { 2935 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 2936 "2516 DEL FCF of default FCF Index failed " 2937 "mbx status x%x, status x%x add_status x%x\n", 2938 rc, shdr_status, shdr_add_status); 2939 } 2940 if (rc != MBX_TIMEOUT) 2941 mempool_free(mboxq, phba->mbox_mem_pool); 2942} 2943 2944/** 2945 * lpfc_fcf_redisc_wait_start_timer - Start fcf rediscover wait timer 2946 * @phba: Pointer to hba for which this call is being executed. 2947 * 2948 * This routine starts the timer waiting for the FCF rediscovery to complete. 2949 **/ 2950void 2951lpfc_fcf_redisc_wait_start_timer(struct lpfc_hba *phba) 2952{ 2953 unsigned long fcf_redisc_wait_tmo = 2954 (jiffies + msecs_to_jiffies(LPFC_FCF_REDISCOVER_WAIT_TMO)); 2955 /* Start fcf rediscovery wait period timer */ 2956 mod_timer(&phba->fcf.redisc_wait, fcf_redisc_wait_tmo); 2957 spin_lock_irq(&phba->hbalock); 2958 /* Allow action to new fcf asynchronous event */ 2959 phba->fcf.fcf_flag &= ~(FCF_AVAILABLE | FCF_SCAN_DONE); 2960 /* Mark the FCF rediscovery pending state */ 2961 phba->fcf.fcf_flag |= FCF_REDISC_PEND; 2962 spin_unlock_irq(&phba->hbalock); 2963} 2964 2965/** 2966 * lpfc_sli4_fcf_redisc_wait_tmo - FCF table rediscover wait timeout 2967 * @ptr: Map to lpfc_hba data structure pointer. 2968 * 2969 * This routine is invoked when waiting for FCF table rediscover has been 2970 * timed out. If new FCF record(s) has (have) been discovered during the 2971 * wait period, a new FCF event shall be added to the FCOE async event 2972 * list, and then worker thread shall be waked up for processing from the 2973 * worker thread context. 2974 **/ 2975void 2976lpfc_sli4_fcf_redisc_wait_tmo(unsigned long ptr) 2977{ 2978 struct lpfc_hba *phba = (struct lpfc_hba *)ptr; 2979 2980 /* Don't send FCF rediscovery event if timer cancelled */ 2981 spin_lock_irq(&phba->hbalock); 2982 if (!(phba->fcf.fcf_flag & FCF_REDISC_PEND)) { 2983 spin_unlock_irq(&phba->hbalock); 2984 return; 2985 } 2986 /* Clear FCF rediscovery timer pending flag */ 2987 phba->fcf.fcf_flag &= ~FCF_REDISC_PEND; 2988 /* FCF rediscovery event to worker thread */ 2989 phba->fcf.fcf_flag |= FCF_REDISC_EVT; 2990 spin_unlock_irq(&phba->hbalock); 2991 lpfc_printf_log(phba, KERN_INFO, LOG_FIP, 2992 "2776 FCF rediscover wait timer expired, post " 2993 "a worker thread event for FCF table scan\n"); 2994 /* wake up worker thread */ 2995 lpfc_worker_wake_up(phba); 2996} 2997 2998/** 2999 * lpfc_sli4_fw_cfg_check - Read the firmware config and verify FCoE support 3000 * @phba: pointer to lpfc hba data structure. 3001 * 3002 * This function uses the QUERY_FW_CFG mailbox command to determine if the 3003 * firmware loaded supports FCoE. A return of zero indicates that the mailbox 3004 * was successful and the firmware supports FCoE. Any other return indicates 3005 * a error. It is assumed that this function will be called before interrupts 3006 * are enabled. 3007 **/ 3008static int 3009lpfc_sli4_fw_cfg_check(struct lpfc_hba *phba) 3010{ 3011 int rc = 0; 3012 LPFC_MBOXQ_t *mboxq; 3013 struct lpfc_mbx_query_fw_cfg *query_fw_cfg; 3014 uint32_t length; 3015 uint32_t shdr_status, shdr_add_status; 3016 3017 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 3018 if (!mboxq) { 3019 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 3020 "2621 Failed to allocate mbox for " 3021 "query firmware config cmd\n"); 3022 return -ENOMEM; 3023 } 3024 query_fw_cfg = &mboxq->u.mqe.un.query_fw_cfg; 3025 length = (sizeof(struct lpfc_mbx_query_fw_cfg) - 3026 sizeof(struct lpfc_sli4_cfg_mhdr)); 3027 lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON, 3028 LPFC_MBOX_OPCODE_QUERY_FW_CFG, 3029 length, LPFC_SLI4_MBX_EMBED); 3030 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); 3031 /* The IOCTL status is embedded in the mailbox subheader. */ 3032 shdr_status = bf_get(lpfc_mbox_hdr_status, 3033 &query_fw_cfg->header.cfg_shdr.response); 3034 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, 3035 &query_fw_cfg->header.cfg_shdr.response); 3036 if (shdr_status || shdr_add_status || rc != MBX_SUCCESS) { 3037 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 3038 "2622 Query Firmware Config failed " 3039 "mbx status x%x, status x%x add_status x%x\n", 3040 rc, shdr_status, shdr_add_status); 3041 return -EINVAL; 3042 } 3043 if (!bf_get(lpfc_function_mode_fcoe_i, query_fw_cfg)) { 3044 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 3045 "2623 FCoE Function not supported by firmware. " 3046 "Function mode = %08x\n", 3047 query_fw_cfg->function_mode); 3048 return -EINVAL; 3049 } 3050 if (rc != MBX_TIMEOUT) 3051 mempool_free(mboxq, phba->mbox_mem_pool); 3052 return 0; 3053} 3054 3055/** 3056 * lpfc_sli4_parse_latt_fault - Parse sli4 link-attention link fault code 3057 * @phba: pointer to lpfc hba data structure. 3058 * @acqe_link: pointer to the async link completion queue entry. 3059 * 3060 * This routine is to parse the SLI4 link-attention link fault code and 3061 * translate it into the base driver's read link attention mailbox command 3062 * status. 3063 * 3064 * Return: Link-attention status in terms of base driver's coding. 3065 **/ 3066static uint16_t 3067lpfc_sli4_parse_latt_fault(struct lpfc_hba *phba, 3068 struct lpfc_acqe_link *acqe_link) 3069{ 3070 uint16_t latt_fault; 3071 3072 switch (bf_get(lpfc_acqe_link_fault, acqe_link)) { 3073 case LPFC_ASYNC_LINK_FAULT_NONE: 3074 case LPFC_ASYNC_LINK_FAULT_LOCAL: 3075 case LPFC_ASYNC_LINK_FAULT_REMOTE: 3076 latt_fault = 0; 3077 break; 3078 default: 3079 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 3080 "0398 Invalid link fault code: x%x\n", 3081 bf_get(lpfc_acqe_link_fault, acqe_link)); 3082 latt_fault = MBXERR_ERROR; 3083 break; 3084 } 3085 return latt_fault; 3086} 3087 3088/** 3089 * lpfc_sli4_parse_latt_type - Parse sli4 link attention type 3090 * @phba: pointer to lpfc hba data structure. 3091 * @acqe_link: pointer to the async link completion queue entry. 3092 * 3093 * This routine is to parse the SLI4 link attention type and translate it 3094 * into the base driver's link attention type coding. 3095 * 3096 * Return: Link attention type in terms of base driver's coding. 3097 **/ 3098static uint8_t 3099lpfc_sli4_parse_latt_type(struct lpfc_hba *phba, 3100 struct lpfc_acqe_link *acqe_link) 3101{ 3102 uint8_t att_type; 3103 3104 switch (bf_get(lpfc_acqe_link_status, acqe_link)) { 3105 case LPFC_ASYNC_LINK_STATUS_DOWN: 3106 case LPFC_ASYNC_LINK_STATUS_LOGICAL_DOWN: 3107 att_type = AT_LINK_DOWN; 3108 break; 3109 case LPFC_ASYNC_LINK_STATUS_UP: 3110 /* Ignore physical link up events - wait for logical link up */ 3111 att_type = AT_RESERVED; 3112 break; 3113 case LPFC_ASYNC_LINK_STATUS_LOGICAL_UP: 3114 att_type = AT_LINK_UP; 3115 break; 3116 default: 3117 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 3118 "0399 Invalid link attention type: x%x\n", 3119 bf_get(lpfc_acqe_link_status, acqe_link)); 3120 att_type = AT_RESERVED; 3121 break; 3122 } 3123 return att_type; 3124} 3125 3126/** 3127 * lpfc_sli4_parse_latt_link_speed - Parse sli4 link-attention link speed 3128 * @phba: pointer to lpfc hba data structure. 3129 * @acqe_link: pointer to the async link completion queue entry. 3130 * 3131 * This routine is to parse the SLI4 link-attention link speed and translate 3132 * it into the base driver's link-attention link speed coding. 3133 * 3134 * Return: Link-attention link speed in terms of base driver's coding. 3135 **/ 3136static uint8_t 3137lpfc_sli4_parse_latt_link_speed(struct lpfc_hba *phba, 3138 struct lpfc_acqe_link *acqe_link) 3139{ 3140 uint8_t link_speed; 3141 3142 switch (bf_get(lpfc_acqe_link_speed, acqe_link)) { 3143 case LPFC_ASYNC_LINK_SPEED_ZERO: 3144 link_speed = LA_UNKNW_LINK; 3145 break; 3146 case LPFC_ASYNC_LINK_SPEED_10MBPS: 3147 link_speed = LA_UNKNW_LINK; 3148 break; 3149 case LPFC_ASYNC_LINK_SPEED_100MBPS: 3150 link_speed = LA_UNKNW_LINK; 3151 break; 3152 case LPFC_ASYNC_LINK_SPEED_1GBPS: 3153 link_speed = LA_1GHZ_LINK; 3154 break; 3155 case LPFC_ASYNC_LINK_SPEED_10GBPS: 3156 link_speed = LA_10GHZ_LINK; 3157 break; 3158 default: 3159 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 3160 "0483 Invalid link-attention link speed: x%x\n", 3161 bf_get(lpfc_acqe_link_speed, acqe_link)); 3162 link_speed = LA_UNKNW_LINK; 3163 break; 3164 } 3165 return link_speed; 3166} 3167 3168/** 3169 * lpfc_sli4_async_link_evt - Process the asynchronous link event 3170 * @phba: pointer to lpfc hba data structure. 3171 * @acqe_link: pointer to the async link completion queue entry. 3172 * 3173 * This routine is to handle the SLI4 asynchronous link event. 3174 **/ 3175static void 3176lpfc_sli4_async_link_evt(struct lpfc_hba *phba, 3177 struct lpfc_acqe_link *acqe_link) 3178{ 3179 struct lpfc_dmabuf *mp; 3180 LPFC_MBOXQ_t *pmb; 3181 MAILBOX_t *mb; 3182 READ_LA_VAR *la; 3183 uint8_t att_type; 3184 3185 att_type = lpfc_sli4_parse_latt_type(phba, acqe_link); 3186 if (att_type != AT_LINK_DOWN && att_type != AT_LINK_UP) 3187 return; 3188 phba->fcoe_eventtag = acqe_link->event_tag; 3189 pmb = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 3190 if (!pmb) { 3191 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 3192 "0395 The mboxq allocation failed\n"); 3193 return; 3194 } 3195 mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL); 3196 if (!mp) { 3197 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 3198 "0396 The lpfc_dmabuf allocation failed\n"); 3199 goto out_free_pmb; 3200 } 3201 mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys); 3202 if (!mp->virt) { 3203 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 3204 "0397 The mbuf allocation failed\n"); 3205 goto out_free_dmabuf; 3206 } 3207 3208 /* Cleanup any outstanding ELS commands */ 3209 lpfc_els_flush_all_cmd(phba); 3210 3211 /* Block ELS IOCBs until we have done process link event */ 3212 phba->sli.ring[LPFC_ELS_RING].flag |= LPFC_STOP_IOCB_EVENT; 3213 3214 /* Update link event statistics */ 3215 phba->sli.slistat.link_event++; 3216 3217 /* Create pseudo lpfc_handle_latt mailbox command from link ACQE */ 3218 lpfc_read_la(phba, pmb, mp); 3219 pmb->vport = phba->pport; 3220 3221 /* Parse and translate status field */ 3222 mb = &pmb->u.mb; 3223 mb->mbxStatus = lpfc_sli4_parse_latt_fault(phba, acqe_link); 3224 3225 /* Parse and translate link attention fields */ 3226 la = (READ_LA_VAR *) &pmb->u.mb.un.varReadLA; 3227 la->eventTag = acqe_link->event_tag; 3228 la->attType = att_type; 3229 la->UlnkSpeed = lpfc_sli4_parse_latt_link_speed(phba, acqe_link); 3230 3231 /* Fake the the following irrelvant fields */ 3232 la->topology = TOPOLOGY_PT_PT; 3233 la->granted_AL_PA = 0; 3234 la->il = 0; 3235 la->pb = 0; 3236 la->fa = 0; 3237 la->mm = 0; 3238 3239 /* Keep the link status for extra SLI4 state machine reference */ 3240 phba->sli4_hba.link_state.speed = 3241 bf_get(lpfc_acqe_link_speed, acqe_link); 3242 phba->sli4_hba.link_state.duplex = 3243 bf_get(lpfc_acqe_link_duplex, acqe_link); 3244 phba->sli4_hba.link_state.status = 3245 bf_get(lpfc_acqe_link_status, acqe_link); 3246 phba->sli4_hba.link_state.physical = 3247 bf_get(lpfc_acqe_link_physical, acqe_link); 3248 phba->sli4_hba.link_state.fault = 3249 bf_get(lpfc_acqe_link_fault, acqe_link); 3250 phba->sli4_hba.link_state.logical_speed = 3251 bf_get(lpfc_acqe_qos_link_speed, acqe_link); 3252 3253 /* Invoke the lpfc_handle_latt mailbox command callback function */ 3254 lpfc_mbx_cmpl_read_la(phba, pmb); 3255 3256 return; 3257 3258out_free_dmabuf: 3259 kfree(mp); 3260out_free_pmb: 3261 mempool_free(pmb, phba->mbox_mem_pool); 3262} 3263 3264/** 3265 * lpfc_sli4_perform_vport_cvl - Perform clear virtual link on a vport 3266 * @vport: pointer to vport data structure. 3267 * 3268 * This routine is to perform Clear Virtual Link (CVL) on a vport in 3269 * response to a CVL event. 3270 * 3271 * Return the pointer to the ndlp with the vport if successful, otherwise 3272 * return NULL. 3273 **/ 3274static struct lpfc_nodelist * 3275lpfc_sli4_perform_vport_cvl(struct lpfc_vport *vport) 3276{ 3277 struct lpfc_nodelist *ndlp; 3278 struct Scsi_Host *shost; 3279 struct lpfc_hba *phba; 3280 3281 if (!vport) 3282 return NULL; 3283 phba = vport->phba; 3284 if (!phba) 3285 return NULL; 3286 ndlp = lpfc_findnode_did(vport, Fabric_DID); 3287 if (!ndlp) { 3288 /* Cannot find existing Fabric ndlp, so allocate a new one */ 3289 ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_KERNEL); 3290 if (!ndlp) 3291 return 0; 3292 lpfc_nlp_init(vport, ndlp, Fabric_DID); 3293 /* Set the node type */ 3294 ndlp->nlp_type |= NLP_FABRIC; 3295 /* Put ndlp onto node list */ 3296 lpfc_enqueue_node(vport, ndlp); 3297 } else if (!NLP_CHK_NODE_ACT(ndlp)) { 3298 /* re-setup ndlp without removing from node list */ 3299 ndlp = lpfc_enable_node(vport, ndlp, NLP_STE_UNUSED_NODE); 3300 if (!ndlp) 3301 return 0; 3302 } 3303 if (phba->pport->port_state < LPFC_FLOGI) 3304 return NULL; 3305 /* If virtual link is not yet instantiated ignore CVL */ 3306 if ((vport != phba->pport) && (vport->port_state < LPFC_FDISC)) 3307 return NULL; 3308 shost = lpfc_shost_from_vport(vport); 3309 if (!shost) 3310 return NULL; 3311 lpfc_linkdown_port(vport); 3312 lpfc_cleanup_pending_mbox(vport); 3313 spin_lock_irq(shost->host_lock); 3314 vport->fc_flag |= FC_VPORT_CVL_RCVD; 3315 spin_unlock_irq(shost->host_lock); 3316 3317 return ndlp; 3318} 3319 3320/** 3321 * lpfc_sli4_perform_all_vport_cvl - Perform clear virtual link on all vports 3322 * @vport: pointer to lpfc hba data structure. 3323 * 3324 * This routine is to perform Clear Virtual Link (CVL) on all vports in 3325 * response to a FCF dead event. 3326 **/ 3327static void 3328lpfc_sli4_perform_all_vport_cvl(struct lpfc_hba *phba) 3329{ 3330 struct lpfc_vport **vports; 3331 int i; 3332 3333 vports = lpfc_create_vport_work_array(phba); 3334 if (vports) 3335 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) 3336 lpfc_sli4_perform_vport_cvl(vports[i]); 3337 lpfc_destroy_vport_work_array(phba, vports); 3338} 3339 3340/** 3341 * lpfc_sli4_async_fcoe_evt - Process the asynchronous fcoe event 3342 * @phba: pointer to lpfc hba data structure. 3343 * @acqe_link: pointer to the async fcoe completion queue entry. 3344 * 3345 * This routine is to handle the SLI4 asynchronous fcoe event. 3346 **/ 3347static void 3348lpfc_sli4_async_fcoe_evt(struct lpfc_hba *phba, 3349 struct lpfc_acqe_fcoe *acqe_fcoe) 3350{ 3351 uint8_t event_type = bf_get(lpfc_acqe_fcoe_event_type, acqe_fcoe); 3352 int rc; 3353 struct lpfc_vport *vport; 3354 struct lpfc_nodelist *ndlp; 3355 struct Scsi_Host *shost; 3356 int active_vlink_present; 3357 struct lpfc_vport **vports; 3358 int i; 3359 3360 phba->fc_eventTag = acqe_fcoe->event_tag; 3361 phba->fcoe_eventtag = acqe_fcoe->event_tag; 3362 switch (event_type) { 3363 case LPFC_FCOE_EVENT_TYPE_NEW_FCF: 3364 case LPFC_FCOE_EVENT_TYPE_FCF_PARAM_MOD: 3365 if (event_type == LPFC_FCOE_EVENT_TYPE_NEW_FCF) 3366 lpfc_printf_log(phba, KERN_ERR, LOG_FIP | 3367 LOG_DISCOVERY, 3368 "2546 New FCF found event: " 3369 "evt_tag:x%x, fcf_index:x%x\n", 3370 acqe_fcoe->event_tag, 3371 acqe_fcoe->index); 3372 else 3373 lpfc_printf_log(phba, KERN_WARNING, LOG_FIP | 3374 LOG_DISCOVERY, 3375 "2788 FCF parameter modified event: " 3376 "evt_tag:x%x, fcf_index:x%x\n", 3377 acqe_fcoe->event_tag, 3378 acqe_fcoe->index); 3379 if (phba->fcf.fcf_flag & FCF_DISCOVERY) { 3380 /* 3381 * During period of FCF discovery, read the FCF 3382 * table record indexed by the event to update 3383 * FCF round robin failover eligible FCF bmask. 3384 */ 3385 lpfc_printf_log(phba, KERN_INFO, LOG_FIP | 3386 LOG_DISCOVERY, 3387 "2779 Read new FCF record with " 3388 "fcf_index:x%x for updating FCF " 3389 "round robin failover bmask\n", 3390 acqe_fcoe->index); 3391 rc = lpfc_sli4_read_fcf_rec(phba, acqe_fcoe->index); 3392 } 3393 3394 /* If the FCF discovery is in progress, do nothing. */ 3395 spin_lock_irq(&phba->hbalock); 3396 if (phba->hba_flag & FCF_DISC_INPROGRESS) { 3397 spin_unlock_irq(&phba->hbalock); 3398 break; 3399 } 3400 /* If fast FCF failover rescan event is pending, do nothing */ 3401 if (phba->fcf.fcf_flag & FCF_REDISC_EVT) { 3402 spin_unlock_irq(&phba->hbalock); 3403 break; 3404 } 3405 3406 /* If the FCF has been in discovered state, do nothing. */ 3407 if (phba->fcf.fcf_flag & FCF_SCAN_DONE) { 3408 spin_unlock_irq(&phba->hbalock); 3409 break; 3410 } 3411 spin_unlock_irq(&phba->hbalock); 3412 3413 /* Otherwise, scan the entire FCF table and re-discover SAN */ 3414 lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY, 3415 "2770 Start FCF table scan due to new FCF " 3416 "event: evt_tag:x%x, fcf_index:x%x\n", 3417 acqe_fcoe->event_tag, acqe_fcoe->index); 3418 rc = lpfc_sli4_fcf_scan_read_fcf_rec(phba, 3419 LPFC_FCOE_FCF_GET_FIRST); 3420 if (rc) 3421 lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY, 3422 "2547 Issue FCF scan read FCF mailbox " 3423 "command failed 0x%x\n", rc); 3424 break; 3425 3426 case LPFC_FCOE_EVENT_TYPE_FCF_TABLE_FULL: 3427 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 3428 "2548 FCF Table full count 0x%x tag 0x%x\n", 3429 bf_get(lpfc_acqe_fcoe_fcf_count, acqe_fcoe), 3430 acqe_fcoe->event_tag); 3431 break; 3432 3433 case LPFC_FCOE_EVENT_TYPE_FCF_DEAD: 3434 lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY, 3435 "2549 FCF disconnected from network index 0x%x" 3436 " tag 0x%x\n", acqe_fcoe->index, 3437 acqe_fcoe->event_tag); 3438 /* 3439 * If we are in the middle of FCF failover process, clear 3440 * the corresponding FCF bit in the roundrobin bitmap. 3441 */ 3442 spin_lock_irq(&phba->hbalock); 3443 if (phba->fcf.fcf_flag & FCF_DISCOVERY) { 3444 spin_unlock_irq(&phba->hbalock); 3445 /* Update FLOGI FCF failover eligible FCF bmask */ 3446 lpfc_sli4_fcf_rr_index_clear(phba, acqe_fcoe->index); 3447 break; 3448 } 3449 spin_unlock_irq(&phba->hbalock); 3450 3451 /* If the event is not for currently used fcf do nothing */ 3452 if (phba->fcf.current_rec.fcf_indx != acqe_fcoe->index) 3453 break; 3454 3455 /* 3456 * Otherwise, request the port to rediscover the entire FCF 3457 * table for a fast recovery from case that the current FCF 3458 * is no longer valid as we are not in the middle of FCF 3459 * failover process already. 3460 */ 3461 spin_lock_irq(&phba->hbalock); 3462 /* Mark the fast failover process in progress */ 3463 phba->fcf.fcf_flag |= FCF_DEAD_DISC; 3464 spin_unlock_irq(&phba->hbalock); 3465 3466 lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY, 3467 "2771 Start FCF fast failover process due to " 3468 "FCF DEAD event: evt_tag:x%x, fcf_index:x%x " 3469 "\n", acqe_fcoe->event_tag, acqe_fcoe->index); 3470 rc = lpfc_sli4_redisc_fcf_table(phba); 3471 if (rc) { 3472 lpfc_printf_log(phba, KERN_ERR, LOG_FIP | 3473 LOG_DISCOVERY, 3474 "2772 Issue FCF rediscover mabilbox " 3475 "command failed, fail through to FCF " 3476 "dead event\n"); 3477 spin_lock_irq(&phba->hbalock); 3478 phba->fcf.fcf_flag &= ~FCF_DEAD_DISC; 3479 spin_unlock_irq(&phba->hbalock); 3480 /* 3481 * Last resort will fail over by treating this 3482 * as a link down to FCF registration. 3483 */ 3484 lpfc_sli4_fcf_dead_failthrough(phba); 3485 } else { 3486 /* Reset FCF roundrobin bmask for new discovery */ 3487 memset(phba->fcf.fcf_rr_bmask, 0, 3488 sizeof(*phba->fcf.fcf_rr_bmask)); 3489 /* 3490 * Handling fast FCF failover to a DEAD FCF event is 3491 * considered equalivant to receiving CVL to all vports. 3492 */ 3493 lpfc_sli4_perform_all_vport_cvl(phba); 3494 } 3495 break; 3496 case LPFC_FCOE_EVENT_TYPE_CVL: 3497 lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY, 3498 "2718 Clear Virtual Link Received for VPI 0x%x" 3499 " tag 0x%x\n", acqe_fcoe->index, acqe_fcoe->event_tag); 3500 vport = lpfc_find_vport_by_vpid(phba, 3501 acqe_fcoe->index - phba->vpi_base); 3502 ndlp = lpfc_sli4_perform_vport_cvl(vport); 3503 if (!ndlp) 3504 break; 3505 active_vlink_present = 0; 3506 3507 vports = lpfc_create_vport_work_array(phba); 3508 if (vports) { 3509 for (i = 0; i <= phba->max_vports && vports[i] != NULL; 3510 i++) { 3511 if ((!(vports[i]->fc_flag & 3512 FC_VPORT_CVL_RCVD)) && 3513 (vports[i]->port_state > LPFC_FDISC)) { 3514 active_vlink_present = 1; 3515 break; 3516 } 3517 } 3518 lpfc_destroy_vport_work_array(phba, vports); 3519 } 3520 3521 if (active_vlink_present) { 3522 /* 3523 * If there are other active VLinks present, 3524 * re-instantiate the Vlink using FDISC. 3525 */ 3526 mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ); 3527 shost = lpfc_shost_from_vport(vport); 3528 spin_lock_irq(shost->host_lock); 3529 ndlp->nlp_flag |= NLP_DELAY_TMO; 3530 spin_unlock_irq(shost->host_lock); 3531 ndlp->nlp_last_elscmd = ELS_CMD_FDISC; 3532 vport->port_state = LPFC_FDISC; 3533 } else { 3534 /* 3535 * Otherwise, we request port to rediscover 3536 * the entire FCF table for a fast recovery 3537 * from possible case that the current FCF 3538 * is no longer valid if we are not already 3539 * in the FCF failover process. 3540 */ 3541 spin_lock_irq(&phba->hbalock); 3542 if (phba->fcf.fcf_flag & FCF_DISCOVERY) { 3543 spin_unlock_irq(&phba->hbalock); 3544 break; 3545 } 3546 /* Mark the fast failover process in progress */ 3547 phba->fcf.fcf_flag |= FCF_ACVL_DISC; 3548 spin_unlock_irq(&phba->hbalock); 3549 lpfc_printf_log(phba, KERN_INFO, LOG_FIP | 3550 LOG_DISCOVERY, 3551 "2773 Start FCF fast failover due " 3552 "to CVL event: evt_tag:x%x\n", 3553 acqe_fcoe->event_tag); 3554 rc = lpfc_sli4_redisc_fcf_table(phba); 3555 if (rc) { 3556 lpfc_printf_log(phba, KERN_ERR, LOG_FIP | 3557 LOG_DISCOVERY, 3558 "2774 Issue FCF rediscover " 3559 "mabilbox command failed, " 3560 "through to CVL event\n"); 3561 spin_lock_irq(&phba->hbalock); 3562 phba->fcf.fcf_flag &= ~FCF_ACVL_DISC; 3563 spin_unlock_irq(&phba->hbalock); 3564 /* 3565 * Last resort will be re-try on the 3566 * the current registered FCF entry. 3567 */ 3568 lpfc_retry_pport_discovery(phba); 3569 } else 3570 /* 3571 * Reset FCF roundrobin bmask for new 3572 * discovery. 3573 */ 3574 memset(phba->fcf.fcf_rr_bmask, 0, 3575 sizeof(*phba->fcf.fcf_rr_bmask)); 3576 } 3577 break; 3578 default: 3579 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 3580 "0288 Unknown FCoE event type 0x%x event tag " 3581 "0x%x\n", event_type, acqe_fcoe->event_tag); 3582 break; 3583 } 3584} 3585 3586/** 3587 * lpfc_sli4_async_dcbx_evt - Process the asynchronous dcbx event 3588 * @phba: pointer to lpfc hba data structure. 3589 * @acqe_link: pointer to the async dcbx completion queue entry. 3590 * 3591 * This routine is to handle the SLI4 asynchronous dcbx event. 3592 **/ 3593static void 3594lpfc_sli4_async_dcbx_evt(struct lpfc_hba *phba, 3595 struct lpfc_acqe_dcbx *acqe_dcbx) 3596{ 3597 phba->fc_eventTag = acqe_dcbx->event_tag; 3598 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 3599 "0290 The SLI4 DCBX asynchronous event is not " 3600 "handled yet\n"); 3601} 3602 3603/** 3604 * lpfc_sli4_async_grp5_evt - Process the asynchronous group5 event 3605 * @phba: pointer to lpfc hba data structure. 3606 * @acqe_link: pointer to the async grp5 completion queue entry. 3607 * 3608 * This routine is to handle the SLI4 asynchronous grp5 event. A grp5 event 3609 * is an asynchronous notified of a logical link speed change. The Port 3610 * reports the logical link speed in units of 10Mbps. 3611 **/ 3612static void 3613lpfc_sli4_async_grp5_evt(struct lpfc_hba *phba, 3614 struct lpfc_acqe_grp5 *acqe_grp5) 3615{ 3616 uint16_t prev_ll_spd; 3617 3618 phba->fc_eventTag = acqe_grp5->event_tag; 3619 phba->fcoe_eventtag = acqe_grp5->event_tag; 3620 prev_ll_spd = phba->sli4_hba.link_state.logical_speed; 3621 phba->sli4_hba.link_state.logical_speed = 3622 (bf_get(lpfc_acqe_grp5_llink_spd, acqe_grp5)); 3623 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 3624 "2789 GRP5 Async Event: Updating logical link speed " 3625 "from %dMbps to %dMbps\n", (prev_ll_spd * 10), 3626 (phba->sli4_hba.link_state.logical_speed*10)); 3627} 3628 3629/** 3630 * lpfc_sli4_async_event_proc - Process all the pending asynchronous event 3631 * @phba: pointer to lpfc hba data structure. 3632 * 3633 * This routine is invoked by the worker thread to process all the pending 3634 * SLI4 asynchronous events. 3635 **/ 3636void lpfc_sli4_async_event_proc(struct lpfc_hba *phba) 3637{ 3638 struct lpfc_cq_event *cq_event; 3639 3640 /* First, declare the async event has been handled */ 3641 spin_lock_irq(&phba->hbalock); 3642 phba->hba_flag &= ~ASYNC_EVENT; 3643 spin_unlock_irq(&phba->hbalock); 3644 /* Now, handle all the async events */ 3645 while (!list_empty(&phba->sli4_hba.sp_asynce_work_queue)) { 3646 /* Get the first event from the head of the event queue */ 3647 spin_lock_irq(&phba->hbalock); 3648 list_remove_head(&phba->sli4_hba.sp_asynce_work_queue, 3649 cq_event, struct lpfc_cq_event, list); 3650 spin_unlock_irq(&phba->hbalock); 3651 /* Process the asynchronous event */ 3652 switch (bf_get(lpfc_trailer_code, &cq_event->cqe.mcqe_cmpl)) { 3653 case LPFC_TRAILER_CODE_LINK: 3654 lpfc_sli4_async_link_evt(phba, 3655 &cq_event->cqe.acqe_link); 3656 break; 3657 case LPFC_TRAILER_CODE_FCOE: 3658 lpfc_sli4_async_fcoe_evt(phba, 3659 &cq_event->cqe.acqe_fcoe); 3660 break; 3661 case LPFC_TRAILER_CODE_DCBX: 3662 lpfc_sli4_async_dcbx_evt(phba, 3663 &cq_event->cqe.acqe_dcbx); 3664 break; 3665 case LPFC_TRAILER_CODE_GRP5: 3666 lpfc_sli4_async_grp5_evt(phba, 3667 &cq_event->cqe.acqe_grp5); 3668 break; 3669 default: 3670 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 3671 "1804 Invalid asynchrous event code: " 3672 "x%x\n", bf_get(lpfc_trailer_code, 3673 &cq_event->cqe.mcqe_cmpl)); 3674 break; 3675 } 3676 /* Free the completion event processed to the free pool */ 3677 lpfc_sli4_cq_event_release(phba, cq_event); 3678 } 3679} 3680 3681/** 3682 * lpfc_sli4_fcf_redisc_event_proc - Process fcf table rediscovery event 3683 * @phba: pointer to lpfc hba data structure. 3684 * 3685 * This routine is invoked by the worker thread to process FCF table 3686 * rediscovery pending completion event. 3687 **/ 3688void lpfc_sli4_fcf_redisc_event_proc(struct lpfc_hba *phba) 3689{ 3690 int rc; 3691 3692 spin_lock_irq(&phba->hbalock); 3693 /* Clear FCF rediscovery timeout event */ 3694 phba->fcf.fcf_flag &= ~FCF_REDISC_EVT; 3695 /* Clear driver fast failover FCF record flag */ 3696 phba->fcf.failover_rec.flag = 0; 3697 /* Set state for FCF fast failover */ 3698 phba->fcf.fcf_flag |= FCF_REDISC_FOV; 3699 spin_unlock_irq(&phba->hbalock); 3700 3701 /* Scan FCF table from the first entry to re-discover SAN */ 3702 lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY, 3703 "2777 Start FCF table scan after FCF " 3704 "rediscovery quiescent period over\n"); 3705 rc = lpfc_sli4_fcf_scan_read_fcf_rec(phba, LPFC_FCOE_FCF_GET_FIRST); 3706 if (rc) 3707 lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY, 3708 "2747 Issue FCF scan read FCF mailbox " 3709 "command failed 0x%x\n", rc); 3710} 3711 3712/** 3713 * lpfc_api_table_setup - Set up per hba pci-device group func api jump table 3714 * @phba: pointer to lpfc hba data structure. 3715 * @dev_grp: The HBA PCI-Device group number. 3716 * 3717 * This routine is invoked to set up the per HBA PCI-Device group function 3718 * API jump table entries. 3719 * 3720 * Return: 0 if success, otherwise -ENODEV 3721 **/ 3722int 3723lpfc_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp) 3724{ 3725 int rc; 3726 3727 /* Set up lpfc PCI-device group */ 3728 phba->pci_dev_grp = dev_grp; 3729 3730 /* The LPFC_PCI_DEV_OC uses SLI4 */ 3731 if (dev_grp == LPFC_PCI_DEV_OC) 3732 phba->sli_rev = LPFC_SLI_REV4; 3733 3734 /* Set up device INIT API function jump table */ 3735 rc = lpfc_init_api_table_setup(phba, dev_grp); 3736 if (rc) 3737 return -ENODEV; 3738 /* Set up SCSI API function jump table */ 3739 rc = lpfc_scsi_api_table_setup(phba, dev_grp); 3740 if (rc) 3741 return -ENODEV; 3742 /* Set up SLI API function jump table */ 3743 rc = lpfc_sli_api_table_setup(phba, dev_grp); 3744 if (rc) 3745 return -ENODEV; 3746 /* Set up MBOX API function jump table */ 3747 rc = lpfc_mbox_api_table_setup(phba, dev_grp); 3748 if (rc) 3749 return -ENODEV; 3750 3751 return 0; 3752} 3753 3754/** 3755 * lpfc_log_intr_mode - Log the active interrupt mode 3756 * @phba: pointer to lpfc hba data structure. 3757 * @intr_mode: active interrupt mode adopted. 3758 * 3759 * This routine it invoked to log the currently used active interrupt mode 3760 * to the device. 3761 **/ 3762static void lpfc_log_intr_mode(struct lpfc_hba *phba, uint32_t intr_mode) 3763{ 3764 switch (intr_mode) { 3765 case 0: 3766 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 3767 "0470 Enable INTx interrupt mode.\n"); 3768 break; 3769 case 1: 3770 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 3771 "0481 Enabled MSI interrupt mode.\n"); 3772 break; 3773 case 2: 3774 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 3775 "0480 Enabled MSI-X interrupt mode.\n"); 3776 break; 3777 default: 3778 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 3779 "0482 Illegal interrupt mode.\n"); 3780 break; 3781 } 3782 return; 3783} 3784 3785/** 3786 * lpfc_enable_pci_dev - Enable a generic PCI device. 3787 * @phba: pointer to lpfc hba data structure. 3788 * 3789 * This routine is invoked to enable the PCI device that is common to all 3790 * PCI devices. 3791 * 3792 * Return codes 3793 * 0 - successful 3794 * other values - error 3795 **/ 3796static int 3797lpfc_enable_pci_dev(struct lpfc_hba *phba) 3798{ 3799 struct pci_dev *pdev; 3800 int bars; 3801 3802 /* Obtain PCI device reference */ 3803 if (!phba->pcidev) 3804 goto out_error; 3805 else 3806 pdev = phba->pcidev; 3807 /* Select PCI BARs */ 3808 bars = pci_select_bars(pdev, IORESOURCE_MEM); 3809 /* Enable PCI device */ 3810 if (pci_enable_device_mem(pdev)) 3811 goto out_error; 3812 /* Request PCI resource for the device */ 3813 if (pci_request_selected_regions(pdev, bars, LPFC_DRIVER_NAME)) 3814 goto out_disable_device; 3815 /* Set up device as PCI master and save state for EEH */ 3816 pci_set_master(pdev); 3817 pci_try_set_mwi(pdev); 3818 pci_save_state(pdev); 3819 3820 return 0; 3821 3822out_disable_device: 3823 pci_disable_device(pdev); 3824out_error: 3825 return -ENODEV; 3826} 3827 3828/** 3829 * lpfc_disable_pci_dev - Disable a generic PCI device. 3830 * @phba: pointer to lpfc hba data structure. 3831 * 3832 * This routine is invoked to disable the PCI device that is common to all 3833 * PCI devices. 3834 **/ 3835static void 3836lpfc_disable_pci_dev(struct lpfc_hba *phba) 3837{ 3838 struct pci_dev *pdev; 3839 int bars; 3840 3841 /* Obtain PCI device reference */ 3842 if (!phba->pcidev) 3843 return; 3844 else 3845 pdev = phba->pcidev; 3846 /* Select PCI BARs */ 3847 bars = pci_select_bars(pdev, IORESOURCE_MEM); 3848 /* Release PCI resource and disable PCI device */ 3849 pci_release_selected_regions(pdev, bars); 3850 pci_disable_device(pdev); 3851 /* Null out PCI private reference to driver */ 3852 pci_set_drvdata(pdev, NULL); 3853 3854 return; 3855} 3856 3857/** 3858 * lpfc_reset_hba - Reset a hba 3859 * @phba: pointer to lpfc hba data structure. 3860 * 3861 * This routine is invoked to reset a hba device. It brings the HBA 3862 * offline, performs a board restart, and then brings the board back 3863 * online. The lpfc_offline calls lpfc_sli_hba_down which will clean up 3864 * on outstanding mailbox commands. 3865 **/ 3866void 3867lpfc_reset_hba(struct lpfc_hba *phba) 3868{ 3869 /* If resets are disabled then set error state and return. */ 3870 if (!phba->cfg_enable_hba_reset) { 3871 phba->link_state = LPFC_HBA_ERROR; 3872 return; 3873 } 3874 lpfc_offline_prep(phba); 3875 lpfc_offline(phba); 3876 lpfc_sli_brdrestart(phba); 3877 lpfc_online(phba); 3878 lpfc_unblock_mgmt_io(phba); 3879} 3880 3881/** 3882 * lpfc_sli_driver_resource_setup - Setup driver internal resources for SLI3 dev. 3883 * @phba: pointer to lpfc hba data structure. 3884 * 3885 * This routine is invoked to set up the driver internal resources specific to 3886 * support the SLI-3 HBA device it attached to. 3887 * 3888 * Return codes 3889 * 0 - successful 3890 * other values - error 3891 **/ 3892static int 3893lpfc_sli_driver_resource_setup(struct lpfc_hba *phba) 3894{ 3895 struct lpfc_sli *psli; 3896 3897 /* 3898 * Initialize timers used by driver 3899 */ 3900 3901 /* Heartbeat timer */ 3902 init_timer(&phba->hb_tmofunc); 3903 phba->hb_tmofunc.function = lpfc_hb_timeout; 3904 phba->hb_tmofunc.data = (unsigned long)phba; 3905 3906 psli = &phba->sli; 3907 /* MBOX heartbeat timer */ 3908 init_timer(&psli->mbox_tmo); 3909 psli->mbox_tmo.function = lpfc_mbox_timeout; 3910 psli->mbox_tmo.data = (unsigned long) phba; 3911 /* FCP polling mode timer */ 3912 init_timer(&phba->fcp_poll_timer); 3913 phba->fcp_poll_timer.function = lpfc_poll_timeout; 3914 phba->fcp_poll_timer.data = (unsigned long) phba; 3915 /* Fabric block timer */ 3916 init_timer(&phba->fabric_block_timer); 3917 phba->fabric_block_timer.function = lpfc_fabric_block_timeout; 3918 phba->fabric_block_timer.data = (unsigned long) phba; 3919 /* EA polling mode timer */ 3920 init_timer(&phba->eratt_poll); 3921 phba->eratt_poll.function = lpfc_poll_eratt; 3922 phba->eratt_poll.data = (unsigned long) phba; 3923 3924 /* Host attention work mask setup */ 3925 phba->work_ha_mask = (HA_ERATT | HA_MBATT | HA_LATT); 3926 phba->work_ha_mask |= (HA_RXMASK << (LPFC_ELS_RING * 4)); 3927 3928 /* Get all the module params for configuring this host */ 3929 lpfc_get_cfgparam(phba); 3930 if (phba->pcidev->device == PCI_DEVICE_ID_HORNET) { 3931 phba->menlo_flag |= HBA_MENLO_SUPPORT; 3932 /* check for menlo minimum sg count */ 3933 if (phba->cfg_sg_seg_cnt < LPFC_DEFAULT_MENLO_SG_SEG_CNT) 3934 phba->cfg_sg_seg_cnt = LPFC_DEFAULT_MENLO_SG_SEG_CNT; 3935 } 3936 3937 /* 3938 * Since the sg_tablesize is module parameter, the sg_dma_buf_size 3939 * used to create the sg_dma_buf_pool must be dynamically calculated. 3940 * 2 segments are added since the IOCB needs a command and response bde. 3941 */ 3942 phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) + 3943 sizeof(struct fcp_rsp) + 3944 ((phba->cfg_sg_seg_cnt + 2) * sizeof(struct ulp_bde64)); 3945 3946 if (phba->cfg_enable_bg) { 3947 phba->cfg_sg_seg_cnt = LPFC_MAX_SG_SEG_CNT; 3948 phba->cfg_sg_dma_buf_size += 3949 phba->cfg_prot_sg_seg_cnt * sizeof(struct ulp_bde64); 3950 } 3951 3952 /* Also reinitialize the host templates with new values. */ 3953 lpfc_vport_template.sg_tablesize = phba->cfg_sg_seg_cnt; 3954 lpfc_template.sg_tablesize = phba->cfg_sg_seg_cnt; 3955 3956 phba->max_vpi = LPFC_MAX_VPI; 3957 /* This will be set to correct value after config_port mbox */ 3958 phba->max_vports = 0; 3959 3960 /* 3961 * Initialize the SLI Layer to run with lpfc HBAs. 3962 */ 3963 lpfc_sli_setup(phba); 3964 lpfc_sli_queue_setup(phba); 3965 3966 /* Allocate device driver memory */ 3967 if (lpfc_mem_alloc(phba, BPL_ALIGN_SZ)) 3968 return -ENOMEM; 3969 3970 return 0; 3971} 3972 3973/** 3974 * lpfc_sli_driver_resource_unset - Unset drvr internal resources for SLI3 dev 3975 * @phba: pointer to lpfc hba data structure. 3976 * 3977 * This routine is invoked to unset the driver internal resources set up 3978 * specific for supporting the SLI-3 HBA device it attached to. 3979 **/ 3980static void 3981lpfc_sli_driver_resource_unset(struct lpfc_hba *phba) 3982{ 3983 /* Free device driver memory allocated */ 3984 lpfc_mem_free_all(phba); 3985 3986 return; 3987} 3988 3989/** 3990 * lpfc_sli4_driver_resource_setup - Setup drvr internal resources for SLI4 dev 3991 * @phba: pointer to lpfc hba data structure. 3992 * 3993 * This routine is invoked to set up the driver internal resources specific to 3994 * support the SLI-4 HBA device it attached to. 3995 * 3996 * Return codes 3997 * 0 - successful 3998 * other values - error 3999 **/ 4000static int 4001lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba) 4002{ 4003 struct lpfc_sli *psli; 4004 LPFC_MBOXQ_t *mboxq; 4005 int rc, i, hbq_count, buf_size, dma_buf_size, max_buf_size; 4006 uint8_t pn_page[LPFC_MAX_SUPPORTED_PAGES] = {0}; 4007 struct lpfc_mqe *mqe; 4008 int longs; 4009 4010 /* Before proceed, wait for POST done and device ready */ 4011 rc = lpfc_sli4_post_status_check(phba); 4012 if (rc) 4013 return -ENODEV; 4014 4015 /* 4016 * Initialize timers used by driver 4017 */ 4018 4019 /* Heartbeat timer */ 4020 init_timer(&phba->hb_tmofunc); 4021 phba->hb_tmofunc.function = lpfc_hb_timeout; 4022 phba->hb_tmofunc.data = (unsigned long)phba; 4023 4024 psli = &phba->sli; 4025 /* MBOX heartbeat timer */ 4026 init_timer(&psli->mbox_tmo); 4027 psli->mbox_tmo.function = lpfc_mbox_timeout; 4028 psli->mbox_tmo.data = (unsigned long) phba; 4029 /* Fabric block timer */ 4030 init_timer(&phba->fabric_block_timer); 4031 phba->fabric_block_timer.function = lpfc_fabric_block_timeout; 4032 phba->fabric_block_timer.data = (unsigned long) phba; 4033 /* EA polling mode timer */ 4034 init_timer(&phba->eratt_poll); 4035 phba->eratt_poll.function = lpfc_poll_eratt; 4036 phba->eratt_poll.data = (unsigned long) phba; 4037 /* FCF rediscover timer */ 4038 init_timer(&phba->fcf.redisc_wait); 4039 phba->fcf.redisc_wait.function = lpfc_sli4_fcf_redisc_wait_tmo; 4040 phba->fcf.redisc_wait.data = (unsigned long)phba; 4041 4042 /* 4043 * We need to do a READ_CONFIG mailbox command here before 4044 * calling lpfc_get_cfgparam. For VFs this will report the 4045 * MAX_XRI, MAX_VPI, MAX_RPI, MAX_IOCB, and MAX_VFI settings. 4046 * All of the resources allocated 4047 * for this Port are tied to these values. 4048 */ 4049 /* Get all the module params for configuring this host */ 4050 lpfc_get_cfgparam(phba); 4051 phba->max_vpi = LPFC_MAX_VPI; 4052 /* This will be set to correct value after the read_config mbox */ 4053 phba->max_vports = 0; 4054 4055 /* Program the default value of vlan_id and fc_map */ 4056 phba->valid_vlan = 0; 4057 phba->fc_map[0] = LPFC_FCOE_FCF_MAP0; 4058 phba->fc_map[1] = LPFC_FCOE_FCF_MAP1; 4059 phba->fc_map[2] = LPFC_FCOE_FCF_MAP2; 4060 4061 /* 4062 * Since the sg_tablesize is module parameter, the sg_dma_buf_size 4063 * used to create the sg_dma_buf_pool must be dynamically calculated. 4064 * 2 segments are added since the IOCB needs a command and response bde. 4065 * To insure that the scsi sgl does not cross a 4k page boundary only 4066 * sgl sizes of must be a power of 2. 4067 */ 4068 buf_size = (sizeof(struct fcp_cmnd) + sizeof(struct fcp_rsp) + 4069 ((phba->cfg_sg_seg_cnt + 2) * sizeof(struct sli4_sge))); 4070 /* Feature Level 1 hardware is limited to 2 pages */ 4071 if ((bf_get(lpfc_sli_intf_featurelevel1, &phba->sli4_hba.sli_intf) == 4072 LPFC_SLI_INTF_FEATURELEVEL1_1)) 4073 max_buf_size = LPFC_SLI4_FL1_MAX_BUF_SIZE; 4074 else 4075 max_buf_size = LPFC_SLI4_MAX_BUF_SIZE; 4076 for (dma_buf_size = LPFC_SLI4_MIN_BUF_SIZE; 4077 dma_buf_size < max_buf_size && buf_size > dma_buf_size; 4078 dma_buf_size = dma_buf_size << 1) 4079 ; 4080 if (dma_buf_size == max_buf_size) 4081 phba->cfg_sg_seg_cnt = (dma_buf_size - 4082 sizeof(struct fcp_cmnd) - sizeof(struct fcp_rsp) - 4083 (2 * sizeof(struct sli4_sge))) / 4084 sizeof(struct sli4_sge); 4085 phba->cfg_sg_dma_buf_size = dma_buf_size; 4086 4087 /* Initialize buffer queue management fields */ 4088 hbq_count = lpfc_sli_hbq_count(); 4089 for (i = 0; i < hbq_count; ++i) 4090 INIT_LIST_HEAD(&phba->hbqs[i].hbq_buffer_list); 4091 INIT_LIST_HEAD(&phba->rb_pend_list); 4092 phba->hbqs[LPFC_ELS_HBQ].hbq_alloc_buffer = lpfc_sli4_rb_alloc; 4093 phba->hbqs[LPFC_ELS_HBQ].hbq_free_buffer = lpfc_sli4_rb_free; 4094 4095 /* 4096 * Initialize the SLI Layer to run with lpfc SLI4 HBAs. 4097 */ 4098 /* Initialize the Abort scsi buffer list used by driver */ 4099 spin_lock_init(&phba->sli4_hba.abts_scsi_buf_list_lock); 4100 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_scsi_buf_list); 4101 /* This abort list used by worker thread */ 4102 spin_lock_init(&phba->sli4_hba.abts_sgl_list_lock); 4103 4104 /* 4105 * Initialize dirver internal slow-path work queues 4106 */ 4107 4108 /* Driver internel slow-path CQ Event pool */ 4109 INIT_LIST_HEAD(&phba->sli4_hba.sp_cqe_event_pool); 4110 /* Response IOCB work queue list */ 4111 INIT_LIST_HEAD(&phba->sli4_hba.sp_queue_event); 4112 /* Asynchronous event CQ Event work queue list */ 4113 INIT_LIST_HEAD(&phba->sli4_hba.sp_asynce_work_queue); 4114 /* Fast-path XRI aborted CQ Event work queue list */ 4115 INIT_LIST_HEAD(&phba->sli4_hba.sp_fcp_xri_aborted_work_queue); 4116 /* Slow-path XRI aborted CQ Event work queue list */ 4117 INIT_LIST_HEAD(&phba->sli4_hba.sp_els_xri_aborted_work_queue); 4118 /* Receive queue CQ Event work queue list */ 4119 INIT_LIST_HEAD(&phba->sli4_hba.sp_unsol_work_queue); 4120 4121 /* Initialize the driver internal SLI layer lists. */ 4122 lpfc_sli_setup(phba); 4123 lpfc_sli_queue_setup(phba); 4124 4125 /* Allocate device driver memory */ 4126 rc = lpfc_mem_alloc(phba, SGL_ALIGN_SZ); 4127 if (rc) 4128 return -ENOMEM; 4129 4130 /* Create the bootstrap mailbox command */ 4131 rc = lpfc_create_bootstrap_mbox(phba); 4132 if (unlikely(rc)) 4133 goto out_free_mem; 4134 4135 /* Set up the host's endian order with the device. */ 4136 rc = lpfc_setup_endian_order(phba); 4137 if (unlikely(rc)) 4138 goto out_free_bsmbx; 4139 4140 rc = lpfc_sli4_fw_cfg_check(phba); 4141 if (unlikely(rc)) 4142 goto out_free_bsmbx; 4143 4144 /* Set up the hba's configuration parameters. */ 4145 rc = lpfc_sli4_read_config(phba); 4146 if (unlikely(rc)) 4147 goto out_free_bsmbx; 4148 4149 /* Perform a function reset */ 4150 rc = lpfc_pci_function_reset(phba); 4151 if (unlikely(rc)) 4152 goto out_free_bsmbx; 4153 4154 mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, 4155 GFP_KERNEL); 4156 if (!mboxq) { 4157 rc = -ENOMEM; 4158 goto out_free_bsmbx; 4159 } 4160 4161 /* Get the Supported Pages. It is always available. */ 4162 lpfc_supported_pages(mboxq); 4163 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); 4164 if (unlikely(rc)) { 4165 rc = -EIO; 4166 mempool_free(mboxq, phba->mbox_mem_pool); 4167 goto out_free_bsmbx; 4168 } 4169 4170 mqe = &mboxq->u.mqe; 4171 memcpy(&pn_page[0], ((uint8_t *)&mqe->un.supp_pages.word3), 4172 LPFC_MAX_SUPPORTED_PAGES); 4173 for (i = 0; i < LPFC_MAX_SUPPORTED_PAGES; i++) { 4174 switch (pn_page[i]) { 4175 case LPFC_SLI4_PARAMETERS: 4176 phba->sli4_hba.pc_sli4_params.supported = 1; 4177 break; 4178 default: 4179 break; 4180 } 4181 } 4182 4183 /* Read the port's SLI4 Parameters capabilities if supported. */ 4184 if (phba->sli4_hba.pc_sli4_params.supported) 4185 rc = lpfc_pc_sli4_params_get(phba, mboxq); 4186 mempool_free(mboxq, phba->mbox_mem_pool); 4187 if (rc) { 4188 rc = -EIO; 4189 goto out_free_bsmbx; 4190 } 4191 /* Create all the SLI4 queues */ 4192 rc = lpfc_sli4_queue_create(phba); 4193 if (rc) 4194 goto out_free_bsmbx; 4195 4196 /* Create driver internal CQE event pool */ 4197 rc = lpfc_sli4_cq_event_pool_create(phba); 4198 if (rc) 4199 goto out_destroy_queue; 4200 4201 /* Initialize and populate the iocb list per host */ 4202 rc = lpfc_init_sgl_list(phba); 4203 if (rc) { 4204 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 4205 "1400 Failed to initialize sgl list.\n"); 4206 goto out_destroy_cq_event_pool; 4207 } 4208 rc = lpfc_init_active_sgl_array(phba); 4209 if (rc) { 4210 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 4211 "1430 Failed to initialize sgl list.\n"); 4212 goto out_free_sgl_list; 4213 } 4214 4215 rc = lpfc_sli4_init_rpi_hdrs(phba); 4216 if (rc) { 4217 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 4218 "1432 Failed to initialize rpi headers.\n"); 4219 goto out_free_active_sgl; 4220 } 4221 4222 /* Allocate eligible FCF bmask memory for FCF round robin failover */ 4223 longs = (LPFC_SLI4_FCF_TBL_INDX_MAX + BITS_PER_LONG - 1)/BITS_PER_LONG; 4224 phba->fcf.fcf_rr_bmask = kzalloc(longs * sizeof(unsigned long), 4225 GFP_KERNEL); 4226 if (!phba->fcf.fcf_rr_bmask) { 4227 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 4228 "2759 Failed allocate memory for FCF round " 4229 "robin failover bmask\n"); 4230 goto out_remove_rpi_hdrs; 4231 } 4232 4233 phba->sli4_hba.fcp_eq_hdl = kzalloc((sizeof(struct lpfc_fcp_eq_hdl) * 4234 phba->cfg_fcp_eq_count), GFP_KERNEL); 4235 if (!phba->sli4_hba.fcp_eq_hdl) { 4236 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 4237 "2572 Failed allocate memory for fast-path " 4238 "per-EQ handle array\n"); 4239 goto out_free_fcf_rr_bmask; 4240 } 4241 4242 phba->sli4_hba.msix_entries = kzalloc((sizeof(struct msix_entry) * 4243 phba->sli4_hba.cfg_eqn), GFP_KERNEL); 4244 if (!phba->sli4_hba.msix_entries) { 4245 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 4246 "2573 Failed allocate memory for msi-x " 4247 "interrupt vector entries\n"); 4248 goto out_free_fcp_eq_hdl; 4249 } 4250 4251 return rc; 4252 4253out_free_fcp_eq_hdl: 4254 kfree(phba->sli4_hba.fcp_eq_hdl); 4255out_free_fcf_rr_bmask: 4256 kfree(phba->fcf.fcf_rr_bmask); 4257out_remove_rpi_hdrs: 4258 lpfc_sli4_remove_rpi_hdrs(phba); 4259out_free_active_sgl: 4260 lpfc_free_active_sgl(phba); 4261out_free_sgl_list: 4262 lpfc_free_sgl_list(phba); 4263out_destroy_cq_event_pool: 4264 lpfc_sli4_cq_event_pool_destroy(phba); 4265out_destroy_queue: 4266 lpfc_sli4_queue_destroy(phba); 4267out_free_bsmbx: 4268 lpfc_destroy_bootstrap_mbox(phba); 4269out_free_mem: 4270 lpfc_mem_free(phba); 4271 return rc; 4272} 4273 4274/** 4275 * lpfc_sli4_driver_resource_unset - Unset drvr internal resources for SLI4 dev 4276 * @phba: pointer to lpfc hba data structure. 4277 * 4278 * This routine is invoked to unset the driver internal resources set up 4279 * specific for supporting the SLI-4 HBA device it attached to. 4280 **/ 4281static void 4282lpfc_sli4_driver_resource_unset(struct lpfc_hba *phba) 4283{ 4284 struct lpfc_fcf_conn_entry *conn_entry, *next_conn_entry; 4285 4286 /* unregister default FCFI from the HBA */ 4287 lpfc_sli4_fcfi_unreg(phba, phba->fcf.fcfi); 4288 4289 /* Free the default FCR table */ 4290 lpfc_sli_remove_dflt_fcf(phba); 4291 4292 /* Free memory allocated for msi-x interrupt vector entries */ 4293 kfree(phba->sli4_hba.msix_entries); 4294 4295 /* Free memory allocated for fast-path work queue handles */ 4296 kfree(phba->sli4_hba.fcp_eq_hdl); 4297 4298 /* Free the allocated rpi headers. */ 4299 lpfc_sli4_remove_rpi_hdrs(phba); 4300 lpfc_sli4_remove_rpis(phba); 4301 4302 /* Free eligible FCF index bmask */ 4303 kfree(phba->fcf.fcf_rr_bmask); 4304 4305 /* Free the ELS sgl list */ 4306 lpfc_free_active_sgl(phba); 4307 lpfc_free_sgl_list(phba); 4308 4309 /* Free the SCSI sgl management array */ 4310 kfree(phba->sli4_hba.lpfc_scsi_psb_array); 4311 4312 /* Free the SLI4 queues */ 4313 lpfc_sli4_queue_destroy(phba); 4314 4315 /* Free the completion queue EQ event pool */ 4316 lpfc_sli4_cq_event_release_all(phba); 4317 lpfc_sli4_cq_event_pool_destroy(phba); 4318 4319 /* Reset SLI4 HBA FCoE function */ 4320 lpfc_pci_function_reset(phba); 4321 4322 /* Free the bsmbx region. */ 4323 lpfc_destroy_bootstrap_mbox(phba); 4324 4325 /* Free the SLI Layer memory with SLI4 HBAs */ 4326 lpfc_mem_free_all(phba); 4327 4328 /* Free the current connect table */ 4329 list_for_each_entry_safe(conn_entry, next_conn_entry, 4330 &phba->fcf_conn_rec_list, list) { 4331 list_del_init(&conn_entry->list); 4332 kfree(conn_entry); 4333 } 4334 4335 return; 4336} 4337 4338/** 4339 * lpfc_init_api_table_setup - Set up init api fucntion jump table 4340 * @phba: The hba struct for which this call is being executed. 4341 * @dev_grp: The HBA PCI-Device group number. 4342 * 4343 * This routine sets up the device INIT interface API function jump table 4344 * in @phba struct. 4345 * 4346 * Returns: 0 - success, -ENODEV - failure. 4347 **/ 4348int 4349lpfc_init_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp) 4350{ 4351 phba->lpfc_hba_init_link = lpfc_hba_init_link; 4352 phba->lpfc_hba_down_link = lpfc_hba_down_link; 4353 switch (dev_grp) { 4354 case LPFC_PCI_DEV_LP: 4355 phba->lpfc_hba_down_post = lpfc_hba_down_post_s3; 4356 phba->lpfc_handle_eratt = lpfc_handle_eratt_s3; 4357 phba->lpfc_stop_port = lpfc_stop_port_s3; 4358 break; 4359 case LPFC_PCI_DEV_OC: 4360 phba->lpfc_hba_down_post = lpfc_hba_down_post_s4; 4361 phba->lpfc_handle_eratt = lpfc_handle_eratt_s4; 4362 phba->lpfc_stop_port = lpfc_stop_port_s4; 4363 break; 4364 default: 4365 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 4366 "1431 Invalid HBA PCI-device group: 0x%x\n", 4367 dev_grp); 4368 return -ENODEV; 4369 break; 4370 } 4371 return 0; 4372} 4373 4374/** 4375 * lpfc_setup_driver_resource_phase1 - Phase1 etup driver internal resources. 4376 * @phba: pointer to lpfc hba data structure. 4377 * 4378 * This routine is invoked to set up the driver internal resources before the 4379 * device specific resource setup to support the HBA device it attached to. 4380 * 4381 * Return codes 4382 * 0 - successful 4383 * other values - error 4384 **/ 4385static int 4386lpfc_setup_driver_resource_phase1(struct lpfc_hba *phba) 4387{ 4388 /* 4389 * Driver resources common to all SLI revisions 4390 */ 4391 atomic_set(&phba->fast_event_count, 0); 4392 spin_lock_init(&phba->hbalock); 4393 4394 /* Initialize ndlp management spinlock */ 4395 spin_lock_init(&phba->ndlp_lock); 4396 4397 INIT_LIST_HEAD(&phba->port_list); 4398 INIT_LIST_HEAD(&phba->work_list); 4399 init_waitqueue_head(&phba->wait_4_mlo_m_q); 4400 4401 /* Initialize the wait queue head for the kernel thread */ 4402 init_waitqueue_head(&phba->work_waitq); 4403 4404 /* Initialize the scsi buffer list used by driver for scsi IO */ 4405 spin_lock_init(&phba->scsi_buf_list_lock); 4406 INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list); 4407 4408 /* Initialize the fabric iocb list */ 4409 INIT_LIST_HEAD(&phba->fabric_iocb_list); 4410 4411 /* Initialize list to save ELS buffers */ 4412 INIT_LIST_HEAD(&phba->elsbuf); 4413 4414 /* Initialize FCF connection rec list */ 4415 INIT_LIST_HEAD(&phba->fcf_conn_rec_list); 4416 4417 return 0; 4418} 4419 4420/** 4421 * lpfc_setup_driver_resource_phase2 - Phase2 setup driver internal resources. 4422 * @phba: pointer to lpfc hba data structure. 4423 * 4424 * This routine is invoked to set up the driver internal resources after the 4425 * device specific resource setup to support the HBA device it attached to. 4426 * 4427 * Return codes 4428 * 0 - successful 4429 * other values - error 4430 **/ 4431static int 4432lpfc_setup_driver_resource_phase2(struct lpfc_hba *phba) 4433{ 4434 int error; 4435 4436 /* Startup the kernel thread for this host adapter. */ 4437 phba->worker_thread = kthread_run(lpfc_do_work, phba, 4438 "lpfc_worker_%d", phba->brd_no); 4439 if (IS_ERR(phba->worker_thread)) { 4440 error = PTR_ERR(phba->worker_thread); 4441 return error; 4442 } 4443 4444 return 0; 4445} 4446 4447/** 4448 * lpfc_unset_driver_resource_phase2 - Phase2 unset driver internal resources. 4449 * @phba: pointer to lpfc hba data structure. 4450 * 4451 * This routine is invoked to unset the driver internal resources set up after 4452 * the device specific resource setup for supporting the HBA device it 4453 * attached to. 4454 **/ 4455static void 4456lpfc_unset_driver_resource_phase2(struct lpfc_hba *phba) 4457{ 4458 /* Stop kernel worker thread */ 4459 kthread_stop(phba->worker_thread); 4460} 4461 4462/** 4463 * lpfc_free_iocb_list - Free iocb list. 4464 * @phba: pointer to lpfc hba data structure. 4465 * 4466 * This routine is invoked to free the driver's IOCB list and memory. 4467 **/ 4468static void 4469lpfc_free_iocb_list(struct lpfc_hba *phba) 4470{ 4471 struct lpfc_iocbq *iocbq_entry = NULL, *iocbq_next = NULL; 4472 4473 spin_lock_irq(&phba->hbalock); 4474 list_for_each_entry_safe(iocbq_entry, iocbq_next, 4475 &phba->lpfc_iocb_list, list) { 4476 list_del(&iocbq_entry->list); 4477 kfree(iocbq_entry); 4478 phba->total_iocbq_bufs--; 4479 } 4480 spin_unlock_irq(&phba->hbalock); 4481 4482 return; 4483} 4484 4485/** 4486 * lpfc_init_iocb_list - Allocate and initialize iocb list. 4487 * @phba: pointer to lpfc hba data structure. 4488 * 4489 * This routine is invoked to allocate and initizlize the driver's IOCB 4490 * list and set up the IOCB tag array accordingly. 4491 * 4492 * Return codes 4493 * 0 - successful 4494 * other values - error 4495 **/ 4496static int 4497lpfc_init_iocb_list(struct lpfc_hba *phba, int iocb_count) 4498{ 4499 struct lpfc_iocbq *iocbq_entry = NULL; 4500 uint16_t iotag; 4501 int i; 4502 4503 /* Initialize and populate the iocb list per host. */ 4504 INIT_LIST_HEAD(&phba->lpfc_iocb_list); 4505 for (i = 0; i < iocb_count; i++) { 4506 iocbq_entry = kzalloc(sizeof(struct lpfc_iocbq), GFP_KERNEL); 4507 if (iocbq_entry == NULL) { 4508 printk(KERN_ERR "%s: only allocated %d iocbs of " 4509 "expected %d count. Unloading driver.\n", 4510 __func__, i, LPFC_IOCB_LIST_CNT); 4511 goto out_free_iocbq; 4512 } 4513 4514 iotag = lpfc_sli_next_iotag(phba, iocbq_entry); 4515 if (iotag == 0) { 4516 kfree(iocbq_entry); 4517 printk(KERN_ERR "%s: failed to allocate IOTAG. " 4518 "Unloading driver.\n", __func__); 4519 goto out_free_iocbq; 4520 } 4521 iocbq_entry->sli4_xritag = NO_XRI; 4522 4523 spin_lock_irq(&phba->hbalock); 4524 list_add(&iocbq_entry->list, &phba->lpfc_iocb_list); 4525 phba->total_iocbq_bufs++; 4526 spin_unlock_irq(&phba->hbalock); 4527 } 4528 4529 return 0; 4530 4531out_free_iocbq: 4532 lpfc_free_iocb_list(phba); 4533 4534 return -ENOMEM; 4535} 4536 4537/** 4538 * lpfc_free_sgl_list - Free sgl list. 4539 * @phba: pointer to lpfc hba data structure. 4540 * 4541 * This routine is invoked to free the driver's sgl list and memory. 4542 **/ 4543static void 4544lpfc_free_sgl_list(struct lpfc_hba *phba) 4545{ 4546 struct lpfc_sglq *sglq_entry = NULL, *sglq_next = NULL; 4547 LIST_HEAD(sglq_list); 4548 int rc = 0; 4549 4550 spin_lock_irq(&phba->hbalock); 4551 list_splice_init(&phba->sli4_hba.lpfc_sgl_list, &sglq_list); 4552 spin_unlock_irq(&phba->hbalock); 4553 4554 list_for_each_entry_safe(sglq_entry, sglq_next, 4555 &sglq_list, list) { 4556 list_del(&sglq_entry->list); 4557 lpfc_mbuf_free(phba, sglq_entry->virt, sglq_entry->phys); 4558 kfree(sglq_entry); 4559 phba->sli4_hba.total_sglq_bufs--; 4560 } 4561 rc = lpfc_sli4_remove_all_sgl_pages(phba); 4562 if (rc) { 4563 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 4564 "2005 Unable to deregister pages from HBA: %x\n", rc); 4565 } 4566 kfree(phba->sli4_hba.lpfc_els_sgl_array); 4567} 4568 4569/** 4570 * lpfc_init_active_sgl_array - Allocate the buf to track active ELS XRIs. 4571 * @phba: pointer to lpfc hba data structure. 4572 * 4573 * This routine is invoked to allocate the driver's active sgl memory. 4574 * This array will hold the sglq_entry's for active IOs. 4575 **/ 4576static int 4577lpfc_init_active_sgl_array(struct lpfc_hba *phba) 4578{ 4579 int size; 4580 size = sizeof(struct lpfc_sglq *); 4581 size *= phba->sli4_hba.max_cfg_param.max_xri; 4582 4583 phba->sli4_hba.lpfc_sglq_active_list = 4584 kzalloc(size, GFP_KERNEL); 4585 if (!phba->sli4_hba.lpfc_sglq_active_list) 4586 return -ENOMEM; 4587 return 0; 4588} 4589 4590/** 4591 * lpfc_free_active_sgl - Free the buf that tracks active ELS XRIs. 4592 * @phba: pointer to lpfc hba data structure. 4593 * 4594 * This routine is invoked to walk through the array of active sglq entries 4595 * and free all of the resources. 4596 * This is just a place holder for now. 4597 **/ 4598static void 4599lpfc_free_active_sgl(struct lpfc_hba *phba) 4600{ 4601 kfree(phba->sli4_hba.lpfc_sglq_active_list); 4602} 4603 4604/** 4605 * lpfc_init_sgl_list - Allocate and initialize sgl list. 4606 * @phba: pointer to lpfc hba data structure. 4607 * 4608 * This routine is invoked to allocate and initizlize the driver's sgl 4609 * list and set up the sgl xritag tag array accordingly. 4610 * 4611 * Return codes 4612 * 0 - successful 4613 * other values - error 4614 **/ 4615static int 4616lpfc_init_sgl_list(struct lpfc_hba *phba) 4617{ 4618 struct lpfc_sglq *sglq_entry = NULL; 4619 int i; 4620 int els_xri_cnt; 4621 4622 els_xri_cnt = lpfc_sli4_get_els_iocb_cnt(phba); 4623 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 4624 "2400 lpfc_init_sgl_list els %d.\n", 4625 els_xri_cnt); 4626 /* Initialize and populate the sglq list per host/VF. */ 4627 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_sgl_list); 4628 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_els_sgl_list); 4629 4630 /* Sanity check on XRI management */ 4631 if (phba->sli4_hba.max_cfg_param.max_xri <= els_xri_cnt) { 4632 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 4633 "2562 No room left for SCSI XRI allocation: " 4634 "max_xri=%d, els_xri=%d\n", 4635 phba->sli4_hba.max_cfg_param.max_xri, 4636 els_xri_cnt); 4637 return -ENOMEM; 4638 } 4639 4640 /* Allocate memory for the ELS XRI management array */ 4641 phba->sli4_hba.lpfc_els_sgl_array = 4642 kzalloc((sizeof(struct lpfc_sglq *) * els_xri_cnt), 4643 GFP_KERNEL); 4644 4645 if (!phba->sli4_hba.lpfc_els_sgl_array) { 4646 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 4647 "2401 Failed to allocate memory for ELS " 4648 "XRI management array of size %d.\n", 4649 els_xri_cnt); 4650 return -ENOMEM; 4651 } 4652 4653 /* Keep the SCSI XRI into the XRI management array */ 4654 phba->sli4_hba.scsi_xri_max = 4655 phba->sli4_hba.max_cfg_param.max_xri - els_xri_cnt; 4656 phba->sli4_hba.scsi_xri_cnt = 0; 4657 4658 phba->sli4_hba.lpfc_scsi_psb_array = 4659 kzalloc((sizeof(struct lpfc_scsi_buf *) * 4660 phba->sli4_hba.scsi_xri_max), GFP_KERNEL); 4661 4662 if (!phba->sli4_hba.lpfc_scsi_psb_array) { 4663 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 4664 "2563 Failed to allocate memory for SCSI " 4665 "XRI management array of size %d.\n", 4666 phba->sli4_hba.scsi_xri_max); 4667 kfree(phba->sli4_hba.lpfc_els_sgl_array); 4668 return -ENOMEM; 4669 } 4670 4671 for (i = 0; i < els_xri_cnt; i++) { 4672 sglq_entry = kzalloc(sizeof(struct lpfc_sglq), GFP_KERNEL); 4673 if (sglq_entry == NULL) { 4674 printk(KERN_ERR "%s: only allocated %d sgls of " 4675 "expected %d count. Unloading driver.\n", 4676 __func__, i, els_xri_cnt); 4677 goto out_free_mem; 4678 } 4679 4680 sglq_entry->sli4_xritag = lpfc_sli4_next_xritag(phba); 4681 if (sglq_entry->sli4_xritag == NO_XRI) { 4682 kfree(sglq_entry); 4683 printk(KERN_ERR "%s: failed to allocate XRI.\n" 4684 "Unloading driver.\n", __func__); 4685 goto out_free_mem; 4686 } 4687 sglq_entry->buff_type = GEN_BUFF_TYPE; 4688 sglq_entry->virt = lpfc_mbuf_alloc(phba, 0, &sglq_entry->phys); 4689 if (sglq_entry->virt == NULL) { 4690 kfree(sglq_entry); 4691 printk(KERN_ERR "%s: failed to allocate mbuf.\n" 4692 "Unloading driver.\n", __func__); 4693 goto out_free_mem; 4694 } 4695 sglq_entry->sgl = sglq_entry->virt; 4696 memset(sglq_entry->sgl, 0, LPFC_BPL_SIZE); 4697 4698 /* The list order is used by later block SGL registraton */ 4699 spin_lock_irq(&phba->hbalock); 4700 sglq_entry->state = SGL_FREED; 4701 list_add_tail(&sglq_entry->list, &phba->sli4_hba.lpfc_sgl_list); 4702 phba->sli4_hba.lpfc_els_sgl_array[i] = sglq_entry; 4703 phba->sli4_hba.total_sglq_bufs++; 4704 spin_unlock_irq(&phba->hbalock); 4705 } 4706 return 0; 4707 4708out_free_mem: 4709 kfree(phba->sli4_hba.lpfc_scsi_psb_array); 4710 lpfc_free_sgl_list(phba); 4711 return -ENOMEM; 4712} 4713 4714/** 4715 * lpfc_sli4_init_rpi_hdrs - Post the rpi header memory region to the port 4716 * @phba: pointer to lpfc hba data structure. 4717 * 4718 * This routine is invoked to post rpi header templates to the 4719 * HBA consistent with the SLI-4 interface spec. This routine 4720 * posts a PAGE_SIZE memory region to the port to hold up to 4721 * PAGE_SIZE modulo 64 rpi context headers. 4722 * No locks are held here because this is an initialization routine 4723 * called only from probe or lpfc_online when interrupts are not 4724 * enabled and the driver is reinitializing the device. 4725 * 4726 * Return codes 4727 * 0 - successful 4728 * ENOMEM - No availble memory 4729 * EIO - The mailbox failed to complete successfully. 4730 **/ 4731int 4732lpfc_sli4_init_rpi_hdrs(struct lpfc_hba *phba) 4733{ 4734 int rc = 0; 4735 int longs; 4736 uint16_t rpi_count; 4737 struct lpfc_rpi_hdr *rpi_hdr; 4738 4739 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_rpi_hdr_list); 4740 4741 /* 4742 * Provision an rpi bitmask range for discovery. The total count 4743 * is the difference between max and base + 1. 4744 */ 4745 rpi_count = phba->sli4_hba.max_cfg_param.rpi_base + 4746 phba->sli4_hba.max_cfg_param.max_rpi - 1; 4747 4748 longs = ((rpi_count) + BITS_PER_LONG - 1) / BITS_PER_LONG; 4749 phba->sli4_hba.rpi_bmask = kzalloc(longs * sizeof(unsigned long), 4750 GFP_KERNEL); 4751 if (!phba->sli4_hba.rpi_bmask) 4752 return -ENOMEM; 4753 4754 rpi_hdr = lpfc_sli4_create_rpi_hdr(phba); 4755 if (!rpi_hdr) { 4756 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 4757 "0391 Error during rpi post operation\n"); 4758 lpfc_sli4_remove_rpis(phba); 4759 rc = -ENODEV; 4760 } 4761 4762 return rc; 4763} 4764 4765/** 4766 * lpfc_sli4_create_rpi_hdr - Allocate an rpi header memory region 4767 * @phba: pointer to lpfc hba data structure. 4768 * 4769 * This routine is invoked to allocate a single 4KB memory region to 4770 * support rpis and stores them in the phba. This single region 4771 * provides support for up to 64 rpis. The region is used globally 4772 * by the device. 4773 * 4774 * Returns: 4775 * A valid rpi hdr on success. 4776 * A NULL pointer on any failure. 4777 **/ 4778struct lpfc_rpi_hdr * 4779lpfc_sli4_create_rpi_hdr(struct lpfc_hba *phba) 4780{ 4781 uint16_t rpi_limit, curr_rpi_range; 4782 struct lpfc_dmabuf *dmabuf; 4783 struct lpfc_rpi_hdr *rpi_hdr; 4784 4785 rpi_limit = phba->sli4_hba.max_cfg_param.rpi_base + 4786 phba->sli4_hba.max_cfg_param.max_rpi - 1; 4787 4788 spin_lock_irq(&phba->hbalock); 4789 curr_rpi_range = phba->sli4_hba.next_rpi; 4790 spin_unlock_irq(&phba->hbalock); 4791 4792 /* 4793 * The port has a limited number of rpis. The increment here 4794 * is LPFC_RPI_HDR_COUNT - 1 to account for the starting value 4795 * and to allow the full max_rpi range per port. 4796 */ 4797 if ((curr_rpi_range + (LPFC_RPI_HDR_COUNT - 1)) > rpi_limit) 4798 return NULL; 4799 4800 /* 4801 * First allocate the protocol header region for the port. The 4802 * port expects a 4KB DMA-mapped memory region that is 4K aligned. 4803 */ 4804 dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL); 4805 if (!dmabuf) 4806 return NULL; 4807 4808 dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev, 4809 LPFC_HDR_TEMPLATE_SIZE, 4810 &dmabuf->phys, 4811 GFP_KERNEL); 4812 if (!dmabuf->virt) { 4813 rpi_hdr = NULL; 4814 goto err_free_dmabuf; 4815 } 4816 4817 memset(dmabuf->virt, 0, LPFC_HDR_TEMPLATE_SIZE); 4818 if (!IS_ALIGNED(dmabuf->phys, LPFC_HDR_TEMPLATE_SIZE)) { 4819 rpi_hdr = NULL; 4820 goto err_free_coherent; 4821 } 4822 4823 /* Save the rpi header data for cleanup later. */ 4824 rpi_hdr = kzalloc(sizeof(struct lpfc_rpi_hdr), GFP_KERNEL); 4825 if (!rpi_hdr) 4826 goto err_free_coherent; 4827 4828 rpi_hdr->dmabuf = dmabuf; 4829 rpi_hdr->len = LPFC_HDR_TEMPLATE_SIZE; 4830 rpi_hdr->page_count = 1; 4831 spin_lock_irq(&phba->hbalock); 4832 rpi_hdr->start_rpi = phba->sli4_hba.next_rpi; 4833 list_add_tail(&rpi_hdr->list, &phba->sli4_hba.lpfc_rpi_hdr_list); 4834 4835 /* 4836 * The next_rpi stores the next module-64 rpi value to post 4837 * in any subsequent rpi memory region postings. 4838 */ 4839 phba->sli4_hba.next_rpi += LPFC_RPI_HDR_COUNT; 4840 spin_unlock_irq(&phba->hbalock); 4841 return rpi_hdr; 4842 4843 err_free_coherent: 4844 dma_free_coherent(&phba->pcidev->dev, LPFC_HDR_TEMPLATE_SIZE, 4845 dmabuf->virt, dmabuf->phys); 4846 err_free_dmabuf: 4847 kfree(dmabuf); 4848 return NULL; 4849} 4850 4851/** 4852 * lpfc_sli4_remove_rpi_hdrs - Remove all rpi header memory regions 4853 * @phba: pointer to lpfc hba data structure. 4854 * 4855 * This routine is invoked to remove all memory resources allocated 4856 * to support rpis. This routine presumes the caller has released all 4857 * rpis consumed by fabric or port logins and is prepared to have 4858 * the header pages removed. 4859 **/ 4860void 4861lpfc_sli4_remove_rpi_hdrs(struct lpfc_hba *phba) 4862{ 4863 struct lpfc_rpi_hdr *rpi_hdr, *next_rpi_hdr; 4864 4865 list_for_each_entry_safe(rpi_hdr, next_rpi_hdr, 4866 &phba->sli4_hba.lpfc_rpi_hdr_list, list) { 4867 list_del(&rpi_hdr->list); 4868 dma_free_coherent(&phba->pcidev->dev, rpi_hdr->len, 4869 rpi_hdr->dmabuf->virt, rpi_hdr->dmabuf->phys); 4870 kfree(rpi_hdr->dmabuf); 4871 kfree(rpi_hdr); 4872 } 4873 4874 phba->sli4_hba.next_rpi = phba->sli4_hba.max_cfg_param.rpi_base; 4875 memset(phba->sli4_hba.rpi_bmask, 0, sizeof(*phba->sli4_hba.rpi_bmask)); 4876} 4877 4878/** 4879 * lpfc_hba_alloc - Allocate driver hba data structure for a device. 4880 * @pdev: pointer to pci device data structure. 4881 * 4882 * This routine is invoked to allocate the driver hba data structure for an 4883 * HBA device. If the allocation is successful, the phba reference to the 4884 * PCI device data structure is set. 4885 * 4886 * Return codes 4887 * pointer to @phba - successful 4888 * NULL - error 4889 **/ 4890static struct lpfc_hba * 4891lpfc_hba_alloc(struct pci_dev *pdev) 4892{ 4893 struct lpfc_hba *phba; 4894 4895 /* Allocate memory for HBA structure */ 4896 phba = kzalloc(sizeof(struct lpfc_hba), GFP_KERNEL); 4897 if (!phba) { 4898 dev_err(&pdev->dev, "failed to allocate hba struct\n"); 4899 return NULL; 4900 } 4901 4902 /* Set reference to PCI device in HBA structure */ 4903 phba->pcidev = pdev; 4904 4905 /* Assign an unused board number */ 4906 phba->brd_no = lpfc_get_instance(); 4907 if (phba->brd_no < 0) { 4908 kfree(phba); 4909 return NULL; 4910 } 4911 4912 spin_lock_init(&phba->ct_ev_lock); 4913 INIT_LIST_HEAD(&phba->ct_ev_waiters); 4914 4915 return phba; 4916} 4917 4918/** 4919 * lpfc_hba_free - Free driver hba data structure with a device. 4920 * @phba: pointer to lpfc hba data structure. 4921 * 4922 * This routine is invoked to free the driver hba data structure with an 4923 * HBA device. 4924 **/ 4925static void 4926lpfc_hba_free(struct lpfc_hba *phba) 4927{ 4928 /* Release the driver assigned board number */ 4929 idr_remove(&lpfc_hba_index, phba->brd_no); 4930 4931 kfree(phba); 4932 return; 4933} 4934 4935/** 4936 * lpfc_create_shost - Create hba physical port with associated scsi host. 4937 * @phba: pointer to lpfc hba data structure. 4938 * 4939 * This routine is invoked to create HBA physical port and associate a SCSI 4940 * host with it. 4941 * 4942 * Return codes 4943 * 0 - successful 4944 * other values - error 4945 **/ 4946static int 4947lpfc_create_shost(struct lpfc_hba *phba) 4948{ 4949 struct lpfc_vport *vport; 4950 struct Scsi_Host *shost; 4951 4952 /* Initialize HBA FC structure */ 4953 phba->fc_edtov = FF_DEF_EDTOV; 4954 phba->fc_ratov = FF_DEF_RATOV; 4955 phba->fc_altov = FF_DEF_ALTOV; 4956 phba->fc_arbtov = FF_DEF_ARBTOV; 4957 4958 atomic_set(&phba->sdev_cnt, 0); 4959 vport = lpfc_create_port(phba, phba->brd_no, &phba->pcidev->dev); 4960 if (!vport) 4961 return -ENODEV; 4962 4963 shost = lpfc_shost_from_vport(vport); 4964 phba->pport = vport; 4965 lpfc_debugfs_initialize(vport); 4966 /* Put reference to SCSI host to driver's device private data */ 4967 pci_set_drvdata(phba->pcidev, shost); 4968 4969 return 0; 4970} 4971 4972/** 4973 * lpfc_destroy_shost - Destroy hba physical port with associated scsi host. 4974 * @phba: pointer to lpfc hba data structure. 4975 * 4976 * This routine is invoked to destroy HBA physical port and the associated 4977 * SCSI host. 4978 **/ 4979static void 4980lpfc_destroy_shost(struct lpfc_hba *phba) 4981{ 4982 struct lpfc_vport *vport = phba->pport; 4983 4984 /* Destroy physical port that associated with the SCSI host */ 4985 destroy_port(vport); 4986 4987 return; 4988} 4989 4990/** 4991 * lpfc_setup_bg - Setup Block guard structures and debug areas. 4992 * @phba: pointer to lpfc hba data structure. 4993 * @shost: the shost to be used to detect Block guard settings. 4994 * 4995 * This routine sets up the local Block guard protocol settings for @shost. 4996 * This routine also allocates memory for debugging bg buffers. 4997 **/ 4998static void 4999lpfc_setup_bg(struct lpfc_hba *phba, struct Scsi_Host *shost) 5000{ 5001 int pagecnt = 10; 5002 if (lpfc_prot_mask && lpfc_prot_guard) { 5003 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 5004 "1478 Registering BlockGuard with the " 5005 "SCSI layer\n"); 5006 scsi_host_set_prot(shost, lpfc_prot_mask); 5007 scsi_host_set_guard(shost, lpfc_prot_guard); 5008 } 5009 if (!_dump_buf_data) { 5010 while (pagecnt) { 5011 spin_lock_init(&_dump_buf_lock); 5012 _dump_buf_data = 5013 (char *) __get_free_pages(GFP_KERNEL, pagecnt); 5014 if (_dump_buf_data) { 5015 lpfc_printf_log(phba, KERN_ERR, LOG_BG, 5016 "9043 BLKGRD: allocated %d pages for " 5017 "_dump_buf_data at 0x%p\n", 5018 (1 << pagecnt), _dump_buf_data); 5019 _dump_buf_data_order = pagecnt; 5020 memset(_dump_buf_data, 0, 5021 ((1 << PAGE_SHIFT) << pagecnt)); 5022 break; 5023 } else 5024 --pagecnt; 5025 } 5026 if (!_dump_buf_data_order) 5027 lpfc_printf_log(phba, KERN_ERR, LOG_BG, 5028 "9044 BLKGRD: ERROR unable to allocate " 5029 "memory for hexdump\n"); 5030 } else 5031 lpfc_printf_log(phba, KERN_ERR, LOG_BG, 5032 "9045 BLKGRD: already allocated _dump_buf_data=0x%p" 5033 "\n", _dump_buf_data); 5034 if (!_dump_buf_dif) { 5035 while (pagecnt) { 5036 _dump_buf_dif = 5037 (char *) __get_free_pages(GFP_KERNEL, pagecnt); 5038 if (_dump_buf_dif) { 5039 lpfc_printf_log(phba, KERN_ERR, LOG_BG, 5040 "9046 BLKGRD: allocated %d pages for " 5041 "_dump_buf_dif at 0x%p\n", 5042 (1 << pagecnt), _dump_buf_dif); 5043 _dump_buf_dif_order = pagecnt; 5044 memset(_dump_buf_dif, 0, 5045 ((1 << PAGE_SHIFT) << pagecnt)); 5046 break; 5047 } else 5048 --pagecnt; 5049 } 5050 if (!_dump_buf_dif_order) 5051 lpfc_printf_log(phba, KERN_ERR, LOG_BG, 5052 "9047 BLKGRD: ERROR unable to allocate " 5053 "memory for hexdump\n"); 5054 } else 5055 lpfc_printf_log(phba, KERN_ERR, LOG_BG, 5056 "9048 BLKGRD: already allocated _dump_buf_dif=0x%p\n", 5057 _dump_buf_dif); 5058} 5059 5060/** 5061 * lpfc_post_init_setup - Perform necessary device post initialization setup. 5062 * @phba: pointer to lpfc hba data structure. 5063 * 5064 * This routine is invoked to perform all the necessary post initialization 5065 * setup for the device. 5066 **/ 5067static void 5068lpfc_post_init_setup(struct lpfc_hba *phba) 5069{ 5070 struct Scsi_Host *shost; 5071 struct lpfc_adapter_event_header adapter_event; 5072 5073 /* Get the default values for Model Name and Description */ 5074 lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc); 5075 5076 /* 5077 * hba setup may have changed the hba_queue_depth so we need to 5078 * adjust the value of can_queue. 5079 */ 5080 shost = pci_get_drvdata(phba->pcidev); 5081 shost->can_queue = phba->cfg_hba_queue_depth - 10; 5082 if (phba->sli3_options & LPFC_SLI3_BG_ENABLED) 5083 lpfc_setup_bg(phba, shost); 5084 5085 lpfc_host_attrib_init(shost); 5086 5087 if (phba->cfg_poll & DISABLE_FCP_RING_INT) { 5088 spin_lock_irq(shost->host_lock); 5089 lpfc_poll_start_timer(phba); 5090 spin_unlock_irq(shost->host_lock); 5091 } 5092 5093 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 5094 "0428 Perform SCSI scan\n"); 5095 /* Send board arrival event to upper layer */ 5096 adapter_event.event_type = FC_REG_ADAPTER_EVENT; 5097 adapter_event.subcategory = LPFC_EVENT_ARRIVAL; 5098 fc_host_post_vendor_event(shost, fc_get_event_number(), 5099 sizeof(adapter_event), 5100 (char *) &adapter_event, 5101 LPFC_NL_VENDOR_ID); 5102 return; 5103} 5104 5105/** 5106 * lpfc_sli_pci_mem_setup - Setup SLI3 HBA PCI memory space. 5107 * @phba: pointer to lpfc hba data structure. 5108 * 5109 * This routine is invoked to set up the PCI device memory space for device 5110 * with SLI-3 interface spec. 5111 * 5112 * Return codes 5113 * 0 - successful 5114 * other values - error 5115 **/ 5116static int 5117lpfc_sli_pci_mem_setup(struct lpfc_hba *phba) 5118{ 5119 struct pci_dev *pdev; 5120 unsigned long bar0map_len, bar2map_len; 5121 int i, hbq_count; 5122 void *ptr; 5123 int error = -ENODEV; 5124 5125 /* Obtain PCI device reference */ 5126 if (!phba->pcidev) 5127 return error; 5128 else 5129 pdev = phba->pcidev; 5130 5131 /* Set the device DMA mask size */ 5132 if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) != 0 5133 || pci_set_consistent_dma_mask(pdev,DMA_BIT_MASK(64)) != 0) { 5134 if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0 5135 || pci_set_consistent_dma_mask(pdev,DMA_BIT_MASK(32)) != 0) { 5136 return error; 5137 } 5138 } 5139 5140 /* Get the bus address of Bar0 and Bar2 and the number of bytes 5141 * required by each mapping. 5142 */ 5143 phba->pci_bar0_map = pci_resource_start(pdev, 0); 5144 bar0map_len = pci_resource_len(pdev, 0); 5145 5146 phba->pci_bar2_map = pci_resource_start(pdev, 2); 5147 bar2map_len = pci_resource_len(pdev, 2); 5148 5149 /* Map HBA SLIM to a kernel virtual address. */ 5150 phba->slim_memmap_p = ioremap(phba->pci_bar0_map, bar0map_len); 5151 if (!phba->slim_memmap_p) { 5152 dev_printk(KERN_ERR, &pdev->dev, 5153 "ioremap failed for SLIM memory.\n"); 5154 goto out; 5155 } 5156 5157 /* Map HBA Control Registers to a kernel virtual address. */ 5158 phba->ctrl_regs_memmap_p = ioremap(phba->pci_bar2_map, bar2map_len); 5159 if (!phba->ctrl_regs_memmap_p) { 5160 dev_printk(KERN_ERR, &pdev->dev, 5161 "ioremap failed for HBA control registers.\n"); 5162 goto out_iounmap_slim; 5163 } 5164 5165 /* Allocate memory for SLI-2 structures */ 5166 phba->slim2p.virt = dma_alloc_coherent(&pdev->dev, 5167 SLI2_SLIM_SIZE, 5168 &phba->slim2p.phys, 5169 GFP_KERNEL); 5170 if (!phba->slim2p.virt) 5171 goto out_iounmap; 5172 5173 memset(phba->slim2p.virt, 0, SLI2_SLIM_SIZE); 5174 phba->mbox = phba->slim2p.virt + offsetof(struct lpfc_sli2_slim, mbx); 5175 phba->mbox_ext = (phba->slim2p.virt + 5176 offsetof(struct lpfc_sli2_slim, mbx_ext_words)); 5177 phba->pcb = (phba->slim2p.virt + offsetof(struct lpfc_sli2_slim, pcb)); 5178 phba->IOCBs = (phba->slim2p.virt + 5179 offsetof(struct lpfc_sli2_slim, IOCBs)); 5180 5181 phba->hbqslimp.virt = dma_alloc_coherent(&pdev->dev, 5182 lpfc_sli_hbq_size(), 5183 &phba->hbqslimp.phys, 5184 GFP_KERNEL); 5185 if (!phba->hbqslimp.virt) 5186 goto out_free_slim; 5187 5188 hbq_count = lpfc_sli_hbq_count(); 5189 ptr = phba->hbqslimp.virt; 5190 for (i = 0; i < hbq_count; ++i) { 5191 phba->hbqs[i].hbq_virt = ptr; 5192 INIT_LIST_HEAD(&phba->hbqs[i].hbq_buffer_list); 5193 ptr += (lpfc_hbq_defs[i]->entry_count * 5194 sizeof(struct lpfc_hbq_entry)); 5195 } 5196 phba->hbqs[LPFC_ELS_HBQ].hbq_alloc_buffer = lpfc_els_hbq_alloc; 5197 phba->hbqs[LPFC_ELS_HBQ].hbq_free_buffer = lpfc_els_hbq_free; 5198 5199 memset(phba->hbqslimp.virt, 0, lpfc_sli_hbq_size()); 5200 5201 INIT_LIST_HEAD(&phba->rb_pend_list); 5202 5203 phba->MBslimaddr = phba->slim_memmap_p; 5204 phba->HAregaddr = phba->ctrl_regs_memmap_p + HA_REG_OFFSET; 5205 phba->CAregaddr = phba->ctrl_regs_memmap_p + CA_REG_OFFSET; 5206 phba->HSregaddr = phba->ctrl_regs_memmap_p + HS_REG_OFFSET; 5207 phba->HCregaddr = phba->ctrl_regs_memmap_p + HC_REG_OFFSET; 5208 5209 return 0; 5210 5211out_free_slim: 5212 dma_free_coherent(&pdev->dev, SLI2_SLIM_SIZE, 5213 phba->slim2p.virt, phba->slim2p.phys); 5214out_iounmap: 5215 iounmap(phba->ctrl_regs_memmap_p); 5216out_iounmap_slim: 5217 iounmap(phba->slim_memmap_p); 5218out: 5219 return error; 5220} 5221 5222/** 5223 * lpfc_sli_pci_mem_unset - Unset SLI3 HBA PCI memory space. 5224 * @phba: pointer to lpfc hba data structure. 5225 * 5226 * This routine is invoked to unset the PCI device memory space for device 5227 * with SLI-3 interface spec. 5228 **/ 5229static void 5230lpfc_sli_pci_mem_unset(struct lpfc_hba *phba) 5231{ 5232 struct pci_dev *pdev; 5233 5234 /* Obtain PCI device reference */ 5235 if (!phba->pcidev) 5236 return; 5237 else 5238 pdev = phba->pcidev; 5239 5240 /* Free coherent DMA memory allocated */ 5241 dma_free_coherent(&pdev->dev, lpfc_sli_hbq_size(), 5242 phba->hbqslimp.virt, phba->hbqslimp.phys); 5243 dma_free_coherent(&pdev->dev, SLI2_SLIM_SIZE, 5244 phba->slim2p.virt, phba->slim2p.phys); 5245 5246 /* I/O memory unmap */ 5247 iounmap(phba->ctrl_regs_memmap_p); 5248 iounmap(phba->slim_memmap_p); 5249 5250 return; 5251} 5252 5253/** 5254 * lpfc_sli4_post_status_check - Wait for SLI4 POST done and check status 5255 * @phba: pointer to lpfc hba data structure. 5256 * 5257 * This routine is invoked to wait for SLI4 device Power On Self Test (POST) 5258 * done and check status. 5259 * 5260 * Return 0 if successful, otherwise -ENODEV. 5261 **/ 5262int 5263lpfc_sli4_post_status_check(struct lpfc_hba *phba) 5264{ 5265 struct lpfc_register sta_reg, uerrlo_reg, uerrhi_reg; 5266 int i, port_error = -ENODEV; 5267 5268 if (!phba->sli4_hba.STAregaddr) 5269 return -ENODEV; 5270 5271 /* Wait up to 30 seconds for the SLI Port POST done and ready */ 5272 for (i = 0; i < 3000; i++) { 5273 sta_reg.word0 = readl(phba->sli4_hba.STAregaddr); 5274 /* Encounter fatal POST error, break out */ 5275 if (bf_get(lpfc_hst_state_perr, &sta_reg)) { 5276 port_error = -ENODEV; 5277 break; 5278 } 5279 if (LPFC_POST_STAGE_ARMFW_READY == 5280 bf_get(lpfc_hst_state_port_status, &sta_reg)) { 5281 port_error = 0; 5282 break; 5283 } 5284 msleep(10); 5285 } 5286 5287 if (port_error) 5288 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 5289 "1408 Failure HBA POST Status: sta_reg=0x%x, " 5290 "perr=x%x, sfi=x%x, nip=x%x, ipc=x%x, xrom=x%x, " 5291 "dl=x%x, pstatus=x%x\n", sta_reg.word0, 5292 bf_get(lpfc_hst_state_perr, &sta_reg), 5293 bf_get(lpfc_hst_state_sfi, &sta_reg), 5294 bf_get(lpfc_hst_state_nip, &sta_reg), 5295 bf_get(lpfc_hst_state_ipc, &sta_reg), 5296 bf_get(lpfc_hst_state_xrom, &sta_reg), 5297 bf_get(lpfc_hst_state_dl, &sta_reg), 5298 bf_get(lpfc_hst_state_port_status, &sta_reg)); 5299 5300 /* Log device information */ 5301 phba->sli4_hba.sli_intf.word0 = readl(phba->sli4_hba.SLIINTFregaddr); 5302 if (bf_get(lpfc_sli_intf_valid, 5303 &phba->sli4_hba.sli_intf) == LPFC_SLI_INTF_VALID) { 5304 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 5305 "2534 Device Info: ChipType=0x%x, SliRev=0x%x, " 5306 "FeatureL1=0x%x, FeatureL2=0x%x\n", 5307 bf_get(lpfc_sli_intf_sli_family, 5308 &phba->sli4_hba.sli_intf), 5309 bf_get(lpfc_sli_intf_slirev, 5310 &phba->sli4_hba.sli_intf), 5311 bf_get(lpfc_sli_intf_featurelevel1, 5312 &phba->sli4_hba.sli_intf), 5313 bf_get(lpfc_sli_intf_featurelevel2, 5314 &phba->sli4_hba.sli_intf)); 5315 } 5316 phba->sli4_hba.ue_mask_lo = readl(phba->sli4_hba.UEMASKLOregaddr); 5317 phba->sli4_hba.ue_mask_hi = readl(phba->sli4_hba.UEMASKHIregaddr); 5318 /* With uncoverable error, log the error message and return error */ 5319 uerrlo_reg.word0 = readl(phba->sli4_hba.UERRLOregaddr); 5320 uerrhi_reg.word0 = readl(phba->sli4_hba.UERRHIregaddr); 5321 if ((~phba->sli4_hba.ue_mask_lo & uerrlo_reg.word0) || 5322 (~phba->sli4_hba.ue_mask_hi & uerrhi_reg.word0)) { 5323 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 5324 "1422 HBA Unrecoverable error: " 5325 "uerr_lo_reg=0x%x, uerr_hi_reg=0x%x, " 5326 "ue_mask_lo_reg=0x%x, ue_mask_hi_reg=0x%x\n", 5327 uerrlo_reg.word0, uerrhi_reg.word0, 5328 phba->sli4_hba.ue_mask_lo, 5329 phba->sli4_hba.ue_mask_hi); 5330 return -ENODEV; 5331 } 5332 5333 return port_error; 5334} 5335 5336/** 5337 * lpfc_sli4_bar0_register_memmap - Set up SLI4 BAR0 register memory map. 5338 * @phba: pointer to lpfc hba data structure. 5339 * 5340 * This routine is invoked to set up SLI4 BAR0 PCI config space register 5341 * memory map. 5342 **/ 5343static void 5344lpfc_sli4_bar0_register_memmap(struct lpfc_hba *phba) 5345{ 5346 phba->sli4_hba.UERRLOregaddr = phba->sli4_hba.conf_regs_memmap_p + 5347 LPFC_UERR_STATUS_LO; 5348 phba->sli4_hba.UERRHIregaddr = phba->sli4_hba.conf_regs_memmap_p + 5349 LPFC_UERR_STATUS_HI; 5350 phba->sli4_hba.UEMASKLOregaddr = phba->sli4_hba.conf_regs_memmap_p + 5351 LPFC_UE_MASK_LO; 5352 phba->sli4_hba.UEMASKHIregaddr = phba->sli4_hba.conf_regs_memmap_p + 5353 LPFC_UE_MASK_HI; 5354 phba->sli4_hba.SLIINTFregaddr = phba->sli4_hba.conf_regs_memmap_p + 5355 LPFC_SLI_INTF; 5356} 5357 5358/** 5359 * lpfc_sli4_bar1_register_memmap - Set up SLI4 BAR1 register memory map. 5360 * @phba: pointer to lpfc hba data structure. 5361 * 5362 * This routine is invoked to set up SLI4 BAR1 control status register (CSR) 5363 * memory map. 5364 **/ 5365static void 5366lpfc_sli4_bar1_register_memmap(struct lpfc_hba *phba) 5367{ 5368 5369 phba->sli4_hba.STAregaddr = phba->sli4_hba.ctrl_regs_memmap_p + 5370 LPFC_HST_STATE; 5371 phba->sli4_hba.ISRregaddr = phba->sli4_hba.ctrl_regs_memmap_p + 5372 LPFC_HST_ISR0; 5373 phba->sli4_hba.IMRregaddr = phba->sli4_hba.ctrl_regs_memmap_p + 5374 LPFC_HST_IMR0; 5375 phba->sli4_hba.ISCRregaddr = phba->sli4_hba.ctrl_regs_memmap_p + 5376 LPFC_HST_ISCR0; 5377 return; 5378} 5379 5380/** 5381 * lpfc_sli4_bar2_register_memmap - Set up SLI4 BAR2 register memory map. 5382 * @phba: pointer to lpfc hba data structure. 5383 * @vf: virtual function number 5384 * 5385 * This routine is invoked to set up SLI4 BAR2 doorbell register memory map 5386 * based on the given viftual function number, @vf. 5387 * 5388 * Return 0 if successful, otherwise -ENODEV. 5389 **/ 5390static int 5391lpfc_sli4_bar2_register_memmap(struct lpfc_hba *phba, uint32_t vf) 5392{ 5393 if (vf > LPFC_VIR_FUNC_MAX) 5394 return -ENODEV; 5395 5396 phba->sli4_hba.RQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p + 5397 vf * LPFC_VFR_PAGE_SIZE + LPFC_RQ_DOORBELL); 5398 phba->sli4_hba.WQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p + 5399 vf * LPFC_VFR_PAGE_SIZE + LPFC_WQ_DOORBELL); 5400 phba->sli4_hba.EQCQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p + 5401 vf * LPFC_VFR_PAGE_SIZE + LPFC_EQCQ_DOORBELL); 5402 phba->sli4_hba.MQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p + 5403 vf * LPFC_VFR_PAGE_SIZE + LPFC_MQ_DOORBELL); 5404 phba->sli4_hba.BMBXregaddr = (phba->sli4_hba.drbl_regs_memmap_p + 5405 vf * LPFC_VFR_PAGE_SIZE + LPFC_BMBX); 5406 return 0; 5407} 5408 5409/** 5410 * lpfc_create_bootstrap_mbox - Create the bootstrap mailbox 5411 * @phba: pointer to lpfc hba data structure. 5412 * 5413 * This routine is invoked to create the bootstrap mailbox 5414 * region consistent with the SLI-4 interface spec. This 5415 * routine allocates all memory necessary to communicate 5416 * mailbox commands to the port and sets up all alignment 5417 * needs. No locks are expected to be held when calling 5418 * this routine. 5419 * 5420 * Return codes 5421 * 0 - successful 5422 * ENOMEM - could not allocated memory. 5423 **/ 5424static int 5425lpfc_create_bootstrap_mbox(struct lpfc_hba *phba) 5426{ 5427 uint32_t bmbx_size; 5428 struct lpfc_dmabuf *dmabuf; 5429 struct dma_address *dma_address; 5430 uint32_t pa_addr; 5431 uint64_t phys_addr; 5432 5433 dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL); 5434 if (!dmabuf) 5435 return -ENOMEM; 5436 5437 /* 5438 * The bootstrap mailbox region is comprised of 2 parts 5439 * plus an alignment restriction of 16 bytes. 5440 */ 5441 bmbx_size = sizeof(struct lpfc_bmbx_create) + (LPFC_ALIGN_16_BYTE - 1); 5442 dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev, 5443 bmbx_size, 5444 &dmabuf->phys, 5445 GFP_KERNEL); 5446 if (!dmabuf->virt) { 5447 kfree(dmabuf); 5448 return -ENOMEM; 5449 } 5450 memset(dmabuf->virt, 0, bmbx_size); 5451 5452 /* 5453 * Initialize the bootstrap mailbox pointers now so that the register 5454 * operations are simple later. The mailbox dma address is required 5455 * to be 16-byte aligned. Also align the virtual memory as each 5456 * maibox is copied into the bmbx mailbox region before issuing the 5457 * command to the port. 5458 */ 5459 phba->sli4_hba.bmbx.dmabuf = dmabuf; 5460 phba->sli4_hba.bmbx.bmbx_size = bmbx_size; 5461 5462 phba->sli4_hba.bmbx.avirt = PTR_ALIGN(dmabuf->virt, 5463 LPFC_ALIGN_16_BYTE); 5464 phba->sli4_hba.bmbx.aphys = ALIGN(dmabuf->phys, 5465 LPFC_ALIGN_16_BYTE); 5466 5467 /* 5468 * Set the high and low physical addresses now. The SLI4 alignment 5469 * requirement is 16 bytes and the mailbox is posted to the port 5470 * as two 30-bit addresses. The other data is a bit marking whether 5471 * the 30-bit address is the high or low address. 5472 * Upcast bmbx aphys to 64bits so shift instruction compiles 5473 * clean on 32 bit machines. 5474 */ 5475 dma_address = &phba->sli4_hba.bmbx.dma_address; 5476 phys_addr = (uint64_t)phba->sli4_hba.bmbx.aphys; 5477 pa_addr = (uint32_t) ((phys_addr >> 34) & 0x3fffffff); 5478 dma_address->addr_hi = (uint32_t) ((pa_addr << 2) | 5479 LPFC_BMBX_BIT1_ADDR_HI); 5480 5481 pa_addr = (uint32_t) ((phba->sli4_hba.bmbx.aphys >> 4) & 0x3fffffff); 5482 dma_address->addr_lo = (uint32_t) ((pa_addr << 2) | 5483 LPFC_BMBX_BIT1_ADDR_LO); 5484 return 0; 5485} 5486 5487/** 5488 * lpfc_destroy_bootstrap_mbox - Destroy all bootstrap mailbox resources 5489 * @phba: pointer to lpfc hba data structure. 5490 * 5491 * This routine is invoked to teardown the bootstrap mailbox 5492 * region and release all host resources. This routine requires 5493 * the caller to ensure all mailbox commands recovered, no 5494 * additional mailbox comands are sent, and interrupts are disabled 5495 * before calling this routine. 5496 * 5497 **/ 5498static void 5499lpfc_destroy_bootstrap_mbox(struct lpfc_hba *phba) 5500{ 5501 dma_free_coherent(&phba->pcidev->dev, 5502 phba->sli4_hba.bmbx.bmbx_size, 5503 phba->sli4_hba.bmbx.dmabuf->virt, 5504 phba->sli4_hba.bmbx.dmabuf->phys); 5505 5506 kfree(phba->sli4_hba.bmbx.dmabuf); 5507 memset(&phba->sli4_hba.bmbx, 0, sizeof(struct lpfc_bmbx)); 5508} 5509 5510/** 5511 * lpfc_sli4_read_config - Get the config parameters. 5512 * @phba: pointer to lpfc hba data structure. 5513 * 5514 * This routine is invoked to read the configuration parameters from the HBA. 5515 * The configuration parameters are used to set the base and maximum values 5516 * for RPI's XRI's VPI's VFI's and FCFIs. These values also affect the resource 5517 * allocation for the port. 5518 * 5519 * Return codes 5520 * 0 - successful 5521 * ENOMEM - No availble memory 5522 * EIO - The mailbox failed to complete successfully. 5523 **/ 5524static int 5525lpfc_sli4_read_config(struct lpfc_hba *phba) 5526{ 5527 LPFC_MBOXQ_t *pmb; 5528 struct lpfc_mbx_read_config *rd_config; 5529 uint32_t rc = 0; 5530 5531 pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 5532 if (!pmb) { 5533 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 5534 "2011 Unable to allocate memory for issuing " 5535 "SLI_CONFIG_SPECIAL mailbox command\n"); 5536 return -ENOMEM; 5537 } 5538 5539 lpfc_read_config(phba, pmb); 5540 5541 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL); 5542 if (rc != MBX_SUCCESS) { 5543 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 5544 "2012 Mailbox failed , mbxCmd x%x " 5545 "READ_CONFIG, mbxStatus x%x\n", 5546 bf_get(lpfc_mqe_command, &pmb->u.mqe), 5547 bf_get(lpfc_mqe_status, &pmb->u.mqe)); 5548 rc = -EIO; 5549 } else { 5550 rd_config = &pmb->u.mqe.un.rd_config; 5551 phba->sli4_hba.max_cfg_param.max_xri = 5552 bf_get(lpfc_mbx_rd_conf_xri_count, rd_config); 5553 phba->sli4_hba.max_cfg_param.xri_base = 5554 bf_get(lpfc_mbx_rd_conf_xri_base, rd_config); 5555 phba->sli4_hba.max_cfg_param.max_vpi = 5556 bf_get(lpfc_mbx_rd_conf_vpi_count, rd_config); 5557 phba->sli4_hba.max_cfg_param.vpi_base = 5558 bf_get(lpfc_mbx_rd_conf_vpi_base, rd_config); 5559 phba->sli4_hba.max_cfg_param.max_rpi = 5560 bf_get(lpfc_mbx_rd_conf_rpi_count, rd_config); 5561 phba->sli4_hba.max_cfg_param.rpi_base = 5562 bf_get(lpfc_mbx_rd_conf_rpi_base, rd_config); 5563 phba->sli4_hba.max_cfg_param.max_vfi = 5564 bf_get(lpfc_mbx_rd_conf_vfi_count, rd_config); 5565 phba->sli4_hba.max_cfg_param.vfi_base = 5566 bf_get(lpfc_mbx_rd_conf_vfi_base, rd_config); 5567 phba->sli4_hba.max_cfg_param.max_fcfi = 5568 bf_get(lpfc_mbx_rd_conf_fcfi_count, rd_config); 5569 phba->sli4_hba.max_cfg_param.fcfi_base = 5570 bf_get(lpfc_mbx_rd_conf_fcfi_base, rd_config); 5571 phba->sli4_hba.max_cfg_param.max_eq = 5572 bf_get(lpfc_mbx_rd_conf_eq_count, rd_config); 5573 phba->sli4_hba.max_cfg_param.max_rq = 5574 bf_get(lpfc_mbx_rd_conf_rq_count, rd_config); 5575 phba->sli4_hba.max_cfg_param.max_wq = 5576 bf_get(lpfc_mbx_rd_conf_wq_count, rd_config); 5577 phba->sli4_hba.max_cfg_param.max_cq = 5578 bf_get(lpfc_mbx_rd_conf_cq_count, rd_config); 5579 phba->lmt = bf_get(lpfc_mbx_rd_conf_lmt, rd_config); 5580 phba->sli4_hba.next_xri = phba->sli4_hba.max_cfg_param.xri_base; 5581 phba->vpi_base = phba->sli4_hba.max_cfg_param.vpi_base; 5582 phba->vfi_base = phba->sli4_hba.max_cfg_param.vfi_base; 5583 phba->sli4_hba.next_rpi = phba->sli4_hba.max_cfg_param.rpi_base; 5584 phba->max_vpi = (phba->sli4_hba.max_cfg_param.max_vpi > 0) ? 5585 (phba->sli4_hba.max_cfg_param.max_vpi - 1) : 0; 5586 phba->max_vports = phba->max_vpi; 5587 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 5588 "2003 cfg params XRI(B:%d M:%d), " 5589 "VPI(B:%d M:%d) " 5590 "VFI(B:%d M:%d) " 5591 "RPI(B:%d M:%d) " 5592 "FCFI(B:%d M:%d)\n", 5593 phba->sli4_hba.max_cfg_param.xri_base, 5594 phba->sli4_hba.max_cfg_param.max_xri, 5595 phba->sli4_hba.max_cfg_param.vpi_base, 5596 phba->sli4_hba.max_cfg_param.max_vpi, 5597 phba->sli4_hba.max_cfg_param.vfi_base, 5598 phba->sli4_hba.max_cfg_param.max_vfi, 5599 phba->sli4_hba.max_cfg_param.rpi_base, 5600 phba->sli4_hba.max_cfg_param.max_rpi, 5601 phba->sli4_hba.max_cfg_param.fcfi_base, 5602 phba->sli4_hba.max_cfg_param.max_fcfi); 5603 } 5604 mempool_free(pmb, phba->mbox_mem_pool); 5605 5606 /* Reset the DFT_HBA_Q_DEPTH to the max xri */ 5607 if (phba->cfg_hba_queue_depth > 5608 (phba->sli4_hba.max_cfg_param.max_xri - 5609 lpfc_sli4_get_els_iocb_cnt(phba))) 5610 phba->cfg_hba_queue_depth = 5611 phba->sli4_hba.max_cfg_param.max_xri - 5612 lpfc_sli4_get_els_iocb_cnt(phba); 5613 return rc; 5614} 5615 5616/** 5617 * lpfc_dev_endian_order_setup - Notify the port of the host's endian order. 5618 * @phba: pointer to lpfc hba data structure. 5619 * 5620 * This routine is invoked to setup the host-side endian order to the 5621 * HBA consistent with the SLI-4 interface spec. 5622 * 5623 * Return codes 5624 * 0 - successful 5625 * ENOMEM - No availble memory 5626 * EIO - The mailbox failed to complete successfully. 5627 **/ 5628static int 5629lpfc_setup_endian_order(struct lpfc_hba *phba) 5630{ 5631 LPFC_MBOXQ_t *mboxq; 5632 uint32_t rc = 0; 5633 uint32_t endian_mb_data[2] = {HOST_ENDIAN_LOW_WORD0, 5634 HOST_ENDIAN_HIGH_WORD1}; 5635 5636 mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 5637 if (!mboxq) { 5638 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 5639 "0492 Unable to allocate memory for issuing " 5640 "SLI_CONFIG_SPECIAL mailbox command\n"); 5641 return -ENOMEM; 5642 } 5643 5644 /* 5645 * The SLI4_CONFIG_SPECIAL mailbox command requires the first two 5646 * words to contain special data values and no other data. 5647 */ 5648 memset(mboxq, 0, sizeof(LPFC_MBOXQ_t)); 5649 memcpy(&mboxq->u.mqe, &endian_mb_data, sizeof(endian_mb_data)); 5650 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); 5651 if (rc != MBX_SUCCESS) { 5652 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 5653 "0493 SLI_CONFIG_SPECIAL mailbox failed with " 5654 "status x%x\n", 5655 rc); 5656 rc = -EIO; 5657 } 5658 5659 mempool_free(mboxq, phba->mbox_mem_pool); 5660 return rc; 5661} 5662 5663/** 5664 * lpfc_sli4_queue_create - Create all the SLI4 queues 5665 * @phba: pointer to lpfc hba data structure. 5666 * 5667 * This routine is invoked to allocate all the SLI4 queues for the FCoE HBA 5668 * operation. For each SLI4 queue type, the parameters such as queue entry 5669 * count (queue depth) shall be taken from the module parameter. For now, 5670 * we just use some constant number as place holder. 5671 * 5672 * Return codes 5673 * 0 - successful 5674 * ENOMEM - No availble memory 5675 * EIO - The mailbox failed to complete successfully. 5676 **/ 5677static int 5678lpfc_sli4_queue_create(struct lpfc_hba *phba) 5679{ 5680 struct lpfc_queue *qdesc; 5681 int fcp_eqidx, fcp_cqidx, fcp_wqidx; 5682 int cfg_fcp_wq_count; 5683 int cfg_fcp_eq_count; 5684 5685 /* 5686 * Sanity check for confiugred queue parameters against the run-time 5687 * device parameters 5688 */ 5689 5690 /* Sanity check on FCP fast-path WQ parameters */ 5691 cfg_fcp_wq_count = phba->cfg_fcp_wq_count; 5692 if (cfg_fcp_wq_count > 5693 (phba->sli4_hba.max_cfg_param.max_wq - LPFC_SP_WQN_DEF)) { 5694 cfg_fcp_wq_count = phba->sli4_hba.max_cfg_param.max_wq - 5695 LPFC_SP_WQN_DEF; 5696 if (cfg_fcp_wq_count < LPFC_FP_WQN_MIN) { 5697 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 5698 "2581 Not enough WQs (%d) from " 5699 "the pci function for supporting " 5700 "FCP WQs (%d)\n", 5701 phba->sli4_hba.max_cfg_param.max_wq, 5702 phba->cfg_fcp_wq_count); 5703 goto out_error; 5704 } 5705 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 5706 "2582 Not enough WQs (%d) from the pci " 5707 "function for supporting the requested " 5708 "FCP WQs (%d), the actual FCP WQs can " 5709 "be supported: %d\n", 5710 phba->sli4_hba.max_cfg_param.max_wq, 5711 phba->cfg_fcp_wq_count, cfg_fcp_wq_count); 5712 } 5713 /* The actual number of FCP work queues adopted */ 5714 phba->cfg_fcp_wq_count = cfg_fcp_wq_count; 5715 5716 /* Sanity check on FCP fast-path EQ parameters */ 5717 cfg_fcp_eq_count = phba->cfg_fcp_eq_count; 5718 if (cfg_fcp_eq_count > 5719 (phba->sli4_hba.max_cfg_param.max_eq - LPFC_SP_EQN_DEF)) { 5720 cfg_fcp_eq_count = phba->sli4_hba.max_cfg_param.max_eq - 5721 LPFC_SP_EQN_DEF; 5722 if (cfg_fcp_eq_count < LPFC_FP_EQN_MIN) { 5723 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 5724 "2574 Not enough EQs (%d) from the " 5725 "pci function for supporting FCP " 5726 "EQs (%d)\n", 5727 phba->sli4_hba.max_cfg_param.max_eq, 5728 phba->cfg_fcp_eq_count); 5729 goto out_error; 5730 } 5731 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 5732 "2575 Not enough EQs (%d) from the pci " 5733 "function for supporting the requested " 5734 "FCP EQs (%d), the actual FCP EQs can " 5735 "be supported: %d\n", 5736 phba->sli4_hba.max_cfg_param.max_eq, 5737 phba->cfg_fcp_eq_count, cfg_fcp_eq_count); 5738 } 5739 /* It does not make sense to have more EQs than WQs */ 5740 if (cfg_fcp_eq_count > phba->cfg_fcp_wq_count) { 5741 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 5742 "2593 The FCP EQ count(%d) cannot be greater " 5743 "than the FCP WQ count(%d), limiting the " 5744 "FCP EQ count to %d\n", cfg_fcp_eq_count, 5745 phba->cfg_fcp_wq_count, 5746 phba->cfg_fcp_wq_count); 5747 cfg_fcp_eq_count = phba->cfg_fcp_wq_count; 5748 } 5749 /* The actual number of FCP event queues adopted */ 5750 phba->cfg_fcp_eq_count = cfg_fcp_eq_count; 5751 /* The overall number of event queues used */ 5752 phba->sli4_hba.cfg_eqn = phba->cfg_fcp_eq_count + LPFC_SP_EQN_DEF; 5753 5754 /* 5755 * Create Event Queues (EQs) 5756 */ 5757 5758 /* Get EQ depth from module parameter, fake the default for now */ 5759 phba->sli4_hba.eq_esize = LPFC_EQE_SIZE_4B; 5760 phba->sli4_hba.eq_ecount = LPFC_EQE_DEF_COUNT; 5761 5762 /* Create slow path event queue */ 5763 qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.eq_esize, 5764 phba->sli4_hba.eq_ecount); 5765 if (!qdesc) { 5766 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 5767 "0496 Failed allocate slow-path EQ\n"); 5768 goto out_error; 5769 } 5770 phba->sli4_hba.sp_eq = qdesc; 5771 5772 /* Create fast-path FCP Event Queue(s) */ 5773 phba->sli4_hba.fp_eq = kzalloc((sizeof(struct lpfc_queue *) * 5774 phba->cfg_fcp_eq_count), GFP_KERNEL); 5775 if (!phba->sli4_hba.fp_eq) { 5776 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 5777 "2576 Failed allocate memory for fast-path " 5778 "EQ record array\n"); 5779 goto out_free_sp_eq; 5780 } 5781 for (fcp_eqidx = 0; fcp_eqidx < phba->cfg_fcp_eq_count; fcp_eqidx++) { 5782 qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.eq_esize, 5783 phba->sli4_hba.eq_ecount); 5784 if (!qdesc) { 5785 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 5786 "0497 Failed allocate fast-path EQ\n"); 5787 goto out_free_fp_eq; 5788 } 5789 phba->sli4_hba.fp_eq[fcp_eqidx] = qdesc; 5790 } 5791 5792 /* 5793 * Create Complete Queues (CQs) 5794 */ 5795 5796 /* Get CQ depth from module parameter, fake the default for now */ 5797 phba->sli4_hba.cq_esize = LPFC_CQE_SIZE; 5798 phba->sli4_hba.cq_ecount = LPFC_CQE_DEF_COUNT; 5799 5800 /* Create slow-path Mailbox Command Complete Queue */ 5801 qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.cq_esize, 5802 phba->sli4_hba.cq_ecount); 5803 if (!qdesc) { 5804 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 5805 "0500 Failed allocate slow-path mailbox CQ\n"); 5806 goto out_free_fp_eq; 5807 } 5808 phba->sli4_hba.mbx_cq = qdesc; 5809 5810 /* Create slow-path ELS Complete Queue */ 5811 qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.cq_esize, 5812 phba->sli4_hba.cq_ecount); 5813 if (!qdesc) { 5814 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 5815 "0501 Failed allocate slow-path ELS CQ\n"); 5816 goto out_free_mbx_cq; 5817 } 5818 phba->sli4_hba.els_cq = qdesc; 5819 5820 5821 /* Create fast-path FCP Completion Queue(s), one-to-one with EQs */ 5822 phba->sli4_hba.fcp_cq = kzalloc((sizeof(struct lpfc_queue *) * 5823 phba->cfg_fcp_eq_count), GFP_KERNEL); 5824 if (!phba->sli4_hba.fcp_cq) { 5825 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 5826 "2577 Failed allocate memory for fast-path " 5827 "CQ record array\n"); 5828 goto out_free_els_cq; 5829 } 5830 for (fcp_cqidx = 0; fcp_cqidx < phba->cfg_fcp_eq_count; fcp_cqidx++) { 5831 qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.cq_esize, 5832 phba->sli4_hba.cq_ecount); 5833 if (!qdesc) { 5834 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 5835 "0499 Failed allocate fast-path FCP " 5836 "CQ (%d)\n", fcp_cqidx); 5837 goto out_free_fcp_cq; 5838 } 5839 phba->sli4_hba.fcp_cq[fcp_cqidx] = qdesc; 5840 } 5841 5842 /* Create Mailbox Command Queue */ 5843 phba->sli4_hba.mq_esize = LPFC_MQE_SIZE; 5844 phba->sli4_hba.mq_ecount = LPFC_MQE_DEF_COUNT; 5845 5846 qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.mq_esize, 5847 phba->sli4_hba.mq_ecount); 5848 if (!qdesc) { 5849 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 5850 "0505 Failed allocate slow-path MQ\n"); 5851 goto out_free_fcp_cq; 5852 } 5853 phba->sli4_hba.mbx_wq = qdesc; 5854 5855 /* 5856 * Create all the Work Queues (WQs) 5857 */ 5858 phba->sli4_hba.wq_esize = LPFC_WQE_SIZE; 5859 phba->sli4_hba.wq_ecount = LPFC_WQE_DEF_COUNT; 5860 5861 /* Create slow-path ELS Work Queue */ 5862 qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.wq_esize, 5863 phba->sli4_hba.wq_ecount); 5864 if (!qdesc) { 5865 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 5866 "0504 Failed allocate slow-path ELS WQ\n"); 5867 goto out_free_mbx_wq; 5868 } 5869 phba->sli4_hba.els_wq = qdesc; 5870 5871 /* Create fast-path FCP Work Queue(s) */ 5872 phba->sli4_hba.fcp_wq = kzalloc((sizeof(struct lpfc_queue *) * 5873 phba->cfg_fcp_wq_count), GFP_KERNEL); 5874 if (!phba->sli4_hba.fcp_wq) { 5875 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 5876 "2578 Failed allocate memory for fast-path " 5877 "WQ record array\n"); 5878 goto out_free_els_wq; 5879 } 5880 for (fcp_wqidx = 0; fcp_wqidx < phba->cfg_fcp_wq_count; fcp_wqidx++) { 5881 qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.wq_esize, 5882 phba->sli4_hba.wq_ecount); 5883 if (!qdesc) { 5884 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 5885 "0503 Failed allocate fast-path FCP " 5886 "WQ (%d)\n", fcp_wqidx); 5887 goto out_free_fcp_wq; 5888 } 5889 phba->sli4_hba.fcp_wq[fcp_wqidx] = qdesc; 5890 } 5891 5892 /* 5893 * Create Receive Queue (RQ) 5894 */ 5895 phba->sli4_hba.rq_esize = LPFC_RQE_SIZE; 5896 phba->sli4_hba.rq_ecount = LPFC_RQE_DEF_COUNT; 5897 5898 /* Create Receive Queue for header */ 5899 qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.rq_esize, 5900 phba->sli4_hba.rq_ecount); 5901 if (!qdesc) { 5902 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 5903 "0506 Failed allocate receive HRQ\n"); 5904 goto out_free_fcp_wq; 5905 } 5906 phba->sli4_hba.hdr_rq = qdesc; 5907 5908 /* Create Receive Queue for data */ 5909 qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.rq_esize, 5910 phba->sli4_hba.rq_ecount); 5911 if (!qdesc) { 5912 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 5913 "0507 Failed allocate receive DRQ\n"); 5914 goto out_free_hdr_rq; 5915 } 5916 phba->sli4_hba.dat_rq = qdesc; 5917 5918 return 0; 5919 5920out_free_hdr_rq: 5921 lpfc_sli4_queue_free(phba->sli4_hba.hdr_rq); 5922 phba->sli4_hba.hdr_rq = NULL; 5923out_free_fcp_wq: 5924 for (--fcp_wqidx; fcp_wqidx >= 0; fcp_wqidx--) { 5925 lpfc_sli4_queue_free(phba->sli4_hba.fcp_wq[fcp_wqidx]); 5926 phba->sli4_hba.fcp_wq[fcp_wqidx] = NULL; 5927 } 5928 kfree(phba->sli4_hba.fcp_wq); 5929out_free_els_wq: 5930 lpfc_sli4_queue_free(phba->sli4_hba.els_wq); 5931 phba->sli4_hba.els_wq = NULL; 5932out_free_mbx_wq: 5933 lpfc_sli4_queue_free(phba->sli4_hba.mbx_wq); 5934 phba->sli4_hba.mbx_wq = NULL; 5935out_free_fcp_cq: 5936 for (--fcp_cqidx; fcp_cqidx >= 0; fcp_cqidx--) { 5937 lpfc_sli4_queue_free(phba->sli4_hba.fcp_cq[fcp_cqidx]); 5938 phba->sli4_hba.fcp_cq[fcp_cqidx] = NULL; 5939 } 5940 kfree(phba->sli4_hba.fcp_cq); 5941out_free_els_cq: 5942 lpfc_sli4_queue_free(phba->sli4_hba.els_cq); 5943 phba->sli4_hba.els_cq = NULL; 5944out_free_mbx_cq: 5945 lpfc_sli4_queue_free(phba->sli4_hba.mbx_cq); 5946 phba->sli4_hba.mbx_cq = NULL; 5947out_free_fp_eq: 5948 for (--fcp_eqidx; fcp_eqidx >= 0; fcp_eqidx--) { 5949 lpfc_sli4_queue_free(phba->sli4_hba.fp_eq[fcp_eqidx]); 5950 phba->sli4_hba.fp_eq[fcp_eqidx] = NULL; 5951 } 5952 kfree(phba->sli4_hba.fp_eq); 5953out_free_sp_eq: 5954 lpfc_sli4_queue_free(phba->sli4_hba.sp_eq); 5955 phba->sli4_hba.sp_eq = NULL; 5956out_error: 5957 return -ENOMEM; 5958} 5959 5960/** 5961 * lpfc_sli4_queue_destroy - Destroy all the SLI4 queues 5962 * @phba: pointer to lpfc hba data structure. 5963 * 5964 * This routine is invoked to release all the SLI4 queues with the FCoE HBA 5965 * operation. 5966 * 5967 * Return codes 5968 * 0 - successful 5969 * ENOMEM - No availble memory 5970 * EIO - The mailbox failed to complete successfully. 5971 **/ 5972static void 5973lpfc_sli4_queue_destroy(struct lpfc_hba *phba) 5974{ 5975 int fcp_qidx; 5976 5977 /* Release mailbox command work queue */ 5978 lpfc_sli4_queue_free(phba->sli4_hba.mbx_wq); 5979 phba->sli4_hba.mbx_wq = NULL; 5980 5981 /* Release ELS work queue */ 5982 lpfc_sli4_queue_free(phba->sli4_hba.els_wq); 5983 phba->sli4_hba.els_wq = NULL; 5984 5985 /* Release FCP work queue */ 5986 for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_wq_count; fcp_qidx++) 5987 lpfc_sli4_queue_free(phba->sli4_hba.fcp_wq[fcp_qidx]); 5988 kfree(phba->sli4_hba.fcp_wq); 5989 phba->sli4_hba.fcp_wq = NULL; 5990 5991 /* Release unsolicited receive queue */ 5992 lpfc_sli4_queue_free(phba->sli4_hba.hdr_rq); 5993 phba->sli4_hba.hdr_rq = NULL; 5994 lpfc_sli4_queue_free(phba->sli4_hba.dat_rq); 5995 phba->sli4_hba.dat_rq = NULL; 5996 5997 /* Release ELS complete queue */ 5998 lpfc_sli4_queue_free(phba->sli4_hba.els_cq); 5999 phba->sli4_hba.els_cq = NULL; 6000 6001 /* Release mailbox command complete queue */ 6002 lpfc_sli4_queue_free(phba->sli4_hba.mbx_cq); 6003 phba->sli4_hba.mbx_cq = NULL; 6004 6005 /* Release FCP response complete queue */ 6006 for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_eq_count; fcp_qidx++) 6007 lpfc_sli4_queue_free(phba->sli4_hba.fcp_cq[fcp_qidx]); 6008 kfree(phba->sli4_hba.fcp_cq); 6009 phba->sli4_hba.fcp_cq = NULL; 6010 6011 /* Release fast-path event queue */ 6012 for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_eq_count; fcp_qidx++) 6013 lpfc_sli4_queue_free(phba->sli4_hba.fp_eq[fcp_qidx]); 6014 kfree(phba->sli4_hba.fp_eq); 6015 phba->sli4_hba.fp_eq = NULL; 6016 6017 /* Release slow-path event queue */ 6018 lpfc_sli4_queue_free(phba->sli4_hba.sp_eq); 6019 phba->sli4_hba.sp_eq = NULL; 6020 6021 return; 6022} 6023 6024/** 6025 * lpfc_sli4_queue_setup - Set up all the SLI4 queues 6026 * @phba: pointer to lpfc hba data structure. 6027 * 6028 * This routine is invoked to set up all the SLI4 queues for the FCoE HBA 6029 * operation. 6030 * 6031 * Return codes 6032 * 0 - successful 6033 * ENOMEM - No availble memory 6034 * EIO - The mailbox failed to complete successfully. 6035 **/ 6036int 6037lpfc_sli4_queue_setup(struct lpfc_hba *phba) 6038{ 6039 int rc = -ENOMEM; 6040 int fcp_eqidx, fcp_cqidx, fcp_wqidx; 6041 int fcp_cq_index = 0; 6042 6043 /* 6044 * Set up Event Queues (EQs) 6045 */ 6046 6047 /* Set up slow-path event queue */ 6048 if (!phba->sli4_hba.sp_eq) { 6049 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6050 "0520 Slow-path EQ not allocated\n"); 6051 goto out_error; 6052 } 6053 rc = lpfc_eq_create(phba, phba->sli4_hba.sp_eq, 6054 LPFC_SP_DEF_IMAX); 6055 if (rc) { 6056 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6057 "0521 Failed setup of slow-path EQ: " 6058 "rc = 0x%x\n", rc); 6059 goto out_error; 6060 } 6061 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 6062 "2583 Slow-path EQ setup: queue-id=%d\n", 6063 phba->sli4_hba.sp_eq->queue_id); 6064 6065 /* Set up fast-path event queue */ 6066 for (fcp_eqidx = 0; fcp_eqidx < phba->cfg_fcp_eq_count; fcp_eqidx++) { 6067 if (!phba->sli4_hba.fp_eq[fcp_eqidx]) { 6068 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6069 "0522 Fast-path EQ (%d) not " 6070 "allocated\n", fcp_eqidx); 6071 goto out_destroy_fp_eq; 6072 } 6073 rc = lpfc_eq_create(phba, phba->sli4_hba.fp_eq[fcp_eqidx], 6074 phba->cfg_fcp_imax); 6075 if (rc) { 6076 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6077 "0523 Failed setup of fast-path EQ " 6078 "(%d), rc = 0x%x\n", fcp_eqidx, rc); 6079 goto out_destroy_fp_eq; 6080 } 6081 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 6082 "2584 Fast-path EQ setup: " 6083 "queue[%d]-id=%d\n", fcp_eqidx, 6084 phba->sli4_hba.fp_eq[fcp_eqidx]->queue_id); 6085 } 6086 6087 /* 6088 * Set up Complete Queues (CQs) 6089 */ 6090 6091 /* Set up slow-path MBOX Complete Queue as the first CQ */ 6092 if (!phba->sli4_hba.mbx_cq) { 6093 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6094 "0528 Mailbox CQ not allocated\n"); 6095 goto out_destroy_fp_eq; 6096 } 6097 rc = lpfc_cq_create(phba, phba->sli4_hba.mbx_cq, phba->sli4_hba.sp_eq, 6098 LPFC_MCQ, LPFC_MBOX); 6099 if (rc) { 6100 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6101 "0529 Failed setup of slow-path mailbox CQ: " 6102 "rc = 0x%x\n", rc); 6103 goto out_destroy_fp_eq; 6104 } 6105 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 6106 "2585 MBX CQ setup: cq-id=%d, parent eq-id=%d\n", 6107 phba->sli4_hba.mbx_cq->queue_id, 6108 phba->sli4_hba.sp_eq->queue_id); 6109 6110 /* Set up slow-path ELS Complete Queue */ 6111 if (!phba->sli4_hba.els_cq) { 6112 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6113 "0530 ELS CQ not allocated\n"); 6114 goto out_destroy_mbx_cq; 6115 } 6116 rc = lpfc_cq_create(phba, phba->sli4_hba.els_cq, phba->sli4_hba.sp_eq, 6117 LPFC_WCQ, LPFC_ELS); 6118 if (rc) { 6119 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6120 "0531 Failed setup of slow-path ELS CQ: " 6121 "rc = 0x%x\n", rc); 6122 goto out_destroy_mbx_cq; 6123 } 6124 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 6125 "2586 ELS CQ setup: cq-id=%d, parent eq-id=%d\n", 6126 phba->sli4_hba.els_cq->queue_id, 6127 phba->sli4_hba.sp_eq->queue_id); 6128 6129 /* Set up fast-path FCP Response Complete Queue */ 6130 for (fcp_cqidx = 0; fcp_cqidx < phba->cfg_fcp_eq_count; fcp_cqidx++) { 6131 if (!phba->sli4_hba.fcp_cq[fcp_cqidx]) { 6132 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6133 "0526 Fast-path FCP CQ (%d) not " 6134 "allocated\n", fcp_cqidx); 6135 goto out_destroy_fcp_cq; 6136 } 6137 rc = lpfc_cq_create(phba, phba->sli4_hba.fcp_cq[fcp_cqidx], 6138 phba->sli4_hba.fp_eq[fcp_cqidx], 6139 LPFC_WCQ, LPFC_FCP); 6140 if (rc) { 6141 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6142 "0527 Failed setup of fast-path FCP " 6143 "CQ (%d), rc = 0x%x\n", fcp_cqidx, rc); 6144 goto out_destroy_fcp_cq; 6145 } 6146 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 6147 "2588 FCP CQ setup: cq[%d]-id=%d, " 6148 "parent eq[%d]-id=%d\n", 6149 fcp_cqidx, 6150 phba->sli4_hba.fcp_cq[fcp_cqidx]->queue_id, 6151 fcp_cqidx, 6152 phba->sli4_hba.fp_eq[fcp_cqidx]->queue_id); 6153 } 6154 6155 /* 6156 * Set up all the Work Queues (WQs) 6157 */ 6158 6159 /* Set up Mailbox Command Queue */ 6160 if (!phba->sli4_hba.mbx_wq) { 6161 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6162 "0538 Slow-path MQ not allocated\n"); 6163 goto out_destroy_fcp_cq; 6164 } 6165 rc = lpfc_mq_create(phba, phba->sli4_hba.mbx_wq, 6166 phba->sli4_hba.mbx_cq, LPFC_MBOX); 6167 if (rc) { 6168 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6169 "0539 Failed setup of slow-path MQ: " 6170 "rc = 0x%x\n", rc); 6171 goto out_destroy_fcp_cq; 6172 } 6173 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 6174 "2589 MBX MQ setup: wq-id=%d, parent cq-id=%d\n", 6175 phba->sli4_hba.mbx_wq->queue_id, 6176 phba->sli4_hba.mbx_cq->queue_id); 6177 6178 /* Set up slow-path ELS Work Queue */ 6179 if (!phba->sli4_hba.els_wq) { 6180 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6181 "0536 Slow-path ELS WQ not allocated\n"); 6182 goto out_destroy_mbx_wq; 6183 } 6184 rc = lpfc_wq_create(phba, phba->sli4_hba.els_wq, 6185 phba->sli4_hba.els_cq, LPFC_ELS); 6186 if (rc) { 6187 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6188 "0537 Failed setup of slow-path ELS WQ: " 6189 "rc = 0x%x\n", rc); 6190 goto out_destroy_mbx_wq; 6191 } 6192 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 6193 "2590 ELS WQ setup: wq-id=%d, parent cq-id=%d\n", 6194 phba->sli4_hba.els_wq->queue_id, 6195 phba->sli4_hba.els_cq->queue_id); 6196 6197 /* Set up fast-path FCP Work Queue */ 6198 for (fcp_wqidx = 0; fcp_wqidx < phba->cfg_fcp_wq_count; fcp_wqidx++) { 6199 if (!phba->sli4_hba.fcp_wq[fcp_wqidx]) { 6200 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6201 "0534 Fast-path FCP WQ (%d) not " 6202 "allocated\n", fcp_wqidx); 6203 goto out_destroy_fcp_wq; 6204 } 6205 rc = lpfc_wq_create(phba, phba->sli4_hba.fcp_wq[fcp_wqidx], 6206 phba->sli4_hba.fcp_cq[fcp_cq_index], 6207 LPFC_FCP); 6208 if (rc) { 6209 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6210 "0535 Failed setup of fast-path FCP " 6211 "WQ (%d), rc = 0x%x\n", fcp_wqidx, rc); 6212 goto out_destroy_fcp_wq; 6213 } 6214 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 6215 "2591 FCP WQ setup: wq[%d]-id=%d, " 6216 "parent cq[%d]-id=%d\n", 6217 fcp_wqidx, 6218 phba->sli4_hba.fcp_wq[fcp_wqidx]->queue_id, 6219 fcp_cq_index, 6220 phba->sli4_hba.fcp_cq[fcp_cq_index]->queue_id); 6221 /* Round robin FCP Work Queue's Completion Queue assignment */ 6222 fcp_cq_index = ((fcp_cq_index + 1) % phba->cfg_fcp_eq_count); 6223 } 6224 6225 /* 6226 * Create Receive Queue (RQ) 6227 */ 6228 if (!phba->sli4_hba.hdr_rq || !phba->sli4_hba.dat_rq) { 6229 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6230 "0540 Receive Queue not allocated\n"); 6231 goto out_destroy_fcp_wq; 6232 } 6233 rc = lpfc_rq_create(phba, phba->sli4_hba.hdr_rq, phba->sli4_hba.dat_rq, 6234 phba->sli4_hba.els_cq, LPFC_USOL); 6235 if (rc) { 6236 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6237 "0541 Failed setup of Receive Queue: " 6238 "rc = 0x%x\n", rc); 6239 goto out_destroy_fcp_wq; 6240 } 6241 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 6242 "2592 USL RQ setup: hdr-rq-id=%d, dat-rq-id=%d " 6243 "parent cq-id=%d\n", 6244 phba->sli4_hba.hdr_rq->queue_id, 6245 phba->sli4_hba.dat_rq->queue_id, 6246 phba->sli4_hba.els_cq->queue_id); 6247 return 0; 6248 6249out_destroy_fcp_wq: 6250 for (--fcp_wqidx; fcp_wqidx >= 0; fcp_wqidx--) 6251 lpfc_wq_destroy(phba, phba->sli4_hba.fcp_wq[fcp_wqidx]); 6252 lpfc_wq_destroy(phba, phba->sli4_hba.els_wq); 6253out_destroy_mbx_wq: 6254 lpfc_mq_destroy(phba, phba->sli4_hba.mbx_wq); 6255out_destroy_fcp_cq: 6256 for (--fcp_cqidx; fcp_cqidx >= 0; fcp_cqidx--) 6257 lpfc_cq_destroy(phba, phba->sli4_hba.fcp_cq[fcp_cqidx]); 6258 lpfc_cq_destroy(phba, phba->sli4_hba.els_cq); 6259out_destroy_mbx_cq: 6260 lpfc_cq_destroy(phba, phba->sli4_hba.mbx_cq); 6261out_destroy_fp_eq: 6262 for (--fcp_eqidx; fcp_eqidx >= 0; fcp_eqidx--) 6263 lpfc_eq_destroy(phba, phba->sli4_hba.fp_eq[fcp_eqidx]); 6264 lpfc_eq_destroy(phba, phba->sli4_hba.sp_eq); 6265out_error: 6266 return rc; 6267} 6268 6269/** 6270 * lpfc_sli4_queue_unset - Unset all the SLI4 queues 6271 * @phba: pointer to lpfc hba data structure. 6272 * 6273 * This routine is invoked to unset all the SLI4 queues with the FCoE HBA 6274 * operation. 6275 * 6276 * Return codes 6277 * 0 - successful 6278 * ENOMEM - No availble memory 6279 * EIO - The mailbox failed to complete successfully. 6280 **/ 6281void 6282lpfc_sli4_queue_unset(struct lpfc_hba *phba) 6283{ 6284 int fcp_qidx; 6285 6286 /* Unset mailbox command work queue */ 6287 lpfc_mq_destroy(phba, phba->sli4_hba.mbx_wq); 6288 /* Unset ELS work queue */ 6289 lpfc_wq_destroy(phba, phba->sli4_hba.els_wq); 6290 /* Unset unsolicited receive queue */ 6291 lpfc_rq_destroy(phba, phba->sli4_hba.hdr_rq, phba->sli4_hba.dat_rq); 6292 /* Unset FCP work queue */ 6293 for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_wq_count; fcp_qidx++) 6294 lpfc_wq_destroy(phba, phba->sli4_hba.fcp_wq[fcp_qidx]); 6295 /* Unset mailbox command complete queue */ 6296 lpfc_cq_destroy(phba, phba->sli4_hba.mbx_cq); 6297 /* Unset ELS complete queue */ 6298 lpfc_cq_destroy(phba, phba->sli4_hba.els_cq); 6299 /* Unset FCP response complete queue */ 6300 for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_eq_count; fcp_qidx++) 6301 lpfc_cq_destroy(phba, phba->sli4_hba.fcp_cq[fcp_qidx]); 6302 /* Unset fast-path event queue */ 6303 for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_eq_count; fcp_qidx++) 6304 lpfc_eq_destroy(phba, phba->sli4_hba.fp_eq[fcp_qidx]); 6305 /* Unset slow-path event queue */ 6306 lpfc_eq_destroy(phba, phba->sli4_hba.sp_eq); 6307} 6308 6309/** 6310 * lpfc_sli4_cq_event_pool_create - Create completion-queue event free pool 6311 * @phba: pointer to lpfc hba data structure. 6312 * 6313 * This routine is invoked to allocate and set up a pool of completion queue 6314 * events. The body of the completion queue event is a completion queue entry 6315 * CQE. For now, this pool is used for the interrupt service routine to queue 6316 * the following HBA completion queue events for the worker thread to process: 6317 * - Mailbox asynchronous events 6318 * - Receive queue completion unsolicited events 6319 * Later, this can be used for all the slow-path events. 6320 * 6321 * Return codes 6322 * 0 - successful 6323 * -ENOMEM - No availble memory 6324 **/ 6325static int 6326lpfc_sli4_cq_event_pool_create(struct lpfc_hba *phba) 6327{ 6328 struct lpfc_cq_event *cq_event; 6329 int i; 6330 6331 for (i = 0; i < (4 * phba->sli4_hba.cq_ecount); i++) { 6332 cq_event = kmalloc(sizeof(struct lpfc_cq_event), GFP_KERNEL); 6333 if (!cq_event) 6334 goto out_pool_create_fail; 6335 list_add_tail(&cq_event->list, 6336 &phba->sli4_hba.sp_cqe_event_pool); 6337 } 6338 return 0; 6339 6340out_pool_create_fail: 6341 lpfc_sli4_cq_event_pool_destroy(phba); 6342 return -ENOMEM; 6343} 6344 6345/** 6346 * lpfc_sli4_cq_event_pool_destroy - Free completion-queue event free pool 6347 * @phba: pointer to lpfc hba data structure. 6348 * 6349 * This routine is invoked to free the pool of completion queue events at 6350 * driver unload time. Note that, it is the responsibility of the driver 6351 * cleanup routine to free all the outstanding completion-queue events 6352 * allocated from this pool back into the pool before invoking this routine 6353 * to destroy the pool. 6354 **/ 6355static void 6356lpfc_sli4_cq_event_pool_destroy(struct lpfc_hba *phba) 6357{ 6358 struct lpfc_cq_event *cq_event, *next_cq_event; 6359 6360 list_for_each_entry_safe(cq_event, next_cq_event, 6361 &phba->sli4_hba.sp_cqe_event_pool, list) { 6362 list_del(&cq_event->list); 6363 kfree(cq_event); 6364 } 6365} 6366 6367/** 6368 * __lpfc_sli4_cq_event_alloc - Allocate a completion-queue event from free pool 6369 * @phba: pointer to lpfc hba data structure. 6370 * 6371 * This routine is the lock free version of the API invoked to allocate a 6372 * completion-queue event from the free pool. 6373 * 6374 * Return: Pointer to the newly allocated completion-queue event if successful 6375 * NULL otherwise. 6376 **/ 6377struct lpfc_cq_event * 6378__lpfc_sli4_cq_event_alloc(struct lpfc_hba *phba) 6379{ 6380 struct lpfc_cq_event *cq_event = NULL; 6381 6382 list_remove_head(&phba->sli4_hba.sp_cqe_event_pool, cq_event, 6383 struct lpfc_cq_event, list); 6384 return cq_event; 6385} 6386 6387/** 6388 * lpfc_sli4_cq_event_alloc - Allocate a completion-queue event from free pool 6389 * @phba: pointer to lpfc hba data structure. 6390 * 6391 * This routine is the lock version of the API invoked to allocate a 6392 * completion-queue event from the free pool. 6393 * 6394 * Return: Pointer to the newly allocated completion-queue event if successful 6395 * NULL otherwise. 6396 **/ 6397struct lpfc_cq_event * 6398lpfc_sli4_cq_event_alloc(struct lpfc_hba *phba) 6399{ 6400 struct lpfc_cq_event *cq_event; 6401 unsigned long iflags; 6402 6403 spin_lock_irqsave(&phba->hbalock, iflags); 6404 cq_event = __lpfc_sli4_cq_event_alloc(phba); 6405 spin_unlock_irqrestore(&phba->hbalock, iflags); 6406 return cq_event; 6407} 6408 6409/** 6410 * __lpfc_sli4_cq_event_release - Release a completion-queue event to free pool 6411 * @phba: pointer to lpfc hba data structure. 6412 * @cq_event: pointer to the completion queue event to be freed. 6413 * 6414 * This routine is the lock free version of the API invoked to release a 6415 * completion-queue event back into the free pool. 6416 **/ 6417void 6418__lpfc_sli4_cq_event_release(struct lpfc_hba *phba, 6419 struct lpfc_cq_event *cq_event) 6420{ 6421 list_add_tail(&cq_event->list, &phba->sli4_hba.sp_cqe_event_pool); 6422} 6423 6424/** 6425 * lpfc_sli4_cq_event_release - Release a completion-queue event to free pool 6426 * @phba: pointer to lpfc hba data structure. 6427 * @cq_event: pointer to the completion queue event to be freed. 6428 * 6429 * This routine is the lock version of the API invoked to release a 6430 * completion-queue event back into the free pool. 6431 **/ 6432void 6433lpfc_sli4_cq_event_release(struct lpfc_hba *phba, 6434 struct lpfc_cq_event *cq_event) 6435{ 6436 unsigned long iflags; 6437 spin_lock_irqsave(&phba->hbalock, iflags); 6438 __lpfc_sli4_cq_event_release(phba, cq_event); 6439 spin_unlock_irqrestore(&phba->hbalock, iflags); 6440} 6441 6442/** 6443 * lpfc_sli4_cq_event_release_all - Release all cq events to the free pool 6444 * @phba: pointer to lpfc hba data structure. 6445 * 6446 * This routine is to free all the pending completion-queue events to the 6447 * back into the free pool for device reset. 6448 **/ 6449static void 6450lpfc_sli4_cq_event_release_all(struct lpfc_hba *phba) 6451{ 6452 LIST_HEAD(cqelist); 6453 struct lpfc_cq_event *cqe; 6454 unsigned long iflags; 6455 6456 /* Retrieve all the pending WCQEs from pending WCQE lists */ 6457 spin_lock_irqsave(&phba->hbalock, iflags); 6458 /* Pending FCP XRI abort events */ 6459 list_splice_init(&phba->sli4_hba.sp_fcp_xri_aborted_work_queue, 6460 &cqelist); 6461 /* Pending ELS XRI abort events */ 6462 list_splice_init(&phba->sli4_hba.sp_els_xri_aborted_work_queue, 6463 &cqelist); 6464 /* Pending asynnc events */ 6465 list_splice_init(&phba->sli4_hba.sp_asynce_work_queue, 6466 &cqelist); 6467 spin_unlock_irqrestore(&phba->hbalock, iflags); 6468 6469 while (!list_empty(&cqelist)) { 6470 list_remove_head(&cqelist, cqe, struct lpfc_cq_event, list); 6471 lpfc_sli4_cq_event_release(phba, cqe); 6472 } 6473} 6474 6475/** 6476 * lpfc_pci_function_reset - Reset pci function. 6477 * @phba: pointer to lpfc hba data structure. 6478 * 6479 * This routine is invoked to request a PCI function reset. It will destroys 6480 * all resources assigned to the PCI function which originates this request. 6481 * 6482 * Return codes 6483 * 0 - successful 6484 * ENOMEM - No availble memory 6485 * EIO - The mailbox failed to complete successfully. 6486 **/ 6487int 6488lpfc_pci_function_reset(struct lpfc_hba *phba) 6489{ 6490 LPFC_MBOXQ_t *mboxq; 6491 uint32_t rc = 0; 6492 uint32_t shdr_status, shdr_add_status; 6493 union lpfc_sli4_cfg_shdr *shdr; 6494 6495 mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 6496 if (!mboxq) { 6497 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6498 "0494 Unable to allocate memory for issuing " 6499 "SLI_FUNCTION_RESET mailbox command\n"); 6500 return -ENOMEM; 6501 } 6502 6503 /* Set up PCI function reset SLI4_CONFIG mailbox-ioctl command */ 6504 lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON, 6505 LPFC_MBOX_OPCODE_FUNCTION_RESET, 0, 6506 LPFC_SLI4_MBX_EMBED); 6507 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); 6508 shdr = (union lpfc_sli4_cfg_shdr *) 6509 &mboxq->u.mqe.un.sli4_config.header.cfg_shdr; 6510 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 6511 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 6512 if (rc != MBX_TIMEOUT) 6513 mempool_free(mboxq, phba->mbox_mem_pool); 6514 if (shdr_status || shdr_add_status || rc) { 6515 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6516 "0495 SLI_FUNCTION_RESET mailbox failed with " 6517 "status x%x add_status x%x, mbx status x%x\n", 6518 shdr_status, shdr_add_status, rc); 6519 rc = -ENXIO; 6520 } 6521 return rc; 6522} 6523 6524/** 6525 * lpfc_sli4_send_nop_mbox_cmds - Send sli-4 nop mailbox commands 6526 * @phba: pointer to lpfc hba data structure. 6527 * @cnt: number of nop mailbox commands to send. 6528 * 6529 * This routine is invoked to send a number @cnt of NOP mailbox command and 6530 * wait for each command to complete. 6531 * 6532 * Return: the number of NOP mailbox command completed. 6533 **/ 6534static int 6535lpfc_sli4_send_nop_mbox_cmds(struct lpfc_hba *phba, uint32_t cnt) 6536{ 6537 LPFC_MBOXQ_t *mboxq; 6538 int length, cmdsent; 6539 uint32_t mbox_tmo; 6540 uint32_t rc = 0; 6541 uint32_t shdr_status, shdr_add_status; 6542 union lpfc_sli4_cfg_shdr *shdr; 6543 6544 if (cnt == 0) { 6545 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 6546 "2518 Requested to send 0 NOP mailbox cmd\n"); 6547 return cnt; 6548 } 6549 6550 mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 6551 if (!mboxq) { 6552 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6553 "2519 Unable to allocate memory for issuing " 6554 "NOP mailbox command\n"); 6555 return 0; 6556 } 6557 6558 /* Set up NOP SLI4_CONFIG mailbox-ioctl command */ 6559 length = (sizeof(struct lpfc_mbx_nop) - 6560 sizeof(struct lpfc_sli4_cfg_mhdr)); 6561 lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON, 6562 LPFC_MBOX_OPCODE_NOP, length, LPFC_SLI4_MBX_EMBED); 6563 6564 mbox_tmo = lpfc_mbox_tmo_val(phba, MBX_SLI4_CONFIG); 6565 for (cmdsent = 0; cmdsent < cnt; cmdsent++) { 6566 if (!phba->sli4_hba.intr_enable) 6567 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); 6568 else 6569 rc = lpfc_sli_issue_mbox_wait(phba, mboxq, mbox_tmo); 6570 if (rc == MBX_TIMEOUT) 6571 break; 6572 /* Check return status */ 6573 shdr = (union lpfc_sli4_cfg_shdr *) 6574 &mboxq->u.mqe.un.sli4_config.header.cfg_shdr; 6575 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 6576 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, 6577 &shdr->response); 6578 if (shdr_status || shdr_add_status || rc) { 6579 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 6580 "2520 NOP mailbox command failed " 6581 "status x%x add_status x%x mbx " 6582 "status x%x\n", shdr_status, 6583 shdr_add_status, rc); 6584 break; 6585 } 6586 } 6587 6588 if (rc != MBX_TIMEOUT) 6589 mempool_free(mboxq, phba->mbox_mem_pool); 6590 6591 return cmdsent; 6592} 6593 6594/** 6595 * lpfc_sli4_fcfi_unreg - Unregister fcfi to device 6596 * @phba: pointer to lpfc hba data structure. 6597 * @fcfi: fcf index. 6598 * 6599 * This routine is invoked to unregister a FCFI from device. 6600 **/ 6601void 6602lpfc_sli4_fcfi_unreg(struct lpfc_hba *phba, uint16_t fcfi) 6603{ 6604 LPFC_MBOXQ_t *mbox; 6605 uint32_t mbox_tmo; 6606 int rc; 6607 unsigned long flags; 6608 6609 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 6610 6611 if (!mbox) 6612 return; 6613 6614 lpfc_unreg_fcfi(mbox, fcfi); 6615 6616 if (!phba->sli4_hba.intr_enable) 6617 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); 6618 else { 6619 mbox_tmo = lpfc_mbox_tmo_val(phba, MBX_SLI4_CONFIG); 6620 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo); 6621 } 6622 if (rc != MBX_TIMEOUT) 6623 mempool_free(mbox, phba->mbox_mem_pool); 6624 if (rc != MBX_SUCCESS) 6625 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 6626 "2517 Unregister FCFI command failed " 6627 "status %d, mbxStatus x%x\n", rc, 6628 bf_get(lpfc_mqe_status, &mbox->u.mqe)); 6629 else { 6630 spin_lock_irqsave(&phba->hbalock, flags); 6631 /* Mark the FCFI is no longer registered */ 6632 phba->fcf.fcf_flag &= 6633 ~(FCF_AVAILABLE | FCF_REGISTERED | FCF_SCAN_DONE); 6634 spin_unlock_irqrestore(&phba->hbalock, flags); 6635 } 6636} 6637 6638/** 6639 * lpfc_sli4_pci_mem_setup - Setup SLI4 HBA PCI memory space. 6640 * @phba: pointer to lpfc hba data structure. 6641 * 6642 * This routine is invoked to set up the PCI device memory space for device 6643 * with SLI-4 interface spec. 6644 * 6645 * Return codes 6646 * 0 - successful 6647 * other values - error 6648 **/ 6649static int 6650lpfc_sli4_pci_mem_setup(struct lpfc_hba *phba) 6651{ 6652 struct pci_dev *pdev; 6653 unsigned long bar0map_len, bar1map_len, bar2map_len; 6654 int error = -ENODEV; 6655 6656 /* Obtain PCI device reference */ 6657 if (!phba->pcidev) 6658 return error; 6659 else 6660 pdev = phba->pcidev; 6661 6662 /* Set the device DMA mask size */ 6663 if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) != 0 6664 || pci_set_consistent_dma_mask(pdev,DMA_BIT_MASK(64)) != 0) { 6665 if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0 6666 || pci_set_consistent_dma_mask(pdev,DMA_BIT_MASK(32)) != 0) { 6667 return error; 6668 } 6669 } 6670 6671 /* Get the bus address of SLI4 device Bar0, Bar1, and Bar2 and the 6672 * number of bytes required by each mapping. They are actually 6673 * mapping to the PCI BAR regions 0 or 1, 2, and 4 by the SLI4 device. 6674 */ 6675 if (pci_resource_start(pdev, 0)) { 6676 phba->pci_bar0_map = pci_resource_start(pdev, 0); 6677 bar0map_len = pci_resource_len(pdev, 0); 6678 } else { 6679 phba->pci_bar0_map = pci_resource_start(pdev, 1); 6680 bar0map_len = pci_resource_len(pdev, 1); 6681 } 6682 phba->pci_bar1_map = pci_resource_start(pdev, 2); 6683 bar1map_len = pci_resource_len(pdev, 2); 6684 6685 phba->pci_bar2_map = pci_resource_start(pdev, 4); 6686 bar2map_len = pci_resource_len(pdev, 4); 6687 6688 /* Map SLI4 PCI Config Space Register base to a kernel virtual addr */ 6689 phba->sli4_hba.conf_regs_memmap_p = 6690 ioremap(phba->pci_bar0_map, bar0map_len); 6691 if (!phba->sli4_hba.conf_regs_memmap_p) { 6692 dev_printk(KERN_ERR, &pdev->dev, 6693 "ioremap failed for SLI4 PCI config registers.\n"); 6694 goto out; 6695 } 6696 6697 /* Map SLI4 HBA Control Register base to a kernel virtual address. */ 6698 phba->sli4_hba.ctrl_regs_memmap_p = 6699 ioremap(phba->pci_bar1_map, bar1map_len); 6700 if (!phba->sli4_hba.ctrl_regs_memmap_p) { 6701 dev_printk(KERN_ERR, &pdev->dev, 6702 "ioremap failed for SLI4 HBA control registers.\n"); 6703 goto out_iounmap_conf; 6704 } 6705 6706 /* Map SLI4 HBA Doorbell Register base to a kernel virtual address. */ 6707 phba->sli4_hba.drbl_regs_memmap_p = 6708 ioremap(phba->pci_bar2_map, bar2map_len); 6709 if (!phba->sli4_hba.drbl_regs_memmap_p) { 6710 dev_printk(KERN_ERR, &pdev->dev, 6711 "ioremap failed for SLI4 HBA doorbell registers.\n"); 6712 goto out_iounmap_ctrl; 6713 } 6714 6715 /* Set up BAR0 PCI config space register memory map */ 6716 lpfc_sli4_bar0_register_memmap(phba); 6717 6718 /* Set up BAR1 register memory map */ 6719 lpfc_sli4_bar1_register_memmap(phba); 6720 6721 /* Set up BAR2 register memory map */ 6722 error = lpfc_sli4_bar2_register_memmap(phba, LPFC_VF0); 6723 if (error) 6724 goto out_iounmap_all; 6725 6726 return 0; 6727 6728out_iounmap_all: 6729 iounmap(phba->sli4_hba.drbl_regs_memmap_p); 6730out_iounmap_ctrl: 6731 iounmap(phba->sli4_hba.ctrl_regs_memmap_p); 6732out_iounmap_conf: 6733 iounmap(phba->sli4_hba.conf_regs_memmap_p); 6734out: 6735 return error; 6736} 6737 6738/** 6739 * lpfc_sli4_pci_mem_unset - Unset SLI4 HBA PCI memory space. 6740 * @phba: pointer to lpfc hba data structure. 6741 * 6742 * This routine is invoked to unset the PCI device memory space for device 6743 * with SLI-4 interface spec. 6744 **/ 6745static void 6746lpfc_sli4_pci_mem_unset(struct lpfc_hba *phba) 6747{ 6748 struct pci_dev *pdev; 6749 6750 /* Obtain PCI device reference */ 6751 if (!phba->pcidev) 6752 return; 6753 else 6754 pdev = phba->pcidev; 6755 6756 /* Free coherent DMA memory allocated */ 6757 6758 /* Unmap I/O memory space */ 6759 iounmap(phba->sli4_hba.drbl_regs_memmap_p); 6760 iounmap(phba->sli4_hba.ctrl_regs_memmap_p); 6761 iounmap(phba->sli4_hba.conf_regs_memmap_p); 6762 6763 return; 6764} 6765 6766/** 6767 * lpfc_sli_enable_msix - Enable MSI-X interrupt mode on SLI-3 device 6768 * @phba: pointer to lpfc hba data structure. 6769 * 6770 * This routine is invoked to enable the MSI-X interrupt vectors to device 6771 * with SLI-3 interface specs. The kernel function pci_enable_msix() is 6772 * called to enable the MSI-X vectors. Note that pci_enable_msix(), once 6773 * invoked, enables either all or nothing, depending on the current 6774 * availability of PCI vector resources. The device driver is responsible 6775 * for calling the individual request_irq() to register each MSI-X vector 6776 * with a interrupt handler, which is done in this function. Note that 6777 * later when device is unloading, the driver should always call free_irq() 6778 * on all MSI-X vectors it has done request_irq() on before calling 6779 * pci_disable_msix(). Failure to do so results in a BUG_ON() and a device 6780 * will be left with MSI-X enabled and leaks its vectors. 6781 * 6782 * Return codes 6783 * 0 - successful 6784 * other values - error 6785 **/ 6786static int 6787lpfc_sli_enable_msix(struct lpfc_hba *phba) 6788{ 6789 int rc, i; 6790 LPFC_MBOXQ_t *pmb; 6791 6792 /* Set up MSI-X multi-message vectors */ 6793 for (i = 0; i < LPFC_MSIX_VECTORS; i++) 6794 phba->msix_entries[i].entry = i; 6795 6796 /* Configure MSI-X capability structure */ 6797 rc = pci_enable_msix(phba->pcidev, phba->msix_entries, 6798 ARRAY_SIZE(phba->msix_entries)); 6799 if (rc) { 6800 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 6801 "0420 PCI enable MSI-X failed (%d)\n", rc); 6802 goto msi_fail_out; 6803 } 6804 for (i = 0; i < LPFC_MSIX_VECTORS; i++) 6805 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 6806 "0477 MSI-X entry[%d]: vector=x%x " 6807 "message=%d\n", i, 6808 phba->msix_entries[i].vector, 6809 phba->msix_entries[i].entry); 6810 /* 6811 * Assign MSI-X vectors to interrupt handlers 6812 */ 6813 6814 /* vector-0 is associated to slow-path handler */ 6815 rc = request_irq(phba->msix_entries[0].vector, 6816 &lpfc_sli_sp_intr_handler, IRQF_SHARED, 6817 LPFC_SP_DRIVER_HANDLER_NAME, phba); 6818 if (rc) { 6819 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 6820 "0421 MSI-X slow-path request_irq failed " 6821 "(%d)\n", rc); 6822 goto msi_fail_out; 6823 } 6824 6825 /* vector-1 is associated to fast-path handler */ 6826 rc = request_irq(phba->msix_entries[1].vector, 6827 &lpfc_sli_fp_intr_handler, IRQF_SHARED, 6828 LPFC_FP_DRIVER_HANDLER_NAME, phba); 6829 6830 if (rc) { 6831 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 6832 "0429 MSI-X fast-path request_irq failed " 6833 "(%d)\n", rc); 6834 goto irq_fail_out; 6835 } 6836 6837 /* 6838 * Configure HBA MSI-X attention conditions to messages 6839 */ 6840 pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 6841 6842 if (!pmb) { 6843 rc = -ENOMEM; 6844 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6845 "0474 Unable to allocate memory for issuing " 6846 "MBOX_CONFIG_MSI command\n"); 6847 goto mem_fail_out; 6848 } 6849 rc = lpfc_config_msi(phba, pmb); 6850 if (rc) 6851 goto mbx_fail_out; 6852 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL); 6853 if (rc != MBX_SUCCESS) { 6854 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX, 6855 "0351 Config MSI mailbox command failed, " 6856 "mbxCmd x%x, mbxStatus x%x\n", 6857 pmb->u.mb.mbxCommand, pmb->u.mb.mbxStatus); 6858 goto mbx_fail_out; 6859 } 6860 6861 /* Free memory allocated for mailbox command */ 6862 mempool_free(pmb, phba->mbox_mem_pool); 6863 return rc; 6864 6865mbx_fail_out: 6866 /* Free memory allocated for mailbox command */ 6867 mempool_free(pmb, phba->mbox_mem_pool); 6868 6869mem_fail_out: 6870 /* free the irq already requested */ 6871 free_irq(phba->msix_entries[1].vector, phba); 6872 6873irq_fail_out: 6874 /* free the irq already requested */ 6875 free_irq(phba->msix_entries[0].vector, phba); 6876 6877msi_fail_out: 6878 /* Unconfigure MSI-X capability structure */ 6879 pci_disable_msix(phba->pcidev); 6880 return rc; 6881} 6882 6883/** 6884 * lpfc_sli_disable_msix - Disable MSI-X interrupt mode on SLI-3 device. 6885 * @phba: pointer to lpfc hba data structure. 6886 * 6887 * This routine is invoked to release the MSI-X vectors and then disable the 6888 * MSI-X interrupt mode to device with SLI-3 interface spec. 6889 **/ 6890static void 6891lpfc_sli_disable_msix(struct lpfc_hba *phba) 6892{ 6893 int i; 6894 6895 /* Free up MSI-X multi-message vectors */ 6896 for (i = 0; i < LPFC_MSIX_VECTORS; i++) 6897 free_irq(phba->msix_entries[i].vector, phba); 6898 /* Disable MSI-X */ 6899 pci_disable_msix(phba->pcidev); 6900 6901 return; 6902} 6903 6904/** 6905 * lpfc_sli_enable_msi - Enable MSI interrupt mode on SLI-3 device. 6906 * @phba: pointer to lpfc hba data structure. 6907 * 6908 * This routine is invoked to enable the MSI interrupt mode to device with 6909 * SLI-3 interface spec. The kernel function pci_enable_msi() is called to 6910 * enable the MSI vector. The device driver is responsible for calling the 6911 * request_irq() to register MSI vector with a interrupt the handler, which 6912 * is done in this function. 6913 * 6914 * Return codes 6915 * 0 - successful 6916 * other values - error 6917 */ 6918static int 6919lpfc_sli_enable_msi(struct lpfc_hba *phba) 6920{ 6921 int rc; 6922 6923 rc = pci_enable_msi(phba->pcidev); 6924 if (!rc) 6925 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 6926 "0462 PCI enable MSI mode success.\n"); 6927 else { 6928 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 6929 "0471 PCI enable MSI mode failed (%d)\n", rc); 6930 return rc; 6931 } 6932 6933 rc = request_irq(phba->pcidev->irq, lpfc_sli_intr_handler, 6934 IRQF_SHARED, LPFC_DRIVER_NAME, phba); 6935 if (rc) { 6936 pci_disable_msi(phba->pcidev); 6937 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 6938 "0478 MSI request_irq failed (%d)\n", rc); 6939 } 6940 return rc; 6941} 6942 6943/** 6944 * lpfc_sli_disable_msi - Disable MSI interrupt mode to SLI-3 device. 6945 * @phba: pointer to lpfc hba data structure. 6946 * 6947 * This routine is invoked to disable the MSI interrupt mode to device with 6948 * SLI-3 interface spec. The driver calls free_irq() on MSI vector it has 6949 * done request_irq() on before calling pci_disable_msi(). Failure to do so 6950 * results in a BUG_ON() and a device will be left with MSI enabled and leaks 6951 * its vector. 6952 */ 6953static void 6954lpfc_sli_disable_msi(struct lpfc_hba *phba) 6955{ 6956 free_irq(phba->pcidev->irq, phba); 6957 pci_disable_msi(phba->pcidev); 6958 return; 6959} 6960 6961/** 6962 * lpfc_sli_enable_intr - Enable device interrupt to SLI-3 device. 6963 * @phba: pointer to lpfc hba data structure. 6964 * 6965 * This routine is invoked to enable device interrupt and associate driver's 6966 * interrupt handler(s) to interrupt vector(s) to device with SLI-3 interface 6967 * spec. Depends on the interrupt mode configured to the driver, the driver 6968 * will try to fallback from the configured interrupt mode to an interrupt 6969 * mode which is supported by the platform, kernel, and device in the order 6970 * of: 6971 * MSI-X -> MSI -> IRQ. 6972 * 6973 * Return codes 6974 * 0 - successful 6975 * other values - error 6976 **/ 6977static uint32_t 6978lpfc_sli_enable_intr(struct lpfc_hba *phba, uint32_t cfg_mode) 6979{ 6980 uint32_t intr_mode = LPFC_INTR_ERROR; 6981 int retval; 6982 6983 if (cfg_mode == 2) { 6984 /* Need to issue conf_port mbox cmd before conf_msi mbox cmd */ 6985 retval = lpfc_sli_config_port(phba, LPFC_SLI_REV3); 6986 if (!retval) { 6987 /* Now, try to enable MSI-X interrupt mode */ 6988 retval = lpfc_sli_enable_msix(phba); 6989 if (!retval) { 6990 /* Indicate initialization to MSI-X mode */ 6991 phba->intr_type = MSIX; 6992 intr_mode = 2; 6993 } 6994 } 6995 } 6996 6997 /* Fallback to MSI if MSI-X initialization failed */ 6998 if (cfg_mode >= 1 && phba->intr_type == NONE) { 6999 retval = lpfc_sli_enable_msi(phba); 7000 if (!retval) { 7001 /* Indicate initialization to MSI mode */ 7002 phba->intr_type = MSI; 7003 intr_mode = 1; 7004 } 7005 } 7006 7007 /* Fallback to INTx if both MSI-X/MSI initalization failed */ 7008 if (phba->intr_type == NONE) { 7009 retval = request_irq(phba->pcidev->irq, lpfc_sli_intr_handler, 7010 IRQF_SHARED, LPFC_DRIVER_NAME, phba); 7011 if (!retval) { 7012 /* Indicate initialization to INTx mode */ 7013 phba->intr_type = INTx; 7014 intr_mode = 0; 7015 } 7016 } 7017 return intr_mode; 7018} 7019 7020/** 7021 * lpfc_sli_disable_intr - Disable device interrupt to SLI-3 device. 7022 * @phba: pointer to lpfc hba data structure. 7023 * 7024 * This routine is invoked to disable device interrupt and disassociate the 7025 * driver's interrupt handler(s) from interrupt vector(s) to device with 7026 * SLI-3 interface spec. Depending on the interrupt mode, the driver will 7027 * release the interrupt vector(s) for the message signaled interrupt. 7028 **/ 7029static void 7030lpfc_sli_disable_intr(struct lpfc_hba *phba) 7031{ 7032 /* Disable the currently initialized interrupt mode */ 7033 if (phba->intr_type == MSIX) 7034 lpfc_sli_disable_msix(phba); 7035 else if (phba->intr_type == MSI) 7036 lpfc_sli_disable_msi(phba); 7037 else if (phba->intr_type == INTx) 7038 free_irq(phba->pcidev->irq, phba); 7039 7040 /* Reset interrupt management states */ 7041 phba->intr_type = NONE; 7042 phba->sli.slistat.sli_intr = 0; 7043 7044 return; 7045} 7046 7047/** 7048 * lpfc_sli4_enable_msix - Enable MSI-X interrupt mode to SLI-4 device 7049 * @phba: pointer to lpfc hba data structure. 7050 * 7051 * This routine is invoked to enable the MSI-X interrupt vectors to device 7052 * with SLI-4 interface spec. The kernel function pci_enable_msix() is called 7053 * to enable the MSI-X vectors. Note that pci_enable_msix(), once invoked, 7054 * enables either all or nothing, depending on the current availability of 7055 * PCI vector resources. The device driver is responsible for calling the 7056 * individual request_irq() to register each MSI-X vector with a interrupt 7057 * handler, which is done in this function. Note that later when device is 7058 * unloading, the driver should always call free_irq() on all MSI-X vectors 7059 * it has done request_irq() on before calling pci_disable_msix(). Failure 7060 * to do so results in a BUG_ON() and a device will be left with MSI-X 7061 * enabled and leaks its vectors. 7062 * 7063 * Return codes 7064 * 0 - successful 7065 * other values - error 7066 **/ 7067static int 7068lpfc_sli4_enable_msix(struct lpfc_hba *phba) 7069{ 7070 int vectors, rc, index; 7071 7072 /* Set up MSI-X multi-message vectors */ 7073 for (index = 0; index < phba->sli4_hba.cfg_eqn; index++) 7074 phba->sli4_hba.msix_entries[index].entry = index; 7075 7076 /* Configure MSI-X capability structure */ 7077 vectors = phba->sli4_hba.cfg_eqn; 7078enable_msix_vectors: 7079 rc = pci_enable_msix(phba->pcidev, phba->sli4_hba.msix_entries, 7080 vectors); 7081 if (rc > 1) { 7082 vectors = rc; 7083 goto enable_msix_vectors; 7084 } else if (rc) { 7085 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 7086 "0484 PCI enable MSI-X failed (%d)\n", rc); 7087 goto msi_fail_out; 7088 } 7089 7090 /* Log MSI-X vector assignment */ 7091 for (index = 0; index < vectors; index++) 7092 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 7093 "0489 MSI-X entry[%d]: vector=x%x " 7094 "message=%d\n", index, 7095 phba->sli4_hba.msix_entries[index].vector, 7096 phba->sli4_hba.msix_entries[index].entry); 7097 /* 7098 * Assign MSI-X vectors to interrupt handlers 7099 */ 7100 7101 /* The first vector must associated to slow-path handler for MQ */ 7102 rc = request_irq(phba->sli4_hba.msix_entries[0].vector, 7103 &lpfc_sli4_sp_intr_handler, IRQF_SHARED, 7104 LPFC_SP_DRIVER_HANDLER_NAME, phba); 7105 if (rc) { 7106 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 7107 "0485 MSI-X slow-path request_irq failed " 7108 "(%d)\n", rc); 7109 goto msi_fail_out; 7110 } 7111 7112 /* The rest of the vector(s) are associated to fast-path handler(s) */ 7113 for (index = 1; index < vectors; index++) { 7114 phba->sli4_hba.fcp_eq_hdl[index - 1].idx = index - 1; 7115 phba->sli4_hba.fcp_eq_hdl[index - 1].phba = phba; 7116 rc = request_irq(phba->sli4_hba.msix_entries[index].vector, 7117 &lpfc_sli4_fp_intr_handler, IRQF_SHARED, 7118 LPFC_FP_DRIVER_HANDLER_NAME, 7119 &phba->sli4_hba.fcp_eq_hdl[index - 1]); 7120 if (rc) { 7121 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 7122 "0486 MSI-X fast-path (%d) " 7123 "request_irq failed (%d)\n", index, rc); 7124 goto cfg_fail_out; 7125 } 7126 } 7127 phba->sli4_hba.msix_vec_nr = vectors; 7128 7129 return rc; 7130 7131cfg_fail_out: 7132 /* free the irq already requested */ 7133 for (--index; index >= 1; index--) 7134 free_irq(phba->sli4_hba.msix_entries[index - 1].vector, 7135 &phba->sli4_hba.fcp_eq_hdl[index - 1]); 7136 7137 /* free the irq already requested */ 7138 free_irq(phba->sli4_hba.msix_entries[0].vector, phba); 7139 7140msi_fail_out: 7141 /* Unconfigure MSI-X capability structure */ 7142 pci_disable_msix(phba->pcidev); 7143 return rc; 7144} 7145 7146/** 7147 * lpfc_sli4_disable_msix - Disable MSI-X interrupt mode to SLI-4 device 7148 * @phba: pointer to lpfc hba data structure. 7149 * 7150 * This routine is invoked to release the MSI-X vectors and then disable the 7151 * MSI-X interrupt mode to device with SLI-4 interface spec. 7152 **/ 7153static void 7154lpfc_sli4_disable_msix(struct lpfc_hba *phba) 7155{ 7156 int index; 7157 7158 /* Free up MSI-X multi-message vectors */ 7159 free_irq(phba->sli4_hba.msix_entries[0].vector, phba); 7160 7161 for (index = 1; index < phba->sli4_hba.msix_vec_nr; index++) 7162 free_irq(phba->sli4_hba.msix_entries[index].vector, 7163 &phba->sli4_hba.fcp_eq_hdl[index - 1]); 7164 7165 /* Disable MSI-X */ 7166 pci_disable_msix(phba->pcidev); 7167 7168 return; 7169} 7170 7171/** 7172 * lpfc_sli4_enable_msi - Enable MSI interrupt mode to SLI-4 device 7173 * @phba: pointer to lpfc hba data structure. 7174 * 7175 * This routine is invoked to enable the MSI interrupt mode to device with 7176 * SLI-4 interface spec. The kernel function pci_enable_msi() is called 7177 * to enable the MSI vector. The device driver is responsible for calling 7178 * the request_irq() to register MSI vector with a interrupt the handler, 7179 * which is done in this function. 7180 * 7181 * Return codes 7182 * 0 - successful 7183 * other values - error 7184 **/ 7185static int 7186lpfc_sli4_enable_msi(struct lpfc_hba *phba) 7187{ 7188 int rc, index; 7189 7190 rc = pci_enable_msi(phba->pcidev); 7191 if (!rc) 7192 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 7193 "0487 PCI enable MSI mode success.\n"); 7194 else { 7195 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 7196 "0488 PCI enable MSI mode failed (%d)\n", rc); 7197 return rc; 7198 } 7199 7200 rc = request_irq(phba->pcidev->irq, lpfc_sli4_intr_handler, 7201 IRQF_SHARED, LPFC_DRIVER_NAME, phba); 7202 if (rc) { 7203 pci_disable_msi(phba->pcidev); 7204 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 7205 "0490 MSI request_irq failed (%d)\n", rc); 7206 return rc; 7207 } 7208 7209 for (index = 0; index < phba->cfg_fcp_eq_count; index++) { 7210 phba->sli4_hba.fcp_eq_hdl[index].idx = index; 7211 phba->sli4_hba.fcp_eq_hdl[index].phba = phba; 7212 } 7213 7214 return 0; 7215} 7216 7217/** 7218 * lpfc_sli4_disable_msi - Disable MSI interrupt mode to SLI-4 device 7219 * @phba: pointer to lpfc hba data structure. 7220 * 7221 * This routine is invoked to disable the MSI interrupt mode to device with 7222 * SLI-4 interface spec. The driver calls free_irq() on MSI vector it has 7223 * done request_irq() on before calling pci_disable_msi(). Failure to do so 7224 * results in a BUG_ON() and a device will be left with MSI enabled and leaks 7225 * its vector. 7226 **/ 7227static void 7228lpfc_sli4_disable_msi(struct lpfc_hba *phba) 7229{ 7230 free_irq(phba->pcidev->irq, phba); 7231 pci_disable_msi(phba->pcidev); 7232 return; 7233} 7234 7235/** 7236 * lpfc_sli4_enable_intr - Enable device interrupt to SLI-4 device 7237 * @phba: pointer to lpfc hba data structure. 7238 * 7239 * This routine is invoked to enable device interrupt and associate driver's 7240 * interrupt handler(s) to interrupt vector(s) to device with SLI-4 7241 * interface spec. Depends on the interrupt mode configured to the driver, 7242 * the driver will try to fallback from the configured interrupt mode to an 7243 * interrupt mode which is supported by the platform, kernel, and device in 7244 * the order of: 7245 * MSI-X -> MSI -> IRQ. 7246 * 7247 * Return codes 7248 * 0 - successful 7249 * other values - error 7250 **/ 7251static uint32_t 7252lpfc_sli4_enable_intr(struct lpfc_hba *phba, uint32_t cfg_mode) 7253{ 7254 uint32_t intr_mode = LPFC_INTR_ERROR; 7255 int retval, index; 7256 7257 if (cfg_mode == 2) { 7258 /* Preparation before conf_msi mbox cmd */ 7259 retval = 0; 7260 if (!retval) { 7261 /* Now, try to enable MSI-X interrupt mode */ 7262 retval = lpfc_sli4_enable_msix(phba); 7263 if (!retval) { 7264 /* Indicate initialization to MSI-X mode */ 7265 phba->intr_type = MSIX; 7266 intr_mode = 2; 7267 } 7268 } 7269 } 7270 7271 /* Fallback to MSI if MSI-X initialization failed */ 7272 if (cfg_mode >= 1 && phba->intr_type == NONE) { 7273 retval = lpfc_sli4_enable_msi(phba); 7274 if (!retval) { 7275 /* Indicate initialization to MSI mode */ 7276 phba->intr_type = MSI; 7277 intr_mode = 1; 7278 } 7279 } 7280 7281 /* Fallback to INTx if both MSI-X/MSI initalization failed */ 7282 if (phba->intr_type == NONE) { 7283 retval = request_irq(phba->pcidev->irq, lpfc_sli4_intr_handler, 7284 IRQF_SHARED, LPFC_DRIVER_NAME, phba); 7285 if (!retval) { 7286 /* Indicate initialization to INTx mode */ 7287 phba->intr_type = INTx; 7288 intr_mode = 0; 7289 for (index = 0; index < phba->cfg_fcp_eq_count; 7290 index++) { 7291 phba->sli4_hba.fcp_eq_hdl[index].idx = index; 7292 phba->sli4_hba.fcp_eq_hdl[index].phba = phba; 7293 } 7294 } 7295 } 7296 return intr_mode; 7297} 7298 7299/** 7300 * lpfc_sli4_disable_intr - Disable device interrupt to SLI-4 device 7301 * @phba: pointer to lpfc hba data structure. 7302 * 7303 * This routine is invoked to disable device interrupt and disassociate 7304 * the driver's interrupt handler(s) from interrupt vector(s) to device 7305 * with SLI-4 interface spec. Depending on the interrupt mode, the driver 7306 * will release the interrupt vector(s) for the message signaled interrupt. 7307 **/ 7308static void 7309lpfc_sli4_disable_intr(struct lpfc_hba *phba) 7310{ 7311 /* Disable the currently initialized interrupt mode */ 7312 if (phba->intr_type == MSIX) 7313 lpfc_sli4_disable_msix(phba); 7314 else if (phba->intr_type == MSI) 7315 lpfc_sli4_disable_msi(phba); 7316 else if (phba->intr_type == INTx) 7317 free_irq(phba->pcidev->irq, phba); 7318 7319 /* Reset interrupt management states */ 7320 phba->intr_type = NONE; 7321 phba->sli.slistat.sli_intr = 0; 7322 7323 return; 7324} 7325 7326/** 7327 * lpfc_unset_hba - Unset SLI3 hba device initialization 7328 * @phba: pointer to lpfc hba data structure. 7329 * 7330 * This routine is invoked to unset the HBA device initialization steps to 7331 * a device with SLI-3 interface spec. 7332 **/ 7333static void 7334lpfc_unset_hba(struct lpfc_hba *phba) 7335{ 7336 struct lpfc_vport *vport = phba->pport; 7337 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 7338 7339 spin_lock_irq(shost->host_lock); 7340 vport->load_flag |= FC_UNLOADING; 7341 spin_unlock_irq(shost->host_lock); 7342 7343 lpfc_stop_hba_timers(phba); 7344 7345 phba->pport->work_port_events = 0; 7346 7347 lpfc_sli_hba_down(phba); 7348 7349 lpfc_sli_brdrestart(phba); 7350 7351 lpfc_sli_disable_intr(phba); 7352 7353 return; 7354} 7355 7356/** 7357 * lpfc_sli4_unset_hba - Unset SLI4 hba device initialization. 7358 * @phba: pointer to lpfc hba data structure. 7359 * 7360 * This routine is invoked to unset the HBA device initialization steps to 7361 * a device with SLI-4 interface spec. 7362 **/ 7363static void 7364lpfc_sli4_unset_hba(struct lpfc_hba *phba) 7365{ 7366 struct lpfc_vport *vport = phba->pport; 7367 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 7368 7369 spin_lock_irq(shost->host_lock); 7370 vport->load_flag |= FC_UNLOADING; 7371 spin_unlock_irq(shost->host_lock); 7372 7373 phba->pport->work_port_events = 0; 7374 7375 lpfc_sli4_hba_down(phba); 7376 7377 lpfc_sli4_disable_intr(phba); 7378 7379 return; 7380} 7381 7382/** 7383 * lpfc_sli4_hba_unset - Unset the fcoe hba 7384 * @phba: Pointer to HBA context object. 7385 * 7386 * This function is called in the SLI4 code path to reset the HBA's FCoE 7387 * function. The caller is not required to hold any lock. This routine 7388 * issues PCI function reset mailbox command to reset the FCoE function. 7389 * At the end of the function, it calls lpfc_hba_down_post function to 7390 * free any pending commands. 7391 **/ 7392static void 7393lpfc_sli4_hba_unset(struct lpfc_hba *phba) 7394{ 7395 int wait_cnt = 0; 7396 LPFC_MBOXQ_t *mboxq; 7397 7398 lpfc_stop_hba_timers(phba); 7399 phba->sli4_hba.intr_enable = 0; 7400 7401 /* 7402 * Gracefully wait out the potential current outstanding asynchronous 7403 * mailbox command. 7404 */ 7405 7406 /* First, block any pending async mailbox command from posted */ 7407 spin_lock_irq(&phba->hbalock); 7408 phba->sli.sli_flag |= LPFC_SLI_ASYNC_MBX_BLK; 7409 spin_unlock_irq(&phba->hbalock); 7410 /* Now, trying to wait it out if we can */ 7411 while (phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) { 7412 msleep(10); 7413 if (++wait_cnt > LPFC_ACTIVE_MBOX_WAIT_CNT) 7414 break; 7415 } 7416 /* Forcefully release the outstanding mailbox command if timed out */ 7417 if (phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) { 7418 spin_lock_irq(&phba->hbalock); 7419 mboxq = phba->sli.mbox_active; 7420 mboxq->u.mb.mbxStatus = MBX_NOT_FINISHED; 7421 __lpfc_mbox_cmpl_put(phba, mboxq); 7422 phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; 7423 phba->sli.mbox_active = NULL; 7424 spin_unlock_irq(&phba->hbalock); 7425 } 7426 7427 /* Tear down the queues in the HBA */ 7428 lpfc_sli4_queue_unset(phba); 7429 7430 /* Disable PCI subsystem interrupt */ 7431 lpfc_sli4_disable_intr(phba); 7432 7433 /* Stop kthread signal shall trigger work_done one more time */ 7434 kthread_stop(phba->worker_thread); 7435 7436 /* Stop the SLI4 device port */ 7437 phba->pport->work_port_events = 0; 7438} 7439 7440 /** 7441 * lpfc_pc_sli4_params_get - Get the SLI4_PARAMS port capabilities. 7442 * @phba: Pointer to HBA context object. 7443 * @mboxq: Pointer to the mailboxq memory for the mailbox command response. 7444 * 7445 * This function is called in the SLI4 code path to read the port's 7446 * sli4 capabilities. 7447 * 7448 * This function may be be called from any context that can block-wait 7449 * for the completion. The expectation is that this routine is called 7450 * typically from probe_one or from the online routine. 7451 **/ 7452int 7453lpfc_pc_sli4_params_get(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq) 7454{ 7455 int rc; 7456 struct lpfc_mqe *mqe; 7457 struct lpfc_pc_sli4_params *sli4_params; 7458 uint32_t mbox_tmo; 7459 7460 rc = 0; 7461 mqe = &mboxq->u.mqe; 7462 7463 /* Read the port's SLI4 Parameters port capabilities */ 7464 lpfc_sli4_params(mboxq); 7465 if (!phba->sli4_hba.intr_enable) 7466 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); 7467 else { 7468 mbox_tmo = lpfc_mbox_tmo_val(phba, MBX_PORT_CAPABILITIES); 7469 rc = lpfc_sli_issue_mbox_wait(phba, mboxq, mbox_tmo); 7470 } 7471 7472 if (unlikely(rc)) 7473 return 1; 7474 7475 sli4_params = &phba->sli4_hba.pc_sli4_params; 7476 sli4_params->if_type = bf_get(if_type, &mqe->un.sli4_params); 7477 sli4_params->sli_rev = bf_get(sli_rev, &mqe->un.sli4_params); 7478 sli4_params->sli_family = bf_get(sli_family, &mqe->un.sli4_params); 7479 sli4_params->featurelevel_1 = bf_get(featurelevel_1, 7480 &mqe->un.sli4_params); 7481 sli4_params->featurelevel_2 = bf_get(featurelevel_2, 7482 &mqe->un.sli4_params); 7483 sli4_params->proto_types = mqe->un.sli4_params.word3; 7484 sli4_params->sge_supp_len = mqe->un.sli4_params.sge_supp_len; 7485 sli4_params->if_page_sz = bf_get(if_page_sz, &mqe->un.sli4_params); 7486 sli4_params->rq_db_window = bf_get(rq_db_window, &mqe->un.sli4_params); 7487 sli4_params->loopbk_scope = bf_get(loopbk_scope, &mqe->un.sli4_params); 7488 sli4_params->eq_pages_max = bf_get(eq_pages, &mqe->un.sli4_params); 7489 sli4_params->eqe_size = bf_get(eqe_size, &mqe->un.sli4_params); 7490 sli4_params->cq_pages_max = bf_get(cq_pages, &mqe->un.sli4_params); 7491 sli4_params->cqe_size = bf_get(cqe_size, &mqe->un.sli4_params); 7492 sli4_params->mq_pages_max = bf_get(mq_pages, &mqe->un.sli4_params); 7493 sli4_params->mqe_size = bf_get(mqe_size, &mqe->un.sli4_params); 7494 sli4_params->mq_elem_cnt = bf_get(mq_elem_cnt, &mqe->un.sli4_params); 7495 sli4_params->wq_pages_max = bf_get(wq_pages, &mqe->un.sli4_params); 7496 sli4_params->wqe_size = bf_get(wqe_size, &mqe->un.sli4_params); 7497 sli4_params->rq_pages_max = bf_get(rq_pages, &mqe->un.sli4_params); 7498 sli4_params->rqe_size = bf_get(rqe_size, &mqe->un.sli4_params); 7499 sli4_params->hdr_pages_max = bf_get(hdr_pages, &mqe->un.sli4_params); 7500 sli4_params->hdr_size = bf_get(hdr_size, &mqe->un.sli4_params); 7501 sli4_params->hdr_pp_align = bf_get(hdr_pp_align, &mqe->un.sli4_params); 7502 sli4_params->sgl_pages_max = bf_get(sgl_pages, &mqe->un.sli4_params); 7503 sli4_params->sgl_pp_align = bf_get(sgl_pp_align, &mqe->un.sli4_params); 7504 return rc; 7505} 7506 7507/** 7508 * lpfc_pci_probe_one_s3 - PCI probe func to reg SLI-3 device to PCI subsystem. 7509 * @pdev: pointer to PCI device 7510 * @pid: pointer to PCI device identifier 7511 * 7512 * This routine is to be called to attach a device with SLI-3 interface spec 7513 * to the PCI subsystem. When an Emulex HBA with SLI-3 interface spec is 7514 * presented on PCI bus, the kernel PCI subsystem looks at PCI device-specific 7515 * information of the device and driver to see if the driver state that it can 7516 * support this kind of device. If the match is successful, the driver core 7517 * invokes this routine. If this routine determines it can claim the HBA, it 7518 * does all the initialization that it needs to do to handle the HBA properly. 7519 * 7520 * Return code 7521 * 0 - driver can claim the device 7522 * negative value - driver can not claim the device 7523 **/ 7524static int __devinit 7525lpfc_pci_probe_one_s3(struct pci_dev *pdev, const struct pci_device_id *pid) 7526{ 7527 struct lpfc_hba *phba; 7528 struct lpfc_vport *vport = NULL; 7529 struct Scsi_Host *shost = NULL; 7530 int error; 7531 uint32_t cfg_mode, intr_mode; 7532 7533 /* Allocate memory for HBA structure */ 7534 phba = lpfc_hba_alloc(pdev); 7535 if (!phba) 7536 return -ENOMEM; 7537 7538 /* Perform generic PCI device enabling operation */ 7539 error = lpfc_enable_pci_dev(phba); 7540 if (error) { 7541 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7542 "1401 Failed to enable pci device.\n"); 7543 goto out_free_phba; 7544 } 7545 7546 /* Set up SLI API function jump table for PCI-device group-0 HBAs */ 7547 error = lpfc_api_table_setup(phba, LPFC_PCI_DEV_LP); 7548 if (error) 7549 goto out_disable_pci_dev; 7550 7551 /* Set up SLI-3 specific device PCI memory space */ 7552 error = lpfc_sli_pci_mem_setup(phba); 7553 if (error) { 7554 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7555 "1402 Failed to set up pci memory space.\n"); 7556 goto out_disable_pci_dev; 7557 } 7558 7559 /* Set up phase-1 common device driver resources */ 7560 error = lpfc_setup_driver_resource_phase1(phba); 7561 if (error) { 7562 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7563 "1403 Failed to set up driver resource.\n"); 7564 goto out_unset_pci_mem_s3; 7565 } 7566 7567 /* Set up SLI-3 specific device driver resources */ 7568 error = lpfc_sli_driver_resource_setup(phba); 7569 if (error) { 7570 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7571 "1404 Failed to set up driver resource.\n"); 7572 goto out_unset_pci_mem_s3; 7573 } 7574 7575 /* Initialize and populate the iocb list per host */ 7576 error = lpfc_init_iocb_list(phba, LPFC_IOCB_LIST_CNT); 7577 if (error) { 7578 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7579 "1405 Failed to initialize iocb list.\n"); 7580 goto out_unset_driver_resource_s3; 7581 } 7582 7583 /* Set up common device driver resources */ 7584 error = lpfc_setup_driver_resource_phase2(phba); 7585 if (error) { 7586 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7587 "1406 Failed to set up driver resource.\n"); 7588 goto out_free_iocb_list; 7589 } 7590 7591 /* Create SCSI host to the physical port */ 7592 error = lpfc_create_shost(phba); 7593 if (error) { 7594 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7595 "1407 Failed to create scsi host.\n"); 7596 goto out_unset_driver_resource; 7597 } 7598 7599 /* Configure sysfs attributes */ 7600 vport = phba->pport; 7601 error = lpfc_alloc_sysfs_attr(vport); 7602 if (error) { 7603 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7604 "1476 Failed to allocate sysfs attr\n"); 7605 goto out_destroy_shost; 7606 } 7607 7608 shost = lpfc_shost_from_vport(vport); /* save shost for error cleanup */ 7609 /* Now, trying to enable interrupt and bring up the device */ 7610 cfg_mode = phba->cfg_use_msi; 7611 while (true) { 7612 /* Put device to a known state before enabling interrupt */ 7613 lpfc_stop_port(phba); 7614 /* Configure and enable interrupt */ 7615 intr_mode = lpfc_sli_enable_intr(phba, cfg_mode); 7616 if (intr_mode == LPFC_INTR_ERROR) { 7617 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7618 "0431 Failed to enable interrupt.\n"); 7619 error = -ENODEV; 7620 goto out_free_sysfs_attr; 7621 } 7622 /* SLI-3 HBA setup */ 7623 if (lpfc_sli_hba_setup(phba)) { 7624 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7625 "1477 Failed to set up hba\n"); 7626 error = -ENODEV; 7627 goto out_remove_device; 7628 } 7629 7630 /* Wait 50ms for the interrupts of previous mailbox commands */ 7631 msleep(50); 7632 /* Check active interrupts on message signaled interrupts */ 7633 if (intr_mode == 0 || 7634 phba->sli.slistat.sli_intr > LPFC_MSIX_VECTORS) { 7635 /* Log the current active interrupt mode */ 7636 phba->intr_mode = intr_mode; 7637 lpfc_log_intr_mode(phba, intr_mode); 7638 break; 7639 } else { 7640 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 7641 "0447 Configure interrupt mode (%d) " 7642 "failed active interrupt test.\n", 7643 intr_mode); 7644 /* Disable the current interrupt mode */ 7645 lpfc_sli_disable_intr(phba); 7646 /* Try next level of interrupt mode */ 7647 cfg_mode = --intr_mode; 7648 } 7649 } 7650 7651 /* Perform post initialization setup */ 7652 lpfc_post_init_setup(phba); 7653 7654 /* Check if there are static vports to be created. */ 7655 lpfc_create_static_vport(phba); 7656 7657 return 0; 7658 7659out_remove_device: 7660 lpfc_unset_hba(phba); 7661out_free_sysfs_attr: 7662 lpfc_free_sysfs_attr(vport); 7663out_destroy_shost: 7664 lpfc_destroy_shost(phba); 7665out_unset_driver_resource: 7666 lpfc_unset_driver_resource_phase2(phba); 7667out_free_iocb_list: 7668 lpfc_free_iocb_list(phba); 7669out_unset_driver_resource_s3: 7670 lpfc_sli_driver_resource_unset(phba); 7671out_unset_pci_mem_s3: 7672 lpfc_sli_pci_mem_unset(phba); 7673out_disable_pci_dev: 7674 lpfc_disable_pci_dev(phba); 7675 if (shost) 7676 scsi_host_put(shost); 7677out_free_phba: 7678 lpfc_hba_free(phba); 7679 return error; 7680} 7681 7682/** 7683 * lpfc_pci_remove_one_s3 - PCI func to unreg SLI-3 device from PCI subsystem. 7684 * @pdev: pointer to PCI device 7685 * 7686 * This routine is to be called to disattach a device with SLI-3 interface 7687 * spec from PCI subsystem. When an Emulex HBA with SLI-3 interface spec is 7688 * removed from PCI bus, it performs all the necessary cleanup for the HBA 7689 * device to be removed from the PCI subsystem properly. 7690 **/ 7691static void __devexit 7692lpfc_pci_remove_one_s3(struct pci_dev *pdev) 7693{ 7694 struct Scsi_Host *shost = pci_get_drvdata(pdev); 7695 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; 7696 struct lpfc_vport **vports; 7697 struct lpfc_hba *phba = vport->phba; 7698 int i; 7699 int bars = pci_select_bars(pdev, IORESOURCE_MEM); 7700 7701 spin_lock_irq(&phba->hbalock); 7702 vport->load_flag |= FC_UNLOADING; 7703 spin_unlock_irq(&phba->hbalock); 7704 7705 lpfc_free_sysfs_attr(vport); 7706 7707 /* Release all the vports against this physical port */ 7708 vports = lpfc_create_vport_work_array(phba); 7709 if (vports != NULL) 7710 for (i = 1; i <= phba->max_vports && vports[i] != NULL; i++) 7711 fc_vport_terminate(vports[i]->fc_vport); 7712 lpfc_destroy_vport_work_array(phba, vports); 7713 7714 /* Remove FC host and then SCSI host with the physical port */ 7715 fc_remove_host(shost); 7716 scsi_remove_host(shost); 7717 lpfc_cleanup(vport); 7718 7719 /* 7720 * Bring down the SLI Layer. This step disable all interrupts, 7721 * clears the rings, discards all mailbox commands, and resets 7722 * the HBA. 7723 */ 7724 7725 /* HBA interrupt will be diabled after this call */ 7726 lpfc_sli_hba_down(phba); 7727 /* Stop kthread signal shall trigger work_done one more time */ 7728 kthread_stop(phba->worker_thread); 7729 /* Final cleanup of txcmplq and reset the HBA */ 7730 lpfc_sli_brdrestart(phba); 7731 7732 lpfc_stop_hba_timers(phba); 7733 spin_lock_irq(&phba->hbalock); 7734 list_del_init(&vport->listentry); 7735 spin_unlock_irq(&phba->hbalock); 7736 7737 lpfc_debugfs_terminate(vport); 7738 7739 /* Disable interrupt */ 7740 lpfc_sli_disable_intr(phba); 7741 7742 pci_set_drvdata(pdev, NULL); 7743 scsi_host_put(shost); 7744 7745 /* 7746 * Call scsi_free before mem_free since scsi bufs are released to their 7747 * corresponding pools here. 7748 */ 7749 lpfc_scsi_free(phba); 7750 lpfc_mem_free_all(phba); 7751 7752 dma_free_coherent(&pdev->dev, lpfc_sli_hbq_size(), 7753 phba->hbqslimp.virt, phba->hbqslimp.phys); 7754 7755 /* Free resources associated with SLI2 interface */ 7756 dma_free_coherent(&pdev->dev, SLI2_SLIM_SIZE, 7757 phba->slim2p.virt, phba->slim2p.phys); 7758 7759 /* unmap adapter SLIM and Control Registers */ 7760 iounmap(phba->ctrl_regs_memmap_p); 7761 iounmap(phba->slim_memmap_p); 7762 7763 lpfc_hba_free(phba); 7764 7765 pci_release_selected_regions(pdev, bars); 7766 pci_disable_device(pdev); 7767} 7768 7769/** 7770 * lpfc_pci_suspend_one_s3 - PCI func to suspend SLI-3 device for power mgmnt 7771 * @pdev: pointer to PCI device 7772 * @msg: power management message 7773 * 7774 * This routine is to be called from the kernel's PCI subsystem to support 7775 * system Power Management (PM) to device with SLI-3 interface spec. When 7776 * PM invokes this method, it quiesces the device by stopping the driver's 7777 * worker thread for the device, turning off device's interrupt and DMA, 7778 * and bring the device offline. Note that as the driver implements the 7779 * minimum PM requirements to a power-aware driver's PM support for the 7780 * suspend/resume -- all the possible PM messages (SUSPEND, HIBERNATE, FREEZE) 7781 * to the suspend() method call will be treated as SUSPEND and the driver will 7782 * fully reinitialize its device during resume() method call, the driver will 7783 * set device to PCI_D3hot state in PCI config space instead of setting it 7784 * according to the @msg provided by the PM. 7785 * 7786 * Return code 7787 * 0 - driver suspended the device 7788 * Error otherwise 7789 **/ 7790static int 7791lpfc_pci_suspend_one_s3(struct pci_dev *pdev, pm_message_t msg) 7792{ 7793 struct Scsi_Host *shost = pci_get_drvdata(pdev); 7794 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 7795 7796 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 7797 "0473 PCI device Power Management suspend.\n"); 7798 7799 /* Bring down the device */ 7800 lpfc_offline_prep(phba); 7801 lpfc_offline(phba); 7802 kthread_stop(phba->worker_thread); 7803 7804 /* Disable interrupt from device */ 7805 lpfc_sli_disable_intr(phba); 7806 7807 /* Save device state to PCI config space */ 7808 pci_save_state(pdev); 7809 pci_set_power_state(pdev, PCI_D3hot); 7810 7811 return 0; 7812} 7813 7814/** 7815 * lpfc_pci_resume_one_s3 - PCI func to resume SLI-3 device for power mgmnt 7816 * @pdev: pointer to PCI device 7817 * 7818 * This routine is to be called from the kernel's PCI subsystem to support 7819 * system Power Management (PM) to device with SLI-3 interface spec. When PM 7820 * invokes this method, it restores the device's PCI config space state and 7821 * fully reinitializes the device and brings it online. Note that as the 7822 * driver implements the minimum PM requirements to a power-aware driver's 7823 * PM for suspend/resume -- all the possible PM messages (SUSPEND, HIBERNATE, 7824 * FREEZE) to the suspend() method call will be treated as SUSPEND and the 7825 * driver will fully reinitialize its device during resume() method call, 7826 * the device will be set to PCI_D0 directly in PCI config space before 7827 * restoring the state. 7828 * 7829 * Return code 7830 * 0 - driver suspended the device 7831 * Error otherwise 7832 **/ 7833static int 7834lpfc_pci_resume_one_s3(struct pci_dev *pdev) 7835{ 7836 struct Scsi_Host *shost = pci_get_drvdata(pdev); 7837 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 7838 uint32_t intr_mode; 7839 int error; 7840 7841 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 7842 "0452 PCI device Power Management resume.\n"); 7843 7844 /* Restore device state from PCI config space */ 7845 pci_set_power_state(pdev, PCI_D0); 7846 pci_restore_state(pdev); 7847 7848 /* 7849 * As the new kernel behavior of pci_restore_state() API call clears 7850 * device saved_state flag, need to save the restored state again. 7851 */ 7852 pci_save_state(pdev); 7853 7854 if (pdev->is_busmaster) 7855 pci_set_master(pdev); 7856 7857 /* Startup the kernel thread for this host adapter. */ 7858 phba->worker_thread = kthread_run(lpfc_do_work, phba, 7859 "lpfc_worker_%d", phba->brd_no); 7860 if (IS_ERR(phba->worker_thread)) { 7861 error = PTR_ERR(phba->worker_thread); 7862 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7863 "0434 PM resume failed to start worker " 7864 "thread: error=x%x.\n", error); 7865 return error; 7866 } 7867 7868 /* Configure and enable interrupt */ 7869 intr_mode = lpfc_sli_enable_intr(phba, phba->intr_mode); 7870 if (intr_mode == LPFC_INTR_ERROR) { 7871 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7872 "0430 PM resume Failed to enable interrupt\n"); 7873 return -EIO; 7874 } else 7875 phba->intr_mode = intr_mode; 7876 7877 /* Restart HBA and bring it online */ 7878 lpfc_sli_brdrestart(phba); 7879 lpfc_online(phba); 7880 7881 /* Log the current active interrupt mode */ 7882 lpfc_log_intr_mode(phba, phba->intr_mode); 7883 7884 return 0; 7885} 7886 7887/** 7888 * lpfc_sli_prep_dev_for_recover - Prepare SLI3 device for pci slot recover 7889 * @phba: pointer to lpfc hba data structure. 7890 * 7891 * This routine is called to prepare the SLI3 device for PCI slot recover. It 7892 * aborts all the outstanding SCSI I/Os to the pci device. 7893 **/ 7894static void 7895lpfc_sli_prep_dev_for_recover(struct lpfc_hba *phba) 7896{ 7897 struct lpfc_sli *psli = &phba->sli; 7898 struct lpfc_sli_ring *pring; 7899 7900 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7901 "2723 PCI channel I/O abort preparing for recovery\n"); 7902 7903 /* 7904 * There may be errored I/Os through HBA, abort all I/Os on txcmplq 7905 * and let the SCSI mid-layer to retry them to recover. 7906 */ 7907 pring = &psli->ring[psli->fcp_ring]; 7908 lpfc_sli_abort_iocb_ring(phba, pring); 7909} 7910 7911/** 7912 * lpfc_sli_prep_dev_for_reset - Prepare SLI3 device for pci slot reset 7913 * @phba: pointer to lpfc hba data structure. 7914 * 7915 * This routine is called to prepare the SLI3 device for PCI slot reset. It 7916 * disables the device interrupt and pci device, and aborts the internal FCP 7917 * pending I/Os. 7918 **/ 7919static void 7920lpfc_sli_prep_dev_for_reset(struct lpfc_hba *phba) 7921{ 7922 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7923 "2710 PCI channel disable preparing for reset\n"); 7924 7925 /* Block any management I/Os to the device */ 7926 lpfc_block_mgmt_io(phba); 7927 7928 /* Block all SCSI devices' I/Os on the host */ 7929 lpfc_scsi_dev_block(phba); 7930 7931 /* stop all timers */ 7932 lpfc_stop_hba_timers(phba); 7933 7934 /* Disable interrupt and pci device */ 7935 lpfc_sli_disable_intr(phba); 7936 pci_disable_device(phba->pcidev); 7937 7938 /* Flush all driver's outstanding SCSI I/Os as we are to reset */ 7939 lpfc_sli_flush_fcp_rings(phba); 7940} 7941 7942/** 7943 * lpfc_sli_prep_dev_for_perm_failure - Prepare SLI3 dev for pci slot disable 7944 * @phba: pointer to lpfc hba data structure. 7945 * 7946 * This routine is called to prepare the SLI3 device for PCI slot permanently 7947 * disabling. It blocks the SCSI transport layer traffic and flushes the FCP 7948 * pending I/Os. 7949 **/ 7950static void 7951lpfc_sli_prep_dev_for_perm_failure(struct lpfc_hba *phba) 7952{ 7953 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7954 "2711 PCI channel permanent disable for failure\n"); 7955 /* Block all SCSI devices' I/Os on the host */ 7956 lpfc_scsi_dev_block(phba); 7957 7958 /* stop all timers */ 7959 lpfc_stop_hba_timers(phba); 7960 7961 /* Clean up all driver's outstanding SCSI I/Os */ 7962 lpfc_sli_flush_fcp_rings(phba); 7963} 7964 7965/** 7966 * lpfc_io_error_detected_s3 - Method for handling SLI-3 device PCI I/O error 7967 * @pdev: pointer to PCI device. 7968 * @state: the current PCI connection state. 7969 * 7970 * This routine is called from the PCI subsystem for I/O error handling to 7971 * device with SLI-3 interface spec. This function is called by the PCI 7972 * subsystem after a PCI bus error affecting this device has been detected. 7973 * When this function is invoked, it will need to stop all the I/Os and 7974 * interrupt(s) to the device. Once that is done, it will return 7975 * PCI_ERS_RESULT_NEED_RESET for the PCI subsystem to perform proper recovery 7976 * as desired. 7977 * 7978 * Return codes 7979 * PCI_ERS_RESULT_CAN_RECOVER - can be recovered with reset_link 7980 * PCI_ERS_RESULT_NEED_RESET - need to reset before recovery 7981 * PCI_ERS_RESULT_DISCONNECT - device could not be recovered 7982 **/ 7983static pci_ers_result_t 7984lpfc_io_error_detected_s3(struct pci_dev *pdev, pci_channel_state_t state) 7985{ 7986 struct Scsi_Host *shost = pci_get_drvdata(pdev); 7987 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 7988 7989 switch (state) { 7990 case pci_channel_io_normal: 7991 /* Non-fatal error, prepare for recovery */ 7992 lpfc_sli_prep_dev_for_recover(phba); 7993 return PCI_ERS_RESULT_CAN_RECOVER; 7994 case pci_channel_io_frozen: 7995 /* Fatal error, prepare for slot reset */ 7996 lpfc_sli_prep_dev_for_reset(phba); 7997 return PCI_ERS_RESULT_NEED_RESET; 7998 case pci_channel_io_perm_failure: 7999 /* Permanent failure, prepare for device down */ 8000 lpfc_sli_prep_dev_for_perm_failure(phba); 8001 return PCI_ERS_RESULT_DISCONNECT; 8002 default: 8003 /* Unknown state, prepare and request slot reset */ 8004 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8005 "0472 Unknown PCI error state: x%x\n", state); 8006 lpfc_sli_prep_dev_for_reset(phba); 8007 return PCI_ERS_RESULT_NEED_RESET; 8008 } 8009} 8010 8011/** 8012 * lpfc_io_slot_reset_s3 - Method for restarting PCI SLI-3 device from scratch. 8013 * @pdev: pointer to PCI device. 8014 * 8015 * This routine is called from the PCI subsystem for error handling to 8016 * device with SLI-3 interface spec. This is called after PCI bus has been 8017 * reset to restart the PCI card from scratch, as if from a cold-boot. 8018 * During the PCI subsystem error recovery, after driver returns 8019 * PCI_ERS_RESULT_NEED_RESET, the PCI subsystem will perform proper error 8020 * recovery and then call this routine before calling the .resume method 8021 * to recover the device. This function will initialize the HBA device, 8022 * enable the interrupt, but it will just put the HBA to offline state 8023 * without passing any I/O traffic. 8024 * 8025 * Return codes 8026 * PCI_ERS_RESULT_RECOVERED - the device has been recovered 8027 * PCI_ERS_RESULT_DISCONNECT - device could not be recovered 8028 */ 8029static pci_ers_result_t 8030lpfc_io_slot_reset_s3(struct pci_dev *pdev) 8031{ 8032 struct Scsi_Host *shost = pci_get_drvdata(pdev); 8033 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 8034 struct lpfc_sli *psli = &phba->sli; 8035 uint32_t intr_mode; 8036 8037 dev_printk(KERN_INFO, &pdev->dev, "recovering from a slot reset.\n"); 8038 if (pci_enable_device_mem(pdev)) { 8039 printk(KERN_ERR "lpfc: Cannot re-enable " 8040 "PCI device after reset.\n"); 8041 return PCI_ERS_RESULT_DISCONNECT; 8042 } 8043 8044 pci_restore_state(pdev); 8045 8046 /* 8047 * As the new kernel behavior of pci_restore_state() API call clears 8048 * device saved_state flag, need to save the restored state again. 8049 */ 8050 pci_save_state(pdev); 8051 8052 if (pdev->is_busmaster) 8053 pci_set_master(pdev); 8054 8055 spin_lock_irq(&phba->hbalock); 8056 psli->sli_flag &= ~LPFC_SLI_ACTIVE; 8057 spin_unlock_irq(&phba->hbalock); 8058 8059 /* Configure and enable interrupt */ 8060 intr_mode = lpfc_sli_enable_intr(phba, phba->intr_mode); 8061 if (intr_mode == LPFC_INTR_ERROR) { 8062 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8063 "0427 Cannot re-enable interrupt after " 8064 "slot reset.\n"); 8065 return PCI_ERS_RESULT_DISCONNECT; 8066 } else 8067 phba->intr_mode = intr_mode; 8068 8069 /* Take device offline, it will perform cleanup */ 8070 lpfc_offline_prep(phba); 8071 lpfc_offline(phba); 8072 lpfc_sli_brdrestart(phba); 8073 8074 /* Log the current active interrupt mode */ 8075 lpfc_log_intr_mode(phba, phba->intr_mode); 8076 8077 return PCI_ERS_RESULT_RECOVERED; 8078} 8079 8080/** 8081 * lpfc_io_resume_s3 - Method for resuming PCI I/O operation on SLI-3 device. 8082 * @pdev: pointer to PCI device 8083 * 8084 * This routine is called from the PCI subsystem for error handling to device 8085 * with SLI-3 interface spec. It is called when kernel error recovery tells 8086 * the lpfc driver that it is ok to resume normal PCI operation after PCI bus 8087 * error recovery. After this call, traffic can start to flow from this device 8088 * again. 8089 */ 8090static void 8091lpfc_io_resume_s3(struct pci_dev *pdev) 8092{ 8093 struct Scsi_Host *shost = pci_get_drvdata(pdev); 8094 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 8095 8096 /* Bring device online, it will be no-op for non-fatal error resume */ 8097 lpfc_online(phba); 8098 8099 /* Clean up Advanced Error Reporting (AER) if needed */ 8100 if (phba->hba_flag & HBA_AER_ENABLED) 8101 pci_cleanup_aer_uncorrect_error_status(pdev); 8102} 8103 8104/** 8105 * lpfc_sli4_get_els_iocb_cnt - Calculate the # of ELS IOCBs to reserve 8106 * @phba: pointer to lpfc hba data structure. 8107 * 8108 * returns the number of ELS/CT IOCBs to reserve 8109 **/ 8110int 8111lpfc_sli4_get_els_iocb_cnt(struct lpfc_hba *phba) 8112{ 8113 int max_xri = phba->sli4_hba.max_cfg_param.max_xri; 8114 8115 if (phba->sli_rev == LPFC_SLI_REV4) { 8116 if (max_xri <= 100) 8117 return 10; 8118 else if (max_xri <= 256) 8119 return 25; 8120 else if (max_xri <= 512) 8121 return 50; 8122 else if (max_xri <= 1024) 8123 return 100; 8124 else 8125 return 150; 8126 } else 8127 return 0; 8128} 8129 8130/** 8131 * lpfc_pci_probe_one_s4 - PCI probe func to reg SLI-4 device to PCI subsys 8132 * @pdev: pointer to PCI device 8133 * @pid: pointer to PCI device identifier 8134 * 8135 * This routine is called from the kernel's PCI subsystem to device with 8136 * SLI-4 interface spec. When an Emulex HBA with SLI-4 interface spec is 8137 * presented on PCI bus, the kernel PCI subsystem looks at PCI device-specific 8138 * information of the device and driver to see if the driver state that it 8139 * can support this kind of device. If the match is successful, the driver 8140 * core invokes this routine. If this routine determines it can claim the HBA, 8141 * it does all the initialization that it needs to do to handle the HBA 8142 * properly. 8143 * 8144 * Return code 8145 * 0 - driver can claim the device 8146 * negative value - driver can not claim the device 8147 **/ 8148static int __devinit 8149lpfc_pci_probe_one_s4(struct pci_dev *pdev, const struct pci_device_id *pid) 8150{ 8151 struct lpfc_hba *phba; 8152 struct lpfc_vport *vport = NULL; 8153 struct Scsi_Host *shost = NULL; 8154 int error; 8155 uint32_t cfg_mode, intr_mode; 8156 int mcnt; 8157 8158 /* Allocate memory for HBA structure */ 8159 phba = lpfc_hba_alloc(pdev); 8160 if (!phba) 8161 return -ENOMEM; 8162 8163 /* Perform generic PCI device enabling operation */ 8164 error = lpfc_enable_pci_dev(phba); 8165 if (error) { 8166 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8167 "1409 Failed to enable pci device.\n"); 8168 goto out_free_phba; 8169 } 8170 8171 /* Set up SLI API function jump table for PCI-device group-1 HBAs */ 8172 error = lpfc_api_table_setup(phba, LPFC_PCI_DEV_OC); 8173 if (error) 8174 goto out_disable_pci_dev; 8175 8176 /* Set up SLI-4 specific device PCI memory space */ 8177 error = lpfc_sli4_pci_mem_setup(phba); 8178 if (error) { 8179 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8180 "1410 Failed to set up pci memory space.\n"); 8181 goto out_disable_pci_dev; 8182 } 8183 8184 /* Set up phase-1 common device driver resources */ 8185 error = lpfc_setup_driver_resource_phase1(phba); 8186 if (error) { 8187 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8188 "1411 Failed to set up driver resource.\n"); 8189 goto out_unset_pci_mem_s4; 8190 } 8191 8192 /* Set up SLI-4 Specific device driver resources */ 8193 error = lpfc_sli4_driver_resource_setup(phba); 8194 if (error) { 8195 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8196 "1412 Failed to set up driver resource.\n"); 8197 goto out_unset_pci_mem_s4; 8198 } 8199 8200 /* Initialize and populate the iocb list per host */ 8201 8202 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 8203 "2821 initialize iocb list %d.\n", 8204 phba->cfg_iocb_cnt*1024); 8205 error = lpfc_init_iocb_list(phba, phba->cfg_iocb_cnt*1024); 8206 8207 if (error) { 8208 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8209 "1413 Failed to initialize iocb list.\n"); 8210 goto out_unset_driver_resource_s4; 8211 } 8212 8213 /* Set up common device driver resources */ 8214 error = lpfc_setup_driver_resource_phase2(phba); 8215 if (error) { 8216 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8217 "1414 Failed to set up driver resource.\n"); 8218 goto out_free_iocb_list; 8219 } 8220 8221 /* Create SCSI host to the physical port */ 8222 error = lpfc_create_shost(phba); 8223 if (error) { 8224 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8225 "1415 Failed to create scsi host.\n"); 8226 goto out_unset_driver_resource; 8227 } 8228 8229 /* Configure sysfs attributes */ 8230 vport = phba->pport; 8231 error = lpfc_alloc_sysfs_attr(vport); 8232 if (error) { 8233 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8234 "1416 Failed to allocate sysfs attr\n"); 8235 goto out_destroy_shost; 8236 } 8237 8238 shost = lpfc_shost_from_vport(vport); /* save shost for error cleanup */ 8239 /* Now, trying to enable interrupt and bring up the device */ 8240 cfg_mode = phba->cfg_use_msi; 8241 while (true) { 8242 /* Put device to a known state before enabling interrupt */ 8243 lpfc_stop_port(phba); 8244 /* Configure and enable interrupt */ 8245 intr_mode = lpfc_sli4_enable_intr(phba, cfg_mode); 8246 if (intr_mode == LPFC_INTR_ERROR) { 8247 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8248 "0426 Failed to enable interrupt.\n"); 8249 error = -ENODEV; 8250 goto out_free_sysfs_attr; 8251 } 8252 /* Default to single FCP EQ for non-MSI-X */ 8253 if (phba->intr_type != MSIX) 8254 phba->cfg_fcp_eq_count = 1; 8255 else if (phba->sli4_hba.msix_vec_nr < phba->cfg_fcp_eq_count) 8256 phba->cfg_fcp_eq_count = phba->sli4_hba.msix_vec_nr - 1; 8257 /* Set up SLI-4 HBA */ 8258 if (lpfc_sli4_hba_setup(phba)) { 8259 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8260 "1421 Failed to set up hba\n"); 8261 error = -ENODEV; 8262 goto out_disable_intr; 8263 } 8264 8265 /* Send NOP mbx cmds for non-INTx mode active interrupt test */ 8266 if (intr_mode != 0) 8267 mcnt = lpfc_sli4_send_nop_mbox_cmds(phba, 8268 LPFC_ACT_INTR_CNT); 8269 8270 /* Check active interrupts received only for MSI/MSI-X */ 8271 if (intr_mode == 0 || 8272 phba->sli.slistat.sli_intr >= LPFC_ACT_INTR_CNT) { 8273 /* Log the current active interrupt mode */ 8274 phba->intr_mode = intr_mode; 8275 lpfc_log_intr_mode(phba, intr_mode); 8276 break; 8277 } 8278 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 8279 "0451 Configure interrupt mode (%d) " 8280 "failed active interrupt test.\n", 8281 intr_mode); 8282 /* Unset the preivous SLI-4 HBA setup */ 8283 lpfc_sli4_unset_hba(phba); 8284 /* Try next level of interrupt mode */ 8285 cfg_mode = --intr_mode; 8286 } 8287 8288 /* Perform post initialization setup */ 8289 lpfc_post_init_setup(phba); 8290 8291 /* Check if there are static vports to be created. */ 8292 lpfc_create_static_vport(phba); 8293 8294 return 0; 8295 8296out_disable_intr: 8297 lpfc_sli4_disable_intr(phba); 8298out_free_sysfs_attr: 8299 lpfc_free_sysfs_attr(vport); 8300out_destroy_shost: 8301 lpfc_destroy_shost(phba); 8302out_unset_driver_resource: 8303 lpfc_unset_driver_resource_phase2(phba); 8304out_free_iocb_list: 8305 lpfc_free_iocb_list(phba); 8306out_unset_driver_resource_s4: 8307 lpfc_sli4_driver_resource_unset(phba); 8308out_unset_pci_mem_s4: 8309 lpfc_sli4_pci_mem_unset(phba); 8310out_disable_pci_dev: 8311 lpfc_disable_pci_dev(phba); 8312 if (shost) 8313 scsi_host_put(shost); 8314out_free_phba: 8315 lpfc_hba_free(phba); 8316 return error; 8317} 8318 8319/** 8320 * lpfc_pci_remove_one_s4 - PCI func to unreg SLI-4 device from PCI subsystem 8321 * @pdev: pointer to PCI device 8322 * 8323 * This routine is called from the kernel's PCI subsystem to device with 8324 * SLI-4 interface spec. When an Emulex HBA with SLI-4 interface spec is 8325 * removed from PCI bus, it performs all the necessary cleanup for the HBA 8326 * device to be removed from the PCI subsystem properly. 8327 **/ 8328static void __devexit 8329lpfc_pci_remove_one_s4(struct pci_dev *pdev) 8330{ 8331 struct Scsi_Host *shost = pci_get_drvdata(pdev); 8332 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; 8333 struct lpfc_vport **vports; 8334 struct lpfc_hba *phba = vport->phba; 8335 int i; 8336 8337 /* Mark the device unloading flag */ 8338 spin_lock_irq(&phba->hbalock); 8339 vport->load_flag |= FC_UNLOADING; 8340 spin_unlock_irq(&phba->hbalock); 8341 8342 /* Free the HBA sysfs attributes */ 8343 lpfc_free_sysfs_attr(vport); 8344 8345 /* Release all the vports against this physical port */ 8346 vports = lpfc_create_vport_work_array(phba); 8347 if (vports != NULL) 8348 for (i = 1; i <= phba->max_vports && vports[i] != NULL; i++) 8349 fc_vport_terminate(vports[i]->fc_vport); 8350 lpfc_destroy_vport_work_array(phba, vports); 8351 8352 /* Remove FC host and then SCSI host with the physical port */ 8353 fc_remove_host(shost); 8354 scsi_remove_host(shost); 8355 8356 /* Perform cleanup on the physical port */ 8357 lpfc_cleanup(vport); 8358 8359 /* 8360 * Bring down the SLI Layer. This step disables all interrupts, 8361 * clears the rings, discards all mailbox commands, and resets 8362 * the HBA FCoE function. 8363 */ 8364 lpfc_debugfs_terminate(vport); 8365 lpfc_sli4_hba_unset(phba); 8366 8367 spin_lock_irq(&phba->hbalock); 8368 list_del_init(&vport->listentry); 8369 spin_unlock_irq(&phba->hbalock); 8370 8371 /* Call scsi_free before lpfc_sli4_driver_resource_unset since scsi 8372 * buffers are released to their corresponding pools here. 8373 */ 8374 lpfc_scsi_free(phba); 8375 lpfc_sli4_driver_resource_unset(phba); 8376 8377 /* Unmap adapter Control and Doorbell registers */ 8378 lpfc_sli4_pci_mem_unset(phba); 8379 8380 /* Release PCI resources and disable device's PCI function */ 8381 scsi_host_put(shost); 8382 lpfc_disable_pci_dev(phba); 8383 8384 /* Finally, free the driver's device data structure */ 8385 lpfc_hba_free(phba); 8386 8387 return; 8388} 8389 8390/** 8391 * lpfc_pci_suspend_one_s4 - PCI func to suspend SLI-4 device for power mgmnt 8392 * @pdev: pointer to PCI device 8393 * @msg: power management message 8394 * 8395 * This routine is called from the kernel's PCI subsystem to support system 8396 * Power Management (PM) to device with SLI-4 interface spec. When PM invokes 8397 * this method, it quiesces the device by stopping the driver's worker 8398 * thread for the device, turning off device's interrupt and DMA, and bring 8399 * the device offline. Note that as the driver implements the minimum PM 8400 * requirements to a power-aware driver's PM support for suspend/resume -- all 8401 * the possible PM messages (SUSPEND, HIBERNATE, FREEZE) to the suspend() 8402 * method call will be treated as SUSPEND and the driver will fully 8403 * reinitialize its device during resume() method call, the driver will set 8404 * device to PCI_D3hot state in PCI config space instead of setting it 8405 * according to the @msg provided by the PM. 8406 * 8407 * Return code 8408 * 0 - driver suspended the device 8409 * Error otherwise 8410 **/ 8411static int 8412lpfc_pci_suspend_one_s4(struct pci_dev *pdev, pm_message_t msg) 8413{ 8414 struct Scsi_Host *shost = pci_get_drvdata(pdev); 8415 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 8416 8417 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 8418 "2843 PCI device Power Management suspend.\n"); 8419 8420 /* Bring down the device */ 8421 lpfc_offline_prep(phba); 8422 lpfc_offline(phba); 8423 kthread_stop(phba->worker_thread); 8424 8425 /* Disable interrupt from device */ 8426 lpfc_sli4_disable_intr(phba); 8427 8428 /* Save device state to PCI config space */ 8429 pci_save_state(pdev); 8430 pci_set_power_state(pdev, PCI_D3hot); 8431 8432 return 0; 8433} 8434 8435/** 8436 * lpfc_pci_resume_one_s4 - PCI func to resume SLI-4 device for power mgmnt 8437 * @pdev: pointer to PCI device 8438 * 8439 * This routine is called from the kernel's PCI subsystem to support system 8440 * Power Management (PM) to device with SLI-4 interface spac. When PM invokes 8441 * this method, it restores the device's PCI config space state and fully 8442 * reinitializes the device and brings it online. Note that as the driver 8443 * implements the minimum PM requirements to a power-aware driver's PM for 8444 * suspend/resume -- all the possible PM messages (SUSPEND, HIBERNATE, FREEZE) 8445 * to the suspend() method call will be treated as SUSPEND and the driver 8446 * will fully reinitialize its device during resume() method call, the device 8447 * will be set to PCI_D0 directly in PCI config space before restoring the 8448 * state. 8449 * 8450 * Return code 8451 * 0 - driver suspended the device 8452 * Error otherwise 8453 **/ 8454static int 8455lpfc_pci_resume_one_s4(struct pci_dev *pdev) 8456{ 8457 struct Scsi_Host *shost = pci_get_drvdata(pdev); 8458 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 8459 uint32_t intr_mode; 8460 int error; 8461 8462 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 8463 "0292 PCI device Power Management resume.\n"); 8464 8465 /* Restore device state from PCI config space */ 8466 pci_set_power_state(pdev, PCI_D0); 8467 pci_restore_state(pdev); 8468 8469 /* 8470 * As the new kernel behavior of pci_restore_state() API call clears 8471 * device saved_state flag, need to save the restored state again. 8472 */ 8473 pci_save_state(pdev); 8474 8475 if (pdev->is_busmaster) 8476 pci_set_master(pdev); 8477 8478 /* Startup the kernel thread for this host adapter. */ 8479 phba->worker_thread = kthread_run(lpfc_do_work, phba, 8480 "lpfc_worker_%d", phba->brd_no); 8481 if (IS_ERR(phba->worker_thread)) { 8482 error = PTR_ERR(phba->worker_thread); 8483 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8484 "0293 PM resume failed to start worker " 8485 "thread: error=x%x.\n", error); 8486 return error; 8487 } 8488 8489 /* Configure and enable interrupt */ 8490 intr_mode = lpfc_sli4_enable_intr(phba, phba->intr_mode); 8491 if (intr_mode == LPFC_INTR_ERROR) { 8492 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8493 "0294 PM resume Failed to enable interrupt\n"); 8494 return -EIO; 8495 } else 8496 phba->intr_mode = intr_mode; 8497 8498 /* Restart HBA and bring it online */ 8499 lpfc_sli_brdrestart(phba); 8500 lpfc_online(phba); 8501 8502 /* Log the current active interrupt mode */ 8503 lpfc_log_intr_mode(phba, phba->intr_mode); 8504 8505 return 0; 8506} 8507 8508/** 8509 * lpfc_sli4_prep_dev_for_recover - Prepare SLI4 device for pci slot recover 8510 * @phba: pointer to lpfc hba data structure. 8511 * 8512 * This routine is called to prepare the SLI4 device for PCI slot recover. It 8513 * aborts all the outstanding SCSI I/Os to the pci device. 8514 **/ 8515static void 8516lpfc_sli4_prep_dev_for_recover(struct lpfc_hba *phba) 8517{ 8518 struct lpfc_sli *psli = &phba->sli; 8519 struct lpfc_sli_ring *pring; 8520 8521 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8522 "2828 PCI channel I/O abort preparing for recovery\n"); 8523 /* 8524 * There may be errored I/Os through HBA, abort all I/Os on txcmplq 8525 * and let the SCSI mid-layer to retry them to recover. 8526 */ 8527 pring = &psli->ring[psli->fcp_ring]; 8528 lpfc_sli_abort_iocb_ring(phba, pring); 8529} 8530 8531/** 8532 * lpfc_sli4_prep_dev_for_reset - Prepare SLI4 device for pci slot reset 8533 * @phba: pointer to lpfc hba data structure. 8534 * 8535 * This routine is called to prepare the SLI4 device for PCI slot reset. It 8536 * disables the device interrupt and pci device, and aborts the internal FCP 8537 * pending I/Os. 8538 **/ 8539static void 8540lpfc_sli4_prep_dev_for_reset(struct lpfc_hba *phba) 8541{ 8542 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8543 "2826 PCI channel disable preparing for reset\n"); 8544 8545 /* Block any management I/Os to the device */ 8546 lpfc_block_mgmt_io(phba); 8547 8548 /* Block all SCSI devices' I/Os on the host */ 8549 lpfc_scsi_dev_block(phba); 8550 8551 /* stop all timers */ 8552 lpfc_stop_hba_timers(phba); 8553 8554 /* Disable interrupt and pci device */ 8555 lpfc_sli4_disable_intr(phba); 8556 pci_disable_device(phba->pcidev); 8557 8558 /* Flush all driver's outstanding SCSI I/Os as we are to reset */ 8559 lpfc_sli_flush_fcp_rings(phba); 8560} 8561 8562/** 8563 * lpfc_sli4_prep_dev_for_perm_failure - Prepare SLI4 dev for pci slot disable 8564 * @phba: pointer to lpfc hba data structure. 8565 * 8566 * This routine is called to prepare the SLI4 device for PCI slot permanently 8567 * disabling. It blocks the SCSI transport layer traffic and flushes the FCP 8568 * pending I/Os. 8569 **/ 8570static void 8571lpfc_sli4_prep_dev_for_perm_failure(struct lpfc_hba *phba) 8572{ 8573 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8574 "2827 PCI channel permanent disable for failure\n"); 8575 8576 /* Block all SCSI devices' I/Os on the host */ 8577 lpfc_scsi_dev_block(phba); 8578 8579 /* stop all timers */ 8580 lpfc_stop_hba_timers(phba); 8581 8582 /* Clean up all driver's outstanding SCSI I/Os */ 8583 lpfc_sli_flush_fcp_rings(phba); 8584} 8585 8586/** 8587 * lpfc_io_error_detected_s4 - Method for handling PCI I/O error to SLI-4 device 8588 * @pdev: pointer to PCI device. 8589 * @state: the current PCI connection state. 8590 * 8591 * This routine is called from the PCI subsystem for error handling to device 8592 * with SLI-4 interface spec. This function is called by the PCI subsystem 8593 * after a PCI bus error affecting this device has been detected. When this 8594 * function is invoked, it will need to stop all the I/Os and interrupt(s) 8595 * to the device. Once that is done, it will return PCI_ERS_RESULT_NEED_RESET 8596 * for the PCI subsystem to perform proper recovery as desired. 8597 * 8598 * Return codes 8599 * PCI_ERS_RESULT_NEED_RESET - need to reset before recovery 8600 * PCI_ERS_RESULT_DISCONNECT - device could not be recovered 8601 **/ 8602static pci_ers_result_t 8603lpfc_io_error_detected_s4(struct pci_dev *pdev, pci_channel_state_t state) 8604{ 8605 struct Scsi_Host *shost = pci_get_drvdata(pdev); 8606 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 8607 8608 switch (state) { 8609 case pci_channel_io_normal: 8610 /* Non-fatal error, prepare for recovery */ 8611 lpfc_sli4_prep_dev_for_recover(phba); 8612 return PCI_ERS_RESULT_CAN_RECOVER; 8613 case pci_channel_io_frozen: 8614 /* Fatal error, prepare for slot reset */ 8615 lpfc_sli4_prep_dev_for_reset(phba); 8616 return PCI_ERS_RESULT_NEED_RESET; 8617 case pci_channel_io_perm_failure: 8618 /* Permanent failure, prepare for device down */ 8619 lpfc_sli4_prep_dev_for_perm_failure(phba); 8620 return PCI_ERS_RESULT_DISCONNECT; 8621 default: 8622 /* Unknown state, prepare and request slot reset */ 8623 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8624 "2825 Unknown PCI error state: x%x\n", state); 8625 lpfc_sli4_prep_dev_for_reset(phba); 8626 return PCI_ERS_RESULT_NEED_RESET; 8627 } 8628} 8629 8630/** 8631 * lpfc_io_slot_reset_s4 - Method for restart PCI SLI-4 device from scratch 8632 * @pdev: pointer to PCI device. 8633 * 8634 * This routine is called from the PCI subsystem for error handling to device 8635 * with SLI-4 interface spec. It is called after PCI bus has been reset to 8636 * restart the PCI card from scratch, as if from a cold-boot. During the 8637 * PCI subsystem error recovery, after the driver returns 8638 * PCI_ERS_RESULT_NEED_RESET, the PCI subsystem will perform proper error 8639 * recovery and then call this routine before calling the .resume method to 8640 * recover the device. This function will initialize the HBA device, enable 8641 * the interrupt, but it will just put the HBA to offline state without 8642 * passing any I/O traffic. 8643 * 8644 * Return codes 8645 * PCI_ERS_RESULT_RECOVERED - the device has been recovered 8646 * PCI_ERS_RESULT_DISCONNECT - device could not be recovered 8647 */ 8648static pci_ers_result_t 8649lpfc_io_slot_reset_s4(struct pci_dev *pdev) 8650{ 8651 struct Scsi_Host *shost = pci_get_drvdata(pdev); 8652 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 8653 struct lpfc_sli *psli = &phba->sli; 8654 uint32_t intr_mode; 8655 8656 dev_printk(KERN_INFO, &pdev->dev, "recovering from a slot reset.\n"); 8657 if (pci_enable_device_mem(pdev)) { 8658 printk(KERN_ERR "lpfc: Cannot re-enable " 8659 "PCI device after reset.\n"); 8660 return PCI_ERS_RESULT_DISCONNECT; 8661 } 8662 8663 pci_restore_state(pdev); 8664 if (pdev->is_busmaster) 8665 pci_set_master(pdev); 8666 8667 spin_lock_irq(&phba->hbalock); 8668 psli->sli_flag &= ~LPFC_SLI_ACTIVE; 8669 spin_unlock_irq(&phba->hbalock); 8670 8671 /* Configure and enable interrupt */ 8672 intr_mode = lpfc_sli4_enable_intr(phba, phba->intr_mode); 8673 if (intr_mode == LPFC_INTR_ERROR) { 8674 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8675 "2824 Cannot re-enable interrupt after " 8676 "slot reset.\n"); 8677 return PCI_ERS_RESULT_DISCONNECT; 8678 } else 8679 phba->intr_mode = intr_mode; 8680 8681 /* Log the current active interrupt mode */ 8682 lpfc_log_intr_mode(phba, phba->intr_mode); 8683 8684 return PCI_ERS_RESULT_RECOVERED; 8685} 8686 8687/** 8688 * lpfc_io_resume_s4 - Method for resuming PCI I/O operation to SLI-4 device 8689 * @pdev: pointer to PCI device 8690 * 8691 * This routine is called from the PCI subsystem for error handling to device 8692 * with SLI-4 interface spec. It is called when kernel error recovery tells 8693 * the lpfc driver that it is ok to resume normal PCI operation after PCI bus 8694 * error recovery. After this call, traffic can start to flow from this device 8695 * again. 8696 **/ 8697static void 8698lpfc_io_resume_s4(struct pci_dev *pdev) 8699{ 8700 struct Scsi_Host *shost = pci_get_drvdata(pdev); 8701 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 8702 8703 /* 8704 * In case of slot reset, as function reset is performed through 8705 * mailbox command which needs DMA to be enabled, this operation 8706 * has to be moved to the io resume phase. Taking device offline 8707 * will perform the necessary cleanup. 8708 */ 8709 if (!(phba->sli.sli_flag & LPFC_SLI_ACTIVE)) { 8710 /* Perform device reset */ 8711 lpfc_offline_prep(phba); 8712 lpfc_offline(phba); 8713 lpfc_sli_brdrestart(phba); 8714 /* Bring the device back online */ 8715 lpfc_online(phba); 8716 } 8717 8718 /* Clean up Advanced Error Reporting (AER) if needed */ 8719 if (phba->hba_flag & HBA_AER_ENABLED) 8720 pci_cleanup_aer_uncorrect_error_status(pdev); 8721} 8722 8723/** 8724 * lpfc_pci_probe_one - lpfc PCI probe func to reg dev to PCI subsystem 8725 * @pdev: pointer to PCI device 8726 * @pid: pointer to PCI device identifier 8727 * 8728 * This routine is to be registered to the kernel's PCI subsystem. When an 8729 * Emulex HBA device is presented on PCI bus, the kernel PCI subsystem looks 8730 * at PCI device-specific information of the device and driver to see if the 8731 * driver state that it can support this kind of device. If the match is 8732 * successful, the driver core invokes this routine. This routine dispatches 8733 * the action to the proper SLI-3 or SLI-4 device probing routine, which will 8734 * do all the initialization that it needs to do to handle the HBA device 8735 * properly. 8736 * 8737 * Return code 8738 * 0 - driver can claim the device 8739 * negative value - driver can not claim the device 8740 **/ 8741static int __devinit 8742lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid) 8743{ 8744 int rc; 8745 struct lpfc_sli_intf intf; 8746 8747 if (pci_read_config_dword(pdev, LPFC_SLI_INTF, &intf.word0)) 8748 return -ENODEV; 8749 8750 if ((bf_get(lpfc_sli_intf_valid, &intf) == LPFC_SLI_INTF_VALID) && 8751 (bf_get(lpfc_sli_intf_slirev, &intf) == LPFC_SLI_INTF_REV_SLI4)) 8752 rc = lpfc_pci_probe_one_s4(pdev, pid); 8753 else 8754 rc = lpfc_pci_probe_one_s3(pdev, pid); 8755 8756 return rc; 8757} 8758 8759/** 8760 * lpfc_pci_remove_one - lpfc PCI func to unreg dev from PCI subsystem 8761 * @pdev: pointer to PCI device 8762 * 8763 * This routine is to be registered to the kernel's PCI subsystem. When an 8764 * Emulex HBA is removed from PCI bus, the driver core invokes this routine. 8765 * This routine dispatches the action to the proper SLI-3 or SLI-4 device 8766 * remove routine, which will perform all the necessary cleanup for the 8767 * device to be removed from the PCI subsystem properly. 8768 **/ 8769static void __devexit 8770lpfc_pci_remove_one(struct pci_dev *pdev) 8771{ 8772 struct Scsi_Host *shost = pci_get_drvdata(pdev); 8773 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 8774 8775 switch (phba->pci_dev_grp) { 8776 case LPFC_PCI_DEV_LP: 8777 lpfc_pci_remove_one_s3(pdev); 8778 break; 8779 case LPFC_PCI_DEV_OC: 8780 lpfc_pci_remove_one_s4(pdev); 8781 break; 8782 default: 8783 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8784 "1424 Invalid PCI device group: 0x%x\n", 8785 phba->pci_dev_grp); 8786 break; 8787 } 8788 return; 8789} 8790 8791/** 8792 * lpfc_pci_suspend_one - lpfc PCI func to suspend dev for power management 8793 * @pdev: pointer to PCI device 8794 * @msg: power management message 8795 * 8796 * This routine is to be registered to the kernel's PCI subsystem to support 8797 * system Power Management (PM). When PM invokes this method, it dispatches 8798 * the action to the proper SLI-3 or SLI-4 device suspend routine, which will 8799 * suspend the device. 8800 * 8801 * Return code 8802 * 0 - driver suspended the device 8803 * Error otherwise 8804 **/ 8805static int 8806lpfc_pci_suspend_one(struct pci_dev *pdev, pm_message_t msg) 8807{ 8808 struct Scsi_Host *shost = pci_get_drvdata(pdev); 8809 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 8810 int rc = -ENODEV; 8811 8812 switch (phba->pci_dev_grp) { 8813 case LPFC_PCI_DEV_LP: 8814 rc = lpfc_pci_suspend_one_s3(pdev, msg); 8815 break; 8816 case LPFC_PCI_DEV_OC: 8817 rc = lpfc_pci_suspend_one_s4(pdev, msg); 8818 break; 8819 default: 8820 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8821 "1425 Invalid PCI device group: 0x%x\n", 8822 phba->pci_dev_grp); 8823 break; 8824 } 8825 return rc; 8826} 8827 8828/** 8829 * lpfc_pci_resume_one - lpfc PCI func to resume dev for power management 8830 * @pdev: pointer to PCI device 8831 * 8832 * This routine is to be registered to the kernel's PCI subsystem to support 8833 * system Power Management (PM). When PM invokes this method, it dispatches 8834 * the action to the proper SLI-3 or SLI-4 device resume routine, which will 8835 * resume the device. 8836 * 8837 * Return code 8838 * 0 - driver suspended the device 8839 * Error otherwise 8840 **/ 8841static int 8842lpfc_pci_resume_one(struct pci_dev *pdev) 8843{ 8844 struct Scsi_Host *shost = pci_get_drvdata(pdev); 8845 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 8846 int rc = -ENODEV; 8847 8848 switch (phba->pci_dev_grp) { 8849 case LPFC_PCI_DEV_LP: 8850 rc = lpfc_pci_resume_one_s3(pdev); 8851 break; 8852 case LPFC_PCI_DEV_OC: 8853 rc = lpfc_pci_resume_one_s4(pdev); 8854 break; 8855 default: 8856 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8857 "1426 Invalid PCI device group: 0x%x\n", 8858 phba->pci_dev_grp); 8859 break; 8860 } 8861 return rc; 8862} 8863 8864/** 8865 * lpfc_io_error_detected - lpfc method for handling PCI I/O error 8866 * @pdev: pointer to PCI device. 8867 * @state: the current PCI connection state. 8868 * 8869 * This routine is registered to the PCI subsystem for error handling. This 8870 * function is called by the PCI subsystem after a PCI bus error affecting 8871 * this device has been detected. When this routine is invoked, it dispatches 8872 * the action to the proper SLI-3 or SLI-4 device error detected handling 8873 * routine, which will perform the proper error detected operation. 8874 * 8875 * Return codes 8876 * PCI_ERS_RESULT_NEED_RESET - need to reset before recovery 8877 * PCI_ERS_RESULT_DISCONNECT - device could not be recovered 8878 **/ 8879static pci_ers_result_t 8880lpfc_io_error_detected(struct pci_dev *pdev, pci_channel_state_t state) 8881{ 8882 struct Scsi_Host *shost = pci_get_drvdata(pdev); 8883 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 8884 pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT; 8885 8886 switch (phba->pci_dev_grp) { 8887 case LPFC_PCI_DEV_LP: 8888 rc = lpfc_io_error_detected_s3(pdev, state); 8889 break; 8890 case LPFC_PCI_DEV_OC: 8891 rc = lpfc_io_error_detected_s4(pdev, state); 8892 break; 8893 default: 8894 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8895 "1427 Invalid PCI device group: 0x%x\n", 8896 phba->pci_dev_grp); 8897 break; 8898 } 8899 return rc; 8900} 8901 8902/** 8903 * lpfc_io_slot_reset - lpfc method for restart PCI dev from scratch 8904 * @pdev: pointer to PCI device. 8905 * 8906 * This routine is registered to the PCI subsystem for error handling. This 8907 * function is called after PCI bus has been reset to restart the PCI card 8908 * from scratch, as if from a cold-boot. When this routine is invoked, it 8909 * dispatches the action to the proper SLI-3 or SLI-4 device reset handling 8910 * routine, which will perform the proper device reset. 8911 * 8912 * Return codes 8913 * PCI_ERS_RESULT_RECOVERED - the device has been recovered 8914 * PCI_ERS_RESULT_DISCONNECT - device could not be recovered 8915 **/ 8916static pci_ers_result_t 8917lpfc_io_slot_reset(struct pci_dev *pdev) 8918{ 8919 struct Scsi_Host *shost = pci_get_drvdata(pdev); 8920 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 8921 pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT; 8922 8923 switch (phba->pci_dev_grp) { 8924 case LPFC_PCI_DEV_LP: 8925 rc = lpfc_io_slot_reset_s3(pdev); 8926 break; 8927 case LPFC_PCI_DEV_OC: 8928 rc = lpfc_io_slot_reset_s4(pdev); 8929 break; 8930 default: 8931 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8932 "1428 Invalid PCI device group: 0x%x\n", 8933 phba->pci_dev_grp); 8934 break; 8935 } 8936 return rc; 8937} 8938 8939/** 8940 * lpfc_io_resume - lpfc method for resuming PCI I/O operation 8941 * @pdev: pointer to PCI device 8942 * 8943 * This routine is registered to the PCI subsystem for error handling. It 8944 * is called when kernel error recovery tells the lpfc driver that it is 8945 * OK to resume normal PCI operation after PCI bus error recovery. When 8946 * this routine is invoked, it dispatches the action to the proper SLI-3 8947 * or SLI-4 device io_resume routine, which will resume the device operation. 8948 **/ 8949static void 8950lpfc_io_resume(struct pci_dev *pdev) 8951{ 8952 struct Scsi_Host *shost = pci_get_drvdata(pdev); 8953 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 8954 8955 switch (phba->pci_dev_grp) { 8956 case LPFC_PCI_DEV_LP: 8957 lpfc_io_resume_s3(pdev); 8958 break; 8959 case LPFC_PCI_DEV_OC: 8960 lpfc_io_resume_s4(pdev); 8961 break; 8962 default: 8963 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8964 "1429 Invalid PCI device group: 0x%x\n", 8965 phba->pci_dev_grp); 8966 break; 8967 } 8968 return; 8969} 8970 8971static struct pci_device_id lpfc_id_table[] = { 8972 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_VIPER, 8973 PCI_ANY_ID, PCI_ANY_ID, }, 8974 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_FIREFLY, 8975 PCI_ANY_ID, PCI_ANY_ID, }, 8976 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_THOR, 8977 PCI_ANY_ID, PCI_ANY_ID, }, 8978 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_PEGASUS, 8979 PCI_ANY_ID, PCI_ANY_ID, }, 8980 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_CENTAUR, 8981 PCI_ANY_ID, PCI_ANY_ID, }, 8982 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_DRAGONFLY, 8983 PCI_ANY_ID, PCI_ANY_ID, }, 8984 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SUPERFLY, 8985 PCI_ANY_ID, PCI_ANY_ID, }, 8986 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_RFLY, 8987 PCI_ANY_ID, PCI_ANY_ID, }, 8988 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_PFLY, 8989 PCI_ANY_ID, PCI_ANY_ID, }, 8990 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_NEPTUNE, 8991 PCI_ANY_ID, PCI_ANY_ID, }, 8992 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_NEPTUNE_SCSP, 8993 PCI_ANY_ID, PCI_ANY_ID, }, 8994 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_NEPTUNE_DCSP, 8995 PCI_ANY_ID, PCI_ANY_ID, }, 8996 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_HELIOS, 8997 PCI_ANY_ID, PCI_ANY_ID, }, 8998 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_HELIOS_SCSP, 8999 PCI_ANY_ID, PCI_ANY_ID, }, 9000 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_HELIOS_DCSP, 9001 PCI_ANY_ID, PCI_ANY_ID, }, 9002 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_BMID, 9003 PCI_ANY_ID, PCI_ANY_ID, }, 9004 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_BSMB, 9005 PCI_ANY_ID, PCI_ANY_ID, }, 9006 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_ZEPHYR, 9007 PCI_ANY_ID, PCI_ANY_ID, }, 9008 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_HORNET, 9009 PCI_ANY_ID, PCI_ANY_ID, }, 9010 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_ZEPHYR_SCSP, 9011 PCI_ANY_ID, PCI_ANY_ID, }, 9012 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_ZEPHYR_DCSP, 9013 PCI_ANY_ID, PCI_ANY_ID, }, 9014 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_ZMID, 9015 PCI_ANY_ID, PCI_ANY_ID, }, 9016 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_ZSMB, 9017 PCI_ANY_ID, PCI_ANY_ID, }, 9018 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_TFLY, 9019 PCI_ANY_ID, PCI_ANY_ID, }, 9020 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LP101, 9021 PCI_ANY_ID, PCI_ANY_ID, }, 9022 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LP10000S, 9023 PCI_ANY_ID, PCI_ANY_ID, }, 9024 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LP11000S, 9025 PCI_ANY_ID, PCI_ANY_ID, }, 9026 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LPE11000S, 9027 PCI_ANY_ID, PCI_ANY_ID, }, 9028 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SAT, 9029 PCI_ANY_ID, PCI_ANY_ID, }, 9030 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SAT_MID, 9031 PCI_ANY_ID, PCI_ANY_ID, }, 9032 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SAT_SMB, 9033 PCI_ANY_ID, PCI_ANY_ID, }, 9034 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SAT_DCSP, 9035 PCI_ANY_ID, PCI_ANY_ID, }, 9036 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SAT_SCSP, 9037 PCI_ANY_ID, PCI_ANY_ID, }, 9038 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SAT_S, 9039 PCI_ANY_ID, PCI_ANY_ID, }, 9040 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_PROTEUS_VF, 9041 PCI_ANY_ID, PCI_ANY_ID, }, 9042 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_PROTEUS_PF, 9043 PCI_ANY_ID, PCI_ANY_ID, }, 9044 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_PROTEUS_S, 9045 PCI_ANY_ID, PCI_ANY_ID, }, 9046 {PCI_VENDOR_ID_SERVERENGINE, PCI_DEVICE_ID_TIGERSHARK, 9047 PCI_ANY_ID, PCI_ANY_ID, }, 9048 {PCI_VENDOR_ID_SERVERENGINE, PCI_DEVICE_ID_TOMCAT, 9049 PCI_ANY_ID, PCI_ANY_ID, }, 9050 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_FALCON, 9051 PCI_ANY_ID, PCI_ANY_ID, }, 9052 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_BALIUS, 9053 PCI_ANY_ID, PCI_ANY_ID, }, 9054 { 0 } 9055}; 9056 9057MODULE_DEVICE_TABLE(pci, lpfc_id_table); 9058 9059static struct pci_error_handlers lpfc_err_handler = { 9060 .error_detected = lpfc_io_error_detected, 9061 .slot_reset = lpfc_io_slot_reset, 9062 .resume = lpfc_io_resume, 9063}; 9064 9065static struct pci_driver lpfc_driver = { 9066 .name = LPFC_DRIVER_NAME, 9067 .id_table = lpfc_id_table, 9068 .probe = lpfc_pci_probe_one, 9069 .remove = __devexit_p(lpfc_pci_remove_one), 9070 .suspend = lpfc_pci_suspend_one, 9071 .resume = lpfc_pci_resume_one, 9072 .err_handler = &lpfc_err_handler, 9073}; 9074 9075/** 9076 * lpfc_init - lpfc module initialization routine 9077 * 9078 * This routine is to be invoked when the lpfc module is loaded into the 9079 * kernel. The special kernel macro module_init() is used to indicate the 9080 * role of this routine to the kernel as lpfc module entry point. 9081 * 9082 * Return codes 9083 * 0 - successful 9084 * -ENOMEM - FC attach transport failed 9085 * all others - failed 9086 */ 9087static int __init 9088lpfc_init(void) 9089{ 9090 int error = 0; 9091 9092 printk(LPFC_MODULE_DESC "\n"); 9093 printk(LPFC_COPYRIGHT "\n"); 9094 9095 if (lpfc_enable_npiv) { 9096 lpfc_transport_functions.vport_create = lpfc_vport_create; 9097 lpfc_transport_functions.vport_delete = lpfc_vport_delete; 9098 } 9099 lpfc_transport_template = 9100 fc_attach_transport(&lpfc_transport_functions); 9101 if (lpfc_transport_template == NULL) 9102 return -ENOMEM; 9103 if (lpfc_enable_npiv) { 9104 lpfc_vport_transport_template = 9105 fc_attach_transport(&lpfc_vport_transport_functions); 9106 if (lpfc_vport_transport_template == NULL) { 9107 fc_release_transport(lpfc_transport_template); 9108 return -ENOMEM; 9109 } 9110 } 9111 error = pci_register_driver(&lpfc_driver); 9112 if (error) { 9113 fc_release_transport(lpfc_transport_template); 9114 if (lpfc_enable_npiv) 9115 fc_release_transport(lpfc_vport_transport_template); 9116 } 9117 9118 return error; 9119} 9120 9121/** 9122 * lpfc_exit - lpfc module removal routine 9123 * 9124 * This routine is invoked when the lpfc module is removed from the kernel. 9125 * The special kernel macro module_exit() is used to indicate the role of 9126 * this routine to the kernel as lpfc module exit point. 9127 */ 9128static void __exit 9129lpfc_exit(void) 9130{ 9131 pci_unregister_driver(&lpfc_driver); 9132 fc_release_transport(lpfc_transport_template); 9133 if (lpfc_enable_npiv) 9134 fc_release_transport(lpfc_vport_transport_template); 9135 if (_dump_buf_data) { 9136 printk(KERN_ERR "9062 BLKGRD: freeing %lu pages for " 9137 "_dump_buf_data at 0x%p\n", 9138 (1L << _dump_buf_data_order), _dump_buf_data); 9139 free_pages((unsigned long)_dump_buf_data, _dump_buf_data_order); 9140 } 9141 9142 if (_dump_buf_dif) { 9143 printk(KERN_ERR "9049 BLKGRD: freeing %lu pages for " 9144 "_dump_buf_dif at 0x%p\n", 9145 (1L << _dump_buf_dif_order), _dump_buf_dif); 9146 free_pages((unsigned long)_dump_buf_dif, _dump_buf_dif_order); 9147 } 9148} 9149 9150module_init(lpfc_init); 9151module_exit(lpfc_exit); 9152MODULE_LICENSE("GPL"); 9153MODULE_DESCRIPTION(LPFC_MODULE_DESC); 9154MODULE_AUTHOR("Emulex Corporation - tech.support@emulex.com"); 9155MODULE_VERSION("0:" LPFC_DRIVER_VERSION); 9156