/*******************************************************************
 * This file is part of the Emulex Linux Device Driver for         *
 * Fibre Channel Host Bus Adapters.                                *
 * Copyright (C) 2004-2009 Emulex.  All rights reserved.           *
 * EMULEX and SLI are trademarks of Emulex.                        *
 * www.emulex.com                                                  *
 * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
 *                                                                 *
 * This program is free software; you can redistribute it and/or   *
 * modify it under the terms of version 2 of the GNU General       *
 * Public License as published by the Free Software Foundation.    *
 * This program is distributed in the hope that it will be useful. *
 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
 * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
 * more details, a copy of which can be found in the file COPYING  *
 * included with this package.                                     *
 *******************************************************************/
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <asm/unaligned.h>

#include <scsi/scsi.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_transport_fc.h>

#include "lpfc_version.h"
#include "lpfc_hw4.h"
#include "lpfc_hw.h"
#include "lpfc_sli.h"
#include "lpfc_sli4.h"
#include "lpfc_nl.h"
#include "lpfc_disc.h"
#include "lpfc_scsi.h"
#include "lpfc.h"
#include "lpfc_logmsg.h"
#include "lpfc_crtn.h"
#include "lpfc_vport.h"

#define LPFC_RESET_WAIT  2
#define LPFC_ABORT_WAIT  2

int _dump_buf_done;

static char *dif_op_str[] = {
	"SCSI_PROT_NORMAL",
	"SCSI_PROT_READ_INSERT",
	"SCSI_PROT_WRITE_STRIP",
	"SCSI_PROT_READ_STRIP",
	"SCSI_PROT_WRITE_INSERT",
	"SCSI_PROT_READ_PASS",
	"SCSI_PROT_WRITE_PASS",
};
static void
lpfc_release_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb);
static void
lpfc_release_scsi_buf_s3(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb);

static void
lpfc_debug_save_data(struct lpfc_hba *phba, struct scsi_cmnd *cmnd)
{
	void *src, *dst;
	struct scatterlist *sgde = scsi_sglist(cmnd);

	if (!_dump_buf_data) {
		lpfc_printf_log(phba, KERN_ERR, LOG_BG,
			"9050 BLKGRD: ERROR %s _dump_buf_data is NULL\n",
				__func__);
		return;
	}

	if (!sgde) {
		lpfc_printf_log(phba, KERN_ERR, LOG_BG,
			"9051 BLKGRD: ERROR: data scatterlist is null\n");
		return;
	}

	dst = (void *) _dump_buf_data;
	while (sgde) {
		src = sg_virt(sgde);
		memcpy(dst, src, sgde->length);
		dst += sgde->length;
		sgde = sg_next(sgde);
	}
}

static void
lpfc_debug_save_dif(struct lpfc_hba *phba, struct scsi_cmnd *cmnd)
{
	void *src, *dst;
	struct scatterlist *sgde = scsi_prot_sglist(cmnd);

	if (!_dump_buf_dif) {
		lpfc_printf_log(phba, KERN_ERR, LOG_BG,
			"9052 BLKGRD: ERROR %s _dump_buf_dif is NULL\n",
				__func__);
		return;
	}

	if (!sgde) {
		lpfc_printf_log(phba, KERN_ERR, LOG_BG,
			"9053 BLKGRD: ERROR: prot scatterlist is null\n");
		return;
	}

	dst = _dump_buf_dif;
	while (sgde) {
		src = sg_virt(sgde);
		memcpy(dst, src, sgde->length);
		dst += sgde->length;
		sgde = sg_next(sgde);
	}
}
/**
 * lpfc_sli4_set_rsp_sgl_last - Set the last bit in the response sge.
 * @phba: Pointer to HBA object.
 * @lpfc_cmd: lpfc scsi command object pointer.
 *
 * This function is called from the lpfc_prep_task_mgmt_cmd function to
 * set the last bit in the response sge entry.
 **/
static void
lpfc_sli4_set_rsp_sgl_last(struct lpfc_hba *phba,
				struct lpfc_scsi_buf *lpfc_cmd)
{
	struct sli4_sge *sgl = (struct sli4_sge *)lpfc_cmd->fcp_bpl;
	if (sgl) {
		sgl += 1;
		sgl->word2 = le32_to_cpu(sgl->word2);
		bf_set(lpfc_sli4_sge_last, sgl, 1);
		sgl->word2 = cpu_to_le32(sgl->word2);
	}
}

/**
 * lpfc_update_stats - Update statistical data for the command completion
 * @phba: Pointer to HBA object.
 * @lpfc_cmd: lpfc scsi command object pointer.
 *
 * This function is called when there is a command completion and this
 * function updates the statistical data for the command completion.
 **/
static void
lpfc_update_stats(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
{
	struct lpfc_rport_data *rdata = lpfc_cmd->rdata;
	struct lpfc_nodelist *pnode = rdata->pnode;
	struct scsi_cmnd *cmd = lpfc_cmd->pCmd;
	unsigned long flags;
	struct Scsi_Host *shost = cmd->device->host;
	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
	unsigned long latency;
	int i;

	if (cmd->result)
		return;

	latency = jiffies_to_msecs((long)jiffies - (long)lpfc_cmd->start_time);

	spin_lock_irqsave(shost->host_lock, flags);
	if (!vport->stat_data_enabled ||
		vport->stat_data_blocked ||
		!pnode->lat_data ||
		(phba->bucket_type == LPFC_NO_BUCKET)) {
		spin_unlock_irqrestore(shost->host_lock, flags);
		return;
	}

	if (phba->bucket_type == LPFC_LINEAR_BUCKET) {
		i = (latency + phba->bucket_step - 1 - phba->bucket_base)/
			phba->bucket_step;
		/* check array subscript bounds */
		if (i < 0)
			i = 0;
		else if (i >= LPFC_MAX_BUCKET_COUNT)
			i = LPFC_MAX_BUCKET_COUNT - 1;
	} else {
		for (i = 0; i < LPFC_MAX_BUCKET_COUNT-1; i++)
			if (latency <= (phba->bucket_base +
				((1<<i)*phba->bucket_step)))
				break;
	}

	pnode->lat_data[i].cmd_count++;
	spin_unlock_irqrestore(shost->host_lock, flags);
}
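/*
 * Illustrative note (added for clarity; the numbers are hypothetical):
 * with LPFC_LINEAR_BUCKET accounting, lpfc_update_stats() maps a
 * completion latency to a bucket index by
 *
 *	i = (latency + bucket_step - 1 - bucket_base) / bucket_step;
 *
 * e.g. with bucket_base = 0 and bucket_step = 10 ms, a 25 ms completion
 * lands in bucket i = (25 + 10 - 1) / 10 = 3, after which i is clamped
 * to [0, LPFC_MAX_BUCKET_COUNT - 1].  The power-of-two branch instead
 * picks the first bucket whose upper bound
 * bucket_base + (1 << i) * bucket_step covers the latency.
 */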
/**
 * lpfc_send_sdev_queuedepth_change_event - Posts a queuedepth change event
 * @phba: Pointer to HBA context object.
 * @vport: Pointer to vport object.
 * @ndlp: Pointer to FC node associated with the target.
 * @lun: Lun number of the scsi device.
 * @old_val: Old value of the queue depth.
 * @new_val: New value of the queue depth.
 *
 * This function sends an event to the mgmt application indicating
 * there is a change in the scsi device queue depth.
 **/
static void
lpfc_send_sdev_queuedepth_change_event(struct lpfc_hba *phba,
		struct lpfc_vport *vport,
		struct lpfc_nodelist *ndlp,
		uint32_t lun,
		uint32_t old_val,
		uint32_t new_val)
{
	struct lpfc_fast_path_event *fast_path_evt;
	unsigned long flags;

	fast_path_evt = lpfc_alloc_fast_evt(phba);
	if (!fast_path_evt)
		return;

	fast_path_evt->un.queue_depth_evt.scsi_event.event_type =
		FC_REG_SCSI_EVENT;
	fast_path_evt->un.queue_depth_evt.scsi_event.subcategory =
		LPFC_EVENT_VARQUEDEPTH;

	/* Report all luns with change in queue depth */
	fast_path_evt->un.queue_depth_evt.scsi_event.lun = lun;
	if (ndlp && NLP_CHK_NODE_ACT(ndlp)) {
		memcpy(&fast_path_evt->un.queue_depth_evt.scsi_event.wwpn,
			&ndlp->nlp_portname, sizeof(struct lpfc_name));
		memcpy(&fast_path_evt->un.queue_depth_evt.scsi_event.wwnn,
			&ndlp->nlp_nodename, sizeof(struct lpfc_name));
	}

	fast_path_evt->un.queue_depth_evt.oldval = old_val;
	fast_path_evt->un.queue_depth_evt.newval = new_val;
	fast_path_evt->vport = vport;

	fast_path_evt->work_evt.evt = LPFC_EVT_FASTPATH_MGMT_EVT;
	spin_lock_irqsave(&phba->hbalock, flags);
	list_add_tail(&fast_path_evt->work_evt.evt_listp, &phba->work_list);
	spin_unlock_irqrestore(&phba->hbalock, flags);
	lpfc_worker_wake_up(phba);

	return;
}

/**
 * lpfc_change_queue_depth - Alter scsi device queue depth
 * @sdev: Pointer to the scsi device on which to change the queue depth.
 * @qdepth: New queue depth to set the sdev to.
 * @reason: The reason for the queue depth change.
 *
 * This function is called by the midlayer and the LLD to alter the queue
 * depth for a scsi device. This function sets the queue depth to the new
 * value and sends an event out to log the queue depth change.
 **/
int
lpfc_change_queue_depth(struct scsi_device *sdev, int qdepth, int reason)
{
	struct lpfc_vport *vport = (struct lpfc_vport *) sdev->host->hostdata;
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_rport_data *rdata;
	unsigned long new_queue_depth, old_queue_depth;

	old_queue_depth = sdev->queue_depth;
	scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), qdepth);
	new_queue_depth = sdev->queue_depth;
	rdata = sdev->hostdata;
	if (rdata)
		lpfc_send_sdev_queuedepth_change_event(phba, vport,
						       rdata->pnode, sdev->lun,
						       old_queue_depth,
						       new_queue_depth);
	return sdev->queue_depth;
}
/**
 * lpfc_rampdown_queue_depth - Post RAMP_DOWN_QUEUE event to worker thread
 * @phba: The Hba for which this call is being executed.
 *
 * This routine is called when there is a resource error in the driver or
 * firmware. It posts a WORKER_RAMP_DOWN_QUEUE event for @phba, posting at
 * most one event each second, and wakes up the worker thread of @phba to
 * process the event.
 *
 * This routine should be called with no lock held.
 **/
void
lpfc_rampdown_queue_depth(struct lpfc_hba *phba)
{
	unsigned long flags;
	uint32_t evt_posted;

	spin_lock_irqsave(&phba->hbalock, flags);
	atomic_inc(&phba->num_rsrc_err);
	phba->last_rsrc_error_time = jiffies;

	if ((phba->last_ramp_down_time + QUEUE_RAMP_DOWN_INTERVAL) > jiffies) {
		spin_unlock_irqrestore(&phba->hbalock, flags);
		return;
	}

	phba->last_ramp_down_time = jiffies;

	spin_unlock_irqrestore(&phba->hbalock, flags);

	spin_lock_irqsave(&phba->pport->work_port_lock, flags);
	evt_posted = phba->pport->work_port_events & WORKER_RAMP_DOWN_QUEUE;
	if (!evt_posted)
		phba->pport->work_port_events |= WORKER_RAMP_DOWN_QUEUE;
	spin_unlock_irqrestore(&phba->pport->work_port_lock, flags);

	if (!evt_posted)
		lpfc_worker_wake_up(phba);
	return;
}

/**
 * lpfc_rampup_queue_depth - Post RAMP_UP_QUEUE event for worker thread
 * @vport: The virtual port for which this call is being executed.
 * @queue_depth: The current queue depth of the scsi device.
 *
 * This routine posts a WORKER_RAMP_UP_QUEUE event for the @vport's phba,
 * posting at most one event every five minutes after last_ramp_up_time or
 * last_rsrc_error_time, and wakes up the worker thread of the phba to
 * process the event.
 *
 * This routine should be called with no lock held.
 **/
static inline void
lpfc_rampup_queue_depth(struct lpfc_vport *vport,
			uint32_t queue_depth)
{
	unsigned long flags;
	struct lpfc_hba *phba = vport->phba;
	uint32_t evt_posted;
	atomic_inc(&phba->num_cmd_success);

	if (vport->cfg_lun_queue_depth <= queue_depth)
		return;
	spin_lock_irqsave(&phba->hbalock, flags);
	if (time_before(jiffies,
			phba->last_ramp_up_time + QUEUE_RAMP_UP_INTERVAL) ||
	    time_before(jiffies,
			phba->last_rsrc_error_time + QUEUE_RAMP_UP_INTERVAL)) {
		spin_unlock_irqrestore(&phba->hbalock, flags);
		return;
	}
	phba->last_ramp_up_time = jiffies;
	spin_unlock_irqrestore(&phba->hbalock, flags);

	spin_lock_irqsave(&phba->pport->work_port_lock, flags);
	evt_posted = phba->pport->work_port_events & WORKER_RAMP_UP_QUEUE;
	if (!evt_posted)
		phba->pport->work_port_events |= WORKER_RAMP_UP_QUEUE;
	spin_unlock_irqrestore(&phba->pport->work_port_lock, flags);

	if (!evt_posted)
		lpfc_worker_wake_up(phba);
	return;
}
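/*
 * Worked example (added for clarity; the numbers are hypothetical):
 * the WORKER_RAMP_DOWN_QUEUE handler below shrinks each sdev queue
 * depth by the fraction of commands that hit a resource error:
 *
 *	cut = depth * num_rsrc_err / (num_rsrc_err + num_cmd_success);
 *	new depth = depth - cut  (or depth - 1 when cut rounds to zero)
 *
 * e.g. depth = 32 with 10 resource errors and 30 successes gives
 * cut = 32 * 10 / 40 = 8, so the queue depth drops from 32 to 24.
 */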
/**
 * lpfc_ramp_down_queue_handler - WORKER_RAMP_DOWN_QUEUE event handler
 * @phba: The Hba for which this call is being executed.
 *
 * This routine is called to process the WORKER_RAMP_DOWN_QUEUE event for
 * the worker thread. It reduces the queue depth for all scsi devices on
 * each vport associated with @phba.
 **/
void
lpfc_ramp_down_queue_handler(struct lpfc_hba *phba)
{
	struct lpfc_vport **vports;
	struct Scsi_Host *shost;
	struct scsi_device *sdev;
	unsigned long new_queue_depth;
	unsigned long num_rsrc_err, num_cmd_success;
	int i;

	num_rsrc_err = atomic_read(&phba->num_rsrc_err);
	num_cmd_success = atomic_read(&phba->num_cmd_success);

	vports = lpfc_create_vport_work_array(phba);
	if (vports != NULL)
		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
			shost = lpfc_shost_from_vport(vports[i]);
			shost_for_each_device(sdev, shost) {
				new_queue_depth =
					sdev->queue_depth * num_rsrc_err /
					(num_rsrc_err + num_cmd_success);
				if (!new_queue_depth)
					new_queue_depth = sdev->queue_depth - 1;
				else
					new_queue_depth = sdev->queue_depth -
								new_queue_depth;
				lpfc_change_queue_depth(sdev, new_queue_depth,
							SCSI_QDEPTH_DEFAULT);
			}
		}
	lpfc_destroy_vport_work_array(phba, vports);
	atomic_set(&phba->num_rsrc_err, 0);
	atomic_set(&phba->num_cmd_success, 0);
}

/**
 * lpfc_ramp_up_queue_handler - WORKER_RAMP_UP_QUEUE event handler
 * @phba: The Hba for which this call is being executed.
 *
 * This routine is called to process the WORKER_RAMP_UP_QUEUE event for the
 * worker thread. It increases the queue depth for all scsi devices on each
 * vport associated with @phba by 1. This routine also resets @phba's
 * num_rsrc_err and num_cmd_success counters to zero.
 **/
void
lpfc_ramp_up_queue_handler(struct lpfc_hba *phba)
{
	struct lpfc_vport **vports;
	struct Scsi_Host *shost;
	struct scsi_device *sdev;
	int i;

	vports = lpfc_create_vport_work_array(phba);
	if (vports != NULL)
		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
			shost = lpfc_shost_from_vport(vports[i]);
			shost_for_each_device(sdev, shost) {
				if (vports[i]->cfg_lun_queue_depth <=
				    sdev->queue_depth)
					continue;
				lpfc_change_queue_depth(sdev,
							sdev->queue_depth+1,
							SCSI_QDEPTH_RAMP_UP);
			}
		}
	lpfc_destroy_vport_work_array(phba, vports);
	atomic_set(&phba->num_rsrc_err, 0);
	atomic_set(&phba->num_cmd_success, 0);
}

/**
 * lpfc_scsi_dev_block - set all scsi hosts to block state
 * @phba: Pointer to HBA context object.
 *
 * This function walks the vport list and sets each SCSI host to block state
 * by invoking the fc_remote_port_delete() routine. This function is invoked
 * with EEH when the device's PCI slot has been permanently disabled.
 **/
void
lpfc_scsi_dev_block(struct lpfc_hba *phba)
{
	struct lpfc_vport **vports;
	struct Scsi_Host *shost;
	struct scsi_device *sdev;
	struct fc_rport *rport;
	int i;

	vports = lpfc_create_vport_work_array(phba);
	if (vports != NULL)
		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
			shost = lpfc_shost_from_vport(vports[i]);
			shost_for_each_device(sdev, shost) {
				rport = starget_to_rport(scsi_target(sdev));
				fc_remote_port_delete(rport);
			}
		}
	lpfc_destroy_vport_work_array(phba, vports);
}
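/*
 * Layout note (added for clarity): the SLI-3 allocator below carves a
 * single DMA buffer into three fixed regions,
 *
 *	[fcp_cmnd][fcp_rsp][BPL: bpl[0]=cmd, bpl[1]=rsp, bpl[2..]=data]
 *
 * so pdma_phys_fcp_rsp and pdma_phys_bpl are simply fixed offsets from
 * psb->dma_handle.
 */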
/**
 * lpfc_new_scsi_buf_s3 - Scsi buffer allocator for HBA with SLI3 IF spec
 * @vport: The virtual port for which this call is being executed.
 * @num_to_alloc: The requested number of buffers to allocate.
 *
 * This routine allocates a scsi buffer for device with SLI-3 interface spec,
 * the scsi buffer contains all the necessary information needed to initiate
 * a SCSI I/O. The non-DMAable buffer region contains information to build
 * the IOCB. The DMAable region contains memory for the FCP CMND, FCP RSP,
 * and the initial BPL. In addition to allocating memory, the FCP CMND and
 * FCP RSP BDEs are setup in the BPL and the BPL BDE is setup in the IOCB.
 *
 * Return codes:
 *   int - number of scsi buffers that were allocated.
 *   0 = failure, less than num_to_alloc is a partial failure.
 **/
static int
lpfc_new_scsi_buf_s3(struct lpfc_vport *vport, int num_to_alloc)
{
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_scsi_buf *psb;
	struct ulp_bde64 *bpl;
	IOCB_t *iocb;
	dma_addr_t pdma_phys_fcp_cmd;
	dma_addr_t pdma_phys_fcp_rsp;
	dma_addr_t pdma_phys_bpl;
	uint16_t iotag;
	int bcnt;

	for (bcnt = 0; bcnt < num_to_alloc; bcnt++) {
		psb = kzalloc(sizeof(struct lpfc_scsi_buf), GFP_KERNEL);
		if (!psb)
			break;

		/*
		 * Get memory from the pci pool to map the virt space to pci
		 * bus space for an I/O. The DMA buffer includes space for the
		 * struct fcp_cmnd, struct fcp_rsp and the number of bde's
		 * necessary to support the sg_tablesize.
		 */
		psb->data = pci_pool_alloc(phba->lpfc_scsi_dma_buf_pool,
					GFP_KERNEL, &psb->dma_handle);
		if (!psb->data) {
			kfree(psb);
			break;
		}

		/* Initialize virtual ptrs to dma_buf region. */
		memset(psb->data, 0, phba->cfg_sg_dma_buf_size);

		/* Allocate iotag for psb->cur_iocbq. */
		iotag = lpfc_sli_next_iotag(phba, &psb->cur_iocbq);
		if (iotag == 0) {
			pci_pool_free(phba->lpfc_scsi_dma_buf_pool,
					psb->data, psb->dma_handle);
			kfree(psb);
			break;
		}
		psb->cur_iocbq.iocb_flag |= LPFC_IO_FCP;

		psb->fcp_cmnd = psb->data;
		psb->fcp_rsp = psb->data + sizeof(struct fcp_cmnd);
		psb->fcp_bpl = psb->data + sizeof(struct fcp_cmnd) +
			sizeof(struct fcp_rsp);

		/* Initialize local short-hand pointers. */
		bpl = psb->fcp_bpl;
		pdma_phys_fcp_cmd = psb->dma_handle;
		pdma_phys_fcp_rsp = psb->dma_handle + sizeof(struct fcp_cmnd);
		pdma_phys_bpl = psb->dma_handle + sizeof(struct fcp_cmnd) +
			sizeof(struct fcp_rsp);

		/*
		 * The first two bdes are the FCP_CMD and FCP_RSP. The balance
		 * are sg list bdes. Initialize the first two and leave the
		 * rest for queuecommand.
		 */
		bpl[0].addrHigh = le32_to_cpu(putPaddrHigh(pdma_phys_fcp_cmd));
		bpl[0].addrLow = le32_to_cpu(putPaddrLow(pdma_phys_fcp_cmd));
		bpl[0].tus.f.bdeSize = sizeof(struct fcp_cmnd);
		bpl[0].tus.f.bdeFlags = BUFF_TYPE_BDE_64;
		bpl[0].tus.w = le32_to_cpu(bpl[0].tus.w);

		/* Setup the physical region for the FCP RSP */
		bpl[1].addrHigh = le32_to_cpu(putPaddrHigh(pdma_phys_fcp_rsp));
		bpl[1].addrLow = le32_to_cpu(putPaddrLow(pdma_phys_fcp_rsp));
		bpl[1].tus.f.bdeSize = sizeof(struct fcp_rsp);
		bpl[1].tus.f.bdeFlags = BUFF_TYPE_BDE_64;
		bpl[1].tus.w = le32_to_cpu(bpl[1].tus.w);

		/*
		 * Since the IOCB for the FCP I/O is built into this
		 * lpfc_scsi_buf, initialize it with all known data now.
		 */
		iocb = &psb->cur_iocbq.iocb;
		iocb->un.fcpi64.bdl.ulpIoTag32 = 0;
		if ((phba->sli_rev == 3) &&
				!(phba->sli3_options & LPFC_SLI3_BG_ENABLED)) {
			/* fill in immediate fcp command BDE */
			iocb->un.fcpi64.bdl.bdeFlags = BUFF_TYPE_BDE_IMMED;
			iocb->un.fcpi64.bdl.bdeSize = sizeof(struct fcp_cmnd);
			iocb->un.fcpi64.bdl.addrLow = offsetof(IOCB_t,
					unsli3.fcp_ext.icd);
			iocb->un.fcpi64.bdl.addrHigh = 0;
			iocb->ulpBdeCount = 0;
			iocb->ulpLe = 0;
			/* fill in response BDE */
			iocb->unsli3.fcp_ext.rbde.tus.f.bdeFlags =
							BUFF_TYPE_BDE_64;
			iocb->unsli3.fcp_ext.rbde.tus.f.bdeSize =
				sizeof(struct fcp_rsp);
			iocb->unsli3.fcp_ext.rbde.addrLow =
				putPaddrLow(pdma_phys_fcp_rsp);
			iocb->unsli3.fcp_ext.rbde.addrHigh =
				putPaddrHigh(pdma_phys_fcp_rsp);
		} else {
			iocb->un.fcpi64.bdl.bdeFlags = BUFF_TYPE_BLP_64;
			iocb->un.fcpi64.bdl.bdeSize =
					(2 * sizeof(struct ulp_bde64));
			iocb->un.fcpi64.bdl.addrLow =
					putPaddrLow(pdma_phys_bpl);
			iocb->un.fcpi64.bdl.addrHigh =
					putPaddrHigh(pdma_phys_bpl);
			iocb->ulpBdeCount = 1;
			iocb->ulpLe = 1;
		}
		iocb->ulpClass = CLASS3;
		psb->status = IOSTAT_SUCCESS;
		/* Put it back into the SCSI buffer list */
		lpfc_release_scsi_buf_s3(phba, psb);

	}

	return bcnt;
}
/**
 * lpfc_sli4_fcp_xri_aborted - Fast-path process of fcp xri abort
 * @phba: pointer to lpfc hba data structure.
 * @axri: pointer to the fcp xri abort wcqe structure.
 *
 * This routine is invoked by the worker thread to process a SLI4 fast-path
 * FCP aborted xri.
 **/
void
lpfc_sli4_fcp_xri_aborted(struct lpfc_hba *phba,
			  struct sli4_wcqe_xri_aborted *axri)
{
	uint16_t xri = bf_get(lpfc_wcqe_xa_xri, axri);
	struct lpfc_scsi_buf *psb, *next_psb;
	unsigned long iflag = 0;
	struct lpfc_iocbq *iocbq;
	int i;
	struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];

	spin_lock_irqsave(&phba->hbalock, iflag);
	spin_lock(&phba->sli4_hba.abts_scsi_buf_list_lock);
	list_for_each_entry_safe(psb, next_psb,
		&phba->sli4_hba.lpfc_abts_scsi_buf_list, list) {
		if (psb->cur_iocbq.sli4_xritag == xri) {
			list_del(&psb->list);
			psb->exch_busy = 0;
			psb->status = IOSTAT_SUCCESS;
			spin_unlock(
				&phba->sli4_hba.abts_scsi_buf_list_lock);
			spin_unlock_irqrestore(&phba->hbalock, iflag);
			lpfc_release_scsi_buf_s4(phba, psb);
			return;
		}
	}
	spin_unlock(&phba->sli4_hba.abts_scsi_buf_list_lock);
	for (i = 1; i <= phba->sli.last_iotag; i++) {
		iocbq = phba->sli.iocbq_lookup[i];

		if (!(iocbq->iocb_flag & LPFC_IO_FCP) ||
			(iocbq->iocb_flag & LPFC_IO_LIBDFC))
			continue;
		if (iocbq->sli4_xritag != xri)
			continue;
		psb = container_of(iocbq, struct lpfc_scsi_buf, cur_iocbq);
		psb->exch_busy = 0;
		spin_unlock_irqrestore(&phba->hbalock, iflag);
		if (pring->txq_cnt)
			lpfc_worker_wake_up(phba);
		return;

	}
	spin_unlock_irqrestore(&phba->hbalock, iflag);
}

/**
 * lpfc_sli4_repost_scsi_sgl_list - Repost the Scsi buffers sgl pages as block
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine walks the list of scsi buffers that have been allocated and
 * reposts them to the HBA by using SGL block post. This is needed after a
 * pci_function_reset/warm_start or start. The lpfc_hba_down_post_s4 routine
 * is responsible for moving all scsi buffers on the lpfc_abts_scsi_sgl_list
 * to the lpfc_scsi_buf_list. If the repost fails, reject all scsi buffers.
 *
 * Returns: 0 = success, non-zero failure.
 **/
int
lpfc_sli4_repost_scsi_sgl_list(struct lpfc_hba *phba)
{
	struct lpfc_scsi_buf *psb;
	int index, status, bcnt = 0, rcnt = 0, rc = 0;
	LIST_HEAD(sblist);

	for (index = 0; index < phba->sli4_hba.scsi_xri_cnt; index++) {
		psb = phba->sli4_hba.lpfc_scsi_psb_array[index];
		if (psb) {
			/* Remove from SCSI buffer list */
			list_del(&psb->list);
			/* Add it to a local SCSI buffer list */
			list_add_tail(&psb->list, &sblist);
			if (++rcnt == LPFC_NEMBED_MBOX_SGL_CNT) {
				bcnt = rcnt;
				rcnt = 0;
			}
		} else
			/* A hole present in the XRI array, need to skip */
			bcnt = rcnt;

		if (index == phba->sli4_hba.scsi_xri_cnt - 1)
			/* End of XRI array for SCSI buffer, complete */
			bcnt = rcnt;

		/* Continue until we collect up to a nembed page worth of sgls */
		if (bcnt == 0)
			continue;
		/* Now, post the SCSI buffer list sgls as a block */
		status = lpfc_sli4_post_scsi_sgl_block(phba, &sblist, bcnt);
		/* Reset SCSI buffer count for next round of posting */
		bcnt = 0;
		while (!list_empty(&sblist)) {
			list_remove_head(&sblist, psb, struct lpfc_scsi_buf,
					 list);
			if (status) {
				/* Put this back on the abort scsi list */
				psb->exch_busy = 1;
				rc++;
			} else {
				psb->exch_busy = 0;
				psb->status = IOSTAT_SUCCESS;
			}
			/* Put it back into the SCSI buffer list */
			lpfc_release_scsi_buf_s4(phba, psb);
		}
	}
	return rc;
}
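/*
 * Layout note (added for clarity): unlike the SLI-3 allocator above,
 * the SLI-4 allocator below puts the SGL at the start of the DMA
 * buffer and packs the FCP command and response at the end:
 *
 *	SLI-3: [fcp_cmnd][fcp_rsp][BPL entries ...]
 *	SLI-4: [SGL entries ...][fcp_cmnd][fcp_rsp]
 *
 * which is what the pointer arithmetic on psb->data and psb->dma_handle
 * in lpfc_new_scsi_buf_s4() implements.
 */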
/**
 * lpfc_new_scsi_buf_s4 - Scsi buffer allocator for HBA with SLI4 IF spec
 * @vport: The virtual port for which this call is being executed.
 * @num_to_alloc: The requested number of buffers to allocate.
 *
 * This routine allocates a scsi buffer for device with SLI-4 interface spec,
 * the scsi buffer contains all the necessary information needed to initiate
 * a SCSI I/O.
 *
 * Return codes:
 *   int - number of scsi buffers that were allocated.
 *   0 = failure, less than num_to_alloc is a partial failure.
 **/
static int
lpfc_new_scsi_buf_s4(struct lpfc_vport *vport, int num_to_alloc)
{
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_scsi_buf *psb;
	struct sli4_sge *sgl;
	IOCB_t *iocb;
	dma_addr_t pdma_phys_fcp_cmd;
	dma_addr_t pdma_phys_fcp_rsp;
	dma_addr_t pdma_phys_bpl, pdma_phys_bpl1;
	uint16_t iotag, last_xritag = NO_XRI;
	int status = 0, index;
	int bcnt;
	int non_sequential_xri = 0;
	LIST_HEAD(sblist);

	for (bcnt = 0; bcnt < num_to_alloc; bcnt++) {
		psb = kzalloc(sizeof(struct lpfc_scsi_buf), GFP_KERNEL);
		if (!psb)
			break;

		/*
		 * Get memory from the pci pool to map the virt space to pci
		 * bus space for an I/O. The DMA buffer includes space for the
		 * struct fcp_cmnd, struct fcp_rsp and the number of bde's
		 * necessary to support the sg_tablesize.
		 */
		psb->data = pci_pool_alloc(phba->lpfc_scsi_dma_buf_pool,
						GFP_KERNEL, &psb->dma_handle);
		if (!psb->data) {
			kfree(psb);
			break;
		}

		/* Initialize virtual ptrs to dma_buf region. */
		memset(psb->data, 0, phba->cfg_sg_dma_buf_size);

		/* Allocate iotag for psb->cur_iocbq. */
		iotag = lpfc_sli_next_iotag(phba, &psb->cur_iocbq);
		if (iotag == 0) {
			pci_pool_free(phba->lpfc_scsi_dma_buf_pool,
				psb->data, psb->dma_handle);
			kfree(psb);
			break;
		}

		psb->cur_iocbq.sli4_xritag = lpfc_sli4_next_xritag(phba);
		if (psb->cur_iocbq.sli4_xritag == NO_XRI) {
			pci_pool_free(phba->lpfc_scsi_dma_buf_pool,
				psb->data, psb->dma_handle);
			kfree(psb);
			break;
		}
		if (last_xritag != NO_XRI
			&& psb->cur_iocbq.sli4_xritag != (last_xritag+1)) {
			non_sequential_xri = 1;
		} else
			list_add_tail(&psb->list, &sblist);
		last_xritag = psb->cur_iocbq.sli4_xritag;

		index = phba->sli4_hba.scsi_xri_cnt++;
		psb->cur_iocbq.iocb_flag |= LPFC_IO_FCP;

		psb->fcp_bpl = psb->data;
		psb->fcp_cmnd = (psb->data + phba->cfg_sg_dma_buf_size)
			- (sizeof(struct fcp_cmnd) + sizeof(struct fcp_rsp));
		psb->fcp_rsp = (struct fcp_rsp *)((uint8_t *)psb->fcp_cmnd +
					sizeof(struct fcp_cmnd));

		/* Initialize local short-hand pointers. */
		sgl = (struct sli4_sge *)psb->fcp_bpl;
		pdma_phys_bpl = psb->dma_handle;
		pdma_phys_fcp_cmd =
			(psb->dma_handle + phba->cfg_sg_dma_buf_size)
			- (sizeof(struct fcp_cmnd) + sizeof(struct fcp_rsp));
		pdma_phys_fcp_rsp = pdma_phys_fcp_cmd + sizeof(struct fcp_cmnd);

		/*
		 * The first two bdes are the FCP_CMD and FCP_RSP. The balance
		 * are sg list bdes. Initialize the first two and leave the
		 * rest for queuecommand.
		 */
		sgl->addr_hi = cpu_to_le32(putPaddrHigh(pdma_phys_fcp_cmd));
		sgl->addr_lo = cpu_to_le32(putPaddrLow(pdma_phys_fcp_cmd));
		bf_set(lpfc_sli4_sge_last, sgl, 0);
		sgl->word2 = cpu_to_le32(sgl->word2);
		sgl->sge_len = cpu_to_le32(sizeof(struct fcp_cmnd));
		sgl++;

		/* Setup the physical region for the FCP RSP */
		sgl->addr_hi = cpu_to_le32(putPaddrHigh(pdma_phys_fcp_rsp));
		sgl->addr_lo = cpu_to_le32(putPaddrLow(pdma_phys_fcp_rsp));
		bf_set(lpfc_sli4_sge_last, sgl, 1);
		sgl->word2 = cpu_to_le32(sgl->word2);
		sgl->sge_len = cpu_to_le32(sizeof(struct fcp_rsp));

		/*
		 * Since the IOCB for the FCP I/O is built into this
		 * lpfc_scsi_buf, initialize it with all known data now.
		 */
		iocb = &psb->cur_iocbq.iocb;
		iocb->un.fcpi64.bdl.ulpIoTag32 = 0;
		iocb->un.fcpi64.bdl.bdeFlags = BUFF_TYPE_BDE_64;
		/* setting the BLP size to 2 * sizeof BDE may not be correct.
		 * We are setting the bpl to point to our sgl. An sgl's
		 * entries are 16 bytes, BPL entries are 12 bytes.
		 */
		iocb->un.fcpi64.bdl.bdeSize = sizeof(struct fcp_cmnd);
		iocb->un.fcpi64.bdl.addrLow = putPaddrLow(pdma_phys_fcp_cmd);
		iocb->un.fcpi64.bdl.addrHigh = putPaddrHigh(pdma_phys_fcp_cmd);
		iocb->ulpBdeCount = 1;
		iocb->ulpLe = 1;
		iocb->ulpClass = CLASS3;
		if (phba->cfg_sg_dma_buf_size > SGL_PAGE_SIZE)
			pdma_phys_bpl1 = pdma_phys_bpl + SGL_PAGE_SIZE;
		else
			pdma_phys_bpl1 = 0;
		psb->dma_phys_bpl = pdma_phys_bpl;
		phba->sli4_hba.lpfc_scsi_psb_array[index] = psb;
		if (non_sequential_xri) {
			status = lpfc_sli4_post_sgl(phba, pdma_phys_bpl,
						pdma_phys_bpl1,
						psb->cur_iocbq.sli4_xritag);
			if (status) {
				/* Put this back on the abort scsi list */
				psb->exch_busy = 1;
			} else {
				psb->exch_busy = 0;
				psb->status = IOSTAT_SUCCESS;
			}
			/* Put it back into the SCSI buffer list */
			lpfc_release_scsi_buf_s4(phba, psb);
			break;
		}
	}
	if (bcnt) {
		status = lpfc_sli4_post_scsi_sgl_block(phba, &sblist, bcnt);
		/* Reset SCSI buffer count for next round of posting */
		while (!list_empty(&sblist)) {
			list_remove_head(&sblist, psb, struct lpfc_scsi_buf,
					 list);
			if (status) {
				/* Put this back on the abort scsi list */
				psb->exch_busy = 1;
			} else {
				psb->exch_busy = 0;
				psb->status = IOSTAT_SUCCESS;
			}
			/* Put it back into the SCSI buffer list */
			lpfc_release_scsi_buf_s4(phba, psb);
		}
	}

	return bcnt + non_sequential_xri;
}
/**
 * lpfc_new_scsi_buf - Wrapper function for scsi buffer allocator
 * @vport: The virtual port for which this call is being executed.
 * @num_to_alloc: The requested number of buffers to allocate.
 *
 * This routine wraps the actual SCSI buffer allocator function pointer from
 * the lpfc_hba struct.
 *
 * Return codes:
 *   int - number of scsi buffers that were allocated.
 *   0 = failure, less than num_to_alloc is a partial failure.
 **/
static inline int
lpfc_new_scsi_buf(struct lpfc_vport *vport, int num_to_alloc)
{
	return vport->phba->lpfc_new_scsi_buf(vport, num_to_alloc);
}

/**
 * lpfc_get_scsi_buf - Get a scsi buffer from lpfc_scsi_buf_list of the HBA
 * @phba: The HBA for which this call is being executed.
 *
 * This routine removes a scsi buffer from the head of the @phba
 * lpfc_scsi_buf_list list and returns it to the caller.
 *
 * Return codes:
 *   NULL - Error
 *   Pointer to lpfc_scsi_buf - Success
 **/
static struct lpfc_scsi_buf*
lpfc_get_scsi_buf(struct lpfc_hba *phba)
{
	struct lpfc_scsi_buf *lpfc_cmd = NULL;
	struct list_head *scsi_buf_list = &phba->lpfc_scsi_buf_list;
	unsigned long iflag = 0;

	spin_lock_irqsave(&phba->scsi_buf_list_lock, iflag);
	list_remove_head(scsi_buf_list, lpfc_cmd, struct lpfc_scsi_buf, list);
	if (lpfc_cmd) {
		lpfc_cmd->seg_cnt = 0;
		lpfc_cmd->nonsg_phys = 0;
		lpfc_cmd->prot_seg_cnt = 0;
	}
	spin_unlock_irqrestore(&phba->scsi_buf_list_lock, iflag);
	return lpfc_cmd;
}
/**
 * lpfc_release_scsi_buf_s3 - Return a scsi buffer back to hba scsi buf list
 * @phba: The Hba for which this call is being executed.
 * @psb: The scsi buffer which is being released.
 *
 * This routine releases @psb scsi buffer by adding it to tail of @phba
 * lpfc_scsi_buf_list list.
 **/
static void
lpfc_release_scsi_buf_s3(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb)
{
	unsigned long iflag = 0;

	spin_lock_irqsave(&phba->scsi_buf_list_lock, iflag);
	psb->pCmd = NULL;
	list_add_tail(&psb->list, &phba->lpfc_scsi_buf_list);
	spin_unlock_irqrestore(&phba->scsi_buf_list_lock, iflag);
}

/**
 * lpfc_release_scsi_buf_s4 - Return a scsi buffer back to hba scsi buf list
 * @phba: The Hba for which this call is being executed.
 * @psb: The scsi buffer which is being released.
 *
 * This routine releases @psb scsi buffer by adding it to tail of @phba
 * lpfc_scsi_buf_list list. For SLI4, XRIs are tied to the scsi buffer
 * and cannot be reused for at least RA_TOV amount of time if the exchange
 * was aborted.
 **/
static void
lpfc_release_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb)
{
	unsigned long iflag = 0;

	if (psb->exch_busy) {
		spin_lock_irqsave(&phba->sli4_hba.abts_scsi_buf_list_lock,
					iflag);
		psb->pCmd = NULL;
		list_add_tail(&psb->list,
			&phba->sli4_hba.lpfc_abts_scsi_buf_list);
		spin_unlock_irqrestore(&phba->sli4_hba.abts_scsi_buf_list_lock,
					iflag);
	} else {

		spin_lock_irqsave(&phba->scsi_buf_list_lock, iflag);
		psb->pCmd = NULL;
		list_add_tail(&psb->list, &phba->lpfc_scsi_buf_list);
		spin_unlock_irqrestore(&phba->scsi_buf_list_lock, iflag);
	}
}

/**
 * lpfc_release_scsi_buf - Return a scsi buffer back to hba scsi buf list
 * @phba: The Hba for which this call is being executed.
 * @psb: The scsi buffer which is being released.
 *
 * This routine releases @psb scsi buffer by adding it to tail of @phba
 * lpfc_scsi_buf_list list.
 **/
static void
lpfc_release_scsi_buf(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb)
{

	phba->lpfc_release_scsi_buf(phba, psb);
}
/**
 * lpfc_scsi_prep_dma_buf_s3 - DMA mapping for scsi buffer to SLI3 IF spec
 * @phba: The Hba for which this call is being executed.
 * @lpfc_cmd: The scsi buffer which is going to be mapped.
 *
 * This routine does the pci dma mapping for scatter-gather list of scsi cmnd
 * field of @lpfc_cmd for device with SLI-3 interface spec. This routine scans
 * through sg elements and formats the BDEs. This routine also initializes all
 * IOCB fields which are dependent on scsi command request buffer.
 *
 * Return codes:
 *   1 - Error
 *   0 - Success
 **/
static int
lpfc_scsi_prep_dma_buf_s3(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
{
	struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd;
	struct scatterlist *sgel = NULL;
	struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd;
	struct ulp_bde64 *bpl = lpfc_cmd->fcp_bpl;
	struct lpfc_iocbq *iocbq = &lpfc_cmd->cur_iocbq;
	IOCB_t *iocb_cmd = &lpfc_cmd->cur_iocbq.iocb;
	struct ulp_bde64 *data_bde = iocb_cmd->unsli3.fcp_ext.dbde;
	dma_addr_t physaddr;
	uint32_t num_bde = 0;
	int nseg, datadir = scsi_cmnd->sc_data_direction;

	/*
	 * There are three possibilities here - use scatter-gather segment, use
	 * the single mapping, or neither. Start the lpfc command prep by
	 * bumping the bpl beyond the fcp_cmnd and fcp_rsp regions to the first
	 * data bde entry.
	 */
	bpl += 2;
	if (scsi_sg_count(scsi_cmnd)) {
		/*
		 * The driver stores the segment count returned from pci_map_sg
		 * because this is a count of dma-mappings used to map the
		 * use_sg pages. They are not guaranteed to be the same for
		 * those architectures that implement an IOMMU.
		 */

		nseg = dma_map_sg(&phba->pcidev->dev, scsi_sglist(scsi_cmnd),
				  scsi_sg_count(scsi_cmnd), datadir);
		if (unlikely(!nseg))
			return 1;

		lpfc_cmd->seg_cnt = nseg;
		if (lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt) {
			lpfc_printf_log(phba, KERN_ERR, LOG_BG,
				"9064 BLKGRD: %s: Too many sg segments from "
				"dma_map_sg. Config %d, seg_cnt %d\n",
				__func__, phba->cfg_sg_seg_cnt,
				lpfc_cmd->seg_cnt);
			scsi_dma_unmap(scsi_cmnd);
			return 1;
		}

		/*
		 * The driver established a maximum scatter-gather segment
		 * count during probe that limits the number of sg elements
		 * in any single scsi command. Just run through the seg_cnt
		 * and format the BDEs.
		 * When using SLI-3 the driver will try to fit all the BDEs
		 * into the IOCB. If it can't then the BDEs get added to a
		 * BPL as it does for SLI-2 mode.
		 */
		scsi_for_each_sg(scsi_cmnd, sgel, nseg, num_bde) {
			physaddr = sg_dma_address(sgel);
			if (phba->sli_rev == 3 &&
			    !(phba->sli3_options & LPFC_SLI3_BG_ENABLED) &&
			    !(iocbq->iocb_flag & DSS_SECURITY_OP) &&
			    nseg <= LPFC_EXT_DATA_BDE_COUNT) {
				data_bde->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
				data_bde->tus.f.bdeSize = sg_dma_len(sgel);
				data_bde->addrLow = putPaddrLow(physaddr);
				data_bde->addrHigh = putPaddrHigh(physaddr);
				data_bde++;
			} else {
				bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
				bpl->tus.f.bdeSize = sg_dma_len(sgel);
				bpl->tus.w = le32_to_cpu(bpl->tus.w);
				bpl->addrLow =
					le32_to_cpu(putPaddrLow(physaddr));
				bpl->addrHigh =
					le32_to_cpu(putPaddrHigh(physaddr));
				bpl++;
			}
		}
	}

	/*
	 * Finish initializing those IOCB fields that are dependent on the
	 * scsi_cmnd request_buffer. Note that for SLI-2 the bdeSize is
	 * explicitly reinitialized and for SLI-3 the extended bde count is
	 * explicitly reinitialized since all iocb memory resources are reused.
	 */
	if (phba->sli_rev == 3 &&
	    !(phba->sli3_options & LPFC_SLI3_BG_ENABLED) &&
	    !(iocbq->iocb_flag & DSS_SECURITY_OP)) {
		if (num_bde > LPFC_EXT_DATA_BDE_COUNT) {
			/*
			 * The extended IOCB format can only fit 3 BDE or a
			 * BPL. This I/O has more than 3 BDE so the 1st data
			 * bde will be a BPL that is filled in here.
			 */
			physaddr = lpfc_cmd->dma_handle;
			data_bde->tus.f.bdeFlags = BUFF_TYPE_BLP_64;
			data_bde->tus.f.bdeSize = (num_bde *
						   sizeof(struct ulp_bde64));
			physaddr += (sizeof(struct fcp_cmnd) +
				     sizeof(struct fcp_rsp) +
				     (2 * sizeof(struct ulp_bde64)));
			data_bde->addrHigh = putPaddrHigh(physaddr);
			data_bde->addrLow = putPaddrLow(physaddr);
			/* ebde count includes the response bde and data bpl */
			iocb_cmd->unsli3.fcp_ext.ebde_count = 2;
		} else {
			/* ebde count includes the response bde and data bdes */
			iocb_cmd->unsli3.fcp_ext.ebde_count = (num_bde + 1);
		}
	} else {
		iocb_cmd->un.fcpi64.bdl.bdeSize =
			((num_bde + 2) * sizeof(struct ulp_bde64));
		iocb_cmd->unsli3.fcp_ext.ebde_count = (num_bde + 1);
	}
	fcp_cmnd->fcpDl = cpu_to_be32(scsi_bufflen(scsi_cmnd));

	/*
	 * Due to difference in data length between DIF/non-DIF paths,
	 * we need to set word 4 of IOCB here
	 */
	iocb_cmd->un.fcpi.fcpi_parm = scsi_bufflen(scsi_cmnd);
	return 0;
}

/*
 * Given a scsi cmnd, determine the BlockGuard opcodes to be used with it
 * @sc: The SCSI command to examine
 * @txop: (out) BlockGuard operation for transmitted data
 * @rxop: (out) BlockGuard operation for received data
 *
 * Returns: zero on success; non-zero if tx and/or rx op cannot be determined
 *
 */
static int
lpfc_sc_to_bg_opcodes(struct lpfc_hba *phba, struct scsi_cmnd *sc,
		uint8_t *txop, uint8_t *rxop)
{
	uint8_t guard_type = scsi_host_get_guard(sc->device->host);
	uint8_t ret = 0;

	if (guard_type == SHOST_DIX_GUARD_IP) {
		switch (scsi_get_prot_op(sc)) {
		case SCSI_PROT_READ_INSERT:
		case SCSI_PROT_WRITE_STRIP:
			*txop = BG_OP_IN_CSUM_OUT_NODIF;
			*rxop = BG_OP_IN_NODIF_OUT_CSUM;
			break;

		case SCSI_PROT_READ_STRIP:
		case SCSI_PROT_WRITE_INSERT:
			*txop = BG_OP_IN_NODIF_OUT_CRC;
			*rxop = BG_OP_IN_CRC_OUT_NODIF;
			break;

		case SCSI_PROT_READ_PASS:
		case SCSI_PROT_WRITE_PASS:
			*txop = BG_OP_IN_CSUM_OUT_CRC;
			*rxop = BG_OP_IN_CRC_OUT_CSUM;
			break;

		case SCSI_PROT_NORMAL:
		default:
			lpfc_printf_log(phba, KERN_ERR, LOG_BG,
				"9063 BLKGRD: Bad op/guard:%d/%d combination\n",
					scsi_get_prot_op(sc), guard_type);
			ret = 1;
			break;

		}
	} else if (guard_type == SHOST_DIX_GUARD_CRC) {
		switch (scsi_get_prot_op(sc)) {
		case SCSI_PROT_READ_STRIP:
		case SCSI_PROT_WRITE_INSERT:
			*txop = BG_OP_IN_NODIF_OUT_CRC;
			*rxop = BG_OP_IN_CRC_OUT_NODIF;
			break;

		case SCSI_PROT_READ_PASS:
		case SCSI_PROT_WRITE_PASS:
			*txop = BG_OP_IN_CRC_OUT_CRC;
			*rxop = BG_OP_IN_CRC_OUT_CRC;
			break;

		case SCSI_PROT_READ_INSERT:
		case SCSI_PROT_WRITE_STRIP:
		case SCSI_PROT_NORMAL:
		default:
			lpfc_printf_log(phba, KERN_ERR, LOG_BG,
				"9075 BLKGRD: Bad op/guard:%d/%d combination\n",
					scsi_get_prot_op(sc), guard_type);
			ret = 1;
			break;
		}
	} else {
		/* unsupported format */
		BUG();
	}

	return ret;
}
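/*
 * Note (added for clarity): struct scsi_dif_tuple below mirrors the
 * 8-byte DIF tuple that trails each logical block when protection
 * information is interleaved with the data:
 *
 *	bytes 0-1: guard tag (CRC or IP checksum of the data block)
 *	bytes 2-3: application tag (opaque storage)
 *	bytes 4-7: reference tag (low 32 bits of the LBA for Type 1)
 *
 * This fixed 8-byte size is why the protection-group math further down
 * divides protection segment lengths by 8.
 */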
struct scsi_dif_tuple {
	__be16 guard_tag;	/* Checksum */
	__be16 app_tag;		/* Opaque storage */
	__be32 ref_tag;		/* Target LBA or indirect LBA */
};

static inline unsigned
lpfc_cmd_blksize(struct scsi_cmnd *sc)
{
	return sc->device->sector_size;
}

/**
 * lpfc_get_cmd_dif_parms - Extract DIF parameters from SCSI command
 * @sc: in: SCSI command
 * @apptagmask: out: app tag mask
 * @apptagval: out: app tag value
 * @reftag: out: ref tag (reference tag)
 *
 * Description:
 *   Extract DIF parameters from the command if possible. Otherwise,
 *   use default parameters.
 *
 **/
static inline void
lpfc_get_cmd_dif_parms(struct scsi_cmnd *sc, uint16_t *apptagmask,
		uint16_t *apptagval, uint32_t *reftag)
{
	struct scsi_dif_tuple *spt;
	unsigned char op = scsi_get_prot_op(sc);
	unsigned int protcnt = scsi_prot_sg_count(sc);
	static int cnt;

	if (protcnt && (op == SCSI_PROT_WRITE_STRIP ||
	    op == SCSI_PROT_WRITE_PASS)) {

		cnt++;
		spt = page_address(sg_page(scsi_prot_sglist(sc))) +
			scsi_prot_sglist(sc)[0].offset;
		*apptagmask = 0;
		*apptagval = 0;
		*reftag = be32_to_cpu(spt->ref_tag);

	} else {
		/* SBC defines ref tag to be lower 32bits of LBA */
		*reftag = (uint32_t) (0xffffffff & scsi_get_lba(sc));
		*apptagmask = 0;
		*apptagval = 0;
	}
}

/*
 * This function sets up buffer list for protection groups of
 * type LPFC_PG_TYPE_NO_DIF
 *
 * This is usually used when the HBA is instructed to generate
 * DIFs and insert them into data stream (or strip DIF from
 * incoming data stream)
 *
 * The buffer list consists of just one protection group described
 * below:
 *                              +-------------------------+
 *       start of prot group -->|          PDE_5          |
 *                              +-------------------------+
 *                              |          PDE_6          |
 *                              +-------------------------+
 *                              |         Data BDE        |
 *                              +-------------------------+
 *                              |more Data BDE's ... (opt)|
 *                              +-------------------------+
 *
 * @sc: pointer to scsi command we're working on
 * @bpl: pointer to buffer list for protection groups
 * @datasegcnt: number of segments of data that have been dma mapped
 *
 * Note: Data s/g buffers have been dma mapped
 */
static int
lpfc_bg_setup_bpl(struct lpfc_hba *phba, struct scsi_cmnd *sc,
		struct ulp_bde64 *bpl, int datasegcnt)
{
	struct scatterlist *sgde = NULL; /* s/g data entry */
	struct lpfc_pde5 *pde5 = NULL;
	struct lpfc_pde6 *pde6 = NULL;
	dma_addr_t physaddr;
	int i = 0, num_bde = 0, status;
	int datadir = sc->sc_data_direction;
	unsigned blksize;
	uint32_t reftag;
	uint16_t apptagmask, apptagval;
	uint8_t txop, rxop;

	status = lpfc_sc_to_bg_opcodes(phba, sc, &txop, &rxop);
	if (status)
		goto out;

	/* extract some info from the scsi command for pde*/
	blksize = lpfc_cmd_blksize(sc);
	lpfc_get_cmd_dif_parms(sc, &apptagmask, &apptagval, &reftag);

	/* setup PDE5 with what we have */
	pde5 = (struct lpfc_pde5 *) bpl;
	memset(pde5, 0, sizeof(struct lpfc_pde5));
	bf_set(pde5_type, pde5, LPFC_PDE5_DESCRIPTOR);
	pde5->reftag = reftag;

	/* Endianness conversion if necessary for PDE5 */
	pde5->word0 = cpu_to_le32(pde5->word0);
	pde5->reftag = cpu_to_le32(pde5->reftag);

	/* advance bpl and increment bde count */
	num_bde++;
	bpl++;
	pde6 = (struct lpfc_pde6 *) bpl;

	/* setup PDE6 with the rest of the info */
	memset(pde6, 0, sizeof(struct lpfc_pde6));
	bf_set(pde6_type, pde6, LPFC_PDE6_DESCRIPTOR);
	bf_set(pde6_optx, pde6, txop);
	bf_set(pde6_oprx, pde6, rxop);
	if (datadir == DMA_FROM_DEVICE) {
		bf_set(pde6_ce, pde6, 1);
		bf_set(pde6_re, pde6, 1);
		bf_set(pde6_ae, pde6, 1);
	}
	bf_set(pde6_ai, pde6, 1);
	bf_set(pde6_apptagval, pde6, apptagval);

	/* Endianness conversion if necessary for PDE6 */
	pde6->word0 = cpu_to_le32(pde6->word0);
	pde6->word1 = cpu_to_le32(pde6->word1);
	pde6->word2 = cpu_to_le32(pde6->word2);

	/* advance bpl and increment bde count */
	num_bde++;
	bpl++;

	/* assumption: caller has already run dma_map_sg on command data */
	scsi_for_each_sg(sc, sgde, datasegcnt, i) {
		physaddr = sg_dma_address(sgde);
		bpl->addrLow = le32_to_cpu(putPaddrLow(physaddr));
		bpl->addrHigh = le32_to_cpu(putPaddrHigh(physaddr));
		bpl->tus.f.bdeSize = sg_dma_len(sgde);
		if (datadir == DMA_TO_DEVICE)
			bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
		else
			bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64I;
		bpl->tus.w = le32_to_cpu(bpl->tus.w);
		bpl++;
		num_bde++;
	}

out:
	return num_bde;
}

/*
 * This function sets up buffer list for protection groups of
 * type LPFC_PG_TYPE_DIF_BUF
 *
 * This is usually used when DIFs are in their own buffers,
 * separate from the data. The HBA can then be instructed
 * to place the DIFs in the outgoing stream. For read operations,
 * the HBA could extract the DIFs and place them in DIF buffers.
 *
 * The buffer list for this type consists of one or more of the
 * protection groups described below:
 *                                 +-------------------------+
 *  start of first prot group  --> |          PDE_5          |
 *                                 +-------------------------+
 *                                 |          PDE_6          |
 *                                 +-------------------------+
 *                                 |    PDE_7 (Prot BDE)     |
 *                                 +-------------------------+
 *                                 |        Data BDE         |
 *                                 +-------------------------+
 *                                 |more Data BDE's ... (opt)|
 *                                 +-------------------------+
 *  start of new prot group    --> |          PDE_5          |
 *                                 +-------------------------+
 *                                 |          ...            |
 *                                 +-------------------------+
 *
 * @sc: pointer to scsi command we're working on
 * @bpl: pointer to buffer list for protection groups
 * @datacnt: number of segments of data that have been dma mapped
 * @protcnt: number of segment of protection data that have been dma mapped
 *
 * Note: It is assumed that both data and protection s/g buffers have been
 *       mapped for DMA
 */
static int
lpfc_bg_setup_bpl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc,
		struct ulp_bde64 *bpl, int datacnt, int protcnt)
{
	struct scatterlist *sgde = NULL; /* s/g data entry */
	struct scatterlist *sgpe = NULL; /* s/g prot entry */
	struct lpfc_pde5 *pde5 = NULL;
	struct lpfc_pde6 *pde6 = NULL;
	struct ulp_bde64 *prot_bde = NULL;
	dma_addr_t dataphysaddr, protphysaddr;
	unsigned short curr_data = 0, curr_prot = 0;
	unsigned int split_offset, protgroup_len;
	unsigned int protgrp_blks, protgrp_bytes;
	unsigned int remainder, subtotal;
	int status;
	int datadir = sc->sc_data_direction;
	unsigned char pgdone = 0, alldone = 0;
	unsigned blksize;
	uint32_t reftag;
	uint16_t apptagmask, apptagval;
	uint8_t txop, rxop;
	int num_bde = 0;

	sgpe = scsi_prot_sglist(sc);
	sgde = scsi_sglist(sc);

	if (!sgpe || !sgde) {
		lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
				"9020 Invalid s/g entry: data=0x%p prot=0x%p\n",
				sgpe, sgde);
		return 0;
	}

	status = lpfc_sc_to_bg_opcodes(phba, sc, &txop, &rxop);
	if (status)
		goto out;

	/* extract some info from the scsi command */
	blksize = lpfc_cmd_blksize(sc);
	lpfc_get_cmd_dif_parms(sc, &apptagmask, &apptagval, &reftag);

	split_offset = 0;
	do {
		/* setup PDE5 with what we have */
		pde5 = (struct lpfc_pde5 *) bpl;
		memset(pde5, 0, sizeof(struct lpfc_pde5));
		bf_set(pde5_type, pde5, LPFC_PDE5_DESCRIPTOR);
		pde5->reftag = reftag;

		/* Endianness conversion if necessary for PDE5 */
		pde5->word0 = cpu_to_le32(pde5->word0);
		pde5->reftag = cpu_to_le32(pde5->reftag);

		/* advance bpl and increment bde count */
		num_bde++;
		bpl++;
		pde6 = (struct lpfc_pde6 *) bpl;

		/* setup PDE6 with the rest of the info */
		memset(pde6, 0, sizeof(struct lpfc_pde6));
		bf_set(pde6_type, pde6, LPFC_PDE6_DESCRIPTOR);
		bf_set(pde6_optx, pde6, txop);
		bf_set(pde6_oprx, pde6, rxop);
		bf_set(pde6_ce, pde6, 1);
		bf_set(pde6_re, pde6, 1);
		bf_set(pde6_ae, pde6, 1);
		bf_set(pde6_ai, pde6, 1);
		bf_set(pde6_apptagval, pde6, apptagval);

		/* Endianness conversion if necessary for PDE6 */
		pde6->word0 = cpu_to_le32(pde6->word0);
		pde6->word1 = cpu_to_le32(pde6->word1);
		pde6->word2 = cpu_to_le32(pde6->word2);

		/* advance bpl and increment bde count */
		num_bde++;
		bpl++;

		/* setup the first BDE that points to protection buffer */
		prot_bde = (struct ulp_bde64 *) bpl;
		protphysaddr = sg_dma_address(sgpe);
		prot_bde->addrHigh = le32_to_cpu(putPaddrHigh(protphysaddr));
		prot_bde->addrLow = le32_to_cpu(putPaddrLow(protphysaddr));
		protgroup_len = sg_dma_len(sgpe);

		/* must be integer multiple of the DIF block length */
		BUG_ON(protgroup_len % 8);

		protgrp_blks = protgroup_len / 8;
		protgrp_bytes = protgrp_blks * blksize;

		prot_bde->tus.f.bdeSize = protgroup_len;
		prot_bde->tus.f.bdeFlags = LPFC_PDE7_DESCRIPTOR;
		prot_bde->tus.w = le32_to_cpu(prot_bde->tus.w);

		curr_prot++;
		num_bde++;

		/* setup BDE's for data blocks associated with DIF data */
		pgdone = 0;
		subtotal = 0; /* total bytes processed for current prot grp */
		while (!pgdone) {
			if (!sgde) {
				lpfc_printf_log(phba, KERN_ERR, LOG_BG,
					"9065 BLKGRD:%s Invalid data segment\n",
						__func__);
				return 0;
			}
			bpl++;
			dataphysaddr = sg_dma_address(sgde) + split_offset;
			bpl->addrLow = le32_to_cpu(putPaddrLow(dataphysaddr));
			bpl->addrHigh = le32_to_cpu(putPaddrHigh(dataphysaddr));

			remainder = sg_dma_len(sgde) - split_offset;

			if ((subtotal + remainder) <= protgrp_bytes) {
				/* we can use this whole buffer */
				bpl->tus.f.bdeSize = remainder;
				split_offset = 0;

				if ((subtotal + remainder) == protgrp_bytes)
					pgdone = 1;
			} else {
				/* must split this buffer with next prot grp */
				bpl->tus.f.bdeSize = protgrp_bytes - subtotal;
				split_offset += bpl->tus.f.bdeSize;
			}

			subtotal += bpl->tus.f.bdeSize;

			if (datadir == DMA_TO_DEVICE)
				bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
			else
				bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64I;
			bpl->tus.w = le32_to_cpu(bpl->tus.w);

			num_bde++;
			curr_data++;

			if (split_offset)
				break;

			/* Move to the next s/g segment if possible */
			sgde = sg_next(sgde);

		}

		/* are we done ? */
		if (curr_prot == protcnt) {
			alldone = 1;
		} else if (curr_prot < protcnt) {
			/* advance to next prot buffer */
			sgpe = sg_next(sgpe);
			bpl++;

			/* update the reference tag */
			reftag += protgrp_blks;
		} else {
			/* if we're here, we have a bug */
			lpfc_printf_log(phba, KERN_ERR, LOG_BG,
				"9054 BLKGRD: bug in %s\n", __func__);
		}

	} while (!alldone);

out:

	return num_bde;
}
/*
 * Given a SCSI command that supports DIF, determine composition of protection
 * groups involved in setting up buffer lists
 *
 * Returns:
 *   LPFC_PG_TYPE_NO_DIF or LPFC_PG_TYPE_DIF_BUF for DIF (for both read
 *   and write); LPFC_PG_TYPE_INVALID for unsupported protection ops.
 */
static int
lpfc_prot_group_type(struct lpfc_hba *phba, struct scsi_cmnd *sc)
{
	int ret = LPFC_PG_TYPE_INVALID;
	unsigned char op = scsi_get_prot_op(sc);

	switch (op) {
	case SCSI_PROT_READ_STRIP:
	case SCSI_PROT_WRITE_INSERT:
		ret = LPFC_PG_TYPE_NO_DIF;
		break;
	case SCSI_PROT_READ_INSERT:
	case SCSI_PROT_WRITE_STRIP:
	case SCSI_PROT_READ_PASS:
	case SCSI_PROT_WRITE_PASS:
		ret = LPFC_PG_TYPE_DIF_BUF;
		break;
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
				"9021 Unsupported protection op:%d\n", op);
		break;
	}

	return ret;
}
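/*
 * Worked example (added for clarity; the numbers are hypothetical):
 * for a DIF Type 1 transfer of eight 512-byte blocks, the DIF-aware
 * prep routine below computes
 *
 *	fcpdl = 4096 + (4096 / 512) * 8 = 4096 + 64 = 4160
 *
 * i.e. FCP_DL must count the 8-byte DIF tuple carried with every block
 * on the wire, not just the data length the midlayer mapped.
 */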
/*
 * This is the protection/DIF aware version of
 * lpfc_scsi_prep_dma_buf(). It may be a good idea to combine the
 * two functions eventually, but for now, it's here
 */
static int
lpfc_bg_scsi_prep_dma_buf(struct lpfc_hba *phba,
		struct lpfc_scsi_buf *lpfc_cmd)
{
	struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd;
	struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd;
	struct ulp_bde64 *bpl = lpfc_cmd->fcp_bpl;
	IOCB_t *iocb_cmd = &lpfc_cmd->cur_iocbq.iocb;
	uint32_t num_bde = 0;
	int datasegcnt, protsegcnt, datadir = scsi_cmnd->sc_data_direction;
	int prot_group_type = 0;
	int diflen, fcpdl;
	unsigned blksize;

	/*
	 * Start the lpfc command prep by bumping the bpl beyond fcp_cmnd
	 * fcp_rsp regions to the first data bde entry
	 */
	bpl += 2;
	if (scsi_sg_count(scsi_cmnd)) {
		/*
		 * The driver stores the segment count returned from pci_map_sg
		 * because this is a count of dma-mappings used to map the
		 * use_sg pages. They are not guaranteed to be the same for
		 * those architectures that implement an IOMMU.
		 */
		datasegcnt = dma_map_sg(&phba->pcidev->dev,
					scsi_sglist(scsi_cmnd),
					scsi_sg_count(scsi_cmnd), datadir);
		if (unlikely(!datasegcnt))
			return 1;

		lpfc_cmd->seg_cnt = datasegcnt;
		if (lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt) {
			lpfc_printf_log(phba, KERN_ERR, LOG_BG,
					"9067 BLKGRD: %s: Too many sg segments"
					" from dma_map_sg. Config %d, seg_cnt"
					" %d\n",
					__func__, phba->cfg_sg_seg_cnt,
					lpfc_cmd->seg_cnt);
			scsi_dma_unmap(scsi_cmnd);
			return 1;
		}

		prot_group_type = lpfc_prot_group_type(phba, scsi_cmnd);

		switch (prot_group_type) {
		case LPFC_PG_TYPE_NO_DIF:
			num_bde = lpfc_bg_setup_bpl(phba, scsi_cmnd, bpl,
					datasegcnt);
			/* we should have 2 or more entries in buffer list */
			if (num_bde < 2)
				goto err;
			break;
		case LPFC_PG_TYPE_DIF_BUF:{
			/*
			 * This type indicates that protection buffers are
			 * passed to the driver, so that needs to be prepared
			 * for DMA
			 */
			protsegcnt = dma_map_sg(&phba->pcidev->dev,
					scsi_prot_sglist(scsi_cmnd),
					scsi_prot_sg_count(scsi_cmnd), datadir);
			if (unlikely(!protsegcnt)) {
				scsi_dma_unmap(scsi_cmnd);
				return 1;
			}

			lpfc_cmd->prot_seg_cnt = protsegcnt;
			if (lpfc_cmd->prot_seg_cnt
			    > phba->cfg_prot_sg_seg_cnt) {
				lpfc_printf_log(phba, KERN_ERR, LOG_BG,
					"9068 BLKGRD: %s: Too many prot sg "
					"segments from dma_map_sg. Config %d, "
					"prot_seg_cnt %d\n", __func__,
					phba->cfg_prot_sg_seg_cnt,
					lpfc_cmd->prot_seg_cnt);
				dma_unmap_sg(&phba->pcidev->dev,
					     scsi_prot_sglist(scsi_cmnd),
					     scsi_prot_sg_count(scsi_cmnd),
					     datadir);
				scsi_dma_unmap(scsi_cmnd);
				return 1;
			}

			num_bde = lpfc_bg_setup_bpl_prot(phba, scsi_cmnd, bpl,
					datasegcnt, protsegcnt);
			/* we should have 3 or more entries in buffer list */
			if (num_bde < 3)
				goto err;
			break;
		}
		case LPFC_PG_TYPE_INVALID:
		default:
			lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
					"9022 Unexpected protection group %i\n",
					prot_group_type);
			return 1;
		}
	}

	/*
	 * Finish initializing those IOCB fields that are dependent on the
	 * scsi_cmnd request_buffer. Note that the bdeSize is explicitly
	 * reinitialized since all iocb memory resources are used many times
	 * for transmit, receive, and continuation bpl's.
	 */
	iocb_cmd->un.fcpi64.bdl.bdeSize = (2 * sizeof(struct ulp_bde64));
	iocb_cmd->un.fcpi64.bdl.bdeSize += (num_bde * sizeof(struct ulp_bde64));
	iocb_cmd->ulpBdeCount = 1;
	iocb_cmd->ulpLe = 1;

	fcpdl = scsi_bufflen(scsi_cmnd);

	if (scsi_get_prot_type(scsi_cmnd) == SCSI_PROT_DIF_TYPE1) {
		/*
		 * We are in DIF Type 1 mode
		 * Every data block has an 8 byte DIF (trailer)
		 * attached to it. Must adjust FCP data length
		 */
		blksize = lpfc_cmd_blksize(scsi_cmnd);
		diflen = (fcpdl / blksize) * 8;
		fcpdl += diflen;
	}
	fcp_cmnd->fcpDl = cpu_to_be32(fcpdl);

	/*
	 * Due to difference in data length between DIF/non-DIF paths,
	 * we need to set word 4 of IOCB here
	 */
	iocb_cmd->un.fcpi.fcpi_parm = fcpdl;

	return 0;
err:
	lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
			"9023 Could not setup all needed BDE's "
			"prot_group_type=%d, num_bde=%d\n",
			prot_group_type, num_bde);
	return 1;
}

/*
 * This function checks for BlockGuard errors detected by
 * the HBA. In case of errors, the ASC/ASCQ fields in the
 * sense buffer will be set accordingly, paired with
 * ILLEGAL_REQUEST to signal to the kernel that the HBA
 * detected corruption.
 *
 * Returns:
 *  0 - No error found
 *  1 - BlockGuard error found
 * -1 - Internal error (bad profile, etc.)
 */
static int
lpfc_parse_bg_err(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd,
			struct lpfc_iocbq *pIocbOut)
{
	struct scsi_cmnd *cmd = lpfc_cmd->pCmd;
	struct sli3_bg_fields *bgf = &pIocbOut->iocb.unsli3.sli3_bg;
	int ret = 0;
	uint32_t bghm = bgf->bghm;
	uint32_t bgstat = bgf->bgstat;
	uint64_t failing_sector = 0;

	lpfc_printf_log(phba, KERN_ERR, LOG_BG, "9069 BLKGRD: BG ERROR in cmd"
			" 0x%x lba 0x%llx blk cnt 0x%x "
			"bgstat=0x%x bghm=0x%x\n",
			cmd->cmnd[0], (unsigned long long)scsi_get_lba(cmd),
			blk_rq_sectors(cmd->request), bgstat, bghm);

	spin_lock(&_dump_buf_lock);
	if (!_dump_buf_done) {
		lpfc_printf_log(phba, KERN_ERR, LOG_BG, "9070 BLKGRD: Saving"
			" Data for %u blocks to debugfs\n",
				(cmd->cmnd[7] << 8 | cmd->cmnd[8]));
		lpfc_debug_save_data(phba, cmd);

		/* If we have a prot sgl, save the DIF buffer */
		if (lpfc_prot_group_type(phba, cmd) == LPFC_PG_TYPE_DIF_BUF) {
			lpfc_printf_log(phba, KERN_ERR, LOG_BG, "9071 BLKGRD: "
				"Saving DIF for %u blocks to debugfs\n",
				(cmd->cmnd[7] << 8 | cmd->cmnd[8]));
			lpfc_debug_save_dif(phba, cmd);
		}

		_dump_buf_done = 1;
	}
	spin_unlock(&_dump_buf_lock);

	if (lpfc_bgs_get_invalid_prof(bgstat)) {
		cmd->result = ScsiResult(DID_ERROR, 0);
		lpfc_printf_log(phba, KERN_ERR, LOG_BG, "9072 BLKGRD: Invalid"
			" BlockGuard profile. bgstat:0x%x\n",
			bgstat);
		ret = (-1);
		goto out;
	}
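/*
 * Note (added for clarity): when the high water mark is present,
 * lpfc_parse_bg_err() above reports the failing LBA in descriptor-
 * format sense data: byte 8 carries the descriptor type written by the
 * driver (0x00, Information), byte 9 the additional length (0x0a), and
 * bytes 10-17 the 64-bit failing sector, computed as scsi_get_lba()
 * plus bghm scaled from bytes to sectors.
 */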
bgstat:0x%x\n", 1823 bgstat); 1824 ret = (-1); 1825 goto out; 1826 } 1827 1828 if (lpfc_bgs_get_guard_err(bgstat)) { 1829 ret = 1; 1830 1831 scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST, 1832 0x10, 0x1); 1833 cmd->result = DRIVER_SENSE << 24 1834 | ScsiResult(DID_ABORT, SAM_STAT_CHECK_CONDITION); 1835 phba->bg_guard_err_cnt++; 1836 lpfc_printf_log(phba, KERN_ERR, LOG_BG, 1837 "9055 BLKGRD: guard_tag error\n"); 1838 } 1839 1840 if (lpfc_bgs_get_reftag_err(bgstat)) { 1841 ret = 1; 1842 1843 scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST, 1844 0x10, 0x3); 1845 cmd->result = DRIVER_SENSE << 24 1846 | ScsiResult(DID_ABORT, SAM_STAT_CHECK_CONDITION); 1847 1848 phba->bg_reftag_err_cnt++; 1849 lpfc_printf_log(phba, KERN_ERR, LOG_BG, 1850 "9056 BLKGRD: ref_tag error\n"); 1851 } 1852 1853 if (lpfc_bgs_get_apptag_err(bgstat)) { 1854 ret = 1; 1855 1856 scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST, 1857 0x10, 0x2); 1858 cmd->result = DRIVER_SENSE << 24 1859 | ScsiResult(DID_ABORT, SAM_STAT_CHECK_CONDITION); 1860 1861 phba->bg_apptag_err_cnt++; 1862 lpfc_printf_log(phba, KERN_ERR, LOG_BG, 1863 "9061 BLKGRD: app_tag error\n"); 1864 } 1865 1866 if (lpfc_bgs_get_hi_water_mark_present(bgstat)) { 1867 /* 1868 * setup sense data descriptor 0 per SPC-4 as an information 1869 * field, and put the failing LBA in it 1870 */ 1871 cmd->sense_buffer[8] = 0; /* Information */ 1872 cmd->sense_buffer[9] = 0xa; /* Add. length */ 1873 bghm /= cmd->device->sector_size; 1874 1875 failing_sector = scsi_get_lba(cmd); 1876 failing_sector += bghm; 1877 1878 put_unaligned_be64(failing_sector, &cmd->sense_buffer[10]); 1879 } 1880 1881 if (!ret) { 1882 /* No error was reported - problem in FW? */ 1883 cmd->result = ScsiResult(DID_ERROR, 0); 1884 lpfc_printf_log(phba, KERN_ERR, LOG_BG, 1885 "9057 BLKGRD: no errors reported!\n"); 1886 } 1887 1888out: 1889 return ret; 1890} 1891 1892/** 1893 * lpfc_scsi_prep_dma_buf_s4 - DMA mapping for scsi buffer to SLI4 IF spec 1894 * @phba: The Hba for which this call is being executed. 1895 * @lpfc_cmd: The scsi buffer which is going to be mapped. 1896 * 1897 * This routine does the pci dma mapping for scatter-gather list of scsi cmnd 1898 * field of @lpfc_cmd for device with SLI-4 interface spec. 1899 * 1900 * Return codes: 1901 * 1 - Error 1902 * 0 - Success 1903 **/ 1904static int 1905lpfc_scsi_prep_dma_buf_s4(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd) 1906{ 1907 struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd; 1908 struct scatterlist *sgel = NULL; 1909 struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd; 1910 struct sli4_sge *sgl = (struct sli4_sge *)lpfc_cmd->fcp_bpl; 1911 IOCB_t *iocb_cmd = &lpfc_cmd->cur_iocbq.iocb; 1912 dma_addr_t physaddr; 1913 uint32_t num_bde = 0; 1914 uint32_t dma_len; 1915 uint32_t dma_offset = 0; 1916 int nseg; 1917 1918 /* 1919 * There are three possibilities here - use scatter-gather segment, use 1920 * the single mapping, or neither. Start the lpfc command prep by 1921 * bumping the bpl beyond the fcp_cmnd and fcp_rsp regions to the first 1922 * data bde entry. 1923 */ 1924 if (scsi_sg_count(scsi_cmnd)) { 1925 /* 1926 * The driver stores the segment count returned from pci_map_sg 1927 * because this a count of dma-mappings used to map the use_sg 1928 * pages. They are not guaranteed to be the same for those 1929 * architectures that implement an IOMMU. 
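	 * scsi_dma_map() wraps dma_map_sg() for the data scatterlist, so the
	 * value returned here is the mapped segment count, which need not
	 * equal scsi_sg_count().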
1930 */ 1931 1932 nseg = scsi_dma_map(scsi_cmnd); 1933 if (unlikely(!nseg)) 1934 return 1; 1935 sgl += 1; 1936 /* clear the last flag in the fcp_rsp map entry */ 1937 sgl->word2 = le32_to_cpu(sgl->word2); 1938 bf_set(lpfc_sli4_sge_last, sgl, 0); 1939 sgl->word2 = cpu_to_le32(sgl->word2); 1940 sgl += 1; 1941 1942 lpfc_cmd->seg_cnt = nseg; 1943 if (lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt) { 1944 lpfc_printf_log(phba, KERN_ERR, LOG_BG, "9074 BLKGRD:" 1945 " %s: Too many sg segments from " 1946 "dma_map_sg. Config %d, seg_cnt %d\n", 1947 __func__, phba->cfg_sg_seg_cnt, 1948 lpfc_cmd->seg_cnt); 1949 scsi_dma_unmap(scsi_cmnd); 1950 return 1; 1951 } 1952 1953 /* 1954 * The driver established a maximum scatter-gather segment count 1955 * during probe that limits the number of sg elements in any 1956 * single scsi command. Just run through the seg_cnt and format 1957 * the sge's. 1958 * When using SLI-3 the driver will try to fit all the BDEs into 1959 * the IOCB. If it can't then the BDEs get added to a BPL as it 1960 * does for SLI-2 mode. 1961 */ 1962 scsi_for_each_sg(scsi_cmnd, sgel, nseg, num_bde) { 1963 physaddr = sg_dma_address(sgel); 1964 dma_len = sg_dma_len(sgel); 1965 sgl->addr_lo = cpu_to_le32(putPaddrLow(physaddr)); 1966 sgl->addr_hi = cpu_to_le32(putPaddrHigh(physaddr)); 1967 if ((num_bde + 1) == nseg) 1968 bf_set(lpfc_sli4_sge_last, sgl, 1); 1969 else 1970 bf_set(lpfc_sli4_sge_last, sgl, 0); 1971 bf_set(lpfc_sli4_sge_offset, sgl, dma_offset); 1972 sgl->word2 = cpu_to_le32(sgl->word2); 1973 sgl->sge_len = cpu_to_le32(dma_len); 1974 dma_offset += dma_len; 1975 sgl++; 1976 } 1977 } else { 1978 sgl += 1; 1979 /* clear the last flag in the fcp_rsp map entry */ 1980 sgl->word2 = le32_to_cpu(sgl->word2); 1981 bf_set(lpfc_sli4_sge_last, sgl, 1); 1982 sgl->word2 = cpu_to_le32(sgl->word2); 1983 } 1984 1985 /* 1986 * Finish initializing those IOCB fields that are dependent on the 1987 * scsi_cmnd request_buffer. Note that for SLI-2 the bdeSize is 1988 * explicitly reinitialized. 1989 * all iocb memory resources are reused. 1990 */ 1991 fcp_cmnd->fcpDl = cpu_to_be32(scsi_bufflen(scsi_cmnd)); 1992 1993 /* 1994 * Due to difference in data length between DIF/non-DIF paths, 1995 * we need to set word 4 of IOCB here 1996 */ 1997 iocb_cmd->un.fcpi.fcpi_parm = scsi_bufflen(scsi_cmnd); 1998 return 0; 1999} 2000 2001/** 2002 * lpfc_scsi_prep_dma_buf - Wrapper function for DMA mapping of scsi buffer 2003 * @phba: The Hba for which this call is being executed. 2004 * @lpfc_cmd: The scsi buffer which is going to be mapped. 2005 * 2006 * This routine wraps the actual DMA mapping function pointer from the 2007 * lpfc_hba struct. 2008 * 2009 * Return codes: 2010 * 1 - Error 2011 * 0 - Success 2012 **/ 2013static inline int 2014lpfc_scsi_prep_dma_buf(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd) 2015{ 2016 return phba->lpfc_scsi_prep_dma_buf(phba, lpfc_cmd); 2017} 2018 2019/** 2020 * lpfc_send_scsi_error_event - Posts an event when there is SCSI error 2021 * @phba: Pointer to hba context object. 2022 * @vport: Pointer to vport object. 2023 * @lpfc_cmd: Pointer to lpfc scsi command which reported the error. 2024 * @rsp_iocb: Pointer to response iocb object which reported error. 2025 * 2026 * This function posts an event when there is a SCSI command reporting 2027 * error from the scsi device. 
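 * The event is queued on the HBA work list and delivered to the management
 * application by the worker thread.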
2028 **/ 2029static void 2030lpfc_send_scsi_error_event(struct lpfc_hba *phba, struct lpfc_vport *vport, 2031 struct lpfc_scsi_buf *lpfc_cmd, struct lpfc_iocbq *rsp_iocb) { 2032 struct scsi_cmnd *cmnd = lpfc_cmd->pCmd; 2033 struct fcp_rsp *fcprsp = lpfc_cmd->fcp_rsp; 2034 uint32_t resp_info = fcprsp->rspStatus2; 2035 uint32_t scsi_status = fcprsp->rspStatus3; 2036 uint32_t fcpi_parm = rsp_iocb->iocb.un.fcpi.fcpi_parm; 2037 struct lpfc_fast_path_event *fast_path_evt = NULL; 2038 struct lpfc_nodelist *pnode = lpfc_cmd->rdata->pnode; 2039 unsigned long flags; 2040 2041 /* If there is queuefull or busy condition send a scsi event */ 2042 if ((cmnd->result == SAM_STAT_TASK_SET_FULL) || 2043 (cmnd->result == SAM_STAT_BUSY)) { 2044 fast_path_evt = lpfc_alloc_fast_evt(phba); 2045 if (!fast_path_evt) 2046 return; 2047 fast_path_evt->un.scsi_evt.event_type = 2048 FC_REG_SCSI_EVENT; 2049 fast_path_evt->un.scsi_evt.subcategory = 2050 (cmnd->result == SAM_STAT_TASK_SET_FULL) ? 2051 LPFC_EVENT_QFULL : LPFC_EVENT_DEVBSY; 2052 fast_path_evt->un.scsi_evt.lun = cmnd->device->lun; 2053 memcpy(&fast_path_evt->un.scsi_evt.wwpn, 2054 &pnode->nlp_portname, sizeof(struct lpfc_name)); 2055 memcpy(&fast_path_evt->un.scsi_evt.wwnn, 2056 &pnode->nlp_nodename, sizeof(struct lpfc_name)); 2057 } else if ((resp_info & SNS_LEN_VALID) && fcprsp->rspSnsLen && 2058 ((cmnd->cmnd[0] == READ_10) || (cmnd->cmnd[0] == WRITE_10))) { 2059 fast_path_evt = lpfc_alloc_fast_evt(phba); 2060 if (!fast_path_evt) 2061 return; 2062 fast_path_evt->un.check_cond_evt.scsi_event.event_type = 2063 FC_REG_SCSI_EVENT; 2064 fast_path_evt->un.check_cond_evt.scsi_event.subcategory = 2065 LPFC_EVENT_CHECK_COND; 2066 fast_path_evt->un.check_cond_evt.scsi_event.lun = 2067 cmnd->device->lun; 2068 memcpy(&fast_path_evt->un.check_cond_evt.scsi_event.wwpn, 2069 &pnode->nlp_portname, sizeof(struct lpfc_name)); 2070 memcpy(&fast_path_evt->un.check_cond_evt.scsi_event.wwnn, 2071 &pnode->nlp_nodename, sizeof(struct lpfc_name)); 2072 fast_path_evt->un.check_cond_evt.sense_key = 2073 cmnd->sense_buffer[2] & 0xf; 2074 fast_path_evt->un.check_cond_evt.asc = cmnd->sense_buffer[12]; 2075 fast_path_evt->un.check_cond_evt.ascq = cmnd->sense_buffer[13]; 2076 } else if ((cmnd->sc_data_direction == DMA_FROM_DEVICE) && 2077 fcpi_parm && 2078 ((be32_to_cpu(fcprsp->rspResId) != fcpi_parm) || 2079 ((scsi_status == SAM_STAT_GOOD) && 2080 !(resp_info & (RESID_UNDER | RESID_OVER))))) { 2081 /* 2082 * If status is good or resid does not match with fcp_param and 2083 * there is valid fcpi_parm, then there is a read_check error 2084 */ 2085 fast_path_evt = lpfc_alloc_fast_evt(phba); 2086 if (!fast_path_evt) 2087 return; 2088 fast_path_evt->un.read_check_error.header.event_type = 2089 FC_REG_FABRIC_EVENT; 2090 fast_path_evt->un.read_check_error.header.subcategory = 2091 LPFC_EVENT_FCPRDCHKERR; 2092 memcpy(&fast_path_evt->un.read_check_error.header.wwpn, 2093 &pnode->nlp_portname, sizeof(struct lpfc_name)); 2094 memcpy(&fast_path_evt->un.read_check_error.header.wwnn, 2095 &pnode->nlp_nodename, sizeof(struct lpfc_name)); 2096 fast_path_evt->un.read_check_error.lun = cmnd->device->lun; 2097 fast_path_evt->un.read_check_error.opcode = cmnd->cmnd[0]; 2098 fast_path_evt->un.read_check_error.fcpiparam = 2099 fcpi_parm; 2100 } else 2101 return; 2102 2103 fast_path_evt->vport = vport; 2104 spin_lock_irqsave(&phba->hbalock, flags); 2105 list_add_tail(&fast_path_evt->work_evt.evt_listp, &phba->work_list); 2106 spin_unlock_irqrestore(&phba->hbalock, flags); 2107 lpfc_worker_wake_up(phba); 2108 
	return;
}

/**
 * lpfc_scsi_unprep_dma_buf - Un-map the DMA mapping of the SG-list for a dev
 * @phba: The HBA for which this call is being executed.
 * @psb: The scsi buffer which is going to be un-mapped.
 *
 * This routine does DMA un-mapping of the scatter gather list of the scsi
 * command held in @psb, covering both the data and the protection
 * scatterlists.
 **/
static void
lpfc_scsi_unprep_dma_buf(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb)
{
	/*
	 * There are only two special cases to consider.  (1) the scsi command
	 * requested scatter-gather usage or (2) the scsi command allocated
	 * a request buffer, but did not request use_sg.  There is a third
	 * case, but it does not require resource deallocation.
	 */
	if (psb->seg_cnt > 0)
		scsi_dma_unmap(psb->pCmd);
	if (psb->prot_seg_cnt > 0)
		dma_unmap_sg(&phba->pcidev->dev, scsi_prot_sglist(psb->pCmd),
			     scsi_prot_sg_count(psb->pCmd),
			     psb->pCmd->sc_data_direction);
}

/**
 * lpfc_handle_fcp_err - FCP response handler
 * @vport: The virtual port for which this call is being executed.
 * @lpfc_cmd: Pointer to lpfc_scsi_buf data structure.
 * @rsp_iocb: The response IOCB which contains the FCP error.
 *
 * This routine is called to process a response IOCB with status field
 * IOSTAT_FCP_RSP_ERROR. This routine sets the result field of the scsi
 * command based upon the SCSI and FCP error.
 **/
static void
lpfc_handle_fcp_err(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
		    struct lpfc_iocbq *rsp_iocb)
{
	struct scsi_cmnd *cmnd = lpfc_cmd->pCmd;
	struct fcp_cmnd *fcpcmd = lpfc_cmd->fcp_cmnd;
	struct fcp_rsp *fcprsp = lpfc_cmd->fcp_rsp;
	uint32_t fcpi_parm = rsp_iocb->iocb.un.fcpi.fcpi_parm;
	uint32_t resp_info = fcprsp->rspStatus2;
	uint32_t scsi_status = fcprsp->rspStatus3;
	uint32_t *lp;
	uint32_t host_status = DID_OK;
	uint32_t rsplen = 0;
	uint32_t logit = LOG_FCP | LOG_FCP_ERROR;


	/*
	 * If this is a task management command, there is no
	 * scsi packet associated with this lpfc_cmd. The driver
	 * consumes it.
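	 * (fcpCntl2 carries the task management flags; for normal scsi
	 * commands lpfc_scsi_prep_cmnd clears it to zero.)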
	 */
	if (fcpcmd->fcpCntl2) {
		scsi_status = 0;
		goto out;
	}

	if (resp_info & RSP_LEN_VALID) {
		rsplen = be32_to_cpu(fcprsp->rspRspLen);
		if (rsplen != 0 && rsplen != 4 && rsplen != 8) {
			lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
				 "2719 Invalid response length: "
				 "tgt x%x lun x%x cmnd x%x rsplen x%x\n",
				 cmnd->device->id,
				 cmnd->device->lun, cmnd->cmnd[0],
				 rsplen);
			host_status = DID_ERROR;
			goto out;
		}
		if (fcprsp->rspInfo3 != RSP_NO_FAILURE) {
			lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
				 "2757 Protocol failure detected during "
				 "processing of FCP I/O op: "
				 "tgt x%x lun x%x cmnd x%x rspInfo3 x%x\n",
				 cmnd->device->id,
				 cmnd->device->lun, cmnd->cmnd[0],
				 fcprsp->rspInfo3);
			host_status = DID_ERROR;
			goto out;
		}
	}

	if ((resp_info & SNS_LEN_VALID) && fcprsp->rspSnsLen) {
		uint32_t snslen = be32_to_cpu(fcprsp->rspSnsLen);
		if (snslen > SCSI_SENSE_BUFFERSIZE)
			snslen = SCSI_SENSE_BUFFERSIZE;

		if (resp_info & RSP_LEN_VALID)
			rsplen = be32_to_cpu(fcprsp->rspRspLen);
		memcpy(cmnd->sense_buffer, &fcprsp->rspInfo0 + rsplen, snslen);
	}
	lp = (uint32_t *)cmnd->sense_buffer;

	if (!scsi_status && (resp_info & RESID_UNDER))
		logit = LOG_FCP;

	lpfc_printf_vlog(vport, KERN_WARNING, logit,
			 "9024 FCP command x%x failed: x%x SNS x%x x%x "
			 "Data: x%x x%x x%x x%x x%x\n",
			 cmnd->cmnd[0], scsi_status,
			 be32_to_cpu(*lp), be32_to_cpu(*(lp + 3)), resp_info,
			 be32_to_cpu(fcprsp->rspResId),
			 be32_to_cpu(fcprsp->rspSnsLen),
			 be32_to_cpu(fcprsp->rspRspLen),
			 fcprsp->rspInfo3);

	scsi_set_resid(cmnd, 0);
	if (resp_info & RESID_UNDER) {
		scsi_set_resid(cmnd, be32_to_cpu(fcprsp->rspResId));

		lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
				 "9025 FCP Read Underrun, expected %d, "
				 "residual %d Data: x%x x%x x%x\n",
				 be32_to_cpu(fcpcmd->fcpDl),
				 scsi_get_resid(cmnd), fcpi_parm, cmnd->cmnd[0],
				 cmnd->underflow);

		/*
		 * If there is an underrun, check whether the underrun
		 * reported by the storage array matches the underrun
		 * reported by the HBA. If they do not match, a frame
		 * was dropped.
		 */
		if ((cmnd->sc_data_direction == DMA_FROM_DEVICE) &&
		    fcpi_parm &&
		    (scsi_get_resid(cmnd) != fcpi_parm)) {
			lpfc_printf_vlog(vport, KERN_WARNING,
					 LOG_FCP | LOG_FCP_ERROR,
					 "9026 FCP Read Check Error "
					 "and Underrun Data: x%x x%x x%x x%x\n",
					 be32_to_cpu(fcpcmd->fcpDl),
					 scsi_get_resid(cmnd), fcpi_parm,
					 cmnd->cmnd[0]);
			scsi_set_resid(cmnd, scsi_bufflen(cmnd));
			host_status = DID_ERROR;
		}
		/*
		 * The cmnd->underflow is the minimum number of bytes that must
		 * be transferred for this command.  Provided a sense condition
		 * is not present, make sure the actual amount transferred is
		 * at least the underflow value or fail.
		 */
		if (!(resp_info & SNS_LEN_VALID) &&
		    (scsi_status == SAM_STAT_GOOD) &&
		    (scsi_bufflen(cmnd) - scsi_get_resid(cmnd)
		     < cmnd->underflow)) {
			lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
					 "9027 FCP command x%x residual "
					 "underrun converted to error "
					 "Data: x%x x%x x%x\n",
					 cmnd->cmnd[0], scsi_bufflen(cmnd),
					 scsi_get_resid(cmnd), cmnd->underflow);
			host_status = DID_ERROR;
		}
	} else if (resp_info & RESID_OVER) {
		lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
				 "9028 FCP command x%x residual overrun error. "
				 "Data: x%x x%x\n", cmnd->cmnd[0],
				 scsi_bufflen(cmnd), scsi_get_resid(cmnd));
		host_status = DID_ERROR;

	/*
	 * Check SLI validation that all of the transfer was actually done
	 * (fcpi_parm should be zero). Apply the check only to reads.
	 */
	} else if ((scsi_status == SAM_STAT_GOOD) && fcpi_parm &&
		   (cmnd->sc_data_direction == DMA_FROM_DEVICE)) {
		lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP | LOG_FCP_ERROR,
				 "9029 FCP Read Check Error Data: "
				 "x%x x%x x%x x%x\n",
				 be32_to_cpu(fcpcmd->fcpDl),
				 be32_to_cpu(fcprsp->rspResId),
				 fcpi_parm, cmnd->cmnd[0]);
		host_status = DID_ERROR;
		scsi_set_resid(cmnd, scsi_bufflen(cmnd));
	}

 out:
	cmnd->result = ScsiResult(host_status, scsi_status);
	lpfc_send_scsi_error_event(vport->phba, vport, lpfc_cmd, rsp_iocb);
}

/**
 * lpfc_scsi_cmd_iocb_cmpl - Scsi cmnd IOCB completion routine
 * @phba: The Hba for which this call is being executed.
 * @pIocbIn: The command IOCBQ for the scsi cmnd.
 * @pIocbOut: The response IOCBQ for the scsi cmnd.
 *
 * This routine assigns the scsi command result by looking into the response
 * IOCB status field appropriately. This routine also handles the QUEUE FULL
 * condition by ramping down the device queue depth.
 **/
static void
lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
			struct lpfc_iocbq *pIocbOut)
{
	struct lpfc_scsi_buf *lpfc_cmd =
		(struct lpfc_scsi_buf *) pIocbIn->context1;
	struct lpfc_vport *vport = pIocbIn->vport;
	struct lpfc_rport_data *rdata = lpfc_cmd->rdata;
	struct lpfc_nodelist *pnode = rdata->pnode;
	struct scsi_cmnd *cmd;
	int result;
	struct scsi_device *tmp_sdev;
	int depth;
	unsigned long flags;
	struct lpfc_fast_path_event *fast_path_evt;
	struct Scsi_Host *shost;
	uint32_t queue_depth, scsi_id;

	/* Sanity check on return of outstanding command */
	if (!(lpfc_cmd->pCmd))
		return;
	cmd = lpfc_cmd->pCmd;
	shost = cmd->device->host;

	lpfc_cmd->result = pIocbOut->iocb.un.ulpWord[4];
	lpfc_cmd->status = pIocbOut->iocb.ulpStatus;
	/* pick up SLI4 exchange busy status from HBA */
	lpfc_cmd->exch_busy = pIocbOut->iocb_flag & LPFC_EXCHANGE_BUSY;

	if (pnode && NLP_CHK_NODE_ACT(pnode))
		atomic_dec(&pnode->cmd_pending);

	if (lpfc_cmd->status) {
		if (lpfc_cmd->status == IOSTAT_LOCAL_REJECT &&
		    (lpfc_cmd->result & IOERR_DRVR_MASK))
			lpfc_cmd->status = IOSTAT_DRIVER_REJECT;
		else if (lpfc_cmd->status >= IOSTAT_CNT)
			lpfc_cmd->status = IOSTAT_DEFAULT;

		lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
				 "9030 FCP cmd x%x failed <%d/%d> "
				 "status: x%x result: x%x Data: x%x x%x\n",
				 cmd->cmnd[0],
				 cmd->device ? cmd->device->id : 0xffff,
				 cmd->device ?
cmd->device->lun : 0xffff, 2351 lpfc_cmd->status, lpfc_cmd->result, 2352 pIocbOut->iocb.ulpContext, 2353 lpfc_cmd->cur_iocbq.iocb.ulpIoTag); 2354 2355 switch (lpfc_cmd->status) { 2356 case IOSTAT_FCP_RSP_ERROR: 2357 /* Call FCP RSP handler to determine result */ 2358 lpfc_handle_fcp_err(vport, lpfc_cmd, pIocbOut); 2359 break; 2360 case IOSTAT_NPORT_BSY: 2361 case IOSTAT_FABRIC_BSY: 2362 cmd->result = ScsiResult(DID_TRANSPORT_DISRUPTED, 0); 2363 fast_path_evt = lpfc_alloc_fast_evt(phba); 2364 if (!fast_path_evt) 2365 break; 2366 fast_path_evt->un.fabric_evt.event_type = 2367 FC_REG_FABRIC_EVENT; 2368 fast_path_evt->un.fabric_evt.subcategory = 2369 (lpfc_cmd->status == IOSTAT_NPORT_BSY) ? 2370 LPFC_EVENT_PORT_BUSY : LPFC_EVENT_FABRIC_BUSY; 2371 if (pnode && NLP_CHK_NODE_ACT(pnode)) { 2372 memcpy(&fast_path_evt->un.fabric_evt.wwpn, 2373 &pnode->nlp_portname, 2374 sizeof(struct lpfc_name)); 2375 memcpy(&fast_path_evt->un.fabric_evt.wwnn, 2376 &pnode->nlp_nodename, 2377 sizeof(struct lpfc_name)); 2378 } 2379 fast_path_evt->vport = vport; 2380 fast_path_evt->work_evt.evt = 2381 LPFC_EVT_FASTPATH_MGMT_EVT; 2382 spin_lock_irqsave(&phba->hbalock, flags); 2383 list_add_tail(&fast_path_evt->work_evt.evt_listp, 2384 &phba->work_list); 2385 spin_unlock_irqrestore(&phba->hbalock, flags); 2386 lpfc_worker_wake_up(phba); 2387 break; 2388 case IOSTAT_LOCAL_REJECT: 2389 if (lpfc_cmd->result == IOERR_INVALID_RPI || 2390 lpfc_cmd->result == IOERR_NO_RESOURCES || 2391 lpfc_cmd->result == IOERR_ABORT_REQUESTED || 2392 lpfc_cmd->result == IOERR_SLER_CMD_RCV_FAILURE) { 2393 cmd->result = ScsiResult(DID_REQUEUE, 0); 2394 break; 2395 } 2396 2397 if ((lpfc_cmd->result == IOERR_RX_DMA_FAILED || 2398 lpfc_cmd->result == IOERR_TX_DMA_FAILED) && 2399 pIocbOut->iocb.unsli3.sli3_bg.bgstat) { 2400 if (scsi_get_prot_op(cmd) != SCSI_PROT_NORMAL) { 2401 /* 2402 * This is a response for a BG enabled 2403 * cmd. 
Parse BG error 2404 */ 2405 lpfc_parse_bg_err(phba, lpfc_cmd, 2406 pIocbOut); 2407 break; 2408 } else { 2409 lpfc_printf_vlog(vport, KERN_WARNING, 2410 LOG_BG, 2411 "9031 non-zero BGSTAT " 2412 "on unprotected cmd\n"); 2413 } 2414 } 2415 2416 /* else: fall through */ 2417 default: 2418 cmd->result = ScsiResult(DID_ERROR, 0); 2419 break; 2420 } 2421 2422 if (!pnode || !NLP_CHK_NODE_ACT(pnode) 2423 || (pnode->nlp_state != NLP_STE_MAPPED_NODE)) 2424 cmd->result = ScsiResult(DID_TRANSPORT_DISRUPTED, 2425 SAM_STAT_BUSY); 2426 } else { 2427 cmd->result = ScsiResult(DID_OK, 0); 2428 } 2429 2430 if (cmd->result || lpfc_cmd->fcp_rsp->rspSnsLen) { 2431 uint32_t *lp = (uint32_t *)cmd->sense_buffer; 2432 2433 lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP, 2434 "0710 Iodone <%d/%d> cmd %p, error " 2435 "x%x SNS x%x x%x Data: x%x x%x\n", 2436 cmd->device->id, cmd->device->lun, cmd, 2437 cmd->result, *lp, *(lp + 3), cmd->retries, 2438 scsi_get_resid(cmd)); 2439 } 2440 2441 lpfc_update_stats(phba, lpfc_cmd); 2442 result = cmd->result; 2443 if (vport->cfg_max_scsicmpl_time && 2444 time_after(jiffies, lpfc_cmd->start_time + 2445 msecs_to_jiffies(vport->cfg_max_scsicmpl_time))) { 2446 spin_lock_irqsave(shost->host_lock, flags); 2447 if (pnode && NLP_CHK_NODE_ACT(pnode)) { 2448 if (pnode->cmd_qdepth > 2449 atomic_read(&pnode->cmd_pending) && 2450 (atomic_read(&pnode->cmd_pending) > 2451 LPFC_MIN_TGT_QDEPTH) && 2452 ((cmd->cmnd[0] == READ_10) || 2453 (cmd->cmnd[0] == WRITE_10))) 2454 pnode->cmd_qdepth = 2455 atomic_read(&pnode->cmd_pending); 2456 2457 pnode->last_change_time = jiffies; 2458 } 2459 spin_unlock_irqrestore(shost->host_lock, flags); 2460 } else if (pnode && NLP_CHK_NODE_ACT(pnode)) { 2461 if ((pnode->cmd_qdepth < vport->cfg_tgt_queue_depth) && 2462 time_after(jiffies, pnode->last_change_time + 2463 msecs_to_jiffies(LPFC_TGTQ_INTERVAL))) { 2464 spin_lock_irqsave(shost->host_lock, flags); 2465 depth = pnode->cmd_qdepth * LPFC_TGTQ_RAMPUP_PCENT 2466 / 100; 2467 depth = depth ? depth : 1; 2468 pnode->cmd_qdepth += depth; 2469 if (pnode->cmd_qdepth > vport->cfg_tgt_queue_depth) 2470 pnode->cmd_qdepth = vport->cfg_tgt_queue_depth; 2471 pnode->last_change_time = jiffies; 2472 spin_unlock_irqrestore(shost->host_lock, flags); 2473 } 2474 } 2475 2476 lpfc_scsi_unprep_dma_buf(phba, lpfc_cmd); 2477 2478 /* The sdev is not guaranteed to be valid post scsi_done upcall. */ 2479 queue_depth = cmd->device->queue_depth; 2480 scsi_id = cmd->device->id; 2481 cmd->scsi_done(cmd); 2482 2483 if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) { 2484 /* 2485 * If there is a thread waiting for command completion 2486 * wake up the thread. 2487 */ 2488 spin_lock_irqsave(shost->host_lock, flags); 2489 lpfc_cmd->pCmd = NULL; 2490 if (lpfc_cmd->waitq) 2491 wake_up(lpfc_cmd->waitq); 2492 spin_unlock_irqrestore(shost->host_lock, flags); 2493 lpfc_release_scsi_buf(phba, lpfc_cmd); 2494 return; 2495 } 2496 2497 if (!result) 2498 lpfc_rampup_queue_depth(vport, queue_depth); 2499 2500 /* 2501 * Check for queue full. If the lun is reporting queue full, then 2502 * back off the lun queue depth to prevent target overloads. 
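	 * Every scsi_device on this host that maps to the reporting target
	 * is stepped down by one via scsi_track_queue_full(), and a
	 * queue-depth change event is posted for each device that changed.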
2503 */ 2504 if (result == SAM_STAT_TASK_SET_FULL && pnode && 2505 NLP_CHK_NODE_ACT(pnode)) { 2506 shost_for_each_device(tmp_sdev, shost) { 2507 if (tmp_sdev->id != scsi_id) 2508 continue; 2509 depth = scsi_track_queue_full(tmp_sdev, 2510 tmp_sdev->queue_depth-1); 2511 if (depth <= 0) 2512 continue; 2513 lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP, 2514 "0711 detected queue full - lun queue " 2515 "depth adjusted to %d.\n", depth); 2516 lpfc_send_sdev_queuedepth_change_event(phba, vport, 2517 pnode, 2518 tmp_sdev->lun, 2519 depth+1, depth); 2520 } 2521 } 2522 2523 /* 2524 * If there is a thread waiting for command completion 2525 * wake up the thread. 2526 */ 2527 spin_lock_irqsave(shost->host_lock, flags); 2528 lpfc_cmd->pCmd = NULL; 2529 if (lpfc_cmd->waitq) 2530 wake_up(lpfc_cmd->waitq); 2531 spin_unlock_irqrestore(shost->host_lock, flags); 2532 2533 lpfc_release_scsi_buf(phba, lpfc_cmd); 2534} 2535 2536/** 2537 * lpfc_fcpcmd_to_iocb - copy the fcp_cmd data into the IOCB 2538 * @data: A pointer to the immediate command data portion of the IOCB. 2539 * @fcp_cmnd: The FCP Command that is provided by the SCSI layer. 2540 * 2541 * The routine copies the entire FCP command from @fcp_cmnd to @data while 2542 * byte swapping the data to big endian format for transmission on the wire. 2543 **/ 2544static void 2545lpfc_fcpcmd_to_iocb(uint8_t *data, struct fcp_cmnd *fcp_cmnd) 2546{ 2547 int i, j; 2548 for (i = 0, j = 0; i < sizeof(struct fcp_cmnd); 2549 i += sizeof(uint32_t), j++) { 2550 ((uint32_t *)data)[j] = cpu_to_be32(((uint32_t *)fcp_cmnd)[j]); 2551 } 2552} 2553 2554/** 2555 * lpfc_scsi_prep_cmnd - Wrapper func for convert scsi cmnd to FCP info unit 2556 * @vport: The virtual port for which this call is being executed. 2557 * @lpfc_cmd: The scsi command which needs to send. 2558 * @pnode: Pointer to lpfc_nodelist. 2559 * 2560 * This routine initializes fcp_cmnd and iocb data structure from scsi command 2561 * to transfer for device with SLI3 interface spec. 2562 **/ 2563static void 2564lpfc_scsi_prep_cmnd(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd, 2565 struct lpfc_nodelist *pnode) 2566{ 2567 struct lpfc_hba *phba = vport->phba; 2568 struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd; 2569 struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd; 2570 IOCB_t *iocb_cmd = &lpfc_cmd->cur_iocbq.iocb; 2571 struct lpfc_iocbq *piocbq = &(lpfc_cmd->cur_iocbq); 2572 int datadir = scsi_cmnd->sc_data_direction; 2573 char tag[2]; 2574 2575 if (!pnode || !NLP_CHK_NODE_ACT(pnode)) 2576 return; 2577 2578 lpfc_cmd->fcp_rsp->rspSnsLen = 0; 2579 /* clear task management bits */ 2580 lpfc_cmd->fcp_cmnd->fcpCntl2 = 0; 2581 2582 int_to_scsilun(lpfc_cmd->pCmd->device->lun, 2583 &lpfc_cmd->fcp_cmnd->fcp_lun); 2584 2585 memcpy(&fcp_cmnd->fcpCdb[0], scsi_cmnd->cmnd, 16); 2586 2587 if (scsi_populate_tag_msg(scsi_cmnd, tag)) { 2588 switch (tag[0]) { 2589 case HEAD_OF_QUEUE_TAG: 2590 fcp_cmnd->fcpCntl1 = HEAD_OF_Q; 2591 break; 2592 case ORDERED_QUEUE_TAG: 2593 fcp_cmnd->fcpCntl1 = ORDERED_Q; 2594 break; 2595 default: 2596 fcp_cmnd->fcpCntl1 = SIMPLE_Q; 2597 break; 2598 } 2599 } else 2600 fcp_cmnd->fcpCntl1 = 0; 2601 2602 /* 2603 * There are three possibilities here - use scatter-gather segment, use 2604 * the single mapping, or neither. Start the lpfc command prep by 2605 * bumping the bpl beyond the fcp_cmnd and fcp_rsp regions to the first 2606 * data bde entry. 
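	 * A non-zero scatter-gather count selects IREAD64/IWRITE64 based on
	 * the data direction; a zero count means no data phase, so the
	 * command goes out as ICMND64.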
2607 */ 2608 if (scsi_sg_count(scsi_cmnd)) { 2609 if (datadir == DMA_TO_DEVICE) { 2610 iocb_cmd->ulpCommand = CMD_FCP_IWRITE64_CR; 2611 if (phba->sli_rev < LPFC_SLI_REV4) { 2612 iocb_cmd->un.fcpi.fcpi_parm = 0; 2613 iocb_cmd->ulpPU = 0; 2614 } else 2615 iocb_cmd->ulpPU = PARM_READ_CHECK; 2616 fcp_cmnd->fcpCntl3 = WRITE_DATA; 2617 phba->fc4OutputRequests++; 2618 } else { 2619 iocb_cmd->ulpCommand = CMD_FCP_IREAD64_CR; 2620 iocb_cmd->ulpPU = PARM_READ_CHECK; 2621 fcp_cmnd->fcpCntl3 = READ_DATA; 2622 phba->fc4InputRequests++; 2623 } 2624 } else { 2625 iocb_cmd->ulpCommand = CMD_FCP_ICMND64_CR; 2626 iocb_cmd->un.fcpi.fcpi_parm = 0; 2627 iocb_cmd->ulpPU = 0; 2628 fcp_cmnd->fcpCntl3 = 0; 2629 phba->fc4ControlRequests++; 2630 } 2631 if (phba->sli_rev == 3 && 2632 !(phba->sli3_options & LPFC_SLI3_BG_ENABLED)) 2633 lpfc_fcpcmd_to_iocb(iocb_cmd->unsli3.fcp_ext.icd, fcp_cmnd); 2634 /* 2635 * Finish initializing those IOCB fields that are independent 2636 * of the scsi_cmnd request_buffer 2637 */ 2638 piocbq->iocb.ulpContext = pnode->nlp_rpi; 2639 if (pnode->nlp_fcp_info & NLP_FCP_2_DEVICE) 2640 piocbq->iocb.ulpFCP2Rcvy = 1; 2641 else 2642 piocbq->iocb.ulpFCP2Rcvy = 0; 2643 2644 piocbq->iocb.ulpClass = (pnode->nlp_fcp_info & 0x0f); 2645 piocbq->context1 = lpfc_cmd; 2646 piocbq->iocb_cmpl = lpfc_scsi_cmd_iocb_cmpl; 2647 piocbq->iocb.ulpTimeout = lpfc_cmd->timeout; 2648 piocbq->vport = vport; 2649} 2650 2651/** 2652 * lpfc_scsi_prep_task_mgmt_cmnd - Convert SLI3 scsi TM cmd to FCP info unit 2653 * @vport: The virtual port for which this call is being executed. 2654 * @lpfc_cmd: Pointer to lpfc_scsi_buf data structure. 2655 * @lun: Logical unit number. 2656 * @task_mgmt_cmd: SCSI task management command. 2657 * 2658 * This routine creates FCP information unit corresponding to @task_mgmt_cmd 2659 * for device with SLI-3 interface spec. 2660 * 2661 * Return codes: 2662 * 0 - Error 2663 * 1 - Success 2664 **/ 2665static int 2666lpfc_scsi_prep_task_mgmt_cmd(struct lpfc_vport *vport, 2667 struct lpfc_scsi_buf *lpfc_cmd, 2668 unsigned int lun, 2669 uint8_t task_mgmt_cmd) 2670{ 2671 struct lpfc_iocbq *piocbq; 2672 IOCB_t *piocb; 2673 struct fcp_cmnd *fcp_cmnd; 2674 struct lpfc_rport_data *rdata = lpfc_cmd->rdata; 2675 struct lpfc_nodelist *ndlp = rdata->pnode; 2676 2677 if (!ndlp || !NLP_CHK_NODE_ACT(ndlp) || 2678 ndlp->nlp_state != NLP_STE_MAPPED_NODE) 2679 return 0; 2680 2681 piocbq = &(lpfc_cmd->cur_iocbq); 2682 piocbq->vport = vport; 2683 2684 piocb = &piocbq->iocb; 2685 2686 fcp_cmnd = lpfc_cmd->fcp_cmnd; 2687 /* Clear out any old data in the FCP command area */ 2688 memset(fcp_cmnd, 0, sizeof(struct fcp_cmnd)); 2689 int_to_scsilun(lun, &fcp_cmnd->fcp_lun); 2690 fcp_cmnd->fcpCntl2 = task_mgmt_cmd; 2691 if (vport->phba->sli_rev == 3 && 2692 !(vport->phba->sli3_options & LPFC_SLI3_BG_ENABLED)) 2693 lpfc_fcpcmd_to_iocb(piocb->unsli3.fcp_ext.icd, fcp_cmnd); 2694 piocb->ulpCommand = CMD_FCP_ICMND64_CR; 2695 piocb->ulpContext = ndlp->nlp_rpi; 2696 if (ndlp->nlp_fcp_info & NLP_FCP_2_DEVICE) { 2697 piocb->ulpFCP2Rcvy = 1; 2698 } 2699 piocb->ulpClass = (ndlp->nlp_fcp_info & 0x0f); 2700 2701 /* ulpTimeout is only one byte */ 2702 if (lpfc_cmd->timeout > 0xff) { 2703 /* 2704 * Do not timeout the command at the firmware level. 2705 * The driver will provide the timeout mechanism. 
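		 * (ulpTimeout is a single byte, so values above 255 cannot
		 * be represented in the firmware command.)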
		 */
		piocb->ulpTimeout = 0;
	} else
		piocb->ulpTimeout = lpfc_cmd->timeout;

	if (vport->phba->sli_rev == LPFC_SLI_REV4)
		lpfc_sli4_set_rsp_sgl_last(vport->phba, lpfc_cmd);

	return 1;
}

/**
 * lpfc_scsi_api_table_setup - Set up the scsi api function jump table
 * @phba: The hba struct for which this call is being executed.
 * @dev_grp: The HBA PCI-Device group number.
 *
 * This routine sets up the SCSI interface API function jump table in the
 * @phba struct.
 * Returns: 0 - success, -ENODEV - failure.
 **/
int
lpfc_scsi_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
{

	phba->lpfc_scsi_unprep_dma_buf = lpfc_scsi_unprep_dma_buf;
	phba->lpfc_scsi_prep_cmnd = lpfc_scsi_prep_cmnd;
	phba->lpfc_get_scsi_buf = lpfc_get_scsi_buf;

	switch (dev_grp) {
	case LPFC_PCI_DEV_LP:
		phba->lpfc_new_scsi_buf = lpfc_new_scsi_buf_s3;
		phba->lpfc_scsi_prep_dma_buf = lpfc_scsi_prep_dma_buf_s3;
		phba->lpfc_release_scsi_buf = lpfc_release_scsi_buf_s3;
		break;
	case LPFC_PCI_DEV_OC:
		phba->lpfc_new_scsi_buf = lpfc_new_scsi_buf_s4;
		phba->lpfc_scsi_prep_dma_buf = lpfc_scsi_prep_dma_buf_s4;
		phba->lpfc_release_scsi_buf = lpfc_release_scsi_buf_s4;
		break;
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1418 Invalid HBA PCI-device group: 0x%x\n",
				dev_grp);
		return -ENODEV;
		break;
	}
	phba->lpfc_rampdown_queue_depth = lpfc_rampdown_queue_depth;
	phba->lpfc_scsi_cmd_iocb_cmpl = lpfc_scsi_cmd_iocb_cmpl;
	return 0;
}

/**
 * lpfc_tskmgmt_def_cmpl - IOCB completion routine for task management command
 * @phba: The Hba for which this call is being executed.
 * @cmdiocbq: Pointer to lpfc_iocbq data structure.
 * @rspiocbq: Pointer to lpfc_iocbq data structure.
 *
 * This routine is the IOCB completion routine for the device reset and
 * target reset paths. It releases the scsi buffer associated with @cmdiocbq.
 **/
static void
lpfc_tskmgmt_def_cmpl(struct lpfc_hba *phba,
			struct lpfc_iocbq *cmdiocbq,
			struct lpfc_iocbq *rspiocbq)
{
	struct lpfc_scsi_buf *lpfc_cmd =
		(struct lpfc_scsi_buf *) cmdiocbq->context1;
	if (lpfc_cmd)
		lpfc_release_scsi_buf(phba, lpfc_cmd);
	return;
}

/**
 * lpfc_info - Info entry point of scsi_host_template data structure
 * @host: The scsi host for which this call is being executed.
 *
 * This routine provides module information about the hba.
 *
 * Return code:
 *   Pointer to char - Success.
 **/
const char *
lpfc_info(struct Scsi_Host *host)
{
	struct lpfc_vport *vport = (struct lpfc_vport *) host->hostdata;
	struct lpfc_hba *phba = vport->phba;
	int len;
	static char lpfcinfobuf[384];

	memset(lpfcinfobuf, 0, 384);
	if (phba && phba->pcidev) {
		strncpy(lpfcinfobuf, phba->ModelDesc, 256);
		len = strlen(lpfcinfobuf);
		snprintf(lpfcinfobuf + len,
			 384 - len,
			 " on PCI bus %02x device %02x irq %d",
			 phba->pcidev->bus->number,
			 phba->pcidev->devfn,
			 phba->pcidev->irq);
		len = strlen(lpfcinfobuf);
		if (phba->Port[0]) {
			snprintf(lpfcinfobuf + len,
				 384 - len,
				 " port %s",
				 phba->Port);
		}
		len = strlen(lpfcinfobuf);
		if (phba->sli4_hba.link_state.logical_speed) {
			snprintf(lpfcinfobuf + len,
				 384 - len,
				 " Logical Link Speed: %d Mbps",
				 phba->sli4_hba.link_state.logical_speed * 10);
		}
	}
	return lpfcinfobuf;
}

/**
 * lpfc_poll_rearm_timer - Routine to modify the fcp_poll timer of the hba
 * @phba: The Hba for which this call is being executed.
 *
 * This routine modifies the fcp_poll_timer field of @phba by cfg_poll_tmo.
 * The default value of cfg_poll_tmo is 10 milliseconds.
 **/
static __inline__ void lpfc_poll_rearm_timer(struct lpfc_hba *phba)
{
	unsigned long poll_tmo_expires =
		(jiffies + msecs_to_jiffies(phba->cfg_poll_tmo));

	if (phba->sli.ring[LPFC_FCP_RING].txcmplq_cnt)
		mod_timer(&phba->fcp_poll_timer,
			  poll_tmo_expires);
}

/**
 * lpfc_poll_start_timer - Routine to start the fcp_poll_timer of the HBA
 * @phba: The Hba for which this call is being executed.
 *
 * This routine starts the fcp_poll_timer of @phba.
 **/
void lpfc_poll_start_timer(struct lpfc_hba *phba)
{
	lpfc_poll_rearm_timer(phba);
}

/**
 * lpfc_poll_timeout - Restart polling timer
 * @ptr: Map to lpfc_hba data structure pointer.
 *
 * This routine restarts the fcp_poll timer when FCP ring polling is enabled
 * and the FCP ring interrupt is disabled.
 **/

void lpfc_poll_timeout(unsigned long ptr)
{
	struct lpfc_hba *phba = (struct lpfc_hba *) ptr;

	if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) {
		lpfc_sli_handle_fast_ring_event(phba,
			&phba->sli.ring[LPFC_FCP_RING], HA_R0RE_REQ);

		if (phba->cfg_poll & DISABLE_FCP_RING_INT)
			lpfc_poll_rearm_timer(phba);
	}
}

/**
 * lpfc_queuecommand - scsi_host_template queuecommand entry point
 * @cmnd: Pointer to scsi_cmnd data structure.
 * @done: Pointer to done routine.
 *
 * The driver registers this routine with the scsi midlayer to submit a @cmnd
 * for processing. This routine prepares an IOCB from the scsi command and
 * provides it to the firmware. The @done callback is invoked after the driver
 * has finished processing the command.
 *
 * Return value :
 *   0 - Success
 *   SCSI_MLQUEUE_HOST_BUSY - Block all devices served by this host temporarily.
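 *   The midlayer requeues the command itself when SCSI_MLQUEUE_HOST_BUSY is
 *   returned, so no completion callback is made on that path.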
2885 **/ 2886static int 2887lpfc_queuecommand(struct scsi_cmnd *cmnd, void (*done) (struct scsi_cmnd *)) 2888{ 2889 struct Scsi_Host *shost = cmnd->device->host; 2890 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; 2891 struct lpfc_hba *phba = vport->phba; 2892 struct lpfc_rport_data *rdata = cmnd->device->hostdata; 2893 struct lpfc_nodelist *ndlp; 2894 struct lpfc_scsi_buf *lpfc_cmd; 2895 struct fc_rport *rport = starget_to_rport(scsi_target(cmnd->device)); 2896 int err; 2897 2898 err = fc_remote_port_chkready(rport); 2899 if (err) { 2900 cmnd->result = err; 2901 goto out_fail_command; 2902 } 2903 ndlp = rdata->pnode; 2904 2905 if (!(phba->sli3_options & LPFC_SLI3_BG_ENABLED) && 2906 scsi_get_prot_op(cmnd) != SCSI_PROT_NORMAL) { 2907 2908 lpfc_printf_log(phba, KERN_ERR, LOG_BG, 2909 "9058 BLKGRD: ERROR: rcvd protected cmd:%02x" 2910 " op:%02x str=%s without registering for" 2911 " BlockGuard - Rejecting command\n", 2912 cmnd->cmnd[0], scsi_get_prot_op(cmnd), 2913 dif_op_str[scsi_get_prot_op(cmnd)]); 2914 goto out_fail_command; 2915 } 2916 2917 /* 2918 * Catch race where our node has transitioned, but the 2919 * transport is still transitioning. 2920 */ 2921 if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) { 2922 cmnd->result = ScsiResult(DID_TRANSPORT_DISRUPTED, 0); 2923 goto out_fail_command; 2924 } 2925 if (atomic_read(&ndlp->cmd_pending) >= ndlp->cmd_qdepth) 2926 goto out_host_busy; 2927 2928 lpfc_cmd = lpfc_get_scsi_buf(phba); 2929 if (lpfc_cmd == NULL) { 2930 lpfc_rampdown_queue_depth(phba); 2931 2932 lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP, 2933 "0707 driver's buffer pool is empty, " 2934 "IO busied\n"); 2935 goto out_host_busy; 2936 } 2937 2938 /* 2939 * Store the midlayer's command structure for the completion phase 2940 * and complete the command initialization. 
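	 * (host_scribble is what later lets lpfc_abort_handler recover the
	 * lpfc_scsi_buf from the midlayer's scsi_cmnd.)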
2941 */ 2942 lpfc_cmd->pCmd = cmnd; 2943 lpfc_cmd->rdata = rdata; 2944 lpfc_cmd->timeout = 0; 2945 lpfc_cmd->start_time = jiffies; 2946 cmnd->host_scribble = (unsigned char *)lpfc_cmd; 2947 cmnd->scsi_done = done; 2948 2949 if (scsi_get_prot_op(cmnd) != SCSI_PROT_NORMAL) { 2950 if (vport->phba->cfg_enable_bg) { 2951 lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG, 2952 "9033 BLKGRD: rcvd protected cmd:%02x op:%02x " 2953 "str=%s\n", 2954 cmnd->cmnd[0], scsi_get_prot_op(cmnd), 2955 dif_op_str[scsi_get_prot_op(cmnd)]); 2956 lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG, 2957 "9034 BLKGRD: CDB: %02x %02x %02x %02x %02x " 2958 "%02x %02x %02x %02x %02x\n", 2959 cmnd->cmnd[0], cmnd->cmnd[1], cmnd->cmnd[2], 2960 cmnd->cmnd[3], cmnd->cmnd[4], cmnd->cmnd[5], 2961 cmnd->cmnd[6], cmnd->cmnd[7], cmnd->cmnd[8], 2962 cmnd->cmnd[9]); 2963 if (cmnd->cmnd[0] == READ_10) 2964 lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG, 2965 "9035 BLKGRD: READ @ sector %llu, " 2966 "count %u\n", 2967 (unsigned long long)scsi_get_lba(cmnd), 2968 blk_rq_sectors(cmnd->request)); 2969 else if (cmnd->cmnd[0] == WRITE_10) 2970 lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG, 2971 "9036 BLKGRD: WRITE @ sector %llu, " 2972 "count %u cmd=%p\n", 2973 (unsigned long long)scsi_get_lba(cmnd), 2974 blk_rq_sectors(cmnd->request), 2975 cmnd); 2976 } 2977 2978 err = lpfc_bg_scsi_prep_dma_buf(phba, lpfc_cmd); 2979 } else { 2980 if (vport->phba->cfg_enable_bg) { 2981 lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG, 2982 "9038 BLKGRD: rcvd unprotected cmd:" 2983 "%02x op:%02x str=%s\n", 2984 cmnd->cmnd[0], scsi_get_prot_op(cmnd), 2985 dif_op_str[scsi_get_prot_op(cmnd)]); 2986 lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG, 2987 "9039 BLKGRD: CDB: %02x %02x %02x " 2988 "%02x %02x %02x %02x %02x %02x %02x\n", 2989 cmnd->cmnd[0], cmnd->cmnd[1], 2990 cmnd->cmnd[2], cmnd->cmnd[3], 2991 cmnd->cmnd[4], cmnd->cmnd[5], 2992 cmnd->cmnd[6], cmnd->cmnd[7], 2993 cmnd->cmnd[8], cmnd->cmnd[9]); 2994 if (cmnd->cmnd[0] == READ_10) 2995 lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG, 2996 "9040 dbg: READ @ sector %llu, " 2997 "count %u\n", 2998 (unsigned long long)scsi_get_lba(cmnd), 2999 blk_rq_sectors(cmnd->request)); 3000 else if (cmnd->cmnd[0] == WRITE_10) 3001 lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG, 3002 "9041 dbg: WRITE @ sector %llu, " 3003 "count %u cmd=%p\n", 3004 (unsigned long long)scsi_get_lba(cmnd), 3005 blk_rq_sectors(cmnd->request), cmnd); 3006 else 3007 lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG, 3008 "9042 dbg: parser not implemented\n"); 3009 } 3010 err = lpfc_scsi_prep_dma_buf(phba, lpfc_cmd); 3011 } 3012 3013 if (err) 3014 goto out_host_busy_free_buf; 3015 3016 lpfc_scsi_prep_cmnd(vport, lpfc_cmd, ndlp); 3017 3018 atomic_inc(&ndlp->cmd_pending); 3019 err = lpfc_sli_issue_iocb(phba, LPFC_FCP_RING, 3020 &lpfc_cmd->cur_iocbq, SLI_IOCB_RET_IOCB); 3021 if (err) { 3022 atomic_dec(&ndlp->cmd_pending); 3023 goto out_host_busy_free_buf; 3024 } 3025 if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) { 3026 spin_unlock(shost->host_lock); 3027 lpfc_sli_handle_fast_ring_event(phba, 3028 &phba->sli.ring[LPFC_FCP_RING], HA_R0RE_REQ); 3029 3030 spin_lock(shost->host_lock); 3031 if (phba->cfg_poll & DISABLE_FCP_RING_INT) 3032 lpfc_poll_rearm_timer(phba); 3033 } 3034 3035 return 0; 3036 3037 out_host_busy_free_buf: 3038 lpfc_scsi_unprep_dma_buf(phba, lpfc_cmd); 3039 lpfc_release_scsi_buf(phba, lpfc_cmd); 3040 out_host_busy: 3041 return SCSI_MLQUEUE_HOST_BUSY; 3042 3043 out_fail_command: 3044 done(cmnd); 3045 return 0; 3046} 3047 3048/** 3049 * lpfc_abort_handler - 
scsi_host_template eh_abort_handler entry point
 * @cmnd: Pointer to scsi_cmnd data structure.
 *
 * This routine aborts @cmnd pending in the base driver.
 *
 * Return code :
 *   0x2003 - Error
 *   0x2002 - Success
 **/
static int
lpfc_abort_handler(struct scsi_cmnd *cmnd)
{
	struct Scsi_Host *shost = cmnd->device->host;
	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_iocbq *iocb;
	struct lpfc_iocbq *abtsiocb;
	struct lpfc_scsi_buf *lpfc_cmd;
	IOCB_t *cmd, *icmd;
	int ret = SUCCESS;
	DECLARE_WAIT_QUEUE_HEAD_ONSTACK(waitq);

	ret = fc_block_scsi_eh(cmnd);
	if (ret)
		return ret;
	lpfc_cmd = (struct lpfc_scsi_buf *)cmnd->host_scribble;
	BUG_ON(!lpfc_cmd);

	/*
	 * If the pCmd field of the corresponding lpfc_scsi_buf structure
	 * points to a different SCSI command, then the driver has
	 * already completed this command, but the midlayer did not
	 * see the completion before the eh fired. Just return
	 * SUCCESS.
	 */
	iocb = &lpfc_cmd->cur_iocbq;
	if (lpfc_cmd->pCmd != cmnd)
		goto out;

	BUG_ON(iocb->context1 != lpfc_cmd);

	abtsiocb = lpfc_sli_get_iocbq(phba);
	if (abtsiocb == NULL) {
		ret = FAILED;
		goto out;
	}

	/*
	 * The scsi command cannot be in the txq, and it is in flight because
	 * pCmd is still pointing at the SCSI command we have to abort. There
	 * is no need to search the txcmplq. Just send an abort to the FW.
	 */

	cmd = &iocb->iocb;
	icmd = &abtsiocb->iocb;
	icmd->un.acxri.abortType = ABORT_TYPE_ABTS;
	icmd->un.acxri.abortContextTag = cmd->ulpContext;
	if (phba->sli_rev == LPFC_SLI_REV4)
		icmd->un.acxri.abortIoTag = iocb->sli4_xritag;
	else
		icmd->un.acxri.abortIoTag = cmd->ulpIoTag;

	icmd->ulpLe = 1;
	icmd->ulpClass = cmd->ulpClass;

	/* ABTS WQE must go to the same WQ as the WQE to be aborted */
	abtsiocb->fcp_wqidx = iocb->fcp_wqidx;
	abtsiocb->iocb_flag |= LPFC_USE_FCPWQIDX;

	if (lpfc_is_link_up(phba))
		icmd->ulpCommand = CMD_ABORT_XRI_CN;
	else
		icmd->ulpCommand = CMD_CLOSE_XRI_CN;

	abtsiocb->iocb_cmpl = lpfc_sli_abort_fcp_cmpl;
	abtsiocb->vport = vport;
	if (lpfc_sli_issue_iocb(phba, LPFC_FCP_RING, abtsiocb, 0) ==
	    IOCB_ERROR) {
		lpfc_sli_release_iocbq(phba, abtsiocb);
		ret = FAILED;
		goto out;
	}

	if (phba->cfg_poll & DISABLE_FCP_RING_INT)
		lpfc_sli_handle_fast_ring_event(phba,
			&phba->sli.ring[LPFC_FCP_RING], HA_R0RE_REQ);

	lpfc_cmd->waitq = &waitq;
	/* Wait for the abort to complete */
	wait_event_timeout(waitq,
			   (lpfc_cmd->pCmd != cmnd),
			   (2*vport->cfg_devloss_tmo*HZ));

	spin_lock_irq(shost->host_lock);
	lpfc_cmd->waitq = NULL;
	spin_unlock_irq(shost->host_lock);

	if (lpfc_cmd->pCmd == cmnd) {
		ret = FAILED;
		lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
				 "0748 abort handler timed out waiting "
				 "for abort to complete: ret %#x, ID %d, "
				 "LUN %d, snum %#lx\n",
				 ret, cmnd->device->id, cmnd->device->lun,
				 cmnd->serial_number);
	}

 out:
	lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
			 "0749 SCSI Layer I/O Abort Request Status x%x ID %d "
			 "LUN %d snum %#lx\n", ret, cmnd->device->id,
			 cmnd->device->lun, cmnd->serial_number);
	return ret;
}

static char *
3165lpfc_taskmgmt_name(uint8_t task_mgmt_cmd) 3166{ 3167 switch (task_mgmt_cmd) { 3168 case FCP_ABORT_TASK_SET: 3169 return "ABORT_TASK_SET"; 3170 case FCP_CLEAR_TASK_SET: 3171 return "FCP_CLEAR_TASK_SET"; 3172 case FCP_BUS_RESET: 3173 return "FCP_BUS_RESET"; 3174 case FCP_LUN_RESET: 3175 return "FCP_LUN_RESET"; 3176 case FCP_TARGET_RESET: 3177 return "FCP_TARGET_RESET"; 3178 case FCP_CLEAR_ACA: 3179 return "FCP_CLEAR_ACA"; 3180 case FCP_TERMINATE_TASK: 3181 return "FCP_TERMINATE_TASK"; 3182 default: 3183 return "unknown"; 3184 } 3185} 3186 3187/** 3188 * lpfc_send_taskmgmt - Generic SCSI Task Mgmt Handler 3189 * @vport: The virtual port for which this call is being executed. 3190 * @rdata: Pointer to remote port local data 3191 * @tgt_id: Target ID of remote device. 3192 * @lun_id: Lun number for the TMF 3193 * @task_mgmt_cmd: type of TMF to send 3194 * 3195 * This routine builds and sends a TMF (SCSI Task Mgmt Function) to 3196 * a remote port. 3197 * 3198 * Return Code: 3199 * 0x2003 - Error 3200 * 0x2002 - Success. 3201 **/ 3202static int 3203lpfc_send_taskmgmt(struct lpfc_vport *vport, struct lpfc_rport_data *rdata, 3204 unsigned tgt_id, unsigned int lun_id, 3205 uint8_t task_mgmt_cmd) 3206{ 3207 struct lpfc_hba *phba = vport->phba; 3208 struct lpfc_scsi_buf *lpfc_cmd; 3209 struct lpfc_iocbq *iocbq; 3210 struct lpfc_iocbq *iocbqrsp; 3211 int ret; 3212 int status; 3213 3214 if (!rdata->pnode || !NLP_CHK_NODE_ACT(rdata->pnode)) 3215 return FAILED; 3216 3217 lpfc_cmd = lpfc_get_scsi_buf(phba); 3218 if (lpfc_cmd == NULL) 3219 return FAILED; 3220 lpfc_cmd->timeout = 60; 3221 lpfc_cmd->rdata = rdata; 3222 3223 status = lpfc_scsi_prep_task_mgmt_cmd(vport, lpfc_cmd, lun_id, 3224 task_mgmt_cmd); 3225 if (!status) { 3226 lpfc_release_scsi_buf(phba, lpfc_cmd); 3227 return FAILED; 3228 } 3229 3230 iocbq = &lpfc_cmd->cur_iocbq; 3231 iocbqrsp = lpfc_sli_get_iocbq(phba); 3232 if (iocbqrsp == NULL) { 3233 lpfc_release_scsi_buf(phba, lpfc_cmd); 3234 return FAILED; 3235 } 3236 3237 lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP, 3238 "0702 Issue %s to TGT %d LUN %d " 3239 "rpi x%x nlp_flag x%x\n", 3240 lpfc_taskmgmt_name(task_mgmt_cmd), tgt_id, lun_id, 3241 rdata->pnode->nlp_rpi, rdata->pnode->nlp_flag); 3242 3243 status = lpfc_sli_issue_iocb_wait(phba, LPFC_FCP_RING, 3244 iocbq, iocbqrsp, lpfc_cmd->timeout); 3245 if (status != IOCB_SUCCESS) { 3246 if (status == IOCB_TIMEDOUT) { 3247 iocbq->iocb_cmpl = lpfc_tskmgmt_def_cmpl; 3248 ret = TIMEOUT_ERROR; 3249 } else 3250 ret = FAILED; 3251 lpfc_cmd->status = IOSTAT_DRIVER_REJECT; 3252 lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP, 3253 "0727 TMF %s to TGT %d LUN %d failed (%d, %d)\n", 3254 lpfc_taskmgmt_name(task_mgmt_cmd), 3255 tgt_id, lun_id, iocbqrsp->iocb.ulpStatus, 3256 iocbqrsp->iocb.un.ulpWord[4]); 3257 } else if (status == IOCB_BUSY) 3258 ret = FAILED; 3259 else 3260 ret = SUCCESS; 3261 3262 lpfc_sli_release_iocbq(phba, iocbqrsp); 3263 3264 if (ret != TIMEOUT_ERROR) 3265 lpfc_release_scsi_buf(phba, lpfc_cmd); 3266 3267 return ret; 3268} 3269 3270/** 3271 * lpfc_chk_tgt_mapped - 3272 * @vport: The virtual port to check on 3273 * @cmnd: Pointer to scsi_cmnd data structure. 3274 * 3275 * This routine delays until the scsi target (aka rport) for the 3276 * command exists (is present and logged in) or we declare it non-existent. 
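 * The wait is bounded by 2 * devloss_tmo, polling the node state every
 * 500 milliseconds.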
 *
 * Return code :
 *  0x2003 - Error
 *  0x2002 - Success
 **/
static int
lpfc_chk_tgt_mapped(struct lpfc_vport *vport, struct scsi_cmnd *cmnd)
{
	struct lpfc_rport_data *rdata = cmnd->device->hostdata;
	struct lpfc_nodelist *pnode;
	unsigned long later;

	if (!rdata) {
		lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
			"0797 Tgt Map rport failure: rdata x%p\n", rdata);
		return FAILED;
	}
	pnode = rdata->pnode;
	/*
	 * If the target is not in a MAPPED state, delay until the
	 * target is rediscovered or the devloss timeout expires.
	 */
	later = msecs_to_jiffies(2 * vport->cfg_devloss_tmo * 1000) + jiffies;
	while (time_after(later, jiffies)) {
		if (!pnode || !NLP_CHK_NODE_ACT(pnode))
			return FAILED;
		if (pnode->nlp_state == NLP_STE_MAPPED_NODE)
			return SUCCESS;
		schedule_timeout_uninterruptible(msecs_to_jiffies(500));
		rdata = cmnd->device->hostdata;
		if (!rdata)
			return FAILED;
		pnode = rdata->pnode;
	}
	if (!pnode || !NLP_CHK_NODE_ACT(pnode) ||
	    (pnode->nlp_state != NLP_STE_MAPPED_NODE))
		return FAILED;
	return SUCCESS;
}

/**
 * lpfc_reset_flush_io_context - Flush outstanding i/o after a TMF reset
 * @vport: The virtual port (scsi_host) for the flush context
 * @tgt_id: If aborting by Target context - specifies the target id
 * @lun_id: If aborting by Lun context - specifies the lun id
 * @context: Specifies the context level to flush at.
 *
 * After a reset condition via TMF, we need to flush orphaned i/o
 * contexts from the adapter. This routine aborts any contexts
 * outstanding, then waits for their completions. The wait is
 * bounded by devloss_tmo though.
 *
 * Return code :
 *  0x2003 - Error
 *  0x2002 - Success
 **/
static int
lpfc_reset_flush_io_context(struct lpfc_vport *vport, uint16_t tgt_id,
			    uint64_t lun_id, lpfc_ctx_cmd context)
{
	struct lpfc_hba *phba = vport->phba;
	unsigned long later;
	int cnt;

	cnt = lpfc_sli_sum_iocb(vport, tgt_id, lun_id, context);
	if (cnt)
		lpfc_sli_abort_iocb(vport, &phba->sli.ring[phba->sli.fcp_ring],
				    tgt_id, lun_id, context);
	later = msecs_to_jiffies(2 * vport->cfg_devloss_tmo * 1000) + jiffies;
	while (time_after(later, jiffies) && cnt) {
		schedule_timeout_uninterruptible(msecs_to_jiffies(20));
		cnt = lpfc_sli_sum_iocb(vport, tgt_id, lun_id, context);
	}
	if (cnt) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
			"0724 I/O flush failure for context %s : cnt x%x\n",
			((context == LPFC_CTX_LUN) ? "LUN" :
			 ((context == LPFC_CTX_TGT) ? "TGT" :
			  ((context == LPFC_CTX_HOST) ? "HOST" : "Unknown"))),
			cnt);
		return FAILED;
	}
	return SUCCESS;
}

/**
 * lpfc_device_reset_handler - scsi_host_template eh_device_reset entry point
 * @cmnd: Pointer to scsi_cmnd data structure.
 *
 * This routine does a device reset by sending a LUN_RESET task management
 * command.
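 * Any I/O left on the LUN afterwards is flushed via
 * lpfc_reset_flush_io_context().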
3368 * 3369 * Return code : 3370 * 0x2003 - Error 3371 * 0x2002 - Success 3372 **/ 3373static int 3374lpfc_device_reset_handler(struct scsi_cmnd *cmnd) 3375{ 3376 struct Scsi_Host *shost = cmnd->device->host; 3377 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; 3378 struct lpfc_rport_data *rdata = cmnd->device->hostdata; 3379 struct lpfc_nodelist *pnode; 3380 unsigned tgt_id = cmnd->device->id; 3381 unsigned int lun_id = cmnd->device->lun; 3382 struct lpfc_scsi_event_header scsi_event; 3383 int status; 3384 3385 if (!rdata) { 3386 lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP, 3387 "0798 Device Reset rport failure: rdata x%p\n", rdata); 3388 return FAILED; 3389 } 3390 pnode = rdata->pnode; 3391 status = fc_block_scsi_eh(cmnd); 3392 if (status) 3393 return status; 3394 3395 status = lpfc_chk_tgt_mapped(vport, cmnd); 3396 if (status == FAILED) { 3397 lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP, 3398 "0721 Device Reset rport failure: rdata x%p\n", rdata); 3399 return FAILED; 3400 } 3401 3402 scsi_event.event_type = FC_REG_SCSI_EVENT; 3403 scsi_event.subcategory = LPFC_EVENT_LUNRESET; 3404 scsi_event.lun = lun_id; 3405 memcpy(scsi_event.wwpn, &pnode->nlp_portname, sizeof(struct lpfc_name)); 3406 memcpy(scsi_event.wwnn, &pnode->nlp_nodename, sizeof(struct lpfc_name)); 3407 3408 fc_host_post_vendor_event(shost, fc_get_event_number(), 3409 sizeof(scsi_event), (char *)&scsi_event, LPFC_NL_VENDOR_ID); 3410 3411 status = lpfc_send_taskmgmt(vport, rdata, tgt_id, lun_id, 3412 FCP_LUN_RESET); 3413 3414 lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP, 3415 "0713 SCSI layer issued Device Reset (%d, %d) " 3416 "return x%x\n", tgt_id, lun_id, status); 3417 3418 /* 3419 * We have to clean up i/o as : they may be orphaned by the TMF; 3420 * or if the TMF failed, they may be in an indeterminate state. 3421 * So, continue on. 3422 * We will report success if all the i/o aborts successfully. 3423 */ 3424 status = lpfc_reset_flush_io_context(vport, tgt_id, lun_id, 3425 LPFC_CTX_LUN); 3426 return status; 3427} 3428 3429/** 3430 * lpfc_target_reset_handler - scsi_host_template eh_target_reset entry point 3431 * @cmnd: Pointer to scsi_cmnd data structure. 3432 * 3433 * This routine does a target reset by sending a TARGET_RESET task management 3434 * command. 
3435 * 3436 * Return code : 3437 * 0x2003 - Error 3438 * 0x2002 - Success 3439 **/ 3440static int 3441lpfc_target_reset_handler(struct scsi_cmnd *cmnd) 3442{ 3443 struct Scsi_Host *shost = cmnd->device->host; 3444 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; 3445 struct lpfc_rport_data *rdata = cmnd->device->hostdata; 3446 struct lpfc_nodelist *pnode; 3447 unsigned tgt_id = cmnd->device->id; 3448 unsigned int lun_id = cmnd->device->lun; 3449 struct lpfc_scsi_event_header scsi_event; 3450 int status; 3451 3452 if (!rdata) { 3453 lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP, 3454 "0799 Target Reset rport failure: rdata x%p\n", rdata); 3455 return FAILED; 3456 } 3457 pnode = rdata->pnode; 3458 status = fc_block_scsi_eh(cmnd); 3459 if (status) 3460 return status; 3461 3462 status = lpfc_chk_tgt_mapped(vport, cmnd); 3463 if (status == FAILED) { 3464 lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP, 3465 "0722 Target Reset rport failure: rdata x%p\n", rdata); 3466 return FAILED; 3467 } 3468 3469 scsi_event.event_type = FC_REG_SCSI_EVENT; 3470 scsi_event.subcategory = LPFC_EVENT_TGTRESET; 3471 scsi_event.lun = 0; 3472 memcpy(scsi_event.wwpn, &pnode->nlp_portname, sizeof(struct lpfc_name)); 3473 memcpy(scsi_event.wwnn, &pnode->nlp_nodename, sizeof(struct lpfc_name)); 3474 3475 fc_host_post_vendor_event(shost, fc_get_event_number(), 3476 sizeof(scsi_event), (char *)&scsi_event, LPFC_NL_VENDOR_ID); 3477 3478 status = lpfc_send_taskmgmt(vport, rdata, tgt_id, lun_id, 3479 FCP_TARGET_RESET); 3480 3481 lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP, 3482 "0723 SCSI layer issued Target Reset (%d, %d) " 3483 "return x%x\n", tgt_id, lun_id, status); 3484 3485 /* 3486 * We have to clean up i/o as : they may be orphaned by the TMF; 3487 * or if the TMF failed, they may be in an indeterminate state. 3488 * So, continue on. 3489 * We will report success if all the i/o aborts successfully. 3490 */ 3491 status = lpfc_reset_flush_io_context(vport, tgt_id, lun_id, 3492 LPFC_CTX_TGT); 3493 return status; 3494} 3495 3496/** 3497 * lpfc_bus_reset_handler - scsi_host_template eh_bus_reset_handler entry point 3498 * @cmnd: Pointer to scsi_cmnd data structure. 3499 * 3500 * This routine does target reset to all targets on @cmnd->device->host. 3501 * This emulates Parallel SCSI Bus Reset Semantics. 3502 * 3503 * Return code : 3504 * 0x2003 - Error 3505 * 0x2002 - Success 3506 **/ 3507static int 3508lpfc_bus_reset_handler(struct scsi_cmnd *cmnd) 3509{ 3510 struct Scsi_Host *shost = cmnd->device->host; 3511 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; 3512 struct lpfc_nodelist *ndlp = NULL; 3513 struct lpfc_scsi_event_header scsi_event; 3514 int match; 3515 int ret = SUCCESS, status, i; 3516 3517 scsi_event.event_type = FC_REG_SCSI_EVENT; 3518 scsi_event.subcategory = LPFC_EVENT_BUSRESET; 3519 scsi_event.lun = 0; 3520 memcpy(scsi_event.wwpn, &vport->fc_portname, sizeof(struct lpfc_name)); 3521 memcpy(scsi_event.wwnn, &vport->fc_nodename, sizeof(struct lpfc_name)); 3522 3523 fc_host_post_vendor_event(shost, fc_get_event_number(), 3524 sizeof(scsi_event), (char *)&scsi_event, LPFC_NL_VENDOR_ID); 3525 3526 ret = fc_block_scsi_eh(cmnd); 3527 if (ret) 3528 return ret; 3529 3530 /* 3531 * Since the driver manages a single bus device, reset all 3532 * targets known to the driver. Should any target reset 3533 * fail, this routine returns failure to the midlayer. 
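	 * The node list is walked under the host lock, but each TARGET_RESET
	 * TMF is issued only after the lock has been dropped.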
/**
 * lpfc_bus_reset_handler - scsi_host_template eh_bus_reset_handler entry point
 * @cmnd: Pointer to scsi_cmnd data structure.
 *
 * This routine does a target reset on every target of @cmnd->device->host.
 * This emulates Parallel SCSI Bus Reset Semantics.
 *
 * Return code:
 *  0x2003 - Error
 *  0x2002 - Success
 **/
static int
lpfc_bus_reset_handler(struct scsi_cmnd *cmnd)
{
	struct Scsi_Host *shost = cmnd->device->host;
	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
	struct lpfc_nodelist *ndlp = NULL;
	struct lpfc_scsi_event_header scsi_event;
	int match;
	int ret = SUCCESS, status, i;

	scsi_event.event_type = FC_REG_SCSI_EVENT;
	scsi_event.subcategory = LPFC_EVENT_BUSRESET;
	scsi_event.lun = 0;
	memcpy(scsi_event.wwpn, &vport->fc_portname, sizeof(struct lpfc_name));
	memcpy(scsi_event.wwnn, &vport->fc_nodename, sizeof(struct lpfc_name));

	fc_host_post_vendor_event(shost, fc_get_event_number(),
		sizeof(scsi_event), (char *)&scsi_event, LPFC_NL_VENDOR_ID);

	ret = fc_block_scsi_eh(cmnd);
	if (ret)
		return ret;

	/*
	 * Since the driver manages a single bus device, reset all
	 * targets known to the driver.  Should any target reset
	 * fail, this routine returns failure to the midlayer.
	 */
	for (i = 0; i < LPFC_MAX_TARGET; i++) {
		/* Search for a mapped node by target ID */
		match = 0;
		spin_lock_irq(shost->host_lock);
		list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
			if (!NLP_CHK_NODE_ACT(ndlp))
				continue;
			if (ndlp->nlp_state == NLP_STE_MAPPED_NODE &&
			    ndlp->nlp_sid == i &&
			    ndlp->rport) {
				match = 1;
				break;
			}
		}
		spin_unlock_irq(shost->host_lock);
		if (!match)
			continue;

		status = lpfc_send_taskmgmt(vport, ndlp->rport->dd_data,
					    i, 0, FCP_TARGET_RESET);

		if (status != SUCCESS) {
			lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
					 "0700 Bus Reset on target %d failed\n",
					 i);
			ret = FAILED;
		}
	}
	/*
	 * We have to clean up the i/o: it may have been orphaned by the
	 * TMFs above, or, if any of the TMFs failed, it may be in an
	 * indeterminate state.
	 * We will report success if all the i/o aborts successfully.
	 */
	status = lpfc_reset_flush_io_context(vport, 0, 0, LPFC_CTX_HOST);
	if (status != SUCCESS)
		ret = FAILED;

	lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
			 "0714 SCSI layer issued Bus Reset Data: x%x\n", ret);
	return ret;
}
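/*
 * Editor's sketch (not part of the driver): the match test used by the
 * bus reset loop above, pulled out for readability.  The name is
 * invented; a caller would need to hold shost->host_lock while walking
 * vport->fc_nodes, just as the loop above does.
 */
static inline int
lpfc_node_maps_tgt_sketch(struct lpfc_nodelist *ndlp, unsigned int tgt_id)
{
	/* Ignore nodes that are no longer active */
	if (!NLP_CHK_NODE_ACT(ndlp))
		return 0;
	/* Only a mapped node with a bound rport can take FCP_TARGET_RESET */
	return ndlp->nlp_state == NLP_STE_MAPPED_NODE &&
	       ndlp->nlp_sid == tgt_id &&
	       ndlp->rport != NULL;
}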
" 3635 "Reducing allocation request to %d.\n", 3636 num_to_alloc, phba->cfg_hba_queue_depth, 3637 (phba->cfg_hba_queue_depth - total)); 3638 num_to_alloc = phba->cfg_hba_queue_depth - total; 3639 } 3640 num_allocated = lpfc_new_scsi_buf(vport, num_to_alloc); 3641 if (num_to_alloc != num_allocated) { 3642 lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP, 3643 "0708 Allocation request of %d " 3644 "command buffers did not succeed. " 3645 "Allocated %d buffers.\n", 3646 num_to_alloc, num_allocated); 3647 } 3648 if (num_allocated > 0) 3649 phba->total_scsi_bufs += num_allocated; 3650 return 0; 3651} 3652 3653/** 3654 * lpfc_slave_configure - scsi_host_template slave_configure entry point 3655 * @sdev: Pointer to scsi_device. 3656 * 3657 * This routine configures following items 3658 * - Tag command queuing support for @sdev if supported. 3659 * - Dev loss time out value of fc_rport. 3660 * - Enable SLI polling for fcp ring if ENABLE_FCP_RING_POLLING flag is set. 3661 * 3662 * Return codes: 3663 * 0 - Success 3664 **/ 3665static int 3666lpfc_slave_configure(struct scsi_device *sdev) 3667{ 3668 struct lpfc_vport *vport = (struct lpfc_vport *) sdev->host->hostdata; 3669 struct lpfc_hba *phba = vport->phba; 3670 struct fc_rport *rport = starget_to_rport(sdev->sdev_target); 3671 3672 if (sdev->tagged_supported) 3673 scsi_activate_tcq(sdev, vport->cfg_lun_queue_depth); 3674 else 3675 scsi_deactivate_tcq(sdev, vport->cfg_lun_queue_depth); 3676 3677 /* 3678 * Initialize the fc transport attributes for the target 3679 * containing this scsi device. Also note that the driver's 3680 * target pointer is stored in the starget_data for the 3681 * driver's sysfs entry point functions. 3682 */ 3683 rport->dev_loss_tmo = vport->cfg_devloss_tmo; 3684 3685 if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) { 3686 lpfc_sli_handle_fast_ring_event(phba, 3687 &phba->sli.ring[LPFC_FCP_RING], HA_R0RE_REQ); 3688 if (phba->cfg_poll & DISABLE_FCP_RING_INT) 3689 lpfc_poll_rearm_timer(phba); 3690 } 3691 3692 return 0; 3693} 3694 3695/** 3696 * lpfc_slave_destroy - slave_destroy entry point of SHT data structure 3697 * @sdev: Pointer to scsi_device. 3698 * 3699 * This routine sets @sdev hostatdata filed to null. 
/**
 * lpfc_slave_destroy - slave_destroy entry point of SHT data structure
 * @sdev: Pointer to scsi_device.
 *
 * This routine sets the @sdev hostdata field to NULL.
 **/
static void
lpfc_slave_destroy(struct scsi_device *sdev)
{
	struct lpfc_vport *vport = (struct lpfc_vport *) sdev->host->hostdata;
	struct lpfc_hba *phba = vport->phba;

	atomic_dec(&phba->sdev_cnt);
	sdev->hostdata = NULL;
}


struct scsi_host_template lpfc_template = {
	.module = THIS_MODULE,
	.name = LPFC_DRIVER_NAME,
	.info = lpfc_info,
	.queuecommand = lpfc_queuecommand,
	.eh_abort_handler = lpfc_abort_handler,
	.eh_device_reset_handler = lpfc_device_reset_handler,
	.eh_target_reset_handler = lpfc_target_reset_handler,
	.eh_bus_reset_handler = lpfc_bus_reset_handler,
	.slave_alloc = lpfc_slave_alloc,
	.slave_configure = lpfc_slave_configure,
	.slave_destroy = lpfc_slave_destroy,
	.scan_finished = lpfc_scan_finished,
	.this_id = -1,
	.sg_tablesize = LPFC_DEFAULT_SG_SEG_CNT,
	.cmd_per_lun = LPFC_CMD_PER_LUN,
	.use_clustering = ENABLE_CLUSTERING,
	.shost_attrs = lpfc_hba_attrs,
	.max_sectors = 0xFFFF,
	.vendor_id = LPFC_NL_VENDOR_ID,
	.change_queue_depth = lpfc_change_queue_depth,
};

struct scsi_host_template lpfc_vport_template = {
	.module = THIS_MODULE,
	.name = LPFC_DRIVER_NAME,
	.info = lpfc_info,
	.queuecommand = lpfc_queuecommand,
	.eh_abort_handler = lpfc_abort_handler,
	.eh_device_reset_handler = lpfc_device_reset_handler,
	.eh_target_reset_handler = lpfc_target_reset_handler,
	.eh_bus_reset_handler = lpfc_bus_reset_handler,
	.slave_alloc = lpfc_slave_alloc,
	.slave_configure = lpfc_slave_configure,
	.slave_destroy = lpfc_slave_destroy,
	.scan_finished = lpfc_scan_finished,
	.this_id = -1,
	.sg_tablesize = LPFC_DEFAULT_SG_SEG_CNT,
	.cmd_per_lun = LPFC_CMD_PER_LUN,
	.use_clustering = ENABLE_CLUSTERING,
	.shost_attrs = lpfc_vport_attrs,
	.max_sectors = 0xFFFF,
	.change_queue_depth = lpfc_change_queue_depth,
};
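/*
 * Editor's note: the "0x2002 - Success / 0x2003 - Error" values quoted
 * in the eh_* kernel-doc headers above are assumed to be the midlayer's
 * SUCCESS and FAILED codes from <scsi/scsi.h>.  A build-time check of
 * that assumption could look like the sketch below; the helper name is
 * invented and the function is never called.
 */
static inline void
lpfc_eh_retval_check_sketch(void)
{
	BUILD_BUG_ON(SUCCESS != 0x2002);
	BUILD_BUG_ON(FAILED != 0x2003);
}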