1/*******************************************************************
2 * This file is part of the Emulex Linux Device Driver for         *
3 * Fibre Channel Host Bus Adapters.                                *
4 * Copyright (C) 2017-2024 Broadcom. All Rights Reserved. The term *
5 * ���Broadcom��� refers to Broadcom Inc. and/or its subsidiaries.     *
6 * Copyright (C) 2009-2015 Emulex.  All rights reserved.           *
7 * EMULEX and SLI are trademarks of Emulex.                        *
8 * www.broadcom.com                                                *
9 *                                                                 *
10 * This program is free software; you can redistribute it and/or   *
11 * modify it under the terms of version 2 of the GNU General       *
12 * Public License as published by the Free Software Foundation.    *
13 * This program is distributed in the hope that it will be useful. *
14 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
15 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
16 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
17 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
18 * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
19 * more details, a copy of which can be found in the file COPYING  *
20 * included with this package.                                     *
21 *******************************************************************/
22
23#include <linux/interrupt.h>
24#include <linux/mempool.h>
25#include <linux/pci.h>
26#include <linux/slab.h>
27#include <linux/delay.h>
28#include <linux/list.h>
29#include <linux/bsg-lib.h>
30#include <linux/vmalloc.h>
31
32#include <scsi/scsi.h>
33#include <scsi/scsi_host.h>
34#include <scsi/scsi_transport_fc.h>
35#include <scsi/scsi_bsg_fc.h>
36#include <scsi/fc/fc_fs.h>
37
38#include "lpfc_hw4.h"
39#include "lpfc_hw.h"
40#include "lpfc_sli.h"
41#include "lpfc_sli4.h"
42#include "lpfc_nl.h"
43#include "lpfc_bsg.h"
44#include "lpfc_disc.h"
45#include "lpfc_scsi.h"
46#include "lpfc.h"
47#include "lpfc_logmsg.h"
48#include "lpfc_crtn.h"
49#include "lpfc_debugfs.h"
50#include "lpfc_vport.h"
51#include "lpfc_version.h"
52
53struct lpfc_bsg_event {
54	struct list_head node;
55	struct kref kref;
56	wait_queue_head_t wq;
57
58	/* Event type and waiter identifiers */
59	uint32_t type_mask;
60	uint32_t req_id;
61	uint32_t reg_id;
62
63	/* next two flags are here for the auto-delete logic */
64	unsigned long wait_time_stamp;
65	int waiting;
66
67	/* seen and not seen events */
68	struct list_head events_to_get;
69	struct list_head events_to_see;
70
71	/* driver data associated with the job */
72	void *dd_data;
73};
74
75struct lpfc_bsg_iocb {
76	struct lpfc_iocbq *cmdiocbq;
77	struct lpfc_dmabuf *rmp;
78	struct lpfc_nodelist *ndlp;
79};
80
81struct lpfc_bsg_mbox {
82	LPFC_MBOXQ_t *pmboxq;
83	MAILBOX_t *mb;
84	struct lpfc_dmabuf *dmabuffers; /* for BIU diags */
85	uint8_t *ext; /* extended mailbox data */
86	uint32_t mbOffset; /* from app */
87	uint32_t inExtWLen; /* from app */
88	uint32_t outExtWLen; /* from app */
89};
90
91#define TYPE_EVT 	1
92#define TYPE_IOCB	2
93#define TYPE_MBOX	3
94struct bsg_job_data {
95	uint32_t type;
96	struct bsg_job *set_job; /* job waiting for this iocb to finish */
97	union {
98		struct lpfc_bsg_event *evt;
99		struct lpfc_bsg_iocb iocb;
100		struct lpfc_bsg_mbox mbox;
101	} context_un;
102};
103
104struct event_data {
105	struct list_head node;
106	uint32_t type;
107	uint32_t immed_dat;
108	void *data;
109	uint32_t len;
110};
111
112#define BUF_SZ_4K 4096
113#define SLI_CT_ELX_LOOPBACK 0x10
114
115enum ELX_LOOPBACK_CMD {
116	ELX_LOOPBACK_XRI_SETUP,
117	ELX_LOOPBACK_DATA,
118};
119
120#define ELX_LOOPBACK_HEADER_SZ \
121	(size_t)(&((struct lpfc_sli_ct_request *)NULL)->un)
122
123struct lpfc_dmabufext {
124	struct lpfc_dmabuf dma;
125	uint32_t size;
126	uint32_t flag;
127};
128
129static void
130lpfc_free_bsg_buffers(struct lpfc_hba *phba, struct lpfc_dmabuf *mlist)
131{
132	struct lpfc_dmabuf *mlast, *next_mlast;
133
134	if (mlist) {
135		list_for_each_entry_safe(mlast, next_mlast, &mlist->list,
136					 list) {
137			list_del(&mlast->list);
138			lpfc_mbuf_free(phba, mlast->virt, mlast->phys);
139			kfree(mlast);
140		}
141		lpfc_mbuf_free(phba, mlist->virt, mlist->phys);
142		kfree(mlist);
143	}
144	return;
145}
146
147static struct lpfc_dmabuf *
148lpfc_alloc_bsg_buffers(struct lpfc_hba *phba, unsigned int size,
149		       int outbound_buffers, struct ulp_bde64 *bpl,
150		       int *bpl_entries)
151{
152	struct lpfc_dmabuf *mlist = NULL;
153	struct lpfc_dmabuf *mp;
154	unsigned int bytes_left = size;
155
156	/* Verify we can support the size specified */
157	if (!size || (size > (*bpl_entries * LPFC_BPL_SIZE)))
158		return NULL;
159
160	/* Determine the number of dma buffers to allocate */
161	*bpl_entries = (size % LPFC_BPL_SIZE ? size/LPFC_BPL_SIZE + 1 :
162			size/LPFC_BPL_SIZE);
163
164	/* Allocate dma buffer and place in BPL passed */
165	while (bytes_left) {
166		/* Allocate dma buffer  */
167		mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
168		if (!mp) {
169			if (mlist)
170				lpfc_free_bsg_buffers(phba, mlist);
171			return NULL;
172		}
173
174		INIT_LIST_HEAD(&mp->list);
175		mp->virt = lpfc_mbuf_alloc(phba, MEM_PRI, &(mp->phys));
176
177		if (!mp->virt) {
178			kfree(mp);
179			if (mlist)
180				lpfc_free_bsg_buffers(phba, mlist);
181			return NULL;
182		}
183
184		/* Queue it to a linked list */
185		if (!mlist)
186			mlist = mp;
187		else
188			list_add_tail(&mp->list, &mlist->list);
189
190		/* Add buffer to buffer pointer list */
191		if (outbound_buffers)
192			bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
193		else
194			bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64I;
195		bpl->addrLow = le32_to_cpu(putPaddrLow(mp->phys));
196		bpl->addrHigh = le32_to_cpu(putPaddrHigh(mp->phys));
197		bpl->tus.f.bdeSize = (uint16_t)
198			(bytes_left >= LPFC_BPL_SIZE ? LPFC_BPL_SIZE :
199			 bytes_left);
200		bytes_left -= bpl->tus.f.bdeSize;
201		bpl->tus.w = le32_to_cpu(bpl->tus.w);
202		bpl++;
203	}
204	return mlist;
205}
206
207static unsigned int
208lpfc_bsg_copy_data(struct lpfc_dmabuf *dma_buffers,
209		   struct bsg_buffer *bsg_buffers,
210		   unsigned int bytes_to_transfer, int to_buffers)
211{
212
213	struct lpfc_dmabuf *mp;
214	unsigned int transfer_bytes, bytes_copied = 0;
215	unsigned int sg_offset, dma_offset;
216	unsigned char *dma_address, *sg_address;
217	LIST_HEAD(temp_list);
218	struct sg_mapping_iter miter;
219	unsigned long flags;
220	unsigned int sg_flags = SG_MITER_ATOMIC;
221	bool sg_valid;
222
223	list_splice_init(&dma_buffers->list, &temp_list);
224	list_add(&dma_buffers->list, &temp_list);
225	sg_offset = 0;
226	if (to_buffers)
227		sg_flags |= SG_MITER_FROM_SG;
228	else
229		sg_flags |= SG_MITER_TO_SG;
230	sg_miter_start(&miter, bsg_buffers->sg_list, bsg_buffers->sg_cnt,
231		       sg_flags);
232	local_irq_save(flags);
233	sg_valid = sg_miter_next(&miter);
234	list_for_each_entry(mp, &temp_list, list) {
235		dma_offset = 0;
236		while (bytes_to_transfer && sg_valid &&
237		       (dma_offset < LPFC_BPL_SIZE)) {
238			dma_address = mp->virt + dma_offset;
239			if (sg_offset) {
240				/* Continue previous partial transfer of sg */
241				sg_address = miter.addr + sg_offset;
242				transfer_bytes = miter.length - sg_offset;
243			} else {
244				sg_address = miter.addr;
245				transfer_bytes = miter.length;
246			}
247			if (bytes_to_transfer < transfer_bytes)
248				transfer_bytes = bytes_to_transfer;
249			if (transfer_bytes > (LPFC_BPL_SIZE - dma_offset))
250				transfer_bytes = LPFC_BPL_SIZE - dma_offset;
251			if (to_buffers)
252				memcpy(dma_address, sg_address, transfer_bytes);
253			else
254				memcpy(sg_address, dma_address, transfer_bytes);
255			dma_offset += transfer_bytes;
256			sg_offset += transfer_bytes;
257			bytes_to_transfer -= transfer_bytes;
258			bytes_copied += transfer_bytes;
259			if (sg_offset >= miter.length) {
260				sg_offset = 0;
261				sg_valid = sg_miter_next(&miter);
262			}
263		}
264	}
265	sg_miter_stop(&miter);
266	local_irq_restore(flags);
267	list_del_init(&dma_buffers->list);
268	list_splice(&temp_list, &dma_buffers->list);
269	return bytes_copied;
270}
271
272/**
273 * lpfc_bsg_send_mgmt_cmd_cmp - lpfc_bsg_send_mgmt_cmd's completion handler
274 * @phba: Pointer to HBA context object.
275 * @cmdiocbq: Pointer to command iocb.
276 * @rspiocbq: Pointer to response iocb.
277 *
278 * This function is the completion handler for iocbs issued using
279 * lpfc_bsg_send_mgmt_cmd function. This function is called by the
280 * ring event handler function without any lock held. This function
281 * can be called from both worker thread context and interrupt
282 * context. This function also can be called from another thread which
283 * cleans up the SLI layer objects.
284 * This function copies the contents of the response iocb to the
285 * response iocb memory object provided by the caller of
286 * lpfc_sli_issue_iocb_wait and then wakes up the thread which
287 * sleeps for the iocb completion.
288 **/
289static void
290lpfc_bsg_send_mgmt_cmd_cmp(struct lpfc_hba *phba,
291			struct lpfc_iocbq *cmdiocbq,
292			struct lpfc_iocbq *rspiocbq)
293{
294	struct bsg_job_data *dd_data;
295	struct bsg_job *job;
296	struct fc_bsg_reply *bsg_reply;
297	struct lpfc_dmabuf *bmp, *cmp, *rmp;
298	struct lpfc_nodelist *ndlp;
299	struct lpfc_bsg_iocb *iocb;
300	unsigned long flags;
301	int rc = 0;
302	u32 ulp_status, ulp_word4, total_data_placed;
303
304	dd_data = cmdiocbq->context_un.dd_data;
305
306	/* Determine if job has been aborted */
307	spin_lock_irqsave(&phba->ct_ev_lock, flags);
308	job = dd_data->set_job;
309	if (job) {
310		bsg_reply = job->reply;
311		/* Prevent timeout handling from trying to abort job */
312		job->dd_data = NULL;
313	}
314	spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
315
316	/* Close the timeout handler abort window */
317	spin_lock_irqsave(&phba->hbalock, flags);
318	cmdiocbq->cmd_flag &= ~LPFC_IO_CMD_OUTSTANDING;
319	spin_unlock_irqrestore(&phba->hbalock, flags);
320
321	iocb = &dd_data->context_un.iocb;
322	ndlp = iocb->cmdiocbq->ndlp;
323	rmp = iocb->rmp;
324	cmp = cmdiocbq->cmd_dmabuf;
325	bmp = cmdiocbq->bpl_dmabuf;
326	ulp_status = get_job_ulpstatus(phba, rspiocbq);
327	ulp_word4 = get_job_word4(phba, rspiocbq);
328	total_data_placed = get_job_data_placed(phba, rspiocbq);
329
330	/* Copy the completed data or set the error status */
331
332	if (job) {
333		if (ulp_status) {
334			if (ulp_status == IOSTAT_LOCAL_REJECT) {
335				switch (ulp_word4 & IOERR_PARAM_MASK) {
336				case IOERR_SEQUENCE_TIMEOUT:
337					rc = -ETIMEDOUT;
338					break;
339				case IOERR_INVALID_RPI:
340					rc = -EFAULT;
341					break;
342				default:
343					rc = -EACCES;
344					break;
345				}
346			} else {
347				rc = -EACCES;
348			}
349		} else {
350			bsg_reply->reply_payload_rcv_len =
351				lpfc_bsg_copy_data(rmp, &job->reply_payload,
352						   total_data_placed, 0);
353		}
354	}
355
356	lpfc_free_bsg_buffers(phba, cmp);
357	lpfc_free_bsg_buffers(phba, rmp);
358	lpfc_mbuf_free(phba, bmp->virt, bmp->phys);
359	kfree(bmp);
360	lpfc_nlp_put(ndlp);
361	lpfc_sli_release_iocbq(phba, cmdiocbq);
362	kfree(dd_data);
363
364	/* Complete the job if the job is still active */
365
366	if (job) {
367		bsg_reply->result = rc;
368		bsg_job_done(job, bsg_reply->result,
369			       bsg_reply->reply_payload_rcv_len);
370	}
371	return;
372}
373
374/**
375 * lpfc_bsg_send_mgmt_cmd - send a CT command from a bsg request
376 * @job: fc_bsg_job to handle
377 **/
378static int
379lpfc_bsg_send_mgmt_cmd(struct bsg_job *job)
380{
381	struct lpfc_vport *vport = shost_priv(fc_bsg_to_shost(job));
382	struct lpfc_rport_data *rdata = fc_bsg_to_rport(job)->dd_data;
383	struct lpfc_hba *phba = vport->phba;
384	struct lpfc_nodelist *ndlp = rdata->pnode;
385	struct fc_bsg_reply *bsg_reply = job->reply;
386	struct ulp_bde64 *bpl = NULL;
387	struct lpfc_iocbq *cmdiocbq = NULL;
388	struct lpfc_dmabuf *bmp = NULL, *cmp = NULL, *rmp = NULL;
389	int request_nseg, reply_nseg;
390	u32 num_entry;
391	struct bsg_job_data *dd_data;
392	unsigned long flags;
393	uint32_t creg_val;
394	int rc = 0;
395	int iocb_stat;
396	u16 ulp_context;
397
398	/* in case no data is transferred */
399	bsg_reply->reply_payload_rcv_len = 0;
400
401	if (ndlp->nlp_flag & NLP_ELS_SND_MASK)
402		return -ENODEV;
403
404	/* allocate our bsg tracking structure */
405	dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL);
406	if (!dd_data) {
407		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
408				"2733 Failed allocation of dd_data\n");
409		rc = -ENOMEM;
410		goto no_dd_data;
411	}
412
413	cmdiocbq = lpfc_sli_get_iocbq(phba);
414	if (!cmdiocbq) {
415		rc = -ENOMEM;
416		goto free_dd;
417	}
418
419	bmp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
420	if (!bmp) {
421		rc = -ENOMEM;
422		goto free_cmdiocbq;
423	}
424	bmp->virt = lpfc_mbuf_alloc(phba, 0, &bmp->phys);
425	if (!bmp->virt) {
426		rc = -ENOMEM;
427		goto free_bmp;
428	}
429
430	INIT_LIST_HEAD(&bmp->list);
431
432	bpl = (struct ulp_bde64 *) bmp->virt;
433	request_nseg = LPFC_BPL_SIZE/sizeof(struct ulp_bde64);
434	cmp = lpfc_alloc_bsg_buffers(phba, job->request_payload.payload_len,
435				     1, bpl, &request_nseg);
436	if (!cmp) {
437		rc = -ENOMEM;
438		goto free_bmp;
439	}
440	lpfc_bsg_copy_data(cmp, &job->request_payload,
441			   job->request_payload.payload_len, 1);
442
443	bpl += request_nseg;
444	reply_nseg = LPFC_BPL_SIZE/sizeof(struct ulp_bde64) - request_nseg;
445	rmp = lpfc_alloc_bsg_buffers(phba, job->reply_payload.payload_len, 0,
446				     bpl, &reply_nseg);
447	if (!rmp) {
448		rc = -ENOMEM;
449		goto free_cmp;
450	}
451
452	num_entry = request_nseg + reply_nseg;
453
454	if (phba->sli_rev == LPFC_SLI_REV4)
455		ulp_context = phba->sli4_hba.rpi_ids[ndlp->nlp_rpi];
456	else
457		ulp_context = ndlp->nlp_rpi;
458
459	lpfc_sli_prep_gen_req(phba, cmdiocbq, bmp, ulp_context, num_entry,
460			      phba->fc_ratov * 2);
461
462	cmdiocbq->num_bdes = num_entry;
463	cmdiocbq->vport = phba->pport;
464	cmdiocbq->cmd_dmabuf = cmp;
465	cmdiocbq->bpl_dmabuf = bmp;
466	cmdiocbq->cmd_flag |= LPFC_IO_LIBDFC;
467
468	cmdiocbq->cmd_cmpl = lpfc_bsg_send_mgmt_cmd_cmp;
469	cmdiocbq->context_un.dd_data = dd_data;
470
471	dd_data->type = TYPE_IOCB;
472	dd_data->set_job = job;
473	dd_data->context_un.iocb.cmdiocbq = cmdiocbq;
474	dd_data->context_un.iocb.rmp = rmp;
475	job->dd_data = dd_data;
476
477	if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
478		if (lpfc_readl(phba->HCregaddr, &creg_val)) {
479			rc = -EIO ;
480			goto free_rmp;
481		}
482		creg_val |= (HC_R0INT_ENA << LPFC_FCP_RING);
483		writel(creg_val, phba->HCregaddr);
484		readl(phba->HCregaddr); /* flush */
485	}
486
487	cmdiocbq->ndlp = lpfc_nlp_get(ndlp);
488	if (!cmdiocbq->ndlp) {
489		rc = -ENODEV;
490		goto free_rmp;
491	}
492
493	iocb_stat = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, cmdiocbq, 0);
494	if (iocb_stat == IOCB_SUCCESS) {
495		spin_lock_irqsave(&phba->hbalock, flags);
496		/* make sure the I/O had not been completed yet */
497		if (cmdiocbq->cmd_flag & LPFC_IO_LIBDFC) {
498			/* open up abort window to timeout handler */
499			cmdiocbq->cmd_flag |= LPFC_IO_CMD_OUTSTANDING;
500		}
501		spin_unlock_irqrestore(&phba->hbalock, flags);
502		return 0; /* done for now */
503	} else if (iocb_stat == IOCB_BUSY) {
504		rc = -EAGAIN;
505	} else {
506		rc = -EIO;
507	}
508
509	/* iocb failed so cleanup */
510	lpfc_nlp_put(ndlp);
511
512free_rmp:
513	lpfc_free_bsg_buffers(phba, rmp);
514free_cmp:
515	lpfc_free_bsg_buffers(phba, cmp);
516free_bmp:
517	if (bmp->virt)
518		lpfc_mbuf_free(phba, bmp->virt, bmp->phys);
519	kfree(bmp);
520free_cmdiocbq:
521	lpfc_sli_release_iocbq(phba, cmdiocbq);
522free_dd:
523	kfree(dd_data);
524no_dd_data:
525	/* make error code available to userspace */
526	bsg_reply->result = rc;
527	job->dd_data = NULL;
528	return rc;
529}
530
531/**
532 * lpfc_bsg_rport_els_cmp - lpfc_bsg_rport_els's completion handler
533 * @phba: Pointer to HBA context object.
534 * @cmdiocbq: Pointer to command iocb.
535 * @rspiocbq: Pointer to response iocb.
536 *
537 * This function is the completion handler for iocbs issued using
538 * lpfc_bsg_rport_els_cmp function. This function is called by the
539 * ring event handler function without any lock held. This function
540 * can be called from both worker thread context and interrupt
541 * context. This function also can be called from other thread which
542 * cleans up the SLI layer objects.
543 * This function copies the contents of the response iocb to the
544 * response iocb memory object provided by the caller of
545 * lpfc_sli_issue_iocb_wait and then wakes up the thread which
546 * sleeps for the iocb completion.
547 **/
548static void
549lpfc_bsg_rport_els_cmp(struct lpfc_hba *phba,
550			struct lpfc_iocbq *cmdiocbq,
551			struct lpfc_iocbq *rspiocbq)
552{
553	struct bsg_job_data *dd_data;
554	struct bsg_job *job;
555	struct fc_bsg_reply *bsg_reply;
556	struct lpfc_nodelist *ndlp;
557	struct lpfc_dmabuf *pcmd = NULL, *prsp = NULL;
558	struct fc_bsg_ctels_reply *els_reply;
559	uint8_t *rjt_data;
560	unsigned long flags;
561	unsigned int rsp_size;
562	int rc = 0;
563	u32 ulp_status, ulp_word4, total_data_placed;
564
565	dd_data = cmdiocbq->context_un.dd_data;
566	ndlp = dd_data->context_un.iocb.ndlp;
567	cmdiocbq->ndlp = ndlp;
568
569	/* Determine if job has been aborted */
570	spin_lock_irqsave(&phba->ct_ev_lock, flags);
571	job = dd_data->set_job;
572	if (job) {
573		bsg_reply = job->reply;
574		/* Prevent timeout handling from trying to abort job  */
575		job->dd_data = NULL;
576	}
577	spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
578
579	/* Close the timeout handler abort window */
580	spin_lock_irqsave(&phba->hbalock, flags);
581	cmdiocbq->cmd_flag &= ~LPFC_IO_CMD_OUTSTANDING;
582	spin_unlock_irqrestore(&phba->hbalock, flags);
583
584	ulp_status = get_job_ulpstatus(phba, rspiocbq);
585	ulp_word4 = get_job_word4(phba, rspiocbq);
586	total_data_placed = get_job_data_placed(phba, rspiocbq);
587	pcmd = cmdiocbq->cmd_dmabuf;
588	prsp = (struct lpfc_dmabuf *)pcmd->list.next;
589
590	/* Copy the completed job data or determine the job status if job is
591	 * still active
592	 */
593
594	if (job) {
595		if (ulp_status == IOSTAT_SUCCESS) {
596			rsp_size = total_data_placed;
597			bsg_reply->reply_payload_rcv_len =
598				sg_copy_from_buffer(job->reply_payload.sg_list,
599						    job->reply_payload.sg_cnt,
600						    prsp->virt,
601						    rsp_size);
602		} else if (ulp_status == IOSTAT_LS_RJT) {
603			bsg_reply->reply_payload_rcv_len =
604				sizeof(struct fc_bsg_ctels_reply);
605			/* LS_RJT data returned in word 4 */
606			rjt_data = (uint8_t *)&ulp_word4;
607			els_reply = &bsg_reply->reply_data.ctels_reply;
608			els_reply->status = FC_CTELS_STATUS_REJECT;
609			els_reply->rjt_data.action = rjt_data[3];
610			els_reply->rjt_data.reason_code = rjt_data[2];
611			els_reply->rjt_data.reason_explanation = rjt_data[1];
612			els_reply->rjt_data.vendor_unique = rjt_data[0];
613		} else if (ulp_status == IOSTAT_LOCAL_REJECT &&
614			   (ulp_word4 & IOERR_PARAM_MASK) ==
615			   IOERR_SEQUENCE_TIMEOUT) {
616			rc = -ETIMEDOUT;
617		} else {
618			rc = -EIO;
619		}
620	}
621
622	lpfc_els_free_iocb(phba, cmdiocbq);
623
624	lpfc_nlp_put(ndlp);
625	kfree(dd_data);
626
627	/* Complete the job if the job is still active */
628
629	if (job) {
630		bsg_reply->result = rc;
631		bsg_job_done(job, bsg_reply->result,
632			       bsg_reply->reply_payload_rcv_len);
633	}
634	return;
635}
636
637/**
638 * lpfc_bsg_rport_els - send an ELS command from a bsg request
639 * @job: fc_bsg_job to handle
640 **/
641static int
642lpfc_bsg_rport_els(struct bsg_job *job)
643{
644	struct lpfc_vport *vport = shost_priv(fc_bsg_to_shost(job));
645	struct lpfc_hba *phba = vport->phba;
646	struct lpfc_rport_data *rdata = fc_bsg_to_rport(job)->dd_data;
647	struct lpfc_nodelist *ndlp = rdata->pnode;
648	struct fc_bsg_request *bsg_request = job->request;
649	struct fc_bsg_reply *bsg_reply = job->reply;
650	uint32_t elscmd;
651	uint32_t cmdsize;
652	struct lpfc_iocbq *cmdiocbq;
653	uint16_t rpi = 0;
654	struct bsg_job_data *dd_data;
655	unsigned long flags;
656	uint32_t creg_val;
657	int rc = 0;
658
659	/* in case no data is transferred */
660	bsg_reply->reply_payload_rcv_len = 0;
661
662	/* verify the els command is not greater than the
663	 * maximum ELS transfer size.
664	 */
665
666	if (job->request_payload.payload_len > FCELSSIZE) {
667		rc = -EINVAL;
668		goto no_dd_data;
669	}
670
671	/* allocate our bsg tracking structure */
672	dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL);
673	if (!dd_data) {
674		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
675				"2735 Failed allocation of dd_data\n");
676		rc = -ENOMEM;
677		goto no_dd_data;
678	}
679
680	elscmd = bsg_request->rqst_data.r_els.els_code;
681	cmdsize = job->request_payload.payload_len;
682
683	if (!lpfc_nlp_get(ndlp)) {
684		rc = -ENODEV;
685		goto free_dd_data;
686	}
687
688	/* We will use the allocated dma buffers by prep els iocb for command
689	 * and response to ensure if the job times out and the request is freed,
690	 * we won't be dma into memory that is no longer allocated to for the
691	 * request.
692	 */
693	cmdiocbq = lpfc_prep_els_iocb(vport, 1, cmdsize, 0, ndlp,
694				      ndlp->nlp_DID, elscmd);
695	if (!cmdiocbq) {
696		rc = -EIO;
697		goto release_ndlp;
698	}
699
700	/* Transfer the request payload to allocated command dma buffer */
701	sg_copy_to_buffer(job->request_payload.sg_list,
702			  job->request_payload.sg_cnt,
703			  cmdiocbq->cmd_dmabuf->virt,
704			  cmdsize);
705
706	rpi = ndlp->nlp_rpi;
707
708	if (phba->sli_rev == LPFC_SLI_REV4)
709		bf_set(wqe_ctxt_tag, &cmdiocbq->wqe.generic.wqe_com,
710		       phba->sli4_hba.rpi_ids[rpi]);
711	else
712		cmdiocbq->iocb.ulpContext = rpi;
713	cmdiocbq->cmd_flag |= LPFC_IO_LIBDFC;
714	cmdiocbq->context_un.dd_data = dd_data;
715	cmdiocbq->ndlp = ndlp;
716	cmdiocbq->cmd_cmpl = lpfc_bsg_rport_els_cmp;
717	dd_data->type = TYPE_IOCB;
718	dd_data->set_job = job;
719	dd_data->context_un.iocb.cmdiocbq = cmdiocbq;
720	dd_data->context_un.iocb.ndlp = ndlp;
721	dd_data->context_un.iocb.rmp = NULL;
722	job->dd_data = dd_data;
723
724	if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
725		if (lpfc_readl(phba->HCregaddr, &creg_val)) {
726			rc = -EIO;
727			goto linkdown_err;
728		}
729		creg_val |= (HC_R0INT_ENA << LPFC_FCP_RING);
730		writel(creg_val, phba->HCregaddr);
731		readl(phba->HCregaddr); /* flush */
732	}
733
734	rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, cmdiocbq, 0);
735	if (rc == IOCB_SUCCESS) {
736		spin_lock_irqsave(&phba->hbalock, flags);
737		/* make sure the I/O had not been completed/released */
738		if (cmdiocbq->cmd_flag & LPFC_IO_LIBDFC) {
739			/* open up abort window to timeout handler */
740			cmdiocbq->cmd_flag |= LPFC_IO_CMD_OUTSTANDING;
741		}
742		spin_unlock_irqrestore(&phba->hbalock, flags);
743		return 0; /* done for now */
744	} else if (rc == IOCB_BUSY) {
745		rc = -EAGAIN;
746	} else {
747		rc = -EIO;
748	}
749
750	/* I/O issue failed.  Cleanup resources. */
751
752linkdown_err:
753	lpfc_els_free_iocb(phba, cmdiocbq);
754
755release_ndlp:
756	lpfc_nlp_put(ndlp);
757
758free_dd_data:
759	kfree(dd_data);
760
761no_dd_data:
762	/* make error code available to userspace */
763	bsg_reply->result = rc;
764	job->dd_data = NULL;
765	return rc;
766}
767
768/**
769 * lpfc_bsg_event_free - frees an allocated event structure
770 * @kref: Pointer to a kref.
771 *
772 * Called from kref_put. Back cast the kref into an event structure address.
773 * Free any events to get, delete associated nodes, free any events to see,
774 * free any data then free the event itself.
775 **/
776static void
777lpfc_bsg_event_free(struct kref *kref)
778{
779	struct lpfc_bsg_event *evt = container_of(kref, struct lpfc_bsg_event,
780						  kref);
781	struct event_data *ed;
782
783	list_del(&evt->node);
784
785	while (!list_empty(&evt->events_to_get)) {
786		ed = list_entry(evt->events_to_get.next, typeof(*ed), node);
787		list_del(&ed->node);
788		kfree(ed->data);
789		kfree(ed);
790	}
791
792	while (!list_empty(&evt->events_to_see)) {
793		ed = list_entry(evt->events_to_see.next, typeof(*ed), node);
794		list_del(&ed->node);
795		kfree(ed->data);
796		kfree(ed);
797	}
798
799	kfree(evt->dd_data);
800	kfree(evt);
801}
802
803/**
804 * lpfc_bsg_event_ref - increments the kref for an event
805 * @evt: Pointer to an event structure.
806 **/
807static inline void
808lpfc_bsg_event_ref(struct lpfc_bsg_event *evt)
809{
810	kref_get(&evt->kref);
811}
812
813/**
814 * lpfc_bsg_event_unref - Uses kref_put to free an event structure
815 * @evt: Pointer to an event structure.
816 **/
817static inline void
818lpfc_bsg_event_unref(struct lpfc_bsg_event *evt)
819{
820	kref_put(&evt->kref, lpfc_bsg_event_free);
821}
822
823/**
824 * lpfc_bsg_event_new - allocate and initialize a event structure
825 * @ev_mask: Mask of events.
826 * @ev_reg_id: Event reg id.
827 * @ev_req_id: Event request id.
828 **/
829static struct lpfc_bsg_event *
830lpfc_bsg_event_new(uint32_t ev_mask, int ev_reg_id, uint32_t ev_req_id)
831{
832	struct lpfc_bsg_event *evt = kzalloc(sizeof(*evt), GFP_KERNEL);
833
834	if (!evt)
835		return NULL;
836
837	INIT_LIST_HEAD(&evt->events_to_get);
838	INIT_LIST_HEAD(&evt->events_to_see);
839	evt->type_mask = ev_mask;
840	evt->req_id = ev_req_id;
841	evt->reg_id = ev_reg_id;
842	evt->wait_time_stamp = jiffies;
843	evt->dd_data = NULL;
844	init_waitqueue_head(&evt->wq);
845	kref_init(&evt->kref);
846	return evt;
847}
848
849/**
850 * diag_cmd_data_free - Frees an lpfc dma buffer extension
851 * @phba: Pointer to HBA context object.
852 * @mlist: Pointer to an lpfc dma buffer extension.
853 **/
854static int
855diag_cmd_data_free(struct lpfc_hba *phba, struct lpfc_dmabufext *mlist)
856{
857	struct lpfc_dmabufext *mlast;
858	struct pci_dev *pcidev;
859	struct list_head head, *curr, *next;
860
861	if ((!mlist) || (!lpfc_is_link_up(phba) &&
862		(phba->link_flag & LS_LOOPBACK_MODE))) {
863		return 0;
864	}
865
866	pcidev = phba->pcidev;
867	list_add_tail(&head, &mlist->dma.list);
868
869	list_for_each_safe(curr, next, &head) {
870		mlast = list_entry(curr, struct lpfc_dmabufext , dma.list);
871		if (mlast->dma.virt)
872			dma_free_coherent(&pcidev->dev,
873					  mlast->size,
874					  mlast->dma.virt,
875					  mlast->dma.phys);
876		kfree(mlast);
877	}
878	return 0;
879}
880
881/*
882 * lpfc_bsg_ct_unsol_event - process an unsolicited CT command
883 *
884 * This function is called when an unsolicited CT command is received.  It
885 * forwards the event to any processes registered to receive CT events.
886 **/
887int
888lpfc_bsg_ct_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
889			struct lpfc_iocbq *piocbq)
890{
891	uint32_t evt_req_id = 0;
892	u16 cmd;
893	struct lpfc_dmabuf *dmabuf = NULL;
894	struct lpfc_bsg_event *evt;
895	struct event_data *evt_dat = NULL;
896	struct lpfc_iocbq *iocbq;
897	IOCB_t *iocb = NULL;
898	size_t offset = 0;
899	struct list_head head;
900	struct ulp_bde64 *bde;
901	dma_addr_t dma_addr;
902	int i;
903	struct lpfc_dmabuf *bdeBuf1 = piocbq->cmd_dmabuf;
904	struct lpfc_dmabuf *bdeBuf2 = piocbq->bpl_dmabuf;
905	struct lpfc_sli_ct_request *ct_req;
906	struct bsg_job *job = NULL;
907	struct fc_bsg_reply *bsg_reply;
908	struct bsg_job_data *dd_data = NULL;
909	unsigned long flags;
910	int size = 0;
911	u32 bde_count = 0;
912
913	INIT_LIST_HEAD(&head);
914	list_add_tail(&head, &piocbq->list);
915
916	ct_req = (struct lpfc_sli_ct_request *)bdeBuf1->virt;
917	evt_req_id = ct_req->FsType;
918	cmd = be16_to_cpu(ct_req->CommandResponse.bits.CmdRsp);
919
920	spin_lock_irqsave(&phba->ct_ev_lock, flags);
921	list_for_each_entry(evt, &phba->ct_ev_waiters, node) {
922		if (!(evt->type_mask & FC_REG_CT_EVENT) ||
923			evt->req_id != evt_req_id)
924			continue;
925
926		lpfc_bsg_event_ref(evt);
927		spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
928		evt_dat = kzalloc(sizeof(*evt_dat), GFP_KERNEL);
929		if (evt_dat == NULL) {
930			spin_lock_irqsave(&phba->ct_ev_lock, flags);
931			lpfc_bsg_event_unref(evt);
932			lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
933					"2614 Memory allocation failed for "
934					"CT event\n");
935			break;
936		}
937
938		if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
939			/* take accumulated byte count from the last iocbq */
940			iocbq = list_entry(head.prev, typeof(*iocbq), list);
941			if (phba->sli_rev == LPFC_SLI_REV4)
942				evt_dat->len = iocbq->wcqe_cmpl.total_data_placed;
943			else
944				evt_dat->len = iocbq->iocb.unsli3.rcvsli3.acc_len;
945		} else {
946			list_for_each_entry(iocbq, &head, list) {
947				iocb = &iocbq->iocb;
948				for (i = 0; i < iocb->ulpBdeCount;
949				     i++)
950					evt_dat->len +=
951					iocb->un.cont64[i].tus.f.bdeSize;
952			}
953		}
954
955		evt_dat->data = kzalloc(evt_dat->len, GFP_KERNEL);
956		if (evt_dat->data == NULL) {
957			lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
958					"2615 Memory allocation failed for "
959					"CT event data, size %d\n",
960					evt_dat->len);
961			kfree(evt_dat);
962			spin_lock_irqsave(&phba->ct_ev_lock, flags);
963			lpfc_bsg_event_unref(evt);
964			spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
965			goto error_ct_unsol_exit;
966		}
967
968		list_for_each_entry(iocbq, &head, list) {
969			size = 0;
970			if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
971				bdeBuf1 = iocbq->cmd_dmabuf;
972				bdeBuf2 = iocbq->bpl_dmabuf;
973			}
974			if (phba->sli_rev == LPFC_SLI_REV4)
975				bde_count = iocbq->wcqe_cmpl.word3;
976			else
977				bde_count = iocbq->iocb.ulpBdeCount;
978			for (i = 0; i < bde_count; i++) {
979				if (phba->sli3_options &
980				    LPFC_SLI3_HBQ_ENABLED) {
981					if (i == 0) {
982						size = iocbq->wqe.gen_req.bde.tus.f.bdeSize;
983						dmabuf = bdeBuf1;
984					} else if (i == 1) {
985						size = iocbq->unsol_rcv_len;
986						dmabuf = bdeBuf2;
987					}
988					if ((offset + size) > evt_dat->len)
989						size = evt_dat->len - offset;
990				} else {
991					size = iocbq->iocb.un.cont64[i].
992						tus.f.bdeSize;
993					bde = &iocbq->iocb.un.cont64[i];
994					dma_addr = getPaddr(bde->addrHigh,
995							    bde->addrLow);
996					dmabuf = lpfc_sli_ringpostbuf_get(phba,
997							pring, dma_addr);
998				}
999				if (!dmabuf) {
1000					lpfc_printf_log(phba, KERN_ERR,
1001						LOG_LIBDFC, "2616 No dmabuf "
1002						"found for iocbq x%px\n",
1003						iocbq);
1004					kfree(evt_dat->data);
1005					kfree(evt_dat);
1006					spin_lock_irqsave(&phba->ct_ev_lock,
1007						flags);
1008					lpfc_bsg_event_unref(evt);
1009					spin_unlock_irqrestore(
1010						&phba->ct_ev_lock, flags);
1011					goto error_ct_unsol_exit;
1012				}
1013				memcpy((char *)(evt_dat->data) + offset,
1014				       dmabuf->virt, size);
1015				offset += size;
1016				if (evt_req_id != SLI_CT_ELX_LOOPBACK &&
1017				    !(phba->sli3_options &
1018				      LPFC_SLI3_HBQ_ENABLED)) {
1019					lpfc_sli_ringpostbuf_put(phba, pring,
1020								 dmabuf);
1021				} else {
1022					switch (cmd) {
1023					case ELX_LOOPBACK_DATA:
1024						if (phba->sli_rev <
1025						    LPFC_SLI_REV4)
1026							diag_cmd_data_free(phba,
1027							(struct lpfc_dmabufext
1028							 *)dmabuf);
1029						break;
1030					case ELX_LOOPBACK_XRI_SETUP:
1031						if ((phba->sli_rev ==
1032							LPFC_SLI_REV2) ||
1033							(phba->sli3_options &
1034							LPFC_SLI3_HBQ_ENABLED
1035							)) {
1036							lpfc_in_buf_free(phba,
1037									dmabuf);
1038						} else {
1039							lpfc_sli3_post_buffer(phba,
1040									      pring,
1041									      1);
1042						}
1043						break;
1044					default:
1045						if (!(phba->sli3_options &
1046						      LPFC_SLI3_HBQ_ENABLED))
1047							lpfc_sli3_post_buffer(phba,
1048									      pring,
1049									      1);
1050						break;
1051					}
1052				}
1053			}
1054		}
1055
1056		spin_lock_irqsave(&phba->ct_ev_lock, flags);
1057		if (phba->sli_rev == LPFC_SLI_REV4) {
1058			evt_dat->immed_dat = phba->ctx_idx;
1059			phba->ctx_idx = (phba->ctx_idx + 1) % LPFC_CT_CTX_MAX;
1060			/* Provide warning for over-run of the ct_ctx array */
1061			if (phba->ct_ctx[evt_dat->immed_dat].valid ==
1062			    UNSOL_VALID)
1063				lpfc_printf_log(phba, KERN_WARNING, LOG_ELS,
1064						"2717 CT context array entry "
1065						"[%d] over-run: oxid:x%x, "
1066						"sid:x%x\n", phba->ctx_idx,
1067						phba->ct_ctx[
1068						    evt_dat->immed_dat].oxid,
1069						phba->ct_ctx[
1070						    evt_dat->immed_dat].SID);
1071			phba->ct_ctx[evt_dat->immed_dat].rxid =
1072				get_job_ulpcontext(phba, piocbq);
1073			phba->ct_ctx[evt_dat->immed_dat].oxid =
1074				get_job_rcvoxid(phba, piocbq);
1075			phba->ct_ctx[evt_dat->immed_dat].SID =
1076				bf_get(wqe_els_did,
1077				       &piocbq->wqe.xmit_els_rsp.wqe_dest);
1078			phba->ct_ctx[evt_dat->immed_dat].valid = UNSOL_VALID;
1079		} else
1080			evt_dat->immed_dat = get_job_ulpcontext(phba, piocbq);
1081
1082		evt_dat->type = FC_REG_CT_EVENT;
1083		list_add(&evt_dat->node, &evt->events_to_see);
1084		if (evt_req_id == SLI_CT_ELX_LOOPBACK) {
1085			wake_up_interruptible(&evt->wq);
1086			lpfc_bsg_event_unref(evt);
1087			break;
1088		}
1089
1090		list_move(evt->events_to_see.prev, &evt->events_to_get);
1091
1092		dd_data = (struct bsg_job_data *)evt->dd_data;
1093		job = dd_data->set_job;
1094		dd_data->set_job = NULL;
1095		lpfc_bsg_event_unref(evt);
1096		if (job) {
1097			bsg_reply = job->reply;
1098			bsg_reply->reply_payload_rcv_len = size;
1099			/* make error code available to userspace */
1100			bsg_reply->result = 0;
1101			job->dd_data = NULL;
1102			/* complete the job back to userspace */
1103			spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
1104			bsg_job_done(job, bsg_reply->result,
1105				       bsg_reply->reply_payload_rcv_len);
1106			spin_lock_irqsave(&phba->ct_ev_lock, flags);
1107		}
1108	}
1109	spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
1110
1111error_ct_unsol_exit:
1112	if (!list_empty(&head))
1113		list_del(&head);
1114	if ((phba->sli_rev < LPFC_SLI_REV4) &&
1115	    (evt_req_id == SLI_CT_ELX_LOOPBACK))
1116		return 0;
1117	return 1;
1118}
1119
1120/**
1121 * lpfc_bsg_ct_unsol_abort - handler ct abort to management plane
1122 * @phba: Pointer to HBA context object.
1123 * @dmabuf: pointer to a dmabuf that describes the FC sequence
1124 *
1125 * This function handles abort to the CT command toward management plane
1126 * for SLI4 port.
1127 *
1128 * If the pending context of a CT command to management plane present, clears
1129 * such context and returns 1 for handled; otherwise, it returns 0 indicating
1130 * no context exists.
1131 **/
1132int
1133lpfc_bsg_ct_unsol_abort(struct lpfc_hba *phba, struct hbq_dmabuf *dmabuf)
1134{
1135	struct fc_frame_header fc_hdr;
1136	struct fc_frame_header *fc_hdr_ptr = &fc_hdr;
1137	int ctx_idx, handled = 0;
1138	uint16_t oxid, rxid;
1139	uint32_t sid;
1140
1141	memcpy(fc_hdr_ptr, dmabuf->hbuf.virt, sizeof(struct fc_frame_header));
1142	sid = sli4_sid_from_fc_hdr(fc_hdr_ptr);
1143	oxid = be16_to_cpu(fc_hdr_ptr->fh_ox_id);
1144	rxid = be16_to_cpu(fc_hdr_ptr->fh_rx_id);
1145
1146	for (ctx_idx = 0; ctx_idx < LPFC_CT_CTX_MAX; ctx_idx++) {
1147		if (phba->ct_ctx[ctx_idx].valid != UNSOL_VALID)
1148			continue;
1149		if (phba->ct_ctx[ctx_idx].rxid != rxid)
1150			continue;
1151		if (phba->ct_ctx[ctx_idx].oxid != oxid)
1152			continue;
1153		if (phba->ct_ctx[ctx_idx].SID != sid)
1154			continue;
1155		phba->ct_ctx[ctx_idx].valid = UNSOL_INVALID;
1156		handled = 1;
1157	}
1158	return handled;
1159}
1160
1161/**
1162 * lpfc_bsg_hba_set_event - process a SET_EVENT bsg vendor command
1163 * @job: SET_EVENT fc_bsg_job
1164 **/
1165static int
1166lpfc_bsg_hba_set_event(struct bsg_job *job)
1167{
1168	struct lpfc_vport *vport = shost_priv(fc_bsg_to_shost(job));
1169	struct lpfc_hba *phba = vport->phba;
1170	struct fc_bsg_request *bsg_request = job->request;
1171	struct set_ct_event *event_req;
1172	struct lpfc_bsg_event *evt;
1173	int rc = 0;
1174	struct bsg_job_data *dd_data = NULL;
1175	uint32_t ev_mask;
1176	unsigned long flags;
1177
1178	if (job->request_len <
1179	    sizeof(struct fc_bsg_request) + sizeof(struct set_ct_event)) {
1180		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
1181				"2612 Received SET_CT_EVENT below minimum "
1182				"size\n");
1183		rc = -EINVAL;
1184		goto job_error;
1185	}
1186
1187	event_req = (struct set_ct_event *)
1188		bsg_request->rqst_data.h_vendor.vendor_cmd;
1189	ev_mask = ((uint32_t)(unsigned long)event_req->type_mask &
1190				FC_REG_EVENT_MASK);
1191	spin_lock_irqsave(&phba->ct_ev_lock, flags);
1192	list_for_each_entry(evt, &phba->ct_ev_waiters, node) {
1193		if (evt->reg_id == event_req->ev_reg_id) {
1194			lpfc_bsg_event_ref(evt);
1195			evt->wait_time_stamp = jiffies;
1196			dd_data = (struct bsg_job_data *)evt->dd_data;
1197			break;
1198		}
1199	}
1200	spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
1201
1202	if (&evt->node == &phba->ct_ev_waiters) {
1203		/* no event waiting struct yet - first call */
1204		dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL);
1205		if (dd_data == NULL) {
1206			lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
1207					"2734 Failed allocation of dd_data\n");
1208			rc = -ENOMEM;
1209			goto job_error;
1210		}
1211		evt = lpfc_bsg_event_new(ev_mask, event_req->ev_reg_id,
1212					event_req->ev_req_id);
1213		if (!evt) {
1214			lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
1215					"2617 Failed allocation of event "
1216					"waiter\n");
1217			rc = -ENOMEM;
1218			goto job_error;
1219		}
1220		dd_data->type = TYPE_EVT;
1221		dd_data->set_job = NULL;
1222		dd_data->context_un.evt = evt;
1223		evt->dd_data = (void *)dd_data;
1224		spin_lock_irqsave(&phba->ct_ev_lock, flags);
1225		list_add(&evt->node, &phba->ct_ev_waiters);
1226		lpfc_bsg_event_ref(evt);
1227		evt->wait_time_stamp = jiffies;
1228		spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
1229	}
1230
1231	spin_lock_irqsave(&phba->ct_ev_lock, flags);
1232	evt->waiting = 1;
1233	dd_data->set_job = job; /* for unsolicited command */
1234	job->dd_data = dd_data; /* for fc transport timeout callback*/
1235	spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
1236	return 0; /* call job done later */
1237
1238job_error:
1239	kfree(dd_data);
1240	job->dd_data = NULL;
1241	return rc;
1242}
1243
1244/**
1245 * lpfc_bsg_hba_get_event - process a GET_EVENT bsg vendor command
1246 * @job: GET_EVENT fc_bsg_job
1247 **/
1248static int
1249lpfc_bsg_hba_get_event(struct bsg_job *job)
1250{
1251	struct lpfc_vport *vport = shost_priv(fc_bsg_to_shost(job));
1252	struct lpfc_hba *phba = vport->phba;
1253	struct fc_bsg_request *bsg_request = job->request;
1254	struct fc_bsg_reply *bsg_reply = job->reply;
1255	struct get_ct_event *event_req;
1256	struct get_ct_event_reply *event_reply;
1257	struct lpfc_bsg_event *evt, *evt_next;
1258	struct event_data *evt_dat = NULL;
1259	unsigned long flags;
1260	uint32_t rc = 0;
1261
1262	if (job->request_len <
1263	    sizeof(struct fc_bsg_request) + sizeof(struct get_ct_event)) {
1264		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
1265				"2613 Received GET_CT_EVENT request below "
1266				"minimum size\n");
1267		rc = -EINVAL;
1268		goto job_error;
1269	}
1270
1271	event_req = (struct get_ct_event *)
1272		bsg_request->rqst_data.h_vendor.vendor_cmd;
1273
1274	event_reply = (struct get_ct_event_reply *)
1275		bsg_reply->reply_data.vendor_reply.vendor_rsp;
1276	spin_lock_irqsave(&phba->ct_ev_lock, flags);
1277	list_for_each_entry_safe(evt, evt_next, &phba->ct_ev_waiters, node) {
1278		if (evt->reg_id == event_req->ev_reg_id) {
1279			if (list_empty(&evt->events_to_get))
1280				break;
1281			lpfc_bsg_event_ref(evt);
1282			evt->wait_time_stamp = jiffies;
1283			evt_dat = list_entry(evt->events_to_get.prev,
1284					     struct event_data, node);
1285			list_del(&evt_dat->node);
1286			break;
1287		}
1288	}
1289	spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
1290
1291	/* The app may continue to ask for event data until it gets
1292	 * an error indicating that there isn't anymore
1293	 */
1294	if (evt_dat == NULL) {
1295		bsg_reply->reply_payload_rcv_len = 0;
1296		rc = -ENOENT;
1297		goto job_error;
1298	}
1299
1300	if (evt_dat->len > job->request_payload.payload_len) {
1301		evt_dat->len = job->request_payload.payload_len;
1302		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
1303				"2618 Truncated event data at %d "
1304				"bytes\n",
1305				job->request_payload.payload_len);
1306	}
1307
1308	event_reply->type = evt_dat->type;
1309	event_reply->immed_data = evt_dat->immed_dat;
1310	if (evt_dat->len > 0)
1311		bsg_reply->reply_payload_rcv_len =
1312			sg_copy_from_buffer(job->request_payload.sg_list,
1313					    job->request_payload.sg_cnt,
1314					    evt_dat->data, evt_dat->len);
1315	else
1316		bsg_reply->reply_payload_rcv_len = 0;
1317
1318	if (evt_dat) {
1319		kfree(evt_dat->data);
1320		kfree(evt_dat);
1321	}
1322
1323	spin_lock_irqsave(&phba->ct_ev_lock, flags);
1324	lpfc_bsg_event_unref(evt);
1325	spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
1326	job->dd_data = NULL;
1327	bsg_reply->result = 0;
1328	bsg_job_done(job, bsg_reply->result,
1329		       bsg_reply->reply_payload_rcv_len);
1330	return 0;
1331
1332job_error:
1333	job->dd_data = NULL;
1334	bsg_reply->result = rc;
1335	return rc;
1336}
1337
1338/**
1339 * lpfc_issue_ct_rsp_cmp - lpfc_issue_ct_rsp's completion handler
1340 * @phba: Pointer to HBA context object.
1341 * @cmdiocbq: Pointer to command iocb.
1342 * @rspiocbq: Pointer to response iocb.
1343 *
1344 * This function is the completion handler for iocbs issued using
1345 * lpfc_issue_ct_rsp_cmp function. This function is called by the
1346 * ring event handler function without any lock held. This function
1347 * can be called from both worker thread context and interrupt
1348 * context. This function also can be called from other thread which
1349 * cleans up the SLI layer objects.
1350 * This function copy the contents of the response iocb to the
1351 * response iocb memory object provided by the caller of
1352 * lpfc_sli_issue_iocb_wait and then wakes up the thread which
1353 * sleeps for the iocb completion.
1354 **/
1355static void
1356lpfc_issue_ct_rsp_cmp(struct lpfc_hba *phba,
1357			struct lpfc_iocbq *cmdiocbq,
1358			struct lpfc_iocbq *rspiocbq)
1359{
1360	struct bsg_job_data *dd_data;
1361	struct bsg_job *job;
1362	struct fc_bsg_reply *bsg_reply;
1363	struct lpfc_dmabuf *bmp, *cmp;
1364	struct lpfc_nodelist *ndlp;
1365	unsigned long flags;
1366	int rc = 0;
1367	u32 ulp_status, ulp_word4;
1368
1369	dd_data = cmdiocbq->context_un.dd_data;
1370
1371	/* Determine if job has been aborted */
1372	spin_lock_irqsave(&phba->ct_ev_lock, flags);
1373	job = dd_data->set_job;
1374	if (job) {
1375		/* Prevent timeout handling from trying to abort job  */
1376		job->dd_data = NULL;
1377	}
1378	spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
1379
1380	/* Close the timeout handler abort window */
1381	spin_lock_irqsave(&phba->hbalock, flags);
1382	cmdiocbq->cmd_flag &= ~LPFC_IO_CMD_OUTSTANDING;
1383	spin_unlock_irqrestore(&phba->hbalock, flags);
1384
1385	ndlp = dd_data->context_un.iocb.ndlp;
1386	cmp = cmdiocbq->cmd_dmabuf;
1387	bmp = cmdiocbq->bpl_dmabuf;
1388
1389	ulp_status = get_job_ulpstatus(phba, rspiocbq);
1390	ulp_word4 = get_job_word4(phba, rspiocbq);
1391
1392	/* Copy the completed job data or set the error status */
1393
1394	if (job) {
1395		bsg_reply = job->reply;
1396		if (ulp_status) {
1397			if (ulp_status == IOSTAT_LOCAL_REJECT) {
1398				switch (ulp_word4 & IOERR_PARAM_MASK) {
1399				case IOERR_SEQUENCE_TIMEOUT:
1400					rc = -ETIMEDOUT;
1401					break;
1402				case IOERR_INVALID_RPI:
1403					rc = -EFAULT;
1404					break;
1405				default:
1406					rc = -EACCES;
1407					break;
1408				}
1409			} else {
1410				rc = -EACCES;
1411			}
1412		} else {
1413			bsg_reply->reply_payload_rcv_len = 0;
1414		}
1415	}
1416
1417	lpfc_free_bsg_buffers(phba, cmp);
1418	lpfc_mbuf_free(phba, bmp->virt, bmp->phys);
1419	kfree(bmp);
1420	lpfc_sli_release_iocbq(phba, cmdiocbq);
1421	lpfc_nlp_put(ndlp);
1422	kfree(dd_data);
1423
1424	/* Complete the job if the job is still active */
1425
1426	if (job) {
1427		bsg_reply->result = rc;
1428		bsg_job_done(job, bsg_reply->result,
1429			       bsg_reply->reply_payload_rcv_len);
1430	}
1431	return;
1432}
1433
1434/**
1435 * lpfc_issue_ct_rsp - issue a ct response
1436 * @phba: Pointer to HBA context object.
1437 * @job: Pointer to the job object.
1438 * @tag: tag index value into the ports context exchange array.
1439 * @cmp: Pointer to a cmp dma buffer descriptor.
1440 * @bmp: Pointer to a bmp dma buffer descriptor.
1441 * @num_entry: Number of enties in the bde.
1442 **/
1443static int
1444lpfc_issue_ct_rsp(struct lpfc_hba *phba, struct bsg_job *job, uint32_t tag,
1445		  struct lpfc_dmabuf *cmp, struct lpfc_dmabuf *bmp,
1446		  int num_entry)
1447{
1448	struct lpfc_iocbq *ctiocb = NULL;
1449	int rc = 0;
1450	struct lpfc_nodelist *ndlp = NULL;
1451	struct bsg_job_data *dd_data;
1452	unsigned long flags;
1453	uint32_t creg_val;
1454	u16 ulp_context, iotag;
1455
1456	ndlp = lpfc_findnode_did(phba->pport, phba->ct_ctx[tag].SID);
1457	if (!ndlp) {
1458		lpfc_printf_log(phba, KERN_WARNING, LOG_ELS,
1459				"2721 ndlp null for oxid %x SID %x\n",
1460				phba->ct_ctx[tag].rxid,
1461				phba->ct_ctx[tag].SID);
1462		return IOCB_ERROR;
1463	}
1464
1465	/* allocate our bsg tracking structure */
1466	dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL);
1467	if (!dd_data) {
1468		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
1469				"2736 Failed allocation of dd_data\n");
1470		rc = -ENOMEM;
1471		goto no_dd_data;
1472	}
1473
1474	/* Allocate buffer for  command iocb */
1475	ctiocb = lpfc_sli_get_iocbq(phba);
1476	if (!ctiocb) {
1477		rc = -ENOMEM;
1478		goto no_ctiocb;
1479	}
1480
1481	if (phba->sli_rev == LPFC_SLI_REV4) {
1482		/* Do not issue unsol response if oxid not marked as valid */
1483		if (phba->ct_ctx[tag].valid != UNSOL_VALID) {
1484			rc = IOCB_ERROR;
1485			goto issue_ct_rsp_exit;
1486		}
1487
1488		lpfc_sli_prep_xmit_seq64(phba, ctiocb, bmp,
1489					 phba->sli4_hba.rpi_ids[ndlp->nlp_rpi],
1490					 phba->ct_ctx[tag].oxid, num_entry,
1491					 FC_RCTL_DD_SOL_CTL, 1,
1492					 CMD_XMIT_SEQUENCE64_WQE);
1493
1494		/* The exchange is done, mark the entry as invalid */
1495		phba->ct_ctx[tag].valid = UNSOL_INVALID;
1496		iotag = get_wqe_reqtag(ctiocb);
1497	} else {
1498		lpfc_sli_prep_xmit_seq64(phba, ctiocb, bmp, 0, tag, num_entry,
1499					 FC_RCTL_DD_SOL_CTL, 1,
1500					 CMD_XMIT_SEQUENCE64_CX);
1501		ctiocb->num_bdes = num_entry;
1502		iotag = ctiocb->iocb.ulpIoTag;
1503	}
1504
1505	ulp_context = get_job_ulpcontext(phba, ctiocb);
1506
1507	/* Xmit CT response on exchange <xid> */
1508	lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
1509			"2722 Xmit CT response on exchange x%x Data: x%x x%x x%x\n",
1510			ulp_context, iotag, tag, phba->link_state);
1511
1512	ctiocb->cmd_flag |= LPFC_IO_LIBDFC;
1513	ctiocb->vport = phba->pport;
1514	ctiocb->context_un.dd_data = dd_data;
1515	ctiocb->cmd_dmabuf = cmp;
1516	ctiocb->bpl_dmabuf = bmp;
1517	ctiocb->ndlp = ndlp;
1518	ctiocb->cmd_cmpl = lpfc_issue_ct_rsp_cmp;
1519
1520	dd_data->type = TYPE_IOCB;
1521	dd_data->set_job = job;
1522	dd_data->context_un.iocb.cmdiocbq = ctiocb;
1523	dd_data->context_un.iocb.ndlp = lpfc_nlp_get(ndlp);
1524	if (!dd_data->context_un.iocb.ndlp) {
1525		rc = -IOCB_ERROR;
1526		goto issue_ct_rsp_exit;
1527	}
1528	dd_data->context_un.iocb.rmp = NULL;
1529	job->dd_data = dd_data;
1530
1531	if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
1532		if (lpfc_readl(phba->HCregaddr, &creg_val)) {
1533			rc = -IOCB_ERROR;
1534			goto issue_ct_rsp_exit;
1535		}
1536		creg_val |= (HC_R0INT_ENA << LPFC_FCP_RING);
1537		writel(creg_val, phba->HCregaddr);
1538		readl(phba->HCregaddr); /* flush */
1539	}
1540
1541	rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, ctiocb, 0);
1542	if (rc == IOCB_SUCCESS) {
1543		spin_lock_irqsave(&phba->hbalock, flags);
1544		/* make sure the I/O had not been completed/released */
1545		if (ctiocb->cmd_flag & LPFC_IO_LIBDFC) {
1546			/* open up abort window to timeout handler */
1547			ctiocb->cmd_flag |= LPFC_IO_CMD_OUTSTANDING;
1548		}
1549		spin_unlock_irqrestore(&phba->hbalock, flags);
1550		return 0; /* done for now */
1551	}
1552
1553	/* iocb failed so cleanup */
1554	job->dd_data = NULL;
1555	lpfc_nlp_put(ndlp);
1556
1557issue_ct_rsp_exit:
1558	lpfc_sli_release_iocbq(phba, ctiocb);
1559no_ctiocb:
1560	kfree(dd_data);
1561no_dd_data:
1562	return rc;
1563}
1564
1565/**
1566 * lpfc_bsg_send_mgmt_rsp - process a SEND_MGMT_RESP bsg vendor command
1567 * @job: SEND_MGMT_RESP fc_bsg_job
1568 **/
1569static int
1570lpfc_bsg_send_mgmt_rsp(struct bsg_job *job)
1571{
1572	struct lpfc_vport *vport = shost_priv(fc_bsg_to_shost(job));
1573	struct lpfc_hba *phba = vport->phba;
1574	struct fc_bsg_request *bsg_request = job->request;
1575	struct fc_bsg_reply *bsg_reply = job->reply;
1576	struct send_mgmt_resp *mgmt_resp = (struct send_mgmt_resp *)
1577		bsg_request->rqst_data.h_vendor.vendor_cmd;
1578	struct ulp_bde64 *bpl;
1579	struct lpfc_dmabuf *bmp = NULL, *cmp = NULL;
1580	int bpl_entries;
1581	uint32_t tag = mgmt_resp->tag;
1582	unsigned long reqbfrcnt =
1583			(unsigned long)job->request_payload.payload_len;
1584	int rc = 0;
1585
1586	/* in case no data is transferred */
1587	bsg_reply->reply_payload_rcv_len = 0;
1588
1589	if (!reqbfrcnt || (reqbfrcnt > (80 * BUF_SZ_4K))) {
1590		rc = -ERANGE;
1591		goto send_mgmt_rsp_exit;
1592	}
1593
1594	bmp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
1595	if (!bmp) {
1596		rc = -ENOMEM;
1597		goto send_mgmt_rsp_exit;
1598	}
1599
1600	bmp->virt = lpfc_mbuf_alloc(phba, 0, &bmp->phys);
1601	if (!bmp->virt) {
1602		rc = -ENOMEM;
1603		goto send_mgmt_rsp_free_bmp;
1604	}
1605
1606	INIT_LIST_HEAD(&bmp->list);
1607	bpl = (struct ulp_bde64 *) bmp->virt;
1608	bpl_entries = (LPFC_BPL_SIZE/sizeof(struct ulp_bde64));
1609	cmp = lpfc_alloc_bsg_buffers(phba, job->request_payload.payload_len,
1610				     1, bpl, &bpl_entries);
1611	if (!cmp) {
1612		rc = -ENOMEM;
1613		goto send_mgmt_rsp_free_bmp;
1614	}
1615	lpfc_bsg_copy_data(cmp, &job->request_payload,
1616			   job->request_payload.payload_len, 1);
1617
1618	rc = lpfc_issue_ct_rsp(phba, job, tag, cmp, bmp, bpl_entries);
1619
1620	if (rc == IOCB_SUCCESS)
1621		return 0; /* done for now */
1622
1623	rc = -EACCES;
1624
1625	lpfc_free_bsg_buffers(phba, cmp);
1626
1627send_mgmt_rsp_free_bmp:
1628	if (bmp->virt)
1629		lpfc_mbuf_free(phba, bmp->virt, bmp->phys);
1630	kfree(bmp);
1631send_mgmt_rsp_exit:
1632	/* make error code available to userspace */
1633	bsg_reply->result = rc;
1634	job->dd_data = NULL;
1635	return rc;
1636}
1637
1638/**
1639 * lpfc_bsg_diag_mode_enter - process preparing into device diag loopback mode
1640 * @phba: Pointer to HBA context object.
1641 *
1642 * This function is responsible for preparing driver for diag loopback
1643 * on device.
1644 */
1645static int
1646lpfc_bsg_diag_mode_enter(struct lpfc_hba *phba)
1647{
1648	struct lpfc_vport **vports;
1649	struct Scsi_Host *shost;
1650	struct lpfc_sli *psli;
1651	struct lpfc_queue *qp = NULL;
1652	struct lpfc_sli_ring *pring;
1653	int i = 0;
1654
1655	psli = &phba->sli;
1656	if (!psli)
1657		return -ENODEV;
1658
1659
1660	if ((phba->link_state == LPFC_HBA_ERROR) ||
1661	    (psli->sli_flag & LPFC_BLOCK_MGMT_IO) ||
1662	    (!(psli->sli_flag & LPFC_SLI_ACTIVE)))
1663		return -EACCES;
1664
1665	vports = lpfc_create_vport_work_array(phba);
1666	if (vports) {
1667		for (i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) {
1668			shost = lpfc_shost_from_vport(vports[i]);
1669			scsi_block_requests(shost);
1670		}
1671		lpfc_destroy_vport_work_array(phba, vports);
1672	} else {
1673		shost = lpfc_shost_from_vport(phba->pport);
1674		scsi_block_requests(shost);
1675	}
1676
1677	if (phba->sli_rev != LPFC_SLI_REV4) {
1678		pring = &psli->sli3_ring[LPFC_FCP_RING];
1679		lpfc_emptyq_wait(phba, &pring->txcmplq, &phba->hbalock);
1680		return 0;
1681	}
1682	list_for_each_entry(qp, &phba->sli4_hba.lpfc_wq_list, wq_list) {
1683		pring = qp->pring;
1684		if (!pring || (pring->ringno != LPFC_FCP_RING))
1685			continue;
1686		if (!lpfc_emptyq_wait(phba, &pring->txcmplq,
1687				      &pring->ring_lock))
1688			break;
1689	}
1690	return 0;
1691}
1692
1693/**
1694 * lpfc_bsg_diag_mode_exit - exit process from device diag loopback mode
1695 * @phba: Pointer to HBA context object.
1696 *
1697 * This function is responsible for driver exit processing of setting up
1698 * diag loopback mode on device.
1699 */
1700static void
1701lpfc_bsg_diag_mode_exit(struct lpfc_hba *phba)
1702{
1703	struct Scsi_Host *shost;
1704	struct lpfc_vport **vports;
1705	int i;
1706
1707	vports = lpfc_create_vport_work_array(phba);
1708	if (vports) {
1709		for (i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) {
1710			shost = lpfc_shost_from_vport(vports[i]);
1711			scsi_unblock_requests(shost);
1712		}
1713		lpfc_destroy_vport_work_array(phba, vports);
1714	} else {
1715		shost = lpfc_shost_from_vport(phba->pport);
1716		scsi_unblock_requests(shost);
1717	}
1718	return;
1719}
1720
1721/**
1722 * lpfc_sli3_bsg_diag_loopback_mode - process an sli3 bsg vendor command
1723 * @phba: Pointer to HBA context object.
1724 * @job: LPFC_BSG_VENDOR_DIAG_MODE
1725 *
1726 * This function is responsible for placing an sli3  port into diagnostic
1727 * loopback mode in order to perform a diagnostic loopback test.
1728 * All new scsi requests are blocked, a small delay is used to allow the
1729 * scsi requests to complete then the link is brought down. If the link is
1730 * is placed in loopback mode then scsi requests are again allowed
1731 * so the scsi mid-layer doesn't give up on the port.
1732 * All of this is done in-line.
1733 */
1734static int
1735lpfc_sli3_bsg_diag_loopback_mode(struct lpfc_hba *phba, struct bsg_job *job)
1736{
1737	struct fc_bsg_request *bsg_request = job->request;
1738	struct fc_bsg_reply *bsg_reply = job->reply;
1739	struct diag_mode_set *loopback_mode;
1740	uint32_t link_flags;
1741	uint32_t timeout;
1742	LPFC_MBOXQ_t *pmboxq  = NULL;
1743	int mbxstatus = MBX_SUCCESS;
1744	int i = 0;
1745	int rc = 0;
1746
1747	/* no data to return just the return code */
1748	bsg_reply->reply_payload_rcv_len = 0;
1749
1750	if (job->request_len < sizeof(struct fc_bsg_request) +
1751	    sizeof(struct diag_mode_set)) {
1752		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
1753				"2738 Received DIAG MODE request size:%d "
1754				"below the minimum size:%d\n",
1755				job->request_len,
1756				(int)(sizeof(struct fc_bsg_request) +
1757				sizeof(struct diag_mode_set)));
1758		rc = -EINVAL;
1759		goto job_error;
1760	}
1761
1762	rc = lpfc_bsg_diag_mode_enter(phba);
1763	if (rc)
1764		goto job_error;
1765
1766	/* bring the link to diagnostic mode */
1767	loopback_mode = (struct diag_mode_set *)
1768		bsg_request->rqst_data.h_vendor.vendor_cmd;
1769	link_flags = loopback_mode->type;
1770	timeout = loopback_mode->timeout * 100;
1771
1772	pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
1773	if (!pmboxq) {
1774		rc = -ENOMEM;
1775		goto loopback_mode_exit;
1776	}
1777	memset((void *)pmboxq, 0, sizeof(LPFC_MBOXQ_t));
1778	pmboxq->u.mb.mbxCommand = MBX_DOWN_LINK;
1779	pmboxq->u.mb.mbxOwner = OWN_HOST;
1780
1781	mbxstatus = lpfc_sli_issue_mbox_wait(phba, pmboxq, LPFC_MBOX_TMO);
1782
1783	if ((mbxstatus == MBX_SUCCESS) && (pmboxq->u.mb.mbxStatus == 0)) {
1784		/* wait for link down before proceeding */
1785		i = 0;
1786		while (phba->link_state != LPFC_LINK_DOWN) {
1787			if (i++ > timeout) {
1788				rc = -ETIMEDOUT;
1789				goto loopback_mode_exit;
1790			}
1791			msleep(10);
1792		}
1793
1794		memset((void *)pmboxq, 0, sizeof(LPFC_MBOXQ_t));
1795		if (link_flags == INTERNAL_LOOP_BACK)
1796			pmboxq->u.mb.un.varInitLnk.link_flags = FLAGS_LOCAL_LB;
1797		else
1798			pmboxq->u.mb.un.varInitLnk.link_flags =
1799				FLAGS_TOPOLOGY_MODE_LOOP;
1800
1801		pmboxq->u.mb.mbxCommand = MBX_INIT_LINK;
1802		pmboxq->u.mb.mbxOwner = OWN_HOST;
1803
1804		mbxstatus = lpfc_sli_issue_mbox_wait(phba, pmboxq,
1805						     LPFC_MBOX_TMO);
1806
1807		if ((mbxstatus != MBX_SUCCESS) || (pmboxq->u.mb.mbxStatus))
1808			rc = -ENODEV;
1809		else {
1810			spin_lock_irq(&phba->hbalock);
1811			phba->link_flag |= LS_LOOPBACK_MODE;
1812			spin_unlock_irq(&phba->hbalock);
1813			/* wait for the link attention interrupt */
1814			msleep(100);
1815
1816			i = 0;
1817			while (phba->link_state != LPFC_HBA_READY) {
1818				if (i++ > timeout) {
1819					rc = -ETIMEDOUT;
1820					break;
1821				}
1822
1823				msleep(10);
1824			}
1825		}
1826
1827	} else
1828		rc = -ENODEV;
1829
1830loopback_mode_exit:
1831	lpfc_bsg_diag_mode_exit(phba);
1832
1833	/*
1834	 * Let SLI layer release mboxq if mbox command completed after timeout.
1835	 */
1836	if (pmboxq && mbxstatus != MBX_TIMEOUT)
1837		mempool_free(pmboxq, phba->mbox_mem_pool);
1838
1839job_error:
1840	/* make error code available to userspace */
1841	bsg_reply->result = rc;
1842	/* complete the job back to userspace if no error */
1843	if (rc == 0)
1844		bsg_job_done(job, bsg_reply->result,
1845			       bsg_reply->reply_payload_rcv_len);
1846	return rc;
1847}
1848
1849/**
1850 * lpfc_sli4_bsg_set_link_diag_state - set sli4 link diag state
1851 * @phba: Pointer to HBA context object.
1852 * @diag: Flag for set link to diag or nomral operation state.
1853 *
1854 * This function is responsible for issuing a sli4 mailbox command for setting
1855 * link to either diag state or normal operation state.
1856 */
1857static int
1858lpfc_sli4_bsg_set_link_diag_state(struct lpfc_hba *phba, uint32_t diag)
1859{
1860	LPFC_MBOXQ_t *pmboxq;
1861	struct lpfc_mbx_set_link_diag_state *link_diag_state;
1862	uint32_t req_len, alloc_len;
1863	int mbxstatus = MBX_SUCCESS, rc;
1864
1865	pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
1866	if (!pmboxq)
1867		return -ENOMEM;
1868
1869	req_len = (sizeof(struct lpfc_mbx_set_link_diag_state) -
1870		   sizeof(struct lpfc_sli4_cfg_mhdr));
1871	alloc_len = lpfc_sli4_config(phba, pmboxq, LPFC_MBOX_SUBSYSTEM_FCOE,
1872				LPFC_MBOX_OPCODE_FCOE_LINK_DIAG_STATE,
1873				req_len, LPFC_SLI4_MBX_EMBED);
1874	if (alloc_len != req_len) {
1875		rc = -ENOMEM;
1876		goto link_diag_state_set_out;
1877	}
1878	lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
1879			"3128 Set link to diagnostic state:x%x (x%x/x%x)\n",
1880			diag, phba->sli4_hba.lnk_info.lnk_tp,
1881			phba->sli4_hba.lnk_info.lnk_no);
1882
1883	link_diag_state = &pmboxq->u.mqe.un.link_diag_state;
1884	bf_set(lpfc_mbx_set_diag_state_diag_bit_valid, &link_diag_state->u.req,
1885	       LPFC_DIAG_STATE_DIAG_BIT_VALID_CHANGE);
1886	bf_set(lpfc_mbx_set_diag_state_link_num, &link_diag_state->u.req,
1887	       phba->sli4_hba.lnk_info.lnk_no);
1888	bf_set(lpfc_mbx_set_diag_state_link_type, &link_diag_state->u.req,
1889	       phba->sli4_hba.lnk_info.lnk_tp);
1890	if (diag)
1891		bf_set(lpfc_mbx_set_diag_state_diag,
1892		       &link_diag_state->u.req, 1);
1893	else
1894		bf_set(lpfc_mbx_set_diag_state_diag,
1895		       &link_diag_state->u.req, 0);
1896
1897	mbxstatus = lpfc_sli_issue_mbox_wait(phba, pmboxq, LPFC_MBOX_TMO);
1898
1899	if ((mbxstatus == MBX_SUCCESS) && (pmboxq->u.mb.mbxStatus == 0))
1900		rc = 0;
1901	else
1902		rc = -ENODEV;
1903
1904link_diag_state_set_out:
1905	if (pmboxq && (mbxstatus != MBX_TIMEOUT))
1906		mempool_free(pmboxq, phba->mbox_mem_pool);
1907
1908	return rc;
1909}
1910
1911/**
1912 * lpfc_sli4_bsg_set_loopback_mode - set sli4 internal loopback diagnostic
1913 * @phba: Pointer to HBA context object.
1914 * @mode: loopback mode to set
1915 * @link_no: link number for loopback mode to set
1916 *
1917 * This function is responsible for issuing a sli4 mailbox command for setting
1918 * up loopback diagnostic for a link.
1919 */
1920static int
1921lpfc_sli4_bsg_set_loopback_mode(struct lpfc_hba *phba, int mode,
1922				uint32_t link_no)
1923{
1924	LPFC_MBOXQ_t *pmboxq;
1925	uint32_t req_len, alloc_len;
1926	struct lpfc_mbx_set_link_diag_loopback *link_diag_loopback;
1927	int mbxstatus = MBX_SUCCESS, rc = 0;
1928
1929	pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
1930	if (!pmboxq)
1931		return -ENOMEM;
1932	req_len = (sizeof(struct lpfc_mbx_set_link_diag_loopback) -
1933		   sizeof(struct lpfc_sli4_cfg_mhdr));
1934	alloc_len = lpfc_sli4_config(phba, pmboxq, LPFC_MBOX_SUBSYSTEM_FCOE,
1935				LPFC_MBOX_OPCODE_FCOE_LINK_DIAG_LOOPBACK,
1936				req_len, LPFC_SLI4_MBX_EMBED);
1937	if (alloc_len != req_len) {
1938		mempool_free(pmboxq, phba->mbox_mem_pool);
1939		return -ENOMEM;
1940	}
1941	link_diag_loopback = &pmboxq->u.mqe.un.link_diag_loopback;
1942	bf_set(lpfc_mbx_set_diag_state_link_num,
1943	       &link_diag_loopback->u.req, link_no);
1944
1945	if (phba->sli4_hba.conf_trunk & (1 << link_no)) {
1946		bf_set(lpfc_mbx_set_diag_state_link_type,
1947		       &link_diag_loopback->u.req, LPFC_LNK_FC_TRUNKED);
1948	} else {
1949		bf_set(lpfc_mbx_set_diag_state_link_type,
1950		       &link_diag_loopback->u.req,
1951		       phba->sli4_hba.lnk_info.lnk_tp);
1952	}
1953
1954	bf_set(lpfc_mbx_set_diag_lpbk_type, &link_diag_loopback->u.req,
1955	       mode);
1956
1957	mbxstatus = lpfc_sli_issue_mbox_wait(phba, pmboxq, LPFC_MBOX_TMO);
1958	if ((mbxstatus != MBX_SUCCESS) || (pmboxq->u.mb.mbxStatus)) {
1959		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
1960				"3127 Failed setup loopback mode mailbox "
1961				"command, rc:x%x, status:x%x\n", mbxstatus,
1962				pmboxq->u.mb.mbxStatus);
1963		rc = -ENODEV;
1964	}
1965	if (pmboxq && (mbxstatus != MBX_TIMEOUT))
1966		mempool_free(pmboxq, phba->mbox_mem_pool);
1967	return rc;
1968}
1969
1970/**
1971 * lpfc_sli4_diag_fcport_reg_setup - setup port registrations for diagnostic
1972 * @phba: Pointer to HBA context object.
1973 *
1974 * This function set up SLI4 FC port registrations for diagnostic run, which
1975 * includes all the rpis, vfi, and also vpi.
1976 */
1977static int
1978lpfc_sli4_diag_fcport_reg_setup(struct lpfc_hba *phba)
1979{
1980	if (test_bit(FC_VFI_REGISTERED, &phba->pport->fc_flag)) {
1981		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
1982				"3136 Port still had vfi registered: "
1983				"mydid:x%x, fcfi:%d, vfi:%d, vpi:%d\n",
1984				phba->pport->fc_myDID, phba->fcf.fcfi,
1985				phba->sli4_hba.vfi_ids[phba->pport->vfi],
1986				phba->vpi_ids[phba->pport->vpi]);
1987		return -EINVAL;
1988	}
1989	return lpfc_issue_reg_vfi(phba->pport);
1990}
1991
1992/**
1993 * lpfc_sli4_bsg_diag_loopback_mode - process an sli4 bsg vendor command
1994 * @phba: Pointer to HBA context object.
1995 * @job: LPFC_BSG_VENDOR_DIAG_MODE
1996 *
1997 * This function is responsible for placing an sli4 port into diagnostic
1998 * loopback mode in order to perform a diagnostic loopback test.
1999 */
2000static int
2001lpfc_sli4_bsg_diag_loopback_mode(struct lpfc_hba *phba, struct bsg_job *job)
2002{
2003	struct fc_bsg_request *bsg_request = job->request;
2004	struct fc_bsg_reply *bsg_reply = job->reply;
2005	struct diag_mode_set *loopback_mode;
2006	uint32_t link_flags, timeout, link_no;
2007	int i, rc = 0;
2008
2009	/* no data to return just the return code */
2010	bsg_reply->reply_payload_rcv_len = 0;
2011
2012	if (job->request_len < sizeof(struct fc_bsg_request) +
2013	    sizeof(struct diag_mode_set)) {
2014		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
2015				"3011 Received DIAG MODE request size:%d "
2016				"below the minimum size:%d\n",
2017				job->request_len,
2018				(int)(sizeof(struct fc_bsg_request) +
2019				sizeof(struct diag_mode_set)));
2020		rc = -EINVAL;
2021		goto job_done;
2022	}
2023
2024	loopback_mode = (struct diag_mode_set *)
2025		bsg_request->rqst_data.h_vendor.vendor_cmd;
2026	link_flags = loopback_mode->type;
2027	timeout = loopback_mode->timeout * 100;
2028
2029	if (loopback_mode->physical_link == -1)
2030		link_no = phba->sli4_hba.lnk_info.lnk_no;
2031	else
2032		link_no = loopback_mode->physical_link;
2033
2034	if (link_flags == DISABLE_LOOP_BACK) {
2035		rc = lpfc_sli4_bsg_set_loopback_mode(phba,
2036					LPFC_DIAG_LOOPBACK_TYPE_DISABLE,
2037					link_no);
2038		if (!rc) {
2039			/* Unset the need disable bit */
2040			phba->sli4_hba.conf_trunk &= ~((1 << link_no) << 4);
2041		}
2042		goto job_done;
2043	} else {
2044		/* Check if we need to disable the loopback state */
2045		if (phba->sli4_hba.conf_trunk & ((1 << link_no) << 4)) {
2046			rc = -EPERM;
2047			goto job_done;
2048		}
2049	}
2050
2051	rc = lpfc_bsg_diag_mode_enter(phba);
2052	if (rc)
2053		goto job_done;
2054
2055	/* indicate we are in loobpack diagnostic mode */
2056	spin_lock_irq(&phba->hbalock);
2057	phba->link_flag |= LS_LOOPBACK_MODE;
2058	spin_unlock_irq(&phba->hbalock);
2059
2060	/* reset port to start frome scratch */
2061	rc = lpfc_selective_reset(phba);
2062	if (rc)
2063		goto job_done;
2064
2065	/* bring the link to diagnostic mode */
2066	lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
2067			"3129 Bring link to diagnostic state.\n");
2068
2069	rc = lpfc_sli4_bsg_set_link_diag_state(phba, 1);
2070	if (rc) {
2071		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
2072				"3130 Failed to bring link to diagnostic "
2073				"state, rc:x%x\n", rc);
2074		goto loopback_mode_exit;
2075	}
2076
2077	/* wait for link down before proceeding */
2078	i = 0;
2079	while (phba->link_state != LPFC_LINK_DOWN) {
2080		if (i++ > timeout) {
2081			rc = -ETIMEDOUT;
2082			lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
2083					"3131 Timeout waiting for link to "
2084					"diagnostic mode, timeout:%d ms\n",
2085					timeout * 10);
2086			goto loopback_mode_exit;
2087		}
2088		msleep(10);
2089	}
2090
2091	/* set up loopback mode */
2092	lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
2093			"3132 Set up loopback mode:x%x\n", link_flags);
2094
2095	switch (link_flags) {
2096	case INTERNAL_LOOP_BACK:
2097		if (phba->sli4_hba.conf_trunk & (1 << link_no)) {
2098			rc = lpfc_sli4_bsg_set_loopback_mode(phba,
2099					LPFC_DIAG_LOOPBACK_TYPE_INTERNAL,
2100					link_no);
2101		} else {
2102			/* Trunk is configured, but link is not in this trunk */
2103			if (phba->sli4_hba.conf_trunk) {
2104				rc = -ELNRNG;
2105				goto loopback_mode_exit;
2106			}
2107
2108			rc = lpfc_sli4_bsg_set_loopback_mode(phba,
2109					LPFC_DIAG_LOOPBACK_TYPE_INTERNAL,
2110					link_no);
2111		}
2112
2113		if (!rc) {
2114			/* Set the need disable bit */
2115			phba->sli4_hba.conf_trunk |= (1 << link_no) << 4;
2116		}
2117
2118		break;
2119	case EXTERNAL_LOOP_BACK:
2120		if (phba->sli4_hba.conf_trunk & (1 << link_no)) {
2121			rc = lpfc_sli4_bsg_set_loopback_mode(phba,
2122				LPFC_DIAG_LOOPBACK_TYPE_EXTERNAL_TRUNKED,
2123				link_no);
2124		} else {
2125			/* Trunk is configured, but link is not in this trunk */
2126			if (phba->sli4_hba.conf_trunk) {
2127				rc = -ELNRNG;
2128				goto loopback_mode_exit;
2129			}
2130
2131			rc = lpfc_sli4_bsg_set_loopback_mode(phba,
2132						LPFC_DIAG_LOOPBACK_TYPE_SERDES,
2133						link_no);
2134		}
2135
2136		if (!rc) {
2137			/* Set the need disable bit */
2138			phba->sli4_hba.conf_trunk |= (1 << link_no) << 4;
2139		}
2140
2141		break;
2142	default:
2143		rc = -EINVAL;
2144		lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
2145				"3141 Loopback mode:x%x not supported\n",
2146				link_flags);
2147		goto loopback_mode_exit;
2148	}
2149
2150	if (!rc) {
2151		/* wait for the link attention interrupt */
2152		msleep(100);
2153		i = 0;
2154		while (phba->link_state < LPFC_LINK_UP) {
2155			if (i++ > timeout) {
2156				rc = -ETIMEDOUT;
2157				lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
2158					"3137 Timeout waiting for link up "
2159					"in loopback mode, timeout:%d ms\n",
2160					timeout * 10);
2161				break;
2162			}
2163			msleep(10);
2164		}
2165	}
2166
2167	/* port resource registration setup for loopback diagnostic */
2168	if (!rc) {
2169		/* set up a none zero myDID for loopback test */
2170		phba->pport->fc_myDID = 1;
2171		rc = lpfc_sli4_diag_fcport_reg_setup(phba);
2172	} else
2173		goto loopback_mode_exit;
2174
2175	if (!rc) {
2176		/* wait for the port ready */
2177		msleep(100);
2178		i = 0;
2179		while (phba->link_state != LPFC_HBA_READY) {
2180			if (i++ > timeout) {
2181				rc = -ETIMEDOUT;
2182				lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
2183					"3133 Timeout waiting for port "
2184					"loopback mode ready, timeout:%d ms\n",
2185					timeout * 10);
2186				break;
2187			}
2188			msleep(10);
2189		}
2190	}
2191
2192loopback_mode_exit:
2193	/* clear loopback diagnostic mode */
2194	if (rc) {
2195		spin_lock_irq(&phba->hbalock);
2196		phba->link_flag &= ~LS_LOOPBACK_MODE;
2197		spin_unlock_irq(&phba->hbalock);
2198	}
2199	lpfc_bsg_diag_mode_exit(phba);
2200
2201job_done:
2202	/* make error code available to userspace */
2203	bsg_reply->result = rc;
2204	/* complete the job back to userspace if no error */
2205	if (rc == 0)
2206		bsg_job_done(job, bsg_reply->result,
2207			       bsg_reply->reply_payload_rcv_len);
2208	return rc;
2209}
2210
2211/**
2212 * lpfc_bsg_diag_loopback_mode - bsg vendor command for diag loopback mode
2213 * @job: LPFC_BSG_VENDOR_DIAG_MODE
2214 *
2215 * This function is responsible for responding to check and dispatch bsg diag
2216 * command from the user to proper driver action routines.
2217 */
2218static int
2219lpfc_bsg_diag_loopback_mode(struct bsg_job *job)
2220{
2221	struct Scsi_Host *shost;
2222	struct lpfc_vport *vport;
2223	struct lpfc_hba *phba;
2224	int rc;
2225
2226	shost = fc_bsg_to_shost(job);
2227	if (!shost)
2228		return -ENODEV;
2229	vport = shost_priv(shost);
2230	if (!vport)
2231		return -ENODEV;
2232	phba = vport->phba;
2233	if (!phba)
2234		return -ENODEV;
2235
2236	if (phba->sli_rev < LPFC_SLI_REV4)
2237		rc = lpfc_sli3_bsg_diag_loopback_mode(phba, job);
2238	else if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) >=
2239		 LPFC_SLI_INTF_IF_TYPE_2)
2240		rc = lpfc_sli4_bsg_diag_loopback_mode(phba, job);
2241	else
2242		rc = -ENODEV;
2243
2244	return rc;
2245}
2246
2247/**
2248 * lpfc_sli4_bsg_diag_mode_end - sli4 bsg vendor command for ending diag mode
2249 * @job: LPFC_BSG_VENDOR_DIAG_MODE_END
2250 *
2251 * This function is responsible for responding to check and dispatch bsg diag
2252 * command from the user to proper driver action routines.
2253 */
2254static int
2255lpfc_sli4_bsg_diag_mode_end(struct bsg_job *job)
2256{
2257	struct fc_bsg_request *bsg_request = job->request;
2258	struct fc_bsg_reply *bsg_reply = job->reply;
2259	struct Scsi_Host *shost;
2260	struct lpfc_vport *vport;
2261	struct lpfc_hba *phba;
2262	struct diag_mode_set *loopback_mode_end_cmd;
2263	uint32_t timeout;
2264	int rc, i;
2265
2266	shost = fc_bsg_to_shost(job);
2267	if (!shost)
2268		return -ENODEV;
2269	vport = shost_priv(shost);
2270	if (!vport)
2271		return -ENODEV;
2272	phba = vport->phba;
2273	if (!phba)
2274		return -ENODEV;
2275
2276	if (phba->sli_rev < LPFC_SLI_REV4)
2277		return -ENODEV;
2278	if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) <
2279	    LPFC_SLI_INTF_IF_TYPE_2)
2280		return -ENODEV;
2281
2282	/* clear loopback diagnostic mode */
2283	spin_lock_irq(&phba->hbalock);
2284	phba->link_flag &= ~LS_LOOPBACK_MODE;
2285	spin_unlock_irq(&phba->hbalock);
2286	loopback_mode_end_cmd = (struct diag_mode_set *)
2287			bsg_request->rqst_data.h_vendor.vendor_cmd;
2288	timeout = loopback_mode_end_cmd->timeout * 100;
2289
2290	rc = lpfc_sli4_bsg_set_link_diag_state(phba, 0);
2291	if (rc) {
2292		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
2293				"3139 Failed to bring link to diagnostic "
2294				"state, rc:x%x\n", rc);
2295		goto loopback_mode_end_exit;
2296	}
2297
2298	/* wait for link down before proceeding */
2299	i = 0;
2300	while (phba->link_state != LPFC_LINK_DOWN) {
2301		if (i++ > timeout) {
2302			lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
2303					"3140 Timeout waiting for link to "
2304					"diagnostic mode_end, timeout:%d ms\n",
2305					timeout * 10);
2306			/* there is nothing much we can do here */
2307			break;
2308		}
2309		msleep(10);
2310	}
2311
2312	/* reset port resource registrations */
2313	rc = lpfc_selective_reset(phba);
2314	phba->pport->fc_myDID = 0;
2315
2316loopback_mode_end_exit:
2317	/* make return code available to userspace */
2318	bsg_reply->result = rc;
2319	/* complete the job back to userspace if no error */
2320	if (rc == 0)
2321		bsg_job_done(job, bsg_reply->result,
2322			       bsg_reply->reply_payload_rcv_len);
2323	return rc;
2324}
2325
2326/**
2327 * lpfc_sli4_bsg_link_diag_test - sli4 bsg vendor command for diag link test
2328 * @job: LPFC_BSG_VENDOR_DIAG_LINK_TEST
2329 *
2330 * This function is to perform SLI4 diag link test request from the user
2331 * applicaiton.
2332 */
2333static int
2334lpfc_sli4_bsg_link_diag_test(struct bsg_job *job)
2335{
2336	struct fc_bsg_request *bsg_request = job->request;
2337	struct fc_bsg_reply *bsg_reply = job->reply;
2338	struct Scsi_Host *shost;
2339	struct lpfc_vport *vport;
2340	struct lpfc_hba *phba;
2341	LPFC_MBOXQ_t *pmboxq;
2342	struct sli4_link_diag *link_diag_test_cmd;
2343	uint32_t req_len, alloc_len;
2344	struct lpfc_mbx_run_link_diag_test *run_link_diag_test;
2345	union lpfc_sli4_cfg_shdr *shdr;
2346	uint32_t shdr_status, shdr_add_status;
2347	struct diag_status *diag_status_reply;
2348	int mbxstatus, rc = -ENODEV, rc1 = 0;
2349
2350	shost = fc_bsg_to_shost(job);
2351	if (!shost)
2352		goto job_error;
2353
2354	vport = shost_priv(shost);
2355	if (!vport)
2356		goto job_error;
2357
2358	phba = vport->phba;
2359	if (!phba)
2360		goto job_error;
2361
2362
2363	if (phba->sli_rev < LPFC_SLI_REV4)
2364		goto job_error;
2365
2366	if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) <
2367	    LPFC_SLI_INTF_IF_TYPE_2)
2368		goto job_error;
2369
2370	if (job->request_len < sizeof(struct fc_bsg_request) +
2371	    sizeof(struct sli4_link_diag)) {
2372		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
2373				"3013 Received LINK DIAG TEST request "
2374				" size:%d below the minimum size:%d\n",
2375				job->request_len,
2376				(int)(sizeof(struct fc_bsg_request) +
2377				sizeof(struct sli4_link_diag)));
2378		rc = -EINVAL;
2379		goto job_error;
2380	}
2381
2382	rc = lpfc_bsg_diag_mode_enter(phba);
2383	if (rc)
2384		goto job_error;
2385
2386	link_diag_test_cmd = (struct sli4_link_diag *)
2387			 bsg_request->rqst_data.h_vendor.vendor_cmd;
2388
2389	rc = lpfc_sli4_bsg_set_link_diag_state(phba, 1);
2390
2391	if (rc)
2392		goto job_error;
2393
2394	pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
2395	if (!pmboxq) {
2396		rc = -ENOMEM;
2397		goto link_diag_test_exit;
2398	}
2399
2400	req_len = (sizeof(struct lpfc_mbx_set_link_diag_state) -
2401		   sizeof(struct lpfc_sli4_cfg_mhdr));
2402	alloc_len = lpfc_sli4_config(phba, pmboxq, LPFC_MBOX_SUBSYSTEM_FCOE,
2403				     LPFC_MBOX_OPCODE_FCOE_LINK_DIAG_STATE,
2404				     req_len, LPFC_SLI4_MBX_EMBED);
2405	if (alloc_len != req_len) {
2406		rc = -ENOMEM;
2407		goto link_diag_test_exit;
2408	}
2409
2410	run_link_diag_test = &pmboxq->u.mqe.un.link_diag_test;
2411	bf_set(lpfc_mbx_run_diag_test_link_num, &run_link_diag_test->u.req,
2412	       phba->sli4_hba.lnk_info.lnk_no);
2413	bf_set(lpfc_mbx_run_diag_test_link_type, &run_link_diag_test->u.req,
2414	       phba->sli4_hba.lnk_info.lnk_tp);
2415	bf_set(lpfc_mbx_run_diag_test_test_id, &run_link_diag_test->u.req,
2416	       link_diag_test_cmd->test_id);
2417	bf_set(lpfc_mbx_run_diag_test_loops, &run_link_diag_test->u.req,
2418	       link_diag_test_cmd->loops);
2419	bf_set(lpfc_mbx_run_diag_test_test_ver, &run_link_diag_test->u.req,
2420	       link_diag_test_cmd->test_version);
2421	bf_set(lpfc_mbx_run_diag_test_err_act, &run_link_diag_test->u.req,
2422	       link_diag_test_cmd->error_action);
2423
2424	mbxstatus = lpfc_sli_issue_mbox(phba, pmboxq, MBX_POLL);
2425
2426	shdr = (union lpfc_sli4_cfg_shdr *)
2427		&pmboxq->u.mqe.un.sli4_config.header.cfg_shdr;
2428	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
2429	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
2430	if (shdr_status || shdr_add_status || mbxstatus) {
2431		lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
2432				"3010 Run link diag test mailbox failed with "
2433				"mbx_status x%x status x%x, add_status x%x\n",
2434				mbxstatus, shdr_status, shdr_add_status);
2435	}
2436
2437	diag_status_reply = (struct diag_status *)
2438			    bsg_reply->reply_data.vendor_reply.vendor_rsp;
2439
2440	if (job->reply_len < sizeof(*bsg_reply) + sizeof(*diag_status_reply)) {
2441		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
2442				"3012 Received Run link diag test reply "
2443				"below minimum size (%d): reply_len:%d\n",
2444				(int)(sizeof(*bsg_reply) +
2445				sizeof(*diag_status_reply)),
2446				job->reply_len);
2447		rc = -EINVAL;
2448		goto job_error;
2449	}
2450
2451	diag_status_reply->mbox_status = mbxstatus;
2452	diag_status_reply->shdr_status = shdr_status;
2453	diag_status_reply->shdr_add_status = shdr_add_status;
2454
2455link_diag_test_exit:
2456	rc1 = lpfc_sli4_bsg_set_link_diag_state(phba, 0);
2457
2458	if (pmboxq)
2459		mempool_free(pmboxq, phba->mbox_mem_pool);
2460
2461	lpfc_bsg_diag_mode_exit(phba);
2462
2463job_error:
2464	/* make error code available to userspace */
2465	if (rc1 && !rc)
2466		rc = rc1;
2467	bsg_reply->result = rc;
2468	/* complete the job back to userspace if no error */
2469	if (rc == 0)
2470		bsg_job_done(job, bsg_reply->result,
2471			       bsg_reply->reply_payload_rcv_len);
2472	return rc;
2473}
2474
2475/**
2476 * lpfcdiag_loop_self_reg - obtains a remote port login id
2477 * @phba: Pointer to HBA context object
2478 * @rpi: Pointer to a remote port login id
2479 *
2480 * This function obtains a remote port login id so the diag loopback test
2481 * can send and receive its own unsolicited CT command.
2482 **/
2483static int lpfcdiag_loop_self_reg(struct lpfc_hba *phba, uint16_t *rpi)
2484{
2485	LPFC_MBOXQ_t *mbox;
2486	struct lpfc_dmabuf *dmabuff;
2487	int status;
2488
2489	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
2490	if (!mbox)
2491		return -ENOMEM;
2492
2493	if (phba->sli_rev < LPFC_SLI_REV4)
2494		status = lpfc_reg_rpi(phba, 0, phba->pport->fc_myDID,
2495				(uint8_t *)&phba->pport->fc_sparam,
2496				mbox, *rpi);
2497	else {
2498		*rpi = lpfc_sli4_alloc_rpi(phba);
2499		if (*rpi == LPFC_RPI_ALLOC_ERROR) {
2500			mempool_free(mbox, phba->mbox_mem_pool);
2501			return -EBUSY;
2502		}
2503		status = lpfc_reg_rpi(phba, phba->pport->vpi,
2504				phba->pport->fc_myDID,
2505				(uint8_t *)&phba->pport->fc_sparam,
2506				mbox, *rpi);
2507	}
2508
2509	if (status) {
2510		mempool_free(mbox, phba->mbox_mem_pool);
2511		if (phba->sli_rev == LPFC_SLI_REV4)
2512			lpfc_sli4_free_rpi(phba, *rpi);
2513		return -ENOMEM;
2514	}
2515
2516	dmabuff = mbox->ctx_buf;
2517	mbox->ctx_buf = NULL;
2518	mbox->ctx_ndlp = NULL;
2519	status = lpfc_sli_issue_mbox_wait(phba, mbox, LPFC_MBOX_TMO);
2520
2521	if ((status != MBX_SUCCESS) || (mbox->u.mb.mbxStatus)) {
2522		lpfc_mbuf_free(phba, dmabuff->virt, dmabuff->phys);
2523		kfree(dmabuff);
2524		if (status != MBX_TIMEOUT)
2525			mempool_free(mbox, phba->mbox_mem_pool);
2526		if (phba->sli_rev == LPFC_SLI_REV4)
2527			lpfc_sli4_free_rpi(phba, *rpi);
2528		return -ENODEV;
2529	}
2530
2531	if (phba->sli_rev < LPFC_SLI_REV4)
2532		*rpi = mbox->u.mb.un.varWords[0];
2533
2534	lpfc_mbuf_free(phba, dmabuff->virt, dmabuff->phys);
2535	kfree(dmabuff);
2536	mempool_free(mbox, phba->mbox_mem_pool);
2537	return 0;
2538}
2539
2540/**
2541 * lpfcdiag_loop_self_unreg - unregs from the rpi
2542 * @phba: Pointer to HBA context object
2543 * @rpi: Remote port login id
2544 *
2545 * This function unregisters the rpi obtained in lpfcdiag_loop_self_reg
2546 **/
2547static int lpfcdiag_loop_self_unreg(struct lpfc_hba *phba, uint16_t rpi)
2548{
2549	LPFC_MBOXQ_t *mbox;
2550	int status;
2551
2552	/* Allocate mboxq structure */
2553	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
2554	if (mbox == NULL)
2555		return -ENOMEM;
2556
2557	if (phba->sli_rev < LPFC_SLI_REV4)
2558		lpfc_unreg_login(phba, 0, rpi, mbox);
2559	else
2560		lpfc_unreg_login(phba, phba->pport->vpi,
2561				 phba->sli4_hba.rpi_ids[rpi], mbox);
2562
2563	status = lpfc_sli_issue_mbox_wait(phba, mbox, LPFC_MBOX_TMO);
2564
2565	if ((status != MBX_SUCCESS) || (mbox->u.mb.mbxStatus)) {
2566		if (status != MBX_TIMEOUT)
2567			mempool_free(mbox, phba->mbox_mem_pool);
2568		return -EIO;
2569	}
2570	mempool_free(mbox, phba->mbox_mem_pool);
2571	if (phba->sli_rev == LPFC_SLI_REV4)
2572		lpfc_sli4_free_rpi(phba, rpi);
2573	return 0;
2574}
2575
2576/**
2577 * lpfcdiag_loop_get_xri - obtains the transmit and receive ids
2578 * @phba: Pointer to HBA context object
2579 * @rpi: Remote port login id
2580 * @txxri: Pointer to transmit exchange id
2581 * @rxxri: Pointer to response exchabge id
2582 *
2583 * This function obtains the transmit and receive ids required to send
2584 * an unsolicited ct command with a payload. A special lpfc FsType and CmdRsp
2585 * flags are used to the unsolicited response handler is able to process
2586 * the ct command sent on the same port.
2587 **/
2588static int lpfcdiag_loop_get_xri(struct lpfc_hba *phba, uint16_t rpi,
2589			 uint16_t *txxri, uint16_t * rxxri)
2590{
2591	struct lpfc_bsg_event *evt;
2592	struct lpfc_iocbq *cmdiocbq, *rspiocbq;
2593	struct lpfc_dmabuf *dmabuf;
2594	struct ulp_bde64 *bpl = NULL;
2595	struct lpfc_sli_ct_request *ctreq = NULL;
2596	int ret_val = 0;
2597	int time_left;
2598	int iocb_stat = IOCB_SUCCESS;
2599	unsigned long flags;
2600	u32 status;
2601
2602	*txxri = 0;
2603	*rxxri = 0;
2604	evt = lpfc_bsg_event_new(FC_REG_CT_EVENT, current->pid,
2605				SLI_CT_ELX_LOOPBACK);
2606	if (!evt)
2607		return -ENOMEM;
2608
2609	spin_lock_irqsave(&phba->ct_ev_lock, flags);
2610	list_add(&evt->node, &phba->ct_ev_waiters);
2611	lpfc_bsg_event_ref(evt);
2612	spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
2613
2614	cmdiocbq = lpfc_sli_get_iocbq(phba);
2615	rspiocbq = lpfc_sli_get_iocbq(phba);
2616
2617	dmabuf = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
2618	if (dmabuf) {
2619		dmabuf->virt = lpfc_mbuf_alloc(phba, 0, &dmabuf->phys);
2620		if (dmabuf->virt) {
2621			INIT_LIST_HEAD(&dmabuf->list);
2622			bpl = (struct ulp_bde64 *) dmabuf->virt;
2623			memset(bpl, 0, sizeof(*bpl));
2624			ctreq = (struct lpfc_sli_ct_request *)(bpl + 1);
2625			bpl->addrHigh =
2626				le32_to_cpu(putPaddrHigh(dmabuf->phys +
2627					sizeof(*bpl)));
2628			bpl->addrLow =
2629				le32_to_cpu(putPaddrLow(dmabuf->phys +
2630					sizeof(*bpl)));
2631			bpl->tus.f.bdeFlags = 0;
2632			bpl->tus.f.bdeSize = ELX_LOOPBACK_HEADER_SZ;
2633			bpl->tus.w = le32_to_cpu(bpl->tus.w);
2634		}
2635	}
2636
2637	if (cmdiocbq == NULL || rspiocbq == NULL ||
2638	    dmabuf == NULL || bpl == NULL || ctreq == NULL ||
2639		dmabuf->virt == NULL) {
2640		ret_val = -ENOMEM;
2641		goto err_get_xri_exit;
2642	}
2643
2644	memset(ctreq, 0, ELX_LOOPBACK_HEADER_SZ);
2645
2646	ctreq->RevisionId.bits.Revision = SLI_CT_REVISION;
2647	ctreq->RevisionId.bits.InId = 0;
2648	ctreq->FsType = SLI_CT_ELX_LOOPBACK;
2649	ctreq->FsSubType = 0;
2650	ctreq->CommandResponse.bits.CmdRsp = ELX_LOOPBACK_XRI_SETUP;
2651	ctreq->CommandResponse.bits.Size = 0;
2652
2653	cmdiocbq->bpl_dmabuf = dmabuf;
2654	cmdiocbq->cmd_flag |= LPFC_IO_LIBDFC;
2655	cmdiocbq->vport = phba->pport;
2656	cmdiocbq->cmd_cmpl = NULL;
2657
2658	lpfc_sli_prep_xmit_seq64(phba, cmdiocbq, dmabuf, rpi, 0, 1,
2659				 FC_RCTL_DD_SOL_CTL, 0, CMD_XMIT_SEQUENCE64_CR);
2660
2661	iocb_stat = lpfc_sli_issue_iocb_wait(phba, LPFC_ELS_RING, cmdiocbq,
2662					     rspiocbq, (phba->fc_ratov * 2)
2663					     + LPFC_DRVR_TIMEOUT);
2664
2665	status = get_job_ulpstatus(phba, rspiocbq);
2666	if (iocb_stat != IOCB_SUCCESS || status != IOCB_SUCCESS) {
2667		ret_val = -EIO;
2668		goto err_get_xri_exit;
2669	}
2670	*txxri = get_job_ulpcontext(phba, rspiocbq);
2671
2672	evt->waiting = 1;
2673	evt->wait_time_stamp = jiffies;
2674	time_left = wait_event_interruptible_timeout(
2675		evt->wq, !list_empty(&evt->events_to_see),
2676		msecs_to_jiffies(1000 *
2677			((phba->fc_ratov * 2) + LPFC_DRVR_TIMEOUT)));
2678	if (list_empty(&evt->events_to_see))
2679		ret_val = (time_left) ? -EINTR : -ETIMEDOUT;
2680	else {
2681		spin_lock_irqsave(&phba->ct_ev_lock, flags);
2682		list_move(evt->events_to_see.prev, &evt->events_to_get);
2683		spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
2684		*rxxri = (list_entry(evt->events_to_get.prev,
2685				     typeof(struct event_data),
2686				     node))->immed_dat;
2687	}
2688	evt->waiting = 0;
2689
2690err_get_xri_exit:
2691	spin_lock_irqsave(&phba->ct_ev_lock, flags);
2692	lpfc_bsg_event_unref(evt); /* release ref */
2693	lpfc_bsg_event_unref(evt); /* delete */
2694	spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
2695
2696	if (dmabuf) {
2697		if (dmabuf->virt)
2698			lpfc_mbuf_free(phba, dmabuf->virt, dmabuf->phys);
2699		kfree(dmabuf);
2700	}
2701
2702	if (cmdiocbq && (iocb_stat != IOCB_TIMEDOUT))
2703		lpfc_sli_release_iocbq(phba, cmdiocbq);
2704	if (rspiocbq)
2705		lpfc_sli_release_iocbq(phba, rspiocbq);
2706	return ret_val;
2707}
2708
2709/**
2710 * lpfc_bsg_dma_page_alloc - allocate a bsg mbox page sized dma buffers
2711 * @phba: Pointer to HBA context object
2712 *
2713 * This function allocates BSG_MBOX_SIZE (4KB) page size dma buffer and
2714 * returns the pointer to the buffer.
2715 **/
2716static struct lpfc_dmabuf *
2717lpfc_bsg_dma_page_alloc(struct lpfc_hba *phba)
2718{
2719	struct lpfc_dmabuf *dmabuf;
2720	struct pci_dev *pcidev = phba->pcidev;
2721
2722	/* allocate dma buffer struct */
2723	dmabuf = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
2724	if (!dmabuf)
2725		return NULL;
2726
2727	INIT_LIST_HEAD(&dmabuf->list);
2728
2729	/* now, allocate dma buffer */
2730	dmabuf->virt = dma_alloc_coherent(&pcidev->dev, BSG_MBOX_SIZE,
2731					  &(dmabuf->phys), GFP_KERNEL);
2732
2733	if (!dmabuf->virt) {
2734		kfree(dmabuf);
2735		return NULL;
2736	}
2737
2738	return dmabuf;
2739}
2740
2741/**
2742 * lpfc_bsg_dma_page_free - free a bsg mbox page sized dma buffer
2743 * @phba: Pointer to HBA context object.
2744 * @dmabuf: Pointer to the bsg mbox page sized dma buffer descriptor.
2745 *
2746 * This routine just simply frees a dma buffer and its associated buffer
2747 * descriptor referred by @dmabuf.
2748 **/
2749static void
2750lpfc_bsg_dma_page_free(struct lpfc_hba *phba, struct lpfc_dmabuf *dmabuf)
2751{
2752	struct pci_dev *pcidev = phba->pcidev;
2753
2754	if (!dmabuf)
2755		return;
2756
2757	if (dmabuf->virt)
2758		dma_free_coherent(&pcidev->dev, BSG_MBOX_SIZE,
2759				  dmabuf->virt, dmabuf->phys);
2760	kfree(dmabuf);
2761	return;
2762}
2763
2764/**
2765 * lpfc_bsg_dma_page_list_free - free a list of bsg mbox page sized dma buffers
2766 * @phba: Pointer to HBA context object.
2767 * @dmabuf_list: Pointer to a list of bsg mbox page sized dma buffer descs.
2768 *
2769 * This routine just simply frees all dma buffers and their associated buffer
2770 * descriptors referred by @dmabuf_list.
2771 **/
2772static void
2773lpfc_bsg_dma_page_list_free(struct lpfc_hba *phba,
2774			    struct list_head *dmabuf_list)
2775{
2776	struct lpfc_dmabuf *dmabuf, *next_dmabuf;
2777
2778	if (list_empty(dmabuf_list))
2779		return;
2780
2781	list_for_each_entry_safe(dmabuf, next_dmabuf, dmabuf_list, list) {
2782		list_del_init(&dmabuf->list);
2783		lpfc_bsg_dma_page_free(phba, dmabuf);
2784	}
2785	return;
2786}
2787
2788/**
2789 * diag_cmd_data_alloc - fills in a bde struct with dma buffers
2790 * @phba: Pointer to HBA context object
2791 * @bpl: Pointer to 64 bit bde structure
2792 * @size: Number of bytes to process
2793 * @nocopydata: Flag to copy user data into the allocated buffer
2794 *
2795 * This function allocates page size buffers and populates an lpfc_dmabufext.
2796 * If allowed the user data pointed to with indataptr is copied into the kernel
2797 * memory. The chained list of page size buffers is returned.
2798 **/
2799static struct lpfc_dmabufext *
2800diag_cmd_data_alloc(struct lpfc_hba *phba,
2801		   struct ulp_bde64 *bpl, uint32_t size,
2802		   int nocopydata)
2803{
2804	struct lpfc_dmabufext *mlist = NULL;
2805	struct lpfc_dmabufext *dmp;
2806	int cnt, offset = 0, i = 0;
2807	struct pci_dev *pcidev;
2808
2809	pcidev = phba->pcidev;
2810
2811	while (size) {
2812		/* We get chunks of 4K */
2813		if (size > BUF_SZ_4K)
2814			cnt = BUF_SZ_4K;
2815		else
2816			cnt = size;
2817
2818		/* allocate struct lpfc_dmabufext buffer header */
2819		dmp = kmalloc(sizeof(struct lpfc_dmabufext), GFP_KERNEL);
2820		if (!dmp)
2821			goto out;
2822
2823		INIT_LIST_HEAD(&dmp->dma.list);
2824
2825		/* Queue it to a linked list */
2826		if (mlist)
2827			list_add_tail(&dmp->dma.list, &mlist->dma.list);
2828		else
2829			mlist = dmp;
2830
2831		/* allocate buffer */
2832		dmp->dma.virt = dma_alloc_coherent(&pcidev->dev,
2833						   cnt,
2834						   &(dmp->dma.phys),
2835						   GFP_KERNEL);
2836
2837		if (!dmp->dma.virt)
2838			goto out;
2839
2840		dmp->size = cnt;
2841
2842		if (nocopydata) {
2843			bpl->tus.f.bdeFlags = 0;
2844		} else {
2845			memset((uint8_t *)dmp->dma.virt, 0, cnt);
2846			bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64I;
2847		}
2848
2849		/* build buffer ptr list for IOCB */
2850		bpl->addrLow = le32_to_cpu(putPaddrLow(dmp->dma.phys));
2851		bpl->addrHigh = le32_to_cpu(putPaddrHigh(dmp->dma.phys));
2852		bpl->tus.f.bdeSize = (ushort) cnt;
2853		bpl->tus.w = le32_to_cpu(bpl->tus.w);
2854		bpl++;
2855
2856		i++;
2857		offset += cnt;
2858		size -= cnt;
2859	}
2860
2861	if (mlist) {
2862		mlist->flag = i;
2863		return mlist;
2864	}
2865out:
2866	diag_cmd_data_free(phba, mlist);
2867	return NULL;
2868}
2869
2870/**
2871 * lpfcdiag_sli3_loop_post_rxbufs - post the receive buffers for an unsol CT cmd
2872 * @phba: Pointer to HBA context object
2873 * @rxxri: Receive exchange id
2874 * @len: Number of data bytes
2875 *
2876 * This function allocates and posts a data buffer of sufficient size to receive
2877 * an unsolicited CT command.
2878 **/
2879static int lpfcdiag_sli3_loop_post_rxbufs(struct lpfc_hba *phba, uint16_t rxxri,
2880					  size_t len)
2881{
2882	struct lpfc_sli_ring *pring;
2883	struct lpfc_iocbq *cmdiocbq;
2884	IOCB_t *cmd = NULL;
2885	struct list_head head, *curr, *next;
2886	struct lpfc_dmabuf *rxbmp;
2887	struct lpfc_dmabuf *dmp;
2888	struct lpfc_dmabuf *mp[2] = {NULL, NULL};
2889	struct ulp_bde64 *rxbpl = NULL;
2890	uint32_t num_bde;
2891	struct lpfc_dmabufext *rxbuffer = NULL;
2892	int ret_val = 0;
2893	int iocb_stat;
2894	int i = 0;
2895
2896	pring = lpfc_phba_elsring(phba);
2897
2898	cmdiocbq = lpfc_sli_get_iocbq(phba);
2899	rxbmp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
2900	if (rxbmp != NULL) {
2901		rxbmp->virt = lpfc_mbuf_alloc(phba, 0, &rxbmp->phys);
2902		if (rxbmp->virt) {
2903			INIT_LIST_HEAD(&rxbmp->list);
2904			rxbpl = (struct ulp_bde64 *) rxbmp->virt;
2905			rxbuffer = diag_cmd_data_alloc(phba, rxbpl, len, 0);
2906		}
2907	}
2908
2909	if (!cmdiocbq || !rxbmp || !rxbpl || !rxbuffer || !pring) {
2910		ret_val = -ENOMEM;
2911		goto err_post_rxbufs_exit;
2912	}
2913
2914	/* Queue buffers for the receive exchange */
2915	num_bde = (uint32_t)rxbuffer->flag;
2916	dmp = &rxbuffer->dma;
2917	cmd = &cmdiocbq->iocb;
2918	i = 0;
2919
2920	INIT_LIST_HEAD(&head);
2921	list_add_tail(&head, &dmp->list);
2922	list_for_each_safe(curr, next, &head) {
2923		mp[i] = list_entry(curr, struct lpfc_dmabuf, list);
2924		list_del(curr);
2925
2926		if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
2927			mp[i]->buffer_tag = lpfc_sli_get_buffer_tag(phba);
2928			cmd->un.quexri64cx.buff.bde.addrHigh =
2929				putPaddrHigh(mp[i]->phys);
2930			cmd->un.quexri64cx.buff.bde.addrLow =
2931				putPaddrLow(mp[i]->phys);
2932			cmd->un.quexri64cx.buff.bde.tus.f.bdeSize =
2933				((struct lpfc_dmabufext *)mp[i])->size;
2934			cmd->un.quexri64cx.buff.buffer_tag = mp[i]->buffer_tag;
2935			cmd->ulpCommand = CMD_QUE_XRI64_CX;
2936			cmd->ulpPU = 0;
2937			cmd->ulpLe = 1;
2938			cmd->ulpBdeCount = 1;
2939			cmd->unsli3.que_xri64cx_ext_words.ebde_count = 0;
2940
2941		} else {
2942			cmd->un.cont64[i].addrHigh = putPaddrHigh(mp[i]->phys);
2943			cmd->un.cont64[i].addrLow = putPaddrLow(mp[i]->phys);
2944			cmd->un.cont64[i].tus.f.bdeSize =
2945				((struct lpfc_dmabufext *)mp[i])->size;
2946			cmd->ulpBdeCount = ++i;
2947
2948			if ((--num_bde > 0) && (i < 2))
2949				continue;
2950
2951			cmd->ulpCommand = CMD_QUE_XRI_BUF64_CX;
2952			cmd->ulpLe = 1;
2953		}
2954
2955		cmd->ulpClass = CLASS3;
2956		cmd->ulpContext = rxxri;
2957
2958		iocb_stat = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, cmdiocbq,
2959						0);
2960		if (iocb_stat == IOCB_ERROR) {
2961			diag_cmd_data_free(phba,
2962				(struct lpfc_dmabufext *)mp[0]);
2963			if (mp[1])
2964				diag_cmd_data_free(phba,
2965					  (struct lpfc_dmabufext *)mp[1]);
2966			dmp = list_entry(next, struct lpfc_dmabuf, list);
2967			ret_val = -EIO;
2968			goto err_post_rxbufs_exit;
2969		}
2970
2971		lpfc_sli_ringpostbuf_put(phba, pring, mp[0]);
2972		if (mp[1]) {
2973			lpfc_sli_ringpostbuf_put(phba, pring, mp[1]);
2974			mp[1] = NULL;
2975		}
2976
2977		/* The iocb was freed by lpfc_sli_issue_iocb */
2978		cmdiocbq = lpfc_sli_get_iocbq(phba);
2979		if (!cmdiocbq) {
2980			dmp = list_entry(next, struct lpfc_dmabuf, list);
2981			ret_val = -EIO;
2982			goto err_post_rxbufs_exit;
2983		}
2984		cmd = &cmdiocbq->iocb;
2985		i = 0;
2986	}
2987	list_del(&head);
2988
2989err_post_rxbufs_exit:
2990
2991	if (rxbmp) {
2992		if (rxbmp->virt)
2993			lpfc_mbuf_free(phba, rxbmp->virt, rxbmp->phys);
2994		kfree(rxbmp);
2995	}
2996
2997	if (cmdiocbq)
2998		lpfc_sli_release_iocbq(phba, cmdiocbq);
2999	return ret_val;
3000}
3001
3002/**
3003 * lpfc_bsg_diag_loopback_run - run loopback on a port by issue ct cmd to itself
3004 * @job: LPFC_BSG_VENDOR_DIAG_TEST fc_bsg_job
3005 *
3006 * This function receives a user data buffer to be transmitted and received on
3007 * the same port, the link must be up and in loopback mode prior
3008 * to being called.
3009 * 1. A kernel buffer is allocated to copy the user data into.
3010 * 2. The port registers with "itself".
3011 * 3. The transmit and receive exchange ids are obtained.
3012 * 4. The receive exchange id is posted.
3013 * 5. A new els loopback event is created.
3014 * 6. The command and response iocbs are allocated.
3015 * 7. The cmd iocb FsType is set to elx loopback and the CmdRsp to looppback.
3016 *
3017 * This function is meant to be called n times while the port is in loopback
3018 * so it is the apps responsibility to issue a reset to take the port out
3019 * of loopback mode.
3020 **/
3021static int
3022lpfc_bsg_diag_loopback_run(struct bsg_job *job)
3023{
3024	struct lpfc_vport *vport = shost_priv(fc_bsg_to_shost(job));
3025	struct fc_bsg_reply *bsg_reply = job->reply;
3026	struct lpfc_hba *phba = vport->phba;
3027	struct lpfc_bsg_event *evt;
3028	struct event_data *evdat;
3029	struct lpfc_sli *psli = &phba->sli;
3030	uint32_t size;
3031	uint32_t full_size;
3032	size_t segment_len = 0, segment_offset = 0, current_offset = 0;
3033	uint16_t rpi = 0;
3034	struct lpfc_iocbq *cmdiocbq, *rspiocbq = NULL;
3035	union lpfc_wqe128 *cmdwqe, *rspwqe;
3036	struct lpfc_sli_ct_request *ctreq;
3037	struct lpfc_dmabuf *txbmp;
3038	struct ulp_bde64 *txbpl = NULL;
3039	struct lpfc_dmabufext *txbuffer = NULL;
3040	struct list_head head;
3041	struct lpfc_dmabuf  *curr;
3042	uint16_t txxri = 0, rxxri;
3043	uint32_t num_bde;
3044	uint8_t *ptr = NULL, *rx_databuf = NULL;
3045	int rc = 0;
3046	int time_left;
3047	int iocb_stat = IOCB_SUCCESS;
3048	unsigned long flags;
3049	void *dataout = NULL;
3050	uint32_t total_mem;
3051
3052	/* in case no data is returned return just the return code */
3053	bsg_reply->reply_payload_rcv_len = 0;
3054
3055	if (job->request_len <
3056	    sizeof(struct fc_bsg_request) + sizeof(struct diag_mode_test)) {
3057		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
3058				"2739 Received DIAG TEST request below minimum "
3059				"size\n");
3060		rc = -EINVAL;
3061		goto loopback_test_exit;
3062	}
3063
3064	if (job->request_payload.payload_len !=
3065		job->reply_payload.payload_len) {
3066		rc = -EINVAL;
3067		goto loopback_test_exit;
3068	}
3069
3070	if ((phba->link_state == LPFC_HBA_ERROR) ||
3071	    (psli->sli_flag & LPFC_BLOCK_MGMT_IO) ||
3072	    (!(psli->sli_flag & LPFC_SLI_ACTIVE))) {
3073		rc = -EACCES;
3074		goto loopback_test_exit;
3075	}
3076
3077	if (!lpfc_is_link_up(phba) || !(phba->link_flag & LS_LOOPBACK_MODE)) {
3078		rc = -EACCES;
3079		goto loopback_test_exit;
3080	}
3081
3082	size = job->request_payload.payload_len;
3083	full_size = size + ELX_LOOPBACK_HEADER_SZ; /* plus the header */
3084
3085	if ((size == 0) || (size > 80 * BUF_SZ_4K)) {
3086		rc = -ERANGE;
3087		goto loopback_test_exit;
3088	}
3089
3090	if (full_size >= BUF_SZ_4K) {
3091		/*
3092		 * Allocate memory for ioctl data. If buffer is bigger than 64k,
3093		 * then we allocate 64k and re-use that buffer over and over to
3094		 * xfer the whole block. This is because Linux kernel has a
3095		 * problem allocating more than 120k of kernel space memory. Saw
3096		 * problem with GET_FCPTARGETMAPPING...
3097		 */
3098		if (size <= (64 * 1024))
3099			total_mem = full_size;
3100		else
3101			total_mem = 64 * 1024;
3102	} else
3103		/* Allocate memory for ioctl data */
3104		total_mem = BUF_SZ_4K;
3105
3106	dataout = kmalloc(total_mem, GFP_KERNEL);
3107	if (dataout == NULL) {
3108		rc = -ENOMEM;
3109		goto loopback_test_exit;
3110	}
3111
3112	ptr = dataout;
3113	ptr += ELX_LOOPBACK_HEADER_SZ;
3114	sg_copy_to_buffer(job->request_payload.sg_list,
3115				job->request_payload.sg_cnt,
3116				ptr, size);
3117	rc = lpfcdiag_loop_self_reg(phba, &rpi);
3118	if (rc)
3119		goto loopback_test_exit;
3120
3121	if (phba->sli_rev < LPFC_SLI_REV4) {
3122		rc = lpfcdiag_loop_get_xri(phba, rpi, &txxri, &rxxri);
3123		if (rc) {
3124			lpfcdiag_loop_self_unreg(phba, rpi);
3125			goto loopback_test_exit;
3126		}
3127
3128		rc = lpfcdiag_sli3_loop_post_rxbufs(phba, rxxri, full_size);
3129		if (rc) {
3130			lpfcdiag_loop_self_unreg(phba, rpi);
3131			goto loopback_test_exit;
3132		}
3133	}
3134	evt = lpfc_bsg_event_new(FC_REG_CT_EVENT, current->pid,
3135				SLI_CT_ELX_LOOPBACK);
3136	if (!evt) {
3137		lpfcdiag_loop_self_unreg(phba, rpi);
3138		rc = -ENOMEM;
3139		goto loopback_test_exit;
3140	}
3141
3142	spin_lock_irqsave(&phba->ct_ev_lock, flags);
3143	list_add(&evt->node, &phba->ct_ev_waiters);
3144	lpfc_bsg_event_ref(evt);
3145	spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
3146
3147	cmdiocbq = lpfc_sli_get_iocbq(phba);
3148	if (phba->sli_rev < LPFC_SLI_REV4)
3149		rspiocbq = lpfc_sli_get_iocbq(phba);
3150	txbmp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
3151
3152	if (txbmp) {
3153		txbmp->virt = lpfc_mbuf_alloc(phba, 0, &txbmp->phys);
3154		if (txbmp->virt) {
3155			INIT_LIST_HEAD(&txbmp->list);
3156			txbpl = (struct ulp_bde64 *) txbmp->virt;
3157			txbuffer = diag_cmd_data_alloc(phba,
3158							txbpl, full_size, 0);
3159		}
3160	}
3161
3162	if (!cmdiocbq || !txbmp || !txbpl || !txbuffer || !txbmp->virt) {
3163		rc = -ENOMEM;
3164		goto err_loopback_test_exit;
3165	}
3166	if ((phba->sli_rev < LPFC_SLI_REV4) && !rspiocbq) {
3167		rc = -ENOMEM;
3168		goto err_loopback_test_exit;
3169	}
3170
3171	cmdwqe = &cmdiocbq->wqe;
3172	memset(cmdwqe, 0, sizeof(*cmdwqe));
3173	if (phba->sli_rev < LPFC_SLI_REV4) {
3174		rspwqe = &rspiocbq->wqe;
3175		memset(rspwqe, 0, sizeof(*rspwqe));
3176	}
3177
3178	INIT_LIST_HEAD(&head);
3179	list_add_tail(&head, &txbuffer->dma.list);
3180	list_for_each_entry(curr, &head, list) {
3181		segment_len = ((struct lpfc_dmabufext *)curr)->size;
3182		if (current_offset == 0) {
3183			ctreq = curr->virt;
3184			memset(ctreq, 0, ELX_LOOPBACK_HEADER_SZ);
3185			ctreq->RevisionId.bits.Revision = SLI_CT_REVISION;
3186			ctreq->RevisionId.bits.InId = 0;
3187			ctreq->FsType = SLI_CT_ELX_LOOPBACK;
3188			ctreq->FsSubType = 0;
3189			ctreq->CommandResponse.bits.CmdRsp = cpu_to_be16(ELX_LOOPBACK_DATA);
3190			ctreq->CommandResponse.bits.Size   = cpu_to_be16(size);
3191			segment_offset = ELX_LOOPBACK_HEADER_SZ;
3192		} else
3193			segment_offset = 0;
3194
3195		BUG_ON(segment_offset >= segment_len);
3196		memcpy(curr->virt + segment_offset,
3197			ptr + current_offset,
3198			segment_len - segment_offset);
3199
3200		current_offset += segment_len - segment_offset;
3201		BUG_ON(current_offset > size);
3202	}
3203	list_del(&head);
3204
3205	/* Build the XMIT_SEQUENCE iocb */
3206	num_bde = (uint32_t)txbuffer->flag;
3207
3208	cmdiocbq->num_bdes = num_bde;
3209	cmdiocbq->cmd_flag |= LPFC_IO_LIBDFC;
3210	cmdiocbq->cmd_flag |= LPFC_IO_LOOPBACK;
3211	cmdiocbq->vport = phba->pport;
3212	cmdiocbq->cmd_cmpl = NULL;
3213	cmdiocbq->bpl_dmabuf = txbmp;
3214
3215	if (phba->sli_rev < LPFC_SLI_REV4) {
3216		lpfc_sli_prep_xmit_seq64(phba, cmdiocbq, txbmp, 0, txxri,
3217					 num_bde, FC_RCTL_DD_UNSOL_CTL, 1,
3218					 CMD_XMIT_SEQUENCE64_CX);
3219
3220	} else {
3221		lpfc_sli_prep_xmit_seq64(phba, cmdiocbq, txbmp,
3222					 phba->sli4_hba.rpi_ids[rpi], 0xffff,
3223					 full_size, FC_RCTL_DD_UNSOL_CTL, 1,
3224					 CMD_XMIT_SEQUENCE64_WQE);
3225		cmdiocbq->sli4_xritag = NO_XRI;
3226	}
3227
3228	iocb_stat = lpfc_sli_issue_iocb_wait(phba, LPFC_ELS_RING, cmdiocbq,
3229					     rspiocbq, (phba->fc_ratov * 2) +
3230					     LPFC_DRVR_TIMEOUT);
3231	if (iocb_stat != IOCB_SUCCESS ||
3232	    (phba->sli_rev < LPFC_SLI_REV4 &&
3233	     (get_job_ulpstatus(phba, rspiocbq) != IOSTAT_SUCCESS))) {
3234		lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
3235				"3126 Failed loopback test issue iocb: "
3236				"iocb_stat:x%x\n", iocb_stat);
3237		rc = -EIO;
3238		goto err_loopback_test_exit;
3239	}
3240
3241	evt->waiting = 1;
3242	time_left = wait_event_interruptible_timeout(
3243		evt->wq, !list_empty(&evt->events_to_see),
3244		msecs_to_jiffies(1000 *
3245			((phba->fc_ratov * 2) + LPFC_DRVR_TIMEOUT)));
3246	evt->waiting = 0;
3247	if (list_empty(&evt->events_to_see)) {
3248		rc = (time_left) ? -EINTR : -ETIMEDOUT;
3249		lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
3250				"3125 Not receiving unsolicited event, "
3251				"rc:x%x\n", rc);
3252	} else {
3253		spin_lock_irqsave(&phba->ct_ev_lock, flags);
3254		list_move(evt->events_to_see.prev, &evt->events_to_get);
3255		evdat = list_entry(evt->events_to_get.prev,
3256				   typeof(*evdat), node);
3257		spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
3258		rx_databuf = evdat->data;
3259		if (evdat->len != full_size) {
3260			lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
3261				"1603 Loopback test did not receive expected "
3262				"data length. actual length 0x%x expected "
3263				"length 0x%x\n",
3264				evdat->len, full_size);
3265			rc = -EIO;
3266		} else if (rx_databuf == NULL)
3267			rc = -EIO;
3268		else {
3269			rc = IOCB_SUCCESS;
3270			/* skip over elx loopback header */
3271			rx_databuf += ELX_LOOPBACK_HEADER_SZ;
3272			bsg_reply->reply_payload_rcv_len =
3273				sg_copy_from_buffer(job->reply_payload.sg_list,
3274						    job->reply_payload.sg_cnt,
3275						    rx_databuf, size);
3276			bsg_reply->reply_payload_rcv_len = size;
3277		}
3278	}
3279
3280err_loopback_test_exit:
3281	lpfcdiag_loop_self_unreg(phba, rpi);
3282
3283	spin_lock_irqsave(&phba->ct_ev_lock, flags);
3284	lpfc_bsg_event_unref(evt); /* release ref */
3285	lpfc_bsg_event_unref(evt); /* delete */
3286	spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
3287
3288	if ((cmdiocbq != NULL) && (iocb_stat != IOCB_TIMEDOUT))
3289		lpfc_sli_release_iocbq(phba, cmdiocbq);
3290
3291	if (rspiocbq != NULL)
3292		lpfc_sli_release_iocbq(phba, rspiocbq);
3293
3294	if (txbmp != NULL) {
3295		if (txbpl != NULL) {
3296			if (txbuffer != NULL)
3297				diag_cmd_data_free(phba, txbuffer);
3298			lpfc_mbuf_free(phba, txbmp->virt, txbmp->phys);
3299		}
3300		kfree(txbmp);
3301	}
3302
3303loopback_test_exit:
3304	kfree(dataout);
3305	/* make error code available to userspace */
3306	bsg_reply->result = rc;
3307	job->dd_data = NULL;
3308	/* complete the job back to userspace if no error */
3309	if (rc == IOCB_SUCCESS)
3310		bsg_job_done(job, bsg_reply->result,
3311			       bsg_reply->reply_payload_rcv_len);
3312	return rc;
3313}
3314
3315/**
3316 * lpfc_bsg_get_dfc_rev - process a GET_DFC_REV bsg vendor command
3317 * @job: GET_DFC_REV fc_bsg_job
3318 **/
3319static int
3320lpfc_bsg_get_dfc_rev(struct bsg_job *job)
3321{
3322	struct lpfc_vport *vport = shost_priv(fc_bsg_to_shost(job));
3323	struct fc_bsg_reply *bsg_reply = job->reply;
3324	struct lpfc_hba *phba = vport->phba;
3325	struct get_mgmt_rev_reply *event_reply;
3326	int rc = 0;
3327
3328	if (job->request_len <
3329	    sizeof(struct fc_bsg_request) + sizeof(struct get_mgmt_rev)) {
3330		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
3331				"2740 Received GET_DFC_REV request below "
3332				"minimum size\n");
3333		rc = -EINVAL;
3334		goto job_error;
3335	}
3336
3337	event_reply = (struct get_mgmt_rev_reply *)
3338		bsg_reply->reply_data.vendor_reply.vendor_rsp;
3339
3340	if (job->reply_len < sizeof(*bsg_reply) + sizeof(*event_reply)) {
3341		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
3342				"2741 Received GET_DFC_REV reply below "
3343				"minimum size\n");
3344		rc = -EINVAL;
3345		goto job_error;
3346	}
3347
3348	event_reply->info.a_Major = MANAGEMENT_MAJOR_REV;
3349	event_reply->info.a_Minor = MANAGEMENT_MINOR_REV;
3350job_error:
3351	bsg_reply->result = rc;
3352	if (rc == 0)
3353		bsg_job_done(job, bsg_reply->result,
3354			       bsg_reply->reply_payload_rcv_len);
3355	return rc;
3356}
3357
3358/**
3359 * lpfc_bsg_issue_mbox_cmpl - lpfc_bsg_issue_mbox mbox completion handler
3360 * @phba: Pointer to HBA context object.
3361 * @pmboxq: Pointer to mailbox command.
3362 *
3363 * This is completion handler function for mailbox commands issued from
3364 * lpfc_bsg_issue_mbox function. This function is called by the
3365 * mailbox event handler function with no lock held. This function
3366 * will wake up thread waiting on the wait queue pointed by dd_data
3367 * of the mailbox.
3368 **/
3369static void
3370lpfc_bsg_issue_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
3371{
3372	struct bsg_job_data *dd_data;
3373	struct fc_bsg_reply *bsg_reply;
3374	struct bsg_job *job;
3375	uint32_t size;
3376	unsigned long flags;
3377	uint8_t *pmb, *pmb_buf;
3378
3379	dd_data = pmboxq->ctx_u.dd_data;
3380
3381	/*
3382	 * The outgoing buffer is readily referred from the dma buffer,
3383	 * just need to get header part from mailboxq structure.
3384	 */
3385	pmb = (uint8_t *)&pmboxq->u.mb;
3386	pmb_buf = (uint8_t *)dd_data->context_un.mbox.mb;
3387	memcpy(pmb_buf, pmb, sizeof(MAILBOX_t));
3388
3389	/* Determine if job has been aborted */
3390
3391	spin_lock_irqsave(&phba->ct_ev_lock, flags);
3392	job = dd_data->set_job;
3393	if (job) {
3394		/* Prevent timeout handling from trying to abort job  */
3395		job->dd_data = NULL;
3396	}
3397	spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
3398
3399	/* Copy the mailbox data to the job if it is still active */
3400
3401	if (job) {
3402		bsg_reply = job->reply;
3403		size = job->reply_payload.payload_len;
3404		bsg_reply->reply_payload_rcv_len =
3405			sg_copy_from_buffer(job->reply_payload.sg_list,
3406					    job->reply_payload.sg_cnt,
3407					    pmb_buf, size);
3408	}
3409
3410	dd_data->set_job = NULL;
3411	mempool_free(dd_data->context_un.mbox.pmboxq, phba->mbox_mem_pool);
3412	lpfc_bsg_dma_page_free(phba, dd_data->context_un.mbox.dmabuffers);
3413	kfree(dd_data);
3414
3415	/* Complete the job if the job is still active */
3416
3417	if (job) {
3418		bsg_reply->result = 0;
3419		bsg_job_done(job, bsg_reply->result,
3420			       bsg_reply->reply_payload_rcv_len);
3421	}
3422	return;
3423}
3424
3425/**
3426 * lpfc_bsg_check_cmd_access - test for a supported mailbox command
3427 * @phba: Pointer to HBA context object.
3428 * @mb: Pointer to a mailbox object.
3429 * @vport: Pointer to a vport object.
3430 *
3431 * Some commands require the port to be offline, some may not be called from
3432 * the application.
3433 **/
3434static int lpfc_bsg_check_cmd_access(struct lpfc_hba *phba,
3435	MAILBOX_t *mb, struct lpfc_vport *vport)
3436{
3437	/* return negative error values for bsg job */
3438	switch (mb->mbxCommand) {
3439	/* Offline only */
3440	case MBX_INIT_LINK:
3441	case MBX_DOWN_LINK:
3442	case MBX_CONFIG_LINK:
3443	case MBX_CONFIG_RING:
3444	case MBX_RESET_RING:
3445	case MBX_UNREG_LOGIN:
3446	case MBX_CLEAR_LA:
3447	case MBX_DUMP_CONTEXT:
3448	case MBX_RUN_DIAGS:
3449	case MBX_RESTART:
3450	case MBX_SET_MASK:
3451		if (!test_bit(FC_OFFLINE_MODE, &vport->fc_flag)) {
3452			lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
3453				"2743 Command 0x%x is illegal in on-line "
3454				"state\n",
3455				mb->mbxCommand);
3456			return -EPERM;
3457		}
3458		break;
3459	case MBX_WRITE_NV:
3460	case MBX_WRITE_VPARMS:
3461	case MBX_LOAD_SM:
3462	case MBX_READ_NV:
3463	case MBX_READ_CONFIG:
3464	case MBX_READ_RCONFIG:
3465	case MBX_READ_STATUS:
3466	case MBX_READ_XRI:
3467	case MBX_READ_REV:
3468	case MBX_READ_LNK_STAT:
3469	case MBX_DUMP_MEMORY:
3470	case MBX_DOWN_LOAD:
3471	case MBX_UPDATE_CFG:
3472	case MBX_KILL_BOARD:
3473	case MBX_READ_TOPOLOGY:
3474	case MBX_LOAD_AREA:
3475	case MBX_LOAD_EXP_ROM:
3476	case MBX_BEACON:
3477	case MBX_DEL_LD_ENTRY:
3478	case MBX_SET_DEBUG:
3479	case MBX_WRITE_WWN:
3480	case MBX_SLI4_CONFIG:
3481	case MBX_READ_EVENT_LOG:
3482	case MBX_READ_EVENT_LOG_STATUS:
3483	case MBX_WRITE_EVENT_LOG:
3484	case MBX_PORT_CAPABILITIES:
3485	case MBX_PORT_IOV_CONTROL:
3486	case MBX_RUN_BIU_DIAG64:
3487		break;
3488	case MBX_SET_VARIABLE:
3489		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
3490			"1226 mbox: set_variable 0x%x, 0x%x\n",
3491			mb->un.varWords[0],
3492			mb->un.varWords[1]);
3493		break;
3494	case MBX_READ_SPARM64:
3495	case MBX_REG_LOGIN:
3496	case MBX_REG_LOGIN64:
3497	case MBX_CONFIG_PORT:
3498	case MBX_RUN_BIU_DIAG:
3499	default:
3500		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
3501			"2742 Unknown Command 0x%x\n",
3502			mb->mbxCommand);
3503		return -EPERM;
3504	}
3505
3506	return 0; /* ok */
3507}
3508
3509/**
3510 * lpfc_bsg_mbox_ext_session_reset - clean up context of multi-buffer mbox session
3511 * @phba: Pointer to HBA context object.
3512 *
3513 * This is routine clean up and reset BSG handling of multi-buffer mbox
3514 * command session.
3515 **/
3516static void
3517lpfc_bsg_mbox_ext_session_reset(struct lpfc_hba *phba)
3518{
3519	if (phba->mbox_ext_buf_ctx.state == LPFC_BSG_MBOX_IDLE)
3520		return;
3521
3522	/* free all memory, including dma buffers */
3523	lpfc_bsg_dma_page_list_free(phba,
3524				    &phba->mbox_ext_buf_ctx.ext_dmabuf_list);
3525	lpfc_bsg_dma_page_free(phba, phba->mbox_ext_buf_ctx.mbx_dmabuf);
3526	/* multi-buffer write mailbox command pass-through complete */
3527	memset((char *)&phba->mbox_ext_buf_ctx, 0,
3528	       sizeof(struct lpfc_mbox_ext_buf_ctx));
3529	INIT_LIST_HEAD(&phba->mbox_ext_buf_ctx.ext_dmabuf_list);
3530
3531	return;
3532}
3533
3534/**
3535 * lpfc_bsg_issue_mbox_ext_handle_job - job handler for multi-buffer mbox cmpl
3536 * @phba: Pointer to HBA context object.
3537 * @pmboxq: Pointer to mailbox command.
3538 *
3539 * This is routine handles BSG job for mailbox commands completions with
3540 * multiple external buffers.
3541 **/
3542static struct bsg_job *
3543lpfc_bsg_issue_mbox_ext_handle_job(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
3544{
3545	struct bsg_job_data *dd_data;
3546	struct bsg_job *job;
3547	struct fc_bsg_reply *bsg_reply;
3548	uint8_t *pmb, *pmb_buf;
3549	unsigned long flags;
3550	uint32_t size;
3551	int rc = 0;
3552	struct lpfc_dmabuf *dmabuf;
3553	struct lpfc_sli_config_mbox *sli_cfg_mbx;
3554	uint8_t *pmbx;
3555
3556	dd_data = pmboxq->ctx_u.dd_data;
3557
3558	/* Determine if job has been aborted */
3559	spin_lock_irqsave(&phba->ct_ev_lock, flags);
3560	job = dd_data->set_job;
3561	if (job) {
3562		bsg_reply = job->reply;
3563		/* Prevent timeout handling from trying to abort job  */
3564		job->dd_data = NULL;
3565	}
3566	spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
3567
3568	/*
3569	 * The outgoing buffer is readily referred from the dma buffer,
3570	 * just need to get header part from mailboxq structure.
3571	 */
3572
3573	pmb = (uint8_t *)&pmboxq->u.mb;
3574	pmb_buf = (uint8_t *)dd_data->context_un.mbox.mb;
3575	/* Copy the byte swapped response mailbox back to the user */
3576	memcpy(pmb_buf, pmb, sizeof(MAILBOX_t));
3577	/* if there is any non-embedded extended data copy that too */
3578	dmabuf = phba->mbox_ext_buf_ctx.mbx_dmabuf;
3579	sli_cfg_mbx = (struct lpfc_sli_config_mbox *)dmabuf->virt;
3580	if (!bsg_bf_get(lpfc_mbox_hdr_emb,
3581	    &sli_cfg_mbx->un.sli_config_emb0_subsys.sli_config_hdr)) {
3582		pmbx = (uint8_t *)dmabuf->virt;
3583		/* byte swap the extended data following the mailbox command */
3584		lpfc_sli_pcimem_bcopy(&pmbx[sizeof(MAILBOX_t)],
3585			&pmbx[sizeof(MAILBOX_t)],
3586			sli_cfg_mbx->un.sli_config_emb0_subsys.mse[0].buf_len);
3587	}
3588
3589	/* Complete the job if the job is still active */
3590
3591	if (job) {
3592		size = job->reply_payload.payload_len;
3593		bsg_reply->reply_payload_rcv_len =
3594			sg_copy_from_buffer(job->reply_payload.sg_list,
3595					    job->reply_payload.sg_cnt,
3596					    pmb_buf, size);
3597
3598		/* result for successful */
3599		bsg_reply->result = 0;
3600
3601		lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
3602				"2937 SLI_CONFIG ext-buffer mailbox command "
3603				"(x%x/x%x) complete bsg job done, bsize:%d\n",
3604				phba->mbox_ext_buf_ctx.nembType,
3605				phba->mbox_ext_buf_ctx.mboxType, size);
3606		lpfc_idiag_mbxacc_dump_bsg_mbox(phba,
3607					phba->mbox_ext_buf_ctx.nembType,
3608					phba->mbox_ext_buf_ctx.mboxType,
3609					dma_ebuf, sta_pos_addr,
3610					phba->mbox_ext_buf_ctx.mbx_dmabuf, 0);
3611	} else {
3612		lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
3613				"2938 SLI_CONFIG ext-buffer mailbox "
3614				"command (x%x/x%x) failure, rc:x%x\n",
3615				phba->mbox_ext_buf_ctx.nembType,
3616				phba->mbox_ext_buf_ctx.mboxType, rc);
3617	}
3618
3619
3620	/* state change */
3621	phba->mbox_ext_buf_ctx.state = LPFC_BSG_MBOX_DONE;
3622	kfree(dd_data);
3623	return job;
3624}
3625
3626/**
3627 * lpfc_bsg_issue_read_mbox_ext_cmpl - compl handler for multi-buffer read mbox
3628 * @phba: Pointer to HBA context object.
3629 * @pmboxq: Pointer to mailbox command.
3630 *
3631 * This is completion handler function for mailbox read commands with multiple
3632 * external buffers.
3633 **/
3634static void
3635lpfc_bsg_issue_read_mbox_ext_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
3636{
3637	struct bsg_job *job;
3638	struct fc_bsg_reply *bsg_reply;
3639
3640	job = lpfc_bsg_issue_mbox_ext_handle_job(phba, pmboxq);
3641
3642	/* handle the BSG job with mailbox command */
3643	if (!job)
3644		pmboxq->u.mb.mbxStatus = MBXERR_ERROR;
3645
3646	lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
3647			"2939 SLI_CONFIG ext-buffer rd mailbox command "
3648			"complete, ctxState:x%x, mbxStatus:x%x\n",
3649			phba->mbox_ext_buf_ctx.state, pmboxq->u.mb.mbxStatus);
3650
3651	if (pmboxq->u.mb.mbxStatus || phba->mbox_ext_buf_ctx.numBuf == 1)
3652		lpfc_bsg_mbox_ext_session_reset(phba);
3653
3654	/* free base driver mailbox structure memory */
3655	mempool_free(pmboxq, phba->mbox_mem_pool);
3656
3657	/* if the job is still active, call job done */
3658	if (job) {
3659		bsg_reply = job->reply;
3660		bsg_job_done(job, bsg_reply->result,
3661			       bsg_reply->reply_payload_rcv_len);
3662	}
3663	return;
3664}
3665
3666/**
3667 * lpfc_bsg_issue_write_mbox_ext_cmpl - cmpl handler for multi-buffer write mbox
3668 * @phba: Pointer to HBA context object.
3669 * @pmboxq: Pointer to mailbox command.
3670 *
3671 * This is completion handler function for mailbox write commands with multiple
3672 * external buffers.
3673 **/
3674static void
3675lpfc_bsg_issue_write_mbox_ext_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
3676{
3677	struct bsg_job *job;
3678	struct fc_bsg_reply *bsg_reply;
3679
3680	job = lpfc_bsg_issue_mbox_ext_handle_job(phba, pmboxq);
3681
3682	/* handle the BSG job with the mailbox command */
3683	if (!job)
3684		pmboxq->u.mb.mbxStatus = MBXERR_ERROR;
3685
3686	lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
3687			"2940 SLI_CONFIG ext-buffer wr mailbox command "
3688			"complete, ctxState:x%x, mbxStatus:x%x\n",
3689			phba->mbox_ext_buf_ctx.state, pmboxq->u.mb.mbxStatus);
3690
3691	/* free all memory, including dma buffers */
3692	mempool_free(pmboxq, phba->mbox_mem_pool);
3693	lpfc_bsg_mbox_ext_session_reset(phba);
3694
3695	/* if the job is still active, call job done */
3696	if (job) {
3697		bsg_reply = job->reply;
3698		bsg_job_done(job, bsg_reply->result,
3699			       bsg_reply->reply_payload_rcv_len);
3700	}
3701
3702	return;
3703}
3704
3705static void
3706lpfc_bsg_sli_cfg_dma_desc_setup(struct lpfc_hba *phba, enum nemb_type nemb_tp,
3707				uint32_t index, struct lpfc_dmabuf *mbx_dmabuf,
3708				struct lpfc_dmabuf *ext_dmabuf)
3709{
3710	struct lpfc_sli_config_mbox *sli_cfg_mbx;
3711
3712	/* pointer to the start of mailbox command */
3713	sli_cfg_mbx = (struct lpfc_sli_config_mbox *)mbx_dmabuf->virt;
3714
3715	if (nemb_tp == nemb_mse) {
3716		if (index == 0) {
3717			sli_cfg_mbx->un.sli_config_emb0_subsys.
3718				mse[index].pa_hi =
3719				putPaddrHigh(mbx_dmabuf->phys +
3720					     sizeof(MAILBOX_t));
3721			sli_cfg_mbx->un.sli_config_emb0_subsys.
3722				mse[index].pa_lo =
3723				putPaddrLow(mbx_dmabuf->phys +
3724					    sizeof(MAILBOX_t));
3725			lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
3726					"2943 SLI_CONFIG(mse)[%d], "
3727					"bufLen:%d, addrHi:x%x, addrLo:x%x\n",
3728					index,
3729					sli_cfg_mbx->un.sli_config_emb0_subsys.
3730					mse[index].buf_len,
3731					sli_cfg_mbx->un.sli_config_emb0_subsys.
3732					mse[index].pa_hi,
3733					sli_cfg_mbx->un.sli_config_emb0_subsys.
3734					mse[index].pa_lo);
3735		} else {
3736			sli_cfg_mbx->un.sli_config_emb0_subsys.
3737				mse[index].pa_hi =
3738				putPaddrHigh(ext_dmabuf->phys);
3739			sli_cfg_mbx->un.sli_config_emb0_subsys.
3740				mse[index].pa_lo =
3741				putPaddrLow(ext_dmabuf->phys);
3742			lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
3743					"2944 SLI_CONFIG(mse)[%d], "
3744					"bufLen:%d, addrHi:x%x, addrLo:x%x\n",
3745					index,
3746					sli_cfg_mbx->un.sli_config_emb0_subsys.
3747					mse[index].buf_len,
3748					sli_cfg_mbx->un.sli_config_emb0_subsys.
3749					mse[index].pa_hi,
3750					sli_cfg_mbx->un.sli_config_emb0_subsys.
3751					mse[index].pa_lo);
3752		}
3753	} else {
3754		if (index == 0) {
3755			sli_cfg_mbx->un.sli_config_emb1_subsys.
3756				hbd[index].pa_hi =
3757				putPaddrHigh(mbx_dmabuf->phys +
3758					     sizeof(MAILBOX_t));
3759			sli_cfg_mbx->un.sli_config_emb1_subsys.
3760				hbd[index].pa_lo =
3761				putPaddrLow(mbx_dmabuf->phys +
3762					    sizeof(MAILBOX_t));
3763			lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
3764					"3007 SLI_CONFIG(hbd)[%d], "
3765					"bufLen:%d, addrHi:x%x, addrLo:x%x\n",
3766				index,
3767				bsg_bf_get(lpfc_mbox_sli_config_ecmn_hbd_len,
3768				&sli_cfg_mbx->un.
3769				sli_config_emb1_subsys.hbd[index]),
3770				sli_cfg_mbx->un.sli_config_emb1_subsys.
3771				hbd[index].pa_hi,
3772				sli_cfg_mbx->un.sli_config_emb1_subsys.
3773				hbd[index].pa_lo);
3774
3775		} else {
3776			sli_cfg_mbx->un.sli_config_emb1_subsys.
3777				hbd[index].pa_hi =
3778				putPaddrHigh(ext_dmabuf->phys);
3779			sli_cfg_mbx->un.sli_config_emb1_subsys.
3780				hbd[index].pa_lo =
3781				putPaddrLow(ext_dmabuf->phys);
3782			lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
3783					"3008 SLI_CONFIG(hbd)[%d], "
3784					"bufLen:%d, addrHi:x%x, addrLo:x%x\n",
3785				index,
3786				bsg_bf_get(lpfc_mbox_sli_config_ecmn_hbd_len,
3787				&sli_cfg_mbx->un.
3788				sli_config_emb1_subsys.hbd[index]),
3789				sli_cfg_mbx->un.sli_config_emb1_subsys.
3790				hbd[index].pa_hi,
3791				sli_cfg_mbx->un.sli_config_emb1_subsys.
3792				hbd[index].pa_lo);
3793		}
3794	}
3795	return;
3796}
3797
3798/**
3799 * lpfc_bsg_sli_cfg_read_cmd_ext - sli_config non-embedded mailbox cmd read
3800 * @phba: Pointer to HBA context object.
3801 * @job: Pointer to the job object.
3802 * @nemb_tp: Enumerate of non-embedded mailbox command type.
3803 * @dmabuf: Pointer to a DMA buffer descriptor.
3804 *
3805 * This routine performs SLI_CONFIG (0x9B) read mailbox command operation with
3806 * non-embedded external buffers.
3807 **/
3808static int
3809lpfc_bsg_sli_cfg_read_cmd_ext(struct lpfc_hba *phba, struct bsg_job *job,
3810			      enum nemb_type nemb_tp,
3811			      struct lpfc_dmabuf *dmabuf)
3812{
3813	struct fc_bsg_request *bsg_request = job->request;
3814	struct lpfc_sli_config_mbox *sli_cfg_mbx;
3815	struct dfc_mbox_req *mbox_req;
3816	struct lpfc_dmabuf *curr_dmabuf, *next_dmabuf;
3817	uint32_t ext_buf_cnt, ext_buf_index;
3818	struct lpfc_dmabuf *ext_dmabuf = NULL;
3819	struct bsg_job_data *dd_data = NULL;
3820	LPFC_MBOXQ_t *pmboxq = NULL;
3821	MAILBOX_t *pmb;
3822	uint8_t *pmbx;
3823	int rc, i;
3824
3825	mbox_req =
3826	   (struct dfc_mbox_req *)bsg_request->rqst_data.h_vendor.vendor_cmd;
3827
3828	/* pointer to the start of mailbox command */
3829	sli_cfg_mbx = (struct lpfc_sli_config_mbox *)dmabuf->virt;
3830
3831	if (nemb_tp == nemb_mse) {
3832		ext_buf_cnt = bsg_bf_get(lpfc_mbox_hdr_mse_cnt,
3833			&sli_cfg_mbx->un.sli_config_emb0_subsys.sli_config_hdr);
3834		if (ext_buf_cnt > LPFC_MBX_SLI_CONFIG_MAX_MSE) {
3835			lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
3836					"2945 Handled SLI_CONFIG(mse) rd, "
3837					"ext_buf_cnt(%d) out of range(%d)\n",
3838					ext_buf_cnt,
3839					LPFC_MBX_SLI_CONFIG_MAX_MSE);
3840			rc = -ERANGE;
3841			goto job_error;
3842		}
3843		lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
3844				"2941 Handled SLI_CONFIG(mse) rd, "
3845				"ext_buf_cnt:%d\n", ext_buf_cnt);
3846	} else {
3847		/* sanity check on interface type for support */
3848		if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) <
3849		    LPFC_SLI_INTF_IF_TYPE_2) {
3850			rc = -ENODEV;
3851			goto job_error;
3852		}
3853		/* nemb_tp == nemb_hbd */
3854		ext_buf_cnt = sli_cfg_mbx->un.sli_config_emb1_subsys.hbd_count;
3855		if (ext_buf_cnt > LPFC_MBX_SLI_CONFIG_MAX_HBD) {
3856			lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
3857					"2946 Handled SLI_CONFIG(hbd) rd, "
3858					"ext_buf_cnt(%d) out of range(%d)\n",
3859					ext_buf_cnt,
3860					LPFC_MBX_SLI_CONFIG_MAX_HBD);
3861			rc = -ERANGE;
3862			goto job_error;
3863		}
3864		lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
3865				"2942 Handled SLI_CONFIG(hbd) rd, "
3866				"ext_buf_cnt:%d\n", ext_buf_cnt);
3867	}
3868
3869	/* before dma descriptor setup */
3870	lpfc_idiag_mbxacc_dump_bsg_mbox(phba, nemb_tp, mbox_rd, dma_mbox,
3871					sta_pre_addr, dmabuf, ext_buf_cnt);
3872
3873	/* reject non-embedded mailbox command with none external buffer */
3874	if (ext_buf_cnt == 0) {
3875		rc = -EPERM;
3876		goto job_error;
3877	} else if (ext_buf_cnt > 1) {
3878		/* additional external read buffers */
3879		for (i = 1; i < ext_buf_cnt; i++) {
3880			ext_dmabuf = lpfc_bsg_dma_page_alloc(phba);
3881			if (!ext_dmabuf) {
3882				rc = -ENOMEM;
3883				goto job_error;
3884			}
3885			list_add_tail(&ext_dmabuf->list,
3886				      &phba->mbox_ext_buf_ctx.ext_dmabuf_list);
3887		}
3888	}
3889
3890	/* bsg tracking structure */
3891	dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL);
3892	if (!dd_data) {
3893		rc = -ENOMEM;
3894		goto job_error;
3895	}
3896
3897	/* mailbox command structure for base driver */
3898	pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
3899	if (!pmboxq) {
3900		rc = -ENOMEM;
3901		goto job_error;
3902	}
3903	memset(pmboxq, 0, sizeof(LPFC_MBOXQ_t));
3904
3905	/* for the first external buffer */
3906	lpfc_bsg_sli_cfg_dma_desc_setup(phba, nemb_tp, 0, dmabuf, dmabuf);
3907
3908	/* for the rest of external buffer descriptors if any */
3909	if (ext_buf_cnt > 1) {
3910		ext_buf_index = 1;
3911		list_for_each_entry_safe(curr_dmabuf, next_dmabuf,
3912				&phba->mbox_ext_buf_ctx.ext_dmabuf_list, list) {
3913			lpfc_bsg_sli_cfg_dma_desc_setup(phba, nemb_tp,
3914						ext_buf_index, dmabuf,
3915						curr_dmabuf);
3916			ext_buf_index++;
3917		}
3918	}
3919
3920	/* after dma descriptor setup */
3921	lpfc_idiag_mbxacc_dump_bsg_mbox(phba, nemb_tp, mbox_rd, dma_mbox,
3922					sta_pos_addr, dmabuf, ext_buf_cnt);
3923
3924	/* construct base driver mbox command */
3925	pmb = &pmboxq->u.mb;
3926	pmbx = (uint8_t *)dmabuf->virt;
3927	memcpy(pmb, pmbx, sizeof(*pmb));
3928	pmb->mbxOwner = OWN_HOST;
3929	pmboxq->vport = phba->pport;
3930
3931	/* multi-buffer handling context */
3932	phba->mbox_ext_buf_ctx.nembType = nemb_tp;
3933	phba->mbox_ext_buf_ctx.mboxType = mbox_rd;
3934	phba->mbox_ext_buf_ctx.numBuf = ext_buf_cnt;
3935	phba->mbox_ext_buf_ctx.mbxTag = mbox_req->extMboxTag;
3936	phba->mbox_ext_buf_ctx.seqNum = mbox_req->extSeqNum;
3937	phba->mbox_ext_buf_ctx.mbx_dmabuf = dmabuf;
3938
3939	/* callback for multi-buffer read mailbox command */
3940	pmboxq->mbox_cmpl = lpfc_bsg_issue_read_mbox_ext_cmpl;
3941
3942	/* context fields to callback function */
3943	pmboxq->ctx_u.dd_data = dd_data;
3944	dd_data->type = TYPE_MBOX;
3945	dd_data->set_job = job;
3946	dd_data->context_un.mbox.pmboxq = pmboxq;
3947	dd_data->context_un.mbox.mb = (MAILBOX_t *)pmbx;
3948	job->dd_data = dd_data;
3949
3950	/* state change */
3951	phba->mbox_ext_buf_ctx.state = LPFC_BSG_MBOX_PORT;
3952
3953	/*
3954	 * Non-embedded mailbox subcommand data gets byte swapped here because
3955	 * the lower level driver code only does the first 64 mailbox words.
3956	 */
3957	if ((!bsg_bf_get(lpfc_mbox_hdr_emb,
3958	    &sli_cfg_mbx->un.sli_config_emb0_subsys.sli_config_hdr)) &&
3959		(nemb_tp == nemb_mse))
3960		lpfc_sli_pcimem_bcopy(&pmbx[sizeof(MAILBOX_t)],
3961			&pmbx[sizeof(MAILBOX_t)],
3962				sli_cfg_mbx->un.sli_config_emb0_subsys.
3963					mse[0].buf_len);
3964
3965	rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_NOWAIT);
3966	if ((rc == MBX_SUCCESS) || (rc == MBX_BUSY)) {
3967		lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
3968				"2947 Issued SLI_CONFIG ext-buffer "
3969				"mailbox command, rc:x%x\n", rc);
3970		return SLI_CONFIG_HANDLED;
3971	}
3972	lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
3973			"2948 Failed to issue SLI_CONFIG ext-buffer "
3974			"mailbox command, rc:x%x\n", rc);
3975	rc = -EPIPE;
3976
3977job_error:
3978	if (pmboxq)
3979		mempool_free(pmboxq, phba->mbox_mem_pool);
3980	lpfc_bsg_dma_page_list_free(phba,
3981				    &phba->mbox_ext_buf_ctx.ext_dmabuf_list);
3982	kfree(dd_data);
3983	phba->mbox_ext_buf_ctx.state = LPFC_BSG_MBOX_IDLE;
3984	return rc;
3985}
3986
3987/**
3988 * lpfc_bsg_sli_cfg_write_cmd_ext - sli_config non-embedded mailbox cmd write
3989 * @phba: Pointer to HBA context object.
3990 * @job: Pointer to the job object.
3991 * @nemb_tp: Enumerate of non-embedded mailbox command type.
3992 * @dmabuf: Pointer to a DMA buffer descriptor.
3993 *
3994 * This routine performs SLI_CONFIG (0x9B) write mailbox command operation with
3995 * non-embedded external buffers.
3996 **/
3997static int
3998lpfc_bsg_sli_cfg_write_cmd_ext(struct lpfc_hba *phba, struct bsg_job *job,
3999			       enum nemb_type nemb_tp,
4000			       struct lpfc_dmabuf *dmabuf)
4001{
4002	struct fc_bsg_request *bsg_request = job->request;
4003	struct fc_bsg_reply *bsg_reply = job->reply;
4004	struct dfc_mbox_req *mbox_req;
4005	struct lpfc_sli_config_mbox *sli_cfg_mbx;
4006	uint32_t ext_buf_cnt;
4007	struct bsg_job_data *dd_data = NULL;
4008	LPFC_MBOXQ_t *pmboxq = NULL;
4009	MAILBOX_t *pmb;
4010	uint8_t *mbx;
4011	int rc = SLI_CONFIG_NOT_HANDLED, i;
4012
4013	mbox_req =
4014	   (struct dfc_mbox_req *)bsg_request->rqst_data.h_vendor.vendor_cmd;
4015
4016	/* pointer to the start of mailbox command */
4017	sli_cfg_mbx = (struct lpfc_sli_config_mbox *)dmabuf->virt;
4018
4019	if (nemb_tp == nemb_mse) {
4020		ext_buf_cnt = bsg_bf_get(lpfc_mbox_hdr_mse_cnt,
4021			&sli_cfg_mbx->un.sli_config_emb0_subsys.sli_config_hdr);
4022		if (ext_buf_cnt > LPFC_MBX_SLI_CONFIG_MAX_MSE) {
4023			lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
4024					"2953 Failed SLI_CONFIG(mse) wr, "
4025					"ext_buf_cnt(%d) out of range(%d)\n",
4026					ext_buf_cnt,
4027					LPFC_MBX_SLI_CONFIG_MAX_MSE);
4028			return -ERANGE;
4029		}
4030		lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
4031				"2949 Handled SLI_CONFIG(mse) wr, "
4032				"ext_buf_cnt:%d\n", ext_buf_cnt);
4033	} else {
4034		/* sanity check on interface type for support */
4035		if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) <
4036		    LPFC_SLI_INTF_IF_TYPE_2)
4037			return -ENODEV;
4038		/* nemb_tp == nemb_hbd */
4039		ext_buf_cnt = sli_cfg_mbx->un.sli_config_emb1_subsys.hbd_count;
4040		if (ext_buf_cnt > LPFC_MBX_SLI_CONFIG_MAX_HBD) {
4041			lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
4042					"2954 Failed SLI_CONFIG(hbd) wr, "
4043					"ext_buf_cnt(%d) out of range(%d)\n",
4044					ext_buf_cnt,
4045					LPFC_MBX_SLI_CONFIG_MAX_HBD);
4046			return -ERANGE;
4047		}
4048		lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
4049				"2950 Handled SLI_CONFIG(hbd) wr, "
4050				"ext_buf_cnt:%d\n", ext_buf_cnt);
4051	}
4052
4053	/* before dma buffer descriptor setup */
4054	lpfc_idiag_mbxacc_dump_bsg_mbox(phba, nemb_tp, mbox_wr, dma_mbox,
4055					sta_pre_addr, dmabuf, ext_buf_cnt);
4056
4057	if (ext_buf_cnt == 0)
4058		return -EPERM;
4059
4060	/* for the first external buffer */
4061	lpfc_bsg_sli_cfg_dma_desc_setup(phba, nemb_tp, 0, dmabuf, dmabuf);
4062
4063	/* after dma descriptor setup */
4064	lpfc_idiag_mbxacc_dump_bsg_mbox(phba, nemb_tp, mbox_wr, dma_mbox,
4065					sta_pos_addr, dmabuf, ext_buf_cnt);
4066
4067	/* log for looking forward */
4068	for (i = 1; i < ext_buf_cnt; i++) {
4069		if (nemb_tp == nemb_mse)
4070			lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
4071				"2951 SLI_CONFIG(mse), buf[%d]-length:%d\n",
4072				i, sli_cfg_mbx->un.sli_config_emb0_subsys.
4073				mse[i].buf_len);
4074		else
4075			lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
4076				"2952 SLI_CONFIG(hbd), buf[%d]-length:%d\n",
4077				i, bsg_bf_get(lpfc_mbox_sli_config_ecmn_hbd_len,
4078				&sli_cfg_mbx->un.sli_config_emb1_subsys.
4079				hbd[i]));
4080	}
4081
4082	/* multi-buffer handling context */
4083	phba->mbox_ext_buf_ctx.nembType = nemb_tp;
4084	phba->mbox_ext_buf_ctx.mboxType = mbox_wr;
4085	phba->mbox_ext_buf_ctx.numBuf = ext_buf_cnt;
4086	phba->mbox_ext_buf_ctx.mbxTag = mbox_req->extMboxTag;
4087	phba->mbox_ext_buf_ctx.seqNum = mbox_req->extSeqNum;
4088	phba->mbox_ext_buf_ctx.mbx_dmabuf = dmabuf;
4089
4090	if (ext_buf_cnt == 1) {
4091		/* bsg tracking structure */
4092		dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL);
4093		if (!dd_data) {
4094			rc = -ENOMEM;
4095			goto job_error;
4096		}
4097
4098		/* mailbox command structure for base driver */
4099		pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
4100		if (!pmboxq) {
4101			rc = -ENOMEM;
4102			goto job_error;
4103		}
4104		memset(pmboxq, 0, sizeof(LPFC_MBOXQ_t));
4105		pmb = &pmboxq->u.mb;
4106		mbx = (uint8_t *)dmabuf->virt;
4107		memcpy(pmb, mbx, sizeof(*pmb));
4108		pmb->mbxOwner = OWN_HOST;
4109		pmboxq->vport = phba->pport;
4110
4111		/* callback for multi-buffer read mailbox command */
4112		pmboxq->mbox_cmpl = lpfc_bsg_issue_write_mbox_ext_cmpl;
4113
4114		/* context fields to callback function */
4115		pmboxq->ctx_u.dd_data = dd_data;
4116		dd_data->type = TYPE_MBOX;
4117		dd_data->set_job = job;
4118		dd_data->context_un.mbox.pmboxq = pmboxq;
4119		dd_data->context_un.mbox.mb = (MAILBOX_t *)mbx;
4120		job->dd_data = dd_data;
4121
4122		/* state change */
4123
4124		phba->mbox_ext_buf_ctx.state = LPFC_BSG_MBOX_PORT;
4125		rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_NOWAIT);
4126		if ((rc == MBX_SUCCESS) || (rc == MBX_BUSY)) {
4127			lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
4128					"2955 Issued SLI_CONFIG ext-buffer "
4129					"mailbox command, rc:x%x\n", rc);
4130			return SLI_CONFIG_HANDLED;
4131		}
4132		lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
4133				"2956 Failed to issue SLI_CONFIG ext-buffer "
4134				"mailbox command, rc:x%x\n", rc);
4135		rc = -EPIPE;
4136		goto job_error;
4137	}
4138
4139	/* wait for additional external buffers */
4140
4141	bsg_reply->result = 0;
4142	bsg_job_done(job, bsg_reply->result,
4143		       bsg_reply->reply_payload_rcv_len);
4144	return SLI_CONFIG_HANDLED;
4145
4146job_error:
4147	if (pmboxq)
4148		mempool_free(pmboxq, phba->mbox_mem_pool);
4149	kfree(dd_data);
4150
4151	return rc;
4152}
4153
4154/**
4155 * lpfc_bsg_handle_sli_cfg_mbox - handle sli-cfg mailbox cmd with ext buffer
4156 * @phba: Pointer to HBA context object.
4157 * @job: Pointer to the job object.
4158 * @dmabuf: Pointer to a DMA buffer descriptor.
4159 *
4160 * This routine handles SLI_CONFIG (0x9B) mailbox command with non-embedded
4161 * external buffers, including both 0x9B with non-embedded MSEs and 0x9B
4162 * with embedded subsystem 0x1 and opcodes with external HBDs.
4163 **/
4164static int
4165lpfc_bsg_handle_sli_cfg_mbox(struct lpfc_hba *phba, struct bsg_job *job,
4166			     struct lpfc_dmabuf *dmabuf)
4167{
4168	struct lpfc_sli_config_mbox *sli_cfg_mbx;
4169	uint32_t subsys;
4170	uint32_t opcode;
4171	int rc = SLI_CONFIG_NOT_HANDLED;
4172
4173	/* state change on new multi-buffer pass-through mailbox command */
4174	phba->mbox_ext_buf_ctx.state = LPFC_BSG_MBOX_HOST;
4175
4176	sli_cfg_mbx = (struct lpfc_sli_config_mbox *)dmabuf->virt;
4177
4178	if (!bsg_bf_get(lpfc_mbox_hdr_emb,
4179	    &sli_cfg_mbx->un.sli_config_emb0_subsys.sli_config_hdr)) {
4180		subsys = bsg_bf_get(lpfc_emb0_subcmnd_subsys,
4181				    &sli_cfg_mbx->un.sli_config_emb0_subsys);
4182		opcode = bsg_bf_get(lpfc_emb0_subcmnd_opcode,
4183				    &sli_cfg_mbx->un.sli_config_emb0_subsys);
4184		if (subsys == SLI_CONFIG_SUBSYS_FCOE) {
4185			switch (opcode) {
4186			case FCOE_OPCODE_READ_FCF:
4187			case FCOE_OPCODE_GET_DPORT_RESULTS:
4188				lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
4189						"2957 Handled SLI_CONFIG "
4190						"subsys_fcoe, opcode:x%x\n",
4191						opcode);
4192				rc = lpfc_bsg_sli_cfg_read_cmd_ext(phba, job,
4193							nemb_mse, dmabuf);
4194				break;
4195			case FCOE_OPCODE_ADD_FCF:
4196			case FCOE_OPCODE_SET_DPORT_MODE:
4197			case LPFC_MBOX_OPCODE_FCOE_LINK_DIAG_STATE:
4198				lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
4199						"2958 Handled SLI_CONFIG "
4200						"subsys_fcoe, opcode:x%x\n",
4201						opcode);
4202				rc = lpfc_bsg_sli_cfg_write_cmd_ext(phba, job,
4203							nemb_mse, dmabuf);
4204				break;
4205			default:
4206				lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
4207						"2959 Reject SLI_CONFIG "
4208						"subsys_fcoe, opcode:x%x\n",
4209						opcode);
4210				rc = -EPERM;
4211				break;
4212			}
4213		} else if (subsys == SLI_CONFIG_SUBSYS_COMN) {
4214			switch (opcode) {
4215			case COMN_OPCODE_GET_CNTL_ADDL_ATTRIBUTES:
4216			case COMN_OPCODE_GET_CNTL_ATTRIBUTES:
4217			case COMN_OPCODE_GET_PROFILE_CONFIG:
4218			case COMN_OPCODE_SET_FEATURES:
4219				lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
4220						"3106 Handled SLI_CONFIG "
4221						"subsys_comn, opcode:x%x\n",
4222						opcode);
4223				rc = lpfc_bsg_sli_cfg_read_cmd_ext(phba, job,
4224							nemb_mse, dmabuf);
4225				break;
4226			default:
4227				lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
4228						"3107 Reject SLI_CONFIG "
4229						"subsys_comn, opcode:x%x\n",
4230						opcode);
4231				rc = -EPERM;
4232				break;
4233			}
4234		} else {
4235			lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
4236					"2977 Reject SLI_CONFIG "
4237					"subsys:x%d, opcode:x%x\n",
4238					subsys, opcode);
4239			rc = -EPERM;
4240		}
4241	} else {
4242		subsys = bsg_bf_get(lpfc_emb1_subcmnd_subsys,
4243				    &sli_cfg_mbx->un.sli_config_emb1_subsys);
4244		opcode = bsg_bf_get(lpfc_emb1_subcmnd_opcode,
4245				    &sli_cfg_mbx->un.sli_config_emb1_subsys);
4246		if (subsys == SLI_CONFIG_SUBSYS_COMN) {
4247			switch (opcode) {
4248			case COMN_OPCODE_READ_OBJECT:
4249			case COMN_OPCODE_READ_OBJECT_LIST:
4250				lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
4251						"2960 Handled SLI_CONFIG "
4252						"subsys_comn, opcode:x%x\n",
4253						opcode);
4254				rc = lpfc_bsg_sli_cfg_read_cmd_ext(phba, job,
4255							nemb_hbd, dmabuf);
4256				break;
4257			case COMN_OPCODE_WRITE_OBJECT:
4258				lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
4259						"2961 Handled SLI_CONFIG "
4260						"subsys_comn, opcode:x%x\n",
4261						opcode);
4262				rc = lpfc_bsg_sli_cfg_write_cmd_ext(phba, job,
4263							nemb_hbd, dmabuf);
4264				break;
4265			default:
4266				lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
4267						"2962 Not handled SLI_CONFIG "
4268						"subsys_comn, opcode:x%x\n",
4269						opcode);
4270				rc = SLI_CONFIG_NOT_HANDLED;
4271				break;
4272			}
4273		} else {
4274			lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
4275					"2978 Not handled SLI_CONFIG "
4276					"subsys:x%d, opcode:x%x\n",
4277					subsys, opcode);
4278			rc = SLI_CONFIG_NOT_HANDLED;
4279		}
4280	}
4281
4282	/* state reset on not handled new multi-buffer mailbox command */
4283	if (rc != SLI_CONFIG_HANDLED)
4284		phba->mbox_ext_buf_ctx.state = LPFC_BSG_MBOX_IDLE;
4285
4286	return rc;
4287}
4288
4289/**
4290 * lpfc_bsg_mbox_ext_abort - request to abort mbox command with ext buffers
4291 * @phba: Pointer to HBA context object.
4292 *
4293 * This routine is for requesting to abort a pass-through mailbox command with
4294 * multiple external buffers due to error condition.
4295 **/
4296static void
4297lpfc_bsg_mbox_ext_abort(struct lpfc_hba *phba)
4298{
4299	if (phba->mbox_ext_buf_ctx.state == LPFC_BSG_MBOX_PORT)
4300		phba->mbox_ext_buf_ctx.state = LPFC_BSG_MBOX_ABTS;
4301	else
4302		lpfc_bsg_mbox_ext_session_reset(phba);
4303	return;
4304}
4305
4306/**
4307 * lpfc_bsg_read_ebuf_get - get the next mailbox read external buffer
4308 * @phba: Pointer to HBA context object.
4309 * @job: Pointer to the job object.
4310 *
4311 * This routine extracts the next mailbox read external buffer back to
4312 * user space through BSG.
4313 **/
4314static int
4315lpfc_bsg_read_ebuf_get(struct lpfc_hba *phba, struct bsg_job *job)
4316{
4317	struct fc_bsg_reply *bsg_reply = job->reply;
4318	struct lpfc_sli_config_mbox *sli_cfg_mbx;
4319	struct lpfc_dmabuf *dmabuf;
4320	uint8_t *pbuf;
4321	uint32_t size;
4322	uint32_t index;
4323
4324	index = phba->mbox_ext_buf_ctx.seqNum;
4325	phba->mbox_ext_buf_ctx.seqNum++;
4326
4327	sli_cfg_mbx = (struct lpfc_sli_config_mbox *)
4328			phba->mbox_ext_buf_ctx.mbx_dmabuf->virt;
4329
4330	if (phba->mbox_ext_buf_ctx.nembType == nemb_mse) {
4331		size = bsg_bf_get(lpfc_mbox_sli_config_mse_len,
4332			&sli_cfg_mbx->un.sli_config_emb0_subsys.mse[index]);
4333		lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
4334				"2963 SLI_CONFIG (mse) ext-buffer rd get "
4335				"buffer[%d], size:%d\n", index, size);
4336	} else {
4337		size = bsg_bf_get(lpfc_mbox_sli_config_ecmn_hbd_len,
4338			&sli_cfg_mbx->un.sli_config_emb1_subsys.hbd[index]);
4339		lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
4340				"2964 SLI_CONFIG (hbd) ext-buffer rd get "
4341				"buffer[%d], size:%d\n", index, size);
4342	}
4343	if (list_empty(&phba->mbox_ext_buf_ctx.ext_dmabuf_list))
4344		return -EPIPE;
4345	dmabuf = list_first_entry(&phba->mbox_ext_buf_ctx.ext_dmabuf_list,
4346				  struct lpfc_dmabuf, list);
4347	list_del_init(&dmabuf->list);
4348
4349	/* after dma buffer descriptor setup */
4350	lpfc_idiag_mbxacc_dump_bsg_mbox(phba, phba->mbox_ext_buf_ctx.nembType,
4351					mbox_rd, dma_ebuf, sta_pos_addr,
4352					dmabuf, index);
4353
4354	pbuf = (uint8_t *)dmabuf->virt;
4355	bsg_reply->reply_payload_rcv_len =
4356		sg_copy_from_buffer(job->reply_payload.sg_list,
4357				    job->reply_payload.sg_cnt,
4358				    pbuf, size);
4359
4360	lpfc_bsg_dma_page_free(phba, dmabuf);
4361
4362	if (phba->mbox_ext_buf_ctx.seqNum == phba->mbox_ext_buf_ctx.numBuf) {
4363		lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
4364				"2965 SLI_CONFIG (hbd) ext-buffer rd mbox "
4365				"command session done\n");
4366		lpfc_bsg_mbox_ext_session_reset(phba);
4367	}
4368
4369	bsg_reply->result = 0;
4370	bsg_job_done(job, bsg_reply->result,
4371		       bsg_reply->reply_payload_rcv_len);
4372
4373	return SLI_CONFIG_HANDLED;
4374}
4375
4376/**
4377 * lpfc_bsg_write_ebuf_set - set the next mailbox write external buffer
4378 * @phba: Pointer to HBA context object.
4379 * @job: Pointer to the job object.
4380 * @dmabuf: Pointer to a DMA buffer descriptor.
4381 *
4382 * This routine sets up the next mailbox read external buffer obtained
4383 * from user space through BSG.
4384 **/
4385static int
4386lpfc_bsg_write_ebuf_set(struct lpfc_hba *phba, struct bsg_job *job,
4387			struct lpfc_dmabuf *dmabuf)
4388{
4389	struct fc_bsg_reply *bsg_reply = job->reply;
4390	struct bsg_job_data *dd_data = NULL;
4391	LPFC_MBOXQ_t *pmboxq = NULL;
4392	MAILBOX_t *pmb;
4393	enum nemb_type nemb_tp;
4394	uint8_t *pbuf;
4395	uint32_t size;
4396	uint32_t index;
4397	int rc;
4398
4399	index = phba->mbox_ext_buf_ctx.seqNum;
4400	phba->mbox_ext_buf_ctx.seqNum++;
4401	nemb_tp = phba->mbox_ext_buf_ctx.nembType;
4402
4403	pbuf = (uint8_t *)dmabuf->virt;
4404	size = job->request_payload.payload_len;
4405	sg_copy_to_buffer(job->request_payload.sg_list,
4406			  job->request_payload.sg_cnt,
4407			  pbuf, size);
4408
4409	if (phba->mbox_ext_buf_ctx.nembType == nemb_mse) {
4410		lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
4411				"2966 SLI_CONFIG (mse) ext-buffer wr set "
4412				"buffer[%d], size:%d\n",
4413				phba->mbox_ext_buf_ctx.seqNum, size);
4414
4415	} else {
4416		lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
4417				"2967 SLI_CONFIG (hbd) ext-buffer wr set "
4418				"buffer[%d], size:%d\n",
4419				phba->mbox_ext_buf_ctx.seqNum, size);
4420
4421	}
4422
4423	/* set up external buffer descriptor and add to external buffer list */
4424	lpfc_bsg_sli_cfg_dma_desc_setup(phba, nemb_tp, index,
4425					phba->mbox_ext_buf_ctx.mbx_dmabuf,
4426					dmabuf);
4427	list_add_tail(&dmabuf->list, &phba->mbox_ext_buf_ctx.ext_dmabuf_list);
4428
4429	/* after write dma buffer */
4430	lpfc_idiag_mbxacc_dump_bsg_mbox(phba, phba->mbox_ext_buf_ctx.nembType,
4431					mbox_wr, dma_ebuf, sta_pos_addr,
4432					dmabuf, index);
4433
4434	if (phba->mbox_ext_buf_ctx.seqNum == phba->mbox_ext_buf_ctx.numBuf) {
4435		lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
4436				"2968 SLI_CONFIG ext-buffer wr all %d "
4437				"ebuffers received\n",
4438				phba->mbox_ext_buf_ctx.numBuf);
4439
4440		dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL);
4441		if (!dd_data) {
4442			rc = -ENOMEM;
4443			goto job_error;
4444		}
4445
4446		/* mailbox command structure for base driver */
4447		pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
4448		if (!pmboxq) {
4449			rc = -ENOMEM;
4450			goto job_error;
4451		}
4452		memset(pmboxq, 0, sizeof(LPFC_MBOXQ_t));
4453		pbuf = (uint8_t *)phba->mbox_ext_buf_ctx.mbx_dmabuf->virt;
4454		pmb = &pmboxq->u.mb;
4455		memcpy(pmb, pbuf, sizeof(*pmb));
4456		pmb->mbxOwner = OWN_HOST;
4457		pmboxq->vport = phba->pport;
4458
4459		/* callback for multi-buffer write mailbox command */
4460		pmboxq->mbox_cmpl = lpfc_bsg_issue_write_mbox_ext_cmpl;
4461
4462		/* context fields to callback function */
4463		pmboxq->ctx_u.dd_data = dd_data;
4464		dd_data->type = TYPE_MBOX;
4465		dd_data->set_job = job;
4466		dd_data->context_un.mbox.pmboxq = pmboxq;
4467		dd_data->context_un.mbox.mb = (MAILBOX_t *)pbuf;
4468		job->dd_data = dd_data;
4469
4470		/* state change */
4471		phba->mbox_ext_buf_ctx.state = LPFC_BSG_MBOX_PORT;
4472
4473		rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_NOWAIT);
4474		if ((rc == MBX_SUCCESS) || (rc == MBX_BUSY)) {
4475			lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
4476					"2969 Issued SLI_CONFIG ext-buffer "
4477					"mailbox command, rc:x%x\n", rc);
4478			return SLI_CONFIG_HANDLED;
4479		}
4480		lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
4481				"2970 Failed to issue SLI_CONFIG ext-buffer "
4482				"mailbox command, rc:x%x\n", rc);
4483		rc = -EPIPE;
4484		goto job_error;
4485	}
4486
4487	/* wait for additional external buffers */
4488	bsg_reply->result = 0;
4489	bsg_job_done(job, bsg_reply->result,
4490		       bsg_reply->reply_payload_rcv_len);
4491	return SLI_CONFIG_HANDLED;
4492
4493job_error:
4494	if (pmboxq)
4495		mempool_free(pmboxq, phba->mbox_mem_pool);
4496	lpfc_bsg_dma_page_free(phba, dmabuf);
4497	kfree(dd_data);
4498
4499	return rc;
4500}
4501
4502/**
4503 * lpfc_bsg_handle_sli_cfg_ebuf - handle ext buffer with sli-cfg mailbox cmd
4504 * @phba: Pointer to HBA context object.
4505 * @job: Pointer to the job object.
4506 * @dmabuf: Pointer to a DMA buffer descriptor.
4507 *
4508 * This routine handles the external buffer with SLI_CONFIG (0x9B) mailbox
4509 * command with multiple non-embedded external buffers.
4510 **/
4511static int
4512lpfc_bsg_handle_sli_cfg_ebuf(struct lpfc_hba *phba, struct bsg_job *job,
4513			     struct lpfc_dmabuf *dmabuf)
4514{
4515	int rc;
4516
4517	lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
4518			"2971 SLI_CONFIG buffer (type:x%x)\n",
4519			phba->mbox_ext_buf_ctx.mboxType);
4520
4521	if (phba->mbox_ext_buf_ctx.mboxType == mbox_rd) {
4522		if (phba->mbox_ext_buf_ctx.state != LPFC_BSG_MBOX_DONE) {
4523			lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
4524					"2972 SLI_CONFIG rd buffer state "
4525					"mismatch:x%x\n",
4526					phba->mbox_ext_buf_ctx.state);
4527			lpfc_bsg_mbox_ext_abort(phba);
4528			return -EPIPE;
4529		}
4530		rc = lpfc_bsg_read_ebuf_get(phba, job);
4531		if (rc == SLI_CONFIG_HANDLED)
4532			lpfc_bsg_dma_page_free(phba, dmabuf);
4533	} else { /* phba->mbox_ext_buf_ctx.mboxType == mbox_wr */
4534		if (phba->mbox_ext_buf_ctx.state != LPFC_BSG_MBOX_HOST) {
4535			lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
4536					"2973 SLI_CONFIG wr buffer state "
4537					"mismatch:x%x\n",
4538					phba->mbox_ext_buf_ctx.state);
4539			lpfc_bsg_mbox_ext_abort(phba);
4540			return -EPIPE;
4541		}
4542		rc = lpfc_bsg_write_ebuf_set(phba, job, dmabuf);
4543	}
4544	return rc;
4545}
4546
4547/**
4548 * lpfc_bsg_handle_sli_cfg_ext - handle sli-cfg mailbox with external buffer
4549 * @phba: Pointer to HBA context object.
4550 * @job: Pointer to the job object.
4551 * @dmabuf: Pointer to a DMA buffer descriptor.
4552 *
4553 * This routine checks and handles non-embedded multi-buffer SLI_CONFIG
4554 * (0x9B) mailbox commands and external buffers.
4555 **/
4556static int
4557lpfc_bsg_handle_sli_cfg_ext(struct lpfc_hba *phba, struct bsg_job *job,
4558			    struct lpfc_dmabuf *dmabuf)
4559{
4560	struct fc_bsg_request *bsg_request = job->request;
4561	struct dfc_mbox_req *mbox_req;
4562	int rc = SLI_CONFIG_NOT_HANDLED;
4563
4564	mbox_req =
4565	   (struct dfc_mbox_req *)bsg_request->rqst_data.h_vendor.vendor_cmd;
4566
4567	/* mbox command with/without single external buffer */
4568	if (mbox_req->extMboxTag == 0 && mbox_req->extSeqNum == 0)
4569		return rc;
4570
4571	/* mbox command and first external buffer */
4572	if (phba->mbox_ext_buf_ctx.state == LPFC_BSG_MBOX_IDLE) {
4573		if (mbox_req->extSeqNum == 1) {
4574			lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
4575					"2974 SLI_CONFIG mailbox: tag:%d, "
4576					"seq:%d\n", mbox_req->extMboxTag,
4577					mbox_req->extSeqNum);
4578			rc = lpfc_bsg_handle_sli_cfg_mbox(phba, job, dmabuf);
4579			return rc;
4580		} else
4581			goto sli_cfg_ext_error;
4582	}
4583
4584	/*
4585	 * handle additional external buffers
4586	 */
4587
4588	/* check broken pipe conditions */
4589	if (mbox_req->extMboxTag != phba->mbox_ext_buf_ctx.mbxTag)
4590		goto sli_cfg_ext_error;
4591	if (mbox_req->extSeqNum > phba->mbox_ext_buf_ctx.numBuf)
4592		goto sli_cfg_ext_error;
4593	if (mbox_req->extSeqNum != phba->mbox_ext_buf_ctx.seqNum + 1)
4594		goto sli_cfg_ext_error;
4595
4596	lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
4597			"2975 SLI_CONFIG mailbox external buffer: "
4598			"extSta:x%x, tag:%d, seq:%d\n",
4599			phba->mbox_ext_buf_ctx.state, mbox_req->extMboxTag,
4600			mbox_req->extSeqNum);
4601	rc = lpfc_bsg_handle_sli_cfg_ebuf(phba, job, dmabuf);
4602	return rc;
4603
4604sli_cfg_ext_error:
4605	/* all other cases, broken pipe */
4606	lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
4607			"2976 SLI_CONFIG mailbox broken pipe: "
4608			"ctxSta:x%x, ctxNumBuf:%d "
4609			"ctxTag:%d, ctxSeq:%d, tag:%d, seq:%d\n",
4610			phba->mbox_ext_buf_ctx.state,
4611			phba->mbox_ext_buf_ctx.numBuf,
4612			phba->mbox_ext_buf_ctx.mbxTag,
4613			phba->mbox_ext_buf_ctx.seqNum,
4614			mbox_req->extMboxTag, mbox_req->extSeqNum);
4615
4616	lpfc_bsg_mbox_ext_session_reset(phba);
4617
4618	return -EPIPE;
4619}
4620
4621/**
4622 * lpfc_bsg_issue_mbox - issues a mailbox command on behalf of an app
4623 * @phba: Pointer to HBA context object.
4624 * @job: Pointer to the job object.
4625 * @vport: Pointer to a vport object.
4626 *
4627 * Allocate a tracking object, mailbox command memory, get a mailbox
4628 * from the mailbox pool, copy the caller mailbox command.
4629 *
4630 * If offline and the sli is active we need to poll for the command (port is
4631 * being reset) and complete the job, otherwise issue the mailbox command and
4632 * let our completion handler finish the command.
4633 **/
4634static int
4635lpfc_bsg_issue_mbox(struct lpfc_hba *phba, struct bsg_job *job,
4636	struct lpfc_vport *vport)
4637{
4638	struct fc_bsg_request *bsg_request = job->request;
4639	struct fc_bsg_reply *bsg_reply = job->reply;
4640	LPFC_MBOXQ_t *pmboxq = NULL; /* internal mailbox queue */
4641	MAILBOX_t *pmb; /* shortcut to the pmboxq mailbox */
4642	/* a 4k buffer to hold the mb and extended data from/to the bsg */
4643	uint8_t *pmbx = NULL;
4644	struct bsg_job_data *dd_data = NULL; /* bsg data tracking structure */
4645	struct lpfc_dmabuf *dmabuf = NULL;
4646	struct dfc_mbox_req *mbox_req;
4647	struct READ_EVENT_LOG_VAR *rdEventLog;
4648	uint32_t transmit_length, receive_length, mode;
4649	struct lpfc_mbx_sli4_config *sli4_config;
4650	struct lpfc_mbx_nembed_cmd *nembed_sge;
4651	struct ulp_bde64 *bde;
4652	uint8_t *ext = NULL;
4653	int rc = 0;
4654	uint8_t *from;
4655	uint32_t size;
4656
4657	/* in case no data is transferred */
4658	bsg_reply->reply_payload_rcv_len = 0;
4659
4660	/* sanity check to protect driver */
4661	if (job->reply_payload.payload_len > BSG_MBOX_SIZE ||
4662	    job->request_payload.payload_len > BSG_MBOX_SIZE) {
4663		rc = -ERANGE;
4664		goto job_done;
4665	}
4666
4667	/*
4668	 * Don't allow mailbox commands to be sent when blocked or when in
4669	 * the middle of discovery
4670	 */
4671	if (phba->sli.sli_flag & LPFC_BLOCK_MGMT_IO) {
4672		rc = -EAGAIN;
4673		goto job_done;
4674	}
4675
4676	mbox_req =
4677	    (struct dfc_mbox_req *)bsg_request->rqst_data.h_vendor.vendor_cmd;
4678
4679	/* check if requested extended data lengths are valid */
4680	if ((mbox_req->inExtWLen > BSG_MBOX_SIZE/sizeof(uint32_t)) ||
4681	    (mbox_req->outExtWLen > BSG_MBOX_SIZE/sizeof(uint32_t))) {
4682		rc = -ERANGE;
4683		goto job_done;
4684	}
4685
4686	dmabuf = lpfc_bsg_dma_page_alloc(phba);
4687	if (!dmabuf || !dmabuf->virt) {
4688		rc = -ENOMEM;
4689		goto job_done;
4690	}
4691
4692	/* Get the mailbox command or external buffer from BSG */
4693	pmbx = (uint8_t *)dmabuf->virt;
4694	size = job->request_payload.payload_len;
4695	sg_copy_to_buffer(job->request_payload.sg_list,
4696			  job->request_payload.sg_cnt, pmbx, size);
4697
4698	/* Handle possible SLI_CONFIG with non-embedded payloads */
4699	if (phba->sli_rev == LPFC_SLI_REV4) {
4700		rc = lpfc_bsg_handle_sli_cfg_ext(phba, job, dmabuf);
4701		if (rc == SLI_CONFIG_HANDLED)
4702			goto job_cont;
4703		if (rc)
4704			goto job_done;
4705		/* SLI_CONFIG_NOT_HANDLED for other mailbox commands */
4706	}
4707
4708	rc = lpfc_bsg_check_cmd_access(phba, (MAILBOX_t *)pmbx, vport);
4709	if (rc != 0)
4710		goto job_done; /* must be negative */
4711
4712	/* allocate our bsg tracking structure */
4713	dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL);
4714	if (!dd_data) {
4715		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
4716				"2727 Failed allocation of dd_data\n");
4717		rc = -ENOMEM;
4718		goto job_done;
4719	}
4720
4721	pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
4722	if (!pmboxq) {
4723		rc = -ENOMEM;
4724		goto job_done;
4725	}
4726	memset(pmboxq, 0, sizeof(LPFC_MBOXQ_t));
4727
4728	pmb = &pmboxq->u.mb;
4729	memcpy(pmb, pmbx, sizeof(*pmb));
4730	pmb->mbxOwner = OWN_HOST;
4731	pmboxq->vport = vport;
4732
4733	/* If HBA encountered an error attention, allow only DUMP
4734	 * or RESTART mailbox commands until the HBA is restarted.
4735	 */
4736	if (phba->pport->stopped &&
4737	    pmb->mbxCommand != MBX_DUMP_MEMORY &&
4738	    pmb->mbxCommand != MBX_RESTART &&
4739	    pmb->mbxCommand != MBX_WRITE_VPARMS &&
4740	    pmb->mbxCommand != MBX_WRITE_WWN)
4741		lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX,
4742				"2797 mbox: Issued mailbox cmd "
4743				"0x%x while in stopped state.\n",
4744				pmb->mbxCommand);
4745
4746	/* extended mailbox commands will need an extended buffer */
4747	if (mbox_req->inExtWLen || mbox_req->outExtWLen) {
4748		from = pmbx;
4749		ext = from + sizeof(MAILBOX_t);
4750		pmboxq->ext_buf = ext;
4751		pmboxq->in_ext_byte_len =
4752			mbox_req->inExtWLen * sizeof(uint32_t);
4753		pmboxq->out_ext_byte_len =
4754			mbox_req->outExtWLen * sizeof(uint32_t);
4755		pmboxq->mbox_offset_word = mbox_req->mbOffset;
4756	}
4757
4758	/* biu diag will need a kernel buffer to transfer the data
4759	 * allocate our own buffer and setup the mailbox command to
4760	 * use ours
4761	 */
4762	if (pmb->mbxCommand == MBX_RUN_BIU_DIAG64) {
4763		transmit_length = pmb->un.varWords[1];
4764		receive_length = pmb->un.varWords[4];
4765		/* transmit length cannot be greater than receive length or
4766		 * mailbox extension size
4767		 */
4768		if ((transmit_length > receive_length) ||
4769			(transmit_length > BSG_MBOX_SIZE - sizeof(MAILBOX_t))) {
4770			rc = -ERANGE;
4771			goto job_done;
4772		}
4773		pmb->un.varBIUdiag.un.s2.xmit_bde64.addrHigh =
4774			putPaddrHigh(dmabuf->phys + sizeof(MAILBOX_t));
4775		pmb->un.varBIUdiag.un.s2.xmit_bde64.addrLow =
4776			putPaddrLow(dmabuf->phys + sizeof(MAILBOX_t));
4777
4778		pmb->un.varBIUdiag.un.s2.rcv_bde64.addrHigh =
4779			putPaddrHigh(dmabuf->phys + sizeof(MAILBOX_t)
4780			  + pmb->un.varBIUdiag.un.s2.xmit_bde64.tus.f.bdeSize);
4781		pmb->un.varBIUdiag.un.s2.rcv_bde64.addrLow =
4782			putPaddrLow(dmabuf->phys + sizeof(MAILBOX_t)
4783			  + pmb->un.varBIUdiag.un.s2.xmit_bde64.tus.f.bdeSize);
4784	} else if (pmb->mbxCommand == MBX_READ_EVENT_LOG) {
4785		rdEventLog = &pmb->un.varRdEventLog;
4786		receive_length = rdEventLog->rcv_bde64.tus.f.bdeSize;
4787		mode = bf_get(lpfc_event_log, rdEventLog);
4788
4789		/* receive length cannot be greater than mailbox
4790		 * extension size
4791		 */
4792		if (receive_length > BSG_MBOX_SIZE - sizeof(MAILBOX_t)) {
4793			rc = -ERANGE;
4794			goto job_done;
4795		}
4796
4797		/* mode zero uses a bde like biu diags command */
4798		if (mode == 0) {
4799			pmb->un.varWords[3] = putPaddrLow(dmabuf->phys
4800							+ sizeof(MAILBOX_t));
4801			pmb->un.varWords[4] = putPaddrHigh(dmabuf->phys
4802							+ sizeof(MAILBOX_t));
4803		}
4804	} else if (phba->sli_rev == LPFC_SLI_REV4) {
4805		/* Let type 4 (well known data) through because the data is
4806		 * returned in varwords[4-8]
4807		 * otherwise check the recieve length and fetch the buffer addr
4808		 */
4809		if ((pmb->mbxCommand == MBX_DUMP_MEMORY) &&
4810			(pmb->un.varDmp.type != DMP_WELL_KNOWN)) {
4811			/* rebuild the command for sli4 using our own buffers
4812			* like we do for biu diags
4813			*/
4814			receive_length = pmb->un.varWords[2];
4815			/* receive length cannot be greater than mailbox
4816			 * extension size
4817			 */
4818			if (receive_length == 0) {
4819				rc = -ERANGE;
4820				goto job_done;
4821			}
4822			pmb->un.varWords[3] = putPaddrLow(dmabuf->phys
4823						+ sizeof(MAILBOX_t));
4824			pmb->un.varWords[4] = putPaddrHigh(dmabuf->phys
4825						+ sizeof(MAILBOX_t));
4826		} else if ((pmb->mbxCommand == MBX_UPDATE_CFG) &&
4827			pmb->un.varUpdateCfg.co) {
4828			bde = (struct ulp_bde64 *)&pmb->un.varWords[4];
4829
4830			/* bde size cannot be greater than mailbox ext size */
4831			if (bde->tus.f.bdeSize >
4832			    BSG_MBOX_SIZE - sizeof(MAILBOX_t)) {
4833				rc = -ERANGE;
4834				goto job_done;
4835			}
4836			bde->addrHigh = putPaddrHigh(dmabuf->phys
4837						+ sizeof(MAILBOX_t));
4838			bde->addrLow = putPaddrLow(dmabuf->phys
4839						+ sizeof(MAILBOX_t));
4840		} else if (pmb->mbxCommand == MBX_SLI4_CONFIG) {
4841			/* Handling non-embedded SLI_CONFIG mailbox command */
4842			sli4_config = &pmboxq->u.mqe.un.sli4_config;
4843			if (!bf_get(lpfc_mbox_hdr_emb,
4844			    &sli4_config->header.cfg_mhdr)) {
4845				/* rebuild the command for sli4 using our
4846				 * own buffers like we do for biu diags
4847				 */
4848				nembed_sge = (struct lpfc_mbx_nembed_cmd *)
4849						&pmb->un.varWords[0];
4850				receive_length = nembed_sge->sge[0].length;
4851
4852				/* receive length cannot be greater than
4853				 * mailbox extension size
4854				 */
4855				if ((receive_length == 0) ||
4856				    (receive_length >
4857				     BSG_MBOX_SIZE - sizeof(MAILBOX_t))) {
4858					rc = -ERANGE;
4859					goto job_done;
4860				}
4861
4862				nembed_sge->sge[0].pa_hi =
4863						putPaddrHigh(dmabuf->phys
4864						   + sizeof(MAILBOX_t));
4865				nembed_sge->sge[0].pa_lo =
4866						putPaddrLow(dmabuf->phys
4867						   + sizeof(MAILBOX_t));
4868			}
4869		}
4870	}
4871
4872	dd_data->context_un.mbox.dmabuffers = dmabuf;
4873
4874	/* setup wake call as IOCB callback */
4875	pmboxq->mbox_cmpl = lpfc_bsg_issue_mbox_cmpl;
4876
4877	/* setup context field to pass wait_queue pointer to wake function */
4878	pmboxq->ctx_u.dd_data = dd_data;
4879	dd_data->type = TYPE_MBOX;
4880	dd_data->set_job = job;
4881	dd_data->context_un.mbox.pmboxq = pmboxq;
4882	dd_data->context_un.mbox.mb = (MAILBOX_t *)pmbx;
4883	dd_data->context_un.mbox.ext = ext;
4884	dd_data->context_un.mbox.mbOffset = mbox_req->mbOffset;
4885	dd_data->context_un.mbox.inExtWLen = mbox_req->inExtWLen;
4886	dd_data->context_un.mbox.outExtWLen = mbox_req->outExtWLen;
4887	job->dd_data = dd_data;
4888
4889	if (test_bit(FC_OFFLINE_MODE, &vport->fc_flag) ||
4890	    (!(phba->sli.sli_flag & LPFC_SLI_ACTIVE))) {
4891		rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_POLL);
4892		if (rc != MBX_SUCCESS) {
4893			rc = (rc == MBX_TIMEOUT) ? -ETIME : -ENODEV;
4894			goto job_done;
4895		}
4896
4897		/* job finished, copy the data */
4898		memcpy(pmbx, pmb, sizeof(*pmb));
4899		bsg_reply->reply_payload_rcv_len =
4900			sg_copy_from_buffer(job->reply_payload.sg_list,
4901					    job->reply_payload.sg_cnt,
4902					    pmbx, size);
4903		/* not waiting mbox already done */
4904		rc = 0;
4905		goto job_done;
4906	}
4907
4908	rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_NOWAIT);
4909	if ((rc == MBX_SUCCESS) || (rc == MBX_BUSY))
4910		return 1; /* job started */
4911
4912job_done:
4913	/* common exit for error or job completed inline */
4914	if (pmboxq)
4915		mempool_free(pmboxq, phba->mbox_mem_pool);
4916	lpfc_bsg_dma_page_free(phba, dmabuf);
4917	kfree(dd_data);
4918
4919job_cont:
4920	return rc;
4921}
4922
4923/**
4924 * lpfc_bsg_mbox_cmd - process an fc bsg LPFC_BSG_VENDOR_MBOX command
4925 * @job: MBOX fc_bsg_job for LPFC_BSG_VENDOR_MBOX.
4926 **/
4927static int
4928lpfc_bsg_mbox_cmd(struct bsg_job *job)
4929{
4930	struct lpfc_vport *vport = shost_priv(fc_bsg_to_shost(job));
4931	struct fc_bsg_request *bsg_request = job->request;
4932	struct fc_bsg_reply *bsg_reply = job->reply;
4933	struct lpfc_hba *phba = vport->phba;
4934	struct dfc_mbox_req *mbox_req;
4935	int rc = 0;
4936
4937	/* mix-and-match backward compatibility */
4938	bsg_reply->reply_payload_rcv_len = 0;
4939	if (job->request_len <
4940	    sizeof(struct fc_bsg_request) + sizeof(struct dfc_mbox_req)) {
4941		lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
4942				"2737 Mix-and-match backward compatibility "
4943				"between MBOX_REQ old size:%d and "
4944				"new request size:%d\n",
4945				(int)(job->request_len -
4946				      sizeof(struct fc_bsg_request)),
4947				(int)sizeof(struct dfc_mbox_req));
4948		mbox_req = (struct dfc_mbox_req *)
4949				bsg_request->rqst_data.h_vendor.vendor_cmd;
4950		mbox_req->extMboxTag = 0;
4951		mbox_req->extSeqNum = 0;
4952	}
4953
4954	rc = lpfc_bsg_issue_mbox(phba, job, vport);
4955
4956	if (rc == 0) {
4957		/* job done */
4958		bsg_reply->result = 0;
4959		job->dd_data = NULL;
4960		bsg_job_done(job, bsg_reply->result,
4961			       bsg_reply->reply_payload_rcv_len);
4962	} else if (rc == 1)
4963		/* job submitted, will complete later*/
4964		rc = 0; /* return zero, no error */
4965	else {
4966		/* some error occurred */
4967		bsg_reply->result = rc;
4968		job->dd_data = NULL;
4969	}
4970
4971	return rc;
4972}
4973
4974static int
4975lpfc_forced_link_speed(struct bsg_job *job)
4976{
4977	struct Scsi_Host *shost = fc_bsg_to_shost(job);
4978	struct lpfc_vport *vport = shost_priv(shost);
4979	struct lpfc_hba *phba = vport->phba;
4980	struct fc_bsg_reply *bsg_reply = job->reply;
4981	struct forced_link_speed_support_reply *forced_reply;
4982	int rc = 0;
4983
4984	if (job->request_len <
4985	    sizeof(struct fc_bsg_request) +
4986	    sizeof(struct get_forced_link_speed_support)) {
4987		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
4988				"0048 Received FORCED_LINK_SPEED request "
4989				"below minimum size\n");
4990		rc = -EINVAL;
4991		goto job_error;
4992	}
4993
4994	forced_reply = (struct forced_link_speed_support_reply *)
4995		bsg_reply->reply_data.vendor_reply.vendor_rsp;
4996
4997	if (job->reply_len < sizeof(*bsg_reply) + sizeof(*forced_reply)) {
4998		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
4999				"0049 Received FORCED_LINK_SPEED reply below "
5000				"minimum size\n");
5001		rc = -EINVAL;
5002		goto job_error;
5003	}
5004
5005	forced_reply->supported = (phba->hba_flag & HBA_FORCED_LINK_SPEED)
5006				   ? LPFC_FORCED_LINK_SPEED_SUPPORTED
5007				   : LPFC_FORCED_LINK_SPEED_NOT_SUPPORTED;
5008job_error:
5009	bsg_reply->result = rc;
5010	if (rc == 0)
5011		bsg_job_done(job, bsg_reply->result,
5012			       bsg_reply->reply_payload_rcv_len);
5013	return rc;
5014}
5015
5016/**
5017 * lpfc_check_fwlog_support: Check FW log support on the adapter
5018 * @phba: Pointer to HBA context object.
5019 *
5020 * Check if FW Logging support by the adapter
5021 **/
5022int
5023lpfc_check_fwlog_support(struct lpfc_hba *phba)
5024{
5025	struct lpfc_ras_fwlog *ras_fwlog = NULL;
5026
5027	ras_fwlog = &phba->ras_fwlog;
5028
5029	if (!ras_fwlog->ras_hwsupport)
5030		return -EACCES;
5031	else if (!ras_fwlog->ras_enabled)
5032		return -EPERM;
5033	else
5034		return 0;
5035}
5036
5037/**
5038 * lpfc_bsg_get_ras_config: Get RAS configuration settings
5039 * @job: fc_bsg_job to handle
5040 *
5041 * Get RAS configuration values set.
5042 **/
5043static int
5044lpfc_bsg_get_ras_config(struct bsg_job *job)
5045{
5046	struct Scsi_Host *shost = fc_bsg_to_shost(job);
5047	struct lpfc_vport *vport = shost_priv(shost);
5048	struct fc_bsg_reply *bsg_reply = job->reply;
5049	struct lpfc_hba *phba = vport->phba;
5050	struct lpfc_bsg_get_ras_config_reply *ras_reply;
5051	struct lpfc_ras_fwlog *ras_fwlog = &phba->ras_fwlog;
5052	int rc = 0;
5053
5054	if (job->request_len <
5055	    sizeof(struct fc_bsg_request) +
5056	    sizeof(struct lpfc_bsg_ras_req)) {
5057		lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
5058				"6192 FW_LOG request received "
5059				"below minimum size\n");
5060		rc = -EINVAL;
5061		goto ras_job_error;
5062	}
5063
5064	/* Check FW log status */
5065	rc = lpfc_check_fwlog_support(phba);
5066	if (rc)
5067		goto ras_job_error;
5068
5069	ras_reply = (struct lpfc_bsg_get_ras_config_reply *)
5070		bsg_reply->reply_data.vendor_reply.vendor_rsp;
5071
5072	/* Current logging state */
5073	spin_lock_irq(&phba->ras_fwlog_lock);
5074	if (ras_fwlog->state == ACTIVE)
5075		ras_reply->state = LPFC_RASLOG_STATE_RUNNING;
5076	else
5077		ras_reply->state = LPFC_RASLOG_STATE_STOPPED;
5078	spin_unlock_irq(&phba->ras_fwlog_lock);
5079
5080	ras_reply->log_level = phba->ras_fwlog.fw_loglevel;
5081	ras_reply->log_buff_sz = phba->cfg_ras_fwlog_buffsize;
5082
5083ras_job_error:
5084	/* make error code available to userspace */
5085	bsg_reply->result = rc;
5086
5087	/* complete the job back to userspace */
5088	if (!rc)
5089		bsg_job_done(job, bsg_reply->result,
5090			     bsg_reply->reply_payload_rcv_len);
5091	return rc;
5092}
5093
5094/**
5095 * lpfc_bsg_set_ras_config: Set FW logging parameters
5096 * @job: fc_bsg_job to handle
5097 *
5098 * Set log-level parameters for FW-logging in host memory
5099 **/
5100static int
5101lpfc_bsg_set_ras_config(struct bsg_job *job)
5102{
5103	struct Scsi_Host *shost = fc_bsg_to_shost(job);
5104	struct lpfc_vport *vport = shost_priv(shost);
5105	struct lpfc_hba *phba = vport->phba;
5106	struct lpfc_bsg_set_ras_config_req *ras_req;
5107	struct fc_bsg_request *bsg_request = job->request;
5108	struct lpfc_ras_fwlog *ras_fwlog = &phba->ras_fwlog;
5109	struct fc_bsg_reply *bsg_reply = job->reply;
5110	uint8_t action = 0, log_level = 0;
5111	int rc = 0, action_status = 0;
5112
5113	if (job->request_len <
5114	    sizeof(struct fc_bsg_request) +
5115	    sizeof(struct lpfc_bsg_set_ras_config_req)) {
5116		lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
5117				"6182 Received RAS_LOG request "
5118				"below minimum size\n");
5119		rc = -EINVAL;
5120		goto ras_job_error;
5121	}
5122
5123	/* Check FW log status */
5124	rc = lpfc_check_fwlog_support(phba);
5125	if (rc)
5126		goto ras_job_error;
5127
5128	ras_req = (struct lpfc_bsg_set_ras_config_req *)
5129		bsg_request->rqst_data.h_vendor.vendor_cmd;
5130	action = ras_req->action;
5131	log_level = ras_req->log_level;
5132
5133	if (action == LPFC_RASACTION_STOP_LOGGING) {
5134		/* Check if already disabled */
5135		spin_lock_irq(&phba->ras_fwlog_lock);
5136		if (ras_fwlog->state != ACTIVE) {
5137			spin_unlock_irq(&phba->ras_fwlog_lock);
5138			rc = -ESRCH;
5139			goto ras_job_error;
5140		}
5141		spin_unlock_irq(&phba->ras_fwlog_lock);
5142
5143		/* Disable logging */
5144		lpfc_ras_stop_fwlog(phba);
5145	} else {
5146		/*action = LPFC_RASACTION_START_LOGGING*/
5147
5148		/* Even though FW-logging is active re-initialize
5149		 * FW-logging with new log-level. Return status
5150		 * "Logging already Running" to caller.
5151		 **/
5152		spin_lock_irq(&phba->ras_fwlog_lock);
5153		if (ras_fwlog->state != INACTIVE)
5154			action_status = -EINPROGRESS;
5155		spin_unlock_irq(&phba->ras_fwlog_lock);
5156
5157		/* Enable logging */
5158		rc = lpfc_sli4_ras_fwlog_init(phba, log_level,
5159					      LPFC_RAS_ENABLE_LOGGING);
5160		if (rc) {
5161			rc = -EINVAL;
5162			goto ras_job_error;
5163		}
5164
5165		/* Check if FW-logging is re-initialized */
5166		if (action_status == -EINPROGRESS)
5167			rc = action_status;
5168	}
5169ras_job_error:
5170	/* make error code available to userspace */
5171	bsg_reply->result = rc;
5172
5173	/* complete the job back to userspace */
5174	if (!rc)
5175		bsg_job_done(job, bsg_reply->result,
5176			     bsg_reply->reply_payload_rcv_len);
5177
5178	return rc;
5179}
5180
5181/**
5182 * lpfc_bsg_get_ras_lwpd: Get log write position data
5183 * @job: fc_bsg_job to handle
5184 *
5185 * Get Offset/Wrap count of the log message written
5186 * in host memory
5187 **/
5188static int
5189lpfc_bsg_get_ras_lwpd(struct bsg_job *job)
5190{
5191	struct Scsi_Host *shost = fc_bsg_to_shost(job);
5192	struct lpfc_vport *vport = shost_priv(shost);
5193	struct lpfc_bsg_get_ras_lwpd *ras_reply;
5194	struct lpfc_hba *phba = vport->phba;
5195	struct lpfc_ras_fwlog *ras_fwlog = &phba->ras_fwlog;
5196	struct fc_bsg_reply *bsg_reply = job->reply;
5197	u32 *lwpd_ptr = NULL;
5198	int rc = 0;
5199
5200	rc = lpfc_check_fwlog_support(phba);
5201	if (rc)
5202		goto ras_job_error;
5203
5204	if (job->request_len <
5205	    sizeof(struct fc_bsg_request) +
5206	    sizeof(struct lpfc_bsg_ras_req)) {
5207		lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
5208				"6183 Received RAS_LOG request "
5209				"below minimum size\n");
5210		rc = -EINVAL;
5211		goto ras_job_error;
5212	}
5213
5214	ras_reply = (struct lpfc_bsg_get_ras_lwpd *)
5215		bsg_reply->reply_data.vendor_reply.vendor_rsp;
5216
5217	if (!ras_fwlog->lwpd.virt) {
5218		lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
5219				"6193 Restart FW Logging\n");
5220		rc = -EINVAL;
5221		goto ras_job_error;
5222	}
5223
5224	/* Get lwpd offset */
5225	lwpd_ptr = (uint32_t *)(ras_fwlog->lwpd.virt);
5226	ras_reply->offset = be32_to_cpu(*lwpd_ptr & 0xffffffff);
5227
5228	/* Get wrap count */
5229	ras_reply->wrap_count = be32_to_cpu(*(++lwpd_ptr) & 0xffffffff);
5230
5231ras_job_error:
5232	/* make error code available to userspace */
5233	bsg_reply->result = rc;
5234
5235	/* complete the job back to userspace */
5236	if (!rc)
5237		bsg_job_done(job, bsg_reply->result,
5238			     bsg_reply->reply_payload_rcv_len);
5239
5240	return rc;
5241}
5242
5243/**
5244 * lpfc_bsg_get_ras_fwlog: Read FW log
5245 * @job: fc_bsg_job to handle
5246 *
5247 * Copy the FW log into the passed buffer.
5248 **/
5249static int
5250lpfc_bsg_get_ras_fwlog(struct bsg_job *job)
5251{
5252	struct Scsi_Host *shost = fc_bsg_to_shost(job);
5253	struct lpfc_vport *vport = shost_priv(shost);
5254	struct lpfc_hba *phba = vport->phba;
5255	struct fc_bsg_request *bsg_request = job->request;
5256	struct fc_bsg_reply *bsg_reply = job->reply;
5257	struct lpfc_bsg_get_fwlog_req *ras_req;
5258	u32 rd_offset, rd_index, offset;
5259	void *src, *fwlog_buff;
5260	struct lpfc_ras_fwlog *ras_fwlog = NULL;
5261	struct lpfc_dmabuf *dmabuf, *next;
5262	int rc = 0;
5263
5264	ras_fwlog = &phba->ras_fwlog;
5265
5266	rc = lpfc_check_fwlog_support(phba);
5267	if (rc)
5268		goto ras_job_error;
5269
5270	/* Logging to be stopped before reading */
5271	spin_lock_irq(&phba->ras_fwlog_lock);
5272	if (ras_fwlog->state == ACTIVE) {
5273		spin_unlock_irq(&phba->ras_fwlog_lock);
5274		rc = -EINPROGRESS;
5275		goto ras_job_error;
5276	}
5277	spin_unlock_irq(&phba->ras_fwlog_lock);
5278
5279	if (job->request_len <
5280	    sizeof(struct fc_bsg_request) +
5281	    sizeof(struct lpfc_bsg_get_fwlog_req)) {
5282		lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
5283				"6184 Received RAS_LOG request "
5284				"below minimum size\n");
5285		rc = -EINVAL;
5286		goto ras_job_error;
5287	}
5288
5289	ras_req = (struct lpfc_bsg_get_fwlog_req *)
5290		bsg_request->rqst_data.h_vendor.vendor_cmd;
5291	rd_offset = ras_req->read_offset;
5292
5293	/* Allocate memory to read fw log*/
5294	fwlog_buff = vmalloc(ras_req->read_size);
5295	if (!fwlog_buff) {
5296		rc = -ENOMEM;
5297		goto ras_job_error;
5298	}
5299
5300	rd_index = (rd_offset / LPFC_RAS_MAX_ENTRY_SIZE);
5301	offset = (rd_offset % LPFC_RAS_MAX_ENTRY_SIZE);
5302
5303	list_for_each_entry_safe(dmabuf, next,
5304			      &ras_fwlog->fwlog_buff_list, list) {
5305
5306		if (dmabuf->buffer_tag < rd_index)
5307			continue;
5308
5309		src = dmabuf->virt + offset;
5310		memcpy(fwlog_buff, src, ras_req->read_size);
5311		break;
5312	}
5313
5314	bsg_reply->reply_payload_rcv_len =
5315		sg_copy_from_buffer(job->reply_payload.sg_list,
5316				    job->reply_payload.sg_cnt,
5317				    fwlog_buff, ras_req->read_size);
5318
5319	vfree(fwlog_buff);
5320
5321ras_job_error:
5322	bsg_reply->result = rc;
5323	if (!rc)
5324		bsg_job_done(job, bsg_reply->result,
5325			     bsg_reply->reply_payload_rcv_len);
5326
5327	return rc;
5328}
5329
5330static int
5331lpfc_get_trunk_info(struct bsg_job *job)
5332{
5333	struct lpfc_vport *vport = shost_priv(fc_bsg_to_shost(job));
5334	struct lpfc_hba *phba = vport->phba;
5335	struct fc_bsg_reply *bsg_reply = job->reply;
5336	struct lpfc_trunk_info *event_reply;
5337	int rc = 0;
5338
5339	if (job->request_len <
5340	    sizeof(struct fc_bsg_request) + sizeof(struct get_trunk_info_req)) {
5341		lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
5342				"2744 Received GET TRUNK _INFO request below "
5343				"minimum size\n");
5344		rc = -EINVAL;
5345		goto job_error;
5346	}
5347
5348	event_reply = (struct lpfc_trunk_info *)
5349		bsg_reply->reply_data.vendor_reply.vendor_rsp;
5350
5351	if (job->reply_len < sizeof(*bsg_reply) + sizeof(*event_reply)) {
5352		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
5353				"2728 Received GET TRUNK _INFO reply below "
5354				"minimum size\n");
5355		rc = -EINVAL;
5356		goto job_error;
5357	}
5358	if (event_reply == NULL) {
5359		rc = -EINVAL;
5360		goto job_error;
5361	}
5362
5363	bsg_bf_set(lpfc_trunk_info_link_status, event_reply,
5364		   (phba->link_state >= LPFC_LINK_UP) ? 1 : 0);
5365
5366	bsg_bf_set(lpfc_trunk_info_trunk_active0, event_reply,
5367		   (phba->trunk_link.link0.state == LPFC_LINK_UP) ? 1 : 0);
5368
5369	bsg_bf_set(lpfc_trunk_info_trunk_active1, event_reply,
5370		   (phba->trunk_link.link1.state == LPFC_LINK_UP) ? 1 : 0);
5371
5372	bsg_bf_set(lpfc_trunk_info_trunk_active2, event_reply,
5373		   (phba->trunk_link.link2.state == LPFC_LINK_UP) ? 1 : 0);
5374
5375	bsg_bf_set(lpfc_trunk_info_trunk_active3, event_reply,
5376		   (phba->trunk_link.link3.state == LPFC_LINK_UP) ? 1 : 0);
5377
5378	bsg_bf_set(lpfc_trunk_info_trunk_config0, event_reply,
5379		   bf_get(lpfc_conf_trunk_port0, &phba->sli4_hba));
5380
5381	bsg_bf_set(lpfc_trunk_info_trunk_config1, event_reply,
5382		   bf_get(lpfc_conf_trunk_port1, &phba->sli4_hba));
5383
5384	bsg_bf_set(lpfc_trunk_info_trunk_config2, event_reply,
5385		   bf_get(lpfc_conf_trunk_port2, &phba->sli4_hba));
5386
5387	bsg_bf_set(lpfc_trunk_info_trunk_config3, event_reply,
5388		   bf_get(lpfc_conf_trunk_port3, &phba->sli4_hba));
5389
5390	event_reply->port_speed = phba->sli4_hba.link_state.speed / 1000;
5391	event_reply->logical_speed =
5392				phba->sli4_hba.link_state.logical_speed / 1000;
5393job_error:
5394	bsg_reply->result = rc;
5395	if (!rc)
5396		bsg_job_done(job, bsg_reply->result,
5397			     bsg_reply->reply_payload_rcv_len);
5398	return rc;
5399
5400}
5401
5402static int
5403lpfc_get_cgnbuf_info(struct bsg_job *job)
5404{
5405	struct lpfc_vport *vport = shost_priv(fc_bsg_to_shost(job));
5406	struct lpfc_hba *phba = vport->phba;
5407	struct fc_bsg_request *bsg_request = job->request;
5408	struct fc_bsg_reply *bsg_reply = job->reply;
5409	struct get_cgnbuf_info_req *cgnbuf_req;
5410	struct lpfc_cgn_info *cp;
5411	uint8_t *cgn_buff;
5412	int size, cinfosz;
5413	int  rc = 0;
5414
5415	if (job->request_len < sizeof(struct fc_bsg_request) +
5416	    sizeof(struct get_cgnbuf_info_req)) {
5417		rc = -ENOMEM;
5418		goto job_exit;
5419	}
5420
5421	if (!phba->sli4_hba.pc_sli4_params.cmf) {
5422		rc = -ENOENT;
5423		goto job_exit;
5424	}
5425
5426	if (!phba->cgn_i || !phba->cgn_i->virt) {
5427		rc = -ENOENT;
5428		goto job_exit;
5429	}
5430
5431	cp = phba->cgn_i->virt;
5432	if (cp->cgn_info_version < LPFC_CGN_INFO_V3) {
5433		rc = -EPERM;
5434		goto job_exit;
5435	}
5436
5437	cgnbuf_req = (struct get_cgnbuf_info_req *)
5438		bsg_request->rqst_data.h_vendor.vendor_cmd;
5439
5440	/* For reset or size == 0 */
5441	bsg_reply->reply_payload_rcv_len = 0;
5442
5443	if (cgnbuf_req->reset == LPFC_BSG_CGN_RESET_STAT) {
5444		lpfc_init_congestion_stat(phba);
5445		goto job_exit;
5446	}
5447
5448	/* We don't want to include the CRC at the end */
5449	cinfosz = sizeof(struct lpfc_cgn_info) - sizeof(uint32_t);
5450
5451	size = cgnbuf_req->read_size;
5452	if (!size)
5453		goto job_exit;
5454
5455	if (size < cinfosz) {
5456		/* Just copy back what we can */
5457		cinfosz = size;
5458		rc = -E2BIG;
5459	}
5460
5461	/* Allocate memory to read congestion info */
5462	cgn_buff = vmalloc(cinfosz);
5463	if (!cgn_buff) {
5464		rc = -ENOMEM;
5465		goto job_exit;
5466	}
5467
5468	memcpy(cgn_buff, cp, cinfosz);
5469
5470	bsg_reply->reply_payload_rcv_len =
5471		sg_copy_from_buffer(job->reply_payload.sg_list,
5472				    job->reply_payload.sg_cnt,
5473				    cgn_buff, cinfosz);
5474
5475	vfree(cgn_buff);
5476
5477job_exit:
5478	bsg_reply->result = rc;
5479	if (!rc)
5480		bsg_job_done(job, bsg_reply->result,
5481			     bsg_reply->reply_payload_rcv_len);
5482	else
5483		lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
5484				"2724 GET CGNBUF error: %d\n", rc);
5485	return rc;
5486}
5487
5488/**
5489 * lpfc_bsg_hst_vendor - process a vendor-specific fc_bsg_job
5490 * @job: fc_bsg_job to handle
5491 **/
5492static int
5493lpfc_bsg_hst_vendor(struct bsg_job *job)
5494{
5495	struct fc_bsg_request *bsg_request = job->request;
5496	struct fc_bsg_reply *bsg_reply = job->reply;
5497	int command = bsg_request->rqst_data.h_vendor.vendor_cmd[0];
5498	int rc;
5499
5500	switch (command) {
5501	case LPFC_BSG_VENDOR_SET_CT_EVENT:
5502		rc = lpfc_bsg_hba_set_event(job);
5503		break;
5504	case LPFC_BSG_VENDOR_GET_CT_EVENT:
5505		rc = lpfc_bsg_hba_get_event(job);
5506		break;
5507	case LPFC_BSG_VENDOR_SEND_MGMT_RESP:
5508		rc = lpfc_bsg_send_mgmt_rsp(job);
5509		break;
5510	case LPFC_BSG_VENDOR_DIAG_MODE:
5511		rc = lpfc_bsg_diag_loopback_mode(job);
5512		break;
5513	case LPFC_BSG_VENDOR_DIAG_MODE_END:
5514		rc = lpfc_sli4_bsg_diag_mode_end(job);
5515		break;
5516	case LPFC_BSG_VENDOR_DIAG_RUN_LOOPBACK:
5517		rc = lpfc_bsg_diag_loopback_run(job);
5518		break;
5519	case LPFC_BSG_VENDOR_LINK_DIAG_TEST:
5520		rc = lpfc_sli4_bsg_link_diag_test(job);
5521		break;
5522	case LPFC_BSG_VENDOR_GET_MGMT_REV:
5523		rc = lpfc_bsg_get_dfc_rev(job);
5524		break;
5525	case LPFC_BSG_VENDOR_MBOX:
5526		rc = lpfc_bsg_mbox_cmd(job);
5527		break;
5528	case LPFC_BSG_VENDOR_FORCED_LINK_SPEED:
5529		rc = lpfc_forced_link_speed(job);
5530		break;
5531	case LPFC_BSG_VENDOR_RAS_GET_LWPD:
5532		rc = lpfc_bsg_get_ras_lwpd(job);
5533		break;
5534	case LPFC_BSG_VENDOR_RAS_GET_FWLOG:
5535		rc = lpfc_bsg_get_ras_fwlog(job);
5536		break;
5537	case LPFC_BSG_VENDOR_RAS_GET_CONFIG:
5538		rc = lpfc_bsg_get_ras_config(job);
5539		break;
5540	case LPFC_BSG_VENDOR_RAS_SET_CONFIG:
5541		rc = lpfc_bsg_set_ras_config(job);
5542		break;
5543	case LPFC_BSG_VENDOR_GET_TRUNK_INFO:
5544		rc = lpfc_get_trunk_info(job);
5545		break;
5546	case LPFC_BSG_VENDOR_GET_CGNBUF_INFO:
5547		rc = lpfc_get_cgnbuf_info(job);
5548		break;
5549	default:
5550		rc = -EINVAL;
5551		bsg_reply->reply_payload_rcv_len = 0;
5552		/* make error code available to userspace */
5553		bsg_reply->result = rc;
5554		break;
5555	}
5556
5557	return rc;
5558}
5559
5560/**
5561 * lpfc_bsg_request - handle a bsg request from the FC transport
5562 * @job: bsg_job to handle
5563 **/
5564int
5565lpfc_bsg_request(struct bsg_job *job)
5566{
5567	struct fc_bsg_request *bsg_request = job->request;
5568	struct fc_bsg_reply *bsg_reply = job->reply;
5569	uint32_t msgcode;
5570	int rc;
5571
5572	msgcode = bsg_request->msgcode;
5573	switch (msgcode) {
5574	case FC_BSG_HST_VENDOR:
5575		rc = lpfc_bsg_hst_vendor(job);
5576		break;
5577	case FC_BSG_RPT_ELS:
5578		rc = lpfc_bsg_rport_els(job);
5579		break;
5580	case FC_BSG_RPT_CT:
5581		rc = lpfc_bsg_send_mgmt_cmd(job);
5582		break;
5583	default:
5584		rc = -EINVAL;
5585		bsg_reply->reply_payload_rcv_len = 0;
5586		/* make error code available to userspace */
5587		bsg_reply->result = rc;
5588		break;
5589	}
5590
5591	return rc;
5592}
5593
5594/**
5595 * lpfc_bsg_timeout - handle timeout of a bsg request from the FC transport
5596 * @job: bsg_job that has timed out
5597 *
5598 * This function just aborts the job's IOCB.  The aborted IOCB will return to
5599 * the waiting function which will handle passing the error back to userspace
5600 **/
5601int
5602lpfc_bsg_timeout(struct bsg_job *job)
5603{
5604	struct lpfc_vport *vport = shost_priv(fc_bsg_to_shost(job));
5605	struct lpfc_hba *phba = vport->phba;
5606	struct lpfc_iocbq *cmdiocb;
5607	struct lpfc_sli_ring *pring;
5608	struct bsg_job_data *dd_data;
5609	unsigned long flags;
5610	int rc = 0;
5611	LIST_HEAD(completions);
5612	struct lpfc_iocbq *check_iocb, *next_iocb;
5613
5614	pring = lpfc_phba_elsring(phba);
5615	if (unlikely(!pring))
5616		return -EIO;
5617
5618	/* if job's driver data is NULL, the command completed or is in the
5619	 * the process of completing.  In this case, return status to request
5620	 * so the timeout is retried.  This avoids double completion issues
5621	 * and the request will be pulled off the timer queue when the
5622	 * command's completion handler executes.  Otherwise, prevent the
5623	 * command's completion handler from executing the job done callback
5624	 * and continue processing to abort the outstanding the command.
5625	 */
5626
5627	spin_lock_irqsave(&phba->ct_ev_lock, flags);
5628	dd_data = (struct bsg_job_data *)job->dd_data;
5629	if (dd_data) {
5630		dd_data->set_job = NULL;
5631		job->dd_data = NULL;
5632	} else {
5633		spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
5634		return -EAGAIN;
5635	}
5636
5637	switch (dd_data->type) {
5638	case TYPE_IOCB:
5639		/* Check to see if IOCB was issued to the port or not. If not,
5640		 * remove it from the txq queue and call cancel iocbs.
5641		 * Otherwise, call abort iotag
5642		 */
5643		cmdiocb = dd_data->context_un.iocb.cmdiocbq;
5644		spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
5645
5646		spin_lock_irqsave(&phba->hbalock, flags);
5647		/* make sure the I/O abort window is still open */
5648		if (!(cmdiocb->cmd_flag & LPFC_IO_CMD_OUTSTANDING)) {
5649			spin_unlock_irqrestore(&phba->hbalock, flags);
5650			return -EAGAIN;
5651		}
5652		list_for_each_entry_safe(check_iocb, next_iocb, &pring->txq,
5653					 list) {
5654			if (check_iocb == cmdiocb) {
5655				list_move_tail(&check_iocb->list, &completions);
5656				break;
5657			}
5658		}
5659		if (list_empty(&completions))
5660			lpfc_sli_issue_abort_iotag(phba, pring, cmdiocb, NULL);
5661		spin_unlock_irqrestore(&phba->hbalock, flags);
5662		if (!list_empty(&completions)) {
5663			lpfc_sli_cancel_iocbs(phba, &completions,
5664					      IOSTAT_LOCAL_REJECT,
5665					      IOERR_SLI_ABORTED);
5666		}
5667		break;
5668
5669	case TYPE_EVT:
5670		spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
5671		break;
5672
5673	case TYPE_MBOX:
5674		/* Update the ext buf ctx state if needed */
5675
5676		if (phba->mbox_ext_buf_ctx.state == LPFC_BSG_MBOX_PORT)
5677			phba->mbox_ext_buf_ctx.state = LPFC_BSG_MBOX_ABTS;
5678		spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
5679		break;
5680	default:
5681		spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
5682		break;
5683	}
5684
5685	/* scsi transport fc fc_bsg_job_timeout expects a zero return code,
5686	 * otherwise an error message will be displayed on the console
5687	 * so always return success (zero)
5688	 */
5689	return rc;
5690}
5691