1/*******************************************************************
2 * This file is part of the Emulex Linux Device Driver for         *
3 * Fibre Channel Host Bus Adapters.                                *
4 * Copyright (C) 2017-2024 Broadcom. All Rights Reserved. The term *
5 * ���Broadcom��� refers to Broadcom Inc. and/or its subsidiaries.     *
6 * Copyright (C) 2004-2016 Emulex.  All rights reserved.           *
7 * EMULEX and SLI are trademarks of Emulex.                        *
8 * www.broadcom.com                                                *
9 * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
10 *                                                                 *
11 * This program is free software; you can redistribute it and/or   *
12 * modify it under the terms of version 2 of the GNU General       *
13 * Public License as published by the Free Software Foundation.    *
14 * This program is distributed in the hope that it will be useful. *
15 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
16 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
17 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
18 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
19 * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
20 * more details, a copy of which can be found in the file COPYING  *
21 * included with this package.                                     *
22 *******************************************************************/
23
24#include <linux/blkdev.h>
25#include <linux/pci.h>
26#include <linux/interrupt.h>
27#include <linux/delay.h>
28#include <linux/slab.h>
29#include <linux/lockdep.h>
30
31#include <scsi/scsi.h>
32#include <scsi/scsi_cmnd.h>
33#include <scsi/scsi_device.h>
34#include <scsi/scsi_host.h>
35#include <scsi/scsi_transport_fc.h>
36#include <scsi/fc/fc_fs.h>
37#include <linux/crash_dump.h>
38#ifdef CONFIG_X86
39#include <asm/set_memory.h>
40#endif
41
42#include "lpfc_hw4.h"
43#include "lpfc_hw.h"
44#include "lpfc_sli.h"
45#include "lpfc_sli4.h"
46#include "lpfc_nl.h"
47#include "lpfc_disc.h"
48#include "lpfc.h"
49#include "lpfc_scsi.h"
50#include "lpfc_nvme.h"
51#include "lpfc_crtn.h"
52#include "lpfc_logmsg.h"
53#include "lpfc_compat.h"
54#include "lpfc_debugfs.h"
55#include "lpfc_vport.h"
56#include "lpfc_version.h"
57
58/* There are only four IOCB completion types. */
59typedef enum _lpfc_iocb_type {
60	LPFC_UNKNOWN_IOCB,
61	LPFC_UNSOL_IOCB,
62	LPFC_SOL_IOCB,
63	LPFC_ABORT_IOCB
64} lpfc_iocb_type;
65
66
67/* Provide function prototypes local to this module. */
68static int lpfc_sli_issue_mbox_s4(struct lpfc_hba *, LPFC_MBOXQ_t *,
69				  uint32_t);
70static int lpfc_sli4_read_rev(struct lpfc_hba *, LPFC_MBOXQ_t *,
71			      uint8_t *, uint32_t *);
72static struct lpfc_iocbq *
73lpfc_sli4_els_preprocess_rspiocbq(struct lpfc_hba *phba,
74				  struct lpfc_iocbq *rspiocbq);
75static void lpfc_sli4_send_seq_to_ulp(struct lpfc_vport *,
76				      struct hbq_dmabuf *);
77static void lpfc_sli4_handle_mds_loopback(struct lpfc_vport *vport,
78					  struct hbq_dmabuf *dmabuf);
79static bool lpfc_sli4_fp_handle_cqe(struct lpfc_hba *phba,
80				   struct lpfc_queue *cq, struct lpfc_cqe *cqe);
81static int lpfc_sli4_post_sgl_list(struct lpfc_hba *, struct list_head *,
82				       int);
83static void lpfc_sli4_hba_handle_eqe(struct lpfc_hba *phba,
84				     struct lpfc_queue *eq,
85				     struct lpfc_eqe *eqe,
86				     enum lpfc_poll_mode poll_mode);
87static bool lpfc_sli4_mbox_completions_pending(struct lpfc_hba *phba);
88static bool lpfc_sli4_process_missed_mbox_completions(struct lpfc_hba *phba);
89static struct lpfc_cqe *lpfc_sli4_cq_get(struct lpfc_queue *q);
90static void __lpfc_sli4_consume_cqe(struct lpfc_hba *phba,
91				    struct lpfc_queue *cq,
92				    struct lpfc_cqe *cqe);
93static uint16_t lpfc_wqe_bpl2sgl(struct lpfc_hba *phba,
94				 struct lpfc_iocbq *pwqeq,
95				 struct lpfc_sglq *sglq);
96
97union lpfc_wqe128 lpfc_iread_cmd_template;
98union lpfc_wqe128 lpfc_iwrite_cmd_template;
99union lpfc_wqe128 lpfc_icmnd_cmd_template;
100
101/* Setup WQE templates for IOs */
102void lpfc_wqe_cmd_template(void)
103{
104	union lpfc_wqe128 *wqe;
105
106	/* IREAD template */
107	wqe = &lpfc_iread_cmd_template;
108	memset(wqe, 0, sizeof(union lpfc_wqe128));
109
110	/* Word 0, 1, 2 - BDE is variable */
111
112	/* Word 3 - cmd_buff_len, payload_offset_len is zero */
113
114	/* Word 4 - total_xfer_len is variable */
115
116	/* Word 5 - is zero */
117
118	/* Word 6 - ctxt_tag, xri_tag is variable */
119
120	/* Word 7 */
121	bf_set(wqe_cmnd, &wqe->fcp_iread.wqe_com, CMD_FCP_IREAD64_WQE);
122	bf_set(wqe_pu, &wqe->fcp_iread.wqe_com, PARM_READ_CHECK);
123	bf_set(wqe_class, &wqe->fcp_iread.wqe_com, CLASS3);
124	bf_set(wqe_ct, &wqe->fcp_iread.wqe_com, SLI4_CT_RPI);
125
126	/* Word 8 - abort_tag is variable */
127
128	/* Word 9  - reqtag is variable */
129
130	/* Word 10 - dbde, wqes is variable */
131	bf_set(wqe_qosd, &wqe->fcp_iread.wqe_com, 0);
132	bf_set(wqe_iod, &wqe->fcp_iread.wqe_com, LPFC_WQE_IOD_READ);
133	bf_set(wqe_lenloc, &wqe->fcp_iread.wqe_com, LPFC_WQE_LENLOC_WORD4);
134	bf_set(wqe_dbde, &wqe->fcp_iread.wqe_com, 0);
135	bf_set(wqe_wqes, &wqe->fcp_iread.wqe_com, 1);
136
137	/* Word 11 - pbde is variable */
138	bf_set(wqe_cmd_type, &wqe->fcp_iread.wqe_com, COMMAND_DATA_IN);
139	bf_set(wqe_cqid, &wqe->fcp_iread.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);
140	bf_set(wqe_pbde, &wqe->fcp_iread.wqe_com, 0);
141
142	/* Word 12 - is zero */
143
144	/* Word 13, 14, 15 - PBDE is variable */
145
146	/* IWRITE template */
147	wqe = &lpfc_iwrite_cmd_template;
148	memset(wqe, 0, sizeof(union lpfc_wqe128));
149
150	/* Word 0, 1, 2 - BDE is variable */
151
152	/* Word 3 - cmd_buff_len, payload_offset_len is zero */
153
154	/* Word 4 - total_xfer_len is variable */
155
156	/* Word 5 - initial_xfer_len is variable */
157
158	/* Word 6 - ctxt_tag, xri_tag is variable */
159
160	/* Word 7 */
161	bf_set(wqe_cmnd, &wqe->fcp_iwrite.wqe_com, CMD_FCP_IWRITE64_WQE);
162	bf_set(wqe_pu, &wqe->fcp_iwrite.wqe_com, PARM_READ_CHECK);
163	bf_set(wqe_class, &wqe->fcp_iwrite.wqe_com, CLASS3);
164	bf_set(wqe_ct, &wqe->fcp_iwrite.wqe_com, SLI4_CT_RPI);
165
166	/* Word 8 - abort_tag is variable */
167
168	/* Word 9  - reqtag is variable */
169
170	/* Word 10 - dbde, wqes is variable */
171	bf_set(wqe_qosd, &wqe->fcp_iwrite.wqe_com, 0);
172	bf_set(wqe_iod, &wqe->fcp_iwrite.wqe_com, LPFC_WQE_IOD_WRITE);
173	bf_set(wqe_lenloc, &wqe->fcp_iwrite.wqe_com, LPFC_WQE_LENLOC_WORD4);
174	bf_set(wqe_dbde, &wqe->fcp_iwrite.wqe_com, 0);
175	bf_set(wqe_wqes, &wqe->fcp_iwrite.wqe_com, 1);
176
177	/* Word 11 - pbde is variable */
178	bf_set(wqe_cmd_type, &wqe->fcp_iwrite.wqe_com, COMMAND_DATA_OUT);
179	bf_set(wqe_cqid, &wqe->fcp_iwrite.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);
180	bf_set(wqe_pbde, &wqe->fcp_iwrite.wqe_com, 0);
181
182	/* Word 12 - is zero */
183
184	/* Word 13, 14, 15 - PBDE is variable */
185
186	/* ICMND template */
187	wqe = &lpfc_icmnd_cmd_template;
188	memset(wqe, 0, sizeof(union lpfc_wqe128));
189
190	/* Word 0, 1, 2 - BDE is variable */
191
192	/* Word 3 - payload_offset_len is variable */
193
194	/* Word 4, 5 - is zero */
195
196	/* Word 6 - ctxt_tag, xri_tag is variable */
197
198	/* Word 7 */
199	bf_set(wqe_cmnd, &wqe->fcp_icmd.wqe_com, CMD_FCP_ICMND64_WQE);
200	bf_set(wqe_pu, &wqe->fcp_icmd.wqe_com, 0);
201	bf_set(wqe_class, &wqe->fcp_icmd.wqe_com, CLASS3);
202	bf_set(wqe_ct, &wqe->fcp_icmd.wqe_com, SLI4_CT_RPI);
203
204	/* Word 8 - abort_tag is variable */
205
206	/* Word 9  - reqtag is variable */
207
208	/* Word 10 - dbde, wqes is variable */
209	bf_set(wqe_qosd, &wqe->fcp_icmd.wqe_com, 1);
210	bf_set(wqe_iod, &wqe->fcp_icmd.wqe_com, LPFC_WQE_IOD_NONE);
211	bf_set(wqe_lenloc, &wqe->fcp_icmd.wqe_com, LPFC_WQE_LENLOC_NONE);
212	bf_set(wqe_dbde, &wqe->fcp_icmd.wqe_com, 0);
213	bf_set(wqe_wqes, &wqe->fcp_icmd.wqe_com, 1);
214
215	/* Word 11 */
216	bf_set(wqe_cmd_type, &wqe->fcp_icmd.wqe_com, COMMAND_DATA_IN);
217	bf_set(wqe_cqid, &wqe->fcp_icmd.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);
218	bf_set(wqe_pbde, &wqe->fcp_icmd.wqe_com, 0);
219
220	/* Word 12, 13, 14, 15 - is zero */
221}
222
223#if defined(CONFIG_64BIT) && defined(__LITTLE_ENDIAN)
224/**
225 * lpfc_sli4_pcimem_bcopy - SLI4 memory copy function
226 * @srcp: Source memory pointer.
227 * @destp: Destination memory pointer.
228 * @cnt: Number of words required to be copied.
229 *       Must be a multiple of sizeof(uint64_t)
230 *
231 * This function is used for copying data between driver memory
232 * and the SLI WQ. This function also changes the endianness
233 * of each word if native endianness is different from SLI
234 * endianness. This function can be called with or without
235 * lock.
236 **/
237static void
238lpfc_sli4_pcimem_bcopy(void *srcp, void *destp, uint32_t cnt)
239{
240	uint64_t *src = srcp;
241	uint64_t *dest = destp;
242	int i;
243
244	for (i = 0; i < (int)cnt; i += sizeof(uint64_t))
245		*dest++ = *src++;
246}
247#else
248#define lpfc_sli4_pcimem_bcopy(a, b, c) lpfc_sli_pcimem_bcopy(a, b, c)
249#endif
250
251/**
252 * lpfc_sli4_wq_put - Put a Work Queue Entry on an Work Queue
253 * @q: The Work Queue to operate on.
254 * @wqe: The work Queue Entry to put on the Work queue.
255 *
256 * This routine will copy the contents of @wqe to the next available entry on
257 * the @q. This function will then ring the Work Queue Doorbell to signal the
258 * HBA to start processing the Work Queue Entry. This function returns 0 if
259 * successful. If no entries are available on @q then this function will return
260 * -ENOMEM.
261 * The caller is expected to hold the hbalock when calling this routine.
262 **/
263static int
264lpfc_sli4_wq_put(struct lpfc_queue *q, union lpfc_wqe128 *wqe)
265{
266	union lpfc_wqe *temp_wqe;
267	struct lpfc_register doorbell;
268	uint32_t host_index;
269	uint32_t idx;
270	uint32_t i = 0;
271	uint8_t *tmp;
272	u32 if_type;
273
274	/* sanity check on queue memory */
275	if (unlikely(!q))
276		return -ENOMEM;
277
278	temp_wqe = lpfc_sli4_qe(q, q->host_index);
279
280	/* If the host has not yet processed the next entry then we are done */
281	idx = ((q->host_index + 1) % q->entry_count);
282	if (idx == q->hba_index) {
283		q->WQ_overflow++;
284		return -EBUSY;
285	}
286	q->WQ_posted++;
287	/* set consumption flag every once in a while */
288	if (!((q->host_index + 1) % q->notify_interval))
289		bf_set(wqe_wqec, &wqe->generic.wqe_com, 1);
290	else
291		bf_set(wqe_wqec, &wqe->generic.wqe_com, 0);
292	if (q->phba->sli3_options & LPFC_SLI4_PHWQ_ENABLED)
293		bf_set(wqe_wqid, &wqe->generic.wqe_com, q->queue_id);
294	lpfc_sli4_pcimem_bcopy(wqe, temp_wqe, q->entry_size);
295	if (q->dpp_enable && q->phba->cfg_enable_dpp) {
296		/* write to DPP aperture taking advatage of Combined Writes */
297		tmp = (uint8_t *)temp_wqe;
298#ifdef __raw_writeq
299		for (i = 0; i < q->entry_size; i += sizeof(uint64_t))
300			__raw_writeq(*((uint64_t *)(tmp + i)),
301					q->dpp_regaddr + i);
302#else
303		for (i = 0; i < q->entry_size; i += sizeof(uint32_t))
304			__raw_writel(*((uint32_t *)(tmp + i)),
305					q->dpp_regaddr + i);
306#endif
307	}
308	/* ensure WQE bcopy and DPP flushed before doorbell write */
309	wmb();
310
311	/* Update the host index before invoking device */
312	host_index = q->host_index;
313
314	q->host_index = idx;
315
316	/* Ring Doorbell */
317	doorbell.word0 = 0;
318	if (q->db_format == LPFC_DB_LIST_FORMAT) {
319		if (q->dpp_enable && q->phba->cfg_enable_dpp) {
320			bf_set(lpfc_if6_wq_db_list_fm_num_posted, &doorbell, 1);
321			bf_set(lpfc_if6_wq_db_list_fm_dpp, &doorbell, 1);
322			bf_set(lpfc_if6_wq_db_list_fm_dpp_id, &doorbell,
323			    q->dpp_id);
324			bf_set(lpfc_if6_wq_db_list_fm_id, &doorbell,
325			    q->queue_id);
326		} else {
327			bf_set(lpfc_wq_db_list_fm_num_posted, &doorbell, 1);
328			bf_set(lpfc_wq_db_list_fm_id, &doorbell, q->queue_id);
329
330			/* Leave bits <23:16> clear for if_type 6 dpp */
331			if_type = bf_get(lpfc_sli_intf_if_type,
332					 &q->phba->sli4_hba.sli_intf);
333			if (if_type != LPFC_SLI_INTF_IF_TYPE_6)
334				bf_set(lpfc_wq_db_list_fm_index, &doorbell,
335				       host_index);
336		}
337	} else if (q->db_format == LPFC_DB_RING_FORMAT) {
338		bf_set(lpfc_wq_db_ring_fm_num_posted, &doorbell, 1);
339		bf_set(lpfc_wq_db_ring_fm_id, &doorbell, q->queue_id);
340	} else {
341		return -EINVAL;
342	}
343	writel(doorbell.word0, q->db_regaddr);
344
345	return 0;
346}
347
348/**
349 * lpfc_sli4_wq_release - Updates internal hba index for WQ
350 * @q: The Work Queue to operate on.
351 * @index: The index to advance the hba index to.
352 *
353 * This routine will update the HBA index of a queue to reflect consumption of
354 * Work Queue Entries by the HBA. When the HBA indicates that it has consumed
355 * an entry the host calls this function to update the queue's internal
356 * pointers.
357 **/
358static void
359lpfc_sli4_wq_release(struct lpfc_queue *q, uint32_t index)
360{
361	/* sanity check on queue memory */
362	if (unlikely(!q))
363		return;
364
365	q->hba_index = index;
366}
367
368/**
369 * lpfc_sli4_mq_put - Put a Mailbox Queue Entry on an Mailbox Queue
370 * @q: The Mailbox Queue to operate on.
371 * @mqe: The Mailbox Queue Entry to put on the Work queue.
372 *
373 * This routine will copy the contents of @mqe to the next available entry on
374 * the @q. This function will then ring the Work Queue Doorbell to signal the
375 * HBA to start processing the Work Queue Entry. This function returns 0 if
376 * successful. If no entries are available on @q then this function will return
377 * -ENOMEM.
378 * The caller is expected to hold the hbalock when calling this routine.
379 **/
380static uint32_t
381lpfc_sli4_mq_put(struct lpfc_queue *q, struct lpfc_mqe *mqe)
382{
383	struct lpfc_mqe *temp_mqe;
384	struct lpfc_register doorbell;
385
386	/* sanity check on queue memory */
387	if (unlikely(!q))
388		return -ENOMEM;
389	temp_mqe = lpfc_sli4_qe(q, q->host_index);
390
391	/* If the host has not yet processed the next entry then we are done */
392	if (((q->host_index + 1) % q->entry_count) == q->hba_index)
393		return -ENOMEM;
394	lpfc_sli4_pcimem_bcopy(mqe, temp_mqe, q->entry_size);
395	/* Save off the mailbox pointer for completion */
396	q->phba->mbox = (MAILBOX_t *)temp_mqe;
397
398	/* Update the host index before invoking device */
399	q->host_index = ((q->host_index + 1) % q->entry_count);
400
401	/* Ring Doorbell */
402	doorbell.word0 = 0;
403	bf_set(lpfc_mq_doorbell_num_posted, &doorbell, 1);
404	bf_set(lpfc_mq_doorbell_id, &doorbell, q->queue_id);
405	writel(doorbell.word0, q->phba->sli4_hba.MQDBregaddr);
406	return 0;
407}
408
409/**
410 * lpfc_sli4_mq_release - Updates internal hba index for MQ
411 * @q: The Mailbox Queue to operate on.
412 *
413 * This routine will update the HBA index of a queue to reflect consumption of
414 * a Mailbox Queue Entry by the HBA. When the HBA indicates that it has consumed
415 * an entry the host calls this function to update the queue's internal
416 * pointers. This routine returns the number of entries that were consumed by
417 * the HBA.
418 **/
419static uint32_t
420lpfc_sli4_mq_release(struct lpfc_queue *q)
421{
422	/* sanity check on queue memory */
423	if (unlikely(!q))
424		return 0;
425
426	/* Clear the mailbox pointer for completion */
427	q->phba->mbox = NULL;
428	q->hba_index = ((q->hba_index + 1) % q->entry_count);
429	return 1;
430}
431
432/**
433 * lpfc_sli4_eq_get - Gets the next valid EQE from a EQ
434 * @q: The Event Queue to get the first valid EQE from
435 *
436 * This routine will get the first valid Event Queue Entry from @q, update
437 * the queue's internal hba index, and return the EQE. If no valid EQEs are in
438 * the Queue (no more work to do), or the Queue is full of EQEs that have been
439 * processed, but not popped back to the HBA then this routine will return NULL.
440 **/
441static struct lpfc_eqe *
442lpfc_sli4_eq_get(struct lpfc_queue *q)
443{
444	struct lpfc_eqe *eqe;
445
446	/* sanity check on queue memory */
447	if (unlikely(!q))
448		return NULL;
449	eqe = lpfc_sli4_qe(q, q->host_index);
450
451	/* If the next EQE is not valid then we are done */
452	if (bf_get_le32(lpfc_eqe_valid, eqe) != q->qe_valid)
453		return NULL;
454
455	/*
456	 * insert barrier for instruction interlock : data from the hardware
457	 * must have the valid bit checked before it can be copied and acted
458	 * upon. Speculative instructions were allowing a bcopy at the start
459	 * of lpfc_sli4_fp_handle_wcqe(), which is called immediately
460	 * after our return, to copy data before the valid bit check above
461	 * was done. As such, some of the copied data was stale. The barrier
462	 * ensures the check is before any data is copied.
463	 */
464	mb();
465	return eqe;
466}
467
468/**
469 * lpfc_sli4_eq_clr_intr - Turn off interrupts from this EQ
470 * @q: The Event Queue to disable interrupts
471 *
472 **/
473void
474lpfc_sli4_eq_clr_intr(struct lpfc_queue *q)
475{
476	struct lpfc_register doorbell;
477
478	doorbell.word0 = 0;
479	bf_set(lpfc_eqcq_doorbell_eqci, &doorbell, 1);
480	bf_set(lpfc_eqcq_doorbell_qt, &doorbell, LPFC_QUEUE_TYPE_EVENT);
481	bf_set(lpfc_eqcq_doorbell_eqid_hi, &doorbell,
482		(q->queue_id >> LPFC_EQID_HI_FIELD_SHIFT));
483	bf_set(lpfc_eqcq_doorbell_eqid_lo, &doorbell, q->queue_id);
484	writel(doorbell.word0, q->phba->sli4_hba.EQDBregaddr);
485}
486
487/**
488 * lpfc_sli4_if6_eq_clr_intr - Turn off interrupts from this EQ
489 * @q: The Event Queue to disable interrupts
490 *
491 **/
492void
493lpfc_sli4_if6_eq_clr_intr(struct lpfc_queue *q)
494{
495	struct lpfc_register doorbell;
496
497	doorbell.word0 = 0;
498	bf_set(lpfc_if6_eq_doorbell_eqid, &doorbell, q->queue_id);
499	writel(doorbell.word0, q->phba->sli4_hba.EQDBregaddr);
500}
501
502/**
503 * lpfc_sli4_write_eq_db - write EQ DB for eqe's consumed or arm state
504 * @phba: adapter with EQ
505 * @q: The Event Queue that the host has completed processing for.
506 * @count: Number of elements that have been consumed
507 * @arm: Indicates whether the host wants to arms this CQ.
508 *
509 * This routine will notify the HBA, by ringing the doorbell, that count
510 * number of EQEs have been processed. The @arm parameter indicates whether
511 * the queue should be rearmed when ringing the doorbell.
512 **/
513void
514lpfc_sli4_write_eq_db(struct lpfc_hba *phba, struct lpfc_queue *q,
515		     uint32_t count, bool arm)
516{
517	struct lpfc_register doorbell;
518
519	/* sanity check on queue memory */
520	if (unlikely(!q || (count == 0 && !arm)))
521		return;
522
523	/* ring doorbell for number popped */
524	doorbell.word0 = 0;
525	if (arm) {
526		bf_set(lpfc_eqcq_doorbell_arm, &doorbell, 1);
527		bf_set(lpfc_eqcq_doorbell_eqci, &doorbell, 1);
528	}
529	bf_set(lpfc_eqcq_doorbell_num_released, &doorbell, count);
530	bf_set(lpfc_eqcq_doorbell_qt, &doorbell, LPFC_QUEUE_TYPE_EVENT);
531	bf_set(lpfc_eqcq_doorbell_eqid_hi, &doorbell,
532			(q->queue_id >> LPFC_EQID_HI_FIELD_SHIFT));
533	bf_set(lpfc_eqcq_doorbell_eqid_lo, &doorbell, q->queue_id);
534	writel(doorbell.word0, q->phba->sli4_hba.EQDBregaddr);
535	/* PCI read to flush PCI pipeline on re-arming for INTx mode */
536	if ((q->phba->intr_type == INTx) && (arm == LPFC_QUEUE_REARM))
537		readl(q->phba->sli4_hba.EQDBregaddr);
538}
539
540/**
541 * lpfc_sli4_if6_write_eq_db - write EQ DB for eqe's consumed or arm state
542 * @phba: adapter with EQ
543 * @q: The Event Queue that the host has completed processing for.
544 * @count: Number of elements that have been consumed
545 * @arm: Indicates whether the host wants to arms this CQ.
546 *
547 * This routine will notify the HBA, by ringing the doorbell, that count
548 * number of EQEs have been processed. The @arm parameter indicates whether
549 * the queue should be rearmed when ringing the doorbell.
550 **/
551void
552lpfc_sli4_if6_write_eq_db(struct lpfc_hba *phba, struct lpfc_queue *q,
553			  uint32_t count, bool arm)
554{
555	struct lpfc_register doorbell;
556
557	/* sanity check on queue memory */
558	if (unlikely(!q || (count == 0 && !arm)))
559		return;
560
561	/* ring doorbell for number popped */
562	doorbell.word0 = 0;
563	if (arm)
564		bf_set(lpfc_if6_eq_doorbell_arm, &doorbell, 1);
565	bf_set(lpfc_if6_eq_doorbell_num_released, &doorbell, count);
566	bf_set(lpfc_if6_eq_doorbell_eqid, &doorbell, q->queue_id);
567	writel(doorbell.word0, q->phba->sli4_hba.EQDBregaddr);
568	/* PCI read to flush PCI pipeline on re-arming for INTx mode */
569	if ((q->phba->intr_type == INTx) && (arm == LPFC_QUEUE_REARM))
570		readl(q->phba->sli4_hba.EQDBregaddr);
571}
572
573static void
574__lpfc_sli4_consume_eqe(struct lpfc_hba *phba, struct lpfc_queue *eq,
575			struct lpfc_eqe *eqe)
576{
577	if (!phba->sli4_hba.pc_sli4_params.eqav)
578		bf_set_le32(lpfc_eqe_valid, eqe, 0);
579
580	eq->host_index = ((eq->host_index + 1) % eq->entry_count);
581
582	/* if the index wrapped around, toggle the valid bit */
583	if (phba->sli4_hba.pc_sli4_params.eqav && !eq->host_index)
584		eq->qe_valid = (eq->qe_valid) ? 0 : 1;
585}
586
587static void
588lpfc_sli4_eqcq_flush(struct lpfc_hba *phba, struct lpfc_queue *eq)
589{
590	struct lpfc_eqe *eqe = NULL;
591	u32 eq_count = 0, cq_count = 0;
592	struct lpfc_cqe *cqe = NULL;
593	struct lpfc_queue *cq = NULL, *childq = NULL;
594	int cqid = 0;
595
596	/* walk all the EQ entries and drop on the floor */
597	eqe = lpfc_sli4_eq_get(eq);
598	while (eqe) {
599		/* Get the reference to the corresponding CQ */
600		cqid = bf_get_le32(lpfc_eqe_resource_id, eqe);
601		cq = NULL;
602
603		list_for_each_entry(childq, &eq->child_list, list) {
604			if (childq->queue_id == cqid) {
605				cq = childq;
606				break;
607			}
608		}
609		/* If CQ is valid, iterate through it and drop all the CQEs */
610		if (cq) {
611			cqe = lpfc_sli4_cq_get(cq);
612			while (cqe) {
613				__lpfc_sli4_consume_cqe(phba, cq, cqe);
614				cq_count++;
615				cqe = lpfc_sli4_cq_get(cq);
616			}
617			/* Clear and re-arm the CQ */
618			phba->sli4_hba.sli4_write_cq_db(phba, cq, cq_count,
619			    LPFC_QUEUE_REARM);
620			cq_count = 0;
621		}
622		__lpfc_sli4_consume_eqe(phba, eq, eqe);
623		eq_count++;
624		eqe = lpfc_sli4_eq_get(eq);
625	}
626
627	/* Clear and re-arm the EQ */
628	phba->sli4_hba.sli4_write_eq_db(phba, eq, eq_count, LPFC_QUEUE_REARM);
629}
630
631static int
632lpfc_sli4_process_eq(struct lpfc_hba *phba, struct lpfc_queue *eq,
633		     u8 rearm, enum lpfc_poll_mode poll_mode)
634{
635	struct lpfc_eqe *eqe;
636	int count = 0, consumed = 0;
637
638	if (cmpxchg(&eq->queue_claimed, 0, 1) != 0)
639		goto rearm_and_exit;
640
641	eqe = lpfc_sli4_eq_get(eq);
642	while (eqe) {
643		lpfc_sli4_hba_handle_eqe(phba, eq, eqe, poll_mode);
644		__lpfc_sli4_consume_eqe(phba, eq, eqe);
645
646		consumed++;
647		if (!(++count % eq->max_proc_limit))
648			break;
649
650		if (!(count % eq->notify_interval)) {
651			phba->sli4_hba.sli4_write_eq_db(phba, eq, consumed,
652							LPFC_QUEUE_NOARM);
653			consumed = 0;
654		}
655
656		eqe = lpfc_sli4_eq_get(eq);
657	}
658	eq->EQ_processed += count;
659
660	/* Track the max number of EQEs processed in 1 intr */
661	if (count > eq->EQ_max_eqe)
662		eq->EQ_max_eqe = count;
663
664	xchg(&eq->queue_claimed, 0);
665
666rearm_and_exit:
667	/* Always clear the EQ. */
668	phba->sli4_hba.sli4_write_eq_db(phba, eq, consumed, rearm);
669
670	return count;
671}
672
673/**
674 * lpfc_sli4_cq_get - Gets the next valid CQE from a CQ
675 * @q: The Completion Queue to get the first valid CQE from
676 *
677 * This routine will get the first valid Completion Queue Entry from @q, update
678 * the queue's internal hba index, and return the CQE. If no valid CQEs are in
679 * the Queue (no more work to do), or the Queue is full of CQEs that have been
680 * processed, but not popped back to the HBA then this routine will return NULL.
681 **/
682static struct lpfc_cqe *
683lpfc_sli4_cq_get(struct lpfc_queue *q)
684{
685	struct lpfc_cqe *cqe;
686
687	/* sanity check on queue memory */
688	if (unlikely(!q))
689		return NULL;
690	cqe = lpfc_sli4_qe(q, q->host_index);
691
692	/* If the next CQE is not valid then we are done */
693	if (bf_get_le32(lpfc_cqe_valid, cqe) != q->qe_valid)
694		return NULL;
695
696	/*
697	 * insert barrier for instruction interlock : data from the hardware
698	 * must have the valid bit checked before it can be copied and acted
699	 * upon. Given what was seen in lpfc_sli4_cq_get() of speculative
700	 * instructions allowing action on content before valid bit checked,
701	 * add barrier here as well. May not be needed as "content" is a
702	 * single 32-bit entity here (vs multi word structure for cq's).
703	 */
704	mb();
705	return cqe;
706}
707
708static void
709__lpfc_sli4_consume_cqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
710			struct lpfc_cqe *cqe)
711{
712	if (!phba->sli4_hba.pc_sli4_params.cqav)
713		bf_set_le32(lpfc_cqe_valid, cqe, 0);
714
715	cq->host_index = ((cq->host_index + 1) % cq->entry_count);
716
717	/* if the index wrapped around, toggle the valid bit */
718	if (phba->sli4_hba.pc_sli4_params.cqav && !cq->host_index)
719		cq->qe_valid = (cq->qe_valid) ? 0 : 1;
720}
721
722/**
723 * lpfc_sli4_write_cq_db - write cq DB for entries consumed or arm state.
724 * @phba: the adapter with the CQ
725 * @q: The Completion Queue that the host has completed processing for.
726 * @count: the number of elements that were consumed
727 * @arm: Indicates whether the host wants to arms this CQ.
728 *
729 * This routine will notify the HBA, by ringing the doorbell, that the
730 * CQEs have been processed. The @arm parameter specifies whether the
731 * queue should be rearmed when ringing the doorbell.
732 **/
733void
734lpfc_sli4_write_cq_db(struct lpfc_hba *phba, struct lpfc_queue *q,
735		     uint32_t count, bool arm)
736{
737	struct lpfc_register doorbell;
738
739	/* sanity check on queue memory */
740	if (unlikely(!q || (count == 0 && !arm)))
741		return;
742
743	/* ring doorbell for number popped */
744	doorbell.word0 = 0;
745	if (arm)
746		bf_set(lpfc_eqcq_doorbell_arm, &doorbell, 1);
747	bf_set(lpfc_eqcq_doorbell_num_released, &doorbell, count);
748	bf_set(lpfc_eqcq_doorbell_qt, &doorbell, LPFC_QUEUE_TYPE_COMPLETION);
749	bf_set(lpfc_eqcq_doorbell_cqid_hi, &doorbell,
750			(q->queue_id >> LPFC_CQID_HI_FIELD_SHIFT));
751	bf_set(lpfc_eqcq_doorbell_cqid_lo, &doorbell, q->queue_id);
752	writel(doorbell.word0, q->phba->sli4_hba.CQDBregaddr);
753}
754
755/**
756 * lpfc_sli4_if6_write_cq_db - write cq DB for entries consumed or arm state.
757 * @phba: the adapter with the CQ
758 * @q: The Completion Queue that the host has completed processing for.
759 * @count: the number of elements that were consumed
760 * @arm: Indicates whether the host wants to arms this CQ.
761 *
762 * This routine will notify the HBA, by ringing the doorbell, that the
763 * CQEs have been processed. The @arm parameter specifies whether the
764 * queue should be rearmed when ringing the doorbell.
765 **/
766void
767lpfc_sli4_if6_write_cq_db(struct lpfc_hba *phba, struct lpfc_queue *q,
768			 uint32_t count, bool arm)
769{
770	struct lpfc_register doorbell;
771
772	/* sanity check on queue memory */
773	if (unlikely(!q || (count == 0 && !arm)))
774		return;
775
776	/* ring doorbell for number popped */
777	doorbell.word0 = 0;
778	if (arm)
779		bf_set(lpfc_if6_cq_doorbell_arm, &doorbell, 1);
780	bf_set(lpfc_if6_cq_doorbell_num_released, &doorbell, count);
781	bf_set(lpfc_if6_cq_doorbell_cqid, &doorbell, q->queue_id);
782	writel(doorbell.word0, q->phba->sli4_hba.CQDBregaddr);
783}
784
785/*
786 * lpfc_sli4_rq_put - Put a Receive Buffer Queue Entry on a Receive Queue
787 *
788 * This routine will copy the contents of @wqe to the next available entry on
789 * the @q. This function will then ring the Receive Queue Doorbell to signal the
790 * HBA to start processing the Receive Queue Entry. This function returns the
791 * index that the rqe was copied to if successful. If no entries are available
792 * on @q then this function will return -ENOMEM.
793 * The caller is expected to hold the hbalock when calling this routine.
794 **/
795int
796lpfc_sli4_rq_put(struct lpfc_queue *hq, struct lpfc_queue *dq,
797		 struct lpfc_rqe *hrqe, struct lpfc_rqe *drqe)
798{
799	struct lpfc_rqe *temp_hrqe;
800	struct lpfc_rqe *temp_drqe;
801	struct lpfc_register doorbell;
802	int hq_put_index;
803	int dq_put_index;
804
805	/* sanity check on queue memory */
806	if (unlikely(!hq) || unlikely(!dq))
807		return -ENOMEM;
808	hq_put_index = hq->host_index;
809	dq_put_index = dq->host_index;
810	temp_hrqe = lpfc_sli4_qe(hq, hq_put_index);
811	temp_drqe = lpfc_sli4_qe(dq, dq_put_index);
812
813	if (hq->type != LPFC_HRQ || dq->type != LPFC_DRQ)
814		return -EINVAL;
815	if (hq_put_index != dq_put_index)
816		return -EINVAL;
817	/* If the host has not yet processed the next entry then we are done */
818	if (((hq_put_index + 1) % hq->entry_count) == hq->hba_index)
819		return -EBUSY;
820	lpfc_sli4_pcimem_bcopy(hrqe, temp_hrqe, hq->entry_size);
821	lpfc_sli4_pcimem_bcopy(drqe, temp_drqe, dq->entry_size);
822
823	/* Update the host index to point to the next slot */
824	hq->host_index = ((hq_put_index + 1) % hq->entry_count);
825	dq->host_index = ((dq_put_index + 1) % dq->entry_count);
826	hq->RQ_buf_posted++;
827
828	/* Ring The Header Receive Queue Doorbell */
829	if (!(hq->host_index % hq->notify_interval)) {
830		doorbell.word0 = 0;
831		if (hq->db_format == LPFC_DB_RING_FORMAT) {
832			bf_set(lpfc_rq_db_ring_fm_num_posted, &doorbell,
833			       hq->notify_interval);
834			bf_set(lpfc_rq_db_ring_fm_id, &doorbell, hq->queue_id);
835		} else if (hq->db_format == LPFC_DB_LIST_FORMAT) {
836			bf_set(lpfc_rq_db_list_fm_num_posted, &doorbell,
837			       hq->notify_interval);
838			bf_set(lpfc_rq_db_list_fm_index, &doorbell,
839			       hq->host_index);
840			bf_set(lpfc_rq_db_list_fm_id, &doorbell, hq->queue_id);
841		} else {
842			return -EINVAL;
843		}
844		writel(doorbell.word0, hq->db_regaddr);
845	}
846	return hq_put_index;
847}
848
849/*
850 * lpfc_sli4_rq_release - Updates internal hba index for RQ
851 *
852 * This routine will update the HBA index of a queue to reflect consumption of
853 * one Receive Queue Entry by the HBA. When the HBA indicates that it has
854 * consumed an entry the host calls this function to update the queue's
855 * internal pointers. This routine returns the number of entries that were
856 * consumed by the HBA.
857 **/
858static uint32_t
859lpfc_sli4_rq_release(struct lpfc_queue *hq, struct lpfc_queue *dq)
860{
861	/* sanity check on queue memory */
862	if (unlikely(!hq) || unlikely(!dq))
863		return 0;
864
865	if ((hq->type != LPFC_HRQ) || (dq->type != LPFC_DRQ))
866		return 0;
867	hq->hba_index = ((hq->hba_index + 1) % hq->entry_count);
868	dq->hba_index = ((dq->hba_index + 1) % dq->entry_count);
869	return 1;
870}
871
872/**
873 * lpfc_cmd_iocb - Get next command iocb entry in the ring
874 * @phba: Pointer to HBA context object.
875 * @pring: Pointer to driver SLI ring object.
876 *
877 * This function returns pointer to next command iocb entry
878 * in the command ring. The caller must hold hbalock to prevent
879 * other threads consume the next command iocb.
880 * SLI-2/SLI-3 provide different sized iocbs.
881 **/
882static inline IOCB_t *
883lpfc_cmd_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
884{
885	return (IOCB_t *) (((char *) pring->sli.sli3.cmdringaddr) +
886			   pring->sli.sli3.cmdidx * phba->iocb_cmd_size);
887}
888
889/**
890 * lpfc_resp_iocb - Get next response iocb entry in the ring
891 * @phba: Pointer to HBA context object.
892 * @pring: Pointer to driver SLI ring object.
893 *
894 * This function returns pointer to next response iocb entry
895 * in the response ring. The caller must hold hbalock to make sure
896 * that no other thread consume the next response iocb.
897 * SLI-2/SLI-3 provide different sized iocbs.
898 **/
899static inline IOCB_t *
900lpfc_resp_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
901{
902	return (IOCB_t *) (((char *) pring->sli.sli3.rspringaddr) +
903			   pring->sli.sli3.rspidx * phba->iocb_rsp_size);
904}
905
906/**
907 * __lpfc_sli_get_iocbq - Allocates an iocb object from iocb pool
908 * @phba: Pointer to HBA context object.
909 *
910 * This function is called with hbalock held. This function
911 * allocates a new driver iocb object from the iocb pool. If the
912 * allocation is successful, it returns pointer to the newly
913 * allocated iocb object else it returns NULL.
914 **/
915struct lpfc_iocbq *
916__lpfc_sli_get_iocbq(struct lpfc_hba *phba)
917{
918	struct list_head *lpfc_iocb_list = &phba->lpfc_iocb_list;
919	struct lpfc_iocbq * iocbq = NULL;
920
921	lockdep_assert_held(&phba->hbalock);
922
923	list_remove_head(lpfc_iocb_list, iocbq, struct lpfc_iocbq, list);
924	if (iocbq)
925		phba->iocb_cnt++;
926	if (phba->iocb_cnt > phba->iocb_max)
927		phba->iocb_max = phba->iocb_cnt;
928	return iocbq;
929}
930
931/**
932 * __lpfc_clear_active_sglq - Remove the active sglq for this XRI.
933 * @phba: Pointer to HBA context object.
934 * @xritag: XRI value.
935 *
936 * This function clears the sglq pointer from the array of active
937 * sglq's. The xritag that is passed in is used to index into the
938 * array. Before the xritag can be used it needs to be adjusted
939 * by subtracting the xribase.
940 *
941 * Returns sglq ponter = success, NULL = Failure.
942 **/
943struct lpfc_sglq *
944__lpfc_clear_active_sglq(struct lpfc_hba *phba, uint16_t xritag)
945{
946	struct lpfc_sglq *sglq;
947
948	sglq = phba->sli4_hba.lpfc_sglq_active_list[xritag];
949	phba->sli4_hba.lpfc_sglq_active_list[xritag] = NULL;
950	return sglq;
951}
952
953/**
954 * __lpfc_get_active_sglq - Get the active sglq for this XRI.
955 * @phba: Pointer to HBA context object.
956 * @xritag: XRI value.
957 *
958 * This function returns the sglq pointer from the array of active
959 * sglq's. The xritag that is passed in is used to index into the
960 * array. Before the xritag can be used it needs to be adjusted
961 * by subtracting the xribase.
962 *
963 * Returns sglq ponter = success, NULL = Failure.
964 **/
965struct lpfc_sglq *
966__lpfc_get_active_sglq(struct lpfc_hba *phba, uint16_t xritag)
967{
968	struct lpfc_sglq *sglq;
969
970	sglq =  phba->sli4_hba.lpfc_sglq_active_list[xritag];
971	return sglq;
972}
973
974/**
975 * lpfc_clr_rrq_active - Clears RRQ active bit in xri_bitmap.
976 * @phba: Pointer to HBA context object.
977 * @xritag: xri used in this exchange.
978 * @rrq: The RRQ to be cleared.
979 *
980 **/
981void
982lpfc_clr_rrq_active(struct lpfc_hba *phba,
983		    uint16_t xritag,
984		    struct lpfc_node_rrq *rrq)
985{
986	struct lpfc_nodelist *ndlp = NULL;
987
988	/* Lookup did to verify if did is still active on this vport */
989	if (rrq->vport)
990		ndlp = lpfc_findnode_did(rrq->vport, rrq->nlp_DID);
991
992	if (!ndlp)
993		goto out;
994
995	if (test_and_clear_bit(xritag, ndlp->active_rrqs_xri_bitmap)) {
996		rrq->send_rrq = 0;
997		rrq->xritag = 0;
998		rrq->rrq_stop_time = 0;
999	}
1000out:
1001	mempool_free(rrq, phba->rrq_pool);
1002}
1003
1004/**
1005 * lpfc_handle_rrq_active - Checks if RRQ has waithed RATOV.
1006 * @phba: Pointer to HBA context object.
1007 *
1008 * This function is called with hbalock held. This function
1009 * Checks if stop_time (ratov from setting rrq active) has
1010 * been reached, if it has and the send_rrq flag is set then
1011 * it will call lpfc_send_rrq. If the send_rrq flag is not set
1012 * then it will just call the routine to clear the rrq and
1013 * free the rrq resource.
1014 * The timer is set to the next rrq that is going to expire before
1015 * leaving the routine.
1016 *
1017 **/
1018void
1019lpfc_handle_rrq_active(struct lpfc_hba *phba)
1020{
1021	struct lpfc_node_rrq *rrq;
1022	struct lpfc_node_rrq *nextrrq;
1023	unsigned long next_time;
1024	unsigned long iflags;
1025	LIST_HEAD(send_rrq);
1026
1027	spin_lock_irqsave(&phba->hbalock, iflags);
1028	phba->hba_flag &= ~HBA_RRQ_ACTIVE;
1029	next_time = jiffies + msecs_to_jiffies(1000 * (phba->fc_ratov + 1));
1030	list_for_each_entry_safe(rrq, nextrrq,
1031				 &phba->active_rrq_list, list) {
1032		if (time_after(jiffies, rrq->rrq_stop_time))
1033			list_move(&rrq->list, &send_rrq);
1034		else if (time_before(rrq->rrq_stop_time, next_time))
1035			next_time = rrq->rrq_stop_time;
1036	}
1037	spin_unlock_irqrestore(&phba->hbalock, iflags);
1038	if ((!list_empty(&phba->active_rrq_list)) &&
1039	    (!test_bit(FC_UNLOADING, &phba->pport->load_flag)))
1040		mod_timer(&phba->rrq_tmr, next_time);
1041	list_for_each_entry_safe(rrq, nextrrq, &send_rrq, list) {
1042		list_del(&rrq->list);
1043		if (!rrq->send_rrq) {
1044			/* this call will free the rrq */
1045			lpfc_clr_rrq_active(phba, rrq->xritag, rrq);
1046		} else if (lpfc_send_rrq(phba, rrq)) {
1047			/* if we send the rrq then the completion handler
1048			*  will clear the bit in the xribitmap.
1049			*/
1050			lpfc_clr_rrq_active(phba, rrq->xritag,
1051					    rrq);
1052		}
1053	}
1054}
1055
1056/**
1057 * lpfc_get_active_rrq - Get the active RRQ for this exchange.
1058 * @vport: Pointer to vport context object.
1059 * @xri: The xri used in the exchange.
1060 * @did: The targets DID for this exchange.
1061 *
1062 * returns NULL = rrq not found in the phba->active_rrq_list.
1063 *         rrq = rrq for this xri and target.
1064 **/
1065struct lpfc_node_rrq *
1066lpfc_get_active_rrq(struct lpfc_vport *vport, uint16_t xri, uint32_t did)
1067{
1068	struct lpfc_hba *phba = vport->phba;
1069	struct lpfc_node_rrq *rrq;
1070	struct lpfc_node_rrq *nextrrq;
1071	unsigned long iflags;
1072
1073	if (phba->sli_rev != LPFC_SLI_REV4)
1074		return NULL;
1075	spin_lock_irqsave(&phba->hbalock, iflags);
1076	list_for_each_entry_safe(rrq, nextrrq, &phba->active_rrq_list, list) {
1077		if (rrq->vport == vport && rrq->xritag == xri &&
1078				rrq->nlp_DID == did){
1079			list_del(&rrq->list);
1080			spin_unlock_irqrestore(&phba->hbalock, iflags);
1081			return rrq;
1082		}
1083	}
1084	spin_unlock_irqrestore(&phba->hbalock, iflags);
1085	return NULL;
1086}
1087
1088/**
1089 * lpfc_cleanup_vports_rrqs - Remove and clear the active RRQ for this vport.
1090 * @vport: Pointer to vport context object.
1091 * @ndlp: Pointer to the lpfc_node_list structure.
1092 * If ndlp is NULL Remove all active RRQs for this vport from the
1093 * phba->active_rrq_list and clear the rrq.
1094 * If ndlp is not NULL then only remove rrqs for this vport & this ndlp.
1095 **/
1096void
1097lpfc_cleanup_vports_rrqs(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
1098
1099{
1100	struct lpfc_hba *phba = vport->phba;
1101	struct lpfc_node_rrq *rrq;
1102	struct lpfc_node_rrq *nextrrq;
1103	unsigned long iflags;
1104	LIST_HEAD(rrq_list);
1105
1106	if (phba->sli_rev != LPFC_SLI_REV4)
1107		return;
1108	if (!ndlp) {
1109		lpfc_sli4_vport_delete_els_xri_aborted(vport);
1110		lpfc_sli4_vport_delete_fcp_xri_aborted(vport);
1111	}
1112	spin_lock_irqsave(&phba->hbalock, iflags);
1113	list_for_each_entry_safe(rrq, nextrrq, &phba->active_rrq_list, list) {
1114		if (rrq->vport != vport)
1115			continue;
1116
1117		if (!ndlp || ndlp == lpfc_findnode_did(vport, rrq->nlp_DID))
1118			list_move(&rrq->list, &rrq_list);
1119
1120	}
1121	spin_unlock_irqrestore(&phba->hbalock, iflags);
1122
1123	list_for_each_entry_safe(rrq, nextrrq, &rrq_list, list) {
1124		list_del(&rrq->list);
1125		lpfc_clr_rrq_active(phba, rrq->xritag, rrq);
1126	}
1127}
1128
1129/**
1130 * lpfc_test_rrq_active - Test RRQ bit in xri_bitmap.
1131 * @phba: Pointer to HBA context object.
1132 * @ndlp: Targets nodelist pointer for this exchange.
1133 * @xritag: the xri in the bitmap to test.
1134 *
1135 * This function returns:
1136 * 0 = rrq not active for this xri
1137 * 1 = rrq is valid for this xri.
1138 **/
1139int
1140lpfc_test_rrq_active(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
1141			uint16_t  xritag)
1142{
1143	if (!ndlp)
1144		return 0;
1145	if (!ndlp->active_rrqs_xri_bitmap)
1146		return 0;
1147	if (test_bit(xritag, ndlp->active_rrqs_xri_bitmap))
1148		return 1;
1149	else
1150		return 0;
1151}
1152
1153/**
1154 * lpfc_set_rrq_active - set RRQ active bit in xri_bitmap.
1155 * @phba: Pointer to HBA context object.
1156 * @ndlp: nodelist pointer for this target.
1157 * @xritag: xri used in this exchange.
1158 * @rxid: Remote Exchange ID.
1159 * @send_rrq: Flag used to determine if we should send rrq els cmd.
1160 *
1161 * This function takes the hbalock.
1162 * The active bit is always set in the active rrq xri_bitmap even
1163 * if there is no slot avaiable for the other rrq information.
1164 *
1165 * returns 0 rrq actived for this xri
1166 *         < 0 No memory or invalid ndlp.
1167 **/
1168int
1169lpfc_set_rrq_active(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
1170		    uint16_t xritag, uint16_t rxid, uint16_t send_rrq)
1171{
1172	unsigned long iflags;
1173	struct lpfc_node_rrq *rrq;
1174	int empty;
1175
1176	if (!ndlp)
1177		return -EINVAL;
1178
1179	if (!phba->cfg_enable_rrq)
1180		return -EINVAL;
1181
1182	spin_lock_irqsave(&phba->hbalock, iflags);
1183	if (test_bit(FC_UNLOADING, &phba->pport->load_flag)) {
1184		phba->hba_flag &= ~HBA_RRQ_ACTIVE;
1185		goto out;
1186	}
1187
1188	if (ndlp->vport && test_bit(FC_UNLOADING, &ndlp->vport->load_flag))
1189		goto out;
1190
1191	if (!ndlp->active_rrqs_xri_bitmap)
1192		goto out;
1193
1194	if (test_and_set_bit(xritag, ndlp->active_rrqs_xri_bitmap))
1195		goto out;
1196
1197	spin_unlock_irqrestore(&phba->hbalock, iflags);
1198	rrq = mempool_alloc(phba->rrq_pool, GFP_ATOMIC);
1199	if (!rrq) {
1200		lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
1201				"3155 Unable to allocate RRQ xri:0x%x rxid:0x%x"
1202				" DID:0x%x Send:%d\n",
1203				xritag, rxid, ndlp->nlp_DID, send_rrq);
1204		return -EINVAL;
1205	}
1206	if (phba->cfg_enable_rrq == 1)
1207		rrq->send_rrq = send_rrq;
1208	else
1209		rrq->send_rrq = 0;
1210	rrq->xritag = xritag;
1211	rrq->rrq_stop_time = jiffies +
1212				msecs_to_jiffies(1000 * (phba->fc_ratov + 1));
1213	rrq->nlp_DID = ndlp->nlp_DID;
1214	rrq->vport = ndlp->vport;
1215	rrq->rxid = rxid;
1216	spin_lock_irqsave(&phba->hbalock, iflags);
1217	empty = list_empty(&phba->active_rrq_list);
1218	list_add_tail(&rrq->list, &phba->active_rrq_list);
1219	phba->hba_flag |= HBA_RRQ_ACTIVE;
1220	spin_unlock_irqrestore(&phba->hbalock, iflags);
1221	if (empty)
1222		lpfc_worker_wake_up(phba);
1223	return 0;
1224out:
1225	spin_unlock_irqrestore(&phba->hbalock, iflags);
1226	lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
1227			"2921 Can't set rrq active xri:0x%x rxid:0x%x"
1228			" DID:0x%x Send:%d\n",
1229			xritag, rxid, ndlp->nlp_DID, send_rrq);
1230	return -EINVAL;
1231}
1232
1233/**
1234 * __lpfc_sli_get_els_sglq - Allocates an iocb object from sgl pool
1235 * @phba: Pointer to HBA context object.
1236 * @piocbq: Pointer to the iocbq.
1237 *
1238 * The driver calls this function with either the nvme ls ring lock
1239 * or the fc els ring lock held depending on the iocb usage.  This function
1240 * gets a new driver sglq object from the sglq list. If the list is not empty
1241 * then it is successful, it returns pointer to the newly allocated sglq
1242 * object else it returns NULL.
1243 **/
1244static struct lpfc_sglq *
1245__lpfc_sli_get_els_sglq(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq)
1246{
1247	struct list_head *lpfc_els_sgl_list = &phba->sli4_hba.lpfc_els_sgl_list;
1248	struct lpfc_sglq *sglq = NULL;
1249	struct lpfc_sglq *start_sglq = NULL;
1250	struct lpfc_io_buf *lpfc_cmd;
1251	struct lpfc_nodelist *ndlp;
1252	int found = 0;
1253	u8 cmnd;
1254
1255	cmnd = get_job_cmnd(phba, piocbq);
1256
1257	if (piocbq->cmd_flag & LPFC_IO_FCP) {
1258		lpfc_cmd = piocbq->io_buf;
1259		ndlp = lpfc_cmd->rdata->pnode;
1260	} else  if ((cmnd == CMD_GEN_REQUEST64_CR) &&
1261			!(piocbq->cmd_flag & LPFC_IO_LIBDFC)) {
1262		ndlp = piocbq->ndlp;
1263	} else  if (piocbq->cmd_flag & LPFC_IO_LIBDFC) {
1264		if (piocbq->cmd_flag & LPFC_IO_LOOPBACK)
1265			ndlp = NULL;
1266		else
1267			ndlp = piocbq->ndlp;
1268	} else {
1269		ndlp = piocbq->ndlp;
1270	}
1271
1272	spin_lock(&phba->sli4_hba.sgl_list_lock);
1273	list_remove_head(lpfc_els_sgl_list, sglq, struct lpfc_sglq, list);
1274	start_sglq = sglq;
1275	while (!found) {
1276		if (!sglq)
1277			break;
1278		if (ndlp && ndlp->active_rrqs_xri_bitmap &&
1279		    test_bit(sglq->sli4_lxritag,
1280		    ndlp->active_rrqs_xri_bitmap)) {
1281			/* This xri has an rrq outstanding for this DID.
1282			 * put it back in the list and get another xri.
1283			 */
1284			list_add_tail(&sglq->list, lpfc_els_sgl_list);
1285			sglq = NULL;
1286			list_remove_head(lpfc_els_sgl_list, sglq,
1287						struct lpfc_sglq, list);
1288			if (sglq == start_sglq) {
1289				list_add_tail(&sglq->list, lpfc_els_sgl_list);
1290				sglq = NULL;
1291				break;
1292			} else
1293				continue;
1294		}
1295		sglq->ndlp = ndlp;
1296		found = 1;
1297		phba->sli4_hba.lpfc_sglq_active_list[sglq->sli4_lxritag] = sglq;
1298		sglq->state = SGL_ALLOCATED;
1299	}
1300	spin_unlock(&phba->sli4_hba.sgl_list_lock);
1301	return sglq;
1302}
1303
1304/**
1305 * __lpfc_sli_get_nvmet_sglq - Allocates an iocb object from sgl pool
1306 * @phba: Pointer to HBA context object.
1307 * @piocbq: Pointer to the iocbq.
1308 *
1309 * This function is called with the sgl_list lock held. This function
1310 * gets a new driver sglq object from the sglq list. If the
1311 * list is not empty then it is successful, it returns pointer to the newly
1312 * allocated sglq object else it returns NULL.
1313 **/
1314struct lpfc_sglq *
1315__lpfc_sli_get_nvmet_sglq(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq)
1316{
1317	struct list_head *lpfc_nvmet_sgl_list;
1318	struct lpfc_sglq *sglq = NULL;
1319
1320	lpfc_nvmet_sgl_list = &phba->sli4_hba.lpfc_nvmet_sgl_list;
1321
1322	lockdep_assert_held(&phba->sli4_hba.sgl_list_lock);
1323
1324	list_remove_head(lpfc_nvmet_sgl_list, sglq, struct lpfc_sglq, list);
1325	if (!sglq)
1326		return NULL;
1327	phba->sli4_hba.lpfc_sglq_active_list[sglq->sli4_lxritag] = sglq;
1328	sglq->state = SGL_ALLOCATED;
1329	return sglq;
1330}
1331
1332/**
1333 * lpfc_sli_get_iocbq - Allocates an iocb object from iocb pool
1334 * @phba: Pointer to HBA context object.
1335 *
1336 * This function is called with no lock held. This function
1337 * allocates a new driver iocb object from the iocb pool. If the
1338 * allocation is successful, it returns pointer to the newly
1339 * allocated iocb object else it returns NULL.
1340 **/
1341struct lpfc_iocbq *
1342lpfc_sli_get_iocbq(struct lpfc_hba *phba)
1343{
1344	struct lpfc_iocbq * iocbq = NULL;
1345	unsigned long iflags;
1346
1347	spin_lock_irqsave(&phba->hbalock, iflags);
1348	iocbq = __lpfc_sli_get_iocbq(phba);
1349	spin_unlock_irqrestore(&phba->hbalock, iflags);
1350	return iocbq;
1351}
1352
1353/**
1354 * __lpfc_sli_release_iocbq_s4 - Release iocb to the iocb pool
1355 * @phba: Pointer to HBA context object.
1356 * @iocbq: Pointer to driver iocb object.
1357 *
1358 * This function is called to release the driver iocb object
1359 * to the iocb pool. The iotag in the iocb object
1360 * does not change for each use of the iocb object. This function
1361 * clears all other fields of the iocb object when it is freed.
1362 * The sqlq structure that holds the xritag and phys and virtual
1363 * mappings for the scatter gather list is retrieved from the
1364 * active array of sglq. The get of the sglq pointer also clears
1365 * the entry in the array. If the status of the IO indiactes that
1366 * this IO was aborted then the sglq entry it put on the
1367 * lpfc_abts_els_sgl_list until the CQ_ABORTED_XRI is received. If the
1368 * IO has good status or fails for any other reason then the sglq
1369 * entry is added to the free list (lpfc_els_sgl_list). The hbalock is
1370 *  asserted held in the code path calling this routine.
1371 **/
1372static void
1373__lpfc_sli_release_iocbq_s4(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
1374{
1375	struct lpfc_sglq *sglq;
1376	unsigned long iflag = 0;
1377	struct lpfc_sli_ring *pring;
1378
1379	if (iocbq->sli4_xritag == NO_XRI)
1380		sglq = NULL;
1381	else
1382		sglq = __lpfc_clear_active_sglq(phba, iocbq->sli4_lxritag);
1383
1384
1385	if (sglq)  {
1386		if (iocbq->cmd_flag & LPFC_IO_NVMET) {
1387			spin_lock_irqsave(&phba->sli4_hba.sgl_list_lock,
1388					  iflag);
1389			sglq->state = SGL_FREED;
1390			sglq->ndlp = NULL;
1391			list_add_tail(&sglq->list,
1392				      &phba->sli4_hba.lpfc_nvmet_sgl_list);
1393			spin_unlock_irqrestore(
1394				&phba->sli4_hba.sgl_list_lock, iflag);
1395			goto out;
1396		}
1397
1398		if ((iocbq->cmd_flag & LPFC_EXCHANGE_BUSY) &&
1399		    (!(unlikely(pci_channel_offline(phba->pcidev)))) &&
1400		    sglq->state != SGL_XRI_ABORTED) {
1401			spin_lock_irqsave(&phba->sli4_hba.sgl_list_lock,
1402					  iflag);
1403
1404			/* Check if we can get a reference on ndlp */
1405			if (sglq->ndlp && !lpfc_nlp_get(sglq->ndlp))
1406				sglq->ndlp = NULL;
1407
1408			list_add(&sglq->list,
1409				 &phba->sli4_hba.lpfc_abts_els_sgl_list);
1410			spin_unlock_irqrestore(
1411				&phba->sli4_hba.sgl_list_lock, iflag);
1412		} else {
1413			spin_lock_irqsave(&phba->sli4_hba.sgl_list_lock,
1414					  iflag);
1415			sglq->state = SGL_FREED;
1416			sglq->ndlp = NULL;
1417			list_add_tail(&sglq->list,
1418				      &phba->sli4_hba.lpfc_els_sgl_list);
1419			spin_unlock_irqrestore(
1420				&phba->sli4_hba.sgl_list_lock, iflag);
1421			pring = lpfc_phba_elsring(phba);
1422			/* Check if TXQ queue needs to be serviced */
1423			if (pring && (!list_empty(&pring->txq)))
1424				lpfc_worker_wake_up(phba);
1425		}
1426	}
1427
1428out:
1429	/*
1430	 * Clean all volatile data fields, preserve iotag and node struct.
1431	 */
1432	memset_startat(iocbq, 0, wqe);
1433	iocbq->sli4_lxritag = NO_XRI;
1434	iocbq->sli4_xritag = NO_XRI;
1435	iocbq->cmd_flag &= ~(LPFC_IO_NVME | LPFC_IO_NVMET | LPFC_IO_CMF |
1436			      LPFC_IO_NVME_LS);
1437	list_add_tail(&iocbq->list, &phba->lpfc_iocb_list);
1438}
1439
1440
1441/**
1442 * __lpfc_sli_release_iocbq_s3 - Release iocb to the iocb pool
1443 * @phba: Pointer to HBA context object.
1444 * @iocbq: Pointer to driver iocb object.
1445 *
1446 * This function is called to release the driver iocb object to the
1447 * iocb pool. The iotag in the iocb object does not change for each
1448 * use of the iocb object. This function clears all other fields of
1449 * the iocb object when it is freed. The hbalock is asserted held in
1450 * the code path calling this routine.
1451 **/
1452static void
1453__lpfc_sli_release_iocbq_s3(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
1454{
1455
1456	/*
1457	 * Clean all volatile data fields, preserve iotag and node struct.
1458	 */
1459	memset_startat(iocbq, 0, iocb);
1460	iocbq->sli4_xritag = NO_XRI;
1461	list_add_tail(&iocbq->list, &phba->lpfc_iocb_list);
1462}
1463
1464/**
1465 * __lpfc_sli_release_iocbq - Release iocb to the iocb pool
1466 * @phba: Pointer to HBA context object.
1467 * @iocbq: Pointer to driver iocb object.
1468 *
1469 * This function is called with hbalock held to release driver
1470 * iocb object to the iocb pool. The iotag in the iocb object
1471 * does not change for each use of the iocb object. This function
1472 * clears all other fields of the iocb object when it is freed.
1473 **/
1474static void
1475__lpfc_sli_release_iocbq(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
1476{
1477	lockdep_assert_held(&phba->hbalock);
1478
1479	phba->__lpfc_sli_release_iocbq(phba, iocbq);
1480	phba->iocb_cnt--;
1481}
1482
1483/**
1484 * lpfc_sli_release_iocbq - Release iocb to the iocb pool
1485 * @phba: Pointer to HBA context object.
1486 * @iocbq: Pointer to driver iocb object.
1487 *
1488 * This function is called with no lock held to release the iocb to
1489 * iocb pool.
1490 **/
1491void
1492lpfc_sli_release_iocbq(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
1493{
1494	unsigned long iflags;
1495
1496	/*
1497	 * Clean all volatile data fields, preserve iotag and node struct.
1498	 */
1499	spin_lock_irqsave(&phba->hbalock, iflags);
1500	__lpfc_sli_release_iocbq(phba, iocbq);
1501	spin_unlock_irqrestore(&phba->hbalock, iflags);
1502}
1503
1504/**
1505 * lpfc_sli_cancel_iocbs - Cancel all iocbs from a list.
1506 * @phba: Pointer to HBA context object.
1507 * @iocblist: List of IOCBs.
1508 * @ulpstatus: ULP status in IOCB command field.
1509 * @ulpWord4: ULP word-4 in IOCB command field.
1510 *
1511 * This function is called with a list of IOCBs to cancel. It cancels the IOCB
1512 * on the list by invoking the complete callback function associated with the
1513 * IOCB with the provided @ulpstatus and @ulpword4 set to the IOCB commond
1514 * fields.
1515 **/
1516void
1517lpfc_sli_cancel_iocbs(struct lpfc_hba *phba, struct list_head *iocblist,
1518		      uint32_t ulpstatus, uint32_t ulpWord4)
1519{
1520	struct lpfc_iocbq *piocb;
1521
1522	while (!list_empty(iocblist)) {
1523		list_remove_head(iocblist, piocb, struct lpfc_iocbq, list);
1524		if (piocb->cmd_cmpl) {
1525			if (piocb->cmd_flag & LPFC_IO_NVME) {
1526				lpfc_nvme_cancel_iocb(phba, piocb,
1527						      ulpstatus, ulpWord4);
1528			} else {
1529				if (phba->sli_rev == LPFC_SLI_REV4) {
1530					bf_set(lpfc_wcqe_c_status,
1531					       &piocb->wcqe_cmpl, ulpstatus);
1532					piocb->wcqe_cmpl.parameter = ulpWord4;
1533				} else {
1534					piocb->iocb.ulpStatus = ulpstatus;
1535					piocb->iocb.un.ulpWord[4] = ulpWord4;
1536				}
1537				(piocb->cmd_cmpl) (phba, piocb, piocb);
1538			}
1539		} else {
1540			lpfc_sli_release_iocbq(phba, piocb);
1541		}
1542	}
1543	return;
1544}
1545
1546/**
1547 * lpfc_sli_iocb_cmd_type - Get the iocb type
1548 * @iocb_cmnd: iocb command code.
1549 *
1550 * This function is called by ring event handler function to get the iocb type.
1551 * This function translates the iocb command to an iocb command type used to
1552 * decide the final disposition of each completed IOCB.
1553 * The function returns
1554 * LPFC_UNKNOWN_IOCB if it is an unsupported iocb
1555 * LPFC_SOL_IOCB     if it is a solicited iocb completion
1556 * LPFC_ABORT_IOCB   if it is an abort iocb
1557 * LPFC_UNSOL_IOCB   if it is an unsolicited iocb
1558 *
1559 * The caller is not required to hold any lock.
1560 **/
1561static lpfc_iocb_type
1562lpfc_sli_iocb_cmd_type(uint8_t iocb_cmnd)
1563{
1564	lpfc_iocb_type type = LPFC_UNKNOWN_IOCB;
1565
1566	if (iocb_cmnd > CMD_MAX_IOCB_CMD)
1567		return 0;
1568
1569	switch (iocb_cmnd) {
1570	case CMD_XMIT_SEQUENCE_CR:
1571	case CMD_XMIT_SEQUENCE_CX:
1572	case CMD_XMIT_BCAST_CN:
1573	case CMD_XMIT_BCAST_CX:
1574	case CMD_ELS_REQUEST_CR:
1575	case CMD_ELS_REQUEST_CX:
1576	case CMD_CREATE_XRI_CR:
1577	case CMD_CREATE_XRI_CX:
1578	case CMD_GET_RPI_CN:
1579	case CMD_XMIT_ELS_RSP_CX:
1580	case CMD_GET_RPI_CR:
1581	case CMD_FCP_IWRITE_CR:
1582	case CMD_FCP_IWRITE_CX:
1583	case CMD_FCP_IREAD_CR:
1584	case CMD_FCP_IREAD_CX:
1585	case CMD_FCP_ICMND_CR:
1586	case CMD_FCP_ICMND_CX:
1587	case CMD_FCP_TSEND_CX:
1588	case CMD_FCP_TRSP_CX:
1589	case CMD_FCP_TRECEIVE_CX:
1590	case CMD_FCP_AUTO_TRSP_CX:
1591	case CMD_ADAPTER_MSG:
1592	case CMD_ADAPTER_DUMP:
1593	case CMD_XMIT_SEQUENCE64_CR:
1594	case CMD_XMIT_SEQUENCE64_CX:
1595	case CMD_XMIT_BCAST64_CN:
1596	case CMD_XMIT_BCAST64_CX:
1597	case CMD_ELS_REQUEST64_CR:
1598	case CMD_ELS_REQUEST64_CX:
1599	case CMD_FCP_IWRITE64_CR:
1600	case CMD_FCP_IWRITE64_CX:
1601	case CMD_FCP_IREAD64_CR:
1602	case CMD_FCP_IREAD64_CX:
1603	case CMD_FCP_ICMND64_CR:
1604	case CMD_FCP_ICMND64_CX:
1605	case CMD_FCP_TSEND64_CX:
1606	case CMD_FCP_TRSP64_CX:
1607	case CMD_FCP_TRECEIVE64_CX:
1608	case CMD_GEN_REQUEST64_CR:
1609	case CMD_GEN_REQUEST64_CX:
1610	case CMD_XMIT_ELS_RSP64_CX:
1611	case DSSCMD_IWRITE64_CR:
1612	case DSSCMD_IWRITE64_CX:
1613	case DSSCMD_IREAD64_CR:
1614	case DSSCMD_IREAD64_CX:
1615	case CMD_SEND_FRAME:
1616		type = LPFC_SOL_IOCB;
1617		break;
1618	case CMD_ABORT_XRI_CN:
1619	case CMD_ABORT_XRI_CX:
1620	case CMD_CLOSE_XRI_CN:
1621	case CMD_CLOSE_XRI_CX:
1622	case CMD_XRI_ABORTED_CX:
1623	case CMD_ABORT_MXRI64_CN:
1624	case CMD_XMIT_BLS_RSP64_CX:
1625		type = LPFC_ABORT_IOCB;
1626		break;
1627	case CMD_RCV_SEQUENCE_CX:
1628	case CMD_RCV_ELS_REQ_CX:
1629	case CMD_RCV_SEQUENCE64_CX:
1630	case CMD_RCV_ELS_REQ64_CX:
1631	case CMD_ASYNC_STATUS:
1632	case CMD_IOCB_RCV_SEQ64_CX:
1633	case CMD_IOCB_RCV_ELS64_CX:
1634	case CMD_IOCB_RCV_CONT64_CX:
1635	case CMD_IOCB_RET_XRI64_CX:
1636		type = LPFC_UNSOL_IOCB;
1637		break;
1638	case CMD_IOCB_XMIT_MSEQ64_CR:
1639	case CMD_IOCB_XMIT_MSEQ64_CX:
1640	case CMD_IOCB_RCV_SEQ_LIST64_CX:
1641	case CMD_IOCB_RCV_ELS_LIST64_CX:
1642	case CMD_IOCB_CLOSE_EXTENDED_CN:
1643	case CMD_IOCB_ABORT_EXTENDED_CN:
1644	case CMD_IOCB_RET_HBQE64_CN:
1645	case CMD_IOCB_FCP_IBIDIR64_CR:
1646	case CMD_IOCB_FCP_IBIDIR64_CX:
1647	case CMD_IOCB_FCP_ITASKMGT64_CX:
1648	case CMD_IOCB_LOGENTRY_CN:
1649	case CMD_IOCB_LOGENTRY_ASYNC_CN:
1650		printk("%s - Unhandled SLI-3 Command x%x\n",
1651				__func__, iocb_cmnd);
1652		type = LPFC_UNKNOWN_IOCB;
1653		break;
1654	default:
1655		type = LPFC_UNKNOWN_IOCB;
1656		break;
1657	}
1658
1659	return type;
1660}
1661
1662/**
1663 * lpfc_sli_ring_map - Issue config_ring mbox for all rings
1664 * @phba: Pointer to HBA context object.
1665 *
1666 * This function is called from SLI initialization code
1667 * to configure every ring of the HBA's SLI interface. The
1668 * caller is not required to hold any lock. This function issues
1669 * a config_ring mailbox command for each ring.
1670 * This function returns zero if successful else returns a negative
1671 * error code.
1672 **/
1673static int
1674lpfc_sli_ring_map(struct lpfc_hba *phba)
1675{
1676	struct lpfc_sli *psli = &phba->sli;
1677	LPFC_MBOXQ_t *pmb;
1678	MAILBOX_t *pmbox;
1679	int i, rc, ret = 0;
1680
1681	pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
1682	if (!pmb)
1683		return -ENOMEM;
1684	pmbox = &pmb->u.mb;
1685	phba->link_state = LPFC_INIT_MBX_CMDS;
1686	for (i = 0; i < psli->num_rings; i++) {
1687		lpfc_config_ring(phba, i, pmb);
1688		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
1689		if (rc != MBX_SUCCESS) {
1690			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
1691					"0446 Adapter failed to init (%d), "
1692					"mbxCmd x%x CFG_RING, mbxStatus x%x, "
1693					"ring %d\n",
1694					rc, pmbox->mbxCommand,
1695					pmbox->mbxStatus, i);
1696			phba->link_state = LPFC_HBA_ERROR;
1697			ret = -ENXIO;
1698			break;
1699		}
1700	}
1701	mempool_free(pmb, phba->mbox_mem_pool);
1702	return ret;
1703}
1704
1705/**
1706 * lpfc_sli_ringtxcmpl_put - Adds new iocb to the txcmplq
1707 * @phba: Pointer to HBA context object.
1708 * @pring: Pointer to driver SLI ring object.
1709 * @piocb: Pointer to the driver iocb object.
1710 *
1711 * The driver calls this function with the hbalock held for SLI3 ports or
1712 * the ring lock held for SLI4 ports. The function adds the
1713 * new iocb to txcmplq of the given ring. This function always returns
1714 * 0. If this function is called for ELS ring, this function checks if
1715 * there is a vport associated with the ELS command. This function also
1716 * starts els_tmofunc timer if this is an ELS command.
1717 **/
1718static int
1719lpfc_sli_ringtxcmpl_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
1720			struct lpfc_iocbq *piocb)
1721{
1722	u32 ulp_command = 0;
1723
1724	BUG_ON(!piocb);
1725	ulp_command = get_job_cmnd(phba, piocb);
1726
1727	list_add_tail(&piocb->list, &pring->txcmplq);
1728	piocb->cmd_flag |= LPFC_IO_ON_TXCMPLQ;
1729	pring->txcmplq_cnt++;
1730	if ((unlikely(pring->ringno == LPFC_ELS_RING)) &&
1731	   (ulp_command != CMD_ABORT_XRI_WQE) &&
1732	   (ulp_command != CMD_ABORT_XRI_CN) &&
1733	   (ulp_command != CMD_CLOSE_XRI_CN)) {
1734		BUG_ON(!piocb->vport);
1735		if (!test_bit(FC_UNLOADING, &piocb->vport->load_flag))
1736			mod_timer(&piocb->vport->els_tmofunc,
1737				  jiffies +
1738				  msecs_to_jiffies(1000 * (phba->fc_ratov << 1)));
1739	}
1740
1741	return 0;
1742}
1743
1744/**
1745 * lpfc_sli_ringtx_get - Get first element of the txq
1746 * @phba: Pointer to HBA context object.
1747 * @pring: Pointer to driver SLI ring object.
1748 *
1749 * This function is called with hbalock held to get next
1750 * iocb in txq of the given ring. If there is any iocb in
1751 * the txq, the function returns first iocb in the list after
1752 * removing the iocb from the list, else it returns NULL.
1753 **/
1754struct lpfc_iocbq *
1755lpfc_sli_ringtx_get(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
1756{
1757	struct lpfc_iocbq *cmd_iocb;
1758
1759	lockdep_assert_held(&phba->hbalock);
1760
1761	list_remove_head((&pring->txq), cmd_iocb, struct lpfc_iocbq, list);
1762	return cmd_iocb;
1763}
1764
1765/**
1766 * lpfc_cmf_sync_cmpl - Process a CMF_SYNC_WQE cmpl
1767 * @phba: Pointer to HBA context object.
1768 * @cmdiocb: Pointer to driver command iocb object.
1769 * @rspiocb: Pointer to driver response iocb object.
1770 *
1771 * This routine will inform the driver of any BW adjustments we need
1772 * to make. These changes will be picked up during the next CMF
1773 * timer interrupt. In addition, any BW changes will be logged
1774 * with LOG_CGN_MGMT.
1775 **/
1776static void
1777lpfc_cmf_sync_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
1778		   struct lpfc_iocbq *rspiocb)
1779{
1780	union lpfc_wqe128 *wqe;
1781	uint32_t status, info;
1782	struct lpfc_wcqe_complete *wcqe = &rspiocb->wcqe_cmpl;
1783	uint64_t bw, bwdif, slop;
1784	uint64_t pcent, bwpcent;
1785	int asig, afpin, sigcnt, fpincnt;
1786	int wsigmax, wfpinmax, cg, tdp;
1787	char *s;
1788
1789	/* First check for error */
1790	status = bf_get(lpfc_wcqe_c_status, wcqe);
1791	if (status) {
1792		lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
1793				"6211 CMF_SYNC_WQE Error "
1794				"req_tag x%x status x%x hwstatus x%x "
1795				"tdatap x%x parm x%x\n",
1796				bf_get(lpfc_wcqe_c_request_tag, wcqe),
1797				bf_get(lpfc_wcqe_c_status, wcqe),
1798				bf_get(lpfc_wcqe_c_hw_status, wcqe),
1799				wcqe->total_data_placed,
1800				wcqe->parameter);
1801		goto out;
1802	}
1803
1804	/* Gather congestion information on a successful cmpl */
1805	info = wcqe->parameter;
1806	phba->cmf_active_info = info;
1807
1808	/* See if firmware info count is valid or has changed */
1809	if (info > LPFC_MAX_CMF_INFO || phba->cmf_info_per_interval == info)
1810		info = 0;
1811	else
1812		phba->cmf_info_per_interval = info;
1813
1814	tdp = bf_get(lpfc_wcqe_c_cmf_bw, wcqe);
1815	cg = bf_get(lpfc_wcqe_c_cmf_cg, wcqe);
1816
1817	/* Get BW requirement from firmware */
1818	bw = (uint64_t)tdp * LPFC_CMF_BLK_SIZE;
1819	if (!bw) {
1820		lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
1821				"6212 CMF_SYNC_WQE x%x: NULL bw\n",
1822				bf_get(lpfc_wcqe_c_request_tag, wcqe));
1823		goto out;
1824	}
1825
1826	/* Gather information needed for logging if a BW change is required */
1827	wqe = &cmdiocb->wqe;
1828	asig = bf_get(cmf_sync_asig, &wqe->cmf_sync);
1829	afpin = bf_get(cmf_sync_afpin, &wqe->cmf_sync);
1830	fpincnt = bf_get(cmf_sync_wfpincnt, &wqe->cmf_sync);
1831	sigcnt = bf_get(cmf_sync_wsigcnt, &wqe->cmf_sync);
1832	if (phba->cmf_max_bytes_per_interval != bw ||
1833	    (asig || afpin || sigcnt || fpincnt)) {
1834		/* Are we increasing or decreasing BW */
1835		if (phba->cmf_max_bytes_per_interval <  bw) {
1836			bwdif = bw - phba->cmf_max_bytes_per_interval;
1837			s = "Increase";
1838		} else {
1839			bwdif = phba->cmf_max_bytes_per_interval - bw;
1840			s = "Decrease";
1841		}
1842
1843		/* What is the change percentage */
1844		slop = div_u64(phba->cmf_link_byte_count, 200); /*For rounding*/
1845		pcent = div64_u64(bwdif * 100 + slop,
1846				  phba->cmf_link_byte_count);
1847		bwpcent = div64_u64(bw * 100 + slop,
1848				    phba->cmf_link_byte_count);
1849		/* Because of bytes adjustment due to shorter timer in
1850		 * lpfc_cmf_timer() the cmf_link_byte_count can be shorter and
1851		 * may seem like BW is above 100%.
1852		 */
1853		if (bwpcent > 100)
1854			bwpcent = 100;
1855
1856		if (phba->cmf_max_bytes_per_interval < bw &&
1857		    bwpcent > 95)
1858			lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
1859					"6208 Congestion bandwidth "
1860					"limits removed\n");
1861		else if ((phba->cmf_max_bytes_per_interval > bw) &&
1862			 ((bwpcent + pcent) <= 100) && ((bwpcent + pcent) > 95))
1863			lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
1864					"6209 Congestion bandwidth "
1865					"limits in effect\n");
1866
1867		if (asig) {
1868			lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
1869					"6237 BW Threshold %lld%% (%lld): "
1870					"%lld%% %s: Signal Alarm: cg:%d "
1871					"Info:%u\n",
1872					bwpcent, bw, pcent, s, cg,
1873					phba->cmf_active_info);
1874		} else if (afpin) {
1875			lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
1876					"6238 BW Threshold %lld%% (%lld): "
1877					"%lld%% %s: FPIN Alarm: cg:%d "
1878					"Info:%u\n",
1879					bwpcent, bw, pcent, s, cg,
1880					phba->cmf_active_info);
1881		} else if (sigcnt) {
1882			wsigmax = bf_get(cmf_sync_wsigmax, &wqe->cmf_sync);
1883			lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
1884					"6239 BW Threshold %lld%% (%lld): "
1885					"%lld%% %s: Signal Warning: "
1886					"Cnt %d Max %d: cg:%d Info:%u\n",
1887					bwpcent, bw, pcent, s, sigcnt,
1888					wsigmax, cg, phba->cmf_active_info);
1889		} else if (fpincnt) {
1890			wfpinmax = bf_get(cmf_sync_wfpinmax, &wqe->cmf_sync);
1891			lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
1892					"6240 BW Threshold %lld%% (%lld): "
1893					"%lld%% %s: FPIN Warning: "
1894					"Cnt %d Max %d: cg:%d Info:%u\n",
1895					bwpcent, bw, pcent, s, fpincnt,
1896					wfpinmax, cg, phba->cmf_active_info);
1897		} else {
1898			lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
1899					"6241 BW Threshold %lld%% (%lld): "
1900					"CMF %lld%% %s: cg:%d Info:%u\n",
1901					bwpcent, bw, pcent, s, cg,
1902					phba->cmf_active_info);
1903		}
1904	} else if (info) {
1905		lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
1906				"6246 Info Threshold %u\n", info);
1907	}
1908
1909	/* Save BW change to be picked up during next timer interrupt */
1910	phba->cmf_last_sync_bw = bw;
1911out:
1912	lpfc_sli_release_iocbq(phba, cmdiocb);
1913}
1914
1915/**
1916 * lpfc_issue_cmf_sync_wqe - Issue a CMF_SYNC_WQE
1917 * @phba: Pointer to HBA context object.
1918 * @ms:   ms to set in WQE interval, 0 means use init op
1919 * @total: Total rcv bytes for this interval
1920 *
1921 * This routine is called every CMF timer interrupt. Its purpose is
1922 * to issue a CMF_SYNC_WQE to the firmware to inform it of any events
1923 * that may indicate we have congestion (FPINs or Signals). Upon
1924 * completion, the firmware will indicate any BW restrictions the
1925 * driver may need to take.
1926 **/
1927int
1928lpfc_issue_cmf_sync_wqe(struct lpfc_hba *phba, u32 ms, u64 total)
1929{
1930	union lpfc_wqe128 *wqe;
1931	struct lpfc_iocbq *sync_buf;
1932	unsigned long iflags;
1933	u32 ret_val;
1934	u32 atot, wtot, max;
1935	u8 warn_sync_period = 0;
1936
1937	/* First address any alarm / warning activity */
1938	atot = atomic_xchg(&phba->cgn_sync_alarm_cnt, 0);
1939	wtot = atomic_xchg(&phba->cgn_sync_warn_cnt, 0);
1940
1941	/* ONLY Managed mode will send the CMF_SYNC_WQE to the HBA */
1942	if (phba->cmf_active_mode != LPFC_CFG_MANAGED ||
1943	    phba->link_state == LPFC_LINK_DOWN)
1944		return 0;
1945
1946	spin_lock_irqsave(&phba->hbalock, iflags);
1947	sync_buf = __lpfc_sli_get_iocbq(phba);
1948	if (!sync_buf) {
1949		lpfc_printf_log(phba, KERN_ERR, LOG_CGN_MGMT,
1950				"6244 No available WQEs for CMF_SYNC_WQE\n");
1951		ret_val = ENOMEM;
1952		goto out_unlock;
1953	}
1954
1955	wqe = &sync_buf->wqe;
1956
1957	/* WQEs are reused.  Clear stale data and set key fields to zero */
1958	memset(wqe, 0, sizeof(*wqe));
1959
1960	/* If this is the very first CMF_SYNC_WQE, issue an init operation */
1961	if (!ms) {
1962		lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
1963				"6441 CMF Init %d - CMF_SYNC_WQE\n",
1964				phba->fc_eventTag);
1965		bf_set(cmf_sync_op, &wqe->cmf_sync, 1); /* 1=init */
1966		bf_set(cmf_sync_interval, &wqe->cmf_sync, LPFC_CMF_INTERVAL);
1967		goto initpath;
1968	}
1969
1970	bf_set(cmf_sync_op, &wqe->cmf_sync, 0); /* 0=recalc */
1971	bf_set(cmf_sync_interval, &wqe->cmf_sync, ms);
1972
1973	/* Check for alarms / warnings */
1974	if (atot) {
1975		if (phba->cgn_reg_signal == EDC_CG_SIG_WARN_ALARM) {
1976			/* We hit an Signal alarm condition */
1977			bf_set(cmf_sync_asig, &wqe->cmf_sync, 1);
1978		} else {
1979			/* We hit a FPIN alarm condition */
1980			bf_set(cmf_sync_afpin, &wqe->cmf_sync, 1);
1981		}
1982	} else if (wtot) {
1983		if (phba->cgn_reg_signal == EDC_CG_SIG_WARN_ONLY ||
1984		    phba->cgn_reg_signal == EDC_CG_SIG_WARN_ALARM) {
1985			/* We hit an Signal warning condition */
1986			max = LPFC_SEC_TO_MSEC / lpfc_fabric_cgn_frequency *
1987				lpfc_acqe_cgn_frequency;
1988			bf_set(cmf_sync_wsigmax, &wqe->cmf_sync, max);
1989			bf_set(cmf_sync_wsigcnt, &wqe->cmf_sync, wtot);
1990			warn_sync_period = lpfc_acqe_cgn_frequency;
1991		} else {
1992			/* We hit a FPIN warning condition */
1993			bf_set(cmf_sync_wfpinmax, &wqe->cmf_sync, 1);
1994			bf_set(cmf_sync_wfpincnt, &wqe->cmf_sync, 1);
1995			if (phba->cgn_fpin_frequency != LPFC_FPIN_INIT_FREQ)
1996				warn_sync_period =
1997				LPFC_MSECS_TO_SECS(phba->cgn_fpin_frequency);
1998		}
1999	}
2000
2001	/* Update total read blocks during previous timer interval */
2002	wqe->cmf_sync.read_bytes = (u32)(total / LPFC_CMF_BLK_SIZE);
2003
2004initpath:
2005	bf_set(cmf_sync_ver, &wqe->cmf_sync, LPFC_CMF_SYNC_VER);
2006	wqe->cmf_sync.event_tag = phba->fc_eventTag;
2007	bf_set(cmf_sync_cmnd, &wqe->cmf_sync, CMD_CMF_SYNC_WQE);
2008
2009	/* Setup reqtag to match the wqe completion. */
2010	bf_set(cmf_sync_reqtag, &wqe->cmf_sync, sync_buf->iotag);
2011
2012	bf_set(cmf_sync_qosd, &wqe->cmf_sync, 1);
2013	bf_set(cmf_sync_period, &wqe->cmf_sync, warn_sync_period);
2014
2015	bf_set(cmf_sync_cmd_type, &wqe->cmf_sync, CMF_SYNC_COMMAND);
2016	bf_set(cmf_sync_wqec, &wqe->cmf_sync, 1);
2017	bf_set(cmf_sync_cqid, &wqe->cmf_sync, LPFC_WQE_CQ_ID_DEFAULT);
2018
2019	sync_buf->vport = phba->pport;
2020	sync_buf->cmd_cmpl = lpfc_cmf_sync_cmpl;
2021	sync_buf->cmd_dmabuf = NULL;
2022	sync_buf->rsp_dmabuf = NULL;
2023	sync_buf->bpl_dmabuf = NULL;
2024	sync_buf->sli4_xritag = NO_XRI;
2025
2026	sync_buf->cmd_flag |= LPFC_IO_CMF;
2027	ret_val = lpfc_sli4_issue_wqe(phba, &phba->sli4_hba.hdwq[0], sync_buf);
2028	if (ret_val) {
2029		lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
2030				"6214 Cannot issue CMF_SYNC_WQE: x%x\n",
2031				ret_val);
2032		__lpfc_sli_release_iocbq(phba, sync_buf);
2033	}
2034out_unlock:
2035	spin_unlock_irqrestore(&phba->hbalock, iflags);
2036	return ret_val;
2037}
2038
2039/**
2040 * lpfc_sli_next_iocb_slot - Get next iocb slot in the ring
2041 * @phba: Pointer to HBA context object.
2042 * @pring: Pointer to driver SLI ring object.
2043 *
2044 * This function is called with hbalock held and the caller must post the
2045 * iocb without releasing the lock. If the caller releases the lock,
2046 * iocb slot returned by the function is not guaranteed to be available.
2047 * The function returns pointer to the next available iocb slot if there
2048 * is available slot in the ring, else it returns NULL.
2049 * If the get index of the ring is ahead of the put index, the function
2050 * will post an error attention event to the worker thread to take the
2051 * HBA to offline state.
2052 **/
2053static IOCB_t *
2054lpfc_sli_next_iocb_slot (struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
2055{
2056	struct lpfc_pgp *pgp = &phba->port_gp[pring->ringno];
2057	uint32_t  max_cmd_idx = pring->sli.sli3.numCiocb;
2058
2059	lockdep_assert_held(&phba->hbalock);
2060
2061	if ((pring->sli.sli3.next_cmdidx == pring->sli.sli3.cmdidx) &&
2062	   (++pring->sli.sli3.next_cmdidx >= max_cmd_idx))
2063		pring->sli.sli3.next_cmdidx = 0;
2064
2065	if (unlikely(pring->sli.sli3.local_getidx ==
2066		pring->sli.sli3.next_cmdidx)) {
2067
2068		pring->sli.sli3.local_getidx = le32_to_cpu(pgp->cmdGetInx);
2069
2070		if (unlikely(pring->sli.sli3.local_getidx >= max_cmd_idx)) {
2071			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
2072					"0315 Ring %d issue: portCmdGet %d "
2073					"is bigger than cmd ring %d\n",
2074					pring->ringno,
2075					pring->sli.sli3.local_getidx,
2076					max_cmd_idx);
2077
2078			phba->link_state = LPFC_HBA_ERROR;
2079			/*
2080			 * All error attention handlers are posted to
2081			 * worker thread
2082			 */
2083			phba->work_ha |= HA_ERATT;
2084			phba->work_hs = HS_FFER3;
2085
2086			lpfc_worker_wake_up(phba);
2087
2088			return NULL;
2089		}
2090
2091		if (pring->sli.sli3.local_getidx == pring->sli.sli3.next_cmdidx)
2092			return NULL;
2093	}
2094
2095	return lpfc_cmd_iocb(phba, pring);
2096}
2097
2098/**
2099 * lpfc_sli_next_iotag - Get an iotag for the iocb
2100 * @phba: Pointer to HBA context object.
2101 * @iocbq: Pointer to driver iocb object.
2102 *
2103 * This function gets an iotag for the iocb. If there is no unused iotag and
2104 * the iocbq_lookup_len < 0xffff, this function allocates a bigger iotag_lookup
2105 * array and assigns a new iotag.
2106 * The function returns the allocated iotag if successful, else returns zero.
2107 * Zero is not a valid iotag.
2108 * The caller is not required to hold any lock.
2109 **/
2110uint16_t
2111lpfc_sli_next_iotag(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
2112{
2113	struct lpfc_iocbq **new_arr;
2114	struct lpfc_iocbq **old_arr;
2115	size_t new_len;
2116	struct lpfc_sli *psli = &phba->sli;
2117	uint16_t iotag;
2118
2119	spin_lock_irq(&phba->hbalock);
2120	iotag = psli->last_iotag;
2121	if(++iotag < psli->iocbq_lookup_len) {
2122		psli->last_iotag = iotag;
2123		psli->iocbq_lookup[iotag] = iocbq;
2124		spin_unlock_irq(&phba->hbalock);
2125		iocbq->iotag = iotag;
2126		return iotag;
2127	} else if (psli->iocbq_lookup_len < (0xffff
2128					   - LPFC_IOCBQ_LOOKUP_INCREMENT)) {
2129		new_len = psli->iocbq_lookup_len + LPFC_IOCBQ_LOOKUP_INCREMENT;
2130		spin_unlock_irq(&phba->hbalock);
2131		new_arr = kcalloc(new_len, sizeof(struct lpfc_iocbq *),
2132				  GFP_KERNEL);
2133		if (new_arr) {
2134			spin_lock_irq(&phba->hbalock);
2135			old_arr = psli->iocbq_lookup;
2136			if (new_len <= psli->iocbq_lookup_len) {
2137				/* highly unprobable case */
2138				kfree(new_arr);
2139				iotag = psli->last_iotag;
2140				if(++iotag < psli->iocbq_lookup_len) {
2141					psli->last_iotag = iotag;
2142					psli->iocbq_lookup[iotag] = iocbq;
2143					spin_unlock_irq(&phba->hbalock);
2144					iocbq->iotag = iotag;
2145					return iotag;
2146				}
2147				spin_unlock_irq(&phba->hbalock);
2148				return 0;
2149			}
2150			if (psli->iocbq_lookup)
2151				memcpy(new_arr, old_arr,
2152				       ((psli->last_iotag  + 1) *
2153					sizeof (struct lpfc_iocbq *)));
2154			psli->iocbq_lookup = new_arr;
2155			psli->iocbq_lookup_len = new_len;
2156			psli->last_iotag = iotag;
2157			psli->iocbq_lookup[iotag] = iocbq;
2158			spin_unlock_irq(&phba->hbalock);
2159			iocbq->iotag = iotag;
2160			kfree(old_arr);
2161			return iotag;
2162		}
2163	} else
2164		spin_unlock_irq(&phba->hbalock);
2165
2166	lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
2167			"0318 Failed to allocate IOTAG.last IOTAG is %d\n",
2168			psli->last_iotag);
2169
2170	return 0;
2171}
2172
2173/**
2174 * lpfc_sli_submit_iocb - Submit an iocb to the firmware
2175 * @phba: Pointer to HBA context object.
2176 * @pring: Pointer to driver SLI ring object.
2177 * @iocb: Pointer to iocb slot in the ring.
2178 * @nextiocb: Pointer to driver iocb object which need to be
2179 *            posted to firmware.
2180 *
2181 * This function is called to post a new iocb to the firmware. This
2182 * function copies the new iocb to ring iocb slot and updates the
2183 * ring pointers. It adds the new iocb to txcmplq if there is
2184 * a completion call back for this iocb else the function will free the
2185 * iocb object.  The hbalock is asserted held in the code path calling
2186 * this routine.
2187 **/
2188static void
2189lpfc_sli_submit_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
2190		IOCB_t *iocb, struct lpfc_iocbq *nextiocb)
2191{
2192	/*
2193	 * Set up an iotag
2194	 */
2195	nextiocb->iocb.ulpIoTag = (nextiocb->cmd_cmpl) ? nextiocb->iotag : 0;
2196
2197
2198	if (pring->ringno == LPFC_ELS_RING) {
2199		lpfc_debugfs_slow_ring_trc(phba,
2200			"IOCB cmd ring:   wd4:x%08x wd6:x%08x wd7:x%08x",
2201			*(((uint32_t *) &nextiocb->iocb) + 4),
2202			*(((uint32_t *) &nextiocb->iocb) + 6),
2203			*(((uint32_t *) &nextiocb->iocb) + 7));
2204	}
2205
2206	/*
2207	 * Issue iocb command to adapter
2208	 */
2209	lpfc_sli_pcimem_bcopy(&nextiocb->iocb, iocb, phba->iocb_cmd_size);
2210	wmb();
2211	pring->stats.iocb_cmd++;
2212
2213	/*
2214	 * If there is no completion routine to call, we can release the
2215	 * IOCB buffer back right now. For IOCBs, like QUE_RING_BUF,
2216	 * that have no rsp ring completion, cmd_cmpl MUST be NULL.
2217	 */
2218	if (nextiocb->cmd_cmpl)
2219		lpfc_sli_ringtxcmpl_put(phba, pring, nextiocb);
2220	else
2221		__lpfc_sli_release_iocbq(phba, nextiocb);
2222
2223	/*
2224	 * Let the HBA know what IOCB slot will be the next one the
2225	 * driver will put a command into.
2226	 */
2227	pring->sli.sli3.cmdidx = pring->sli.sli3.next_cmdidx;
2228	writel(pring->sli.sli3.cmdidx, &phba->host_gp[pring->ringno].cmdPutInx);
2229}
2230
2231/**
2232 * lpfc_sli_update_full_ring - Update the chip attention register
2233 * @phba: Pointer to HBA context object.
2234 * @pring: Pointer to driver SLI ring object.
2235 *
2236 * The caller is not required to hold any lock for calling this function.
2237 * This function updates the chip attention bits for the ring to inform firmware
2238 * that there are pending work to be done for this ring and requests an
2239 * interrupt when there is space available in the ring. This function is
2240 * called when the driver is unable to post more iocbs to the ring due
2241 * to unavailability of space in the ring.
2242 **/
2243static void
2244lpfc_sli_update_full_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
2245{
2246	int ringno = pring->ringno;
2247
2248	pring->flag |= LPFC_CALL_RING_AVAILABLE;
2249
2250	wmb();
2251
2252	/*
2253	 * Set ring 'ringno' to SET R0CE_REQ in Chip Att register.
2254	 * The HBA will tell us when an IOCB entry is available.
2255	 */
2256	writel((CA_R0ATT|CA_R0CE_REQ) << (ringno*4), phba->CAregaddr);
2257	readl(phba->CAregaddr); /* flush */
2258
2259	pring->stats.iocb_cmd_full++;
2260}
2261
2262/**
2263 * lpfc_sli_update_ring - Update chip attention register
2264 * @phba: Pointer to HBA context object.
2265 * @pring: Pointer to driver SLI ring object.
2266 *
2267 * This function updates the chip attention register bit for the
2268 * given ring to inform HBA that there is more work to be done
2269 * in this ring. The caller is not required to hold any lock.
2270 **/
2271static void
2272lpfc_sli_update_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
2273{
2274	int ringno = pring->ringno;
2275
2276	/*
2277	 * Tell the HBA that there is work to do in this ring.
2278	 */
2279	if (!(phba->sli3_options & LPFC_SLI3_CRP_ENABLED)) {
2280		wmb();
2281		writel(CA_R0ATT << (ringno * 4), phba->CAregaddr);
2282		readl(phba->CAregaddr); /* flush */
2283	}
2284}
2285
2286/**
2287 * lpfc_sli_resume_iocb - Process iocbs in the txq
2288 * @phba: Pointer to HBA context object.
2289 * @pring: Pointer to driver SLI ring object.
2290 *
2291 * This function is called with hbalock held to post pending iocbs
2292 * in the txq to the firmware. This function is called when driver
2293 * detects space available in the ring.
2294 **/
2295static void
2296lpfc_sli_resume_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
2297{
2298	IOCB_t *iocb;
2299	struct lpfc_iocbq *nextiocb;
2300
2301	lockdep_assert_held(&phba->hbalock);
2302
2303	/*
2304	 * Check to see if:
2305	 *  (a) there is anything on the txq to send
2306	 *  (b) link is up
2307	 *  (c) link attention events can be processed (fcp ring only)
2308	 *  (d) IOCB processing is not blocked by the outstanding mbox command.
2309	 */
2310
2311	if (lpfc_is_link_up(phba) &&
2312	    (!list_empty(&pring->txq)) &&
2313	    (pring->ringno != LPFC_FCP_RING ||
2314	     phba->sli.sli_flag & LPFC_PROCESS_LA)) {
2315
2316		while ((iocb = lpfc_sli_next_iocb_slot(phba, pring)) &&
2317		       (nextiocb = lpfc_sli_ringtx_get(phba, pring)))
2318			lpfc_sli_submit_iocb(phba, pring, iocb, nextiocb);
2319
2320		if (iocb)
2321			lpfc_sli_update_ring(phba, pring);
2322		else
2323			lpfc_sli_update_full_ring(phba, pring);
2324	}
2325
2326	return;
2327}
2328
2329/**
2330 * lpfc_sli_next_hbq_slot - Get next hbq entry for the HBQ
2331 * @phba: Pointer to HBA context object.
2332 * @hbqno: HBQ number.
2333 *
2334 * This function is called with hbalock held to get the next
2335 * available slot for the given HBQ. If there is free slot
2336 * available for the HBQ it will return pointer to the next available
2337 * HBQ entry else it will return NULL.
2338 **/
2339static struct lpfc_hbq_entry *
2340lpfc_sli_next_hbq_slot(struct lpfc_hba *phba, uint32_t hbqno)
2341{
2342	struct hbq_s *hbqp = &phba->hbqs[hbqno];
2343
2344	lockdep_assert_held(&phba->hbalock);
2345
2346	if (hbqp->next_hbqPutIdx == hbqp->hbqPutIdx &&
2347	    ++hbqp->next_hbqPutIdx >= hbqp->entry_count)
2348		hbqp->next_hbqPutIdx = 0;
2349
2350	if (unlikely(hbqp->local_hbqGetIdx == hbqp->next_hbqPutIdx)) {
2351		uint32_t raw_index = phba->hbq_get[hbqno];
2352		uint32_t getidx = le32_to_cpu(raw_index);
2353
2354		hbqp->local_hbqGetIdx = getidx;
2355
2356		if (unlikely(hbqp->local_hbqGetIdx >= hbqp->entry_count)) {
2357			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
2358					"1802 HBQ %d: local_hbqGetIdx "
2359					"%u is > than hbqp->entry_count %u\n",
2360					hbqno, hbqp->local_hbqGetIdx,
2361					hbqp->entry_count);
2362
2363			phba->link_state = LPFC_HBA_ERROR;
2364			return NULL;
2365		}
2366
2367		if (hbqp->local_hbqGetIdx == hbqp->next_hbqPutIdx)
2368			return NULL;
2369	}
2370
2371	return (struct lpfc_hbq_entry *) phba->hbqs[hbqno].hbq_virt +
2372			hbqp->hbqPutIdx;
2373}
2374
2375/**
2376 * lpfc_sli_hbqbuf_free_all - Free all the hbq buffers
2377 * @phba: Pointer to HBA context object.
2378 *
2379 * This function is called with no lock held to free all the
2380 * hbq buffers while uninitializing the SLI interface. It also
2381 * frees the HBQ buffers returned by the firmware but not yet
2382 * processed by the upper layers.
2383 **/
2384void
2385lpfc_sli_hbqbuf_free_all(struct lpfc_hba *phba)
2386{
2387	struct lpfc_dmabuf *dmabuf, *next_dmabuf;
2388	struct hbq_dmabuf *hbq_buf;
2389	unsigned long flags;
2390	int i, hbq_count;
2391
2392	hbq_count = lpfc_sli_hbq_count();
2393	/* Return all memory used by all HBQs */
2394	spin_lock_irqsave(&phba->hbalock, flags);
2395	for (i = 0; i < hbq_count; ++i) {
2396		list_for_each_entry_safe(dmabuf, next_dmabuf,
2397				&phba->hbqs[i].hbq_buffer_list, list) {
2398			hbq_buf = container_of(dmabuf, struct hbq_dmabuf, dbuf);
2399			list_del(&hbq_buf->dbuf.list);
2400			(phba->hbqs[i].hbq_free_buffer)(phba, hbq_buf);
2401		}
2402		phba->hbqs[i].buffer_count = 0;
2403	}
2404
2405	/* Mark the HBQs not in use */
2406	phba->hbq_in_use = 0;
2407	spin_unlock_irqrestore(&phba->hbalock, flags);
2408}
2409
2410/**
2411 * lpfc_sli_hbq_to_firmware - Post the hbq buffer to firmware
2412 * @phba: Pointer to HBA context object.
2413 * @hbqno: HBQ number.
2414 * @hbq_buf: Pointer to HBQ buffer.
2415 *
2416 * This function is called with the hbalock held to post a
2417 * hbq buffer to the firmware. If the function finds an empty
2418 * slot in the HBQ, it will post the buffer. The function will return
2419 * pointer to the hbq entry if it successfully post the buffer
2420 * else it will return NULL.
2421 **/
2422static int
2423lpfc_sli_hbq_to_firmware(struct lpfc_hba *phba, uint32_t hbqno,
2424			 struct hbq_dmabuf *hbq_buf)
2425{
2426	lockdep_assert_held(&phba->hbalock);
2427	return phba->lpfc_sli_hbq_to_firmware(phba, hbqno, hbq_buf);
2428}
2429
2430/**
2431 * lpfc_sli_hbq_to_firmware_s3 - Post the hbq buffer to SLI3 firmware
2432 * @phba: Pointer to HBA context object.
2433 * @hbqno: HBQ number.
2434 * @hbq_buf: Pointer to HBQ buffer.
2435 *
2436 * This function is called with the hbalock held to post a hbq buffer to the
2437 * firmware. If the function finds an empty slot in the HBQ, it will post the
2438 * buffer and place it on the hbq_buffer_list. The function will return zero if
2439 * it successfully post the buffer else it will return an error.
2440 **/
2441static int
2442lpfc_sli_hbq_to_firmware_s3(struct lpfc_hba *phba, uint32_t hbqno,
2443			    struct hbq_dmabuf *hbq_buf)
2444{
2445	struct lpfc_hbq_entry *hbqe;
2446	dma_addr_t physaddr = hbq_buf->dbuf.phys;
2447
2448	lockdep_assert_held(&phba->hbalock);
2449	/* Get next HBQ entry slot to use */
2450	hbqe = lpfc_sli_next_hbq_slot(phba, hbqno);
2451	if (hbqe) {
2452		struct hbq_s *hbqp = &phba->hbqs[hbqno];
2453
2454		hbqe->bde.addrHigh = le32_to_cpu(putPaddrHigh(physaddr));
2455		hbqe->bde.addrLow  = le32_to_cpu(putPaddrLow(physaddr));
2456		hbqe->bde.tus.f.bdeSize = hbq_buf->total_size;
2457		hbqe->bde.tus.f.bdeFlags = 0;
2458		hbqe->bde.tus.w = le32_to_cpu(hbqe->bde.tus.w);
2459		hbqe->buffer_tag = le32_to_cpu(hbq_buf->tag);
2460				/* Sync SLIM */
2461		hbqp->hbqPutIdx = hbqp->next_hbqPutIdx;
2462		writel(hbqp->hbqPutIdx, phba->hbq_put + hbqno);
2463				/* flush */
2464		readl(phba->hbq_put + hbqno);
2465		list_add_tail(&hbq_buf->dbuf.list, &hbqp->hbq_buffer_list);
2466		return 0;
2467	} else
2468		return -ENOMEM;
2469}
2470
2471/**
2472 * lpfc_sli_hbq_to_firmware_s4 - Post the hbq buffer to SLI4 firmware
2473 * @phba: Pointer to HBA context object.
2474 * @hbqno: HBQ number.
2475 * @hbq_buf: Pointer to HBQ buffer.
2476 *
2477 * This function is called with the hbalock held to post an RQE to the SLI4
2478 * firmware. If able to post the RQE to the RQ it will queue the hbq entry to
2479 * the hbq_buffer_list and return zero, otherwise it will return an error.
2480 **/
2481static int
2482lpfc_sli_hbq_to_firmware_s4(struct lpfc_hba *phba, uint32_t hbqno,
2483			    struct hbq_dmabuf *hbq_buf)
2484{
2485	int rc;
2486	struct lpfc_rqe hrqe;
2487	struct lpfc_rqe drqe;
2488	struct lpfc_queue *hrq;
2489	struct lpfc_queue *drq;
2490
2491	if (hbqno != LPFC_ELS_HBQ)
2492		return 1;
2493	hrq = phba->sli4_hba.hdr_rq;
2494	drq = phba->sli4_hba.dat_rq;
2495
2496	lockdep_assert_held(&phba->hbalock);
2497	hrqe.address_lo = putPaddrLow(hbq_buf->hbuf.phys);
2498	hrqe.address_hi = putPaddrHigh(hbq_buf->hbuf.phys);
2499	drqe.address_lo = putPaddrLow(hbq_buf->dbuf.phys);
2500	drqe.address_hi = putPaddrHigh(hbq_buf->dbuf.phys);
2501	rc = lpfc_sli4_rq_put(hrq, drq, &hrqe, &drqe);
2502	if (rc < 0)
2503		return rc;
2504	hbq_buf->tag = (rc | (hbqno << 16));
2505	list_add_tail(&hbq_buf->dbuf.list, &phba->hbqs[hbqno].hbq_buffer_list);
2506	return 0;
2507}
2508
2509/* HBQ for ELS and CT traffic. */
2510static struct lpfc_hbq_init lpfc_els_hbq = {
2511	.rn = 1,
2512	.entry_count = 256,
2513	.mask_count = 0,
2514	.profile = 0,
2515	.ring_mask = (1 << LPFC_ELS_RING),
2516	.buffer_count = 0,
2517	.init_count = 40,
2518	.add_count = 40,
2519};
2520
2521/* Array of HBQs */
2522struct lpfc_hbq_init *lpfc_hbq_defs[] = {
2523	&lpfc_els_hbq,
2524};
2525
2526/**
2527 * lpfc_sli_hbqbuf_fill_hbqs - Post more hbq buffers to HBQ
2528 * @phba: Pointer to HBA context object.
2529 * @hbqno: HBQ number.
2530 * @count: Number of HBQ buffers to be posted.
2531 *
2532 * This function is called with no lock held to post more hbq buffers to the
2533 * given HBQ. The function returns the number of HBQ buffers successfully
2534 * posted.
2535 **/
2536static int
2537lpfc_sli_hbqbuf_fill_hbqs(struct lpfc_hba *phba, uint32_t hbqno, uint32_t count)
2538{
2539	uint32_t i, posted = 0;
2540	unsigned long flags;
2541	struct hbq_dmabuf *hbq_buffer;
2542	LIST_HEAD(hbq_buf_list);
2543	if (!phba->hbqs[hbqno].hbq_alloc_buffer)
2544		return 0;
2545
2546	if ((phba->hbqs[hbqno].buffer_count + count) >
2547	    lpfc_hbq_defs[hbqno]->entry_count)
2548		count = lpfc_hbq_defs[hbqno]->entry_count -
2549					phba->hbqs[hbqno].buffer_count;
2550	if (!count)
2551		return 0;
2552	/* Allocate HBQ entries */
2553	for (i = 0; i < count; i++) {
2554		hbq_buffer = (phba->hbqs[hbqno].hbq_alloc_buffer)(phba);
2555		if (!hbq_buffer)
2556			break;
2557		list_add_tail(&hbq_buffer->dbuf.list, &hbq_buf_list);
2558	}
2559	/* Check whether HBQ is still in use */
2560	spin_lock_irqsave(&phba->hbalock, flags);
2561	if (!phba->hbq_in_use)
2562		goto err;
2563	while (!list_empty(&hbq_buf_list)) {
2564		list_remove_head(&hbq_buf_list, hbq_buffer, struct hbq_dmabuf,
2565				 dbuf.list);
2566		hbq_buffer->tag = (phba->hbqs[hbqno].buffer_count |
2567				      (hbqno << 16));
2568		if (!lpfc_sli_hbq_to_firmware(phba, hbqno, hbq_buffer)) {
2569			phba->hbqs[hbqno].buffer_count++;
2570			posted++;
2571		} else
2572			(phba->hbqs[hbqno].hbq_free_buffer)(phba, hbq_buffer);
2573	}
2574	spin_unlock_irqrestore(&phba->hbalock, flags);
2575	return posted;
2576err:
2577	spin_unlock_irqrestore(&phba->hbalock, flags);
2578	while (!list_empty(&hbq_buf_list)) {
2579		list_remove_head(&hbq_buf_list, hbq_buffer, struct hbq_dmabuf,
2580				 dbuf.list);
2581		(phba->hbqs[hbqno].hbq_free_buffer)(phba, hbq_buffer);
2582	}
2583	return 0;
2584}
2585
2586/**
2587 * lpfc_sli_hbqbuf_add_hbqs - Post more HBQ buffers to firmware
2588 * @phba: Pointer to HBA context object.
2589 * @qno: HBQ number.
2590 *
2591 * This function posts more buffers to the HBQ. This function
2592 * is called with no lock held. The function returns the number of HBQ entries
2593 * successfully allocated.
2594 **/
2595int
2596lpfc_sli_hbqbuf_add_hbqs(struct lpfc_hba *phba, uint32_t qno)
2597{
2598	if (phba->sli_rev == LPFC_SLI_REV4)
2599		return 0;
2600	else
2601		return lpfc_sli_hbqbuf_fill_hbqs(phba, qno,
2602					 lpfc_hbq_defs[qno]->add_count);
2603}
2604
2605/**
2606 * lpfc_sli_hbqbuf_init_hbqs - Post initial buffers to the HBQ
2607 * @phba: Pointer to HBA context object.
2608 * @qno:  HBQ queue number.
2609 *
2610 * This function is called from SLI initialization code path with
2611 * no lock held to post initial HBQ buffers to firmware. The
2612 * function returns the number of HBQ entries successfully allocated.
2613 **/
2614static int
2615lpfc_sli_hbqbuf_init_hbqs(struct lpfc_hba *phba, uint32_t qno)
2616{
2617	if (phba->sli_rev == LPFC_SLI_REV4)
2618		return lpfc_sli_hbqbuf_fill_hbqs(phba, qno,
2619					lpfc_hbq_defs[qno]->entry_count);
2620	else
2621		return lpfc_sli_hbqbuf_fill_hbqs(phba, qno,
2622					 lpfc_hbq_defs[qno]->init_count);
2623}
2624
2625/*
2626 * lpfc_sli_hbqbuf_get - Remove the first hbq off of an hbq list
2627 *
2628 * This function removes the first hbq buffer on an hbq list and returns a
2629 * pointer to that buffer. If it finds no buffers on the list it returns NULL.
2630 **/
2631static struct hbq_dmabuf *
2632lpfc_sli_hbqbuf_get(struct list_head *rb_list)
2633{
2634	struct lpfc_dmabuf *d_buf;
2635
2636	list_remove_head(rb_list, d_buf, struct lpfc_dmabuf, list);
2637	if (!d_buf)
2638		return NULL;
2639	return container_of(d_buf, struct hbq_dmabuf, dbuf);
2640}
2641
2642/**
2643 * lpfc_sli_rqbuf_get - Remove the first dma buffer off of an RQ list
2644 * @phba: Pointer to HBA context object.
2645 * @hrq: HBQ number.
2646 *
2647 * This function removes the first RQ buffer on an RQ buffer list and returns a
2648 * pointer to that buffer. If it finds no buffers on the list it returns NULL.
2649 **/
2650static struct rqb_dmabuf *
2651lpfc_sli_rqbuf_get(struct lpfc_hba *phba, struct lpfc_queue *hrq)
2652{
2653	struct lpfc_dmabuf *h_buf;
2654	struct lpfc_rqb *rqbp;
2655
2656	rqbp = hrq->rqbp;
2657	list_remove_head(&rqbp->rqb_buffer_list, h_buf,
2658			 struct lpfc_dmabuf, list);
2659	if (!h_buf)
2660		return NULL;
2661	rqbp->buffer_count--;
2662	return container_of(h_buf, struct rqb_dmabuf, hbuf);
2663}
2664
2665/**
2666 * lpfc_sli_hbqbuf_find - Find the hbq buffer associated with a tag
2667 * @phba: Pointer to HBA context object.
2668 * @tag: Tag of the hbq buffer.
2669 *
2670 * This function searches for the hbq buffer associated with the given tag in
2671 * the hbq buffer list. If it finds the hbq buffer, it returns the hbq_buffer
2672 * otherwise it returns NULL.
2673 **/
2674static struct hbq_dmabuf *
2675lpfc_sli_hbqbuf_find(struct lpfc_hba *phba, uint32_t tag)
2676{
2677	struct lpfc_dmabuf *d_buf;
2678	struct hbq_dmabuf *hbq_buf;
2679	uint32_t hbqno;
2680
2681	hbqno = tag >> 16;
2682	if (hbqno >= LPFC_MAX_HBQS)
2683		return NULL;
2684
2685	spin_lock_irq(&phba->hbalock);
2686	list_for_each_entry(d_buf, &phba->hbqs[hbqno].hbq_buffer_list, list) {
2687		hbq_buf = container_of(d_buf, struct hbq_dmabuf, dbuf);
2688		if (hbq_buf->tag == tag) {
2689			spin_unlock_irq(&phba->hbalock);
2690			return hbq_buf;
2691		}
2692	}
2693	spin_unlock_irq(&phba->hbalock);
2694	lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
2695			"1803 Bad hbq tag. Data: x%x x%x\n",
2696			tag, phba->hbqs[tag >> 16].buffer_count);
2697	return NULL;
2698}
2699
2700/**
2701 * lpfc_sli_free_hbq - Give back the hbq buffer to firmware
2702 * @phba: Pointer to HBA context object.
2703 * @hbq_buffer: Pointer to HBQ buffer.
2704 *
2705 * This function is called with hbalock. This function gives back
2706 * the hbq buffer to firmware. If the HBQ does not have space to
2707 * post the buffer, it will free the buffer.
2708 **/
2709void
2710lpfc_sli_free_hbq(struct lpfc_hba *phba, struct hbq_dmabuf *hbq_buffer)
2711{
2712	uint32_t hbqno;
2713
2714	if (hbq_buffer) {
2715		hbqno = hbq_buffer->tag >> 16;
2716		if (lpfc_sli_hbq_to_firmware(phba, hbqno, hbq_buffer))
2717			(phba->hbqs[hbqno].hbq_free_buffer)(phba, hbq_buffer);
2718	}
2719}
2720
2721/**
2722 * lpfc_sli_chk_mbx_command - Check if the mailbox is a legitimate mailbox
2723 * @mbxCommand: mailbox command code.
2724 *
2725 * This function is called by the mailbox event handler function to verify
2726 * that the completed mailbox command is a legitimate mailbox command. If the
2727 * completed mailbox is not known to the function, it will return MBX_SHUTDOWN
2728 * and the mailbox event handler will take the HBA offline.
2729 **/
2730static int
2731lpfc_sli_chk_mbx_command(uint8_t mbxCommand)
2732{
2733	uint8_t ret;
2734
2735	switch (mbxCommand) {
2736	case MBX_LOAD_SM:
2737	case MBX_READ_NV:
2738	case MBX_WRITE_NV:
2739	case MBX_WRITE_VPARMS:
2740	case MBX_RUN_BIU_DIAG:
2741	case MBX_INIT_LINK:
2742	case MBX_DOWN_LINK:
2743	case MBX_CONFIG_LINK:
2744	case MBX_CONFIG_RING:
2745	case MBX_RESET_RING:
2746	case MBX_READ_CONFIG:
2747	case MBX_READ_RCONFIG:
2748	case MBX_READ_SPARM:
2749	case MBX_READ_STATUS:
2750	case MBX_READ_RPI:
2751	case MBX_READ_XRI:
2752	case MBX_READ_REV:
2753	case MBX_READ_LNK_STAT:
2754	case MBX_REG_LOGIN:
2755	case MBX_UNREG_LOGIN:
2756	case MBX_CLEAR_LA:
2757	case MBX_DUMP_MEMORY:
2758	case MBX_DUMP_CONTEXT:
2759	case MBX_RUN_DIAGS:
2760	case MBX_RESTART:
2761	case MBX_UPDATE_CFG:
2762	case MBX_DOWN_LOAD:
2763	case MBX_DEL_LD_ENTRY:
2764	case MBX_RUN_PROGRAM:
2765	case MBX_SET_MASK:
2766	case MBX_SET_VARIABLE:
2767	case MBX_UNREG_D_ID:
2768	case MBX_KILL_BOARD:
2769	case MBX_CONFIG_FARP:
2770	case MBX_BEACON:
2771	case MBX_LOAD_AREA:
2772	case MBX_RUN_BIU_DIAG64:
2773	case MBX_CONFIG_PORT:
2774	case MBX_READ_SPARM64:
2775	case MBX_READ_RPI64:
2776	case MBX_REG_LOGIN64:
2777	case MBX_READ_TOPOLOGY:
2778	case MBX_WRITE_WWN:
2779	case MBX_SET_DEBUG:
2780	case MBX_LOAD_EXP_ROM:
2781	case MBX_ASYNCEVT_ENABLE:
2782	case MBX_REG_VPI:
2783	case MBX_UNREG_VPI:
2784	case MBX_HEARTBEAT:
2785	case MBX_PORT_CAPABILITIES:
2786	case MBX_PORT_IOV_CONTROL:
2787	case MBX_SLI4_CONFIG:
2788	case MBX_SLI4_REQ_FTRS:
2789	case MBX_REG_FCFI:
2790	case MBX_UNREG_FCFI:
2791	case MBX_REG_VFI:
2792	case MBX_UNREG_VFI:
2793	case MBX_INIT_VPI:
2794	case MBX_INIT_VFI:
2795	case MBX_RESUME_RPI:
2796	case MBX_READ_EVENT_LOG_STATUS:
2797	case MBX_READ_EVENT_LOG:
2798	case MBX_SECURITY_MGMT:
2799	case MBX_AUTH_PORT:
2800	case MBX_ACCESS_VDATA:
2801		ret = mbxCommand;
2802		break;
2803	default:
2804		ret = MBX_SHUTDOWN;
2805		break;
2806	}
2807	return ret;
2808}
2809
2810/**
2811 * lpfc_sli_wake_mbox_wait - lpfc_sli_issue_mbox_wait mbox completion handler
2812 * @phba: Pointer to HBA context object.
2813 * @pmboxq: Pointer to mailbox command.
2814 *
2815 * This is completion handler function for mailbox commands issued from
2816 * lpfc_sli_issue_mbox_wait function. This function is called by the
2817 * mailbox event handler function with no lock held. This function
2818 * will wake up thread waiting on the wait queue pointed by context1
2819 * of the mailbox.
2820 **/
2821void
2822lpfc_sli_wake_mbox_wait(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
2823{
2824	unsigned long drvr_flag;
2825	struct completion *pmbox_done;
2826
2827	/*
2828	 * If pmbox_done is empty, the driver thread gave up waiting and
2829	 * continued running.
2830	 */
2831	pmboxq->mbox_flag |= LPFC_MBX_WAKE;
2832	spin_lock_irqsave(&phba->hbalock, drvr_flag);
2833	pmbox_done = pmboxq->ctx_u.mbox_wait;
2834	if (pmbox_done)
2835		complete(pmbox_done);
2836	spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
2837	return;
2838}
2839
2840static void
2841__lpfc_sli_rpi_release(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
2842{
2843	unsigned long iflags;
2844
2845	if (ndlp->nlp_flag & NLP_RELEASE_RPI) {
2846		lpfc_sli4_free_rpi(vport->phba, ndlp->nlp_rpi);
2847		spin_lock_irqsave(&ndlp->lock, iflags);
2848		ndlp->nlp_flag &= ~NLP_RELEASE_RPI;
2849		ndlp->nlp_rpi = LPFC_RPI_ALLOC_ERROR;
2850		spin_unlock_irqrestore(&ndlp->lock, iflags);
2851	}
2852	ndlp->nlp_flag &= ~NLP_UNREG_INP;
2853}
2854
2855void
2856lpfc_sli_rpi_release(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
2857{
2858	__lpfc_sli_rpi_release(vport, ndlp);
2859}
2860
2861/**
2862 * lpfc_sli_def_mbox_cmpl - Default mailbox completion handler
2863 * @phba: Pointer to HBA context object.
2864 * @pmb: Pointer to mailbox object.
2865 *
2866 * This function is the default mailbox completion handler. It
2867 * frees the memory resources associated with the completed mailbox
2868 * command. If the completed command is a REG_LOGIN mailbox command,
2869 * this function will issue a UREG_LOGIN to re-claim the RPI.
2870 **/
2871void
2872lpfc_sli_def_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
2873{
2874	struct lpfc_vport  *vport = pmb->vport;
2875	struct lpfc_dmabuf *mp;
2876	struct lpfc_nodelist *ndlp;
2877	struct Scsi_Host *shost;
2878	uint16_t rpi, vpi;
2879	int rc;
2880
2881	/*
2882	 * If a REG_LOGIN succeeded  after node is destroyed or node
2883	 * is in re-discovery driver need to cleanup the RPI.
2884	 */
2885	if (!test_bit(FC_UNLOADING, &phba->pport->load_flag) &&
2886	    pmb->u.mb.mbxCommand == MBX_REG_LOGIN64 &&
2887	    !pmb->u.mb.mbxStatus) {
2888		mp = pmb->ctx_buf;
2889		if (mp) {
2890			pmb->ctx_buf = NULL;
2891			lpfc_mbuf_free(phba, mp->virt, mp->phys);
2892			kfree(mp);
2893		}
2894		rpi = pmb->u.mb.un.varWords[0];
2895		vpi = pmb->u.mb.un.varRegLogin.vpi;
2896		if (phba->sli_rev == LPFC_SLI_REV4)
2897			vpi -= phba->sli4_hba.max_cfg_param.vpi_base;
2898		lpfc_unreg_login(phba, vpi, rpi, pmb);
2899		pmb->vport = vport;
2900		pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
2901		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
2902		if (rc != MBX_NOT_FINISHED)
2903			return;
2904	}
2905
2906	if ((pmb->u.mb.mbxCommand == MBX_REG_VPI) &&
2907		!test_bit(FC_UNLOADING, &phba->pport->load_flag) &&
2908		!pmb->u.mb.mbxStatus) {
2909		shost = lpfc_shost_from_vport(vport);
2910		spin_lock_irq(shost->host_lock);
2911		vport->vpi_state |= LPFC_VPI_REGISTERED;
2912		spin_unlock_irq(shost->host_lock);
2913		clear_bit(FC_VPORT_NEEDS_REG_VPI, &vport->fc_flag);
2914	}
2915
2916	if (pmb->u.mb.mbxCommand == MBX_REG_LOGIN64) {
2917		ndlp = pmb->ctx_ndlp;
2918		lpfc_nlp_put(ndlp);
2919	}
2920
2921	if (pmb->u.mb.mbxCommand == MBX_UNREG_LOGIN) {
2922		ndlp = pmb->ctx_ndlp;
2923
2924		/* Check to see if there are any deferred events to process */
2925		if (ndlp) {
2926			lpfc_printf_vlog(
2927				vport,
2928				KERN_INFO, LOG_MBOX | LOG_DISCOVERY,
2929				"1438 UNREG cmpl deferred mbox x%x "
2930				"on NPort x%x Data: x%x x%x x%px x%lx x%x\n",
2931				ndlp->nlp_rpi, ndlp->nlp_DID,
2932				ndlp->nlp_flag, ndlp->nlp_defer_did,
2933				ndlp, vport->load_flag, kref_read(&ndlp->kref));
2934
2935			if ((ndlp->nlp_flag & NLP_UNREG_INP) &&
2936			    (ndlp->nlp_defer_did != NLP_EVT_NOTHING_PENDING)) {
2937				ndlp->nlp_flag &= ~NLP_UNREG_INP;
2938				ndlp->nlp_defer_did = NLP_EVT_NOTHING_PENDING;
2939				lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0);
2940			} else {
2941				__lpfc_sli_rpi_release(vport, ndlp);
2942			}
2943
2944			/* The unreg_login mailbox is complete and had a
2945			 * reference that has to be released.  The PLOGI
2946			 * got its own ref.
2947			 */
2948			lpfc_nlp_put(ndlp);
2949			pmb->ctx_ndlp = NULL;
2950		}
2951	}
2952
2953	/* This nlp_put pairs with lpfc_sli4_resume_rpi */
2954	if (pmb->u.mb.mbxCommand == MBX_RESUME_RPI) {
2955		ndlp = pmb->ctx_ndlp;
2956		lpfc_nlp_put(ndlp);
2957	}
2958
2959	/* Check security permission status on INIT_LINK mailbox command */
2960	if ((pmb->u.mb.mbxCommand == MBX_INIT_LINK) &&
2961	    (pmb->u.mb.mbxStatus == MBXERR_SEC_NO_PERMISSION))
2962		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
2963				"2860 SLI authentication is required "
2964				"for INIT_LINK but has not done yet\n");
2965
2966	if (bf_get(lpfc_mqe_command, &pmb->u.mqe) == MBX_SLI4_CONFIG)
2967		lpfc_sli4_mbox_cmd_free(phba, pmb);
2968	else
2969		lpfc_mbox_rsrc_cleanup(phba, pmb, MBOX_THD_UNLOCKED);
2970}
2971 /**
2972 * lpfc_sli4_unreg_rpi_cmpl_clr - mailbox completion handler
2973 * @phba: Pointer to HBA context object.
2974 * @pmb: Pointer to mailbox object.
2975 *
2976 * This function is the unreg rpi mailbox completion handler. It
2977 * frees the memory resources associated with the completed mailbox
2978 * command. An additional reference is put on the ndlp to prevent
2979 * lpfc_nlp_release from freeing the rpi bit in the bitmask before
2980 * the unreg mailbox command completes, this routine puts the
2981 * reference back.
2982 *
2983 **/
2984void
2985lpfc_sli4_unreg_rpi_cmpl_clr(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
2986{
2987	struct lpfc_vport  *vport = pmb->vport;
2988	struct lpfc_nodelist *ndlp;
2989
2990	ndlp = pmb->ctx_ndlp;
2991	if (pmb->u.mb.mbxCommand == MBX_UNREG_LOGIN) {
2992		if (phba->sli_rev == LPFC_SLI_REV4 &&
2993		    (bf_get(lpfc_sli_intf_if_type,
2994		     &phba->sli4_hba.sli_intf) >=
2995		     LPFC_SLI_INTF_IF_TYPE_2)) {
2996			if (ndlp) {
2997				lpfc_printf_vlog(
2998					 vport, KERN_INFO,
2999					 LOG_MBOX | LOG_SLI | LOG_NODE,
3000					 "0010 UNREG_LOGIN vpi:x%x "
3001					 "rpi:%x DID:%x defer x%x flg x%x "
3002					 "x%px\n",
3003					 vport->vpi, ndlp->nlp_rpi,
3004					 ndlp->nlp_DID, ndlp->nlp_defer_did,
3005					 ndlp->nlp_flag,
3006					 ndlp);
3007				ndlp->nlp_flag &= ~NLP_LOGO_ACC;
3008
3009				/* Check to see if there are any deferred
3010				 * events to process
3011				 */
3012				if ((ndlp->nlp_flag & NLP_UNREG_INP) &&
3013				    (ndlp->nlp_defer_did !=
3014				    NLP_EVT_NOTHING_PENDING)) {
3015					lpfc_printf_vlog(
3016						vport, KERN_INFO,
3017						LOG_MBOX | LOG_SLI | LOG_NODE,
3018						"4111 UNREG cmpl deferred "
3019						"clr x%x on "
3020						"NPort x%x Data: x%x x%px\n",
3021						ndlp->nlp_rpi, ndlp->nlp_DID,
3022						ndlp->nlp_defer_did, ndlp);
3023					ndlp->nlp_flag &= ~NLP_UNREG_INP;
3024					ndlp->nlp_defer_did =
3025						NLP_EVT_NOTHING_PENDING;
3026					lpfc_issue_els_plogi(
3027						vport, ndlp->nlp_DID, 0);
3028				} else {
3029					__lpfc_sli_rpi_release(vport, ndlp);
3030				}
3031				lpfc_nlp_put(ndlp);
3032			}
3033		}
3034	}
3035
3036	mempool_free(pmb, phba->mbox_mem_pool);
3037}
3038
3039/**
3040 * lpfc_sli_handle_mb_event - Handle mailbox completions from firmware
3041 * @phba: Pointer to HBA context object.
3042 *
3043 * This function is called with no lock held. This function processes all
3044 * the completed mailbox commands and gives it to upper layers. The interrupt
3045 * service routine processes mailbox completion interrupt and adds completed
3046 * mailbox commands to the mboxq_cmpl queue and signals the worker thread.
3047 * Worker thread call lpfc_sli_handle_mb_event, which will return the
3048 * completed mailbox commands in mboxq_cmpl queue to the upper layers. This
3049 * function returns the mailbox commands to the upper layer by calling the
3050 * completion handler function of each mailbox.
3051 **/
3052int
3053lpfc_sli_handle_mb_event(struct lpfc_hba *phba)
3054{
3055	MAILBOX_t *pmbox;
3056	LPFC_MBOXQ_t *pmb;
3057	int rc;
3058	LIST_HEAD(cmplq);
3059
3060	phba->sli.slistat.mbox_event++;
3061
3062	/* Get all completed mailboxe buffers into the cmplq */
3063	spin_lock_irq(&phba->hbalock);
3064	list_splice_init(&phba->sli.mboxq_cmpl, &cmplq);
3065	spin_unlock_irq(&phba->hbalock);
3066
3067	/* Get a Mailbox buffer to setup mailbox commands for callback */
3068	do {
3069		list_remove_head(&cmplq, pmb, LPFC_MBOXQ_t, list);
3070		if (pmb == NULL)
3071			break;
3072
3073		pmbox = &pmb->u.mb;
3074
3075		if (pmbox->mbxCommand != MBX_HEARTBEAT) {
3076			if (pmb->vport) {
3077				lpfc_debugfs_disc_trc(pmb->vport,
3078					LPFC_DISC_TRC_MBOX_VPORT,
3079					"MBOX cmpl vport: cmd:x%x mb:x%x x%x",
3080					(uint32_t)pmbox->mbxCommand,
3081					pmbox->un.varWords[0],
3082					pmbox->un.varWords[1]);
3083			}
3084			else {
3085				lpfc_debugfs_disc_trc(phba->pport,
3086					LPFC_DISC_TRC_MBOX,
3087					"MBOX cmpl:       cmd:x%x mb:x%x x%x",
3088					(uint32_t)pmbox->mbxCommand,
3089					pmbox->un.varWords[0],
3090					pmbox->un.varWords[1]);
3091			}
3092		}
3093
3094		/*
3095		 * It is a fatal error if unknown mbox command completion.
3096		 */
3097		if (lpfc_sli_chk_mbx_command(pmbox->mbxCommand) ==
3098		    MBX_SHUTDOWN) {
3099			/* Unknown mailbox command compl */
3100			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
3101					"(%d):0323 Unknown Mailbox command "
3102					"x%x (x%x/x%x) Cmpl\n",
3103					pmb->vport ? pmb->vport->vpi :
3104					LPFC_VPORT_UNKNOWN,
3105					pmbox->mbxCommand,
3106					lpfc_sli_config_mbox_subsys_get(phba,
3107									pmb),
3108					lpfc_sli_config_mbox_opcode_get(phba,
3109									pmb));
3110			phba->link_state = LPFC_HBA_ERROR;
3111			phba->work_hs = HS_FFER3;
3112			lpfc_handle_eratt(phba);
3113			continue;
3114		}
3115
3116		if (pmbox->mbxStatus) {
3117			phba->sli.slistat.mbox_stat_err++;
3118			if (pmbox->mbxStatus == MBXERR_NO_RESOURCES) {
3119				/* Mbox cmd cmpl error - RETRYing */
3120				lpfc_printf_log(phba, KERN_INFO,
3121					LOG_MBOX | LOG_SLI,
3122					"(%d):0305 Mbox cmd cmpl "
3123					"error - RETRYing Data: x%x "
3124					"(x%x/x%x) x%x x%x x%x\n",
3125					pmb->vport ? pmb->vport->vpi :
3126					LPFC_VPORT_UNKNOWN,
3127					pmbox->mbxCommand,
3128					lpfc_sli_config_mbox_subsys_get(phba,
3129									pmb),
3130					lpfc_sli_config_mbox_opcode_get(phba,
3131									pmb),
3132					pmbox->mbxStatus,
3133					pmbox->un.varWords[0],
3134					pmb->vport ? pmb->vport->port_state :
3135					LPFC_VPORT_UNKNOWN);
3136				pmbox->mbxStatus = 0;
3137				pmbox->mbxOwner = OWN_HOST;
3138				rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
3139				if (rc != MBX_NOT_FINISHED)
3140					continue;
3141			}
3142		}
3143
3144		/* Mailbox cmd <cmd> Cmpl <cmpl> */
3145		lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
3146				"(%d):0307 Mailbox cmd x%x (x%x/x%x) Cmpl %ps "
3147				"Data: x%x x%x x%x x%x x%x x%x x%x x%x x%x "
3148				"x%x x%x x%x\n",
3149				pmb->vport ? pmb->vport->vpi : 0,
3150				pmbox->mbxCommand,
3151				lpfc_sli_config_mbox_subsys_get(phba, pmb),
3152				lpfc_sli_config_mbox_opcode_get(phba, pmb),
3153				pmb->mbox_cmpl,
3154				*((uint32_t *) pmbox),
3155				pmbox->un.varWords[0],
3156				pmbox->un.varWords[1],
3157				pmbox->un.varWords[2],
3158				pmbox->un.varWords[3],
3159				pmbox->un.varWords[4],
3160				pmbox->un.varWords[5],
3161				pmbox->un.varWords[6],
3162				pmbox->un.varWords[7],
3163				pmbox->un.varWords[8],
3164				pmbox->un.varWords[9],
3165				pmbox->un.varWords[10]);
3166
3167		if (pmb->mbox_cmpl)
3168			pmb->mbox_cmpl(phba,pmb);
3169	} while (1);
3170	return 0;
3171}
3172
3173/**
3174 * lpfc_sli_get_buff - Get the buffer associated with the buffer tag
3175 * @phba: Pointer to HBA context object.
3176 * @pring: Pointer to driver SLI ring object.
3177 * @tag: buffer tag.
3178 *
3179 * This function is called with no lock held. When QUE_BUFTAG_BIT bit
3180 * is set in the tag the buffer is posted for a particular exchange,
3181 * the function will return the buffer without replacing the buffer.
3182 * If the buffer is for unsolicited ELS or CT traffic, this function
3183 * returns the buffer and also posts another buffer to the firmware.
3184 **/
3185static struct lpfc_dmabuf *
3186lpfc_sli_get_buff(struct lpfc_hba *phba,
3187		  struct lpfc_sli_ring *pring,
3188		  uint32_t tag)
3189{
3190	struct hbq_dmabuf *hbq_entry;
3191
3192	if (tag & QUE_BUFTAG_BIT)
3193		return lpfc_sli_ring_taggedbuf_get(phba, pring, tag);
3194	hbq_entry = lpfc_sli_hbqbuf_find(phba, tag);
3195	if (!hbq_entry)
3196		return NULL;
3197	return &hbq_entry->dbuf;
3198}
3199
3200/**
3201 * lpfc_nvme_unsol_ls_handler - Process an unsolicited event data buffer
3202 *                              containing a NVME LS request.
3203 * @phba: pointer to lpfc hba data structure.
3204 * @piocb: pointer to the iocbq struct representing the sequence starting
3205 *        frame.
3206 *
3207 * This routine initially validates the NVME LS, validates there is a login
3208 * with the port that sent the LS, and then calls the appropriate nvme host
3209 * or target LS request handler.
3210 **/
3211static void
3212lpfc_nvme_unsol_ls_handler(struct lpfc_hba *phba, struct lpfc_iocbq *piocb)
3213{
3214	struct lpfc_nodelist *ndlp;
3215	struct lpfc_dmabuf *d_buf;
3216	struct hbq_dmabuf *nvmebuf;
3217	struct fc_frame_header *fc_hdr;
3218	struct lpfc_async_xchg_ctx *axchg = NULL;
3219	char *failwhy = NULL;
3220	uint32_t oxid, sid, did, fctl, size;
3221	int ret = 1;
3222
3223	d_buf = piocb->cmd_dmabuf;
3224
3225	nvmebuf = container_of(d_buf, struct hbq_dmabuf, dbuf);
3226	fc_hdr = nvmebuf->hbuf.virt;
3227	oxid = be16_to_cpu(fc_hdr->fh_ox_id);
3228	sid = sli4_sid_from_fc_hdr(fc_hdr);
3229	did = sli4_did_from_fc_hdr(fc_hdr);
3230	fctl = (fc_hdr->fh_f_ctl[0] << 16 |
3231		fc_hdr->fh_f_ctl[1] << 8 |
3232		fc_hdr->fh_f_ctl[2]);
3233	size = bf_get(lpfc_rcqe_length, &nvmebuf->cq_event.cqe.rcqe_cmpl);
3234
3235	lpfc_nvmeio_data(phba, "NVME LS    RCV: xri x%x sz %d from %06x\n",
3236			 oxid, size, sid);
3237
3238	if (test_bit(FC_UNLOADING, &phba->pport->load_flag)) {
3239		failwhy = "Driver Unloading";
3240	} else if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)) {
3241		failwhy = "NVME FC4 Disabled";
3242	} else if (!phba->nvmet_support && !phba->pport->localport) {
3243		failwhy = "No Localport";
3244	} else if (phba->nvmet_support && !phba->targetport) {
3245		failwhy = "No Targetport";
3246	} else if (unlikely(fc_hdr->fh_r_ctl != FC_RCTL_ELS4_REQ)) {
3247		failwhy = "Bad NVME LS R_CTL";
3248	} else if (unlikely((fctl & 0x00FF0000) !=
3249			(FC_FC_FIRST_SEQ | FC_FC_END_SEQ | FC_FC_SEQ_INIT))) {
3250		failwhy = "Bad NVME LS F_CTL";
3251	} else {
3252		axchg = kzalloc(sizeof(*axchg), GFP_ATOMIC);
3253		if (!axchg)
3254			failwhy = "No CTX memory";
3255	}
3256
3257	if (unlikely(failwhy)) {
3258		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
3259				"6154 Drop NVME LS: SID %06X OXID x%X: %s\n",
3260				sid, oxid, failwhy);
3261		goto out_fail;
3262	}
3263
3264	/* validate the source of the LS is logged in */
3265	ndlp = lpfc_findnode_did(phba->pport, sid);
3266	if (!ndlp ||
3267	    ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) &&
3268	     (ndlp->nlp_state != NLP_STE_MAPPED_NODE))) {
3269		lpfc_printf_log(phba, KERN_ERR, LOG_NVME_DISC,
3270				"6216 NVME Unsol rcv: No ndlp: "
3271				"NPort_ID x%x oxid x%x\n",
3272				sid, oxid);
3273		goto out_fail;
3274	}
3275
3276	axchg->phba = phba;
3277	axchg->ndlp = ndlp;
3278	axchg->size = size;
3279	axchg->oxid = oxid;
3280	axchg->sid = sid;
3281	axchg->wqeq = NULL;
3282	axchg->state = LPFC_NVME_STE_LS_RCV;
3283	axchg->entry_cnt = 1;
3284	axchg->rqb_buffer = (void *)nvmebuf;
3285	axchg->hdwq = &phba->sli4_hba.hdwq[0];
3286	axchg->payload = nvmebuf->dbuf.virt;
3287	INIT_LIST_HEAD(&axchg->list);
3288
3289	if (phba->nvmet_support) {
3290		ret = lpfc_nvmet_handle_lsreq(phba, axchg);
3291		spin_lock_irq(&ndlp->lock);
3292		if (!ret && !(ndlp->fc4_xpt_flags & NLP_XPT_HAS_HH)) {
3293			ndlp->fc4_xpt_flags |= NLP_XPT_HAS_HH;
3294			spin_unlock_irq(&ndlp->lock);
3295
3296			/* This reference is a single occurrence to hold the
3297			 * node valid until the nvmet transport calls
3298			 * host_release.
3299			 */
3300			if (!lpfc_nlp_get(ndlp))
3301				goto out_fail;
3302
3303			lpfc_printf_log(phba, KERN_ERR, LOG_NODE,
3304					"6206 NVMET unsol ls_req ndlp x%px "
3305					"DID x%x xflags x%x refcnt %d\n",
3306					ndlp, ndlp->nlp_DID,
3307					ndlp->fc4_xpt_flags,
3308					kref_read(&ndlp->kref));
3309		} else {
3310			spin_unlock_irq(&ndlp->lock);
3311		}
3312	} else {
3313		ret = lpfc_nvme_handle_lsreq(phba, axchg);
3314	}
3315
3316	/* if zero, LS was successfully handled. If non-zero, LS not handled */
3317	if (!ret)
3318		return;
3319
3320out_fail:
3321	lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
3322			"6155 Drop NVME LS from DID %06X: SID %06X OXID x%X "
3323			"NVMe%s handler failed %d\n",
3324			did, sid, oxid,
3325			(phba->nvmet_support) ? "T" : "I", ret);
3326
3327	/* recycle receive buffer */
3328	lpfc_in_buf_free(phba, &nvmebuf->dbuf);
3329
3330	/* If start of new exchange, abort it */
3331	if (axchg && (fctl & FC_FC_FIRST_SEQ && !(fctl & FC_FC_EX_CTX)))
3332		ret = lpfc_nvme_unsol_ls_issue_abort(phba, axchg, sid, oxid);
3333
3334	if (ret)
3335		kfree(axchg);
3336}
3337
3338/**
3339 * lpfc_complete_unsol_iocb - Complete an unsolicited sequence
3340 * @phba: Pointer to HBA context object.
3341 * @pring: Pointer to driver SLI ring object.
3342 * @saveq: Pointer to the iocbq struct representing the sequence starting frame.
3343 * @fch_r_ctl: the r_ctl for the first frame of the sequence.
3344 * @fch_type: the type for the first frame of the sequence.
3345 *
3346 * This function is called with no lock held. This function uses the r_ctl and
3347 * type of the received sequence to find the correct callback function to call
3348 * to process the sequence.
3349 **/
3350static int
3351lpfc_complete_unsol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
3352			 struct lpfc_iocbq *saveq, uint32_t fch_r_ctl,
3353			 uint32_t fch_type)
3354{
3355	int i;
3356
3357	switch (fch_type) {
3358	case FC_TYPE_NVME:
3359		lpfc_nvme_unsol_ls_handler(phba, saveq);
3360		return 1;
3361	default:
3362		break;
3363	}
3364
3365	/* unSolicited Responses */
3366	if (pring->prt[0].profile) {
3367		if (pring->prt[0].lpfc_sli_rcv_unsol_event)
3368			(pring->prt[0].lpfc_sli_rcv_unsol_event) (phba, pring,
3369									saveq);
3370		return 1;
3371	}
3372	/* We must search, based on rctl / type
3373	   for the right routine */
3374	for (i = 0; i < pring->num_mask; i++) {
3375		if ((pring->prt[i].rctl == fch_r_ctl) &&
3376		    (pring->prt[i].type == fch_type)) {
3377			if (pring->prt[i].lpfc_sli_rcv_unsol_event)
3378				(pring->prt[i].lpfc_sli_rcv_unsol_event)
3379						(phba, pring, saveq);
3380			return 1;
3381		}
3382	}
3383	return 0;
3384}
3385
3386static void
3387lpfc_sli_prep_unsol_wqe(struct lpfc_hba *phba,
3388			struct lpfc_iocbq *saveq)
3389{
3390	IOCB_t *irsp;
3391	union lpfc_wqe128 *wqe;
3392	u16 i = 0;
3393
3394	irsp = &saveq->iocb;
3395	wqe = &saveq->wqe;
3396
3397	/* Fill wcqe with the IOCB status fields */
3398	bf_set(lpfc_wcqe_c_status, &saveq->wcqe_cmpl, irsp->ulpStatus);
3399	saveq->wcqe_cmpl.word3 = irsp->ulpBdeCount;
3400	saveq->wcqe_cmpl.parameter = irsp->un.ulpWord[4];
3401	saveq->wcqe_cmpl.total_data_placed = irsp->unsli3.rcvsli3.acc_len;
3402
3403	/* Source ID */
3404	bf_set(els_rsp64_sid, &wqe->xmit_els_rsp, irsp->un.rcvels.parmRo);
3405
3406	/* rx-id of the response frame */
3407	bf_set(wqe_ctxt_tag, &wqe->xmit_els_rsp.wqe_com, irsp->ulpContext);
3408
3409	/* ox-id of the frame */
3410	bf_set(wqe_rcvoxid, &wqe->xmit_els_rsp.wqe_com,
3411	       irsp->unsli3.rcvsli3.ox_id);
3412
3413	/* DID */
3414	bf_set(wqe_els_did, &wqe->xmit_els_rsp.wqe_dest,
3415	       irsp->un.rcvels.remoteID);
3416
3417	/* unsol data len */
3418	for (i = 0; i < irsp->ulpBdeCount; i++) {
3419		struct lpfc_hbq_entry *hbqe = NULL;
3420
3421		if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
3422			if (i == 0) {
3423				hbqe = (struct lpfc_hbq_entry *)
3424					&irsp->un.ulpWord[0];
3425				saveq->wqe.gen_req.bde.tus.f.bdeSize =
3426					hbqe->bde.tus.f.bdeSize;
3427			} else if (i == 1) {
3428				hbqe = (struct lpfc_hbq_entry *)
3429					&irsp->unsli3.sli3Words[4];
3430				saveq->unsol_rcv_len = hbqe->bde.tus.f.bdeSize;
3431			}
3432		}
3433	}
3434}
3435
3436/**
3437 * lpfc_sli_process_unsol_iocb - Unsolicited iocb handler
3438 * @phba: Pointer to HBA context object.
3439 * @pring: Pointer to driver SLI ring object.
3440 * @saveq: Pointer to the unsolicited iocb.
3441 *
3442 * This function is called with no lock held by the ring event handler
3443 * when there is an unsolicited iocb posted to the response ring by the
3444 * firmware. This function gets the buffer associated with the iocbs
3445 * and calls the event handler for the ring. This function handles both
3446 * qring buffers and hbq buffers.
3447 * When the function returns 1 the caller can free the iocb object otherwise
3448 * upper layer functions will free the iocb objects.
3449 **/
3450static int
3451lpfc_sli_process_unsol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
3452			    struct lpfc_iocbq *saveq)
3453{
3454	IOCB_t           * irsp;
3455	WORD5            * w5p;
3456	dma_addr_t	 paddr;
3457	uint32_t           Rctl, Type;
3458	struct lpfc_iocbq *iocbq;
3459	struct lpfc_dmabuf *dmzbuf;
3460
3461	irsp = &saveq->iocb;
3462	saveq->vport = phba->pport;
3463
3464	if (irsp->ulpCommand == CMD_ASYNC_STATUS) {
3465		if (pring->lpfc_sli_rcv_async_status)
3466			pring->lpfc_sli_rcv_async_status(phba, pring, saveq);
3467		else
3468			lpfc_printf_log(phba,
3469					KERN_WARNING,
3470					LOG_SLI,
3471					"0316 Ring %d handler: unexpected "
3472					"ASYNC_STATUS iocb received evt_code "
3473					"0x%x\n",
3474					pring->ringno,
3475					irsp->un.asyncstat.evt_code);
3476		return 1;
3477	}
3478
3479	if ((irsp->ulpCommand == CMD_IOCB_RET_XRI64_CX) &&
3480	    (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED)) {
3481		if (irsp->ulpBdeCount > 0) {
3482			dmzbuf = lpfc_sli_get_buff(phba, pring,
3483						   irsp->un.ulpWord[3]);
3484			lpfc_in_buf_free(phba, dmzbuf);
3485		}
3486
3487		if (irsp->ulpBdeCount > 1) {
3488			dmzbuf = lpfc_sli_get_buff(phba, pring,
3489						   irsp->unsli3.sli3Words[3]);
3490			lpfc_in_buf_free(phba, dmzbuf);
3491		}
3492
3493		if (irsp->ulpBdeCount > 2) {
3494			dmzbuf = lpfc_sli_get_buff(phba, pring,
3495						   irsp->unsli3.sli3Words[7]);
3496			lpfc_in_buf_free(phba, dmzbuf);
3497		}
3498
3499		return 1;
3500	}
3501
3502	if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
3503		if (irsp->ulpBdeCount != 0) {
3504			saveq->cmd_dmabuf = lpfc_sli_get_buff(phba, pring,
3505						irsp->un.ulpWord[3]);
3506			if (!saveq->cmd_dmabuf)
3507				lpfc_printf_log(phba,
3508					KERN_ERR,
3509					LOG_SLI,
3510					"0341 Ring %d Cannot find buffer for "
3511					"an unsolicited iocb. tag 0x%x\n",
3512					pring->ringno,
3513					irsp->un.ulpWord[3]);
3514		}
3515		if (irsp->ulpBdeCount == 2) {
3516			saveq->bpl_dmabuf = lpfc_sli_get_buff(phba, pring,
3517						irsp->unsli3.sli3Words[7]);
3518			if (!saveq->bpl_dmabuf)
3519				lpfc_printf_log(phba,
3520					KERN_ERR,
3521					LOG_SLI,
3522					"0342 Ring %d Cannot find buffer for an"
3523					" unsolicited iocb. tag 0x%x\n",
3524					pring->ringno,
3525					irsp->unsli3.sli3Words[7]);
3526		}
3527		list_for_each_entry(iocbq, &saveq->list, list) {
3528			irsp = &iocbq->iocb;
3529			if (irsp->ulpBdeCount != 0) {
3530				iocbq->cmd_dmabuf = lpfc_sli_get_buff(phba,
3531							pring,
3532							irsp->un.ulpWord[3]);
3533				if (!iocbq->cmd_dmabuf)
3534					lpfc_printf_log(phba,
3535						KERN_ERR,
3536						LOG_SLI,
3537						"0343 Ring %d Cannot find "
3538						"buffer for an unsolicited iocb"
3539						". tag 0x%x\n", pring->ringno,
3540						irsp->un.ulpWord[3]);
3541			}
3542			if (irsp->ulpBdeCount == 2) {
3543				iocbq->bpl_dmabuf = lpfc_sli_get_buff(phba,
3544						pring,
3545						irsp->unsli3.sli3Words[7]);
3546				if (!iocbq->bpl_dmabuf)
3547					lpfc_printf_log(phba,
3548						KERN_ERR,
3549						LOG_SLI,
3550						"0344 Ring %d Cannot find "
3551						"buffer for an unsolicited "
3552						"iocb. tag 0x%x\n",
3553						pring->ringno,
3554						irsp->unsli3.sli3Words[7]);
3555			}
3556		}
3557	} else {
3558		paddr = getPaddr(irsp->un.cont64[0].addrHigh,
3559				 irsp->un.cont64[0].addrLow);
3560		saveq->cmd_dmabuf = lpfc_sli_ringpostbuf_get(phba, pring,
3561							     paddr);
3562		if (irsp->ulpBdeCount == 2) {
3563			paddr = getPaddr(irsp->un.cont64[1].addrHigh,
3564					 irsp->un.cont64[1].addrLow);
3565			saveq->bpl_dmabuf = lpfc_sli_ringpostbuf_get(phba,
3566								   pring,
3567								   paddr);
3568		}
3569	}
3570
3571	if (irsp->ulpBdeCount != 0 &&
3572	    (irsp->ulpCommand == CMD_IOCB_RCV_CONT64_CX ||
3573	     irsp->ulpStatus == IOSTAT_INTERMED_RSP)) {
3574		int found = 0;
3575
3576		/* search continue save q for same XRI */
3577		list_for_each_entry(iocbq, &pring->iocb_continue_saveq, clist) {
3578			if (iocbq->iocb.unsli3.rcvsli3.ox_id ==
3579				saveq->iocb.unsli3.rcvsli3.ox_id) {
3580				list_add_tail(&saveq->list, &iocbq->list);
3581				found = 1;
3582				break;
3583			}
3584		}
3585		if (!found)
3586			list_add_tail(&saveq->clist,
3587				      &pring->iocb_continue_saveq);
3588
3589		if (saveq->iocb.ulpStatus != IOSTAT_INTERMED_RSP) {
3590			list_del_init(&iocbq->clist);
3591			saveq = iocbq;
3592			irsp = &saveq->iocb;
3593		} else {
3594			return 0;
3595		}
3596	}
3597	if ((irsp->ulpCommand == CMD_RCV_ELS_REQ64_CX) ||
3598	    (irsp->ulpCommand == CMD_RCV_ELS_REQ_CX) ||
3599	    (irsp->ulpCommand == CMD_IOCB_RCV_ELS64_CX)) {
3600		Rctl = FC_RCTL_ELS_REQ;
3601		Type = FC_TYPE_ELS;
3602	} else {
3603		w5p = (WORD5 *)&(saveq->iocb.un.ulpWord[5]);
3604		Rctl = w5p->hcsw.Rctl;
3605		Type = w5p->hcsw.Type;
3606
3607		/* Firmware Workaround */
3608		if ((Rctl == 0) && (pring->ringno == LPFC_ELS_RING) &&
3609			(irsp->ulpCommand == CMD_RCV_SEQUENCE64_CX ||
3610			 irsp->ulpCommand == CMD_IOCB_RCV_SEQ64_CX)) {
3611			Rctl = FC_RCTL_ELS_REQ;
3612			Type = FC_TYPE_ELS;
3613			w5p->hcsw.Rctl = Rctl;
3614			w5p->hcsw.Type = Type;
3615		}
3616	}
3617
3618	if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) &&
3619	    (irsp->ulpCommand == CMD_IOCB_RCV_ELS64_CX ||
3620	    irsp->ulpCommand == CMD_IOCB_RCV_SEQ64_CX)) {
3621		if (irsp->unsli3.rcvsli3.vpi == 0xffff)
3622			saveq->vport = phba->pport;
3623		else
3624			saveq->vport = lpfc_find_vport_by_vpid(phba,
3625					       irsp->unsli3.rcvsli3.vpi);
3626	}
3627
3628	/* Prepare WQE with Unsol frame */
3629	lpfc_sli_prep_unsol_wqe(phba, saveq);
3630
3631	if (!lpfc_complete_unsol_iocb(phba, pring, saveq, Rctl, Type))
3632		lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
3633				"0313 Ring %d handler: unexpected Rctl x%x "
3634				"Type x%x received\n",
3635				pring->ringno, Rctl, Type);
3636
3637	return 1;
3638}
3639
3640/**
3641 * lpfc_sli_iocbq_lookup - Find command iocb for the given response iocb
3642 * @phba: Pointer to HBA context object.
3643 * @pring: Pointer to driver SLI ring object.
3644 * @prspiocb: Pointer to response iocb object.
3645 *
3646 * This function looks up the iocb_lookup table to get the command iocb
3647 * corresponding to the given response iocb using the iotag of the
3648 * response iocb. The driver calls this function with the hbalock held
3649 * for SLI3 ports or the ring lock held for SLI4 ports.
3650 * This function returns the command iocb object if it finds the command
3651 * iocb else returns NULL.
3652 **/
3653static struct lpfc_iocbq *
3654lpfc_sli_iocbq_lookup(struct lpfc_hba *phba,
3655		      struct lpfc_sli_ring *pring,
3656		      struct lpfc_iocbq *prspiocb)
3657{
3658	struct lpfc_iocbq *cmd_iocb = NULL;
3659	u16 iotag;
3660
3661	if (phba->sli_rev == LPFC_SLI_REV4)
3662		iotag = get_wqe_reqtag(prspiocb);
3663	else
3664		iotag = prspiocb->iocb.ulpIoTag;
3665
3666	if (iotag != 0 && iotag <= phba->sli.last_iotag) {
3667		cmd_iocb = phba->sli.iocbq_lookup[iotag];
3668		if (cmd_iocb->cmd_flag & LPFC_IO_ON_TXCMPLQ) {
3669			/* remove from txcmpl queue list */
3670			list_del_init(&cmd_iocb->list);
3671			cmd_iocb->cmd_flag &= ~LPFC_IO_ON_TXCMPLQ;
3672			pring->txcmplq_cnt--;
3673			return cmd_iocb;
3674		}
3675	}
3676
3677	lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
3678			"0317 iotag x%x is out of "
3679			"range: max iotag x%x\n",
3680			iotag, phba->sli.last_iotag);
3681	return NULL;
3682}
3683
3684/**
3685 * lpfc_sli_iocbq_lookup_by_tag - Find command iocb for the iotag
3686 * @phba: Pointer to HBA context object.
3687 * @pring: Pointer to driver SLI ring object.
3688 * @iotag: IOCB tag.
3689 *
3690 * This function looks up the iocb_lookup table to get the command iocb
3691 * corresponding to the given iotag. The driver calls this function with
3692 * the ring lock held because this function is an SLI4 port only helper.
3693 * This function returns the command iocb object if it finds the command
3694 * iocb else returns NULL.
3695 **/
3696static struct lpfc_iocbq *
3697lpfc_sli_iocbq_lookup_by_tag(struct lpfc_hba *phba,
3698			     struct lpfc_sli_ring *pring, uint16_t iotag)
3699{
3700	struct lpfc_iocbq *cmd_iocb = NULL;
3701
3702	if (iotag != 0 && iotag <= phba->sli.last_iotag) {
3703		cmd_iocb = phba->sli.iocbq_lookup[iotag];
3704		if (cmd_iocb->cmd_flag & LPFC_IO_ON_TXCMPLQ) {
3705			/* remove from txcmpl queue list */
3706			list_del_init(&cmd_iocb->list);
3707			cmd_iocb->cmd_flag &= ~LPFC_IO_ON_TXCMPLQ;
3708			pring->txcmplq_cnt--;
3709			return cmd_iocb;
3710		}
3711	}
3712
3713	lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
3714			"0372 iotag x%x lookup error: max iotag (x%x) "
3715			"cmd_flag x%x\n",
3716			iotag, phba->sli.last_iotag,
3717			cmd_iocb ? cmd_iocb->cmd_flag : 0xffff);
3718	return NULL;
3719}
3720
3721/**
3722 * lpfc_sli_process_sol_iocb - process solicited iocb completion
3723 * @phba: Pointer to HBA context object.
3724 * @pring: Pointer to driver SLI ring object.
3725 * @saveq: Pointer to the response iocb to be processed.
3726 *
3727 * This function is called by the ring event handler for non-fcp
3728 * rings when there is a new response iocb in the response ring.
3729 * The caller is not required to hold any locks. This function
3730 * gets the command iocb associated with the response iocb and
3731 * calls the completion handler for the command iocb. If there
3732 * is no completion handler, the function will free the resources
3733 * associated with command iocb. If the response iocb is for
3734 * an already aborted command iocb, the status of the completion
3735 * is changed to IOSTAT_LOCAL_REJECT/IOERR_SLI_ABORTED.
3736 * This function always returns 1.
3737 **/
3738static int
3739lpfc_sli_process_sol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
3740			  struct lpfc_iocbq *saveq)
3741{
3742	struct lpfc_iocbq *cmdiocbp;
3743	unsigned long iflag;
3744	u32 ulp_command, ulp_status, ulp_word4, ulp_context, iotag;
3745
3746	if (phba->sli_rev == LPFC_SLI_REV4)
3747		spin_lock_irqsave(&pring->ring_lock, iflag);
3748	else
3749		spin_lock_irqsave(&phba->hbalock, iflag);
3750	cmdiocbp = lpfc_sli_iocbq_lookup(phba, pring, saveq);
3751	if (phba->sli_rev == LPFC_SLI_REV4)
3752		spin_unlock_irqrestore(&pring->ring_lock, iflag);
3753	else
3754		spin_unlock_irqrestore(&phba->hbalock, iflag);
3755
3756	ulp_command = get_job_cmnd(phba, saveq);
3757	ulp_status = get_job_ulpstatus(phba, saveq);
3758	ulp_word4 = get_job_word4(phba, saveq);
3759	ulp_context = get_job_ulpcontext(phba, saveq);
3760	if (phba->sli_rev == LPFC_SLI_REV4)
3761		iotag = get_wqe_reqtag(saveq);
3762	else
3763		iotag = saveq->iocb.ulpIoTag;
3764
3765	if (cmdiocbp) {
3766		ulp_command = get_job_cmnd(phba, cmdiocbp);
3767		if (cmdiocbp->cmd_cmpl) {
3768			/*
3769			 * If an ELS command failed send an event to mgmt
3770			 * application.
3771			 */
3772			if (ulp_status &&
3773			     (pring->ringno == LPFC_ELS_RING) &&
3774			     (ulp_command == CMD_ELS_REQUEST64_CR))
3775				lpfc_send_els_failure_event(phba,
3776					cmdiocbp, saveq);
3777
3778			/*
3779			 * Post all ELS completions to the worker thread.
3780			 * All other are passed to the completion callback.
3781			 */
3782			if (pring->ringno == LPFC_ELS_RING) {
3783				if ((phba->sli_rev < LPFC_SLI_REV4) &&
3784				    (cmdiocbp->cmd_flag &
3785							LPFC_DRIVER_ABORTED)) {
3786					spin_lock_irqsave(&phba->hbalock,
3787							  iflag);
3788					cmdiocbp->cmd_flag &=
3789						~LPFC_DRIVER_ABORTED;
3790					spin_unlock_irqrestore(&phba->hbalock,
3791							       iflag);
3792					saveq->iocb.ulpStatus =
3793						IOSTAT_LOCAL_REJECT;
3794					saveq->iocb.un.ulpWord[4] =
3795						IOERR_SLI_ABORTED;
3796
3797					/* Firmware could still be in progress
3798					 * of DMAing payload, so don't free data
3799					 * buffer till after a hbeat.
3800					 */
3801					spin_lock_irqsave(&phba->hbalock,
3802							  iflag);
3803					saveq->cmd_flag |= LPFC_DELAY_MEM_FREE;
3804					spin_unlock_irqrestore(&phba->hbalock,
3805							       iflag);
3806				}
3807				if (phba->sli_rev == LPFC_SLI_REV4) {
3808					if (saveq->cmd_flag &
3809					    LPFC_EXCHANGE_BUSY) {
3810						/* Set cmdiocb flag for the
3811						 * exchange busy so sgl (xri)
3812						 * will not be released until
3813						 * the abort xri is received
3814						 * from hba.
3815						 */
3816						spin_lock_irqsave(
3817							&phba->hbalock, iflag);
3818						cmdiocbp->cmd_flag |=
3819							LPFC_EXCHANGE_BUSY;
3820						spin_unlock_irqrestore(
3821							&phba->hbalock, iflag);
3822					}
3823					if (cmdiocbp->cmd_flag &
3824					    LPFC_DRIVER_ABORTED) {
3825						/*
3826						 * Clear LPFC_DRIVER_ABORTED
3827						 * bit in case it was driver
3828						 * initiated abort.
3829						 */
3830						spin_lock_irqsave(
3831							&phba->hbalock, iflag);
3832						cmdiocbp->cmd_flag &=
3833							~LPFC_DRIVER_ABORTED;
3834						spin_unlock_irqrestore(
3835							&phba->hbalock, iflag);
3836						set_job_ulpstatus(cmdiocbp,
3837								  IOSTAT_LOCAL_REJECT);
3838						set_job_ulpword4(cmdiocbp,
3839								 IOERR_ABORT_REQUESTED);
3840						/*
3841						 * For SLI4, irspiocb contains
3842						 * NO_XRI in sli_xritag, it
3843						 * shall not affect releasing
3844						 * sgl (xri) process.
3845						 */
3846						set_job_ulpstatus(saveq,
3847								  IOSTAT_LOCAL_REJECT);
3848						set_job_ulpword4(saveq,
3849								 IOERR_SLI_ABORTED);
3850						spin_lock_irqsave(
3851							&phba->hbalock, iflag);
3852						saveq->cmd_flag |=
3853							LPFC_DELAY_MEM_FREE;
3854						spin_unlock_irqrestore(
3855							&phba->hbalock, iflag);
3856					}
3857				}
3858			}
3859			cmdiocbp->cmd_cmpl(phba, cmdiocbp, saveq);
3860		} else
3861			lpfc_sli_release_iocbq(phba, cmdiocbp);
3862	} else {
3863		/*
3864		 * Unknown initiating command based on the response iotag.
3865		 * This could be the case on the ELS ring because of
3866		 * lpfc_els_abort().
3867		 */
3868		if (pring->ringno != LPFC_ELS_RING) {
3869			/*
3870			 * Ring <ringno> handler: unexpected completion IoTag
3871			 * <IoTag>
3872			 */
3873			lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
3874					 "0322 Ring %d handler: "
3875					 "unexpected completion IoTag x%x "
3876					 "Data: x%x x%x x%x x%x\n",
3877					 pring->ringno, iotag, ulp_status,
3878					 ulp_word4, ulp_command, ulp_context);
3879		}
3880	}
3881
3882	return 1;
3883}
3884
3885/**
3886 * lpfc_sli_rsp_pointers_error - Response ring pointer error handler
3887 * @phba: Pointer to HBA context object.
3888 * @pring: Pointer to driver SLI ring object.
3889 *
3890 * This function is called from the iocb ring event handlers when
3891 * put pointer is ahead of the get pointer for a ring. This function signal
3892 * an error attention condition to the worker thread and the worker
3893 * thread will transition the HBA to offline state.
3894 **/
3895static void
3896lpfc_sli_rsp_pointers_error(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
3897{
3898	struct lpfc_pgp *pgp = &phba->port_gp[pring->ringno];
3899	/*
3900	 * Ring <ringno> handler: portRspPut <portRspPut> is bigger than
3901	 * rsp ring <portRspMax>
3902	 */
3903	lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
3904			"0312 Ring %d handler: portRspPut %d "
3905			"is bigger than rsp ring %d\n",
3906			pring->ringno, le32_to_cpu(pgp->rspPutInx),
3907			pring->sli.sli3.numRiocb);
3908
3909	phba->link_state = LPFC_HBA_ERROR;
3910
3911	/*
3912	 * All error attention handlers are posted to
3913	 * worker thread
3914	 */
3915	phba->work_ha |= HA_ERATT;
3916	phba->work_hs = HS_FFER3;
3917
3918	lpfc_worker_wake_up(phba);
3919
3920	return;
3921}
3922
3923/**
3924 * lpfc_poll_eratt - Error attention polling timer timeout handler
3925 * @t: Context to fetch pointer to address of HBA context object from.
3926 *
3927 * This function is invoked by the Error Attention polling timer when the
3928 * timer times out. It will check the SLI Error Attention register for
3929 * possible attention events. If so, it will post an Error Attention event
3930 * and wake up worker thread to process it. Otherwise, it will set up the
3931 * Error Attention polling timer for the next poll.
3932 **/
3933void lpfc_poll_eratt(struct timer_list *t)
3934{
3935	struct lpfc_hba *phba;
3936	uint32_t eratt = 0;
3937	uint64_t sli_intr, cnt;
3938
3939	phba = from_timer(phba, t, eratt_poll);
3940	if (!(phba->hba_flag & HBA_SETUP))
3941		return;
3942
3943	if (test_bit(FC_UNLOADING, &phba->pport->load_flag))
3944		return;
3945
3946	/* Here we will also keep track of interrupts per sec of the hba */
3947	sli_intr = phba->sli.slistat.sli_intr;
3948
3949	if (phba->sli.slistat.sli_prev_intr > sli_intr)
3950		cnt = (((uint64_t)(-1) - phba->sli.slistat.sli_prev_intr) +
3951			sli_intr);
3952	else
3953		cnt = (sli_intr - phba->sli.slistat.sli_prev_intr);
3954
3955	/* 64-bit integer division not supported on 32-bit x86 - use do_div */
3956	do_div(cnt, phba->eratt_poll_interval);
3957	phba->sli.slistat.sli_ips = cnt;
3958
3959	phba->sli.slistat.sli_prev_intr = sli_intr;
3960
3961	/* Check chip HA register for error event */
3962	eratt = lpfc_sli_check_eratt(phba);
3963
3964	if (eratt)
3965		/* Tell the worker thread there is work to do */
3966		lpfc_worker_wake_up(phba);
3967	else
3968		/* Restart the timer for next eratt poll */
3969		mod_timer(&phba->eratt_poll,
3970			  jiffies +
3971			  msecs_to_jiffies(1000 * phba->eratt_poll_interval));
3972	return;
3973}
3974
3975
3976/**
3977 * lpfc_sli_handle_fast_ring_event - Handle ring events on FCP ring
3978 * @phba: Pointer to HBA context object.
3979 * @pring: Pointer to driver SLI ring object.
3980 * @mask: Host attention register mask for this ring.
3981 *
3982 * This function is called from the interrupt context when there is a ring
3983 * event for the fcp ring. The caller does not hold any lock.
3984 * The function processes each response iocb in the response ring until it
3985 * finds an iocb with LE bit set and chains all the iocbs up to the iocb with
3986 * LE bit set. The function will call the completion handler of the command iocb
3987 * if the response iocb indicates a completion for a command iocb or it is
3988 * an abort completion. The function will call lpfc_sli_process_unsol_iocb
3989 * function if this is an unsolicited iocb.
3990 * This routine presumes LPFC_FCP_RING handling and doesn't bother
3991 * to check it explicitly.
3992 */
3993int
3994lpfc_sli_handle_fast_ring_event(struct lpfc_hba *phba,
3995				struct lpfc_sli_ring *pring, uint32_t mask)
3996{
3997	struct lpfc_pgp *pgp = &phba->port_gp[pring->ringno];
3998	IOCB_t *irsp = NULL;
3999	IOCB_t *entry = NULL;
4000	struct lpfc_iocbq *cmdiocbq = NULL;
4001	struct lpfc_iocbq rspiocbq;
4002	uint32_t status;
4003	uint32_t portRspPut, portRspMax;
4004	int rc = 1;
4005	lpfc_iocb_type type;
4006	unsigned long iflag;
4007	uint32_t rsp_cmpl = 0;
4008
4009	spin_lock_irqsave(&phba->hbalock, iflag);
4010	pring->stats.iocb_event++;
4011
4012	/*
4013	 * The next available response entry should never exceed the maximum
4014	 * entries.  If it does, treat it as an adapter hardware error.
4015	 */
4016	portRspMax = pring->sli.sli3.numRiocb;
4017	portRspPut = le32_to_cpu(pgp->rspPutInx);
4018	if (unlikely(portRspPut >= portRspMax)) {
4019		lpfc_sli_rsp_pointers_error(phba, pring);
4020		spin_unlock_irqrestore(&phba->hbalock, iflag);
4021		return 1;
4022	}
4023	if (phba->fcp_ring_in_use) {
4024		spin_unlock_irqrestore(&phba->hbalock, iflag);
4025		return 1;
4026	} else
4027		phba->fcp_ring_in_use = 1;
4028
4029	rmb();
4030	while (pring->sli.sli3.rspidx != portRspPut) {
4031		/*
4032		 * Fetch an entry off the ring and copy it into a local data
4033		 * structure.  The copy involves a byte-swap since the
4034		 * network byte order and pci byte orders are different.
4035		 */
4036		entry = lpfc_resp_iocb(phba, pring);
4037		phba->last_completion_time = jiffies;
4038
4039		if (++pring->sli.sli3.rspidx >= portRspMax)
4040			pring->sli.sli3.rspidx = 0;
4041
4042		lpfc_sli_pcimem_bcopy((uint32_t *) entry,
4043				      (uint32_t *) &rspiocbq.iocb,
4044				      phba->iocb_rsp_size);
4045		INIT_LIST_HEAD(&(rspiocbq.list));
4046		irsp = &rspiocbq.iocb;
4047
4048		type = lpfc_sli_iocb_cmd_type(irsp->ulpCommand & CMD_IOCB_MASK);
4049		pring->stats.iocb_rsp++;
4050		rsp_cmpl++;
4051
4052		if (unlikely(irsp->ulpStatus)) {
4053			/*
4054			 * If resource errors reported from HBA, reduce
4055			 * queuedepths of the SCSI device.
4056			 */
4057			if ((irsp->ulpStatus == IOSTAT_LOCAL_REJECT) &&
4058			    ((irsp->un.ulpWord[4] & IOERR_PARAM_MASK) ==
4059			     IOERR_NO_RESOURCES)) {
4060				spin_unlock_irqrestore(&phba->hbalock, iflag);
4061				phba->lpfc_rampdown_queue_depth(phba);
4062				spin_lock_irqsave(&phba->hbalock, iflag);
4063			}
4064
4065			/* Rsp ring <ringno> error: IOCB */
4066			lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
4067					"0336 Rsp Ring %d error: IOCB Data: "
4068					"x%x x%x x%x x%x x%x x%x x%x x%x\n",
4069					pring->ringno,
4070					irsp->un.ulpWord[0],
4071					irsp->un.ulpWord[1],
4072					irsp->un.ulpWord[2],
4073					irsp->un.ulpWord[3],
4074					irsp->un.ulpWord[4],
4075					irsp->un.ulpWord[5],
4076					*(uint32_t *)&irsp->un1,
4077					*((uint32_t *)&irsp->un1 + 1));
4078		}
4079
4080		switch (type) {
4081		case LPFC_ABORT_IOCB:
4082		case LPFC_SOL_IOCB:
4083			/*
4084			 * Idle exchange closed via ABTS from port.  No iocb
4085			 * resources need to be recovered.
4086			 */
4087			if (unlikely(irsp->ulpCommand == CMD_XRI_ABORTED_CX)) {
4088				lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
4089						"0333 IOCB cmd 0x%x"
4090						" processed. Skipping"
4091						" completion\n",
4092						irsp->ulpCommand);
4093				break;
4094			}
4095
4096			cmdiocbq = lpfc_sli_iocbq_lookup(phba, pring,
4097							 &rspiocbq);
4098			if (unlikely(!cmdiocbq))
4099				break;
4100			if (cmdiocbq->cmd_flag & LPFC_DRIVER_ABORTED)
4101				cmdiocbq->cmd_flag &= ~LPFC_DRIVER_ABORTED;
4102			if (cmdiocbq->cmd_cmpl) {
4103				spin_unlock_irqrestore(&phba->hbalock, iflag);
4104				cmdiocbq->cmd_cmpl(phba, cmdiocbq, &rspiocbq);
4105				spin_lock_irqsave(&phba->hbalock, iflag);
4106			}
4107			break;
4108		case LPFC_UNSOL_IOCB:
4109			spin_unlock_irqrestore(&phba->hbalock, iflag);
4110			lpfc_sli_process_unsol_iocb(phba, pring, &rspiocbq);
4111			spin_lock_irqsave(&phba->hbalock, iflag);
4112			break;
4113		default:
4114			if (irsp->ulpCommand == CMD_ADAPTER_MSG) {
4115				char adaptermsg[LPFC_MAX_ADPTMSG];
4116				memset(adaptermsg, 0, LPFC_MAX_ADPTMSG);
4117				memcpy(&adaptermsg[0], (uint8_t *) irsp,
4118				       MAX_MSG_DATA);
4119				dev_warn(&((phba->pcidev)->dev),
4120					 "lpfc%d: %s\n",
4121					 phba->brd_no, adaptermsg);
4122			} else {
4123				/* Unknown IOCB command */
4124				lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
4125						"0334 Unknown IOCB command "
4126						"Data: x%x, x%x x%x x%x x%x\n",
4127						type, irsp->ulpCommand,
4128						irsp->ulpStatus,
4129						irsp->ulpIoTag,
4130						irsp->ulpContext);
4131			}
4132			break;
4133		}
4134
4135		/*
4136		 * The response IOCB has been processed.  Update the ring
4137		 * pointer in SLIM.  If the port response put pointer has not
4138		 * been updated, sync the pgp->rspPutInx and fetch the new port
4139		 * response put pointer.
4140		 */
4141		writel(pring->sli.sli3.rspidx,
4142			&phba->host_gp[pring->ringno].rspGetInx);
4143
4144		if (pring->sli.sli3.rspidx == portRspPut)
4145			portRspPut = le32_to_cpu(pgp->rspPutInx);
4146	}
4147
4148	if ((rsp_cmpl > 0) && (mask & HA_R0RE_REQ)) {
4149		pring->stats.iocb_rsp_full++;
4150		status = ((CA_R0ATT | CA_R0RE_RSP) << (pring->ringno * 4));
4151		writel(status, phba->CAregaddr);
4152		readl(phba->CAregaddr);
4153	}
4154	if ((mask & HA_R0CE_RSP) && (pring->flag & LPFC_CALL_RING_AVAILABLE)) {
4155		pring->flag &= ~LPFC_CALL_RING_AVAILABLE;
4156		pring->stats.iocb_cmd_empty++;
4157
4158		/* Force update of the local copy of cmdGetInx */
4159		pring->sli.sli3.local_getidx = le32_to_cpu(pgp->cmdGetInx);
4160		lpfc_sli_resume_iocb(phba, pring);
4161
4162		if ((pring->lpfc_sli_cmd_available))
4163			(pring->lpfc_sli_cmd_available) (phba, pring);
4164
4165	}
4166
4167	phba->fcp_ring_in_use = 0;
4168	spin_unlock_irqrestore(&phba->hbalock, iflag);
4169	return rc;
4170}
4171
4172/**
4173 * lpfc_sli_sp_handle_rspiocb - Handle slow-path response iocb
4174 * @phba: Pointer to HBA context object.
4175 * @pring: Pointer to driver SLI ring object.
4176 * @rspiocbp: Pointer to driver response IOCB object.
4177 *
4178 * This function is called from the worker thread when there is a slow-path
4179 * response IOCB to process. This function chains all the response iocbs until
4180 * seeing the iocb with the LE bit set. The function will call
4181 * lpfc_sli_process_sol_iocb function if the response iocb indicates a
4182 * completion of a command iocb. The function will call the
4183 * lpfc_sli_process_unsol_iocb function if this is an unsolicited iocb.
4184 * The function frees the resources or calls the completion handler if this
4185 * iocb is an abort completion. The function returns NULL when the response
4186 * iocb has the LE bit set and all the chained iocbs are processed, otherwise
4187 * this function shall chain the iocb on to the iocb_continueq and return the
4188 * response iocb passed in.
4189 **/
4190static struct lpfc_iocbq *
4191lpfc_sli_sp_handle_rspiocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
4192			struct lpfc_iocbq *rspiocbp)
4193{
4194	struct lpfc_iocbq *saveq;
4195	struct lpfc_iocbq *cmdiocb;
4196	struct lpfc_iocbq *next_iocb;
4197	IOCB_t *irsp;
4198	uint32_t free_saveq;
4199	u8 cmd_type;
4200	lpfc_iocb_type type;
4201	unsigned long iflag;
4202	u32 ulp_status = get_job_ulpstatus(phba, rspiocbp);
4203	u32 ulp_word4 = get_job_word4(phba, rspiocbp);
4204	u32 ulp_command = get_job_cmnd(phba, rspiocbp);
4205	int rc;
4206
4207	spin_lock_irqsave(&phba->hbalock, iflag);
4208	/* First add the response iocb to the countinueq list */
4209	list_add_tail(&rspiocbp->list, &pring->iocb_continueq);
4210	pring->iocb_continueq_cnt++;
4211
4212	/*
4213	 * By default, the driver expects to free all resources
4214	 * associated with this iocb completion.
4215	 */
4216	free_saveq = 1;
4217	saveq = list_get_first(&pring->iocb_continueq,
4218			       struct lpfc_iocbq, list);
4219	list_del_init(&pring->iocb_continueq);
4220	pring->iocb_continueq_cnt = 0;
4221
4222	pring->stats.iocb_rsp++;
4223
4224	/*
4225	 * If resource errors reported from HBA, reduce
4226	 * queuedepths of the SCSI device.
4227	 */
4228	if (ulp_status == IOSTAT_LOCAL_REJECT &&
4229	    ((ulp_word4 & IOERR_PARAM_MASK) ==
4230	     IOERR_NO_RESOURCES)) {
4231		spin_unlock_irqrestore(&phba->hbalock, iflag);
4232		phba->lpfc_rampdown_queue_depth(phba);
4233		spin_lock_irqsave(&phba->hbalock, iflag);
4234	}
4235
4236	if (ulp_status) {
4237		/* Rsp ring <ringno> error: IOCB */
4238		if (phba->sli_rev < LPFC_SLI_REV4) {
4239			irsp = &rspiocbp->iocb;
4240			lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
4241					"0328 Rsp Ring %d error: ulp_status x%x "
4242					"IOCB Data: "
4243					"x%08x x%08x x%08x x%08x "
4244					"x%08x x%08x x%08x x%08x "
4245					"x%08x x%08x x%08x x%08x "
4246					"x%08x x%08x x%08x x%08x\n",
4247					pring->ringno, ulp_status,
4248					get_job_ulpword(rspiocbp, 0),
4249					get_job_ulpword(rspiocbp, 1),
4250					get_job_ulpword(rspiocbp, 2),
4251					get_job_ulpword(rspiocbp, 3),
4252					get_job_ulpword(rspiocbp, 4),
4253					get_job_ulpword(rspiocbp, 5),
4254					*(((uint32_t *)irsp) + 6),
4255					*(((uint32_t *)irsp) + 7),
4256					*(((uint32_t *)irsp) + 8),
4257					*(((uint32_t *)irsp) + 9),
4258					*(((uint32_t *)irsp) + 10),
4259					*(((uint32_t *)irsp) + 11),
4260					*(((uint32_t *)irsp) + 12),
4261					*(((uint32_t *)irsp) + 13),
4262					*(((uint32_t *)irsp) + 14),
4263					*(((uint32_t *)irsp) + 15));
4264		} else {
4265			lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
4266					"0321 Rsp Ring %d error: "
4267					"IOCB Data: "
4268					"x%x x%x x%x x%x\n",
4269					pring->ringno,
4270					rspiocbp->wcqe_cmpl.word0,
4271					rspiocbp->wcqe_cmpl.total_data_placed,
4272					rspiocbp->wcqe_cmpl.parameter,
4273					rspiocbp->wcqe_cmpl.word3);
4274		}
4275	}
4276
4277
4278	/*
4279	 * Fetch the iocb command type and call the correct completion
4280	 * routine. Solicited and Unsolicited IOCBs on the ELS ring
4281	 * get freed back to the lpfc_iocb_list by the discovery
4282	 * kernel thread.
4283	 */
4284	cmd_type = ulp_command & CMD_IOCB_MASK;
4285	type = lpfc_sli_iocb_cmd_type(cmd_type);
4286	switch (type) {
4287	case LPFC_SOL_IOCB:
4288		spin_unlock_irqrestore(&phba->hbalock, iflag);
4289		rc = lpfc_sli_process_sol_iocb(phba, pring, saveq);
4290		spin_lock_irqsave(&phba->hbalock, iflag);
4291		break;
4292	case LPFC_UNSOL_IOCB:
4293		spin_unlock_irqrestore(&phba->hbalock, iflag);
4294		rc = lpfc_sli_process_unsol_iocb(phba, pring, saveq);
4295		spin_lock_irqsave(&phba->hbalock, iflag);
4296		if (!rc)
4297			free_saveq = 0;
4298		break;
4299	case LPFC_ABORT_IOCB:
4300		cmdiocb = NULL;
4301		if (ulp_command != CMD_XRI_ABORTED_CX)
4302			cmdiocb = lpfc_sli_iocbq_lookup(phba, pring,
4303							saveq);
4304		if (cmdiocb) {
4305			/* Call the specified completion routine */
4306			if (cmdiocb->cmd_cmpl) {
4307				spin_unlock_irqrestore(&phba->hbalock, iflag);
4308				cmdiocb->cmd_cmpl(phba, cmdiocb, saveq);
4309				spin_lock_irqsave(&phba->hbalock, iflag);
4310			} else {
4311				__lpfc_sli_release_iocbq(phba, cmdiocb);
4312			}
4313		}
4314		break;
4315	case LPFC_UNKNOWN_IOCB:
4316		if (ulp_command == CMD_ADAPTER_MSG) {
4317			char adaptermsg[LPFC_MAX_ADPTMSG];
4318
4319			memset(adaptermsg, 0, LPFC_MAX_ADPTMSG);
4320			memcpy(&adaptermsg[0], (uint8_t *)&rspiocbp->wqe,
4321			       MAX_MSG_DATA);
4322			dev_warn(&((phba->pcidev)->dev),
4323				 "lpfc%d: %s\n",
4324				 phba->brd_no, adaptermsg);
4325		} else {
4326			/* Unknown command */
4327			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
4328					"0335 Unknown IOCB "
4329					"command Data: x%x "
4330					"x%x x%x x%x\n",
4331					ulp_command,
4332					ulp_status,
4333					get_wqe_reqtag(rspiocbp),
4334					get_job_ulpcontext(phba, rspiocbp));
4335		}
4336		break;
4337	}
4338
4339	if (free_saveq) {
4340		list_for_each_entry_safe(rspiocbp, next_iocb,
4341					 &saveq->list, list) {
4342			list_del_init(&rspiocbp->list);
4343			__lpfc_sli_release_iocbq(phba, rspiocbp);
4344		}
4345		__lpfc_sli_release_iocbq(phba, saveq);
4346	}
4347	rspiocbp = NULL;
4348	spin_unlock_irqrestore(&phba->hbalock, iflag);
4349	return rspiocbp;
4350}
4351
4352/**
4353 * lpfc_sli_handle_slow_ring_event - Wrapper func for handling slow-path iocbs
4354 * @phba: Pointer to HBA context object.
4355 * @pring: Pointer to driver SLI ring object.
4356 * @mask: Host attention register mask for this ring.
4357 *
4358 * This routine wraps the actual slow_ring event process routine from the
4359 * API jump table function pointer from the lpfc_hba struct.
4360 **/
4361void
4362lpfc_sli_handle_slow_ring_event(struct lpfc_hba *phba,
4363				struct lpfc_sli_ring *pring, uint32_t mask)
4364{
4365	phba->lpfc_sli_handle_slow_ring_event(phba, pring, mask);
4366}
4367
4368/**
4369 * lpfc_sli_handle_slow_ring_event_s3 - Handle SLI3 ring event for non-FCP rings
4370 * @phba: Pointer to HBA context object.
4371 * @pring: Pointer to driver SLI ring object.
4372 * @mask: Host attention register mask for this ring.
4373 *
4374 * This function is called from the worker thread when there is a ring event
4375 * for non-fcp rings. The caller does not hold any lock. The function will
4376 * remove each response iocb in the response ring and calls the handle
4377 * response iocb routine (lpfc_sli_sp_handle_rspiocb) to process it.
4378 **/
4379static void
4380lpfc_sli_handle_slow_ring_event_s3(struct lpfc_hba *phba,
4381				   struct lpfc_sli_ring *pring, uint32_t mask)
4382{
4383	struct lpfc_pgp *pgp;
4384	IOCB_t *entry;
4385	IOCB_t *irsp = NULL;
4386	struct lpfc_iocbq *rspiocbp = NULL;
4387	uint32_t portRspPut, portRspMax;
4388	unsigned long iflag;
4389	uint32_t status;
4390
4391	pgp = &phba->port_gp[pring->ringno];
4392	spin_lock_irqsave(&phba->hbalock, iflag);
4393	pring->stats.iocb_event++;
4394
4395	/*
4396	 * The next available response entry should never exceed the maximum
4397	 * entries.  If it does, treat it as an adapter hardware error.
4398	 */
4399	portRspMax = pring->sli.sli3.numRiocb;
4400	portRspPut = le32_to_cpu(pgp->rspPutInx);
4401	if (portRspPut >= portRspMax) {
4402		/*
4403		 * Ring <ringno> handler: portRspPut <portRspPut> is bigger than
4404		 * rsp ring <portRspMax>
4405		 */
4406		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
4407				"0303 Ring %d handler: portRspPut %d "
4408				"is bigger than rsp ring %d\n",
4409				pring->ringno, portRspPut, portRspMax);
4410
4411		phba->link_state = LPFC_HBA_ERROR;
4412		spin_unlock_irqrestore(&phba->hbalock, iflag);
4413
4414		phba->work_hs = HS_FFER3;
4415		lpfc_handle_eratt(phba);
4416
4417		return;
4418	}
4419
4420	rmb();
4421	while (pring->sli.sli3.rspidx != portRspPut) {
4422		/*
4423		 * Build a completion list and call the appropriate handler.
4424		 * The process is to get the next available response iocb, get
4425		 * a free iocb from the list, copy the response data into the
4426		 * free iocb, insert to the continuation list, and update the
4427		 * next response index to slim.  This process makes response
4428		 * iocb's in the ring available to DMA as fast as possible but
4429		 * pays a penalty for a copy operation.  Since the iocb is
4430		 * only 32 bytes, this penalty is considered small relative to
4431		 * the PCI reads for register values and a slim write.  When
4432		 * the ulpLe field is set, the entire Command has been
4433		 * received.
4434		 */
4435		entry = lpfc_resp_iocb(phba, pring);
4436
4437		phba->last_completion_time = jiffies;
4438		rspiocbp = __lpfc_sli_get_iocbq(phba);
4439		if (rspiocbp == NULL) {
4440			printk(KERN_ERR "%s: out of buffers! Failing "
4441			       "completion.\n", __func__);
4442			break;
4443		}
4444
4445		lpfc_sli_pcimem_bcopy(entry, &rspiocbp->iocb,
4446				      phba->iocb_rsp_size);
4447		irsp = &rspiocbp->iocb;
4448
4449		if (++pring->sli.sli3.rspidx >= portRspMax)
4450			pring->sli.sli3.rspidx = 0;
4451
4452		if (pring->ringno == LPFC_ELS_RING) {
4453			lpfc_debugfs_slow_ring_trc(phba,
4454			"IOCB rsp ring:   wd4:x%08x wd6:x%08x wd7:x%08x",
4455				*(((uint32_t *) irsp) + 4),
4456				*(((uint32_t *) irsp) + 6),
4457				*(((uint32_t *) irsp) + 7));
4458		}
4459
4460		writel(pring->sli.sli3.rspidx,
4461			&phba->host_gp[pring->ringno].rspGetInx);
4462
4463		spin_unlock_irqrestore(&phba->hbalock, iflag);
4464		/* Handle the response IOCB */
4465		rspiocbp = lpfc_sli_sp_handle_rspiocb(phba, pring, rspiocbp);
4466		spin_lock_irqsave(&phba->hbalock, iflag);
4467
4468		/*
4469		 * If the port response put pointer has not been updated, sync
4470		 * the pgp->rspPutInx in the MAILBOX_tand fetch the new port
4471		 * response put pointer.
4472		 */
4473		if (pring->sli.sli3.rspidx == portRspPut) {
4474			portRspPut = le32_to_cpu(pgp->rspPutInx);
4475		}
4476	} /* while (pring->sli.sli3.rspidx != portRspPut) */
4477
4478	if ((rspiocbp != NULL) && (mask & HA_R0RE_REQ)) {
4479		/* At least one response entry has been freed */
4480		pring->stats.iocb_rsp_full++;
4481		/* SET RxRE_RSP in Chip Att register */
4482		status = ((CA_R0ATT | CA_R0RE_RSP) << (pring->ringno * 4));
4483		writel(status, phba->CAregaddr);
4484		readl(phba->CAregaddr); /* flush */
4485	}
4486	if ((mask & HA_R0CE_RSP) && (pring->flag & LPFC_CALL_RING_AVAILABLE)) {
4487		pring->flag &= ~LPFC_CALL_RING_AVAILABLE;
4488		pring->stats.iocb_cmd_empty++;
4489
4490		/* Force update of the local copy of cmdGetInx */
4491		pring->sli.sli3.local_getidx = le32_to_cpu(pgp->cmdGetInx);
4492		lpfc_sli_resume_iocb(phba, pring);
4493
4494		if ((pring->lpfc_sli_cmd_available))
4495			(pring->lpfc_sli_cmd_available) (phba, pring);
4496
4497	}
4498
4499	spin_unlock_irqrestore(&phba->hbalock, iflag);
4500	return;
4501}
4502
4503/**
4504 * lpfc_sli_handle_slow_ring_event_s4 - Handle SLI4 slow-path els events
4505 * @phba: Pointer to HBA context object.
4506 * @pring: Pointer to driver SLI ring object.
4507 * @mask: Host attention register mask for this ring.
4508 *
4509 * This function is called from the worker thread when there is a pending
4510 * ELS response iocb on the driver internal slow-path response iocb worker
4511 * queue. The caller does not hold any lock. The function will remove each
4512 * response iocb from the response worker queue and calls the handle
4513 * response iocb routine (lpfc_sli_sp_handle_rspiocb) to process it.
4514 **/
4515static void
4516lpfc_sli_handle_slow_ring_event_s4(struct lpfc_hba *phba,
4517				   struct lpfc_sli_ring *pring, uint32_t mask)
4518{
4519	struct lpfc_iocbq *irspiocbq;
4520	struct hbq_dmabuf *dmabuf;
4521	struct lpfc_cq_event *cq_event;
4522	unsigned long iflag;
4523	int count = 0;
4524
4525	spin_lock_irqsave(&phba->hbalock, iflag);
4526	phba->hba_flag &= ~HBA_SP_QUEUE_EVT;
4527	spin_unlock_irqrestore(&phba->hbalock, iflag);
4528	while (!list_empty(&phba->sli4_hba.sp_queue_event)) {
4529		/* Get the response iocb from the head of work queue */
4530		spin_lock_irqsave(&phba->hbalock, iflag);
4531		list_remove_head(&phba->sli4_hba.sp_queue_event,
4532				 cq_event, struct lpfc_cq_event, list);
4533		spin_unlock_irqrestore(&phba->hbalock, iflag);
4534
4535		switch (bf_get(lpfc_wcqe_c_code, &cq_event->cqe.wcqe_cmpl)) {
4536		case CQE_CODE_COMPL_WQE:
4537			irspiocbq = container_of(cq_event, struct lpfc_iocbq,
4538						 cq_event);
4539			/* Translate ELS WCQE to response IOCBQ */
4540			irspiocbq = lpfc_sli4_els_preprocess_rspiocbq(phba,
4541								      irspiocbq);
4542			if (irspiocbq)
4543				lpfc_sli_sp_handle_rspiocb(phba, pring,
4544							   irspiocbq);
4545			count++;
4546			break;
4547		case CQE_CODE_RECEIVE:
4548		case CQE_CODE_RECEIVE_V1:
4549			dmabuf = container_of(cq_event, struct hbq_dmabuf,
4550					      cq_event);
4551			lpfc_sli4_handle_received_buffer(phba, dmabuf);
4552			count++;
4553			break;
4554		default:
4555			break;
4556		}
4557
4558		/* Limit the number of events to 64 to avoid soft lockups */
4559		if (count == 64)
4560			break;
4561	}
4562}
4563
4564/**
4565 * lpfc_sli_abort_iocb_ring - Abort all iocbs in the ring
4566 * @phba: Pointer to HBA context object.
4567 * @pring: Pointer to driver SLI ring object.
4568 *
4569 * This function aborts all iocbs in the given ring and frees all the iocb
4570 * objects in txq. This function issues an abort iocb for all the iocb commands
4571 * in txcmplq. The iocbs in the txcmplq is not guaranteed to complete before
4572 * the return of this function. The caller is not required to hold any locks.
4573 **/
4574void
4575lpfc_sli_abort_iocb_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
4576{
4577	LIST_HEAD(tx_completions);
4578	LIST_HEAD(txcmplq_completions);
4579	struct lpfc_iocbq *iocb, *next_iocb;
4580	int offline;
4581
4582	if (pring->ringno == LPFC_ELS_RING) {
4583		lpfc_fabric_abort_hba(phba);
4584	}
4585	offline = pci_channel_offline(phba->pcidev);
4586
4587	/* Error everything on txq and txcmplq
4588	 * First do the txq.
4589	 */
4590	if (phba->sli_rev >= LPFC_SLI_REV4) {
4591		spin_lock_irq(&pring->ring_lock);
4592		list_splice_init(&pring->txq, &tx_completions);
4593		pring->txq_cnt = 0;
4594
4595		if (offline) {
4596			list_splice_init(&pring->txcmplq,
4597					 &txcmplq_completions);
4598		} else {
4599			/* Next issue ABTS for everything on the txcmplq */
4600			list_for_each_entry_safe(iocb, next_iocb,
4601						 &pring->txcmplq, list)
4602				lpfc_sli_issue_abort_iotag(phba, pring,
4603							   iocb, NULL);
4604		}
4605		spin_unlock_irq(&pring->ring_lock);
4606	} else {
4607		spin_lock_irq(&phba->hbalock);
4608		list_splice_init(&pring->txq, &tx_completions);
4609		pring->txq_cnt = 0;
4610
4611		if (offline) {
4612			list_splice_init(&pring->txcmplq, &txcmplq_completions);
4613		} else {
4614			/* Next issue ABTS for everything on the txcmplq */
4615			list_for_each_entry_safe(iocb, next_iocb,
4616						 &pring->txcmplq, list)
4617				lpfc_sli_issue_abort_iotag(phba, pring,
4618							   iocb, NULL);
4619		}
4620		spin_unlock_irq(&phba->hbalock);
4621	}
4622
4623	if (offline) {
4624		/* Cancel all the IOCBs from the completions list */
4625		lpfc_sli_cancel_iocbs(phba, &txcmplq_completions,
4626				      IOSTAT_LOCAL_REJECT, IOERR_SLI_ABORTED);
4627	} else {
4628		/* Make sure HBA is alive */
4629		lpfc_issue_hb_tmo(phba);
4630	}
4631	/* Cancel all the IOCBs from the completions list */
4632	lpfc_sli_cancel_iocbs(phba, &tx_completions, IOSTAT_LOCAL_REJECT,
4633			      IOERR_SLI_ABORTED);
4634}
4635
4636/**
4637 * lpfc_sli_abort_fcp_rings - Abort all iocbs in all FCP rings
4638 * @phba: Pointer to HBA context object.
4639 *
4640 * This function aborts all iocbs in FCP rings and frees all the iocb
4641 * objects in txq. This function issues an abort iocb for all the iocb commands
4642 * in txcmplq. The iocbs in the txcmplq is not guaranteed to complete before
4643 * the return of this function. The caller is not required to hold any locks.
4644 **/
4645void
4646lpfc_sli_abort_fcp_rings(struct lpfc_hba *phba)
4647{
4648	struct lpfc_sli *psli = &phba->sli;
4649	struct lpfc_sli_ring  *pring;
4650	uint32_t i;
4651
4652	/* Look on all the FCP Rings for the iotag */
4653	if (phba->sli_rev >= LPFC_SLI_REV4) {
4654		for (i = 0; i < phba->cfg_hdw_queue; i++) {
4655			pring = phba->sli4_hba.hdwq[i].io_wq->pring;
4656			lpfc_sli_abort_iocb_ring(phba, pring);
4657		}
4658	} else {
4659		pring = &psli->sli3_ring[LPFC_FCP_RING];
4660		lpfc_sli_abort_iocb_ring(phba, pring);
4661	}
4662}
4663
4664/**
4665 * lpfc_sli_flush_io_rings - flush all iocbs in the IO ring
4666 * @phba: Pointer to HBA context object.
4667 *
4668 * This function flushes all iocbs in the IO ring and frees all the iocb
4669 * objects in txq and txcmplq. This function will not issue abort iocbs
4670 * for all the iocb commands in txcmplq, they will just be returned with
4671 * IOERR_SLI_DOWN. This function is invoked with EEH when device's PCI
4672 * slot has been permanently disabled.
4673 **/
4674void
4675lpfc_sli_flush_io_rings(struct lpfc_hba *phba)
4676{
4677	LIST_HEAD(txq);
4678	LIST_HEAD(txcmplq);
4679	struct lpfc_sli *psli = &phba->sli;
4680	struct lpfc_sli_ring  *pring;
4681	uint32_t i;
4682	struct lpfc_iocbq *piocb, *next_iocb;
4683
4684	spin_lock_irq(&phba->hbalock);
4685	/* Indicate the I/O queues are flushed */
4686	phba->hba_flag |= HBA_IOQ_FLUSH;
4687	spin_unlock_irq(&phba->hbalock);
4688
4689	/* Look on all the FCP Rings for the iotag */
4690	if (phba->sli_rev >= LPFC_SLI_REV4) {
4691		for (i = 0; i < phba->cfg_hdw_queue; i++) {
4692			pring = phba->sli4_hba.hdwq[i].io_wq->pring;
4693
4694			spin_lock_irq(&pring->ring_lock);
4695			/* Retrieve everything on txq */
4696			list_splice_init(&pring->txq, &txq);
4697			list_for_each_entry_safe(piocb, next_iocb,
4698						 &pring->txcmplq, list)
4699				piocb->cmd_flag &= ~LPFC_IO_ON_TXCMPLQ;
4700			/* Retrieve everything on the txcmplq */
4701			list_splice_init(&pring->txcmplq, &txcmplq);
4702			pring->txq_cnt = 0;
4703			pring->txcmplq_cnt = 0;
4704			spin_unlock_irq(&pring->ring_lock);
4705
4706			/* Flush the txq */
4707			lpfc_sli_cancel_iocbs(phba, &txq,
4708					      IOSTAT_LOCAL_REJECT,
4709					      IOERR_SLI_DOWN);
4710			/* Flush the txcmplq */
4711			lpfc_sli_cancel_iocbs(phba, &txcmplq,
4712					      IOSTAT_LOCAL_REJECT,
4713					      IOERR_SLI_DOWN);
4714			if (unlikely(pci_channel_offline(phba->pcidev)))
4715				lpfc_sli4_io_xri_aborted(phba, NULL, 0);
4716		}
4717	} else {
4718		pring = &psli->sli3_ring[LPFC_FCP_RING];
4719
4720		spin_lock_irq(&phba->hbalock);
4721		/* Retrieve everything on txq */
4722		list_splice_init(&pring->txq, &txq);
4723		list_for_each_entry_safe(piocb, next_iocb,
4724					 &pring->txcmplq, list)
4725			piocb->cmd_flag &= ~LPFC_IO_ON_TXCMPLQ;
4726		/* Retrieve everything on the txcmplq */
4727		list_splice_init(&pring->txcmplq, &txcmplq);
4728		pring->txq_cnt = 0;
4729		pring->txcmplq_cnt = 0;
4730		spin_unlock_irq(&phba->hbalock);
4731
4732		/* Flush the txq */
4733		lpfc_sli_cancel_iocbs(phba, &txq, IOSTAT_LOCAL_REJECT,
4734				      IOERR_SLI_DOWN);
4735		/* Flush the txcmpq */
4736		lpfc_sli_cancel_iocbs(phba, &txcmplq, IOSTAT_LOCAL_REJECT,
4737				      IOERR_SLI_DOWN);
4738	}
4739}
4740
4741/**
4742 * lpfc_sli_brdready_s3 - Check for sli3 host ready status
4743 * @phba: Pointer to HBA context object.
4744 * @mask: Bit mask to be checked.
4745 *
4746 * This function reads the host status register and compares
4747 * with the provided bit mask to check if HBA completed
4748 * the restart. This function will wait in a loop for the
4749 * HBA to complete restart. If the HBA does not restart within
4750 * 15 iterations, the function will reset the HBA again. The
4751 * function returns 1 when HBA fail to restart otherwise returns
4752 * zero.
4753 **/
4754static int
4755lpfc_sli_brdready_s3(struct lpfc_hba *phba, uint32_t mask)
4756{
4757	uint32_t status;
4758	int i = 0;
4759	int retval = 0;
4760
4761	/* Read the HBA Host Status Register */
4762	if (lpfc_readl(phba->HSregaddr, &status))
4763		return 1;
4764
4765	phba->hba_flag |= HBA_NEEDS_CFG_PORT;
4766
4767	/*
4768	 * Check status register every 100ms for 5 retries, then every
4769	 * 500ms for 5, then every 2.5 sec for 5, then reset board and
4770	 * every 2.5 sec for 4.
4771	 * Break our of the loop if errors occurred during init.
4772	 */
4773	while (((status & mask) != mask) &&
4774	       !(status & HS_FFERM) &&
4775	       i++ < 20) {
4776
4777		if (i <= 5)
4778			msleep(10);
4779		else if (i <= 10)
4780			msleep(500);
4781		else
4782			msleep(2500);
4783
4784		if (i == 15) {
4785				/* Do post */
4786			phba->pport->port_state = LPFC_VPORT_UNKNOWN;
4787			lpfc_sli_brdrestart(phba);
4788		}
4789		/* Read the HBA Host Status Register */
4790		if (lpfc_readl(phba->HSregaddr, &status)) {
4791			retval = 1;
4792			break;
4793		}
4794	}
4795
4796	/* Check to see if any errors occurred during init */
4797	if ((status & HS_FFERM) || (i >= 20)) {
4798		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
4799				"2751 Adapter failed to restart, "
4800				"status reg x%x, FW Data: A8 x%x AC x%x\n",
4801				status,
4802				readl(phba->MBslimaddr + 0xa8),
4803				readl(phba->MBslimaddr + 0xac));
4804		phba->link_state = LPFC_HBA_ERROR;
4805		retval = 1;
4806	}
4807
4808	return retval;
4809}
4810
4811/**
4812 * lpfc_sli_brdready_s4 - Check for sli4 host ready status
4813 * @phba: Pointer to HBA context object.
4814 * @mask: Bit mask to be checked.
4815 *
4816 * This function checks the host status register to check if HBA is
4817 * ready. This function will wait in a loop for the HBA to be ready
4818 * If the HBA is not ready , the function will will reset the HBA PCI
4819 * function again. The function returns 1 when HBA fail to be ready
4820 * otherwise returns zero.
4821 **/
4822static int
4823lpfc_sli_brdready_s4(struct lpfc_hba *phba, uint32_t mask)
4824{
4825	uint32_t status;
4826	int retval = 0;
4827
4828	/* Read the HBA Host Status Register */
4829	status = lpfc_sli4_post_status_check(phba);
4830
4831	if (status) {
4832		phba->pport->port_state = LPFC_VPORT_UNKNOWN;
4833		lpfc_sli_brdrestart(phba);
4834		status = lpfc_sli4_post_status_check(phba);
4835	}
4836
4837	/* Check to see if any errors occurred during init */
4838	if (status) {
4839		phba->link_state = LPFC_HBA_ERROR;
4840		retval = 1;
4841	} else
4842		phba->sli4_hba.intr_enable = 0;
4843
4844	phba->hba_flag &= ~HBA_SETUP;
4845	return retval;
4846}
4847
4848/**
4849 * lpfc_sli_brdready - Wrapper func for checking the hba readyness
4850 * @phba: Pointer to HBA context object.
4851 * @mask: Bit mask to be checked.
4852 *
4853 * This routine wraps the actual SLI3 or SLI4 hba readyness check routine
4854 * from the API jump table function pointer from the lpfc_hba struct.
4855 **/
4856int
4857lpfc_sli_brdready(struct lpfc_hba *phba, uint32_t mask)
4858{
4859	return phba->lpfc_sli_brdready(phba, mask);
4860}
4861
4862#define BARRIER_TEST_PATTERN (0xdeadbeef)
4863
4864/**
4865 * lpfc_reset_barrier - Make HBA ready for HBA reset
4866 * @phba: Pointer to HBA context object.
4867 *
4868 * This function is called before resetting an HBA. This function is called
4869 * with hbalock held and requests HBA to quiesce DMAs before a reset.
4870 **/
4871void lpfc_reset_barrier(struct lpfc_hba *phba)
4872{
4873	uint32_t __iomem *resp_buf;
4874	uint32_t __iomem *mbox_buf;
4875	volatile struct MAILBOX_word0 mbox;
4876	uint32_t hc_copy, ha_copy, resp_data;
4877	int  i;
4878	uint8_t hdrtype;
4879
4880	lockdep_assert_held(&phba->hbalock);
4881
4882	pci_read_config_byte(phba->pcidev, PCI_HEADER_TYPE, &hdrtype);
4883	if (hdrtype != PCI_HEADER_TYPE_MFD ||
4884	    (FC_JEDEC_ID(phba->vpd.rev.biuRev) != HELIOS_JEDEC_ID &&
4885	     FC_JEDEC_ID(phba->vpd.rev.biuRev) != THOR_JEDEC_ID))
4886		return;
4887
4888	/*
4889	 * Tell the other part of the chip to suspend temporarily all
4890	 * its DMA activity.
4891	 */
4892	resp_buf = phba->MBslimaddr;
4893
4894	/* Disable the error attention */
4895	if (lpfc_readl(phba->HCregaddr, &hc_copy))
4896		return;
4897	writel((hc_copy & ~HC_ERINT_ENA), phba->HCregaddr);
4898	readl(phba->HCregaddr); /* flush */
4899	phba->link_flag |= LS_IGNORE_ERATT;
4900
4901	if (lpfc_readl(phba->HAregaddr, &ha_copy))
4902		return;
4903	if (ha_copy & HA_ERATT) {
4904		/* Clear Chip error bit */
4905		writel(HA_ERATT, phba->HAregaddr);
4906		phba->pport->stopped = 1;
4907	}
4908
4909	mbox.word0 = 0;
4910	mbox.mbxCommand = MBX_KILL_BOARD;
4911	mbox.mbxOwner = OWN_CHIP;
4912
4913	writel(BARRIER_TEST_PATTERN, (resp_buf + 1));
4914	mbox_buf = phba->MBslimaddr;
4915	writel(mbox.word0, mbox_buf);
4916
4917	for (i = 0; i < 50; i++) {
4918		if (lpfc_readl((resp_buf + 1), &resp_data))
4919			return;
4920		if (resp_data != ~(BARRIER_TEST_PATTERN))
4921			mdelay(1);
4922		else
4923			break;
4924	}
4925	resp_data = 0;
4926	if (lpfc_readl((resp_buf + 1), &resp_data))
4927		return;
4928	if (resp_data  != ~(BARRIER_TEST_PATTERN)) {
4929		if (phba->sli.sli_flag & LPFC_SLI_ACTIVE ||
4930		    phba->pport->stopped)
4931			goto restore_hc;
4932		else
4933			goto clear_errat;
4934	}
4935
4936	mbox.mbxOwner = OWN_HOST;
4937	resp_data = 0;
4938	for (i = 0; i < 500; i++) {
4939		if (lpfc_readl(resp_buf, &resp_data))
4940			return;
4941		if (resp_data != mbox.word0)
4942			mdelay(1);
4943		else
4944			break;
4945	}
4946
4947clear_errat:
4948
4949	while (++i < 500) {
4950		if (lpfc_readl(phba->HAregaddr, &ha_copy))
4951			return;
4952		if (!(ha_copy & HA_ERATT))
4953			mdelay(1);
4954		else
4955			break;
4956	}
4957
4958	if (readl(phba->HAregaddr) & HA_ERATT) {
4959		writel(HA_ERATT, phba->HAregaddr);
4960		phba->pport->stopped = 1;
4961	}
4962
4963restore_hc:
4964	phba->link_flag &= ~LS_IGNORE_ERATT;
4965	writel(hc_copy, phba->HCregaddr);
4966	readl(phba->HCregaddr); /* flush */
4967}
4968
4969/**
4970 * lpfc_sli_brdkill - Issue a kill_board mailbox command
4971 * @phba: Pointer to HBA context object.
4972 *
4973 * This function issues a kill_board mailbox command and waits for
4974 * the error attention interrupt. This function is called for stopping
4975 * the firmware processing. The caller is not required to hold any
4976 * locks. This function calls lpfc_hba_down_post function to free
4977 * any pending commands after the kill. The function will return 1 when it
4978 * fails to kill the board else will return 0.
4979 **/
4980int
4981lpfc_sli_brdkill(struct lpfc_hba *phba)
4982{
4983	struct lpfc_sli *psli;
4984	LPFC_MBOXQ_t *pmb;
4985	uint32_t status;
4986	uint32_t ha_copy;
4987	int retval;
4988	int i = 0;
4989
4990	psli = &phba->sli;
4991
4992	/* Kill HBA */
4993	lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
4994			"0329 Kill HBA Data: x%x x%x\n",
4995			phba->pport->port_state, psli->sli_flag);
4996
4997	pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
4998	if (!pmb)
4999		return 1;
5000
5001	/* Disable the error attention */
5002	spin_lock_irq(&phba->hbalock);
5003	if (lpfc_readl(phba->HCregaddr, &status)) {
5004		spin_unlock_irq(&phba->hbalock);
5005		mempool_free(pmb, phba->mbox_mem_pool);
5006		return 1;
5007	}
5008	status &= ~HC_ERINT_ENA;
5009	writel(status, phba->HCregaddr);
5010	readl(phba->HCregaddr); /* flush */
5011	phba->link_flag |= LS_IGNORE_ERATT;
5012	spin_unlock_irq(&phba->hbalock);
5013
5014	lpfc_kill_board(phba, pmb);
5015	pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
5016	retval = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
5017
5018	if (retval != MBX_SUCCESS) {
5019		if (retval != MBX_BUSY)
5020			mempool_free(pmb, phba->mbox_mem_pool);
5021		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
5022				"2752 KILL_BOARD command failed retval %d\n",
5023				retval);
5024		spin_lock_irq(&phba->hbalock);
5025		phba->link_flag &= ~LS_IGNORE_ERATT;
5026		spin_unlock_irq(&phba->hbalock);
5027		return 1;
5028	}
5029
5030	spin_lock_irq(&phba->hbalock);
5031	psli->sli_flag &= ~LPFC_SLI_ACTIVE;
5032	spin_unlock_irq(&phba->hbalock);
5033
5034	mempool_free(pmb, phba->mbox_mem_pool);
5035
5036	/* There is no completion for a KILL_BOARD mbox cmd. Check for an error
5037	 * attention every 100ms for 3 seconds. If we don't get ERATT after
5038	 * 3 seconds we still set HBA_ERROR state because the status of the
5039	 * board is now undefined.
5040	 */
5041	if (lpfc_readl(phba->HAregaddr, &ha_copy))
5042		return 1;
5043	while ((i++ < 30) && !(ha_copy & HA_ERATT)) {
5044		mdelay(100);
5045		if (lpfc_readl(phba->HAregaddr, &ha_copy))
5046			return 1;
5047	}
5048
5049	del_timer_sync(&psli->mbox_tmo);
5050	if (ha_copy & HA_ERATT) {
5051		writel(HA_ERATT, phba->HAregaddr);
5052		phba->pport->stopped = 1;
5053	}
5054	spin_lock_irq(&phba->hbalock);
5055	psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
5056	psli->mbox_active = NULL;
5057	phba->link_flag &= ~LS_IGNORE_ERATT;
5058	spin_unlock_irq(&phba->hbalock);
5059
5060	lpfc_hba_down_post(phba);
5061	phba->link_state = LPFC_HBA_ERROR;
5062
5063	return ha_copy & HA_ERATT ? 0 : 1;
5064}
5065
5066/**
5067 * lpfc_sli_brdreset - Reset a sli-2 or sli-3 HBA
5068 * @phba: Pointer to HBA context object.
5069 *
5070 * This function resets the HBA by writing HC_INITFF to the control
5071 * register. After the HBA resets, this function resets all the iocb ring
5072 * indices. This function disables PCI layer parity checking during
5073 * the reset.
5074 * This function returns 0 always.
5075 * The caller is not required to hold any locks.
5076 **/
5077int
5078lpfc_sli_brdreset(struct lpfc_hba *phba)
5079{
5080	struct lpfc_sli *psli;
5081	struct lpfc_sli_ring *pring;
5082	uint16_t cfg_value;
5083	int i;
5084
5085	psli = &phba->sli;
5086
5087	/* Reset HBA */
5088	lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
5089			"0325 Reset HBA Data: x%x x%x\n",
5090			(phba->pport) ? phba->pport->port_state : 0,
5091			psli->sli_flag);
5092
5093	/* perform board reset */
5094	phba->fc_eventTag = 0;
5095	phba->link_events = 0;
5096	phba->hba_flag |= HBA_NEEDS_CFG_PORT;
5097	if (phba->pport) {
5098		phba->pport->fc_myDID = 0;
5099		phba->pport->fc_prevDID = 0;
5100	}
5101
5102	/* Turn off parity checking and serr during the physical reset */
5103	if (pci_read_config_word(phba->pcidev, PCI_COMMAND, &cfg_value))
5104		return -EIO;
5105
5106	pci_write_config_word(phba->pcidev, PCI_COMMAND,
5107			      (cfg_value &
5108			       ~(PCI_COMMAND_PARITY | PCI_COMMAND_SERR)));
5109
5110	psli->sli_flag &= ~(LPFC_SLI_ACTIVE | LPFC_PROCESS_LA);
5111
5112	/* Now toggle INITFF bit in the Host Control Register */
5113	writel(HC_INITFF, phba->HCregaddr);
5114	mdelay(1);
5115	readl(phba->HCregaddr); /* flush */
5116	writel(0, phba->HCregaddr);
5117	readl(phba->HCregaddr); /* flush */
5118
5119	/* Restore PCI cmd register */
5120	pci_write_config_word(phba->pcidev, PCI_COMMAND, cfg_value);
5121
5122	/* Initialize relevant SLI info */
5123	for (i = 0; i < psli->num_rings; i++) {
5124		pring = &psli->sli3_ring[i];
5125		pring->flag = 0;
5126		pring->sli.sli3.rspidx = 0;
5127		pring->sli.sli3.next_cmdidx  = 0;
5128		pring->sli.sli3.local_getidx = 0;
5129		pring->sli.sli3.cmdidx = 0;
5130		pring->missbufcnt = 0;
5131	}
5132
5133	phba->link_state = LPFC_WARM_START;
5134	return 0;
5135}
5136
5137/**
5138 * lpfc_sli4_brdreset - Reset a sli-4 HBA
5139 * @phba: Pointer to HBA context object.
5140 *
5141 * This function resets a SLI4 HBA. This function disables PCI layer parity
5142 * checking during resets the device. The caller is not required to hold
5143 * any locks.
5144 *
5145 * This function returns 0 on success else returns negative error code.
5146 **/
5147int
5148lpfc_sli4_brdreset(struct lpfc_hba *phba)
5149{
5150	struct lpfc_sli *psli = &phba->sli;
5151	uint16_t cfg_value;
5152	int rc = 0;
5153
5154	/* Reset HBA */
5155	lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
5156			"0295 Reset HBA Data: x%x x%x x%x\n",
5157			phba->pport->port_state, psli->sli_flag,
5158			phba->hba_flag);
5159
5160	/* perform board reset */
5161	phba->fc_eventTag = 0;
5162	phba->link_events = 0;
5163	phba->pport->fc_myDID = 0;
5164	phba->pport->fc_prevDID = 0;
5165	phba->hba_flag &= ~HBA_SETUP;
5166
5167	spin_lock_irq(&phba->hbalock);
5168	psli->sli_flag &= ~(LPFC_PROCESS_LA);
5169	phba->fcf.fcf_flag = 0;
5170	spin_unlock_irq(&phba->hbalock);
5171
5172	/* Now physically reset the device */
5173	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
5174			"0389 Performing PCI function reset!\n");
5175
5176	/* Turn off parity checking and serr during the physical reset */
5177	if (pci_read_config_word(phba->pcidev, PCI_COMMAND, &cfg_value)) {
5178		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
5179				"3205 PCI read Config failed\n");
5180		return -EIO;
5181	}
5182
5183	pci_write_config_word(phba->pcidev, PCI_COMMAND, (cfg_value &
5184			      ~(PCI_COMMAND_PARITY | PCI_COMMAND_SERR)));
5185
5186	/* Perform FCoE PCI function reset before freeing queue memory */
5187	rc = lpfc_pci_function_reset(phba);
5188
5189	/* Restore PCI cmd register */
5190	pci_write_config_word(phba->pcidev, PCI_COMMAND, cfg_value);
5191
5192	return rc;
5193}
5194
5195/**
5196 * lpfc_sli_brdrestart_s3 - Restart a sli-3 hba
5197 * @phba: Pointer to HBA context object.
5198 *
5199 * This function is called in the SLI initialization code path to
5200 * restart the HBA. The caller is not required to hold any lock.
5201 * This function writes MBX_RESTART mailbox command to the SLIM and
5202 * resets the HBA. At the end of the function, it calls lpfc_hba_down_post
5203 * function to free any pending commands. The function enables
5204 * POST only during the first initialization. The function returns zero.
5205 * The function does not guarantee completion of MBX_RESTART mailbox
5206 * command before the return of this function.
5207 **/
5208static int
5209lpfc_sli_brdrestart_s3(struct lpfc_hba *phba)
5210{
5211	volatile struct MAILBOX_word0 mb;
5212	struct lpfc_sli *psli;
5213	void __iomem *to_slim;
5214
5215	spin_lock_irq(&phba->hbalock);
5216
5217	psli = &phba->sli;
5218
5219	/* Restart HBA */
5220	lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
5221			"0337 Restart HBA Data: x%x x%x\n",
5222			(phba->pport) ? phba->pport->port_state : 0,
5223			psli->sli_flag);
5224
5225	mb.word0 = 0;
5226	mb.mbxCommand = MBX_RESTART;
5227	mb.mbxHc = 1;
5228
5229	lpfc_reset_barrier(phba);
5230
5231	to_slim = phba->MBslimaddr;
5232	writel(mb.word0, to_slim);
5233	readl(to_slim); /* flush */
5234
5235	/* Only skip post after fc_ffinit is completed */
5236	if (phba->pport && phba->pport->port_state)
5237		mb.word0 = 1;	/* This is really setting up word1 */
5238	else
5239		mb.word0 = 0;	/* This is really setting up word1 */
5240	to_slim = phba->MBslimaddr + sizeof (uint32_t);
5241	writel(mb.word0, to_slim);
5242	readl(to_slim); /* flush */
5243
5244	lpfc_sli_brdreset(phba);
5245	if (phba->pport)
5246		phba->pport->stopped = 0;
5247	phba->link_state = LPFC_INIT_START;
5248	phba->hba_flag = 0;
5249	spin_unlock_irq(&phba->hbalock);
5250
5251	memset(&psli->lnk_stat_offsets, 0, sizeof(psli->lnk_stat_offsets));
5252	psli->stats_start = ktime_get_seconds();
5253
5254	/* Give the INITFF and Post time to settle. */
5255	mdelay(100);
5256
5257	lpfc_hba_down_post(phba);
5258
5259	return 0;
5260}
5261
5262/**
5263 * lpfc_sli_brdrestart_s4 - Restart the sli-4 hba
5264 * @phba: Pointer to HBA context object.
5265 *
5266 * This function is called in the SLI initialization code path to restart
5267 * a SLI4 HBA. The caller is not required to hold any lock.
5268 * At the end of the function, it calls lpfc_hba_down_post function to
5269 * free any pending commands.
5270 **/
5271static int
5272lpfc_sli_brdrestart_s4(struct lpfc_hba *phba)
5273{
5274	struct lpfc_sli *psli = &phba->sli;
5275	int rc;
5276
5277	/* Restart HBA */
5278	lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
5279			"0296 Restart HBA Data: x%x x%x\n",
5280			phba->pport->port_state, psli->sli_flag);
5281
5282	rc = lpfc_sli4_brdreset(phba);
5283	if (rc) {
5284		phba->link_state = LPFC_HBA_ERROR;
5285		goto hba_down_queue;
5286	}
5287
5288	spin_lock_irq(&phba->hbalock);
5289	phba->pport->stopped = 0;
5290	phba->link_state = LPFC_INIT_START;
5291	phba->hba_flag = 0;
5292	/* Preserve FA-PWWN expectation */
5293	phba->sli4_hba.fawwpn_flag &= LPFC_FAWWPN_FABRIC;
5294	spin_unlock_irq(&phba->hbalock);
5295
5296	memset(&psli->lnk_stat_offsets, 0, sizeof(psli->lnk_stat_offsets));
5297	psli->stats_start = ktime_get_seconds();
5298
5299hba_down_queue:
5300	lpfc_hba_down_post(phba);
5301	lpfc_sli4_queue_destroy(phba);
5302
5303	return rc;
5304}
5305
5306/**
5307 * lpfc_sli_brdrestart - Wrapper func for restarting hba
5308 * @phba: Pointer to HBA context object.
5309 *
5310 * This routine wraps the actual SLI3 or SLI4 hba restart routine from the
5311 * API jump table function pointer from the lpfc_hba struct.
5312**/
5313int
5314lpfc_sli_brdrestart(struct lpfc_hba *phba)
5315{
5316	return phba->lpfc_sli_brdrestart(phba);
5317}
5318
5319/**
5320 * lpfc_sli_chipset_init - Wait for the restart of the HBA after a restart
5321 * @phba: Pointer to HBA context object.
5322 *
5323 * This function is called after a HBA restart to wait for successful
5324 * restart of the HBA. Successful restart of the HBA is indicated by
5325 * HS_FFRDY and HS_MBRDY bits. If the HBA fails to restart even after 15
5326 * iteration, the function will restart the HBA again. The function returns
5327 * zero if HBA successfully restarted else returns negative error code.
5328 **/
5329int
5330lpfc_sli_chipset_init(struct lpfc_hba *phba)
5331{
5332	uint32_t status, i = 0;
5333
5334	/* Read the HBA Host Status Register */
5335	if (lpfc_readl(phba->HSregaddr, &status))
5336		return -EIO;
5337
5338	/* Check status register to see what current state is */
5339	i = 0;
5340	while ((status & (HS_FFRDY | HS_MBRDY)) != (HS_FFRDY | HS_MBRDY)) {
5341
5342		/* Check every 10ms for 10 retries, then every 100ms for 90
5343		 * retries, then every 1 sec for 50 retires for a total of
5344		 * ~60 seconds before reset the board again and check every
5345		 * 1 sec for 50 retries. The up to 60 seconds before the
5346		 * board ready is required by the Falcon FIPS zeroization
5347		 * complete, and any reset the board in between shall cause
5348		 * restart of zeroization, further delay the board ready.
5349		 */
5350		if (i++ >= 200) {
5351			/* Adapter failed to init, timeout, status reg
5352			   <status> */
5353			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
5354					"0436 Adapter failed to init, "
5355					"timeout, status reg x%x, "
5356					"FW Data: A8 x%x AC x%x\n", status,
5357					readl(phba->MBslimaddr + 0xa8),
5358					readl(phba->MBslimaddr + 0xac));
5359			phba->link_state = LPFC_HBA_ERROR;
5360			return -ETIMEDOUT;
5361		}
5362
5363		/* Check to see if any errors occurred during init */
5364		if (status & HS_FFERM) {
5365			/* ERROR: During chipset initialization */
5366			/* Adapter failed to init, chipset, status reg
5367			   <status> */
5368			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
5369					"0437 Adapter failed to init, "
5370					"chipset, status reg x%x, "
5371					"FW Data: A8 x%x AC x%x\n", status,
5372					readl(phba->MBslimaddr + 0xa8),
5373					readl(phba->MBslimaddr + 0xac));
5374			phba->link_state = LPFC_HBA_ERROR;
5375			return -EIO;
5376		}
5377
5378		if (i <= 10)
5379			msleep(10);
5380		else if (i <= 100)
5381			msleep(100);
5382		else
5383			msleep(1000);
5384
5385		if (i == 150) {
5386			/* Do post */
5387			phba->pport->port_state = LPFC_VPORT_UNKNOWN;
5388			lpfc_sli_brdrestart(phba);
5389		}
5390		/* Read the HBA Host Status Register */
5391		if (lpfc_readl(phba->HSregaddr, &status))
5392			return -EIO;
5393	}
5394
5395	/* Check to see if any errors occurred during init */
5396	if (status & HS_FFERM) {
5397		/* ERROR: During chipset initialization */
5398		/* Adapter failed to init, chipset, status reg <status> */
5399		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
5400				"0438 Adapter failed to init, chipset, "
5401				"status reg x%x, "
5402				"FW Data: A8 x%x AC x%x\n", status,
5403				readl(phba->MBslimaddr + 0xa8),
5404				readl(phba->MBslimaddr + 0xac));
5405		phba->link_state = LPFC_HBA_ERROR;
5406		return -EIO;
5407	}
5408
5409	phba->hba_flag |= HBA_NEEDS_CFG_PORT;
5410
5411	/* Clear all interrupt enable conditions */
5412	writel(0, phba->HCregaddr);
5413	readl(phba->HCregaddr); /* flush */
5414
5415	/* setup host attn register */
5416	writel(0xffffffff, phba->HAregaddr);
5417	readl(phba->HAregaddr); /* flush */
5418	return 0;
5419}
5420
5421/**
5422 * lpfc_sli_hbq_count - Get the number of HBQs to be configured
5423 *
5424 * This function calculates and returns the number of HBQs required to be
5425 * configured.
5426 **/
5427int
5428lpfc_sli_hbq_count(void)
5429{
5430	return ARRAY_SIZE(lpfc_hbq_defs);
5431}
5432
5433/**
5434 * lpfc_sli_hbq_entry_count - Calculate total number of hbq entries
5435 *
5436 * This function adds the number of hbq entries in every HBQ to get
5437 * the total number of hbq entries required for the HBA and returns
5438 * the total count.
5439 **/
5440static int
5441lpfc_sli_hbq_entry_count(void)
5442{
5443	int  hbq_count = lpfc_sli_hbq_count();
5444	int  count = 0;
5445	int  i;
5446
5447	for (i = 0; i < hbq_count; ++i)
5448		count += lpfc_hbq_defs[i]->entry_count;
5449	return count;
5450}
5451
5452/**
5453 * lpfc_sli_hbq_size - Calculate memory required for all hbq entries
5454 *
5455 * This function calculates amount of memory required for all hbq entries
5456 * to be configured and returns the total memory required.
5457 **/
5458int
5459lpfc_sli_hbq_size(void)
5460{
5461	return lpfc_sli_hbq_entry_count() * sizeof(struct lpfc_hbq_entry);
5462}
5463
5464/**
5465 * lpfc_sli_hbq_setup - configure and initialize HBQs
5466 * @phba: Pointer to HBA context object.
5467 *
5468 * This function is called during the SLI initialization to configure
5469 * all the HBQs and post buffers to the HBQ. The caller is not
5470 * required to hold any locks. This function will return zero if successful
5471 * else it will return negative error code.
5472 **/
5473static int
5474lpfc_sli_hbq_setup(struct lpfc_hba *phba)
5475{
5476	int  hbq_count = lpfc_sli_hbq_count();
5477	LPFC_MBOXQ_t *pmb;
5478	MAILBOX_t *pmbox;
5479	uint32_t hbqno;
5480	uint32_t hbq_entry_index;
5481
5482				/* Get a Mailbox buffer to setup mailbox
5483				 * commands for HBA initialization
5484				 */
5485	pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
5486
5487	if (!pmb)
5488		return -ENOMEM;
5489
5490	pmbox = &pmb->u.mb;
5491
5492	/* Initialize the struct lpfc_sli_hbq structure for each hbq */
5493	phba->link_state = LPFC_INIT_MBX_CMDS;
5494	phba->hbq_in_use = 1;
5495
5496	hbq_entry_index = 0;
5497	for (hbqno = 0; hbqno < hbq_count; ++hbqno) {
5498		phba->hbqs[hbqno].next_hbqPutIdx = 0;
5499		phba->hbqs[hbqno].hbqPutIdx      = 0;
5500		phba->hbqs[hbqno].local_hbqGetIdx   = 0;
5501		phba->hbqs[hbqno].entry_count =
5502			lpfc_hbq_defs[hbqno]->entry_count;
5503		lpfc_config_hbq(phba, hbqno, lpfc_hbq_defs[hbqno],
5504			hbq_entry_index, pmb);
5505		hbq_entry_index += phba->hbqs[hbqno].entry_count;
5506
5507		if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) {
5508			/* Adapter failed to init, mbxCmd <cmd> CFG_RING,
5509			   mbxStatus <status>, ring <num> */
5510
5511			lpfc_printf_log(phba, KERN_ERR,
5512					LOG_SLI | LOG_VPORT,
5513					"1805 Adapter failed to init. "
5514					"Data: x%x x%x x%x\n",
5515					pmbox->mbxCommand,
5516					pmbox->mbxStatus, hbqno);
5517
5518			phba->link_state = LPFC_HBA_ERROR;
5519			mempool_free(pmb, phba->mbox_mem_pool);
5520			return -ENXIO;
5521		}
5522	}
5523	phba->hbq_count = hbq_count;
5524
5525	mempool_free(pmb, phba->mbox_mem_pool);
5526
5527	/* Initially populate or replenish the HBQs */
5528	for (hbqno = 0; hbqno < hbq_count; ++hbqno)
5529		lpfc_sli_hbqbuf_init_hbqs(phba, hbqno);
5530	return 0;
5531}
5532
5533/**
5534 * lpfc_sli4_rb_setup - Initialize and post RBs to HBA
5535 * @phba: Pointer to HBA context object.
5536 *
5537 * This function is called during the SLI initialization to configure
5538 * all the HBQs and post buffers to the HBQ. The caller is not
5539 * required to hold any locks. This function will return zero if successful
5540 * else it will return negative error code.
5541 **/
5542static int
5543lpfc_sli4_rb_setup(struct lpfc_hba *phba)
5544{
5545	phba->hbq_in_use = 1;
5546	/**
5547	 * Specific case when the MDS diagnostics is enabled and supported.
5548	 * The receive buffer count is truncated to manage the incoming
5549	 * traffic.
5550	 **/
5551	if (phba->cfg_enable_mds_diags && phba->mds_diags_support)
5552		phba->hbqs[LPFC_ELS_HBQ].entry_count =
5553			lpfc_hbq_defs[LPFC_ELS_HBQ]->entry_count >> 1;
5554	else
5555		phba->hbqs[LPFC_ELS_HBQ].entry_count =
5556			lpfc_hbq_defs[LPFC_ELS_HBQ]->entry_count;
5557	phba->hbq_count = 1;
5558	lpfc_sli_hbqbuf_init_hbqs(phba, LPFC_ELS_HBQ);
5559	/* Initially populate or replenish the HBQs */
5560	return 0;
5561}
5562
5563/**
5564 * lpfc_sli_config_port - Issue config port mailbox command
5565 * @phba: Pointer to HBA context object.
5566 * @sli_mode: sli mode - 2/3
5567 *
5568 * This function is called by the sli initialization code path
5569 * to issue config_port mailbox command. This function restarts the
5570 * HBA firmware and issues a config_port mailbox command to configure
5571 * the SLI interface in the sli mode specified by sli_mode
5572 * variable. The caller is not required to hold any locks.
5573 * The function returns 0 if successful, else returns negative error
5574 * code.
5575 **/
5576int
5577lpfc_sli_config_port(struct lpfc_hba *phba, int sli_mode)
5578{
5579	LPFC_MBOXQ_t *pmb;
5580	uint32_t resetcount = 0, rc = 0, done = 0;
5581
5582	pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
5583	if (!pmb) {
5584		phba->link_state = LPFC_HBA_ERROR;
5585		return -ENOMEM;
5586	}
5587
5588	phba->sli_rev = sli_mode;
5589	while (resetcount < 2 && !done) {
5590		spin_lock_irq(&phba->hbalock);
5591		phba->sli.sli_flag |= LPFC_SLI_MBOX_ACTIVE;
5592		spin_unlock_irq(&phba->hbalock);
5593		phba->pport->port_state = LPFC_VPORT_UNKNOWN;
5594		lpfc_sli_brdrestart(phba);
5595		rc = lpfc_sli_chipset_init(phba);
5596		if (rc)
5597			break;
5598
5599		spin_lock_irq(&phba->hbalock);
5600		phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
5601		spin_unlock_irq(&phba->hbalock);
5602		resetcount++;
5603
5604		/* Call pre CONFIG_PORT mailbox command initialization.  A
5605		 * value of 0 means the call was successful.  Any other
5606		 * nonzero value is a failure, but if ERESTART is returned,
5607		 * the driver may reset the HBA and try again.
5608		 */
5609		rc = lpfc_config_port_prep(phba);
5610		if (rc == -ERESTART) {
5611			phba->link_state = LPFC_LINK_UNKNOWN;
5612			continue;
5613		} else if (rc)
5614			break;
5615
5616		phba->link_state = LPFC_INIT_MBX_CMDS;
5617		lpfc_config_port(phba, pmb);
5618		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
5619		phba->sli3_options &= ~(LPFC_SLI3_NPIV_ENABLED |
5620					LPFC_SLI3_HBQ_ENABLED |
5621					LPFC_SLI3_CRP_ENABLED |
5622					LPFC_SLI3_DSS_ENABLED);
5623		if (rc != MBX_SUCCESS) {
5624			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
5625				"0442 Adapter failed to init, mbxCmd x%x "
5626				"CONFIG_PORT, mbxStatus x%x Data: x%x\n",
5627				pmb->u.mb.mbxCommand, pmb->u.mb.mbxStatus, 0);
5628			spin_lock_irq(&phba->hbalock);
5629			phba->sli.sli_flag &= ~LPFC_SLI_ACTIVE;
5630			spin_unlock_irq(&phba->hbalock);
5631			rc = -ENXIO;
5632		} else {
5633			/* Allow asynchronous mailbox command to go through */
5634			spin_lock_irq(&phba->hbalock);
5635			phba->sli.sli_flag &= ~LPFC_SLI_ASYNC_MBX_BLK;
5636			spin_unlock_irq(&phba->hbalock);
5637			done = 1;
5638
5639			if ((pmb->u.mb.un.varCfgPort.casabt == 1) &&
5640			    (pmb->u.mb.un.varCfgPort.gasabt == 0))
5641				lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
5642					"3110 Port did not grant ASABT\n");
5643		}
5644	}
5645	if (!done) {
5646		rc = -EINVAL;
5647		goto do_prep_failed;
5648	}
5649	if (pmb->u.mb.un.varCfgPort.sli_mode == 3) {
5650		if (!pmb->u.mb.un.varCfgPort.cMA) {
5651			rc = -ENXIO;
5652			goto do_prep_failed;
5653		}
5654		if (phba->max_vpi && pmb->u.mb.un.varCfgPort.gmv) {
5655			phba->sli3_options |= LPFC_SLI3_NPIV_ENABLED;
5656			phba->max_vpi = pmb->u.mb.un.varCfgPort.max_vpi;
5657			phba->max_vports = (phba->max_vpi > phba->max_vports) ?
5658				phba->max_vpi : phba->max_vports;
5659
5660		} else
5661			phba->max_vpi = 0;
5662		if (pmb->u.mb.un.varCfgPort.gerbm)
5663			phba->sli3_options |= LPFC_SLI3_HBQ_ENABLED;
5664		if (pmb->u.mb.un.varCfgPort.gcrp)
5665			phba->sli3_options |= LPFC_SLI3_CRP_ENABLED;
5666
5667		phba->hbq_get = phba->mbox->us.s3_pgp.hbq_get;
5668		phba->port_gp = phba->mbox->us.s3_pgp.port;
5669
5670		if (phba->sli3_options & LPFC_SLI3_BG_ENABLED) {
5671			if (pmb->u.mb.un.varCfgPort.gbg == 0) {
5672				phba->cfg_enable_bg = 0;
5673				phba->sli3_options &= ~LPFC_SLI3_BG_ENABLED;
5674				lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
5675						"0443 Adapter did not grant "
5676						"BlockGuard\n");
5677			}
5678		}
5679	} else {
5680		phba->hbq_get = NULL;
5681		phba->port_gp = phba->mbox->us.s2.port;
5682		phba->max_vpi = 0;
5683	}
5684do_prep_failed:
5685	mempool_free(pmb, phba->mbox_mem_pool);
5686	return rc;
5687}
5688
5689
5690/**
5691 * lpfc_sli_hba_setup - SLI initialization function
5692 * @phba: Pointer to HBA context object.
5693 *
5694 * This function is the main SLI initialization function. This function
5695 * is called by the HBA initialization code, HBA reset code and HBA
5696 * error attention handler code. Caller is not required to hold any
5697 * locks. This function issues config_port mailbox command to configure
5698 * the SLI, setup iocb rings and HBQ rings. In the end the function
5699 * calls the config_port_post function to issue init_link mailbox
5700 * command and to start the discovery. The function will return zero
5701 * if successful, else it will return negative error code.
5702 **/
5703int
5704lpfc_sli_hba_setup(struct lpfc_hba *phba)
5705{
5706	uint32_t rc;
5707	int  i;
5708	int longs;
5709
5710	/* Enable ISR already does config_port because of config_msi mbx */
5711	if (phba->hba_flag & HBA_NEEDS_CFG_PORT) {
5712		rc = lpfc_sli_config_port(phba, LPFC_SLI_REV3);
5713		if (rc)
5714			return -EIO;
5715		phba->hba_flag &= ~HBA_NEEDS_CFG_PORT;
5716	}
5717	phba->fcp_embed_io = 0;	/* SLI4 FC support only */
5718
5719	if (phba->sli_rev == 3) {
5720		phba->iocb_cmd_size = SLI3_IOCB_CMD_SIZE;
5721		phba->iocb_rsp_size = SLI3_IOCB_RSP_SIZE;
5722	} else {
5723		phba->iocb_cmd_size = SLI2_IOCB_CMD_SIZE;
5724		phba->iocb_rsp_size = SLI2_IOCB_RSP_SIZE;
5725		phba->sli3_options = 0;
5726	}
5727
5728	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
5729			"0444 Firmware in SLI %x mode. Max_vpi %d\n",
5730			phba->sli_rev, phba->max_vpi);
5731	rc = lpfc_sli_ring_map(phba);
5732
5733	if (rc)
5734		goto lpfc_sli_hba_setup_error;
5735
5736	/* Initialize VPIs. */
5737	if (phba->sli_rev == LPFC_SLI_REV3) {
5738		/*
5739		 * The VPI bitmask and physical ID array are allocated
5740		 * and initialized once only - at driver load.  A port
5741		 * reset doesn't need to reinitialize this memory.
5742		 */
5743		if ((phba->vpi_bmask == NULL) && (phba->vpi_ids == NULL)) {
5744			longs = (phba->max_vpi + BITS_PER_LONG) / BITS_PER_LONG;
5745			phba->vpi_bmask = kcalloc(longs,
5746						  sizeof(unsigned long),
5747						  GFP_KERNEL);
5748			if (!phba->vpi_bmask) {
5749				rc = -ENOMEM;
5750				goto lpfc_sli_hba_setup_error;
5751			}
5752
5753			phba->vpi_ids = kcalloc(phba->max_vpi + 1,
5754						sizeof(uint16_t),
5755						GFP_KERNEL);
5756			if (!phba->vpi_ids) {
5757				kfree(phba->vpi_bmask);
5758				rc = -ENOMEM;
5759				goto lpfc_sli_hba_setup_error;
5760			}
5761			for (i = 0; i < phba->max_vpi; i++)
5762				phba->vpi_ids[i] = i;
5763		}
5764	}
5765
5766	/* Init HBQs */
5767	if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
5768		rc = lpfc_sli_hbq_setup(phba);
5769		if (rc)
5770			goto lpfc_sli_hba_setup_error;
5771	}
5772	spin_lock_irq(&phba->hbalock);
5773	phba->sli.sli_flag |= LPFC_PROCESS_LA;
5774	spin_unlock_irq(&phba->hbalock);
5775
5776	rc = lpfc_config_port_post(phba);
5777	if (rc)
5778		goto lpfc_sli_hba_setup_error;
5779
5780	return rc;
5781
5782lpfc_sli_hba_setup_error:
5783	phba->link_state = LPFC_HBA_ERROR;
5784	lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
5785			"0445 Firmware initialization failed\n");
5786	return rc;
5787}
5788
5789/**
5790 * lpfc_sli4_read_fcoe_params - Read fcoe params from conf region
5791 * @phba: Pointer to HBA context object.
5792 *
5793 * This function issue a dump mailbox command to read config region
5794 * 23 and parse the records in the region and populate driver
5795 * data structure.
5796 **/
5797static int
5798lpfc_sli4_read_fcoe_params(struct lpfc_hba *phba)
5799{
5800	LPFC_MBOXQ_t *mboxq;
5801	struct lpfc_dmabuf *mp;
5802	struct lpfc_mqe *mqe;
5803	uint32_t data_length;
5804	int rc;
5805
5806	/* Program the default value of vlan_id and fc_map */
5807	phba->valid_vlan = 0;
5808	phba->fc_map[0] = LPFC_FCOE_FCF_MAP0;
5809	phba->fc_map[1] = LPFC_FCOE_FCF_MAP1;
5810	phba->fc_map[2] = LPFC_FCOE_FCF_MAP2;
5811
5812	mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
5813	if (!mboxq)
5814		return -ENOMEM;
5815
5816	mqe = &mboxq->u.mqe;
5817	if (lpfc_sli4_dump_cfg_rg23(phba, mboxq)) {
5818		rc = -ENOMEM;
5819		goto out_free_mboxq;
5820	}
5821
5822	mp = mboxq->ctx_buf;
5823	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
5824
5825	lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
5826			"(%d):2571 Mailbox cmd x%x Status x%x "
5827			"Data: x%x x%x x%x x%x x%x x%x x%x x%x x%x "
5828			"x%x x%x x%x x%x x%x x%x x%x x%x x%x "
5829			"CQ: x%x x%x x%x x%x\n",
5830			mboxq->vport ? mboxq->vport->vpi : 0,
5831			bf_get(lpfc_mqe_command, mqe),
5832			bf_get(lpfc_mqe_status, mqe),
5833			mqe->un.mb_words[0], mqe->un.mb_words[1],
5834			mqe->un.mb_words[2], mqe->un.mb_words[3],
5835			mqe->un.mb_words[4], mqe->un.mb_words[5],
5836			mqe->un.mb_words[6], mqe->un.mb_words[7],
5837			mqe->un.mb_words[8], mqe->un.mb_words[9],
5838			mqe->un.mb_words[10], mqe->un.mb_words[11],
5839			mqe->un.mb_words[12], mqe->un.mb_words[13],
5840			mqe->un.mb_words[14], mqe->un.mb_words[15],
5841			mqe->un.mb_words[16], mqe->un.mb_words[50],
5842			mboxq->mcqe.word0,
5843			mboxq->mcqe.mcqe_tag0, 	mboxq->mcqe.mcqe_tag1,
5844			mboxq->mcqe.trailer);
5845
5846	if (rc) {
5847		rc = -EIO;
5848		goto out_free_mboxq;
5849	}
5850	data_length = mqe->un.mb_words[5];
5851	if (data_length > DMP_RGN23_SIZE) {
5852		rc = -EIO;
5853		goto out_free_mboxq;
5854	}
5855
5856	lpfc_parse_fcoe_conf(phba, mp->virt, data_length);
5857	rc = 0;
5858
5859out_free_mboxq:
5860	lpfc_mbox_rsrc_cleanup(phba, mboxq, MBOX_THD_UNLOCKED);
5861	return rc;
5862}
5863
5864/**
5865 * lpfc_sli4_read_rev - Issue READ_REV and collect vpd data
5866 * @phba: pointer to lpfc hba data structure.
5867 * @mboxq: pointer to the LPFC_MBOXQ_t structure.
5868 * @vpd: pointer to the memory to hold resulting port vpd data.
5869 * @vpd_size: On input, the number of bytes allocated to @vpd.
5870 *	      On output, the number of data bytes in @vpd.
5871 *
5872 * This routine executes a READ_REV SLI4 mailbox command.  In
5873 * addition, this routine gets the port vpd data.
5874 *
5875 * Return codes
5876 * 	0 - successful
5877 * 	-ENOMEM - could not allocated memory.
5878 **/
5879static int
5880lpfc_sli4_read_rev(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq,
5881		    uint8_t *vpd, uint32_t *vpd_size)
5882{
5883	int rc = 0;
5884	uint32_t dma_size;
5885	struct lpfc_dmabuf *dmabuf;
5886	struct lpfc_mqe *mqe;
5887
5888	dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
5889	if (!dmabuf)
5890		return -ENOMEM;
5891
5892	/*
5893	 * Get a DMA buffer for the vpd data resulting from the READ_REV
5894	 * mailbox command.
5895	 */
5896	dma_size = *vpd_size;
5897	dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev, dma_size,
5898					  &dmabuf->phys, GFP_KERNEL);
5899	if (!dmabuf->virt) {
5900		kfree(dmabuf);
5901		return -ENOMEM;
5902	}
5903
5904	/*
5905	 * The SLI4 implementation of READ_REV conflicts at word1,
5906	 * bits 31:16 and SLI4 adds vpd functionality not present
5907	 * in SLI3.  This code corrects the conflicts.
5908	 */
5909	lpfc_read_rev(phba, mboxq);
5910	mqe = &mboxq->u.mqe;
5911	mqe->un.read_rev.vpd_paddr_high = putPaddrHigh(dmabuf->phys);
5912	mqe->un.read_rev.vpd_paddr_low = putPaddrLow(dmabuf->phys);
5913	mqe->un.read_rev.word1 &= 0x0000FFFF;
5914	bf_set(lpfc_mbx_rd_rev_vpd, &mqe->un.read_rev, 1);
5915	bf_set(lpfc_mbx_rd_rev_avail_len, &mqe->un.read_rev, dma_size);
5916
5917	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
5918	if (rc) {
5919		dma_free_coherent(&phba->pcidev->dev, dma_size,
5920				  dmabuf->virt, dmabuf->phys);
5921		kfree(dmabuf);
5922		return -EIO;
5923	}
5924
5925	/*
5926	 * The available vpd length cannot be bigger than the
5927	 * DMA buffer passed to the port.  Catch the less than
5928	 * case and update the caller's size.
5929	 */
5930	if (mqe->un.read_rev.avail_vpd_len < *vpd_size)
5931		*vpd_size = mqe->un.read_rev.avail_vpd_len;
5932
5933	memcpy(vpd, dmabuf->virt, *vpd_size);
5934
5935	dma_free_coherent(&phba->pcidev->dev, dma_size,
5936			  dmabuf->virt, dmabuf->phys);
5937	kfree(dmabuf);
5938	return 0;
5939}
5940
5941/**
5942 * lpfc_sli4_get_ctl_attr - Retrieve SLI4 device controller attributes
5943 * @phba: pointer to lpfc hba data structure.
5944 *
5945 * This routine retrieves SLI4 device physical port name this PCI function
5946 * is attached to.
5947 *
5948 * Return codes
5949 *      0 - successful
5950 *      otherwise - failed to retrieve controller attributes
5951 **/
5952static int
5953lpfc_sli4_get_ctl_attr(struct lpfc_hba *phba)
5954{
5955	LPFC_MBOXQ_t *mboxq;
5956	struct lpfc_mbx_get_cntl_attributes *mbx_cntl_attr;
5957	struct lpfc_controller_attribute *cntl_attr;
5958	void *virtaddr = NULL;
5959	uint32_t alloclen, reqlen;
5960	uint32_t shdr_status, shdr_add_status;
5961	union lpfc_sli4_cfg_shdr *shdr;
5962	int rc;
5963
5964	mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
5965	if (!mboxq)
5966		return -ENOMEM;
5967
5968	/* Send COMMON_GET_CNTL_ATTRIBUTES mbox cmd */
5969	reqlen = sizeof(struct lpfc_mbx_get_cntl_attributes);
5970	alloclen = lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
5971			LPFC_MBOX_OPCODE_GET_CNTL_ATTRIBUTES, reqlen,
5972			LPFC_SLI4_MBX_NEMBED);
5973
5974	if (alloclen < reqlen) {
5975		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
5976				"3084 Allocated DMA memory size (%d) is "
5977				"less than the requested DMA memory size "
5978				"(%d)\n", alloclen, reqlen);
5979		rc = -ENOMEM;
5980		goto out_free_mboxq;
5981	}
5982	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
5983	virtaddr = mboxq->sge_array->addr[0];
5984	mbx_cntl_attr = (struct lpfc_mbx_get_cntl_attributes *)virtaddr;
5985	shdr = &mbx_cntl_attr->cfg_shdr;
5986	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
5987	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
5988	if (shdr_status || shdr_add_status || rc) {
5989		lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
5990				"3085 Mailbox x%x (x%x/x%x) failed, "
5991				"rc:x%x, status:x%x, add_status:x%x\n",
5992				bf_get(lpfc_mqe_command, &mboxq->u.mqe),
5993				lpfc_sli_config_mbox_subsys_get(phba, mboxq),
5994				lpfc_sli_config_mbox_opcode_get(phba, mboxq),
5995				rc, shdr_status, shdr_add_status);
5996		rc = -ENXIO;
5997		goto out_free_mboxq;
5998	}
5999
6000	cntl_attr = &mbx_cntl_attr->cntl_attr;
6001	phba->sli4_hba.lnk_info.lnk_dv = LPFC_LNK_DAT_VAL;
6002	phba->sli4_hba.lnk_info.lnk_tp =
6003		bf_get(lpfc_cntl_attr_lnk_type, cntl_attr);
6004	phba->sli4_hba.lnk_info.lnk_no =
6005		bf_get(lpfc_cntl_attr_lnk_numb, cntl_attr);
6006	phba->sli4_hba.flash_id = bf_get(lpfc_cntl_attr_flash_id, cntl_attr);
6007	phba->sli4_hba.asic_rev = bf_get(lpfc_cntl_attr_asic_rev, cntl_attr);
6008
6009	memset(phba->BIOSVersion, 0, sizeof(phba->BIOSVersion));
6010	strlcat(phba->BIOSVersion, (char *)cntl_attr->bios_ver_str,
6011		sizeof(phba->BIOSVersion));
6012
6013	lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
6014			"3086 lnk_type:%d, lnk_numb:%d, bios_ver:%s, "
6015			"flash_id: x%02x, asic_rev: x%02x\n",
6016			phba->sli4_hba.lnk_info.lnk_tp,
6017			phba->sli4_hba.lnk_info.lnk_no,
6018			phba->BIOSVersion, phba->sli4_hba.flash_id,
6019			phba->sli4_hba.asic_rev);
6020out_free_mboxq:
6021	if (bf_get(lpfc_mqe_command, &mboxq->u.mqe) == MBX_SLI4_CONFIG)
6022		lpfc_sli4_mbox_cmd_free(phba, mboxq);
6023	else
6024		mempool_free(mboxq, phba->mbox_mem_pool);
6025	return rc;
6026}
6027
6028/**
6029 * lpfc_sli4_retrieve_pport_name - Retrieve SLI4 device physical port name
6030 * @phba: pointer to lpfc hba data structure.
6031 *
6032 * This routine retrieves SLI4 device physical port name this PCI function
6033 * is attached to.
6034 *
6035 * Return codes
6036 *      0 - successful
6037 *      otherwise - failed to retrieve physical port name
6038 **/
6039static int
6040lpfc_sli4_retrieve_pport_name(struct lpfc_hba *phba)
6041{
6042	LPFC_MBOXQ_t *mboxq;
6043	struct lpfc_mbx_get_port_name *get_port_name;
6044	uint32_t shdr_status, shdr_add_status;
6045	union lpfc_sli4_cfg_shdr *shdr;
6046	char cport_name = 0;
6047	int rc;
6048
6049	/* We assume nothing at this point */
6050	phba->sli4_hba.lnk_info.lnk_dv = LPFC_LNK_DAT_INVAL;
6051	phba->sli4_hba.pport_name_sta = LPFC_SLI4_PPNAME_NON;
6052
6053	mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
6054	if (!mboxq)
6055		return -ENOMEM;
6056	/* obtain link type and link number via READ_CONFIG */
6057	phba->sli4_hba.lnk_info.lnk_dv = LPFC_LNK_DAT_INVAL;
6058	lpfc_sli4_read_config(phba);
6059
6060	if (phba->sli4_hba.fawwpn_flag & LPFC_FAWWPN_CONFIG)
6061		phba->sli4_hba.fawwpn_flag |= LPFC_FAWWPN_FABRIC;
6062
6063	if (phba->sli4_hba.lnk_info.lnk_dv == LPFC_LNK_DAT_VAL)
6064		goto retrieve_ppname;
6065
6066	/* obtain link type and link number via COMMON_GET_CNTL_ATTRIBUTES */
6067	rc = lpfc_sli4_get_ctl_attr(phba);
6068	if (rc)
6069		goto out_free_mboxq;
6070
6071retrieve_ppname:
6072	lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
6073		LPFC_MBOX_OPCODE_GET_PORT_NAME,
6074		sizeof(struct lpfc_mbx_get_port_name) -
6075		sizeof(struct lpfc_sli4_cfg_mhdr),
6076		LPFC_SLI4_MBX_EMBED);
6077	get_port_name = &mboxq->u.mqe.un.get_port_name;
6078	shdr = (union lpfc_sli4_cfg_shdr *)&get_port_name->header.cfg_shdr;
6079	bf_set(lpfc_mbox_hdr_version, &shdr->request, LPFC_OPCODE_VERSION_1);
6080	bf_set(lpfc_mbx_get_port_name_lnk_type, &get_port_name->u.request,
6081		phba->sli4_hba.lnk_info.lnk_tp);
6082	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
6083	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
6084	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
6085	if (shdr_status || shdr_add_status || rc) {
6086		lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
6087				"3087 Mailbox x%x (x%x/x%x) failed: "
6088				"rc:x%x, status:x%x, add_status:x%x\n",
6089				bf_get(lpfc_mqe_command, &mboxq->u.mqe),
6090				lpfc_sli_config_mbox_subsys_get(phba, mboxq),
6091				lpfc_sli_config_mbox_opcode_get(phba, mboxq),
6092				rc, shdr_status, shdr_add_status);
6093		rc = -ENXIO;
6094		goto out_free_mboxq;
6095	}
6096	switch (phba->sli4_hba.lnk_info.lnk_no) {
6097	case LPFC_LINK_NUMBER_0:
6098		cport_name = bf_get(lpfc_mbx_get_port_name_name0,
6099				&get_port_name->u.response);
6100		phba->sli4_hba.pport_name_sta = LPFC_SLI4_PPNAME_GET;
6101		break;
6102	case LPFC_LINK_NUMBER_1:
6103		cport_name = bf_get(lpfc_mbx_get_port_name_name1,
6104				&get_port_name->u.response);
6105		phba->sli4_hba.pport_name_sta = LPFC_SLI4_PPNAME_GET;
6106		break;
6107	case LPFC_LINK_NUMBER_2:
6108		cport_name = bf_get(lpfc_mbx_get_port_name_name2,
6109				&get_port_name->u.response);
6110		phba->sli4_hba.pport_name_sta = LPFC_SLI4_PPNAME_GET;
6111		break;
6112	case LPFC_LINK_NUMBER_3:
6113		cport_name = bf_get(lpfc_mbx_get_port_name_name3,
6114				&get_port_name->u.response);
6115		phba->sli4_hba.pport_name_sta = LPFC_SLI4_PPNAME_GET;
6116		break;
6117	default:
6118		break;
6119	}
6120
6121	if (phba->sli4_hba.pport_name_sta == LPFC_SLI4_PPNAME_GET) {
6122		phba->Port[0] = cport_name;
6123		phba->Port[1] = '\0';
6124		lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
6125				"3091 SLI get port name: %s\n", phba->Port);
6126	}
6127
6128out_free_mboxq:
6129	if (bf_get(lpfc_mqe_command, &mboxq->u.mqe) == MBX_SLI4_CONFIG)
6130		lpfc_sli4_mbox_cmd_free(phba, mboxq);
6131	else
6132		mempool_free(mboxq, phba->mbox_mem_pool);
6133	return rc;
6134}
6135
6136/**
6137 * lpfc_sli4_arm_cqeq_intr - Arm sli-4 device completion and event queues
6138 * @phba: pointer to lpfc hba data structure.
6139 *
6140 * This routine is called to explicitly arm the SLI4 device's completion and
6141 * event queues
6142 **/
6143static void
6144lpfc_sli4_arm_cqeq_intr(struct lpfc_hba *phba)
6145{
6146	int qidx;
6147	struct lpfc_sli4_hba *sli4_hba = &phba->sli4_hba;
6148	struct lpfc_sli4_hdw_queue *qp;
6149	struct lpfc_queue *eq;
6150
6151	sli4_hba->sli4_write_cq_db(phba, sli4_hba->mbx_cq, 0, LPFC_QUEUE_REARM);
6152	sli4_hba->sli4_write_cq_db(phba, sli4_hba->els_cq, 0, LPFC_QUEUE_REARM);
6153	if (sli4_hba->nvmels_cq)
6154		sli4_hba->sli4_write_cq_db(phba, sli4_hba->nvmels_cq, 0,
6155					   LPFC_QUEUE_REARM);
6156
6157	if (sli4_hba->hdwq) {
6158		/* Loop thru all Hardware Queues */
6159		for (qidx = 0; qidx < phba->cfg_hdw_queue; qidx++) {
6160			qp = &sli4_hba->hdwq[qidx];
6161			/* ARM the corresponding CQ */
6162			sli4_hba->sli4_write_cq_db(phba, qp->io_cq, 0,
6163						LPFC_QUEUE_REARM);
6164		}
6165
6166		/* Loop thru all IRQ vectors */
6167		for (qidx = 0; qidx < phba->cfg_irq_chann; qidx++) {
6168			eq = sli4_hba->hba_eq_hdl[qidx].eq;
6169			/* ARM the corresponding EQ */
6170			sli4_hba->sli4_write_eq_db(phba, eq,
6171						   0, LPFC_QUEUE_REARM);
6172		}
6173	}
6174
6175	if (phba->nvmet_support) {
6176		for (qidx = 0; qidx < phba->cfg_nvmet_mrq; qidx++) {
6177			sli4_hba->sli4_write_cq_db(phba,
6178				sli4_hba->nvmet_cqset[qidx], 0,
6179				LPFC_QUEUE_REARM);
6180		}
6181	}
6182}
6183
6184/**
6185 * lpfc_sli4_get_avail_extnt_rsrc - Get available resource extent count.
6186 * @phba: Pointer to HBA context object.
6187 * @type: The resource extent type.
6188 * @extnt_count: buffer to hold port available extent count.
6189 * @extnt_size: buffer to hold element count per extent.
6190 *
6191 * This function calls the port and retrievs the number of available
6192 * extents and their size for a particular extent type.
6193 *
6194 * Returns: 0 if successful.  Nonzero otherwise.
6195 **/
6196int
6197lpfc_sli4_get_avail_extnt_rsrc(struct lpfc_hba *phba, uint16_t type,
6198			       uint16_t *extnt_count, uint16_t *extnt_size)
6199{
6200	int rc = 0;
6201	uint32_t length;
6202	uint32_t mbox_tmo;
6203	struct lpfc_mbx_get_rsrc_extent_info *rsrc_info;
6204	LPFC_MBOXQ_t *mbox;
6205
6206	*extnt_count = 0;
6207	*extnt_size = 0;
6208
6209	mbox = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
6210	if (!mbox)
6211		return -ENOMEM;
6212
6213	/* Find out how many extents are available for this resource type */
6214	length = (sizeof(struct lpfc_mbx_get_rsrc_extent_info) -
6215		  sizeof(struct lpfc_sli4_cfg_mhdr));
6216	lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
6217			 LPFC_MBOX_OPCODE_GET_RSRC_EXTENT_INFO,
6218			 length, LPFC_SLI4_MBX_EMBED);
6219
6220	/* Send an extents count of 0 - the GET doesn't use it. */
6221	rc = lpfc_sli4_mbox_rsrc_extent(phba, mbox, 0, type,
6222					LPFC_SLI4_MBX_EMBED);
6223	if (unlikely(rc)) {
6224		rc = -EIO;
6225		goto err_exit;
6226	}
6227
6228	if (!phba->sli4_hba.intr_enable)
6229		rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
6230	else {
6231		mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
6232		rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
6233	}
6234	if (unlikely(rc)) {
6235		rc = -EIO;
6236		goto err_exit;
6237	}
6238
6239	rsrc_info = &mbox->u.mqe.un.rsrc_extent_info;
6240	if (bf_get(lpfc_mbox_hdr_status,
6241		   &rsrc_info->header.cfg_shdr.response)) {
6242		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
6243				"2930 Failed to get resource extents "
6244				"Status 0x%x Add'l Status 0x%x\n",
6245				bf_get(lpfc_mbox_hdr_status,
6246				       &rsrc_info->header.cfg_shdr.response),
6247				bf_get(lpfc_mbox_hdr_add_status,
6248				       &rsrc_info->header.cfg_shdr.response));
6249		rc = -EIO;
6250		goto err_exit;
6251	}
6252
6253	*extnt_count = bf_get(lpfc_mbx_get_rsrc_extent_info_cnt,
6254			      &rsrc_info->u.rsp);
6255	*extnt_size = bf_get(lpfc_mbx_get_rsrc_extent_info_size,
6256			     &rsrc_info->u.rsp);
6257
6258	lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
6259			"3162 Retrieved extents type-%d from port: count:%d, "
6260			"size:%d\n", type, *extnt_count, *extnt_size);
6261
6262err_exit:
6263	mempool_free(mbox, phba->mbox_mem_pool);
6264	return rc;
6265}
6266
6267/**
6268 * lpfc_sli4_chk_avail_extnt_rsrc - Check for available SLI4 resource extents.
6269 * @phba: Pointer to HBA context object.
6270 * @type: The extent type to check.
6271 *
6272 * This function reads the current available extents from the port and checks
6273 * if the extent count or extent size has changed since the last access.
6274 * Callers use this routine post port reset to understand if there is a
6275 * extent reprovisioning requirement.
6276 *
6277 * Returns:
6278 *   -Error: error indicates problem.
6279 *   1: Extent count or size has changed.
6280 *   0: No changes.
6281 **/
6282static int
6283lpfc_sli4_chk_avail_extnt_rsrc(struct lpfc_hba *phba, uint16_t type)
6284{
6285	uint16_t curr_ext_cnt, rsrc_ext_cnt;
6286	uint16_t size_diff, rsrc_ext_size;
6287	int rc = 0;
6288	struct lpfc_rsrc_blks *rsrc_entry;
6289	struct list_head *rsrc_blk_list = NULL;
6290
6291	size_diff = 0;
6292	curr_ext_cnt = 0;
6293	rc = lpfc_sli4_get_avail_extnt_rsrc(phba, type,
6294					    &rsrc_ext_cnt,
6295					    &rsrc_ext_size);
6296	if (unlikely(rc))
6297		return -EIO;
6298
6299	switch (type) {
6300	case LPFC_RSC_TYPE_FCOE_RPI:
6301		rsrc_blk_list = &phba->sli4_hba.lpfc_rpi_blk_list;
6302		break;
6303	case LPFC_RSC_TYPE_FCOE_VPI:
6304		rsrc_blk_list = &phba->lpfc_vpi_blk_list;
6305		break;
6306	case LPFC_RSC_TYPE_FCOE_XRI:
6307		rsrc_blk_list = &phba->sli4_hba.lpfc_xri_blk_list;
6308		break;
6309	case LPFC_RSC_TYPE_FCOE_VFI:
6310		rsrc_blk_list = &phba->sli4_hba.lpfc_vfi_blk_list;
6311		break;
6312	default:
6313		break;
6314	}
6315
6316	list_for_each_entry(rsrc_entry, rsrc_blk_list, list) {
6317		curr_ext_cnt++;
6318		if (rsrc_entry->rsrc_size != rsrc_ext_size)
6319			size_diff++;
6320	}
6321
6322	if (curr_ext_cnt != rsrc_ext_cnt || size_diff != 0)
6323		rc = 1;
6324
6325	return rc;
6326}
6327
6328/**
6329 * lpfc_sli4_cfg_post_extnts -
6330 * @phba: Pointer to HBA context object.
6331 * @extnt_cnt: number of available extents.
6332 * @type: the extent type (rpi, xri, vfi, vpi).
6333 * @emb: buffer to hold either MBX_EMBED or MBX_NEMBED operation.
6334 * @mbox: pointer to the caller's allocated mailbox structure.
6335 *
6336 * This function executes the extents allocation request.  It also
6337 * takes care of the amount of memory needed to allocate or get the
6338 * allocated extents. It is the caller's responsibility to evaluate
6339 * the response.
6340 *
6341 * Returns:
6342 *   -Error:  Error value describes the condition found.
6343 *   0: if successful
6344 **/
6345static int
6346lpfc_sli4_cfg_post_extnts(struct lpfc_hba *phba, uint16_t extnt_cnt,
6347			  uint16_t type, bool *emb, LPFC_MBOXQ_t *mbox)
6348{
6349	int rc = 0;
6350	uint32_t req_len;
6351	uint32_t emb_len;
6352	uint32_t alloc_len, mbox_tmo;
6353
6354	/* Calculate the total requested length of the dma memory */
6355	req_len = extnt_cnt * sizeof(uint16_t);
6356
6357	/*
6358	 * Calculate the size of an embedded mailbox.  The uint32_t
6359	 * accounts for extents-specific word.
6360	 */
6361	emb_len = sizeof(MAILBOX_t) - sizeof(struct mbox_header) -
6362		sizeof(uint32_t);
6363
6364	/*
6365	 * Presume the allocation and response will fit into an embedded
6366	 * mailbox.  If not true, reconfigure to a non-embedded mailbox.
6367	 */
6368	*emb = LPFC_SLI4_MBX_EMBED;
6369	if (req_len > emb_len) {
6370		req_len = extnt_cnt * sizeof(uint16_t) +
6371			sizeof(union lpfc_sli4_cfg_shdr) +
6372			sizeof(uint32_t);
6373		*emb = LPFC_SLI4_MBX_NEMBED;
6374	}
6375
6376	alloc_len = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
6377				     LPFC_MBOX_OPCODE_ALLOC_RSRC_EXTENT,
6378				     req_len, *emb);
6379	if (alloc_len < req_len) {
6380		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
6381			"2982 Allocated DMA memory size (x%x) is "
6382			"less than the requested DMA memory "
6383			"size (x%x)\n", alloc_len, req_len);
6384		return -ENOMEM;
6385	}
6386	rc = lpfc_sli4_mbox_rsrc_extent(phba, mbox, extnt_cnt, type, *emb);
6387	if (unlikely(rc))
6388		return -EIO;
6389
6390	if (!phba->sli4_hba.intr_enable)
6391		rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
6392	else {
6393		mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
6394		rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
6395	}
6396
6397	if (unlikely(rc))
6398		rc = -EIO;
6399	return rc;
6400}
6401
6402/**
6403 * lpfc_sli4_alloc_extent - Allocate an SLI4 resource extent.
6404 * @phba: Pointer to HBA context object.
6405 * @type:  The resource extent type to allocate.
6406 *
6407 * This function allocates the number of elements for the specified
6408 * resource type.
6409 **/
6410static int
6411lpfc_sli4_alloc_extent(struct lpfc_hba *phba, uint16_t type)
6412{
6413	bool emb = false;
6414	uint16_t rsrc_id_cnt, rsrc_cnt, rsrc_size;
6415	uint16_t rsrc_id, rsrc_start, j, k;
6416	uint16_t *ids;
6417	int i, rc;
6418	unsigned long longs;
6419	unsigned long *bmask;
6420	struct lpfc_rsrc_blks *rsrc_blks;
6421	LPFC_MBOXQ_t *mbox;
6422	uint32_t length;
6423	struct lpfc_id_range *id_array = NULL;
6424	void *virtaddr = NULL;
6425	struct lpfc_mbx_nembed_rsrc_extent *n_rsrc;
6426	struct lpfc_mbx_alloc_rsrc_extents *rsrc_ext;
6427	struct list_head *ext_blk_list;
6428
6429	rc = lpfc_sli4_get_avail_extnt_rsrc(phba, type,
6430					    &rsrc_cnt,
6431					    &rsrc_size);
6432	if (unlikely(rc))
6433		return -EIO;
6434
6435	if ((rsrc_cnt == 0) || (rsrc_size == 0)) {
6436		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
6437			"3009 No available Resource Extents "
6438			"for resource type 0x%x: Count: 0x%x, "
6439			"Size 0x%x\n", type, rsrc_cnt,
6440			rsrc_size);
6441		return -ENOMEM;
6442	}
6443
6444	lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_INIT | LOG_SLI,
6445			"2903 Post resource extents type-0x%x: "
6446			"count:%d, size %d\n", type, rsrc_cnt, rsrc_size);
6447
6448	mbox = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
6449	if (!mbox)
6450		return -ENOMEM;
6451
6452	rc = lpfc_sli4_cfg_post_extnts(phba, rsrc_cnt, type, &emb, mbox);
6453	if (unlikely(rc)) {
6454		rc = -EIO;
6455		goto err_exit;
6456	}
6457
6458	/*
6459	 * Figure out where the response is located.  Then get local pointers
6460	 * to the response data.  The port does not guarantee to respond to
6461	 * all extents counts request so update the local variable with the
6462	 * allocated count from the port.
6463	 */
6464	if (emb == LPFC_SLI4_MBX_EMBED) {
6465		rsrc_ext = &mbox->u.mqe.un.alloc_rsrc_extents;
6466		id_array = &rsrc_ext->u.rsp.id[0];
6467		rsrc_cnt = bf_get(lpfc_mbx_rsrc_cnt, &rsrc_ext->u.rsp);
6468	} else {
6469		virtaddr = mbox->sge_array->addr[0];
6470		n_rsrc = (struct lpfc_mbx_nembed_rsrc_extent *) virtaddr;
6471		rsrc_cnt = bf_get(lpfc_mbx_rsrc_cnt, n_rsrc);
6472		id_array = &n_rsrc->id;
6473	}
6474
6475	longs = ((rsrc_cnt * rsrc_size) + BITS_PER_LONG - 1) / BITS_PER_LONG;
6476	rsrc_id_cnt = rsrc_cnt * rsrc_size;
6477
6478	/*
6479	 * Based on the resource size and count, correct the base and max
6480	 * resource values.
6481	 */
6482	length = sizeof(struct lpfc_rsrc_blks);
6483	switch (type) {
6484	case LPFC_RSC_TYPE_FCOE_RPI:
6485		phba->sli4_hba.rpi_bmask = kcalloc(longs,
6486						   sizeof(unsigned long),
6487						   GFP_KERNEL);
6488		if (unlikely(!phba->sli4_hba.rpi_bmask)) {
6489			rc = -ENOMEM;
6490			goto err_exit;
6491		}
6492		phba->sli4_hba.rpi_ids = kcalloc(rsrc_id_cnt,
6493						 sizeof(uint16_t),
6494						 GFP_KERNEL);
6495		if (unlikely(!phba->sli4_hba.rpi_ids)) {
6496			kfree(phba->sli4_hba.rpi_bmask);
6497			rc = -ENOMEM;
6498			goto err_exit;
6499		}
6500
6501		/*
6502		 * The next_rpi was initialized with the maximum available
6503		 * count but the port may allocate a smaller number.  Catch
6504		 * that case and update the next_rpi.
6505		 */
6506		phba->sli4_hba.next_rpi = rsrc_id_cnt;
6507
6508		/* Initialize local ptrs for common extent processing later. */
6509		bmask = phba->sli4_hba.rpi_bmask;
6510		ids = phba->sli4_hba.rpi_ids;
6511		ext_blk_list = &phba->sli4_hba.lpfc_rpi_blk_list;
6512		break;
6513	case LPFC_RSC_TYPE_FCOE_VPI:
6514		phba->vpi_bmask = kcalloc(longs, sizeof(unsigned long),
6515					  GFP_KERNEL);
6516		if (unlikely(!phba->vpi_bmask)) {
6517			rc = -ENOMEM;
6518			goto err_exit;
6519		}
6520		phba->vpi_ids = kcalloc(rsrc_id_cnt, sizeof(uint16_t),
6521					 GFP_KERNEL);
6522		if (unlikely(!phba->vpi_ids)) {
6523			kfree(phba->vpi_bmask);
6524			rc = -ENOMEM;
6525			goto err_exit;
6526		}
6527
6528		/* Initialize local ptrs for common extent processing later. */
6529		bmask = phba->vpi_bmask;
6530		ids = phba->vpi_ids;
6531		ext_blk_list = &phba->lpfc_vpi_blk_list;
6532		break;
6533	case LPFC_RSC_TYPE_FCOE_XRI:
6534		phba->sli4_hba.xri_bmask = kcalloc(longs,
6535						   sizeof(unsigned long),
6536						   GFP_KERNEL);
6537		if (unlikely(!phba->sli4_hba.xri_bmask)) {
6538			rc = -ENOMEM;
6539			goto err_exit;
6540		}
6541		phba->sli4_hba.max_cfg_param.xri_used = 0;
6542		phba->sli4_hba.xri_ids = kcalloc(rsrc_id_cnt,
6543						 sizeof(uint16_t),
6544						 GFP_KERNEL);
6545		if (unlikely(!phba->sli4_hba.xri_ids)) {
6546			kfree(phba->sli4_hba.xri_bmask);
6547			rc = -ENOMEM;
6548			goto err_exit;
6549		}
6550
6551		/* Initialize local ptrs for common extent processing later. */
6552		bmask = phba->sli4_hba.xri_bmask;
6553		ids = phba->sli4_hba.xri_ids;
6554		ext_blk_list = &phba->sli4_hba.lpfc_xri_blk_list;
6555		break;
6556	case LPFC_RSC_TYPE_FCOE_VFI:
6557		phba->sli4_hba.vfi_bmask = kcalloc(longs,
6558						   sizeof(unsigned long),
6559						   GFP_KERNEL);
6560		if (unlikely(!phba->sli4_hba.vfi_bmask)) {
6561			rc = -ENOMEM;
6562			goto err_exit;
6563		}
6564		phba->sli4_hba.vfi_ids = kcalloc(rsrc_id_cnt,
6565						 sizeof(uint16_t),
6566						 GFP_KERNEL);
6567		if (unlikely(!phba->sli4_hba.vfi_ids)) {
6568			kfree(phba->sli4_hba.vfi_bmask);
6569			rc = -ENOMEM;
6570			goto err_exit;
6571		}
6572
6573		/* Initialize local ptrs for common extent processing later. */
6574		bmask = phba->sli4_hba.vfi_bmask;
6575		ids = phba->sli4_hba.vfi_ids;
6576		ext_blk_list = &phba->sli4_hba.lpfc_vfi_blk_list;
6577		break;
6578	default:
6579		/* Unsupported Opcode.  Fail call. */
6580		id_array = NULL;
6581		bmask = NULL;
6582		ids = NULL;
6583		ext_blk_list = NULL;
6584		goto err_exit;
6585	}
6586
6587	/*
6588	 * Complete initializing the extent configuration with the
6589	 * allocated ids assigned to this function.  The bitmask serves
6590	 * as an index into the array and manages the available ids.  The
6591	 * array just stores the ids communicated to the port via the wqes.
6592	 */
6593	for (i = 0, j = 0, k = 0; i < rsrc_cnt; i++) {
6594		if ((i % 2) == 0)
6595			rsrc_id = bf_get(lpfc_mbx_rsrc_id_word4_0,
6596					 &id_array[k]);
6597		else
6598			rsrc_id = bf_get(lpfc_mbx_rsrc_id_word4_1,
6599					 &id_array[k]);
6600
6601		rsrc_blks = kzalloc(length, GFP_KERNEL);
6602		if (unlikely(!rsrc_blks)) {
6603			rc = -ENOMEM;
6604			kfree(bmask);
6605			kfree(ids);
6606			goto err_exit;
6607		}
6608		rsrc_blks->rsrc_start = rsrc_id;
6609		rsrc_blks->rsrc_size = rsrc_size;
6610		list_add_tail(&rsrc_blks->list, ext_blk_list);
6611		rsrc_start = rsrc_id;
6612		if ((type == LPFC_RSC_TYPE_FCOE_XRI) && (j == 0)) {
6613			phba->sli4_hba.io_xri_start = rsrc_start +
6614				lpfc_sli4_get_iocb_cnt(phba);
6615		}
6616
6617		while (rsrc_id < (rsrc_start + rsrc_size)) {
6618			ids[j] = rsrc_id;
6619			rsrc_id++;
6620			j++;
6621		}
6622		/* Entire word processed.  Get next word.*/
6623		if ((i % 2) == 1)
6624			k++;
6625	}
6626 err_exit:
6627	lpfc_sli4_mbox_cmd_free(phba, mbox);
6628	return rc;
6629}
6630
6631
6632
6633/**
6634 * lpfc_sli4_dealloc_extent - Deallocate an SLI4 resource extent.
6635 * @phba: Pointer to HBA context object.
6636 * @type: the extent's type.
6637 *
6638 * This function deallocates all extents of a particular resource type.
6639 * SLI4 does not allow for deallocating a particular extent range.  It
6640 * is the caller's responsibility to release all kernel memory resources.
6641 **/
6642static int
6643lpfc_sli4_dealloc_extent(struct lpfc_hba *phba, uint16_t type)
6644{
6645	int rc;
6646	uint32_t length, mbox_tmo = 0;
6647	LPFC_MBOXQ_t *mbox;
6648	struct lpfc_mbx_dealloc_rsrc_extents *dealloc_rsrc;
6649	struct lpfc_rsrc_blks *rsrc_blk, *rsrc_blk_next;
6650
6651	mbox = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
6652	if (!mbox)
6653		return -ENOMEM;
6654
6655	/*
6656	 * This function sends an embedded mailbox because it only sends the
6657	 * the resource type.  All extents of this type are released by the
6658	 * port.
6659	 */
6660	length = (sizeof(struct lpfc_mbx_dealloc_rsrc_extents) -
6661		  sizeof(struct lpfc_sli4_cfg_mhdr));
6662	lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
6663			 LPFC_MBOX_OPCODE_DEALLOC_RSRC_EXTENT,
6664			 length, LPFC_SLI4_MBX_EMBED);
6665
6666	/* Send an extents count of 0 - the dealloc doesn't use it. */
6667	rc = lpfc_sli4_mbox_rsrc_extent(phba, mbox, 0, type,
6668					LPFC_SLI4_MBX_EMBED);
6669	if (unlikely(rc)) {
6670		rc = -EIO;
6671		goto out_free_mbox;
6672	}
6673	if (!phba->sli4_hba.intr_enable)
6674		rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
6675	else {
6676		mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
6677		rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
6678	}
6679	if (unlikely(rc)) {
6680		rc = -EIO;
6681		goto out_free_mbox;
6682	}
6683
6684	dealloc_rsrc = &mbox->u.mqe.un.dealloc_rsrc_extents;
6685	if (bf_get(lpfc_mbox_hdr_status,
6686		   &dealloc_rsrc->header.cfg_shdr.response)) {
6687		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
6688				"2919 Failed to release resource extents "
6689				"for type %d - Status 0x%x Add'l Status 0x%x. "
6690				"Resource memory not released.\n",
6691				type,
6692				bf_get(lpfc_mbox_hdr_status,
6693				    &dealloc_rsrc->header.cfg_shdr.response),
6694				bf_get(lpfc_mbox_hdr_add_status,
6695				    &dealloc_rsrc->header.cfg_shdr.response));
6696		rc = -EIO;
6697		goto out_free_mbox;
6698	}
6699
6700	/* Release kernel memory resources for the specific type. */
6701	switch (type) {
6702	case LPFC_RSC_TYPE_FCOE_VPI:
6703		kfree(phba->vpi_bmask);
6704		kfree(phba->vpi_ids);
6705		bf_set(lpfc_vpi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
6706		list_for_each_entry_safe(rsrc_blk, rsrc_blk_next,
6707				    &phba->lpfc_vpi_blk_list, list) {
6708			list_del_init(&rsrc_blk->list);
6709			kfree(rsrc_blk);
6710		}
6711		phba->sli4_hba.max_cfg_param.vpi_used = 0;
6712		break;
6713	case LPFC_RSC_TYPE_FCOE_XRI:
6714		kfree(phba->sli4_hba.xri_bmask);
6715		kfree(phba->sli4_hba.xri_ids);
6716		list_for_each_entry_safe(rsrc_blk, rsrc_blk_next,
6717				    &phba->sli4_hba.lpfc_xri_blk_list, list) {
6718			list_del_init(&rsrc_blk->list);
6719			kfree(rsrc_blk);
6720		}
6721		break;
6722	case LPFC_RSC_TYPE_FCOE_VFI:
6723		kfree(phba->sli4_hba.vfi_bmask);
6724		kfree(phba->sli4_hba.vfi_ids);
6725		bf_set(lpfc_vfi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
6726		list_for_each_entry_safe(rsrc_blk, rsrc_blk_next,
6727				    &phba->sli4_hba.lpfc_vfi_blk_list, list) {
6728			list_del_init(&rsrc_blk->list);
6729			kfree(rsrc_blk);
6730		}
6731		break;
6732	case LPFC_RSC_TYPE_FCOE_RPI:
6733		/* RPI bitmask and physical id array are cleaned up earlier. */
6734		list_for_each_entry_safe(rsrc_blk, rsrc_blk_next,
6735				    &phba->sli4_hba.lpfc_rpi_blk_list, list) {
6736			list_del_init(&rsrc_blk->list);
6737			kfree(rsrc_blk);
6738		}
6739		break;
6740	default:
6741		break;
6742	}
6743
6744	bf_set(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
6745
6746 out_free_mbox:
6747	mempool_free(mbox, phba->mbox_mem_pool);
6748	return rc;
6749}
6750
6751static void
6752lpfc_set_features(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox,
6753		  uint32_t feature)
6754{
6755	uint32_t len;
6756	u32 sig_freq = 0;
6757
6758	len = sizeof(struct lpfc_mbx_set_feature) -
6759		sizeof(struct lpfc_sli4_cfg_mhdr);
6760	lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
6761			 LPFC_MBOX_OPCODE_SET_FEATURES, len,
6762			 LPFC_SLI4_MBX_EMBED);
6763
6764	switch (feature) {
6765	case LPFC_SET_UE_RECOVERY:
6766		bf_set(lpfc_mbx_set_feature_UER,
6767		       &mbox->u.mqe.un.set_feature, 1);
6768		mbox->u.mqe.un.set_feature.feature = LPFC_SET_UE_RECOVERY;
6769		mbox->u.mqe.un.set_feature.param_len = 8;
6770		break;
6771	case LPFC_SET_MDS_DIAGS:
6772		bf_set(lpfc_mbx_set_feature_mds,
6773		       &mbox->u.mqe.un.set_feature, 1);
6774		bf_set(lpfc_mbx_set_feature_mds_deep_loopbk,
6775		       &mbox->u.mqe.un.set_feature, 1);
6776		mbox->u.mqe.un.set_feature.feature = LPFC_SET_MDS_DIAGS;
6777		mbox->u.mqe.un.set_feature.param_len = 8;
6778		break;
6779	case LPFC_SET_CGN_SIGNAL:
6780		if (phba->cmf_active_mode == LPFC_CFG_OFF)
6781			sig_freq = 0;
6782		else
6783			sig_freq = phba->cgn_sig_freq;
6784
6785		if (phba->cgn_reg_signal == EDC_CG_SIG_WARN_ALARM) {
6786			bf_set(lpfc_mbx_set_feature_CGN_alarm_freq,
6787			       &mbox->u.mqe.un.set_feature, sig_freq);
6788			bf_set(lpfc_mbx_set_feature_CGN_warn_freq,
6789			       &mbox->u.mqe.un.set_feature, sig_freq);
6790		}
6791
6792		if (phba->cgn_reg_signal == EDC_CG_SIG_WARN_ONLY)
6793			bf_set(lpfc_mbx_set_feature_CGN_warn_freq,
6794			       &mbox->u.mqe.un.set_feature, sig_freq);
6795
6796		if (phba->cmf_active_mode == LPFC_CFG_OFF ||
6797		    phba->cgn_reg_signal == EDC_CG_SIG_NOTSUPPORTED)
6798			sig_freq = 0;
6799		else
6800			sig_freq = lpfc_acqe_cgn_frequency;
6801
6802		bf_set(lpfc_mbx_set_feature_CGN_acqe_freq,
6803		       &mbox->u.mqe.un.set_feature, sig_freq);
6804
6805		mbox->u.mqe.un.set_feature.feature = LPFC_SET_CGN_SIGNAL;
6806		mbox->u.mqe.un.set_feature.param_len = 12;
6807		break;
6808	case LPFC_SET_DUAL_DUMP:
6809		bf_set(lpfc_mbx_set_feature_dd,
6810		       &mbox->u.mqe.un.set_feature, LPFC_ENABLE_DUAL_DUMP);
6811		bf_set(lpfc_mbx_set_feature_ddquery,
6812		       &mbox->u.mqe.un.set_feature, 0);
6813		mbox->u.mqe.un.set_feature.feature = LPFC_SET_DUAL_DUMP;
6814		mbox->u.mqe.un.set_feature.param_len = 4;
6815		break;
6816	case LPFC_SET_ENABLE_MI:
6817		mbox->u.mqe.un.set_feature.feature = LPFC_SET_ENABLE_MI;
6818		mbox->u.mqe.un.set_feature.param_len = 4;
6819		bf_set(lpfc_mbx_set_feature_milunq, &mbox->u.mqe.un.set_feature,
6820		       phba->pport->cfg_lun_queue_depth);
6821		bf_set(lpfc_mbx_set_feature_mi, &mbox->u.mqe.un.set_feature,
6822		       phba->sli4_hba.pc_sli4_params.mi_ver);
6823		break;
6824	case LPFC_SET_LD_SIGNAL:
6825		mbox->u.mqe.un.set_feature.feature = LPFC_SET_LD_SIGNAL;
6826		mbox->u.mqe.un.set_feature.param_len = 16;
6827		bf_set(lpfc_mbx_set_feature_lds_qry,
6828		       &mbox->u.mqe.un.set_feature, LPFC_QUERY_LDS_OP);
6829		break;
6830	case LPFC_SET_ENABLE_CMF:
6831		mbox->u.mqe.un.set_feature.feature = LPFC_SET_ENABLE_CMF;
6832		mbox->u.mqe.un.set_feature.param_len = 4;
6833		bf_set(lpfc_mbx_set_feature_cmf,
6834		       &mbox->u.mqe.un.set_feature, 1);
6835		break;
6836	}
6837	return;
6838}
6839
6840/**
6841 * lpfc_ras_stop_fwlog: Disable FW logging by the adapter
6842 * @phba: Pointer to HBA context object.
6843 *
6844 * Disable FW logging into host memory on the adapter. To
6845 * be done before reading logs from the host memory.
6846 **/
6847void
6848lpfc_ras_stop_fwlog(struct lpfc_hba *phba)
6849{
6850	struct lpfc_ras_fwlog *ras_fwlog = &phba->ras_fwlog;
6851
6852	spin_lock_irq(&phba->ras_fwlog_lock);
6853	ras_fwlog->state = INACTIVE;
6854	spin_unlock_irq(&phba->ras_fwlog_lock);
6855
6856	/* Disable FW logging to host memory */
6857	writel(LPFC_CTL_PDEV_CTL_DDL_RAS,
6858	       phba->sli4_hba.conf_regs_memmap_p + LPFC_CTL_PDEV_CTL_OFFSET);
6859
6860	/* Wait 10ms for firmware to stop using DMA buffer */
6861	usleep_range(10 * 1000, 20 * 1000);
6862}
6863
6864/**
6865 * lpfc_sli4_ras_dma_free - Free memory allocated for FW logging.
6866 * @phba: Pointer to HBA context object.
6867 *
6868 * This function is called to free memory allocated for RAS FW logging
6869 * support in the driver.
6870 **/
6871void
6872lpfc_sli4_ras_dma_free(struct lpfc_hba *phba)
6873{
6874	struct lpfc_ras_fwlog *ras_fwlog = &phba->ras_fwlog;
6875	struct lpfc_dmabuf *dmabuf, *next;
6876
6877	if (!list_empty(&ras_fwlog->fwlog_buff_list)) {
6878		list_for_each_entry_safe(dmabuf, next,
6879				    &ras_fwlog->fwlog_buff_list,
6880				    list) {
6881			list_del(&dmabuf->list);
6882			dma_free_coherent(&phba->pcidev->dev,
6883					  LPFC_RAS_MAX_ENTRY_SIZE,
6884					  dmabuf->virt, dmabuf->phys);
6885			kfree(dmabuf);
6886		}
6887	}
6888
6889	if (ras_fwlog->lwpd.virt) {
6890		dma_free_coherent(&phba->pcidev->dev,
6891				  sizeof(uint32_t) * 2,
6892				  ras_fwlog->lwpd.virt,
6893				  ras_fwlog->lwpd.phys);
6894		ras_fwlog->lwpd.virt = NULL;
6895	}
6896
6897	spin_lock_irq(&phba->ras_fwlog_lock);
6898	ras_fwlog->state = INACTIVE;
6899	spin_unlock_irq(&phba->ras_fwlog_lock);
6900}
6901
6902/**
6903 * lpfc_sli4_ras_dma_alloc: Allocate memory for FW support
6904 * @phba: Pointer to HBA context object.
6905 * @fwlog_buff_count: Count of buffers to be created.
6906 *
6907 * This routine DMA memory for Log Write Position Data[LPWD] and buffer
6908 * to update FW log is posted to the adapter.
6909 * Buffer count is calculated based on module param ras_fwlog_buffsize
6910 * Size of each buffer posted to FW is 64K.
6911 **/
6912
6913static int
6914lpfc_sli4_ras_dma_alloc(struct lpfc_hba *phba,
6915			uint32_t fwlog_buff_count)
6916{
6917	struct lpfc_ras_fwlog *ras_fwlog = &phba->ras_fwlog;
6918	struct lpfc_dmabuf *dmabuf;
6919	int rc = 0, i = 0;
6920
6921	/* Initialize List */
6922	INIT_LIST_HEAD(&ras_fwlog->fwlog_buff_list);
6923
6924	/* Allocate memory for the LWPD */
6925	ras_fwlog->lwpd.virt = dma_alloc_coherent(&phba->pcidev->dev,
6926					    sizeof(uint32_t) * 2,
6927					    &ras_fwlog->lwpd.phys,
6928					    GFP_KERNEL);
6929	if (!ras_fwlog->lwpd.virt) {
6930		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
6931				"6185 LWPD Memory Alloc Failed\n");
6932
6933		return -ENOMEM;
6934	}
6935
6936	ras_fwlog->fw_buffcount = fwlog_buff_count;
6937	for (i = 0; i < ras_fwlog->fw_buffcount; i++) {
6938		dmabuf = kzalloc(sizeof(struct lpfc_dmabuf),
6939				 GFP_KERNEL);
6940		if (!dmabuf) {
6941			rc = -ENOMEM;
6942			lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
6943					"6186 Memory Alloc failed FW logging");
6944			goto free_mem;
6945		}
6946
6947		dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev,
6948						  LPFC_RAS_MAX_ENTRY_SIZE,
6949						  &dmabuf->phys, GFP_KERNEL);
6950		if (!dmabuf->virt) {
6951			kfree(dmabuf);
6952			rc = -ENOMEM;
6953			lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
6954					"6187 DMA Alloc Failed FW logging");
6955			goto free_mem;
6956		}
6957		dmabuf->buffer_tag = i;
6958		list_add_tail(&dmabuf->list, &ras_fwlog->fwlog_buff_list);
6959	}
6960
6961free_mem:
6962	if (rc)
6963		lpfc_sli4_ras_dma_free(phba);
6964
6965	return rc;
6966}
6967
6968/**
6969 * lpfc_sli4_ras_mbox_cmpl: Completion handler for RAS MBX command
6970 * @phba: pointer to lpfc hba data structure.
6971 * @pmb: pointer to the driver internal queue element for mailbox command.
6972 *
6973 * Completion handler for driver's RAS MBX command to the device.
6974 **/
6975static void
6976lpfc_sli4_ras_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
6977{
6978	MAILBOX_t *mb;
6979	union lpfc_sli4_cfg_shdr *shdr;
6980	uint32_t shdr_status, shdr_add_status;
6981	struct lpfc_ras_fwlog *ras_fwlog = &phba->ras_fwlog;
6982
6983	mb = &pmb->u.mb;
6984
6985	shdr = (union lpfc_sli4_cfg_shdr *)
6986		&pmb->u.mqe.un.ras_fwlog.header.cfg_shdr;
6987	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
6988	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
6989
6990	if (mb->mbxStatus != MBX_SUCCESS || shdr_status) {
6991		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
6992				"6188 FW LOG mailbox "
6993				"completed with status x%x add_status x%x,"
6994				" mbx status x%x\n",
6995				shdr_status, shdr_add_status, mb->mbxStatus);
6996
6997		ras_fwlog->ras_hwsupport = false;
6998		goto disable_ras;
6999	}
7000
7001	spin_lock_irq(&phba->ras_fwlog_lock);
7002	ras_fwlog->state = ACTIVE;
7003	spin_unlock_irq(&phba->ras_fwlog_lock);
7004	mempool_free(pmb, phba->mbox_mem_pool);
7005
7006	return;
7007
7008disable_ras:
7009	/* Free RAS DMA memory */
7010	lpfc_sli4_ras_dma_free(phba);
7011	mempool_free(pmb, phba->mbox_mem_pool);
7012}
7013
7014/**
7015 * lpfc_sli4_ras_fwlog_init: Initialize memory and post RAS MBX command
7016 * @phba: pointer to lpfc hba data structure.
7017 * @fwlog_level: Logging verbosity level.
7018 * @fwlog_enable: Enable/Disable logging.
7019 *
7020 * Initialize memory and post mailbox command to enable FW logging in host
7021 * memory.
7022 **/
7023int
7024lpfc_sli4_ras_fwlog_init(struct lpfc_hba *phba,
7025			 uint32_t fwlog_level,
7026			 uint32_t fwlog_enable)
7027{
7028	struct lpfc_ras_fwlog *ras_fwlog = &phba->ras_fwlog;
7029	struct lpfc_mbx_set_ras_fwlog *mbx_fwlog = NULL;
7030	struct lpfc_dmabuf *dmabuf;
7031	LPFC_MBOXQ_t *mbox;
7032	uint32_t len = 0, fwlog_buffsize, fwlog_entry_count;
7033	int rc = 0;
7034
7035	spin_lock_irq(&phba->ras_fwlog_lock);
7036	ras_fwlog->state = INACTIVE;
7037	spin_unlock_irq(&phba->ras_fwlog_lock);
7038
7039	fwlog_buffsize = (LPFC_RAS_MIN_BUFF_POST_SIZE *
7040			  phba->cfg_ras_fwlog_buffsize);
7041	fwlog_entry_count = (fwlog_buffsize/LPFC_RAS_MAX_ENTRY_SIZE);
7042
7043	/*
7044	 * If re-enabling FW logging support use earlier allocated
7045	 * DMA buffers while posting MBX command.
7046	 **/
7047	if (!ras_fwlog->lwpd.virt) {
7048		rc = lpfc_sli4_ras_dma_alloc(phba, fwlog_entry_count);
7049		if (rc) {
7050			lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
7051					"6189 FW Log Memory Allocation Failed");
7052			return rc;
7053		}
7054	}
7055
7056	/* Setup Mailbox command */
7057	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
7058	if (!mbox) {
7059		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
7060				"6190 RAS MBX Alloc Failed");
7061		rc = -ENOMEM;
7062		goto mem_free;
7063	}
7064
7065	ras_fwlog->fw_loglevel = fwlog_level;
7066	len = (sizeof(struct lpfc_mbx_set_ras_fwlog) -
7067		sizeof(struct lpfc_sli4_cfg_mhdr));
7068
7069	lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_LOWLEVEL,
7070			 LPFC_MBOX_OPCODE_SET_DIAG_LOG_OPTION,
7071			 len, LPFC_SLI4_MBX_EMBED);
7072
7073	mbx_fwlog = (struct lpfc_mbx_set_ras_fwlog *)&mbox->u.mqe.un.ras_fwlog;
7074	bf_set(lpfc_fwlog_enable, &mbx_fwlog->u.request,
7075	       fwlog_enable);
7076	bf_set(lpfc_fwlog_loglvl, &mbx_fwlog->u.request,
7077	       ras_fwlog->fw_loglevel);
7078	bf_set(lpfc_fwlog_buffcnt, &mbx_fwlog->u.request,
7079	       ras_fwlog->fw_buffcount);
7080	bf_set(lpfc_fwlog_buffsz, &mbx_fwlog->u.request,
7081	       LPFC_RAS_MAX_ENTRY_SIZE/SLI4_PAGE_SIZE);
7082
7083	/* Update DMA buffer address */
7084	list_for_each_entry(dmabuf, &ras_fwlog->fwlog_buff_list, list) {
7085		memset(dmabuf->virt, 0, LPFC_RAS_MAX_ENTRY_SIZE);
7086
7087		mbx_fwlog->u.request.buff_fwlog[dmabuf->buffer_tag].addr_lo =
7088			putPaddrLow(dmabuf->phys);
7089
7090		mbx_fwlog->u.request.buff_fwlog[dmabuf->buffer_tag].addr_hi =
7091			putPaddrHigh(dmabuf->phys);
7092	}
7093
7094	/* Update LPWD address */
7095	mbx_fwlog->u.request.lwpd.addr_lo = putPaddrLow(ras_fwlog->lwpd.phys);
7096	mbx_fwlog->u.request.lwpd.addr_hi = putPaddrHigh(ras_fwlog->lwpd.phys);
7097
7098	spin_lock_irq(&phba->ras_fwlog_lock);
7099	ras_fwlog->state = REG_INPROGRESS;
7100	spin_unlock_irq(&phba->ras_fwlog_lock);
7101	mbox->vport = phba->pport;
7102	mbox->mbox_cmpl = lpfc_sli4_ras_mbox_cmpl;
7103
7104	rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
7105
7106	if (rc == MBX_NOT_FINISHED) {
7107		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
7108				"6191 FW-Log Mailbox failed. "
7109				"status %d mbxStatus : x%x", rc,
7110				bf_get(lpfc_mqe_status, &mbox->u.mqe));
7111		mempool_free(mbox, phba->mbox_mem_pool);
7112		rc = -EIO;
7113		goto mem_free;
7114	} else
7115		rc = 0;
7116mem_free:
7117	if (rc)
7118		lpfc_sli4_ras_dma_free(phba);
7119
7120	return rc;
7121}
7122
7123/**
7124 * lpfc_sli4_ras_setup - Check if RAS supported on the adapter
7125 * @phba: Pointer to HBA context object.
7126 *
7127 * Check if RAS is supported on the adapter and initialize it.
7128 **/
7129void
7130lpfc_sli4_ras_setup(struct lpfc_hba *phba)
7131{
7132	/* Check RAS FW Log needs to be enabled or not */
7133	if (lpfc_check_fwlog_support(phba))
7134		return;
7135
7136	lpfc_sli4_ras_fwlog_init(phba, phba->cfg_ras_fwlog_level,
7137				 LPFC_RAS_ENABLE_LOGGING);
7138}
7139
7140/**
7141 * lpfc_sli4_alloc_resource_identifiers - Allocate all SLI4 resource extents.
7142 * @phba: Pointer to HBA context object.
7143 *
7144 * This function allocates all SLI4 resource identifiers.
7145 **/
7146int
7147lpfc_sli4_alloc_resource_identifiers(struct lpfc_hba *phba)
7148{
7149	int i, rc, error = 0;
7150	uint16_t count, base;
7151	unsigned long longs;
7152
7153	if (!phba->sli4_hba.rpi_hdrs_in_use)
7154		phba->sli4_hba.next_rpi = phba->sli4_hba.max_cfg_param.max_rpi;
7155	if (phba->sli4_hba.extents_in_use) {
7156		/*
7157		 * The port supports resource extents. The XRI, VPI, VFI, RPI
7158		 * resource extent count must be read and allocated before
7159		 * provisioning the resource id arrays.
7160		 */
7161		if (bf_get(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags) ==
7162		    LPFC_IDX_RSRC_RDY) {
7163			/*
7164			 * Extent-based resources are set - the driver could
7165			 * be in a port reset. Figure out if any corrective
7166			 * actions need to be taken.
7167			 */
7168			rc = lpfc_sli4_chk_avail_extnt_rsrc(phba,
7169						 LPFC_RSC_TYPE_FCOE_VFI);
7170			if (rc != 0)
7171				error++;
7172			rc = lpfc_sli4_chk_avail_extnt_rsrc(phba,
7173						 LPFC_RSC_TYPE_FCOE_VPI);
7174			if (rc != 0)
7175				error++;
7176			rc = lpfc_sli4_chk_avail_extnt_rsrc(phba,
7177						 LPFC_RSC_TYPE_FCOE_XRI);
7178			if (rc != 0)
7179				error++;
7180			rc = lpfc_sli4_chk_avail_extnt_rsrc(phba,
7181						 LPFC_RSC_TYPE_FCOE_RPI);
7182			if (rc != 0)
7183				error++;
7184
7185			/*
7186			 * It's possible that the number of resources
7187			 * provided to this port instance changed between
7188			 * resets.  Detect this condition and reallocate
7189			 * resources.  Otherwise, there is no action.
7190			 */
7191			if (error) {
7192				lpfc_printf_log(phba, KERN_INFO,
7193						LOG_MBOX | LOG_INIT,
7194						"2931 Detected extent resource "
7195						"change.  Reallocating all "
7196						"extents.\n");
7197				rc = lpfc_sli4_dealloc_extent(phba,
7198						 LPFC_RSC_TYPE_FCOE_VFI);
7199				rc = lpfc_sli4_dealloc_extent(phba,
7200						 LPFC_RSC_TYPE_FCOE_VPI);
7201				rc = lpfc_sli4_dealloc_extent(phba,
7202						 LPFC_RSC_TYPE_FCOE_XRI);
7203				rc = lpfc_sli4_dealloc_extent(phba,
7204						 LPFC_RSC_TYPE_FCOE_RPI);
7205			} else
7206				return 0;
7207		}
7208
7209		rc = lpfc_sli4_alloc_extent(phba, LPFC_RSC_TYPE_FCOE_VFI);
7210		if (unlikely(rc))
7211			goto err_exit;
7212
7213		rc = lpfc_sli4_alloc_extent(phba, LPFC_RSC_TYPE_FCOE_VPI);
7214		if (unlikely(rc))
7215			goto err_exit;
7216
7217		rc = lpfc_sli4_alloc_extent(phba, LPFC_RSC_TYPE_FCOE_RPI);
7218		if (unlikely(rc))
7219			goto err_exit;
7220
7221		rc = lpfc_sli4_alloc_extent(phba, LPFC_RSC_TYPE_FCOE_XRI);
7222		if (unlikely(rc))
7223			goto err_exit;
7224		bf_set(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags,
7225		       LPFC_IDX_RSRC_RDY);
7226		return rc;
7227	} else {
7228		/*
7229		 * The port does not support resource extents.  The XRI, VPI,
7230		 * VFI, RPI resource ids were determined from READ_CONFIG.
7231		 * Just allocate the bitmasks and provision the resource id
7232		 * arrays.  If a port reset is active, the resources don't
7233		 * need any action - just exit.
7234		 */
7235		if (bf_get(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags) ==
7236		    LPFC_IDX_RSRC_RDY) {
7237			lpfc_sli4_dealloc_resource_identifiers(phba);
7238			lpfc_sli4_remove_rpis(phba);
7239		}
7240		/* RPIs. */
7241		count = phba->sli4_hba.max_cfg_param.max_rpi;
7242		if (count <= 0) {
7243			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
7244					"3279 Invalid provisioning of "
7245					"rpi:%d\n", count);
7246			rc = -EINVAL;
7247			goto err_exit;
7248		}
7249		base = phba->sli4_hba.max_cfg_param.rpi_base;
7250		longs = (count + BITS_PER_LONG - 1) / BITS_PER_LONG;
7251		phba->sli4_hba.rpi_bmask = kcalloc(longs,
7252						   sizeof(unsigned long),
7253						   GFP_KERNEL);
7254		if (unlikely(!phba->sli4_hba.rpi_bmask)) {
7255			rc = -ENOMEM;
7256			goto err_exit;
7257		}
7258		phba->sli4_hba.rpi_ids = kcalloc(count, sizeof(uint16_t),
7259						 GFP_KERNEL);
7260		if (unlikely(!phba->sli4_hba.rpi_ids)) {
7261			rc = -ENOMEM;
7262			goto free_rpi_bmask;
7263		}
7264
7265		for (i = 0; i < count; i++)
7266			phba->sli4_hba.rpi_ids[i] = base + i;
7267
7268		/* VPIs. */
7269		count = phba->sli4_hba.max_cfg_param.max_vpi;
7270		if (count <= 0) {
7271			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
7272					"3280 Invalid provisioning of "
7273					"vpi:%d\n", count);
7274			rc = -EINVAL;
7275			goto free_rpi_ids;
7276		}
7277		base = phba->sli4_hba.max_cfg_param.vpi_base;
7278		longs = (count + BITS_PER_LONG - 1) / BITS_PER_LONG;
7279		phba->vpi_bmask = kcalloc(longs, sizeof(unsigned long),
7280					  GFP_KERNEL);
7281		if (unlikely(!phba->vpi_bmask)) {
7282			rc = -ENOMEM;
7283			goto free_rpi_ids;
7284		}
7285		phba->vpi_ids = kcalloc(count, sizeof(uint16_t),
7286					GFP_KERNEL);
7287		if (unlikely(!phba->vpi_ids)) {
7288			rc = -ENOMEM;
7289			goto free_vpi_bmask;
7290		}
7291
7292		for (i = 0; i < count; i++)
7293			phba->vpi_ids[i] = base + i;
7294
7295		/* XRIs. */
7296		count = phba->sli4_hba.max_cfg_param.max_xri;
7297		if (count <= 0) {
7298			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
7299					"3281 Invalid provisioning of "
7300					"xri:%d\n", count);
7301			rc = -EINVAL;
7302			goto free_vpi_ids;
7303		}
7304		base = phba->sli4_hba.max_cfg_param.xri_base;
7305		longs = (count + BITS_PER_LONG - 1) / BITS_PER_LONG;
7306		phba->sli4_hba.xri_bmask = kcalloc(longs,
7307						   sizeof(unsigned long),
7308						   GFP_KERNEL);
7309		if (unlikely(!phba->sli4_hba.xri_bmask)) {
7310			rc = -ENOMEM;
7311			goto free_vpi_ids;
7312		}
7313		phba->sli4_hba.max_cfg_param.xri_used = 0;
7314		phba->sli4_hba.xri_ids = kcalloc(count, sizeof(uint16_t),
7315						 GFP_KERNEL);
7316		if (unlikely(!phba->sli4_hba.xri_ids)) {
7317			rc = -ENOMEM;
7318			goto free_xri_bmask;
7319		}
7320
7321		for (i = 0; i < count; i++)
7322			phba->sli4_hba.xri_ids[i] = base + i;
7323
7324		/* VFIs. */
7325		count = phba->sli4_hba.max_cfg_param.max_vfi;
7326		if (count <= 0) {
7327			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
7328					"3282 Invalid provisioning of "
7329					"vfi:%d\n", count);
7330			rc = -EINVAL;
7331			goto free_xri_ids;
7332		}
7333		base = phba->sli4_hba.max_cfg_param.vfi_base;
7334		longs = (count + BITS_PER_LONG - 1) / BITS_PER_LONG;
7335		phba->sli4_hba.vfi_bmask = kcalloc(longs,
7336						   sizeof(unsigned long),
7337						   GFP_KERNEL);
7338		if (unlikely(!phba->sli4_hba.vfi_bmask)) {
7339			rc = -ENOMEM;
7340			goto free_xri_ids;
7341		}
7342		phba->sli4_hba.vfi_ids = kcalloc(count, sizeof(uint16_t),
7343						 GFP_KERNEL);
7344		if (unlikely(!phba->sli4_hba.vfi_ids)) {
7345			rc = -ENOMEM;
7346			goto free_vfi_bmask;
7347		}
7348
7349		for (i = 0; i < count; i++)
7350			phba->sli4_hba.vfi_ids[i] = base + i;
7351
7352		/*
7353		 * Mark all resources ready.  An HBA reset doesn't need
7354		 * to reset the initialization.
7355		 */
7356		bf_set(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags,
7357		       LPFC_IDX_RSRC_RDY);
7358		return 0;
7359	}
7360
7361 free_vfi_bmask:
7362	kfree(phba->sli4_hba.vfi_bmask);
7363	phba->sli4_hba.vfi_bmask = NULL;
7364 free_xri_ids:
7365	kfree(phba->sli4_hba.xri_ids);
7366	phba->sli4_hba.xri_ids = NULL;
7367 free_xri_bmask:
7368	kfree(phba->sli4_hba.xri_bmask);
7369	phba->sli4_hba.xri_bmask = NULL;
7370 free_vpi_ids:
7371	kfree(phba->vpi_ids);
7372	phba->vpi_ids = NULL;
7373 free_vpi_bmask:
7374	kfree(phba->vpi_bmask);
7375	phba->vpi_bmask = NULL;
7376 free_rpi_ids:
7377	kfree(phba->sli4_hba.rpi_ids);
7378	phba->sli4_hba.rpi_ids = NULL;
7379 free_rpi_bmask:
7380	kfree(phba->sli4_hba.rpi_bmask);
7381	phba->sli4_hba.rpi_bmask = NULL;
7382 err_exit:
7383	return rc;
7384}
7385
7386/**
7387 * lpfc_sli4_dealloc_resource_identifiers - Deallocate all SLI4 resource extents.
7388 * @phba: Pointer to HBA context object.
7389 *
7390 * This function allocates the number of elements for the specified
7391 * resource type.
7392 **/
7393int
7394lpfc_sli4_dealloc_resource_identifiers(struct lpfc_hba *phba)
7395{
7396	if (phba->sli4_hba.extents_in_use) {
7397		lpfc_sli4_dealloc_extent(phba, LPFC_RSC_TYPE_FCOE_VPI);
7398		lpfc_sli4_dealloc_extent(phba, LPFC_RSC_TYPE_FCOE_RPI);
7399		lpfc_sli4_dealloc_extent(phba, LPFC_RSC_TYPE_FCOE_XRI);
7400		lpfc_sli4_dealloc_extent(phba, LPFC_RSC_TYPE_FCOE_VFI);
7401	} else {
7402		kfree(phba->vpi_bmask);
7403		phba->sli4_hba.max_cfg_param.vpi_used = 0;
7404		kfree(phba->vpi_ids);
7405		bf_set(lpfc_vpi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
7406		kfree(phba->sli4_hba.xri_bmask);
7407		kfree(phba->sli4_hba.xri_ids);
7408		kfree(phba->sli4_hba.vfi_bmask);
7409		kfree(phba->sli4_hba.vfi_ids);
7410		bf_set(lpfc_vfi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
7411		bf_set(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
7412	}
7413
7414	return 0;
7415}
7416
7417/**
7418 * lpfc_sli4_get_allocated_extnts - Get the port's allocated extents.
7419 * @phba: Pointer to HBA context object.
7420 * @type: The resource extent type.
7421 * @extnt_cnt: buffer to hold port extent count response
7422 * @extnt_size: buffer to hold port extent size response.
7423 *
7424 * This function calls the port to read the host allocated extents
7425 * for a particular type.
7426 **/
7427int
7428lpfc_sli4_get_allocated_extnts(struct lpfc_hba *phba, uint16_t type,
7429			       uint16_t *extnt_cnt, uint16_t *extnt_size)
7430{
7431	bool emb;
7432	int rc = 0;
7433	uint16_t curr_blks = 0;
7434	uint32_t req_len, emb_len;
7435	uint32_t alloc_len, mbox_tmo;
7436	struct list_head *blk_list_head;
7437	struct lpfc_rsrc_blks *rsrc_blk;
7438	LPFC_MBOXQ_t *mbox;
7439	void *virtaddr = NULL;
7440	struct lpfc_mbx_nembed_rsrc_extent *n_rsrc;
7441	struct lpfc_mbx_alloc_rsrc_extents *rsrc_ext;
7442	union  lpfc_sli4_cfg_shdr *shdr;
7443
7444	switch (type) {
7445	case LPFC_RSC_TYPE_FCOE_VPI:
7446		blk_list_head = &phba->lpfc_vpi_blk_list;
7447		break;
7448	case LPFC_RSC_TYPE_FCOE_XRI:
7449		blk_list_head = &phba->sli4_hba.lpfc_xri_blk_list;
7450		break;
7451	case LPFC_RSC_TYPE_FCOE_VFI:
7452		blk_list_head = &phba->sli4_hba.lpfc_vfi_blk_list;
7453		break;
7454	case LPFC_RSC_TYPE_FCOE_RPI:
7455		blk_list_head = &phba->sli4_hba.lpfc_rpi_blk_list;
7456		break;
7457	default:
7458		return -EIO;
7459	}
7460
7461	/* Count the number of extents currently allocatd for this type. */
7462	list_for_each_entry(rsrc_blk, blk_list_head, list) {
7463		if (curr_blks == 0) {
7464			/*
7465			 * The GET_ALLOCATED mailbox does not return the size,
7466			 * just the count.  The size should be just the size
7467			 * stored in the current allocated block and all sizes
7468			 * for an extent type are the same so set the return
7469			 * value now.
7470			 */
7471			*extnt_size = rsrc_blk->rsrc_size;
7472		}
7473		curr_blks++;
7474	}
7475
7476	/*
7477	 * Calculate the size of an embedded mailbox.  The uint32_t
7478	 * accounts for extents-specific word.
7479	 */
7480	emb_len = sizeof(MAILBOX_t) - sizeof(struct mbox_header) -
7481		sizeof(uint32_t);
7482
7483	/*
7484	 * Presume the allocation and response will fit into an embedded
7485	 * mailbox.  If not true, reconfigure to a non-embedded mailbox.
7486	 */
7487	emb = LPFC_SLI4_MBX_EMBED;
7488	req_len = emb_len;
7489	if (req_len > emb_len) {
7490		req_len = curr_blks * sizeof(uint16_t) +
7491			sizeof(union lpfc_sli4_cfg_shdr) +
7492			sizeof(uint32_t);
7493		emb = LPFC_SLI4_MBX_NEMBED;
7494	}
7495
7496	mbox = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
7497	if (!mbox)
7498		return -ENOMEM;
7499	memset(mbox, 0, sizeof(LPFC_MBOXQ_t));
7500
7501	alloc_len = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
7502				     LPFC_MBOX_OPCODE_GET_ALLOC_RSRC_EXTENT,
7503				     req_len, emb);
7504	if (alloc_len < req_len) {
7505		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
7506			"2983 Allocated DMA memory size (x%x) is "
7507			"less than the requested DMA memory "
7508			"size (x%x)\n", alloc_len, req_len);
7509		rc = -ENOMEM;
7510		goto err_exit;
7511	}
7512	rc = lpfc_sli4_mbox_rsrc_extent(phba, mbox, curr_blks, type, emb);
7513	if (unlikely(rc)) {
7514		rc = -EIO;
7515		goto err_exit;
7516	}
7517
7518	if (!phba->sli4_hba.intr_enable)
7519		rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
7520	else {
7521		mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
7522		rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
7523	}
7524
7525	if (unlikely(rc)) {
7526		rc = -EIO;
7527		goto err_exit;
7528	}
7529
7530	/*
7531	 * Figure out where the response is located.  Then get local pointers
7532	 * to the response data.  The port does not guarantee to respond to
7533	 * all extents counts request so update the local variable with the
7534	 * allocated count from the port.
7535	 */
7536	if (emb == LPFC_SLI4_MBX_EMBED) {
7537		rsrc_ext = &mbox->u.mqe.un.alloc_rsrc_extents;
7538		shdr = &rsrc_ext->header.cfg_shdr;
7539		*extnt_cnt = bf_get(lpfc_mbx_rsrc_cnt, &rsrc_ext->u.rsp);
7540	} else {
7541		virtaddr = mbox->sge_array->addr[0];
7542		n_rsrc = (struct lpfc_mbx_nembed_rsrc_extent *) virtaddr;
7543		shdr = &n_rsrc->cfg_shdr;
7544		*extnt_cnt = bf_get(lpfc_mbx_rsrc_cnt, n_rsrc);
7545	}
7546
7547	if (bf_get(lpfc_mbox_hdr_status, &shdr->response)) {
7548		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
7549			"2984 Failed to read allocated resources "
7550			"for type %d - Status 0x%x Add'l Status 0x%x.\n",
7551			type,
7552			bf_get(lpfc_mbox_hdr_status, &shdr->response),
7553			bf_get(lpfc_mbox_hdr_add_status, &shdr->response));
7554		rc = -EIO;
7555		goto err_exit;
7556	}
7557 err_exit:
7558	lpfc_sli4_mbox_cmd_free(phba, mbox);
7559	return rc;
7560}
7561
7562/**
7563 * lpfc_sli4_repost_sgl_list - Repost the buffers sgl pages as block
7564 * @phba: pointer to lpfc hba data structure.
7565 * @sgl_list: linked link of sgl buffers to post
7566 * @cnt: number of linked list buffers
7567 *
7568 * This routine walks the list of buffers that have been allocated and
7569 * repost them to the port by using SGL block post. This is needed after a
7570 * pci_function_reset/warm_start or start. It attempts to construct blocks
7571 * of buffer sgls which contains contiguous xris and uses the non-embedded
7572 * SGL block post mailbox commands to post them to the port. For single
7573 * buffer sgl with non-contiguous xri, if any, it shall use embedded SGL post
7574 * mailbox command for posting.
7575 *
7576 * Returns: 0 = success, non-zero failure.
7577 **/
7578static int
7579lpfc_sli4_repost_sgl_list(struct lpfc_hba *phba,
7580			  struct list_head *sgl_list, int cnt)
7581{
7582	struct lpfc_sglq *sglq_entry = NULL;
7583	struct lpfc_sglq *sglq_entry_next = NULL;
7584	struct lpfc_sglq *sglq_entry_first = NULL;
7585	int status = 0, total_cnt;
7586	int post_cnt = 0, num_posted = 0, block_cnt = 0;
7587	int last_xritag = NO_XRI;
7588	LIST_HEAD(prep_sgl_list);
7589	LIST_HEAD(blck_sgl_list);
7590	LIST_HEAD(allc_sgl_list);
7591	LIST_HEAD(post_sgl_list);
7592	LIST_HEAD(free_sgl_list);
7593
7594	spin_lock_irq(&phba->hbalock);
7595	spin_lock(&phba->sli4_hba.sgl_list_lock);
7596	list_splice_init(sgl_list, &allc_sgl_list);
7597	spin_unlock(&phba->sli4_hba.sgl_list_lock);
7598	spin_unlock_irq(&phba->hbalock);
7599
7600	total_cnt = cnt;
7601	list_for_each_entry_safe(sglq_entry, sglq_entry_next,
7602				 &allc_sgl_list, list) {
7603		list_del_init(&sglq_entry->list);
7604		block_cnt++;
7605		if ((last_xritag != NO_XRI) &&
7606		    (sglq_entry->sli4_xritag != last_xritag + 1)) {
7607			/* a hole in xri block, form a sgl posting block */
7608			list_splice_init(&prep_sgl_list, &blck_sgl_list);
7609			post_cnt = block_cnt - 1;
7610			/* prepare list for next posting block */
7611			list_add_tail(&sglq_entry->list, &prep_sgl_list);
7612			block_cnt = 1;
7613		} else {
7614			/* prepare list for next posting block */
7615			list_add_tail(&sglq_entry->list, &prep_sgl_list);
7616			/* enough sgls for non-embed sgl mbox command */
7617			if (block_cnt == LPFC_NEMBED_MBOX_SGL_CNT) {
7618				list_splice_init(&prep_sgl_list,
7619						 &blck_sgl_list);
7620				post_cnt = block_cnt;
7621				block_cnt = 0;
7622			}
7623		}
7624		num_posted++;
7625
7626		/* keep track of last sgl's xritag */
7627		last_xritag = sglq_entry->sli4_xritag;
7628
7629		/* end of repost sgl list condition for buffers */
7630		if (num_posted == total_cnt) {
7631			if (post_cnt == 0) {
7632				list_splice_init(&prep_sgl_list,
7633						 &blck_sgl_list);
7634				post_cnt = block_cnt;
7635			} else if (block_cnt == 1) {
7636				status = lpfc_sli4_post_sgl(phba,
7637						sglq_entry->phys, 0,
7638						sglq_entry->sli4_xritag);
7639				if (!status) {
7640					/* successful, put sgl to posted list */
7641					list_add_tail(&sglq_entry->list,
7642						      &post_sgl_list);
7643				} else {
7644					/* Failure, put sgl to free list */
7645					lpfc_printf_log(phba, KERN_WARNING,
7646						LOG_SLI,
7647						"3159 Failed to post "
7648						"sgl, xritag:x%x\n",
7649						sglq_entry->sli4_xritag);
7650					list_add_tail(&sglq_entry->list,
7651						      &free_sgl_list);
7652					total_cnt--;
7653				}
7654			}
7655		}
7656
7657		/* continue until a nembed page worth of sgls */
7658		if (post_cnt == 0)
7659			continue;
7660
7661		/* post the buffer list sgls as a block */
7662		status = lpfc_sli4_post_sgl_list(phba, &blck_sgl_list,
7663						 post_cnt);
7664
7665		if (!status) {
7666			/* success, put sgl list to posted sgl list */
7667			list_splice_init(&blck_sgl_list, &post_sgl_list);
7668		} else {
7669			/* Failure, put sgl list to free sgl list */
7670			sglq_entry_first = list_first_entry(&blck_sgl_list,
7671							    struct lpfc_sglq,
7672							    list);
7673			lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
7674					"3160 Failed to post sgl-list, "
7675					"xritag:x%x-x%x\n",
7676					sglq_entry_first->sli4_xritag,
7677					(sglq_entry_first->sli4_xritag +
7678					 post_cnt - 1));
7679			list_splice_init(&blck_sgl_list, &free_sgl_list);
7680			total_cnt -= post_cnt;
7681		}
7682
7683		/* don't reset xirtag due to hole in xri block */
7684		if (block_cnt == 0)
7685			last_xritag = NO_XRI;
7686
7687		/* reset sgl post count for next round of posting */
7688		post_cnt = 0;
7689	}
7690
7691	/* free the sgls failed to post */
7692	lpfc_free_sgl_list(phba, &free_sgl_list);
7693
7694	/* push sgls posted to the available list */
7695	if (!list_empty(&post_sgl_list)) {
7696		spin_lock_irq(&phba->hbalock);
7697		spin_lock(&phba->sli4_hba.sgl_list_lock);
7698		list_splice_init(&post_sgl_list, sgl_list);
7699		spin_unlock(&phba->sli4_hba.sgl_list_lock);
7700		spin_unlock_irq(&phba->hbalock);
7701	} else {
7702		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
7703				"3161 Failure to post sgl to port,status %x "
7704				"blkcnt %d totalcnt %d postcnt %d\n",
7705				status, block_cnt, total_cnt, post_cnt);
7706		return -EIO;
7707	}
7708
7709	/* return the number of XRIs actually posted */
7710	return total_cnt;
7711}
7712
7713/**
7714 * lpfc_sli4_repost_io_sgl_list - Repost all the allocated nvme buffer sgls
7715 * @phba: pointer to lpfc hba data structure.
7716 *
7717 * This routine walks the list of nvme buffers that have been allocated and
7718 * repost them to the port by using SGL block post. This is needed after a
7719 * pci_function_reset/warm_start or start. The lpfc_hba_down_post_s4 routine
7720 * is responsible for moving all nvme buffers on the lpfc_abts_nvme_sgl_list
7721 * to the lpfc_io_buf_list. If the repost fails, reject all nvme buffers.
7722 *
7723 * Returns: 0 = success, non-zero failure.
7724 **/
7725static int
7726lpfc_sli4_repost_io_sgl_list(struct lpfc_hba *phba)
7727{
7728	LIST_HEAD(post_nblist);
7729	int num_posted, rc = 0;
7730
7731	/* get all NVME buffers need to repost to a local list */
7732	lpfc_io_buf_flush(phba, &post_nblist);
7733
7734	/* post the list of nvme buffer sgls to port if available */
7735	if (!list_empty(&post_nblist)) {
7736		num_posted = lpfc_sli4_post_io_sgl_list(
7737			phba, &post_nblist, phba->sli4_hba.io_xri_cnt);
7738		/* failed to post any nvme buffer, return error */
7739		if (num_posted == 0)
7740			rc = -EIO;
7741	}
7742	return rc;
7743}
7744
7745static void
7746lpfc_set_host_data(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox)
7747{
7748	uint32_t len;
7749
7750	len = sizeof(struct lpfc_mbx_set_host_data) -
7751		sizeof(struct lpfc_sli4_cfg_mhdr);
7752	lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
7753			 LPFC_MBOX_OPCODE_SET_HOST_DATA, len,
7754			 LPFC_SLI4_MBX_EMBED);
7755
7756	mbox->u.mqe.un.set_host_data.param_id = LPFC_SET_HOST_OS_DRIVER_VERSION;
7757	mbox->u.mqe.un.set_host_data.param_len =
7758					LPFC_HOST_OS_DRIVER_VERSION_SIZE;
7759	snprintf(mbox->u.mqe.un.set_host_data.un.data,
7760		 LPFC_HOST_OS_DRIVER_VERSION_SIZE,
7761		 "Linux %s v"LPFC_DRIVER_VERSION,
7762		 (phba->hba_flag & HBA_FCOE_MODE) ? "FCoE" : "FC");
7763}
7764
7765int
7766lpfc_post_rq_buffer(struct lpfc_hba *phba, struct lpfc_queue *hrq,
7767		    struct lpfc_queue *drq, int count, int idx)
7768{
7769	int rc, i;
7770	struct lpfc_rqe hrqe;
7771	struct lpfc_rqe drqe;
7772	struct lpfc_rqb *rqbp;
7773	unsigned long flags;
7774	struct rqb_dmabuf *rqb_buffer;
7775	LIST_HEAD(rqb_buf_list);
7776
7777	rqbp = hrq->rqbp;
7778	for (i = 0; i < count; i++) {
7779		spin_lock_irqsave(&phba->hbalock, flags);
7780		/* IF RQ is already full, don't bother */
7781		if (rqbp->buffer_count + i >= rqbp->entry_count - 1) {
7782			spin_unlock_irqrestore(&phba->hbalock, flags);
7783			break;
7784		}
7785		spin_unlock_irqrestore(&phba->hbalock, flags);
7786
7787		rqb_buffer = rqbp->rqb_alloc_buffer(phba);
7788		if (!rqb_buffer)
7789			break;
7790		rqb_buffer->hrq = hrq;
7791		rqb_buffer->drq = drq;
7792		rqb_buffer->idx = idx;
7793		list_add_tail(&rqb_buffer->hbuf.list, &rqb_buf_list);
7794	}
7795
7796	spin_lock_irqsave(&phba->hbalock, flags);
7797	while (!list_empty(&rqb_buf_list)) {
7798		list_remove_head(&rqb_buf_list, rqb_buffer, struct rqb_dmabuf,
7799				 hbuf.list);
7800
7801		hrqe.address_lo = putPaddrLow(rqb_buffer->hbuf.phys);
7802		hrqe.address_hi = putPaddrHigh(rqb_buffer->hbuf.phys);
7803		drqe.address_lo = putPaddrLow(rqb_buffer->dbuf.phys);
7804		drqe.address_hi = putPaddrHigh(rqb_buffer->dbuf.phys);
7805		rc = lpfc_sli4_rq_put(hrq, drq, &hrqe, &drqe);
7806		if (rc < 0) {
7807			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
7808					"6421 Cannot post to HRQ %d: %x %x %x "
7809					"DRQ %x %x\n",
7810					hrq->queue_id,
7811					hrq->host_index,
7812					hrq->hba_index,
7813					hrq->entry_count,
7814					drq->host_index,
7815					drq->hba_index);
7816			rqbp->rqb_free_buffer(phba, rqb_buffer);
7817		} else {
7818			list_add_tail(&rqb_buffer->hbuf.list,
7819				      &rqbp->rqb_buffer_list);
7820			rqbp->buffer_count++;
7821		}
7822	}
7823	spin_unlock_irqrestore(&phba->hbalock, flags);
7824	return 1;
7825}
7826
7827static void
7828lpfc_mbx_cmpl_read_lds_params(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
7829{
7830	union lpfc_sli4_cfg_shdr *shdr;
7831	u32 shdr_status, shdr_add_status;
7832
7833	shdr = (union lpfc_sli4_cfg_shdr *)
7834		&pmb->u.mqe.un.sli4_config.header.cfg_shdr;
7835	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
7836	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
7837	if (shdr_status || shdr_add_status || pmb->u.mb.mbxStatus) {
7838		lpfc_printf_log(phba, KERN_INFO, LOG_LDS_EVENT | LOG_MBOX,
7839				"4622 SET_FEATURE (x%x) mbox failed, "
7840				"status x%x add_status x%x, mbx status x%x\n",
7841				LPFC_SET_LD_SIGNAL, shdr_status,
7842				shdr_add_status, pmb->u.mb.mbxStatus);
7843		phba->degrade_activate_threshold = 0;
7844		phba->degrade_deactivate_threshold = 0;
7845		phba->fec_degrade_interval = 0;
7846		goto out;
7847	}
7848
7849	phba->degrade_activate_threshold = pmb->u.mqe.un.set_feature.word7;
7850	phba->degrade_deactivate_threshold = pmb->u.mqe.un.set_feature.word8;
7851	phba->fec_degrade_interval = pmb->u.mqe.un.set_feature.word10;
7852
7853	lpfc_printf_log(phba, KERN_INFO, LOG_LDS_EVENT,
7854			"4624 Success: da x%x dd x%x interval x%x\n",
7855			phba->degrade_activate_threshold,
7856			phba->degrade_deactivate_threshold,
7857			phba->fec_degrade_interval);
7858out:
7859	mempool_free(pmb, phba->mbox_mem_pool);
7860}
7861
7862int
7863lpfc_read_lds_params(struct lpfc_hba *phba)
7864{
7865	LPFC_MBOXQ_t *mboxq;
7866	int rc;
7867
7868	mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
7869	if (!mboxq)
7870		return -ENOMEM;
7871
7872	lpfc_set_features(phba, mboxq, LPFC_SET_LD_SIGNAL);
7873	mboxq->vport = phba->pport;
7874	mboxq->mbox_cmpl = lpfc_mbx_cmpl_read_lds_params;
7875	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
7876	if (rc == MBX_NOT_FINISHED) {
7877		mempool_free(mboxq, phba->mbox_mem_pool);
7878		return -EIO;
7879	}
7880	return 0;
7881}
7882
7883static void
7884lpfc_mbx_cmpl_cgn_set_ftrs(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
7885{
7886	struct lpfc_vport *vport = pmb->vport;
7887	union lpfc_sli4_cfg_shdr *shdr;
7888	u32 shdr_status, shdr_add_status;
7889	u32 sig, acqe;
7890
7891	/* Two outcomes. (1) Set featurs was successul and EDC negotiation
7892	 * is done. (2) Mailbox failed and send FPIN support only.
7893	 */
7894	shdr = (union lpfc_sli4_cfg_shdr *)
7895		&pmb->u.mqe.un.sli4_config.header.cfg_shdr;
7896	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
7897	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
7898	if (shdr_status || shdr_add_status || pmb->u.mb.mbxStatus) {
7899		lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_CGN_MGMT,
7900				"2516 CGN SET_FEATURE mbox failed with "
7901				"status x%x add_status x%x, mbx status x%x "
7902				"Reset Congestion to FPINs only\n",
7903				shdr_status, shdr_add_status,
7904				pmb->u.mb.mbxStatus);
7905		/* If there is a mbox error, move on to RDF */
7906		phba->cgn_reg_signal = EDC_CG_SIG_NOTSUPPORTED;
7907		phba->cgn_reg_fpin = LPFC_CGN_FPIN_WARN | LPFC_CGN_FPIN_ALARM;
7908		goto out;
7909	}
7910
7911	/* Zero out Congestion Signal ACQE counter */
7912	phba->cgn_acqe_cnt = 0;
7913
7914	acqe = bf_get(lpfc_mbx_set_feature_CGN_acqe_freq,
7915		      &pmb->u.mqe.un.set_feature);
7916	sig = bf_get(lpfc_mbx_set_feature_CGN_warn_freq,
7917		     &pmb->u.mqe.un.set_feature);
7918	lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
7919			"4620 SET_FEATURES Success: Freq: %ds %dms "
7920			" Reg: x%x x%x\n", acqe, sig,
7921			phba->cgn_reg_signal, phba->cgn_reg_fpin);
7922out:
7923	mempool_free(pmb, phba->mbox_mem_pool);
7924
7925	/* Register for FPIN events from the fabric now that the
7926	 * EDC common_set_features has completed.
7927	 */
7928	lpfc_issue_els_rdf(vport, 0);
7929}
7930
7931int
7932lpfc_config_cgn_signal(struct lpfc_hba *phba)
7933{
7934	LPFC_MBOXQ_t *mboxq;
7935	u32 rc;
7936
7937	mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
7938	if (!mboxq)
7939		goto out_rdf;
7940
7941	lpfc_set_features(phba, mboxq, LPFC_SET_CGN_SIGNAL);
7942	mboxq->vport = phba->pport;
7943	mboxq->mbox_cmpl = lpfc_mbx_cmpl_cgn_set_ftrs;
7944
7945	lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
7946			"4621 SET_FEATURES: FREQ sig x%x acqe x%x: "
7947			"Reg: x%x x%x\n",
7948			phba->cgn_sig_freq, lpfc_acqe_cgn_frequency,
7949			phba->cgn_reg_signal, phba->cgn_reg_fpin);
7950
7951	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
7952	if (rc == MBX_NOT_FINISHED)
7953		goto out;
7954	return 0;
7955
7956out:
7957	mempool_free(mboxq, phba->mbox_mem_pool);
7958out_rdf:
7959	/* If there is a mbox error, move on to RDF */
7960	phba->cgn_reg_fpin = LPFC_CGN_FPIN_WARN | LPFC_CGN_FPIN_ALARM;
7961	phba->cgn_reg_signal = EDC_CG_SIG_NOTSUPPORTED;
7962	lpfc_issue_els_rdf(phba->pport, 0);
7963	return -EIO;
7964}
7965
7966/**
7967 * lpfc_init_idle_stat_hb - Initialize idle_stat tracking
7968 * @phba: pointer to lpfc hba data structure.
7969 *
7970 * This routine initializes the per-eq idle_stat to dynamically dictate
7971 * polling decisions.
7972 *
7973 * Return codes:
7974 *   None
7975 **/
7976static void lpfc_init_idle_stat_hb(struct lpfc_hba *phba)
7977{
7978	int i;
7979	struct lpfc_sli4_hdw_queue *hdwq;
7980	struct lpfc_queue *eq;
7981	struct lpfc_idle_stat *idle_stat;
7982	u64 wall;
7983
7984	for_each_present_cpu(i) {
7985		hdwq = &phba->sli4_hba.hdwq[phba->sli4_hba.cpu_map[i].hdwq];
7986		eq = hdwq->hba_eq;
7987
7988		/* Skip if we've already handled this eq's primary CPU */
7989		if (eq->chann != i)
7990			continue;
7991
7992		idle_stat = &phba->sli4_hba.idle_stat[i];
7993
7994		idle_stat->prev_idle = get_cpu_idle_time(i, &wall, 1);
7995		idle_stat->prev_wall = wall;
7996
7997		if (phba->nvmet_support ||
7998		    phba->cmf_active_mode != LPFC_CFG_OFF ||
7999		    phba->intr_type != MSIX)
8000			eq->poll_mode = LPFC_QUEUE_WORK;
8001		else
8002			eq->poll_mode = LPFC_THREADED_IRQ;
8003	}
8004
8005	if (!phba->nvmet_support && phba->intr_type == MSIX)
8006		schedule_delayed_work(&phba->idle_stat_delay_work,
8007				      msecs_to_jiffies(LPFC_IDLE_STAT_DELAY));
8008}
8009
8010static void lpfc_sli4_dip(struct lpfc_hba *phba)
8011{
8012	uint32_t if_type;
8013
8014	if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
8015	if (if_type == LPFC_SLI_INTF_IF_TYPE_2 ||
8016	    if_type == LPFC_SLI_INTF_IF_TYPE_6) {
8017		struct lpfc_register reg_data;
8018
8019		if (lpfc_readl(phba->sli4_hba.u.if_type2.STATUSregaddr,
8020			       &reg_data.word0))
8021			return;
8022
8023		if (bf_get(lpfc_sliport_status_dip, &reg_data))
8024			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
8025					"2904 Firmware Dump Image Present"
8026					" on Adapter");
8027	}
8028}
8029
8030/**
8031 * lpfc_rx_monitor_create_ring - Initialize ring buffer for rx_monitor
8032 * @rx_monitor: Pointer to lpfc_rx_info_monitor object
8033 * @entries: Number of rx_info_entry objects to allocate in ring
8034 *
8035 * Return:
8036 * 0 - Success
8037 * ENOMEM - Failure to kmalloc
8038 **/
8039int lpfc_rx_monitor_create_ring(struct lpfc_rx_info_monitor *rx_monitor,
8040				u32 entries)
8041{
8042	rx_monitor->ring = kmalloc_array(entries, sizeof(struct rx_info_entry),
8043					 GFP_KERNEL);
8044	if (!rx_monitor->ring)
8045		return -ENOMEM;
8046
8047	rx_monitor->head_idx = 0;
8048	rx_monitor->tail_idx = 0;
8049	spin_lock_init(&rx_monitor->lock);
8050	rx_monitor->entries = entries;
8051
8052	return 0;
8053}
8054
8055/**
8056 * lpfc_rx_monitor_destroy_ring - Free ring buffer for rx_monitor
8057 * @rx_monitor: Pointer to lpfc_rx_info_monitor object
8058 *
8059 * Called after cancellation of cmf_timer.
8060 **/
8061void lpfc_rx_monitor_destroy_ring(struct lpfc_rx_info_monitor *rx_monitor)
8062{
8063	kfree(rx_monitor->ring);
8064	rx_monitor->ring = NULL;
8065	rx_monitor->entries = 0;
8066	rx_monitor->head_idx = 0;
8067	rx_monitor->tail_idx = 0;
8068}
8069
8070/**
8071 * lpfc_rx_monitor_record - Insert an entry into rx_monitor's ring
8072 * @rx_monitor: Pointer to lpfc_rx_info_monitor object
8073 * @entry: Pointer to rx_info_entry
8074 *
8075 * Used to insert an rx_info_entry into rx_monitor's ring.  Note that this is a
8076 * deep copy of rx_info_entry not a shallow copy of the rx_info_entry ptr.
8077 *
8078 * This is called from lpfc_cmf_timer, which is in timer/softirq context.
8079 *
8080 * In cases of old data overflow, we do a best effort of FIFO order.
8081 **/
8082void lpfc_rx_monitor_record(struct lpfc_rx_info_monitor *rx_monitor,
8083			    struct rx_info_entry *entry)
8084{
8085	struct rx_info_entry *ring = rx_monitor->ring;
8086	u32 *head_idx = &rx_monitor->head_idx;
8087	u32 *tail_idx = &rx_monitor->tail_idx;
8088	spinlock_t *ring_lock = &rx_monitor->lock;
8089	u32 ring_size = rx_monitor->entries;
8090
8091	spin_lock(ring_lock);
8092	memcpy(&ring[*tail_idx], entry, sizeof(*entry));
8093	*tail_idx = (*tail_idx + 1) % ring_size;
8094
8095	/* Best effort of FIFO saved data */
8096	if (*tail_idx == *head_idx)
8097		*head_idx = (*head_idx + 1) % ring_size;
8098
8099	spin_unlock(ring_lock);
8100}
8101
8102/**
8103 * lpfc_rx_monitor_report - Read out rx_monitor's ring
8104 * @phba: Pointer to lpfc_hba object
8105 * @rx_monitor: Pointer to lpfc_rx_info_monitor object
8106 * @buf: Pointer to char buffer that will contain rx monitor info data
8107 * @buf_len: Length buf including null char
8108 * @max_read_entries: Maximum number of entries to read out of ring
8109 *
8110 * Used to dump/read what's in rx_monitor's ring buffer.
8111 *
8112 * If buf is NULL || buf_len == 0, then it is implied that we want to log the
8113 * information to kmsg instead of filling out buf.
8114 *
8115 * Return:
8116 * Number of entries read out of the ring
8117 **/
8118u32 lpfc_rx_monitor_report(struct lpfc_hba *phba,
8119			   struct lpfc_rx_info_monitor *rx_monitor, char *buf,
8120			   u32 buf_len, u32 max_read_entries)
8121{
8122	struct rx_info_entry *ring = rx_monitor->ring;
8123	struct rx_info_entry *entry;
8124	u32 *head_idx = &rx_monitor->head_idx;
8125	u32 *tail_idx = &rx_monitor->tail_idx;
8126	spinlock_t *ring_lock = &rx_monitor->lock;
8127	u32 ring_size = rx_monitor->entries;
8128	u32 cnt = 0;
8129	char tmp[DBG_LOG_STR_SZ] = {0};
8130	bool log_to_kmsg = (!buf || !buf_len) ? true : false;
8131
8132	if (!log_to_kmsg) {
8133		/* clear the buffer to be sure */
8134		memset(buf, 0, buf_len);
8135
8136		scnprintf(buf, buf_len, "\t%-16s%-16s%-16s%-16s%-8s%-8s%-8s"
8137					"%-8s%-8s%-8s%-16s\n",
8138					"MaxBPI", "Tot_Data_CMF",
8139					"Tot_Data_Cmd", "Tot_Data_Cmpl",
8140					"Lat(us)", "Avg_IO", "Max_IO", "Bsy",
8141					"IO_cnt", "Info", "BWutil(ms)");
8142	}
8143
8144	/* Needs to be _irq because record is called from timer interrupt
8145	 * context
8146	 */
8147	spin_lock_irq(ring_lock);
8148	while (*head_idx != *tail_idx) {
8149		entry = &ring[*head_idx];
8150
8151		/* Read out this entry's data. */
8152		if (!log_to_kmsg) {
8153			/* If !log_to_kmsg, then store to buf. */
8154			scnprintf(tmp, sizeof(tmp),
8155				  "%03d:\t%-16llu%-16llu%-16llu%-16llu%-8llu"
8156				  "%-8llu%-8llu%-8u%-8u%-8u%u(%u)\n",
8157				  *head_idx, entry->max_bytes_per_interval,
8158				  entry->cmf_bytes, entry->total_bytes,
8159				  entry->rcv_bytes, entry->avg_io_latency,
8160				  entry->avg_io_size, entry->max_read_cnt,
8161				  entry->cmf_busy, entry->io_cnt,
8162				  entry->cmf_info, entry->timer_utilization,
8163				  entry->timer_interval);
8164
8165			/* Check for buffer overflow */
8166			if ((strlen(buf) + strlen(tmp)) >= buf_len)
8167				break;
8168
8169			/* Append entry's data to buffer */
8170			strlcat(buf, tmp, buf_len);
8171		} else {
8172			lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
8173					"4410 %02u: MBPI %llu Xmit %llu "
8174					"Cmpl %llu Lat %llu ASz %llu Info %02u "
8175					"BWUtil %u Int %u slot %u\n",
8176					cnt, entry->max_bytes_per_interval,
8177					entry->total_bytes, entry->rcv_bytes,
8178					entry->avg_io_latency,
8179					entry->avg_io_size, entry->cmf_info,
8180					entry->timer_utilization,
8181					entry->timer_interval, *head_idx);
8182		}
8183
8184		*head_idx = (*head_idx + 1) % ring_size;
8185
8186		/* Don't feed more than max_read_entries */
8187		cnt++;
8188		if (cnt >= max_read_entries)
8189			break;
8190	}
8191	spin_unlock_irq(ring_lock);
8192
8193	return cnt;
8194}
8195
8196/**
8197 * lpfc_cmf_setup - Initialize idle_stat tracking
8198 * @phba: Pointer to HBA context object.
8199 *
8200 * This is called from HBA setup during driver load or when the HBA
8201 * comes online. this does all the initialization to support CMF and MI.
8202 **/
8203static int
8204lpfc_cmf_setup(struct lpfc_hba *phba)
8205{
8206	LPFC_MBOXQ_t *mboxq;
8207	struct lpfc_dmabuf *mp;
8208	struct lpfc_pc_sli4_params *sli4_params;
8209	int rc, cmf, mi_ver;
8210
8211	rc = lpfc_sli4_refresh_params(phba);
8212	if (unlikely(rc))
8213		return rc;
8214
8215	mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
8216	if (!mboxq)
8217		return -ENOMEM;
8218
8219	sli4_params = &phba->sli4_hba.pc_sli4_params;
8220
8221	/* Always try to enable MI feature if we can */
8222	if (sli4_params->mi_ver) {
8223		lpfc_set_features(phba, mboxq, LPFC_SET_ENABLE_MI);
8224		rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
8225		mi_ver = bf_get(lpfc_mbx_set_feature_mi,
8226				 &mboxq->u.mqe.un.set_feature);
8227
8228		if (rc == MBX_SUCCESS) {
8229			if (mi_ver) {
8230				lpfc_printf_log(phba,
8231						KERN_WARNING, LOG_CGN_MGMT,
8232						"6215 MI is enabled\n");
8233				sli4_params->mi_ver = mi_ver;
8234			} else {
8235				lpfc_printf_log(phba,
8236						KERN_WARNING, LOG_CGN_MGMT,
8237						"6338 MI is disabled\n");
8238				sli4_params->mi_ver = 0;
8239			}
8240		} else {
8241			/* mi_ver is already set from GET_SLI4_PARAMETERS */
8242			lpfc_printf_log(phba, KERN_INFO,
8243					LOG_CGN_MGMT | LOG_INIT,
8244					"6245 Enable MI Mailbox x%x (x%x/x%x) "
8245					"failed, rc:x%x mi:x%x\n",
8246					bf_get(lpfc_mqe_command, &mboxq->u.mqe),
8247					lpfc_sli_config_mbox_subsys_get
8248						(phba, mboxq),
8249					lpfc_sli_config_mbox_opcode_get
8250						(phba, mboxq),
8251					rc, sli4_params->mi_ver);
8252		}
8253	} else {
8254		lpfc_printf_log(phba, KERN_WARNING, LOG_CGN_MGMT,
8255				"6217 MI is disabled\n");
8256	}
8257
8258	/* Ensure FDMI is enabled for MI if enable_mi is set */
8259	if (sli4_params->mi_ver)
8260		phba->cfg_fdmi_on = LPFC_FDMI_SUPPORT;
8261
8262	/* Always try to enable CMF feature if we can */
8263	if (sli4_params->cmf) {
8264		lpfc_set_features(phba, mboxq, LPFC_SET_ENABLE_CMF);
8265		rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
8266		cmf = bf_get(lpfc_mbx_set_feature_cmf,
8267			     &mboxq->u.mqe.un.set_feature);
8268		if (rc == MBX_SUCCESS && cmf) {
8269			lpfc_printf_log(phba, KERN_WARNING, LOG_CGN_MGMT,
8270					"6218 CMF is enabled: mode %d\n",
8271					phba->cmf_active_mode);
8272		} else {
8273			lpfc_printf_log(phba, KERN_WARNING,
8274					LOG_CGN_MGMT | LOG_INIT,
8275					"6219 Enable CMF Mailbox x%x (x%x/x%x) "
8276					"failed, rc:x%x dd:x%x\n",
8277					bf_get(lpfc_mqe_command, &mboxq->u.mqe),
8278					lpfc_sli_config_mbox_subsys_get
8279						(phba, mboxq),
8280					lpfc_sli_config_mbox_opcode_get
8281						(phba, mboxq),
8282					rc, cmf);
8283			sli4_params->cmf = 0;
8284			phba->cmf_active_mode = LPFC_CFG_OFF;
8285			goto no_cmf;
8286		}
8287
8288		/* Allocate Congestion Information Buffer */
8289		if (!phba->cgn_i) {
8290			mp = kmalloc(sizeof(*mp), GFP_KERNEL);
8291			if (mp)
8292				mp->virt = dma_alloc_coherent
8293						(&phba->pcidev->dev,
8294						sizeof(struct lpfc_cgn_info),
8295						&mp->phys, GFP_KERNEL);
8296			if (!mp || !mp->virt) {
8297				lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8298						"2640 Failed to alloc memory "
8299						"for Congestion Info\n");
8300				kfree(mp);
8301				sli4_params->cmf = 0;
8302				phba->cmf_active_mode = LPFC_CFG_OFF;
8303				goto no_cmf;
8304			}
8305			phba->cgn_i = mp;
8306
8307			/* initialize congestion buffer info */
8308			lpfc_init_congestion_buf(phba);
8309			lpfc_init_congestion_stat(phba);
8310
8311			/* Zero out Congestion Signal counters */
8312			atomic64_set(&phba->cgn_acqe_stat.alarm, 0);
8313			atomic64_set(&phba->cgn_acqe_stat.warn, 0);
8314		}
8315
8316		rc = lpfc_sli4_cgn_params_read(phba);
8317		if (rc < 0) {
8318			lpfc_printf_log(phba, KERN_ERR, LOG_CGN_MGMT | LOG_INIT,
8319					"6242 Error reading Cgn Params (%d)\n",
8320					rc);
8321			/* Ensure CGN Mode is off */
8322			sli4_params->cmf = 0;
8323		} else if (!rc) {
8324			lpfc_printf_log(phba, KERN_ERR, LOG_CGN_MGMT | LOG_INIT,
8325					"6243 CGN Event empty object.\n");
8326			/* Ensure CGN Mode is off */
8327			sli4_params->cmf = 0;
8328		}
8329	} else {
8330no_cmf:
8331		lpfc_printf_log(phba, KERN_WARNING, LOG_CGN_MGMT,
8332				"6220 CMF is disabled\n");
8333	}
8334
8335	/* Only register congestion buffer with firmware if BOTH
8336	 * CMF and E2E are enabled.
8337	 */
8338	if (sli4_params->cmf && sli4_params->mi_ver) {
8339		rc = lpfc_reg_congestion_buf(phba);
8340		if (rc) {
8341			dma_free_coherent(&phba->pcidev->dev,
8342					  sizeof(struct lpfc_cgn_info),
8343					  phba->cgn_i->virt, phba->cgn_i->phys);
8344			kfree(phba->cgn_i);
8345			phba->cgn_i = NULL;
8346			/* Ensure CGN Mode is off */
8347			phba->cmf_active_mode = LPFC_CFG_OFF;
8348			sli4_params->cmf = 0;
8349			return 0;
8350		}
8351	}
8352	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
8353			"6470 Setup MI version %d CMF %d mode %d\n",
8354			sli4_params->mi_ver, sli4_params->cmf,
8355			phba->cmf_active_mode);
8356
8357	mempool_free(mboxq, phba->mbox_mem_pool);
8358
8359	/* Initialize atomic counters */
8360	atomic_set(&phba->cgn_fabric_warn_cnt, 0);
8361	atomic_set(&phba->cgn_fabric_alarm_cnt, 0);
8362	atomic_set(&phba->cgn_sync_alarm_cnt, 0);
8363	atomic_set(&phba->cgn_sync_warn_cnt, 0);
8364	atomic_set(&phba->cgn_driver_evt_cnt, 0);
8365	atomic_set(&phba->cgn_latency_evt_cnt, 0);
8366	atomic64_set(&phba->cgn_latency_evt, 0);
8367
8368	phba->cmf_interval_rate = LPFC_CMF_INTERVAL;
8369
8370	/* Allocate RX Monitor Buffer */
8371	if (!phba->rx_monitor) {
8372		phba->rx_monitor = kzalloc(sizeof(*phba->rx_monitor),
8373					   GFP_KERNEL);
8374
8375		if (!phba->rx_monitor) {
8376			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8377					"2644 Failed to alloc memory "
8378					"for RX Monitor Buffer\n");
8379			return -ENOMEM;
8380		}
8381
8382		/* Instruct the rx_monitor object to instantiate its ring */
8383		if (lpfc_rx_monitor_create_ring(phba->rx_monitor,
8384						LPFC_MAX_RXMONITOR_ENTRY)) {
8385			kfree(phba->rx_monitor);
8386			phba->rx_monitor = NULL;
8387			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8388					"2645 Failed to alloc memory "
8389					"for RX Monitor's Ring\n");
8390			return -ENOMEM;
8391		}
8392	}
8393
8394	return 0;
8395}
8396
8397static int
8398lpfc_set_host_tm(struct lpfc_hba *phba)
8399{
8400	LPFC_MBOXQ_t *mboxq;
8401	uint32_t len, rc;
8402	struct timespec64 cur_time;
8403	struct tm broken;
8404	uint32_t month, day, year;
8405	uint32_t hour, minute, second;
8406	struct lpfc_mbx_set_host_date_time *tm;
8407
8408	mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
8409	if (!mboxq)
8410		return -ENOMEM;
8411
8412	len = sizeof(struct lpfc_mbx_set_host_data) -
8413		sizeof(struct lpfc_sli4_cfg_mhdr);
8414	lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
8415			 LPFC_MBOX_OPCODE_SET_HOST_DATA, len,
8416			 LPFC_SLI4_MBX_EMBED);
8417
8418	mboxq->u.mqe.un.set_host_data.param_id = LPFC_SET_HOST_DATE_TIME;
8419	mboxq->u.mqe.un.set_host_data.param_len =
8420			sizeof(struct lpfc_mbx_set_host_date_time);
8421	tm = &mboxq->u.mqe.un.set_host_data.un.tm;
8422	ktime_get_real_ts64(&cur_time);
8423	time64_to_tm(cur_time.tv_sec, 0, &broken);
8424	month = broken.tm_mon + 1;
8425	day = broken.tm_mday;
8426	year = broken.tm_year - 100;
8427	hour = broken.tm_hour;
8428	minute = broken.tm_min;
8429	second = broken.tm_sec;
8430	bf_set(lpfc_mbx_set_host_month, tm, month);
8431	bf_set(lpfc_mbx_set_host_day, tm, day);
8432	bf_set(lpfc_mbx_set_host_year, tm, year);
8433	bf_set(lpfc_mbx_set_host_hour, tm, hour);
8434	bf_set(lpfc_mbx_set_host_min, tm, minute);
8435	bf_set(lpfc_mbx_set_host_sec, tm, second);
8436
8437	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
8438	mempool_free(mboxq, phba->mbox_mem_pool);
8439	return rc;
8440}
8441
8442/**
8443 * lpfc_sli4_hba_setup - SLI4 device initialization PCI function
8444 * @phba: Pointer to HBA context object.
8445 *
8446 * This function is the main SLI4 device initialization PCI function. This
8447 * function is called by the HBA initialization code, HBA reset code and
8448 * HBA error attention handler code. Caller is not required to hold any
8449 * locks.
8450 **/
8451int
8452lpfc_sli4_hba_setup(struct lpfc_hba *phba)
8453{
8454	int rc, i, cnt, len, dd;
8455	LPFC_MBOXQ_t *mboxq;
8456	struct lpfc_mqe *mqe;
8457	uint8_t *vpd;
8458	uint32_t vpd_size;
8459	uint32_t ftr_rsp = 0;
8460	struct Scsi_Host *shost = lpfc_shost_from_vport(phba->pport);
8461	struct lpfc_vport *vport = phba->pport;
8462	struct lpfc_dmabuf *mp;
8463	struct lpfc_rqb *rqbp;
8464	u32 flg;
8465
8466	/* Perform a PCI function reset to start from clean */
8467	rc = lpfc_pci_function_reset(phba);
8468	if (unlikely(rc))
8469		return -ENODEV;
8470
8471	/* Check the HBA Host Status Register for readyness */
8472	rc = lpfc_sli4_post_status_check(phba);
8473	if (unlikely(rc))
8474		return -ENODEV;
8475	else {
8476		spin_lock_irq(&phba->hbalock);
8477		phba->sli.sli_flag |= LPFC_SLI_ACTIVE;
8478		flg = phba->sli.sli_flag;
8479		spin_unlock_irq(&phba->hbalock);
8480		/* Allow a little time after setting SLI_ACTIVE for any polled
8481		 * MBX commands to complete via BSG.
8482		 */
8483		for (i = 0; i < 50 && (flg & LPFC_SLI_MBOX_ACTIVE); i++) {
8484			msleep(20);
8485			spin_lock_irq(&phba->hbalock);
8486			flg = phba->sli.sli_flag;
8487			spin_unlock_irq(&phba->hbalock);
8488		}
8489	}
8490	phba->hba_flag &= ~HBA_SETUP;
8491
8492	lpfc_sli4_dip(phba);
8493
8494	/*
8495	 * Allocate a single mailbox container for initializing the
8496	 * port.
8497	 */
8498	mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
8499	if (!mboxq)
8500		return -ENOMEM;
8501
8502	/* Issue READ_REV to collect vpd and FW information. */
8503	vpd_size = SLI4_PAGE_SIZE;
8504	vpd = kzalloc(vpd_size, GFP_KERNEL);
8505	if (!vpd) {
8506		rc = -ENOMEM;
8507		goto out_free_mbox;
8508	}
8509
8510	rc = lpfc_sli4_read_rev(phba, mboxq, vpd, &vpd_size);
8511	if (unlikely(rc)) {
8512		kfree(vpd);
8513		goto out_free_mbox;
8514	}
8515
8516	mqe = &mboxq->u.mqe;
8517	phba->sli_rev = bf_get(lpfc_mbx_rd_rev_sli_lvl, &mqe->un.read_rev);
8518	if (bf_get(lpfc_mbx_rd_rev_fcoe, &mqe->un.read_rev)) {
8519		phba->hba_flag |= HBA_FCOE_MODE;
8520		phba->fcp_embed_io = 0;	/* SLI4 FC support only */
8521	} else {
8522		phba->hba_flag &= ~HBA_FCOE_MODE;
8523	}
8524
8525	if (bf_get(lpfc_mbx_rd_rev_cee_ver, &mqe->un.read_rev) ==
8526		LPFC_DCBX_CEE_MODE)
8527		phba->hba_flag |= HBA_FIP_SUPPORT;
8528	else
8529		phba->hba_flag &= ~HBA_FIP_SUPPORT;
8530
8531	phba->hba_flag &= ~HBA_IOQ_FLUSH;
8532
8533	if (phba->sli_rev != LPFC_SLI_REV4) {
8534		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
8535			"0376 READ_REV Error. SLI Level %d "
8536			"FCoE enabled %d\n",
8537			phba->sli_rev, phba->hba_flag & HBA_FCOE_MODE);
8538		rc = -EIO;
8539		kfree(vpd);
8540		goto out_free_mbox;
8541	}
8542
8543	rc = lpfc_set_host_tm(phba);
8544	lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_INIT,
8545			"6468 Set host date / time: Status x%x:\n", rc);
8546
8547	/*
8548	 * Continue initialization with default values even if driver failed
8549	 * to read FCoE param config regions, only read parameters if the
8550	 * board is FCoE
8551	 */
8552	if (phba->hba_flag & HBA_FCOE_MODE &&
8553	    lpfc_sli4_read_fcoe_params(phba))
8554		lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_INIT,
8555			"2570 Failed to read FCoE parameters\n");
8556
8557	/*
8558	 * Retrieve sli4 device physical port name, failure of doing it
8559	 * is considered as non-fatal.
8560	 */
8561	rc = lpfc_sli4_retrieve_pport_name(phba);
8562	if (!rc)
8563		lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
8564				"3080 Successful retrieving SLI4 device "
8565				"physical port name: %s.\n", phba->Port);
8566
8567	rc = lpfc_sli4_get_ctl_attr(phba);
8568	if (!rc)
8569		lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
8570				"8351 Successful retrieving SLI4 device "
8571				"CTL ATTR\n");
8572
8573	/*
8574	 * Evaluate the read rev and vpd data. Populate the driver
8575	 * state with the results. If this routine fails, the failure
8576	 * is not fatal as the driver will use generic values.
8577	 */
8578	rc = lpfc_parse_vpd(phba, vpd, vpd_size);
8579	if (unlikely(!rc))
8580		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
8581				"0377 Error %d parsing vpd. "
8582				"Using defaults.\n", rc);
8583	kfree(vpd);
8584
8585	/* Save information as VPD data */
8586	phba->vpd.rev.biuRev = mqe->un.read_rev.first_hw_rev;
8587	phba->vpd.rev.smRev = mqe->un.read_rev.second_hw_rev;
8588
8589	/*
8590	 * This is because first G7 ASIC doesn't support the standard
8591	 * 0x5a NVME cmd descriptor type/subtype
8592	 */
8593	if ((bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) ==
8594			LPFC_SLI_INTF_IF_TYPE_6) &&
8595	    (phba->vpd.rev.biuRev == LPFC_G7_ASIC_1) &&
8596	    (phba->vpd.rev.smRev == 0) &&
8597	    (phba->cfg_nvme_embed_cmd == 1))
8598		phba->cfg_nvme_embed_cmd = 0;
8599
8600	phba->vpd.rev.endecRev = mqe->un.read_rev.third_hw_rev;
8601	phba->vpd.rev.fcphHigh = bf_get(lpfc_mbx_rd_rev_fcph_high,
8602					 &mqe->un.read_rev);
8603	phba->vpd.rev.fcphLow = bf_get(lpfc_mbx_rd_rev_fcph_low,
8604				       &mqe->un.read_rev);
8605	phba->vpd.rev.feaLevelHigh = bf_get(lpfc_mbx_rd_rev_ftr_lvl_high,
8606					    &mqe->un.read_rev);
8607	phba->vpd.rev.feaLevelLow = bf_get(lpfc_mbx_rd_rev_ftr_lvl_low,
8608					   &mqe->un.read_rev);
8609	phba->vpd.rev.sli1FwRev = mqe->un.read_rev.fw_id_rev;
8610	memcpy(phba->vpd.rev.sli1FwName, mqe->un.read_rev.fw_name, 16);
8611	phba->vpd.rev.sli2FwRev = mqe->un.read_rev.ulp_fw_id_rev;
8612	memcpy(phba->vpd.rev.sli2FwName, mqe->un.read_rev.ulp_fw_name, 16);
8613	phba->vpd.rev.opFwRev = mqe->un.read_rev.fw_id_rev;
8614	memcpy(phba->vpd.rev.opFwName, mqe->un.read_rev.fw_name, 16);
8615	lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
8616			"(%d):0380 READ_REV Status x%x "
8617			"fw_rev:%s fcphHi:%x fcphLo:%x flHi:%x flLo:%x\n",
8618			mboxq->vport ? mboxq->vport->vpi : 0,
8619			bf_get(lpfc_mqe_status, mqe),
8620			phba->vpd.rev.opFwName,
8621			phba->vpd.rev.fcphHigh, phba->vpd.rev.fcphLow,
8622			phba->vpd.rev.feaLevelHigh, phba->vpd.rev.feaLevelLow);
8623
8624	if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) ==
8625	    LPFC_SLI_INTF_IF_TYPE_0) {
8626		lpfc_set_features(phba, mboxq, LPFC_SET_UE_RECOVERY);
8627		rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
8628		if (rc == MBX_SUCCESS) {
8629			phba->hba_flag |= HBA_RECOVERABLE_UE;
8630			/* Set 1Sec interval to detect UE */
8631			phba->eratt_poll_interval = 1;
8632			phba->sli4_hba.ue_to_sr = bf_get(
8633					lpfc_mbx_set_feature_UESR,
8634					&mboxq->u.mqe.un.set_feature);
8635			phba->sli4_hba.ue_to_rp = bf_get(
8636					lpfc_mbx_set_feature_UERP,
8637					&mboxq->u.mqe.un.set_feature);
8638		}
8639	}
8640
8641	if (phba->cfg_enable_mds_diags && phba->mds_diags_support) {
8642		/* Enable MDS Diagnostics only if the SLI Port supports it */
8643		lpfc_set_features(phba, mboxq, LPFC_SET_MDS_DIAGS);
8644		rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
8645		if (rc != MBX_SUCCESS)
8646			phba->mds_diags_support = 0;
8647	}
8648
8649	/*
8650	 * Discover the port's supported feature set and match it against the
8651	 * hosts requests.
8652	 */
8653	lpfc_request_features(phba, mboxq);
8654	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
8655	if (unlikely(rc)) {
8656		rc = -EIO;
8657		goto out_free_mbox;
8658	}
8659
8660	/* Disable VMID if app header is not supported */
8661	if (phba->cfg_vmid_app_header && !(bf_get(lpfc_mbx_rq_ftr_rsp_ashdr,
8662						  &mqe->un.req_ftrs))) {
8663		bf_set(lpfc_ftr_ashdr, &phba->sli4_hba.sli4_flags, 0);
8664		phba->cfg_vmid_app_header = 0;
8665		lpfc_printf_log(phba, KERN_DEBUG, LOG_SLI,
8666				"1242 vmid feature not supported\n");
8667	}
8668
8669	/*
8670	 * The port must support FCP initiator mode as this is the
8671	 * only mode running in the host.
8672	 */
8673	if (!(bf_get(lpfc_mbx_rq_ftr_rsp_fcpi, &mqe->un.req_ftrs))) {
8674		lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
8675				"0378 No support for fcpi mode.\n");
8676		ftr_rsp++;
8677	}
8678
8679	/* Performance Hints are ONLY for FCoE */
8680	if (phba->hba_flag & HBA_FCOE_MODE) {
8681		if (bf_get(lpfc_mbx_rq_ftr_rsp_perfh, &mqe->un.req_ftrs))
8682			phba->sli3_options |= LPFC_SLI4_PERFH_ENABLED;
8683		else
8684			phba->sli3_options &= ~LPFC_SLI4_PERFH_ENABLED;
8685	}
8686
8687	/*
8688	 * If the port cannot support the host's requested features
8689	 * then turn off the global config parameters to disable the
8690	 * feature in the driver.  This is not a fatal error.
8691	 */
8692	if (phba->sli3_options & LPFC_SLI3_BG_ENABLED) {
8693		if (!(bf_get(lpfc_mbx_rq_ftr_rsp_dif, &mqe->un.req_ftrs))) {
8694			phba->cfg_enable_bg = 0;
8695			phba->sli3_options &= ~LPFC_SLI3_BG_ENABLED;
8696			ftr_rsp++;
8697		}
8698	}
8699
8700	if (phba->max_vpi && phba->cfg_enable_npiv &&
8701	    !(bf_get(lpfc_mbx_rq_ftr_rsp_npiv, &mqe->un.req_ftrs)))
8702		ftr_rsp++;
8703
8704	if (ftr_rsp) {
8705		lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
8706				"0379 Feature Mismatch Data: x%08x %08x "
8707				"x%x x%x x%x\n", mqe->un.req_ftrs.word2,
8708				mqe->un.req_ftrs.word3, phba->cfg_enable_bg,
8709				phba->cfg_enable_npiv, phba->max_vpi);
8710		if (!(bf_get(lpfc_mbx_rq_ftr_rsp_dif, &mqe->un.req_ftrs)))
8711			phba->cfg_enable_bg = 0;
8712		if (!(bf_get(lpfc_mbx_rq_ftr_rsp_npiv, &mqe->un.req_ftrs)))
8713			phba->cfg_enable_npiv = 0;
8714	}
8715
8716	/* These SLI3 features are assumed in SLI4 */
8717	spin_lock_irq(&phba->hbalock);
8718	phba->sli3_options |= (LPFC_SLI3_NPIV_ENABLED | LPFC_SLI3_HBQ_ENABLED);
8719	spin_unlock_irq(&phba->hbalock);
8720
8721	/* Always try to enable dual dump feature if we can */
8722	lpfc_set_features(phba, mboxq, LPFC_SET_DUAL_DUMP);
8723	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
8724	dd = bf_get(lpfc_mbx_set_feature_dd, &mboxq->u.mqe.un.set_feature);
8725	if ((rc == MBX_SUCCESS) && (dd == LPFC_ENABLE_DUAL_DUMP))
8726		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
8727				"6448 Dual Dump is enabled\n");
8728	else
8729		lpfc_printf_log(phba, KERN_INFO, LOG_SLI | LOG_INIT,
8730				"6447 Dual Dump Mailbox x%x (x%x/x%x) failed, "
8731				"rc:x%x dd:x%x\n",
8732				bf_get(lpfc_mqe_command, &mboxq->u.mqe),
8733				lpfc_sli_config_mbox_subsys_get(
8734					phba, mboxq),
8735				lpfc_sli_config_mbox_opcode_get(
8736					phba, mboxq),
8737				rc, dd);
8738	/*
8739	 * Allocate all resources (xri,rpi,vpi,vfi) now.  Subsequent
8740	 * calls depends on these resources to complete port setup.
8741	 */
8742	rc = lpfc_sli4_alloc_resource_identifiers(phba);
8743	if (rc) {
8744		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
8745				"2920 Failed to alloc Resource IDs "
8746				"rc = x%x\n", rc);
8747		goto out_free_mbox;
8748	}
8749
8750	lpfc_set_host_data(phba, mboxq);
8751
8752	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
8753	if (rc) {
8754		lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
8755				"2134 Failed to set host os driver version %x",
8756				rc);
8757	}
8758
8759	/* Read the port's service parameters. */
8760	rc = lpfc_read_sparam(phba, mboxq, vport->vpi);
8761	if (rc) {
8762		phba->link_state = LPFC_HBA_ERROR;
8763		rc = -ENOMEM;
8764		goto out_free_mbox;
8765	}
8766
8767	mboxq->vport = vport;
8768	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
8769	mp = mboxq->ctx_buf;
8770	if (rc == MBX_SUCCESS) {
8771		memcpy(&vport->fc_sparam, mp->virt, sizeof(struct serv_parm));
8772		rc = 0;
8773	}
8774
8775	/*
8776	 * This memory was allocated by the lpfc_read_sparam routine but is
8777	 * no longer needed.  It is released and ctx_buf NULLed to prevent
8778	 * unintended pointer access as the mbox is reused.
8779	 */
8780	lpfc_mbuf_free(phba, mp->virt, mp->phys);
8781	kfree(mp);
8782	mboxq->ctx_buf = NULL;
8783	if (unlikely(rc)) {
8784		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
8785				"0382 READ_SPARAM command failed "
8786				"status %d, mbxStatus x%x\n",
8787				rc, bf_get(lpfc_mqe_status, mqe));
8788		phba->link_state = LPFC_HBA_ERROR;
8789		rc = -EIO;
8790		goto out_free_mbox;
8791	}
8792
8793	lpfc_update_vport_wwn(vport);
8794
8795	/* Update the fc_host data structures with new wwn. */
8796	fc_host_node_name(shost) = wwn_to_u64(vport->fc_nodename.u.wwn);
8797	fc_host_port_name(shost) = wwn_to_u64(vport->fc_portname.u.wwn);
8798
8799	/* Create all the SLI4 queues */
8800	rc = lpfc_sli4_queue_create(phba);
8801	if (rc) {
8802		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
8803				"3089 Failed to allocate queues\n");
8804		rc = -ENODEV;
8805		goto out_free_mbox;
8806	}
8807	/* Set up all the queues to the device */
8808	rc = lpfc_sli4_queue_setup(phba);
8809	if (unlikely(rc)) {
8810		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
8811				"0381 Error %d during queue setup.\n ", rc);
8812		goto out_stop_timers;
8813	}
8814	/* Initialize the driver internal SLI layer lists. */
8815	lpfc_sli4_setup(phba);
8816	lpfc_sli4_queue_init(phba);
8817
8818	/* update host els xri-sgl sizes and mappings */
8819	rc = lpfc_sli4_els_sgl_update(phba);
8820	if (unlikely(rc)) {
8821		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
8822				"1400 Failed to update xri-sgl size and "
8823				"mapping: %d\n", rc);
8824		goto out_destroy_queue;
8825	}
8826
8827	/* register the els sgl pool to the port */
8828	rc = lpfc_sli4_repost_sgl_list(phba, &phba->sli4_hba.lpfc_els_sgl_list,
8829				       phba->sli4_hba.els_xri_cnt);
8830	if (unlikely(rc < 0)) {
8831		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
8832				"0582 Error %d during els sgl post "
8833				"operation\n", rc);
8834		rc = -ENODEV;
8835		goto out_destroy_queue;
8836	}
8837	phba->sli4_hba.els_xri_cnt = rc;
8838
8839	if (phba->nvmet_support) {
8840		/* update host nvmet xri-sgl sizes and mappings */
8841		rc = lpfc_sli4_nvmet_sgl_update(phba);
8842		if (unlikely(rc)) {
8843			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
8844					"6308 Failed to update nvmet-sgl size "
8845					"and mapping: %d\n", rc);
8846			goto out_destroy_queue;
8847		}
8848
8849		/* register the nvmet sgl pool to the port */
8850		rc = lpfc_sli4_repost_sgl_list(
8851			phba,
8852			&phba->sli4_hba.lpfc_nvmet_sgl_list,
8853			phba->sli4_hba.nvmet_xri_cnt);
8854		if (unlikely(rc < 0)) {
8855			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
8856					"3117 Error %d during nvmet "
8857					"sgl post\n", rc);
8858			rc = -ENODEV;
8859			goto out_destroy_queue;
8860		}
8861		phba->sli4_hba.nvmet_xri_cnt = rc;
8862
8863		/* We allocate an iocbq for every receive context SGL.
8864		 * The additional allocation is for abort and ls handling.
8865		 */
8866		cnt = phba->sli4_hba.nvmet_xri_cnt +
8867			phba->sli4_hba.max_cfg_param.max_xri;
8868	} else {
8869		/* update host common xri-sgl sizes and mappings */
8870		rc = lpfc_sli4_io_sgl_update(phba);
8871		if (unlikely(rc)) {
8872			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
8873					"6082 Failed to update nvme-sgl size "
8874					"and mapping: %d\n", rc);
8875			goto out_destroy_queue;
8876		}
8877
8878		/* register the allocated common sgl pool to the port */
8879		rc = lpfc_sli4_repost_io_sgl_list(phba);
8880		if (unlikely(rc)) {
8881			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
8882					"6116 Error %d during nvme sgl post "
8883					"operation\n", rc);
8884			/* Some NVME buffers were moved to abort nvme list */
8885			/* A pci function reset will repost them */
8886			rc = -ENODEV;
8887			goto out_destroy_queue;
8888		}
8889		/* Each lpfc_io_buf job structure has an iocbq element.
8890		 * This cnt provides for abort, els, ct and ls requests.
8891		 */
8892		cnt = phba->sli4_hba.max_cfg_param.max_xri;
8893	}
8894
8895	if (!phba->sli.iocbq_lookup) {
8896		/* Initialize and populate the iocb list per host */
8897		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
8898				"2821 initialize iocb list with %d entries\n",
8899				cnt);
8900		rc = lpfc_init_iocb_list(phba, cnt);
8901		if (rc) {
8902			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
8903					"1413 Failed to init iocb list.\n");
8904			goto out_destroy_queue;
8905		}
8906	}
8907
8908	if (phba->nvmet_support)
8909		lpfc_nvmet_create_targetport(phba);
8910
8911	if (phba->nvmet_support && phba->cfg_nvmet_mrq) {
8912		/* Post initial buffers to all RQs created */
8913		for (i = 0; i < phba->cfg_nvmet_mrq; i++) {
8914			rqbp = phba->sli4_hba.nvmet_mrq_hdr[i]->rqbp;
8915			INIT_LIST_HEAD(&rqbp->rqb_buffer_list);
8916			rqbp->rqb_alloc_buffer = lpfc_sli4_nvmet_alloc;
8917			rqbp->rqb_free_buffer = lpfc_sli4_nvmet_free;
8918			rqbp->entry_count = LPFC_NVMET_RQE_DEF_COUNT;
8919			rqbp->buffer_count = 0;
8920
8921			lpfc_post_rq_buffer(
8922				phba, phba->sli4_hba.nvmet_mrq_hdr[i],
8923				phba->sli4_hba.nvmet_mrq_data[i],
8924				phba->cfg_nvmet_mrq_post, i);
8925		}
8926	}
8927
8928	/* Post the rpi header region to the device. */
8929	rc = lpfc_sli4_post_all_rpi_hdrs(phba);
8930	if (unlikely(rc)) {
8931		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
8932				"0393 Error %d during rpi post operation\n",
8933				rc);
8934		rc = -ENODEV;
8935		goto out_free_iocblist;
8936	}
8937	lpfc_sli4_node_prep(phba);
8938
8939	if (!(phba->hba_flag & HBA_FCOE_MODE)) {
8940		if ((phba->nvmet_support == 0) || (phba->cfg_nvmet_mrq == 1)) {
8941			/*
8942			 * The FC Port needs to register FCFI (index 0)
8943			 */
8944			lpfc_reg_fcfi(phba, mboxq);
8945			mboxq->vport = phba->pport;
8946			rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
8947			if (rc != MBX_SUCCESS)
8948				goto out_unset_queue;
8949			rc = 0;
8950			phba->fcf.fcfi = bf_get(lpfc_reg_fcfi_fcfi,
8951						&mboxq->u.mqe.un.reg_fcfi);
8952		} else {
8953			/* We are a NVME Target mode with MRQ > 1 */
8954
8955			/* First register the FCFI */
8956			lpfc_reg_fcfi_mrq(phba, mboxq, 0);
8957			mboxq->vport = phba->pport;
8958			rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
8959			if (rc != MBX_SUCCESS)
8960				goto out_unset_queue;
8961			rc = 0;
8962			phba->fcf.fcfi = bf_get(lpfc_reg_fcfi_mrq_fcfi,
8963						&mboxq->u.mqe.un.reg_fcfi_mrq);
8964
8965			/* Next register the MRQs */
8966			lpfc_reg_fcfi_mrq(phba, mboxq, 1);
8967			mboxq->vport = phba->pport;
8968			rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
8969			if (rc != MBX_SUCCESS)
8970				goto out_unset_queue;
8971			rc = 0;
8972		}
8973		/* Check if the port is configured to be disabled */
8974		lpfc_sli_read_link_ste(phba);
8975	}
8976
8977	/* Don't post more new bufs if repost already recovered
8978	 * the nvme sgls.
8979	 */
8980	if (phba->nvmet_support == 0) {
8981		if (phba->sli4_hba.io_xri_cnt == 0) {
8982			len = lpfc_new_io_buf(
8983					      phba, phba->sli4_hba.io_xri_max);
8984			if (len == 0) {
8985				rc = -ENOMEM;
8986				goto out_unset_queue;
8987			}
8988
8989			if (phba->cfg_xri_rebalancing)
8990				lpfc_create_multixri_pools(phba);
8991		}
8992	} else {
8993		phba->cfg_xri_rebalancing = 0;
8994	}
8995
8996	/* Allow asynchronous mailbox command to go through */
8997	spin_lock_irq(&phba->hbalock);
8998	phba->sli.sli_flag &= ~LPFC_SLI_ASYNC_MBX_BLK;
8999	spin_unlock_irq(&phba->hbalock);
9000
9001	/* Post receive buffers to the device */
9002	lpfc_sli4_rb_setup(phba);
9003
9004	/* Reset HBA FCF states after HBA reset */
9005	phba->fcf.fcf_flag = 0;
9006	phba->fcf.current_rec.flag = 0;
9007
9008	/* Start the ELS watchdog timer */
9009	mod_timer(&vport->els_tmofunc,
9010		  jiffies + msecs_to_jiffies(1000 * (phba->fc_ratov * 2)));
9011
9012	/* Start heart beat timer */
9013	mod_timer(&phba->hb_tmofunc,
9014		  jiffies + msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL));
9015	phba->hba_flag &= ~(HBA_HBEAT_INP | HBA_HBEAT_TMO);
9016	phba->last_completion_time = jiffies;
9017
9018	/* start eq_delay heartbeat */
9019	if (phba->cfg_auto_imax)
9020		queue_delayed_work(phba->wq, &phba->eq_delay_work,
9021				   msecs_to_jiffies(LPFC_EQ_DELAY_MSECS));
9022
9023	/* start per phba idle_stat_delay heartbeat */
9024	lpfc_init_idle_stat_hb(phba);
9025
9026	/* Start error attention (ERATT) polling timer */
9027	mod_timer(&phba->eratt_poll,
9028		  jiffies + msecs_to_jiffies(1000 * phba->eratt_poll_interval));
9029
9030	/*
9031	 * The port is ready, set the host's link state to LINK_DOWN
9032	 * in preparation for link interrupts.
9033	 */
9034	spin_lock_irq(&phba->hbalock);
9035	phba->link_state = LPFC_LINK_DOWN;
9036
9037	/* Check if physical ports are trunked */
9038	if (bf_get(lpfc_conf_trunk_port0, &phba->sli4_hba))
9039		phba->trunk_link.link0.state = LPFC_LINK_DOWN;
9040	if (bf_get(lpfc_conf_trunk_port1, &phba->sli4_hba))
9041		phba->trunk_link.link1.state = LPFC_LINK_DOWN;
9042	if (bf_get(lpfc_conf_trunk_port2, &phba->sli4_hba))
9043		phba->trunk_link.link2.state = LPFC_LINK_DOWN;
9044	if (bf_get(lpfc_conf_trunk_port3, &phba->sli4_hba))
9045		phba->trunk_link.link3.state = LPFC_LINK_DOWN;
9046	spin_unlock_irq(&phba->hbalock);
9047
9048	/* Arm the CQs and then EQs on device */
9049	lpfc_sli4_arm_cqeq_intr(phba);
9050
9051	/* Indicate device interrupt mode */
9052	phba->sli4_hba.intr_enable = 1;
9053
9054	/* Setup CMF after HBA is initialized */
9055	lpfc_cmf_setup(phba);
9056
9057	if (!(phba->hba_flag & HBA_FCOE_MODE) &&
9058	    (phba->hba_flag & LINK_DISABLED)) {
9059		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
9060				"3103 Adapter Link is disabled.\n");
9061		lpfc_down_link(phba, mboxq);
9062		rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
9063		if (rc != MBX_SUCCESS) {
9064			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
9065					"3104 Adapter failed to issue "
9066					"DOWN_LINK mbox cmd, rc:x%x\n", rc);
9067			goto out_io_buff_free;
9068		}
9069	} else if (phba->cfg_suppress_link_up == LPFC_INITIALIZE_LINK) {
9070		/* don't perform init_link on SLI4 FC port loopback test */
9071		if (!(phba->link_flag & LS_LOOPBACK_MODE)) {
9072			rc = phba->lpfc_hba_init_link(phba, MBX_NOWAIT);
9073			if (rc)
9074				goto out_io_buff_free;
9075		}
9076	}
9077	mempool_free(mboxq, phba->mbox_mem_pool);
9078
9079	/* Enable RAS FW log support */
9080	lpfc_sli4_ras_setup(phba);
9081
9082	phba->hba_flag |= HBA_SETUP;
9083	return rc;
9084
9085out_io_buff_free:
9086	/* Free allocated IO Buffers */
9087	lpfc_io_free(phba);
9088out_unset_queue:
9089	/* Unset all the queues set up in this routine when error out */
9090	lpfc_sli4_queue_unset(phba);
9091out_free_iocblist:
9092	lpfc_free_iocb_list(phba);
9093out_destroy_queue:
9094	lpfc_sli4_queue_destroy(phba);
9095out_stop_timers:
9096	lpfc_stop_hba_timers(phba);
9097out_free_mbox:
9098	mempool_free(mboxq, phba->mbox_mem_pool);
9099	return rc;
9100}
9101
9102/**
9103 * lpfc_mbox_timeout - Timeout call back function for mbox timer
9104 * @t: Context to fetch pointer to hba structure from.
9105 *
9106 * This is the callback function for mailbox timer. The mailbox
9107 * timer is armed when a new mailbox command is issued and the timer
9108 * is deleted when the mailbox complete. The function is called by
9109 * the kernel timer code when a mailbox does not complete within
9110 * expected time. This function wakes up the worker thread to
9111 * process the mailbox timeout and returns. All the processing is
9112 * done by the worker thread function lpfc_mbox_timeout_handler.
9113 **/
9114void
9115lpfc_mbox_timeout(struct timer_list *t)
9116{
9117	struct lpfc_hba  *phba = from_timer(phba, t, sli.mbox_tmo);
9118	unsigned long iflag;
9119	uint32_t tmo_posted;
9120
9121	spin_lock_irqsave(&phba->pport->work_port_lock, iflag);
9122	tmo_posted = phba->pport->work_port_events & WORKER_MBOX_TMO;
9123	if (!tmo_posted)
9124		phba->pport->work_port_events |= WORKER_MBOX_TMO;
9125	spin_unlock_irqrestore(&phba->pport->work_port_lock, iflag);
9126
9127	if (!tmo_posted)
9128		lpfc_worker_wake_up(phba);
9129	return;
9130}
9131
9132/**
9133 * lpfc_sli4_mbox_completions_pending - check to see if any mailbox completions
9134 *                                    are pending
9135 * @phba: Pointer to HBA context object.
9136 *
9137 * This function checks if any mailbox completions are present on the mailbox
9138 * completion queue.
9139 **/
9140static bool
9141lpfc_sli4_mbox_completions_pending(struct lpfc_hba *phba)
9142{
9143
9144	uint32_t idx;
9145	struct lpfc_queue *mcq;
9146	struct lpfc_mcqe *mcqe;
9147	bool pending_completions = false;
9148	uint8_t	qe_valid;
9149
9150	if (unlikely(!phba) || (phba->sli_rev != LPFC_SLI_REV4))
9151		return false;
9152
9153	/* Check for completions on mailbox completion queue */
9154
9155	mcq = phba->sli4_hba.mbx_cq;
9156	idx = mcq->hba_index;
9157	qe_valid = mcq->qe_valid;
9158	while (bf_get_le32(lpfc_cqe_valid,
9159	       (struct lpfc_cqe *)lpfc_sli4_qe(mcq, idx)) == qe_valid) {
9160		mcqe = (struct lpfc_mcqe *)(lpfc_sli4_qe(mcq, idx));
9161		if (bf_get_le32(lpfc_trailer_completed, mcqe) &&
9162		    (!bf_get_le32(lpfc_trailer_async, mcqe))) {
9163			pending_completions = true;
9164			break;
9165		}
9166		idx = (idx + 1) % mcq->entry_count;
9167		if (mcq->hba_index == idx)
9168			break;
9169
9170		/* if the index wrapped around, toggle the valid bit */
9171		if (phba->sli4_hba.pc_sli4_params.cqav && !idx)
9172			qe_valid = (qe_valid) ? 0 : 1;
9173	}
9174	return pending_completions;
9175
9176}
9177
9178/**
9179 * lpfc_sli4_process_missed_mbox_completions - process mbox completions
9180 *					      that were missed.
9181 * @phba: Pointer to HBA context object.
9182 *
9183 * For sli4, it is possible to miss an interrupt. As such mbox completions
9184 * maybe missed causing erroneous mailbox timeouts to occur. This function
9185 * checks to see if mbox completions are on the mailbox completion queue
9186 * and will process all the completions associated with the eq for the
9187 * mailbox completion queue.
9188 **/
9189static bool
9190lpfc_sli4_process_missed_mbox_completions(struct lpfc_hba *phba)
9191{
9192	struct lpfc_sli4_hba *sli4_hba = &phba->sli4_hba;
9193	uint32_t eqidx;
9194	struct lpfc_queue *fpeq = NULL;
9195	struct lpfc_queue *eq;
9196	bool mbox_pending;
9197
9198	if (unlikely(!phba) || (phba->sli_rev != LPFC_SLI_REV4))
9199		return false;
9200
9201	/* Find the EQ associated with the mbox CQ */
9202	if (sli4_hba->hdwq) {
9203		for (eqidx = 0; eqidx < phba->cfg_irq_chann; eqidx++) {
9204			eq = phba->sli4_hba.hba_eq_hdl[eqidx].eq;
9205			if (eq && eq->queue_id == sli4_hba->mbx_cq->assoc_qid) {
9206				fpeq = eq;
9207				break;
9208			}
9209		}
9210	}
9211	if (!fpeq)
9212		return false;
9213
9214	/* Turn off interrupts from this EQ */
9215
9216	sli4_hba->sli4_eq_clr_intr(fpeq);
9217
9218	/* Check to see if a mbox completion is pending */
9219
9220	mbox_pending = lpfc_sli4_mbox_completions_pending(phba);
9221
9222	/*
9223	 * If a mbox completion is pending, process all the events on EQ
9224	 * associated with the mbox completion queue (this could include
9225	 * mailbox commands, async events, els commands, receive queue data
9226	 * and fcp commands)
9227	 */
9228
9229	if (mbox_pending)
9230		/* process and rearm the EQ */
9231		lpfc_sli4_process_eq(phba, fpeq, LPFC_QUEUE_REARM,
9232				     LPFC_QUEUE_WORK);
9233	else
9234		/* Always clear and re-arm the EQ */
9235		sli4_hba->sli4_write_eq_db(phba, fpeq, 0, LPFC_QUEUE_REARM);
9236
9237	return mbox_pending;
9238
9239}
9240
9241/**
9242 * lpfc_mbox_timeout_handler - Worker thread function to handle mailbox timeout
9243 * @phba: Pointer to HBA context object.
9244 *
9245 * This function is called from worker thread when a mailbox command times out.
9246 * The caller is not required to hold any locks. This function will reset the
9247 * HBA and recover all the pending commands.
9248 **/
9249void
9250lpfc_mbox_timeout_handler(struct lpfc_hba *phba)
9251{
9252	LPFC_MBOXQ_t *pmbox = phba->sli.mbox_active;
9253	MAILBOX_t *mb = NULL;
9254
9255	struct lpfc_sli *psli = &phba->sli;
9256
9257	/* If the mailbox completed, process the completion */
9258	lpfc_sli4_process_missed_mbox_completions(phba);
9259
9260	if (!(psli->sli_flag & LPFC_SLI_ACTIVE))
9261		return;
9262
9263	if (pmbox != NULL)
9264		mb = &pmbox->u.mb;
9265	/* Check the pmbox pointer first.  There is a race condition
9266	 * between the mbox timeout handler getting executed in the
9267	 * worklist and the mailbox actually completing. When this
9268	 * race condition occurs, the mbox_active will be NULL.
9269	 */
9270	spin_lock_irq(&phba->hbalock);
9271	if (pmbox == NULL) {
9272		lpfc_printf_log(phba, KERN_WARNING,
9273				LOG_MBOX | LOG_SLI,
9274				"0353 Active Mailbox cleared - mailbox timeout "
9275				"exiting\n");
9276		spin_unlock_irq(&phba->hbalock);
9277		return;
9278	}
9279
9280	/* Mbox cmd <mbxCommand> timeout */
9281	lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
9282			"0310 Mailbox command x%x timeout Data: x%x x%x x%px\n",
9283			mb->mbxCommand,
9284			phba->pport->port_state,
9285			phba->sli.sli_flag,
9286			phba->sli.mbox_active);
9287	spin_unlock_irq(&phba->hbalock);
9288
9289	/* Setting state unknown so lpfc_sli_abort_iocb_ring
9290	 * would get IOCB_ERROR from lpfc_sli_issue_iocb, allowing
9291	 * it to fail all outstanding SCSI IO.
9292	 */
9293	set_bit(MBX_TMO_ERR, &phba->bit_flags);
9294	spin_lock_irq(&phba->pport->work_port_lock);
9295	phba->pport->work_port_events &= ~WORKER_MBOX_TMO;
9296	spin_unlock_irq(&phba->pport->work_port_lock);
9297	spin_lock_irq(&phba->hbalock);
9298	phba->link_state = LPFC_LINK_UNKNOWN;
9299	psli->sli_flag &= ~LPFC_SLI_ACTIVE;
9300	spin_unlock_irq(&phba->hbalock);
9301
9302	lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
9303			"0345 Resetting board due to mailbox timeout\n");
9304
9305	/* Reset the HBA device */
9306	lpfc_reset_hba(phba);
9307}
9308
9309/**
9310 * lpfc_sli_issue_mbox_s3 - Issue an SLI3 mailbox command to firmware
9311 * @phba: Pointer to HBA context object.
9312 * @pmbox: Pointer to mailbox object.
9313 * @flag: Flag indicating how the mailbox need to be processed.
9314 *
9315 * This function is called by discovery code and HBA management code
9316 * to submit a mailbox command to firmware with SLI-3 interface spec. This
9317 * function gets the hbalock to protect the data structures.
9318 * The mailbox command can be submitted in polling mode, in which case
9319 * this function will wait in a polling loop for the completion of the
9320 * mailbox.
9321 * If the mailbox is submitted in no_wait mode (not polling) the
9322 * function will submit the command and returns immediately without waiting
9323 * for the mailbox completion. The no_wait is supported only when HBA
9324 * is in SLI2/SLI3 mode - interrupts are enabled.
9325 * The SLI interface allows only one mailbox pending at a time. If the
9326 * mailbox is issued in polling mode and there is already a mailbox
9327 * pending, then the function will return an error. If the mailbox is issued
9328 * in NO_WAIT mode and there is a mailbox pending already, the function
9329 * will return MBX_BUSY after queuing the mailbox into mailbox queue.
9330 * The sli layer owns the mailbox object until the completion of mailbox
9331 * command if this function return MBX_BUSY or MBX_SUCCESS. For all other
9332 * return codes the caller owns the mailbox command after the return of
9333 * the function.
9334 **/
9335static int
9336lpfc_sli_issue_mbox_s3(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox,
9337		       uint32_t flag)
9338{
9339	MAILBOX_t *mbx;
9340	struct lpfc_sli *psli = &phba->sli;
9341	uint32_t status, evtctr;
9342	uint32_t ha_copy, hc_copy;
9343	int i;
9344	unsigned long timeout;
9345	unsigned long drvr_flag = 0;
9346	uint32_t word0, ldata;
9347	void __iomem *to_slim;
9348	int processing_queue = 0;
9349
9350	spin_lock_irqsave(&phba->hbalock, drvr_flag);
9351	if (!pmbox) {
9352		phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
9353		/* processing mbox queue from intr_handler */
9354		if (unlikely(psli->sli_flag & LPFC_SLI_ASYNC_MBX_BLK)) {
9355			spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
9356			return MBX_SUCCESS;
9357		}
9358		processing_queue = 1;
9359		pmbox = lpfc_mbox_get(phba);
9360		if (!pmbox) {
9361			spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
9362			return MBX_SUCCESS;
9363		}
9364	}
9365
9366	if (pmbox->mbox_cmpl && pmbox->mbox_cmpl != lpfc_sli_def_mbox_cmpl &&
9367		pmbox->mbox_cmpl != lpfc_sli_wake_mbox_wait) {
9368		if(!pmbox->vport) {
9369			spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
9370			lpfc_printf_log(phba, KERN_ERR,
9371					LOG_MBOX | LOG_VPORT,
9372					"1806 Mbox x%x failed. No vport\n",
9373					pmbox->u.mb.mbxCommand);
9374			dump_stack();
9375			goto out_not_finished;
9376		}
9377	}
9378
9379	/* If the PCI channel is in offline state, do not post mbox. */
9380	if (unlikely(pci_channel_offline(phba->pcidev))) {
9381		spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
9382		goto out_not_finished;
9383	}
9384
9385	/* If HBA has a deferred error attention, fail the iocb. */
9386	if (unlikely(phba->hba_flag & DEFER_ERATT)) {
9387		spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
9388		goto out_not_finished;
9389	}
9390
9391	psli = &phba->sli;
9392
9393	mbx = &pmbox->u.mb;
9394	status = MBX_SUCCESS;
9395
9396	if (phba->link_state == LPFC_HBA_ERROR) {
9397		spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
9398
9399		/* Mbox command <mbxCommand> cannot issue */
9400		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
9401				"(%d):0311 Mailbox command x%x cannot "
9402				"issue Data: x%x x%x\n",
9403				pmbox->vport ? pmbox->vport->vpi : 0,
9404				pmbox->u.mb.mbxCommand, psli->sli_flag, flag);
9405		goto out_not_finished;
9406	}
9407
9408	if (mbx->mbxCommand != MBX_KILL_BOARD && flag & MBX_NOWAIT) {
9409		if (lpfc_readl(phba->HCregaddr, &hc_copy) ||
9410			!(hc_copy & HC_MBINT_ENA)) {
9411			spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
9412			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
9413				"(%d):2528 Mailbox command x%x cannot "
9414				"issue Data: x%x x%x\n",
9415				pmbox->vport ? pmbox->vport->vpi : 0,
9416				pmbox->u.mb.mbxCommand, psli->sli_flag, flag);
9417			goto out_not_finished;
9418		}
9419	}
9420
9421	if (psli->sli_flag & LPFC_SLI_MBOX_ACTIVE) {
9422		/* Polling for a mbox command when another one is already active
9423		 * is not allowed in SLI. Also, the driver must have established
9424		 * SLI2 mode to queue and process multiple mbox commands.
9425		 */
9426
9427		if (flag & MBX_POLL) {
9428			spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
9429
9430			/* Mbox command <mbxCommand> cannot issue */
9431			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
9432					"(%d):2529 Mailbox command x%x "
9433					"cannot issue Data: x%x x%x\n",
9434					pmbox->vport ? pmbox->vport->vpi : 0,
9435					pmbox->u.mb.mbxCommand,
9436					psli->sli_flag, flag);
9437			goto out_not_finished;
9438		}
9439
9440		if (!(psli->sli_flag & LPFC_SLI_ACTIVE)) {
9441			spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
9442			/* Mbox command <mbxCommand> cannot issue */
9443			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
9444					"(%d):2530 Mailbox command x%x "
9445					"cannot issue Data: x%x x%x\n",
9446					pmbox->vport ? pmbox->vport->vpi : 0,
9447					pmbox->u.mb.mbxCommand,
9448					psli->sli_flag, flag);
9449			goto out_not_finished;
9450		}
9451
9452		/* Another mailbox command is still being processed, queue this
9453		 * command to be processed later.
9454		 */
9455		lpfc_mbox_put(phba, pmbox);
9456
9457		/* Mbox cmd issue - BUSY */
9458		lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
9459				"(%d):0308 Mbox cmd issue - BUSY Data: "
9460				"x%x x%x x%x x%x\n",
9461				pmbox->vport ? pmbox->vport->vpi : 0xffffff,
9462				mbx->mbxCommand,
9463				phba->pport ? phba->pport->port_state : 0xff,
9464				psli->sli_flag, flag);
9465
9466		psli->slistat.mbox_busy++;
9467		spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
9468
9469		if (pmbox->vport) {
9470			lpfc_debugfs_disc_trc(pmbox->vport,
9471				LPFC_DISC_TRC_MBOX_VPORT,
9472				"MBOX Bsy vport:  cmd:x%x mb:x%x x%x",
9473				(uint32_t)mbx->mbxCommand,
9474				mbx->un.varWords[0], mbx->un.varWords[1]);
9475		}
9476		else {
9477			lpfc_debugfs_disc_trc(phba->pport,
9478				LPFC_DISC_TRC_MBOX,
9479				"MBOX Bsy:        cmd:x%x mb:x%x x%x",
9480				(uint32_t)mbx->mbxCommand,
9481				mbx->un.varWords[0], mbx->un.varWords[1]);
9482		}
9483
9484		return MBX_BUSY;
9485	}
9486
9487	psli->sli_flag |= LPFC_SLI_MBOX_ACTIVE;
9488
9489	/* If we are not polling, we MUST be in SLI2 mode */
9490	if (flag != MBX_POLL) {
9491		if (!(psli->sli_flag & LPFC_SLI_ACTIVE) &&
9492		    (mbx->mbxCommand != MBX_KILL_BOARD)) {
9493			psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
9494			spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
9495			/* Mbox command <mbxCommand> cannot issue */
9496			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
9497					"(%d):2531 Mailbox command x%x "
9498					"cannot issue Data: x%x x%x\n",
9499					pmbox->vport ? pmbox->vport->vpi : 0,
9500					pmbox->u.mb.mbxCommand,
9501					psli->sli_flag, flag);
9502			goto out_not_finished;
9503		}
9504		/* timeout active mbox command */
9505		timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, pmbox) *
9506					   1000);
9507		mod_timer(&psli->mbox_tmo, jiffies + timeout);
9508	}
9509
9510	/* Mailbox cmd <cmd> issue */
9511	lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
9512			"(%d):0309 Mailbox cmd x%x issue Data: x%x x%x "
9513			"x%x\n",
9514			pmbox->vport ? pmbox->vport->vpi : 0,
9515			mbx->mbxCommand,
9516			phba->pport ? phba->pport->port_state : 0xff,
9517			psli->sli_flag, flag);
9518
9519	if (mbx->mbxCommand != MBX_HEARTBEAT) {
9520		if (pmbox->vport) {
9521			lpfc_debugfs_disc_trc(pmbox->vport,
9522				LPFC_DISC_TRC_MBOX_VPORT,
9523				"MBOX Send vport: cmd:x%x mb:x%x x%x",
9524				(uint32_t)mbx->mbxCommand,
9525				mbx->un.varWords[0], mbx->un.varWords[1]);
9526		}
9527		else {
9528			lpfc_debugfs_disc_trc(phba->pport,
9529				LPFC_DISC_TRC_MBOX,
9530				"MBOX Send:       cmd:x%x mb:x%x x%x",
9531				(uint32_t)mbx->mbxCommand,
9532				mbx->un.varWords[0], mbx->un.varWords[1]);
9533		}
9534	}
9535
9536	psli->slistat.mbox_cmd++;
9537	evtctr = psli->slistat.mbox_event;
9538
9539	/* next set own bit for the adapter and copy over command word */
9540	mbx->mbxOwner = OWN_CHIP;
9541
9542	if (psli->sli_flag & LPFC_SLI_ACTIVE) {
9543		/* Populate mbox extension offset word. */
9544		if (pmbox->in_ext_byte_len || pmbox->out_ext_byte_len) {
9545			*(((uint32_t *)mbx) + pmbox->mbox_offset_word)
9546				= (uint8_t *)phba->mbox_ext
9547				  - (uint8_t *)phba->mbox;
9548		}
9549
9550		/* Copy the mailbox extension data */
9551		if (pmbox->in_ext_byte_len && pmbox->ext_buf) {
9552			lpfc_sli_pcimem_bcopy(pmbox->ext_buf,
9553					      (uint8_t *)phba->mbox_ext,
9554					      pmbox->in_ext_byte_len);
9555		}
9556		/* Copy command data to host SLIM area */
9557		lpfc_sli_pcimem_bcopy(mbx, phba->mbox, MAILBOX_CMD_SIZE);
9558	} else {
9559		/* Populate mbox extension offset word. */
9560		if (pmbox->in_ext_byte_len || pmbox->out_ext_byte_len)
9561			*(((uint32_t *)mbx) + pmbox->mbox_offset_word)
9562				= MAILBOX_HBA_EXT_OFFSET;
9563
9564		/* Copy the mailbox extension data */
9565		if (pmbox->in_ext_byte_len && pmbox->ext_buf)
9566			lpfc_memcpy_to_slim(phba->MBslimaddr +
9567				MAILBOX_HBA_EXT_OFFSET,
9568				pmbox->ext_buf, pmbox->in_ext_byte_len);
9569
9570		if (mbx->mbxCommand == MBX_CONFIG_PORT)
9571			/* copy command data into host mbox for cmpl */
9572			lpfc_sli_pcimem_bcopy(mbx, phba->mbox,
9573					      MAILBOX_CMD_SIZE);
9574
9575		/* First copy mbox command data to HBA SLIM, skip past first
9576		   word */
9577		to_slim = phba->MBslimaddr + sizeof (uint32_t);
9578		lpfc_memcpy_to_slim(to_slim, &mbx->un.varWords[0],
9579			    MAILBOX_CMD_SIZE - sizeof (uint32_t));
9580
9581		/* Next copy over first word, with mbxOwner set */
9582		ldata = *((uint32_t *)mbx);
9583		to_slim = phba->MBslimaddr;
9584		writel(ldata, to_slim);
9585		readl(to_slim); /* flush */
9586
9587		if (mbx->mbxCommand == MBX_CONFIG_PORT)
9588			/* switch over to host mailbox */
9589			psli->sli_flag |= LPFC_SLI_ACTIVE;
9590	}
9591
9592	wmb();
9593
9594	switch (flag) {
9595	case MBX_NOWAIT:
9596		/* Set up reference to mailbox command */
9597		psli->mbox_active = pmbox;
9598		/* Interrupt board to do it */
9599		writel(CA_MBATT, phba->CAregaddr);
9600		readl(phba->CAregaddr); /* flush */
9601		/* Don't wait for it to finish, just return */
9602		break;
9603
9604	case MBX_POLL:
9605		/* Set up null reference to mailbox command */
9606		psli->mbox_active = NULL;
9607		/* Interrupt board to do it */
9608		writel(CA_MBATT, phba->CAregaddr);
9609		readl(phba->CAregaddr); /* flush */
9610
9611		if (psli->sli_flag & LPFC_SLI_ACTIVE) {
9612			/* First read mbox status word */
9613			word0 = *((uint32_t *)phba->mbox);
9614			word0 = le32_to_cpu(word0);
9615		} else {
9616			/* First read mbox status word */
9617			if (lpfc_readl(phba->MBslimaddr, &word0)) {
9618				spin_unlock_irqrestore(&phba->hbalock,
9619						       drvr_flag);
9620				goto out_not_finished;
9621			}
9622		}
9623
9624		/* Read the HBA Host Attention Register */
9625		if (lpfc_readl(phba->HAregaddr, &ha_copy)) {
9626			spin_unlock_irqrestore(&phba->hbalock,
9627						       drvr_flag);
9628			goto out_not_finished;
9629		}
9630		timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, pmbox) *
9631							1000) + jiffies;
9632		i = 0;
9633		/* Wait for command to complete */
9634		while (((word0 & OWN_CHIP) == OWN_CHIP) ||
9635		       (!(ha_copy & HA_MBATT) &&
9636			(phba->link_state > LPFC_WARM_START))) {
9637			if (time_after(jiffies, timeout)) {
9638				psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
9639				spin_unlock_irqrestore(&phba->hbalock,
9640						       drvr_flag);
9641				goto out_not_finished;
9642			}
9643
9644			/* Check if we took a mbox interrupt while we were
9645			   polling */
9646			if (((word0 & OWN_CHIP) != OWN_CHIP)
9647			    && (evtctr != psli->slistat.mbox_event))
9648				break;
9649
9650			if (i++ > 10) {
9651				spin_unlock_irqrestore(&phba->hbalock,
9652						       drvr_flag);
9653				msleep(1);
9654				spin_lock_irqsave(&phba->hbalock, drvr_flag);
9655			}
9656
9657			if (psli->sli_flag & LPFC_SLI_ACTIVE) {
9658				/* First copy command data */
9659				word0 = *((uint32_t *)phba->mbox);
9660				word0 = le32_to_cpu(word0);
9661				if (mbx->mbxCommand == MBX_CONFIG_PORT) {
9662					MAILBOX_t *slimmb;
9663					uint32_t slimword0;
9664					/* Check real SLIM for any errors */
9665					slimword0 = readl(phba->MBslimaddr);
9666					slimmb = (MAILBOX_t *) & slimword0;
9667					if (((slimword0 & OWN_CHIP) != OWN_CHIP)
9668					    && slimmb->mbxStatus) {
9669						psli->sli_flag &=
9670						    ~LPFC_SLI_ACTIVE;
9671						word0 = slimword0;
9672					}
9673				}
9674			} else {
9675				/* First copy command data */
9676				word0 = readl(phba->MBslimaddr);
9677			}
9678			/* Read the HBA Host Attention Register */
9679			if (lpfc_readl(phba->HAregaddr, &ha_copy)) {
9680				spin_unlock_irqrestore(&phba->hbalock,
9681						       drvr_flag);
9682				goto out_not_finished;
9683			}
9684		}
9685
9686		if (psli->sli_flag & LPFC_SLI_ACTIVE) {
9687			/* copy results back to user */
9688			lpfc_sli_pcimem_bcopy(phba->mbox, mbx,
9689						MAILBOX_CMD_SIZE);
9690			/* Copy the mailbox extension data */
9691			if (pmbox->out_ext_byte_len && pmbox->ext_buf) {
9692				lpfc_sli_pcimem_bcopy(phba->mbox_ext,
9693						      pmbox->ext_buf,
9694						      pmbox->out_ext_byte_len);
9695			}
9696		} else {
9697			/* First copy command data */
9698			lpfc_memcpy_from_slim(mbx, phba->MBslimaddr,
9699						MAILBOX_CMD_SIZE);
9700			/* Copy the mailbox extension data */
9701			if (pmbox->out_ext_byte_len && pmbox->ext_buf) {
9702				lpfc_memcpy_from_slim(
9703					pmbox->ext_buf,
9704					phba->MBslimaddr +
9705					MAILBOX_HBA_EXT_OFFSET,
9706					pmbox->out_ext_byte_len);
9707			}
9708		}
9709
9710		writel(HA_MBATT, phba->HAregaddr);
9711		readl(phba->HAregaddr); /* flush */
9712
9713		psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
9714		status = mbx->mbxStatus;
9715	}
9716
9717	spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
9718	return status;
9719
9720out_not_finished:
9721	if (processing_queue) {
9722		pmbox->u.mb.mbxStatus = MBX_NOT_FINISHED;
9723		lpfc_mbox_cmpl_put(phba, pmbox);
9724	}
9725	return MBX_NOT_FINISHED;
9726}
9727
9728/**
9729 * lpfc_sli4_async_mbox_block - Block posting SLI4 asynchronous mailbox command
9730 * @phba: Pointer to HBA context object.
9731 *
9732 * The function blocks the posting of SLI4 asynchronous mailbox commands from
9733 * the driver internal pending mailbox queue. It will then try to wait out the
9734 * possible outstanding mailbox command before return.
9735 *
9736 * Returns:
9737 * 	0 - the outstanding mailbox command completed; otherwise, the wait for
9738 * 	the outstanding mailbox command timed out.
9739 **/
9740static int
9741lpfc_sli4_async_mbox_block(struct lpfc_hba *phba)
9742{
9743	struct lpfc_sli *psli = &phba->sli;
9744	LPFC_MBOXQ_t *mboxq;
9745	int rc = 0;
9746	unsigned long timeout = 0;
9747	u32 sli_flag;
9748	u8 cmd, subsys, opcode;
9749
9750	/* Mark the asynchronous mailbox command posting as blocked */
9751	spin_lock_irq(&phba->hbalock);
9752	psli->sli_flag |= LPFC_SLI_ASYNC_MBX_BLK;
9753	/* Determine how long we might wait for the active mailbox
9754	 * command to be gracefully completed by firmware.
9755	 */
9756	if (phba->sli.mbox_active)
9757		timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba,
9758						phba->sli.mbox_active) *
9759						1000) + jiffies;
9760	spin_unlock_irq(&phba->hbalock);
9761
9762	/* Make sure the mailbox is really active */
9763	if (timeout)
9764		lpfc_sli4_process_missed_mbox_completions(phba);
9765
9766	/* Wait for the outstanding mailbox command to complete */
9767	while (phba->sli.mbox_active) {
9768		/* Check active mailbox complete status every 2ms */
9769		msleep(2);
9770		if (time_after(jiffies, timeout)) {
9771			/* Timeout, mark the outstanding cmd not complete */
9772
9773			/* Sanity check sli.mbox_active has not completed or
9774			 * cancelled from another context during last 2ms sleep,
9775			 * so take hbalock to be sure before logging.
9776			 */
9777			spin_lock_irq(&phba->hbalock);
9778			if (phba->sli.mbox_active) {
9779				mboxq = phba->sli.mbox_active;
9780				cmd = mboxq->u.mb.mbxCommand;
9781				subsys = lpfc_sli_config_mbox_subsys_get(phba,
9782									 mboxq);
9783				opcode = lpfc_sli_config_mbox_opcode_get(phba,
9784									 mboxq);
9785				sli_flag = psli->sli_flag;
9786				spin_unlock_irq(&phba->hbalock);
9787				lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
9788						"2352 Mailbox command x%x "
9789						"(x%x/x%x) sli_flag x%x could "
9790						"not complete\n",
9791						cmd, subsys, opcode,
9792						sli_flag);
9793			} else {
9794				spin_unlock_irq(&phba->hbalock);
9795			}
9796
9797			rc = 1;
9798			break;
9799		}
9800	}
9801
9802	/* Can not cleanly block async mailbox command, fails it */
9803	if (rc) {
9804		spin_lock_irq(&phba->hbalock);
9805		psli->sli_flag &= ~LPFC_SLI_ASYNC_MBX_BLK;
9806		spin_unlock_irq(&phba->hbalock);
9807	}
9808	return rc;
9809}
9810
9811/**
9812 * lpfc_sli4_async_mbox_unblock - Block posting SLI4 async mailbox command
9813 * @phba: Pointer to HBA context object.
9814 *
9815 * The function unblocks and resume posting of SLI4 asynchronous mailbox
9816 * commands from the driver internal pending mailbox queue. It makes sure
9817 * that there is no outstanding mailbox command before resuming posting
9818 * asynchronous mailbox commands. If, for any reason, there is outstanding
9819 * mailbox command, it will try to wait it out before resuming asynchronous
9820 * mailbox command posting.
9821 **/
9822static void
9823lpfc_sli4_async_mbox_unblock(struct lpfc_hba *phba)
9824{
9825	struct lpfc_sli *psli = &phba->sli;
9826
9827	spin_lock_irq(&phba->hbalock);
9828	if (!(psli->sli_flag & LPFC_SLI_ASYNC_MBX_BLK)) {
9829		/* Asynchronous mailbox posting is not blocked, do nothing */
9830		spin_unlock_irq(&phba->hbalock);
9831		return;
9832	}
9833
9834	/* Outstanding synchronous mailbox command is guaranteed to be done,
9835	 * successful or timeout, after timing-out the outstanding mailbox
9836	 * command shall always be removed, so just unblock posting async
9837	 * mailbox command and resume
9838	 */
9839	psli->sli_flag &= ~LPFC_SLI_ASYNC_MBX_BLK;
9840	spin_unlock_irq(&phba->hbalock);
9841
9842	/* wake up worker thread to post asynchronous mailbox command */
9843	lpfc_worker_wake_up(phba);
9844}
9845
9846/**
9847 * lpfc_sli4_wait_bmbx_ready - Wait for bootstrap mailbox register ready
9848 * @phba: Pointer to HBA context object.
9849 * @mboxq: Pointer to mailbox object.
9850 *
9851 * The function waits for the bootstrap mailbox register ready bit from
9852 * port for twice the regular mailbox command timeout value.
9853 *
9854 *      0 - no timeout on waiting for bootstrap mailbox register ready.
9855 *      MBXERR_ERROR - wait for bootstrap mailbox register timed out or port
9856 *                     is in an unrecoverable state.
9857 **/
9858static int
9859lpfc_sli4_wait_bmbx_ready(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
9860{
9861	uint32_t db_ready;
9862	unsigned long timeout;
9863	struct lpfc_register bmbx_reg;
9864	struct lpfc_register portstat_reg = {-1};
9865
9866	/* Sanity check - there is no point to wait if the port is in an
9867	 * unrecoverable state.
9868	 */
9869	if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) >=
9870	    LPFC_SLI_INTF_IF_TYPE_2) {
9871		if (lpfc_readl(phba->sli4_hba.u.if_type2.STATUSregaddr,
9872			       &portstat_reg.word0) ||
9873		    lpfc_sli4_unrecoverable_port(&portstat_reg)) {
9874			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9875					"3858 Skipping bmbx ready because "
9876					"Port Status x%x\n",
9877					portstat_reg.word0);
9878			return MBXERR_ERROR;
9879		}
9880	}
9881
9882	timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, mboxq)
9883				   * 1000) + jiffies;
9884
9885	do {
9886		bmbx_reg.word0 = readl(phba->sli4_hba.BMBXregaddr);
9887		db_ready = bf_get(lpfc_bmbx_rdy, &bmbx_reg);
9888		if (!db_ready)
9889			mdelay(2);
9890
9891		if (time_after(jiffies, timeout))
9892			return MBXERR_ERROR;
9893	} while (!db_ready);
9894
9895	return 0;
9896}
9897
9898/**
9899 * lpfc_sli4_post_sync_mbox - Post an SLI4 mailbox to the bootstrap mailbox
9900 * @phba: Pointer to HBA context object.
9901 * @mboxq: Pointer to mailbox object.
9902 *
9903 * The function posts a mailbox to the port.  The mailbox is expected
9904 * to be comletely filled in and ready for the port to operate on it.
9905 * This routine executes a synchronous completion operation on the
9906 * mailbox by polling for its completion.
9907 *
9908 * The caller must not be holding any locks when calling this routine.
9909 *
9910 * Returns:
9911 *	MBX_SUCCESS - mailbox posted successfully
9912 *	Any of the MBX error values.
9913 **/
9914static int
9915lpfc_sli4_post_sync_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
9916{
9917	int rc = MBX_SUCCESS;
9918	unsigned long iflag;
9919	uint32_t mcqe_status;
9920	uint32_t mbx_cmnd;
9921	struct lpfc_sli *psli = &phba->sli;
9922	struct lpfc_mqe *mb = &mboxq->u.mqe;
9923	struct lpfc_bmbx_create *mbox_rgn;
9924	struct dma_address *dma_address;
9925
9926	/*
9927	 * Only one mailbox can be active to the bootstrap mailbox region
9928	 * at a time and there is no queueing provided.
9929	 */
9930	spin_lock_irqsave(&phba->hbalock, iflag);
9931	if (psli->sli_flag & LPFC_SLI_MBOX_ACTIVE) {
9932		spin_unlock_irqrestore(&phba->hbalock, iflag);
9933		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
9934				"(%d):2532 Mailbox command x%x (x%x/x%x) "
9935				"cannot issue Data: x%x x%x\n",
9936				mboxq->vport ? mboxq->vport->vpi : 0,
9937				mboxq->u.mb.mbxCommand,
9938				lpfc_sli_config_mbox_subsys_get(phba, mboxq),
9939				lpfc_sli_config_mbox_opcode_get(phba, mboxq),
9940				psli->sli_flag, MBX_POLL);
9941		return MBXERR_ERROR;
9942	}
9943	/* The server grabs the token and owns it until release */
9944	psli->sli_flag |= LPFC_SLI_MBOX_ACTIVE;
9945	phba->sli.mbox_active = mboxq;
9946	spin_unlock_irqrestore(&phba->hbalock, iflag);
9947
9948	/* wait for bootstrap mbox register for readyness */
9949	rc = lpfc_sli4_wait_bmbx_ready(phba, mboxq);
9950	if (rc)
9951		goto exit;
9952	/*
9953	 * Initialize the bootstrap memory region to avoid stale data areas
9954	 * in the mailbox post.  Then copy the caller's mailbox contents to
9955	 * the bmbx mailbox region.
9956	 */
9957	mbx_cmnd = bf_get(lpfc_mqe_command, mb);
9958	memset(phba->sli4_hba.bmbx.avirt, 0, sizeof(struct lpfc_bmbx_create));
9959	lpfc_sli4_pcimem_bcopy(mb, phba->sli4_hba.bmbx.avirt,
9960			       sizeof(struct lpfc_mqe));
9961
9962	/* Post the high mailbox dma address to the port and wait for ready. */
9963	dma_address = &phba->sli4_hba.bmbx.dma_address;
9964	writel(dma_address->addr_hi, phba->sli4_hba.BMBXregaddr);
9965
9966	/* wait for bootstrap mbox register for hi-address write done */
9967	rc = lpfc_sli4_wait_bmbx_ready(phba, mboxq);
9968	if (rc)
9969		goto exit;
9970
9971	/* Post the low mailbox dma address to the port. */
9972	writel(dma_address->addr_lo, phba->sli4_hba.BMBXregaddr);
9973
9974	/* wait for bootstrap mbox register for low address write done */
9975	rc = lpfc_sli4_wait_bmbx_ready(phba, mboxq);
9976	if (rc)
9977		goto exit;
9978
9979	/*
9980	 * Read the CQ to ensure the mailbox has completed.
9981	 * If so, update the mailbox status so that the upper layers
9982	 * can complete the request normally.
9983	 */
9984	lpfc_sli4_pcimem_bcopy(phba->sli4_hba.bmbx.avirt, mb,
9985			       sizeof(struct lpfc_mqe));
9986	mbox_rgn = (struct lpfc_bmbx_create *) phba->sli4_hba.bmbx.avirt;
9987	lpfc_sli4_pcimem_bcopy(&mbox_rgn->mcqe, &mboxq->mcqe,
9988			       sizeof(struct lpfc_mcqe));
9989	mcqe_status = bf_get(lpfc_mcqe_status, &mbox_rgn->mcqe);
9990	/*
9991	 * When the CQE status indicates a failure and the mailbox status
9992	 * indicates success then copy the CQE status into the mailbox status
9993	 * (and prefix it with x4000).
9994	 */
9995	if (mcqe_status != MB_CQE_STATUS_SUCCESS) {
9996		if (bf_get(lpfc_mqe_status, mb) == MBX_SUCCESS)
9997			bf_set(lpfc_mqe_status, mb,
9998			       (LPFC_MBX_ERROR_RANGE | mcqe_status));
9999		rc = MBXERR_ERROR;
10000	} else
10001		lpfc_sli4_swap_str(phba, mboxq);
10002
10003	lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
10004			"(%d):0356 Mailbox cmd x%x (x%x/x%x) Status x%x "
10005			"Data: x%x x%x x%x x%x x%x x%x x%x x%x x%x x%x x%x"
10006			" x%x x%x CQ: x%x x%x x%x x%x\n",
10007			mboxq->vport ? mboxq->vport->vpi : 0, mbx_cmnd,
10008			lpfc_sli_config_mbox_subsys_get(phba, mboxq),
10009			lpfc_sli_config_mbox_opcode_get(phba, mboxq),
10010			bf_get(lpfc_mqe_status, mb),
10011			mb->un.mb_words[0], mb->un.mb_words[1],
10012			mb->un.mb_words[2], mb->un.mb_words[3],
10013			mb->un.mb_words[4], mb->un.mb_words[5],
10014			mb->un.mb_words[6], mb->un.mb_words[7],
10015			mb->un.mb_words[8], mb->un.mb_words[9],
10016			mb->un.mb_words[10], mb->un.mb_words[11],
10017			mb->un.mb_words[12], mboxq->mcqe.word0,
10018			mboxq->mcqe.mcqe_tag0, 	mboxq->mcqe.mcqe_tag1,
10019			mboxq->mcqe.trailer);
10020exit:
10021	/* We are holding the token, no needed for lock when release */
10022	spin_lock_irqsave(&phba->hbalock, iflag);
10023	psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
10024	phba->sli.mbox_active = NULL;
10025	spin_unlock_irqrestore(&phba->hbalock, iflag);
10026	return rc;
10027}
10028
10029/**
10030 * lpfc_sli_issue_mbox_s4 - Issue an SLI4 mailbox command to firmware
10031 * @phba: Pointer to HBA context object.
10032 * @mboxq: Pointer to mailbox object.
10033 * @flag: Flag indicating how the mailbox need to be processed.
10034 *
10035 * This function is called by discovery code and HBA management code to submit
10036 * a mailbox command to firmware with SLI-4 interface spec.
10037 *
10038 * Return codes the caller owns the mailbox command after the return of the
10039 * function.
10040 **/
10041static int
10042lpfc_sli_issue_mbox_s4(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq,
10043		       uint32_t flag)
10044{
10045	struct lpfc_sli *psli = &phba->sli;
10046	unsigned long iflags;
10047	int rc;
10048
10049	/* dump from issue mailbox command if setup */
10050	lpfc_idiag_mbxacc_dump_issue_mbox(phba, &mboxq->u.mb);
10051
10052	rc = lpfc_mbox_dev_check(phba);
10053	if (unlikely(rc)) {
10054		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10055				"(%d):2544 Mailbox command x%x (x%x/x%x) "
10056				"cannot issue Data: x%x x%x\n",
10057				mboxq->vport ? mboxq->vport->vpi : 0,
10058				mboxq->u.mb.mbxCommand,
10059				lpfc_sli_config_mbox_subsys_get(phba, mboxq),
10060				lpfc_sli_config_mbox_opcode_get(phba, mboxq),
10061				psli->sli_flag, flag);
10062		goto out_not_finished;
10063	}
10064
10065	/* Detect polling mode and jump to a handler */
10066	if (!phba->sli4_hba.intr_enable) {
10067		if (flag == MBX_POLL)
10068			rc = lpfc_sli4_post_sync_mbox(phba, mboxq);
10069		else
10070			rc = -EIO;
10071		if (rc != MBX_SUCCESS)
10072			lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
10073					"(%d):2541 Mailbox command x%x "
10074					"(x%x/x%x) failure: "
10075					"mqe_sta: x%x mcqe_sta: x%x/x%x "
10076					"Data: x%x x%x\n",
10077					mboxq->vport ? mboxq->vport->vpi : 0,
10078					mboxq->u.mb.mbxCommand,
10079					lpfc_sli_config_mbox_subsys_get(phba,
10080									mboxq),
10081					lpfc_sli_config_mbox_opcode_get(phba,
10082									mboxq),
10083					bf_get(lpfc_mqe_status, &mboxq->u.mqe),
10084					bf_get(lpfc_mcqe_status, &mboxq->mcqe),
10085					bf_get(lpfc_mcqe_ext_status,
10086					       &mboxq->mcqe),
10087					psli->sli_flag, flag);
10088		return rc;
10089	} else if (flag == MBX_POLL) {
10090		lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
10091				"(%d):2542 Try to issue mailbox command "
10092				"x%x (x%x/x%x) synchronously ahead of async "
10093				"mailbox command queue: x%x x%x\n",
10094				mboxq->vport ? mboxq->vport->vpi : 0,
10095				mboxq->u.mb.mbxCommand,
10096				lpfc_sli_config_mbox_subsys_get(phba, mboxq),
10097				lpfc_sli_config_mbox_opcode_get(phba, mboxq),
10098				psli->sli_flag, flag);
10099		/* Try to block the asynchronous mailbox posting */
10100		rc = lpfc_sli4_async_mbox_block(phba);
10101		if (!rc) {
10102			/* Successfully blocked, now issue sync mbox cmd */
10103			rc = lpfc_sli4_post_sync_mbox(phba, mboxq);
10104			if (rc != MBX_SUCCESS)
10105				lpfc_printf_log(phba, KERN_WARNING,
10106					LOG_MBOX | LOG_SLI,
10107					"(%d):2597 Sync Mailbox command "
10108					"x%x (x%x/x%x) failure: "
10109					"mqe_sta: x%x mcqe_sta: x%x/x%x "
10110					"Data: x%x x%x\n",
10111					mboxq->vport ? mboxq->vport->vpi : 0,
10112					mboxq->u.mb.mbxCommand,
10113					lpfc_sli_config_mbox_subsys_get(phba,
10114									mboxq),
10115					lpfc_sli_config_mbox_opcode_get(phba,
10116									mboxq),
10117					bf_get(lpfc_mqe_status, &mboxq->u.mqe),
10118					bf_get(lpfc_mcqe_status, &mboxq->mcqe),
10119					bf_get(lpfc_mcqe_ext_status,
10120					       &mboxq->mcqe),
10121					psli->sli_flag, flag);
10122			/* Unblock the async mailbox posting afterward */
10123			lpfc_sli4_async_mbox_unblock(phba);
10124		}
10125		return rc;
10126	}
10127
10128	/* Now, interrupt mode asynchronous mailbox command */
10129	rc = lpfc_mbox_cmd_check(phba, mboxq);
10130	if (rc) {
10131		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10132				"(%d):2543 Mailbox command x%x (x%x/x%x) "
10133				"cannot issue Data: x%x x%x\n",
10134				mboxq->vport ? mboxq->vport->vpi : 0,
10135				mboxq->u.mb.mbxCommand,
10136				lpfc_sli_config_mbox_subsys_get(phba, mboxq),
10137				lpfc_sli_config_mbox_opcode_get(phba, mboxq),
10138				psli->sli_flag, flag);
10139		goto out_not_finished;
10140	}
10141
10142	/* Put the mailbox command to the driver internal FIFO */
10143	psli->slistat.mbox_busy++;
10144	spin_lock_irqsave(&phba->hbalock, iflags);
10145	lpfc_mbox_put(phba, mboxq);
10146	spin_unlock_irqrestore(&phba->hbalock, iflags);
10147	lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
10148			"(%d):0354 Mbox cmd issue - Enqueue Data: "
10149			"x%x (x%x/x%x) x%x x%x x%x x%x\n",
10150			mboxq->vport ? mboxq->vport->vpi : 0xffffff,
10151			bf_get(lpfc_mqe_command, &mboxq->u.mqe),
10152			lpfc_sli_config_mbox_subsys_get(phba, mboxq),
10153			lpfc_sli_config_mbox_opcode_get(phba, mboxq),
10154			mboxq->u.mb.un.varUnregLogin.rpi,
10155			phba->pport->port_state,
10156			psli->sli_flag, MBX_NOWAIT);
10157	/* Wake up worker thread to transport mailbox command from head */
10158	lpfc_worker_wake_up(phba);
10159
10160	return MBX_BUSY;
10161
10162out_not_finished:
10163	return MBX_NOT_FINISHED;
10164}
10165
10166/**
10167 * lpfc_sli4_post_async_mbox - Post an SLI4 mailbox command to device
10168 * @phba: Pointer to HBA context object.
10169 *
10170 * This function is called by worker thread to send a mailbox command to
10171 * SLI4 HBA firmware.
10172 *
10173 **/
10174int
10175lpfc_sli4_post_async_mbox(struct lpfc_hba *phba)
10176{
10177	struct lpfc_sli *psli = &phba->sli;
10178	LPFC_MBOXQ_t *mboxq;
10179	int rc = MBX_SUCCESS;
10180	unsigned long iflags;
10181	struct lpfc_mqe *mqe;
10182	uint32_t mbx_cmnd;
10183
10184	/* Check interrupt mode before post async mailbox command */
10185	if (unlikely(!phba->sli4_hba.intr_enable))
10186		return MBX_NOT_FINISHED;
10187
10188	/* Check for mailbox command service token */
10189	spin_lock_irqsave(&phba->hbalock, iflags);
10190	if (unlikely(psli->sli_flag & LPFC_SLI_ASYNC_MBX_BLK)) {
10191		spin_unlock_irqrestore(&phba->hbalock, iflags);
10192		return MBX_NOT_FINISHED;
10193	}
10194	if (psli->sli_flag & LPFC_SLI_MBOX_ACTIVE) {
10195		spin_unlock_irqrestore(&phba->hbalock, iflags);
10196		return MBX_NOT_FINISHED;
10197	}
10198	if (unlikely(phba->sli.mbox_active)) {
10199		spin_unlock_irqrestore(&phba->hbalock, iflags);
10200		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10201				"0384 There is pending active mailbox cmd\n");
10202		return MBX_NOT_FINISHED;
10203	}
10204	/* Take the mailbox command service token */
10205	psli->sli_flag |= LPFC_SLI_MBOX_ACTIVE;
10206
10207	/* Get the next mailbox command from head of queue */
10208	mboxq = lpfc_mbox_get(phba);
10209
10210	/* If no more mailbox command waiting for post, we're done */
10211	if (!mboxq) {
10212		psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
10213		spin_unlock_irqrestore(&phba->hbalock, iflags);
10214		return MBX_SUCCESS;
10215	}
10216	phba->sli.mbox_active = mboxq;
10217	spin_unlock_irqrestore(&phba->hbalock, iflags);
10218
10219	/* Check device readiness for posting mailbox command */
10220	rc = lpfc_mbox_dev_check(phba);
10221	if (unlikely(rc))
10222		/* Driver clean routine will clean up pending mailbox */
10223		goto out_not_finished;
10224
10225	/* Prepare the mbox command to be posted */
10226	mqe = &mboxq->u.mqe;
10227	mbx_cmnd = bf_get(lpfc_mqe_command, mqe);
10228
10229	/* Start timer for the mbox_tmo and log some mailbox post messages */
10230	mod_timer(&psli->mbox_tmo, (jiffies +
10231		  msecs_to_jiffies(1000 * lpfc_mbox_tmo_val(phba, mboxq))));
10232
10233	lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
10234			"(%d):0355 Mailbox cmd x%x (x%x/x%x) issue Data: "
10235			"x%x x%x\n",
10236			mboxq->vport ? mboxq->vport->vpi : 0, mbx_cmnd,
10237			lpfc_sli_config_mbox_subsys_get(phba, mboxq),
10238			lpfc_sli_config_mbox_opcode_get(phba, mboxq),
10239			phba->pport->port_state, psli->sli_flag);
10240
10241	if (mbx_cmnd != MBX_HEARTBEAT) {
10242		if (mboxq->vport) {
10243			lpfc_debugfs_disc_trc(mboxq->vport,
10244				LPFC_DISC_TRC_MBOX_VPORT,
10245				"MBOX Send vport: cmd:x%x mb:x%x x%x",
10246				mbx_cmnd, mqe->un.mb_words[0],
10247				mqe->un.mb_words[1]);
10248		} else {
10249			lpfc_debugfs_disc_trc(phba->pport,
10250				LPFC_DISC_TRC_MBOX,
10251				"MBOX Send: cmd:x%x mb:x%x x%x",
10252				mbx_cmnd, mqe->un.mb_words[0],
10253				mqe->un.mb_words[1]);
10254		}
10255	}
10256	psli->slistat.mbox_cmd++;
10257
10258	/* Post the mailbox command to the port */
10259	rc = lpfc_sli4_mq_put(phba->sli4_hba.mbx_wq, mqe);
10260	if (rc != MBX_SUCCESS) {
10261		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10262				"(%d):2533 Mailbox command x%x (x%x/x%x) "
10263				"cannot issue Data: x%x x%x\n",
10264				mboxq->vport ? mboxq->vport->vpi : 0,
10265				mboxq->u.mb.mbxCommand,
10266				lpfc_sli_config_mbox_subsys_get(phba, mboxq),
10267				lpfc_sli_config_mbox_opcode_get(phba, mboxq),
10268				psli->sli_flag, MBX_NOWAIT);
10269		goto out_not_finished;
10270	}
10271
10272	return rc;
10273
10274out_not_finished:
10275	spin_lock_irqsave(&phba->hbalock, iflags);
10276	if (phba->sli.mbox_active) {
10277		mboxq->u.mb.mbxStatus = MBX_NOT_FINISHED;
10278		__lpfc_mbox_cmpl_put(phba, mboxq);
10279		/* Release the token */
10280		psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
10281		phba->sli.mbox_active = NULL;
10282	}
10283	spin_unlock_irqrestore(&phba->hbalock, iflags);
10284
10285	return MBX_NOT_FINISHED;
10286}
10287
10288/**
10289 * lpfc_sli_issue_mbox - Wrapper func for issuing mailbox command
10290 * @phba: Pointer to HBA context object.
10291 * @pmbox: Pointer to mailbox object.
10292 * @flag: Flag indicating how the mailbox need to be processed.
10293 *
10294 * This routine wraps the actual SLI3 or SLI4 mailbox issuing routine from
10295 * the API jump table function pointer from the lpfc_hba struct.
10296 *
10297 * Return codes the caller owns the mailbox command after the return of the
10298 * function.
10299 **/
10300int
10301lpfc_sli_issue_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, uint32_t flag)
10302{
10303	return phba->lpfc_sli_issue_mbox(phba, pmbox, flag);
10304}
10305
10306/**
10307 * lpfc_mbox_api_table_setup - Set up mbox api function jump table
10308 * @phba: The hba struct for which this call is being executed.
10309 * @dev_grp: The HBA PCI-Device group number.
10310 *
10311 * This routine sets up the mbox interface API function jump table in @phba
10312 * struct.
10313 * Returns: 0 - success, -ENODEV - failure.
10314 **/
10315int
10316lpfc_mbox_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
10317{
10318
10319	switch (dev_grp) {
10320	case LPFC_PCI_DEV_LP:
10321		phba->lpfc_sli_issue_mbox = lpfc_sli_issue_mbox_s3;
10322		phba->lpfc_sli_handle_slow_ring_event =
10323				lpfc_sli_handle_slow_ring_event_s3;
10324		phba->lpfc_sli_hbq_to_firmware = lpfc_sli_hbq_to_firmware_s3;
10325		phba->lpfc_sli_brdrestart = lpfc_sli_brdrestart_s3;
10326		phba->lpfc_sli_brdready = lpfc_sli_brdready_s3;
10327		break;
10328	case LPFC_PCI_DEV_OC:
10329		phba->lpfc_sli_issue_mbox = lpfc_sli_issue_mbox_s4;
10330		phba->lpfc_sli_handle_slow_ring_event =
10331				lpfc_sli_handle_slow_ring_event_s4;
10332		phba->lpfc_sli_hbq_to_firmware = lpfc_sli_hbq_to_firmware_s4;
10333		phba->lpfc_sli_brdrestart = lpfc_sli_brdrestart_s4;
10334		phba->lpfc_sli_brdready = lpfc_sli_brdready_s4;
10335		break;
10336	default:
10337		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
10338				"1420 Invalid HBA PCI-device group: 0x%x\n",
10339				dev_grp);
10340		return -ENODEV;
10341	}
10342	return 0;
10343}
10344
10345/**
10346 * __lpfc_sli_ringtx_put - Add an iocb to the txq
10347 * @phba: Pointer to HBA context object.
10348 * @pring: Pointer to driver SLI ring object.
10349 * @piocb: Pointer to address of newly added command iocb.
10350 *
10351 * This function is called with hbalock held for SLI3 ports or
10352 * the ring lock held for SLI4 ports to add a command
10353 * iocb to the txq when SLI layer cannot submit the command iocb
10354 * to the ring.
10355 **/
10356void
10357__lpfc_sli_ringtx_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
10358		    struct lpfc_iocbq *piocb)
10359{
10360	if (phba->sli_rev == LPFC_SLI_REV4)
10361		lockdep_assert_held(&pring->ring_lock);
10362	else
10363		lockdep_assert_held(&phba->hbalock);
10364	/* Insert the caller's iocb in the txq tail for later processing. */
10365	list_add_tail(&piocb->list, &pring->txq);
10366}
10367
10368/**
10369 * lpfc_sli_next_iocb - Get the next iocb in the txq
10370 * @phba: Pointer to HBA context object.
10371 * @pring: Pointer to driver SLI ring object.
10372 * @piocb: Pointer to address of newly added command iocb.
10373 *
10374 * This function is called with hbalock held before a new
10375 * iocb is submitted to the firmware. This function checks
10376 * txq to flush the iocbs in txq to Firmware before
10377 * submitting new iocbs to the Firmware.
10378 * If there are iocbs in the txq which need to be submitted
10379 * to firmware, lpfc_sli_next_iocb returns the first element
10380 * of the txq after dequeuing it from txq.
10381 * If there is no iocb in the txq then the function will return
10382 * *piocb and *piocb is set to NULL. Caller needs to check
10383 * *piocb to find if there are more commands in the txq.
10384 **/
10385static struct lpfc_iocbq *
10386lpfc_sli_next_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
10387		   struct lpfc_iocbq **piocb)
10388{
10389	struct lpfc_iocbq * nextiocb;
10390
10391	lockdep_assert_held(&phba->hbalock);
10392
10393	nextiocb = lpfc_sli_ringtx_get(phba, pring);
10394	if (!nextiocb) {
10395		nextiocb = *piocb;
10396		*piocb = NULL;
10397	}
10398
10399	return nextiocb;
10400}
10401
10402/**
10403 * __lpfc_sli_issue_iocb_s3 - SLI3 device lockless ver of lpfc_sli_issue_iocb
10404 * @phba: Pointer to HBA context object.
10405 * @ring_number: SLI ring number to issue iocb on.
10406 * @piocb: Pointer to command iocb.
10407 * @flag: Flag indicating if this command can be put into txq.
10408 *
10409 * __lpfc_sli_issue_iocb_s3 is used by other functions in the driver to issue
10410 * an iocb command to an HBA with SLI-3 interface spec. If the PCI slot is
10411 * recovering from error state, if HBA is resetting or if LPFC_STOP_IOCB_EVENT
10412 * flag is turned on, the function returns IOCB_ERROR. When the link is down,
10413 * this function allows only iocbs for posting buffers. This function finds
10414 * next available slot in the command ring and posts the command to the
10415 * available slot and writes the port attention register to request HBA start
10416 * processing new iocb. If there is no slot available in the ring and
10417 * flag & SLI_IOCB_RET_IOCB is set, the new iocb is added to the txq, otherwise
10418 * the function returns IOCB_BUSY.
10419 *
10420 * This function is called with hbalock held. The function will return success
10421 * after it successfully submit the iocb to firmware or after adding to the
10422 * txq.
10423 **/
10424static int
10425__lpfc_sli_issue_iocb_s3(struct lpfc_hba *phba, uint32_t ring_number,
10426		    struct lpfc_iocbq *piocb, uint32_t flag)
10427{
10428	struct lpfc_iocbq *nextiocb;
10429	IOCB_t *iocb;
10430	struct lpfc_sli_ring *pring = &phba->sli.sli3_ring[ring_number];
10431
10432	lockdep_assert_held(&phba->hbalock);
10433
10434	if (piocb->cmd_cmpl && (!piocb->vport) &&
10435	   (piocb->iocb.ulpCommand != CMD_ABORT_XRI_CN) &&
10436	   (piocb->iocb.ulpCommand != CMD_CLOSE_XRI_CN)) {
10437		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10438				"1807 IOCB x%x failed. No vport\n",
10439				piocb->iocb.ulpCommand);
10440		dump_stack();
10441		return IOCB_ERROR;
10442	}
10443
10444
10445	/* If the PCI channel is in offline state, do not post iocbs. */
10446	if (unlikely(pci_channel_offline(phba->pcidev)))
10447		return IOCB_ERROR;
10448
10449	/* If HBA has a deferred error attention, fail the iocb. */
10450	if (unlikely(phba->hba_flag & DEFER_ERATT))
10451		return IOCB_ERROR;
10452
10453	/*
10454	 * We should never get an IOCB if we are in a < LINK_DOWN state
10455	 */
10456	if (unlikely(phba->link_state < LPFC_LINK_DOWN))
10457		return IOCB_ERROR;
10458
10459	/*
10460	 * Check to see if we are blocking IOCB processing because of a
10461	 * outstanding event.
10462	 */
10463	if (unlikely(pring->flag & LPFC_STOP_IOCB_EVENT))
10464		goto iocb_busy;
10465
10466	if (unlikely(phba->link_state == LPFC_LINK_DOWN)) {
10467		/*
10468		 * Only CREATE_XRI, CLOSE_XRI, and QUE_RING_BUF
10469		 * can be issued if the link is not up.
10470		 */
10471		switch (piocb->iocb.ulpCommand) {
10472		case CMD_QUE_RING_BUF_CN:
10473		case CMD_QUE_RING_BUF64_CN:
10474			/*
10475			 * For IOCBs, like QUE_RING_BUF, that have no rsp ring
10476			 * completion, cmd_cmpl MUST be 0.
10477			 */
10478			if (piocb->cmd_cmpl)
10479				piocb->cmd_cmpl = NULL;
10480			fallthrough;
10481		case CMD_CREATE_XRI_CR:
10482		case CMD_CLOSE_XRI_CN:
10483		case CMD_CLOSE_XRI_CX:
10484			break;
10485		default:
10486			goto iocb_busy;
10487		}
10488
10489	/*
10490	 * For FCP commands, we must be in a state where we can process link
10491	 * attention events.
10492	 */
10493	} else if (unlikely(pring->ringno == LPFC_FCP_RING &&
10494			    !(phba->sli.sli_flag & LPFC_PROCESS_LA))) {
10495		goto iocb_busy;
10496	}
10497
10498	while ((iocb = lpfc_sli_next_iocb_slot(phba, pring)) &&
10499	       (nextiocb = lpfc_sli_next_iocb(phba, pring, &piocb)))
10500		lpfc_sli_submit_iocb(phba, pring, iocb, nextiocb);
10501
10502	if (iocb)
10503		lpfc_sli_update_ring(phba, pring);
10504	else
10505		lpfc_sli_update_full_ring(phba, pring);
10506
10507	if (!piocb)
10508		return IOCB_SUCCESS;
10509
10510	goto out_busy;
10511
10512 iocb_busy:
10513	pring->stats.iocb_cmd_delay++;
10514
10515 out_busy:
10516
10517	if (!(flag & SLI_IOCB_RET_IOCB)) {
10518		__lpfc_sli_ringtx_put(phba, pring, piocb);
10519		return IOCB_SUCCESS;
10520	}
10521
10522	return IOCB_BUSY;
10523}
10524
10525/**
10526 * __lpfc_sli_issue_fcp_io_s3 - SLI3 device for sending fcp io iocb
10527 * @phba: Pointer to HBA context object.
10528 * @ring_number: SLI ring number to issue wqe on.
10529 * @piocb: Pointer to command iocb.
10530 * @flag: Flag indicating if this command can be put into txq.
10531 *
10532 * __lpfc_sli_issue_fcp_io_s3 is wrapper function to invoke lockless func to
10533 * send  an iocb command to an HBA with SLI-3 interface spec.
10534 *
10535 * This function takes the hbalock before invoking the lockless version.
10536 * The function will return success after it successfully submit the wqe to
10537 * firmware or after adding to the txq.
10538 **/
10539static int
10540__lpfc_sli_issue_fcp_io_s3(struct lpfc_hba *phba, uint32_t ring_number,
10541			   struct lpfc_iocbq *piocb, uint32_t flag)
10542{
10543	unsigned long iflags;
10544	int rc;
10545
10546	spin_lock_irqsave(&phba->hbalock, iflags);
10547	rc = __lpfc_sli_issue_iocb_s3(phba, ring_number, piocb, flag);
10548	spin_unlock_irqrestore(&phba->hbalock, iflags);
10549
10550	return rc;
10551}
10552
10553/**
10554 * __lpfc_sli_issue_fcp_io_s4 - SLI4 device for sending fcp io wqe
10555 * @phba: Pointer to HBA context object.
10556 * @ring_number: SLI ring number to issue wqe on.
10557 * @piocb: Pointer to command iocb.
10558 * @flag: Flag indicating if this command can be put into txq.
10559 *
10560 * __lpfc_sli_issue_fcp_io_s4 is used by other functions in the driver to issue
10561 * an wqe command to an HBA with SLI-4 interface spec.
10562 *
10563 * This function is a lockless version. The function will return success
10564 * after it successfully submit the wqe to firmware or after adding to the
10565 * txq.
10566 **/
10567static int
10568__lpfc_sli_issue_fcp_io_s4(struct lpfc_hba *phba, uint32_t ring_number,
10569			   struct lpfc_iocbq *piocb, uint32_t flag)
10570{
10571	struct lpfc_io_buf *lpfc_cmd = piocb->io_buf;
10572
10573	lpfc_prep_embed_io(phba, lpfc_cmd);
10574	return lpfc_sli4_issue_wqe(phba, lpfc_cmd->hdwq, piocb);
10575}
10576
10577void
10578lpfc_prep_embed_io(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd)
10579{
10580	struct lpfc_iocbq *piocb = &lpfc_cmd->cur_iocbq;
10581	union lpfc_wqe128 *wqe = &lpfc_cmd->cur_iocbq.wqe;
10582	struct sli4_sge *sgl;
10583
10584	/* 128 byte wqe support here */
10585	sgl = (struct sli4_sge *)lpfc_cmd->dma_sgl;
10586
10587	if (phba->fcp_embed_io) {
10588		struct fcp_cmnd *fcp_cmnd;
10589		u32 *ptr;
10590
10591		fcp_cmnd = lpfc_cmd->fcp_cmnd;
10592
10593		/* Word 0-2 - FCP_CMND */
10594		wqe->generic.bde.tus.f.bdeFlags =
10595			BUFF_TYPE_BDE_IMMED;
10596		wqe->generic.bde.tus.f.bdeSize = sgl->sge_len;
10597		wqe->generic.bde.addrHigh = 0;
10598		wqe->generic.bde.addrLow =  88;  /* Word 22 */
10599
10600		bf_set(wqe_wqes, &wqe->fcp_iwrite.wqe_com, 1);
10601		bf_set(wqe_dbde, &wqe->fcp_iwrite.wqe_com, 0);
10602
10603		/* Word 22-29  FCP CMND Payload */
10604		ptr = &wqe->words[22];
10605		memcpy(ptr, fcp_cmnd, sizeof(struct fcp_cmnd));
10606	} else {
10607		/* Word 0-2 - Inline BDE */
10608		wqe->generic.bde.tus.f.bdeFlags =  BUFF_TYPE_BDE_64;
10609		wqe->generic.bde.tus.f.bdeSize = sizeof(struct fcp_cmnd);
10610		wqe->generic.bde.addrHigh = sgl->addr_hi;
10611		wqe->generic.bde.addrLow =  sgl->addr_lo;
10612
10613		/* Word 10 */
10614		bf_set(wqe_dbde, &wqe->generic.wqe_com, 1);
10615		bf_set(wqe_wqes, &wqe->generic.wqe_com, 0);
10616	}
10617
10618	/* add the VMID tags as per switch response */
10619	if (unlikely(piocb->cmd_flag & LPFC_IO_VMID)) {
10620		if (phba->pport->vmid_flag & LPFC_VMID_TYPE_PRIO) {
10621			bf_set(wqe_ccpe, &wqe->fcp_iwrite.wqe_com, 1);
10622			bf_set(wqe_ccp, &wqe->fcp_iwrite.wqe_com,
10623					(piocb->vmid_tag.cs_ctl_vmid));
10624		} else if (phba->cfg_vmid_app_header) {
10625			bf_set(wqe_appid, &wqe->fcp_iwrite.wqe_com, 1);
10626			bf_set(wqe_wqes, &wqe->fcp_iwrite.wqe_com, 1);
10627			wqe->words[31] = piocb->vmid_tag.app_id;
10628		}
10629	}
10630}
10631
10632/**
10633 * __lpfc_sli_issue_iocb_s4 - SLI4 device lockless ver of lpfc_sli_issue_iocb
10634 * @phba: Pointer to HBA context object.
10635 * @ring_number: SLI ring number to issue iocb on.
10636 * @piocb: Pointer to command iocb.
10637 * @flag: Flag indicating if this command can be put into txq.
10638 *
10639 * __lpfc_sli_issue_iocb_s4 is used by other functions in the driver to issue
10640 * an iocb command to an HBA with SLI-4 interface spec.
10641 *
10642 * This function is called with ringlock held. The function will return success
10643 * after it successfully submit the iocb to firmware or after adding to the
10644 * txq.
10645 **/
10646static int
10647__lpfc_sli_issue_iocb_s4(struct lpfc_hba *phba, uint32_t ring_number,
10648			 struct lpfc_iocbq *piocb, uint32_t flag)
10649{
10650	struct lpfc_sglq *sglq;
10651	union lpfc_wqe128 *wqe;
10652	struct lpfc_queue *wq;
10653	struct lpfc_sli_ring *pring;
10654	u32 ulp_command = get_job_cmnd(phba, piocb);
10655
10656	/* Get the WQ */
10657	if ((piocb->cmd_flag & LPFC_IO_FCP) ||
10658	    (piocb->cmd_flag & LPFC_USE_FCPWQIDX)) {
10659		wq = phba->sli4_hba.hdwq[piocb->hba_wqidx].io_wq;
10660	} else {
10661		wq = phba->sli4_hba.els_wq;
10662	}
10663
10664	/* Get corresponding ring */
10665	pring = wq->pring;
10666
10667	/*
10668	 * The WQE can be either 64 or 128 bytes,
10669	 */
10670
10671	lockdep_assert_held(&pring->ring_lock);
10672	wqe = &piocb->wqe;
10673	if (piocb->sli4_xritag == NO_XRI) {
10674		if (ulp_command == CMD_ABORT_XRI_CX)
10675			sglq = NULL;
10676		else {
10677			sglq = __lpfc_sli_get_els_sglq(phba, piocb);
10678			if (!sglq) {
10679				if (!(flag & SLI_IOCB_RET_IOCB)) {
10680					__lpfc_sli_ringtx_put(phba,
10681							pring,
10682							piocb);
10683					return IOCB_SUCCESS;
10684				} else {
10685					return IOCB_BUSY;
10686				}
10687			}
10688		}
10689	} else if (piocb->cmd_flag &  LPFC_IO_FCP) {
10690		/* These IO's already have an XRI and a mapped sgl. */
10691		sglq = NULL;
10692	}
10693	else {
10694		/*
10695		 * This is a continuation of a commandi,(CX) so this
10696		 * sglq is on the active list
10697		 */
10698		sglq = __lpfc_get_active_sglq(phba, piocb->sli4_lxritag);
10699		if (!sglq)
10700			return IOCB_ERROR;
10701	}
10702
10703	if (sglq) {
10704		piocb->sli4_lxritag = sglq->sli4_lxritag;
10705		piocb->sli4_xritag = sglq->sli4_xritag;
10706
10707		/* ABTS sent by initiator to CT exchange, the
10708		 * RX_ID field will be filled with the newly
10709		 * allocated responder XRI.
10710		 */
10711		if (ulp_command == CMD_XMIT_BLS_RSP64_CX &&
10712		    piocb->abort_bls == LPFC_ABTS_UNSOL_INT)
10713			bf_set(xmit_bls_rsp64_rxid, &wqe->xmit_bls_rsp,
10714			       piocb->sli4_xritag);
10715
10716		bf_set(wqe_xri_tag, &wqe->generic.wqe_com,
10717		       piocb->sli4_xritag);
10718
10719		if (lpfc_wqe_bpl2sgl(phba, piocb, sglq) == NO_XRI)
10720			return IOCB_ERROR;
10721	}
10722
10723	if (lpfc_sli4_wq_put(wq, wqe))
10724		return IOCB_ERROR;
10725
10726	lpfc_sli_ringtxcmpl_put(phba, pring, piocb);
10727
10728	return 0;
10729}
10730
10731/*
10732 * lpfc_sli_issue_fcp_io - Wrapper func for issuing fcp i/o
10733 *
10734 * This routine wraps the actual fcp i/o function for issusing WQE for sli-4
10735 * or IOCB for sli-3  function.
10736 * pointer from the lpfc_hba struct.
10737 *
10738 * Return codes:
10739 * IOCB_ERROR - Error
10740 * IOCB_SUCCESS - Success
10741 * IOCB_BUSY - Busy
10742 **/
10743int
10744lpfc_sli_issue_fcp_io(struct lpfc_hba *phba, uint32_t ring_number,
10745		      struct lpfc_iocbq *piocb, uint32_t flag)
10746{
10747	return phba->__lpfc_sli_issue_fcp_io(phba, ring_number, piocb, flag);
10748}
10749
10750/*
10751 * __lpfc_sli_issue_iocb - Wrapper func of lockless version for issuing iocb
10752 *
10753 * This routine wraps the actual lockless version for issusing IOCB function
10754 * pointer from the lpfc_hba struct.
10755 *
10756 * Return codes:
10757 * IOCB_ERROR - Error
10758 * IOCB_SUCCESS - Success
10759 * IOCB_BUSY - Busy
10760 **/
10761int
10762__lpfc_sli_issue_iocb(struct lpfc_hba *phba, uint32_t ring_number,
10763		struct lpfc_iocbq *piocb, uint32_t flag)
10764{
10765	return phba->__lpfc_sli_issue_iocb(phba, ring_number, piocb, flag);
10766}
10767
10768static void
10769__lpfc_sli_prep_els_req_rsp_s3(struct lpfc_iocbq *cmdiocbq,
10770			       struct lpfc_vport *vport,
10771			       struct lpfc_dmabuf *bmp, u16 cmd_size, u32 did,
10772			       u32 elscmd, u8 tmo, u8 expect_rsp)
10773{
10774	struct lpfc_hba *phba = vport->phba;
10775	IOCB_t *cmd;
10776
10777	cmd = &cmdiocbq->iocb;
10778	memset(cmd, 0, sizeof(*cmd));
10779
10780	cmd->un.elsreq64.bdl.addrHigh = putPaddrHigh(bmp->phys);
10781	cmd->un.elsreq64.bdl.addrLow = putPaddrLow(bmp->phys);
10782	cmd->un.elsreq64.bdl.bdeFlags = BUFF_TYPE_BLP_64;
10783
10784	if (expect_rsp) {
10785		cmd->un.elsreq64.bdl.bdeSize = (2 * sizeof(struct ulp_bde64));
10786		cmd->un.elsreq64.remoteID = did; /* DID */
10787		cmd->ulpCommand = CMD_ELS_REQUEST64_CR;
10788		cmd->ulpTimeout = tmo;
10789	} else {
10790		cmd->un.elsreq64.bdl.bdeSize = sizeof(struct ulp_bde64);
10791		cmd->un.genreq64.xmit_els_remoteID = did; /* DID */
10792		cmd->ulpCommand = CMD_XMIT_ELS_RSP64_CX;
10793		cmd->ulpPU = PARM_NPIV_DID;
10794	}
10795	cmd->ulpBdeCount = 1;
10796	cmd->ulpLe = 1;
10797	cmd->ulpClass = CLASS3;
10798
10799	/* If we have NPIV enabled, we want to send ELS traffic by VPI. */
10800	if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) {
10801		if (expect_rsp) {
10802			cmd->un.elsreq64.myID = vport->fc_myDID;
10803
10804			/* For ELS_REQUEST64_CR, use the VPI by default */
10805			cmd->ulpContext = phba->vpi_ids[vport->vpi];
10806		}
10807
10808		cmd->ulpCt_h = 0;
10809		/* The CT field must be 0=INVALID_RPI for the ECHO cmd */
10810		if (elscmd == ELS_CMD_ECHO)
10811			cmd->ulpCt_l = 0; /* context = invalid RPI */
10812		else
10813			cmd->ulpCt_l = 1; /* context = VPI */
10814	}
10815}
10816
10817static void
10818__lpfc_sli_prep_els_req_rsp_s4(struct lpfc_iocbq *cmdiocbq,
10819			       struct lpfc_vport *vport,
10820			       struct lpfc_dmabuf *bmp, u16 cmd_size, u32 did,
10821			       u32 elscmd, u8 tmo, u8 expect_rsp)
10822{
10823	struct lpfc_hba  *phba = vport->phba;
10824	union lpfc_wqe128 *wqe;
10825	struct ulp_bde64_le *bde;
10826	u8 els_id;
10827
10828	wqe = &cmdiocbq->wqe;
10829	memset(wqe, 0, sizeof(*wqe));
10830
10831	/* Word 0 - 2 BDE */
10832	bde = (struct ulp_bde64_le *)&wqe->generic.bde;
10833	bde->addr_low = cpu_to_le32(putPaddrLow(bmp->phys));
10834	bde->addr_high = cpu_to_le32(putPaddrHigh(bmp->phys));
10835	bde->type_size = cpu_to_le32(cmd_size);
10836	bde->type_size |= cpu_to_le32(ULP_BDE64_TYPE_BDE_64);
10837
10838	if (expect_rsp) {
10839		bf_set(wqe_cmnd, &wqe->els_req.wqe_com, CMD_ELS_REQUEST64_WQE);
10840
10841		/* Transfer length */
10842		wqe->els_req.payload_len = cmd_size;
10843		wqe->els_req.max_response_payload_len = FCELSSIZE;
10844
10845		/* DID */
10846		bf_set(wqe_els_did, &wqe->els_req.wqe_dest, did);
10847
10848		/* Word 11 - ELS_ID */
10849		switch (elscmd) {
10850		case ELS_CMD_PLOGI:
10851			els_id = LPFC_ELS_ID_PLOGI;
10852			break;
10853		case ELS_CMD_FLOGI:
10854			els_id = LPFC_ELS_ID_FLOGI;
10855			break;
10856		case ELS_CMD_LOGO:
10857			els_id = LPFC_ELS_ID_LOGO;
10858			break;
10859		case ELS_CMD_FDISC:
10860			if (!vport->fc_myDID) {
10861				els_id = LPFC_ELS_ID_FDISC;
10862				break;
10863			}
10864			fallthrough;
10865		default:
10866			els_id = LPFC_ELS_ID_DEFAULT;
10867			break;
10868		}
10869
10870		bf_set(wqe_els_id, &wqe->els_req.wqe_com, els_id);
10871	} else {
10872		/* DID */
10873		bf_set(wqe_els_did, &wqe->xmit_els_rsp.wqe_dest, did);
10874
10875		/* Transfer length */
10876		wqe->xmit_els_rsp.response_payload_len = cmd_size;
10877
10878		bf_set(wqe_cmnd, &wqe->xmit_els_rsp.wqe_com,
10879		       CMD_XMIT_ELS_RSP64_WQE);
10880	}
10881
10882	bf_set(wqe_tmo, &wqe->generic.wqe_com, tmo);
10883	bf_set(wqe_reqtag, &wqe->generic.wqe_com, cmdiocbq->iotag);
10884	bf_set(wqe_class, &wqe->generic.wqe_com, CLASS3);
10885
10886	/* If we have NPIV enabled, we want to send ELS traffic by VPI.
10887	 * For SLI4, since the driver controls VPIs we also want to include
10888	 * all ELS pt2pt protocol traffic as well.
10889	 */
10890	if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) ||
10891	    test_bit(FC_PT2PT, &vport->fc_flag)) {
10892		if (expect_rsp) {
10893			bf_set(els_req64_sid, &wqe->els_req, vport->fc_myDID);
10894
10895			/* For ELS_REQUEST64_WQE, use the VPI by default */
10896			bf_set(wqe_ctxt_tag, &wqe->els_req.wqe_com,
10897			       phba->vpi_ids[vport->vpi]);
10898		}
10899
10900		/* The CT field must be 0=INVALID_RPI for the ECHO cmd */
10901		if (elscmd == ELS_CMD_ECHO)
10902			bf_set(wqe_ct, &wqe->generic.wqe_com, 0);
10903		else
10904			bf_set(wqe_ct, &wqe->generic.wqe_com, 1);
10905	}
10906}
10907
10908void
10909lpfc_sli_prep_els_req_rsp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocbq,
10910			  struct lpfc_vport *vport, struct lpfc_dmabuf *bmp,
10911			  u16 cmd_size, u32 did, u32 elscmd, u8 tmo,
10912			  u8 expect_rsp)
10913{
10914	phba->__lpfc_sli_prep_els_req_rsp(cmdiocbq, vport, bmp, cmd_size, did,
10915					  elscmd, tmo, expect_rsp);
10916}
10917
10918static void
10919__lpfc_sli_prep_gen_req_s3(struct lpfc_iocbq *cmdiocbq, struct lpfc_dmabuf *bmp,
10920			   u16 rpi, u32 num_entry, u8 tmo)
10921{
10922	IOCB_t *cmd;
10923
10924	cmd = &cmdiocbq->iocb;
10925	memset(cmd, 0, sizeof(*cmd));
10926
10927	cmd->un.genreq64.bdl.addrHigh = putPaddrHigh(bmp->phys);
10928	cmd->un.genreq64.bdl.addrLow = putPaddrLow(bmp->phys);
10929	cmd->un.genreq64.bdl.bdeFlags = BUFF_TYPE_BLP_64;
10930	cmd->un.genreq64.bdl.bdeSize = num_entry * sizeof(struct ulp_bde64);
10931
10932	cmd->un.genreq64.w5.hcsw.Rctl = FC_RCTL_DD_UNSOL_CTL;
10933	cmd->un.genreq64.w5.hcsw.Type = FC_TYPE_CT;
10934	cmd->un.genreq64.w5.hcsw.Fctl = (SI | LA);
10935
10936	cmd->ulpContext = rpi;
10937	cmd->ulpClass = CLASS3;
10938	cmd->ulpCommand = CMD_GEN_REQUEST64_CR;
10939	cmd->ulpBdeCount = 1;
10940	cmd->ulpLe = 1;
10941	cmd->ulpOwner = OWN_CHIP;
10942	cmd->ulpTimeout = tmo;
10943}
10944
10945static void
10946__lpfc_sli_prep_gen_req_s4(struct lpfc_iocbq *cmdiocbq, struct lpfc_dmabuf *bmp,
10947			   u16 rpi, u32 num_entry, u8 tmo)
10948{
10949	union lpfc_wqe128 *cmdwqe;
10950	struct ulp_bde64_le *bde, *bpl;
10951	u32 xmit_len = 0, total_len = 0, size, type, i;
10952
10953	cmdwqe = &cmdiocbq->wqe;
10954	memset(cmdwqe, 0, sizeof(*cmdwqe));
10955
10956	/* Calculate total_len and xmit_len */
10957	bpl = (struct ulp_bde64_le *)bmp->virt;
10958	for (i = 0; i < num_entry; i++) {
10959		size = le32_to_cpu(bpl[i].type_size) & ULP_BDE64_SIZE_MASK;
10960		total_len += size;
10961	}
10962	for (i = 0; i < num_entry; i++) {
10963		size = le32_to_cpu(bpl[i].type_size) & ULP_BDE64_SIZE_MASK;
10964		type = le32_to_cpu(bpl[i].type_size) & ULP_BDE64_TYPE_MASK;
10965		if (type != ULP_BDE64_TYPE_BDE_64)
10966			break;
10967		xmit_len += size;
10968	}
10969
10970	/* Words 0 - 2 */
10971	bde = (struct ulp_bde64_le *)&cmdwqe->generic.bde;
10972	bde->addr_low = bpl->addr_low;
10973	bde->addr_high = bpl->addr_high;
10974	bde->type_size = cpu_to_le32(xmit_len);
10975	bde->type_size |= cpu_to_le32(ULP_BDE64_TYPE_BDE_64);
10976
10977	/* Word 3 */
10978	cmdwqe->gen_req.request_payload_len = xmit_len;
10979
10980	/* Word 5 */
10981	bf_set(wqe_type, &cmdwqe->gen_req.wge_ctl, FC_TYPE_CT);
10982	bf_set(wqe_rctl, &cmdwqe->gen_req.wge_ctl, FC_RCTL_DD_UNSOL_CTL);
10983	bf_set(wqe_si, &cmdwqe->gen_req.wge_ctl, 1);
10984	bf_set(wqe_la, &cmdwqe->gen_req.wge_ctl, 1);
10985
10986	/* Word 6 */
10987	bf_set(wqe_ctxt_tag, &cmdwqe->gen_req.wqe_com, rpi);
10988
10989	/* Word 7 */
10990	bf_set(wqe_tmo, &cmdwqe->gen_req.wqe_com, tmo);
10991	bf_set(wqe_class, &cmdwqe->gen_req.wqe_com, CLASS3);
10992	bf_set(wqe_cmnd, &cmdwqe->gen_req.wqe_com, CMD_GEN_REQUEST64_CR);
10993	bf_set(wqe_ct, &cmdwqe->gen_req.wqe_com, SLI4_CT_RPI);
10994
10995	/* Word 12 */
10996	cmdwqe->gen_req.max_response_payload_len = total_len - xmit_len;
10997}
10998
10999void
11000lpfc_sli_prep_gen_req(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocbq,
11001		      struct lpfc_dmabuf *bmp, u16 rpi, u32 num_entry, u8 tmo)
11002{
11003	phba->__lpfc_sli_prep_gen_req(cmdiocbq, bmp, rpi, num_entry, tmo);
11004}
11005
11006static void
11007__lpfc_sli_prep_xmit_seq64_s3(struct lpfc_iocbq *cmdiocbq,
11008			      struct lpfc_dmabuf *bmp, u16 rpi, u16 ox_id,
11009			      u32 num_entry, u8 rctl, u8 last_seq, u8 cr_cx_cmd)
11010{
11011	IOCB_t *icmd;
11012
11013	icmd = &cmdiocbq->iocb;
11014	memset(icmd, 0, sizeof(*icmd));
11015
11016	icmd->un.xseq64.bdl.addrHigh = putPaddrHigh(bmp->phys);
11017	icmd->un.xseq64.bdl.addrLow = putPaddrLow(bmp->phys);
11018	icmd->un.xseq64.bdl.bdeFlags = BUFF_TYPE_BLP_64;
11019	icmd->un.xseq64.bdl.bdeSize = (num_entry * sizeof(struct ulp_bde64));
11020	icmd->un.xseq64.w5.hcsw.Fctl = LA;
11021	if (last_seq)
11022		icmd->un.xseq64.w5.hcsw.Fctl |= LS;
11023	icmd->un.xseq64.w5.hcsw.Dfctl = 0;
11024	icmd->un.xseq64.w5.hcsw.Rctl = rctl;
11025	icmd->un.xseq64.w5.hcsw.Type = FC_TYPE_CT;
11026
11027	icmd->ulpBdeCount = 1;
11028	icmd->ulpLe = 1;
11029	icmd->ulpClass = CLASS3;
11030
11031	switch (cr_cx_cmd) {
11032	case CMD_XMIT_SEQUENCE64_CR:
11033		icmd->ulpContext = rpi;
11034		icmd->ulpCommand = CMD_XMIT_SEQUENCE64_CR;
11035		break;
11036	case CMD_XMIT_SEQUENCE64_CX:
11037		icmd->ulpContext = ox_id;
11038		icmd->ulpCommand = CMD_XMIT_SEQUENCE64_CX;
11039		break;
11040	default:
11041		break;
11042	}
11043}
11044
11045static void
11046__lpfc_sli_prep_xmit_seq64_s4(struct lpfc_iocbq *cmdiocbq,
11047			      struct lpfc_dmabuf *bmp, u16 rpi, u16 ox_id,
11048			      u32 full_size, u8 rctl, u8 last_seq, u8 cr_cx_cmd)
11049{
11050	union lpfc_wqe128 *wqe;
11051	struct ulp_bde64 *bpl;
11052
11053	wqe = &cmdiocbq->wqe;
11054	memset(wqe, 0, sizeof(*wqe));
11055
11056	/* Words 0 - 2 */
11057	bpl = (struct ulp_bde64 *)bmp->virt;
11058	wqe->xmit_sequence.bde.addrHigh = bpl->addrHigh;
11059	wqe->xmit_sequence.bde.addrLow = bpl->addrLow;
11060	wqe->xmit_sequence.bde.tus.w = bpl->tus.w;
11061
11062	/* Word 5 */
11063	bf_set(wqe_ls, &wqe->xmit_sequence.wge_ctl, last_seq);
11064	bf_set(wqe_la, &wqe->xmit_sequence.wge_ctl, 1);
11065	bf_set(wqe_dfctl, &wqe->xmit_sequence.wge_ctl, 0);
11066	bf_set(wqe_rctl, &wqe->xmit_sequence.wge_ctl, rctl);
11067	bf_set(wqe_type, &wqe->xmit_sequence.wge_ctl, FC_TYPE_CT);
11068
11069	/* Word 6 */
11070	bf_set(wqe_ctxt_tag, &wqe->xmit_sequence.wqe_com, rpi);
11071
11072	bf_set(wqe_cmnd, &wqe->xmit_sequence.wqe_com,
11073	       CMD_XMIT_SEQUENCE64_WQE);
11074
11075	/* Word 7 */
11076	bf_set(wqe_class, &wqe->xmit_sequence.wqe_com, CLASS3);
11077
11078	/* Word 9 */
11079	bf_set(wqe_rcvoxid, &wqe->xmit_sequence.wqe_com, ox_id);
11080
11081	/* Word 12 */
11082	if (cmdiocbq->cmd_flag & (LPFC_IO_LIBDFC | LPFC_IO_LOOPBACK))
11083		wqe->xmit_sequence.xmit_len = full_size;
11084	else
11085		wqe->xmit_sequence.xmit_len =
11086			wqe->xmit_sequence.bde.tus.f.bdeSize;
11087}
11088
11089void
11090lpfc_sli_prep_xmit_seq64(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocbq,
11091			 struct lpfc_dmabuf *bmp, u16 rpi, u16 ox_id,
11092			 u32 num_entry, u8 rctl, u8 last_seq, u8 cr_cx_cmd)
11093{
11094	phba->__lpfc_sli_prep_xmit_seq64(cmdiocbq, bmp, rpi, ox_id, num_entry,
11095					 rctl, last_seq, cr_cx_cmd);
11096}
11097
11098static void
11099__lpfc_sli_prep_abort_xri_s3(struct lpfc_iocbq *cmdiocbq, u16 ulp_context,
11100			     u16 iotag, u8 ulp_class, u16 cqid, bool ia,
11101			     bool wqec)
11102{
11103	IOCB_t *icmd = NULL;
11104
11105	icmd = &cmdiocbq->iocb;
11106	memset(icmd, 0, sizeof(*icmd));
11107
11108	/* Word 5 */
11109	icmd->un.acxri.abortContextTag = ulp_context;
11110	icmd->un.acxri.abortIoTag = iotag;
11111
11112	if (ia) {
11113		/* Word 7 */
11114		icmd->ulpCommand = CMD_CLOSE_XRI_CN;
11115	} else {
11116		/* Word 3 */
11117		icmd->un.acxri.abortType = ABORT_TYPE_ABTS;
11118
11119		/* Word 7 */
11120		icmd->ulpClass = ulp_class;
11121		icmd->ulpCommand = CMD_ABORT_XRI_CN;
11122	}
11123
11124	/* Word 7 */
11125	icmd->ulpLe = 1;
11126}
11127
11128static void
11129__lpfc_sli_prep_abort_xri_s4(struct lpfc_iocbq *cmdiocbq, u16 ulp_context,
11130			     u16 iotag, u8 ulp_class, u16 cqid, bool ia,
11131			     bool wqec)
11132{
11133	union lpfc_wqe128 *wqe;
11134
11135	wqe = &cmdiocbq->wqe;
11136	memset(wqe, 0, sizeof(*wqe));
11137
11138	/* Word 3 */
11139	bf_set(abort_cmd_criteria, &wqe->abort_cmd, T_XRI_TAG);
11140	if (ia)
11141		bf_set(abort_cmd_ia, &wqe->abort_cmd, 1);
11142	else
11143		bf_set(abort_cmd_ia, &wqe->abort_cmd, 0);
11144
11145	/* Word 7 */
11146	bf_set(wqe_cmnd, &wqe->abort_cmd.wqe_com, CMD_ABORT_XRI_WQE);
11147
11148	/* Word 8 */
11149	wqe->abort_cmd.wqe_com.abort_tag = ulp_context;
11150
11151	/* Word 9 */
11152	bf_set(wqe_reqtag, &wqe->abort_cmd.wqe_com, iotag);
11153
11154	/* Word 10 */
11155	bf_set(wqe_qosd, &wqe->abort_cmd.wqe_com, 1);
11156
11157	/* Word 11 */
11158	if (wqec)
11159		bf_set(wqe_wqec, &wqe->abort_cmd.wqe_com, 1);
11160	bf_set(wqe_cqid, &wqe->abort_cmd.wqe_com, cqid);
11161	bf_set(wqe_cmd_type, &wqe->abort_cmd.wqe_com, OTHER_COMMAND);
11162}
11163
11164void
11165lpfc_sli_prep_abort_xri(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocbq,
11166			u16 ulp_context, u16 iotag, u8 ulp_class, u16 cqid,
11167			bool ia, bool wqec)
11168{
11169	phba->__lpfc_sli_prep_abort_xri(cmdiocbq, ulp_context, iotag, ulp_class,
11170					cqid, ia, wqec);
11171}
11172
11173/**
11174 * lpfc_sli_api_table_setup - Set up sli api function jump table
11175 * @phba: The hba struct for which this call is being executed.
11176 * @dev_grp: The HBA PCI-Device group number.
11177 *
11178 * This routine sets up the SLI interface API function jump table in @phba
11179 * struct.
11180 * Returns: 0 - success, -ENODEV - failure.
11181 **/
11182int
11183lpfc_sli_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
11184{
11185
11186	switch (dev_grp) {
11187	case LPFC_PCI_DEV_LP:
11188		phba->__lpfc_sli_issue_iocb = __lpfc_sli_issue_iocb_s3;
11189		phba->__lpfc_sli_release_iocbq = __lpfc_sli_release_iocbq_s3;
11190		phba->__lpfc_sli_issue_fcp_io = __lpfc_sli_issue_fcp_io_s3;
11191		phba->__lpfc_sli_prep_els_req_rsp = __lpfc_sli_prep_els_req_rsp_s3;
11192		phba->__lpfc_sli_prep_gen_req = __lpfc_sli_prep_gen_req_s3;
11193		phba->__lpfc_sli_prep_xmit_seq64 = __lpfc_sli_prep_xmit_seq64_s3;
11194		phba->__lpfc_sli_prep_abort_xri = __lpfc_sli_prep_abort_xri_s3;
11195		break;
11196	case LPFC_PCI_DEV_OC:
11197		phba->__lpfc_sli_issue_iocb = __lpfc_sli_issue_iocb_s4;
11198		phba->__lpfc_sli_release_iocbq = __lpfc_sli_release_iocbq_s4;
11199		phba->__lpfc_sli_issue_fcp_io = __lpfc_sli_issue_fcp_io_s4;
11200		phba->__lpfc_sli_prep_els_req_rsp = __lpfc_sli_prep_els_req_rsp_s4;
11201		phba->__lpfc_sli_prep_gen_req = __lpfc_sli_prep_gen_req_s4;
11202		phba->__lpfc_sli_prep_xmit_seq64 = __lpfc_sli_prep_xmit_seq64_s4;
11203		phba->__lpfc_sli_prep_abort_xri = __lpfc_sli_prep_abort_xri_s4;
11204		break;
11205	default:
11206		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
11207				"1419 Invalid HBA PCI-device group: 0x%x\n",
11208				dev_grp);
11209		return -ENODEV;
11210	}
11211	return 0;
11212}
11213
11214/**
11215 * lpfc_sli4_calc_ring - Calculates which ring to use
11216 * @phba: Pointer to HBA context object.
11217 * @piocb: Pointer to command iocb.
11218 *
11219 * For SLI4 only, FCP IO can deferred to one fo many WQs, based on
11220 * hba_wqidx, thus we need to calculate the corresponding ring.
11221 * Since ABORTS must go on the same WQ of the command they are
11222 * aborting, we use command's hba_wqidx.
11223 */
11224struct lpfc_sli_ring *
11225lpfc_sli4_calc_ring(struct lpfc_hba *phba, struct lpfc_iocbq *piocb)
11226{
11227	struct lpfc_io_buf *lpfc_cmd;
11228
11229	if (piocb->cmd_flag & (LPFC_IO_FCP | LPFC_USE_FCPWQIDX)) {
11230		if (unlikely(!phba->sli4_hba.hdwq))
11231			return NULL;
11232		/*
11233		 * for abort iocb hba_wqidx should already
11234		 * be setup based on what work queue we used.
11235		 */
11236		if (!(piocb->cmd_flag & LPFC_USE_FCPWQIDX)) {
11237			lpfc_cmd = piocb->io_buf;
11238			piocb->hba_wqidx = lpfc_cmd->hdwq_no;
11239		}
11240		return phba->sli4_hba.hdwq[piocb->hba_wqidx].io_wq->pring;
11241	} else {
11242		if (unlikely(!phba->sli4_hba.els_wq))
11243			return NULL;
11244		piocb->hba_wqidx = 0;
11245		return phba->sli4_hba.els_wq->pring;
11246	}
11247}
11248
11249inline void lpfc_sli4_poll_eq(struct lpfc_queue *eq)
11250{
11251	struct lpfc_hba *phba = eq->phba;
11252
11253	/*
11254	 * Unlocking an irq is one of the entry point to check
11255	 * for re-schedule, but we are good for io submission
11256	 * path as midlayer does a get_cpu to glue us in. Flush
11257	 * out the invalidate queue so we can see the updated
11258	 * value for flag.
11259	 */
11260	smp_rmb();
11261
11262	if (READ_ONCE(eq->mode) == LPFC_EQ_POLL)
11263		/* We will not likely get the completion for the caller
11264		 * during this iteration but i guess that's fine.
11265		 * Future io's coming on this eq should be able to
11266		 * pick it up.  As for the case of single io's, they
11267		 * will be handled through a sched from polling timer
11268		 * function which is currently triggered every 1msec.
11269		 */
11270		lpfc_sli4_process_eq(phba, eq, LPFC_QUEUE_NOARM,
11271				     LPFC_QUEUE_WORK);
11272}
11273
11274/**
11275 * lpfc_sli_issue_iocb - Wrapper function for __lpfc_sli_issue_iocb
11276 * @phba: Pointer to HBA context object.
11277 * @ring_number: Ring number
11278 * @piocb: Pointer to command iocb.
11279 * @flag: Flag indicating if this command can be put into txq.
11280 *
11281 * lpfc_sli_issue_iocb is a wrapper around __lpfc_sli_issue_iocb
11282 * function. This function gets the hbalock and calls
11283 * __lpfc_sli_issue_iocb function and will return the error returned
11284 * by __lpfc_sli_issue_iocb function. This wrapper is used by
11285 * functions which do not hold hbalock.
11286 **/
11287int
11288lpfc_sli_issue_iocb(struct lpfc_hba *phba, uint32_t ring_number,
11289		    struct lpfc_iocbq *piocb, uint32_t flag)
11290{
11291	struct lpfc_sli_ring *pring;
11292	struct lpfc_queue *eq;
11293	unsigned long iflags;
11294	int rc;
11295
11296	/* If the PCI channel is in offline state, do not post iocbs. */
11297	if (unlikely(pci_channel_offline(phba->pcidev)))
11298		return IOCB_ERROR;
11299
11300	if (phba->sli_rev == LPFC_SLI_REV4) {
11301		lpfc_sli_prep_wqe(phba, piocb);
11302
11303		eq = phba->sli4_hba.hdwq[piocb->hba_wqidx].hba_eq;
11304
11305		pring = lpfc_sli4_calc_ring(phba, piocb);
11306		if (unlikely(pring == NULL))
11307			return IOCB_ERROR;
11308
11309		spin_lock_irqsave(&pring->ring_lock, iflags);
11310		rc = __lpfc_sli_issue_iocb(phba, ring_number, piocb, flag);
11311		spin_unlock_irqrestore(&pring->ring_lock, iflags);
11312
11313		lpfc_sli4_poll_eq(eq);
11314	} else {
11315		/* For now, SLI2/3 will still use hbalock */
11316		spin_lock_irqsave(&phba->hbalock, iflags);
11317		rc = __lpfc_sli_issue_iocb(phba, ring_number, piocb, flag);
11318		spin_unlock_irqrestore(&phba->hbalock, iflags);
11319	}
11320	return rc;
11321}
11322
11323/**
11324 * lpfc_extra_ring_setup - Extra ring setup function
11325 * @phba: Pointer to HBA context object.
11326 *
11327 * This function is called while driver attaches with the
11328 * HBA to setup the extra ring. The extra ring is used
11329 * only when driver needs to support target mode functionality
11330 * or IP over FC functionalities.
11331 *
11332 * This function is called with no lock held. SLI3 only.
11333 **/
11334static int
11335lpfc_extra_ring_setup( struct lpfc_hba *phba)
11336{
11337	struct lpfc_sli *psli;
11338	struct lpfc_sli_ring *pring;
11339
11340	psli = &phba->sli;
11341
11342	/* Adjust cmd/rsp ring iocb entries more evenly */
11343
11344	/* Take some away from the FCP ring */
11345	pring = &psli->sli3_ring[LPFC_FCP_RING];
11346	pring->sli.sli3.numCiocb -= SLI2_IOCB_CMD_R1XTRA_ENTRIES;
11347	pring->sli.sli3.numRiocb -= SLI2_IOCB_RSP_R1XTRA_ENTRIES;
11348	pring->sli.sli3.numCiocb -= SLI2_IOCB_CMD_R3XTRA_ENTRIES;
11349	pring->sli.sli3.numRiocb -= SLI2_IOCB_RSP_R3XTRA_ENTRIES;
11350
11351	/* and give them to the extra ring */
11352	pring = &psli->sli3_ring[LPFC_EXTRA_RING];
11353
11354	pring->sli.sli3.numCiocb += SLI2_IOCB_CMD_R1XTRA_ENTRIES;
11355	pring->sli.sli3.numRiocb += SLI2_IOCB_RSP_R1XTRA_ENTRIES;
11356	pring->sli.sli3.numCiocb += SLI2_IOCB_CMD_R3XTRA_ENTRIES;
11357	pring->sli.sli3.numRiocb += SLI2_IOCB_RSP_R3XTRA_ENTRIES;
11358
11359	/* Setup default profile for this ring */
11360	pring->iotag_max = 4096;
11361	pring->num_mask = 1;
11362	pring->prt[0].profile = 0;      /* Mask 0 */
11363	pring->prt[0].rctl = phba->cfg_multi_ring_rctl;
11364	pring->prt[0].type = phba->cfg_multi_ring_type;
11365	pring->prt[0].lpfc_sli_rcv_unsol_event = NULL;
11366	return 0;
11367}
11368
11369static void
11370lpfc_sli_post_recovery_event(struct lpfc_hba *phba,
11371			     struct lpfc_nodelist *ndlp)
11372{
11373	unsigned long iflags;
11374	struct lpfc_work_evt  *evtp = &ndlp->recovery_evt;
11375
11376	/* Hold a node reference for outstanding queued work */
11377	if (!lpfc_nlp_get(ndlp))
11378		return;
11379
11380	spin_lock_irqsave(&phba->hbalock, iflags);
11381	if (!list_empty(&evtp->evt_listp)) {
11382		spin_unlock_irqrestore(&phba->hbalock, iflags);
11383		lpfc_nlp_put(ndlp);
11384		return;
11385	}
11386
11387	evtp->evt_arg1 = ndlp;
11388	evtp->evt = LPFC_EVT_RECOVER_PORT;
11389	list_add_tail(&evtp->evt_listp, &phba->work_list);
11390	spin_unlock_irqrestore(&phba->hbalock, iflags);
11391
11392	lpfc_worker_wake_up(phba);
11393}
11394
11395/* lpfc_sli_abts_err_handler - handle a failed ABTS request from an SLI3 port.
11396 * @phba: Pointer to HBA context object.
11397 * @iocbq: Pointer to iocb object.
11398 *
11399 * The async_event handler calls this routine when it receives
11400 * an ASYNC_STATUS_CN event from the port.  The port generates
11401 * this event when an Abort Sequence request to an rport fails
11402 * twice in succession.  The abort could be originated by the
11403 * driver or by the port.  The ABTS could have been for an ELS
11404 * or FCP IO.  The port only generates this event when an ABTS
11405 * fails to complete after one retry.
11406 */
11407static void
11408lpfc_sli_abts_err_handler(struct lpfc_hba *phba,
11409			  struct lpfc_iocbq *iocbq)
11410{
11411	struct lpfc_nodelist *ndlp = NULL;
11412	uint16_t rpi = 0, vpi = 0;
11413	struct lpfc_vport *vport = NULL;
11414
11415	/* The rpi in the ulpContext is vport-sensitive. */
11416	vpi = iocbq->iocb.un.asyncstat.sub_ctxt_tag;
11417	rpi = iocbq->iocb.ulpContext;
11418
11419	lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
11420			"3092 Port generated ABTS async event "
11421			"on vpi %d rpi %d status 0x%x\n",
11422			vpi, rpi, iocbq->iocb.ulpStatus);
11423
11424	vport = lpfc_find_vport_by_vpid(phba, vpi);
11425	if (!vport)
11426		goto err_exit;
11427	ndlp = lpfc_findnode_rpi(vport, rpi);
11428	if (!ndlp)
11429		goto err_exit;
11430
11431	if (iocbq->iocb.ulpStatus == IOSTAT_LOCAL_REJECT)
11432		lpfc_sli_abts_recover_port(vport, ndlp);
11433	return;
11434
11435 err_exit:
11436	lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
11437			"3095 Event Context not found, no "
11438			"action on vpi %d rpi %d status 0x%x, reason 0x%x\n",
11439			vpi, rpi, iocbq->iocb.ulpStatus,
11440			iocbq->iocb.ulpContext);
11441}
11442
11443/* lpfc_sli4_abts_err_handler - handle a failed ABTS request from an SLI4 port.
11444 * @phba: pointer to HBA context object.
11445 * @ndlp: nodelist pointer for the impacted rport.
11446 * @axri: pointer to the wcqe containing the failed exchange.
11447 *
11448 * The driver calls this routine when it receives an ABORT_XRI_FCP CQE from the
11449 * port.  The port generates this event when an abort exchange request to an
11450 * rport fails twice in succession with no reply.  The abort could be originated
11451 * by the driver or by the port.  The ABTS could have been for an ELS or FCP IO.
11452 */
11453void
11454lpfc_sli4_abts_err_handler(struct lpfc_hba *phba,
11455			   struct lpfc_nodelist *ndlp,
11456			   struct sli4_wcqe_xri_aborted *axri)
11457{
11458	uint32_t ext_status = 0;
11459
11460	if (!ndlp) {
11461		lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
11462				"3115 Node Context not found, driver "
11463				"ignoring abts err event\n");
11464		return;
11465	}
11466
11467	lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
11468			"3116 Port generated FCP XRI ABORT event on "
11469			"vpi %d rpi %d xri x%x status 0x%x parameter x%x\n",
11470			ndlp->vport->vpi, phba->sli4_hba.rpi_ids[ndlp->nlp_rpi],
11471			bf_get(lpfc_wcqe_xa_xri, axri),
11472			bf_get(lpfc_wcqe_xa_status, axri),
11473			axri->parameter);
11474
11475	/*
11476	 * Catch the ABTS protocol failure case.  Older OCe FW releases returned
11477	 * LOCAL_REJECT and 0 for a failed ABTS exchange and later OCe and
11478	 * LPe FW releases returned LOCAL_REJECT and SEQUENCE_TIMEOUT.
11479	 */
11480	ext_status = axri->parameter & IOERR_PARAM_MASK;
11481	if ((bf_get(lpfc_wcqe_xa_status, axri) == IOSTAT_LOCAL_REJECT) &&
11482	    ((ext_status == IOERR_SEQUENCE_TIMEOUT) || (ext_status == 0)))
11483		lpfc_sli_post_recovery_event(phba, ndlp);
11484}
11485
11486/**
11487 * lpfc_sli_async_event_handler - ASYNC iocb handler function
11488 * @phba: Pointer to HBA context object.
11489 * @pring: Pointer to driver SLI ring object.
11490 * @iocbq: Pointer to iocb object.
11491 *
11492 * This function is called by the slow ring event handler
11493 * function when there is an ASYNC event iocb in the ring.
11494 * This function is called with no lock held.
11495 * Currently this function handles only temperature related
11496 * ASYNC events. The function decodes the temperature sensor
11497 * event message and posts events for the management applications.
11498 **/
11499static void
11500lpfc_sli_async_event_handler(struct lpfc_hba * phba,
11501	struct lpfc_sli_ring * pring, struct lpfc_iocbq * iocbq)
11502{
11503	IOCB_t *icmd;
11504	uint16_t evt_code;
11505	struct temp_event temp_event_data;
11506	struct Scsi_Host *shost;
11507	uint32_t *iocb_w;
11508
11509	icmd = &iocbq->iocb;
11510	evt_code = icmd->un.asyncstat.evt_code;
11511
11512	switch (evt_code) {
11513	case ASYNC_TEMP_WARN:
11514	case ASYNC_TEMP_SAFE:
11515		temp_event_data.data = (uint32_t) icmd->ulpContext;
11516		temp_event_data.event_type = FC_REG_TEMPERATURE_EVENT;
11517		if (evt_code == ASYNC_TEMP_WARN) {
11518			temp_event_data.event_code = LPFC_THRESHOLD_TEMP;
11519			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
11520				"0347 Adapter is very hot, please take "
11521				"corrective action. temperature : %d Celsius\n",
11522				(uint32_t) icmd->ulpContext);
11523		} else {
11524			temp_event_data.event_code = LPFC_NORMAL_TEMP;
11525			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
11526				"0340 Adapter temperature is OK now. "
11527				"temperature : %d Celsius\n",
11528				(uint32_t) icmd->ulpContext);
11529		}
11530
11531		/* Send temperature change event to applications */
11532		shost = lpfc_shost_from_vport(phba->pport);
11533		fc_host_post_vendor_event(shost, fc_get_event_number(),
11534			sizeof(temp_event_data), (char *) &temp_event_data,
11535			LPFC_NL_VENDOR_ID);
11536		break;
11537	case ASYNC_STATUS_CN:
11538		lpfc_sli_abts_err_handler(phba, iocbq);
11539		break;
11540	default:
11541		iocb_w = (uint32_t *) icmd;
11542		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
11543			"0346 Ring %d handler: unexpected ASYNC_STATUS"
11544			" evt_code 0x%x\n"
11545			"W0  0x%08x W1  0x%08x W2  0x%08x W3  0x%08x\n"
11546			"W4  0x%08x W5  0x%08x W6  0x%08x W7  0x%08x\n"
11547			"W8  0x%08x W9  0x%08x W10 0x%08x W11 0x%08x\n"
11548			"W12 0x%08x W13 0x%08x W14 0x%08x W15 0x%08x\n",
11549			pring->ringno, icmd->un.asyncstat.evt_code,
11550			iocb_w[0], iocb_w[1], iocb_w[2], iocb_w[3],
11551			iocb_w[4], iocb_w[5], iocb_w[6], iocb_w[7],
11552			iocb_w[8], iocb_w[9], iocb_w[10], iocb_w[11],
11553			iocb_w[12], iocb_w[13], iocb_w[14], iocb_w[15]);
11554
11555		break;
11556	}
11557}
11558
11559
11560/**
11561 * lpfc_sli4_setup - SLI ring setup function
11562 * @phba: Pointer to HBA context object.
11563 *
11564 * lpfc_sli_setup sets up rings of the SLI interface with
11565 * number of iocbs per ring and iotags. This function is
11566 * called while driver attach to the HBA and before the
11567 * interrupts are enabled. So there is no need for locking.
11568 *
11569 * This function always returns 0.
11570 **/
11571int
11572lpfc_sli4_setup(struct lpfc_hba *phba)
11573{
11574	struct lpfc_sli_ring *pring;
11575
11576	pring = phba->sli4_hba.els_wq->pring;
11577	pring->num_mask = LPFC_MAX_RING_MASK;
11578	pring->prt[0].profile = 0;	/* Mask 0 */
11579	pring->prt[0].rctl = FC_RCTL_ELS_REQ;
11580	pring->prt[0].type = FC_TYPE_ELS;
11581	pring->prt[0].lpfc_sli_rcv_unsol_event =
11582	    lpfc_els_unsol_event;
11583	pring->prt[1].profile = 0;	/* Mask 1 */
11584	pring->prt[1].rctl = FC_RCTL_ELS_REP;
11585	pring->prt[1].type = FC_TYPE_ELS;
11586	pring->prt[1].lpfc_sli_rcv_unsol_event =
11587	    lpfc_els_unsol_event;
11588	pring->prt[2].profile = 0;	/* Mask 2 */
11589	/* NameServer Inquiry */
11590	pring->prt[2].rctl = FC_RCTL_DD_UNSOL_CTL;
11591	/* NameServer */
11592	pring->prt[2].type = FC_TYPE_CT;
11593	pring->prt[2].lpfc_sli_rcv_unsol_event =
11594	    lpfc_ct_unsol_event;
11595	pring->prt[3].profile = 0;	/* Mask 3 */
11596	/* NameServer response */
11597	pring->prt[3].rctl = FC_RCTL_DD_SOL_CTL;
11598	/* NameServer */
11599	pring->prt[3].type = FC_TYPE_CT;
11600	pring->prt[3].lpfc_sli_rcv_unsol_event =
11601	    lpfc_ct_unsol_event;
11602	return 0;
11603}
11604
11605/**
11606 * lpfc_sli_setup - SLI ring setup function
11607 * @phba: Pointer to HBA context object.
11608 *
11609 * lpfc_sli_setup sets up rings of the SLI interface with
11610 * number of iocbs per ring and iotags. This function is
11611 * called while driver attach to the HBA and before the
11612 * interrupts are enabled. So there is no need for locking.
11613 *
11614 * This function always returns 0. SLI3 only.
11615 **/
11616int
11617lpfc_sli_setup(struct lpfc_hba *phba)
11618{
11619	int i, totiocbsize = 0;
11620	struct lpfc_sli *psli = &phba->sli;
11621	struct lpfc_sli_ring *pring;
11622
11623	psli->num_rings = MAX_SLI3_CONFIGURED_RINGS;
11624	psli->sli_flag = 0;
11625
11626	psli->iocbq_lookup = NULL;
11627	psli->iocbq_lookup_len = 0;
11628	psli->last_iotag = 0;
11629
11630	for (i = 0; i < psli->num_rings; i++) {
11631		pring = &psli->sli3_ring[i];
11632		switch (i) {
11633		case LPFC_FCP_RING:	/* ring 0 - FCP */
11634			/* numCiocb and numRiocb are used in config_port */
11635			pring->sli.sli3.numCiocb = SLI2_IOCB_CMD_R0_ENTRIES;
11636			pring->sli.sli3.numRiocb = SLI2_IOCB_RSP_R0_ENTRIES;
11637			pring->sli.sli3.numCiocb +=
11638				SLI2_IOCB_CMD_R1XTRA_ENTRIES;
11639			pring->sli.sli3.numRiocb +=
11640				SLI2_IOCB_RSP_R1XTRA_ENTRIES;
11641			pring->sli.sli3.numCiocb +=
11642				SLI2_IOCB_CMD_R3XTRA_ENTRIES;
11643			pring->sli.sli3.numRiocb +=
11644				SLI2_IOCB_RSP_R3XTRA_ENTRIES;
11645			pring->sli.sli3.sizeCiocb = (phba->sli_rev == 3) ?
11646							SLI3_IOCB_CMD_SIZE :
11647							SLI2_IOCB_CMD_SIZE;
11648			pring->sli.sli3.sizeRiocb = (phba->sli_rev == 3) ?
11649							SLI3_IOCB_RSP_SIZE :
11650							SLI2_IOCB_RSP_SIZE;
11651			pring->iotag_ctr = 0;
11652			pring->iotag_max =
11653			    (phba->cfg_hba_queue_depth * 2);
11654			pring->fast_iotag = pring->iotag_max;
11655			pring->num_mask = 0;
11656			break;
11657		case LPFC_EXTRA_RING:	/* ring 1 - EXTRA */
11658			/* numCiocb and numRiocb are used in config_port */
11659			pring->sli.sli3.numCiocb = SLI2_IOCB_CMD_R1_ENTRIES;
11660			pring->sli.sli3.numRiocb = SLI2_IOCB_RSP_R1_ENTRIES;
11661			pring->sli.sli3.sizeCiocb = (phba->sli_rev == 3) ?
11662							SLI3_IOCB_CMD_SIZE :
11663							SLI2_IOCB_CMD_SIZE;
11664			pring->sli.sli3.sizeRiocb = (phba->sli_rev == 3) ?
11665							SLI3_IOCB_RSP_SIZE :
11666							SLI2_IOCB_RSP_SIZE;
11667			pring->iotag_max = phba->cfg_hba_queue_depth;
11668			pring->num_mask = 0;
11669			break;
11670		case LPFC_ELS_RING:	/* ring 2 - ELS / CT */
11671			/* numCiocb and numRiocb are used in config_port */
11672			pring->sli.sli3.numCiocb = SLI2_IOCB_CMD_R2_ENTRIES;
11673			pring->sli.sli3.numRiocb = SLI2_IOCB_RSP_R2_ENTRIES;
11674			pring->sli.sli3.sizeCiocb = (phba->sli_rev == 3) ?
11675							SLI3_IOCB_CMD_SIZE :
11676							SLI2_IOCB_CMD_SIZE;
11677			pring->sli.sli3.sizeRiocb = (phba->sli_rev == 3) ?
11678							SLI3_IOCB_RSP_SIZE :
11679							SLI2_IOCB_RSP_SIZE;
11680			pring->fast_iotag = 0;
11681			pring->iotag_ctr = 0;
11682			pring->iotag_max = 4096;
11683			pring->lpfc_sli_rcv_async_status =
11684				lpfc_sli_async_event_handler;
11685			pring->num_mask = LPFC_MAX_RING_MASK;
11686			pring->prt[0].profile = 0;	/* Mask 0 */
11687			pring->prt[0].rctl = FC_RCTL_ELS_REQ;
11688			pring->prt[0].type = FC_TYPE_ELS;
11689			pring->prt[0].lpfc_sli_rcv_unsol_event =
11690			    lpfc_els_unsol_event;
11691			pring->prt[1].profile = 0;	/* Mask 1 */
11692			pring->prt[1].rctl = FC_RCTL_ELS_REP;
11693			pring->prt[1].type = FC_TYPE_ELS;
11694			pring->prt[1].lpfc_sli_rcv_unsol_event =
11695			    lpfc_els_unsol_event;
11696			pring->prt[2].profile = 0;	/* Mask 2 */
11697			/* NameServer Inquiry */
11698			pring->prt[2].rctl = FC_RCTL_DD_UNSOL_CTL;
11699			/* NameServer */
11700			pring->prt[2].type = FC_TYPE_CT;
11701			pring->prt[2].lpfc_sli_rcv_unsol_event =
11702			    lpfc_ct_unsol_event;
11703			pring->prt[3].profile = 0;	/* Mask 3 */
11704			/* NameServer response */
11705			pring->prt[3].rctl = FC_RCTL_DD_SOL_CTL;
11706			/* NameServer */
11707			pring->prt[3].type = FC_TYPE_CT;
11708			pring->prt[3].lpfc_sli_rcv_unsol_event =
11709			    lpfc_ct_unsol_event;
11710			break;
11711		}
11712		totiocbsize += (pring->sli.sli3.numCiocb *
11713			pring->sli.sli3.sizeCiocb) +
11714			(pring->sli.sli3.numRiocb * pring->sli.sli3.sizeRiocb);
11715	}
11716	if (totiocbsize > MAX_SLIM_IOCB_SIZE) {
11717		/* Too many cmd / rsp ring entries in SLI2 SLIM */
11718		printk(KERN_ERR "%d:0462 Too many cmd / rsp ring entries in "
11719		       "SLI2 SLIM Data: x%x x%lx\n",
11720		       phba->brd_no, totiocbsize,
11721		       (unsigned long) MAX_SLIM_IOCB_SIZE);
11722	}
11723	if (phba->cfg_multi_ring_support == 2)
11724		lpfc_extra_ring_setup(phba);
11725
11726	return 0;
11727}
11728
11729/**
11730 * lpfc_sli4_queue_init - Queue initialization function
11731 * @phba: Pointer to HBA context object.
11732 *
11733 * lpfc_sli4_queue_init sets up mailbox queues and iocb queues for each
11734 * ring. This function also initializes ring indices of each ring.
11735 * This function is called during the initialization of the SLI
11736 * interface of an HBA.
11737 * This function is called with no lock held and always returns
11738 * 1.
11739 **/
11740void
11741lpfc_sli4_queue_init(struct lpfc_hba *phba)
11742{
11743	struct lpfc_sli *psli;
11744	struct lpfc_sli_ring *pring;
11745	int i;
11746
11747	psli = &phba->sli;
11748	spin_lock_irq(&phba->hbalock);
11749	INIT_LIST_HEAD(&psli->mboxq);
11750	INIT_LIST_HEAD(&psli->mboxq_cmpl);
11751	/* Initialize list headers for txq and txcmplq as double linked lists */
11752	for (i = 0; i < phba->cfg_hdw_queue; i++) {
11753		pring = phba->sli4_hba.hdwq[i].io_wq->pring;
11754		pring->flag = 0;
11755		pring->ringno = LPFC_FCP_RING;
11756		pring->txcmplq_cnt = 0;
11757		INIT_LIST_HEAD(&pring->txq);
11758		INIT_LIST_HEAD(&pring->txcmplq);
11759		INIT_LIST_HEAD(&pring->iocb_continueq);
11760		spin_lock_init(&pring->ring_lock);
11761	}
11762	pring = phba->sli4_hba.els_wq->pring;
11763	pring->flag = 0;
11764	pring->ringno = LPFC_ELS_RING;
11765	pring->txcmplq_cnt = 0;
11766	INIT_LIST_HEAD(&pring->txq);
11767	INIT_LIST_HEAD(&pring->txcmplq);
11768	INIT_LIST_HEAD(&pring->iocb_continueq);
11769	spin_lock_init(&pring->ring_lock);
11770
11771	if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
11772		pring = phba->sli4_hba.nvmels_wq->pring;
11773		pring->flag = 0;
11774		pring->ringno = LPFC_ELS_RING;
11775		pring->txcmplq_cnt = 0;
11776		INIT_LIST_HEAD(&pring->txq);
11777		INIT_LIST_HEAD(&pring->txcmplq);
11778		INIT_LIST_HEAD(&pring->iocb_continueq);
11779		spin_lock_init(&pring->ring_lock);
11780	}
11781
11782	spin_unlock_irq(&phba->hbalock);
11783}
11784
11785/**
11786 * lpfc_sli_queue_init - Queue initialization function
11787 * @phba: Pointer to HBA context object.
11788 *
11789 * lpfc_sli_queue_init sets up mailbox queues and iocb queues for each
11790 * ring. This function also initializes ring indices of each ring.
11791 * This function is called during the initialization of the SLI
11792 * interface of an HBA.
11793 * This function is called with no lock held and always returns
11794 * 1.
11795 **/
11796void
11797lpfc_sli_queue_init(struct lpfc_hba *phba)
11798{
11799	struct lpfc_sli *psli;
11800	struct lpfc_sli_ring *pring;
11801	int i;
11802
11803	psli = &phba->sli;
11804	spin_lock_irq(&phba->hbalock);
11805	INIT_LIST_HEAD(&psli->mboxq);
11806	INIT_LIST_HEAD(&psli->mboxq_cmpl);
11807	/* Initialize list headers for txq and txcmplq as double linked lists */
11808	for (i = 0; i < psli->num_rings; i++) {
11809		pring = &psli->sli3_ring[i];
11810		pring->ringno = i;
11811		pring->sli.sli3.next_cmdidx  = 0;
11812		pring->sli.sli3.local_getidx = 0;
11813		pring->sli.sli3.cmdidx = 0;
11814		INIT_LIST_HEAD(&pring->iocb_continueq);
11815		INIT_LIST_HEAD(&pring->iocb_continue_saveq);
11816		INIT_LIST_HEAD(&pring->postbufq);
11817		pring->flag = 0;
11818		INIT_LIST_HEAD(&pring->txq);
11819		INIT_LIST_HEAD(&pring->txcmplq);
11820		spin_lock_init(&pring->ring_lock);
11821	}
11822	spin_unlock_irq(&phba->hbalock);
11823}
11824
11825/**
11826 * lpfc_sli_mbox_sys_flush - Flush mailbox command sub-system
11827 * @phba: Pointer to HBA context object.
11828 *
11829 * This routine flushes the mailbox command subsystem. It will unconditionally
11830 * flush all the mailbox commands in the three possible stages in the mailbox
11831 * command sub-system: pending mailbox command queue; the outstanding mailbox
11832 * command; and completed mailbox command queue. It is caller's responsibility
11833 * to make sure that the driver is in the proper state to flush the mailbox
11834 * command sub-system. Namely, the posting of mailbox commands into the
11835 * pending mailbox command queue from the various clients must be stopped;
11836 * either the HBA is in a state that it will never works on the outstanding
11837 * mailbox command (such as in EEH or ERATT conditions) or the outstanding
11838 * mailbox command has been completed.
11839 **/
11840static void
11841lpfc_sli_mbox_sys_flush(struct lpfc_hba *phba)
11842{
11843	LIST_HEAD(completions);
11844	struct lpfc_sli *psli = &phba->sli;
11845	LPFC_MBOXQ_t *pmb;
11846	unsigned long iflag;
11847
11848	/* Disable softirqs, including timers from obtaining phba->hbalock */
11849	local_bh_disable();
11850
11851	/* Flush all the mailbox commands in the mbox system */
11852	spin_lock_irqsave(&phba->hbalock, iflag);
11853
11854	/* The pending mailbox command queue */
11855	list_splice_init(&phba->sli.mboxq, &completions);
11856	/* The outstanding active mailbox command */
11857	if (psli->mbox_active) {
11858		list_add_tail(&psli->mbox_active->list, &completions);
11859		psli->mbox_active = NULL;
11860		psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
11861	}
11862	/* The completed mailbox command queue */
11863	list_splice_init(&phba->sli.mboxq_cmpl, &completions);
11864	spin_unlock_irqrestore(&phba->hbalock, iflag);
11865
11866	/* Enable softirqs again, done with phba->hbalock */
11867	local_bh_enable();
11868
11869	/* Return all flushed mailbox commands with MBX_NOT_FINISHED status */
11870	while (!list_empty(&completions)) {
11871		list_remove_head(&completions, pmb, LPFC_MBOXQ_t, list);
11872		pmb->u.mb.mbxStatus = MBX_NOT_FINISHED;
11873		if (pmb->mbox_cmpl)
11874			pmb->mbox_cmpl(phba, pmb);
11875	}
11876}
11877
11878/**
11879 * lpfc_sli_host_down - Vport cleanup function
11880 * @vport: Pointer to virtual port object.
11881 *
11882 * lpfc_sli_host_down is called to clean up the resources
11883 * associated with a vport before destroying virtual
11884 * port data structures.
11885 * This function does following operations:
11886 * - Free discovery resources associated with this virtual
11887 *   port.
11888 * - Free iocbs associated with this virtual port in
11889 *   the txq.
11890 * - Send abort for all iocb commands associated with this
11891 *   vport in txcmplq.
11892 *
11893 * This function is called with no lock held and always returns 1.
11894 **/
11895int
11896lpfc_sli_host_down(struct lpfc_vport *vport)
11897{
11898	LIST_HEAD(completions);
11899	struct lpfc_hba *phba = vport->phba;
11900	struct lpfc_sli *psli = &phba->sli;
11901	struct lpfc_queue *qp = NULL;
11902	struct lpfc_sli_ring *pring;
11903	struct lpfc_iocbq *iocb, *next_iocb;
11904	int i;
11905	unsigned long flags = 0;
11906	uint16_t prev_pring_flag;
11907
11908	lpfc_cleanup_discovery_resources(vport);
11909
11910	spin_lock_irqsave(&phba->hbalock, flags);
11911
11912	/*
11913	 * Error everything on the txq since these iocbs
11914	 * have not been given to the FW yet.
11915	 * Also issue ABTS for everything on the txcmplq
11916	 */
11917	if (phba->sli_rev != LPFC_SLI_REV4) {
11918		for (i = 0; i < psli->num_rings; i++) {
11919			pring = &psli->sli3_ring[i];
11920			prev_pring_flag = pring->flag;
11921			/* Only slow rings */
11922			if (pring->ringno == LPFC_ELS_RING) {
11923				pring->flag |= LPFC_DEFERRED_RING_EVENT;
11924				/* Set the lpfc data pending flag */
11925				set_bit(LPFC_DATA_READY, &phba->data_flags);
11926			}
11927			list_for_each_entry_safe(iocb, next_iocb,
11928						 &pring->txq, list) {
11929				if (iocb->vport != vport)
11930					continue;
11931				list_move_tail(&iocb->list, &completions);
11932			}
11933			list_for_each_entry_safe(iocb, next_iocb,
11934						 &pring->txcmplq, list) {
11935				if (iocb->vport != vport)
11936					continue;
11937				lpfc_sli_issue_abort_iotag(phba, pring, iocb,
11938							   NULL);
11939			}
11940			pring->flag = prev_pring_flag;
11941		}
11942	} else {
11943		list_for_each_entry(qp, &phba->sli4_hba.lpfc_wq_list, wq_list) {
11944			pring = qp->pring;
11945			if (!pring)
11946				continue;
11947			if (pring == phba->sli4_hba.els_wq->pring) {
11948				pring->flag |= LPFC_DEFERRED_RING_EVENT;
11949				/* Set the lpfc data pending flag */
11950				set_bit(LPFC_DATA_READY, &phba->data_flags);
11951			}
11952			prev_pring_flag = pring->flag;
11953			spin_lock(&pring->ring_lock);
11954			list_for_each_entry_safe(iocb, next_iocb,
11955						 &pring->txq, list) {
11956				if (iocb->vport != vport)
11957					continue;
11958				list_move_tail(&iocb->list, &completions);
11959			}
11960			spin_unlock(&pring->ring_lock);
11961			list_for_each_entry_safe(iocb, next_iocb,
11962						 &pring->txcmplq, list) {
11963				if (iocb->vport != vport)
11964					continue;
11965				lpfc_sli_issue_abort_iotag(phba, pring, iocb,
11966							   NULL);
11967			}
11968			pring->flag = prev_pring_flag;
11969		}
11970	}
11971	spin_unlock_irqrestore(&phba->hbalock, flags);
11972
11973	/* Make sure HBA is alive */
11974	lpfc_issue_hb_tmo(phba);
11975
11976	/* Cancel all the IOCBs from the completions list */
11977	lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
11978			      IOERR_SLI_DOWN);
11979	return 1;
11980}
11981
11982/**
11983 * lpfc_sli_hba_down - Resource cleanup function for the HBA
11984 * @phba: Pointer to HBA context object.
11985 *
11986 * This function cleans up all iocb, buffers, mailbox commands
11987 * while shutting down the HBA. This function is called with no
11988 * lock held and always returns 1.
11989 * This function does the following to cleanup driver resources:
11990 * - Free discovery resources for each virtual port
11991 * - Cleanup any pending fabric iocbs
11992 * - Iterate through the iocb txq and free each entry
11993 *   in the list.
11994 * - Free up any buffer posted to the HBA
11995 * - Free mailbox commands in the mailbox queue.
11996 **/
11997int
11998lpfc_sli_hba_down(struct lpfc_hba *phba)
11999{
12000	LIST_HEAD(completions);
12001	struct lpfc_sli *psli = &phba->sli;
12002	struct lpfc_queue *qp = NULL;
12003	struct lpfc_sli_ring *pring;
12004	struct lpfc_dmabuf *buf_ptr;
12005	unsigned long flags = 0;
12006	int i;
12007
12008	/* Shutdown the mailbox command sub-system */
12009	lpfc_sli_mbox_sys_shutdown(phba, LPFC_MBX_WAIT);
12010
12011	lpfc_hba_down_prep(phba);
12012
12013	/* Disable softirqs, including timers from obtaining phba->hbalock */
12014	local_bh_disable();
12015
12016	lpfc_fabric_abort_hba(phba);
12017
12018	spin_lock_irqsave(&phba->hbalock, flags);
12019
12020	/*
12021	 * Error everything on the txq since these iocbs
12022	 * have not been given to the FW yet.
12023	 */
12024	if (phba->sli_rev != LPFC_SLI_REV4) {
12025		for (i = 0; i < psli->num_rings; i++) {
12026			pring = &psli->sli3_ring[i];
12027			/* Only slow rings */
12028			if (pring->ringno == LPFC_ELS_RING) {
12029				pring->flag |= LPFC_DEFERRED_RING_EVENT;
12030				/* Set the lpfc data pending flag */
12031				set_bit(LPFC_DATA_READY, &phba->data_flags);
12032			}
12033			list_splice_init(&pring->txq, &completions);
12034		}
12035	} else {
12036		list_for_each_entry(qp, &phba->sli4_hba.lpfc_wq_list, wq_list) {
12037			pring = qp->pring;
12038			if (!pring)
12039				continue;
12040			spin_lock(&pring->ring_lock);
12041			list_splice_init(&pring->txq, &completions);
12042			spin_unlock(&pring->ring_lock);
12043			if (pring == phba->sli4_hba.els_wq->pring) {
12044				pring->flag |= LPFC_DEFERRED_RING_EVENT;
12045				/* Set the lpfc data pending flag */
12046				set_bit(LPFC_DATA_READY, &phba->data_flags);
12047			}
12048		}
12049	}
12050	spin_unlock_irqrestore(&phba->hbalock, flags);
12051
12052	/* Cancel all the IOCBs from the completions list */
12053	lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
12054			      IOERR_SLI_DOWN);
12055
12056	spin_lock_irqsave(&phba->hbalock, flags);
12057	list_splice_init(&phba->elsbuf, &completions);
12058	phba->elsbuf_cnt = 0;
12059	phba->elsbuf_prev_cnt = 0;
12060	spin_unlock_irqrestore(&phba->hbalock, flags);
12061
12062	while (!list_empty(&completions)) {
12063		list_remove_head(&completions, buf_ptr,
12064			struct lpfc_dmabuf, list);
12065		lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys);
12066		kfree(buf_ptr);
12067	}
12068
12069	/* Enable softirqs again, done with phba->hbalock */
12070	local_bh_enable();
12071
12072	/* Return any active mbox cmds */
12073	del_timer_sync(&psli->mbox_tmo);
12074
12075	spin_lock_irqsave(&phba->pport->work_port_lock, flags);
12076	phba->pport->work_port_events &= ~WORKER_MBOX_TMO;
12077	spin_unlock_irqrestore(&phba->pport->work_port_lock, flags);
12078
12079	return 1;
12080}
12081
12082/**
12083 * lpfc_sli_pcimem_bcopy - SLI memory copy function
12084 * @srcp: Source memory pointer.
12085 * @destp: Destination memory pointer.
12086 * @cnt: Number of words required to be copied.
12087 *
12088 * This function is used for copying data between driver memory
12089 * and the SLI memory. This function also changes the endianness
12090 * of each word if native endianness is different from SLI
12091 * endianness. This function can be called with or without
12092 * lock.
12093 **/
12094void
12095lpfc_sli_pcimem_bcopy(void *srcp, void *destp, uint32_t cnt)
12096{
12097	uint32_t *src = srcp;
12098	uint32_t *dest = destp;
12099	uint32_t ldata;
12100	int i;
12101
12102	for (i = 0; i < (int)cnt; i += sizeof (uint32_t)) {
12103		ldata = *src;
12104		ldata = le32_to_cpu(ldata);
12105		*dest = ldata;
12106		src++;
12107		dest++;
12108	}
12109}
12110
12111
12112/**
12113 * lpfc_sli_bemem_bcopy - SLI memory copy function
12114 * @srcp: Source memory pointer.
12115 * @destp: Destination memory pointer.
12116 * @cnt: Number of words required to be copied.
12117 *
12118 * This function is used for copying data between a data structure
12119 * with big endian representation to local endianness.
12120 * This function can be called with or without lock.
12121 **/
12122void
12123lpfc_sli_bemem_bcopy(void *srcp, void *destp, uint32_t cnt)
12124{
12125	uint32_t *src = srcp;
12126	uint32_t *dest = destp;
12127	uint32_t ldata;
12128	int i;
12129
12130	for (i = 0; i < (int)cnt; i += sizeof(uint32_t)) {
12131		ldata = *src;
12132		ldata = be32_to_cpu(ldata);
12133		*dest = ldata;
12134		src++;
12135		dest++;
12136	}
12137}
12138
12139/**
12140 * lpfc_sli_ringpostbuf_put - Function to add a buffer to postbufq
12141 * @phba: Pointer to HBA context object.
12142 * @pring: Pointer to driver SLI ring object.
12143 * @mp: Pointer to driver buffer object.
12144 *
12145 * This function is called with no lock held.
12146 * It always return zero after adding the buffer to the postbufq
12147 * buffer list.
12148 **/
12149int
12150lpfc_sli_ringpostbuf_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
12151			 struct lpfc_dmabuf *mp)
12152{
12153	/* Stick struct lpfc_dmabuf at end of postbufq so driver can look it up
12154	   later */
12155	spin_lock_irq(&phba->hbalock);
12156	list_add_tail(&mp->list, &pring->postbufq);
12157	pring->postbufq_cnt++;
12158	spin_unlock_irq(&phba->hbalock);
12159	return 0;
12160}
12161
12162/**
12163 * lpfc_sli_get_buffer_tag - allocates a tag for a CMD_QUE_XRI64_CX buffer
12164 * @phba: Pointer to HBA context object.
12165 *
12166 * When HBQ is enabled, buffers are searched based on tags. This function
12167 * allocates a tag for buffer posted using CMD_QUE_XRI64_CX iocb. The
12168 * tag is bit wise or-ed with QUE_BUFTAG_BIT to make sure that the tag
12169 * does not conflict with tags of buffer posted for unsolicited events.
12170 * The function returns the allocated tag. The function is called with
12171 * no locks held.
12172 **/
12173uint32_t
12174lpfc_sli_get_buffer_tag(struct lpfc_hba *phba)
12175{
12176	spin_lock_irq(&phba->hbalock);
12177	phba->buffer_tag_count++;
12178	/*
12179	 * Always set the QUE_BUFTAG_BIT to distiguish between
12180	 * a tag assigned by HBQ.
12181	 */
12182	phba->buffer_tag_count |= QUE_BUFTAG_BIT;
12183	spin_unlock_irq(&phba->hbalock);
12184	return phba->buffer_tag_count;
12185}
12186
12187/**
12188 * lpfc_sli_ring_taggedbuf_get - find HBQ buffer associated with given tag
12189 * @phba: Pointer to HBA context object.
12190 * @pring: Pointer to driver SLI ring object.
12191 * @tag: Buffer tag.
12192 *
12193 * Buffers posted using CMD_QUE_XRI64_CX iocb are in pring->postbufq
12194 * list. After HBA DMA data to these buffers, CMD_IOCB_RET_XRI64_CX
12195 * iocb is posted to the response ring with the tag of the buffer.
12196 * This function searches the pring->postbufq list using the tag
12197 * to find buffer associated with CMD_IOCB_RET_XRI64_CX
12198 * iocb. If the buffer is found then lpfc_dmabuf object of the
12199 * buffer is returned to the caller else NULL is returned.
12200 * This function is called with no lock held.
12201 **/
12202struct lpfc_dmabuf *
12203lpfc_sli_ring_taggedbuf_get(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
12204			uint32_t tag)
12205{
12206	struct lpfc_dmabuf *mp, *next_mp;
12207	struct list_head *slp = &pring->postbufq;
12208
12209	/* Search postbufq, from the beginning, looking for a match on tag */
12210	spin_lock_irq(&phba->hbalock);
12211	list_for_each_entry_safe(mp, next_mp, &pring->postbufq, list) {
12212		if (mp->buffer_tag == tag) {
12213			list_del_init(&mp->list);
12214			pring->postbufq_cnt--;
12215			spin_unlock_irq(&phba->hbalock);
12216			return mp;
12217		}
12218	}
12219
12220	spin_unlock_irq(&phba->hbalock);
12221	lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
12222			"0402 Cannot find virtual addr for buffer tag on "
12223			"ring %d Data x%lx x%px x%px x%x\n",
12224			pring->ringno, (unsigned long) tag,
12225			slp->next, slp->prev, pring->postbufq_cnt);
12226
12227	return NULL;
12228}
12229
12230/**
12231 * lpfc_sli_ringpostbuf_get - search buffers for unsolicited CT and ELS events
12232 * @phba: Pointer to HBA context object.
12233 * @pring: Pointer to driver SLI ring object.
12234 * @phys: DMA address of the buffer.
12235 *
12236 * This function searches the buffer list using the dma_address
12237 * of unsolicited event to find the driver's lpfc_dmabuf object
12238 * corresponding to the dma_address. The function returns the
12239 * lpfc_dmabuf object if a buffer is found else it returns NULL.
12240 * This function is called by the ct and els unsolicited event
12241 * handlers to get the buffer associated with the unsolicited
12242 * event.
12243 *
12244 * This function is called with no lock held.
12245 **/
12246struct lpfc_dmabuf *
12247lpfc_sli_ringpostbuf_get(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
12248			 dma_addr_t phys)
12249{
12250	struct lpfc_dmabuf *mp, *next_mp;
12251	struct list_head *slp = &pring->postbufq;
12252
12253	/* Search postbufq, from the beginning, looking for a match on phys */
12254	spin_lock_irq(&phba->hbalock);
12255	list_for_each_entry_safe(mp, next_mp, &pring->postbufq, list) {
12256		if (mp->phys == phys) {
12257			list_del_init(&mp->list);
12258			pring->postbufq_cnt--;
12259			spin_unlock_irq(&phba->hbalock);
12260			return mp;
12261		}
12262	}
12263
12264	spin_unlock_irq(&phba->hbalock);
12265	lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
12266			"0410 Cannot find virtual addr for mapped buf on "
12267			"ring %d Data x%llx x%px x%px x%x\n",
12268			pring->ringno, (unsigned long long)phys,
12269			slp->next, slp->prev, pring->postbufq_cnt);
12270	return NULL;
12271}
12272
12273/**
12274 * lpfc_sli_abort_els_cmpl - Completion handler for the els abort iocbs
12275 * @phba: Pointer to HBA context object.
12276 * @cmdiocb: Pointer to driver command iocb object.
12277 * @rspiocb: Pointer to driver response iocb object.
12278 *
12279 * This function is the completion handler for the abort iocbs for
12280 * ELS commands. This function is called from the ELS ring event
12281 * handler with no lock held. This function frees memory resources
12282 * associated with the abort iocb.
12283 **/
12284static void
12285lpfc_sli_abort_els_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
12286			struct lpfc_iocbq *rspiocb)
12287{
12288	u32 ulp_status = get_job_ulpstatus(phba, rspiocb);
12289	u32 ulp_word4 = get_job_word4(phba, rspiocb);
12290	u8 cmnd = get_job_cmnd(phba, cmdiocb);
12291
12292	if (ulp_status) {
12293		/*
12294		 * Assume that the port already completed and returned, or
12295		 * will return the iocb. Just Log the message.
12296		 */
12297		if (phba->sli_rev < LPFC_SLI_REV4) {
12298			if (cmnd == CMD_ABORT_XRI_CX &&
12299			    ulp_status == IOSTAT_LOCAL_REJECT &&
12300			    ulp_word4 == IOERR_ABORT_REQUESTED) {
12301				goto release_iocb;
12302			}
12303		}
12304
12305		lpfc_printf_log(phba, KERN_WARNING, LOG_ELS | LOG_SLI,
12306				"0327 Cannot abort els iocb x%px "
12307				"with io cmd xri %x abort tag : x%x, "
12308				"abort status %x abort code %x\n",
12309				cmdiocb, get_job_abtsiotag(phba, cmdiocb),
12310				(phba->sli_rev == LPFC_SLI_REV4) ?
12311				get_wqe_reqtag(cmdiocb) :
12312				cmdiocb->iocb.un.acxri.abortContextTag,
12313				ulp_status, ulp_word4);
12314
12315	}
12316release_iocb:
12317	lpfc_sli_release_iocbq(phba, cmdiocb);
12318	return;
12319}
12320
12321/**
12322 * lpfc_ignore_els_cmpl - Completion handler for aborted ELS command
12323 * @phba: Pointer to HBA context object.
12324 * @cmdiocb: Pointer to driver command iocb object.
12325 * @rspiocb: Pointer to driver response iocb object.
12326 *
12327 * The function is called from SLI ring event handler with no
12328 * lock held. This function is the completion handler for ELS commands
12329 * which are aborted. The function frees memory resources used for
12330 * the aborted ELS commands.
12331 **/
12332void
12333lpfc_ignore_els_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
12334		     struct lpfc_iocbq *rspiocb)
12335{
12336	struct lpfc_nodelist *ndlp = cmdiocb->ndlp;
12337	IOCB_t *irsp;
12338	LPFC_MBOXQ_t *mbox;
12339	u32 ulp_command, ulp_status, ulp_word4, iotag;
12340
12341	ulp_command = get_job_cmnd(phba, cmdiocb);
12342	ulp_status = get_job_ulpstatus(phba, rspiocb);
12343	ulp_word4 = get_job_word4(phba, rspiocb);
12344
12345	if (phba->sli_rev == LPFC_SLI_REV4) {
12346		iotag = get_wqe_reqtag(cmdiocb);
12347	} else {
12348		irsp = &rspiocb->iocb;
12349		iotag = irsp->ulpIoTag;
12350
12351		/* It is possible a PLOGI_RJT for NPIV ports to get aborted.
12352		 * The MBX_REG_LOGIN64 mbox command is freed back to the
12353		 * mbox_mem_pool here.
12354		 */
12355		if (cmdiocb->context_un.mbox) {
12356			mbox = cmdiocb->context_un.mbox;
12357			lpfc_mbox_rsrc_cleanup(phba, mbox, MBOX_THD_UNLOCKED);
12358			cmdiocb->context_un.mbox = NULL;
12359		}
12360	}
12361
12362	/* ELS cmd tag <ulpIoTag> completes */
12363	lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
12364			"0139 Ignoring ELS cmd code x%x completion Data: "
12365			"x%x x%x x%x x%px\n",
12366			ulp_command, ulp_status, ulp_word4, iotag,
12367			cmdiocb->ndlp);
12368	/*
12369	 * Deref the ndlp after free_iocb. sli_release_iocb will access the ndlp
12370	 * if exchange is busy.
12371	 */
12372	if (ulp_command == CMD_GEN_REQUEST64_CR)
12373		lpfc_ct_free_iocb(phba, cmdiocb);
12374	else
12375		lpfc_els_free_iocb(phba, cmdiocb);
12376
12377	lpfc_nlp_put(ndlp);
12378}
12379
12380/**
12381 * lpfc_sli_issue_abort_iotag - Abort function for a command iocb
12382 * @phba: Pointer to HBA context object.
12383 * @pring: Pointer to driver SLI ring object.
12384 * @cmdiocb: Pointer to driver command iocb object.
12385 * @cmpl: completion function.
12386 *
12387 * This function issues an abort iocb for the provided command iocb. In case
12388 * of unloading, the abort iocb will not be issued to commands on the ELS
12389 * ring. Instead, the callback function shall be changed to those commands
12390 * so that nothing happens when them finishes. This function is called with
12391 * hbalock held andno ring_lock held (SLI4). The function returns IOCB_SUCCESS
12392 * when the command iocb is an abort request.
12393 *
12394 **/
12395int
12396lpfc_sli_issue_abort_iotag(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
12397			   struct lpfc_iocbq *cmdiocb, void *cmpl)
12398{
12399	struct lpfc_vport *vport = cmdiocb->vport;
12400	struct lpfc_iocbq *abtsiocbp;
12401	int retval = IOCB_ERROR;
12402	unsigned long iflags;
12403	struct lpfc_nodelist *ndlp = NULL;
12404	u32 ulp_command = get_job_cmnd(phba, cmdiocb);
12405	u16 ulp_context, iotag;
12406	bool ia;
12407
12408	/*
12409	 * There are certain command types we don't want to abort.  And we
12410	 * don't want to abort commands that are already in the process of
12411	 * being aborted.
12412	 */
12413	if (ulp_command == CMD_ABORT_XRI_WQE ||
12414	    ulp_command == CMD_ABORT_XRI_CN ||
12415	    ulp_command == CMD_CLOSE_XRI_CN ||
12416	    cmdiocb->cmd_flag & LPFC_DRIVER_ABORTED)
12417		return IOCB_ABORTING;
12418
12419	if (!pring) {
12420		if (cmdiocb->cmd_flag & LPFC_IO_FABRIC)
12421			cmdiocb->fabric_cmd_cmpl = lpfc_ignore_els_cmpl;
12422		else
12423			cmdiocb->cmd_cmpl = lpfc_ignore_els_cmpl;
12424		return retval;
12425	}
12426
12427	/*
12428	 * If we're unloading, don't abort iocb on the ELS ring, but change
12429	 * the callback so that nothing happens when it finishes.
12430	 */
12431	if (test_bit(FC_UNLOADING, &vport->load_flag) &&
12432	    pring->ringno == LPFC_ELS_RING) {
12433		if (cmdiocb->cmd_flag & LPFC_IO_FABRIC)
12434			cmdiocb->fabric_cmd_cmpl = lpfc_ignore_els_cmpl;
12435		else
12436			cmdiocb->cmd_cmpl = lpfc_ignore_els_cmpl;
12437		return retval;
12438	}
12439
12440	/* issue ABTS for this IOCB based on iotag */
12441	abtsiocbp = __lpfc_sli_get_iocbq(phba);
12442	if (abtsiocbp == NULL)
12443		return IOCB_NORESOURCE;
12444
12445	/* This signals the response to set the correct status
12446	 * before calling the completion handler
12447	 */
12448	cmdiocb->cmd_flag |= LPFC_DRIVER_ABORTED;
12449
12450	if (phba->sli_rev == LPFC_SLI_REV4) {
12451		ulp_context = cmdiocb->sli4_xritag;
12452		iotag = abtsiocbp->iotag;
12453	} else {
12454		iotag = cmdiocb->iocb.ulpIoTag;
12455		if (pring->ringno == LPFC_ELS_RING) {
12456			ndlp = cmdiocb->ndlp;
12457			ulp_context = ndlp->nlp_rpi;
12458		} else {
12459			ulp_context = cmdiocb->iocb.ulpContext;
12460		}
12461	}
12462
12463	if (phba->link_state < LPFC_LINK_UP ||
12464	    (phba->sli_rev == LPFC_SLI_REV4 &&
12465	     phba->sli4_hba.link_state.status == LPFC_FC_LA_TYPE_LINK_DOWN) ||
12466	    (phba->link_flag & LS_EXTERNAL_LOOPBACK))
12467		ia = true;
12468	else
12469		ia = false;
12470
12471	lpfc_sli_prep_abort_xri(phba, abtsiocbp, ulp_context, iotag,
12472				cmdiocb->iocb.ulpClass,
12473				LPFC_WQE_CQ_ID_DEFAULT, ia, false);
12474
12475	abtsiocbp->vport = vport;
12476
12477	/* ABTS WQE must go to the same WQ as the WQE to be aborted */
12478	abtsiocbp->hba_wqidx = cmdiocb->hba_wqidx;
12479	if (cmdiocb->cmd_flag & LPFC_IO_FCP)
12480		abtsiocbp->cmd_flag |= (LPFC_IO_FCP | LPFC_USE_FCPWQIDX);
12481
12482	if (cmdiocb->cmd_flag & LPFC_IO_FOF)
12483		abtsiocbp->cmd_flag |= LPFC_IO_FOF;
12484
12485	if (cmpl)
12486		abtsiocbp->cmd_cmpl = cmpl;
12487	else
12488		abtsiocbp->cmd_cmpl = lpfc_sli_abort_els_cmpl;
12489	abtsiocbp->vport = vport;
12490
12491	if (phba->sli_rev == LPFC_SLI_REV4) {
12492		pring = lpfc_sli4_calc_ring(phba, abtsiocbp);
12493		if (unlikely(pring == NULL))
12494			goto abort_iotag_exit;
12495		/* Note: both hbalock and ring_lock need to be set here */
12496		spin_lock_irqsave(&pring->ring_lock, iflags);
12497		retval = __lpfc_sli_issue_iocb(phba, pring->ringno,
12498			abtsiocbp, 0);
12499		spin_unlock_irqrestore(&pring->ring_lock, iflags);
12500	} else {
12501		retval = __lpfc_sli_issue_iocb(phba, pring->ringno,
12502			abtsiocbp, 0);
12503	}
12504
12505abort_iotag_exit:
12506
12507	lpfc_printf_vlog(vport, KERN_INFO, LOG_SLI,
12508			 "0339 Abort IO XRI x%x, Original iotag x%x, "
12509			 "abort tag x%x Cmdjob : x%px Abortjob : x%px "
12510			 "retval x%x\n",
12511			 ulp_context, (phba->sli_rev == LPFC_SLI_REV4) ?
12512			 cmdiocb->iotag : iotag, iotag, cmdiocb, abtsiocbp,
12513			 retval);
12514	if (retval) {
12515		cmdiocb->cmd_flag &= ~LPFC_DRIVER_ABORTED;
12516		__lpfc_sli_release_iocbq(phba, abtsiocbp);
12517	}
12518
12519	/*
12520	 * Caller to this routine should check for IOCB_ERROR
12521	 * and handle it properly.  This routine no longer removes
12522	 * iocb off txcmplq and call compl in case of IOCB_ERROR.
12523	 */
12524	return retval;
12525}
12526
12527/**
12528 * lpfc_sli_hba_iocb_abort - Abort all iocbs to an hba.
12529 * @phba: pointer to lpfc HBA data structure.
12530 *
12531 * This routine will abort all pending and outstanding iocbs to an HBA.
12532 **/
12533void
12534lpfc_sli_hba_iocb_abort(struct lpfc_hba *phba)
12535{
12536	struct lpfc_sli *psli = &phba->sli;
12537	struct lpfc_sli_ring *pring;
12538	struct lpfc_queue *qp = NULL;
12539	int i;
12540
12541	if (phba->sli_rev != LPFC_SLI_REV4) {
12542		for (i = 0; i < psli->num_rings; i++) {
12543			pring = &psli->sli3_ring[i];
12544			lpfc_sli_abort_iocb_ring(phba, pring);
12545		}
12546		return;
12547	}
12548	list_for_each_entry(qp, &phba->sli4_hba.lpfc_wq_list, wq_list) {
12549		pring = qp->pring;
12550		if (!pring)
12551			continue;
12552		lpfc_sli_abort_iocb_ring(phba, pring);
12553	}
12554}
12555
12556/**
12557 * lpfc_sli_validate_fcp_iocb_for_abort - filter iocbs appropriate for FCP aborts
12558 * @iocbq: Pointer to iocb object.
12559 * @vport: Pointer to driver virtual port object.
12560 *
12561 * This function acts as an iocb filter for functions which abort FCP iocbs.
12562 *
12563 * Return values
12564 * -ENODEV, if a null iocb or vport ptr is encountered
12565 * -EINVAL, if the iocb is not an FCP I/O, not on the TX cmpl queue, premarked as
12566 *          driver already started the abort process, or is an abort iocb itself
12567 * 0, passes criteria for aborting the FCP I/O iocb
12568 **/
12569static int
12570lpfc_sli_validate_fcp_iocb_for_abort(struct lpfc_iocbq *iocbq,
12571				     struct lpfc_vport *vport)
12572{
12573	u8 ulp_command;
12574
12575	/* No null ptr vports */
12576	if (!iocbq || iocbq->vport != vport)
12577		return -ENODEV;
12578
12579	/* iocb must be for FCP IO, already exists on the TX cmpl queue,
12580	 * can't be premarked as driver aborted, nor be an ABORT iocb itself
12581	 */
12582	ulp_command = get_job_cmnd(vport->phba, iocbq);
12583	if (!(iocbq->cmd_flag & LPFC_IO_FCP) ||
12584	    !(iocbq->cmd_flag & LPFC_IO_ON_TXCMPLQ) ||
12585	    (iocbq->cmd_flag & LPFC_DRIVER_ABORTED) ||
12586	    (ulp_command == CMD_ABORT_XRI_CN ||
12587	     ulp_command == CMD_CLOSE_XRI_CN ||
12588	     ulp_command == CMD_ABORT_XRI_WQE))
12589		return -EINVAL;
12590
12591	return 0;
12592}
12593
12594/**
12595 * lpfc_sli_validate_fcp_iocb - validate commands associated with a SCSI target
12596 * @iocbq: Pointer to driver iocb object.
12597 * @vport: Pointer to driver virtual port object.
12598 * @tgt_id: SCSI ID of the target.
12599 * @lun_id: LUN ID of the scsi device.
12600 * @ctx_cmd: LPFC_CTX_LUN/LPFC_CTX_TGT/LPFC_CTX_HOST
12601 *
12602 * This function acts as an iocb filter for validating a lun/SCSI target/SCSI
12603 * host.
12604 *
12605 * It will return
12606 * 0 if the filtering criteria is met for the given iocb and will return
12607 * 1 if the filtering criteria is not met.
12608 * If ctx_cmd == LPFC_CTX_LUN, the function returns 0 only if the
12609 * given iocb is for the SCSI device specified by vport, tgt_id and
12610 * lun_id parameter.
12611 * If ctx_cmd == LPFC_CTX_TGT,  the function returns 0 only if the
12612 * given iocb is for the SCSI target specified by vport and tgt_id
12613 * parameters.
12614 * If ctx_cmd == LPFC_CTX_HOST, the function returns 0 only if the
12615 * given iocb is for the SCSI host associated with the given vport.
12616 * This function is called with no locks held.
12617 **/
12618static int
12619lpfc_sli_validate_fcp_iocb(struct lpfc_iocbq *iocbq, struct lpfc_vport *vport,
12620			   uint16_t tgt_id, uint64_t lun_id,
12621			   lpfc_ctx_cmd ctx_cmd)
12622{
12623	struct lpfc_io_buf *lpfc_cmd;
12624	int rc = 1;
12625
12626	lpfc_cmd = container_of(iocbq, struct lpfc_io_buf, cur_iocbq);
12627
12628	if (lpfc_cmd->pCmd == NULL)
12629		return rc;
12630
12631	switch (ctx_cmd) {
12632	case LPFC_CTX_LUN:
12633		if ((lpfc_cmd->rdata) && (lpfc_cmd->rdata->pnode) &&
12634		    (lpfc_cmd->rdata->pnode->nlp_sid == tgt_id) &&
12635		    (scsilun_to_int(&lpfc_cmd->fcp_cmnd->fcp_lun) == lun_id))
12636			rc = 0;
12637		break;
12638	case LPFC_CTX_TGT:
12639		if ((lpfc_cmd->rdata) && (lpfc_cmd->rdata->pnode) &&
12640		    (lpfc_cmd->rdata->pnode->nlp_sid == tgt_id))
12641			rc = 0;
12642		break;
12643	case LPFC_CTX_HOST:
12644		rc = 0;
12645		break;
12646	default:
12647		printk(KERN_ERR "%s: Unknown context cmd type, value %d\n",
12648			__func__, ctx_cmd);
12649		break;
12650	}
12651
12652	return rc;
12653}
12654
12655/**
12656 * lpfc_sli_sum_iocb - Function to count the number of FCP iocbs pending
12657 * @vport: Pointer to virtual port.
12658 * @tgt_id: SCSI ID of the target.
12659 * @lun_id: LUN ID of the scsi device.
12660 * @ctx_cmd: LPFC_CTX_LUN/LPFC_CTX_TGT/LPFC_CTX_HOST.
12661 *
12662 * This function returns number of FCP commands pending for the vport.
12663 * When ctx_cmd == LPFC_CTX_LUN, the function returns number of FCP
12664 * commands pending on the vport associated with SCSI device specified
12665 * by tgt_id and lun_id parameters.
12666 * When ctx_cmd == LPFC_CTX_TGT, the function returns number of FCP
12667 * commands pending on the vport associated with SCSI target specified
12668 * by tgt_id parameter.
12669 * When ctx_cmd == LPFC_CTX_HOST, the function returns number of FCP
12670 * commands pending on the vport.
12671 * This function returns the number of iocbs which satisfy the filter.
12672 * This function is called without any lock held.
12673 **/
12674int
12675lpfc_sli_sum_iocb(struct lpfc_vport *vport, uint16_t tgt_id, uint64_t lun_id,
12676		  lpfc_ctx_cmd ctx_cmd)
12677{
12678	struct lpfc_hba *phba = vport->phba;
12679	struct lpfc_iocbq *iocbq;
12680	int sum, i;
12681	unsigned long iflags;
12682	u8 ulp_command;
12683
12684	spin_lock_irqsave(&phba->hbalock, iflags);
12685	for (i = 1, sum = 0; i <= phba->sli.last_iotag; i++) {
12686		iocbq = phba->sli.iocbq_lookup[i];
12687
12688		if (!iocbq || iocbq->vport != vport)
12689			continue;
12690		if (!(iocbq->cmd_flag & LPFC_IO_FCP) ||
12691		    !(iocbq->cmd_flag & LPFC_IO_ON_TXCMPLQ))
12692			continue;
12693
12694		/* Include counting outstanding aborts */
12695		ulp_command = get_job_cmnd(phba, iocbq);
12696		if (ulp_command == CMD_ABORT_XRI_CN ||
12697		    ulp_command == CMD_CLOSE_XRI_CN ||
12698		    ulp_command == CMD_ABORT_XRI_WQE) {
12699			sum++;
12700			continue;
12701		}
12702
12703		if (lpfc_sli_validate_fcp_iocb(iocbq, vport, tgt_id, lun_id,
12704					       ctx_cmd) == 0)
12705			sum++;
12706	}
12707	spin_unlock_irqrestore(&phba->hbalock, iflags);
12708
12709	return sum;
12710}
12711
12712/**
12713 * lpfc_sli_abort_fcp_cmpl - Completion handler function for aborted FCP IOCBs
12714 * @phba: Pointer to HBA context object
12715 * @cmdiocb: Pointer to command iocb object.
12716 * @rspiocb: Pointer to response iocb object.
12717 *
12718 * This function is called when an aborted FCP iocb completes. This
12719 * function is called by the ring event handler with no lock held.
12720 * This function frees the iocb.
12721 **/
12722void
12723lpfc_sli_abort_fcp_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
12724			struct lpfc_iocbq *rspiocb)
12725{
12726	lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
12727			"3096 ABORT_XRI_CX completing on rpi x%x "
12728			"original iotag x%x, abort cmd iotag x%x "
12729			"status 0x%x, reason 0x%x\n",
12730			(phba->sli_rev == LPFC_SLI_REV4) ?
12731			cmdiocb->sli4_xritag :
12732			cmdiocb->iocb.un.acxri.abortContextTag,
12733			get_job_abtsiotag(phba, cmdiocb),
12734			cmdiocb->iotag, get_job_ulpstatus(phba, rspiocb),
12735			get_job_word4(phba, rspiocb));
12736	lpfc_sli_release_iocbq(phba, cmdiocb);
12737	return;
12738}
12739
12740/**
12741 * lpfc_sli_abort_iocb - issue abort for all commands on a host/target/LUN
12742 * @vport: Pointer to virtual port.
12743 * @tgt_id: SCSI ID of the target.
12744 * @lun_id: LUN ID of the scsi device.
12745 * @abort_cmd: LPFC_CTX_LUN/LPFC_CTX_TGT/LPFC_CTX_HOST.
12746 *
12747 * This function sends an abort command for every SCSI command
12748 * associated with the given virtual port pending on the ring
12749 * filtered by lpfc_sli_validate_fcp_iocb_for_abort and then
12750 * lpfc_sli_validate_fcp_iocb function.  The ordering for validation before
12751 * submitting abort iocbs must be lpfc_sli_validate_fcp_iocb_for_abort
12752 * followed by lpfc_sli_validate_fcp_iocb.
12753 *
12754 * When abort_cmd == LPFC_CTX_LUN, the function sends abort only to the
12755 * FCP iocbs associated with lun specified by tgt_id and lun_id
12756 * parameters
12757 * When abort_cmd == LPFC_CTX_TGT, the function sends abort only to the
12758 * FCP iocbs associated with SCSI target specified by tgt_id parameter.
12759 * When abort_cmd == LPFC_CTX_HOST, the function sends abort to all
12760 * FCP iocbs associated with virtual port.
12761 * The pring used for SLI3 is sli3_ring[LPFC_FCP_RING], for SLI4
12762 * lpfc_sli4_calc_ring is used.
12763 * This function returns number of iocbs it failed to abort.
12764 * This function is called with no locks held.
12765 **/
12766int
12767lpfc_sli_abort_iocb(struct lpfc_vport *vport, u16 tgt_id, u64 lun_id,
12768		    lpfc_ctx_cmd abort_cmd)
12769{
12770	struct lpfc_hba *phba = vport->phba;
12771	struct lpfc_sli_ring *pring = NULL;
12772	struct lpfc_iocbq *iocbq;
12773	int errcnt = 0, ret_val = 0;
12774	unsigned long iflags;
12775	int i;
12776
12777	/* all I/Os are in process of being flushed */
12778	if (phba->hba_flag & HBA_IOQ_FLUSH)
12779		return errcnt;
12780
12781	for (i = 1; i <= phba->sli.last_iotag; i++) {
12782		iocbq = phba->sli.iocbq_lookup[i];
12783
12784		if (lpfc_sli_validate_fcp_iocb_for_abort(iocbq, vport))
12785			continue;
12786
12787		if (lpfc_sli_validate_fcp_iocb(iocbq, vport, tgt_id, lun_id,
12788					       abort_cmd) != 0)
12789			continue;
12790
12791		spin_lock_irqsave(&phba->hbalock, iflags);
12792		if (phba->sli_rev == LPFC_SLI_REV3) {
12793			pring = &phba->sli.sli3_ring[LPFC_FCP_RING];
12794		} else if (phba->sli_rev == LPFC_SLI_REV4) {
12795			pring = lpfc_sli4_calc_ring(phba, iocbq);
12796		}
12797		ret_val = lpfc_sli_issue_abort_iotag(phba, pring, iocbq,
12798						     lpfc_sli_abort_fcp_cmpl);
12799		spin_unlock_irqrestore(&phba->hbalock, iflags);
12800		if (ret_val != IOCB_SUCCESS)
12801			errcnt++;
12802	}
12803
12804	return errcnt;
12805}
12806
12807/**
12808 * lpfc_sli_abort_taskmgmt - issue abort for all commands on a host/target/LUN
12809 * @vport: Pointer to virtual port.
12810 * @pring: Pointer to driver SLI ring object.
12811 * @tgt_id: SCSI ID of the target.
12812 * @lun_id: LUN ID of the scsi device.
12813 * @cmd: LPFC_CTX_LUN/LPFC_CTX_TGT/LPFC_CTX_HOST.
12814 *
12815 * This function sends an abort command for every SCSI command
12816 * associated with the given virtual port pending on the ring
12817 * filtered by lpfc_sli_validate_fcp_iocb_for_abort and then
12818 * lpfc_sli_validate_fcp_iocb function.  The ordering for validation before
12819 * submitting abort iocbs must be lpfc_sli_validate_fcp_iocb_for_abort
12820 * followed by lpfc_sli_validate_fcp_iocb.
12821 *
12822 * When taskmgmt_cmd == LPFC_CTX_LUN, the function sends abort only to the
12823 * FCP iocbs associated with lun specified by tgt_id and lun_id
12824 * parameters
12825 * When taskmgmt_cmd == LPFC_CTX_TGT, the function sends abort only to the
12826 * FCP iocbs associated with SCSI target specified by tgt_id parameter.
12827 * When taskmgmt_cmd == LPFC_CTX_HOST, the function sends abort to all
12828 * FCP iocbs associated with virtual port.
12829 * This function returns number of iocbs it aborted .
12830 * This function is called with no locks held right after a taskmgmt
12831 * command is sent.
12832 **/
12833int
12834lpfc_sli_abort_taskmgmt(struct lpfc_vport *vport, struct lpfc_sli_ring *pring,
12835			uint16_t tgt_id, uint64_t lun_id, lpfc_ctx_cmd cmd)
12836{
12837	struct lpfc_hba *phba = vport->phba;
12838	struct lpfc_io_buf *lpfc_cmd;
12839	struct lpfc_iocbq *abtsiocbq;
12840	struct lpfc_nodelist *ndlp = NULL;
12841	struct lpfc_iocbq *iocbq;
12842	int sum, i, ret_val;
12843	unsigned long iflags;
12844	struct lpfc_sli_ring *pring_s4 = NULL;
12845	u16 ulp_context, iotag, cqid = LPFC_WQE_CQ_ID_DEFAULT;
12846	bool ia;
12847
12848	spin_lock_irqsave(&phba->hbalock, iflags);
12849
12850	/* all I/Os are in process of being flushed */
12851	if (phba->hba_flag & HBA_IOQ_FLUSH) {
12852		spin_unlock_irqrestore(&phba->hbalock, iflags);
12853		return 0;
12854	}
12855	sum = 0;
12856
12857	for (i = 1; i <= phba->sli.last_iotag; i++) {
12858		iocbq = phba->sli.iocbq_lookup[i];
12859
12860		if (lpfc_sli_validate_fcp_iocb_for_abort(iocbq, vport))
12861			continue;
12862
12863		if (lpfc_sli_validate_fcp_iocb(iocbq, vport, tgt_id, lun_id,
12864					       cmd) != 0)
12865			continue;
12866
12867		/* Guard against IO completion being called at same time */
12868		lpfc_cmd = container_of(iocbq, struct lpfc_io_buf, cur_iocbq);
12869		spin_lock(&lpfc_cmd->buf_lock);
12870
12871		if (!lpfc_cmd->pCmd) {
12872			spin_unlock(&lpfc_cmd->buf_lock);
12873			continue;
12874		}
12875
12876		if (phba->sli_rev == LPFC_SLI_REV4) {
12877			pring_s4 =
12878			    phba->sli4_hba.hdwq[iocbq->hba_wqidx].io_wq->pring;
12879			if (!pring_s4) {
12880				spin_unlock(&lpfc_cmd->buf_lock);
12881				continue;
12882			}
12883			/* Note: both hbalock and ring_lock must be set here */
12884			spin_lock(&pring_s4->ring_lock);
12885		}
12886
12887		/*
12888		 * If the iocbq is already being aborted, don't take a second
12889		 * action, but do count it.
12890		 */
12891		if ((iocbq->cmd_flag & LPFC_DRIVER_ABORTED) ||
12892		    !(iocbq->cmd_flag & LPFC_IO_ON_TXCMPLQ)) {
12893			if (phba->sli_rev == LPFC_SLI_REV4)
12894				spin_unlock(&pring_s4->ring_lock);
12895			spin_unlock(&lpfc_cmd->buf_lock);
12896			continue;
12897		}
12898
12899		/* issue ABTS for this IOCB based on iotag */
12900		abtsiocbq = __lpfc_sli_get_iocbq(phba);
12901		if (!abtsiocbq) {
12902			if (phba->sli_rev == LPFC_SLI_REV4)
12903				spin_unlock(&pring_s4->ring_lock);
12904			spin_unlock(&lpfc_cmd->buf_lock);
12905			continue;
12906		}
12907
12908		if (phba->sli_rev == LPFC_SLI_REV4) {
12909			iotag = abtsiocbq->iotag;
12910			ulp_context = iocbq->sli4_xritag;
12911			cqid = lpfc_cmd->hdwq->io_cq_map;
12912		} else {
12913			iotag = iocbq->iocb.ulpIoTag;
12914			if (pring->ringno == LPFC_ELS_RING) {
12915				ndlp = iocbq->ndlp;
12916				ulp_context = ndlp->nlp_rpi;
12917			} else {
12918				ulp_context = iocbq->iocb.ulpContext;
12919			}
12920		}
12921
12922		ndlp = lpfc_cmd->rdata->pnode;
12923
12924		if (lpfc_is_link_up(phba) &&
12925		    (ndlp && ndlp->nlp_state == NLP_STE_MAPPED_NODE) &&
12926		    !(phba->link_flag & LS_EXTERNAL_LOOPBACK))
12927			ia = false;
12928		else
12929			ia = true;
12930
12931		lpfc_sli_prep_abort_xri(phba, abtsiocbq, ulp_context, iotag,
12932					iocbq->iocb.ulpClass, cqid,
12933					ia, false);
12934
12935		abtsiocbq->vport = vport;
12936
12937		/* ABTS WQE must go to the same WQ as the WQE to be aborted */
12938		abtsiocbq->hba_wqidx = iocbq->hba_wqidx;
12939		if (iocbq->cmd_flag & LPFC_IO_FCP)
12940			abtsiocbq->cmd_flag |= LPFC_USE_FCPWQIDX;
12941		if (iocbq->cmd_flag & LPFC_IO_FOF)
12942			abtsiocbq->cmd_flag |= LPFC_IO_FOF;
12943
12944		/* Setup callback routine and issue the command. */
12945		abtsiocbq->cmd_cmpl = lpfc_sli_abort_fcp_cmpl;
12946
12947		/*
12948		 * Indicate the IO is being aborted by the driver and set
12949		 * the caller's flag into the aborted IO.
12950		 */
12951		iocbq->cmd_flag |= LPFC_DRIVER_ABORTED;
12952
12953		if (phba->sli_rev == LPFC_SLI_REV4) {
12954			ret_val = __lpfc_sli_issue_iocb(phba, pring_s4->ringno,
12955							abtsiocbq, 0);
12956			spin_unlock(&pring_s4->ring_lock);
12957		} else {
12958			ret_val = __lpfc_sli_issue_iocb(phba, pring->ringno,
12959							abtsiocbq, 0);
12960		}
12961
12962		spin_unlock(&lpfc_cmd->buf_lock);
12963
12964		if (ret_val == IOCB_ERROR)
12965			__lpfc_sli_release_iocbq(phba, abtsiocbq);
12966		else
12967			sum++;
12968	}
12969	spin_unlock_irqrestore(&phba->hbalock, iflags);
12970	return sum;
12971}
12972
12973/**
12974 * lpfc_sli_wake_iocb_wait - lpfc_sli_issue_iocb_wait's completion handler
12975 * @phba: Pointer to HBA context object.
12976 * @cmdiocbq: Pointer to command iocb.
12977 * @rspiocbq: Pointer to response iocb.
12978 *
12979 * This function is the completion handler for iocbs issued using
12980 * lpfc_sli_issue_iocb_wait function. This function is called by the
12981 * ring event handler function without any lock held. This function
12982 * can be called from both worker thread context and interrupt
12983 * context. This function also can be called from other thread which
12984 * cleans up the SLI layer objects.
12985 * This function copy the contents of the response iocb to the
12986 * response iocb memory object provided by the caller of
12987 * lpfc_sli_issue_iocb_wait and then wakes up the thread which
12988 * sleeps for the iocb completion.
12989 **/
12990static void
12991lpfc_sli_wake_iocb_wait(struct lpfc_hba *phba,
12992			struct lpfc_iocbq *cmdiocbq,
12993			struct lpfc_iocbq *rspiocbq)
12994{
12995	wait_queue_head_t *pdone_q;
12996	unsigned long iflags;
12997	struct lpfc_io_buf *lpfc_cmd;
12998	size_t offset = offsetof(struct lpfc_iocbq, wqe);
12999
13000	spin_lock_irqsave(&phba->hbalock, iflags);
13001	if (cmdiocbq->cmd_flag & LPFC_IO_WAKE_TMO) {
13002
13003		/*
13004		 * A time out has occurred for the iocb.  If a time out
13005		 * completion handler has been supplied, call it.  Otherwise,
13006		 * just free the iocbq.
13007		 */
13008
13009		spin_unlock_irqrestore(&phba->hbalock, iflags);
13010		cmdiocbq->cmd_cmpl = cmdiocbq->wait_cmd_cmpl;
13011		cmdiocbq->wait_cmd_cmpl = NULL;
13012		if (cmdiocbq->cmd_cmpl)
13013			cmdiocbq->cmd_cmpl(phba, cmdiocbq, NULL);
13014		else
13015			lpfc_sli_release_iocbq(phba, cmdiocbq);
13016		return;
13017	}
13018
13019	/* Copy the contents of the local rspiocb into the caller's buffer. */
13020	cmdiocbq->cmd_flag |= LPFC_IO_WAKE;
13021	if (cmdiocbq->rsp_iocb && rspiocbq)
13022		memcpy((char *)cmdiocbq->rsp_iocb + offset,
13023		       (char *)rspiocbq + offset, sizeof(*rspiocbq) - offset);
13024
13025	/* Set the exchange busy flag for task management commands */
13026	if ((cmdiocbq->cmd_flag & LPFC_IO_FCP) &&
13027	    !(cmdiocbq->cmd_flag & LPFC_IO_LIBDFC)) {
13028		lpfc_cmd = container_of(cmdiocbq, struct lpfc_io_buf,
13029					cur_iocbq);
13030		if (rspiocbq && (rspiocbq->cmd_flag & LPFC_EXCHANGE_BUSY))
13031			lpfc_cmd->flags |= LPFC_SBUF_XBUSY;
13032		else
13033			lpfc_cmd->flags &= ~LPFC_SBUF_XBUSY;
13034	}
13035
13036	pdone_q = cmdiocbq->context_un.wait_queue;
13037	if (pdone_q)
13038		wake_up(pdone_q);
13039	spin_unlock_irqrestore(&phba->hbalock, iflags);
13040	return;
13041}
13042
13043/**
13044 * lpfc_chk_iocb_flg - Test IOCB flag with lock held.
13045 * @phba: Pointer to HBA context object..
13046 * @piocbq: Pointer to command iocb.
13047 * @flag: Flag to test.
13048 *
13049 * This routine grabs the hbalock and then test the cmd_flag to
13050 * see if the passed in flag is set.
13051 * Returns:
13052 * 1 if flag is set.
13053 * 0 if flag is not set.
13054 **/
13055static int
13056lpfc_chk_iocb_flg(struct lpfc_hba *phba,
13057		 struct lpfc_iocbq *piocbq, uint32_t flag)
13058{
13059	unsigned long iflags;
13060	int ret;
13061
13062	spin_lock_irqsave(&phba->hbalock, iflags);
13063	ret = piocbq->cmd_flag & flag;
13064	spin_unlock_irqrestore(&phba->hbalock, iflags);
13065	return ret;
13066
13067}
13068
13069/**
13070 * lpfc_sli_issue_iocb_wait - Synchronous function to issue iocb commands
13071 * @phba: Pointer to HBA context object..
13072 * @ring_number: Ring number
13073 * @piocb: Pointer to command iocb.
13074 * @prspiocbq: Pointer to response iocb.
13075 * @timeout: Timeout in number of seconds.
13076 *
13077 * This function issues the iocb to firmware and waits for the
13078 * iocb to complete. The cmd_cmpl field of the shall be used
13079 * to handle iocbs which time out. If the field is NULL, the
13080 * function shall free the iocbq structure.  If more clean up is
13081 * needed, the caller is expected to provide a completion function
13082 * that will provide the needed clean up.  If the iocb command is
13083 * not completed within timeout seconds, the function will either
13084 * free the iocbq structure (if cmd_cmpl == NULL) or execute the
13085 * completion function set in the cmd_cmpl field and then return
13086 * a status of IOCB_TIMEDOUT.  The caller should not free the iocb
13087 * resources if this function returns IOCB_TIMEDOUT.
13088 * The function waits for the iocb completion using an
13089 * non-interruptible wait.
13090 * This function will sleep while waiting for iocb completion.
13091 * So, this function should not be called from any context which
13092 * does not allow sleeping. Due to the same reason, this function
13093 * cannot be called with interrupt disabled.
13094 * This function assumes that the iocb completions occur while
13095 * this function sleep. So, this function cannot be called from
13096 * the thread which process iocb completion for this ring.
13097 * This function clears the cmd_flag of the iocb object before
13098 * issuing the iocb and the iocb completion handler sets this
13099 * flag and wakes this thread when the iocb completes.
13100 * The contents of the response iocb will be copied to prspiocbq
13101 * by the completion handler when the command completes.
13102 * This function returns IOCB_SUCCESS when success.
13103 * This function is called with no lock held.
13104 **/
13105int
13106lpfc_sli_issue_iocb_wait(struct lpfc_hba *phba,
13107			 uint32_t ring_number,
13108			 struct lpfc_iocbq *piocb,
13109			 struct lpfc_iocbq *prspiocbq,
13110			 uint32_t timeout)
13111{
13112	DECLARE_WAIT_QUEUE_HEAD_ONSTACK(done_q);
13113	long timeleft, timeout_req = 0;
13114	int retval = IOCB_SUCCESS;
13115	uint32_t creg_val;
13116	struct lpfc_iocbq *iocb;
13117	int txq_cnt = 0;
13118	int txcmplq_cnt = 0;
13119	struct lpfc_sli_ring *pring;
13120	unsigned long iflags;
13121	bool iocb_completed = true;
13122
13123	if (phba->sli_rev >= LPFC_SLI_REV4) {
13124		lpfc_sli_prep_wqe(phba, piocb);
13125
13126		pring = lpfc_sli4_calc_ring(phba, piocb);
13127	} else
13128		pring = &phba->sli.sli3_ring[ring_number];
13129	/*
13130	 * If the caller has provided a response iocbq buffer, then rsp_iocb
13131	 * is NULL or its an error.
13132	 */
13133	if (prspiocbq) {
13134		if (piocb->rsp_iocb)
13135			return IOCB_ERROR;
13136		piocb->rsp_iocb = prspiocbq;
13137	}
13138
13139	piocb->wait_cmd_cmpl = piocb->cmd_cmpl;
13140	piocb->cmd_cmpl = lpfc_sli_wake_iocb_wait;
13141	piocb->context_un.wait_queue = &done_q;
13142	piocb->cmd_flag &= ~(LPFC_IO_WAKE | LPFC_IO_WAKE_TMO);
13143
13144	if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
13145		if (lpfc_readl(phba->HCregaddr, &creg_val))
13146			return IOCB_ERROR;
13147		creg_val |= (HC_R0INT_ENA << LPFC_FCP_RING);
13148		writel(creg_val, phba->HCregaddr);
13149		readl(phba->HCregaddr); /* flush */
13150	}
13151
13152	retval = lpfc_sli_issue_iocb(phba, ring_number, piocb,
13153				     SLI_IOCB_RET_IOCB);
13154	if (retval == IOCB_SUCCESS) {
13155		timeout_req = msecs_to_jiffies(timeout * 1000);
13156		timeleft = wait_event_timeout(done_q,
13157				lpfc_chk_iocb_flg(phba, piocb, LPFC_IO_WAKE),
13158				timeout_req);
13159		spin_lock_irqsave(&phba->hbalock, iflags);
13160		if (!(piocb->cmd_flag & LPFC_IO_WAKE)) {
13161
13162			/*
13163			 * IOCB timed out.  Inform the wake iocb wait
13164			 * completion function and set local status
13165			 */
13166
13167			iocb_completed = false;
13168			piocb->cmd_flag |= LPFC_IO_WAKE_TMO;
13169		}
13170		spin_unlock_irqrestore(&phba->hbalock, iflags);
13171		if (iocb_completed) {
13172			lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
13173					"0331 IOCB wake signaled\n");
13174			/* Note: we are not indicating if the IOCB has a success
13175			 * status or not - that's for the caller to check.
13176			 * IOCB_SUCCESS means just that the command was sent and
13177			 * completed. Not that it completed successfully.
13178			 * */
13179		} else if (timeleft == 0) {
13180			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
13181					"0338 IOCB wait timeout error - no "
13182					"wake response Data x%x\n", timeout);
13183			retval = IOCB_TIMEDOUT;
13184		} else {
13185			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
13186					"0330 IOCB wake NOT set, "
13187					"Data x%x x%lx\n",
13188					timeout, (timeleft / jiffies));
13189			retval = IOCB_TIMEDOUT;
13190		}
13191	} else if (retval == IOCB_BUSY) {
13192		if (phba->cfg_log_verbose & LOG_SLI) {
13193			list_for_each_entry(iocb, &pring->txq, list) {
13194				txq_cnt++;
13195			}
13196			list_for_each_entry(iocb, &pring->txcmplq, list) {
13197				txcmplq_cnt++;
13198			}
13199			lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
13200				"2818 Max IOCBs %d txq cnt %d txcmplq cnt %d\n",
13201				phba->iocb_cnt, txq_cnt, txcmplq_cnt);
13202		}
13203		return retval;
13204	} else {
13205		lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
13206				"0332 IOCB wait issue failed, Data x%x\n",
13207				retval);
13208		retval = IOCB_ERROR;
13209	}
13210
13211	if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
13212		if (lpfc_readl(phba->HCregaddr, &creg_val))
13213			return IOCB_ERROR;
13214		creg_val &= ~(HC_R0INT_ENA << LPFC_FCP_RING);
13215		writel(creg_val, phba->HCregaddr);
13216		readl(phba->HCregaddr); /* flush */
13217	}
13218
13219	if (prspiocbq)
13220		piocb->rsp_iocb = NULL;
13221
13222	piocb->context_un.wait_queue = NULL;
13223	piocb->cmd_cmpl = NULL;
13224	return retval;
13225}
13226
13227/**
13228 * lpfc_sli_issue_mbox_wait - Synchronous function to issue mailbox
13229 * @phba: Pointer to HBA context object.
13230 * @pmboxq: Pointer to driver mailbox object.
13231 * @timeout: Timeout in number of seconds.
13232 *
13233 * This function issues the mailbox to firmware and waits for the
13234 * mailbox command to complete. If the mailbox command is not
13235 * completed within timeout seconds, it returns MBX_TIMEOUT.
13236 * The function waits for the mailbox completion using an
13237 * interruptible wait. If the thread is woken up due to a
13238 * signal, MBX_TIMEOUT error is returned to the caller. Caller
13239 * should not free the mailbox resources, if this function returns
13240 * MBX_TIMEOUT.
13241 * This function will sleep while waiting for mailbox completion.
13242 * So, this function should not be called from any context which
13243 * does not allow sleeping. Due to the same reason, this function
13244 * cannot be called with interrupt disabled.
13245 * This function assumes that the mailbox completion occurs while
13246 * this function sleep. So, this function cannot be called from
13247 * the worker thread which processes mailbox completion.
13248 * This function is called in the context of HBA management
13249 * applications.
13250 * This function returns MBX_SUCCESS when successful.
13251 * This function is called with no lock held.
13252 **/
13253int
13254lpfc_sli_issue_mbox_wait(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq,
13255			 uint32_t timeout)
13256{
13257	struct completion mbox_done;
13258	int retval;
13259	unsigned long flag;
13260
13261	pmboxq->mbox_flag &= ~LPFC_MBX_WAKE;
13262	/* setup wake call as IOCB callback */
13263	pmboxq->mbox_cmpl = lpfc_sli_wake_mbox_wait;
13264
13265	/* setup ctx_u field to pass wait_queue pointer to wake function  */
13266	init_completion(&mbox_done);
13267	pmboxq->ctx_u.mbox_wait = &mbox_done;
13268	/* now issue the command */
13269	retval = lpfc_sli_issue_mbox(phba, pmboxq, MBX_NOWAIT);
13270	if (retval == MBX_BUSY || retval == MBX_SUCCESS) {
13271		wait_for_completion_timeout(&mbox_done,
13272					    msecs_to_jiffies(timeout * 1000));
13273
13274		spin_lock_irqsave(&phba->hbalock, flag);
13275		pmboxq->ctx_u.mbox_wait = NULL;
13276		/*
13277		 * if LPFC_MBX_WAKE flag is set the mailbox is completed
13278		 * else do not free the resources.
13279		 */
13280		if (pmboxq->mbox_flag & LPFC_MBX_WAKE) {
13281			retval = MBX_SUCCESS;
13282		} else {
13283			retval = MBX_TIMEOUT;
13284			pmboxq->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
13285		}
13286		spin_unlock_irqrestore(&phba->hbalock, flag);
13287	}
13288	return retval;
13289}
13290
13291/**
13292 * lpfc_sli_mbox_sys_shutdown - shutdown mailbox command sub-system
13293 * @phba: Pointer to HBA context.
13294 * @mbx_action: Mailbox shutdown options.
13295 *
13296 * This function is called to shutdown the driver's mailbox sub-system.
13297 * It first marks the mailbox sub-system is in a block state to prevent
13298 * the asynchronous mailbox command from issued off the pending mailbox
13299 * command queue. If the mailbox command sub-system shutdown is due to
13300 * HBA error conditions such as EEH or ERATT, this routine shall invoke
13301 * the mailbox sub-system flush routine to forcefully bring down the
13302 * mailbox sub-system. Otherwise, if it is due to normal condition (such
13303 * as with offline or HBA function reset), this routine will wait for the
13304 * outstanding mailbox command to complete before invoking the mailbox
13305 * sub-system flush routine to gracefully bring down mailbox sub-system.
13306 **/
13307void
13308lpfc_sli_mbox_sys_shutdown(struct lpfc_hba *phba, int mbx_action)
13309{
13310	struct lpfc_sli *psli = &phba->sli;
13311	unsigned long timeout;
13312
13313	if (mbx_action == LPFC_MBX_NO_WAIT) {
13314		/* delay 100ms for port state */
13315		msleep(100);
13316		lpfc_sli_mbox_sys_flush(phba);
13317		return;
13318	}
13319	timeout = msecs_to_jiffies(LPFC_MBOX_TMO * 1000) + jiffies;
13320
13321	/* Disable softirqs, including timers from obtaining phba->hbalock */
13322	local_bh_disable();
13323
13324	spin_lock_irq(&phba->hbalock);
13325	psli->sli_flag |= LPFC_SLI_ASYNC_MBX_BLK;
13326
13327	if (psli->sli_flag & LPFC_SLI_ACTIVE) {
13328		/* Determine how long we might wait for the active mailbox
13329		 * command to be gracefully completed by firmware.
13330		 */
13331		if (phba->sli.mbox_active)
13332			timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba,
13333						phba->sli.mbox_active) *
13334						1000) + jiffies;
13335		spin_unlock_irq(&phba->hbalock);
13336
13337		/* Enable softirqs again, done with phba->hbalock */
13338		local_bh_enable();
13339
13340		while (phba->sli.mbox_active) {
13341			/* Check active mailbox complete status every 2ms */
13342			msleep(2);
13343			if (time_after(jiffies, timeout))
13344				/* Timeout, let the mailbox flush routine to
13345				 * forcefully release active mailbox command
13346				 */
13347				break;
13348		}
13349	} else {
13350		spin_unlock_irq(&phba->hbalock);
13351
13352		/* Enable softirqs again, done with phba->hbalock */
13353		local_bh_enable();
13354	}
13355
13356	lpfc_sli_mbox_sys_flush(phba);
13357}
13358
13359/**
13360 * lpfc_sli_eratt_read - read sli-3 error attention events
13361 * @phba: Pointer to HBA context.
13362 *
13363 * This function is called to read the SLI3 device error attention registers
13364 * for possible error attention events. The caller must hold the hostlock
13365 * with spin_lock_irq().
13366 *
13367 * This function returns 1 when there is Error Attention in the Host Attention
13368 * Register and returns 0 otherwise.
13369 **/
13370static int
13371lpfc_sli_eratt_read(struct lpfc_hba *phba)
13372{
13373	uint32_t ha_copy;
13374
13375	/* Read chip Host Attention (HA) register */
13376	if (lpfc_readl(phba->HAregaddr, &ha_copy))
13377		goto unplug_err;
13378
13379	if (ha_copy & HA_ERATT) {
13380		/* Read host status register to retrieve error event */
13381		if (lpfc_sli_read_hs(phba))
13382			goto unplug_err;
13383
13384		/* Check if there is a deferred error condition is active */
13385		if ((HS_FFER1 & phba->work_hs) &&
13386		    ((HS_FFER2 | HS_FFER3 | HS_FFER4 | HS_FFER5 |
13387		      HS_FFER6 | HS_FFER7 | HS_FFER8) & phba->work_hs)) {
13388			phba->hba_flag |= DEFER_ERATT;
13389			/* Clear all interrupt enable conditions */
13390			writel(0, phba->HCregaddr);
13391			readl(phba->HCregaddr);
13392		}
13393
13394		/* Set the driver HA work bitmap */
13395		phba->work_ha |= HA_ERATT;
13396		/* Indicate polling handles this ERATT */
13397		phba->hba_flag |= HBA_ERATT_HANDLED;
13398		return 1;
13399	}
13400	return 0;
13401
13402unplug_err:
13403	/* Set the driver HS work bitmap */
13404	phba->work_hs |= UNPLUG_ERR;
13405	/* Set the driver HA work bitmap */
13406	phba->work_ha |= HA_ERATT;
13407	/* Indicate polling handles this ERATT */
13408	phba->hba_flag |= HBA_ERATT_HANDLED;
13409	return 1;
13410}
13411
13412/**
13413 * lpfc_sli4_eratt_read - read sli-4 error attention events
13414 * @phba: Pointer to HBA context.
13415 *
13416 * This function is called to read the SLI4 device error attention registers
13417 * for possible error attention events. The caller must hold the hostlock
13418 * with spin_lock_irq().
13419 *
13420 * This function returns 1 when there is Error Attention in the Host Attention
13421 * Register and returns 0 otherwise.
13422 **/
13423static int
13424lpfc_sli4_eratt_read(struct lpfc_hba *phba)
13425{
13426	uint32_t uerr_sta_hi, uerr_sta_lo;
13427	uint32_t if_type, portsmphr;
13428	struct lpfc_register portstat_reg;
13429	u32 logmask;
13430
13431	/*
13432	 * For now, use the SLI4 device internal unrecoverable error
13433	 * registers for error attention. This can be changed later.
13434	 */
13435	if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
13436	switch (if_type) {
13437	case LPFC_SLI_INTF_IF_TYPE_0:
13438		if (lpfc_readl(phba->sli4_hba.u.if_type0.UERRLOregaddr,
13439			&uerr_sta_lo) ||
13440			lpfc_readl(phba->sli4_hba.u.if_type0.UERRHIregaddr,
13441			&uerr_sta_hi)) {
13442			phba->work_hs |= UNPLUG_ERR;
13443			phba->work_ha |= HA_ERATT;
13444			phba->hba_flag |= HBA_ERATT_HANDLED;
13445			return 1;
13446		}
13447		if ((~phba->sli4_hba.ue_mask_lo & uerr_sta_lo) ||
13448		    (~phba->sli4_hba.ue_mask_hi & uerr_sta_hi)) {
13449			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
13450					"1423 HBA Unrecoverable error: "
13451					"uerr_lo_reg=0x%x, uerr_hi_reg=0x%x, "
13452					"ue_mask_lo_reg=0x%x, "
13453					"ue_mask_hi_reg=0x%x\n",
13454					uerr_sta_lo, uerr_sta_hi,
13455					phba->sli4_hba.ue_mask_lo,
13456					phba->sli4_hba.ue_mask_hi);
13457			phba->work_status[0] = uerr_sta_lo;
13458			phba->work_status[1] = uerr_sta_hi;
13459			phba->work_ha |= HA_ERATT;
13460			phba->hba_flag |= HBA_ERATT_HANDLED;
13461			return 1;
13462		}
13463		break;
13464	case LPFC_SLI_INTF_IF_TYPE_2:
13465	case LPFC_SLI_INTF_IF_TYPE_6:
13466		if (lpfc_readl(phba->sli4_hba.u.if_type2.STATUSregaddr,
13467			&portstat_reg.word0) ||
13468			lpfc_readl(phba->sli4_hba.PSMPHRregaddr,
13469			&portsmphr)){
13470			phba->work_hs |= UNPLUG_ERR;
13471			phba->work_ha |= HA_ERATT;
13472			phba->hba_flag |= HBA_ERATT_HANDLED;
13473			return 1;
13474		}
13475		if (bf_get(lpfc_sliport_status_err, &portstat_reg)) {
13476			phba->work_status[0] =
13477				readl(phba->sli4_hba.u.if_type2.ERR1regaddr);
13478			phba->work_status[1] =
13479				readl(phba->sli4_hba.u.if_type2.ERR2regaddr);
13480			logmask = LOG_TRACE_EVENT;
13481			if (phba->work_status[0] ==
13482				SLIPORT_ERR1_REG_ERR_CODE_2 &&
13483			    phba->work_status[1] == SLIPORT_ERR2_REG_FW_RESTART)
13484				logmask = LOG_SLI;
13485			lpfc_printf_log(phba, KERN_ERR, logmask,
13486					"2885 Port Status Event: "
13487					"port status reg 0x%x, "
13488					"port smphr reg 0x%x, "
13489					"error 1=0x%x, error 2=0x%x\n",
13490					portstat_reg.word0,
13491					portsmphr,
13492					phba->work_status[0],
13493					phba->work_status[1]);
13494			phba->work_ha |= HA_ERATT;
13495			phba->hba_flag |= HBA_ERATT_HANDLED;
13496			return 1;
13497		}
13498		break;
13499	case LPFC_SLI_INTF_IF_TYPE_1:
13500	default:
13501		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
13502				"2886 HBA Error Attention on unsupported "
13503				"if type %d.", if_type);
13504		return 1;
13505	}
13506
13507	return 0;
13508}
13509
13510/**
13511 * lpfc_sli_check_eratt - check error attention events
13512 * @phba: Pointer to HBA context.
13513 *
13514 * This function is called from timer soft interrupt context to check HBA's
13515 * error attention register bit for error attention events.
13516 *
13517 * This function returns 1 when there is Error Attention in the Host Attention
13518 * Register and returns 0 otherwise.
13519 **/
13520int
13521lpfc_sli_check_eratt(struct lpfc_hba *phba)
13522{
13523	uint32_t ha_copy;
13524
13525	/* If somebody is waiting to handle an eratt, don't process it
13526	 * here. The brdkill function will do this.
13527	 */
13528	if (phba->link_flag & LS_IGNORE_ERATT)
13529		return 0;
13530
13531	/* Check if interrupt handler handles this ERATT */
13532	spin_lock_irq(&phba->hbalock);
13533	if (phba->hba_flag & HBA_ERATT_HANDLED) {
13534		/* Interrupt handler has handled ERATT */
13535		spin_unlock_irq(&phba->hbalock);
13536		return 0;
13537	}
13538
13539	/*
13540	 * If there is deferred error attention, do not check for error
13541	 * attention
13542	 */
13543	if (unlikely(phba->hba_flag & DEFER_ERATT)) {
13544		spin_unlock_irq(&phba->hbalock);
13545		return 0;
13546	}
13547
13548	/* If PCI channel is offline, don't process it */
13549	if (unlikely(pci_channel_offline(phba->pcidev))) {
13550		spin_unlock_irq(&phba->hbalock);
13551		return 0;
13552	}
13553
13554	switch (phba->sli_rev) {
13555	case LPFC_SLI_REV2:
13556	case LPFC_SLI_REV3:
13557		/* Read chip Host Attention (HA) register */
13558		ha_copy = lpfc_sli_eratt_read(phba);
13559		break;
13560	case LPFC_SLI_REV4:
13561		/* Read device Uncoverable Error (UERR) registers */
13562		ha_copy = lpfc_sli4_eratt_read(phba);
13563		break;
13564	default:
13565		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
13566				"0299 Invalid SLI revision (%d)\n",
13567				phba->sli_rev);
13568		ha_copy = 0;
13569		break;
13570	}
13571	spin_unlock_irq(&phba->hbalock);
13572
13573	return ha_copy;
13574}
13575
13576/**
13577 * lpfc_intr_state_check - Check device state for interrupt handling
13578 * @phba: Pointer to HBA context.
13579 *
13580 * This inline routine checks whether a device or its PCI slot is in a state
13581 * that the interrupt should be handled.
13582 *
13583 * This function returns 0 if the device or the PCI slot is in a state that
13584 * interrupt should be handled, otherwise -EIO.
13585 */
13586static inline int
13587lpfc_intr_state_check(struct lpfc_hba *phba)
13588{
13589	/* If the pci channel is offline, ignore all the interrupts */
13590	if (unlikely(pci_channel_offline(phba->pcidev)))
13591		return -EIO;
13592
13593	/* Update device level interrupt statistics */
13594	phba->sli.slistat.sli_intr++;
13595
13596	/* Ignore all interrupts during initialization. */
13597	if (unlikely(phba->link_state < LPFC_LINK_DOWN))
13598		return -EIO;
13599
13600	return 0;
13601}
13602
13603/**
13604 * lpfc_sli_sp_intr_handler - Slow-path interrupt handler to SLI-3 device
13605 * @irq: Interrupt number.
13606 * @dev_id: The device context pointer.
13607 *
13608 * This function is directly called from the PCI layer as an interrupt
13609 * service routine when device with SLI-3 interface spec is enabled with
13610 * MSI-X multi-message interrupt mode and there are slow-path events in
13611 * the HBA. However, when the device is enabled with either MSI or Pin-IRQ
13612 * interrupt mode, this function is called as part of the device-level
13613 * interrupt handler. When the PCI slot is in error recovery or the HBA
13614 * is undergoing initialization, the interrupt handler will not process
13615 * the interrupt. The link attention and ELS ring attention events are
13616 * handled by the worker thread. The interrupt handler signals the worker
13617 * thread and returns for these events. This function is called without
13618 * any lock held. It gets the hbalock to access and update SLI data
13619 * structures.
13620 *
13621 * This function returns IRQ_HANDLED when interrupt is handled else it
13622 * returns IRQ_NONE.
13623 **/
13624irqreturn_t
13625lpfc_sli_sp_intr_handler(int irq, void *dev_id)
13626{
13627	struct lpfc_hba  *phba;
13628	uint32_t ha_copy, hc_copy;
13629	uint32_t work_ha_copy;
13630	unsigned long status;
13631	unsigned long iflag;
13632	uint32_t control;
13633
13634	MAILBOX_t *mbox, *pmbox;
13635	struct lpfc_vport *vport;
13636	struct lpfc_nodelist *ndlp;
13637	struct lpfc_dmabuf *mp;
13638	LPFC_MBOXQ_t *pmb;
13639	int rc;
13640
13641	/*
13642	 * Get the driver's phba structure from the dev_id and
13643	 * assume the HBA is not interrupting.
13644	 */
13645	phba = (struct lpfc_hba *)dev_id;
13646
13647	if (unlikely(!phba))
13648		return IRQ_NONE;
13649
13650	/*
13651	 * Stuff needs to be attented to when this function is invoked as an
13652	 * individual interrupt handler in MSI-X multi-message interrupt mode
13653	 */
13654	if (phba->intr_type == MSIX) {
13655		/* Check device state for handling interrupt */
13656		if (lpfc_intr_state_check(phba))
13657			return IRQ_NONE;
13658		/* Need to read HA REG for slow-path events */
13659		spin_lock_irqsave(&phba->hbalock, iflag);
13660		if (lpfc_readl(phba->HAregaddr, &ha_copy))
13661			goto unplug_error;
13662		/* If somebody is waiting to handle an eratt don't process it
13663		 * here. The brdkill function will do this.
13664		 */
13665		if (phba->link_flag & LS_IGNORE_ERATT)
13666			ha_copy &= ~HA_ERATT;
13667		/* Check the need for handling ERATT in interrupt handler */
13668		if (ha_copy & HA_ERATT) {
13669			if (phba->hba_flag & HBA_ERATT_HANDLED)
13670				/* ERATT polling has handled ERATT */
13671				ha_copy &= ~HA_ERATT;
13672			else
13673				/* Indicate interrupt handler handles ERATT */
13674				phba->hba_flag |= HBA_ERATT_HANDLED;
13675		}
13676
13677		/*
13678		 * If there is deferred error attention, do not check for any
13679		 * interrupt.
13680		 */
13681		if (unlikely(phba->hba_flag & DEFER_ERATT)) {
13682			spin_unlock_irqrestore(&phba->hbalock, iflag);
13683			return IRQ_NONE;
13684		}
13685
13686		/* Clear up only attention source related to slow-path */
13687		if (lpfc_readl(phba->HCregaddr, &hc_copy))
13688			goto unplug_error;
13689
13690		writel(hc_copy & ~(HC_MBINT_ENA | HC_R2INT_ENA |
13691			HC_LAINT_ENA | HC_ERINT_ENA),
13692			phba->HCregaddr);
13693		writel((ha_copy & (HA_MBATT | HA_R2_CLR_MSK)),
13694			phba->HAregaddr);
13695		writel(hc_copy, phba->HCregaddr);
13696		readl(phba->HAregaddr); /* flush */
13697		spin_unlock_irqrestore(&phba->hbalock, iflag);
13698	} else
13699		ha_copy = phba->ha_copy;
13700
13701	work_ha_copy = ha_copy & phba->work_ha_mask;
13702
13703	if (work_ha_copy) {
13704		if (work_ha_copy & HA_LATT) {
13705			if (phba->sli.sli_flag & LPFC_PROCESS_LA) {
13706				/*
13707				 * Turn off Link Attention interrupts
13708				 * until CLEAR_LA done
13709				 */
13710				spin_lock_irqsave(&phba->hbalock, iflag);
13711				phba->sli.sli_flag &= ~LPFC_PROCESS_LA;
13712				if (lpfc_readl(phba->HCregaddr, &control))
13713					goto unplug_error;
13714				control &= ~HC_LAINT_ENA;
13715				writel(control, phba->HCregaddr);
13716				readl(phba->HCregaddr); /* flush */
13717				spin_unlock_irqrestore(&phba->hbalock, iflag);
13718			}
13719			else
13720				work_ha_copy &= ~HA_LATT;
13721		}
13722
13723		if (work_ha_copy & ~(HA_ERATT | HA_MBATT | HA_LATT)) {
13724			/*
13725			 * Turn off Slow Rings interrupts, LPFC_ELS_RING is
13726			 * the only slow ring.
13727			 */
13728			status = (work_ha_copy &
13729				(HA_RXMASK  << (4*LPFC_ELS_RING)));
13730			status >>= (4*LPFC_ELS_RING);
13731			if (status & HA_RXMASK) {
13732				spin_lock_irqsave(&phba->hbalock, iflag);
13733				if (lpfc_readl(phba->HCregaddr, &control))
13734					goto unplug_error;
13735
13736				lpfc_debugfs_slow_ring_trc(phba,
13737				"ISR slow ring:   ctl:x%x stat:x%x isrcnt:x%x",
13738				control, status,
13739				(uint32_t)phba->sli.slistat.sli_intr);
13740
13741				if (control & (HC_R0INT_ENA << LPFC_ELS_RING)) {
13742					lpfc_debugfs_slow_ring_trc(phba,
13743						"ISR Disable ring:"
13744						"pwork:x%x hawork:x%x wait:x%x",
13745						phba->work_ha, work_ha_copy,
13746						(uint32_t)((unsigned long)
13747						&phba->work_waitq));
13748
13749					control &=
13750					    ~(HC_R0INT_ENA << LPFC_ELS_RING);
13751					writel(control, phba->HCregaddr);
13752					readl(phba->HCregaddr); /* flush */
13753				}
13754				else {
13755					lpfc_debugfs_slow_ring_trc(phba,
13756						"ISR slow ring:   pwork:"
13757						"x%x hawork:x%x wait:x%x",
13758						phba->work_ha, work_ha_copy,
13759						(uint32_t)((unsigned long)
13760						&phba->work_waitq));
13761				}
13762				spin_unlock_irqrestore(&phba->hbalock, iflag);
13763			}
13764		}
13765		spin_lock_irqsave(&phba->hbalock, iflag);
13766		if (work_ha_copy & HA_ERATT) {
13767			if (lpfc_sli_read_hs(phba))
13768				goto unplug_error;
13769			/*
13770			 * Check if there is a deferred error condition
13771			 * is active
13772			 */
13773			if ((HS_FFER1 & phba->work_hs) &&
13774				((HS_FFER2 | HS_FFER3 | HS_FFER4 | HS_FFER5 |
13775				  HS_FFER6 | HS_FFER7 | HS_FFER8) &
13776				  phba->work_hs)) {
13777				phba->hba_flag |= DEFER_ERATT;
13778				/* Clear all interrupt enable conditions */
13779				writel(0, phba->HCregaddr);
13780				readl(phba->HCregaddr);
13781			}
13782		}
13783
13784		if ((work_ha_copy & HA_MBATT) && (phba->sli.mbox_active)) {
13785			pmb = phba->sli.mbox_active;
13786			pmbox = &pmb->u.mb;
13787			mbox = phba->mbox;
13788			vport = pmb->vport;
13789
13790			/* First check out the status word */
13791			lpfc_sli_pcimem_bcopy(mbox, pmbox, sizeof(uint32_t));
13792			if (pmbox->mbxOwner != OWN_HOST) {
13793				spin_unlock_irqrestore(&phba->hbalock, iflag);
13794				/*
13795				 * Stray Mailbox Interrupt, mbxCommand <cmd>
13796				 * mbxStatus <status>
13797				 */
13798				lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
13799						"(%d):0304 Stray Mailbox "
13800						"Interrupt mbxCommand x%x "
13801						"mbxStatus x%x\n",
13802						(vport ? vport->vpi : 0),
13803						pmbox->mbxCommand,
13804						pmbox->mbxStatus);
13805				/* clear mailbox attention bit */
13806				work_ha_copy &= ~HA_MBATT;
13807			} else {
13808				phba->sli.mbox_active = NULL;
13809				spin_unlock_irqrestore(&phba->hbalock, iflag);
13810				phba->last_completion_time = jiffies;
13811				del_timer(&phba->sli.mbox_tmo);
13812				if (pmb->mbox_cmpl) {
13813					lpfc_sli_pcimem_bcopy(mbox, pmbox,
13814							MAILBOX_CMD_SIZE);
13815					if (pmb->out_ext_byte_len &&
13816						pmb->ext_buf)
13817						lpfc_sli_pcimem_bcopy(
13818						phba->mbox_ext,
13819						pmb->ext_buf,
13820						pmb->out_ext_byte_len);
13821				}
13822				if (pmb->mbox_flag & LPFC_MBX_IMED_UNREG) {
13823					pmb->mbox_flag &= ~LPFC_MBX_IMED_UNREG;
13824
13825					lpfc_debugfs_disc_trc(vport,
13826						LPFC_DISC_TRC_MBOX_VPORT,
13827						"MBOX dflt rpi: : "
13828						"status:x%x rpi:x%x",
13829						(uint32_t)pmbox->mbxStatus,
13830						pmbox->un.varWords[0], 0);
13831
13832					if (!pmbox->mbxStatus) {
13833						mp = pmb->ctx_buf;
13834						ndlp = pmb->ctx_ndlp;
13835
13836						/* Reg_LOGIN of dflt RPI was
13837						 * successful. new lets get
13838						 * rid of the RPI using the
13839						 * same mbox buffer.
13840						 */
13841						lpfc_unreg_login(phba,
13842							vport->vpi,
13843							pmbox->un.varWords[0],
13844							pmb);
13845						pmb->mbox_cmpl =
13846							lpfc_mbx_cmpl_dflt_rpi;
13847						pmb->ctx_buf = mp;
13848						pmb->ctx_ndlp = ndlp;
13849						pmb->vport = vport;
13850						rc = lpfc_sli_issue_mbox(phba,
13851								pmb,
13852								MBX_NOWAIT);
13853						if (rc != MBX_BUSY)
13854							lpfc_printf_log(phba,
13855							KERN_ERR,
13856							LOG_TRACE_EVENT,
13857							"0350 rc should have"
13858							"been MBX_BUSY\n");
13859						if (rc != MBX_NOT_FINISHED)
13860							goto send_current_mbox;
13861					}
13862				}
13863				spin_lock_irqsave(
13864						&phba->pport->work_port_lock,
13865						iflag);
13866				phba->pport->work_port_events &=
13867					~WORKER_MBOX_TMO;
13868				spin_unlock_irqrestore(
13869						&phba->pport->work_port_lock,
13870						iflag);
13871
13872				/* Do NOT queue MBX_HEARTBEAT to the worker
13873				 * thread for processing.
13874				 */
13875				if (pmbox->mbxCommand == MBX_HEARTBEAT) {
13876					/* Process mbox now */
13877					phba->sli.mbox_active = NULL;
13878					phba->sli.sli_flag &=
13879						~LPFC_SLI_MBOX_ACTIVE;
13880					if (pmb->mbox_cmpl)
13881						pmb->mbox_cmpl(phba, pmb);
13882				} else {
13883					/* Queue to worker thread to process */
13884					lpfc_mbox_cmpl_put(phba, pmb);
13885				}
13886			}
13887		} else
13888			spin_unlock_irqrestore(&phba->hbalock, iflag);
13889
13890		if ((work_ha_copy & HA_MBATT) &&
13891		    (phba->sli.mbox_active == NULL)) {
13892send_current_mbox:
13893			/* Process next mailbox command if there is one */
13894			do {
13895				rc = lpfc_sli_issue_mbox(phba, NULL,
13896							 MBX_NOWAIT);
13897			} while (rc == MBX_NOT_FINISHED);
13898			if (rc != MBX_SUCCESS)
13899				lpfc_printf_log(phba, KERN_ERR,
13900						LOG_TRACE_EVENT,
13901						"0349 rc should be "
13902						"MBX_SUCCESS\n");
13903		}
13904
13905		spin_lock_irqsave(&phba->hbalock, iflag);
13906		phba->work_ha |= work_ha_copy;
13907		spin_unlock_irqrestore(&phba->hbalock, iflag);
13908		lpfc_worker_wake_up(phba);
13909	}
13910	return IRQ_HANDLED;
13911unplug_error:
13912	spin_unlock_irqrestore(&phba->hbalock, iflag);
13913	return IRQ_HANDLED;
13914
13915} /* lpfc_sli_sp_intr_handler */
13916
13917/**
13918 * lpfc_sli_fp_intr_handler - Fast-path interrupt handler to SLI-3 device.
13919 * @irq: Interrupt number.
13920 * @dev_id: The device context pointer.
13921 *
13922 * This function is directly called from the PCI layer as an interrupt
13923 * service routine when device with SLI-3 interface spec is enabled with
13924 * MSI-X multi-message interrupt mode and there is a fast-path FCP IOCB
13925 * ring event in the HBA. However, when the device is enabled with either
13926 * MSI or Pin-IRQ interrupt mode, this function is called as part of the
13927 * device-level interrupt handler. When the PCI slot is in error recovery
13928 * or the HBA is undergoing initialization, the interrupt handler will not
13929 * process the interrupt. The SCSI FCP fast-path ring event are handled in
13930 * the intrrupt context. This function is called without any lock held.
13931 * It gets the hbalock to access and update SLI data structures.
13932 *
13933 * This function returns IRQ_HANDLED when interrupt is handled else it
13934 * returns IRQ_NONE.
13935 **/
13936irqreturn_t
13937lpfc_sli_fp_intr_handler(int irq, void *dev_id)
13938{
13939	struct lpfc_hba  *phba;
13940	uint32_t ha_copy;
13941	unsigned long status;
13942	unsigned long iflag;
13943	struct lpfc_sli_ring *pring;
13944
13945	/* Get the driver's phba structure from the dev_id and
13946	 * assume the HBA is not interrupting.
13947	 */
13948	phba = (struct lpfc_hba *) dev_id;
13949
13950	if (unlikely(!phba))
13951		return IRQ_NONE;
13952
13953	/*
13954	 * Stuff needs to be attented to when this function is invoked as an
13955	 * individual interrupt handler in MSI-X multi-message interrupt mode
13956	 */
13957	if (phba->intr_type == MSIX) {
13958		/* Check device state for handling interrupt */
13959		if (lpfc_intr_state_check(phba))
13960			return IRQ_NONE;
13961		/* Need to read HA REG for FCP ring and other ring events */
13962		if (lpfc_readl(phba->HAregaddr, &ha_copy))
13963			return IRQ_HANDLED;
13964		/* Clear up only attention source related to fast-path */
13965		spin_lock_irqsave(&phba->hbalock, iflag);
13966		/*
13967		 * If there is deferred error attention, do not check for
13968		 * any interrupt.
13969		 */
13970		if (unlikely(phba->hba_flag & DEFER_ERATT)) {
13971			spin_unlock_irqrestore(&phba->hbalock, iflag);
13972			return IRQ_NONE;
13973		}
13974		writel((ha_copy & (HA_R0_CLR_MSK | HA_R1_CLR_MSK)),
13975			phba->HAregaddr);
13976		readl(phba->HAregaddr); /* flush */
13977		spin_unlock_irqrestore(&phba->hbalock, iflag);
13978	} else
13979		ha_copy = phba->ha_copy;
13980
13981	/*
13982	 * Process all events on FCP ring. Take the optimized path for FCP IO.
13983	 */
13984	ha_copy &= ~(phba->work_ha_mask);
13985
13986	status = (ha_copy & (HA_RXMASK << (4*LPFC_FCP_RING)));
13987	status >>= (4*LPFC_FCP_RING);
13988	pring = &phba->sli.sli3_ring[LPFC_FCP_RING];
13989	if (status & HA_RXMASK)
13990		lpfc_sli_handle_fast_ring_event(phba, pring, status);
13991
13992	if (phba->cfg_multi_ring_support == 2) {
13993		/*
13994		 * Process all events on extra ring. Take the optimized path
13995		 * for extra ring IO.
13996		 */
13997		status = (ha_copy & (HA_RXMASK << (4*LPFC_EXTRA_RING)));
13998		status >>= (4*LPFC_EXTRA_RING);
13999		if (status & HA_RXMASK) {
14000			lpfc_sli_handle_fast_ring_event(phba,
14001					&phba->sli.sli3_ring[LPFC_EXTRA_RING],
14002					status);
14003		}
14004	}
14005	return IRQ_HANDLED;
14006}  /* lpfc_sli_fp_intr_handler */
14007
14008/**
14009 * lpfc_sli_intr_handler - Device-level interrupt handler to SLI-3 device
14010 * @irq: Interrupt number.
14011 * @dev_id: The device context pointer.
14012 *
14013 * This function is the HBA device-level interrupt handler to device with
14014 * SLI-3 interface spec, called from the PCI layer when either MSI or
14015 * Pin-IRQ interrupt mode is enabled and there is an event in the HBA which
14016 * requires driver attention. This function invokes the slow-path interrupt
14017 * attention handling function and fast-path interrupt attention handling
14018 * function in turn to process the relevant HBA attention events. This
14019 * function is called without any lock held. It gets the hbalock to access
14020 * and update SLI data structures.
14021 *
14022 * This function returns IRQ_HANDLED when interrupt is handled, else it
14023 * returns IRQ_NONE.
14024 **/
14025irqreturn_t
14026lpfc_sli_intr_handler(int irq, void *dev_id)
14027{
14028	struct lpfc_hba  *phba;
14029	irqreturn_t sp_irq_rc, fp_irq_rc;
14030	unsigned long status1, status2;
14031	uint32_t hc_copy;
14032
14033	/*
14034	 * Get the driver's phba structure from the dev_id and
14035	 * assume the HBA is not interrupting.
14036	 */
14037	phba = (struct lpfc_hba *) dev_id;
14038
14039	if (unlikely(!phba))
14040		return IRQ_NONE;
14041
14042	/* Check device state for handling interrupt */
14043	if (lpfc_intr_state_check(phba))
14044		return IRQ_NONE;
14045
14046	spin_lock(&phba->hbalock);
14047	if (lpfc_readl(phba->HAregaddr, &phba->ha_copy)) {
14048		spin_unlock(&phba->hbalock);
14049		return IRQ_HANDLED;
14050	}
14051
14052	if (unlikely(!phba->ha_copy)) {
14053		spin_unlock(&phba->hbalock);
14054		return IRQ_NONE;
14055	} else if (phba->ha_copy & HA_ERATT) {
14056		if (phba->hba_flag & HBA_ERATT_HANDLED)
14057			/* ERATT polling has handled ERATT */
14058			phba->ha_copy &= ~HA_ERATT;
14059		else
14060			/* Indicate interrupt handler handles ERATT */
14061			phba->hba_flag |= HBA_ERATT_HANDLED;
14062	}
14063
14064	/*
14065	 * If there is deferred error attention, do not check for any interrupt.
14066	 */
14067	if (unlikely(phba->hba_flag & DEFER_ERATT)) {
14068		spin_unlock(&phba->hbalock);
14069		return IRQ_NONE;
14070	}
14071
14072	/* Clear attention sources except link and error attentions */
14073	if (lpfc_readl(phba->HCregaddr, &hc_copy)) {
14074		spin_unlock(&phba->hbalock);
14075		return IRQ_HANDLED;
14076	}
14077	writel(hc_copy & ~(HC_MBINT_ENA | HC_R0INT_ENA | HC_R1INT_ENA
14078		| HC_R2INT_ENA | HC_LAINT_ENA | HC_ERINT_ENA),
14079		phba->HCregaddr);
14080	writel((phba->ha_copy & ~(HA_LATT | HA_ERATT)), phba->HAregaddr);
14081	writel(hc_copy, phba->HCregaddr);
14082	readl(phba->HAregaddr); /* flush */
14083	spin_unlock(&phba->hbalock);
14084
14085	/*
14086	 * Invokes slow-path host attention interrupt handling as appropriate.
14087	 */
14088
14089	/* status of events with mailbox and link attention */
14090	status1 = phba->ha_copy & (HA_MBATT | HA_LATT | HA_ERATT);
14091
14092	/* status of events with ELS ring */
14093	status2 = (phba->ha_copy & (HA_RXMASK  << (4*LPFC_ELS_RING)));
14094	status2 >>= (4*LPFC_ELS_RING);
14095
14096	if (status1 || (status2 & HA_RXMASK))
14097		sp_irq_rc = lpfc_sli_sp_intr_handler(irq, dev_id);
14098	else
14099		sp_irq_rc = IRQ_NONE;
14100
14101	/*
14102	 * Invoke fast-path host attention interrupt handling as appropriate.
14103	 */
14104
14105	/* status of events with FCP ring */
14106	status1 = (phba->ha_copy & (HA_RXMASK << (4*LPFC_FCP_RING)));
14107	status1 >>= (4*LPFC_FCP_RING);
14108
14109	/* status of events with extra ring */
14110	if (phba->cfg_multi_ring_support == 2) {
14111		status2 = (phba->ha_copy & (HA_RXMASK << (4*LPFC_EXTRA_RING)));
14112		status2 >>= (4*LPFC_EXTRA_RING);
14113	} else
14114		status2 = 0;
14115
14116	if ((status1 & HA_RXMASK) || (status2 & HA_RXMASK))
14117		fp_irq_rc = lpfc_sli_fp_intr_handler(irq, dev_id);
14118	else
14119		fp_irq_rc = IRQ_NONE;
14120
14121	/* Return device-level interrupt handling status */
14122	return (sp_irq_rc == IRQ_HANDLED) ? sp_irq_rc : fp_irq_rc;
14123}  /* lpfc_sli_intr_handler */
14124
14125/**
14126 * lpfc_sli4_els_xri_abort_event_proc - Process els xri abort event
14127 * @phba: pointer to lpfc hba data structure.
14128 *
14129 * This routine is invoked by the worker thread to process all the pending
14130 * SLI4 els abort xri events.
14131 **/
14132void lpfc_sli4_els_xri_abort_event_proc(struct lpfc_hba *phba)
14133{
14134	struct lpfc_cq_event *cq_event;
14135	unsigned long iflags;
14136
14137	/* First, declare the els xri abort event has been handled */
14138	spin_lock_irqsave(&phba->hbalock, iflags);
14139	phba->hba_flag &= ~ELS_XRI_ABORT_EVENT;
14140	spin_unlock_irqrestore(&phba->hbalock, iflags);
14141
14142	/* Now, handle all the els xri abort events */
14143	spin_lock_irqsave(&phba->sli4_hba.els_xri_abrt_list_lock, iflags);
14144	while (!list_empty(&phba->sli4_hba.sp_els_xri_aborted_work_queue)) {
14145		/* Get the first event from the head of the event queue */
14146		list_remove_head(&phba->sli4_hba.sp_els_xri_aborted_work_queue,
14147				 cq_event, struct lpfc_cq_event, list);
14148		spin_unlock_irqrestore(&phba->sli4_hba.els_xri_abrt_list_lock,
14149				       iflags);
14150		/* Notify aborted XRI for ELS work queue */
14151		lpfc_sli4_els_xri_aborted(phba, &cq_event->cqe.wcqe_axri);
14152
14153		/* Free the event processed back to the free pool */
14154		lpfc_sli4_cq_event_release(phba, cq_event);
14155		spin_lock_irqsave(&phba->sli4_hba.els_xri_abrt_list_lock,
14156				  iflags);
14157	}
14158	spin_unlock_irqrestore(&phba->sli4_hba.els_xri_abrt_list_lock, iflags);
14159}
14160
14161/**
14162 * lpfc_sli4_els_preprocess_rspiocbq - Get response iocbq from els wcqe
14163 * @phba: Pointer to HBA context object.
14164 * @irspiocbq: Pointer to work-queue completion queue entry.
14165 *
14166 * This routine handles an ELS work-queue completion event and construct
14167 * a pseudo response ELS IOCBQ from the SLI4 ELS WCQE for the common
14168 * discovery engine to handle.
14169 *
14170 * Return: Pointer to the receive IOCBQ, NULL otherwise.
14171 **/
14172static struct lpfc_iocbq *
14173lpfc_sli4_els_preprocess_rspiocbq(struct lpfc_hba *phba,
14174				  struct lpfc_iocbq *irspiocbq)
14175{
14176	struct lpfc_sli_ring *pring;
14177	struct lpfc_iocbq *cmdiocbq;
14178	struct lpfc_wcqe_complete *wcqe;
14179	unsigned long iflags;
14180
14181	pring = lpfc_phba_elsring(phba);
14182	if (unlikely(!pring))
14183		return NULL;
14184
14185	wcqe = &irspiocbq->cq_event.cqe.wcqe_cmpl;
14186	spin_lock_irqsave(&pring->ring_lock, iflags);
14187	pring->stats.iocb_event++;
14188	/* Look up the ELS command IOCB and create pseudo response IOCB */
14189	cmdiocbq = lpfc_sli_iocbq_lookup_by_tag(phba, pring,
14190				bf_get(lpfc_wcqe_c_request_tag, wcqe));
14191	if (unlikely(!cmdiocbq)) {
14192		spin_unlock_irqrestore(&pring->ring_lock, iflags);
14193		lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
14194				"0386 ELS complete with no corresponding "
14195				"cmdiocb: 0x%x 0x%x 0x%x 0x%x\n",
14196				wcqe->word0, wcqe->total_data_placed,
14197				wcqe->parameter, wcqe->word3);
14198		lpfc_sli_release_iocbq(phba, irspiocbq);
14199		return NULL;
14200	}
14201
14202	memcpy(&irspiocbq->wqe, &cmdiocbq->wqe, sizeof(union lpfc_wqe128));
14203	memcpy(&irspiocbq->wcqe_cmpl, wcqe, sizeof(*wcqe));
14204
14205	/* Put the iocb back on the txcmplq */
14206	lpfc_sli_ringtxcmpl_put(phba, pring, cmdiocbq);
14207	spin_unlock_irqrestore(&pring->ring_lock, iflags);
14208
14209	if (bf_get(lpfc_wcqe_c_xb, wcqe)) {
14210		spin_lock_irqsave(&phba->hbalock, iflags);
14211		irspiocbq->cmd_flag |= LPFC_EXCHANGE_BUSY;
14212		spin_unlock_irqrestore(&phba->hbalock, iflags);
14213	}
14214
14215	return irspiocbq;
14216}
14217
14218inline struct lpfc_cq_event *
14219lpfc_cq_event_setup(struct lpfc_hba *phba, void *entry, int size)
14220{
14221	struct lpfc_cq_event *cq_event;
14222
14223	/* Allocate a new internal CQ_EVENT entry */
14224	cq_event = lpfc_sli4_cq_event_alloc(phba);
14225	if (!cq_event) {
14226		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
14227				"0602 Failed to alloc CQ_EVENT entry\n");
14228		return NULL;
14229	}
14230
14231	/* Move the CQE into the event */
14232	memcpy(&cq_event->cqe, entry, size);
14233	return cq_event;
14234}
14235
14236/**
14237 * lpfc_sli4_sp_handle_async_event - Handle an asynchronous event
14238 * @phba: Pointer to HBA context object.
14239 * @mcqe: Pointer to mailbox completion queue entry.
14240 *
14241 * This routine process a mailbox completion queue entry with asynchronous
14242 * event.
14243 *
14244 * Return: true if work posted to worker thread, otherwise false.
14245 **/
14246static bool
14247lpfc_sli4_sp_handle_async_event(struct lpfc_hba *phba, struct lpfc_mcqe *mcqe)
14248{
14249	struct lpfc_cq_event *cq_event;
14250	unsigned long iflags;
14251
14252	lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
14253			"0392 Async Event: word0:x%x, word1:x%x, "
14254			"word2:x%x, word3:x%x\n", mcqe->word0,
14255			mcqe->mcqe_tag0, mcqe->mcqe_tag1, mcqe->trailer);
14256
14257	cq_event = lpfc_cq_event_setup(phba, mcqe, sizeof(struct lpfc_mcqe));
14258	if (!cq_event)
14259		return false;
14260
14261	spin_lock_irqsave(&phba->sli4_hba.asynce_list_lock, iflags);
14262	list_add_tail(&cq_event->list, &phba->sli4_hba.sp_asynce_work_queue);
14263	spin_unlock_irqrestore(&phba->sli4_hba.asynce_list_lock, iflags);
14264
14265	/* Set the async event flag */
14266	spin_lock_irqsave(&phba->hbalock, iflags);
14267	phba->hba_flag |= ASYNC_EVENT;
14268	spin_unlock_irqrestore(&phba->hbalock, iflags);
14269
14270	return true;
14271}
14272
14273/**
14274 * lpfc_sli4_sp_handle_mbox_event - Handle a mailbox completion event
14275 * @phba: Pointer to HBA context object.
14276 * @mcqe: Pointer to mailbox completion queue entry.
14277 *
14278 * This routine process a mailbox completion queue entry with mailbox
14279 * completion event.
14280 *
14281 * Return: true if work posted to worker thread, otherwise false.
14282 **/
14283static bool
14284lpfc_sli4_sp_handle_mbox_event(struct lpfc_hba *phba, struct lpfc_mcqe *mcqe)
14285{
14286	uint32_t mcqe_status;
14287	MAILBOX_t *mbox, *pmbox;
14288	struct lpfc_mqe *mqe;
14289	struct lpfc_vport *vport;
14290	struct lpfc_nodelist *ndlp;
14291	struct lpfc_dmabuf *mp;
14292	unsigned long iflags;
14293	LPFC_MBOXQ_t *pmb;
14294	bool workposted = false;
14295	int rc;
14296
14297	/* If not a mailbox complete MCQE, out by checking mailbox consume */
14298	if (!bf_get(lpfc_trailer_completed, mcqe))
14299		goto out_no_mqe_complete;
14300
14301	/* Get the reference to the active mbox command */
14302	spin_lock_irqsave(&phba->hbalock, iflags);
14303	pmb = phba->sli.mbox_active;
14304	if (unlikely(!pmb)) {
14305		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
14306				"1832 No pending MBOX command to handle\n");
14307		spin_unlock_irqrestore(&phba->hbalock, iflags);
14308		goto out_no_mqe_complete;
14309	}
14310	spin_unlock_irqrestore(&phba->hbalock, iflags);
14311	mqe = &pmb->u.mqe;
14312	pmbox = (MAILBOX_t *)&pmb->u.mqe;
14313	mbox = phba->mbox;
14314	vport = pmb->vport;
14315
14316	/* Reset heartbeat timer */
14317	phba->last_completion_time = jiffies;
14318	del_timer(&phba->sli.mbox_tmo);
14319
14320	/* Move mbox data to caller's mailbox region, do endian swapping */
14321	if (pmb->mbox_cmpl && mbox)
14322		lpfc_sli4_pcimem_bcopy(mbox, mqe, sizeof(struct lpfc_mqe));
14323
14324	/*
14325	 * For mcqe errors, conditionally move a modified error code to
14326	 * the mbox so that the error will not be missed.
14327	 */
14328	mcqe_status = bf_get(lpfc_mcqe_status, mcqe);
14329	if (mcqe_status != MB_CQE_STATUS_SUCCESS) {
14330		if (bf_get(lpfc_mqe_status, mqe) == MBX_SUCCESS)
14331			bf_set(lpfc_mqe_status, mqe,
14332			       (LPFC_MBX_ERROR_RANGE | mcqe_status));
14333	}
14334	if (pmb->mbox_flag & LPFC_MBX_IMED_UNREG) {
14335		pmb->mbox_flag &= ~LPFC_MBX_IMED_UNREG;
14336		lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_MBOX_VPORT,
14337				      "MBOX dflt rpi: status:x%x rpi:x%x",
14338				      mcqe_status,
14339				      pmbox->un.varWords[0], 0);
14340		if (mcqe_status == MB_CQE_STATUS_SUCCESS) {
14341			mp = pmb->ctx_buf;
14342			ndlp = pmb->ctx_ndlp;
14343
14344			/* Reg_LOGIN of dflt RPI was successful. Mark the
14345			 * node as having an UNREG_LOGIN in progress to stop
14346			 * an unsolicited PLOGI from the same NPortId from
14347			 * starting another mailbox transaction.
14348			 */
14349			spin_lock_irqsave(&ndlp->lock, iflags);
14350			ndlp->nlp_flag |= NLP_UNREG_INP;
14351			spin_unlock_irqrestore(&ndlp->lock, iflags);
14352			lpfc_unreg_login(phba, vport->vpi,
14353					 pmbox->un.varWords[0], pmb);
14354			pmb->mbox_cmpl = lpfc_mbx_cmpl_dflt_rpi;
14355			pmb->ctx_buf = mp;
14356
14357			/* No reference taken here.  This is a default
14358			 * RPI reg/immediate unreg cycle. The reference was
14359			 * taken in the reg rpi path and is released when
14360			 * this mailbox completes.
14361			 */
14362			pmb->ctx_ndlp = ndlp;
14363			pmb->vport = vport;
14364			rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
14365			if (rc != MBX_BUSY)
14366				lpfc_printf_log(phba, KERN_ERR,
14367						LOG_TRACE_EVENT,
14368						"0385 rc should "
14369						"have been MBX_BUSY\n");
14370			if (rc != MBX_NOT_FINISHED)
14371				goto send_current_mbox;
14372		}
14373	}
14374	spin_lock_irqsave(&phba->pport->work_port_lock, iflags);
14375	phba->pport->work_port_events &= ~WORKER_MBOX_TMO;
14376	spin_unlock_irqrestore(&phba->pport->work_port_lock, iflags);
14377
14378	/* Do NOT queue MBX_HEARTBEAT to the worker thread for processing. */
14379	if (pmbox->mbxCommand == MBX_HEARTBEAT) {
14380		spin_lock_irqsave(&phba->hbalock, iflags);
14381		/* Release the mailbox command posting token */
14382		phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
14383		phba->sli.mbox_active = NULL;
14384		if (bf_get(lpfc_trailer_consumed, mcqe))
14385			lpfc_sli4_mq_release(phba->sli4_hba.mbx_wq);
14386		spin_unlock_irqrestore(&phba->hbalock, iflags);
14387
14388		/* Post the next mbox command, if there is one */
14389		lpfc_sli4_post_async_mbox(phba);
14390
14391		/* Process cmpl now */
14392		if (pmb->mbox_cmpl)
14393			pmb->mbox_cmpl(phba, pmb);
14394		return false;
14395	}
14396
14397	/* There is mailbox completion work to queue to the worker thread */
14398	spin_lock_irqsave(&phba->hbalock, iflags);
14399	__lpfc_mbox_cmpl_put(phba, pmb);
14400	phba->work_ha |= HA_MBATT;
14401	spin_unlock_irqrestore(&phba->hbalock, iflags);
14402	workposted = true;
14403
14404send_current_mbox:
14405	spin_lock_irqsave(&phba->hbalock, iflags);
14406	/* Release the mailbox command posting token */
14407	phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
14408	/* Setting active mailbox pointer need to be in sync to flag clear */
14409	phba->sli.mbox_active = NULL;
14410	if (bf_get(lpfc_trailer_consumed, mcqe))
14411		lpfc_sli4_mq_release(phba->sli4_hba.mbx_wq);
14412	spin_unlock_irqrestore(&phba->hbalock, iflags);
14413	/* Wake up worker thread to post the next pending mailbox command */
14414	lpfc_worker_wake_up(phba);
14415	return workposted;
14416
14417out_no_mqe_complete:
14418	spin_lock_irqsave(&phba->hbalock, iflags);
14419	if (bf_get(lpfc_trailer_consumed, mcqe))
14420		lpfc_sli4_mq_release(phba->sli4_hba.mbx_wq);
14421	spin_unlock_irqrestore(&phba->hbalock, iflags);
14422	return false;
14423}
14424
14425/**
14426 * lpfc_sli4_sp_handle_mcqe - Process a mailbox completion queue entry
14427 * @phba: Pointer to HBA context object.
14428 * @cq: Pointer to associated CQ
14429 * @cqe: Pointer to mailbox completion queue entry.
14430 *
14431 * This routine process a mailbox completion queue entry, it invokes the
14432 * proper mailbox complete handling or asynchronous event handling routine
14433 * according to the MCQE's async bit.
14434 *
14435 * Return: true if work posted to worker thread, otherwise false.
14436 **/
14437static bool
14438lpfc_sli4_sp_handle_mcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
14439			 struct lpfc_cqe *cqe)
14440{
14441	struct lpfc_mcqe mcqe;
14442	bool workposted;
14443
14444	cq->CQ_mbox++;
14445
14446	/* Copy the mailbox MCQE and convert endian order as needed */
14447	lpfc_sli4_pcimem_bcopy(cqe, &mcqe, sizeof(struct lpfc_mcqe));
14448
14449	/* Invoke the proper event handling routine */
14450	if (!bf_get(lpfc_trailer_async, &mcqe))
14451		workposted = lpfc_sli4_sp_handle_mbox_event(phba, &mcqe);
14452	else
14453		workposted = lpfc_sli4_sp_handle_async_event(phba, &mcqe);
14454	return workposted;
14455}
14456
14457/**
14458 * lpfc_sli4_sp_handle_els_wcqe - Handle els work-queue completion event
14459 * @phba: Pointer to HBA context object.
14460 * @cq: Pointer to associated CQ
14461 * @wcqe: Pointer to work-queue completion queue entry.
14462 *
14463 * This routine handles an ELS work-queue completion event.
14464 *
14465 * Return: true if work posted to worker thread, otherwise false.
14466 **/
14467static bool
14468lpfc_sli4_sp_handle_els_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
14469			     struct lpfc_wcqe_complete *wcqe)
14470{
14471	struct lpfc_iocbq *irspiocbq;
14472	unsigned long iflags;
14473	struct lpfc_sli_ring *pring = cq->pring;
14474	int txq_cnt = 0;
14475	int txcmplq_cnt = 0;
14476
14477	/* Check for response status */
14478	if (unlikely(bf_get(lpfc_wcqe_c_status, wcqe))) {
14479		/* Log the error status */
14480		lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
14481				"0357 ELS CQE error: status=x%x: "
14482				"CQE: %08x %08x %08x %08x\n",
14483				bf_get(lpfc_wcqe_c_status, wcqe),
14484				wcqe->word0, wcqe->total_data_placed,
14485				wcqe->parameter, wcqe->word3);
14486	}
14487
14488	/* Get an irspiocbq for later ELS response processing use */
14489	irspiocbq = lpfc_sli_get_iocbq(phba);
14490	if (!irspiocbq) {
14491		if (!list_empty(&pring->txq))
14492			txq_cnt++;
14493		if (!list_empty(&pring->txcmplq))
14494			txcmplq_cnt++;
14495		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
14496			"0387 NO IOCBQ data: txq_cnt=%d iocb_cnt=%d "
14497			"els_txcmplq_cnt=%d\n",
14498			txq_cnt, phba->iocb_cnt,
14499			txcmplq_cnt);
14500		return false;
14501	}
14502
14503	/* Save off the slow-path queue event for work thread to process */
14504	memcpy(&irspiocbq->cq_event.cqe.wcqe_cmpl, wcqe, sizeof(*wcqe));
14505	spin_lock_irqsave(&phba->hbalock, iflags);
14506	list_add_tail(&irspiocbq->cq_event.list,
14507		      &phba->sli4_hba.sp_queue_event);
14508	phba->hba_flag |= HBA_SP_QUEUE_EVT;
14509	spin_unlock_irqrestore(&phba->hbalock, iflags);
14510
14511	return true;
14512}
14513
14514/**
14515 * lpfc_sli4_sp_handle_rel_wcqe - Handle slow-path WQ entry consumed event
14516 * @phba: Pointer to HBA context object.
14517 * @wcqe: Pointer to work-queue completion queue entry.
14518 *
14519 * This routine handles slow-path WQ entry consumed event by invoking the
14520 * proper WQ release routine to the slow-path WQ.
14521 **/
14522static void
14523lpfc_sli4_sp_handle_rel_wcqe(struct lpfc_hba *phba,
14524			     struct lpfc_wcqe_release *wcqe)
14525{
14526	/* sanity check on queue memory */
14527	if (unlikely(!phba->sli4_hba.els_wq))
14528		return;
14529	/* Check for the slow-path ELS work queue */
14530	if (bf_get(lpfc_wcqe_r_wq_id, wcqe) == phba->sli4_hba.els_wq->queue_id)
14531		lpfc_sli4_wq_release(phba->sli4_hba.els_wq,
14532				     bf_get(lpfc_wcqe_r_wqe_index, wcqe));
14533	else
14534		lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
14535				"2579 Slow-path wqe consume event carries "
14536				"miss-matched qid: wcqe-qid=x%x, sp-qid=x%x\n",
14537				bf_get(lpfc_wcqe_r_wqe_index, wcqe),
14538				phba->sli4_hba.els_wq->queue_id);
14539}
14540
14541/**
14542 * lpfc_sli4_sp_handle_abort_xri_wcqe - Handle a xri abort event
14543 * @phba: Pointer to HBA context object.
14544 * @cq: Pointer to a WQ completion queue.
14545 * @wcqe: Pointer to work-queue completion queue entry.
14546 *
14547 * This routine handles an XRI abort event.
14548 *
14549 * Return: true if work posted to worker thread, otherwise false.
14550 **/
14551static bool
14552lpfc_sli4_sp_handle_abort_xri_wcqe(struct lpfc_hba *phba,
14553				   struct lpfc_queue *cq,
14554				   struct sli4_wcqe_xri_aborted *wcqe)
14555{
14556	bool workposted = false;
14557	struct lpfc_cq_event *cq_event;
14558	unsigned long iflags;
14559
14560	switch (cq->subtype) {
14561	case LPFC_IO:
14562		lpfc_sli4_io_xri_aborted(phba, wcqe, cq->hdwq);
14563		if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
14564			/* Notify aborted XRI for NVME work queue */
14565			if (phba->nvmet_support)
14566				lpfc_sli4_nvmet_xri_aborted(phba, wcqe);
14567		}
14568		workposted = false;
14569		break;
14570	case LPFC_NVME_LS: /* NVME LS uses ELS resources */
14571	case LPFC_ELS:
14572		cq_event = lpfc_cq_event_setup(phba, wcqe, sizeof(*wcqe));
14573		if (!cq_event) {
14574			workposted = false;
14575			break;
14576		}
14577		cq_event->hdwq = cq->hdwq;
14578		spin_lock_irqsave(&phba->sli4_hba.els_xri_abrt_list_lock,
14579				  iflags);
14580		list_add_tail(&cq_event->list,
14581			      &phba->sli4_hba.sp_els_xri_aborted_work_queue);
14582		/* Set the els xri abort event flag */
14583		phba->hba_flag |= ELS_XRI_ABORT_EVENT;
14584		spin_unlock_irqrestore(&phba->sli4_hba.els_xri_abrt_list_lock,
14585				       iflags);
14586		workposted = true;
14587		break;
14588	default:
14589		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
14590				"0603 Invalid CQ subtype %d: "
14591				"%08x %08x %08x %08x\n",
14592				cq->subtype, wcqe->word0, wcqe->parameter,
14593				wcqe->word2, wcqe->word3);
14594		workposted = false;
14595		break;
14596	}
14597	return workposted;
14598}
14599
14600#define FC_RCTL_MDS_DIAGS	0xF4
14601
14602/**
14603 * lpfc_sli4_sp_handle_rcqe - Process a receive-queue completion queue entry
14604 * @phba: Pointer to HBA context object.
14605 * @rcqe: Pointer to receive-queue completion queue entry.
14606 *
14607 * This routine process a receive-queue completion queue entry.
14608 *
14609 * Return: true if work posted to worker thread, otherwise false.
14610 **/
14611static bool
14612lpfc_sli4_sp_handle_rcqe(struct lpfc_hba *phba, struct lpfc_rcqe *rcqe)
14613{
14614	bool workposted = false;
14615	struct fc_frame_header *fc_hdr;
14616	struct lpfc_queue *hrq = phba->sli4_hba.hdr_rq;
14617	struct lpfc_queue *drq = phba->sli4_hba.dat_rq;
14618	struct lpfc_nvmet_tgtport *tgtp;
14619	struct hbq_dmabuf *dma_buf;
14620	uint32_t status, rq_id;
14621	unsigned long iflags;
14622
14623	/* sanity check on queue memory */
14624	if (unlikely(!hrq) || unlikely(!drq))
14625		return workposted;
14626
14627	if (bf_get(lpfc_cqe_code, rcqe) == CQE_CODE_RECEIVE_V1)
14628		rq_id = bf_get(lpfc_rcqe_rq_id_v1, rcqe);
14629	else
14630		rq_id = bf_get(lpfc_rcqe_rq_id, rcqe);
14631	if (rq_id != hrq->queue_id)
14632		goto out;
14633
14634	status = bf_get(lpfc_rcqe_status, rcqe);
14635	switch (status) {
14636	case FC_STATUS_RQ_BUF_LEN_EXCEEDED:
14637		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
14638				"2537 Receive Frame Truncated!!\n");
14639		fallthrough;
14640	case FC_STATUS_RQ_SUCCESS:
14641		spin_lock_irqsave(&phba->hbalock, iflags);
14642		lpfc_sli4_rq_release(hrq, drq);
14643		dma_buf = lpfc_sli_hbqbuf_get(&phba->hbqs[0].hbq_buffer_list);
14644		if (!dma_buf) {
14645			hrq->RQ_no_buf_found++;
14646			spin_unlock_irqrestore(&phba->hbalock, iflags);
14647			goto out;
14648		}
14649		hrq->RQ_rcv_buf++;
14650		hrq->RQ_buf_posted--;
14651		memcpy(&dma_buf->cq_event.cqe.rcqe_cmpl, rcqe, sizeof(*rcqe));
14652
14653		fc_hdr = (struct fc_frame_header *)dma_buf->hbuf.virt;
14654
14655		if (fc_hdr->fh_r_ctl == FC_RCTL_MDS_DIAGS ||
14656		    fc_hdr->fh_r_ctl == FC_RCTL_DD_UNSOL_DATA) {
14657			spin_unlock_irqrestore(&phba->hbalock, iflags);
14658			/* Handle MDS Loopback frames */
14659			if  (!test_bit(FC_UNLOADING, &phba->pport->load_flag))
14660				lpfc_sli4_handle_mds_loopback(phba->pport,
14661							      dma_buf);
14662			else
14663				lpfc_in_buf_free(phba, &dma_buf->dbuf);
14664			break;
14665		}
14666
14667		/* save off the frame for the work thread to process */
14668		list_add_tail(&dma_buf->cq_event.list,
14669			      &phba->sli4_hba.sp_queue_event);
14670		/* Frame received */
14671		phba->hba_flag |= HBA_SP_QUEUE_EVT;
14672		spin_unlock_irqrestore(&phba->hbalock, iflags);
14673		workposted = true;
14674		break;
14675	case FC_STATUS_INSUFF_BUF_FRM_DISC:
14676		if (phba->nvmet_support) {
14677			tgtp = phba->targetport->private;
14678			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
14679					"6402 RQE Error x%x, posted %d err_cnt "
14680					"%d: %x %x %x\n",
14681					status, hrq->RQ_buf_posted,
14682					hrq->RQ_no_posted_buf,
14683					atomic_read(&tgtp->rcv_fcp_cmd_in),
14684					atomic_read(&tgtp->rcv_fcp_cmd_out),
14685					atomic_read(&tgtp->xmt_fcp_release));
14686		}
14687		fallthrough;
14688
14689	case FC_STATUS_INSUFF_BUF_NEED_BUF:
14690		hrq->RQ_no_posted_buf++;
14691		/* Post more buffers if possible */
14692		spin_lock_irqsave(&phba->hbalock, iflags);
14693		phba->hba_flag |= HBA_POST_RECEIVE_BUFFER;
14694		spin_unlock_irqrestore(&phba->hbalock, iflags);
14695		workposted = true;
14696		break;
14697	case FC_STATUS_RQ_DMA_FAILURE:
14698		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
14699				"2564 RQE DMA Error x%x, x%08x x%08x x%08x "
14700				"x%08x\n",
14701				status, rcqe->word0, rcqe->word1,
14702				rcqe->word2, rcqe->word3);
14703
14704		/* If IV set, no further recovery */
14705		if (bf_get(lpfc_rcqe_iv, rcqe))
14706			break;
14707
14708		/* recycle consumed resource */
14709		spin_lock_irqsave(&phba->hbalock, iflags);
14710		lpfc_sli4_rq_release(hrq, drq);
14711		dma_buf = lpfc_sli_hbqbuf_get(&phba->hbqs[0].hbq_buffer_list);
14712		if (!dma_buf) {
14713			hrq->RQ_no_buf_found++;
14714			spin_unlock_irqrestore(&phba->hbalock, iflags);
14715			break;
14716		}
14717		hrq->RQ_rcv_buf++;
14718		hrq->RQ_buf_posted--;
14719		spin_unlock_irqrestore(&phba->hbalock, iflags);
14720		lpfc_in_buf_free(phba, &dma_buf->dbuf);
14721		break;
14722	default:
14723		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
14724				"2565 Unexpected RQE Status x%x, w0-3 x%08x "
14725				"x%08x x%08x x%08x\n",
14726				status, rcqe->word0, rcqe->word1,
14727				rcqe->word2, rcqe->word3);
14728		break;
14729	}
14730out:
14731	return workposted;
14732}
14733
14734/**
14735 * lpfc_sli4_sp_handle_cqe - Process a slow path completion queue entry
14736 * @phba: Pointer to HBA context object.
14737 * @cq: Pointer to the completion queue.
14738 * @cqe: Pointer to a completion queue entry.
14739 *
14740 * This routine process a slow-path work-queue or receive queue completion queue
14741 * entry.
14742 *
14743 * Return: true if work posted to worker thread, otherwise false.
14744 **/
14745static bool
14746lpfc_sli4_sp_handle_cqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
14747			 struct lpfc_cqe *cqe)
14748{
14749	struct lpfc_cqe cqevt;
14750	bool workposted = false;
14751
14752	/* Copy the work queue CQE and convert endian order if needed */
14753	lpfc_sli4_pcimem_bcopy(cqe, &cqevt, sizeof(struct lpfc_cqe));
14754
14755	/* Check and process for different type of WCQE and dispatch */
14756	switch (bf_get(lpfc_cqe_code, &cqevt)) {
14757	case CQE_CODE_COMPL_WQE:
14758		/* Process the WQ/RQ complete event */
14759		phba->last_completion_time = jiffies;
14760		workposted = lpfc_sli4_sp_handle_els_wcqe(phba, cq,
14761				(struct lpfc_wcqe_complete *)&cqevt);
14762		break;
14763	case CQE_CODE_RELEASE_WQE:
14764		/* Process the WQ release event */
14765		lpfc_sli4_sp_handle_rel_wcqe(phba,
14766				(struct lpfc_wcqe_release *)&cqevt);
14767		break;
14768	case CQE_CODE_XRI_ABORTED:
14769		/* Process the WQ XRI abort event */
14770		phba->last_completion_time = jiffies;
14771		workposted = lpfc_sli4_sp_handle_abort_xri_wcqe(phba, cq,
14772				(struct sli4_wcqe_xri_aborted *)&cqevt);
14773		break;
14774	case CQE_CODE_RECEIVE:
14775	case CQE_CODE_RECEIVE_V1:
14776		/* Process the RQ event */
14777		phba->last_completion_time = jiffies;
14778		workposted = lpfc_sli4_sp_handle_rcqe(phba,
14779				(struct lpfc_rcqe *)&cqevt);
14780		break;
14781	default:
14782		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
14783				"0388 Not a valid WCQE code: x%x\n",
14784				bf_get(lpfc_cqe_code, &cqevt));
14785		break;
14786	}
14787	return workposted;
14788}
14789
14790/**
14791 * lpfc_sli4_sp_handle_eqe - Process a slow-path event queue entry
14792 * @phba: Pointer to HBA context object.
14793 * @eqe: Pointer to fast-path event queue entry.
14794 * @speq: Pointer to slow-path event queue.
14795 *
14796 * This routine process a event queue entry from the slow-path event queue.
14797 * It will check the MajorCode and MinorCode to determine this is for a
14798 * completion event on a completion queue, if not, an error shall be logged
14799 * and just return. Otherwise, it will get to the corresponding completion
14800 * queue and process all the entries on that completion queue, rearm the
14801 * completion queue, and then return.
14802 *
14803 **/
14804static void
14805lpfc_sli4_sp_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe,
14806	struct lpfc_queue *speq)
14807{
14808	struct lpfc_queue *cq = NULL, *childq;
14809	uint16_t cqid;
14810	int ret = 0;
14811
14812	/* Get the reference to the corresponding CQ */
14813	cqid = bf_get_le32(lpfc_eqe_resource_id, eqe);
14814
14815	list_for_each_entry(childq, &speq->child_list, list) {
14816		if (childq->queue_id == cqid) {
14817			cq = childq;
14818			break;
14819		}
14820	}
14821	if (unlikely(!cq)) {
14822		if (phba->sli.sli_flag & LPFC_SLI_ACTIVE)
14823			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
14824					"0365 Slow-path CQ identifier "
14825					"(%d) does not exist\n", cqid);
14826		return;
14827	}
14828
14829	/* Save EQ associated with this CQ */
14830	cq->assoc_qp = speq;
14831
14832	if (is_kdump_kernel())
14833		ret = queue_work(phba->wq, &cq->spwork);
14834	else
14835		ret = queue_work_on(cq->chann, phba->wq, &cq->spwork);
14836
14837	if (!ret)
14838		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
14839				"0390 Cannot schedule queue work "
14840				"for CQ eqcqid=%d, cqid=%d on CPU %d\n",
14841				cqid, cq->queue_id, raw_smp_processor_id());
14842}
14843
14844/**
14845 * __lpfc_sli4_process_cq - Process elements of a CQ
14846 * @phba: Pointer to HBA context object.
14847 * @cq: Pointer to CQ to be processed
14848 * @handler: Routine to process each cqe
14849 * @delay: Pointer to usdelay to set in case of rescheduling of the handler
14850 *
14851 * This routine processes completion queue entries in a CQ. While a valid
14852 * queue element is found, the handler is called. During processing checks
14853 * are made for periodic doorbell writes to let the hardware know of
14854 * element consumption.
14855 *
14856 * If the max limit on cqes to process is hit, or there are no more valid
14857 * entries, the loop stops. If we processed a sufficient number of elements,
14858 * meaning there is sufficient load, rather than rearming and generating
14859 * another interrupt, a cq rescheduling delay will be set. A delay of 0
14860 * indicates no rescheduling.
14861 *
14862 * Returns True if work scheduled, False otherwise.
14863 **/
14864static bool
14865__lpfc_sli4_process_cq(struct lpfc_hba *phba, struct lpfc_queue *cq,
14866	bool (*handler)(struct lpfc_hba *, struct lpfc_queue *,
14867			struct lpfc_cqe *), unsigned long *delay)
14868{
14869	struct lpfc_cqe *cqe;
14870	bool workposted = false;
14871	int count = 0, consumed = 0;
14872	bool arm = true;
14873
14874	/* default - no reschedule */
14875	*delay = 0;
14876
14877	if (cmpxchg(&cq->queue_claimed, 0, 1) != 0)
14878		goto rearm_and_exit;
14879
14880	/* Process all the entries to the CQ */
14881	cq->q_flag = 0;
14882	cqe = lpfc_sli4_cq_get(cq);
14883	while (cqe) {
14884		workposted |= handler(phba, cq, cqe);
14885		__lpfc_sli4_consume_cqe(phba, cq, cqe);
14886
14887		consumed++;
14888		if (!(++count % cq->max_proc_limit))
14889			break;
14890
14891		if (!(count % cq->notify_interval)) {
14892			phba->sli4_hba.sli4_write_cq_db(phba, cq, consumed,
14893						LPFC_QUEUE_NOARM);
14894			consumed = 0;
14895			cq->assoc_qp->q_flag |= HBA_EQ_DELAY_CHK;
14896		}
14897
14898		if (count == LPFC_NVMET_CQ_NOTIFY)
14899			cq->q_flag |= HBA_NVMET_CQ_NOTIFY;
14900
14901		cqe = lpfc_sli4_cq_get(cq);
14902	}
14903	if (count >= phba->cfg_cq_poll_threshold) {
14904		*delay = 1;
14905		arm = false;
14906	}
14907
14908	/* Track the max number of CQEs processed in 1 EQ */
14909	if (count > cq->CQ_max_cqe)
14910		cq->CQ_max_cqe = count;
14911
14912	cq->assoc_qp->EQ_cqe_cnt += count;
14913
14914	/* Catch the no cq entry condition */
14915	if (unlikely(count == 0))
14916		lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
14917				"0369 No entry from completion queue "
14918				"qid=%d\n", cq->queue_id);
14919
14920	xchg(&cq->queue_claimed, 0);
14921
14922rearm_and_exit:
14923	phba->sli4_hba.sli4_write_cq_db(phba, cq, consumed,
14924			arm ?  LPFC_QUEUE_REARM : LPFC_QUEUE_NOARM);
14925
14926	return workposted;
14927}
14928
14929/**
14930 * __lpfc_sli4_sp_process_cq - Process a slow-path event queue entry
14931 * @cq: pointer to CQ to process
14932 *
14933 * This routine calls the cq processing routine with a handler specific
14934 * to the type of queue bound to it.
14935 *
14936 * The CQ routine returns two values: the first is the calling status,
14937 * which indicates whether work was queued to the  background discovery
14938 * thread. If true, the routine should wakeup the discovery thread;
14939 * the second is the delay parameter. If non-zero, rather than rearming
14940 * the CQ and yet another interrupt, the CQ handler should be queued so
14941 * that it is processed in a subsequent polling action. The value of
14942 * the delay indicates when to reschedule it.
14943 **/
14944static void
14945__lpfc_sli4_sp_process_cq(struct lpfc_queue *cq)
14946{
14947	struct lpfc_hba *phba = cq->phba;
14948	unsigned long delay;
14949	bool workposted = false;
14950	int ret = 0;
14951
14952	/* Process and rearm the CQ */
14953	switch (cq->type) {
14954	case LPFC_MCQ:
14955		workposted |= __lpfc_sli4_process_cq(phba, cq,
14956						lpfc_sli4_sp_handle_mcqe,
14957						&delay);
14958		break;
14959	case LPFC_WCQ:
14960		if (cq->subtype == LPFC_IO)
14961			workposted |= __lpfc_sli4_process_cq(phba, cq,
14962						lpfc_sli4_fp_handle_cqe,
14963						&delay);
14964		else
14965			workposted |= __lpfc_sli4_process_cq(phba, cq,
14966						lpfc_sli4_sp_handle_cqe,
14967						&delay);
14968		break;
14969	default:
14970		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
14971				"0370 Invalid completion queue type (%d)\n",
14972				cq->type);
14973		return;
14974	}
14975
14976	if (delay) {
14977		if (is_kdump_kernel())
14978			ret = queue_delayed_work(phba->wq, &cq->sched_spwork,
14979						delay);
14980		else
14981			ret = queue_delayed_work_on(cq->chann, phba->wq,
14982						&cq->sched_spwork, delay);
14983		if (!ret)
14984			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
14985				"0394 Cannot schedule queue work "
14986				"for cqid=%d on CPU %d\n",
14987				cq->queue_id, cq->chann);
14988	}
14989
14990	/* wake up worker thread if there are works to be done */
14991	if (workposted)
14992		lpfc_worker_wake_up(phba);
14993}
14994
14995/**
14996 * lpfc_sli4_sp_process_cq - slow-path work handler when started by
14997 *   interrupt
14998 * @work: pointer to work element
14999 *
15000 * translates from the work handler and calls the slow-path handler.
15001 **/
15002static void
15003lpfc_sli4_sp_process_cq(struct work_struct *work)
15004{
15005	struct lpfc_queue *cq = container_of(work, struct lpfc_queue, spwork);
15006
15007	__lpfc_sli4_sp_process_cq(cq);
15008}
15009
15010/**
15011 * lpfc_sli4_dly_sp_process_cq - slow-path work handler when started by timer
15012 * @work: pointer to work element
15013 *
15014 * translates from the work handler and calls the slow-path handler.
15015 **/
15016static void
15017lpfc_sli4_dly_sp_process_cq(struct work_struct *work)
15018{
15019	struct lpfc_queue *cq = container_of(to_delayed_work(work),
15020					struct lpfc_queue, sched_spwork);
15021
15022	__lpfc_sli4_sp_process_cq(cq);
15023}
15024
15025/**
15026 * lpfc_sli4_fp_handle_fcp_wcqe - Process fast-path work queue completion entry
15027 * @phba: Pointer to HBA context object.
15028 * @cq: Pointer to associated CQ
15029 * @wcqe: Pointer to work-queue completion queue entry.
15030 *
15031 * This routine process a fast-path work queue completion entry from fast-path
15032 * event queue for FCP command response completion.
15033 **/
15034static void
15035lpfc_sli4_fp_handle_fcp_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
15036			     struct lpfc_wcqe_complete *wcqe)
15037{
15038	struct lpfc_sli_ring *pring = cq->pring;
15039	struct lpfc_iocbq *cmdiocbq;
15040	unsigned long iflags;
15041
15042	/* Check for response status */
15043	if (unlikely(bf_get(lpfc_wcqe_c_status, wcqe))) {
15044		/* If resource errors reported from HBA, reduce queue
15045		 * depth of the SCSI device.
15046		 */
15047		if (((bf_get(lpfc_wcqe_c_status, wcqe) ==
15048		     IOSTAT_LOCAL_REJECT)) &&
15049		    ((wcqe->parameter & IOERR_PARAM_MASK) ==
15050		     IOERR_NO_RESOURCES))
15051			phba->lpfc_rampdown_queue_depth(phba);
15052
15053		/* Log the cmpl status */
15054		lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
15055				"0373 FCP CQE cmpl: status=x%x: "
15056				"CQE: %08x %08x %08x %08x\n",
15057				bf_get(lpfc_wcqe_c_status, wcqe),
15058				wcqe->word0, wcqe->total_data_placed,
15059				wcqe->parameter, wcqe->word3);
15060	}
15061
15062	/* Look up the FCP command IOCB and create pseudo response IOCB */
15063	spin_lock_irqsave(&pring->ring_lock, iflags);
15064	pring->stats.iocb_event++;
15065	cmdiocbq = lpfc_sli_iocbq_lookup_by_tag(phba, pring,
15066				bf_get(lpfc_wcqe_c_request_tag, wcqe));
15067	spin_unlock_irqrestore(&pring->ring_lock, iflags);
15068	if (unlikely(!cmdiocbq)) {
15069		lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
15070				"0374 FCP complete with no corresponding "
15071				"cmdiocb: iotag (%d)\n",
15072				bf_get(lpfc_wcqe_c_request_tag, wcqe));
15073		return;
15074	}
15075#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
15076	cmdiocbq->isr_timestamp = cq->isr_timestamp;
15077#endif
15078	if (bf_get(lpfc_wcqe_c_xb, wcqe)) {
15079		spin_lock_irqsave(&phba->hbalock, iflags);
15080		cmdiocbq->cmd_flag |= LPFC_EXCHANGE_BUSY;
15081		spin_unlock_irqrestore(&phba->hbalock, iflags);
15082	}
15083
15084	if (cmdiocbq->cmd_cmpl) {
15085		/* For FCP the flag is cleared in cmd_cmpl */
15086		if (!(cmdiocbq->cmd_flag & LPFC_IO_FCP) &&
15087		    cmdiocbq->cmd_flag & LPFC_DRIVER_ABORTED) {
15088			spin_lock_irqsave(&phba->hbalock, iflags);
15089			cmdiocbq->cmd_flag &= ~LPFC_DRIVER_ABORTED;
15090			spin_unlock_irqrestore(&phba->hbalock, iflags);
15091		}
15092
15093		/* Pass the cmd_iocb and the wcqe to the upper layer */
15094		memcpy(&cmdiocbq->wcqe_cmpl, wcqe,
15095		       sizeof(struct lpfc_wcqe_complete));
15096		cmdiocbq->cmd_cmpl(phba, cmdiocbq, cmdiocbq);
15097	} else {
15098		lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
15099				"0375 FCP cmdiocb not callback function "
15100				"iotag: (%d)\n",
15101				bf_get(lpfc_wcqe_c_request_tag, wcqe));
15102	}
15103}
15104
15105/**
15106 * lpfc_sli4_fp_handle_rel_wcqe - Handle fast-path WQ entry consumed event
15107 * @phba: Pointer to HBA context object.
15108 * @cq: Pointer to completion queue.
15109 * @wcqe: Pointer to work-queue completion queue entry.
15110 *
15111 * This routine handles an fast-path WQ entry consumed event by invoking the
15112 * proper WQ release routine to the slow-path WQ.
15113 **/
15114static void
15115lpfc_sli4_fp_handle_rel_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
15116			     struct lpfc_wcqe_release *wcqe)
15117{
15118	struct lpfc_queue *childwq;
15119	bool wqid_matched = false;
15120	uint16_t hba_wqid;
15121
15122	/* Check for fast-path FCP work queue release */
15123	hba_wqid = bf_get(lpfc_wcqe_r_wq_id, wcqe);
15124	list_for_each_entry(childwq, &cq->child_list, list) {
15125		if (childwq->queue_id == hba_wqid) {
15126			lpfc_sli4_wq_release(childwq,
15127					bf_get(lpfc_wcqe_r_wqe_index, wcqe));
15128			if (childwq->q_flag & HBA_NVMET_WQFULL)
15129				lpfc_nvmet_wqfull_process(phba, childwq);
15130			wqid_matched = true;
15131			break;
15132		}
15133	}
15134	/* Report warning log message if no match found */
15135	if (wqid_matched != true)
15136		lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
15137				"2580 Fast-path wqe consume event carries "
15138				"miss-matched qid: wcqe-qid=x%x\n", hba_wqid);
15139}
15140
15141/**
15142 * lpfc_sli4_nvmet_handle_rcqe - Process a receive-queue completion queue entry
15143 * @phba: Pointer to HBA context object.
15144 * @cq: Pointer to completion queue.
15145 * @rcqe: Pointer to receive-queue completion queue entry.
15146 *
15147 * This routine process a receive-queue completion queue entry.
15148 *
15149 * Return: true if work posted to worker thread, otherwise false.
15150 **/
15151static bool
15152lpfc_sli4_nvmet_handle_rcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
15153			    struct lpfc_rcqe *rcqe)
15154{
15155	bool workposted = false;
15156	struct lpfc_queue *hrq;
15157	struct lpfc_queue *drq;
15158	struct rqb_dmabuf *dma_buf;
15159	struct fc_frame_header *fc_hdr;
15160	struct lpfc_nvmet_tgtport *tgtp;
15161	uint32_t status, rq_id;
15162	unsigned long iflags;
15163	uint32_t fctl, idx;
15164
15165	if ((phba->nvmet_support == 0) ||
15166	    (phba->sli4_hba.nvmet_cqset == NULL))
15167		return workposted;
15168
15169	idx = cq->queue_id - phba->sli4_hba.nvmet_cqset[0]->queue_id;
15170	hrq = phba->sli4_hba.nvmet_mrq_hdr[idx];
15171	drq = phba->sli4_hba.nvmet_mrq_data[idx];
15172
15173	/* sanity check on queue memory */
15174	if (unlikely(!hrq) || unlikely(!drq))
15175		return workposted;
15176
15177	if (bf_get(lpfc_cqe_code, rcqe) == CQE_CODE_RECEIVE_V1)
15178		rq_id = bf_get(lpfc_rcqe_rq_id_v1, rcqe);
15179	else
15180		rq_id = bf_get(lpfc_rcqe_rq_id, rcqe);
15181
15182	if ((phba->nvmet_support == 0) ||
15183	    (rq_id != hrq->queue_id))
15184		return workposted;
15185
15186	status = bf_get(lpfc_rcqe_status, rcqe);
15187	switch (status) {
15188	case FC_STATUS_RQ_BUF_LEN_EXCEEDED:
15189		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
15190				"6126 Receive Frame Truncated!!\n");
15191		fallthrough;
15192	case FC_STATUS_RQ_SUCCESS:
15193		spin_lock_irqsave(&phba->hbalock, iflags);
15194		lpfc_sli4_rq_release(hrq, drq);
15195		dma_buf = lpfc_sli_rqbuf_get(phba, hrq);
15196		if (!dma_buf) {
15197			hrq->RQ_no_buf_found++;
15198			spin_unlock_irqrestore(&phba->hbalock, iflags);
15199			goto out;
15200		}
15201		spin_unlock_irqrestore(&phba->hbalock, iflags);
15202		hrq->RQ_rcv_buf++;
15203		hrq->RQ_buf_posted--;
15204		fc_hdr = (struct fc_frame_header *)dma_buf->hbuf.virt;
15205
15206		/* Just some basic sanity checks on FCP Command frame */
15207		fctl = (fc_hdr->fh_f_ctl[0] << 16 |
15208			fc_hdr->fh_f_ctl[1] << 8 |
15209			fc_hdr->fh_f_ctl[2]);
15210		if (((fctl &
15211		    (FC_FC_FIRST_SEQ | FC_FC_END_SEQ | FC_FC_SEQ_INIT)) !=
15212		    (FC_FC_FIRST_SEQ | FC_FC_END_SEQ | FC_FC_SEQ_INIT)) ||
15213		    (fc_hdr->fh_seq_cnt != 0)) /* 0 byte swapped is still 0 */
15214			goto drop;
15215
15216		if (fc_hdr->fh_type == FC_TYPE_FCP) {
15217			dma_buf->bytes_recv = bf_get(lpfc_rcqe_length, rcqe);
15218			lpfc_nvmet_unsol_fcp_event(
15219				phba, idx, dma_buf, cq->isr_timestamp,
15220				cq->q_flag & HBA_NVMET_CQ_NOTIFY);
15221			return false;
15222		}
15223drop:
15224		lpfc_rq_buf_free(phba, &dma_buf->hbuf);
15225		break;
15226	case FC_STATUS_INSUFF_BUF_FRM_DISC:
15227		if (phba->nvmet_support) {
15228			tgtp = phba->targetport->private;
15229			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
15230					"6401 RQE Error x%x, posted %d err_cnt "
15231					"%d: %x %x %x\n",
15232					status, hrq->RQ_buf_posted,
15233					hrq->RQ_no_posted_buf,
15234					atomic_read(&tgtp->rcv_fcp_cmd_in),
15235					atomic_read(&tgtp->rcv_fcp_cmd_out),
15236					atomic_read(&tgtp->xmt_fcp_release));
15237		}
15238		fallthrough;
15239
15240	case FC_STATUS_INSUFF_BUF_NEED_BUF:
15241		hrq->RQ_no_posted_buf++;
15242		/* Post more buffers if possible */
15243		break;
15244	case FC_STATUS_RQ_DMA_FAILURE:
15245		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
15246				"2575 RQE DMA Error x%x, x%08x x%08x x%08x "
15247				"x%08x\n",
15248				status, rcqe->word0, rcqe->word1,
15249				rcqe->word2, rcqe->word3);
15250
15251		/* If IV set, no further recovery */
15252		if (bf_get(lpfc_rcqe_iv, rcqe))
15253			break;
15254
15255		/* recycle consumed resource */
15256		spin_lock_irqsave(&phba->hbalock, iflags);
15257		lpfc_sli4_rq_release(hrq, drq);
15258		dma_buf = lpfc_sli_rqbuf_get(phba, hrq);
15259		if (!dma_buf) {
15260			hrq->RQ_no_buf_found++;
15261			spin_unlock_irqrestore(&phba->hbalock, iflags);
15262			break;
15263		}
15264		hrq->RQ_rcv_buf++;
15265		hrq->RQ_buf_posted--;
15266		spin_unlock_irqrestore(&phba->hbalock, iflags);
15267		lpfc_rq_buf_free(phba, &dma_buf->hbuf);
15268		break;
15269	default:
15270		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
15271				"2576 Unexpected RQE Status x%x, w0-3 x%08x "
15272				"x%08x x%08x x%08x\n",
15273				status, rcqe->word0, rcqe->word1,
15274				rcqe->word2, rcqe->word3);
15275		break;
15276	}
15277out:
15278	return workposted;
15279}
15280
15281/**
15282 * lpfc_sli4_fp_handle_cqe - Process fast-path work queue completion entry
15283 * @phba: adapter with cq
15284 * @cq: Pointer to the completion queue.
15285 * @cqe: Pointer to fast-path completion queue entry.
15286 *
15287 * This routine process a fast-path work queue completion entry from fast-path
15288 * event queue for FCP command response completion.
15289 *
15290 * Return: true if work posted to worker thread, otherwise false.
15291 **/
15292static bool
15293lpfc_sli4_fp_handle_cqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
15294			 struct lpfc_cqe *cqe)
15295{
15296	struct lpfc_wcqe_release wcqe;
15297	bool workposted = false;
15298
15299	/* Copy the work queue CQE and convert endian order if needed */
15300	lpfc_sli4_pcimem_bcopy(cqe, &wcqe, sizeof(struct lpfc_cqe));
15301
15302	/* Check and process for different type of WCQE and dispatch */
15303	switch (bf_get(lpfc_wcqe_c_code, &wcqe)) {
15304	case CQE_CODE_COMPL_WQE:
15305	case CQE_CODE_NVME_ERSP:
15306		cq->CQ_wq++;
15307		/* Process the WQ complete event */
15308		phba->last_completion_time = jiffies;
15309		if (cq->subtype == LPFC_IO || cq->subtype == LPFC_NVME_LS)
15310			lpfc_sli4_fp_handle_fcp_wcqe(phba, cq,
15311				(struct lpfc_wcqe_complete *)&wcqe);
15312		break;
15313	case CQE_CODE_RELEASE_WQE:
15314		cq->CQ_release_wqe++;
15315		/* Process the WQ release event */
15316		lpfc_sli4_fp_handle_rel_wcqe(phba, cq,
15317				(struct lpfc_wcqe_release *)&wcqe);
15318		break;
15319	case CQE_CODE_XRI_ABORTED:
15320		cq->CQ_xri_aborted++;
15321		/* Process the WQ XRI abort event */
15322		phba->last_completion_time = jiffies;
15323		workposted = lpfc_sli4_sp_handle_abort_xri_wcqe(phba, cq,
15324				(struct sli4_wcqe_xri_aborted *)&wcqe);
15325		break;
15326	case CQE_CODE_RECEIVE_V1:
15327	case CQE_CODE_RECEIVE:
15328		phba->last_completion_time = jiffies;
15329		if (cq->subtype == LPFC_NVMET) {
15330			workposted = lpfc_sli4_nvmet_handle_rcqe(
15331				phba, cq, (struct lpfc_rcqe *)&wcqe);
15332		}
15333		break;
15334	default:
15335		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
15336				"0144 Not a valid CQE code: x%x\n",
15337				bf_get(lpfc_wcqe_c_code, &wcqe));
15338		break;
15339	}
15340	return workposted;
15341}
15342
15343/**
15344 * __lpfc_sli4_hba_process_cq - Process a fast-path event queue entry
15345 * @cq: Pointer to CQ to be processed
15346 *
15347 * This routine calls the cq processing routine with the handler for
15348 * fast path CQEs.
15349 *
15350 * The CQ routine returns two values: the first is the calling status,
15351 * which indicates whether work was queued to the  background discovery
15352 * thread. If true, the routine should wakeup the discovery thread;
15353 * the second is the delay parameter. If non-zero, rather than rearming
15354 * the CQ and yet another interrupt, the CQ handler should be queued so
15355 * that it is processed in a subsequent polling action. The value of
15356 * the delay indicates when to reschedule it.
15357 **/
15358static void
15359__lpfc_sli4_hba_process_cq(struct lpfc_queue *cq)
15360{
15361	struct lpfc_hba *phba = cq->phba;
15362	unsigned long delay;
15363	bool workposted = false;
15364	int ret;
15365
15366	/* process and rearm the CQ */
15367	workposted |= __lpfc_sli4_process_cq(phba, cq, lpfc_sli4_fp_handle_cqe,
15368					     &delay);
15369
15370	if (delay) {
15371		if (is_kdump_kernel())
15372			ret = queue_delayed_work(phba->wq, &cq->sched_irqwork,
15373						delay);
15374		else
15375			ret = queue_delayed_work_on(cq->chann, phba->wq,
15376						&cq->sched_irqwork, delay);
15377		if (!ret)
15378			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
15379					"0367 Cannot schedule queue work "
15380					"for cqid=%d on CPU %d\n",
15381					cq->queue_id, cq->chann);
15382	}
15383
15384	/* wake up worker thread if there are works to be done */
15385	if (workposted)
15386		lpfc_worker_wake_up(phba);
15387}
15388
15389/**
15390 * lpfc_sli4_hba_process_cq - fast-path work handler when started by
15391 *   interrupt
15392 * @work: pointer to work element
15393 *
15394 * translates from the work handler and calls the fast-path handler.
15395 **/
15396static void
15397lpfc_sli4_hba_process_cq(struct work_struct *work)
15398{
15399	struct lpfc_queue *cq = container_of(work, struct lpfc_queue, irqwork);
15400
15401	__lpfc_sli4_hba_process_cq(cq);
15402}
15403
15404/**
15405 * lpfc_sli4_hba_handle_eqe - Process a fast-path event queue entry
15406 * @phba: Pointer to HBA context object.
15407 * @eq: Pointer to the queue structure.
15408 * @eqe: Pointer to fast-path event queue entry.
15409 * @poll_mode: poll_mode to execute processing the cq.
15410 *
15411 * This routine process a event queue entry from the fast-path event queue.
15412 * It will check the MajorCode and MinorCode to determine this is for a
15413 * completion event on a completion queue, if not, an error shall be logged
15414 * and just return. Otherwise, it will get to the corresponding completion
15415 * queue and process all the entries on the completion queue, rearm the
15416 * completion queue, and then return.
15417 **/
15418static void
15419lpfc_sli4_hba_handle_eqe(struct lpfc_hba *phba, struct lpfc_queue *eq,
15420			 struct lpfc_eqe *eqe, enum lpfc_poll_mode poll_mode)
15421{
15422	struct lpfc_queue *cq = NULL;
15423	uint32_t qidx = eq->hdwq;
15424	uint16_t cqid, id;
15425	int ret;
15426
15427	if (unlikely(bf_get_le32(lpfc_eqe_major_code, eqe) != 0)) {
15428		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
15429				"0366 Not a valid completion "
15430				"event: majorcode=x%x, minorcode=x%x\n",
15431				bf_get_le32(lpfc_eqe_major_code, eqe),
15432				bf_get_le32(lpfc_eqe_minor_code, eqe));
15433		return;
15434	}
15435
15436	/* Get the reference to the corresponding CQ */
15437	cqid = bf_get_le32(lpfc_eqe_resource_id, eqe);
15438
15439	/* Use the fast lookup method first */
15440	if (cqid <= phba->sli4_hba.cq_max) {
15441		cq = phba->sli4_hba.cq_lookup[cqid];
15442		if (cq)
15443			goto  work_cq;
15444	}
15445
15446	/* Next check for NVMET completion */
15447	if (phba->cfg_nvmet_mrq && phba->sli4_hba.nvmet_cqset) {
15448		id = phba->sli4_hba.nvmet_cqset[0]->queue_id;
15449		if ((cqid >= id) && (cqid < (id + phba->cfg_nvmet_mrq))) {
15450			/* Process NVMET unsol rcv */
15451			cq = phba->sli4_hba.nvmet_cqset[cqid - id];
15452			goto  process_cq;
15453		}
15454	}
15455
15456	if (phba->sli4_hba.nvmels_cq &&
15457	    (cqid == phba->sli4_hba.nvmels_cq->queue_id)) {
15458		/* Process NVME unsol rcv */
15459		cq = phba->sli4_hba.nvmels_cq;
15460	}
15461
15462	/* Otherwise this is a Slow path event */
15463	if (cq == NULL) {
15464		lpfc_sli4_sp_handle_eqe(phba, eqe,
15465					phba->sli4_hba.hdwq[qidx].hba_eq);
15466		return;
15467	}
15468
15469process_cq:
15470	if (unlikely(cqid != cq->queue_id)) {
15471		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
15472				"0368 Miss-matched fast-path completion "
15473				"queue identifier: eqcqid=%d, fcpcqid=%d\n",
15474				cqid, cq->queue_id);
15475		return;
15476	}
15477
15478work_cq:
15479#if defined(CONFIG_SCSI_LPFC_DEBUG_FS)
15480	if (phba->ktime_on)
15481		cq->isr_timestamp = ktime_get_ns();
15482	else
15483		cq->isr_timestamp = 0;
15484#endif
15485
15486	switch (poll_mode) {
15487	case LPFC_THREADED_IRQ:
15488		__lpfc_sli4_hba_process_cq(cq);
15489		break;
15490	case LPFC_QUEUE_WORK:
15491	default:
15492		if (is_kdump_kernel())
15493			ret = queue_work(phba->wq, &cq->irqwork);
15494		else
15495			ret = queue_work_on(cq->chann, phba->wq, &cq->irqwork);
15496		if (!ret)
15497			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
15498					"0383 Cannot schedule queue work "
15499					"for CQ eqcqid=%d, cqid=%d on CPU %d\n",
15500					cqid, cq->queue_id,
15501					raw_smp_processor_id());
15502		break;
15503	}
15504}
15505
15506/**
15507 * lpfc_sli4_dly_hba_process_cq - fast-path work handler when started by timer
15508 * @work: pointer to work element
15509 *
15510 * translates from the work handler and calls the fast-path handler.
15511 **/
15512static void
15513lpfc_sli4_dly_hba_process_cq(struct work_struct *work)
15514{
15515	struct lpfc_queue *cq = container_of(to_delayed_work(work),
15516					struct lpfc_queue, sched_irqwork);
15517
15518	__lpfc_sli4_hba_process_cq(cq);
15519}
15520
15521/**
15522 * lpfc_sli4_hba_intr_handler - HBA interrupt handler to SLI-4 device
15523 * @irq: Interrupt number.
15524 * @dev_id: The device context pointer.
15525 *
15526 * This function is directly called from the PCI layer as an interrupt
15527 * service routine when device with SLI-4 interface spec is enabled with
15528 * MSI-X multi-message interrupt mode and there is a fast-path FCP IOCB
15529 * ring event in the HBA. However, when the device is enabled with either
15530 * MSI or Pin-IRQ interrupt mode, this function is called as part of the
15531 * device-level interrupt handler. When the PCI slot is in error recovery
15532 * or the HBA is undergoing initialization, the interrupt handler will not
15533 * process the interrupt. The SCSI FCP fast-path ring event are handled in
15534 * the intrrupt context. This function is called without any lock held.
15535 * It gets the hbalock to access and update SLI data structures. Note that,
15536 * the FCP EQ to FCP CQ are one-to-one map such that the FCP EQ index is
15537 * equal to that of FCP CQ index.
15538 *
15539 * The link attention and ELS ring attention events are handled
15540 * by the worker thread. The interrupt handler signals the worker thread
15541 * and returns for these events. This function is called without any lock
15542 * held. It gets the hbalock to access and update SLI data structures.
15543 *
15544 * This function returns IRQ_HANDLED when interrupt is handled, IRQ_WAKE_THREAD
15545 * when interrupt is scheduled to be handled from a threaded irq context, or
15546 * else returns IRQ_NONE.
15547 **/
15548irqreturn_t
15549lpfc_sli4_hba_intr_handler(int irq, void *dev_id)
15550{
15551	struct lpfc_hba *phba;
15552	struct lpfc_hba_eq_hdl *hba_eq_hdl;
15553	struct lpfc_queue *fpeq;
15554	unsigned long iflag;
15555	int hba_eqidx;
15556	int ecount = 0;
15557	struct lpfc_eq_intr_info *eqi;
15558
15559	/* Get the driver's phba structure from the dev_id */
15560	hba_eq_hdl = (struct lpfc_hba_eq_hdl *)dev_id;
15561	phba = hba_eq_hdl->phba;
15562	hba_eqidx = hba_eq_hdl->idx;
15563
15564	if (unlikely(!phba))
15565		return IRQ_NONE;
15566	if (unlikely(!phba->sli4_hba.hdwq))
15567		return IRQ_NONE;
15568
15569	/* Get to the EQ struct associated with this vector */
15570	fpeq = phba->sli4_hba.hba_eq_hdl[hba_eqidx].eq;
15571	if (unlikely(!fpeq))
15572		return IRQ_NONE;
15573
15574	/* Check device state for handling interrupt */
15575	if (unlikely(lpfc_intr_state_check(phba))) {
15576		/* Check again for link_state with lock held */
15577		spin_lock_irqsave(&phba->hbalock, iflag);
15578		if (phba->link_state < LPFC_LINK_DOWN)
15579			/* Flush, clear interrupt, and rearm the EQ */
15580			lpfc_sli4_eqcq_flush(phba, fpeq);
15581		spin_unlock_irqrestore(&phba->hbalock, iflag);
15582		return IRQ_NONE;
15583	}
15584
15585	switch (fpeq->poll_mode) {
15586	case LPFC_THREADED_IRQ:
15587		/* CGN mgmt is mutually exclusive from irq processing */
15588		if (phba->cmf_active_mode == LPFC_CFG_OFF)
15589			return IRQ_WAKE_THREAD;
15590		fallthrough;
15591	case LPFC_QUEUE_WORK:
15592	default:
15593		eqi = this_cpu_ptr(phba->sli4_hba.eq_info);
15594		eqi->icnt++;
15595
15596		fpeq->last_cpu = raw_smp_processor_id();
15597
15598		if (eqi->icnt > LPFC_EQD_ISR_TRIGGER &&
15599		    fpeq->q_flag & HBA_EQ_DELAY_CHK &&
15600		    phba->cfg_auto_imax &&
15601		    fpeq->q_mode != LPFC_MAX_AUTO_EQ_DELAY &&
15602		    phba->sli.sli_flag & LPFC_SLI_USE_EQDR)
15603			lpfc_sli4_mod_hba_eq_delay(phba, fpeq,
15604						   LPFC_MAX_AUTO_EQ_DELAY);
15605
15606		/* process and rearm the EQ */
15607		ecount = lpfc_sli4_process_eq(phba, fpeq, LPFC_QUEUE_REARM,
15608					      LPFC_QUEUE_WORK);
15609
15610		if (unlikely(ecount == 0)) {
15611			fpeq->EQ_no_entry++;
15612			if (phba->intr_type == MSIX)
15613				/* MSI-X treated interrupt served as no EQ share INT */
15614				lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
15615						"0358 MSI-X interrupt with no EQE\n");
15616			else
15617				/* Non MSI-X treated on interrupt as EQ share INT */
15618				return IRQ_NONE;
15619		}
15620	}
15621
15622	return IRQ_HANDLED;
15623} /* lpfc_sli4_hba_intr_handler */
15624
15625/**
15626 * lpfc_sli4_intr_handler - Device-level interrupt handler for SLI-4 device
15627 * @irq: Interrupt number.
15628 * @dev_id: The device context pointer.
15629 *
15630 * This function is the device-level interrupt handler to device with SLI-4
15631 * interface spec, called from the PCI layer when either MSI or Pin-IRQ
15632 * interrupt mode is enabled and there is an event in the HBA which requires
15633 * driver attention. This function invokes the slow-path interrupt attention
15634 * handling function and fast-path interrupt attention handling function in
15635 * turn to process the relevant HBA attention events. This function is called
15636 * without any lock held. It gets the hbalock to access and update SLI data
15637 * structures.
15638 *
15639 * This function returns IRQ_HANDLED when interrupt is handled, else it
15640 * returns IRQ_NONE.
15641 **/
15642irqreturn_t
15643lpfc_sli4_intr_handler(int irq, void *dev_id)
15644{
15645	struct lpfc_hba  *phba;
15646	irqreturn_t hba_irq_rc;
15647	bool hba_handled = false;
15648	int qidx;
15649
15650	/* Get the driver's phba structure from the dev_id */
15651	phba = (struct lpfc_hba *)dev_id;
15652
15653	if (unlikely(!phba))
15654		return IRQ_NONE;
15655
15656	/*
15657	 * Invoke fast-path host attention interrupt handling as appropriate.
15658	 */
15659	for (qidx = 0; qidx < phba->cfg_irq_chann; qidx++) {
15660		hba_irq_rc = lpfc_sli4_hba_intr_handler(irq,
15661					&phba->sli4_hba.hba_eq_hdl[qidx]);
15662		if (hba_irq_rc == IRQ_HANDLED)
15663			hba_handled |= true;
15664	}
15665
15666	return (hba_handled == true) ? IRQ_HANDLED : IRQ_NONE;
15667} /* lpfc_sli4_intr_handler */
15668
15669void lpfc_sli4_poll_hbtimer(struct timer_list *t)
15670{
15671	struct lpfc_hba *phba = from_timer(phba, t, cpuhp_poll_timer);
15672	struct lpfc_queue *eq;
15673
15674	rcu_read_lock();
15675
15676	list_for_each_entry_rcu(eq, &phba->poll_list, _poll_list)
15677		lpfc_sli4_poll_eq(eq);
15678	if (!list_empty(&phba->poll_list))
15679		mod_timer(&phba->cpuhp_poll_timer,
15680			  jiffies + msecs_to_jiffies(LPFC_POLL_HB));
15681
15682	rcu_read_unlock();
15683}
15684
15685static inline void lpfc_sli4_add_to_poll_list(struct lpfc_queue *eq)
15686{
15687	struct lpfc_hba *phba = eq->phba;
15688
15689	/* kickstart slowpath processing if needed */
15690	if (list_empty(&phba->poll_list))
15691		mod_timer(&phba->cpuhp_poll_timer,
15692			  jiffies + msecs_to_jiffies(LPFC_POLL_HB));
15693
15694	list_add_rcu(&eq->_poll_list, &phba->poll_list);
15695	synchronize_rcu();
15696}
15697
15698static inline void lpfc_sli4_remove_from_poll_list(struct lpfc_queue *eq)
15699{
15700	struct lpfc_hba *phba = eq->phba;
15701
15702	/* Disable slowpath processing for this eq.  Kick start the eq
15703	 * by RE-ARMING the eq's ASAP
15704	 */
15705	list_del_rcu(&eq->_poll_list);
15706	synchronize_rcu();
15707
15708	if (list_empty(&phba->poll_list))
15709		del_timer_sync(&phba->cpuhp_poll_timer);
15710}
15711
15712void lpfc_sli4_cleanup_poll_list(struct lpfc_hba *phba)
15713{
15714	struct lpfc_queue *eq, *next;
15715
15716	list_for_each_entry_safe(eq, next, &phba->poll_list, _poll_list)
15717		list_del(&eq->_poll_list);
15718
15719	INIT_LIST_HEAD(&phba->poll_list);
15720	synchronize_rcu();
15721}
15722
15723static inline void
15724__lpfc_sli4_switch_eqmode(struct lpfc_queue *eq, uint8_t mode)
15725{
15726	if (mode == eq->mode)
15727		return;
15728	/*
15729	 * currently this function is only called during a hotplug
15730	 * event and the cpu on which this function is executing
15731	 * is going offline.  By now the hotplug has instructed
15732	 * the scheduler to remove this cpu from cpu active mask.
15733	 * So we don't need to work about being put aside by the
15734	 * scheduler for a high priority process.  Yes, the inte-
15735	 * rrupts could come but they are known to retire ASAP.
15736	 */
15737
15738	/* Disable polling in the fastpath */
15739	WRITE_ONCE(eq->mode, mode);
15740	/* flush out the store buffer */
15741	smp_wmb();
15742
15743	/*
15744	 * Add this eq to the polling list and start polling. For
15745	 * a grace period both interrupt handler and poller will
15746	 * try to process the eq _but_ that's fine.  We have a
15747	 * synchronization mechanism in place (queue_claimed) to
15748	 * deal with it.  This is just a draining phase for int-
15749	 * errupt handler (not eq's) as we have guranteed through
15750	 * barrier that all the CPUs have seen the new CQ_POLLED
15751	 * state. which will effectively disable the REARMING of
15752	 * the EQ.  The whole idea is eq's die off eventually as
15753	 * we are not rearming EQ's anymore.
15754	 */
15755	mode ? lpfc_sli4_add_to_poll_list(eq) :
15756	       lpfc_sli4_remove_from_poll_list(eq);
15757}
15758
15759void lpfc_sli4_start_polling(struct lpfc_queue *eq)
15760{
15761	__lpfc_sli4_switch_eqmode(eq, LPFC_EQ_POLL);
15762}
15763
15764void lpfc_sli4_stop_polling(struct lpfc_queue *eq)
15765{
15766	struct lpfc_hba *phba = eq->phba;
15767
15768	__lpfc_sli4_switch_eqmode(eq, LPFC_EQ_INTERRUPT);
15769
15770	/* Kick start for the pending io's in h/w.
15771	 * Once we switch back to interrupt processing on a eq
15772	 * the io path completion will only arm eq's when it
15773	 * receives a completion.  But since eq's are in disa-
15774	 * rmed state it doesn't receive a completion.  This
15775	 * creates a deadlock scenaro.
15776	 */
15777	phba->sli4_hba.sli4_write_eq_db(phba, eq, 0, LPFC_QUEUE_REARM);
15778}
15779
15780/**
15781 * lpfc_sli4_queue_free - free a queue structure and associated memory
15782 * @queue: The queue structure to free.
15783 *
15784 * This function frees a queue structure and the DMAable memory used for
15785 * the host resident queue. This function must be called after destroying the
15786 * queue on the HBA.
15787 **/
15788void
15789lpfc_sli4_queue_free(struct lpfc_queue *queue)
15790{
15791	struct lpfc_dmabuf *dmabuf;
15792
15793	if (!queue)
15794		return;
15795
15796	if (!list_empty(&queue->wq_list))
15797		list_del(&queue->wq_list);
15798
15799	while (!list_empty(&queue->page_list)) {
15800		list_remove_head(&queue->page_list, dmabuf, struct lpfc_dmabuf,
15801				 list);
15802		dma_free_coherent(&queue->phba->pcidev->dev, queue->page_size,
15803				  dmabuf->virt, dmabuf->phys);
15804		kfree(dmabuf);
15805	}
15806	if (queue->rqbp) {
15807		lpfc_free_rq_buffer(queue->phba, queue);
15808		kfree(queue->rqbp);
15809	}
15810
15811	if (!list_empty(&queue->cpu_list))
15812		list_del(&queue->cpu_list);
15813
15814	kfree(queue);
15815	return;
15816}
15817
15818/**
15819 * lpfc_sli4_queue_alloc - Allocate and initialize a queue structure
15820 * @phba: The HBA that this queue is being created on.
15821 * @page_size: The size of a queue page
15822 * @entry_size: The size of each queue entry for this queue.
15823 * @entry_count: The number of entries that this queue will handle.
15824 * @cpu: The cpu that will primarily utilize this queue.
15825 *
15826 * This function allocates a queue structure and the DMAable memory used for
15827 * the host resident queue. This function must be called before creating the
15828 * queue on the HBA.
15829 **/
15830struct lpfc_queue *
15831lpfc_sli4_queue_alloc(struct lpfc_hba *phba, uint32_t page_size,
15832		      uint32_t entry_size, uint32_t entry_count, int cpu)
15833{
15834	struct lpfc_queue *queue;
15835	struct lpfc_dmabuf *dmabuf;
15836	uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
15837	uint16_t x, pgcnt;
15838
15839	if (!phba->sli4_hba.pc_sli4_params.supported)
15840		hw_page_size = page_size;
15841
15842	pgcnt = ALIGN(entry_size * entry_count, hw_page_size) / hw_page_size;
15843
15844	/* If needed, Adjust page count to match the max the adapter supports */
15845	if (pgcnt > phba->sli4_hba.pc_sli4_params.wqpcnt)
15846		pgcnt = phba->sli4_hba.pc_sli4_params.wqpcnt;
15847
15848	queue = kzalloc_node(sizeof(*queue) + (sizeof(void *) * pgcnt),
15849			     GFP_KERNEL, cpu_to_node(cpu));
15850	if (!queue)
15851		return NULL;
15852
15853	INIT_LIST_HEAD(&queue->list);
15854	INIT_LIST_HEAD(&queue->_poll_list);
15855	INIT_LIST_HEAD(&queue->wq_list);
15856	INIT_LIST_HEAD(&queue->wqfull_list);
15857	INIT_LIST_HEAD(&queue->page_list);
15858	INIT_LIST_HEAD(&queue->child_list);
15859	INIT_LIST_HEAD(&queue->cpu_list);
15860
15861	/* Set queue parameters now.  If the system cannot provide memory
15862	 * resources, the free routine needs to know what was allocated.
15863	 */
15864	queue->page_count = pgcnt;
15865	queue->q_pgs = (void **)&queue[1];
15866	queue->entry_cnt_per_pg = hw_page_size / entry_size;
15867	queue->entry_size = entry_size;
15868	queue->entry_count = entry_count;
15869	queue->page_size = hw_page_size;
15870	queue->phba = phba;
15871
15872	for (x = 0; x < queue->page_count; x++) {
15873		dmabuf = kzalloc_node(sizeof(*dmabuf), GFP_KERNEL,
15874				      dev_to_node(&phba->pcidev->dev));
15875		if (!dmabuf)
15876			goto out_fail;
15877		dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev,
15878						  hw_page_size, &dmabuf->phys,
15879						  GFP_KERNEL);
15880		if (!dmabuf->virt) {
15881			kfree(dmabuf);
15882			goto out_fail;
15883		}
15884		dmabuf->buffer_tag = x;
15885		list_add_tail(&dmabuf->list, &queue->page_list);
15886		/* use lpfc_sli4_qe to index a paritcular entry in this page */
15887		queue->q_pgs[x] = dmabuf->virt;
15888	}
15889	INIT_WORK(&queue->irqwork, lpfc_sli4_hba_process_cq);
15890	INIT_WORK(&queue->spwork, lpfc_sli4_sp_process_cq);
15891	INIT_DELAYED_WORK(&queue->sched_irqwork, lpfc_sli4_dly_hba_process_cq);
15892	INIT_DELAYED_WORK(&queue->sched_spwork, lpfc_sli4_dly_sp_process_cq);
15893
15894	/* notify_interval will be set during q creation */
15895
15896	return queue;
15897out_fail:
15898	lpfc_sli4_queue_free(queue);
15899	return NULL;
15900}
15901
15902/**
15903 * lpfc_dual_chute_pci_bar_map - Map pci base address register to host memory
15904 * @phba: HBA structure that indicates port to create a queue on.
15905 * @pci_barset: PCI BAR set flag.
15906 *
15907 * This function shall perform iomap of the specified PCI BAR address to host
15908 * memory address if not already done so and return it. The returned host
15909 * memory address can be NULL.
15910 */
15911static void __iomem *
15912lpfc_dual_chute_pci_bar_map(struct lpfc_hba *phba, uint16_t pci_barset)
15913{
15914	if (!phba->pcidev)
15915		return NULL;
15916
15917	switch (pci_barset) {
15918	case WQ_PCI_BAR_0_AND_1:
15919		return phba->pci_bar0_memmap_p;
15920	case WQ_PCI_BAR_2_AND_3:
15921		return phba->pci_bar2_memmap_p;
15922	case WQ_PCI_BAR_4_AND_5:
15923		return phba->pci_bar4_memmap_p;
15924	default:
15925		break;
15926	}
15927	return NULL;
15928}
15929
15930/**
15931 * lpfc_modify_hba_eq_delay - Modify Delay Multiplier on EQs
15932 * @phba: HBA structure that EQs are on.
15933 * @startq: The starting EQ index to modify
15934 * @numq: The number of EQs (consecutive indexes) to modify
15935 * @usdelay: amount of delay
15936 *
15937 * This function revises the EQ delay on 1 or more EQs. The EQ delay
15938 * is set either by writing to a register (if supported by the SLI Port)
15939 * or by mailbox command. The mailbox command allows several EQs to be
15940 * updated at once.
15941 *
15942 * The @phba struct is used to send a mailbox command to HBA. The @startq
15943 * is used to get the starting EQ index to change. The @numq value is
15944 * used to specify how many consecutive EQ indexes, starting at EQ index,
15945 * are to be changed. This function is asynchronous and will wait for any
15946 * mailbox commands to finish before returning.
15947 *
15948 * On success this function will return a zero. If unable to allocate
15949 * enough memory this function will return -ENOMEM. If a mailbox command
15950 * fails this function will return -ENXIO. Note: on ENXIO, some EQs may
15951 * have had their delay multipler changed.
15952 **/
15953void
15954lpfc_modify_hba_eq_delay(struct lpfc_hba *phba, uint32_t startq,
15955			 uint32_t numq, uint32_t usdelay)
15956{
15957	struct lpfc_mbx_modify_eq_delay *eq_delay;
15958	LPFC_MBOXQ_t *mbox;
15959	struct lpfc_queue *eq;
15960	int cnt = 0, rc, length;
15961	uint32_t shdr_status, shdr_add_status;
15962	uint32_t dmult;
15963	int qidx;
15964	union lpfc_sli4_cfg_shdr *shdr;
15965
15966	if (startq >= phba->cfg_irq_chann)
15967		return;
15968
15969	if (usdelay > 0xFFFF) {
15970		lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_FCP | LOG_NVME,
15971				"6429 usdelay %d too large. Scaled down to "
15972				"0xFFFF.\n", usdelay);
15973		usdelay = 0xFFFF;
15974	}
15975
15976	/* set values by EQ_DELAY register if supported */
15977	if (phba->sli.sli_flag & LPFC_SLI_USE_EQDR) {
15978		for (qidx = startq; qidx < phba->cfg_irq_chann; qidx++) {
15979			eq = phba->sli4_hba.hba_eq_hdl[qidx].eq;
15980			if (!eq)
15981				continue;
15982
15983			lpfc_sli4_mod_hba_eq_delay(phba, eq, usdelay);
15984
15985			if (++cnt >= numq)
15986				break;
15987		}
15988		return;
15989	}
15990
15991	/* Otherwise, set values by mailbox cmd */
15992
15993	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
15994	if (!mbox) {
15995		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
15996				"6428 Failed allocating mailbox cmd buffer."
15997				" EQ delay was not set.\n");
15998		return;
15999	}
16000	length = (sizeof(struct lpfc_mbx_modify_eq_delay) -
16001		  sizeof(struct lpfc_sli4_cfg_mhdr));
16002	lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
16003			 LPFC_MBOX_OPCODE_MODIFY_EQ_DELAY,
16004			 length, LPFC_SLI4_MBX_EMBED);
16005	eq_delay = &mbox->u.mqe.un.eq_delay;
16006
16007	/* Calculate delay multiper from maximum interrupt per second */
16008	dmult = (usdelay * LPFC_DMULT_CONST) / LPFC_SEC_TO_USEC;
16009	if (dmult)
16010		dmult--;
16011	if (dmult > LPFC_DMULT_MAX)
16012		dmult = LPFC_DMULT_MAX;
16013
16014	for (qidx = startq; qidx < phba->cfg_irq_chann; qidx++) {
16015		eq = phba->sli4_hba.hba_eq_hdl[qidx].eq;
16016		if (!eq)
16017			continue;
16018		eq->q_mode = usdelay;
16019		eq_delay->u.request.eq[cnt].eq_id = eq->queue_id;
16020		eq_delay->u.request.eq[cnt].phase = 0;
16021		eq_delay->u.request.eq[cnt].delay_multi = dmult;
16022
16023		if (++cnt >= numq)
16024			break;
16025	}
16026	eq_delay->u.request.num_eq = cnt;
16027
16028	mbox->vport = phba->pport;
16029	mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
16030	mbox->ctx_ndlp = NULL;
16031	rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
16032	shdr = (union lpfc_sli4_cfg_shdr *) &eq_delay->header.cfg_shdr;
16033	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
16034	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
16035	if (shdr_status || shdr_add_status || rc) {
16036		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
16037				"2512 MODIFY_EQ_DELAY mailbox failed with "
16038				"status x%x add_status x%x, mbx status x%x\n",
16039				shdr_status, shdr_add_status, rc);
16040	}
16041	mempool_free(mbox, phba->mbox_mem_pool);
16042	return;
16043}
16044
16045/**
16046 * lpfc_eq_create - Create an Event Queue on the HBA
16047 * @phba: HBA structure that indicates port to create a queue on.
16048 * @eq: The queue structure to use to create the event queue.
16049 * @imax: The maximum interrupt per second limit.
16050 *
16051 * This function creates an event queue, as detailed in @eq, on a port,
16052 * described by @phba by sending an EQ_CREATE mailbox command to the HBA.
16053 *
16054 * The @phba struct is used to send mailbox command to HBA. The @eq struct
16055 * is used to get the entry count and entry size that are necessary to
16056 * determine the number of pages to allocate and use for this queue. This
16057 * function will send the EQ_CREATE mailbox command to the HBA to setup the
16058 * event queue. This function is asynchronous and will wait for the mailbox
16059 * command to finish before continuing.
16060 *
16061 * On success this function will return a zero. If unable to allocate enough
16062 * memory this function will return -ENOMEM. If the queue create mailbox command
16063 * fails this function will return -ENXIO.
16064 **/
16065int
16066lpfc_eq_create(struct lpfc_hba *phba, struct lpfc_queue *eq, uint32_t imax)
16067{
16068	struct lpfc_mbx_eq_create *eq_create;
16069	LPFC_MBOXQ_t *mbox;
16070	int rc, length, status = 0;
16071	struct lpfc_dmabuf *dmabuf;
16072	uint32_t shdr_status, shdr_add_status;
16073	union lpfc_sli4_cfg_shdr *shdr;
16074	uint16_t dmult;
16075	uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
16076
16077	/* sanity check on queue memory */
16078	if (!eq)
16079		return -ENODEV;
16080	if (!phba->sli4_hba.pc_sli4_params.supported)
16081		hw_page_size = SLI4_PAGE_SIZE;
16082
16083	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
16084	if (!mbox)
16085		return -ENOMEM;
16086	length = (sizeof(struct lpfc_mbx_eq_create) -
16087		  sizeof(struct lpfc_sli4_cfg_mhdr));
16088	lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
16089			 LPFC_MBOX_OPCODE_EQ_CREATE,
16090			 length, LPFC_SLI4_MBX_EMBED);
16091	eq_create = &mbox->u.mqe.un.eq_create;
16092	shdr = (union lpfc_sli4_cfg_shdr *) &eq_create->header.cfg_shdr;
16093	bf_set(lpfc_mbx_eq_create_num_pages, &eq_create->u.request,
16094	       eq->page_count);
16095	bf_set(lpfc_eq_context_size, &eq_create->u.request.context,
16096	       LPFC_EQE_SIZE);
16097	bf_set(lpfc_eq_context_valid, &eq_create->u.request.context, 1);
16098
16099	/* Use version 2 of CREATE_EQ if eqav is set */
16100	if (phba->sli4_hba.pc_sli4_params.eqav) {
16101		bf_set(lpfc_mbox_hdr_version, &shdr->request,
16102		       LPFC_Q_CREATE_VERSION_2);
16103		bf_set(lpfc_eq_context_autovalid, &eq_create->u.request.context,
16104		       phba->sli4_hba.pc_sli4_params.eqav);
16105	}
16106
16107	/* don't setup delay multiplier using EQ_CREATE */
16108	dmult = 0;
16109	bf_set(lpfc_eq_context_delay_multi, &eq_create->u.request.context,
16110	       dmult);
16111	switch (eq->entry_count) {
16112	default:
16113		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
16114				"0360 Unsupported EQ count. (%d)\n",
16115				eq->entry_count);
16116		if (eq->entry_count < 256) {
16117			status = -EINVAL;
16118			goto out;
16119		}
16120		fallthrough;	/* otherwise default to smallest count */
16121	case 256:
16122		bf_set(lpfc_eq_context_count, &eq_create->u.request.context,
16123		       LPFC_EQ_CNT_256);
16124		break;
16125	case 512:
16126		bf_set(lpfc_eq_context_count, &eq_create->u.request.context,
16127		       LPFC_EQ_CNT_512);
16128		break;
16129	case 1024:
16130		bf_set(lpfc_eq_context_count, &eq_create->u.request.context,
16131		       LPFC_EQ_CNT_1024);
16132		break;
16133	case 2048:
16134		bf_set(lpfc_eq_context_count, &eq_create->u.request.context,
16135		       LPFC_EQ_CNT_2048);
16136		break;
16137	case 4096:
16138		bf_set(lpfc_eq_context_count, &eq_create->u.request.context,
16139		       LPFC_EQ_CNT_4096);
16140		break;
16141	}
16142	list_for_each_entry(dmabuf, &eq->page_list, list) {
16143		memset(dmabuf->virt, 0, hw_page_size);
16144		eq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
16145					putPaddrLow(dmabuf->phys);
16146		eq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
16147					putPaddrHigh(dmabuf->phys);
16148	}
16149	mbox->vport = phba->pport;
16150	mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
16151	mbox->ctx_buf = NULL;
16152	mbox->ctx_ndlp = NULL;
16153	rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
16154	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
16155	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
16156	if (shdr_status || shdr_add_status || rc) {
16157		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
16158				"2500 EQ_CREATE mailbox failed with "
16159				"status x%x add_status x%x, mbx status x%x\n",
16160				shdr_status, shdr_add_status, rc);
16161		status = -ENXIO;
16162	}
16163	eq->type = LPFC_EQ;
16164	eq->subtype = LPFC_NONE;
16165	eq->queue_id = bf_get(lpfc_mbx_eq_create_q_id, &eq_create->u.response);
16166	if (eq->queue_id == 0xFFFF)
16167		status = -ENXIO;
16168	eq->host_index = 0;
16169	eq->notify_interval = LPFC_EQ_NOTIFY_INTRVL;
16170	eq->max_proc_limit = LPFC_EQ_MAX_PROC_LIMIT;
16171out:
16172	mempool_free(mbox, phba->mbox_mem_pool);
16173	return status;
16174}
16175
16176/**
16177 * lpfc_sli4_hba_intr_handler_th - SLI4 HBA threaded interrupt handler
16178 * @irq: Interrupt number.
16179 * @dev_id: The device context pointer.
16180 *
16181 * This routine is a mirror of lpfc_sli4_hba_intr_handler, but executed within
16182 * threaded irq context.
16183 *
16184 * Returns
16185 * IRQ_HANDLED - interrupt is handled
16186 * IRQ_NONE - otherwise
16187 **/
16188irqreturn_t lpfc_sli4_hba_intr_handler_th(int irq, void *dev_id)
16189{
16190	struct lpfc_hba *phba;
16191	struct lpfc_hba_eq_hdl *hba_eq_hdl;
16192	struct lpfc_queue *fpeq;
16193	int ecount = 0;
16194	int hba_eqidx;
16195	struct lpfc_eq_intr_info *eqi;
16196
16197	/* Get the driver's phba structure from the dev_id */
16198	hba_eq_hdl = (struct lpfc_hba_eq_hdl *)dev_id;
16199	phba = hba_eq_hdl->phba;
16200	hba_eqidx = hba_eq_hdl->idx;
16201
16202	if (unlikely(!phba))
16203		return IRQ_NONE;
16204	if (unlikely(!phba->sli4_hba.hdwq))
16205		return IRQ_NONE;
16206
16207	/* Get to the EQ struct associated with this vector */
16208	fpeq = phba->sli4_hba.hba_eq_hdl[hba_eqidx].eq;
16209	if (unlikely(!fpeq))
16210		return IRQ_NONE;
16211
16212	eqi = per_cpu_ptr(phba->sli4_hba.eq_info, raw_smp_processor_id());
16213	eqi->icnt++;
16214
16215	fpeq->last_cpu = raw_smp_processor_id();
16216
16217	if (eqi->icnt > LPFC_EQD_ISR_TRIGGER &&
16218	    fpeq->q_flag & HBA_EQ_DELAY_CHK &&
16219	    phba->cfg_auto_imax &&
16220	    fpeq->q_mode != LPFC_MAX_AUTO_EQ_DELAY &&
16221	    phba->sli.sli_flag & LPFC_SLI_USE_EQDR)
16222		lpfc_sli4_mod_hba_eq_delay(phba, fpeq, LPFC_MAX_AUTO_EQ_DELAY);
16223
16224	/* process and rearm the EQ */
16225	ecount = lpfc_sli4_process_eq(phba, fpeq, LPFC_QUEUE_REARM,
16226				      LPFC_THREADED_IRQ);
16227
16228	if (unlikely(ecount == 0)) {
16229		fpeq->EQ_no_entry++;
16230		if (phba->intr_type == MSIX)
16231			/* MSI-X treated interrupt served as no EQ share INT */
16232			lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
16233					"3358 MSI-X interrupt with no EQE\n");
16234		else
16235			/* Non MSI-X treated on interrupt as EQ share INT */
16236			return IRQ_NONE;
16237	}
16238	return IRQ_HANDLED;
16239}
16240
16241/**
16242 * lpfc_cq_create - Create a Completion Queue on the HBA
16243 * @phba: HBA structure that indicates port to create a queue on.
16244 * @cq: The queue structure to use to create the completion queue.
16245 * @eq: The event queue to bind this completion queue to.
16246 * @type: Type of queue (EQ, GCQ, MCQ, WCQ, etc).
16247 * @subtype: Functional purpose of the queue (MBOX, IO, ELS, NVMET, etc).
16248 *
16249 * This function creates a completion queue, as detailed in @wq, on a port,
16250 * described by @phba by sending a CQ_CREATE mailbox command to the HBA.
16251 *
16252 * The @phba struct is used to send mailbox command to HBA. The @cq struct
16253 * is used to get the entry count and entry size that are necessary to
16254 * determine the number of pages to allocate and use for this queue. The @eq
16255 * is used to indicate which event queue to bind this completion queue to. This
16256 * function will send the CQ_CREATE mailbox command to the HBA to setup the
16257 * completion queue. This function is asynchronous and will wait for the mailbox
16258 * command to finish before continuing.
16259 *
16260 * On success this function will return a zero. If unable to allocate enough
16261 * memory this function will return -ENOMEM. If the queue create mailbox command
16262 * fails this function will return -ENXIO.
16263 **/
16264int
16265lpfc_cq_create(struct lpfc_hba *phba, struct lpfc_queue *cq,
16266	       struct lpfc_queue *eq, uint32_t type, uint32_t subtype)
16267{
16268	struct lpfc_mbx_cq_create *cq_create;
16269	struct lpfc_dmabuf *dmabuf;
16270	LPFC_MBOXQ_t *mbox;
16271	int rc, length, status = 0;
16272	uint32_t shdr_status, shdr_add_status;
16273	union lpfc_sli4_cfg_shdr *shdr;
16274
16275	/* sanity check on queue memory */
16276	if (!cq || !eq)
16277		return -ENODEV;
16278
16279	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
16280	if (!mbox)
16281		return -ENOMEM;
16282	length = (sizeof(struct lpfc_mbx_cq_create) -
16283		  sizeof(struct lpfc_sli4_cfg_mhdr));
16284	lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
16285			 LPFC_MBOX_OPCODE_CQ_CREATE,
16286			 length, LPFC_SLI4_MBX_EMBED);
16287	cq_create = &mbox->u.mqe.un.cq_create;
16288	shdr = (union lpfc_sli4_cfg_shdr *) &cq_create->header.cfg_shdr;
16289	bf_set(lpfc_mbx_cq_create_num_pages, &cq_create->u.request,
16290		    cq->page_count);
16291	bf_set(lpfc_cq_context_event, &cq_create->u.request.context, 1);
16292	bf_set(lpfc_cq_context_valid, &cq_create->u.request.context, 1);
16293	bf_set(lpfc_mbox_hdr_version, &shdr->request,
16294	       phba->sli4_hba.pc_sli4_params.cqv);
16295	if (phba->sli4_hba.pc_sli4_params.cqv == LPFC_Q_CREATE_VERSION_2) {
16296		bf_set(lpfc_mbx_cq_create_page_size, &cq_create->u.request,
16297		       (cq->page_size / SLI4_PAGE_SIZE));
16298		bf_set(lpfc_cq_eq_id_2, &cq_create->u.request.context,
16299		       eq->queue_id);
16300		bf_set(lpfc_cq_context_autovalid, &cq_create->u.request.context,
16301		       phba->sli4_hba.pc_sli4_params.cqav);
16302	} else {
16303		bf_set(lpfc_cq_eq_id, &cq_create->u.request.context,
16304		       eq->queue_id);
16305	}
16306	switch (cq->entry_count) {
16307	case 2048:
16308	case 4096:
16309		if (phba->sli4_hba.pc_sli4_params.cqv ==
16310		    LPFC_Q_CREATE_VERSION_2) {
16311			cq_create->u.request.context.lpfc_cq_context_count =
16312				cq->entry_count;
16313			bf_set(lpfc_cq_context_count,
16314			       &cq_create->u.request.context,
16315			       LPFC_CQ_CNT_WORD7);
16316			break;
16317		}
16318		fallthrough;
16319	default:
16320		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
16321				"0361 Unsupported CQ count: "
16322				"entry cnt %d sz %d pg cnt %d\n",
16323				cq->entry_count, cq->entry_size,
16324				cq->page_count);
16325		if (cq->entry_count < 256) {
16326			status = -EINVAL;
16327			goto out;
16328		}
16329		fallthrough;	/* otherwise default to smallest count */
16330	case 256:
16331		bf_set(lpfc_cq_context_count, &cq_create->u.request.context,
16332		       LPFC_CQ_CNT_256);
16333		break;
16334	case 512:
16335		bf_set(lpfc_cq_context_count, &cq_create->u.request.context,
16336		       LPFC_CQ_CNT_512);
16337		break;
16338	case 1024:
16339		bf_set(lpfc_cq_context_count, &cq_create->u.request.context,
16340		       LPFC_CQ_CNT_1024);
16341		break;
16342	}
16343	list_for_each_entry(dmabuf, &cq->page_list, list) {
16344		memset(dmabuf->virt, 0, cq->page_size);
16345		cq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
16346					putPaddrLow(dmabuf->phys);
16347		cq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
16348					putPaddrHigh(dmabuf->phys);
16349	}
16350	rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
16351
16352	/* The IOCTL status is embedded in the mailbox subheader. */
16353	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
16354	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
16355	if (shdr_status || shdr_add_status || rc) {
16356		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
16357				"2501 CQ_CREATE mailbox failed with "
16358				"status x%x add_status x%x, mbx status x%x\n",
16359				shdr_status, shdr_add_status, rc);
16360		status = -ENXIO;
16361		goto out;
16362	}
16363	cq->queue_id = bf_get(lpfc_mbx_cq_create_q_id, &cq_create->u.response);
16364	if (cq->queue_id == 0xFFFF) {
16365		status = -ENXIO;
16366		goto out;
16367	}
16368	/* link the cq onto the parent eq child list */
16369	list_add_tail(&cq->list, &eq->child_list);
16370	/* Set up completion queue's type and subtype */
16371	cq->type = type;
16372	cq->subtype = subtype;
16373	cq->queue_id = bf_get(lpfc_mbx_cq_create_q_id, &cq_create->u.response);
16374	cq->assoc_qid = eq->queue_id;
16375	cq->assoc_qp = eq;
16376	cq->host_index = 0;
16377	cq->notify_interval = LPFC_CQ_NOTIFY_INTRVL;
16378	cq->max_proc_limit = min(phba->cfg_cq_max_proc_limit, cq->entry_count);
16379
16380	if (cq->queue_id > phba->sli4_hba.cq_max)
16381		phba->sli4_hba.cq_max = cq->queue_id;
16382out:
16383	mempool_free(mbox, phba->mbox_mem_pool);
16384	return status;
16385}
16386
16387/**
16388 * lpfc_cq_create_set - Create a set of Completion Queues on the HBA for MRQ
16389 * @phba: HBA structure that indicates port to create a queue on.
16390 * @cqp: The queue structure array to use to create the completion queues.
16391 * @hdwq: The hardware queue array  with the EQ to bind completion queues to.
16392 * @type: Type of queue (EQ, GCQ, MCQ, WCQ, etc).
16393 * @subtype: Functional purpose of the queue (MBOX, IO, ELS, NVMET, etc).
16394 *
16395 * This function creates a set of  completion queue, s to support MRQ
16396 * as detailed in @cqp, on a port,
16397 * described by @phba by sending a CREATE_CQ_SET mailbox command to the HBA.
16398 *
16399 * The @phba struct is used to send mailbox command to HBA. The @cq struct
16400 * is used to get the entry count and entry size that are necessary to
16401 * determine the number of pages to allocate and use for this queue. The @eq
16402 * is used to indicate which event queue to bind this completion queue to. This
16403 * function will send the CREATE_CQ_SET mailbox command to the HBA to setup the
16404 * completion queue. This function is asynchronous and will wait for the mailbox
16405 * command to finish before continuing.
16406 *
16407 * On success this function will return a zero. If unable to allocate enough
16408 * memory this function will return -ENOMEM. If the queue create mailbox command
16409 * fails this function will return -ENXIO.
16410 **/
16411int
16412lpfc_cq_create_set(struct lpfc_hba *phba, struct lpfc_queue **cqp,
16413		   struct lpfc_sli4_hdw_queue *hdwq, uint32_t type,
16414		   uint32_t subtype)
16415{
16416	struct lpfc_queue *cq;
16417	struct lpfc_queue *eq;
16418	struct lpfc_mbx_cq_create_set *cq_set;
16419	struct lpfc_dmabuf *dmabuf;
16420	LPFC_MBOXQ_t *mbox;
16421	int rc, length, alloclen, status = 0;
16422	int cnt, idx, numcq, page_idx = 0;
16423	uint32_t shdr_status, shdr_add_status;
16424	union lpfc_sli4_cfg_shdr *shdr;
16425	uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
16426
16427	/* sanity check on queue memory */
16428	numcq = phba->cfg_nvmet_mrq;
16429	if (!cqp || !hdwq || !numcq)
16430		return -ENODEV;
16431
16432	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
16433	if (!mbox)
16434		return -ENOMEM;
16435
16436	length = sizeof(struct lpfc_mbx_cq_create_set);
16437	length += ((numcq * cqp[0]->page_count) *
16438		   sizeof(struct dma_address));
16439	alloclen = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
16440			LPFC_MBOX_OPCODE_FCOE_CQ_CREATE_SET, length,
16441			LPFC_SLI4_MBX_NEMBED);
16442	if (alloclen < length) {
16443		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
16444				"3098 Allocated DMA memory size (%d) is "
16445				"less than the requested DMA memory size "
16446				"(%d)\n", alloclen, length);
16447		status = -ENOMEM;
16448		goto out;
16449	}
16450	cq_set = mbox->sge_array->addr[0];
16451	shdr = (union lpfc_sli4_cfg_shdr *)&cq_set->cfg_shdr;
16452	bf_set(lpfc_mbox_hdr_version, &shdr->request, 0);
16453
16454	for (idx = 0; idx < numcq; idx++) {
16455		cq = cqp[idx];
16456		eq = hdwq[idx].hba_eq;
16457		if (!cq || !eq) {
16458			status = -ENOMEM;
16459			goto out;
16460		}
16461		if (!phba->sli4_hba.pc_sli4_params.supported)
16462			hw_page_size = cq->page_size;
16463
16464		switch (idx) {
16465		case 0:
16466			bf_set(lpfc_mbx_cq_create_set_page_size,
16467			       &cq_set->u.request,
16468			       (hw_page_size / SLI4_PAGE_SIZE));
16469			bf_set(lpfc_mbx_cq_create_set_num_pages,
16470			       &cq_set->u.request, cq->page_count);
16471			bf_set(lpfc_mbx_cq_create_set_evt,
16472			       &cq_set->u.request, 1);
16473			bf_set(lpfc_mbx_cq_create_set_valid,
16474			       &cq_set->u.request, 1);
16475			bf_set(lpfc_mbx_cq_create_set_cqe_size,
16476			       &cq_set->u.request, 0);
16477			bf_set(lpfc_mbx_cq_create_set_num_cq,
16478			       &cq_set->u.request, numcq);
16479			bf_set(lpfc_mbx_cq_create_set_autovalid,
16480			       &cq_set->u.request,
16481			       phba->sli4_hba.pc_sli4_params.cqav);
16482			switch (cq->entry_count) {
16483			case 2048:
16484			case 4096:
16485				if (phba->sli4_hba.pc_sli4_params.cqv ==
16486				    LPFC_Q_CREATE_VERSION_2) {
16487					bf_set(lpfc_mbx_cq_create_set_cqe_cnt,
16488					       &cq_set->u.request,
16489						cq->entry_count);
16490					bf_set(lpfc_mbx_cq_create_set_cqe_cnt,
16491					       &cq_set->u.request,
16492					       LPFC_CQ_CNT_WORD7);
16493					break;
16494				}
16495				fallthrough;
16496			default:
16497				lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
16498						"3118 Bad CQ count. (%d)\n",
16499						cq->entry_count);
16500				if (cq->entry_count < 256) {
16501					status = -EINVAL;
16502					goto out;
16503				}
16504				fallthrough;	/* otherwise default to smallest */
16505			case 256:
16506				bf_set(lpfc_mbx_cq_create_set_cqe_cnt,
16507				       &cq_set->u.request, LPFC_CQ_CNT_256);
16508				break;
16509			case 512:
16510				bf_set(lpfc_mbx_cq_create_set_cqe_cnt,
16511				       &cq_set->u.request, LPFC_CQ_CNT_512);
16512				break;
16513			case 1024:
16514				bf_set(lpfc_mbx_cq_create_set_cqe_cnt,
16515				       &cq_set->u.request, LPFC_CQ_CNT_1024);
16516				break;
16517			}
16518			bf_set(lpfc_mbx_cq_create_set_eq_id0,
16519			       &cq_set->u.request, eq->queue_id);
16520			break;
16521		case 1:
16522			bf_set(lpfc_mbx_cq_create_set_eq_id1,
16523			       &cq_set->u.request, eq->queue_id);
16524			break;
16525		case 2:
16526			bf_set(lpfc_mbx_cq_create_set_eq_id2,
16527			       &cq_set->u.request, eq->queue_id);
16528			break;
16529		case 3:
16530			bf_set(lpfc_mbx_cq_create_set_eq_id3,
16531			       &cq_set->u.request, eq->queue_id);
16532			break;
16533		case 4:
16534			bf_set(lpfc_mbx_cq_create_set_eq_id4,
16535			       &cq_set->u.request, eq->queue_id);
16536			break;
16537		case 5:
16538			bf_set(lpfc_mbx_cq_create_set_eq_id5,
16539			       &cq_set->u.request, eq->queue_id);
16540			break;
16541		case 6:
16542			bf_set(lpfc_mbx_cq_create_set_eq_id6,
16543			       &cq_set->u.request, eq->queue_id);
16544			break;
16545		case 7:
16546			bf_set(lpfc_mbx_cq_create_set_eq_id7,
16547			       &cq_set->u.request, eq->queue_id);
16548			break;
16549		case 8:
16550			bf_set(lpfc_mbx_cq_create_set_eq_id8,
16551			       &cq_set->u.request, eq->queue_id);
16552			break;
16553		case 9:
16554			bf_set(lpfc_mbx_cq_create_set_eq_id9,
16555			       &cq_set->u.request, eq->queue_id);
16556			break;
16557		case 10:
16558			bf_set(lpfc_mbx_cq_create_set_eq_id10,
16559			       &cq_set->u.request, eq->queue_id);
16560			break;
16561		case 11:
16562			bf_set(lpfc_mbx_cq_create_set_eq_id11,
16563			       &cq_set->u.request, eq->queue_id);
16564			break;
16565		case 12:
16566			bf_set(lpfc_mbx_cq_create_set_eq_id12,
16567			       &cq_set->u.request, eq->queue_id);
16568			break;
16569		case 13:
16570			bf_set(lpfc_mbx_cq_create_set_eq_id13,
16571			       &cq_set->u.request, eq->queue_id);
16572			break;
16573		case 14:
16574			bf_set(lpfc_mbx_cq_create_set_eq_id14,
16575			       &cq_set->u.request, eq->queue_id);
16576			break;
16577		case 15:
16578			bf_set(lpfc_mbx_cq_create_set_eq_id15,
16579			       &cq_set->u.request, eq->queue_id);
16580			break;
16581		}
16582
16583		/* link the cq onto the parent eq child list */
16584		list_add_tail(&cq->list, &eq->child_list);
16585		/* Set up completion queue's type and subtype */
16586		cq->type = type;
16587		cq->subtype = subtype;
16588		cq->assoc_qid = eq->queue_id;
16589		cq->assoc_qp = eq;
16590		cq->host_index = 0;
16591		cq->notify_interval = LPFC_CQ_NOTIFY_INTRVL;
16592		cq->max_proc_limit = min(phba->cfg_cq_max_proc_limit,
16593					 cq->entry_count);
16594		cq->chann = idx;
16595
16596		rc = 0;
16597		list_for_each_entry(dmabuf, &cq->page_list, list) {
16598			memset(dmabuf->virt, 0, hw_page_size);
16599			cnt = page_idx + dmabuf->buffer_tag;
16600			cq_set->u.request.page[cnt].addr_lo =
16601					putPaddrLow(dmabuf->phys);
16602			cq_set->u.request.page[cnt].addr_hi =
16603					putPaddrHigh(dmabuf->phys);
16604			rc++;
16605		}
16606		page_idx += rc;
16607	}
16608
16609	rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
16610
16611	/* The IOCTL status is embedded in the mailbox subheader. */
16612	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
16613	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
16614	if (shdr_status || shdr_add_status || rc) {
16615		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
16616				"3119 CQ_CREATE_SET mailbox failed with "
16617				"status x%x add_status x%x, mbx status x%x\n",
16618				shdr_status, shdr_add_status, rc);
16619		status = -ENXIO;
16620		goto out;
16621	}
16622	rc = bf_get(lpfc_mbx_cq_create_set_base_id, &cq_set->u.response);
16623	if (rc == 0xFFFF) {
16624		status = -ENXIO;
16625		goto out;
16626	}
16627
16628	for (idx = 0; idx < numcq; idx++) {
16629		cq = cqp[idx];
16630		cq->queue_id = rc + idx;
16631		if (cq->queue_id > phba->sli4_hba.cq_max)
16632			phba->sli4_hba.cq_max = cq->queue_id;
16633	}
16634
16635out:
16636	lpfc_sli4_mbox_cmd_free(phba, mbox);
16637	return status;
16638}
16639
16640/**
16641 * lpfc_mq_create_fb_init - Send MCC_CREATE without async events registration
16642 * @phba: HBA structure that indicates port to create a queue on.
16643 * @mq: The queue structure to use to create the mailbox queue.
16644 * @mbox: An allocated pointer to type LPFC_MBOXQ_t
16645 * @cq: The completion queue to associate with this cq.
16646 *
16647 * This function provides failback (fb) functionality when the
16648 * mq_create_ext fails on older FW generations.  It's purpose is identical
16649 * to mq_create_ext otherwise.
16650 *
16651 * This routine cannot fail as all attributes were previously accessed and
16652 * initialized in mq_create_ext.
16653 **/
16654static void
16655lpfc_mq_create_fb_init(struct lpfc_hba *phba, struct lpfc_queue *mq,
16656		       LPFC_MBOXQ_t *mbox, struct lpfc_queue *cq)
16657{
16658	struct lpfc_mbx_mq_create *mq_create;
16659	struct lpfc_dmabuf *dmabuf;
16660	int length;
16661
16662	length = (sizeof(struct lpfc_mbx_mq_create) -
16663		  sizeof(struct lpfc_sli4_cfg_mhdr));
16664	lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
16665			 LPFC_MBOX_OPCODE_MQ_CREATE,
16666			 length, LPFC_SLI4_MBX_EMBED);
16667	mq_create = &mbox->u.mqe.un.mq_create;
16668	bf_set(lpfc_mbx_mq_create_num_pages, &mq_create->u.request,
16669	       mq->page_count);
16670	bf_set(lpfc_mq_context_cq_id, &mq_create->u.request.context,
16671	       cq->queue_id);
16672	bf_set(lpfc_mq_context_valid, &mq_create->u.request.context, 1);
16673	switch (mq->entry_count) {
16674	case 16:
16675		bf_set(lpfc_mq_context_ring_size, &mq_create->u.request.context,
16676		       LPFC_MQ_RING_SIZE_16);
16677		break;
16678	case 32:
16679		bf_set(lpfc_mq_context_ring_size, &mq_create->u.request.context,
16680		       LPFC_MQ_RING_SIZE_32);
16681		break;
16682	case 64:
16683		bf_set(lpfc_mq_context_ring_size, &mq_create->u.request.context,
16684		       LPFC_MQ_RING_SIZE_64);
16685		break;
16686	case 128:
16687		bf_set(lpfc_mq_context_ring_size, &mq_create->u.request.context,
16688		       LPFC_MQ_RING_SIZE_128);
16689		break;
16690	}
16691	list_for_each_entry(dmabuf, &mq->page_list, list) {
16692		mq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
16693			putPaddrLow(dmabuf->phys);
16694		mq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
16695			putPaddrHigh(dmabuf->phys);
16696	}
16697}
16698
16699/**
16700 * lpfc_mq_create - Create a mailbox Queue on the HBA
16701 * @phba: HBA structure that indicates port to create a queue on.
16702 * @mq: The queue structure to use to create the mailbox queue.
16703 * @cq: The completion queue to associate with this cq.
16704 * @subtype: The queue's subtype.
16705 *
16706 * This function creates a mailbox queue, as detailed in @mq, on a port,
16707 * described by @phba by sending a MQ_CREATE mailbox command to the HBA.
16708 *
16709 * The @phba struct is used to send mailbox command to HBA. The @cq struct
16710 * is used to get the entry count and entry size that are necessary to
16711 * determine the number of pages to allocate and use for this queue. This
16712 * function will send the MQ_CREATE mailbox command to the HBA to setup the
16713 * mailbox queue. This function is asynchronous and will wait for the mailbox
16714 * command to finish before continuing.
16715 *
16716 * On success this function will return a zero. If unable to allocate enough
16717 * memory this function will return -ENOMEM. If the queue create mailbox command
16718 * fails this function will return -ENXIO.
16719 **/
16720int32_t
16721lpfc_mq_create(struct lpfc_hba *phba, struct lpfc_queue *mq,
16722	       struct lpfc_queue *cq, uint32_t subtype)
16723{
16724	struct lpfc_mbx_mq_create *mq_create;
16725	struct lpfc_mbx_mq_create_ext *mq_create_ext;
16726	struct lpfc_dmabuf *dmabuf;
16727	LPFC_MBOXQ_t *mbox;
16728	int rc, length, status = 0;
16729	uint32_t shdr_status, shdr_add_status;
16730	union lpfc_sli4_cfg_shdr *shdr;
16731	uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
16732
16733	/* sanity check on queue memory */
16734	if (!mq || !cq)
16735		return -ENODEV;
16736	if (!phba->sli4_hba.pc_sli4_params.supported)
16737		hw_page_size = SLI4_PAGE_SIZE;
16738
16739	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
16740	if (!mbox)
16741		return -ENOMEM;
16742	length = (sizeof(struct lpfc_mbx_mq_create_ext) -
16743		  sizeof(struct lpfc_sli4_cfg_mhdr));
16744	lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
16745			 LPFC_MBOX_OPCODE_MQ_CREATE_EXT,
16746			 length, LPFC_SLI4_MBX_EMBED);
16747
16748	mq_create_ext = &mbox->u.mqe.un.mq_create_ext;
16749	shdr = (union lpfc_sli4_cfg_shdr *) &mq_create_ext->header.cfg_shdr;
16750	bf_set(lpfc_mbx_mq_create_ext_num_pages,
16751	       &mq_create_ext->u.request, mq->page_count);
16752	bf_set(lpfc_mbx_mq_create_ext_async_evt_link,
16753	       &mq_create_ext->u.request, 1);
16754	bf_set(lpfc_mbx_mq_create_ext_async_evt_fip,
16755	       &mq_create_ext->u.request, 1);
16756	bf_set(lpfc_mbx_mq_create_ext_async_evt_group5,
16757	       &mq_create_ext->u.request, 1);
16758	bf_set(lpfc_mbx_mq_create_ext_async_evt_fc,
16759	       &mq_create_ext->u.request, 1);
16760	bf_set(lpfc_mbx_mq_create_ext_async_evt_sli,
16761	       &mq_create_ext->u.request, 1);
16762	bf_set(lpfc_mq_context_valid, &mq_create_ext->u.request.context, 1);
16763	bf_set(lpfc_mbox_hdr_version, &shdr->request,
16764	       phba->sli4_hba.pc_sli4_params.mqv);
16765	if (phba->sli4_hba.pc_sli4_params.mqv == LPFC_Q_CREATE_VERSION_1)
16766		bf_set(lpfc_mbx_mq_create_ext_cq_id, &mq_create_ext->u.request,
16767		       cq->queue_id);
16768	else
16769		bf_set(lpfc_mq_context_cq_id, &mq_create_ext->u.request.context,
16770		       cq->queue_id);
16771	switch (mq->entry_count) {
16772	default:
16773		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
16774				"0362 Unsupported MQ count. (%d)\n",
16775				mq->entry_count);
16776		if (mq->entry_count < 16) {
16777			status = -EINVAL;
16778			goto out;
16779		}
16780		fallthrough;	/* otherwise default to smallest count */
16781	case 16:
16782		bf_set(lpfc_mq_context_ring_size,
16783		       &mq_create_ext->u.request.context,
16784		       LPFC_MQ_RING_SIZE_16);
16785		break;
16786	case 32:
16787		bf_set(lpfc_mq_context_ring_size,
16788		       &mq_create_ext->u.request.context,
16789		       LPFC_MQ_RING_SIZE_32);
16790		break;
16791	case 64:
16792		bf_set(lpfc_mq_context_ring_size,
16793		       &mq_create_ext->u.request.context,
16794		       LPFC_MQ_RING_SIZE_64);
16795		break;
16796	case 128:
16797		bf_set(lpfc_mq_context_ring_size,
16798		       &mq_create_ext->u.request.context,
16799		       LPFC_MQ_RING_SIZE_128);
16800		break;
16801	}
16802	list_for_each_entry(dmabuf, &mq->page_list, list) {
16803		memset(dmabuf->virt, 0, hw_page_size);
16804		mq_create_ext->u.request.page[dmabuf->buffer_tag].addr_lo =
16805					putPaddrLow(dmabuf->phys);
16806		mq_create_ext->u.request.page[dmabuf->buffer_tag].addr_hi =
16807					putPaddrHigh(dmabuf->phys);
16808	}
16809	rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
16810	mq->queue_id = bf_get(lpfc_mbx_mq_create_q_id,
16811			      &mq_create_ext->u.response);
16812	if (rc != MBX_SUCCESS) {
16813		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
16814				"2795 MQ_CREATE_EXT failed with "
16815				"status x%x. Failback to MQ_CREATE.\n",
16816				rc);
16817		lpfc_mq_create_fb_init(phba, mq, mbox, cq);
16818		mq_create = &mbox->u.mqe.un.mq_create;
16819		rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
16820		shdr = (union lpfc_sli4_cfg_shdr *) &mq_create->header.cfg_shdr;
16821		mq->queue_id = bf_get(lpfc_mbx_mq_create_q_id,
16822				      &mq_create->u.response);
16823	}
16824
16825	/* The IOCTL status is embedded in the mailbox subheader. */
16826	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
16827	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
16828	if (shdr_status || shdr_add_status || rc) {
16829		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
16830				"2502 MQ_CREATE mailbox failed with "
16831				"status x%x add_status x%x, mbx status x%x\n",
16832				shdr_status, shdr_add_status, rc);
16833		status = -ENXIO;
16834		goto out;
16835	}
16836	if (mq->queue_id == 0xFFFF) {
16837		status = -ENXIO;
16838		goto out;
16839	}
16840	mq->type = LPFC_MQ;
16841	mq->assoc_qid = cq->queue_id;
16842	mq->subtype = subtype;
16843	mq->host_index = 0;
16844	mq->hba_index = 0;
16845
16846	/* link the mq onto the parent cq child list */
16847	list_add_tail(&mq->list, &cq->child_list);
16848out:
16849	mempool_free(mbox, phba->mbox_mem_pool);
16850	return status;
16851}
16852
16853/**
16854 * lpfc_wq_create - Create a Work Queue on the HBA
16855 * @phba: HBA structure that indicates port to create a queue on.
16856 * @wq: The queue structure to use to create the work queue.
16857 * @cq: The completion queue to bind this work queue to.
16858 * @subtype: The subtype of the work queue indicating its functionality.
16859 *
16860 * This function creates a work queue, as detailed in @wq, on a port, described
16861 * by @phba by sending a WQ_CREATE mailbox command to the HBA.
16862 *
16863 * The @phba struct is used to send mailbox command to HBA. The @wq struct
16864 * is used to get the entry count and entry size that are necessary to
16865 * determine the number of pages to allocate and use for this queue. The @cq
16866 * is used to indicate which completion queue to bind this work queue to. This
16867 * function will send the WQ_CREATE mailbox command to the HBA to setup the
16868 * work queue. This function is asynchronous and will wait for the mailbox
16869 * command to finish before continuing.
16870 *
16871 * On success this function will return a zero. If unable to allocate enough
16872 * memory this function will return -ENOMEM. If the queue create mailbox command
16873 * fails this function will return -ENXIO.
16874 **/
16875int
16876lpfc_wq_create(struct lpfc_hba *phba, struct lpfc_queue *wq,
16877	       struct lpfc_queue *cq, uint32_t subtype)
16878{
16879	struct lpfc_mbx_wq_create *wq_create;
16880	struct lpfc_dmabuf *dmabuf;
16881	LPFC_MBOXQ_t *mbox;
16882	int rc, length, status = 0;
16883	uint32_t shdr_status, shdr_add_status;
16884	union lpfc_sli4_cfg_shdr *shdr;
16885	uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
16886	struct dma_address *page;
16887	void __iomem *bar_memmap_p;
16888	uint32_t db_offset;
16889	uint16_t pci_barset;
16890	uint8_t dpp_barset;
16891	uint32_t dpp_offset;
16892	uint8_t wq_create_version;
16893#ifdef CONFIG_X86
16894	unsigned long pg_addr;
16895#endif
16896
16897	/* sanity check on queue memory */
16898	if (!wq || !cq)
16899		return -ENODEV;
16900	if (!phba->sli4_hba.pc_sli4_params.supported)
16901		hw_page_size = wq->page_size;
16902
16903	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
16904	if (!mbox)
16905		return -ENOMEM;
16906	length = (sizeof(struct lpfc_mbx_wq_create) -
16907		  sizeof(struct lpfc_sli4_cfg_mhdr));
16908	lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
16909			 LPFC_MBOX_OPCODE_FCOE_WQ_CREATE,
16910			 length, LPFC_SLI4_MBX_EMBED);
16911	wq_create = &mbox->u.mqe.un.wq_create;
16912	shdr = (union lpfc_sli4_cfg_shdr *) &wq_create->header.cfg_shdr;
16913	bf_set(lpfc_mbx_wq_create_num_pages, &wq_create->u.request,
16914		    wq->page_count);
16915	bf_set(lpfc_mbx_wq_create_cq_id, &wq_create->u.request,
16916		    cq->queue_id);
16917
16918	/* wqv is the earliest version supported, NOT the latest */
16919	bf_set(lpfc_mbox_hdr_version, &shdr->request,
16920	       phba->sli4_hba.pc_sli4_params.wqv);
16921
16922	if ((phba->sli4_hba.pc_sli4_params.wqsize & LPFC_WQ_SZ128_SUPPORT) ||
16923	    (wq->page_size > SLI4_PAGE_SIZE))
16924		wq_create_version = LPFC_Q_CREATE_VERSION_1;
16925	else
16926		wq_create_version = LPFC_Q_CREATE_VERSION_0;
16927
16928	switch (wq_create_version) {
16929	case LPFC_Q_CREATE_VERSION_1:
16930		bf_set(lpfc_mbx_wq_create_wqe_count, &wq_create->u.request_1,
16931		       wq->entry_count);
16932		bf_set(lpfc_mbox_hdr_version, &shdr->request,
16933		       LPFC_Q_CREATE_VERSION_1);
16934
16935		switch (wq->entry_size) {
16936		default:
16937		case 64:
16938			bf_set(lpfc_mbx_wq_create_wqe_size,
16939			       &wq_create->u.request_1,
16940			       LPFC_WQ_WQE_SIZE_64);
16941			break;
16942		case 128:
16943			bf_set(lpfc_mbx_wq_create_wqe_size,
16944			       &wq_create->u.request_1,
16945			       LPFC_WQ_WQE_SIZE_128);
16946			break;
16947		}
16948		/* Request DPP by default */
16949		bf_set(lpfc_mbx_wq_create_dpp_req, &wq_create->u.request_1, 1);
16950		bf_set(lpfc_mbx_wq_create_page_size,
16951		       &wq_create->u.request_1,
16952		       (wq->page_size / SLI4_PAGE_SIZE));
16953		page = wq_create->u.request_1.page;
16954		break;
16955	default:
16956		page = wq_create->u.request.page;
16957		break;
16958	}
16959
16960	list_for_each_entry(dmabuf, &wq->page_list, list) {
16961		memset(dmabuf->virt, 0, hw_page_size);
16962		page[dmabuf->buffer_tag].addr_lo = putPaddrLow(dmabuf->phys);
16963		page[dmabuf->buffer_tag].addr_hi = putPaddrHigh(dmabuf->phys);
16964	}
16965
16966	if (phba->sli4_hba.fw_func_mode & LPFC_DUA_MODE)
16967		bf_set(lpfc_mbx_wq_create_dua, &wq_create->u.request, 1);
16968
16969	rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
16970	/* The IOCTL status is embedded in the mailbox subheader. */
16971	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
16972	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
16973	if (shdr_status || shdr_add_status || rc) {
16974		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
16975				"2503 WQ_CREATE mailbox failed with "
16976				"status x%x add_status x%x, mbx status x%x\n",
16977				shdr_status, shdr_add_status, rc);
16978		status = -ENXIO;
16979		goto out;
16980	}
16981
16982	if (wq_create_version == LPFC_Q_CREATE_VERSION_0)
16983		wq->queue_id = bf_get(lpfc_mbx_wq_create_q_id,
16984					&wq_create->u.response);
16985	else
16986		wq->queue_id = bf_get(lpfc_mbx_wq_create_v1_q_id,
16987					&wq_create->u.response_1);
16988
16989	if (wq->queue_id == 0xFFFF) {
16990		status = -ENXIO;
16991		goto out;
16992	}
16993
16994	wq->db_format = LPFC_DB_LIST_FORMAT;
16995	if (wq_create_version == LPFC_Q_CREATE_VERSION_0) {
16996		if (phba->sli4_hba.fw_func_mode & LPFC_DUA_MODE) {
16997			wq->db_format = bf_get(lpfc_mbx_wq_create_db_format,
16998					       &wq_create->u.response);
16999			if ((wq->db_format != LPFC_DB_LIST_FORMAT) &&
17000			    (wq->db_format != LPFC_DB_RING_FORMAT)) {
17001				lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
17002						"3265 WQ[%d] doorbell format "
17003						"not supported: x%x\n",
17004						wq->queue_id, wq->db_format);
17005				status = -EINVAL;
17006				goto out;
17007			}
17008			pci_barset = bf_get(lpfc_mbx_wq_create_bar_set,
17009					    &wq_create->u.response);
17010			bar_memmap_p = lpfc_dual_chute_pci_bar_map(phba,
17011								   pci_barset);
17012			if (!bar_memmap_p) {
17013				lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
17014						"3263 WQ[%d] failed to memmap "
17015						"pci barset:x%x\n",
17016						wq->queue_id, pci_barset);
17017				status = -ENOMEM;
17018				goto out;
17019			}
17020			db_offset = wq_create->u.response.doorbell_offset;
17021			if ((db_offset != LPFC_ULP0_WQ_DOORBELL) &&
17022			    (db_offset != LPFC_ULP1_WQ_DOORBELL)) {
17023				lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
17024						"3252 WQ[%d] doorbell offset "
17025						"not supported: x%x\n",
17026						wq->queue_id, db_offset);
17027				status = -EINVAL;
17028				goto out;
17029			}
17030			wq->db_regaddr = bar_memmap_p + db_offset;
17031			lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
17032					"3264 WQ[%d]: barset:x%x, offset:x%x, "
17033					"format:x%x\n", wq->queue_id,
17034					pci_barset, db_offset, wq->db_format);
17035		} else
17036			wq->db_regaddr = phba->sli4_hba.WQDBregaddr;
17037	} else {
17038		/* Check if DPP was honored by the firmware */
17039		wq->dpp_enable = bf_get(lpfc_mbx_wq_create_dpp_rsp,
17040				    &wq_create->u.response_1);
17041		if (wq->dpp_enable) {
17042			pci_barset = bf_get(lpfc_mbx_wq_create_v1_bar_set,
17043					    &wq_create->u.response_1);
17044			bar_memmap_p = lpfc_dual_chute_pci_bar_map(phba,
17045								   pci_barset);
17046			if (!bar_memmap_p) {
17047				lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
17048						"3267 WQ[%d] failed to memmap "
17049						"pci barset:x%x\n",
17050						wq->queue_id, pci_barset);
17051				status = -ENOMEM;
17052				goto out;
17053			}
17054			db_offset = wq_create->u.response_1.doorbell_offset;
17055			wq->db_regaddr = bar_memmap_p + db_offset;
17056			wq->dpp_id = bf_get(lpfc_mbx_wq_create_dpp_id,
17057					    &wq_create->u.response_1);
17058			dpp_barset = bf_get(lpfc_mbx_wq_create_dpp_bar,
17059					    &wq_create->u.response_1);
17060			bar_memmap_p = lpfc_dual_chute_pci_bar_map(phba,
17061								   dpp_barset);
17062			if (!bar_memmap_p) {
17063				lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
17064						"3268 WQ[%d] failed to memmap "
17065						"pci barset:x%x\n",
17066						wq->queue_id, dpp_barset);
17067				status = -ENOMEM;
17068				goto out;
17069			}
17070			dpp_offset = wq_create->u.response_1.dpp_offset;
17071			wq->dpp_regaddr = bar_memmap_p + dpp_offset;
17072			lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
17073					"3271 WQ[%d]: barset:x%x, offset:x%x, "
17074					"dpp_id:x%x dpp_barset:x%x "
17075					"dpp_offset:x%x\n",
17076					wq->queue_id, pci_barset, db_offset,
17077					wq->dpp_id, dpp_barset, dpp_offset);
17078
17079#ifdef CONFIG_X86
17080			/* Enable combined writes for DPP aperture */
17081			pg_addr = (unsigned long)(wq->dpp_regaddr) & PAGE_MASK;
17082			rc = set_memory_wc(pg_addr, 1);
17083			if (rc) {
17084				lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
17085					"3272 Cannot setup Combined "
17086					"Write on WQ[%d] - disable DPP\n",
17087					wq->queue_id);
17088				phba->cfg_enable_dpp = 0;
17089			}
17090#else
17091			phba->cfg_enable_dpp = 0;
17092#endif
17093		} else
17094			wq->db_regaddr = phba->sli4_hba.WQDBregaddr;
17095	}
17096	wq->pring = kzalloc(sizeof(struct lpfc_sli_ring), GFP_KERNEL);
17097	if (wq->pring == NULL) {
17098		status = -ENOMEM;
17099		goto out;
17100	}
17101	wq->type = LPFC_WQ;
17102	wq->assoc_qid = cq->queue_id;
17103	wq->subtype = subtype;
17104	wq->host_index = 0;
17105	wq->hba_index = 0;
17106	wq->notify_interval = LPFC_WQ_NOTIFY_INTRVL;
17107
17108	/* link the wq onto the parent cq child list */
17109	list_add_tail(&wq->list, &cq->child_list);
17110out:
17111	mempool_free(mbox, phba->mbox_mem_pool);
17112	return status;
17113}
17114
17115/**
17116 * lpfc_rq_create - Create a Receive Queue on the HBA
17117 * @phba: HBA structure that indicates port to create a queue on.
17118 * @hrq: The queue structure to use to create the header receive queue.
17119 * @drq: The queue structure to use to create the data receive queue.
17120 * @cq: The completion queue to bind this work queue to.
17121 * @subtype: The subtype of the work queue indicating its functionality.
17122 *
17123 * This function creates a receive buffer queue pair , as detailed in @hrq and
17124 * @drq, on a port, described by @phba by sending a RQ_CREATE mailbox command
17125 * to the HBA.
17126 *
17127 * The @phba struct is used to send mailbox command to HBA. The @drq and @hrq
17128 * struct is used to get the entry count that is necessary to determine the
17129 * number of pages to use for this queue. The @cq is used to indicate which
17130 * completion queue to bind received buffers that are posted to these queues to.
17131 * This function will send the RQ_CREATE mailbox command to the HBA to setup the
17132 * receive queue pair. This function is asynchronous and will wait for the
17133 * mailbox command to finish before continuing.
17134 *
17135 * On success this function will return a zero. If unable to allocate enough
17136 * memory this function will return -ENOMEM. If the queue create mailbox command
17137 * fails this function will return -ENXIO.
17138 **/
17139int
17140lpfc_rq_create(struct lpfc_hba *phba, struct lpfc_queue *hrq,
17141	       struct lpfc_queue *drq, struct lpfc_queue *cq, uint32_t subtype)
17142{
17143	struct lpfc_mbx_rq_create *rq_create;
17144	struct lpfc_dmabuf *dmabuf;
17145	LPFC_MBOXQ_t *mbox;
17146	int rc, length, status = 0;
17147	uint32_t shdr_status, shdr_add_status;
17148	union lpfc_sli4_cfg_shdr *shdr;
17149	uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
17150	void __iomem *bar_memmap_p;
17151	uint32_t db_offset;
17152	uint16_t pci_barset;
17153
17154	/* sanity check on queue memory */
17155	if (!hrq || !drq || !cq)
17156		return -ENODEV;
17157	if (!phba->sli4_hba.pc_sli4_params.supported)
17158		hw_page_size = SLI4_PAGE_SIZE;
17159
17160	if (hrq->entry_count != drq->entry_count)
17161		return -EINVAL;
17162	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
17163	if (!mbox)
17164		return -ENOMEM;
17165	length = (sizeof(struct lpfc_mbx_rq_create) -
17166		  sizeof(struct lpfc_sli4_cfg_mhdr));
17167	lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
17168			 LPFC_MBOX_OPCODE_FCOE_RQ_CREATE,
17169			 length, LPFC_SLI4_MBX_EMBED);
17170	rq_create = &mbox->u.mqe.un.rq_create;
17171	shdr = (union lpfc_sli4_cfg_shdr *) &rq_create->header.cfg_shdr;
17172	bf_set(lpfc_mbox_hdr_version, &shdr->request,
17173	       phba->sli4_hba.pc_sli4_params.rqv);
17174	if (phba->sli4_hba.pc_sli4_params.rqv == LPFC_Q_CREATE_VERSION_1) {
17175		bf_set(lpfc_rq_context_rqe_count_1,
17176		       &rq_create->u.request.context,
17177		       hrq->entry_count);
17178		rq_create->u.request.context.buffer_size = LPFC_HDR_BUF_SIZE;
17179		bf_set(lpfc_rq_context_rqe_size,
17180		       &rq_create->u.request.context,
17181		       LPFC_RQE_SIZE_8);
17182		bf_set(lpfc_rq_context_page_size,
17183		       &rq_create->u.request.context,
17184		       LPFC_RQ_PAGE_SIZE_4096);
17185	} else {
17186		switch (hrq->entry_count) {
17187		default:
17188			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
17189					"2535 Unsupported RQ count. (%d)\n",
17190					hrq->entry_count);
17191			if (hrq->entry_count < 512) {
17192				status = -EINVAL;
17193				goto out;
17194			}
17195			fallthrough;	/* otherwise default to smallest count */
17196		case 512:
17197			bf_set(lpfc_rq_context_rqe_count,
17198			       &rq_create->u.request.context,
17199			       LPFC_RQ_RING_SIZE_512);
17200			break;
17201		case 1024:
17202			bf_set(lpfc_rq_context_rqe_count,
17203			       &rq_create->u.request.context,
17204			       LPFC_RQ_RING_SIZE_1024);
17205			break;
17206		case 2048:
17207			bf_set(lpfc_rq_context_rqe_count,
17208			       &rq_create->u.request.context,
17209			       LPFC_RQ_RING_SIZE_2048);
17210			break;
17211		case 4096:
17212			bf_set(lpfc_rq_context_rqe_count,
17213			       &rq_create->u.request.context,
17214			       LPFC_RQ_RING_SIZE_4096);
17215			break;
17216		}
17217		bf_set(lpfc_rq_context_buf_size, &rq_create->u.request.context,
17218		       LPFC_HDR_BUF_SIZE);
17219	}
17220	bf_set(lpfc_rq_context_cq_id, &rq_create->u.request.context,
17221	       cq->queue_id);
17222	bf_set(lpfc_mbx_rq_create_num_pages, &rq_create->u.request,
17223	       hrq->page_count);
17224	list_for_each_entry(dmabuf, &hrq->page_list, list) {
17225		memset(dmabuf->virt, 0, hw_page_size);
17226		rq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
17227					putPaddrLow(dmabuf->phys);
17228		rq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
17229					putPaddrHigh(dmabuf->phys);
17230	}
17231	if (phba->sli4_hba.fw_func_mode & LPFC_DUA_MODE)
17232		bf_set(lpfc_mbx_rq_create_dua, &rq_create->u.request, 1);
17233
17234	rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
17235	/* The IOCTL status is embedded in the mailbox subheader. */
17236	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
17237	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
17238	if (shdr_status || shdr_add_status || rc) {
17239		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
17240				"2504 RQ_CREATE mailbox failed with "
17241				"status x%x add_status x%x, mbx status x%x\n",
17242				shdr_status, shdr_add_status, rc);
17243		status = -ENXIO;
17244		goto out;
17245	}
17246	hrq->queue_id = bf_get(lpfc_mbx_rq_create_q_id, &rq_create->u.response);
17247	if (hrq->queue_id == 0xFFFF) {
17248		status = -ENXIO;
17249		goto out;
17250	}
17251
17252	if (phba->sli4_hba.fw_func_mode & LPFC_DUA_MODE) {
17253		hrq->db_format = bf_get(lpfc_mbx_rq_create_db_format,
17254					&rq_create->u.response);
17255		if ((hrq->db_format != LPFC_DB_LIST_FORMAT) &&
17256		    (hrq->db_format != LPFC_DB_RING_FORMAT)) {
17257			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
17258					"3262 RQ [%d] doorbell format not "
17259					"supported: x%x\n", hrq->queue_id,
17260					hrq->db_format);
17261			status = -EINVAL;
17262			goto out;
17263		}
17264
17265		pci_barset = bf_get(lpfc_mbx_rq_create_bar_set,
17266				    &rq_create->u.response);
17267		bar_memmap_p = lpfc_dual_chute_pci_bar_map(phba, pci_barset);
17268		if (!bar_memmap_p) {
17269			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
17270					"3269 RQ[%d] failed to memmap pci "
17271					"barset:x%x\n", hrq->queue_id,
17272					pci_barset);
17273			status = -ENOMEM;
17274			goto out;
17275		}
17276
17277		db_offset = rq_create->u.response.doorbell_offset;
17278		if ((db_offset != LPFC_ULP0_RQ_DOORBELL) &&
17279		    (db_offset != LPFC_ULP1_RQ_DOORBELL)) {
17280			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
17281					"3270 RQ[%d] doorbell offset not "
17282					"supported: x%x\n", hrq->queue_id,
17283					db_offset);
17284			status = -EINVAL;
17285			goto out;
17286		}
17287		hrq->db_regaddr = bar_memmap_p + db_offset;
17288		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
17289				"3266 RQ[qid:%d]: barset:x%x, offset:x%x, "
17290				"format:x%x\n", hrq->queue_id, pci_barset,
17291				db_offset, hrq->db_format);
17292	} else {
17293		hrq->db_format = LPFC_DB_RING_FORMAT;
17294		hrq->db_regaddr = phba->sli4_hba.RQDBregaddr;
17295	}
17296	hrq->type = LPFC_HRQ;
17297	hrq->assoc_qid = cq->queue_id;
17298	hrq->subtype = subtype;
17299	hrq->host_index = 0;
17300	hrq->hba_index = 0;
17301	hrq->notify_interval = LPFC_RQ_NOTIFY_INTRVL;
17302
17303	/* now create the data queue */
17304	lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
17305			 LPFC_MBOX_OPCODE_FCOE_RQ_CREATE,
17306			 length, LPFC_SLI4_MBX_EMBED);
17307	bf_set(lpfc_mbox_hdr_version, &shdr->request,
17308	       phba->sli4_hba.pc_sli4_params.rqv);
17309	if (phba->sli4_hba.pc_sli4_params.rqv == LPFC_Q_CREATE_VERSION_1) {
17310		bf_set(lpfc_rq_context_rqe_count_1,
17311		       &rq_create->u.request.context, hrq->entry_count);
17312		if (subtype == LPFC_NVMET)
17313			rq_create->u.request.context.buffer_size =
17314				LPFC_NVMET_DATA_BUF_SIZE;
17315		else
17316			rq_create->u.request.context.buffer_size =
17317				LPFC_DATA_BUF_SIZE;
17318		bf_set(lpfc_rq_context_rqe_size, &rq_create->u.request.context,
17319		       LPFC_RQE_SIZE_8);
17320		bf_set(lpfc_rq_context_page_size, &rq_create->u.request.context,
17321		       (PAGE_SIZE/SLI4_PAGE_SIZE));
17322	} else {
17323		switch (drq->entry_count) {
17324		default:
17325			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
17326					"2536 Unsupported RQ count. (%d)\n",
17327					drq->entry_count);
17328			if (drq->entry_count < 512) {
17329				status = -EINVAL;
17330				goto out;
17331			}
17332			fallthrough;	/* otherwise default to smallest count */
17333		case 512:
17334			bf_set(lpfc_rq_context_rqe_count,
17335			       &rq_create->u.request.context,
17336			       LPFC_RQ_RING_SIZE_512);
17337			break;
17338		case 1024:
17339			bf_set(lpfc_rq_context_rqe_count,
17340			       &rq_create->u.request.context,
17341			       LPFC_RQ_RING_SIZE_1024);
17342			break;
17343		case 2048:
17344			bf_set(lpfc_rq_context_rqe_count,
17345			       &rq_create->u.request.context,
17346			       LPFC_RQ_RING_SIZE_2048);
17347			break;
17348		case 4096:
17349			bf_set(lpfc_rq_context_rqe_count,
17350			       &rq_create->u.request.context,
17351			       LPFC_RQ_RING_SIZE_4096);
17352			break;
17353		}
17354		if (subtype == LPFC_NVMET)
17355			bf_set(lpfc_rq_context_buf_size,
17356			       &rq_create->u.request.context,
17357			       LPFC_NVMET_DATA_BUF_SIZE);
17358		else
17359			bf_set(lpfc_rq_context_buf_size,
17360			       &rq_create->u.request.context,
17361			       LPFC_DATA_BUF_SIZE);
17362	}
17363	bf_set(lpfc_rq_context_cq_id, &rq_create->u.request.context,
17364	       cq->queue_id);
17365	bf_set(lpfc_mbx_rq_create_num_pages, &rq_create->u.request,
17366	       drq->page_count);
17367	list_for_each_entry(dmabuf, &drq->page_list, list) {
17368		rq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
17369					putPaddrLow(dmabuf->phys);
17370		rq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
17371					putPaddrHigh(dmabuf->phys);
17372	}
17373	if (phba->sli4_hba.fw_func_mode & LPFC_DUA_MODE)
17374		bf_set(lpfc_mbx_rq_create_dua, &rq_create->u.request, 1);
17375	rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
17376	/* The IOCTL status is embedded in the mailbox subheader. */
17377	shdr = (union lpfc_sli4_cfg_shdr *) &rq_create->header.cfg_shdr;
17378	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
17379	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
17380	if (shdr_status || shdr_add_status || rc) {
17381		status = -ENXIO;
17382		goto out;
17383	}
17384	drq->queue_id = bf_get(lpfc_mbx_rq_create_q_id, &rq_create->u.response);
17385	if (drq->queue_id == 0xFFFF) {
17386		status = -ENXIO;
17387		goto out;
17388	}
17389	drq->type = LPFC_DRQ;
17390	drq->assoc_qid = cq->queue_id;
17391	drq->subtype = subtype;
17392	drq->host_index = 0;
17393	drq->hba_index = 0;
17394	drq->notify_interval = LPFC_RQ_NOTIFY_INTRVL;
17395
17396	/* link the header and data RQs onto the parent cq child list */
17397	list_add_tail(&hrq->list, &cq->child_list);
17398	list_add_tail(&drq->list, &cq->child_list);
17399
17400out:
17401	mempool_free(mbox, phba->mbox_mem_pool);
17402	return status;
17403}
17404
17405/**
17406 * lpfc_mrq_create - Create MRQ Receive Queues on the HBA
17407 * @phba: HBA structure that indicates port to create a queue on.
17408 * @hrqp: The queue structure array to use to create the header receive queues.
17409 * @drqp: The queue structure array to use to create the data receive queues.
17410 * @cqp: The completion queue array to bind these receive queues to.
17411 * @subtype: Functional purpose of the queue (MBOX, IO, ELS, NVMET, etc).
17412 *
17413 * This function creates a receive buffer queue pair , as detailed in @hrq and
17414 * @drq, on a port, described by @phba by sending a RQ_CREATE mailbox command
17415 * to the HBA.
17416 *
17417 * The @phba struct is used to send mailbox command to HBA. The @drq and @hrq
17418 * struct is used to get the entry count that is necessary to determine the
17419 * number of pages to use for this queue. The @cq is used to indicate which
17420 * completion queue to bind received buffers that are posted to these queues to.
17421 * This function will send the RQ_CREATE mailbox command to the HBA to setup the
17422 * receive queue pair. This function is asynchronous and will wait for the
17423 * mailbox command to finish before continuing.
17424 *
17425 * On success this function will return a zero. If unable to allocate enough
17426 * memory this function will return -ENOMEM. If the queue create mailbox command
17427 * fails this function will return -ENXIO.
17428 **/
17429int
17430lpfc_mrq_create(struct lpfc_hba *phba, struct lpfc_queue **hrqp,
17431		struct lpfc_queue **drqp, struct lpfc_queue **cqp,
17432		uint32_t subtype)
17433{
17434	struct lpfc_queue *hrq, *drq, *cq;
17435	struct lpfc_mbx_rq_create_v2 *rq_create;
17436	struct lpfc_dmabuf *dmabuf;
17437	LPFC_MBOXQ_t *mbox;
17438	int rc, length, alloclen, status = 0;
17439	int cnt, idx, numrq, page_idx = 0;
17440	uint32_t shdr_status, shdr_add_status;
17441	union lpfc_sli4_cfg_shdr *shdr;
17442	uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
17443
17444	numrq = phba->cfg_nvmet_mrq;
17445	/* sanity check on array memory */
17446	if (!hrqp || !drqp || !cqp || !numrq)
17447		return -ENODEV;
17448	if (!phba->sli4_hba.pc_sli4_params.supported)
17449		hw_page_size = SLI4_PAGE_SIZE;
17450
17451	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
17452	if (!mbox)
17453		return -ENOMEM;
17454
17455	length = sizeof(struct lpfc_mbx_rq_create_v2);
17456	length += ((2 * numrq * hrqp[0]->page_count) *
17457		   sizeof(struct dma_address));
17458
17459	alloclen = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
17460				    LPFC_MBOX_OPCODE_FCOE_RQ_CREATE, length,
17461				    LPFC_SLI4_MBX_NEMBED);
17462	if (alloclen < length) {
17463		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
17464				"3099 Allocated DMA memory size (%d) is "
17465				"less than the requested DMA memory size "
17466				"(%d)\n", alloclen, length);
17467		status = -ENOMEM;
17468		goto out;
17469	}
17470
17471
17472
17473	rq_create = mbox->sge_array->addr[0];
17474	shdr = (union lpfc_sli4_cfg_shdr *)&rq_create->cfg_shdr;
17475
17476	bf_set(lpfc_mbox_hdr_version, &shdr->request, LPFC_Q_CREATE_VERSION_2);
17477	cnt = 0;
17478
17479	for (idx = 0; idx < numrq; idx++) {
17480		hrq = hrqp[idx];
17481		drq = drqp[idx];
17482		cq  = cqp[idx];
17483
17484		/* sanity check on queue memory */
17485		if (!hrq || !drq || !cq) {
17486			status = -ENODEV;
17487			goto out;
17488		}
17489
17490		if (hrq->entry_count != drq->entry_count) {
17491			status = -EINVAL;
17492			goto out;
17493		}
17494
17495		if (idx == 0) {
17496			bf_set(lpfc_mbx_rq_create_num_pages,
17497			       &rq_create->u.request,
17498			       hrq->page_count);
17499			bf_set(lpfc_mbx_rq_create_rq_cnt,
17500			       &rq_create->u.request, (numrq * 2));
17501			bf_set(lpfc_mbx_rq_create_dnb, &rq_create->u.request,
17502			       1);
17503			bf_set(lpfc_rq_context_base_cq,
17504			       &rq_create->u.request.context,
17505			       cq->queue_id);
17506			bf_set(lpfc_rq_context_data_size,
17507			       &rq_create->u.request.context,
17508			       LPFC_NVMET_DATA_BUF_SIZE);
17509			bf_set(lpfc_rq_context_hdr_size,
17510			       &rq_create->u.request.context,
17511			       LPFC_HDR_BUF_SIZE);
17512			bf_set(lpfc_rq_context_rqe_count_1,
17513			       &rq_create->u.request.context,
17514			       hrq->entry_count);
17515			bf_set(lpfc_rq_context_rqe_size,
17516			       &rq_create->u.request.context,
17517			       LPFC_RQE_SIZE_8);
17518			bf_set(lpfc_rq_context_page_size,
17519			       &rq_create->u.request.context,
17520			       (PAGE_SIZE/SLI4_PAGE_SIZE));
17521		}
17522		rc = 0;
17523		list_for_each_entry(dmabuf, &hrq->page_list, list) {
17524			memset(dmabuf->virt, 0, hw_page_size);
17525			cnt = page_idx + dmabuf->buffer_tag;
17526			rq_create->u.request.page[cnt].addr_lo =
17527					putPaddrLow(dmabuf->phys);
17528			rq_create->u.request.page[cnt].addr_hi =
17529					putPaddrHigh(dmabuf->phys);
17530			rc++;
17531		}
17532		page_idx += rc;
17533
17534		rc = 0;
17535		list_for_each_entry(dmabuf, &drq->page_list, list) {
17536			memset(dmabuf->virt, 0, hw_page_size);
17537			cnt = page_idx + dmabuf->buffer_tag;
17538			rq_create->u.request.page[cnt].addr_lo =
17539					putPaddrLow(dmabuf->phys);
17540			rq_create->u.request.page[cnt].addr_hi =
17541					putPaddrHigh(dmabuf->phys);
17542			rc++;
17543		}
17544		page_idx += rc;
17545
17546		hrq->db_format = LPFC_DB_RING_FORMAT;
17547		hrq->db_regaddr = phba->sli4_hba.RQDBregaddr;
17548		hrq->type = LPFC_HRQ;
17549		hrq->assoc_qid = cq->queue_id;
17550		hrq->subtype = subtype;
17551		hrq->host_index = 0;
17552		hrq->hba_index = 0;
17553		hrq->notify_interval = LPFC_RQ_NOTIFY_INTRVL;
17554
17555		drq->db_format = LPFC_DB_RING_FORMAT;
17556		drq->db_regaddr = phba->sli4_hba.RQDBregaddr;
17557		drq->type = LPFC_DRQ;
17558		drq->assoc_qid = cq->queue_id;
17559		drq->subtype = subtype;
17560		drq->host_index = 0;
17561		drq->hba_index = 0;
17562		drq->notify_interval = LPFC_RQ_NOTIFY_INTRVL;
17563
17564		list_add_tail(&hrq->list, &cq->child_list);
17565		list_add_tail(&drq->list, &cq->child_list);
17566	}
17567
17568	rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
17569	/* The IOCTL status is embedded in the mailbox subheader. */
17570	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
17571	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
17572	if (shdr_status || shdr_add_status || rc) {
17573		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
17574				"3120 RQ_CREATE mailbox failed with "
17575				"status x%x add_status x%x, mbx status x%x\n",
17576				shdr_status, shdr_add_status, rc);
17577		status = -ENXIO;
17578		goto out;
17579	}
17580	rc = bf_get(lpfc_mbx_rq_create_q_id, &rq_create->u.response);
17581	if (rc == 0xFFFF) {
17582		status = -ENXIO;
17583		goto out;
17584	}
17585
17586	/* Initialize all RQs with associated queue id */
17587	for (idx = 0; idx < numrq; idx++) {
17588		hrq = hrqp[idx];
17589		hrq->queue_id = rc + (2 * idx);
17590		drq = drqp[idx];
17591		drq->queue_id = rc + (2 * idx) + 1;
17592	}
17593
17594out:
17595	lpfc_sli4_mbox_cmd_free(phba, mbox);
17596	return status;
17597}
17598
17599/**
17600 * lpfc_eq_destroy - Destroy an event Queue on the HBA
17601 * @phba: HBA structure that indicates port to destroy a queue on.
17602 * @eq: The queue structure associated with the queue to destroy.
17603 *
17604 * This function destroys a queue, as detailed in @eq by sending an mailbox
17605 * command, specific to the type of queue, to the HBA.
17606 *
17607 * The @eq struct is used to get the queue ID of the queue to destroy.
17608 *
17609 * On success this function will return a zero. If the queue destroy mailbox
17610 * command fails this function will return -ENXIO.
17611 **/
17612int
17613lpfc_eq_destroy(struct lpfc_hba *phba, struct lpfc_queue *eq)
17614{
17615	LPFC_MBOXQ_t *mbox;
17616	int rc, length, status = 0;
17617	uint32_t shdr_status, shdr_add_status;
17618	union lpfc_sli4_cfg_shdr *shdr;
17619
17620	/* sanity check on queue memory */
17621	if (!eq)
17622		return -ENODEV;
17623
17624	mbox = mempool_alloc(eq->phba->mbox_mem_pool, GFP_KERNEL);
17625	if (!mbox)
17626		return -ENOMEM;
17627	length = (sizeof(struct lpfc_mbx_eq_destroy) -
17628		  sizeof(struct lpfc_sli4_cfg_mhdr));
17629	lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
17630			 LPFC_MBOX_OPCODE_EQ_DESTROY,
17631			 length, LPFC_SLI4_MBX_EMBED);
17632	bf_set(lpfc_mbx_eq_destroy_q_id, &mbox->u.mqe.un.eq_destroy.u.request,
17633	       eq->queue_id);
17634	mbox->vport = eq->phba->pport;
17635	mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
17636
17637	rc = lpfc_sli_issue_mbox(eq->phba, mbox, MBX_POLL);
17638	/* The IOCTL status is embedded in the mailbox subheader. */
17639	shdr = (union lpfc_sli4_cfg_shdr *)
17640		&mbox->u.mqe.un.eq_destroy.header.cfg_shdr;
17641	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
17642	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
17643	if (shdr_status || shdr_add_status || rc) {
17644		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
17645				"2505 EQ_DESTROY mailbox failed with "
17646				"status x%x add_status x%x, mbx status x%x\n",
17647				shdr_status, shdr_add_status, rc);
17648		status = -ENXIO;
17649	}
17650
17651	/* Remove eq from any list */
17652	list_del_init(&eq->list);
17653	mempool_free(mbox, eq->phba->mbox_mem_pool);
17654	return status;
17655}
17656
17657/**
17658 * lpfc_cq_destroy - Destroy a Completion Queue on the HBA
17659 * @phba: HBA structure that indicates port to destroy a queue on.
17660 * @cq: The queue structure associated with the queue to destroy.
17661 *
17662 * This function destroys a queue, as detailed in @cq by sending an mailbox
17663 * command, specific to the type of queue, to the HBA.
17664 *
17665 * The @cq struct is used to get the queue ID of the queue to destroy.
17666 *
17667 * On success this function will return a zero. If the queue destroy mailbox
17668 * command fails this function will return -ENXIO.
17669 **/
17670int
17671lpfc_cq_destroy(struct lpfc_hba *phba, struct lpfc_queue *cq)
17672{
17673	LPFC_MBOXQ_t *mbox;
17674	int rc, length, status = 0;
17675	uint32_t shdr_status, shdr_add_status;
17676	union lpfc_sli4_cfg_shdr *shdr;
17677
17678	/* sanity check on queue memory */
17679	if (!cq)
17680		return -ENODEV;
17681	mbox = mempool_alloc(cq->phba->mbox_mem_pool, GFP_KERNEL);
17682	if (!mbox)
17683		return -ENOMEM;
17684	length = (sizeof(struct lpfc_mbx_cq_destroy) -
17685		  sizeof(struct lpfc_sli4_cfg_mhdr));
17686	lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
17687			 LPFC_MBOX_OPCODE_CQ_DESTROY,
17688			 length, LPFC_SLI4_MBX_EMBED);
17689	bf_set(lpfc_mbx_cq_destroy_q_id, &mbox->u.mqe.un.cq_destroy.u.request,
17690	       cq->queue_id);
17691	mbox->vport = cq->phba->pport;
17692	mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
17693	rc = lpfc_sli_issue_mbox(cq->phba, mbox, MBX_POLL);
17694	/* The IOCTL status is embedded in the mailbox subheader. */
17695	shdr = (union lpfc_sli4_cfg_shdr *)
17696		&mbox->u.mqe.un.wq_create.header.cfg_shdr;
17697	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
17698	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
17699	if (shdr_status || shdr_add_status || rc) {
17700		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
17701				"2506 CQ_DESTROY mailbox failed with "
17702				"status x%x add_status x%x, mbx status x%x\n",
17703				shdr_status, shdr_add_status, rc);
17704		status = -ENXIO;
17705	}
17706	/* Remove cq from any list */
17707	list_del_init(&cq->list);
17708	mempool_free(mbox, cq->phba->mbox_mem_pool);
17709	return status;
17710}
17711
17712/**
17713 * lpfc_mq_destroy - Destroy a Mailbox Queue on the HBA
17714 * @phba: HBA structure that indicates port to destroy a queue on.
17715 * @mq: The queue structure associated with the queue to destroy.
17716 *
17717 * This function destroys a queue, as detailed in @mq by sending an mailbox
17718 * command, specific to the type of queue, to the HBA.
17719 *
17720 * The @mq struct is used to get the queue ID of the queue to destroy.
17721 *
17722 * On success this function will return a zero. If the queue destroy mailbox
17723 * command fails this function will return -ENXIO.
17724 **/
17725int
17726lpfc_mq_destroy(struct lpfc_hba *phba, struct lpfc_queue *mq)
17727{
17728	LPFC_MBOXQ_t *mbox;
17729	int rc, length, status = 0;
17730	uint32_t shdr_status, shdr_add_status;
17731	union lpfc_sli4_cfg_shdr *shdr;
17732
17733	/* sanity check on queue memory */
17734	if (!mq)
17735		return -ENODEV;
17736	mbox = mempool_alloc(mq->phba->mbox_mem_pool, GFP_KERNEL);
17737	if (!mbox)
17738		return -ENOMEM;
17739	length = (sizeof(struct lpfc_mbx_mq_destroy) -
17740		  sizeof(struct lpfc_sli4_cfg_mhdr));
17741	lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
17742			 LPFC_MBOX_OPCODE_MQ_DESTROY,
17743			 length, LPFC_SLI4_MBX_EMBED);
17744	bf_set(lpfc_mbx_mq_destroy_q_id, &mbox->u.mqe.un.mq_destroy.u.request,
17745	       mq->queue_id);
17746	mbox->vport = mq->phba->pport;
17747	mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
17748	rc = lpfc_sli_issue_mbox(mq->phba, mbox, MBX_POLL);
17749	/* The IOCTL status is embedded in the mailbox subheader. */
17750	shdr = (union lpfc_sli4_cfg_shdr *)
17751		&mbox->u.mqe.un.mq_destroy.header.cfg_shdr;
17752	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
17753	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
17754	if (shdr_status || shdr_add_status || rc) {
17755		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
17756				"2507 MQ_DESTROY mailbox failed with "
17757				"status x%x add_status x%x, mbx status x%x\n",
17758				shdr_status, shdr_add_status, rc);
17759		status = -ENXIO;
17760	}
17761	/* Remove mq from any list */
17762	list_del_init(&mq->list);
17763	mempool_free(mbox, mq->phba->mbox_mem_pool);
17764	return status;
17765}
17766
17767/**
17768 * lpfc_wq_destroy - Destroy a Work Queue on the HBA
17769 * @phba: HBA structure that indicates port to destroy a queue on.
17770 * @wq: The queue structure associated with the queue to destroy.
17771 *
17772 * This function destroys a queue, as detailed in @wq by sending an mailbox
17773 * command, specific to the type of queue, to the HBA.
17774 *
17775 * The @wq struct is used to get the queue ID of the queue to destroy.
17776 *
17777 * On success this function will return a zero. If the queue destroy mailbox
17778 * command fails this function will return -ENXIO.
17779 **/
17780int
17781lpfc_wq_destroy(struct lpfc_hba *phba, struct lpfc_queue *wq)
17782{
17783	LPFC_MBOXQ_t *mbox;
17784	int rc, length, status = 0;
17785	uint32_t shdr_status, shdr_add_status;
17786	union lpfc_sli4_cfg_shdr *shdr;
17787
17788	/* sanity check on queue memory */
17789	if (!wq)
17790		return -ENODEV;
17791	mbox = mempool_alloc(wq->phba->mbox_mem_pool, GFP_KERNEL);
17792	if (!mbox)
17793		return -ENOMEM;
17794	length = (sizeof(struct lpfc_mbx_wq_destroy) -
17795		  sizeof(struct lpfc_sli4_cfg_mhdr));
17796	lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
17797			 LPFC_MBOX_OPCODE_FCOE_WQ_DESTROY,
17798			 length, LPFC_SLI4_MBX_EMBED);
17799	bf_set(lpfc_mbx_wq_destroy_q_id, &mbox->u.mqe.un.wq_destroy.u.request,
17800	       wq->queue_id);
17801	mbox->vport = wq->phba->pport;
17802	mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
17803	rc = lpfc_sli_issue_mbox(wq->phba, mbox, MBX_POLL);
17804	shdr = (union lpfc_sli4_cfg_shdr *)
17805		&mbox->u.mqe.un.wq_destroy.header.cfg_shdr;
17806	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
17807	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
17808	if (shdr_status || shdr_add_status || rc) {
17809		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
17810				"2508 WQ_DESTROY mailbox failed with "
17811				"status x%x add_status x%x, mbx status x%x\n",
17812				shdr_status, shdr_add_status, rc);
17813		status = -ENXIO;
17814	}
17815	/* Remove wq from any list */
17816	list_del_init(&wq->list);
17817	kfree(wq->pring);
17818	wq->pring = NULL;
17819	mempool_free(mbox, wq->phba->mbox_mem_pool);
17820	return status;
17821}
17822
17823/**
17824 * lpfc_rq_destroy - Destroy a Receive Queue on the HBA
17825 * @phba: HBA structure that indicates port to destroy a queue on.
17826 * @hrq: The queue structure associated with the queue to destroy.
17827 * @drq: The queue structure associated with the queue to destroy.
17828 *
17829 * This function destroys a queue, as detailed in @rq by sending an mailbox
17830 * command, specific to the type of queue, to the HBA.
17831 *
17832 * The @rq struct is used to get the queue ID of the queue to destroy.
17833 *
17834 * On success this function will return a zero. If the queue destroy mailbox
17835 * command fails this function will return -ENXIO.
17836 **/
17837int
17838lpfc_rq_destroy(struct lpfc_hba *phba, struct lpfc_queue *hrq,
17839		struct lpfc_queue *drq)
17840{
17841	LPFC_MBOXQ_t *mbox;
17842	int rc, length, status = 0;
17843	uint32_t shdr_status, shdr_add_status;
17844	union lpfc_sli4_cfg_shdr *shdr;
17845
17846	/* sanity check on queue memory */
17847	if (!hrq || !drq)
17848		return -ENODEV;
17849	mbox = mempool_alloc(hrq->phba->mbox_mem_pool, GFP_KERNEL);
17850	if (!mbox)
17851		return -ENOMEM;
17852	length = (sizeof(struct lpfc_mbx_rq_destroy) -
17853		  sizeof(struct lpfc_sli4_cfg_mhdr));
17854	lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
17855			 LPFC_MBOX_OPCODE_FCOE_RQ_DESTROY,
17856			 length, LPFC_SLI4_MBX_EMBED);
17857	bf_set(lpfc_mbx_rq_destroy_q_id, &mbox->u.mqe.un.rq_destroy.u.request,
17858	       hrq->queue_id);
17859	mbox->vport = hrq->phba->pport;
17860	mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
17861	rc = lpfc_sli_issue_mbox(hrq->phba, mbox, MBX_POLL);
17862	/* The IOCTL status is embedded in the mailbox subheader. */
17863	shdr = (union lpfc_sli4_cfg_shdr *)
17864		&mbox->u.mqe.un.rq_destroy.header.cfg_shdr;
17865	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
17866	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
17867	if (shdr_status || shdr_add_status || rc) {
17868		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
17869				"2509 RQ_DESTROY mailbox failed with "
17870				"status x%x add_status x%x, mbx status x%x\n",
17871				shdr_status, shdr_add_status, rc);
17872		mempool_free(mbox, hrq->phba->mbox_mem_pool);
17873		return -ENXIO;
17874	}
17875	bf_set(lpfc_mbx_rq_destroy_q_id, &mbox->u.mqe.un.rq_destroy.u.request,
17876	       drq->queue_id);
17877	rc = lpfc_sli_issue_mbox(drq->phba, mbox, MBX_POLL);
17878	shdr = (union lpfc_sli4_cfg_shdr *)
17879		&mbox->u.mqe.un.rq_destroy.header.cfg_shdr;
17880	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
17881	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
17882	if (shdr_status || shdr_add_status || rc) {
17883		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
17884				"2510 RQ_DESTROY mailbox failed with "
17885				"status x%x add_status x%x, mbx status x%x\n",
17886				shdr_status, shdr_add_status, rc);
17887		status = -ENXIO;
17888	}
17889	list_del_init(&hrq->list);
17890	list_del_init(&drq->list);
17891	mempool_free(mbox, hrq->phba->mbox_mem_pool);
17892	return status;
17893}
17894
17895/**
17896 * lpfc_sli4_post_sgl - Post scatter gather list for an XRI to HBA
17897 * @phba: The virtual port for which this call being executed.
17898 * @pdma_phys_addr0: Physical address of the 1st SGL page.
17899 * @pdma_phys_addr1: Physical address of the 2nd SGL page.
17900 * @xritag: the xritag that ties this io to the SGL pages.
17901 *
17902 * This routine will post the sgl pages for the IO that has the xritag
17903 * that is in the iocbq structure. The xritag is assigned during iocbq
17904 * creation and persists for as long as the driver is loaded.
17905 * if the caller has fewer than 256 scatter gather segments to map then
17906 * pdma_phys_addr1 should be 0.
17907 * If the caller needs to map more than 256 scatter gather segment then
17908 * pdma_phys_addr1 should be a valid physical address.
17909 * physical address for SGLs must be 64 byte aligned.
17910 * If you are going to map 2 SGL's then the first one must have 256 entries
17911 * the second sgl can have between 1 and 256 entries.
17912 *
17913 * Return codes:
17914 * 	0 - Success
17915 * 	-ENXIO, -ENOMEM - Failure
17916 **/
17917int
17918lpfc_sli4_post_sgl(struct lpfc_hba *phba,
17919		dma_addr_t pdma_phys_addr0,
17920		dma_addr_t pdma_phys_addr1,
17921		uint16_t xritag)
17922{
17923	struct lpfc_mbx_post_sgl_pages *post_sgl_pages;
17924	LPFC_MBOXQ_t *mbox;
17925	int rc;
17926	uint32_t shdr_status, shdr_add_status;
17927	uint32_t mbox_tmo;
17928	union lpfc_sli4_cfg_shdr *shdr;
17929
17930	if (xritag == NO_XRI) {
17931		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
17932				"0364 Invalid param:\n");
17933		return -EINVAL;
17934	}
17935
17936	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
17937	if (!mbox)
17938		return -ENOMEM;
17939
17940	lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
17941			LPFC_MBOX_OPCODE_FCOE_POST_SGL_PAGES,
17942			sizeof(struct lpfc_mbx_post_sgl_pages) -
17943			sizeof(struct lpfc_sli4_cfg_mhdr), LPFC_SLI4_MBX_EMBED);
17944
17945	post_sgl_pages = (struct lpfc_mbx_post_sgl_pages *)
17946				&mbox->u.mqe.un.post_sgl_pages;
17947	bf_set(lpfc_post_sgl_pages_xri, post_sgl_pages, xritag);
17948	bf_set(lpfc_post_sgl_pages_xricnt, post_sgl_pages, 1);
17949
17950	post_sgl_pages->sgl_pg_pairs[0].sgl_pg0_addr_lo	=
17951				cpu_to_le32(putPaddrLow(pdma_phys_addr0));
17952	post_sgl_pages->sgl_pg_pairs[0].sgl_pg0_addr_hi =
17953				cpu_to_le32(putPaddrHigh(pdma_phys_addr0));
17954
17955	post_sgl_pages->sgl_pg_pairs[0].sgl_pg1_addr_lo	=
17956				cpu_to_le32(putPaddrLow(pdma_phys_addr1));
17957	post_sgl_pages->sgl_pg_pairs[0].sgl_pg1_addr_hi =
17958				cpu_to_le32(putPaddrHigh(pdma_phys_addr1));
17959	if (!phba->sli4_hba.intr_enable)
17960		rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
17961	else {
17962		mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
17963		rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
17964	}
17965	/* The IOCTL status is embedded in the mailbox subheader. */
17966	shdr = (union lpfc_sli4_cfg_shdr *) &post_sgl_pages->header.cfg_shdr;
17967	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
17968	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
17969	if (!phba->sli4_hba.intr_enable)
17970		mempool_free(mbox, phba->mbox_mem_pool);
17971	else if (rc != MBX_TIMEOUT)
17972		mempool_free(mbox, phba->mbox_mem_pool);
17973	if (shdr_status || shdr_add_status || rc) {
17974		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
17975				"2511 POST_SGL mailbox failed with "
17976				"status x%x add_status x%x, mbx status x%x\n",
17977				shdr_status, shdr_add_status, rc);
17978	}
17979	return 0;
17980}
17981
17982/**
17983 * lpfc_sli4_alloc_xri - Get an available rpi in the device's range
17984 * @phba: pointer to lpfc hba data structure.
17985 *
17986 * This routine is invoked to post rpi header templates to the
17987 * HBA consistent with the SLI-4 interface spec.  This routine
17988 * posts a SLI4_PAGE_SIZE memory region to the port to hold up to
17989 * SLI4_PAGE_SIZE modulo 64 rpi context headers.
17990 *
17991 * Returns
17992 *	A nonzero rpi defined as rpi_base <= rpi < max_rpi if successful
17993 *	LPFC_RPI_ALLOC_ERROR if no rpis are available.
17994 **/
17995static uint16_t
17996lpfc_sli4_alloc_xri(struct lpfc_hba *phba)
17997{
17998	unsigned long xri;
17999
18000	/*
18001	 * Fetch the next logical xri.  Because this index is logical,
18002	 * the driver starts at 0 each time.
18003	 */
18004	spin_lock_irq(&phba->hbalock);
18005	xri = find_first_zero_bit(phba->sli4_hba.xri_bmask,
18006				 phba->sli4_hba.max_cfg_param.max_xri);
18007	if (xri >= phba->sli4_hba.max_cfg_param.max_xri) {
18008		spin_unlock_irq(&phba->hbalock);
18009		return NO_XRI;
18010	} else {
18011		set_bit(xri, phba->sli4_hba.xri_bmask);
18012		phba->sli4_hba.max_cfg_param.xri_used++;
18013	}
18014	spin_unlock_irq(&phba->hbalock);
18015	return xri;
18016}
18017
18018/**
18019 * __lpfc_sli4_free_xri - Release an xri for reuse.
18020 * @phba: pointer to lpfc hba data structure.
18021 * @xri: xri to release.
18022 *
18023 * This routine is invoked to release an xri to the pool of
18024 * available rpis maintained by the driver.
18025 **/
18026static void
18027__lpfc_sli4_free_xri(struct lpfc_hba *phba, int xri)
18028{
18029	if (test_and_clear_bit(xri, phba->sli4_hba.xri_bmask)) {
18030		phba->sli4_hba.max_cfg_param.xri_used--;
18031	}
18032}
18033
18034/**
18035 * lpfc_sli4_free_xri - Release an xri for reuse.
18036 * @phba: pointer to lpfc hba data structure.
18037 * @xri: xri to release.
18038 *
18039 * This routine is invoked to release an xri to the pool of
18040 * available rpis maintained by the driver.
18041 **/
18042void
18043lpfc_sli4_free_xri(struct lpfc_hba *phba, int xri)
18044{
18045	spin_lock_irq(&phba->hbalock);
18046	__lpfc_sli4_free_xri(phba, xri);
18047	spin_unlock_irq(&phba->hbalock);
18048}
18049
18050/**
18051 * lpfc_sli4_next_xritag - Get an xritag for the io
18052 * @phba: Pointer to HBA context object.
18053 *
18054 * This function gets an xritag for the iocb. If there is no unused xritag
18055 * it will return 0xffff.
18056 * The function returns the allocated xritag if successful, else returns zero.
18057 * Zero is not a valid xritag.
18058 * The caller is not required to hold any lock.
18059 **/
18060uint16_t
18061lpfc_sli4_next_xritag(struct lpfc_hba *phba)
18062{
18063	uint16_t xri_index;
18064
18065	xri_index = lpfc_sli4_alloc_xri(phba);
18066	if (xri_index == NO_XRI)
18067		lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
18068				"2004 Failed to allocate XRI.last XRITAG is %d"
18069				" Max XRI is %d, Used XRI is %d\n",
18070				xri_index,
18071				phba->sli4_hba.max_cfg_param.max_xri,
18072				phba->sli4_hba.max_cfg_param.xri_used);
18073	return xri_index;
18074}
18075
18076/**
18077 * lpfc_sli4_post_sgl_list - post a block of ELS sgls to the port.
18078 * @phba: pointer to lpfc hba data structure.
18079 * @post_sgl_list: pointer to els sgl entry list.
18080 * @post_cnt: number of els sgl entries on the list.
18081 *
18082 * This routine is invoked to post a block of driver's sgl pages to the
18083 * HBA using non-embedded mailbox command. No Lock is held. This routine
18084 * is only called when the driver is loading and after all IO has been
18085 * stopped.
18086 **/
18087static int
18088lpfc_sli4_post_sgl_list(struct lpfc_hba *phba,
18089			    struct list_head *post_sgl_list,
18090			    int post_cnt)
18091{
18092	struct lpfc_sglq *sglq_entry = NULL, *sglq_next = NULL;
18093	struct lpfc_mbx_post_uembed_sgl_page1 *sgl;
18094	struct sgl_page_pairs *sgl_pg_pairs;
18095	void *viraddr;
18096	LPFC_MBOXQ_t *mbox;
18097	uint32_t reqlen, alloclen, pg_pairs;
18098	uint32_t mbox_tmo;
18099	uint16_t xritag_start = 0;
18100	int rc = 0;
18101	uint32_t shdr_status, shdr_add_status;
18102	union lpfc_sli4_cfg_shdr *shdr;
18103
18104	reqlen = post_cnt * sizeof(struct sgl_page_pairs) +
18105		 sizeof(union lpfc_sli4_cfg_shdr) + sizeof(uint32_t);
18106	if (reqlen > SLI4_PAGE_SIZE) {
18107		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
18108				"2559 Block sgl registration required DMA "
18109				"size (%d) great than a page\n", reqlen);
18110		return -ENOMEM;
18111	}
18112
18113	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
18114	if (!mbox)
18115		return -ENOMEM;
18116
18117	/* Allocate DMA memory and set up the non-embedded mailbox command */
18118	alloclen = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
18119			 LPFC_MBOX_OPCODE_FCOE_POST_SGL_PAGES, reqlen,
18120			 LPFC_SLI4_MBX_NEMBED);
18121
18122	if (alloclen < reqlen) {
18123		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
18124				"0285 Allocated DMA memory size (%d) is "
18125				"less than the requested DMA memory "
18126				"size (%d)\n", alloclen, reqlen);
18127		lpfc_sli4_mbox_cmd_free(phba, mbox);
18128		return -ENOMEM;
18129	}
18130	/* Set up the SGL pages in the non-embedded DMA pages */
18131	viraddr = mbox->sge_array->addr[0];
18132	sgl = (struct lpfc_mbx_post_uembed_sgl_page1 *)viraddr;
18133	sgl_pg_pairs = &sgl->sgl_pg_pairs;
18134
18135	pg_pairs = 0;
18136	list_for_each_entry_safe(sglq_entry, sglq_next, post_sgl_list, list) {
18137		/* Set up the sge entry */
18138		sgl_pg_pairs->sgl_pg0_addr_lo =
18139				cpu_to_le32(putPaddrLow(sglq_entry->phys));
18140		sgl_pg_pairs->sgl_pg0_addr_hi =
18141				cpu_to_le32(putPaddrHigh(sglq_entry->phys));
18142		sgl_pg_pairs->sgl_pg1_addr_lo =
18143				cpu_to_le32(putPaddrLow(0));
18144		sgl_pg_pairs->sgl_pg1_addr_hi =
18145				cpu_to_le32(putPaddrHigh(0));
18146
18147		/* Keep the first xritag on the list */
18148		if (pg_pairs == 0)
18149			xritag_start = sglq_entry->sli4_xritag;
18150		sgl_pg_pairs++;
18151		pg_pairs++;
18152	}
18153
18154	/* Complete initialization and perform endian conversion. */
18155	bf_set(lpfc_post_sgl_pages_xri, sgl, xritag_start);
18156	bf_set(lpfc_post_sgl_pages_xricnt, sgl, post_cnt);
18157	sgl->word0 = cpu_to_le32(sgl->word0);
18158
18159	if (!phba->sli4_hba.intr_enable)
18160		rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
18161	else {
18162		mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
18163		rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
18164	}
18165	shdr = (union lpfc_sli4_cfg_shdr *) &sgl->cfg_shdr;
18166	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
18167	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
18168	if (!phba->sli4_hba.intr_enable)
18169		lpfc_sli4_mbox_cmd_free(phba, mbox);
18170	else if (rc != MBX_TIMEOUT)
18171		lpfc_sli4_mbox_cmd_free(phba, mbox);
18172	if (shdr_status || shdr_add_status || rc) {
18173		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
18174				"2513 POST_SGL_BLOCK mailbox command failed "
18175				"status x%x add_status x%x mbx status x%x\n",
18176				shdr_status, shdr_add_status, rc);
18177		rc = -ENXIO;
18178	}
18179	return rc;
18180}
18181
18182/**
18183 * lpfc_sli4_post_io_sgl_block - post a block of nvme sgl list to firmware
18184 * @phba: pointer to lpfc hba data structure.
18185 * @nblist: pointer to nvme buffer list.
18186 * @count: number of scsi buffers on the list.
18187 *
18188 * This routine is invoked to post a block of @count scsi sgl pages from a
18189 * SCSI buffer list @nblist to the HBA using non-embedded mailbox command.
18190 * No Lock is held.
18191 *
18192 **/
18193static int
18194lpfc_sli4_post_io_sgl_block(struct lpfc_hba *phba, struct list_head *nblist,
18195			    int count)
18196{
18197	struct lpfc_io_buf *lpfc_ncmd;
18198	struct lpfc_mbx_post_uembed_sgl_page1 *sgl;
18199	struct sgl_page_pairs *sgl_pg_pairs;
18200	void *viraddr;
18201	LPFC_MBOXQ_t *mbox;
18202	uint32_t reqlen, alloclen, pg_pairs;
18203	uint32_t mbox_tmo;
18204	uint16_t xritag_start = 0;
18205	int rc = 0;
18206	uint32_t shdr_status, shdr_add_status;
18207	dma_addr_t pdma_phys_bpl1;
18208	union lpfc_sli4_cfg_shdr *shdr;
18209
18210	/* Calculate the requested length of the dma memory */
18211	reqlen = count * sizeof(struct sgl_page_pairs) +
18212		 sizeof(union lpfc_sli4_cfg_shdr) + sizeof(uint32_t);
18213	if (reqlen > SLI4_PAGE_SIZE) {
18214		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
18215				"6118 Block sgl registration required DMA "
18216				"size (%d) great than a page\n", reqlen);
18217		return -ENOMEM;
18218	}
18219	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
18220	if (!mbox) {
18221		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
18222				"6119 Failed to allocate mbox cmd memory\n");
18223		return -ENOMEM;
18224	}
18225
18226	/* Allocate DMA memory and set up the non-embedded mailbox command */
18227	alloclen = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
18228				    LPFC_MBOX_OPCODE_FCOE_POST_SGL_PAGES,
18229				    reqlen, LPFC_SLI4_MBX_NEMBED);
18230
18231	if (alloclen < reqlen) {
18232		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
18233				"6120 Allocated DMA memory size (%d) is "
18234				"less than the requested DMA memory "
18235				"size (%d)\n", alloclen, reqlen);
18236		lpfc_sli4_mbox_cmd_free(phba, mbox);
18237		return -ENOMEM;
18238	}
18239
18240	/* Get the first SGE entry from the non-embedded DMA memory */
18241	viraddr = mbox->sge_array->addr[0];
18242
18243	/* Set up the SGL pages in the non-embedded DMA pages */
18244	sgl = (struct lpfc_mbx_post_uembed_sgl_page1 *)viraddr;
18245	sgl_pg_pairs = &sgl->sgl_pg_pairs;
18246
18247	pg_pairs = 0;
18248	list_for_each_entry(lpfc_ncmd, nblist, list) {
18249		/* Set up the sge entry */
18250		sgl_pg_pairs->sgl_pg0_addr_lo =
18251			cpu_to_le32(putPaddrLow(lpfc_ncmd->dma_phys_sgl));
18252		sgl_pg_pairs->sgl_pg0_addr_hi =
18253			cpu_to_le32(putPaddrHigh(lpfc_ncmd->dma_phys_sgl));
18254		if (phba->cfg_sg_dma_buf_size > SGL_PAGE_SIZE)
18255			pdma_phys_bpl1 = lpfc_ncmd->dma_phys_sgl +
18256						SGL_PAGE_SIZE;
18257		else
18258			pdma_phys_bpl1 = 0;
18259		sgl_pg_pairs->sgl_pg1_addr_lo =
18260			cpu_to_le32(putPaddrLow(pdma_phys_bpl1));
18261		sgl_pg_pairs->sgl_pg1_addr_hi =
18262			cpu_to_le32(putPaddrHigh(pdma_phys_bpl1));
18263		/* Keep the first xritag on the list */
18264		if (pg_pairs == 0)
18265			xritag_start = lpfc_ncmd->cur_iocbq.sli4_xritag;
18266		sgl_pg_pairs++;
18267		pg_pairs++;
18268	}
18269	bf_set(lpfc_post_sgl_pages_xri, sgl, xritag_start);
18270	bf_set(lpfc_post_sgl_pages_xricnt, sgl, pg_pairs);
18271	/* Perform endian conversion if necessary */
18272	sgl->word0 = cpu_to_le32(sgl->word0);
18273
18274	if (!phba->sli4_hba.intr_enable) {
18275		rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
18276	} else {
18277		mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
18278		rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
18279	}
18280	shdr = (union lpfc_sli4_cfg_shdr *)&sgl->cfg_shdr;
18281	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
18282	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
18283	if (!phba->sli4_hba.intr_enable)
18284		lpfc_sli4_mbox_cmd_free(phba, mbox);
18285	else if (rc != MBX_TIMEOUT)
18286		lpfc_sli4_mbox_cmd_free(phba, mbox);
18287	if (shdr_status || shdr_add_status || rc) {
18288		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
18289				"6125 POST_SGL_BLOCK mailbox command failed "
18290				"status x%x add_status x%x mbx status x%x\n",
18291				shdr_status, shdr_add_status, rc);
18292		rc = -ENXIO;
18293	}
18294	return rc;
18295}
18296
18297/**
18298 * lpfc_sli4_post_io_sgl_list - Post blocks of nvme buffer sgls from a list
18299 * @phba: pointer to lpfc hba data structure.
18300 * @post_nblist: pointer to the nvme buffer list.
18301 * @sb_count: number of nvme buffers.
18302 *
18303 * This routine walks a list of nvme buffers that was passed in. It attempts
18304 * to construct blocks of nvme buffer sgls which contains contiguous xris and
18305 * uses the non-embedded SGL block post mailbox commands to post to the port.
18306 * For single NVME buffer sgl with non-contiguous xri, if any, it shall use
18307 * embedded SGL post mailbox command for posting. The @post_nblist passed in
18308 * must be local list, thus no lock is needed when manipulate the list.
18309 *
18310 * Returns: 0 = failure, non-zero number of successfully posted buffers.
18311 **/
18312int
18313lpfc_sli4_post_io_sgl_list(struct lpfc_hba *phba,
18314			   struct list_head *post_nblist, int sb_count)
18315{
18316	struct lpfc_io_buf *lpfc_ncmd, *lpfc_ncmd_next;
18317	int status, sgl_size;
18318	int post_cnt = 0, block_cnt = 0, num_posting = 0, num_posted = 0;
18319	dma_addr_t pdma_phys_sgl1;
18320	int last_xritag = NO_XRI;
18321	int cur_xritag;
18322	LIST_HEAD(prep_nblist);
18323	LIST_HEAD(blck_nblist);
18324	LIST_HEAD(nvme_nblist);
18325
18326	/* sanity check */
18327	if (sb_count <= 0)
18328		return -EINVAL;
18329
18330	sgl_size = phba->cfg_sg_dma_buf_size;
18331	list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next, post_nblist, list) {
18332		list_del_init(&lpfc_ncmd->list);
18333		block_cnt++;
18334		if ((last_xritag != NO_XRI) &&
18335		    (lpfc_ncmd->cur_iocbq.sli4_xritag != last_xritag + 1)) {
18336			/* a hole in xri block, form a sgl posting block */
18337			list_splice_init(&prep_nblist, &blck_nblist);
18338			post_cnt = block_cnt - 1;
18339			/* prepare list for next posting block */
18340			list_add_tail(&lpfc_ncmd->list, &prep_nblist);
18341			block_cnt = 1;
18342		} else {
18343			/* prepare list for next posting block */
18344			list_add_tail(&lpfc_ncmd->list, &prep_nblist);
18345			/* enough sgls for non-embed sgl mbox command */
18346			if (block_cnt == LPFC_NEMBED_MBOX_SGL_CNT) {
18347				list_splice_init(&prep_nblist, &blck_nblist);
18348				post_cnt = block_cnt;
18349				block_cnt = 0;
18350			}
18351		}
18352		num_posting++;
18353		last_xritag = lpfc_ncmd->cur_iocbq.sli4_xritag;
18354
18355		/* end of repost sgl list condition for NVME buffers */
18356		if (num_posting == sb_count) {
18357			if (post_cnt == 0) {
18358				/* last sgl posting block */
18359				list_splice_init(&prep_nblist, &blck_nblist);
18360				post_cnt = block_cnt;
18361			} else if (block_cnt == 1) {
18362				/* last single sgl with non-contiguous xri */
18363				if (sgl_size > SGL_PAGE_SIZE)
18364					pdma_phys_sgl1 =
18365						lpfc_ncmd->dma_phys_sgl +
18366						SGL_PAGE_SIZE;
18367				else
18368					pdma_phys_sgl1 = 0;
18369				cur_xritag = lpfc_ncmd->cur_iocbq.sli4_xritag;
18370				status = lpfc_sli4_post_sgl(
18371						phba, lpfc_ncmd->dma_phys_sgl,
18372						pdma_phys_sgl1, cur_xritag);
18373				if (status) {
18374					/* Post error.  Buffer unavailable. */
18375					lpfc_ncmd->flags |=
18376						LPFC_SBUF_NOT_POSTED;
18377				} else {
18378					/* Post success. Bffer available. */
18379					lpfc_ncmd->flags &=
18380						~LPFC_SBUF_NOT_POSTED;
18381					lpfc_ncmd->status = IOSTAT_SUCCESS;
18382					num_posted++;
18383				}
18384				/* success, put on NVME buffer sgl list */
18385				list_add_tail(&lpfc_ncmd->list, &nvme_nblist);
18386			}
18387		}
18388
18389		/* continue until a nembed page worth of sgls */
18390		if (post_cnt == 0)
18391			continue;
18392
18393		/* post block of NVME buffer list sgls */
18394		status = lpfc_sli4_post_io_sgl_block(phba, &blck_nblist,
18395						     post_cnt);
18396
18397		/* don't reset xirtag due to hole in xri block */
18398		if (block_cnt == 0)
18399			last_xritag = NO_XRI;
18400
18401		/* reset NVME buffer post count for next round of posting */
18402		post_cnt = 0;
18403
18404		/* put posted NVME buffer-sgl posted on NVME buffer sgl list */
18405		while (!list_empty(&blck_nblist)) {
18406			list_remove_head(&blck_nblist, lpfc_ncmd,
18407					 struct lpfc_io_buf, list);
18408			if (status) {
18409				/* Post error.  Mark buffer unavailable. */
18410				lpfc_ncmd->flags |= LPFC_SBUF_NOT_POSTED;
18411			} else {
18412				/* Post success, Mark buffer available. */
18413				lpfc_ncmd->flags &= ~LPFC_SBUF_NOT_POSTED;
18414				lpfc_ncmd->status = IOSTAT_SUCCESS;
18415				num_posted++;
18416			}
18417			list_add_tail(&lpfc_ncmd->list, &nvme_nblist);
18418		}
18419	}
18420	/* Push NVME buffers with sgl posted to the available list */
18421	lpfc_io_buf_replenish(phba, &nvme_nblist);
18422
18423	return num_posted;
18424}
18425
18426/**
18427 * lpfc_fc_frame_check - Check that this frame is a valid frame to handle
18428 * @phba: pointer to lpfc_hba struct that the frame was received on
18429 * @fc_hdr: A pointer to the FC Header data (In Big Endian Format)
18430 *
18431 * This function checks the fields in the @fc_hdr to see if the FC frame is a
18432 * valid type of frame that the LPFC driver will handle. This function will
18433 * return a zero if the frame is a valid frame or a non zero value when the
18434 * frame does not pass the check.
18435 **/
18436static int
18437lpfc_fc_frame_check(struct lpfc_hba *phba, struct fc_frame_header *fc_hdr)
18438{
18439	/*  make rctl_names static to save stack space */
18440	struct fc_vft_header *fc_vft_hdr;
18441	uint32_t *header = (uint32_t *) fc_hdr;
18442
18443#define FC_RCTL_MDS_DIAGS	0xF4
18444
18445	switch (fc_hdr->fh_r_ctl) {
18446	case FC_RCTL_DD_UNCAT:		/* uncategorized information */
18447	case FC_RCTL_DD_SOL_DATA:	/* solicited data */
18448	case FC_RCTL_DD_UNSOL_CTL:	/* unsolicited control */
18449	case FC_RCTL_DD_SOL_CTL:	/* solicited control or reply */
18450	case FC_RCTL_DD_UNSOL_DATA:	/* unsolicited data */
18451	case FC_RCTL_DD_DATA_DESC:	/* data descriptor */
18452	case FC_RCTL_DD_UNSOL_CMD:	/* unsolicited command */
18453	case FC_RCTL_DD_CMD_STATUS:	/* command status */
18454	case FC_RCTL_ELS_REQ:	/* extended link services request */
18455	case FC_RCTL_ELS_REP:	/* extended link services reply */
18456	case FC_RCTL_ELS4_REQ:	/* FC-4 ELS request */
18457	case FC_RCTL_ELS4_REP:	/* FC-4 ELS reply */
18458	case FC_RCTL_BA_ABTS: 	/* basic link service abort */
18459	case FC_RCTL_BA_RMC: 	/* remove connection */
18460	case FC_RCTL_BA_ACC:	/* basic accept */
18461	case FC_RCTL_BA_RJT:	/* basic reject */
18462	case FC_RCTL_BA_PRMT:
18463	case FC_RCTL_ACK_1:	/* acknowledge_1 */
18464	case FC_RCTL_ACK_0:	/* acknowledge_0 */
18465	case FC_RCTL_P_RJT:	/* port reject */
18466	case FC_RCTL_F_RJT:	/* fabric reject */
18467	case FC_RCTL_P_BSY:	/* port busy */
18468	case FC_RCTL_F_BSY:	/* fabric busy to data frame */
18469	case FC_RCTL_F_BSYL:	/* fabric busy to link control frame */
18470	case FC_RCTL_LCR:	/* link credit reset */
18471	case FC_RCTL_MDS_DIAGS: /* MDS Diagnostics */
18472	case FC_RCTL_END:	/* end */
18473		break;
18474	case FC_RCTL_VFTH:	/* Virtual Fabric tagging Header */
18475		fc_vft_hdr = (struct fc_vft_header *)fc_hdr;
18476		fc_hdr = &((struct fc_frame_header *)fc_vft_hdr)[1];
18477		return lpfc_fc_frame_check(phba, fc_hdr);
18478	case FC_RCTL_BA_NOP:	/* basic link service NOP */
18479	default:
18480		goto drop;
18481	}
18482
18483	switch (fc_hdr->fh_type) {
18484	case FC_TYPE_BLS:
18485	case FC_TYPE_ELS:
18486	case FC_TYPE_FCP:
18487	case FC_TYPE_CT:
18488	case FC_TYPE_NVME:
18489		break;
18490	case FC_TYPE_IP:
18491	case FC_TYPE_ILS:
18492	default:
18493		goto drop;
18494	}
18495
18496	lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
18497			"2538 Received frame rctl:x%x, type:x%x, "
18498			"frame Data:%08x %08x %08x %08x %08x %08x %08x\n",
18499			fc_hdr->fh_r_ctl, fc_hdr->fh_type,
18500			be32_to_cpu(header[0]), be32_to_cpu(header[1]),
18501			be32_to_cpu(header[2]), be32_to_cpu(header[3]),
18502			be32_to_cpu(header[4]), be32_to_cpu(header[5]),
18503			be32_to_cpu(header[6]));
18504	return 0;
18505drop:
18506	lpfc_printf_log(phba, KERN_WARNING, LOG_ELS,
18507			"2539 Dropped frame rctl:x%x type:x%x\n",
18508			fc_hdr->fh_r_ctl, fc_hdr->fh_type);
18509	return 1;
18510}
18511
18512/**
18513 * lpfc_fc_hdr_get_vfi - Get the VFI from an FC frame
18514 * @fc_hdr: A pointer to the FC Header data (In Big Endian Format)
18515 *
18516 * This function processes the FC header to retrieve the VFI from the VF
18517 * header, if one exists. This function will return the VFI if one exists
18518 * or 0 if no VSAN Header exists.
18519 **/
18520static uint32_t
18521lpfc_fc_hdr_get_vfi(struct fc_frame_header *fc_hdr)
18522{
18523	struct fc_vft_header *fc_vft_hdr = (struct fc_vft_header *)fc_hdr;
18524
18525	if (fc_hdr->fh_r_ctl != FC_RCTL_VFTH)
18526		return 0;
18527	return bf_get(fc_vft_hdr_vf_id, fc_vft_hdr);
18528}
18529
18530/**
18531 * lpfc_fc_frame_to_vport - Finds the vport that a frame is destined to
18532 * @phba: Pointer to the HBA structure to search for the vport on
18533 * @fc_hdr: A pointer to the FC Header data (In Big Endian Format)
18534 * @fcfi: The FC Fabric ID that the frame came from
18535 * @did: Destination ID to match against
18536 *
18537 * This function searches the @phba for a vport that matches the content of the
18538 * @fc_hdr passed in and the @fcfi. This function uses the @fc_hdr to fetch the
18539 * VFI, if the Virtual Fabric Tagging Header exists, and the DID. This function
18540 * returns the matching vport pointer or NULL if unable to match frame to a
18541 * vport.
18542 **/
18543static struct lpfc_vport *
18544lpfc_fc_frame_to_vport(struct lpfc_hba *phba, struct fc_frame_header *fc_hdr,
18545		       uint16_t fcfi, uint32_t did)
18546{
18547	struct lpfc_vport **vports;
18548	struct lpfc_vport *vport = NULL;
18549	int i;
18550
18551	if (did == Fabric_DID)
18552		return phba->pport;
18553	if (test_bit(FC_PT2PT, &phba->pport->fc_flag) &&
18554	    phba->link_state != LPFC_HBA_READY)
18555		return phba->pport;
18556
18557	vports = lpfc_create_vport_work_array(phba);
18558	if (vports != NULL) {
18559		for (i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) {
18560			if (phba->fcf.fcfi == fcfi &&
18561			    vports[i]->vfi == lpfc_fc_hdr_get_vfi(fc_hdr) &&
18562			    vports[i]->fc_myDID == did) {
18563				vport = vports[i];
18564				break;
18565			}
18566		}
18567	}
18568	lpfc_destroy_vport_work_array(phba, vports);
18569	return vport;
18570}
18571
18572/**
18573 * lpfc_update_rcv_time_stamp - Update vport's rcv seq time stamp
18574 * @vport: The vport to work on.
18575 *
18576 * This function updates the receive sequence time stamp for this vport. The
18577 * receive sequence time stamp indicates the time that the last frame of the
18578 * the sequence that has been idle for the longest amount of time was received.
18579 * the driver uses this time stamp to indicate if any received sequences have
18580 * timed out.
18581 **/
18582static void
18583lpfc_update_rcv_time_stamp(struct lpfc_vport *vport)
18584{
18585	struct lpfc_dmabuf *h_buf;
18586	struct hbq_dmabuf *dmabuf = NULL;
18587
18588	/* get the oldest sequence on the rcv list */
18589	h_buf = list_get_first(&vport->rcv_buffer_list,
18590			       struct lpfc_dmabuf, list);
18591	if (!h_buf)
18592		return;
18593	dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf);
18594	vport->rcv_buffer_time_stamp = dmabuf->time_stamp;
18595}
18596
18597/**
18598 * lpfc_cleanup_rcv_buffers - Cleans up all outstanding receive sequences.
18599 * @vport: The vport that the received sequences were sent to.
18600 *
18601 * This function cleans up all outstanding received sequences. This is called
18602 * by the driver when a link event or user action invalidates all the received
18603 * sequences.
18604 **/
18605void
18606lpfc_cleanup_rcv_buffers(struct lpfc_vport *vport)
18607{
18608	struct lpfc_dmabuf *h_buf, *hnext;
18609	struct lpfc_dmabuf *d_buf, *dnext;
18610	struct hbq_dmabuf *dmabuf = NULL;
18611
18612	/* start with the oldest sequence on the rcv list */
18613	list_for_each_entry_safe(h_buf, hnext, &vport->rcv_buffer_list, list) {
18614		dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf);
18615		list_del_init(&dmabuf->hbuf.list);
18616		list_for_each_entry_safe(d_buf, dnext,
18617					 &dmabuf->dbuf.list, list) {
18618			list_del_init(&d_buf->list);
18619			lpfc_in_buf_free(vport->phba, d_buf);
18620		}
18621		lpfc_in_buf_free(vport->phba, &dmabuf->dbuf);
18622	}
18623}
18624
18625/**
18626 * lpfc_rcv_seq_check_edtov - Cleans up timed out receive sequences.
18627 * @vport: The vport that the received sequences were sent to.
18628 *
18629 * This function determines whether any received sequences have timed out by
18630 * first checking the vport's rcv_buffer_time_stamp. If this time_stamp
18631 * indicates that there is at least one timed out sequence this routine will
18632 * go through the received sequences one at a time from most inactive to most
18633 * active to determine which ones need to be cleaned up. Once it has determined
18634 * that a sequence needs to be cleaned up it will simply free up the resources
18635 * without sending an abort.
18636 **/
18637void
18638lpfc_rcv_seq_check_edtov(struct lpfc_vport *vport)
18639{
18640	struct lpfc_dmabuf *h_buf, *hnext;
18641	struct lpfc_dmabuf *d_buf, *dnext;
18642	struct hbq_dmabuf *dmabuf = NULL;
18643	unsigned long timeout;
18644	int abort_count = 0;
18645
18646	timeout = (msecs_to_jiffies(vport->phba->fc_edtov) +
18647		   vport->rcv_buffer_time_stamp);
18648	if (list_empty(&vport->rcv_buffer_list) ||
18649	    time_before(jiffies, timeout))
18650		return;
18651	/* start with the oldest sequence on the rcv list */
18652	list_for_each_entry_safe(h_buf, hnext, &vport->rcv_buffer_list, list) {
18653		dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf);
18654		timeout = (msecs_to_jiffies(vport->phba->fc_edtov) +
18655			   dmabuf->time_stamp);
18656		if (time_before(jiffies, timeout))
18657			break;
18658		abort_count++;
18659		list_del_init(&dmabuf->hbuf.list);
18660		list_for_each_entry_safe(d_buf, dnext,
18661					 &dmabuf->dbuf.list, list) {
18662			list_del_init(&d_buf->list);
18663			lpfc_in_buf_free(vport->phba, d_buf);
18664		}
18665		lpfc_in_buf_free(vport->phba, &dmabuf->dbuf);
18666	}
18667	if (abort_count)
18668		lpfc_update_rcv_time_stamp(vport);
18669}
18670
18671/**
18672 * lpfc_fc_frame_add - Adds a frame to the vport's list of received sequences
18673 * @vport: pointer to a vitural port
18674 * @dmabuf: pointer to a dmabuf that describes the hdr and data of the FC frame
18675 *
18676 * This function searches through the existing incomplete sequences that have
18677 * been sent to this @vport. If the frame matches one of the incomplete
18678 * sequences then the dbuf in the @dmabuf is added to the list of frames that
18679 * make up that sequence. If no sequence is found that matches this frame then
18680 * the function will add the hbuf in the @dmabuf to the @vport's rcv_buffer_list
18681 * This function returns a pointer to the first dmabuf in the sequence list that
18682 * the frame was linked to.
18683 **/
18684static struct hbq_dmabuf *
18685lpfc_fc_frame_add(struct lpfc_vport *vport, struct hbq_dmabuf *dmabuf)
18686{
18687	struct fc_frame_header *new_hdr;
18688	struct fc_frame_header *temp_hdr;
18689	struct lpfc_dmabuf *d_buf;
18690	struct lpfc_dmabuf *h_buf;
18691	struct hbq_dmabuf *seq_dmabuf = NULL;
18692	struct hbq_dmabuf *temp_dmabuf = NULL;
18693	uint8_t	found = 0;
18694
18695	INIT_LIST_HEAD(&dmabuf->dbuf.list);
18696	dmabuf->time_stamp = jiffies;
18697	new_hdr = (struct fc_frame_header *)dmabuf->hbuf.virt;
18698
18699	/* Use the hdr_buf to find the sequence that this frame belongs to */
18700	list_for_each_entry(h_buf, &vport->rcv_buffer_list, list) {
18701		temp_hdr = (struct fc_frame_header *)h_buf->virt;
18702		if ((temp_hdr->fh_seq_id != new_hdr->fh_seq_id) ||
18703		    (temp_hdr->fh_ox_id != new_hdr->fh_ox_id) ||
18704		    (memcmp(&temp_hdr->fh_s_id, &new_hdr->fh_s_id, 3)))
18705			continue;
18706		/* found a pending sequence that matches this frame */
18707		seq_dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf);
18708		break;
18709	}
18710	if (!seq_dmabuf) {
18711		/*
18712		 * This indicates first frame received for this sequence.
18713		 * Queue the buffer on the vport's rcv_buffer_list.
18714		 */
18715		list_add_tail(&dmabuf->hbuf.list, &vport->rcv_buffer_list);
18716		lpfc_update_rcv_time_stamp(vport);
18717		return dmabuf;
18718	}
18719	temp_hdr = seq_dmabuf->hbuf.virt;
18720	if (be16_to_cpu(new_hdr->fh_seq_cnt) <
18721		be16_to_cpu(temp_hdr->fh_seq_cnt)) {
18722		list_del_init(&seq_dmabuf->hbuf.list);
18723		list_add_tail(&dmabuf->hbuf.list, &vport->rcv_buffer_list);
18724		list_add_tail(&dmabuf->dbuf.list, &seq_dmabuf->dbuf.list);
18725		lpfc_update_rcv_time_stamp(vport);
18726		return dmabuf;
18727	}
18728	/* move this sequence to the tail to indicate a young sequence */
18729	list_move_tail(&seq_dmabuf->hbuf.list, &vport->rcv_buffer_list);
18730	seq_dmabuf->time_stamp = jiffies;
18731	lpfc_update_rcv_time_stamp(vport);
18732	if (list_empty(&seq_dmabuf->dbuf.list)) {
18733		list_add_tail(&dmabuf->dbuf.list, &seq_dmabuf->dbuf.list);
18734		return seq_dmabuf;
18735	}
18736	/* find the correct place in the sequence to insert this frame */
18737	d_buf = list_entry(seq_dmabuf->dbuf.list.prev, typeof(*d_buf), list);
18738	while (!found) {
18739		temp_dmabuf = container_of(d_buf, struct hbq_dmabuf, dbuf);
18740		temp_hdr = (struct fc_frame_header *)temp_dmabuf->hbuf.virt;
18741		/*
18742		 * If the frame's sequence count is greater than the frame on
18743		 * the list then insert the frame right after this frame
18744		 */
18745		if (be16_to_cpu(new_hdr->fh_seq_cnt) >
18746			be16_to_cpu(temp_hdr->fh_seq_cnt)) {
18747			list_add(&dmabuf->dbuf.list, &temp_dmabuf->dbuf.list);
18748			found = 1;
18749			break;
18750		}
18751
18752		if (&d_buf->list == &seq_dmabuf->dbuf.list)
18753			break;
18754		d_buf = list_entry(d_buf->list.prev, typeof(*d_buf), list);
18755	}
18756
18757	if (found)
18758		return seq_dmabuf;
18759	return NULL;
18760}
18761
18762/**
18763 * lpfc_sli4_abort_partial_seq - Abort partially assembled unsol sequence
18764 * @vport: pointer to a vitural port
18765 * @dmabuf: pointer to a dmabuf that describes the FC sequence
18766 *
18767 * This function tries to abort from the partially assembed sequence, described
18768 * by the information from basic abbort @dmabuf. It checks to see whether such
18769 * partially assembled sequence held by the driver. If so, it shall free up all
18770 * the frames from the partially assembled sequence.
18771 *
18772 * Return
18773 * true  -- if there is matching partially assembled sequence present and all
18774 *          the frames freed with the sequence;
18775 * false -- if there is no matching partially assembled sequence present so
18776 *          nothing got aborted in the lower layer driver
18777 **/
18778static bool
18779lpfc_sli4_abort_partial_seq(struct lpfc_vport *vport,
18780			    struct hbq_dmabuf *dmabuf)
18781{
18782	struct fc_frame_header *new_hdr;
18783	struct fc_frame_header *temp_hdr;
18784	struct lpfc_dmabuf *d_buf, *n_buf, *h_buf;
18785	struct hbq_dmabuf *seq_dmabuf = NULL;
18786
18787	/* Use the hdr_buf to find the sequence that matches this frame */
18788	INIT_LIST_HEAD(&dmabuf->dbuf.list);
18789	INIT_LIST_HEAD(&dmabuf->hbuf.list);
18790	new_hdr = (struct fc_frame_header *)dmabuf->hbuf.virt;
18791	list_for_each_entry(h_buf, &vport->rcv_buffer_list, list) {
18792		temp_hdr = (struct fc_frame_header *)h_buf->virt;
18793		if ((temp_hdr->fh_seq_id != new_hdr->fh_seq_id) ||
18794		    (temp_hdr->fh_ox_id != new_hdr->fh_ox_id) ||
18795		    (memcmp(&temp_hdr->fh_s_id, &new_hdr->fh_s_id, 3)))
18796			continue;
18797		/* found a pending sequence that matches this frame */
18798		seq_dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf);
18799		break;
18800	}
18801
18802	/* Free up all the frames from the partially assembled sequence */
18803	if (seq_dmabuf) {
18804		list_for_each_entry_safe(d_buf, n_buf,
18805					 &seq_dmabuf->dbuf.list, list) {
18806			list_del_init(&d_buf->list);
18807			lpfc_in_buf_free(vport->phba, d_buf);
18808		}
18809		return true;
18810	}
18811	return false;
18812}
18813
18814/**
18815 * lpfc_sli4_abort_ulp_seq - Abort assembled unsol sequence from ulp
18816 * @vport: pointer to a vitural port
18817 * @dmabuf: pointer to a dmabuf that describes the FC sequence
18818 *
18819 * This function tries to abort from the assembed sequence from upper level
18820 * protocol, described by the information from basic abbort @dmabuf. It
18821 * checks to see whether such pending context exists at upper level protocol.
18822 * If so, it shall clean up the pending context.
18823 *
18824 * Return
18825 * true  -- if there is matching pending context of the sequence cleaned
18826 *          at ulp;
18827 * false -- if there is no matching pending context of the sequence present
18828 *          at ulp.
18829 **/
18830static bool
18831lpfc_sli4_abort_ulp_seq(struct lpfc_vport *vport, struct hbq_dmabuf *dmabuf)
18832{
18833	struct lpfc_hba *phba = vport->phba;
18834	int handled;
18835
18836	/* Accepting abort at ulp with SLI4 only */
18837	if (phba->sli_rev < LPFC_SLI_REV4)
18838		return false;
18839
18840	/* Register all caring upper level protocols to attend abort */
18841	handled = lpfc_ct_handle_unsol_abort(phba, dmabuf);
18842	if (handled)
18843		return true;
18844
18845	return false;
18846}
18847
18848/**
18849 * lpfc_sli4_seq_abort_rsp_cmpl - BLS ABORT RSP seq abort iocb complete handler
18850 * @phba: Pointer to HBA context object.
18851 * @cmd_iocbq: pointer to the command iocbq structure.
18852 * @rsp_iocbq: pointer to the response iocbq structure.
18853 *
18854 * This function handles the sequence abort response iocb command complete
18855 * event. It properly releases the memory allocated to the sequence abort
18856 * accept iocb.
18857 **/
18858static void
18859lpfc_sli4_seq_abort_rsp_cmpl(struct lpfc_hba *phba,
18860			     struct lpfc_iocbq *cmd_iocbq,
18861			     struct lpfc_iocbq *rsp_iocbq)
18862{
18863	if (cmd_iocbq) {
18864		lpfc_nlp_put(cmd_iocbq->ndlp);
18865		lpfc_sli_release_iocbq(phba, cmd_iocbq);
18866	}
18867
18868	/* Failure means BLS ABORT RSP did not get delivered to remote node*/
18869	if (rsp_iocbq && rsp_iocbq->iocb.ulpStatus)
18870		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
18871			"3154 BLS ABORT RSP failed, data:  x%x/x%x\n",
18872			get_job_ulpstatus(phba, rsp_iocbq),
18873			get_job_word4(phba, rsp_iocbq));
18874}
18875
18876/**
18877 * lpfc_sli4_xri_inrange - check xri is in range of xris owned by driver.
18878 * @phba: Pointer to HBA context object.
18879 * @xri: xri id in transaction.
18880 *
18881 * This function validates the xri maps to the known range of XRIs allocated an
18882 * used by the driver.
18883 **/
18884uint16_t
18885lpfc_sli4_xri_inrange(struct lpfc_hba *phba,
18886		      uint16_t xri)
18887{
18888	uint16_t i;
18889
18890	for (i = 0; i < phba->sli4_hba.max_cfg_param.max_xri; i++) {
18891		if (xri == phba->sli4_hba.xri_ids[i])
18892			return i;
18893	}
18894	return NO_XRI;
18895}
18896
18897/**
18898 * lpfc_sli4_seq_abort_rsp - bls rsp to sequence abort
18899 * @vport: pointer to a virtual port.
18900 * @fc_hdr: pointer to a FC frame header.
18901 * @aborted: was the partially assembled receive sequence successfully aborted
18902 *
18903 * This function sends a basic response to a previous unsol sequence abort
18904 * event after aborting the sequence handling.
18905 **/
18906void
18907lpfc_sli4_seq_abort_rsp(struct lpfc_vport *vport,
18908			struct fc_frame_header *fc_hdr, bool aborted)
18909{
18910	struct lpfc_hba *phba = vport->phba;
18911	struct lpfc_iocbq *ctiocb = NULL;
18912	struct lpfc_nodelist *ndlp;
18913	uint16_t oxid, rxid, xri, lxri;
18914	uint32_t sid, fctl;
18915	union lpfc_wqe128 *icmd;
18916	int rc;
18917
18918	if (!lpfc_is_link_up(phba))
18919		return;
18920
18921	sid = sli4_sid_from_fc_hdr(fc_hdr);
18922	oxid = be16_to_cpu(fc_hdr->fh_ox_id);
18923	rxid = be16_to_cpu(fc_hdr->fh_rx_id);
18924
18925	ndlp = lpfc_findnode_did(vport, sid);
18926	if (!ndlp) {
18927		ndlp = lpfc_nlp_init(vport, sid);
18928		if (!ndlp) {
18929			lpfc_printf_vlog(vport, KERN_WARNING, LOG_ELS,
18930					 "1268 Failed to allocate ndlp for "
18931					 "oxid:x%x SID:x%x\n", oxid, sid);
18932			return;
18933		}
18934		/* Put ndlp onto vport node list */
18935		lpfc_enqueue_node(vport, ndlp);
18936	}
18937
18938	/* Allocate buffer for rsp iocb */
18939	ctiocb = lpfc_sli_get_iocbq(phba);
18940	if (!ctiocb)
18941		return;
18942
18943	icmd = &ctiocb->wqe;
18944
18945	/* Extract the F_CTL field from FC_HDR */
18946	fctl = sli4_fctl_from_fc_hdr(fc_hdr);
18947
18948	ctiocb->ndlp = lpfc_nlp_get(ndlp);
18949	if (!ctiocb->ndlp) {
18950		lpfc_sli_release_iocbq(phba, ctiocb);
18951		return;
18952	}
18953
18954	ctiocb->vport = vport;
18955	ctiocb->cmd_cmpl = lpfc_sli4_seq_abort_rsp_cmpl;
18956	ctiocb->sli4_lxritag = NO_XRI;
18957	ctiocb->sli4_xritag = NO_XRI;
18958	ctiocb->abort_rctl = FC_RCTL_BA_ACC;
18959
18960	if (fctl & FC_FC_EX_CTX)
18961		/* Exchange responder sent the abort so we
18962		 * own the oxid.
18963		 */
18964		xri = oxid;
18965	else
18966		xri = rxid;
18967	lxri = lpfc_sli4_xri_inrange(phba, xri);
18968	if (lxri != NO_XRI)
18969		lpfc_set_rrq_active(phba, ndlp, lxri,
18970			(xri == oxid) ? rxid : oxid, 0);
18971	/* For BA_ABTS from exchange responder, if the logical xri with
18972	 * the oxid maps to the FCP XRI range, the port no longer has
18973	 * that exchange context, send a BLS_RJT. Override the IOCB for
18974	 * a BA_RJT.
18975	 */
18976	if ((fctl & FC_FC_EX_CTX) &&
18977	    (lxri > lpfc_sli4_get_iocb_cnt(phba))) {
18978		ctiocb->abort_rctl = FC_RCTL_BA_RJT;
18979		bf_set(xmit_bls_rsp64_rjt_vspec, &icmd->xmit_bls_rsp, 0);
18980		bf_set(xmit_bls_rsp64_rjt_expc, &icmd->xmit_bls_rsp,
18981		       FC_BA_RJT_INV_XID);
18982		bf_set(xmit_bls_rsp64_rjt_rsnc, &icmd->xmit_bls_rsp,
18983		       FC_BA_RJT_UNABLE);
18984	}
18985
18986	/* If BA_ABTS failed to abort a partially assembled receive sequence,
18987	 * the driver no longer has that exchange, send a BLS_RJT. Override
18988	 * the IOCB for a BA_RJT.
18989	 */
18990	if (aborted == false) {
18991		ctiocb->abort_rctl = FC_RCTL_BA_RJT;
18992		bf_set(xmit_bls_rsp64_rjt_vspec, &icmd->xmit_bls_rsp, 0);
18993		bf_set(xmit_bls_rsp64_rjt_expc, &icmd->xmit_bls_rsp,
18994		       FC_BA_RJT_INV_XID);
18995		bf_set(xmit_bls_rsp64_rjt_rsnc, &icmd->xmit_bls_rsp,
18996		       FC_BA_RJT_UNABLE);
18997	}
18998
18999	if (fctl & FC_FC_EX_CTX) {
19000		/* ABTS sent by responder to CT exchange, construction
19001		 * of BA_ACC will use OX_ID from ABTS for the XRI_TAG
19002		 * field and RX_ID from ABTS for RX_ID field.
19003		 */
19004		ctiocb->abort_bls = LPFC_ABTS_UNSOL_RSP;
19005		bf_set(xmit_bls_rsp64_rxid, &icmd->xmit_bls_rsp, rxid);
19006	} else {
19007		/* ABTS sent by initiator to CT exchange, construction
19008		 * of BA_ACC will need to allocate a new XRI as for the
19009		 * XRI_TAG field.
19010		 */
19011		ctiocb->abort_bls = LPFC_ABTS_UNSOL_INT;
19012	}
19013
19014	/* OX_ID is invariable to who sent ABTS to CT exchange */
19015	bf_set(xmit_bls_rsp64_oxid, &icmd->xmit_bls_rsp, oxid);
19016	bf_set(xmit_bls_rsp64_oxid, &icmd->xmit_bls_rsp, rxid);
19017
19018	/* Use CT=VPI */
19019	bf_set(wqe_els_did, &icmd->xmit_bls_rsp.wqe_dest,
19020	       ndlp->nlp_DID);
19021	bf_set(xmit_bls_rsp64_temprpi, &icmd->xmit_bls_rsp,
19022	       phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
19023	bf_set(wqe_cmnd, &icmd->generic.wqe_com, CMD_XMIT_BLS_RSP64_CX);
19024
19025	/* Xmit CT abts response on exchange <xid> */
19026	lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
19027			 "1200 Send BLS cmd x%x on oxid x%x Data: x%x\n",
19028			 ctiocb->abort_rctl, oxid, phba->link_state);
19029
19030	rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, ctiocb, 0);
19031	if (rc == IOCB_ERROR) {
19032		lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
19033				 "2925 Failed to issue CT ABTS RSP x%x on "
19034				 "xri x%x, Data x%x\n",
19035				 ctiocb->abort_rctl, oxid,
19036				 phba->link_state);
19037		lpfc_nlp_put(ndlp);
19038		ctiocb->ndlp = NULL;
19039		lpfc_sli_release_iocbq(phba, ctiocb);
19040	}
19041
19042	/* if only usage of this nodelist is BLS response, release initial ref
19043	 * to free ndlp when transmit completes
19044	 */
19045	if (ndlp->nlp_state == NLP_STE_UNUSED_NODE &&
19046	    !(ndlp->nlp_flag & NLP_DROPPED) &&
19047	    !(ndlp->fc4_xpt_flags & (NVME_XPT_REGD | SCSI_XPT_REGD))) {
19048		ndlp->nlp_flag |= NLP_DROPPED;
19049		lpfc_nlp_put(ndlp);
19050	}
19051}
19052
19053/**
19054 * lpfc_sli4_handle_unsol_abort - Handle sli-4 unsolicited abort event
19055 * @vport: Pointer to the vport on which this sequence was received
19056 * @dmabuf: pointer to a dmabuf that describes the FC sequence
19057 *
19058 * This function handles an SLI-4 unsolicited abort event. If the unsolicited
19059 * receive sequence is only partially assembed by the driver, it shall abort
19060 * the partially assembled frames for the sequence. Otherwise, if the
19061 * unsolicited receive sequence has been completely assembled and passed to
19062 * the Upper Layer Protocol (ULP), it then mark the per oxid status for the
19063 * unsolicited sequence has been aborted. After that, it will issue a basic
19064 * accept to accept the abort.
19065 **/
19066static void
19067lpfc_sli4_handle_unsol_abort(struct lpfc_vport *vport,
19068			     struct hbq_dmabuf *dmabuf)
19069{
19070	struct lpfc_hba *phba = vport->phba;
19071	struct fc_frame_header fc_hdr;
19072	uint32_t fctl;
19073	bool aborted;
19074
19075	/* Make a copy of fc_hdr before the dmabuf being released */
19076	memcpy(&fc_hdr, dmabuf->hbuf.virt, sizeof(struct fc_frame_header));
19077	fctl = sli4_fctl_from_fc_hdr(&fc_hdr);
19078
19079	if (fctl & FC_FC_EX_CTX) {
19080		/* ABTS by responder to exchange, no cleanup needed */
19081		aborted = true;
19082	} else {
19083		/* ABTS by initiator to exchange, need to do cleanup */
19084		aborted = lpfc_sli4_abort_partial_seq(vport, dmabuf);
19085		if (aborted == false)
19086			aborted = lpfc_sli4_abort_ulp_seq(vport, dmabuf);
19087	}
19088	lpfc_in_buf_free(phba, &dmabuf->dbuf);
19089
19090	if (phba->nvmet_support) {
19091		lpfc_nvmet_rcv_unsol_abort(vport, &fc_hdr);
19092		return;
19093	}
19094
19095	/* Respond with BA_ACC or BA_RJT accordingly */
19096	lpfc_sli4_seq_abort_rsp(vport, &fc_hdr, aborted);
19097}
19098
19099/**
19100 * lpfc_seq_complete - Indicates if a sequence is complete
19101 * @dmabuf: pointer to a dmabuf that describes the FC sequence
19102 *
19103 * This function checks the sequence, starting with the frame described by
19104 * @dmabuf, to see if all the frames associated with this sequence are present.
19105 * the frames associated with this sequence are linked to the @dmabuf using the
19106 * dbuf list. This function looks for two major things. 1) That the first frame
19107 * has a sequence count of zero. 2) There is a frame with last frame of sequence
19108 * set. 3) That there are no holes in the sequence count. The function will
19109 * return 1 when the sequence is complete, otherwise it will return 0.
19110 **/
19111static int
19112lpfc_seq_complete(struct hbq_dmabuf *dmabuf)
19113{
19114	struct fc_frame_header *hdr;
19115	struct lpfc_dmabuf *d_buf;
19116	struct hbq_dmabuf *seq_dmabuf;
19117	uint32_t fctl;
19118	int seq_count = 0;
19119
19120	hdr = (struct fc_frame_header *)dmabuf->hbuf.virt;
19121	/* make sure first fame of sequence has a sequence count of zero */
19122	if (hdr->fh_seq_cnt != seq_count)
19123		return 0;
19124	fctl = (hdr->fh_f_ctl[0] << 16 |
19125		hdr->fh_f_ctl[1] << 8 |
19126		hdr->fh_f_ctl[2]);
19127	/* If last frame of sequence we can return success. */
19128	if (fctl & FC_FC_END_SEQ)
19129		return 1;
19130	list_for_each_entry(d_buf, &dmabuf->dbuf.list, list) {
19131		seq_dmabuf = container_of(d_buf, struct hbq_dmabuf, dbuf);
19132		hdr = (struct fc_frame_header *)seq_dmabuf->hbuf.virt;
19133		/* If there is a hole in the sequence count then fail. */
19134		if (++seq_count != be16_to_cpu(hdr->fh_seq_cnt))
19135			return 0;
19136		fctl = (hdr->fh_f_ctl[0] << 16 |
19137			hdr->fh_f_ctl[1] << 8 |
19138			hdr->fh_f_ctl[2]);
19139		/* If last frame of sequence we can return success. */
19140		if (fctl & FC_FC_END_SEQ)
19141			return 1;
19142	}
19143	return 0;
19144}
19145
19146/**
19147 * lpfc_prep_seq - Prep sequence for ULP processing
19148 * @vport: Pointer to the vport on which this sequence was received
19149 * @seq_dmabuf: pointer to a dmabuf that describes the FC sequence
19150 *
19151 * This function takes a sequence, described by a list of frames, and creates
19152 * a list of iocbq structures to describe the sequence. This iocbq list will be
19153 * used to issue to the generic unsolicited sequence handler. This routine
19154 * returns a pointer to the first iocbq in the list. If the function is unable
19155 * to allocate an iocbq then it throw out the received frames that were not
19156 * able to be described and return a pointer to the first iocbq. If unable to
19157 * allocate any iocbqs (including the first) this function will return NULL.
19158 **/
19159static struct lpfc_iocbq *
19160lpfc_prep_seq(struct lpfc_vport *vport, struct hbq_dmabuf *seq_dmabuf)
19161{
19162	struct hbq_dmabuf *hbq_buf;
19163	struct lpfc_dmabuf *d_buf, *n_buf;
19164	struct lpfc_iocbq *first_iocbq, *iocbq;
19165	struct fc_frame_header *fc_hdr;
19166	uint32_t sid;
19167	uint32_t len, tot_len;
19168
19169	fc_hdr = (struct fc_frame_header *)seq_dmabuf->hbuf.virt;
19170	/* remove from receive buffer list */
19171	list_del_init(&seq_dmabuf->hbuf.list);
19172	lpfc_update_rcv_time_stamp(vport);
19173	/* get the Remote Port's SID */
19174	sid = sli4_sid_from_fc_hdr(fc_hdr);
19175	tot_len = 0;
19176	/* Get an iocbq struct to fill in. */
19177	first_iocbq = lpfc_sli_get_iocbq(vport->phba);
19178	if (first_iocbq) {
19179		/* Initialize the first IOCB. */
19180		first_iocbq->wcqe_cmpl.total_data_placed = 0;
19181		bf_set(lpfc_wcqe_c_status, &first_iocbq->wcqe_cmpl,
19182		       IOSTAT_SUCCESS);
19183		first_iocbq->vport = vport;
19184
19185		/* Check FC Header to see what TYPE of frame we are rcv'ing */
19186		if (sli4_type_from_fc_hdr(fc_hdr) == FC_TYPE_ELS) {
19187			bf_set(els_rsp64_sid, &first_iocbq->wqe.xmit_els_rsp,
19188			       sli4_did_from_fc_hdr(fc_hdr));
19189		}
19190
19191		bf_set(wqe_ctxt_tag, &first_iocbq->wqe.xmit_els_rsp.wqe_com,
19192		       NO_XRI);
19193		bf_set(wqe_rcvoxid, &first_iocbq->wqe.xmit_els_rsp.wqe_com,
19194		       be16_to_cpu(fc_hdr->fh_ox_id));
19195
19196		/* put the first buffer into the first iocb */
19197		tot_len = bf_get(lpfc_rcqe_length,
19198				 &seq_dmabuf->cq_event.cqe.rcqe_cmpl);
19199
19200		first_iocbq->cmd_dmabuf = &seq_dmabuf->dbuf;
19201		first_iocbq->bpl_dmabuf = NULL;
19202		/* Keep track of the BDE count */
19203		first_iocbq->wcqe_cmpl.word3 = 1;
19204
19205		if (tot_len > LPFC_DATA_BUF_SIZE)
19206			first_iocbq->wqe.gen_req.bde.tus.f.bdeSize =
19207				LPFC_DATA_BUF_SIZE;
19208		else
19209			first_iocbq->wqe.gen_req.bde.tus.f.bdeSize = tot_len;
19210
19211		first_iocbq->wcqe_cmpl.total_data_placed = tot_len;
19212		bf_set(wqe_els_did, &first_iocbq->wqe.xmit_els_rsp.wqe_dest,
19213		       sid);
19214	}
19215	iocbq = first_iocbq;
19216	/*
19217	 * Each IOCBq can have two Buffers assigned, so go through the list
19218	 * of buffers for this sequence and save two buffers in each IOCBq
19219	 */
19220	list_for_each_entry_safe(d_buf, n_buf, &seq_dmabuf->dbuf.list, list) {
19221		if (!iocbq) {
19222			lpfc_in_buf_free(vport->phba, d_buf);
19223			continue;
19224		}
19225		if (!iocbq->bpl_dmabuf) {
19226			iocbq->bpl_dmabuf = d_buf;
19227			iocbq->wcqe_cmpl.word3++;
19228			/* We need to get the size out of the right CQE */
19229			hbq_buf = container_of(d_buf, struct hbq_dmabuf, dbuf);
19230			len = bf_get(lpfc_rcqe_length,
19231				       &hbq_buf->cq_event.cqe.rcqe_cmpl);
19232			iocbq->unsol_rcv_len = len;
19233			iocbq->wcqe_cmpl.total_data_placed += len;
19234			tot_len += len;
19235		} else {
19236			iocbq = lpfc_sli_get_iocbq(vport->phba);
19237			if (!iocbq) {
19238				if (first_iocbq) {
19239					bf_set(lpfc_wcqe_c_status,
19240					       &first_iocbq->wcqe_cmpl,
19241					       IOSTAT_SUCCESS);
19242					first_iocbq->wcqe_cmpl.parameter =
19243						IOERR_NO_RESOURCES;
19244				}
19245				lpfc_in_buf_free(vport->phba, d_buf);
19246				continue;
19247			}
19248			/* We need to get the size out of the right CQE */
19249			hbq_buf = container_of(d_buf, struct hbq_dmabuf, dbuf);
19250			len = bf_get(lpfc_rcqe_length,
19251				       &hbq_buf->cq_event.cqe.rcqe_cmpl);
19252			iocbq->cmd_dmabuf = d_buf;
19253			iocbq->bpl_dmabuf = NULL;
19254			iocbq->wcqe_cmpl.word3 = 1;
19255
19256			if (len > LPFC_DATA_BUF_SIZE)
19257				iocbq->wqe.xmit_els_rsp.bde.tus.f.bdeSize =
19258					LPFC_DATA_BUF_SIZE;
19259			else
19260				iocbq->wqe.xmit_els_rsp.bde.tus.f.bdeSize =
19261					len;
19262
19263			tot_len += len;
19264			iocbq->wcqe_cmpl.total_data_placed = tot_len;
19265			bf_set(wqe_els_did, &iocbq->wqe.xmit_els_rsp.wqe_dest,
19266			       sid);
19267			list_add_tail(&iocbq->list, &first_iocbq->list);
19268		}
19269	}
19270	/* Free the sequence's header buffer */
19271	if (!first_iocbq)
19272		lpfc_in_buf_free(vport->phba, &seq_dmabuf->dbuf);
19273
19274	return first_iocbq;
19275}
19276
19277static void
19278lpfc_sli4_send_seq_to_ulp(struct lpfc_vport *vport,
19279			  struct hbq_dmabuf *seq_dmabuf)
19280{
19281	struct fc_frame_header *fc_hdr;
19282	struct lpfc_iocbq *iocbq, *curr_iocb, *next_iocb;
19283	struct lpfc_hba *phba = vport->phba;
19284
19285	fc_hdr = (struct fc_frame_header *)seq_dmabuf->hbuf.virt;
19286	iocbq = lpfc_prep_seq(vport, seq_dmabuf);
19287	if (!iocbq) {
19288		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
19289				"2707 Ring %d handler: Failed to allocate "
19290				"iocb Rctl x%x Type x%x received\n",
19291				LPFC_ELS_RING,
19292				fc_hdr->fh_r_ctl, fc_hdr->fh_type);
19293		return;
19294	}
19295	if (!lpfc_complete_unsol_iocb(phba,
19296				      phba->sli4_hba.els_wq->pring,
19297				      iocbq, fc_hdr->fh_r_ctl,
19298				      fc_hdr->fh_type)) {
19299		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
19300				"2540 Ring %d handler: unexpected Rctl "
19301				"x%x Type x%x received\n",
19302				LPFC_ELS_RING,
19303				fc_hdr->fh_r_ctl, fc_hdr->fh_type);
19304		lpfc_in_buf_free(phba, &seq_dmabuf->dbuf);
19305	}
19306
19307	/* Free iocb created in lpfc_prep_seq */
19308	list_for_each_entry_safe(curr_iocb, next_iocb,
19309				 &iocbq->list, list) {
19310		list_del_init(&curr_iocb->list);
19311		lpfc_sli_release_iocbq(phba, curr_iocb);
19312	}
19313	lpfc_sli_release_iocbq(phba, iocbq);
19314}
19315
19316static void
19317lpfc_sli4_mds_loopback_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
19318			    struct lpfc_iocbq *rspiocb)
19319{
19320	struct lpfc_dmabuf *pcmd = cmdiocb->cmd_dmabuf;
19321
19322	if (pcmd && pcmd->virt)
19323		dma_pool_free(phba->lpfc_drb_pool, pcmd->virt, pcmd->phys);
19324	kfree(pcmd);
19325	lpfc_sli_release_iocbq(phba, cmdiocb);
19326	lpfc_drain_txq(phba);
19327}
19328
19329static void
19330lpfc_sli4_handle_mds_loopback(struct lpfc_vport *vport,
19331			      struct hbq_dmabuf *dmabuf)
19332{
19333	struct fc_frame_header *fc_hdr;
19334	struct lpfc_hba *phba = vport->phba;
19335	struct lpfc_iocbq *iocbq = NULL;
19336	union  lpfc_wqe128 *pwqe;
19337	struct lpfc_dmabuf *pcmd = NULL;
19338	uint32_t frame_len;
19339	int rc;
19340	unsigned long iflags;
19341
19342	fc_hdr = (struct fc_frame_header *)dmabuf->hbuf.virt;
19343	frame_len = bf_get(lpfc_rcqe_length, &dmabuf->cq_event.cqe.rcqe_cmpl);
19344
19345	/* Send the received frame back */
19346	iocbq = lpfc_sli_get_iocbq(phba);
19347	if (!iocbq) {
19348		/* Queue cq event and wakeup worker thread to process it */
19349		spin_lock_irqsave(&phba->hbalock, iflags);
19350		list_add_tail(&dmabuf->cq_event.list,
19351			      &phba->sli4_hba.sp_queue_event);
19352		phba->hba_flag |= HBA_SP_QUEUE_EVT;
19353		spin_unlock_irqrestore(&phba->hbalock, iflags);
19354		lpfc_worker_wake_up(phba);
19355		return;
19356	}
19357
19358	/* Allocate buffer for command payload */
19359	pcmd = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
19360	if (pcmd)
19361		pcmd->virt = dma_pool_alloc(phba->lpfc_drb_pool, GFP_KERNEL,
19362					    &pcmd->phys);
19363	if (!pcmd || !pcmd->virt)
19364		goto exit;
19365
19366	INIT_LIST_HEAD(&pcmd->list);
19367
19368	/* copyin the payload */
19369	memcpy(pcmd->virt, dmabuf->dbuf.virt, frame_len);
19370
19371	iocbq->cmd_dmabuf = pcmd;
19372	iocbq->vport = vport;
19373	iocbq->cmd_flag &= ~LPFC_FIP_ELS_ID_MASK;
19374	iocbq->cmd_flag |= LPFC_USE_FCPWQIDX;
19375	iocbq->num_bdes = 0;
19376
19377	pwqe = &iocbq->wqe;
19378	/* fill in BDE's for command */
19379	pwqe->gen_req.bde.addrHigh = putPaddrHigh(pcmd->phys);
19380	pwqe->gen_req.bde.addrLow = putPaddrLow(pcmd->phys);
19381	pwqe->gen_req.bde.tus.f.bdeSize = frame_len;
19382	pwqe->gen_req.bde.tus.f.bdeFlags = BUFF_TYPE_BDE_64;
19383
19384	pwqe->send_frame.frame_len = frame_len;
19385	pwqe->send_frame.fc_hdr_wd0 = be32_to_cpu(*((__be32 *)fc_hdr));
19386	pwqe->send_frame.fc_hdr_wd1 = be32_to_cpu(*((__be32 *)fc_hdr + 1));
19387	pwqe->send_frame.fc_hdr_wd2 = be32_to_cpu(*((__be32 *)fc_hdr + 2));
19388	pwqe->send_frame.fc_hdr_wd3 = be32_to_cpu(*((__be32 *)fc_hdr + 3));
19389	pwqe->send_frame.fc_hdr_wd4 = be32_to_cpu(*((__be32 *)fc_hdr + 4));
19390	pwqe->send_frame.fc_hdr_wd5 = be32_to_cpu(*((__be32 *)fc_hdr + 5));
19391
19392	pwqe->generic.wqe_com.word7 = 0;
19393	pwqe->generic.wqe_com.word10 = 0;
19394
19395	bf_set(wqe_cmnd, &pwqe->generic.wqe_com, CMD_SEND_FRAME);
19396	bf_set(wqe_sof, &pwqe->generic.wqe_com, 0x2E); /* SOF byte */
19397	bf_set(wqe_eof, &pwqe->generic.wqe_com, 0x41); /* EOF byte */
19398	bf_set(wqe_lenloc, &pwqe->generic.wqe_com, 1);
19399	bf_set(wqe_xbl, &pwqe->generic.wqe_com, 1);
19400	bf_set(wqe_dbde, &pwqe->generic.wqe_com, 1);
19401	bf_set(wqe_xc, &pwqe->generic.wqe_com, 1);
19402	bf_set(wqe_cmd_type, &pwqe->generic.wqe_com, 0xA);
19403	bf_set(wqe_cqid, &pwqe->generic.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);
19404	bf_set(wqe_xri_tag, &pwqe->generic.wqe_com, iocbq->sli4_xritag);
19405	bf_set(wqe_reqtag, &pwqe->generic.wqe_com, iocbq->iotag);
19406	bf_set(wqe_class, &pwqe->generic.wqe_com, CLASS3);
19407	pwqe->generic.wqe_com.abort_tag = iocbq->iotag;
19408
19409	iocbq->cmd_cmpl = lpfc_sli4_mds_loopback_cmpl;
19410
19411	rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, iocbq, 0);
19412	if (rc == IOCB_ERROR)
19413		goto exit;
19414
19415	lpfc_in_buf_free(phba, &dmabuf->dbuf);
19416	return;
19417
19418exit:
19419	lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
19420			"2023 Unable to process MDS loopback frame\n");
19421	if (pcmd && pcmd->virt)
19422		dma_pool_free(phba->lpfc_drb_pool, pcmd->virt, pcmd->phys);
19423	kfree(pcmd);
19424	if (iocbq)
19425		lpfc_sli_release_iocbq(phba, iocbq);
19426	lpfc_in_buf_free(phba, &dmabuf->dbuf);
19427}
19428
19429/**
19430 * lpfc_sli4_handle_received_buffer - Handle received buffers from firmware
19431 * @phba: Pointer to HBA context object.
19432 * @dmabuf: Pointer to a dmabuf that describes the FC sequence.
19433 *
19434 * This function is called with no lock held. This function processes all
19435 * the received buffers and gives it to upper layers when a received buffer
19436 * indicates that it is the final frame in the sequence. The interrupt
19437 * service routine processes received buffers at interrupt contexts.
19438 * Worker thread calls lpfc_sli4_handle_received_buffer, which will call the
19439 * appropriate receive function when the final frame in a sequence is received.
19440 **/
19441void
19442lpfc_sli4_handle_received_buffer(struct lpfc_hba *phba,
19443				 struct hbq_dmabuf *dmabuf)
19444{
19445	struct hbq_dmabuf *seq_dmabuf;
19446	struct fc_frame_header *fc_hdr;
19447	struct lpfc_vport *vport;
19448	uint32_t fcfi;
19449	uint32_t did;
19450
19451	/* Process each received buffer */
19452	fc_hdr = (struct fc_frame_header *)dmabuf->hbuf.virt;
19453
19454	if (fc_hdr->fh_r_ctl == FC_RCTL_MDS_DIAGS ||
19455	    fc_hdr->fh_r_ctl == FC_RCTL_DD_UNSOL_DATA) {
19456		vport = phba->pport;
19457		/* Handle MDS Loopback frames */
19458		if  (!test_bit(FC_UNLOADING, &phba->pport->load_flag))
19459			lpfc_sli4_handle_mds_loopback(vport, dmabuf);
19460		else
19461			lpfc_in_buf_free(phba, &dmabuf->dbuf);
19462		return;
19463	}
19464
19465	/* check to see if this a valid type of frame */
19466	if (lpfc_fc_frame_check(phba, fc_hdr)) {
19467		lpfc_in_buf_free(phba, &dmabuf->dbuf);
19468		return;
19469	}
19470
19471	if ((bf_get(lpfc_cqe_code,
19472		    &dmabuf->cq_event.cqe.rcqe_cmpl) == CQE_CODE_RECEIVE_V1))
19473		fcfi = bf_get(lpfc_rcqe_fcf_id_v1,
19474			      &dmabuf->cq_event.cqe.rcqe_cmpl);
19475	else
19476		fcfi = bf_get(lpfc_rcqe_fcf_id,
19477			      &dmabuf->cq_event.cqe.rcqe_cmpl);
19478
19479	if (fc_hdr->fh_r_ctl == 0xF4 && fc_hdr->fh_type == 0xFF) {
19480		vport = phba->pport;
19481		lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
19482				"2023 MDS Loopback %d bytes\n",
19483				bf_get(lpfc_rcqe_length,
19484				       &dmabuf->cq_event.cqe.rcqe_cmpl));
19485		/* Handle MDS Loopback frames */
19486		lpfc_sli4_handle_mds_loopback(vport, dmabuf);
19487		return;
19488	}
19489
19490	/* d_id this frame is directed to */
19491	did = sli4_did_from_fc_hdr(fc_hdr);
19492
19493	vport = lpfc_fc_frame_to_vport(phba, fc_hdr, fcfi, did);
19494	if (!vport) {
19495		/* throw out the frame */
19496		lpfc_in_buf_free(phba, &dmabuf->dbuf);
19497		return;
19498	}
19499
19500	/* vport is registered unless we rcv a FLOGI directed to Fabric_DID */
19501	if (!(vport->vpi_state & LPFC_VPI_REGISTERED) &&
19502		(did != Fabric_DID)) {
19503		/*
19504		 * Throw out the frame if we are not pt2pt.
19505		 * The pt2pt protocol allows for discovery frames
19506		 * to be received without a registered VPI.
19507		 */
19508		if (!test_bit(FC_PT2PT, &vport->fc_flag) ||
19509		    phba->link_state == LPFC_HBA_READY) {
19510			lpfc_in_buf_free(phba, &dmabuf->dbuf);
19511			return;
19512		}
19513	}
19514
19515	/* Handle the basic abort sequence (BA_ABTS) event */
19516	if (fc_hdr->fh_r_ctl == FC_RCTL_BA_ABTS) {
19517		lpfc_sli4_handle_unsol_abort(vport, dmabuf);
19518		return;
19519	}
19520
19521	/* Link this frame */
19522	seq_dmabuf = lpfc_fc_frame_add(vport, dmabuf);
19523	if (!seq_dmabuf) {
19524		/* unable to add frame to vport - throw it out */
19525		lpfc_in_buf_free(phba, &dmabuf->dbuf);
19526		return;
19527	}
19528	/* If not last frame in sequence continue processing frames. */
19529	if (!lpfc_seq_complete(seq_dmabuf))
19530		return;
19531
19532	/* Send the complete sequence to the upper layer protocol */
19533	lpfc_sli4_send_seq_to_ulp(vport, seq_dmabuf);
19534}
19535
19536/**
19537 * lpfc_sli4_post_all_rpi_hdrs - Post the rpi header memory region to the port
19538 * @phba: pointer to lpfc hba data structure.
19539 *
19540 * This routine is invoked to post rpi header templates to the
19541 * HBA consistent with the SLI-4 interface spec.  This routine
19542 * posts a SLI4_PAGE_SIZE memory region to the port to hold up to
19543 * SLI4_PAGE_SIZE modulo 64 rpi context headers.
19544 *
19545 * This routine does not require any locks.  It's usage is expected
19546 * to be driver load or reset recovery when the driver is
19547 * sequential.
19548 *
19549 * Return codes
19550 * 	0 - successful
19551 *      -EIO - The mailbox failed to complete successfully.
19552 * 	When this error occurs, the driver is not guaranteed
19553 *	to have any rpi regions posted to the device and
19554 *	must either attempt to repost the regions or take a
19555 *	fatal error.
19556 **/
19557int
19558lpfc_sli4_post_all_rpi_hdrs(struct lpfc_hba *phba)
19559{
19560	struct lpfc_rpi_hdr *rpi_page;
19561	uint32_t rc = 0;
19562	uint16_t lrpi = 0;
19563
19564	/* SLI4 ports that support extents do not require RPI headers. */
19565	if (!phba->sli4_hba.rpi_hdrs_in_use)
19566		goto exit;
19567	if (phba->sli4_hba.extents_in_use)
19568		return -EIO;
19569
19570	list_for_each_entry(rpi_page, &phba->sli4_hba.lpfc_rpi_hdr_list, list) {
19571		/*
19572		 * Assign the rpi headers a physical rpi only if the driver
19573		 * has not initialized those resources.  A port reset only
19574		 * needs the headers posted.
19575		 */
19576		if (bf_get(lpfc_rpi_rsrc_rdy, &phba->sli4_hba.sli4_flags) !=
19577		    LPFC_RPI_RSRC_RDY)
19578			rpi_page->start_rpi = phba->sli4_hba.rpi_ids[lrpi];
19579
19580		rc = lpfc_sli4_post_rpi_hdr(phba, rpi_page);
19581		if (rc != MBX_SUCCESS) {
19582			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
19583					"2008 Error %d posting all rpi "
19584					"headers\n", rc);
19585			rc = -EIO;
19586			break;
19587		}
19588	}
19589
19590 exit:
19591	bf_set(lpfc_rpi_rsrc_rdy, &phba->sli4_hba.sli4_flags,
19592	       LPFC_RPI_RSRC_RDY);
19593	return rc;
19594}
19595
19596/**
19597 * lpfc_sli4_post_rpi_hdr - Post an rpi header memory region to the port
19598 * @phba: pointer to lpfc hba data structure.
19599 * @rpi_page:  pointer to the rpi memory region.
19600 *
19601 * This routine is invoked to post a single rpi header to the
19602 * HBA consistent with the SLI-4 interface spec.  This memory region
19603 * maps up to 64 rpi context regions.
19604 *
19605 * Return codes
19606 * 	0 - successful
19607 * 	-ENOMEM - No available memory
19608 *      -EIO - The mailbox failed to complete successfully.
19609 **/
19610int
19611lpfc_sli4_post_rpi_hdr(struct lpfc_hba *phba, struct lpfc_rpi_hdr *rpi_page)
19612{
19613	LPFC_MBOXQ_t *mboxq;
19614	struct lpfc_mbx_post_hdr_tmpl *hdr_tmpl;
19615	uint32_t rc = 0;
19616	uint32_t shdr_status, shdr_add_status;
19617	union lpfc_sli4_cfg_shdr *shdr;
19618
19619	/* SLI4 ports that support extents do not require RPI headers. */
19620	if (!phba->sli4_hba.rpi_hdrs_in_use)
19621		return rc;
19622	if (phba->sli4_hba.extents_in_use)
19623		return -EIO;
19624
19625	/* The port is notified of the header region via a mailbox command. */
19626	mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
19627	if (!mboxq) {
19628		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
19629				"2001 Unable to allocate memory for issuing "
19630				"SLI_CONFIG_SPECIAL mailbox command\n");
19631		return -ENOMEM;
19632	}
19633
19634	/* Post all rpi memory regions to the port. */
19635	hdr_tmpl = &mboxq->u.mqe.un.hdr_tmpl;
19636	lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_FCOE,
19637			 LPFC_MBOX_OPCODE_FCOE_POST_HDR_TEMPLATE,
19638			 sizeof(struct lpfc_mbx_post_hdr_tmpl) -
19639			 sizeof(struct lpfc_sli4_cfg_mhdr),
19640			 LPFC_SLI4_MBX_EMBED);
19641
19642
19643	/* Post the physical rpi to the port for this rpi header. */
19644	bf_set(lpfc_mbx_post_hdr_tmpl_rpi_offset, hdr_tmpl,
19645	       rpi_page->start_rpi);
19646	bf_set(lpfc_mbx_post_hdr_tmpl_page_cnt,
19647	       hdr_tmpl, rpi_page->page_count);
19648
19649	hdr_tmpl->rpi_paddr_lo = putPaddrLow(rpi_page->dmabuf->phys);
19650	hdr_tmpl->rpi_paddr_hi = putPaddrHigh(rpi_page->dmabuf->phys);
19651	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
19652	shdr = (union lpfc_sli4_cfg_shdr *) &hdr_tmpl->header.cfg_shdr;
19653	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
19654	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
19655	mempool_free(mboxq, phba->mbox_mem_pool);
19656	if (shdr_status || shdr_add_status || rc) {
19657		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
19658				"2514 POST_RPI_HDR mailbox failed with "
19659				"status x%x add_status x%x, mbx status x%x\n",
19660				shdr_status, shdr_add_status, rc);
19661		rc = -ENXIO;
19662	} else {
19663		/*
19664		 * The next_rpi stores the next logical module-64 rpi value used
19665		 * to post physical rpis in subsequent rpi postings.
19666		 */
19667		spin_lock_irq(&phba->hbalock);
19668		phba->sli4_hba.next_rpi = rpi_page->next_rpi;
19669		spin_unlock_irq(&phba->hbalock);
19670	}
19671	return rc;
19672}
19673
19674/**
19675 * lpfc_sli4_alloc_rpi - Get an available rpi in the device's range
19676 * @phba: pointer to lpfc hba data structure.
19677 *
19678 * This routine is invoked to post rpi header templates to the
19679 * HBA consistent with the SLI-4 interface spec.  This routine
19680 * posts a SLI4_PAGE_SIZE memory region to the port to hold up to
19681 * SLI4_PAGE_SIZE modulo 64 rpi context headers.
19682 *
19683 * Returns
19684 * 	A nonzero rpi defined as rpi_base <= rpi < max_rpi if successful
19685 * 	LPFC_RPI_ALLOC_ERROR if no rpis are available.
19686 **/
19687int
19688lpfc_sli4_alloc_rpi(struct lpfc_hba *phba)
19689{
19690	unsigned long rpi;
19691	uint16_t max_rpi, rpi_limit;
19692	uint16_t rpi_remaining, lrpi = 0;
19693	struct lpfc_rpi_hdr *rpi_hdr;
19694	unsigned long iflag;
19695
19696	/*
19697	 * Fetch the next logical rpi.  Because this index is logical,
19698	 * the  driver starts at 0 each time.
19699	 */
19700	spin_lock_irqsave(&phba->hbalock, iflag);
19701	max_rpi = phba->sli4_hba.max_cfg_param.max_rpi;
19702	rpi_limit = phba->sli4_hba.next_rpi;
19703
19704	rpi = find_first_zero_bit(phba->sli4_hba.rpi_bmask, rpi_limit);
19705	if (rpi >= rpi_limit)
19706		rpi = LPFC_RPI_ALLOC_ERROR;
19707	else {
19708		set_bit(rpi, phba->sli4_hba.rpi_bmask);
19709		phba->sli4_hba.max_cfg_param.rpi_used++;
19710		phba->sli4_hba.rpi_count++;
19711	}
19712	lpfc_printf_log(phba, KERN_INFO,
19713			LOG_NODE | LOG_DISCOVERY,
19714			"0001 Allocated rpi:x%x max:x%x lim:x%x\n",
19715			(int) rpi, max_rpi, rpi_limit);
19716
19717	/*
19718	 * Don't try to allocate more rpi header regions if the device limit
19719	 * has been exhausted.
19720	 */
19721	if ((rpi == LPFC_RPI_ALLOC_ERROR) &&
19722	    (phba->sli4_hba.rpi_count >= max_rpi)) {
19723		spin_unlock_irqrestore(&phba->hbalock, iflag);
19724		return rpi;
19725	}
19726
19727	/*
19728	 * RPI header postings are not required for SLI4 ports capable of
19729	 * extents.
19730	 */
19731	if (!phba->sli4_hba.rpi_hdrs_in_use) {
19732		spin_unlock_irqrestore(&phba->hbalock, iflag);
19733		return rpi;
19734	}
19735
19736	/*
19737	 * If the driver is running low on rpi resources, allocate another
19738	 * page now.  Note that the next_rpi value is used because
19739	 * it represents how many are actually in use whereas max_rpi notes
19740	 * how many are supported max by the device.
19741	 */
19742	rpi_remaining = phba->sli4_hba.next_rpi - phba->sli4_hba.rpi_count;
19743	spin_unlock_irqrestore(&phba->hbalock, iflag);
19744	if (rpi_remaining < LPFC_RPI_LOW_WATER_MARK) {
19745		rpi_hdr = lpfc_sli4_create_rpi_hdr(phba);
19746		if (!rpi_hdr) {
19747			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
19748					"2002 Error Could not grow rpi "
19749					"count\n");
19750		} else {
19751			lrpi = rpi_hdr->start_rpi;
19752			rpi_hdr->start_rpi = phba->sli4_hba.rpi_ids[lrpi];
19753			lpfc_sli4_post_rpi_hdr(phba, rpi_hdr);
19754		}
19755	}
19756
19757	return rpi;
19758}
19759
19760/**
19761 * __lpfc_sli4_free_rpi - Release an rpi for reuse.
19762 * @phba: pointer to lpfc hba data structure.
19763 * @rpi: rpi to free
19764 *
19765 * This routine is invoked to release an rpi to the pool of
19766 * available rpis maintained by the driver.
19767 **/
19768static void
19769__lpfc_sli4_free_rpi(struct lpfc_hba *phba, int rpi)
19770{
19771	/*
19772	 * if the rpi value indicates a prior unreg has already
19773	 * been done, skip the unreg.
19774	 */
19775	if (rpi == LPFC_RPI_ALLOC_ERROR)
19776		return;
19777
19778	if (test_and_clear_bit(rpi, phba->sli4_hba.rpi_bmask)) {
19779		phba->sli4_hba.rpi_count--;
19780		phba->sli4_hba.max_cfg_param.rpi_used--;
19781	} else {
19782		lpfc_printf_log(phba, KERN_INFO,
19783				LOG_NODE | LOG_DISCOVERY,
19784				"2016 rpi %x not inuse\n",
19785				rpi);
19786	}
19787}
19788
19789/**
19790 * lpfc_sli4_free_rpi - Release an rpi for reuse.
19791 * @phba: pointer to lpfc hba data structure.
19792 * @rpi: rpi to free
19793 *
19794 * This routine is invoked to release an rpi to the pool of
19795 * available rpis maintained by the driver.
19796 **/
19797void
19798lpfc_sli4_free_rpi(struct lpfc_hba *phba, int rpi)
19799{
19800	spin_lock_irq(&phba->hbalock);
19801	__lpfc_sli4_free_rpi(phba, rpi);
19802	spin_unlock_irq(&phba->hbalock);
19803}
19804
19805/**
19806 * lpfc_sli4_remove_rpis - Remove the rpi bitmask region
19807 * @phba: pointer to lpfc hba data structure.
19808 *
19809 * This routine is invoked to remove the memory region that
19810 * provided rpi via a bitmask.
19811 **/
19812void
19813lpfc_sli4_remove_rpis(struct lpfc_hba *phba)
19814{
19815	kfree(phba->sli4_hba.rpi_bmask);
19816	kfree(phba->sli4_hba.rpi_ids);
19817	bf_set(lpfc_rpi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
19818}
19819
19820/**
19821 * lpfc_sli4_resume_rpi - Remove the rpi bitmask region
19822 * @ndlp: pointer to lpfc nodelist data structure.
19823 * @cmpl: completion call-back.
19824 * @iocbq: data to load as mbox ctx_u information
19825 *
19826 * This routine is invoked to remove the memory region that
19827 * provided rpi via a bitmask.
19828 **/
19829int
19830lpfc_sli4_resume_rpi(struct lpfc_nodelist *ndlp,
19831		     void (*cmpl)(struct lpfc_hba *, LPFC_MBOXQ_t *),
19832		     struct lpfc_iocbq *iocbq)
19833{
19834	LPFC_MBOXQ_t *mboxq;
19835	struct lpfc_hba *phba = ndlp->phba;
19836	int rc;
19837
19838	/* The port is notified of the header region via a mailbox command. */
19839	mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
19840	if (!mboxq)
19841		return -ENOMEM;
19842
19843	/* If cmpl assigned, then this nlp_get pairs with
19844	 * lpfc_mbx_cmpl_resume_rpi.
19845	 *
19846	 * Else cmpl is NULL, then this nlp_get pairs with
19847	 * lpfc_sli_def_mbox_cmpl.
19848	 */
19849	if (!lpfc_nlp_get(ndlp)) {
19850		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
19851				"2122 %s: Failed to get nlp ref\n",
19852				__func__);
19853		mempool_free(mboxq, phba->mbox_mem_pool);
19854		return -EIO;
19855	}
19856
19857	/* Post all rpi memory regions to the port. */
19858	lpfc_resume_rpi(mboxq, ndlp);
19859	if (cmpl) {
19860		mboxq->mbox_cmpl = cmpl;
19861		mboxq->ctx_u.save_iocb = iocbq;
19862	} else
19863		mboxq->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
19864	mboxq->ctx_ndlp = ndlp;
19865	mboxq->vport = ndlp->vport;
19866	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
19867	if (rc == MBX_NOT_FINISHED) {
19868		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
19869				"2010 Resume RPI Mailbox failed "
19870				"status %d, mbxStatus x%x\n", rc,
19871				bf_get(lpfc_mqe_status, &mboxq->u.mqe));
19872		lpfc_nlp_put(ndlp);
19873		mempool_free(mboxq, phba->mbox_mem_pool);
19874		return -EIO;
19875	}
19876	return 0;
19877}
19878
19879/**
19880 * lpfc_sli4_init_vpi - Initialize a vpi with the port
19881 * @vport: Pointer to the vport for which the vpi is being initialized
19882 *
19883 * This routine is invoked to activate a vpi with the port.
19884 *
19885 * Returns:
19886 *    0 success
19887 *    -Evalue otherwise
19888 **/
19889int
19890lpfc_sli4_init_vpi(struct lpfc_vport *vport)
19891{
19892	LPFC_MBOXQ_t *mboxq;
19893	int rc = 0;
19894	int retval = MBX_SUCCESS;
19895	uint32_t mbox_tmo;
19896	struct lpfc_hba *phba = vport->phba;
19897	mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
19898	if (!mboxq)
19899		return -ENOMEM;
19900	lpfc_init_vpi(phba, mboxq, vport->vpi);
19901	mbox_tmo = lpfc_mbox_tmo_val(phba, mboxq);
19902	rc = lpfc_sli_issue_mbox_wait(phba, mboxq, mbox_tmo);
19903	if (rc != MBX_SUCCESS) {
19904		lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
19905				"2022 INIT VPI Mailbox failed "
19906				"status %d, mbxStatus x%x\n", rc,
19907				bf_get(lpfc_mqe_status, &mboxq->u.mqe));
19908		retval = -EIO;
19909	}
19910	if (rc != MBX_TIMEOUT)
19911		mempool_free(mboxq, vport->phba->mbox_mem_pool);
19912
19913	return retval;
19914}
19915
19916/**
19917 * lpfc_mbx_cmpl_add_fcf_record - add fcf mbox completion handler.
19918 * @phba: pointer to lpfc hba data structure.
19919 * @mboxq: Pointer to mailbox object.
19920 *
19921 * This routine is invoked to manually add a single FCF record. The caller
19922 * must pass a completely initialized FCF_Record.  This routine takes
19923 * care of the nonembedded mailbox operations.
19924 **/
19925static void
19926lpfc_mbx_cmpl_add_fcf_record(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
19927{
19928	void *virt_addr;
19929	union lpfc_sli4_cfg_shdr *shdr;
19930	uint32_t shdr_status, shdr_add_status;
19931
19932	virt_addr = mboxq->sge_array->addr[0];
19933	/* The IOCTL status is embedded in the mailbox subheader. */
19934	shdr = (union lpfc_sli4_cfg_shdr *) virt_addr;
19935	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
19936	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
19937
19938	if ((shdr_status || shdr_add_status) &&
19939		(shdr_status != STATUS_FCF_IN_USE))
19940		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
19941			"2558 ADD_FCF_RECORD mailbox failed with "
19942			"status x%x add_status x%x\n",
19943			shdr_status, shdr_add_status);
19944
19945	lpfc_sli4_mbox_cmd_free(phba, mboxq);
19946}
19947
19948/**
19949 * lpfc_sli4_add_fcf_record - Manually add an FCF Record.
19950 * @phba: pointer to lpfc hba data structure.
19951 * @fcf_record:  pointer to the initialized fcf record to add.
19952 *
19953 * This routine is invoked to manually add a single FCF record. The caller
19954 * must pass a completely initialized FCF_Record.  This routine takes
19955 * care of the nonembedded mailbox operations.
19956 **/
19957int
19958lpfc_sli4_add_fcf_record(struct lpfc_hba *phba, struct fcf_record *fcf_record)
19959{
19960	int rc = 0;
19961	LPFC_MBOXQ_t *mboxq;
19962	uint8_t *bytep;
19963	void *virt_addr;
19964	struct lpfc_mbx_sge sge;
19965	uint32_t alloc_len, req_len;
19966	uint32_t fcfindex;
19967
19968	mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
19969	if (!mboxq) {
19970		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
19971			"2009 Failed to allocate mbox for ADD_FCF cmd\n");
19972		return -ENOMEM;
19973	}
19974
19975	req_len = sizeof(struct fcf_record) + sizeof(union lpfc_sli4_cfg_shdr) +
19976		  sizeof(uint32_t);
19977
19978	/* Allocate DMA memory and set up the non-embedded mailbox command */
19979	alloc_len = lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_FCOE,
19980				     LPFC_MBOX_OPCODE_FCOE_ADD_FCF,
19981				     req_len, LPFC_SLI4_MBX_NEMBED);
19982	if (alloc_len < req_len) {
19983		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
19984			"2523 Allocated DMA memory size (x%x) is "
19985			"less than the requested DMA memory "
19986			"size (x%x)\n", alloc_len, req_len);
19987		lpfc_sli4_mbox_cmd_free(phba, mboxq);
19988		return -ENOMEM;
19989	}
19990
19991	/*
19992	 * Get the first SGE entry from the non-embedded DMA memory.  This
19993	 * routine only uses a single SGE.
19994	 */
19995	lpfc_sli4_mbx_sge_get(mboxq, 0, &sge);
19996	virt_addr = mboxq->sge_array->addr[0];
19997	/*
19998	 * Configure the FCF record for FCFI 0.  This is the driver's
19999	 * hardcoded default and gets used in nonFIP mode.
20000	 */
20001	fcfindex = bf_get(lpfc_fcf_record_fcf_index, fcf_record);
20002	bytep = virt_addr + sizeof(union lpfc_sli4_cfg_shdr);
20003	lpfc_sli_pcimem_bcopy(&fcfindex, bytep, sizeof(uint32_t));
20004
20005	/*
20006	 * Copy the fcf_index and the FCF Record Data. The data starts after
20007	 * the FCoE header plus word10. The data copy needs to be endian
20008	 * correct.
20009	 */
20010	bytep += sizeof(uint32_t);
20011	lpfc_sli_pcimem_bcopy(fcf_record, bytep, sizeof(struct fcf_record));
20012	mboxq->vport = phba->pport;
20013	mboxq->mbox_cmpl = lpfc_mbx_cmpl_add_fcf_record;
20014	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
20015	if (rc == MBX_NOT_FINISHED) {
20016		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
20017			"2515 ADD_FCF_RECORD mailbox failed with "
20018			"status 0x%x\n", rc);
20019		lpfc_sli4_mbox_cmd_free(phba, mboxq);
20020		rc = -EIO;
20021	} else
20022		rc = 0;
20023
20024	return rc;
20025}
20026
20027/**
20028 * lpfc_sli4_build_dflt_fcf_record - Build the driver's default FCF Record.
20029 * @phba: pointer to lpfc hba data structure.
20030 * @fcf_record:  pointer to the fcf record to write the default data.
20031 * @fcf_index: FCF table entry index.
20032 *
20033 * This routine is invoked to build the driver's default FCF record.  The
20034 * values used are hardcoded.  This routine handles memory initialization.
20035 *
20036 **/
20037void
20038lpfc_sli4_build_dflt_fcf_record(struct lpfc_hba *phba,
20039				struct fcf_record *fcf_record,
20040				uint16_t fcf_index)
20041{
20042	memset(fcf_record, 0, sizeof(struct fcf_record));
20043	fcf_record->max_rcv_size = LPFC_FCOE_MAX_RCV_SIZE;
20044	fcf_record->fka_adv_period = LPFC_FCOE_FKA_ADV_PER;
20045	fcf_record->fip_priority = LPFC_FCOE_FIP_PRIORITY;
20046	bf_set(lpfc_fcf_record_mac_0, fcf_record, phba->fc_map[0]);
20047	bf_set(lpfc_fcf_record_mac_1, fcf_record, phba->fc_map[1]);
20048	bf_set(lpfc_fcf_record_mac_2, fcf_record, phba->fc_map[2]);
20049	bf_set(lpfc_fcf_record_mac_3, fcf_record, LPFC_FCOE_FCF_MAC3);
20050	bf_set(lpfc_fcf_record_mac_4, fcf_record, LPFC_FCOE_FCF_MAC4);
20051	bf_set(lpfc_fcf_record_mac_5, fcf_record, LPFC_FCOE_FCF_MAC5);
20052	bf_set(lpfc_fcf_record_fc_map_0, fcf_record, phba->fc_map[0]);
20053	bf_set(lpfc_fcf_record_fc_map_1, fcf_record, phba->fc_map[1]);
20054	bf_set(lpfc_fcf_record_fc_map_2, fcf_record, phba->fc_map[2]);
20055	bf_set(lpfc_fcf_record_fcf_valid, fcf_record, 1);
20056	bf_set(lpfc_fcf_record_fcf_avail, fcf_record, 1);
20057	bf_set(lpfc_fcf_record_fcf_index, fcf_record, fcf_index);
20058	bf_set(lpfc_fcf_record_mac_addr_prov, fcf_record,
20059		LPFC_FCF_FPMA | LPFC_FCF_SPMA);
20060	/* Set the VLAN bit map */
20061	if (phba->valid_vlan) {
20062		fcf_record->vlan_bitmap[phba->vlan_id / 8]
20063			= 1 << (phba->vlan_id % 8);
20064	}
20065}
20066
20067/**
20068 * lpfc_sli4_fcf_scan_read_fcf_rec - Read hba fcf record for fcf scan.
20069 * @phba: pointer to lpfc hba data structure.
20070 * @fcf_index: FCF table entry offset.
20071 *
20072 * This routine is invoked to scan the entire FCF table by reading FCF
20073 * record and processing it one at a time starting from the @fcf_index
20074 * for initial FCF discovery or fast FCF failover rediscovery.
20075 *
20076 * Return 0 if the mailbox command is submitted successfully, none 0
20077 * otherwise.
20078 **/
20079int
20080lpfc_sli4_fcf_scan_read_fcf_rec(struct lpfc_hba *phba, uint16_t fcf_index)
20081{
20082	int rc = 0, error;
20083	LPFC_MBOXQ_t *mboxq;
20084
20085	phba->fcoe_eventtag_at_fcf_scan = phba->fcoe_eventtag;
20086	phba->fcoe_cvl_eventtag_attn = phba->fcoe_cvl_eventtag;
20087	mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
20088	if (!mboxq) {
20089		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
20090				"2000 Failed to allocate mbox for "
20091				"READ_FCF cmd\n");
20092		error = -ENOMEM;
20093		goto fail_fcf_scan;
20094	}
20095	/* Construct the read FCF record mailbox command */
20096	rc = lpfc_sli4_mbx_read_fcf_rec(phba, mboxq, fcf_index);
20097	if (rc) {
20098		error = -EINVAL;
20099		goto fail_fcf_scan;
20100	}
20101	/* Issue the mailbox command asynchronously */
20102	mboxq->vport = phba->pport;
20103	mboxq->mbox_cmpl = lpfc_mbx_cmpl_fcf_scan_read_fcf_rec;
20104
20105	spin_lock_irq(&phba->hbalock);
20106	phba->hba_flag |= FCF_TS_INPROG;
20107	spin_unlock_irq(&phba->hbalock);
20108
20109	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
20110	if (rc == MBX_NOT_FINISHED)
20111		error = -EIO;
20112	else {
20113		/* Reset eligible FCF count for new scan */
20114		if (fcf_index == LPFC_FCOE_FCF_GET_FIRST)
20115			phba->fcf.eligible_fcf_cnt = 0;
20116		error = 0;
20117	}
20118fail_fcf_scan:
20119	if (error) {
20120		if (mboxq)
20121			lpfc_sli4_mbox_cmd_free(phba, mboxq);
20122		/* FCF scan failed, clear FCF_TS_INPROG flag */
20123		spin_lock_irq(&phba->hbalock);
20124		phba->hba_flag &= ~FCF_TS_INPROG;
20125		spin_unlock_irq(&phba->hbalock);
20126	}
20127	return error;
20128}
20129
20130/**
20131 * lpfc_sli4_fcf_rr_read_fcf_rec - Read hba fcf record for roundrobin fcf.
20132 * @phba: pointer to lpfc hba data structure.
20133 * @fcf_index: FCF table entry offset.
20134 *
20135 * This routine is invoked to read an FCF record indicated by @fcf_index
20136 * and to use it for FLOGI roundrobin FCF failover.
20137 *
20138 * Return 0 if the mailbox command is submitted successfully, none 0
20139 * otherwise.
20140 **/
20141int
20142lpfc_sli4_fcf_rr_read_fcf_rec(struct lpfc_hba *phba, uint16_t fcf_index)
20143{
20144	int rc = 0, error;
20145	LPFC_MBOXQ_t *mboxq;
20146
20147	mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
20148	if (!mboxq) {
20149		lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_INIT,
20150				"2763 Failed to allocate mbox for "
20151				"READ_FCF cmd\n");
20152		error = -ENOMEM;
20153		goto fail_fcf_read;
20154	}
20155	/* Construct the read FCF record mailbox command */
20156	rc = lpfc_sli4_mbx_read_fcf_rec(phba, mboxq, fcf_index);
20157	if (rc) {
20158		error = -EINVAL;
20159		goto fail_fcf_read;
20160	}
20161	/* Issue the mailbox command asynchronously */
20162	mboxq->vport = phba->pport;
20163	mboxq->mbox_cmpl = lpfc_mbx_cmpl_fcf_rr_read_fcf_rec;
20164	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
20165	if (rc == MBX_NOT_FINISHED)
20166		error = -EIO;
20167	else
20168		error = 0;
20169
20170fail_fcf_read:
20171	if (error && mboxq)
20172		lpfc_sli4_mbox_cmd_free(phba, mboxq);
20173	return error;
20174}
20175
20176/**
20177 * lpfc_sli4_read_fcf_rec - Read hba fcf record for update eligible fcf bmask.
20178 * @phba: pointer to lpfc hba data structure.
20179 * @fcf_index: FCF table entry offset.
20180 *
20181 * This routine is invoked to read an FCF record indicated by @fcf_index to
20182 * determine whether it's eligible for FLOGI roundrobin failover list.
20183 *
20184 * Return 0 if the mailbox command is submitted successfully, none 0
20185 * otherwise.
20186 **/
20187int
20188lpfc_sli4_read_fcf_rec(struct lpfc_hba *phba, uint16_t fcf_index)
20189{
20190	int rc = 0, error;
20191	LPFC_MBOXQ_t *mboxq;
20192
20193	mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
20194	if (!mboxq) {
20195		lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_INIT,
20196				"2758 Failed to allocate mbox for "
20197				"READ_FCF cmd\n");
20198				error = -ENOMEM;
20199				goto fail_fcf_read;
20200	}
20201	/* Construct the read FCF record mailbox command */
20202	rc = lpfc_sli4_mbx_read_fcf_rec(phba, mboxq, fcf_index);
20203	if (rc) {
20204		error = -EINVAL;
20205		goto fail_fcf_read;
20206	}
20207	/* Issue the mailbox command asynchronously */
20208	mboxq->vport = phba->pport;
20209	mboxq->mbox_cmpl = lpfc_mbx_cmpl_read_fcf_rec;
20210	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
20211	if (rc == MBX_NOT_FINISHED)
20212		error = -EIO;
20213	else
20214		error = 0;
20215
20216fail_fcf_read:
20217	if (error && mboxq)
20218		lpfc_sli4_mbox_cmd_free(phba, mboxq);
20219	return error;
20220}
20221
20222/**
20223 * lpfc_check_next_fcf_pri_level
20224 * @phba: pointer to the lpfc_hba struct for this port.
20225 * This routine is called from the lpfc_sli4_fcf_rr_next_index_get
20226 * routine when the rr_bmask is empty. The FCF indecies are put into the
20227 * rr_bmask based on their priority level. Starting from the highest priority
20228 * to the lowest. The most likely FCF candidate will be in the highest
20229 * priority group. When this routine is called it searches the fcf_pri list for
20230 * next lowest priority group and repopulates the rr_bmask with only those
20231 * fcf_indexes.
20232 * returns:
20233 * 1=success 0=failure
20234 **/
20235static int
20236lpfc_check_next_fcf_pri_level(struct lpfc_hba *phba)
20237{
20238	uint16_t next_fcf_pri;
20239	uint16_t last_index;
20240	struct lpfc_fcf_pri *fcf_pri;
20241	int rc;
20242	int ret = 0;
20243
20244	last_index = find_first_bit(phba->fcf.fcf_rr_bmask,
20245			LPFC_SLI4_FCF_TBL_INDX_MAX);
20246	lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
20247			"3060 Last IDX %d\n", last_index);
20248
20249	/* Verify the priority list has 2 or more entries */
20250	spin_lock_irq(&phba->hbalock);
20251	if (list_empty(&phba->fcf.fcf_pri_list) ||
20252	    list_is_singular(&phba->fcf.fcf_pri_list)) {
20253		spin_unlock_irq(&phba->hbalock);
20254		lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
20255			"3061 Last IDX %d\n", last_index);
20256		return 0; /* Empty rr list */
20257	}
20258	spin_unlock_irq(&phba->hbalock);
20259
20260	next_fcf_pri = 0;
20261	/*
20262	 * Clear the rr_bmask and set all of the bits that are at this
20263	 * priority.
20264	 */
20265	memset(phba->fcf.fcf_rr_bmask, 0,
20266			sizeof(*phba->fcf.fcf_rr_bmask));
20267	spin_lock_irq(&phba->hbalock);
20268	list_for_each_entry(fcf_pri, &phba->fcf.fcf_pri_list, list) {
20269		if (fcf_pri->fcf_rec.flag & LPFC_FCF_FLOGI_FAILED)
20270			continue;
20271		/*
20272		 * the 1st priority that has not FLOGI failed
20273		 * will be the highest.
20274		 */
20275		if (!next_fcf_pri)
20276			next_fcf_pri = fcf_pri->fcf_rec.priority;
20277		spin_unlock_irq(&phba->hbalock);
20278		if (fcf_pri->fcf_rec.priority == next_fcf_pri) {
20279			rc = lpfc_sli4_fcf_rr_index_set(phba,
20280						fcf_pri->fcf_rec.fcf_index);
20281			if (rc)
20282				return 0;
20283		}
20284		spin_lock_irq(&phba->hbalock);
20285	}
20286	/*
20287	 * if next_fcf_pri was not set above and the list is not empty then
20288	 * we have failed flogis on all of them. So reset flogi failed
20289	 * and start at the beginning.
20290	 */
20291	if (!next_fcf_pri && !list_empty(&phba->fcf.fcf_pri_list)) {
20292		list_for_each_entry(fcf_pri, &phba->fcf.fcf_pri_list, list) {
20293			fcf_pri->fcf_rec.flag &= ~LPFC_FCF_FLOGI_FAILED;
20294			/*
20295			 * the 1st priority that has not FLOGI failed
20296			 * will be the highest.
20297			 */
20298			if (!next_fcf_pri)
20299				next_fcf_pri = fcf_pri->fcf_rec.priority;
20300			spin_unlock_irq(&phba->hbalock);
20301			if (fcf_pri->fcf_rec.priority == next_fcf_pri) {
20302				rc = lpfc_sli4_fcf_rr_index_set(phba,
20303						fcf_pri->fcf_rec.fcf_index);
20304				if (rc)
20305					return 0;
20306			}
20307			spin_lock_irq(&phba->hbalock);
20308		}
20309	} else
20310		ret = 1;
20311	spin_unlock_irq(&phba->hbalock);
20312
20313	return ret;
20314}
20315/**
20316 * lpfc_sli4_fcf_rr_next_index_get - Get next eligible fcf record index
20317 * @phba: pointer to lpfc hba data structure.
20318 *
20319 * This routine is to get the next eligible FCF record index in a round
20320 * robin fashion. If the next eligible FCF record index equals to the
20321 * initial roundrobin FCF record index, LPFC_FCOE_FCF_NEXT_NONE (0xFFFF)
20322 * shall be returned, otherwise, the next eligible FCF record's index
20323 * shall be returned.
20324 **/
20325uint16_t
20326lpfc_sli4_fcf_rr_next_index_get(struct lpfc_hba *phba)
20327{
20328	uint16_t next_fcf_index;
20329
20330initial_priority:
20331	/* Search start from next bit of currently registered FCF index */
20332	next_fcf_index = phba->fcf.current_rec.fcf_indx;
20333
20334next_priority:
20335	/* Determine the next fcf index to check */
20336	next_fcf_index = (next_fcf_index + 1) % LPFC_SLI4_FCF_TBL_INDX_MAX;
20337	next_fcf_index = find_next_bit(phba->fcf.fcf_rr_bmask,
20338				       LPFC_SLI4_FCF_TBL_INDX_MAX,
20339				       next_fcf_index);
20340
20341	/* Wrap around condition on phba->fcf.fcf_rr_bmask */
20342	if (next_fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX) {
20343		/*
20344		 * If we have wrapped then we need to clear the bits that
20345		 * have been tested so that we can detect when we should
20346		 * change the priority level.
20347		 */
20348		next_fcf_index = find_first_bit(phba->fcf.fcf_rr_bmask,
20349					       LPFC_SLI4_FCF_TBL_INDX_MAX);
20350	}
20351
20352
20353	/* Check roundrobin failover list empty condition */
20354	if (next_fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX ||
20355		next_fcf_index == phba->fcf.current_rec.fcf_indx) {
20356		/*
20357		 * If next fcf index is not found check if there are lower
20358		 * Priority level fcf's in the fcf_priority list.
20359		 * Set up the rr_bmask with all of the avaiable fcf bits
20360		 * at that level and continue the selection process.
20361		 */
20362		if (lpfc_check_next_fcf_pri_level(phba))
20363			goto initial_priority;
20364		lpfc_printf_log(phba, KERN_WARNING, LOG_FIP,
20365				"2844 No roundrobin failover FCF available\n");
20366
20367		return LPFC_FCOE_FCF_NEXT_NONE;
20368	}
20369
20370	if (next_fcf_index < LPFC_SLI4_FCF_TBL_INDX_MAX &&
20371		phba->fcf.fcf_pri[next_fcf_index].fcf_rec.flag &
20372		LPFC_FCF_FLOGI_FAILED) {
20373		if (list_is_singular(&phba->fcf.fcf_pri_list))
20374			return LPFC_FCOE_FCF_NEXT_NONE;
20375
20376		goto next_priority;
20377	}
20378
20379	lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
20380			"2845 Get next roundrobin failover FCF (x%x)\n",
20381			next_fcf_index);
20382
20383	return next_fcf_index;
20384}
20385
20386/**
20387 * lpfc_sli4_fcf_rr_index_set - Set bmask with eligible fcf record index
20388 * @phba: pointer to lpfc hba data structure.
20389 * @fcf_index: index into the FCF table to 'set'
20390 *
20391 * This routine sets the FCF record index in to the eligible bmask for
20392 * roundrobin failover search. It checks to make sure that the index
20393 * does not go beyond the range of the driver allocated bmask dimension
20394 * before setting the bit.
20395 *
20396 * Returns 0 if the index bit successfully set, otherwise, it returns
20397 * -EINVAL.
20398 **/
20399int
20400lpfc_sli4_fcf_rr_index_set(struct lpfc_hba *phba, uint16_t fcf_index)
20401{
20402	if (fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX) {
20403		lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
20404				"2610 FCF (x%x) reached driver's book "
20405				"keeping dimension:x%x\n",
20406				fcf_index, LPFC_SLI4_FCF_TBL_INDX_MAX);
20407		return -EINVAL;
20408	}
20409	/* Set the eligible FCF record index bmask */
20410	set_bit(fcf_index, phba->fcf.fcf_rr_bmask);
20411
20412	lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
20413			"2790 Set FCF (x%x) to roundrobin FCF failover "
20414			"bmask\n", fcf_index);
20415
20416	return 0;
20417}
20418
20419/**
20420 * lpfc_sli4_fcf_rr_index_clear - Clear bmask from eligible fcf record index
20421 * @phba: pointer to lpfc hba data structure.
20422 * @fcf_index: index into the FCF table to 'clear'
20423 *
20424 * This routine clears the FCF record index from the eligible bmask for
20425 * roundrobin failover search. It checks to make sure that the index
20426 * does not go beyond the range of the driver allocated bmask dimension
20427 * before clearing the bit.
20428 **/
20429void
20430lpfc_sli4_fcf_rr_index_clear(struct lpfc_hba *phba, uint16_t fcf_index)
20431{
20432	struct lpfc_fcf_pri *fcf_pri, *fcf_pri_next;
20433	if (fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX) {
20434		lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
20435				"2762 FCF (x%x) reached driver's book "
20436				"keeping dimension:x%x\n",
20437				fcf_index, LPFC_SLI4_FCF_TBL_INDX_MAX);
20438		return;
20439	}
20440	/* Clear the eligible FCF record index bmask */
20441	spin_lock_irq(&phba->hbalock);
20442	list_for_each_entry_safe(fcf_pri, fcf_pri_next, &phba->fcf.fcf_pri_list,
20443				 list) {
20444		if (fcf_pri->fcf_rec.fcf_index == fcf_index) {
20445			list_del_init(&fcf_pri->list);
20446			break;
20447		}
20448	}
20449	spin_unlock_irq(&phba->hbalock);
20450	clear_bit(fcf_index, phba->fcf.fcf_rr_bmask);
20451
20452	lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
20453			"2791 Clear FCF (x%x) from roundrobin failover "
20454			"bmask\n", fcf_index);
20455}
20456
20457/**
20458 * lpfc_mbx_cmpl_redisc_fcf_table - completion routine for rediscover FCF table
20459 * @phba: pointer to lpfc hba data structure.
20460 * @mbox: An allocated pointer to type LPFC_MBOXQ_t
20461 *
20462 * This routine is the completion routine for the rediscover FCF table mailbox
20463 * command. If the mailbox command returned failure, it will try to stop the
20464 * FCF rediscover wait timer.
20465 **/
20466static void
20467lpfc_mbx_cmpl_redisc_fcf_table(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox)
20468{
20469	struct lpfc_mbx_redisc_fcf_tbl *redisc_fcf;
20470	uint32_t shdr_status, shdr_add_status;
20471
20472	redisc_fcf = &mbox->u.mqe.un.redisc_fcf_tbl;
20473
20474	shdr_status = bf_get(lpfc_mbox_hdr_status,
20475			     &redisc_fcf->header.cfg_shdr.response);
20476	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status,
20477			     &redisc_fcf->header.cfg_shdr.response);
20478	if (shdr_status || shdr_add_status) {
20479		lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
20480				"2746 Requesting for FCF rediscovery failed "
20481				"status x%x add_status x%x\n",
20482				shdr_status, shdr_add_status);
20483		if (phba->fcf.fcf_flag & FCF_ACVL_DISC) {
20484			spin_lock_irq(&phba->hbalock);
20485			phba->fcf.fcf_flag &= ~FCF_ACVL_DISC;
20486			spin_unlock_irq(&phba->hbalock);
20487			/*
20488			 * CVL event triggered FCF rediscover request failed,
20489			 * last resort to re-try current registered FCF entry.
20490			 */
20491			lpfc_retry_pport_discovery(phba);
20492		} else {
20493			spin_lock_irq(&phba->hbalock);
20494			phba->fcf.fcf_flag &= ~FCF_DEAD_DISC;
20495			spin_unlock_irq(&phba->hbalock);
20496			/*
20497			 * DEAD FCF event triggered FCF rediscover request
20498			 * failed, last resort to fail over as a link down
20499			 * to FCF registration.
20500			 */
20501			lpfc_sli4_fcf_dead_failthrough(phba);
20502		}
20503	} else {
20504		lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
20505				"2775 Start FCF rediscover quiescent timer\n");
20506		/*
20507		 * Start FCF rediscovery wait timer for pending FCF
20508		 * before rescan FCF record table.
20509		 */
20510		lpfc_fcf_redisc_wait_start_timer(phba);
20511	}
20512
20513	mempool_free(mbox, phba->mbox_mem_pool);
20514}
20515
20516/**
20517 * lpfc_sli4_redisc_fcf_table - Request to rediscover entire FCF table by port.
20518 * @phba: pointer to lpfc hba data structure.
20519 *
20520 * This routine is invoked to request for rediscovery of the entire FCF table
20521 * by the port.
20522 **/
20523int
20524lpfc_sli4_redisc_fcf_table(struct lpfc_hba *phba)
20525{
20526	LPFC_MBOXQ_t *mbox;
20527	struct lpfc_mbx_redisc_fcf_tbl *redisc_fcf;
20528	int rc, length;
20529
20530	/* Cancel retry delay timers to all vports before FCF rediscover */
20531	lpfc_cancel_all_vport_retry_delay_timer(phba);
20532
20533	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
20534	if (!mbox) {
20535		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
20536				"2745 Failed to allocate mbox for "
20537				"requesting FCF rediscover.\n");
20538		return -ENOMEM;
20539	}
20540
20541	length = (sizeof(struct lpfc_mbx_redisc_fcf_tbl) -
20542		  sizeof(struct lpfc_sli4_cfg_mhdr));
20543	lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
20544			 LPFC_MBOX_OPCODE_FCOE_REDISCOVER_FCF,
20545			 length, LPFC_SLI4_MBX_EMBED);
20546
20547	redisc_fcf = &mbox->u.mqe.un.redisc_fcf_tbl;
20548	/* Set count to 0 for invalidating the entire FCF database */
20549	bf_set(lpfc_mbx_redisc_fcf_count, redisc_fcf, 0);
20550
20551	/* Issue the mailbox command asynchronously */
20552	mbox->vport = phba->pport;
20553	mbox->mbox_cmpl = lpfc_mbx_cmpl_redisc_fcf_table;
20554	rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
20555
20556	if (rc == MBX_NOT_FINISHED) {
20557		mempool_free(mbox, phba->mbox_mem_pool);
20558		return -EIO;
20559	}
20560	return 0;
20561}
20562
20563/**
20564 * lpfc_sli4_fcf_dead_failthrough - Failthrough routine to fcf dead event
20565 * @phba: pointer to lpfc hba data structure.
20566 *
20567 * This function is the failover routine as a last resort to the FCF DEAD
20568 * event when driver failed to perform fast FCF failover.
20569 **/
20570void
20571lpfc_sli4_fcf_dead_failthrough(struct lpfc_hba *phba)
20572{
20573	uint32_t link_state;
20574
20575	/*
20576	 * Last resort as FCF DEAD event failover will treat this as
20577	 * a link down, but save the link state because we don't want
20578	 * it to be changed to Link Down unless it is already down.
20579	 */
20580	link_state = phba->link_state;
20581	lpfc_linkdown(phba);
20582	phba->link_state = link_state;
20583
20584	/* Unregister FCF if no devices connected to it */
20585	lpfc_unregister_unused_fcf(phba);
20586}
20587
20588/**
20589 * lpfc_sli_get_config_region23 - Get sli3 port region 23 data.
20590 * @phba: pointer to lpfc hba data structure.
20591 * @rgn23_data: pointer to configure region 23 data.
20592 *
20593 * This function gets SLI3 port configure region 23 data through memory dump
20594 * mailbox command. When it successfully retrieves data, the size of the data
20595 * will be returned, otherwise, 0 will be returned.
20596 **/
20597static uint32_t
20598lpfc_sli_get_config_region23(struct lpfc_hba *phba, char *rgn23_data)
20599{
20600	LPFC_MBOXQ_t *pmb = NULL;
20601	MAILBOX_t *mb;
20602	uint32_t offset = 0;
20603	int rc;
20604
20605	if (!rgn23_data)
20606		return 0;
20607
20608	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
20609	if (!pmb) {
20610		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
20611				"2600 failed to allocate mailbox memory\n");
20612		return 0;
20613	}
20614	mb = &pmb->u.mb;
20615
20616	do {
20617		lpfc_dump_mem(phba, pmb, offset, DMP_REGION_23);
20618		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
20619
20620		if (rc != MBX_SUCCESS) {
20621			lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
20622					"2601 failed to read config "
20623					"region 23, rc 0x%x Status 0x%x\n",
20624					rc, mb->mbxStatus);
20625			mb->un.varDmp.word_cnt = 0;
20626		}
20627		/*
20628		 * dump mem may return a zero when finished or we got a
20629		 * mailbox error, either way we are done.
20630		 */
20631		if (mb->un.varDmp.word_cnt == 0)
20632			break;
20633
20634		if (mb->un.varDmp.word_cnt > DMP_RGN23_SIZE - offset)
20635			mb->un.varDmp.word_cnt = DMP_RGN23_SIZE - offset;
20636
20637		lpfc_sli_pcimem_bcopy(((uint8_t *)mb) + DMP_RSP_OFFSET,
20638				       rgn23_data + offset,
20639				       mb->un.varDmp.word_cnt);
20640		offset += mb->un.varDmp.word_cnt;
20641	} while (mb->un.varDmp.word_cnt && offset < DMP_RGN23_SIZE);
20642
20643	mempool_free(pmb, phba->mbox_mem_pool);
20644	return offset;
20645}
20646
20647/**
20648 * lpfc_sli4_get_config_region23 - Get sli4 port region 23 data.
20649 * @phba: pointer to lpfc hba data structure.
20650 * @rgn23_data: pointer to configure region 23 data.
20651 *
20652 * This function gets SLI4 port configure region 23 data through memory dump
20653 * mailbox command. When it successfully retrieves data, the size of the data
20654 * will be returned, otherwise, 0 will be returned.
20655 **/
20656static uint32_t
20657lpfc_sli4_get_config_region23(struct lpfc_hba *phba, char *rgn23_data)
20658{
20659	LPFC_MBOXQ_t *mboxq = NULL;
20660	struct lpfc_dmabuf *mp = NULL;
20661	struct lpfc_mqe *mqe;
20662	uint32_t data_length = 0;
20663	int rc;
20664
20665	if (!rgn23_data)
20666		return 0;
20667
20668	mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
20669	if (!mboxq) {
20670		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
20671				"3105 failed to allocate mailbox memory\n");
20672		return 0;
20673	}
20674
20675	if (lpfc_sli4_dump_cfg_rg23(phba, mboxq))
20676		goto out;
20677	mqe = &mboxq->u.mqe;
20678	mp = mboxq->ctx_buf;
20679	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
20680	if (rc)
20681		goto out;
20682	data_length = mqe->un.mb_words[5];
20683	if (data_length == 0)
20684		goto out;
20685	if (data_length > DMP_RGN23_SIZE) {
20686		data_length = 0;
20687		goto out;
20688	}
20689	lpfc_sli_pcimem_bcopy((char *)mp->virt, rgn23_data, data_length);
20690out:
20691	lpfc_mbox_rsrc_cleanup(phba, mboxq, MBOX_THD_UNLOCKED);
20692	return data_length;
20693}
20694
20695/**
20696 * lpfc_sli_read_link_ste - Read region 23 to decide if link is disabled.
20697 * @phba: pointer to lpfc hba data structure.
20698 *
20699 * This function read region 23 and parse TLV for port status to
20700 * decide if the user disaled the port. If the TLV indicates the
20701 * port is disabled, the hba_flag is set accordingly.
20702 **/
20703void
20704lpfc_sli_read_link_ste(struct lpfc_hba *phba)
20705{
20706	uint8_t *rgn23_data = NULL;
20707	uint32_t if_type, data_size, sub_tlv_len, tlv_offset;
20708	uint32_t offset = 0;
20709
20710	/* Get adapter Region 23 data */
20711	rgn23_data = kzalloc(DMP_RGN23_SIZE, GFP_KERNEL);
20712	if (!rgn23_data)
20713		goto out;
20714
20715	if (phba->sli_rev < LPFC_SLI_REV4)
20716		data_size = lpfc_sli_get_config_region23(phba, rgn23_data);
20717	else {
20718		if_type = bf_get(lpfc_sli_intf_if_type,
20719				 &phba->sli4_hba.sli_intf);
20720		if (if_type == LPFC_SLI_INTF_IF_TYPE_0)
20721			goto out;
20722		data_size = lpfc_sli4_get_config_region23(phba, rgn23_data);
20723	}
20724
20725	if (!data_size)
20726		goto out;
20727
20728	/* Check the region signature first */
20729	if (memcmp(&rgn23_data[offset], LPFC_REGION23_SIGNATURE, 4)) {
20730		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
20731			"2619 Config region 23 has bad signature\n");
20732			goto out;
20733	}
20734	offset += 4;
20735
20736	/* Check the data structure version */
20737	if (rgn23_data[offset] != LPFC_REGION23_VERSION) {
20738		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
20739			"2620 Config region 23 has bad version\n");
20740		goto out;
20741	}
20742	offset += 4;
20743
20744	/* Parse TLV entries in the region */
20745	while (offset < data_size) {
20746		if (rgn23_data[offset] == LPFC_REGION23_LAST_REC)
20747			break;
20748		/*
20749		 * If the TLV is not driver specific TLV or driver id is
20750		 * not linux driver id, skip the record.
20751		 */
20752		if ((rgn23_data[offset] != DRIVER_SPECIFIC_TYPE) ||
20753		    (rgn23_data[offset + 2] != LINUX_DRIVER_ID) ||
20754		    (rgn23_data[offset + 3] != 0)) {
20755			offset += rgn23_data[offset + 1] * 4 + 4;
20756			continue;
20757		}
20758
20759		/* Driver found a driver specific TLV in the config region */
20760		sub_tlv_len = rgn23_data[offset + 1] * 4;
20761		offset += 4;
20762		tlv_offset = 0;
20763
20764		/*
20765		 * Search for configured port state sub-TLV.
20766		 */
20767		while ((offset < data_size) &&
20768			(tlv_offset < sub_tlv_len)) {
20769			if (rgn23_data[offset] == LPFC_REGION23_LAST_REC) {
20770				offset += 4;
20771				tlv_offset += 4;
20772				break;
20773			}
20774			if (rgn23_data[offset] != PORT_STE_TYPE) {
20775				offset += rgn23_data[offset + 1] * 4 + 4;
20776				tlv_offset += rgn23_data[offset + 1] * 4 + 4;
20777				continue;
20778			}
20779
20780			/* This HBA contains PORT_STE configured */
20781			if (!rgn23_data[offset + 2])
20782				phba->hba_flag |= LINK_DISABLED;
20783
20784			goto out;
20785		}
20786	}
20787
20788out:
20789	kfree(rgn23_data);
20790	return;
20791}
20792
20793/**
20794 * lpfc_log_fw_write_cmpl - logs firmware write completion status
20795 * @phba: pointer to lpfc hba data structure
20796 * @shdr_status: wr_object rsp's status field
20797 * @shdr_add_status: wr_object rsp's add_status field
20798 * @shdr_add_status_2: wr_object rsp's add_status_2 field
20799 * @shdr_change_status: wr_object rsp's change_status field
20800 * @shdr_csf: wr_object rsp's csf bit
20801 *
20802 * This routine is intended to be called after a firmware write completes.
20803 * It will log next action items to be performed by the user to instantiate
20804 * the newly downloaded firmware or reason for incompatibility.
20805 **/
20806static void
20807lpfc_log_fw_write_cmpl(struct lpfc_hba *phba, u32 shdr_status,
20808		       u32 shdr_add_status, u32 shdr_add_status_2,
20809		       u32 shdr_change_status, u32 shdr_csf)
20810{
20811	lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
20812			"4198 %s: flash_id x%02x, asic_rev x%02x, "
20813			"status x%02x, add_status x%02x, add_status_2 x%02x, "
20814			"change_status x%02x, csf %01x\n", __func__,
20815			phba->sli4_hba.flash_id, phba->sli4_hba.asic_rev,
20816			shdr_status, shdr_add_status, shdr_add_status_2,
20817			shdr_change_status, shdr_csf);
20818
20819	if (shdr_add_status == LPFC_ADD_STATUS_INCOMPAT_OBJ) {
20820		switch (shdr_add_status_2) {
20821		case LPFC_ADD_STATUS_2_INCOMPAT_FLASH:
20822			lpfc_log_msg(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
20823				     "4199 Firmware write failed: "
20824				     "image incompatible with flash x%02x\n",
20825				     phba->sli4_hba.flash_id);
20826			break;
20827		case LPFC_ADD_STATUS_2_INCORRECT_ASIC:
20828			lpfc_log_msg(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
20829				     "4200 Firmware write failed: "
20830				     "image incompatible with ASIC "
20831				     "architecture x%02x\n",
20832				     phba->sli4_hba.asic_rev);
20833			break;
20834		default:
20835			lpfc_log_msg(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
20836				     "4210 Firmware write failed: "
20837				     "add_status_2 x%02x\n",
20838				     shdr_add_status_2);
20839			break;
20840		}
20841	} else if (!shdr_status && !shdr_add_status) {
20842		if (shdr_change_status == LPFC_CHANGE_STATUS_FW_RESET ||
20843		    shdr_change_status == LPFC_CHANGE_STATUS_PORT_MIGRATION) {
20844			if (shdr_csf)
20845				shdr_change_status =
20846						   LPFC_CHANGE_STATUS_PCI_RESET;
20847		}
20848
20849		switch (shdr_change_status) {
20850		case (LPFC_CHANGE_STATUS_PHYS_DEV_RESET):
20851			lpfc_log_msg(phba, KERN_NOTICE, LOG_MBOX | LOG_SLI,
20852				     "3198 Firmware write complete: System "
20853				     "reboot required to instantiate\n");
20854			break;
20855		case (LPFC_CHANGE_STATUS_FW_RESET):
20856			lpfc_log_msg(phba, KERN_NOTICE, LOG_MBOX | LOG_SLI,
20857				     "3199 Firmware write complete: "
20858				     "Firmware reset required to "
20859				     "instantiate\n");
20860			break;
20861		case (LPFC_CHANGE_STATUS_PORT_MIGRATION):
20862			lpfc_log_msg(phba, KERN_NOTICE, LOG_MBOX | LOG_SLI,
20863				     "3200 Firmware write complete: Port "
20864				     "Migration or PCI Reset required to "
20865				     "instantiate\n");
20866			break;
20867		case (LPFC_CHANGE_STATUS_PCI_RESET):
20868			lpfc_log_msg(phba, KERN_NOTICE, LOG_MBOX | LOG_SLI,
20869				     "3201 Firmware write complete: PCI "
20870				     "Reset required to instantiate\n");
20871			break;
20872		default:
20873			break;
20874		}
20875	}
20876}
20877
20878/**
20879 * lpfc_wr_object - write an object to the firmware
20880 * @phba: HBA structure that indicates port to create a queue on.
20881 * @dmabuf_list: list of dmabufs to write to the port.
20882 * @size: the total byte value of the objects to write to the port.
20883 * @offset: the current offset to be used to start the transfer.
20884 *
20885 * This routine will create a wr_object mailbox command to send to the port.
20886 * the mailbox command will be constructed using the dma buffers described in
20887 * @dmabuf_list to create a list of BDEs. This routine will fill in as many
20888 * BDEs that the imbedded mailbox can support. The @offset variable will be
20889 * used to indicate the starting offset of the transfer and will also return
20890 * the offset after the write object mailbox has completed. @size is used to
20891 * determine the end of the object and whether the eof bit should be set.
20892 *
20893 * Return 0 is successful and offset will contain the new offset to use
20894 * for the next write.
20895 * Return negative value for error cases.
20896 **/
20897int
20898lpfc_wr_object(struct lpfc_hba *phba, struct list_head *dmabuf_list,
20899	       uint32_t size, uint32_t *offset)
20900{
20901	struct lpfc_mbx_wr_object *wr_object;
20902	LPFC_MBOXQ_t *mbox;
20903	int rc = 0, i = 0;
20904	int mbox_status = 0;
20905	uint32_t shdr_status, shdr_add_status, shdr_add_status_2;
20906	uint32_t shdr_change_status = 0, shdr_csf = 0;
20907	uint32_t mbox_tmo;
20908	struct lpfc_dmabuf *dmabuf;
20909	uint32_t written = 0;
20910	bool check_change_status = false;
20911
20912	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
20913	if (!mbox)
20914		return -ENOMEM;
20915
20916	lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
20917			LPFC_MBOX_OPCODE_WRITE_OBJECT,
20918			sizeof(struct lpfc_mbx_wr_object) -
20919			sizeof(struct lpfc_sli4_cfg_mhdr), LPFC_SLI4_MBX_EMBED);
20920
20921	wr_object = (struct lpfc_mbx_wr_object *)&mbox->u.mqe.un.wr_object;
20922	wr_object->u.request.write_offset = *offset;
20923	sprintf((uint8_t *)wr_object->u.request.object_name, "/");
20924	wr_object->u.request.object_name[0] =
20925		cpu_to_le32(wr_object->u.request.object_name[0]);
20926	bf_set(lpfc_wr_object_eof, &wr_object->u.request, 0);
20927	list_for_each_entry(dmabuf, dmabuf_list, list) {
20928		if (i >= LPFC_MBX_WR_CONFIG_MAX_BDE || written >= size)
20929			break;
20930		wr_object->u.request.bde[i].addrLow = putPaddrLow(dmabuf->phys);
20931		wr_object->u.request.bde[i].addrHigh =
20932			putPaddrHigh(dmabuf->phys);
20933		if (written + SLI4_PAGE_SIZE >= size) {
20934			wr_object->u.request.bde[i].tus.f.bdeSize =
20935				(size - written);
20936			written += (size - written);
20937			bf_set(lpfc_wr_object_eof, &wr_object->u.request, 1);
20938			bf_set(lpfc_wr_object_eas, &wr_object->u.request, 1);
20939			check_change_status = true;
20940		} else {
20941			wr_object->u.request.bde[i].tus.f.bdeSize =
20942				SLI4_PAGE_SIZE;
20943			written += SLI4_PAGE_SIZE;
20944		}
20945		i++;
20946	}
20947	wr_object->u.request.bde_count = i;
20948	bf_set(lpfc_wr_object_write_length, &wr_object->u.request, written);
20949	if (!phba->sli4_hba.intr_enable)
20950		mbox_status = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
20951	else {
20952		mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
20953		mbox_status = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
20954	}
20955
20956	/* The mbox status needs to be maintained to detect MBOX_TIMEOUT. */
20957	rc = mbox_status;
20958
20959	/* The IOCTL status is embedded in the mailbox subheader. */
20960	shdr_status = bf_get(lpfc_mbox_hdr_status,
20961			     &wr_object->header.cfg_shdr.response);
20962	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status,
20963				 &wr_object->header.cfg_shdr.response);
20964	shdr_add_status_2 = bf_get(lpfc_mbox_hdr_add_status_2,
20965				   &wr_object->header.cfg_shdr.response);
20966	if (check_change_status) {
20967		shdr_change_status = bf_get(lpfc_wr_object_change_status,
20968					    &wr_object->u.response);
20969		shdr_csf = bf_get(lpfc_wr_object_csf,
20970				  &wr_object->u.response);
20971	}
20972
20973	if (shdr_status || shdr_add_status || shdr_add_status_2 || rc) {
20974		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
20975				"3025 Write Object mailbox failed with "
20976				"status x%x add_status x%x, add_status_2 x%x, "
20977				"mbx status x%x\n",
20978				shdr_status, shdr_add_status, shdr_add_status_2,
20979				rc);
20980		rc = -ENXIO;
20981		*offset = shdr_add_status;
20982	} else {
20983		*offset += wr_object->u.response.actual_write_length;
20984	}
20985
20986	if (rc || check_change_status)
20987		lpfc_log_fw_write_cmpl(phba, shdr_status, shdr_add_status,
20988				       shdr_add_status_2, shdr_change_status,
20989				       shdr_csf);
20990
20991	if (!phba->sli4_hba.intr_enable)
20992		mempool_free(mbox, phba->mbox_mem_pool);
20993	else if (mbox_status != MBX_TIMEOUT)
20994		mempool_free(mbox, phba->mbox_mem_pool);
20995
20996	return rc;
20997}
20998
20999/**
21000 * lpfc_cleanup_pending_mbox - Free up vport discovery mailbox commands.
21001 * @vport: pointer to vport data structure.
21002 *
21003 * This function iterate through the mailboxq and clean up all REG_LOGIN
21004 * and REG_VPI mailbox commands associated with the vport. This function
21005 * is called when driver want to restart discovery of the vport due to
21006 * a Clear Virtual Link event.
21007 **/
21008void
21009lpfc_cleanup_pending_mbox(struct lpfc_vport *vport)
21010{
21011	struct lpfc_hba *phba = vport->phba;
21012	LPFC_MBOXQ_t *mb, *nextmb;
21013	struct lpfc_nodelist *ndlp;
21014	struct lpfc_nodelist *act_mbx_ndlp = NULL;
21015	LIST_HEAD(mbox_cmd_list);
21016	uint8_t restart_loop;
21017
21018	/* Clean up internally queued mailbox commands with the vport */
21019	spin_lock_irq(&phba->hbalock);
21020	list_for_each_entry_safe(mb, nextmb, &phba->sli.mboxq, list) {
21021		if (mb->vport != vport)
21022			continue;
21023
21024		if ((mb->u.mb.mbxCommand != MBX_REG_LOGIN64) &&
21025			(mb->u.mb.mbxCommand != MBX_REG_VPI))
21026			continue;
21027
21028		list_move_tail(&mb->list, &mbox_cmd_list);
21029	}
21030	/* Clean up active mailbox command with the vport */
21031	mb = phba->sli.mbox_active;
21032	if (mb && (mb->vport == vport)) {
21033		if ((mb->u.mb.mbxCommand == MBX_REG_LOGIN64) ||
21034			(mb->u.mb.mbxCommand == MBX_REG_VPI))
21035			mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
21036		if (mb->u.mb.mbxCommand == MBX_REG_LOGIN64) {
21037			act_mbx_ndlp = mb->ctx_ndlp;
21038
21039			/* This reference is local to this routine.  The
21040			 * reference is removed at routine exit.
21041			 */
21042			act_mbx_ndlp = lpfc_nlp_get(act_mbx_ndlp);
21043
21044			/* Unregister the RPI when mailbox complete */
21045			mb->mbox_flag |= LPFC_MBX_IMED_UNREG;
21046		}
21047	}
21048	/* Cleanup any mailbox completions which are not yet processed */
21049	do {
21050		restart_loop = 0;
21051		list_for_each_entry(mb, &phba->sli.mboxq_cmpl, list) {
21052			/*
21053			 * If this mailox is already processed or it is
21054			 * for another vport ignore it.
21055			 */
21056			if ((mb->vport != vport) ||
21057				(mb->mbox_flag & LPFC_MBX_IMED_UNREG))
21058				continue;
21059
21060			if ((mb->u.mb.mbxCommand != MBX_REG_LOGIN64) &&
21061				(mb->u.mb.mbxCommand != MBX_REG_VPI))
21062				continue;
21063
21064			mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
21065			if (mb->u.mb.mbxCommand == MBX_REG_LOGIN64) {
21066				ndlp = mb->ctx_ndlp;
21067				/* Unregister the RPI when mailbox complete */
21068				mb->mbox_flag |= LPFC_MBX_IMED_UNREG;
21069				restart_loop = 1;
21070				spin_unlock_irq(&phba->hbalock);
21071				spin_lock(&ndlp->lock);
21072				ndlp->nlp_flag &= ~NLP_IGNR_REG_CMPL;
21073				spin_unlock(&ndlp->lock);
21074				spin_lock_irq(&phba->hbalock);
21075				break;
21076			}
21077		}
21078	} while (restart_loop);
21079
21080	spin_unlock_irq(&phba->hbalock);
21081
21082	/* Release the cleaned-up mailbox commands */
21083	while (!list_empty(&mbox_cmd_list)) {
21084		list_remove_head(&mbox_cmd_list, mb, LPFC_MBOXQ_t, list);
21085		if (mb->u.mb.mbxCommand == MBX_REG_LOGIN64) {
21086			ndlp = mb->ctx_ndlp;
21087			mb->ctx_ndlp = NULL;
21088			if (ndlp) {
21089				spin_lock(&ndlp->lock);
21090				ndlp->nlp_flag &= ~NLP_IGNR_REG_CMPL;
21091				spin_unlock(&ndlp->lock);
21092				lpfc_nlp_put(ndlp);
21093			}
21094		}
21095		lpfc_mbox_rsrc_cleanup(phba, mb, MBOX_THD_UNLOCKED);
21096	}
21097
21098	/* Release the ndlp with the cleaned-up active mailbox command */
21099	if (act_mbx_ndlp) {
21100		spin_lock(&act_mbx_ndlp->lock);
21101		act_mbx_ndlp->nlp_flag &= ~NLP_IGNR_REG_CMPL;
21102		spin_unlock(&act_mbx_ndlp->lock);
21103		lpfc_nlp_put(act_mbx_ndlp);
21104	}
21105}
21106
21107/**
21108 * lpfc_drain_txq - Drain the txq
21109 * @phba: Pointer to HBA context object.
21110 *
21111 * This function attempt to submit IOCBs on the txq
21112 * to the adapter.  For SLI4 adapters, the txq contains
21113 * ELS IOCBs that have been deferred because the there
21114 * are no SGLs.  This congestion can occur with large
21115 * vport counts during node discovery.
21116 **/
21117
21118uint32_t
21119lpfc_drain_txq(struct lpfc_hba *phba)
21120{
21121	LIST_HEAD(completions);
21122	struct lpfc_sli_ring *pring;
21123	struct lpfc_iocbq *piocbq = NULL;
21124	unsigned long iflags = 0;
21125	char *fail_msg = NULL;
21126	uint32_t txq_cnt = 0;
21127	struct lpfc_queue *wq;
21128	int ret = 0;
21129
21130	if (phba->link_flag & LS_MDS_LOOPBACK) {
21131		/* MDS WQE are posted only to first WQ*/
21132		wq = phba->sli4_hba.hdwq[0].io_wq;
21133		if (unlikely(!wq))
21134			return 0;
21135		pring = wq->pring;
21136	} else {
21137		wq = phba->sli4_hba.els_wq;
21138		if (unlikely(!wq))
21139			return 0;
21140		pring = lpfc_phba_elsring(phba);
21141	}
21142
21143	if (unlikely(!pring) || list_empty(&pring->txq))
21144		return 0;
21145
21146	spin_lock_irqsave(&pring->ring_lock, iflags);
21147	list_for_each_entry(piocbq, &pring->txq, list) {
21148		txq_cnt++;
21149	}
21150
21151	if (txq_cnt > pring->txq_max)
21152		pring->txq_max = txq_cnt;
21153
21154	spin_unlock_irqrestore(&pring->ring_lock, iflags);
21155
21156	while (!list_empty(&pring->txq)) {
21157		spin_lock_irqsave(&pring->ring_lock, iflags);
21158
21159		piocbq = lpfc_sli_ringtx_get(phba, pring);
21160		if (!piocbq) {
21161			spin_unlock_irqrestore(&pring->ring_lock, iflags);
21162			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
21163				"2823 txq empty and txq_cnt is %d\n ",
21164				txq_cnt);
21165			break;
21166		}
21167		txq_cnt--;
21168
21169		ret = __lpfc_sli_issue_iocb(phba, pring->ringno, piocbq, 0);
21170
21171		if (ret && ret != IOCB_BUSY) {
21172			fail_msg = " - Cannot send IO ";
21173			piocbq->cmd_flag &= ~LPFC_DRIVER_ABORTED;
21174		}
21175		if (fail_msg) {
21176			piocbq->cmd_flag |= LPFC_DRIVER_ABORTED;
21177			/* Failed means we can't issue and need to cancel */
21178			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
21179					"2822 IOCB failed %s iotag 0x%x "
21180					"xri 0x%x %d flg x%x\n",
21181					fail_msg, piocbq->iotag,
21182					piocbq->sli4_xritag, ret,
21183					piocbq->cmd_flag);
21184			list_add_tail(&piocbq->list, &completions);
21185			fail_msg = NULL;
21186		}
21187		spin_unlock_irqrestore(&pring->ring_lock, iflags);
21188		if (txq_cnt == 0 || ret == IOCB_BUSY)
21189			break;
21190	}
21191	/* Cancel all the IOCBs that cannot be issued */
21192	lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
21193			      IOERR_SLI_ABORTED);
21194
21195	return txq_cnt;
21196}
21197
21198/**
21199 * lpfc_wqe_bpl2sgl - Convert the bpl/bde to a sgl.
21200 * @phba: Pointer to HBA context object.
21201 * @pwqeq: Pointer to command WQE.
21202 * @sglq: Pointer to the scatter gather queue object.
21203 *
21204 * This routine converts the bpl or bde that is in the WQE
21205 * to a sgl list for the sli4 hardware. The physical address
21206 * of the bpl/bde is converted back to a virtual address.
21207 * If the WQE contains a BPL then the list of BDE's is
21208 * converted to sli4_sge's. If the WQE contains a single
21209 * BDE then it is converted to a single sli_sge.
21210 * The WQE is still in cpu endianness so the contents of
21211 * the bpl can be used without byte swapping.
21212 *
21213 * Returns valid XRI = Success, NO_XRI = Failure.
21214 */
21215static uint16_t
21216lpfc_wqe_bpl2sgl(struct lpfc_hba *phba, struct lpfc_iocbq *pwqeq,
21217		 struct lpfc_sglq *sglq)
21218{
21219	uint16_t xritag = NO_XRI;
21220	struct ulp_bde64 *bpl = NULL;
21221	struct ulp_bde64 bde;
21222	struct sli4_sge *sgl  = NULL;
21223	struct lpfc_dmabuf *dmabuf;
21224	union lpfc_wqe128 *wqe;
21225	int numBdes = 0;
21226	int i = 0;
21227	uint32_t offset = 0; /* accumulated offset in the sg request list */
21228	int inbound = 0; /* number of sg reply entries inbound from firmware */
21229	uint32_t cmd;
21230
21231	if (!pwqeq || !sglq)
21232		return xritag;
21233
21234	sgl  = (struct sli4_sge *)sglq->sgl;
21235	wqe = &pwqeq->wqe;
21236	pwqeq->iocb.ulpIoTag = pwqeq->iotag;
21237
21238	cmd = bf_get(wqe_cmnd, &wqe->generic.wqe_com);
21239	if (cmd == CMD_XMIT_BLS_RSP64_WQE)
21240		return sglq->sli4_xritag;
21241	numBdes = pwqeq->num_bdes;
21242	if (numBdes) {
21243		/* The addrHigh and addrLow fields within the WQE
21244		 * have not been byteswapped yet so there is no
21245		 * need to swap them back.
21246		 */
21247		if (pwqeq->bpl_dmabuf)
21248			dmabuf = pwqeq->bpl_dmabuf;
21249		else
21250			return xritag;
21251
21252		bpl  = (struct ulp_bde64 *)dmabuf->virt;
21253		if (!bpl)
21254			return xritag;
21255
21256		for (i = 0; i < numBdes; i++) {
21257			/* Should already be byte swapped. */
21258			sgl->addr_hi = bpl->addrHigh;
21259			sgl->addr_lo = bpl->addrLow;
21260
21261			sgl->word2 = le32_to_cpu(sgl->word2);
21262			if ((i+1) == numBdes)
21263				bf_set(lpfc_sli4_sge_last, sgl, 1);
21264			else
21265				bf_set(lpfc_sli4_sge_last, sgl, 0);
21266			/* swap the size field back to the cpu so we
21267			 * can assign it to the sgl.
21268			 */
21269			bde.tus.w = le32_to_cpu(bpl->tus.w);
21270			sgl->sge_len = cpu_to_le32(bde.tus.f.bdeSize);
21271			/* The offsets in the sgl need to be accumulated
21272			 * separately for the request and reply lists.
21273			 * The request is always first, the reply follows.
21274			 */
21275			switch (cmd) {
21276			case CMD_GEN_REQUEST64_WQE:
21277				/* add up the reply sg entries */
21278				if (bpl->tus.f.bdeFlags == BUFF_TYPE_BDE_64I)
21279					inbound++;
21280				/* first inbound? reset the offset */
21281				if (inbound == 1)
21282					offset = 0;
21283				bf_set(lpfc_sli4_sge_offset, sgl, offset);
21284				bf_set(lpfc_sli4_sge_type, sgl,
21285					LPFC_SGE_TYPE_DATA);
21286				offset += bde.tus.f.bdeSize;
21287				break;
21288			case CMD_FCP_TRSP64_WQE:
21289				bf_set(lpfc_sli4_sge_offset, sgl, 0);
21290				bf_set(lpfc_sli4_sge_type, sgl,
21291					LPFC_SGE_TYPE_DATA);
21292				break;
21293			case CMD_FCP_TSEND64_WQE:
21294			case CMD_FCP_TRECEIVE64_WQE:
21295				bf_set(lpfc_sli4_sge_type, sgl,
21296					bpl->tus.f.bdeFlags);
21297				if (i < 3)
21298					offset = 0;
21299				else
21300					offset += bde.tus.f.bdeSize;
21301				bf_set(lpfc_sli4_sge_offset, sgl, offset);
21302				break;
21303			}
21304			sgl->word2 = cpu_to_le32(sgl->word2);
21305			bpl++;
21306			sgl++;
21307		}
21308	} else if (wqe->gen_req.bde.tus.f.bdeFlags == BUFF_TYPE_BDE_64) {
21309		/* The addrHigh and addrLow fields of the BDE have not
21310		 * been byteswapped yet so they need to be swapped
21311		 * before putting them in the sgl.
21312		 */
21313		sgl->addr_hi = cpu_to_le32(wqe->gen_req.bde.addrHigh);
21314		sgl->addr_lo = cpu_to_le32(wqe->gen_req.bde.addrLow);
21315		sgl->word2 = le32_to_cpu(sgl->word2);
21316		bf_set(lpfc_sli4_sge_last, sgl, 1);
21317		sgl->word2 = cpu_to_le32(sgl->word2);
21318		sgl->sge_len = cpu_to_le32(wqe->gen_req.bde.tus.f.bdeSize);
21319	}
21320	return sglq->sli4_xritag;
21321}
21322
21323/**
21324 * lpfc_sli4_issue_wqe - Issue an SLI4 Work Queue Entry (WQE)
21325 * @phba: Pointer to HBA context object.
21326 * @qp: Pointer to HDW queue.
21327 * @pwqe: Pointer to command WQE.
21328 **/
21329int
21330lpfc_sli4_issue_wqe(struct lpfc_hba *phba, struct lpfc_sli4_hdw_queue *qp,
21331		    struct lpfc_iocbq *pwqe)
21332{
21333	union lpfc_wqe128 *wqe = &pwqe->wqe;
21334	struct lpfc_async_xchg_ctx *ctxp;
21335	struct lpfc_queue *wq;
21336	struct lpfc_sglq *sglq;
21337	struct lpfc_sli_ring *pring;
21338	unsigned long iflags;
21339	uint32_t ret = 0;
21340
21341	/* NVME_LS and NVME_LS ABTS requests. */
21342	if (pwqe->cmd_flag & LPFC_IO_NVME_LS) {
21343		pring =  phba->sli4_hba.nvmels_wq->pring;
21344		lpfc_qp_spin_lock_irqsave(&pring->ring_lock, iflags,
21345					  qp, wq_access);
21346		sglq = __lpfc_sli_get_els_sglq(phba, pwqe);
21347		if (!sglq) {
21348			spin_unlock_irqrestore(&pring->ring_lock, iflags);
21349			return WQE_BUSY;
21350		}
21351		pwqe->sli4_lxritag = sglq->sli4_lxritag;
21352		pwqe->sli4_xritag = sglq->sli4_xritag;
21353		if (lpfc_wqe_bpl2sgl(phba, pwqe, sglq) == NO_XRI) {
21354			spin_unlock_irqrestore(&pring->ring_lock, iflags);
21355			return WQE_ERROR;
21356		}
21357		bf_set(wqe_xri_tag, &pwqe->wqe.xmit_bls_rsp.wqe_com,
21358		       pwqe->sli4_xritag);
21359		ret = lpfc_sli4_wq_put(phba->sli4_hba.nvmels_wq, wqe);
21360		if (ret) {
21361			spin_unlock_irqrestore(&pring->ring_lock, iflags);
21362			return ret;
21363		}
21364
21365		lpfc_sli_ringtxcmpl_put(phba, pring, pwqe);
21366		spin_unlock_irqrestore(&pring->ring_lock, iflags);
21367
21368		lpfc_sli4_poll_eq(qp->hba_eq);
21369		return 0;
21370	}
21371
21372	/* NVME_FCREQ and NVME_ABTS requests */
21373	if (pwqe->cmd_flag & (LPFC_IO_NVME | LPFC_IO_FCP | LPFC_IO_CMF)) {
21374		/* Get the IO distribution (hba_wqidx) for WQ assignment. */
21375		wq = qp->io_wq;
21376		pring = wq->pring;
21377
21378		bf_set(wqe_cqid, &wqe->generic.wqe_com, qp->io_cq_map);
21379
21380		lpfc_qp_spin_lock_irqsave(&pring->ring_lock, iflags,
21381					  qp, wq_access);
21382		ret = lpfc_sli4_wq_put(wq, wqe);
21383		if (ret) {
21384			spin_unlock_irqrestore(&pring->ring_lock, iflags);
21385			return ret;
21386		}
21387		lpfc_sli_ringtxcmpl_put(phba, pring, pwqe);
21388		spin_unlock_irqrestore(&pring->ring_lock, iflags);
21389
21390		lpfc_sli4_poll_eq(qp->hba_eq);
21391		return 0;
21392	}
21393
21394	/* NVMET requests */
21395	if (pwqe->cmd_flag & LPFC_IO_NVMET) {
21396		/* Get the IO distribution (hba_wqidx) for WQ assignment. */
21397		wq = qp->io_wq;
21398		pring = wq->pring;
21399
21400		ctxp = pwqe->context_un.axchg;
21401		sglq = ctxp->ctxbuf->sglq;
21402		if (pwqe->sli4_xritag ==  NO_XRI) {
21403			pwqe->sli4_lxritag = sglq->sli4_lxritag;
21404			pwqe->sli4_xritag = sglq->sli4_xritag;
21405		}
21406		bf_set(wqe_xri_tag, &pwqe->wqe.xmit_bls_rsp.wqe_com,
21407		       pwqe->sli4_xritag);
21408		bf_set(wqe_cqid, &wqe->generic.wqe_com, qp->io_cq_map);
21409
21410		lpfc_qp_spin_lock_irqsave(&pring->ring_lock, iflags,
21411					  qp, wq_access);
21412		ret = lpfc_sli4_wq_put(wq, wqe);
21413		if (ret) {
21414			spin_unlock_irqrestore(&pring->ring_lock, iflags);
21415			return ret;
21416		}
21417		lpfc_sli_ringtxcmpl_put(phba, pring, pwqe);
21418		spin_unlock_irqrestore(&pring->ring_lock, iflags);
21419
21420		lpfc_sli4_poll_eq(qp->hba_eq);
21421		return 0;
21422	}
21423	return WQE_ERROR;
21424}
21425
21426/**
21427 * lpfc_sli4_issue_abort_iotag - SLI-4 WQE init & issue for the Abort
21428 * @phba: Pointer to HBA context object.
21429 * @cmdiocb: Pointer to driver command iocb object.
21430 * @cmpl: completion function.
21431 *
21432 * Fill the appropriate fields for the abort WQE and call
21433 * internal routine lpfc_sli4_issue_wqe to send the WQE
21434 * This function is called with hbalock held and no ring_lock held.
21435 *
21436 * RETURNS 0 - SUCCESS
21437 **/
21438
21439int
21440lpfc_sli4_issue_abort_iotag(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
21441			    void *cmpl)
21442{
21443	struct lpfc_vport *vport = cmdiocb->vport;
21444	struct lpfc_iocbq *abtsiocb = NULL;
21445	union lpfc_wqe128 *abtswqe;
21446	struct lpfc_io_buf *lpfc_cmd;
21447	int retval = IOCB_ERROR;
21448	u16 xritag = cmdiocb->sli4_xritag;
21449
21450	/*
21451	 * The scsi command can not be in txq and it is in flight because the
21452	 * pCmd is still pointing at the SCSI command we have to abort. There
21453	 * is no need to search the txcmplq. Just send an abort to the FW.
21454	 */
21455
21456	abtsiocb = __lpfc_sli_get_iocbq(phba);
21457	if (!abtsiocb)
21458		return WQE_NORESOURCE;
21459
21460	/* Indicate the IO is being aborted by the driver. */
21461	cmdiocb->cmd_flag |= LPFC_DRIVER_ABORTED;
21462
21463	abtswqe = &abtsiocb->wqe;
21464	memset(abtswqe, 0, sizeof(*abtswqe));
21465
21466	if (!lpfc_is_link_up(phba) || (phba->link_flag & LS_EXTERNAL_LOOPBACK))
21467		bf_set(abort_cmd_ia, &abtswqe->abort_cmd, 1);
21468	bf_set(abort_cmd_criteria, &abtswqe->abort_cmd, T_XRI_TAG);
21469	abtswqe->abort_cmd.rsrvd5 = 0;
21470	abtswqe->abort_cmd.wqe_com.abort_tag = xritag;
21471	bf_set(wqe_reqtag, &abtswqe->abort_cmd.wqe_com, abtsiocb->iotag);
21472	bf_set(wqe_cmnd, &abtswqe->abort_cmd.wqe_com, CMD_ABORT_XRI_CX);
21473	bf_set(wqe_xri_tag, &abtswqe->generic.wqe_com, 0);
21474	bf_set(wqe_qosd, &abtswqe->abort_cmd.wqe_com, 1);
21475	bf_set(wqe_lenloc, &abtswqe->abort_cmd.wqe_com, LPFC_WQE_LENLOC_NONE);
21476	bf_set(wqe_cmd_type, &abtswqe->abort_cmd.wqe_com, OTHER_COMMAND);
21477
21478	/* ABTS WQE must go to the same WQ as the WQE to be aborted */
21479	abtsiocb->hba_wqidx = cmdiocb->hba_wqidx;
21480	abtsiocb->cmd_flag |= LPFC_USE_FCPWQIDX;
21481	if (cmdiocb->cmd_flag & LPFC_IO_FCP)
21482		abtsiocb->cmd_flag |= LPFC_IO_FCP;
21483	if (cmdiocb->cmd_flag & LPFC_IO_NVME)
21484		abtsiocb->cmd_flag |= LPFC_IO_NVME;
21485	if (cmdiocb->cmd_flag & LPFC_IO_FOF)
21486		abtsiocb->cmd_flag |= LPFC_IO_FOF;
21487	abtsiocb->vport = vport;
21488	abtsiocb->cmd_cmpl = cmpl;
21489
21490	lpfc_cmd = container_of(cmdiocb, struct lpfc_io_buf, cur_iocbq);
21491	retval = lpfc_sli4_issue_wqe(phba, lpfc_cmd->hdwq, abtsiocb);
21492
21493	lpfc_printf_vlog(vport, KERN_INFO, LOG_SLI | LOG_NVME_ABTS | LOG_FCP,
21494			 "0359 Abort xri x%x, original iotag x%x, "
21495			 "abort cmd iotag x%x retval x%x\n",
21496			 xritag, cmdiocb->iotag, abtsiocb->iotag, retval);
21497
21498	if (retval) {
21499		cmdiocb->cmd_flag &= ~LPFC_DRIVER_ABORTED;
21500		__lpfc_sli_release_iocbq(phba, abtsiocb);
21501	}
21502
21503	return retval;
21504}
21505
21506#ifdef LPFC_MXP_STAT
21507/**
21508 * lpfc_snapshot_mxp - Snapshot pbl, pvt and busy count
21509 * @phba: pointer to lpfc hba data structure.
21510 * @hwqid: belong to which HWQ.
21511 *
21512 * The purpose of this routine is to take a snapshot of pbl, pvt and busy count
21513 * 15 seconds after a test case is running.
21514 *
21515 * The user should call lpfc_debugfs_multixripools_write before running a test
21516 * case to clear stat_snapshot_taken. Then the user starts a test case. During
21517 * test case is running, stat_snapshot_taken is incremented by 1 every time when
21518 * this routine is called from heartbeat timer. When stat_snapshot_taken is
21519 * equal to LPFC_MXP_SNAPSHOT_TAKEN, a snapshot is taken.
21520 **/
21521void lpfc_snapshot_mxp(struct lpfc_hba *phba, u32 hwqid)
21522{
21523	struct lpfc_sli4_hdw_queue *qp;
21524	struct lpfc_multixri_pool *multixri_pool;
21525	struct lpfc_pvt_pool *pvt_pool;
21526	struct lpfc_pbl_pool *pbl_pool;
21527	u32 txcmplq_cnt;
21528
21529	qp = &phba->sli4_hba.hdwq[hwqid];
21530	multixri_pool = qp->p_multixri_pool;
21531	if (!multixri_pool)
21532		return;
21533
21534	if (multixri_pool->stat_snapshot_taken == LPFC_MXP_SNAPSHOT_TAKEN) {
21535		pvt_pool = &qp->p_multixri_pool->pvt_pool;
21536		pbl_pool = &qp->p_multixri_pool->pbl_pool;
21537		txcmplq_cnt = qp->io_wq->pring->txcmplq_cnt;
21538
21539		multixri_pool->stat_pbl_count = pbl_pool->count;
21540		multixri_pool->stat_pvt_count = pvt_pool->count;
21541		multixri_pool->stat_busy_count = txcmplq_cnt;
21542	}
21543
21544	multixri_pool->stat_snapshot_taken++;
21545}
21546#endif
21547
21548/**
21549 * lpfc_adjust_pvt_pool_count - Adjust private pool count
21550 * @phba: pointer to lpfc hba data structure.
21551 * @hwqid: belong to which HWQ.
21552 *
21553 * This routine moves some XRIs from private to public pool when private pool
21554 * is not busy.
21555 **/
21556void lpfc_adjust_pvt_pool_count(struct lpfc_hba *phba, u32 hwqid)
21557{
21558	struct lpfc_multixri_pool *multixri_pool;
21559	u32 io_req_count;
21560	u32 prev_io_req_count;
21561
21562	multixri_pool = phba->sli4_hba.hdwq[hwqid].p_multixri_pool;
21563	if (!multixri_pool)
21564		return;
21565	io_req_count = multixri_pool->io_req_count;
21566	prev_io_req_count = multixri_pool->prev_io_req_count;
21567
21568	if (prev_io_req_count != io_req_count) {
21569		/* Private pool is busy */
21570		multixri_pool->prev_io_req_count = io_req_count;
21571	} else {
21572		/* Private pool is not busy.
21573		 * Move XRIs from private to public pool.
21574		 */
21575		lpfc_move_xri_pvt_to_pbl(phba, hwqid);
21576	}
21577}
21578
21579/**
21580 * lpfc_adjust_high_watermark - Adjust high watermark
21581 * @phba: pointer to lpfc hba data structure.
21582 * @hwqid: belong to which HWQ.
21583 *
21584 * This routine sets high watermark as number of outstanding XRIs,
21585 * but make sure the new value is between xri_limit/2 and xri_limit.
21586 **/
21587void lpfc_adjust_high_watermark(struct lpfc_hba *phba, u32 hwqid)
21588{
21589	u32 new_watermark;
21590	u32 watermark_max;
21591	u32 watermark_min;
21592	u32 xri_limit;
21593	u32 txcmplq_cnt;
21594	u32 abts_io_bufs;
21595	struct lpfc_multixri_pool *multixri_pool;
21596	struct lpfc_sli4_hdw_queue *qp;
21597
21598	qp = &phba->sli4_hba.hdwq[hwqid];
21599	multixri_pool = qp->p_multixri_pool;
21600	if (!multixri_pool)
21601		return;
21602	xri_limit = multixri_pool->xri_limit;
21603
21604	watermark_max = xri_limit;
21605	watermark_min = xri_limit / 2;
21606
21607	txcmplq_cnt = qp->io_wq->pring->txcmplq_cnt;
21608	abts_io_bufs = qp->abts_scsi_io_bufs;
21609	abts_io_bufs += qp->abts_nvme_io_bufs;
21610
21611	new_watermark = txcmplq_cnt + abts_io_bufs;
21612	new_watermark = min(watermark_max, new_watermark);
21613	new_watermark = max(watermark_min, new_watermark);
21614	multixri_pool->pvt_pool.high_watermark = new_watermark;
21615
21616#ifdef LPFC_MXP_STAT
21617	multixri_pool->stat_max_hwm = max(multixri_pool->stat_max_hwm,
21618					  new_watermark);
21619#endif
21620}
21621
21622/**
21623 * lpfc_move_xri_pvt_to_pbl - Move some XRIs from private to public pool
21624 * @phba: pointer to lpfc hba data structure.
21625 * @hwqid: belong to which HWQ.
21626 *
21627 * This routine is called from hearbeat timer when pvt_pool is idle.
21628 * All free XRIs are moved from private to public pool on hwqid with 2 steps.
21629 * The first step moves (all - low_watermark) amount of XRIs.
21630 * The second step moves the rest of XRIs.
21631 **/
21632void lpfc_move_xri_pvt_to_pbl(struct lpfc_hba *phba, u32 hwqid)
21633{
21634	struct lpfc_pbl_pool *pbl_pool;
21635	struct lpfc_pvt_pool *pvt_pool;
21636	struct lpfc_sli4_hdw_queue *qp;
21637	struct lpfc_io_buf *lpfc_ncmd;
21638	struct lpfc_io_buf *lpfc_ncmd_next;
21639	unsigned long iflag;
21640	struct list_head tmp_list;
21641	u32 tmp_count;
21642
21643	qp = &phba->sli4_hba.hdwq[hwqid];
21644	pbl_pool = &qp->p_multixri_pool->pbl_pool;
21645	pvt_pool = &qp->p_multixri_pool->pvt_pool;
21646	tmp_count = 0;
21647
21648	lpfc_qp_spin_lock_irqsave(&pbl_pool->lock, iflag, qp, mv_to_pub_pool);
21649	lpfc_qp_spin_lock(&pvt_pool->lock, qp, mv_from_pvt_pool);
21650
21651	if (pvt_pool->count > pvt_pool->low_watermark) {
21652		/* Step 1: move (all - low_watermark) from pvt_pool
21653		 * to pbl_pool
21654		 */
21655
21656		/* Move low watermark of bufs from pvt_pool to tmp_list */
21657		INIT_LIST_HEAD(&tmp_list);
21658		list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
21659					 &pvt_pool->list, list) {
21660			list_move_tail(&lpfc_ncmd->list, &tmp_list);
21661			tmp_count++;
21662			if (tmp_count >= pvt_pool->low_watermark)
21663				break;
21664		}
21665
21666		/* Move all bufs from pvt_pool to pbl_pool */
21667		list_splice_init(&pvt_pool->list, &pbl_pool->list);
21668
21669		/* Move all bufs from tmp_list to pvt_pool */
21670		list_splice(&tmp_list, &pvt_pool->list);
21671
21672		pbl_pool->count += (pvt_pool->count - tmp_count);
21673		pvt_pool->count = tmp_count;
21674	} else {
21675		/* Step 2: move the rest from pvt_pool to pbl_pool */
21676		list_splice_init(&pvt_pool->list, &pbl_pool->list);
21677		pbl_pool->count += pvt_pool->count;
21678		pvt_pool->count = 0;
21679	}
21680
21681	spin_unlock(&pvt_pool->lock);
21682	spin_unlock_irqrestore(&pbl_pool->lock, iflag);
21683}
21684
21685/**
21686 * _lpfc_move_xri_pbl_to_pvt - Move some XRIs from public to private pool
21687 * @phba: pointer to lpfc hba data structure
21688 * @qp: pointer to HDW queue
21689 * @pbl_pool: specified public free XRI pool
21690 * @pvt_pool: specified private free XRI pool
21691 * @count: number of XRIs to move
21692 *
21693 * This routine tries to move some free common bufs from the specified pbl_pool
21694 * to the specified pvt_pool. It might move less than count XRIs if there's not
21695 * enough in public pool.
21696 *
21697 * Return:
21698 *   true - if XRIs are successfully moved from the specified pbl_pool to the
21699 *          specified pvt_pool
21700 *   false - if the specified pbl_pool is empty or locked by someone else
21701 **/
21702static bool
21703_lpfc_move_xri_pbl_to_pvt(struct lpfc_hba *phba, struct lpfc_sli4_hdw_queue *qp,
21704			  struct lpfc_pbl_pool *pbl_pool,
21705			  struct lpfc_pvt_pool *pvt_pool, u32 count)
21706{
21707	struct lpfc_io_buf *lpfc_ncmd;
21708	struct lpfc_io_buf *lpfc_ncmd_next;
21709	unsigned long iflag;
21710	int ret;
21711
21712	ret = spin_trylock_irqsave(&pbl_pool->lock, iflag);
21713	if (ret) {
21714		if (pbl_pool->count) {
21715			/* Move a batch of XRIs from public to private pool */
21716			lpfc_qp_spin_lock(&pvt_pool->lock, qp, mv_to_pvt_pool);
21717			list_for_each_entry_safe(lpfc_ncmd,
21718						 lpfc_ncmd_next,
21719						 &pbl_pool->list,
21720						 list) {
21721				list_move_tail(&lpfc_ncmd->list,
21722					       &pvt_pool->list);
21723				pvt_pool->count++;
21724				pbl_pool->count--;
21725				count--;
21726				if (count == 0)
21727					break;
21728			}
21729
21730			spin_unlock(&pvt_pool->lock);
21731			spin_unlock_irqrestore(&pbl_pool->lock, iflag);
21732			return true;
21733		}
21734		spin_unlock_irqrestore(&pbl_pool->lock, iflag);
21735	}
21736
21737	return false;
21738}
21739
21740/**
21741 * lpfc_move_xri_pbl_to_pvt - Move some XRIs from public to private pool
21742 * @phba: pointer to lpfc hba data structure.
21743 * @hwqid: belong to which HWQ.
21744 * @count: number of XRIs to move
21745 *
21746 * This routine tries to find some free common bufs in one of public pools with
21747 * Round Robin method. The search always starts from local hwqid, then the next
21748 * HWQ which was found last time (rrb_next_hwqid). Once a public pool is found,
21749 * a batch of free common bufs are moved to private pool on hwqid.
21750 * It might move less than count XRIs if there's not enough in public pool.
21751 **/
21752void lpfc_move_xri_pbl_to_pvt(struct lpfc_hba *phba, u32 hwqid, u32 count)
21753{
21754	struct lpfc_multixri_pool *multixri_pool;
21755	struct lpfc_multixri_pool *next_multixri_pool;
21756	struct lpfc_pvt_pool *pvt_pool;
21757	struct lpfc_pbl_pool *pbl_pool;
21758	struct lpfc_sli4_hdw_queue *qp;
21759	u32 next_hwqid;
21760	u32 hwq_count;
21761	int ret;
21762
21763	qp = &phba->sli4_hba.hdwq[hwqid];
21764	multixri_pool = qp->p_multixri_pool;
21765	pvt_pool = &multixri_pool->pvt_pool;
21766	pbl_pool = &multixri_pool->pbl_pool;
21767
21768	/* Check if local pbl_pool is available */
21769	ret = _lpfc_move_xri_pbl_to_pvt(phba, qp, pbl_pool, pvt_pool, count);
21770	if (ret) {
21771#ifdef LPFC_MXP_STAT
21772		multixri_pool->local_pbl_hit_count++;
21773#endif
21774		return;
21775	}
21776
21777	hwq_count = phba->cfg_hdw_queue;
21778
21779	/* Get the next hwqid which was found last time */
21780	next_hwqid = multixri_pool->rrb_next_hwqid;
21781
21782	do {
21783		/* Go to next hwq */
21784		next_hwqid = (next_hwqid + 1) % hwq_count;
21785
21786		next_multixri_pool =
21787			phba->sli4_hba.hdwq[next_hwqid].p_multixri_pool;
21788		pbl_pool = &next_multixri_pool->pbl_pool;
21789
21790		/* Check if the public free xri pool is available */
21791		ret = _lpfc_move_xri_pbl_to_pvt(
21792			phba, qp, pbl_pool, pvt_pool, count);
21793
21794		/* Exit while-loop if success or all hwqid are checked */
21795	} while (!ret && next_hwqid != multixri_pool->rrb_next_hwqid);
21796
21797	/* Starting point for the next time */
21798	multixri_pool->rrb_next_hwqid = next_hwqid;
21799
21800	if (!ret) {
21801		/* stats: all public pools are empty*/
21802		multixri_pool->pbl_empty_count++;
21803	}
21804
21805#ifdef LPFC_MXP_STAT
21806	if (ret) {
21807		if (next_hwqid == hwqid)
21808			multixri_pool->local_pbl_hit_count++;
21809		else
21810			multixri_pool->other_pbl_hit_count++;
21811	}
21812#endif
21813}
21814
21815/**
21816 * lpfc_keep_pvt_pool_above_lowwm - Keep pvt_pool above low watermark
21817 * @phba: pointer to lpfc hba data structure.
21818 * @hwqid: belong to which HWQ.
21819 *
21820 * This routine get a batch of XRIs from pbl_pool if pvt_pool is less than
21821 * low watermark.
21822 **/
21823void lpfc_keep_pvt_pool_above_lowwm(struct lpfc_hba *phba, u32 hwqid)
21824{
21825	struct lpfc_multixri_pool *multixri_pool;
21826	struct lpfc_pvt_pool *pvt_pool;
21827
21828	multixri_pool = phba->sli4_hba.hdwq[hwqid].p_multixri_pool;
21829	pvt_pool = &multixri_pool->pvt_pool;
21830
21831	if (pvt_pool->count < pvt_pool->low_watermark)
21832		lpfc_move_xri_pbl_to_pvt(phba, hwqid, XRI_BATCH);
21833}
21834
21835/**
21836 * lpfc_release_io_buf - Return one IO buf back to free pool
21837 * @phba: pointer to lpfc hba data structure.
21838 * @lpfc_ncmd: IO buf to be returned.
21839 * @qp: belong to which HWQ.
21840 *
21841 * This routine returns one IO buf back to free pool. If this is an urgent IO,
21842 * the IO buf is returned to expedite pool. If cfg_xri_rebalancing==1,
21843 * the IO buf is returned to pbl_pool or pvt_pool based on watermark and
21844 * xri_limit.  If cfg_xri_rebalancing==0, the IO buf is returned to
21845 * lpfc_io_buf_list_put.
21846 **/
21847void lpfc_release_io_buf(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_ncmd,
21848			 struct lpfc_sli4_hdw_queue *qp)
21849{
21850	unsigned long iflag;
21851	struct lpfc_pbl_pool *pbl_pool;
21852	struct lpfc_pvt_pool *pvt_pool;
21853	struct lpfc_epd_pool *epd_pool;
21854	u32 txcmplq_cnt;
21855	u32 xri_owned;
21856	u32 xri_limit;
21857	u32 abts_io_bufs;
21858
21859	/* MUST zero fields if buffer is reused by another protocol */
21860	lpfc_ncmd->nvmeCmd = NULL;
21861	lpfc_ncmd->cur_iocbq.cmd_cmpl = NULL;
21862
21863	if (phba->cfg_xpsgl && !phba->nvmet_support &&
21864	    !list_empty(&lpfc_ncmd->dma_sgl_xtra_list))
21865		lpfc_put_sgl_per_hdwq(phba, lpfc_ncmd);
21866
21867	if (!list_empty(&lpfc_ncmd->dma_cmd_rsp_list))
21868		lpfc_put_cmd_rsp_buf_per_hdwq(phba, lpfc_ncmd);
21869
21870	if (phba->cfg_xri_rebalancing) {
21871		if (lpfc_ncmd->expedite) {
21872			/* Return to expedite pool */
21873			epd_pool = &phba->epd_pool;
21874			spin_lock_irqsave(&epd_pool->lock, iflag);
21875			list_add_tail(&lpfc_ncmd->list, &epd_pool->list);
21876			epd_pool->count++;
21877			spin_unlock_irqrestore(&epd_pool->lock, iflag);
21878			return;
21879		}
21880
21881		/* Avoid invalid access if an IO sneaks in and is being rejected
21882		 * just _after_ xri pools are destroyed in lpfc_offline.
21883		 * Nothing much can be done at this point.
21884		 */
21885		if (!qp->p_multixri_pool)
21886			return;
21887
21888		pbl_pool = &qp->p_multixri_pool->pbl_pool;
21889		pvt_pool = &qp->p_multixri_pool->pvt_pool;
21890
21891		txcmplq_cnt = qp->io_wq->pring->txcmplq_cnt;
21892		abts_io_bufs = qp->abts_scsi_io_bufs;
21893		abts_io_bufs += qp->abts_nvme_io_bufs;
21894
21895		xri_owned = pvt_pool->count + txcmplq_cnt + abts_io_bufs;
21896		xri_limit = qp->p_multixri_pool->xri_limit;
21897
21898#ifdef LPFC_MXP_STAT
21899		if (xri_owned <= xri_limit)
21900			qp->p_multixri_pool->below_limit_count++;
21901		else
21902			qp->p_multixri_pool->above_limit_count++;
21903#endif
21904
21905		/* XRI goes to either public or private free xri pool
21906		 *     based on watermark and xri_limit
21907		 */
21908		if ((pvt_pool->count < pvt_pool->low_watermark) ||
21909		    (xri_owned < xri_limit &&
21910		     pvt_pool->count < pvt_pool->high_watermark)) {
21911			lpfc_qp_spin_lock_irqsave(&pvt_pool->lock, iflag,
21912						  qp, free_pvt_pool);
21913			list_add_tail(&lpfc_ncmd->list,
21914				      &pvt_pool->list);
21915			pvt_pool->count++;
21916			spin_unlock_irqrestore(&pvt_pool->lock, iflag);
21917		} else {
21918			lpfc_qp_spin_lock_irqsave(&pbl_pool->lock, iflag,
21919						  qp, free_pub_pool);
21920			list_add_tail(&lpfc_ncmd->list,
21921				      &pbl_pool->list);
21922			pbl_pool->count++;
21923			spin_unlock_irqrestore(&pbl_pool->lock, iflag);
21924		}
21925	} else {
21926		lpfc_qp_spin_lock_irqsave(&qp->io_buf_list_put_lock, iflag,
21927					  qp, free_xri);
21928		list_add_tail(&lpfc_ncmd->list,
21929			      &qp->lpfc_io_buf_list_put);
21930		qp->put_io_bufs++;
21931		spin_unlock_irqrestore(&qp->io_buf_list_put_lock,
21932				       iflag);
21933	}
21934}
21935
21936/**
21937 * lpfc_get_io_buf_from_private_pool - Get one free IO buf from private pool
21938 * @phba: pointer to lpfc hba data structure.
21939 * @qp: pointer to HDW queue
21940 * @pvt_pool: pointer to private pool data structure.
21941 * @ndlp: pointer to lpfc nodelist data structure.
21942 *
21943 * This routine tries to get one free IO buf from private pool.
21944 *
21945 * Return:
21946 *   pointer to one free IO buf - if private pool is not empty
21947 *   NULL - if private pool is empty
21948 **/
21949static struct lpfc_io_buf *
21950lpfc_get_io_buf_from_private_pool(struct lpfc_hba *phba,
21951				  struct lpfc_sli4_hdw_queue *qp,
21952				  struct lpfc_pvt_pool *pvt_pool,
21953				  struct lpfc_nodelist *ndlp)
21954{
21955	struct lpfc_io_buf *lpfc_ncmd;
21956	struct lpfc_io_buf *lpfc_ncmd_next;
21957	unsigned long iflag;
21958
21959	lpfc_qp_spin_lock_irqsave(&pvt_pool->lock, iflag, qp, alloc_pvt_pool);
21960	list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
21961				 &pvt_pool->list, list) {
21962		if (lpfc_test_rrq_active(
21963			phba, ndlp, lpfc_ncmd->cur_iocbq.sli4_lxritag))
21964			continue;
21965		list_del(&lpfc_ncmd->list);
21966		pvt_pool->count--;
21967		spin_unlock_irqrestore(&pvt_pool->lock, iflag);
21968		return lpfc_ncmd;
21969	}
21970	spin_unlock_irqrestore(&pvt_pool->lock, iflag);
21971
21972	return NULL;
21973}
21974
21975/**
21976 * lpfc_get_io_buf_from_expedite_pool - Get one free IO buf from expedite pool
21977 * @phba: pointer to lpfc hba data structure.
21978 *
21979 * This routine tries to get one free IO buf from expedite pool.
21980 *
21981 * Return:
21982 *   pointer to one free IO buf - if expedite pool is not empty
21983 *   NULL - if expedite pool is empty
21984 **/
21985static struct lpfc_io_buf *
21986lpfc_get_io_buf_from_expedite_pool(struct lpfc_hba *phba)
21987{
21988	struct lpfc_io_buf *lpfc_ncmd = NULL, *iter;
21989	struct lpfc_io_buf *lpfc_ncmd_next;
21990	unsigned long iflag;
21991	struct lpfc_epd_pool *epd_pool;
21992
21993	epd_pool = &phba->epd_pool;
21994
21995	spin_lock_irqsave(&epd_pool->lock, iflag);
21996	if (epd_pool->count > 0) {
21997		list_for_each_entry_safe(iter, lpfc_ncmd_next,
21998					 &epd_pool->list, list) {
21999			list_del(&iter->list);
22000			epd_pool->count--;
22001			lpfc_ncmd = iter;
22002			break;
22003		}
22004	}
22005	spin_unlock_irqrestore(&epd_pool->lock, iflag);
22006
22007	return lpfc_ncmd;
22008}
22009
22010/**
22011 * lpfc_get_io_buf_from_multixri_pools - Get one free IO bufs
22012 * @phba: pointer to lpfc hba data structure.
22013 * @ndlp: pointer to lpfc nodelist data structure.
22014 * @hwqid: belong to which HWQ
22015 * @expedite: 1 means this request is urgent.
22016 *
22017 * This routine will do the following actions and then return a pointer to
22018 * one free IO buf.
22019 *
22020 * 1. If private free xri count is empty, move some XRIs from public to
22021 *    private pool.
22022 * 2. Get one XRI from private free xri pool.
22023 * 3. If we fail to get one from pvt_pool and this is an expedite request,
22024 *    get one free xri from expedite pool.
22025 *
22026 * Note: ndlp is only used on SCSI side for RRQ testing.
22027 *       The caller should pass NULL for ndlp on NVME side.
22028 *
22029 * Return:
22030 *   pointer to one free IO buf - if private pool is not empty
22031 *   NULL - if private pool is empty
22032 **/
22033static struct lpfc_io_buf *
22034lpfc_get_io_buf_from_multixri_pools(struct lpfc_hba *phba,
22035				    struct lpfc_nodelist *ndlp,
22036				    int hwqid, int expedite)
22037{
22038	struct lpfc_sli4_hdw_queue *qp;
22039	struct lpfc_multixri_pool *multixri_pool;
22040	struct lpfc_pvt_pool *pvt_pool;
22041	struct lpfc_io_buf *lpfc_ncmd;
22042
22043	qp = &phba->sli4_hba.hdwq[hwqid];
22044	lpfc_ncmd = NULL;
22045	if (!qp) {
22046		lpfc_printf_log(phba, KERN_INFO,
22047				LOG_SLI | LOG_NVME_ABTS | LOG_FCP,
22048				"5556 NULL qp for hwqid  x%x\n", hwqid);
22049		return lpfc_ncmd;
22050	}
22051	multixri_pool = qp->p_multixri_pool;
22052	if (!multixri_pool) {
22053		lpfc_printf_log(phba, KERN_INFO,
22054				LOG_SLI | LOG_NVME_ABTS | LOG_FCP,
22055				"5557 NULL multixri for hwqid  x%x\n", hwqid);
22056		return lpfc_ncmd;
22057	}
22058	pvt_pool = &multixri_pool->pvt_pool;
22059	if (!pvt_pool) {
22060		lpfc_printf_log(phba, KERN_INFO,
22061				LOG_SLI | LOG_NVME_ABTS | LOG_FCP,
22062				"5558 NULL pvt_pool for hwqid  x%x\n", hwqid);
22063		return lpfc_ncmd;
22064	}
22065	multixri_pool->io_req_count++;
22066
22067	/* If pvt_pool is empty, move some XRIs from public to private pool */
22068	if (pvt_pool->count == 0)
22069		lpfc_move_xri_pbl_to_pvt(phba, hwqid, XRI_BATCH);
22070
22071	/* Get one XRI from private free xri pool */
22072	lpfc_ncmd = lpfc_get_io_buf_from_private_pool(phba, qp, pvt_pool, ndlp);
22073
22074	if (lpfc_ncmd) {
22075		lpfc_ncmd->hdwq = qp;
22076		lpfc_ncmd->hdwq_no = hwqid;
22077	} else if (expedite) {
22078		/* If we fail to get one from pvt_pool and this is an expedite
22079		 * request, get one free xri from expedite pool.
22080		 */
22081		lpfc_ncmd = lpfc_get_io_buf_from_expedite_pool(phba);
22082	}
22083
22084	return lpfc_ncmd;
22085}
22086
22087static inline struct lpfc_io_buf *
22088lpfc_io_buf(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp, int idx)
22089{
22090	struct lpfc_sli4_hdw_queue *qp;
22091	struct lpfc_io_buf *lpfc_cmd, *lpfc_cmd_next;
22092
22093	qp = &phba->sli4_hba.hdwq[idx];
22094	list_for_each_entry_safe(lpfc_cmd, lpfc_cmd_next,
22095				 &qp->lpfc_io_buf_list_get, list) {
22096		if (lpfc_test_rrq_active(phba, ndlp,
22097					 lpfc_cmd->cur_iocbq.sli4_lxritag))
22098			continue;
22099
22100		if (lpfc_cmd->flags & LPFC_SBUF_NOT_POSTED)
22101			continue;
22102
22103		list_del_init(&lpfc_cmd->list);
22104		qp->get_io_bufs--;
22105		lpfc_cmd->hdwq = qp;
22106		lpfc_cmd->hdwq_no = idx;
22107		return lpfc_cmd;
22108	}
22109	return NULL;
22110}
22111
22112/**
22113 * lpfc_get_io_buf - Get one IO buffer from free pool
22114 * @phba: The HBA for which this call is being executed.
22115 * @ndlp: pointer to lpfc nodelist data structure.
22116 * @hwqid: belong to which HWQ
22117 * @expedite: 1 means this request is urgent.
22118 *
22119 * This routine gets one IO buffer from free pool. If cfg_xri_rebalancing==1,
22120 * removes a IO buffer from multiXRI pools. If cfg_xri_rebalancing==0, removes
22121 * a IO buffer from head of @hdwq io_buf_list and returns to caller.
22122 *
22123 * Note: ndlp is only used on SCSI side for RRQ testing.
22124 *       The caller should pass NULL for ndlp on NVME side.
22125 *
22126 * Return codes:
22127 *   NULL - Error
22128 *   Pointer to lpfc_io_buf - Success
22129 **/
22130struct lpfc_io_buf *lpfc_get_io_buf(struct lpfc_hba *phba,
22131				    struct lpfc_nodelist *ndlp,
22132				    u32 hwqid, int expedite)
22133{
22134	struct lpfc_sli4_hdw_queue *qp;
22135	unsigned long iflag;
22136	struct lpfc_io_buf *lpfc_cmd;
22137
22138	qp = &phba->sli4_hba.hdwq[hwqid];
22139	lpfc_cmd = NULL;
22140	if (!qp) {
22141		lpfc_printf_log(phba, KERN_WARNING,
22142				LOG_SLI | LOG_NVME_ABTS | LOG_FCP,
22143				"5555 NULL qp for hwqid  x%x\n", hwqid);
22144		return lpfc_cmd;
22145	}
22146
22147	if (phba->cfg_xri_rebalancing)
22148		lpfc_cmd = lpfc_get_io_buf_from_multixri_pools(
22149			phba, ndlp, hwqid, expedite);
22150	else {
22151		lpfc_qp_spin_lock_irqsave(&qp->io_buf_list_get_lock, iflag,
22152					  qp, alloc_xri_get);
22153		if (qp->get_io_bufs > LPFC_NVME_EXPEDITE_XRICNT || expedite)
22154			lpfc_cmd = lpfc_io_buf(phba, ndlp, hwqid);
22155		if (!lpfc_cmd) {
22156			lpfc_qp_spin_lock(&qp->io_buf_list_put_lock,
22157					  qp, alloc_xri_put);
22158			list_splice(&qp->lpfc_io_buf_list_put,
22159				    &qp->lpfc_io_buf_list_get);
22160			qp->get_io_bufs += qp->put_io_bufs;
22161			INIT_LIST_HEAD(&qp->lpfc_io_buf_list_put);
22162			qp->put_io_bufs = 0;
22163			spin_unlock(&qp->io_buf_list_put_lock);
22164			if (qp->get_io_bufs > LPFC_NVME_EXPEDITE_XRICNT ||
22165			    expedite)
22166				lpfc_cmd = lpfc_io_buf(phba, ndlp, hwqid);
22167		}
22168		spin_unlock_irqrestore(&qp->io_buf_list_get_lock, iflag);
22169	}
22170
22171	return lpfc_cmd;
22172}
22173
22174/**
22175 * lpfc_read_object - Retrieve object data from HBA
22176 * @phba: The HBA for which this call is being executed.
22177 * @rdobject: Pathname of object data we want to read.
22178 * @datap: Pointer to where data will be copied to.
22179 * @datasz: size of data area
22180 *
22181 * This routine is limited to object sizes of LPFC_BPL_SIZE (1024) or less.
22182 * The data will be truncated if datasz is not large enough.
22183 * Version 1 is not supported with Embedded mbox cmd, so we must use version 0.
22184 * Returns the actual bytes read from the object.
22185 *
22186 * This routine is hard coded to use a poll completion.  Unlike other
22187 * sli4_config mailboxes, it uses lpfc_mbuf memory which is not
22188 * cleaned up in lpfc_sli4_cmd_mbox_free.  If this routine is modified
22189 * to use interrupt-based completions, code is needed to fully cleanup
22190 * the memory.
22191 */
22192int
22193lpfc_read_object(struct lpfc_hba *phba, char *rdobject, uint32_t *datap,
22194		 uint32_t datasz)
22195{
22196	struct lpfc_mbx_read_object *read_object;
22197	LPFC_MBOXQ_t *mbox;
22198	int rc, length, eof, j, byte_cnt = 0;
22199	uint32_t shdr_status, shdr_add_status;
22200	union lpfc_sli4_cfg_shdr *shdr;
22201	struct lpfc_dmabuf *pcmd;
22202	u32 rd_object_name[LPFC_MBX_OBJECT_NAME_LEN_DW] = {0};
22203
22204	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
22205	if (!mbox)
22206		return -ENOMEM;
22207	length = (sizeof(struct lpfc_mbx_read_object) -
22208		  sizeof(struct lpfc_sli4_cfg_mhdr));
22209	lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
22210			 LPFC_MBOX_OPCODE_READ_OBJECT,
22211			 length, LPFC_SLI4_MBX_EMBED);
22212	read_object = &mbox->u.mqe.un.read_object;
22213	shdr = (union lpfc_sli4_cfg_shdr *)&read_object->header.cfg_shdr;
22214
22215	bf_set(lpfc_mbox_hdr_version, &shdr->request, LPFC_Q_CREATE_VERSION_0);
22216	bf_set(lpfc_mbx_rd_object_rlen, &read_object->u.request, datasz);
22217	read_object->u.request.rd_object_offset = 0;
22218	read_object->u.request.rd_object_cnt = 1;
22219
22220	memset((void *)read_object->u.request.rd_object_name, 0,
22221	       LPFC_OBJ_NAME_SZ);
22222	scnprintf((char *)rd_object_name, sizeof(rd_object_name), rdobject);
22223	for (j = 0; j < strlen(rdobject); j++)
22224		read_object->u.request.rd_object_name[j] =
22225			cpu_to_le32(rd_object_name[j]);
22226
22227	pcmd = kmalloc(sizeof(*pcmd), GFP_KERNEL);
22228	if (pcmd)
22229		pcmd->virt = lpfc_mbuf_alloc(phba, MEM_PRI, &pcmd->phys);
22230	if (!pcmd || !pcmd->virt) {
22231		kfree(pcmd);
22232		mempool_free(mbox, phba->mbox_mem_pool);
22233		return -ENOMEM;
22234	}
22235	memset((void *)pcmd->virt, 0, LPFC_BPL_SIZE);
22236	read_object->u.request.rd_object_hbuf[0].pa_lo =
22237		putPaddrLow(pcmd->phys);
22238	read_object->u.request.rd_object_hbuf[0].pa_hi =
22239		putPaddrHigh(pcmd->phys);
22240	read_object->u.request.rd_object_hbuf[0].length = LPFC_BPL_SIZE;
22241
22242	mbox->vport = phba->pport;
22243	mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
22244	mbox->ctx_ndlp = NULL;
22245
22246	rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
22247	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
22248	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
22249
22250	if (shdr_status == STATUS_FAILED &&
22251	    shdr_add_status == ADD_STATUS_INVALID_OBJECT_NAME) {
22252		lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_CGN_MGMT,
22253				"4674 No port cfg file in FW.\n");
22254		byte_cnt = -ENOENT;
22255	} else if (shdr_status || shdr_add_status || rc) {
22256		lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_CGN_MGMT,
22257				"2625 READ_OBJECT mailbox failed with "
22258				"status x%x add_status x%x, mbx status x%x\n",
22259				shdr_status, shdr_add_status, rc);
22260		byte_cnt = -ENXIO;
22261	} else {
22262		/* Success */
22263		length = read_object->u.response.rd_object_actual_rlen;
22264		eof = bf_get(lpfc_mbx_rd_object_eof, &read_object->u.response);
22265		lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_CGN_MGMT,
22266				"2626 READ_OBJECT Success len %d:%d, EOF %d\n",
22267				length, datasz, eof);
22268
22269		/* Detect the port config file exists but is empty */
22270		if (!length && eof) {
22271			byte_cnt = 0;
22272			goto exit;
22273		}
22274
22275		byte_cnt = length;
22276		lpfc_sli_pcimem_bcopy(pcmd->virt, datap, byte_cnt);
22277	}
22278
22279 exit:
22280	/* This is an embedded SLI4 mailbox with an external buffer allocated.
22281	 * Free the pcmd and then cleanup with the correct routine.
22282	 */
22283	lpfc_mbuf_free(phba, pcmd->virt, pcmd->phys);
22284	kfree(pcmd);
22285	lpfc_sli4_mbox_cmd_free(phba, mbox);
22286	return byte_cnt;
22287}
22288
22289/**
22290 * lpfc_get_sgl_per_hdwq - Get one SGL chunk from hdwq's pool
22291 * @phba: The HBA for which this call is being executed.
22292 * @lpfc_buf: IO buf structure to append the SGL chunk
22293 *
22294 * This routine gets one SGL chunk buffer from hdwq's SGL chunk pool,
22295 * and will allocate an SGL chunk if the pool is empty.
22296 *
22297 * Return codes:
22298 *   NULL - Error
22299 *   Pointer to sli4_hybrid_sgl - Success
22300 **/
22301struct sli4_hybrid_sgl *
22302lpfc_get_sgl_per_hdwq(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_buf)
22303{
22304	struct sli4_hybrid_sgl *list_entry = NULL;
22305	struct sli4_hybrid_sgl *tmp = NULL;
22306	struct sli4_hybrid_sgl *allocated_sgl = NULL;
22307	struct lpfc_sli4_hdw_queue *hdwq = lpfc_buf->hdwq;
22308	struct list_head *buf_list = &hdwq->sgl_list;
22309	unsigned long iflags;
22310
22311	spin_lock_irqsave(&hdwq->hdwq_lock, iflags);
22312
22313	if (likely(!list_empty(buf_list))) {
22314		/* break off 1 chunk from the sgl_list */
22315		list_for_each_entry_safe(list_entry, tmp,
22316					 buf_list, list_node) {
22317			list_move_tail(&list_entry->list_node,
22318				       &lpfc_buf->dma_sgl_xtra_list);
22319			break;
22320		}
22321	} else {
22322		/* allocate more */
22323		spin_unlock_irqrestore(&hdwq->hdwq_lock, iflags);
22324		tmp = kmalloc_node(sizeof(*tmp), GFP_ATOMIC,
22325				   cpu_to_node(hdwq->io_wq->chann));
22326		if (!tmp) {
22327			lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
22328					"8353 error kmalloc memory for HDWQ "
22329					"%d %s\n",
22330					lpfc_buf->hdwq_no, __func__);
22331			return NULL;
22332		}
22333
22334		tmp->dma_sgl = dma_pool_alloc(phba->lpfc_sg_dma_buf_pool,
22335					      GFP_ATOMIC, &tmp->dma_phys_sgl);
22336		if (!tmp->dma_sgl) {
22337			lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
22338					"8354 error pool_alloc memory for HDWQ "
22339					"%d %s\n",
22340					lpfc_buf->hdwq_no, __func__);
22341			kfree(tmp);
22342			return NULL;
22343		}
22344
22345		spin_lock_irqsave(&hdwq->hdwq_lock, iflags);
22346		list_add_tail(&tmp->list_node, &lpfc_buf->dma_sgl_xtra_list);
22347	}
22348
22349	allocated_sgl = list_last_entry(&lpfc_buf->dma_sgl_xtra_list,
22350					struct sli4_hybrid_sgl,
22351					list_node);
22352
22353	spin_unlock_irqrestore(&hdwq->hdwq_lock, iflags);
22354
22355	return allocated_sgl;
22356}
22357
22358/**
22359 * lpfc_put_sgl_per_hdwq - Put one SGL chunk into hdwq pool
22360 * @phba: The HBA for which this call is being executed.
22361 * @lpfc_buf: IO buf structure with the SGL chunk
22362 *
22363 * This routine puts one SGL chunk buffer into hdwq's SGL chunk pool.
22364 *
22365 * Return codes:
22366 *   0 - Success
22367 *   -EINVAL - Error
22368 **/
22369int
22370lpfc_put_sgl_per_hdwq(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_buf)
22371{
22372	int rc = 0;
22373	struct sli4_hybrid_sgl *list_entry = NULL;
22374	struct sli4_hybrid_sgl *tmp = NULL;
22375	struct lpfc_sli4_hdw_queue *hdwq = lpfc_buf->hdwq;
22376	struct list_head *buf_list = &hdwq->sgl_list;
22377	unsigned long iflags;
22378
22379	spin_lock_irqsave(&hdwq->hdwq_lock, iflags);
22380
22381	if (likely(!list_empty(&lpfc_buf->dma_sgl_xtra_list))) {
22382		list_for_each_entry_safe(list_entry, tmp,
22383					 &lpfc_buf->dma_sgl_xtra_list,
22384					 list_node) {
22385			list_move_tail(&list_entry->list_node,
22386				       buf_list);
22387		}
22388	} else {
22389		rc = -EINVAL;
22390	}
22391
22392	spin_unlock_irqrestore(&hdwq->hdwq_lock, iflags);
22393	return rc;
22394}
22395
22396/**
22397 * lpfc_free_sgl_per_hdwq - Free all SGL chunks of hdwq pool
22398 * @phba: phba object
22399 * @hdwq: hdwq to cleanup sgl buff resources on
22400 *
22401 * This routine frees all SGL chunks of hdwq SGL chunk pool.
22402 *
22403 * Return codes:
22404 *   None
22405 **/
22406void
22407lpfc_free_sgl_per_hdwq(struct lpfc_hba *phba,
22408		       struct lpfc_sli4_hdw_queue *hdwq)
22409{
22410	struct list_head *buf_list = &hdwq->sgl_list;
22411	struct sli4_hybrid_sgl *list_entry = NULL;
22412	struct sli4_hybrid_sgl *tmp = NULL;
22413	unsigned long iflags;
22414
22415	spin_lock_irqsave(&hdwq->hdwq_lock, iflags);
22416
22417	/* Free sgl pool */
22418	list_for_each_entry_safe(list_entry, tmp,
22419				 buf_list, list_node) {
22420		list_del(&list_entry->list_node);
22421		dma_pool_free(phba->lpfc_sg_dma_buf_pool,
22422			      list_entry->dma_sgl,
22423			      list_entry->dma_phys_sgl);
22424		kfree(list_entry);
22425	}
22426
22427	spin_unlock_irqrestore(&hdwq->hdwq_lock, iflags);
22428}
22429
22430/**
22431 * lpfc_get_cmd_rsp_buf_per_hdwq - Get one CMD/RSP buffer from hdwq
22432 * @phba: The HBA for which this call is being executed.
22433 * @lpfc_buf: IO buf structure to attach the CMD/RSP buffer
22434 *
22435 * This routine gets one CMD/RSP buffer from hdwq's CMD/RSP pool,
22436 * and will allocate an CMD/RSP buffer if the pool is empty.
22437 *
22438 * Return codes:
22439 *   NULL - Error
22440 *   Pointer to fcp_cmd_rsp_buf - Success
22441 **/
22442struct fcp_cmd_rsp_buf *
22443lpfc_get_cmd_rsp_buf_per_hdwq(struct lpfc_hba *phba,
22444			      struct lpfc_io_buf *lpfc_buf)
22445{
22446	struct fcp_cmd_rsp_buf *list_entry = NULL;
22447	struct fcp_cmd_rsp_buf *tmp = NULL;
22448	struct fcp_cmd_rsp_buf *allocated_buf = NULL;
22449	struct lpfc_sli4_hdw_queue *hdwq = lpfc_buf->hdwq;
22450	struct list_head *buf_list = &hdwq->cmd_rsp_buf_list;
22451	unsigned long iflags;
22452
22453	spin_lock_irqsave(&hdwq->hdwq_lock, iflags);
22454
22455	if (likely(!list_empty(buf_list))) {
22456		/* break off 1 chunk from the list */
22457		list_for_each_entry_safe(list_entry, tmp,
22458					 buf_list,
22459					 list_node) {
22460			list_move_tail(&list_entry->list_node,
22461				       &lpfc_buf->dma_cmd_rsp_list);
22462			break;
22463		}
22464	} else {
22465		/* allocate more */
22466		spin_unlock_irqrestore(&hdwq->hdwq_lock, iflags);
22467		tmp = kmalloc_node(sizeof(*tmp), GFP_ATOMIC,
22468				   cpu_to_node(hdwq->io_wq->chann));
22469		if (!tmp) {
22470			lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
22471					"8355 error kmalloc memory for HDWQ "
22472					"%d %s\n",
22473					lpfc_buf->hdwq_no, __func__);
22474			return NULL;
22475		}
22476
22477		tmp->fcp_cmnd = dma_pool_zalloc(phba->lpfc_cmd_rsp_buf_pool,
22478						GFP_ATOMIC,
22479						&tmp->fcp_cmd_rsp_dma_handle);
22480
22481		if (!tmp->fcp_cmnd) {
22482			lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
22483					"8356 error pool_alloc memory for HDWQ "
22484					"%d %s\n",
22485					lpfc_buf->hdwq_no, __func__);
22486			kfree(tmp);
22487			return NULL;
22488		}
22489
22490		tmp->fcp_rsp = (struct fcp_rsp *)((uint8_t *)tmp->fcp_cmnd +
22491				sizeof(struct fcp_cmnd));
22492
22493		spin_lock_irqsave(&hdwq->hdwq_lock, iflags);
22494		list_add_tail(&tmp->list_node, &lpfc_buf->dma_cmd_rsp_list);
22495	}
22496
22497	allocated_buf = list_last_entry(&lpfc_buf->dma_cmd_rsp_list,
22498					struct fcp_cmd_rsp_buf,
22499					list_node);
22500
22501	spin_unlock_irqrestore(&hdwq->hdwq_lock, iflags);
22502
22503	return allocated_buf;
22504}
22505
22506/**
22507 * lpfc_put_cmd_rsp_buf_per_hdwq - Put one CMD/RSP buffer into hdwq pool
22508 * @phba: The HBA for which this call is being executed.
22509 * @lpfc_buf: IO buf structure with the CMD/RSP buf
22510 *
22511 * This routine puts one CMD/RSP buffer into executing CPU's CMD/RSP pool.
22512 *
22513 * Return codes:
22514 *   0 - Success
22515 *   -EINVAL - Error
22516 **/
22517int
22518lpfc_put_cmd_rsp_buf_per_hdwq(struct lpfc_hba *phba,
22519			      struct lpfc_io_buf *lpfc_buf)
22520{
22521	int rc = 0;
22522	struct fcp_cmd_rsp_buf *list_entry = NULL;
22523	struct fcp_cmd_rsp_buf *tmp = NULL;
22524	struct lpfc_sli4_hdw_queue *hdwq = lpfc_buf->hdwq;
22525	struct list_head *buf_list = &hdwq->cmd_rsp_buf_list;
22526	unsigned long iflags;
22527
22528	spin_lock_irqsave(&hdwq->hdwq_lock, iflags);
22529
22530	if (likely(!list_empty(&lpfc_buf->dma_cmd_rsp_list))) {
22531		list_for_each_entry_safe(list_entry, tmp,
22532					 &lpfc_buf->dma_cmd_rsp_list,
22533					 list_node) {
22534			list_move_tail(&list_entry->list_node,
22535				       buf_list);
22536		}
22537	} else {
22538		rc = -EINVAL;
22539	}
22540
22541	spin_unlock_irqrestore(&hdwq->hdwq_lock, iflags);
22542	return rc;
22543}
22544
22545/**
22546 * lpfc_free_cmd_rsp_buf_per_hdwq - Free all CMD/RSP chunks of hdwq pool
22547 * @phba: phba object
22548 * @hdwq: hdwq to cleanup cmd rsp buff resources on
22549 *
22550 * This routine frees all CMD/RSP buffers of hdwq's CMD/RSP buf pool.
22551 *
22552 * Return codes:
22553 *   None
22554 **/
22555void
22556lpfc_free_cmd_rsp_buf_per_hdwq(struct lpfc_hba *phba,
22557			       struct lpfc_sli4_hdw_queue *hdwq)
22558{
22559	struct list_head *buf_list = &hdwq->cmd_rsp_buf_list;
22560	struct fcp_cmd_rsp_buf *list_entry = NULL;
22561	struct fcp_cmd_rsp_buf *tmp = NULL;
22562	unsigned long iflags;
22563
22564	spin_lock_irqsave(&hdwq->hdwq_lock, iflags);
22565
22566	/* Free cmd_rsp buf pool */
22567	list_for_each_entry_safe(list_entry, tmp,
22568				 buf_list,
22569				 list_node) {
22570		list_del(&list_entry->list_node);
22571		dma_pool_free(phba->lpfc_cmd_rsp_buf_pool,
22572			      list_entry->fcp_cmnd,
22573			      list_entry->fcp_cmd_rsp_dma_handle);
22574		kfree(list_entry);
22575	}
22576
22577	spin_unlock_irqrestore(&hdwq->hdwq_lock, iflags);
22578}
22579
22580/**
22581 * lpfc_sli_prep_wqe - Prepare WQE for the command to be posted
22582 * @phba: phba object
22583 * @job: job entry of the command to be posted.
22584 *
22585 * Fill the common fields of the wqe for each of the command.
22586 *
22587 * Return codes:
22588 *	None
22589 **/
22590void
22591lpfc_sli_prep_wqe(struct lpfc_hba *phba, struct lpfc_iocbq *job)
22592{
22593	u8 cmnd;
22594	u32 *pcmd;
22595	u32 if_type = 0;
22596	u32 fip, abort_tag;
22597	struct lpfc_nodelist *ndlp = NULL;
22598	union lpfc_wqe128 *wqe = &job->wqe;
22599	u8 command_type = ELS_COMMAND_NON_FIP;
22600
22601	fip = phba->hba_flag & HBA_FIP_SUPPORT;
22602	/* The fcp commands will set command type */
22603	if (job->cmd_flag &  LPFC_IO_FCP)
22604		command_type = FCP_COMMAND;
22605	else if (fip && (job->cmd_flag & LPFC_FIP_ELS_ID_MASK))
22606		command_type = ELS_COMMAND_FIP;
22607	else
22608		command_type = ELS_COMMAND_NON_FIP;
22609
22610	abort_tag = job->iotag;
22611	cmnd = bf_get(wqe_cmnd, &wqe->els_req.wqe_com);
22612
22613	switch (cmnd) {
22614	case CMD_ELS_REQUEST64_WQE:
22615		ndlp = job->ndlp;
22616
22617		if_type = bf_get(lpfc_sli_intf_if_type,
22618				 &phba->sli4_hba.sli_intf);
22619		if (if_type >= LPFC_SLI_INTF_IF_TYPE_2) {
22620			pcmd = (u32 *)job->cmd_dmabuf->virt;
22621			if (pcmd && (*pcmd == ELS_CMD_FLOGI ||
22622				     *pcmd == ELS_CMD_SCR ||
22623				     *pcmd == ELS_CMD_RDF ||
22624				     *pcmd == ELS_CMD_EDC ||
22625				     *pcmd == ELS_CMD_RSCN_XMT ||
22626				     *pcmd == ELS_CMD_FDISC ||
22627				     *pcmd == ELS_CMD_LOGO ||
22628				     *pcmd == ELS_CMD_QFPA ||
22629				     *pcmd == ELS_CMD_UVEM ||
22630				     *pcmd == ELS_CMD_PLOGI)) {
22631				bf_set(els_req64_sp, &wqe->els_req, 1);
22632				bf_set(els_req64_sid, &wqe->els_req,
22633				       job->vport->fc_myDID);
22634
22635				if ((*pcmd == ELS_CMD_FLOGI) &&
22636				    !(phba->fc_topology ==
22637				      LPFC_TOPOLOGY_LOOP))
22638					bf_set(els_req64_sid, &wqe->els_req, 0);
22639
22640				bf_set(wqe_ct, &wqe->els_req.wqe_com, 1);
22641				bf_set(wqe_ctxt_tag, &wqe->els_req.wqe_com,
22642				       phba->vpi_ids[job->vport->vpi]);
22643			} else if (pcmd) {
22644				bf_set(wqe_ct, &wqe->els_req.wqe_com, 0);
22645				bf_set(wqe_ctxt_tag, &wqe->els_req.wqe_com,
22646				       phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
22647			}
22648		}
22649
22650		bf_set(wqe_temp_rpi, &wqe->els_req.wqe_com,
22651		       phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
22652
22653		bf_set(wqe_dbde, &wqe->els_req.wqe_com, 1);
22654		bf_set(wqe_iod, &wqe->els_req.wqe_com, LPFC_WQE_IOD_READ);
22655		bf_set(wqe_qosd, &wqe->els_req.wqe_com, 1);
22656		bf_set(wqe_lenloc, &wqe->els_req.wqe_com, LPFC_WQE_LENLOC_NONE);
22657		bf_set(wqe_ebde_cnt, &wqe->els_req.wqe_com, 0);
22658		break;
22659	case CMD_XMIT_ELS_RSP64_WQE:
22660		ndlp = job->ndlp;
22661
22662		/* word4 */
22663		wqe->xmit_els_rsp.word4 = 0;
22664
22665		if_type = bf_get(lpfc_sli_intf_if_type,
22666				 &phba->sli4_hba.sli_intf);
22667		if (if_type >= LPFC_SLI_INTF_IF_TYPE_2) {
22668			if (test_bit(FC_PT2PT, &job->vport->fc_flag)) {
22669				bf_set(els_rsp64_sp, &wqe->xmit_els_rsp, 1);
22670				bf_set(els_rsp64_sid, &wqe->xmit_els_rsp,
22671				       job->vport->fc_myDID);
22672				if (job->vport->fc_myDID == Fabric_DID) {
22673					bf_set(wqe_els_did,
22674					       &wqe->xmit_els_rsp.wqe_dest, 0);
22675				}
22676			}
22677		}
22678
22679		bf_set(wqe_dbde, &wqe->xmit_els_rsp.wqe_com, 1);
22680		bf_set(wqe_iod, &wqe->xmit_els_rsp.wqe_com, LPFC_WQE_IOD_WRITE);
22681		bf_set(wqe_qosd, &wqe->xmit_els_rsp.wqe_com, 1);
22682		bf_set(wqe_lenloc, &wqe->xmit_els_rsp.wqe_com,
22683		       LPFC_WQE_LENLOC_WORD3);
22684		bf_set(wqe_ebde_cnt, &wqe->xmit_els_rsp.wqe_com, 0);
22685
22686		if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) {
22687			bf_set(els_rsp64_sp, &wqe->xmit_els_rsp, 1);
22688			bf_set(els_rsp64_sid, &wqe->xmit_els_rsp,
22689			       job->vport->fc_myDID);
22690			bf_set(wqe_ct, &wqe->xmit_els_rsp.wqe_com, 1);
22691		}
22692
22693		if (phba->sli_rev == LPFC_SLI_REV4) {
22694			bf_set(wqe_rsp_temp_rpi, &wqe->xmit_els_rsp,
22695			       phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
22696
22697			if (bf_get(wqe_ct, &wqe->xmit_els_rsp.wqe_com))
22698				bf_set(wqe_ctxt_tag, &wqe->xmit_els_rsp.wqe_com,
22699				       phba->vpi_ids[job->vport->vpi]);
22700		}
22701		command_type = OTHER_COMMAND;
22702		break;
22703	case CMD_GEN_REQUEST64_WQE:
22704		/* Word 10 */
22705		bf_set(wqe_dbde, &wqe->gen_req.wqe_com, 1);
22706		bf_set(wqe_iod, &wqe->gen_req.wqe_com, LPFC_WQE_IOD_READ);
22707		bf_set(wqe_qosd, &wqe->gen_req.wqe_com, 1);
22708		bf_set(wqe_lenloc, &wqe->gen_req.wqe_com, LPFC_WQE_LENLOC_NONE);
22709		bf_set(wqe_ebde_cnt, &wqe->gen_req.wqe_com, 0);
22710		command_type = OTHER_COMMAND;
22711		break;
22712	case CMD_XMIT_SEQUENCE64_WQE:
22713		if (phba->link_flag & LS_LOOPBACK_MODE)
22714			bf_set(wqe_xo, &wqe->xmit_sequence.wge_ctl, 1);
22715
22716		wqe->xmit_sequence.rsvd3 = 0;
22717		bf_set(wqe_pu, &wqe->xmit_sequence.wqe_com, 0);
22718		bf_set(wqe_dbde, &wqe->xmit_sequence.wqe_com, 1);
22719		bf_set(wqe_iod, &wqe->xmit_sequence.wqe_com,
22720		       LPFC_WQE_IOD_WRITE);
22721		bf_set(wqe_lenloc, &wqe->xmit_sequence.wqe_com,
22722		       LPFC_WQE_LENLOC_WORD12);
22723		bf_set(wqe_ebde_cnt, &wqe->xmit_sequence.wqe_com, 0);
22724		command_type = OTHER_COMMAND;
22725		break;
22726	case CMD_XMIT_BLS_RSP64_WQE:
22727		bf_set(xmit_bls_rsp64_seqcnthi, &wqe->xmit_bls_rsp, 0xffff);
22728		bf_set(wqe_xmit_bls_pt, &wqe->xmit_bls_rsp.wqe_dest, 0x1);
22729		bf_set(wqe_ct, &wqe->xmit_bls_rsp.wqe_com, 1);
22730		bf_set(wqe_ctxt_tag, &wqe->xmit_bls_rsp.wqe_com,
22731		       phba->vpi_ids[phba->pport->vpi]);
22732		bf_set(wqe_qosd, &wqe->xmit_bls_rsp.wqe_com, 1);
22733		bf_set(wqe_lenloc, &wqe->xmit_bls_rsp.wqe_com,
22734		       LPFC_WQE_LENLOC_NONE);
22735		/* Overwrite the pre-set comnd type with OTHER_COMMAND */
22736		command_type = OTHER_COMMAND;
22737		break;
22738	case CMD_FCP_ICMND64_WQE:	/* task mgmt commands */
22739	case CMD_ABORT_XRI_WQE:		/* abort iotag */
22740	case CMD_SEND_FRAME:		/* mds loopback */
22741		/* cases already formatted for sli4 wqe - no chgs necessary */
22742		return;
22743	default:
22744		dump_stack();
22745		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
22746				"6207 Invalid command 0x%x\n",
22747				cmnd);
22748		break;
22749	}
22750
22751	wqe->generic.wqe_com.abort_tag = abort_tag;
22752	bf_set(wqe_reqtag, &wqe->generic.wqe_com, job->iotag);
22753	bf_set(wqe_cmd_type, &wqe->generic.wqe_com, command_type);
22754	bf_set(wqe_cqid, &wqe->generic.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);
22755}
22756