/* bnx2fc_io.c: QLogic Linux FCoE offload driver.
 * IO manager and SCSI IO processing.
 *
 * Copyright (c) 2008-2013 Broadcom Corporation
 * Copyright (c) 2014-2016 QLogic Corporation
 * Copyright (c) 2016-2017 Cavium Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Written by: Bhanu Prakash Gollapudi (bprakash@broadcom.com)
 */

#include "bnx2fc.h"

#define RESERVE_FREE_LIST_INDEX num_possible_cpus()

static int bnx2fc_split_bd(struct bnx2fc_cmd *io_req, u64 addr, int sg_len,
			   int bd_index);
static int bnx2fc_map_sg(struct bnx2fc_cmd *io_req);
static int bnx2fc_build_bd_list_from_sg(struct bnx2fc_cmd *io_req);
static void bnx2fc_unmap_sg_list(struct bnx2fc_cmd *io_req);
static void bnx2fc_free_mp_resc(struct bnx2fc_cmd *io_req);
static void bnx2fc_parse_fcp_rsp(struct bnx2fc_cmd *io_req,
				 struct fcoe_fcp_rsp_payload *fcp_rsp,
				 u8 num_rq, unsigned char *rq_data);

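/**
 * bnx2fc_cmd_timer_set - arm the timeout handler for an IO request
 *
 * @io_req:	bnx2fc_cmd for which the timer is being set
 * @timer_msec:	timeout interval in milliseconds
 *
 * Holds an extra reference on @io_req for as long as the timeout work
 * is pending; the reference is dropped by the timeout handler or by
 * whoever cancels the work.
 */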
void bnx2fc_cmd_timer_set(struct bnx2fc_cmd *io_req,
			  unsigned int timer_msec)
{
	struct bnx2fc_interface *interface = io_req->port->priv;

	if (queue_delayed_work(interface->timer_work_queue,
			       &io_req->timeout_work,
			       msecs_to_jiffies(timer_msec)))
		kref_get(&io_req->refcount);
}

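/**
 * bnx2fc_cmd_timeout - deferred work run when an IO request times out
 *
 * @work:	timeout_work member of the expired bnx2fc_cmd
 *
 * Handles RRQ issuance, eh_abort/ABTS timeouts and ELS timeouts under
 * tgt_lock, and drops the reference that was taken when the timer was
 * set.
 */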
static void bnx2fc_cmd_timeout(struct work_struct *work)
{
	struct bnx2fc_cmd *io_req = container_of(work, struct bnx2fc_cmd,
						 timeout_work.work);
	u8 cmd_type = io_req->cmd_type;
	struct bnx2fc_rport *tgt = io_req->tgt;
	int rc;

	BNX2FC_IO_DBG(io_req, "cmd_timeout, cmd_type = %d, "
		      "req_flags = %lx\n", cmd_type, io_req->req_flags);

	spin_lock_bh(&tgt->tgt_lock);
	if (test_and_clear_bit(BNX2FC_FLAG_ISSUE_RRQ, &io_req->req_flags)) {
		clear_bit(BNX2FC_FLAG_RETIRE_OXID, &io_req->req_flags);
		/*
		 * Ideally we should hold the io_req until the RRQ completes,
		 * and release io_req from timeout hold.
		 */
		spin_unlock_bh(&tgt->tgt_lock);
		bnx2fc_send_rrq(io_req);
		return;
	}
	if (test_and_clear_bit(BNX2FC_FLAG_RETIRE_OXID, &io_req->req_flags)) {
		BNX2FC_IO_DBG(io_req, "IO ready for reuse now\n");
		goto done;
	}

	switch (cmd_type) {
	case BNX2FC_SCSI_CMD:
		if (test_and_clear_bit(BNX2FC_FLAG_EH_ABORT,
							&io_req->req_flags)) {
			/* Handle eh_abort timeout */
			BNX2FC_IO_DBG(io_req, "eh_abort timed out\n");
			complete(&io_req->abts_done);
		} else if (test_bit(BNX2FC_FLAG_ISSUE_ABTS,
				    &io_req->req_flags)) {
			/* Handle internally generated ABTS timeout */
			BNX2FC_IO_DBG(io_req, "ABTS timed out refcnt = %d\n",
					kref_read(&io_req->refcount));
			if (!(test_and_set_bit(BNX2FC_FLAG_ABTS_DONE,
					       &io_req->req_flags))) {
				/*
				 * Cleanup and return original command to
				 * mid-layer.
				 */
				bnx2fc_initiate_cleanup(io_req);
				kref_put(&io_req->refcount, bnx2fc_cmd_release);
				spin_unlock_bh(&tgt->tgt_lock);

				return;
			}
		} else {
			/* Handle IO timeout */
			BNX2FC_IO_DBG(io_req, "IO timed out. Issue ABTS\n");
			if (test_and_set_bit(BNX2FC_FLAG_IO_COMPL,
					     &io_req->req_flags)) {
				BNX2FC_IO_DBG(io_req, "IO completed before "
						      "timer expiry\n");
				goto done;
			}

			if (!test_and_set_bit(BNX2FC_FLAG_ISSUE_ABTS,
					      &io_req->req_flags)) {
				rc = bnx2fc_initiate_abts(io_req);
				if (rc == SUCCESS)
					goto done;

				kref_put(&io_req->refcount, bnx2fc_cmd_release);
				spin_unlock_bh(&tgt->tgt_lock);

				return;
			} else {
				BNX2FC_IO_DBG(io_req, "IO already in "
						      "ABTS processing\n");
			}
		}
		break;
	case BNX2FC_ELS:
		if (test_bit(BNX2FC_FLAG_ISSUE_ABTS, &io_req->req_flags)) {
			BNX2FC_IO_DBG(io_req, "ABTS for ELS timed out\n");

			if (!test_and_set_bit(BNX2FC_FLAG_ABTS_DONE,
					      &io_req->req_flags)) {
				kref_put(&io_req->refcount, bnx2fc_cmd_release);
				spin_unlock_bh(&tgt->tgt_lock);

				return;
			}
		} else {
			/*
			 * Handle ELS timeout.
			 * tgt_lock is used to sync compl path and timeout
			 * path. If els compl path is processing this IO, we
			 * have nothing to do here, just release the timer
			 * hold.
			 */
			BNX2FC_IO_DBG(io_req, "ELS timed out\n");
			if (test_and_set_bit(BNX2FC_FLAG_ELS_DONE,
					       &io_req->req_flags))
				goto done;

			/* Indicate to the cb_func that this ELS timed out */
			set_bit(BNX2FC_FLAG_ELS_TIMEOUT, &io_req->req_flags);

			if ((io_req->cb_func) && (io_req->cb_arg)) {
				io_req->cb_func(io_req->cb_arg);
				io_req->cb_arg = NULL;
			}
		}
		break;
	default:
		printk(KERN_ERR PFX "cmd_timeout: invalid cmd_type %d\n",
			cmd_type);
		break;
	}

done:
	/* release the cmd that was held when timer was set */
	kref_put(&io_req->refcount, bnx2fc_cmd_release);
	spin_unlock_bh(&tgt->tgt_lock);
}

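/**
 * bnx2fc_scsi_done - return a SCSI command to the midlayer with an error
 *
 * @io_req:	IO request whose command is being errored back
 * @err_code:	host byte to report (e.g. DID_ERROR, DID_ABORT)
 *
 * Only acts on BNX2FC_SCSI_CMD requests; unmaps the scatterlist,
 * clears the io_req back-pointer and completes the command.
 */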
static void bnx2fc_scsi_done(struct bnx2fc_cmd *io_req, int err_code)
{
	/* Called with host lock held */
	struct scsi_cmnd *sc_cmd = io_req->sc_cmd;

	/*
	 * active_cmd_queue may have other command types as well,
	 * and during flush operation, we want to error back only
	 * scsi commands.
	 */
	if (io_req->cmd_type != BNX2FC_SCSI_CMD)
		return;

	BNX2FC_IO_DBG(io_req, "scsi_done. err_code = 0x%x\n", err_code);
	if (test_bit(BNX2FC_FLAG_CMD_LOST, &io_req->req_flags)) {
		/* Do not call scsi done for this IO */
		return;
	}

	bnx2fc_unmap_sg_list(io_req);
	io_req->sc_cmd = NULL;

	/* Sanity checks before returning command to mid-layer */
	if (!sc_cmd) {
		printk(KERN_ERR PFX "scsi_done - sc_cmd NULL. "
				    "IO(0x%x) already cleaned up\n",
		       io_req->xid);
		return;
	}
	if (!sc_cmd->device) {
		pr_err(PFX "0x%x: sc_cmd->device is NULL.\n", io_req->xid);
		return;
	}
	if (!sc_cmd->device->host) {
		pr_err(PFX "0x%x: sc_cmd->device->host is NULL.\n",
		    io_req->xid);
		return;
	}

	sc_cmd->result = err_code << 16;

	BNX2FC_IO_DBG(io_req, "sc=%p, result=0x%x, retries=%d, allowed=%d\n",
		sc_cmd, host_byte(sc_cmd->result), sc_cmd->retries,
		sc_cmd->allowed);
	scsi_set_resid(sc_cmd, scsi_bufflen(sc_cmd));
	bnx2fc_priv(sc_cmd)->io_req = NULL;
	scsi_done(sc_cmd);
}

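/**
 * bnx2fc_cmd_mgr_alloc - allocate the command manager for an adapter
 *
 * @hba:	adapter structure the command manager belongs to
 *
 * Pre-allocates one bnx2fc_cmd per XID along with the per-command BD
 * tables, split across per-CPU free lists plus one reserved list for
 * ELS/TM requests. Returns NULL on failure.
 */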
struct bnx2fc_cmd_mgr *bnx2fc_cmd_mgr_alloc(struct bnx2fc_hba *hba)
{
	struct bnx2fc_cmd_mgr *cmgr;
	struct io_bdt *bdt_info;
	struct bnx2fc_cmd *io_req;
	size_t len;
	u32 mem_size;
	u16 xid;
	int i;
	int num_ios, num_pri_ios;
	size_t bd_tbl_sz;
	int arr_sz = num_possible_cpus() + 1;
	u16 min_xid = BNX2FC_MIN_XID;
	u16 max_xid = hba->max_xid;

	if (max_xid <= min_xid || max_xid == FC_XID_UNKNOWN) {
		printk(KERN_ERR PFX "cmd_mgr_alloc: Invalid min_xid 0x%x "
		       "and max_xid 0x%x\n", min_xid, max_xid);
		return NULL;
	}
	BNX2FC_MISC_DBG("min xid 0x%x, max xid 0x%x\n", min_xid, max_xid);

	num_ios = max_xid - min_xid + 1;
	len = (num_ios * (sizeof(struct bnx2fc_cmd *)));
	len += sizeof(struct bnx2fc_cmd_mgr);

	cmgr = kzalloc(len, GFP_KERNEL);
	if (!cmgr) {
		printk(KERN_ERR PFX "failed to alloc cmgr\n");
		return NULL;
	}

	cmgr->hba = hba;
	cmgr->free_list = kcalloc(arr_sz, sizeof(*cmgr->free_list),
				  GFP_KERNEL);
	if (!cmgr->free_list) {
		printk(KERN_ERR PFX "failed to alloc free_list\n");
		goto mem_err;
	}

	cmgr->free_list_lock = kcalloc(arr_sz, sizeof(*cmgr->free_list_lock),
				       GFP_KERNEL);
	if (!cmgr->free_list_lock) {
		printk(KERN_ERR PFX "failed to alloc free_list_lock\n");
		kfree(cmgr->free_list);
		cmgr->free_list = NULL;
		goto mem_err;
	}

	cmgr->cmds = (struct bnx2fc_cmd **)(cmgr + 1);

	for (i = 0; i < arr_sz; i++) {
		INIT_LIST_HEAD(&cmgr->free_list[i]);
		spin_lock_init(&cmgr->free_list_lock[i]);
	}

	/*
	 * Pre-allocated pool of bnx2fc_cmds.
	 * Last entry in the free list array is the free list
	 * of slow path requests.
	 */
	xid = BNX2FC_MIN_XID;
	num_pri_ios = num_ios - hba->elstm_xids;
	for (i = 0; i < num_ios; i++) {
		io_req = kzalloc(sizeof(*io_req), GFP_KERNEL);

		if (!io_req) {
			printk(KERN_ERR PFX "failed to alloc io_req\n");
			goto mem_err;
		}

		INIT_LIST_HEAD(&io_req->link);
		INIT_DELAYED_WORK(&io_req->timeout_work, bnx2fc_cmd_timeout);

		io_req->xid = xid++;
		if (i < num_pri_ios)
			list_add_tail(&io_req->link,
				&cmgr->free_list[io_req->xid %
						 num_possible_cpus()]);
		else
			list_add_tail(&io_req->link,
				&cmgr->free_list[num_possible_cpus()]);
		io_req++;
	}

	/* Allocate pool of io_bdts - one for each bnx2fc_cmd */
	mem_size = num_ios * sizeof(struct io_bdt *);
	cmgr->io_bdt_pool = kzalloc(mem_size, GFP_KERNEL);
	if (!cmgr->io_bdt_pool) {
		printk(KERN_ERR PFX "failed to alloc io_bdt_pool\n");
		goto mem_err;
	}

	mem_size = sizeof(struct io_bdt);
	for (i = 0; i < num_ios; i++) {
		cmgr->io_bdt_pool[i] = kmalloc(mem_size, GFP_KERNEL);
		if (!cmgr->io_bdt_pool[i]) {
			printk(KERN_ERR PFX "failed to alloc "
				"io_bdt_pool[%d]\n", i);
			goto mem_err;
		}
	}

	/* Allocate and map fcoe_bdt_ctx structures */
	bd_tbl_sz = BNX2FC_MAX_BDS_PER_CMD * sizeof(struct fcoe_bd_ctx);
	for (i = 0; i < num_ios; i++) {
		bdt_info = cmgr->io_bdt_pool[i];
		bdt_info->bd_tbl = dma_alloc_coherent(&hba->pcidev->dev,
						      bd_tbl_sz,
						      &bdt_info->bd_tbl_dma,
						      GFP_KERNEL);
		if (!bdt_info->bd_tbl) {
			printk(KERN_ERR PFX "failed to alloc "
				"bdt_tbl[%d]\n", i);
			goto mem_err;
		}
	}

	return cmgr;

mem_err:
	bnx2fc_cmd_mgr_free(cmgr);
	return NULL;
}

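/**
 * bnx2fc_cmd_mgr_free - tear down the command manager
 *
 * @cmgr:	command manager being freed
 *
 * Releases the BD tables, the io_bdt pool, all pre-allocated
 * bnx2fc_cmds on the free lists and finally the manager itself.
 * Safe to call on a partially constructed manager.
 */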
void bnx2fc_cmd_mgr_free(struct bnx2fc_cmd_mgr *cmgr)
{
	struct io_bdt *bdt_info;
	struct bnx2fc_hba *hba = cmgr->hba;
	size_t bd_tbl_sz;
	u16 min_xid = BNX2FC_MIN_XID;
	u16 max_xid = hba->max_xid;
	int num_ios;
	int i;

	num_ios = max_xid - min_xid + 1;

	/* Free fcoe_bdt_ctx structures */
	if (!cmgr->io_bdt_pool)
		goto free_cmd_pool;

	bd_tbl_sz = BNX2FC_MAX_BDS_PER_CMD * sizeof(struct fcoe_bd_ctx);
	for (i = 0; i < num_ios; i++) {
		bdt_info = cmgr->io_bdt_pool[i];
		if (bdt_info->bd_tbl) {
			dma_free_coherent(&hba->pcidev->dev, bd_tbl_sz,
					    bdt_info->bd_tbl,
					    bdt_info->bd_tbl_dma);
			bdt_info->bd_tbl = NULL;
		}
	}

	/* Destroy io_bdt pool */
	for (i = 0; i < num_ios; i++) {
		kfree(cmgr->io_bdt_pool[i]);
		cmgr->io_bdt_pool[i] = NULL;
	}

	kfree(cmgr->io_bdt_pool);
	cmgr->io_bdt_pool = NULL;

free_cmd_pool:
	kfree(cmgr->free_list_lock);

	/* Destroy cmd pool */
	if (!cmgr->free_list)
		goto free_cmgr;

	for (i = 0; i < num_possible_cpus() + 1; i++) {
		struct bnx2fc_cmd *tmp, *io_req;

		list_for_each_entry_safe(io_req, tmp,
					 &cmgr->free_list[i], link) {
			list_del(&io_req->link);
			kfree(io_req);
		}
	}
	kfree(cmgr->free_list);
free_cmgr:
	/* Free command manager itself */
	kfree(cmgr);
}

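/**
 * bnx2fc_elstm_alloc - allocate a command for an ELS/TM/slow-path request
 *
 * @tgt:	rport the request is issued to
 * @type:	BNX2FC_ELS, BNX2FC_TASK_MGMT_CMD, BNX2FC_ABTS, etc.
 *
 * Allocates from the reserved (slow path) free list and enforces the
 * per-type SQ WQE budget. Returns NULL if no command or SQ space is
 * available.
 */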
struct bnx2fc_cmd *bnx2fc_elstm_alloc(struct bnx2fc_rport *tgt, int type)
{
	struct fcoe_port *port = tgt->port;
	struct bnx2fc_interface *interface = port->priv;
	struct bnx2fc_cmd_mgr *cmd_mgr = interface->hba->cmd_mgr;
	struct bnx2fc_cmd *io_req;
	struct list_head *listp;
	struct io_bdt *bd_tbl;
	int index = RESERVE_FREE_LIST_INDEX;
	u32 free_sqes;
	u32 max_sqes;
	u16 xid;

	max_sqes = tgt->max_sqes;
	switch (type) {
	case BNX2FC_TASK_MGMT_CMD:
		max_sqes = BNX2FC_TM_MAX_SQES;
		break;
	case BNX2FC_ELS:
		max_sqes = BNX2FC_ELS_MAX_SQES;
		break;
	default:
		break;
	}

	/*
	 * NOTE: Free list insertions and deletions are protected with
	 * cmgr lock
	 */
	spin_lock_bh(&cmd_mgr->free_list_lock[index]);
	free_sqes = atomic_read(&tgt->free_sqes);
	if ((list_empty(&(cmd_mgr->free_list[index]))) ||
	    (tgt->num_active_ios.counter >= max_sqes) ||
	    (free_sqes + max_sqes <= BNX2FC_SQ_WQES_MAX)) {
		BNX2FC_TGT_DBG(tgt, "No free els_tm cmds available "
			"ios(%d):sqes(%d)\n",
			tgt->num_active_ios.counter, tgt->max_sqes);
		if (list_empty(&(cmd_mgr->free_list[index])))
			printk(KERN_ERR PFX "elstm_alloc: list_empty\n");
		spin_unlock_bh(&cmd_mgr->free_list_lock[index]);
		return NULL;
	}

	listp = (struct list_head *)
			cmd_mgr->free_list[index].next;
	list_del_init(listp);
	io_req = (struct bnx2fc_cmd *) listp;
	xid = io_req->xid;
	cmd_mgr->cmds[xid] = io_req;
	atomic_inc(&tgt->num_active_ios);
	atomic_dec(&tgt->free_sqes);
	spin_unlock_bh(&cmd_mgr->free_list_lock[index]);

	INIT_LIST_HEAD(&io_req->link);

	io_req->port = port;
	io_req->cmd_mgr = cmd_mgr;
	io_req->req_flags = 0;
	io_req->cmd_type = type;

	/* Bind io_bdt for this io_req */
	/* Have a static link between io_req and io_bdt_pool */
	bd_tbl = io_req->bd_tbl = cmd_mgr->io_bdt_pool[xid];
	bd_tbl->io_req = io_req;

	/* Hold the io_req against deletion */
	kref_init(&io_req->refcount);
	return io_req;
}

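/**
 * bnx2fc_cmd_alloc - allocate a command for a SCSI IO (fast path)
 *
 * @tgt:	rport the IO is issued to
 *
 * Like bnx2fc_elstm_alloc() but draws from the free list of the
 * current CPU. Returns NULL if no command or SQ space is available.
 */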
struct bnx2fc_cmd *bnx2fc_cmd_alloc(struct bnx2fc_rport *tgt)
{
	struct fcoe_port *port = tgt->port;
	struct bnx2fc_interface *interface = port->priv;
	struct bnx2fc_cmd_mgr *cmd_mgr = interface->hba->cmd_mgr;
	struct bnx2fc_cmd *io_req;
	struct list_head *listp;
	struct io_bdt *bd_tbl;
	u32 free_sqes;
	u32 max_sqes;
	u16 xid;
	int index = raw_smp_processor_id();

	max_sqes = BNX2FC_SCSI_MAX_SQES;
	/*
	 * NOTE: Free list insertions and deletions are protected with
	 * cmgr lock
	 */
	spin_lock_bh(&cmd_mgr->free_list_lock[index]);
	free_sqes = atomic_read(&tgt->free_sqes);
	if ((list_empty(&cmd_mgr->free_list[index])) ||
	    (tgt->num_active_ios.counter >= max_sqes) ||
	    (free_sqes + max_sqes <= BNX2FC_SQ_WQES_MAX)) {
		spin_unlock_bh(&cmd_mgr->free_list_lock[index]);
		return NULL;
	}

	listp = (struct list_head *)
		cmd_mgr->free_list[index].next;
	list_del_init(listp);
	io_req = (struct bnx2fc_cmd *) listp;
	xid = io_req->xid;
	cmd_mgr->cmds[xid] = io_req;
	atomic_inc(&tgt->num_active_ios);
	atomic_dec(&tgt->free_sqes);
	spin_unlock_bh(&cmd_mgr->free_list_lock[index]);

	INIT_LIST_HEAD(&io_req->link);

	io_req->port = port;
	io_req->cmd_mgr = cmd_mgr;
	io_req->req_flags = 0;

	/* Bind io_bdt for this io_req */
	/* Have a static link between io_req and io_bdt_pool */
	bd_tbl = io_req->bd_tbl = cmd_mgr->io_bdt_pool[xid];
	bd_tbl->io_req = io_req;

	/* Hold the io_req against deletion */
	kref_init(&io_req->refcount);
	return io_req;
}

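/**
 * bnx2fc_cmd_release - kref release handler for a bnx2fc_cmd
 *
 * @ref:	refcount member of the command being released
 *
 * Returns the command to the free list it was allocated from and,
 * for slow-path commands, frees the middle-path resources.
 */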
void bnx2fc_cmd_release(struct kref *ref)
{
	struct bnx2fc_cmd *io_req = container_of(ref,
						struct bnx2fc_cmd, refcount);
	struct bnx2fc_cmd_mgr *cmd_mgr = io_req->cmd_mgr;
	int index;

	if (io_req->cmd_type == BNX2FC_SCSI_CMD)
		index = io_req->xid % num_possible_cpus();
	else
		index = RESERVE_FREE_LIST_INDEX;

	spin_lock_bh(&cmd_mgr->free_list_lock[index]);
	if (io_req->cmd_type != BNX2FC_SCSI_CMD)
		bnx2fc_free_mp_resc(io_req);
	cmd_mgr->cmds[io_req->xid] = NULL;
	/* Delete IO from retire queue */
	list_del_init(&io_req->link);
	/* Add it to the free list */
	list_add(&io_req->link,
			&cmd_mgr->free_list[index]);
	atomic_dec(&io_req->tgt->num_active_ios);
	spin_unlock_bh(&cmd_mgr->free_list_lock[index]);
}

static void bnx2fc_free_mp_resc(struct bnx2fc_cmd *io_req)
{
	struct bnx2fc_mp_req *mp_req = &(io_req->mp_req);
	struct bnx2fc_interface *interface = io_req->port->priv;
	struct bnx2fc_hba *hba = interface->hba;
	size_t sz = sizeof(struct fcoe_bd_ctx);

	/* clear tm flags */
	mp_req->tm_flags = 0;
	if (mp_req->mp_req_bd) {
		dma_free_coherent(&hba->pcidev->dev, sz,
				     mp_req->mp_req_bd,
				     mp_req->mp_req_bd_dma);
		mp_req->mp_req_bd = NULL;
	}
	if (mp_req->mp_resp_bd) {
		dma_free_coherent(&hba->pcidev->dev, sz,
				     mp_req->mp_resp_bd,
				     mp_req->mp_resp_bd_dma);
		mp_req->mp_resp_bd = NULL;
	}
	if (mp_req->req_buf) {
		dma_free_coherent(&hba->pcidev->dev, CNIC_PAGE_SIZE,
				     mp_req->req_buf,
				     mp_req->req_buf_dma);
		mp_req->req_buf = NULL;
	}
	if (mp_req->resp_buf) {
		dma_free_coherent(&hba->pcidev->dev, CNIC_PAGE_SIZE,
				     mp_req->resp_buf,
				     mp_req->resp_buf_dma);
		mp_req->resp_buf = NULL;
	}
}

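/**
 * bnx2fc_init_mp_req - set up middle-path buffers and BDs for a request
 *
 * @io_req:	ELS or task management request
 *
 * Allocates DMA-coherent request/response buffers and single-entry BD
 * tables for them. Returns SUCCESS or FAILED; on failure any buffers
 * already allocated are freed.
 */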
int bnx2fc_init_mp_req(struct bnx2fc_cmd *io_req)
{
	struct bnx2fc_mp_req *mp_req;
	struct fcoe_bd_ctx *mp_req_bd;
	struct fcoe_bd_ctx *mp_resp_bd;
	struct bnx2fc_interface *interface = io_req->port->priv;
	struct bnx2fc_hba *hba = interface->hba;
	dma_addr_t addr;
	size_t sz;

	mp_req = (struct bnx2fc_mp_req *)&(io_req->mp_req);
	memset(mp_req, 0, sizeof(struct bnx2fc_mp_req));

	if (io_req->cmd_type != BNX2FC_ELS) {
		mp_req->req_len = sizeof(struct fcp_cmnd);
		io_req->data_xfer_len = mp_req->req_len;
	} else
		mp_req->req_len = io_req->data_xfer_len;

	mp_req->req_buf = dma_alloc_coherent(&hba->pcidev->dev, CNIC_PAGE_SIZE,
					     &mp_req->req_buf_dma,
					     GFP_ATOMIC);
	if (!mp_req->req_buf) {
		printk(KERN_ERR PFX "unable to alloc MP req buffer\n");
		bnx2fc_free_mp_resc(io_req);
		return FAILED;
	}

	mp_req->resp_buf = dma_alloc_coherent(&hba->pcidev->dev, CNIC_PAGE_SIZE,
					      &mp_req->resp_buf_dma,
					      GFP_ATOMIC);
	if (!mp_req->resp_buf) {
		printk(KERN_ERR PFX "unable to alloc TM resp buffer\n");
		bnx2fc_free_mp_resc(io_req);
		return FAILED;
	}
	memset(mp_req->req_buf, 0, CNIC_PAGE_SIZE);
	memset(mp_req->resp_buf, 0, CNIC_PAGE_SIZE);

	/* Allocate and map mp_req_bd and mp_resp_bd */
	sz = sizeof(struct fcoe_bd_ctx);
	mp_req->mp_req_bd = dma_alloc_coherent(&hba->pcidev->dev, sz,
						 &mp_req->mp_req_bd_dma,
						 GFP_ATOMIC);
	if (!mp_req->mp_req_bd) {
		printk(KERN_ERR PFX "unable to alloc MP req bd\n");
		bnx2fc_free_mp_resc(io_req);
		return FAILED;
	}
	mp_req->mp_resp_bd = dma_alloc_coherent(&hba->pcidev->dev, sz,
						 &mp_req->mp_resp_bd_dma,
						 GFP_ATOMIC);
	if (!mp_req->mp_resp_bd) {
		printk(KERN_ERR PFX "unable to alloc MP resp bd\n");
		bnx2fc_free_mp_resc(io_req);
		return FAILED;
	}
	/* Fill bd table */
	addr = mp_req->req_buf_dma;
	mp_req_bd = mp_req->mp_req_bd;
	mp_req_bd->buf_addr_lo = (u32)addr & 0xffffffff;
	mp_req_bd->buf_addr_hi = (u32)((u64)addr >> 32);
	mp_req_bd->buf_len = CNIC_PAGE_SIZE;
	mp_req_bd->flags = 0;

	/*
	 * MP buffer is either a task mgmt command or an ELS.
	 * So the assumption is that it consumes a single bd
	 * entry in the bd table
	 */
	mp_resp_bd = mp_req->mp_resp_bd;
	addr = mp_req->resp_buf_dma;
	mp_resp_bd->buf_addr_lo = (u32)addr & 0xffffffff;
	mp_resp_bd->buf_addr_hi = (u32)((u64)addr >> 32);
	mp_resp_bd->buf_len = CNIC_PAGE_SIZE;
	mp_resp_bd->flags = 0;

	return SUCCESS;
}

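/**
 * bnx2fc_initiate_tmf - issue a task management request and wait for it
 *
 * @lport:	local port
 * @rport:	remote port the TMF is sent to
 * @tm_lun:	LUN the TMF operates on (LUN reset)
 * @tm_flags:	FCP TM flags (FCP_TMF_LUN_RESET or FCP_TMF_TGT_RESET)
 *
 * Builds the FCP_CMND and FC header for the TMF, posts it to the SQ
 * and waits up to the configured TM timeout for completion, cleaning
 * up through the firmware on timeout. Returns SUCCESS or FAILED.
 */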
static int bnx2fc_initiate_tmf(struct fc_lport *lport, struct fc_rport *rport,
			       u64 tm_lun, u8 tm_flags)
{
	struct fc_rport_libfc_priv *rp;
	struct fcoe_port *port;
	struct bnx2fc_interface *interface;
	struct bnx2fc_rport *tgt;
	struct bnx2fc_cmd *io_req;
	struct bnx2fc_mp_req *tm_req;
	struct fcoe_task_ctx_entry *task;
	struct fcoe_task_ctx_entry *task_page;
	struct fc_frame_header *fc_hdr;
	struct fcp_cmnd *fcp_cmnd;
	int task_idx, index;
	int rc = SUCCESS;
	u16 xid;
	u32 sid, did;
	unsigned long start = jiffies;

	port = lport_priv(lport);
	interface = port->priv;

	if (rport == NULL) {
		printk(KERN_ERR PFX "device_reset: rport is NULL\n");
		rc = FAILED;
		goto tmf_err;
	}
	rp = rport->dd_data;

	rc = fc_block_rport(rport);
	if (rc)
		return rc;

	if (lport->state != LPORT_ST_READY || !(lport->link_up)) {
		printk(KERN_ERR PFX "device_reset: link is not ready\n");
		rc = FAILED;
		goto tmf_err;
	}
	/* rport and tgt are allocated together, so tgt should be non-NULL */
	tgt = (struct bnx2fc_rport *)&rp[1];

	if (!(test_bit(BNX2FC_FLAG_SESSION_READY, &tgt->flags))) {
		printk(KERN_ERR PFX "device_reset: tgt not offloaded\n");
		rc = FAILED;
		goto tmf_err;
	}
retry_tmf:
	io_req = bnx2fc_elstm_alloc(tgt, BNX2FC_TASK_MGMT_CMD);
	if (!io_req) {
		if (time_after(jiffies, start + HZ)) {
			printk(KERN_ERR PFX "tmf: Failed TMF\n");
			rc = FAILED;
			goto tmf_err;
		}
		msleep(20);
		goto retry_tmf;
	}
	/* Initialize rest of io_req fields */
	io_req->sc_cmd = NULL;
	io_req->port = port;
	io_req->tgt = tgt;

	tm_req = (struct bnx2fc_mp_req *)&(io_req->mp_req);

	rc = bnx2fc_init_mp_req(io_req);
	if (rc == FAILED) {
		printk(KERN_ERR PFX "Task mgmt MP request init failed\n");
		spin_lock_bh(&tgt->tgt_lock);
		kref_put(&io_req->refcount, bnx2fc_cmd_release);
		spin_unlock_bh(&tgt->tgt_lock);
		goto tmf_err;
	}

	/* Set TM flags */
	io_req->io_req_flags = 0;
	tm_req->tm_flags = tm_flags;
	tm_req->tm_lun = tm_lun;

	/* Fill FCP_CMND */
	bnx2fc_build_fcp_cmnd(io_req, (struct fcp_cmnd *)tm_req->req_buf);
	fcp_cmnd = (struct fcp_cmnd *)tm_req->req_buf;
	int_to_scsilun(tm_lun, &fcp_cmnd->fc_lun);
	memset(fcp_cmnd->fc_cdb, 0, BNX2FC_MAX_CMD_LEN);
	fcp_cmnd->fc_dl = 0;

	/* Fill FC header */
	fc_hdr = &(tm_req->req_fc_hdr);
	sid = tgt->sid;
	did = rport->port_id;
	__fc_fill_fc_hdr(fc_hdr, FC_RCTL_DD_UNSOL_CMD, did, sid,
			   FC_TYPE_FCP, FC_FC_FIRST_SEQ | FC_FC_END_SEQ |
			   FC_FC_SEQ_INIT, 0);
	/* Obtain exchange id */
	xid = io_req->xid;

	BNX2FC_TGT_DBG(tgt, "Initiate TMF - xid = 0x%x\n", xid);
	task_idx = xid/BNX2FC_TASKS_PER_PAGE;
	index = xid % BNX2FC_TASKS_PER_PAGE;

	/* Initialize task context for this IO request */
	task_page = (struct fcoe_task_ctx_entry *)
			interface->hba->task_ctx[task_idx];
	task = &(task_page[index]);
	bnx2fc_init_mp_task(io_req, task);

	/* Obtain free SQ entry */
	spin_lock_bh(&tgt->tgt_lock);
	bnx2fc_add_2_sq(tgt, xid);

	/* Enqueue the io_req to active_tm_queue */
	io_req->on_tmf_queue = 1;
	list_add_tail(&io_req->link, &tgt->active_tm_queue);

	init_completion(&io_req->abts_done);
	io_req->wait_for_abts_comp = 1;

	/* Ring doorbell */
	bnx2fc_ring_doorbell(tgt);
	spin_unlock_bh(&tgt->tgt_lock);

	rc = wait_for_completion_timeout(&io_req->abts_done,
					 interface->tm_timeout * HZ);
	spin_lock_bh(&tgt->tgt_lock);

	io_req->wait_for_abts_comp = 0;
	if (!(test_bit(BNX2FC_FLAG_TM_COMPL, &io_req->req_flags))) {
		set_bit(BNX2FC_FLAG_TM_TIMEOUT, &io_req->req_flags);
		if (io_req->on_tmf_queue) {
			list_del_init(&io_req->link);
			io_req->on_tmf_queue = 0;
		}
		io_req->wait_for_cleanup_comp = 1;
		init_completion(&io_req->cleanup_done);
		bnx2fc_initiate_cleanup(io_req);
		spin_unlock_bh(&tgt->tgt_lock);
		rc = wait_for_completion_timeout(&io_req->cleanup_done,
						 BNX2FC_FW_TIMEOUT);
		spin_lock_bh(&tgt->tgt_lock);
		io_req->wait_for_cleanup_comp = 0;
		if (!rc)
			kref_put(&io_req->refcount, bnx2fc_cmd_release);
	}

	spin_unlock_bh(&tgt->tgt_lock);

	if (!rc) {
		BNX2FC_TGT_DBG(tgt, "task mgmt command failed...\n");
		rc = FAILED;
	} else {
		BNX2FC_TGT_DBG(tgt, "task mgmt command success...\n");
		rc = SUCCESS;
	}
tmf_err:
	return rc;
}

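/**
 * bnx2fc_initiate_abts - send an ABTS for an outstanding exchange
 *
 * @io_req:	IO request whose exchange is being aborted
 *
 * Called with tgt_lock held. Allocates a temporary ABTS command,
 * fills the BLS header with the OX_ID/RX_ID of the original exchange
 * and posts it to the SQ. Returns SUCCESS or FAILED.
 */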
int bnx2fc_initiate_abts(struct bnx2fc_cmd *io_req)
{
	struct fc_lport *lport;
	struct bnx2fc_rport *tgt = io_req->tgt;
	struct fc_rport *rport = tgt->rport;
	struct fc_rport_priv *rdata = tgt->rdata;
	struct bnx2fc_interface *interface;
	struct fcoe_port *port;
	struct bnx2fc_cmd *abts_io_req;
	struct fcoe_task_ctx_entry *task;
	struct fcoe_task_ctx_entry *task_page;
	struct fc_frame_header *fc_hdr;
	struct bnx2fc_mp_req *abts_req;
	int task_idx, index;
	u32 sid, did;
	u16 xid;
	int rc = SUCCESS;
	u32 r_a_tov = rdata->r_a_tov;

	/* called with tgt_lock held */
	BNX2FC_IO_DBG(io_req, "Entered bnx2fc_initiate_abts\n");

	port = io_req->port;
	interface = port->priv;
	lport = port->lport;

	if (!test_bit(BNX2FC_FLAG_SESSION_READY, &tgt->flags)) {
		printk(KERN_ERR PFX "initiate_abts: tgt not offloaded\n");
		rc = FAILED;
		goto abts_err;
	}

	if (rport == NULL) {
		printk(KERN_ERR PFX "initiate_abts: rport is NULL\n");
		rc = FAILED;
		goto abts_err;
	}

	if (lport->state != LPORT_ST_READY || !(lport->link_up)) {
		printk(KERN_ERR PFX "initiate_abts: link is not ready\n");
		rc = FAILED;
		goto abts_err;
	}

	abts_io_req = bnx2fc_elstm_alloc(tgt, BNX2FC_ABTS);
	if (!abts_io_req) {
		printk(KERN_ERR PFX "abts: couldn't allocate cmd\n");
		rc = FAILED;
		goto abts_err;
	}

	/* Initialize rest of io_req fields */
	abts_io_req->sc_cmd = NULL;
	abts_io_req->port = port;
	abts_io_req->tgt = tgt;
	abts_io_req->data_xfer_len = 0; /* No data transfer for ABTS */

	abts_req = (struct bnx2fc_mp_req *)&(abts_io_req->mp_req);
	memset(abts_req, 0, sizeof(struct bnx2fc_mp_req));

	/* Fill FC header */
	fc_hdr = &(abts_req->req_fc_hdr);

	/* Obtain oxid and rxid for the original exchange to be aborted */
	fc_hdr->fh_ox_id = htons(io_req->xid);
	fc_hdr->fh_rx_id = htons(io_req->task->rxwr_txrd.var_ctx.rx_id);

	sid = tgt->sid;
	did = rport->port_id;

	__fc_fill_fc_hdr(fc_hdr, FC_RCTL_BA_ABTS, did, sid,
			   FC_TYPE_BLS, FC_FC_FIRST_SEQ | FC_FC_END_SEQ |
			   FC_FC_SEQ_INIT, 0);

	xid = abts_io_req->xid;
	BNX2FC_IO_DBG(abts_io_req, "ABTS io_req\n");
	task_idx = xid/BNX2FC_TASKS_PER_PAGE;
	index = xid % BNX2FC_TASKS_PER_PAGE;

	/* Initialize task context for this IO request */
	task_page = (struct fcoe_task_ctx_entry *)
			interface->hba->task_ctx[task_idx];
	task = &(task_page[index]);
	bnx2fc_init_mp_task(abts_io_req, task);

	/*
	 * ABTS task is a temporary task that will be cleaned up
	 * irrespective of ABTS response. We need to start the timer
	 * for the original exchange, as the CQE is posted for the original
	 * IO request.
	 *
	 * Timer for ABTS is started only when it is originated by a
	 * TM request. For the ABTS issued as part of ULP timeout,
	 * scsi-ml maintains the timers.
	 */

	/* if (test_bit(BNX2FC_FLAG_ISSUE_ABTS, &io_req->req_flags))*/
	bnx2fc_cmd_timer_set(io_req, 2 * r_a_tov);

	/* Obtain free SQ entry */
	bnx2fc_add_2_sq(tgt, xid);

	/* Ring doorbell */
	bnx2fc_ring_doorbell(tgt);

abts_err:
	return rc;
}

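/**
 * bnx2fc_initiate_seq_cleanup - post a sequence cleanup task to the firmware
 *
 * @orig_io_req:	IO request whose sequence is being cleaned up
 * @offset:		data offset at which the sequence is restarted
 * @r_ctl:		R_CTL of the frame that triggered the cleanup
 *
 * Used in the SRR (sequence retransmission request) path. Takes a
 * reference on @orig_io_req that is dropped from the completion
 * handler. Returns 0 or a negative errno.
 */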
int bnx2fc_initiate_seq_cleanup(struct bnx2fc_cmd *orig_io_req, u32 offset,
				enum fc_rctl r_ctl)
{
	struct bnx2fc_rport *tgt = orig_io_req->tgt;
	struct bnx2fc_interface *interface;
	struct fcoe_port *port;
	struct bnx2fc_cmd *seq_clnp_req;
	struct fcoe_task_ctx_entry *task;
	struct fcoe_task_ctx_entry *task_page;
	struct bnx2fc_els_cb_arg *cb_arg = NULL;
	int task_idx, index;
	u16 xid;
	int rc = 0;

	BNX2FC_IO_DBG(orig_io_req, "bnx2fc_initiate_seq_cleanup xid = 0x%x\n",
		   orig_io_req->xid);
	kref_get(&orig_io_req->refcount);

	port = orig_io_req->port;
	interface = port->priv;

	cb_arg = kzalloc(sizeof(struct bnx2fc_els_cb_arg), GFP_ATOMIC);
	if (!cb_arg) {
		printk(KERN_ERR PFX "Unable to alloc cb_arg for seq clnup\n");
		rc = -ENOMEM;
		goto cleanup_err;
	}

	seq_clnp_req = bnx2fc_elstm_alloc(tgt, BNX2FC_SEQ_CLEANUP);
	if (!seq_clnp_req) {
		printk(KERN_ERR PFX "cleanup: couldn't allocate cmd\n");
		rc = -ENOMEM;
		kfree(cb_arg);
		goto cleanup_err;
	}
	/* Initialize rest of io_req fields */
	seq_clnp_req->sc_cmd = NULL;
	seq_clnp_req->port = port;
	seq_clnp_req->tgt = tgt;
	seq_clnp_req->data_xfer_len = 0; /* No data transfer for cleanup */

	xid = seq_clnp_req->xid;

	task_idx = xid/BNX2FC_TASKS_PER_PAGE;
	index = xid % BNX2FC_TASKS_PER_PAGE;

	/* Initialize task context for this IO request */
	task_page = (struct fcoe_task_ctx_entry *)
		     interface->hba->task_ctx[task_idx];
	task = &(task_page[index]);
	cb_arg->aborted_io_req = orig_io_req;
	cb_arg->io_req = seq_clnp_req;
	cb_arg->r_ctl = r_ctl;
	cb_arg->offset = offset;
	seq_clnp_req->cb_arg = cb_arg;

	printk(KERN_ERR PFX "call init_seq_cleanup_task\n");
	bnx2fc_init_seq_cleanup_task(seq_clnp_req, task, orig_io_req, offset);

	/* Obtain free SQ entry */
	bnx2fc_add_2_sq(tgt, xid);

	/* Ring doorbell */
	bnx2fc_ring_doorbell(tgt);
cleanup_err:
	return rc;
}

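/**
 * bnx2fc_initiate_cleanup - post a firmware cleanup task for an exchange
 *
 * @io_req:	IO request to be cleaned up in the firmware
 *
 * Called with tgt_lock held. Allocates a cleanup command referencing
 * the original XID, posts it to the SQ and marks the original request
 * as having a cleanup pending. Returns 0 on success.
 */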
int bnx2fc_initiate_cleanup(struct bnx2fc_cmd *io_req)
{
	struct bnx2fc_rport *tgt = io_req->tgt;
	struct bnx2fc_interface *interface;
	struct fcoe_port *port;
	struct bnx2fc_cmd *cleanup_io_req;
	struct fcoe_task_ctx_entry *task;
	struct fcoe_task_ctx_entry *task_page;
	int task_idx, index;
	u16 xid, orig_xid;
	int rc = 0;

	/* ASSUMPTION: called with tgt_lock held */
	BNX2FC_IO_DBG(io_req, "Entered bnx2fc_initiate_cleanup\n");

	port = io_req->port;
	interface = port->priv;

	cleanup_io_req = bnx2fc_elstm_alloc(tgt, BNX2FC_CLEANUP);
	if (!cleanup_io_req) {
		printk(KERN_ERR PFX "cleanup: couldn't allocate cmd\n");
		rc = -1;
		goto cleanup_err;
	}

	/* Initialize rest of io_req fields */
	cleanup_io_req->sc_cmd = NULL;
	cleanup_io_req->port = port;
	cleanup_io_req->tgt = tgt;
	cleanup_io_req->data_xfer_len = 0; /* No data transfer for cleanup */

	xid = cleanup_io_req->xid;

	task_idx = xid/BNX2FC_TASKS_PER_PAGE;
	index = xid % BNX2FC_TASKS_PER_PAGE;

	/* Initialize task context for this IO request */
	task_page = (struct fcoe_task_ctx_entry *)
			interface->hba->task_ctx[task_idx];
	task = &(task_page[index]);
	orig_xid = io_req->xid;

	BNX2FC_IO_DBG(io_req, "CLEANUP io_req xid = 0x%x\n", xid);

	bnx2fc_init_cleanup_task(cleanup_io_req, task, orig_xid);

	/* Obtain free SQ entry */
	bnx2fc_add_2_sq(tgt, xid);

	/* Set flag that cleanup request is pending with the firmware */
	set_bit(BNX2FC_FLAG_ISSUE_CLEANUP_REQ, &io_req->req_flags);

	/* Ring doorbell */
	bnx2fc_ring_doorbell(tgt);

cleanup_err:
	return rc;
}

/**
 * bnx2fc_eh_target_reset - Reset a target
 *
 * @sc_cmd:	SCSI command
 *
 * Called from the SCSI host template to send a task mgmt command
 *	to the target and wait for the response
 */
int bnx2fc_eh_target_reset(struct scsi_cmnd *sc_cmd)
{
	struct fc_rport *rport = starget_to_rport(scsi_target(sc_cmd->device));
	struct fc_lport *lport = shost_priv(rport_to_shost(rport));

	return bnx2fc_initiate_tmf(lport, rport, 0, FCP_TMF_TGT_RESET);
}

/**
 * bnx2fc_eh_device_reset - Reset a single LUN
 *
 * @sc_cmd:	SCSI command
 *
 * Called from the SCSI host template to send a task mgmt command
 *	to the target and wait for the response
 */
int bnx2fc_eh_device_reset(struct scsi_cmnd *sc_cmd)
{
	struct fc_rport *rport = starget_to_rport(scsi_target(sc_cmd->device));
	struct fc_lport *lport = shost_priv(rport_to_shost(rport));

	return bnx2fc_initiate_tmf(lport, rport, sc_cmd->device->lun,
				   FCP_TMF_LUN_RESET);
}

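/**
 * bnx2fc_abts_cleanup - flush an aborted IO through a firmware cleanup
 *
 * @io_req:	IO request to clean up
 *
 * Issues a cleanup request and waits (with tgt_lock dropped) for its
 * completion, bounded by BNX2FC_FW_TIMEOUT so the SCSI error handler
 * is never blocked indefinitely.
 */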
static int bnx2fc_abts_cleanup(struct bnx2fc_cmd *io_req)
	__must_hold(&tgt->tgt_lock)
{
	struct bnx2fc_rport *tgt = io_req->tgt;
	unsigned int time_left;

	init_completion(&io_req->cleanup_done);
	io_req->wait_for_cleanup_comp = 1;
	bnx2fc_initiate_cleanup(io_req);

	spin_unlock_bh(&tgt->tgt_lock);

	/*
	 * Can't wait forever on cleanup response lest we let the SCSI error
	 * handler wait forever
	 */
	time_left = wait_for_completion_timeout(&io_req->cleanup_done,
						BNX2FC_FW_TIMEOUT);
	if (!time_left) {
		BNX2FC_IO_DBG(io_req, "%s(): Wait for cleanup timed out.\n",
			      __func__);

		/*
		 * Put the extra reference to the SCSI command since it would
		 * not have been returned in this case.
		 */
		kref_put(&io_req->refcount, bnx2fc_cmd_release);
	}

	spin_lock_bh(&tgt->tgt_lock);
	io_req->wait_for_cleanup_comp = 0;
	return SUCCESS;
}

/**
 * bnx2fc_eh_abort - eh_abort_handler api to abort an outstanding
 *			SCSI command
 *
 * @sc_cmd:	SCSI_ML command pointer
 *
 * SCSI abort request handler
 */
int bnx2fc_eh_abort(struct scsi_cmnd *sc_cmd)
{
	struct fc_rport *rport = starget_to_rport(scsi_target(sc_cmd->device));
	struct fc_rport_libfc_priv *rp = rport->dd_data;
	struct bnx2fc_cmd *io_req;
	struct fc_lport *lport;
	struct bnx2fc_rport *tgt;
	int rc;
	unsigned int time_left;

	rc = fc_block_scsi_eh(sc_cmd);
	if (rc)
		return rc;

	lport = shost_priv(sc_cmd->device->host);
	if ((lport->state != LPORT_ST_READY) || !(lport->link_up)) {
		printk(KERN_ERR PFX "eh_abort: link not ready\n");
		return FAILED;
	}

	tgt = (struct bnx2fc_rport *)&rp[1];

	BNX2FC_TGT_DBG(tgt, "Entered bnx2fc_eh_abort\n");

	spin_lock_bh(&tgt->tgt_lock);
	io_req = bnx2fc_priv(sc_cmd)->io_req;
	if (!io_req) {
		/* Command might have just completed */
		printk(KERN_ERR PFX "eh_abort: io_req is NULL\n");
		spin_unlock_bh(&tgt->tgt_lock);
		return SUCCESS;
	}
	BNX2FC_IO_DBG(io_req, "eh_abort - refcnt = %d\n",
		      kref_read(&io_req->refcount));

	/* Hold IO request across abort processing */
	kref_get(&io_req->refcount);

	BUG_ON(tgt != io_req->tgt);

	/* Remove the io_req from the active_q. */
	/*
	 * Task Mgmt functions (LUN RESET & TGT RESET) will not
	 * issue an ABTS on this particular IO req, as the
	 * io_req is no longer in the active_q.
	 */
	if (tgt->flush_in_prog) {
		printk(KERN_ERR PFX "eh_abort: io_req (xid = 0x%x) "
			"flush in progress\n", io_req->xid);
		kref_put(&io_req->refcount, bnx2fc_cmd_release);
		spin_unlock_bh(&tgt->tgt_lock);
		return SUCCESS;
	}

	if (io_req->on_active_queue == 0) {
		printk(KERN_ERR PFX "eh_abort: io_req (xid = 0x%x) "
				"not on active_q\n", io_req->xid);
		/*
		 * The IO is still with the FW.
		 * Return failure and let SCSI-ml retry eh_abort.
		 */
		spin_unlock_bh(&tgt->tgt_lock);
		return FAILED;
	}

	/*
	 * Only eh_abort processing will remove the IO from
	 * active_cmd_q before processing the request. This is
	 * done to avoid race conditions between IOs aborted
	 * as part of task management completion and eh_abort
	 * processing
	 */
	list_del_init(&io_req->link);
	io_req->on_active_queue = 0;
	/* Move IO req to retire queue */
	list_add_tail(&io_req->link, &tgt->io_retire_queue);

	init_completion(&io_req->abts_done);
	init_completion(&io_req->cleanup_done);
	if (test_and_set_bit(BNX2FC_FLAG_ISSUE_ABTS, &io_req->req_flags)) {
		printk(KERN_ERR PFX "eh_abort: io_req (xid = 0x%x) "
				"already in abts processing\n", io_req->xid);
		if (cancel_delayed_work(&io_req->timeout_work))
			kref_put(&io_req->refcount,
				 bnx2fc_cmd_release); /* drop timer hold */
		/*
		 * We don't want to hold off the upper layer timer so simply
		 * cleanup the command and return that I/O was successfully
		 * aborted.
		 */
		bnx2fc_abts_cleanup(io_req);
		/*
		 * This only occurs when a task abort was requested while ABTS
		 * is in progress. Setting the IO_CLEANUP flag will skip the
		 * RRQ process in the case when the fw generated SCSI_CMD cmpl
		 * was a result from the ABTS request rather than the CLEANUP
		 * request.
		 */
		set_bit(BNX2FC_FLAG_IO_CLEANUP, &io_req->req_flags);
		rc = FAILED;
		goto done;
	}

	/* Cancel the current timer running on this io_req */
	if (cancel_delayed_work(&io_req->timeout_work))
		kref_put(&io_req->refcount,
			 bnx2fc_cmd_release); /* drop timer hold */
	set_bit(BNX2FC_FLAG_EH_ABORT, &io_req->req_flags);
	io_req->wait_for_abts_comp = 1;
	rc = bnx2fc_initiate_abts(io_req);
	if (rc == FAILED) {
		io_req->wait_for_cleanup_comp = 1;
		bnx2fc_initiate_cleanup(io_req);
		spin_unlock_bh(&tgt->tgt_lock);
		wait_for_completion(&io_req->cleanup_done);
		spin_lock_bh(&tgt->tgt_lock);
		io_req->wait_for_cleanup_comp = 0;
		goto done;
	}
	spin_unlock_bh(&tgt->tgt_lock);

	/* Wait 2 * RA_TOV + 1 to be sure timeout function hasn't fired */
	time_left = wait_for_completion_timeout(&io_req->abts_done,
					msecs_to_jiffies(2 * rp->r_a_tov + 1));
	if (!time_left)
		BNX2FC_IO_DBG(io_req,
			      "Timed out in eh_abort waiting for abts_done\n");

	spin_lock_bh(&tgt->tgt_lock);
	io_req->wait_for_abts_comp = 0;
	if (test_bit(BNX2FC_FLAG_IO_COMPL, &io_req->req_flags)) {
		BNX2FC_IO_DBG(io_req, "IO completed in a different context\n");
		rc = SUCCESS;
	} else if (!(test_and_set_bit(BNX2FC_FLAG_ABTS_DONE,
				      &io_req->req_flags))) {
		/* Let the scsi-ml try to recover this command */
		printk(KERN_ERR PFX "abort failed, xid = 0x%x\n",
		       io_req->xid);
		/*
		 * Cleanup firmware residuals before returning control back
		 * to SCSI ML.
		 */
		rc = bnx2fc_abts_cleanup(io_req);
		goto done;
	} else {
		/*
		 * We come here even when there was a race condition
		 * between timeout and abts completion, and abts
		 * completion happens just in time.
		 */
		BNX2FC_IO_DBG(io_req, "abort succeeded\n");
		rc = SUCCESS;
		bnx2fc_scsi_done(io_req, DID_ABORT);
		kref_put(&io_req->refcount, bnx2fc_cmd_release);
	}
done:
	/* release the reference taken in eh_abort */
	kref_put(&io_req->refcount, bnx2fc_cmd_release);
	spin_unlock_bh(&tgt->tgt_lock);
	return rc;
}

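/**
 * bnx2fc_process_seq_cleanup_compl - handle sequence cleanup completion
 *
 * @seq_clnp_req:	completed sequence cleanup command
 * @task:		firmware task context of the cleanup task
 * @rx_state:		RX state reported by the firmware
 *
 * Sends the SRR for the original exchange unless the firmware ignored
 * the cleanup, then drops the reference taken when it was issued.
 */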
void bnx2fc_process_seq_cleanup_compl(struct bnx2fc_cmd *seq_clnp_req,
				      struct fcoe_task_ctx_entry *task,
				      u8 rx_state)
{
	struct bnx2fc_els_cb_arg *cb_arg = seq_clnp_req->cb_arg;
	struct bnx2fc_cmd *orig_io_req = cb_arg->aborted_io_req;
	u32 offset = cb_arg->offset;
	enum fc_rctl r_ctl = cb_arg->r_ctl;
	int rc = 0;
	struct bnx2fc_rport *tgt = orig_io_req->tgt;

	BNX2FC_IO_DBG(orig_io_req, "Entered process_cleanup_compl xid = 0x%x, "
			      "cmd_type = %d\n",
		   seq_clnp_req->xid, seq_clnp_req->cmd_type);

	if (rx_state == FCOE_TASK_RX_STATE_IGNORED_SEQUENCE_CLEANUP) {
		printk(KERN_ERR PFX "seq cleanup ignored - xid = 0x%x\n",
			seq_clnp_req->xid);
		goto free_cb_arg;
	}

	spin_unlock_bh(&tgt->tgt_lock);
	rc = bnx2fc_send_srr(orig_io_req, offset, r_ctl);
	spin_lock_bh(&tgt->tgt_lock);

	if (rc)
		printk(KERN_ERR PFX "clnup_compl: Unable to send SRR, "
			"IO will abort\n");
	seq_clnp_req->cb_arg = NULL;
	kref_put(&orig_io_req->refcount, bnx2fc_cmd_release);
free_cb_arg:
	kfree(cb_arg);
	return;
}

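/**
 * bnx2fc_process_cleanup_compl - handle explicit cleanup completion
 *
 * @io_req:	original IO request the cleanup was issued for
 * @task:	firmware task context
 * @num_rq:	number of RQ buffers posted with the completion
 *
 * Clears the pending-cleanup state, fails the IO back to the midlayer
 * with DID_ERROR and wakes up any cleanup waiter.
 */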
void bnx2fc_process_cleanup_compl(struct bnx2fc_cmd *io_req,
				  struct fcoe_task_ctx_entry *task,
				  u8 num_rq)
{
	BNX2FC_IO_DBG(io_req, "Entered process_cleanup_compl "
			      "refcnt = %d, cmd_type = %d\n",
		   kref_read(&io_req->refcount), io_req->cmd_type);
	/*
	 * Test whether there is a cleanup request pending. If not just
	 * exit.
	 */
	if (!test_and_clear_bit(BNX2FC_FLAG_ISSUE_CLEANUP_REQ,
				&io_req->req_flags))
		return;
	/*
	 * If we receive a cleanup completion for this request then the
	 * firmware will not give us an abort completion for this request
	 * so clear any ABTS pending flags.
	 */
	if (test_bit(BNX2FC_FLAG_ISSUE_ABTS, &io_req->req_flags) &&
	    !test_bit(BNX2FC_FLAG_ABTS_DONE, &io_req->req_flags)) {
		set_bit(BNX2FC_FLAG_ABTS_DONE, &io_req->req_flags);
		if (io_req->wait_for_abts_comp)
			complete(&io_req->abts_done);
	}

	bnx2fc_scsi_done(io_req, DID_ERROR);
	kref_put(&io_req->refcount, bnx2fc_cmd_release);
	if (io_req->wait_for_cleanup_comp)
		complete(&io_req->cleanup_done);
}

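/**
 * bnx2fc_process_abts_compl - handle ABTS completion for an exchange
 *
 * @io_req:	original IO request that was aborted
 * @task:	firmware task context carrying the BLS response
 * @num_rq:	number of RQ buffers posted with the completion
 *
 * On BA_ACC schedules an RRQ after R_A_TOV and retires the OX_ID;
 * wakes an eh_abort waiter or errors the IO back to the midlayer.
 */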
void bnx2fc_process_abts_compl(struct bnx2fc_cmd *io_req,
			       struct fcoe_task_ctx_entry *task,
			       u8 num_rq)
{
	u32 r_ctl;
	u32 r_a_tov = FC_DEF_R_A_TOV;
	u8 issue_rrq = 0;
	struct bnx2fc_rport *tgt = io_req->tgt;

	BNX2FC_IO_DBG(io_req, "Entered process_abts_compl xid = 0x%x, "
			      "refcnt = %d, cmd_type = %d\n",
		   io_req->xid,
		   kref_read(&io_req->refcount), io_req->cmd_type);

	if (test_and_set_bit(BNX2FC_FLAG_ABTS_DONE,
				       &io_req->req_flags)) {
		BNX2FC_IO_DBG(io_req, "Timer context finished processing"
				" this io\n");
		return;
	}

	/*
	 * If we receive an ABTS completion here then we will not receive
	 * a cleanup completion so clear any cleanup pending flags.
	 */
	if (test_bit(BNX2FC_FLAG_ISSUE_CLEANUP_REQ, &io_req->req_flags)) {
		clear_bit(BNX2FC_FLAG_ISSUE_CLEANUP_REQ, &io_req->req_flags);
		if (io_req->wait_for_cleanup_comp)
			complete(&io_req->cleanup_done);
	}

	/* Do not issue RRQ as this IO is already cleaned up */
	if (test_and_set_bit(BNX2FC_FLAG_IO_CLEANUP,
				&io_req->req_flags))
		goto io_compl;

	/*
	 * For ABTS issued due to SCSI eh_abort_handler, timeout
	 * values are maintained by scsi-ml itself. Cancel timeout
	 * in case ABTS issued as part of task management function
	 * or due to FW error.
	 */
	if (test_bit(BNX2FC_FLAG_ISSUE_ABTS, &io_req->req_flags))
		if (cancel_delayed_work(&io_req->timeout_work))
			kref_put(&io_req->refcount,
				 bnx2fc_cmd_release); /* drop timer hold */

	r_ctl = (u8)task->rxwr_only.union_ctx.comp_info.abts_rsp.r_ctl;

	switch (r_ctl) {
	case FC_RCTL_BA_ACC:
		/*
		 * Don't release this cmd yet. It will be released
		 * after we get the RRQ response.
		 */
		BNX2FC_IO_DBG(io_req, "ABTS response - ACC Send RRQ\n");
		issue_rrq = 1;
		break;

	case FC_RCTL_BA_RJT:
		BNX2FC_IO_DBG(io_req, "ABTS response - RJT\n");
		break;
	default:
		printk(KERN_ERR PFX "Unknown ABTS response\n");
		break;
	}

	if (issue_rrq) {
		BNX2FC_IO_DBG(io_req, "Issue RRQ after R_A_TOV\n");
		set_bit(BNX2FC_FLAG_ISSUE_RRQ, &io_req->req_flags);
	}
	set_bit(BNX2FC_FLAG_RETIRE_OXID, &io_req->req_flags);
	bnx2fc_cmd_timer_set(io_req, r_a_tov);

io_compl:
	if (io_req->wait_for_abts_comp) {
		if (test_and_clear_bit(BNX2FC_FLAG_EH_ABORT,
				       &io_req->req_flags))
			complete(&io_req->abts_done);
	} else {
		/*
		 * We end up here when ABTS is issued in an
		 * asynchronous context, i.e., as part
		 * of task management completion, or
		 * when FW error is received or when the
		 * ABTS is issued when the IO is timed
		 * out.
		 */

		if (io_req->on_active_queue) {
			list_del_init(&io_req->link);
			io_req->on_active_queue = 0;
			/* Move IO req to retire queue */
			list_add_tail(&io_req->link, &tgt->io_retire_queue);
		}
		bnx2fc_scsi_done(io_req, DID_ERROR);
		kref_put(&io_req->refcount, bnx2fc_cmd_release);
	}
}

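/**
 * bnx2fc_lun_reset_cmpl - abort outstanding IOs after a LUN reset
 *
 * @io_req:	completed LUN reset TMF
 *
 * Called with tgt_lock held. Walks the active command queue and
 * issues an ABTS for every IO on the LUN that was reset.
 */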
static void bnx2fc_lun_reset_cmpl(struct bnx2fc_cmd *io_req)
{
	struct bnx2fc_rport *tgt = io_req->tgt;
	struct bnx2fc_cmd *cmd, *tmp;
	struct bnx2fc_mp_req *tm_req = &io_req->mp_req;
	u64 lun;
	int rc = 0;

	/* called with tgt_lock held */
	BNX2FC_IO_DBG(io_req, "Entered bnx2fc_lun_reset_cmpl\n");
	/*
	 * Walk through the active_ios queue and ABORT the IOs
	 * that match the LUN that was reset
	 */
	list_for_each_entry_safe(cmd, tmp, &tgt->active_cmd_queue, link) {
		BNX2FC_TGT_DBG(tgt, "LUN RST cmpl: scan for pending IOs\n");
		if (!cmd->sc_cmd)
			continue;
		lun = cmd->sc_cmd->device->lun;
		if (lun == tm_req->tm_lun) {
			/* Initiate ABTS on this cmd */
			if (!test_and_set_bit(BNX2FC_FLAG_ISSUE_ABTS,
					      &cmd->req_flags)) {
				/* cancel the IO timeout */
				if (cancel_delayed_work(&io_req->timeout_work))
					kref_put(&io_req->refcount,
						 bnx2fc_cmd_release);
							/* timer hold */
				rc = bnx2fc_initiate_abts(cmd);
				/* abts shouldn't fail in this context */
				WARN_ON(rc != SUCCESS);
			} else
				printk(KERN_ERR PFX "lun_rst: abts already in"
					" progress for this IO 0x%x\n",
					cmd->xid);
		}
	}
}

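/**
 * bnx2fc_tgt_reset_cmpl - abort outstanding IOs after a target reset
 *
 * @io_req:	completed target reset TMF
 *
 * Called with tgt_lock held. Walks the active command queue and
 * issues an ABTS for every IO outstanding on the target that was
 * reset.
 */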
static void bnx2fc_tgt_reset_cmpl(struct bnx2fc_cmd *io_req)
{
	struct bnx2fc_rport *tgt = io_req->tgt;
	struct bnx2fc_cmd *cmd, *tmp;
	int rc = 0;

	/* called with tgt_lock held */
	BNX2FC_IO_DBG(io_req, "Entered bnx2fc_tgt_reset_cmpl\n");
	/*
	 * Walk through the active_ios queue and ABORT all the IOs
	 * outstanding on the target that was reset
	 */
	list_for_each_entry_safe(cmd, tmp, &tgt->active_cmd_queue, link) {
		BNX2FC_TGT_DBG(tgt, "TGT RST cmpl: scan for pending IOs\n");
		/* Initiate ABTS */
		if (!test_and_set_bit(BNX2FC_FLAG_ISSUE_ABTS,
							&cmd->req_flags)) {
			/* cancel the IO timeout */
			if (cancel_delayed_work(&io_req->timeout_work))
				kref_put(&io_req->refcount,
					 bnx2fc_cmd_release); /* timer hold */
			rc = bnx2fc_initiate_abts(cmd);
			/* abts shouldn't fail in this context */
			WARN_ON(rc != SUCCESS);
		} else
			printk(KERN_ERR PFX "tgt_rst: abts already in progress"
				" for this IO 0x%x\n", cmd->xid);
	}
}

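/**
 * bnx2fc_process_tm_compl - handle task management completion
 *
 * @io_req:	completed TMF request
 * @task:	firmware task context carrying the FCP_RSP
 * @num_rq:	number of RQ buffers posted with the completion
 * @rq_data:	RQ buffer contents (FCP_RSP_INFO/sense data)
 *
 * Called with tgt_lock held. Parses the FCP response, triggers the
 * post-reset ABTS sweep on success and completes the attached SCSI
 * command, waking any TMF waiter.
 */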
void bnx2fc_process_tm_compl(struct bnx2fc_cmd *io_req,
			     struct fcoe_task_ctx_entry *task, u8 num_rq,
				  unsigned char *rq_data)
{
	struct bnx2fc_mp_req *tm_req;
	struct fc_frame_header *fc_hdr;
	struct scsi_cmnd *sc_cmd = io_req->sc_cmd;
	u64 *hdr;
	u64 *temp_hdr;
	void *rsp_buf;

	/* Called with tgt_lock held */
	BNX2FC_IO_DBG(io_req, "Entered process_tm_compl\n");

	if (!(test_bit(BNX2FC_FLAG_TM_TIMEOUT, &io_req->req_flags)))
		set_bit(BNX2FC_FLAG_TM_COMPL, &io_req->req_flags);
	else {
		/* TM has already timed out and we got
		 * delayed completion. Ignore completion
		 * processing.
		 */
		return;
	}

	tm_req = &(io_req->mp_req);
	fc_hdr = &(tm_req->resp_fc_hdr);
	hdr = (u64 *)fc_hdr;
	temp_hdr = (u64 *)
		&task->rxwr_only.union_ctx.comp_info.mp_rsp.fc_hdr;
	hdr[0] = cpu_to_be64(temp_hdr[0]);
	hdr[1] = cpu_to_be64(temp_hdr[1]);
	hdr[2] = cpu_to_be64(temp_hdr[2]);

	tm_req->resp_len =
		task->rxwr_only.union_ctx.comp_info.mp_rsp.mp_payload_len;

	rsp_buf = tm_req->resp_buf;

	if (fc_hdr->fh_r_ctl == FC_RCTL_DD_CMD_STATUS) {
		bnx2fc_parse_fcp_rsp(io_req,
				     (struct fcoe_fcp_rsp_payload *)
				     rsp_buf, num_rq, rq_data);
		if (io_req->fcp_rsp_code == 0) {
			/* TM successful */
			if (tm_req->tm_flags & FCP_TMF_LUN_RESET)
				bnx2fc_lun_reset_cmpl(io_req);
			else if (tm_req->tm_flags & FCP_TMF_TGT_RESET)
				bnx2fc_tgt_reset_cmpl(io_req);
		}
	} else {
		printk(KERN_ERR PFX "tmf's fc_hdr r_ctl = 0x%x\n",
			fc_hdr->fh_r_ctl);
	}
	if (sc_cmd) {
		if (!bnx2fc_priv(sc_cmd)->io_req) {
			printk(KERN_ERR PFX "tm_compl: io_req is NULL\n");
			return;
		}
		switch (io_req->fcp_status) {
		case FC_GOOD:
			if (io_req->cdb_status == 0) {
				/* Good IO completion */
				sc_cmd->result = DID_OK << 16;
			} else {
				/* Transport status is good, SCSI status not good */
				sc_cmd->result = (DID_OK << 16) | io_req->cdb_status;
			}
			if (io_req->fcp_resid)
				scsi_set_resid(sc_cmd, io_req->fcp_resid);
			break;

		default:
			BNX2FC_IO_DBG(io_req, "process_tm_compl: fcp_status = %d\n",
				      io_req->fcp_status);
			break;
		}

		sc_cmd = io_req->sc_cmd;
		io_req->sc_cmd = NULL;

		bnx2fc_priv(sc_cmd)->io_req = NULL;
		scsi_done(sc_cmd);
	}

	/* check if the io_req exists in tgt's tmf_q */
	if (io_req->on_tmf_queue) {
		list_del_init(&io_req->link);
		io_req->on_tmf_queue = 0;
	} else {
		printk(KERN_ERR PFX "Command not on active_tm_queue!\n");
		return;
	}

	kref_put(&io_req->refcount, bnx2fc_cmd_release);
	if (io_req->wait_for_abts_comp) {
		BNX2FC_IO_DBG(io_req, "tm_compl - wake up the waiter\n");
		complete(&io_req->abts_done);
	}
}

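/**
 * bnx2fc_split_bd - split a large SG element into multiple BDs
 *
 * @io_req:	IO request owning the BD table
 * @addr:	DMA address of the scatterlist element
 * @sg_len:	length of the scatterlist element
 * @bd_index:	first free slot in the BD table
 *
 * Each fragment is capped at BNX2FC_BD_SPLIT_SZ. Returns the number
 * of BD entries consumed.
 */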
static int bnx2fc_split_bd(struct bnx2fc_cmd *io_req, u64 addr, int sg_len,
			   int bd_index)
{
	struct fcoe_bd_ctx *bd = io_req->bd_tbl->bd_tbl;
	int frag_size, sg_frags;

	sg_frags = 0;
	while (sg_len) {
		if (sg_len >= BNX2FC_BD_SPLIT_SZ)
			frag_size = BNX2FC_BD_SPLIT_SZ;
		else
			frag_size = sg_len;
		bd[bd_index + sg_frags].buf_addr_lo = addr & 0xffffffff;
		bd[bd_index + sg_frags].buf_addr_hi  = addr >> 32;
		bd[bd_index + sg_frags].buf_len = (u16)frag_size;
		bd[bd_index + sg_frags].flags = 0;

		addr += (u64) frag_size;
		sg_frags++;
		sg_len -= frag_size;
	}
	return sg_frags;
}

static int bnx2fc_map_sg(struct bnx2fc_cmd *io_req)
{
	struct bnx2fc_interface *interface = io_req->port->priv;
	struct bnx2fc_hba *hba = interface->hba;
	struct scsi_cmnd *sc = io_req->sc_cmd;
	struct fcoe_bd_ctx *bd = io_req->bd_tbl->bd_tbl;
	struct scatterlist *sg;
	int byte_count = 0;
	int sg_count = 0;
	int bd_count = 0;
	int sg_frags;
	unsigned int sg_len;
	u64 addr;
	int i;

	WARN_ON(scsi_sg_count(sc) > BNX2FC_MAX_BDS_PER_CMD);
	/*
	 * Use dma_map_sg directly to ensure we're using the correct
	 * dev struct off of pcidev.
	 */
	sg_count = dma_map_sg(&hba->pcidev->dev, scsi_sglist(sc),
			      scsi_sg_count(sc), sc->sc_data_direction);
	scsi_for_each_sg(sc, sg, sg_count, i) {
		sg_len = sg_dma_len(sg);
		addr = sg_dma_address(sg);
		if (sg_len > BNX2FC_MAX_BD_LEN) {
			sg_frags = bnx2fc_split_bd(io_req, addr, sg_len,
						   bd_count);
		} else {
			sg_frags = 1;
			bd[bd_count].buf_addr_lo = addr & 0xffffffff;
			bd[bd_count].buf_addr_hi  = addr >> 32;
			bd[bd_count].buf_len = (u16)sg_len;
			bd[bd_count].flags = 0;
		}
		bd_count += sg_frags;
		byte_count += sg_len;
	}
	if (byte_count != scsi_bufflen(sc))
		printk(KERN_ERR PFX "byte_count = %d != scsi_bufflen = %d, "
			"task_id = 0x%x\n", byte_count, scsi_bufflen(sc),
			io_req->xid);
	return bd_count;
}

static int bnx2fc_build_bd_list_from_sg(struct bnx2fc_cmd *io_req)
{
	struct scsi_cmnd *sc = io_req->sc_cmd;
	struct fcoe_bd_ctx *bd = io_req->bd_tbl->bd_tbl;
	int bd_count;

	if (scsi_sg_count(sc)) {
		bd_count = bnx2fc_map_sg(io_req);
		if (bd_count == 0)
			return -ENOMEM;
	} else {
		bd_count = 0;
		bd[0].buf_addr_lo = bd[0].buf_addr_hi = 0;
		bd[0].buf_len = bd[0].flags = 0;
	}
	io_req->bd_tbl->bd_valid = bd_count;

	/*
	 * Return the command to ML if BD count exceeds the max number
	 * that can be handled by FW.
	 */
	if (bd_count > BNX2FC_FW_MAX_BDS_PER_CMD) {
		pr_err("bd_count = %d exceeded FW supported max BD(255), task_id = 0x%x\n",
		       bd_count, io_req->xid);
		return -ENOMEM;
	}

	return 0;
}

static void bnx2fc_unmap_sg_list(struct bnx2fc_cmd *io_req)
{
	struct scsi_cmnd *sc = io_req->sc_cmd;
	struct bnx2fc_interface *interface = io_req->port->priv;
	struct bnx2fc_hba *hba = interface->hba;

	/*
	 * Use dma_unmap_sg directly to ensure we're using the correct
	 * dev struct off of pcidev.
	 */
	if (io_req->bd_tbl->bd_valid && sc && scsi_sg_count(sc)) {
		dma_unmap_sg(&hba->pcidev->dev, scsi_sglist(sc),
		    scsi_sg_count(sc), sc->sc_data_direction);
		io_req->bd_tbl->bd_valid = 0;
	}
}

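/**
 * bnx2fc_build_fcp_cmnd - initialize the FCP_CMND payload for a request
 *
 * @io_req:	IO request the FCP_CMND belongs to
 * @fcp_cmnd:	payload buffer to fill
 *
 * Zeroes the payload, then sets the data length, TM flags and task
 * attributes; the LUN and CDB are filled in by the caller.
 */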
void bnx2fc_build_fcp_cmnd(struct bnx2fc_cmd *io_req,
				  struct fcp_cmnd *fcp_cmnd)
{
	memset(fcp_cmnd, 0, sizeof(struct fcp_cmnd));

	fcp_cmnd->fc_dl = htonl(io_req->data_xfer_len);
	fcp_cmnd->fc_cmdref = 0;
	fcp_cmnd->fc_pri_ta = 0;
	fcp_cmnd->fc_tm_flags = io_req->mp_req.tm_flags;
	fcp_cmnd->fc_flags = io_req->io_req_flags;
	fcp_cmnd->fc_pri_ta = FCP_PTA_SIMPLE;
}

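/**
 * bnx2fc_parse_fcp_rsp - extract status, residual and sense data
 *
 * @io_req:	IO request the response belongs to
 * @fcp_rsp:	FCP_RSP payload from the task context
 * @num_rq:	number of RQ buffers holding FCP_RSP_INFO/sense data
 * @rq_data:	contents of those RQ buffers
 *
 * Caches the SCSI status, residual count, response code and sense
 * data on @io_req for the completion path.
 */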
static void bnx2fc_parse_fcp_rsp(struct bnx2fc_cmd *io_req,
				 struct fcoe_fcp_rsp_payload *fcp_rsp,
				 u8 num_rq, unsigned char *rq_data)
{
	struct scsi_cmnd *sc_cmd = io_req->sc_cmd;
	u8 rsp_flags = fcp_rsp->fcp_flags.flags;
	u32 rq_buff_len = 0;
	int fcp_sns_len = 0;
	int fcp_rsp_len = 0;

	io_req->fcp_status = FC_GOOD;
	io_req->fcp_resid = 0;
	if (rsp_flags & (FCOE_FCP_RSP_FLAGS_FCP_RESID_OVER |
	    FCOE_FCP_RSP_FLAGS_FCP_RESID_UNDER))
		io_req->fcp_resid = fcp_rsp->fcp_resid;

	io_req->scsi_comp_flags = rsp_flags;
	io_req->cdb_status = fcp_rsp->scsi_status_code;

	/* Fetch fcp_rsp_info and fcp_sns_info if available */
	if (num_rq) {
		/*
		 * We do not anticipate num_rq > 1, as the Linux-defined
		 * SCSI_SENSE_BUFFERSIZE is 96 bytes + 8 bytes of FCP_RSP_INFO.
		 * A single 256-byte rq buffer is good enough to hold this.
		 */

		if (rsp_flags &
		    FCOE_FCP_RSP_FLAGS_FCP_RSP_LEN_VALID) {
			fcp_rsp_len = rq_buff_len
					= fcp_rsp->fcp_rsp_len;
		}

		if (rsp_flags &
		    FCOE_FCP_RSP_FLAGS_FCP_SNS_LEN_VALID) {
			fcp_sns_len = fcp_rsp->fcp_sns_len;
			rq_buff_len += fcp_rsp->fcp_sns_len;
		}

		io_req->fcp_rsp_len = fcp_rsp_len;
		io_req->fcp_sns_len = fcp_sns_len;

		if (rq_buff_len > num_rq * BNX2FC_RQ_BUF_SZ) {
			/* Invalid sense length. */
			printk(KERN_ERR PFX "invalid sns length %d\n",
				rq_buff_len);
			/* reset rq_buff_len */
			rq_buff_len =  num_rq * BNX2FC_RQ_BUF_SZ;
		}

		/* fetch fcp_rsp_code */
		if ((fcp_rsp_len == 4) || (fcp_rsp_len == 8)) {
			/* Only for task management function */
			io_req->fcp_rsp_code = rq_data[3];
			BNX2FC_IO_DBG(io_req, "fcp_rsp_code = %d\n",
				io_req->fcp_rsp_code);
		}

		/* fetch sense data */
		rq_data += fcp_rsp_len;

		if (fcp_sns_len > SCSI_SENSE_BUFFERSIZE) {
			printk(KERN_ERR PFX "Truncating sense buffer\n");
			fcp_sns_len = SCSI_SENSE_BUFFERSIZE;
		}

		memset(sc_cmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
		if (fcp_sns_len)
			memcpy(sc_cmd->sense_buffer, rq_data, fcp_sns_len);
	}
}

1831/**
1832 * bnx2fc_queuecommand - Queuecommand function of the scsi template
1833 *
1834 * @host:	The Scsi_Host the command was issued to
1835 * @sc_cmd:	struct scsi_cmnd to be executed
1836 *
1837 * This is the IO strategy routine, called by SCSI-ML
1838 **/
1839int bnx2fc_queuecommand(struct Scsi_Host *host,
1840			struct scsi_cmnd *sc_cmd)
1841{
1842	struct fc_lport *lport = shost_priv(host);
1843	struct fc_rport *rport = starget_to_rport(scsi_target(sc_cmd->device));
1844	struct fc_rport_libfc_priv *rp = rport->dd_data;
1845	struct bnx2fc_rport *tgt;
1846	struct bnx2fc_cmd *io_req;
1847	int rc = 0;
1848	int rval;
1849
1850	rval = fc_remote_port_chkready(rport);
1851	if (rval) {
1852		sc_cmd->result = rval;
1853		scsi_done(sc_cmd);
1854		return 0;
1855	}
1856
1857	if ((lport->state != LPORT_ST_READY) || !(lport->link_up)) {
1858		rc = SCSI_MLQUEUE_HOST_BUSY;
1859		goto exit_qcmd;
1860	}
1861
1862	/* rport and tgt are allocated together, so tgt should be non-NULL */
1863	tgt = (struct bnx2fc_rport *)&rp[1];
1864
1865	if (!test_bit(BNX2FC_FLAG_SESSION_READY, &tgt->flags)) {
1866		/*
1867		 * Session is not offloaded yet. Let SCSI-ml retry
1868		 * the command.
1869		 */
1870		rc = SCSI_MLQUEUE_TARGET_BUSY;
1871		goto exit_qcmd;
1872	}
1873	if (tgt->retry_delay_timestamp) {
1874		if (time_after(jiffies, tgt->retry_delay_timestamp)) {
1875			tgt->retry_delay_timestamp = 0;
1876		} else {
1877			/* If retry_delay timer is active, flow off the ML */
1878			rc = SCSI_MLQUEUE_TARGET_BUSY;
1879			goto exit_qcmd;
1880		}
1881	}
1882
1883	spin_lock_bh(&tgt->tgt_lock);
1884
1885	io_req = bnx2fc_cmd_alloc(tgt);
1886	if (!io_req) {
1887		rc = SCSI_MLQUEUE_HOST_BUSY;
1888		goto exit_qcmd_tgtlock;
1889	}
1890	io_req->sc_cmd = sc_cmd;
1891
1892	if (bnx2fc_post_io_req(tgt, io_req)) {
1893		printk(KERN_ERR PFX "Unable to post io_req\n");
1894		rc = SCSI_MLQUEUE_HOST_BUSY;
1895		goto exit_qcmd_tgtlock;
1896	}
1897
1898exit_qcmd_tgtlock:
1899	spin_unlock_bh(&tgt->tgt_lock);
1900exit_qcmd:
1901	return rc;
1902}
1903
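/**
 * bnx2fc_process_scsi_cmd_compl - Process a SCSI command completion
 *
 * @io_req:	struct bnx2fc_cmd that has completed
 * @task:	firmware task context entry holding the FCP_RSP payload
 * @num_rq:	number of RQ buffer entries carrying rsp_info/sense data
 * @rq_data:	contents of the RQ buffer entries, if any
 *
 * Called with the tgt_lock held.
 **/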
void bnx2fc_process_scsi_cmd_compl(struct bnx2fc_cmd *io_req,
				   struct fcoe_task_ctx_entry *task,
				   u8 num_rq, unsigned char *rq_data)
{
	struct fcoe_fcp_rsp_payload *fcp_rsp;
	struct bnx2fc_rport *tgt = io_req->tgt;
	struct scsi_cmnd *sc_cmd;
	u16 scope = 0, qualifier = 0;

	/* scsi_cmd_compl is called with the tgt lock held */

	if (test_and_set_bit(BNX2FC_FLAG_IO_COMPL, &io_req->req_flags)) {
		/* we will not receive an ABTS response for this IO */
		BNX2FC_IO_DBG(io_req, "Timer context finished processing "
			   "this scsi cmd\n");
		if (test_and_clear_bit(BNX2FC_FLAG_IO_CLEANUP,
				       &io_req->req_flags)) {
			BNX2FC_IO_DBG(io_req,
				      "Actual completion after cleanup request; cleaning up\n");
			bnx2fc_process_cleanup_compl(io_req, task, num_rq);
		}
		return;
	}

	/* Cancel the timeout_work, as we received the IO completion */
	if (cancel_delayed_work(&io_req->timeout_work))
		kref_put(&io_req->refcount,
			 bnx2fc_cmd_release); /* drop timer hold */

	sc_cmd = io_req->sc_cmd;
	if (sc_cmd == NULL) {
		printk(KERN_ERR PFX "scsi_cmd_compl - sc_cmd is NULL\n");
		return;
	}

	/* Fetch fcp_rsp from task context and perform cmd completion */
	fcp_rsp = (struct fcoe_fcp_rsp_payload *)
		   &(task->rxwr_only.union_ctx.comp_info.fcp_rsp.payload);

	/* parse fcp_rsp and obtain sense data from RQ if available */
	bnx2fc_parse_fcp_rsp(io_req, fcp_rsp, num_rq, rq_data);

	if (!bnx2fc_priv(sc_cmd)->io_req) {
		printk(KERN_ERR PFX "io_req is NULL\n");
		return;
	}

	if (io_req->on_active_queue) {
		list_del_init(&io_req->link);
		io_req->on_active_queue = 0;
		/* Move IO req to retire queue */
		list_add_tail(&io_req->link, &tgt->io_retire_queue);
	} else {
		/* This should not happen, but the IO could have been
		 * pulled by bnx2fc_flush_active_ios(), or during a race
		 * between command abort and (late) completion.
		 */
		BNX2FC_IO_DBG(io_req, "xid not on active_cmd_queue\n");
		if (io_req->wait_for_abts_comp)
			if (test_and_clear_bit(BNX2FC_FLAG_EH_ABORT,
					       &io_req->req_flags))
				complete(&io_req->abts_done);
	}

	bnx2fc_unmap_sg_list(io_req);
	io_req->sc_cmd = NULL;

	switch (io_req->fcp_status) {
	case FC_GOOD:
		if (io_req->cdb_status == 0) {
			/* Good IO completion */
			sc_cmd->result = DID_OK << 16;
		} else {
			/* Transport status is good, SCSI status not good */
			BNX2FC_IO_DBG(io_req, "scsi_cmpl: cdb_status = %d"
				 " fcp_resid = 0x%x\n",
				io_req->cdb_status, io_req->fcp_resid);
			sc_cmd->result = (DID_OK << 16) | io_req->cdb_status;

			if (io_req->cdb_status == SAM_STAT_TASK_SET_FULL ||
			    io_req->cdb_status == SAM_STAT_BUSY) {
				/* Newer array firmware may set
				 * retry_delay_timer together with BUSY or
				 * TASK_SET_FULL. The scope bits must be
				 * masked off the qualifier, or a huge
				 * retry delay of up to 27 minutes can
				 * result.
				 */
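				/*
				 * For example, retry_delay_timer == 0x8032
				 * gives scope = 0x8000 and qualifier =
				 * 0x32 (50), i.e. a retry delay of
				 * 50 * 100ms = 5 seconds.
				 */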
				if (fcp_rsp->retry_delay_timer) {
					/* Upper 2 bits */
					scope = fcp_rsp->retry_delay_timer
						& 0xC000;
					/* Lower 14 bits */
					qualifier = fcp_rsp->retry_delay_timer
						& 0x3FFF;
				}
				if (scope > 0 && qualifier > 0 &&
				    qualifier <= 0x3FEF) {
					/*
					 * Flow off this rport/tgt until
					 * jiffies + qualifier * 100ms.
					 */
					tgt->retry_delay_timestamp = jiffies +
						(qualifier * HZ / 10);
				}
			}
		}
		if (io_req->fcp_resid)
			scsi_set_resid(sc_cmd, io_req->fcp_resid);
		break;
	default:
		printk(KERN_ERR PFX "scsi_cmd_compl: fcp_status = %d\n",
			io_req->fcp_status);
		break;
	}
	bnx2fc_priv(sc_cmd)->io_req = NULL;
	scsi_done(sc_cmd);
	kref_put(&io_req->refcount, bnx2fc_cmd_release);
}

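/**
 * bnx2fc_post_io_req - Post a SCSI IO request to the hardware
 *
 * @tgt:	struct bnx2fc_rport the IO is destined for
 * @io_req:	struct bnx2fc_cmd set up by the caller
 *
 * Builds the BD list and task context, queues the request on the
 * active_cmd_queue and rings the doorbell. Called with the tgt_lock held.
 **/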
int bnx2fc_post_io_req(struct bnx2fc_rport *tgt,
		       struct bnx2fc_cmd *io_req)
{
	struct fcoe_task_ctx_entry *task;
	struct fcoe_task_ctx_entry *task_page;
	struct scsi_cmnd *sc_cmd = io_req->sc_cmd;
	struct fcoe_port *port = tgt->port;
	struct bnx2fc_interface *interface = port->priv;
	struct bnx2fc_hba *hba = interface->hba;
	struct fc_lport *lport = port->lport;
	int task_idx, index;
	u16 xid;

	/* bnx2fc_post_io_req() is called with the tgt_lock held */

	/* Initialize the rest of the io_req fields */
	io_req->cmd_type = BNX2FC_SCSI_CMD;
	io_req->port = port;
	io_req->tgt = tgt;
	io_req->data_xfer_len = scsi_bufflen(sc_cmd);
	bnx2fc_priv(sc_cmd)->io_req = io_req;

	if (sc_cmd->sc_data_direction == DMA_FROM_DEVICE) {
		io_req->io_req_flags = BNX2FC_READ;
		this_cpu_inc(lport->stats->InputRequests);
		this_cpu_add(lport->stats->InputBytes, io_req->data_xfer_len);
	} else if (sc_cmd->sc_data_direction == DMA_TO_DEVICE) {
		io_req->io_req_flags = BNX2FC_WRITE;
		this_cpu_inc(lport->stats->OutputRequests);
		this_cpu_add(lport->stats->OutputBytes, io_req->data_xfer_len);
	} else {
		io_req->io_req_flags = 0;
		this_cpu_inc(lport->stats->ControlRequests);
	}

	xid = io_req->xid;

	/* Build the buffer descriptor list for firmware from the sg list */
	if (bnx2fc_build_bd_list_from_sg(io_req)) {
		printk(KERN_ERR PFX "BD list creation failed\n");
		kref_put(&io_req->refcount, bnx2fc_cmd_release);
		return -EAGAIN;
	}

	task_idx = xid / BNX2FC_TASKS_PER_PAGE;
	index = xid % BNX2FC_TASKS_PER_PAGE;
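	/*
	 * e.g. if BNX2FC_TASKS_PER_PAGE were 256, xid 0x105 (261) would
	 * select entry 5 of task context page 1.
	 */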

	/* Initialize the task context for this IO request */
	task_page = (struct fcoe_task_ctx_entry *) hba->task_ctx[task_idx];
	task = &(task_page[index]);
	bnx2fc_init_task(io_req, task);

	if (tgt->flush_in_prog) {
		printk(KERN_ERR PFX "Flush in progress. Host busy\n");
		kref_put(&io_req->refcount, bnx2fc_cmd_release);
		return -EAGAIN;
	}

	if (!test_bit(BNX2FC_FLAG_SESSION_READY, &tgt->flags)) {
		printk(KERN_ERR PFX "Session not ready. Unable to post IO\n");
		kref_put(&io_req->refcount, bnx2fc_cmd_release);
		return -EAGAIN;
	}

	/* Time the IO request */
	if (tgt->io_timeout)
		bnx2fc_cmd_timer_set(io_req, BNX2FC_IO_TIMEOUT);

	/* Obtain a free SQ entry */
	bnx2fc_add_2_sq(tgt, xid);

	/* Enqueue the io_req on the active_cmd_queue */
	io_req->on_active_queue = 1;
	list_add_tail(&io_req->link, &tgt->active_cmd_queue);

	/* Ring the doorbell */
	bnx2fc_ring_doorbell(tgt);
	return 0;
}
