// SPDX-License-Identifier: GPL-2.0-only
/*
 * Aic94xx Task Management Functions
 *
 * Copyright (C) 2005 Adaptec, Inc.  All rights reserved.
 * Copyright (C) 2005 Luben Tuikov <luben_tuikov@adaptec.com>
 */

#include <linux/spinlock.h>
#include <linux/gfp.h>
#include "aic94xx.h"
#include "aic94xx_sas.h"
#include "aic94xx_hwi.h"

/* ---------- Internal enqueue ---------- */

static int asd_enqueue_internal(struct asd_ascb *ascb,
		void (*tasklet_complete)(struct asd_ascb *,
					 struct done_list_struct *),
				void (*timed_out)(struct timer_list *t))
{
	int res;

	ascb->tasklet_complete = tasklet_complete;
	ascb->uldd_timer = 1;

	ascb->timer.function = timed_out;
	ascb->timer.expires = jiffies + AIC94XX_SCB_TIMEOUT;

	add_timer(&ascb->timer);

	res = asd_post_ascb_list(ascb->ha, ascb, 1);
	if (unlikely(res))
		del_timer(&ascb->timer);
	return res;
}

/* ---------- CLEAR NEXUS ---------- */

struct tasklet_completion_status {
	int	dl_opcode;
	int	tmf_state;
	u8	tag_valid:1;
	__be16	tag;
};

#define DECLARE_TCS(tcs) \
	struct tasklet_completion_status tcs = { \
		.dl_opcode = 0, \
		.tmf_state = 0, \
		.tag_valid = 0, \
		.tag = 0, \
	}

static void asd_clear_nexus_tasklet_complete(struct asd_ascb *ascb,
					     struct done_list_struct *dl)
{
	struct tasklet_completion_status *tcs = ascb->uldd_task;

	ASD_DPRINTK("%s: here\n", __func__);
	if (!del_timer(&ascb->timer)) {
		ASD_DPRINTK("%s: couldn't delete timer\n", __func__);
		return;
	}
	ASD_DPRINTK("%s: opcode: 0x%x\n", __func__, dl->opcode);
	tcs->dl_opcode = dl->opcode;
	complete(ascb->completion);
	asd_ascb_free(ascb);
}

static void asd_clear_nexus_timedout(struct timer_list *t)
{
	struct asd_ascb *ascb = from_timer(ascb, t, timer);
	struct tasklet_completion_status *tcs = ascb->uldd_task;

	ASD_DPRINTK("%s: here\n", __func__);
	tcs->dl_opcode = TMF_RESP_FUNC_FAILED;
	complete(ascb->completion);
}

#define CLEAR_NEXUS_PRE         \
	struct asd_ascb *ascb; \
	struct scb *scb; \
	int res; \
	DECLARE_COMPLETION_ONSTACK(completion); \
	DECLARE_TCS(tcs); \
		\
	ASD_DPRINTK("%s: PRE\n", __func__); \
	res = 1;                \
	ascb = asd_ascb_alloc_list(asd_ha, &res, GFP_KERNEL); \
	if (!ascb)              \
		return -ENOMEM; \
				\
	ascb->completion = &completion; \
	ascb->uldd_task = &tcs; \
	scb = ascb->scb;        \
	scb->header.opcode = CLEAR_NEXUS

#define CLEAR_NEXUS_POST        \
	ASD_DPRINTK("%s: POST\n", __func__); \
	res = asd_enqueue_internal(ascb, asd_clear_nexus_tasklet_complete, \
				   asd_clear_nexus_timedout);              \
	if (res)                \
		goto out_err;   \
	ASD_DPRINTK("%s: clear nexus posted, waiting...\n", __func__); \
	wait_for_completion(&completion); \
	res = tcs.dl_opcode; \
	if (res == TC_NO_ERROR) \
		res = TMF_RESP_FUNC_COMPLETE;   \
	return res; \
out_err:                        \
	asd_ascb_free(ascb);    \
	return res

int asd_clear_nexus_ha(struct sas_ha_struct *sas_ha)
{
	struct asd_ha_struct *asd_ha = sas_ha->lldd_ha;

	CLEAR_NEXUS_PRE;
	scb->clear_nexus.nexus = NEXUS_ADAPTER;
	CLEAR_NEXUS_POST;
}

int asd_clear_nexus_port(struct asd_sas_port *port)
{
	struct asd_ha_struct *asd_ha = port->ha->lldd_ha;

	CLEAR_NEXUS_PRE;
	scb->clear_nexus.nexus = NEXUS_PORT;
	scb->clear_nexus.conn_mask = port->phy_mask;
	CLEAR_NEXUS_POST;
}

enum clear_nexus_phase {
	NEXUS_PHASE_PRE,
	NEXUS_PHASE_POST,
	NEXUS_PHASE_RESUME,
};

static int asd_clear_nexus_I_T(struct domain_device *dev,
			       enum clear_nexus_phase phase)
{
	struct asd_ha_struct *asd_ha = dev->port->ha->lldd_ha;

	CLEAR_NEXUS_PRE;
	scb->clear_nexus.nexus = NEXUS_I_T;
	switch (phase) {
	case NEXUS_PHASE_PRE:
		scb->clear_nexus.flags = EXEC_Q | SUSPEND_TX;
		break;
	case NEXUS_PHASE_POST:
		scb->clear_nexus.flags = SEND_Q | NOTINQ;
		break;
	case NEXUS_PHASE_RESUME:
		scb->clear_nexus.flags = RESUME_TX;
	}
	scb->clear_nexus.conn_handle = cpu_to_le16((u16)(unsigned long)
						   dev->lldd_dev);
	CLEAR_NEXUS_POST;
}

int asd_I_T_nexus_reset(struct domain_device *dev)
{
	int res, tmp_res, i;
	struct sas_phy *phy = sas_get_local_phy(dev);
	/* Standard mandates link reset for ATA (type 0) and
	 * hard reset for SSP (type 1).
	 */
	int reset_type = (dev->dev_type == SAS_SATA_DEV ||
			  (dev->tproto & SAS_PROTOCOL_STP)) ? 0 : 1;

	asd_clear_nexus_I_T(dev, NEXUS_PHASE_PRE);
	/* send a hard reset */
	ASD_DPRINTK("sending %s reset to %s\n",
		    reset_type ? "hard" : "soft", dev_name(&phy->dev));
	res = sas_phy_reset(phy, reset_type);
	if (res == TMF_RESP_FUNC_COMPLETE || res == -ENODEV) {
		/* wait for the maximum settle time */
		msleep(500);
		/* clear all outstanding commands (keep nexus suspended) */
		asd_clear_nexus_I_T(dev, NEXUS_PHASE_POST);
	}
	for (i = 0; i < 3; i++) {
		tmp_res = asd_clear_nexus_I_T(dev, NEXUS_PHASE_RESUME);
		if (tmp_res == TC_RESUME)
			goto out;
		msleep(500);
	}

	/* This is a bit of a problem:  the sequencer is still suspended
	 * and is refusing to resume.  Hope it will resume on a bigger
	 * hammer or the disk is lost.
	 */
	dev_printk(KERN_ERR, &phy->dev,
		   "Failed to resume nexus after reset 0x%x\n", tmp_res);

	res = TMF_RESP_FUNC_FAILED;
out:
	sas_put_local_phy(phy);
	return res;
}

static int asd_clear_nexus_I_T_L(struct domain_device *dev, u8 *lun)
{
	struct asd_ha_struct *asd_ha = dev->port->ha->lldd_ha;

	CLEAR_NEXUS_PRE;
	scb->clear_nexus.nexus = NEXUS_I_T_L;
	scb->clear_nexus.flags = SEND_Q | EXEC_Q | NOTINQ;
	memcpy(scb->clear_nexus.ssp_task.lun, lun, 8);
	scb->clear_nexus.conn_handle = cpu_to_le16((u16)(unsigned long)
						   dev->lldd_dev);
	CLEAR_NEXUS_POST;
}

static int asd_clear_nexus_tag(struct sas_task *task)
{
	struct asd_ha_struct *asd_ha = task->dev->port->ha->lldd_ha;
	struct asd_ascb *tascb = task->lldd_task;

	CLEAR_NEXUS_PRE;
	scb->clear_nexus.nexus = NEXUS_TAG;
	memcpy(scb->clear_nexus.ssp_task.lun, task->ssp_task.LUN, 8);
	scb->clear_nexus.ssp_task.tag = tascb->tag;
	if (task->dev->tproto)
		scb->clear_nexus.conn_handle = cpu_to_le16((u16)(unsigned long)
							  task->dev->lldd_dev);
	CLEAR_NEXUS_POST;
}

static int asd_clear_nexus_index(struct sas_task *task)
{
	struct asd_ha_struct *asd_ha = task->dev->port->ha->lldd_ha;
	struct asd_ascb *tascb = task->lldd_task;

	CLEAR_NEXUS_PRE;
	scb->clear_nexus.nexus = NEXUS_TRANS_CX;
	if (task->dev->tproto)
		scb->clear_nexus.conn_handle = cpu_to_le16((u16)(unsigned long)
							  task->dev->lldd_dev);
	scb->clear_nexus.index = cpu_to_le16(tascb->tc_index);
	CLEAR_NEXUS_POST;
}

/* ---------- TMFs ---------- */

static void asd_tmf_timedout(struct timer_list *t)
{
	struct asd_ascb *ascb = from_timer(ascb, t, timer);
	struct tasklet_completion_status *tcs = ascb->uldd_task;

	ASD_DPRINTK("tmf timed out\n");
	tcs->tmf_state = TMF_RESP_FUNC_FAILED;
	complete(ascb->completion);
}

static int asd_get_tmf_resp_tasklet(struct asd_ascb *ascb,
				    struct done_list_struct *dl)
{
	struct asd_ha_struct *asd_ha = ascb->ha;
	unsigned long flags;
	struct tc_resp_sb_struct {
		__le16 index_escb;
		u8     len_lsb;
		u8     flags;
	} __attribute__ ((packed)) *resp_sb = (void *) dl->status_block;

	int edb_id = ((resp_sb->flags & 0x70) >> 4) - 1;
	struct asd_ascb *escb;
	struct asd_dma_tok *edb;
	struct ssp_frame_hdr *fh;
	struct ssp_response_iu   *ru;
	int res = TMF_RESP_FUNC_FAILED;

	ASD_DPRINTK("tmf resp tasklet\n");

	spin_lock_irqsave(&asd_ha->seq.tc_index_lock, flags);
	escb = asd_tc_index_find(&asd_ha->seq,
				 (int)le16_to_cpu(resp_sb->index_escb));
	spin_unlock_irqrestore(&asd_ha->seq.tc_index_lock, flags);

	if (!escb) {
		ASD_DPRINTK("Uh-oh! No escb for this dl?!\n");
		return res;
	}

	edb = asd_ha->seq.edb_arr[edb_id + escb->edb_index];
	ascb->tag = *(__be16 *)(edb->vaddr + 4);
	fh = edb->vaddr + 16;
	ru = edb->vaddr + 16 + sizeof(*fh);
	res = ru->status;
	if (ru->datapres == SAS_DATAPRES_RESPONSE_DATA)
		res = ru->resp_data[3];
#if 0
	ascb->tag = fh->tag;
#endif
	ascb->tag_valid = 1;

	asd_invalidate_edb(escb, edb_id);
	return res;
}

static void asd_tmf_tasklet_complete(struct asd_ascb *ascb,
				     struct done_list_struct *dl)
{
	struct tasklet_completion_status *tcs;

	if (!del_timer(&ascb->timer))
		return;

	tcs = ascb->uldd_task;
	ASD_DPRINTK("tmf tasklet complete\n");

	tcs->dl_opcode = dl->opcode;

	if (dl->opcode == TC_SSP_RESP) {
		tcs->tmf_state = asd_get_tmf_resp_tasklet(ascb, dl);
		tcs->tag_valid = ascb->tag_valid;
		tcs->tag = ascb->tag;
	}

	complete(ascb->completion);
	asd_ascb_free(ascb);
}

static int asd_clear_nexus(struct sas_task *task)
{
	int res = TMF_RESP_FUNC_FAILED;
	int leftover;
	struct asd_ascb *tascb = task->lldd_task;
	DECLARE_COMPLETION_ONSTACK(completion);
	unsigned long flags;

	tascb->completion = &completion;

	ASD_DPRINTK("task not done, clearing nexus\n");
	if (tascb->tag_valid)
		res = asd_clear_nexus_tag(task);
	else
		res = asd_clear_nexus_index(task);
	leftover = wait_for_completion_timeout(&completion,
					       AIC94XX_SCB_TIMEOUT);
	tascb->completion = NULL;
	ASD_DPRINTK("came back from clear nexus\n");
	spin_lock_irqsave(&task->task_state_lock, flags);
	if (leftover < 1)
		res = TMF_RESP_FUNC_FAILED;
	if (task->task_state_flags & SAS_TASK_STATE_DONE)
		res = TMF_RESP_FUNC_COMPLETE;
	spin_unlock_irqrestore(&task->task_state_lock, flags);

	return res;
}

/**
 * asd_abort_task -- ABORT TASK TMF
 * @task: the task to be aborted
 *
 * Before calling ABORT TASK, the task state flags should be ORed with
 * SAS_TASK_STATE_ABORTED (unless SAS_TASK_STATE_DONE is set) under
 * the task_state_lock IRQ spinlock; ABORT TASK *must* then be called.
 *
 * Implements the ABORT TASK TMF, I_T_L_Q nexus.
 * Returns: SAS TMF responses (see sas_task.h),
 *          -ENOMEM,
 *          -SAS_QUEUE_FULL.
 *
 * When ABORT TASK returns, the caller first checks
 * task->task_state_flags, and then the return value of ABORT TASK.
 *
 * If the task has the SAS_TASK_STATE_DONE bit set, the task completed
 * successfully before it could be aborted.  The caller of ABORT TASK
 * is responsible for either calling task->task_done() or freeing the
 * task (but not both), depending on their framework.  The return code
 * is TMF_RESP_FUNC_FAILED in this case.
 *
 * If the SAS_TASK_STATE_DONE bit is not set:
 *	If the return code is TMF_RESP_FUNC_COMPLETE, the task was
 *	aborted successfully.  The caller of ABORT TASK is responsible
 *	for either calling task->task_done() to finish the task or
 *	freeing the task (but not both), depending on their framework.
 *	Otherwise, ABORT TASK returned some kind of error.  The task
 *	was _not_ cancelled and nothing can be assumed; the caller of
 *	ABORT TASK may wish to retry.
 */
int asd_abort_task(struct sas_task *task)
{
	struct asd_ascb *tascb = task->lldd_task;
	struct asd_ha_struct *asd_ha = tascb->ha;
	int res = 1;
	unsigned long flags;
	struct asd_ascb *ascb = NULL;
	struct scb *scb;
	int leftover;
	DECLARE_TCS(tcs);
	DECLARE_COMPLETION_ONSTACK(completion);
	DECLARE_COMPLETION_ONSTACK(tascb_completion);

	tascb->completion = &tascb_completion;

	spin_lock_irqsave(&task->task_state_lock, flags);
	if (task->task_state_flags & SAS_TASK_STATE_DONE) {
		spin_unlock_irqrestore(&task->task_state_lock, flags);
		res = TMF_RESP_FUNC_COMPLETE;
		ASD_DPRINTK("%s: task 0x%p done\n", __func__, task);
		goto out_done;
	}
	spin_unlock_irqrestore(&task->task_state_lock, flags);

	ascb = asd_ascb_alloc_list(asd_ha, &res, GFP_KERNEL);
	if (!ascb)
		return -ENOMEM;

	ascb->uldd_task = &tcs;
	ascb->completion = &completion;
	scb = ascb->scb;
	scb->header.opcode = SCB_ABORT_TASK;

	switch (task->task_proto) {
	case SAS_PROTOCOL_SATA:
	case SAS_PROTOCOL_STP:
		scb->abort_task.proto_conn_rate = (1 << 5); /* STP */
		break;
	case SAS_PROTOCOL_SSP:
		scb->abort_task.proto_conn_rate  = (1 << 4); /* SSP */
		scb->abort_task.proto_conn_rate |= task->dev->linkrate;
		break;
	case SAS_PROTOCOL_SMP:
		break;
	default:
		break;
	}

	if (task->task_proto == SAS_PROTOCOL_SSP) {
		scb->abort_task.ssp_frame.frame_type = SSP_TASK;
		memcpy(scb->abort_task.ssp_frame.hashed_dest_addr,
		       task->dev->hashed_sas_addr, HASHED_SAS_ADDR_SIZE);
		memcpy(scb->abort_task.ssp_frame.hashed_src_addr,
		       task->dev->port->ha->hashed_sas_addr,
		       HASHED_SAS_ADDR_SIZE);
		scb->abort_task.ssp_frame.tptt = cpu_to_be16(0xFFFF);

		memcpy(scb->abort_task.ssp_task.lun, task->ssp_task.LUN, 8);
		scb->abort_task.ssp_task.tmf = TMF_ABORT_TASK;
		scb->abort_task.ssp_task.tag = cpu_to_be16(0xFFFF);
	}

	scb->abort_task.sister_scb = cpu_to_le16(0xFFFF);
	scb->abort_task.conn_handle = cpu_to_le16(
		(u16)(unsigned long)task->dev->lldd_dev);
	scb->abort_task.retry_count = 1;
	scb->abort_task.index = cpu_to_le16((u16)tascb->tc_index);
	scb->abort_task.itnl_to = cpu_to_le16(ITNL_TIMEOUT_CONST);

	res = asd_enqueue_internal(ascb, asd_tmf_tasklet_complete,
				   asd_tmf_timedout);
	if (res)
		goto out_free;
	wait_for_completion(&completion);
	ASD_DPRINTK("tmf came back\n");

	tascb->tag = tcs.tag;
	tascb->tag_valid = tcs.tag_valid;

	spin_lock_irqsave(&task->task_state_lock, flags);
	if (task->task_state_flags & SAS_TASK_STATE_DONE) {
		spin_unlock_irqrestore(&task->task_state_lock, flags);
		res = TMF_RESP_FUNC_COMPLETE;
		ASD_DPRINTK("%s: task 0x%p done\n", __func__, task);
		goto out_done;
	}
	spin_unlock_irqrestore(&task->task_state_lock, flags);

	if (tcs.dl_opcode == TC_SSP_RESP) {
		/* The task to be aborted has been sent to the device.
		 * We got a Response IU for the ABORT TASK TMF.
		 */
		if (tcs.tmf_state == TMF_RESP_FUNC_COMPLETE)
			res = asd_clear_nexus(task);
		else
			res = tcs.tmf_state;
	} else if (tcs.dl_opcode == TC_NO_ERROR &&
		   tcs.tmf_state == TMF_RESP_FUNC_FAILED) {
		/* timeout */
		res = TMF_RESP_FUNC_FAILED;
	} else {
		/* In the following we assume that the managing layer
		 * will _never_ make a mistake when issuing ABORT
		 * TASK.
		 */
		switch (tcs.dl_opcode) {
		default:
			res = asd_clear_nexus(task);
			fallthrough;
		case TC_NO_ERROR:
			break;
			/* The task hasn't been sent to the device, or
			 * we never got a (sane) Response IU for the
			 * ABORT TASK TMF.
			 */
		case TF_NAK_RECV:
			res = TMF_RESP_INVALID_FRAME;
			break;
		case TF_TMF_TASK_DONE:	/* done but not reported yet */
			res = TMF_RESP_FUNC_FAILED;
			leftover =
				wait_for_completion_timeout(&tascb_completion,
							  AIC94XX_SCB_TIMEOUT);
			spin_lock_irqsave(&task->task_state_lock, flags);
			if (leftover < 1)
				res = TMF_RESP_FUNC_FAILED;
			if (task->task_state_flags & SAS_TASK_STATE_DONE)
				res = TMF_RESP_FUNC_COMPLETE;
			spin_unlock_irqrestore(&task->task_state_lock, flags);
			break;
		case TF_TMF_NO_TAG:
		case TF_TMF_TAG_FREE: /* the tag is in the free list */
		case TF_TMF_NO_CONN_HANDLE: /* no such device */
			res = TMF_RESP_FUNC_COMPLETE;
			break;
		case TF_TMF_NO_CTX: /* not in seq, or proto != SSP */
			res = TMF_RESP_FUNC_ESUPP;
			break;
		}
	}
out_done:
	tascb->completion = NULL;
	if (res == TMF_RESP_FUNC_COMPLETE) {
		task->lldd_task = NULL;
		mb();
		asd_ascb_free(tascb);
	}
	ASD_DPRINTK("task 0x%p aborted, res: 0x%x\n", task, res);
	return res;

out_free:
	asd_ascb_free(ascb);
	ASD_DPRINTK("task 0x%p aborted, res: 0x%x\n", task, res);
	return res;
}
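
/*
 * Example: a minimal caller sketch for asd_abort_task(), following the
 * contract documented above.  This is illustrative only (hence #if 0):
 * the surrounding error-handling context and the final disposition of
 * the task depend on the caller's framework (e.g. libsas), and the
 * example_* name below is hypothetical, not part of this driver.
 */
#if 0
static int example_abort_one_task(struct sas_task *task)
{
	unsigned long flags;
	int res;

	/* Mark the task as being aborted, unless it already completed. */
	spin_lock_irqsave(&task->task_state_lock, flags);
	if (task->task_state_flags & SAS_TASK_STATE_DONE) {
		spin_unlock_irqrestore(&task->task_state_lock, flags);
		return TMF_RESP_FUNC_COMPLETE;
	}
	task->task_state_flags |= SAS_TASK_STATE_ABORTED;
	spin_unlock_irqrestore(&task->task_state_lock, flags);

	res = asd_abort_task(task);

	/* Check the DONE flag first, then the TMF response code. */
	spin_lock_irqsave(&task->task_state_lock, flags);
	if (task->task_state_flags & SAS_TASK_STATE_DONE) {
		spin_unlock_irqrestore(&task->task_state_lock, flags);
		task->task_done(task);	/* or free the task, but not both */
		return res;
	}
	spin_unlock_irqrestore(&task->task_state_lock, flags);

	return res;
}
#endif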

/**
 * asd_initiate_ssp_tmf -- send a TMF to an I_T_L or I_T_L_Q nexus
 * @dev: pointer to struct domain_device of interest
 * @lun: pointer to u8[8] which is the LUN
 * @tmf: the TMF to be performed (see sas_task.h or the SAS spec)
 * @index: the transaction context of the task to be queried,
 *	   if this is a QUERY TASK TMF
 *
 * This function is used to send ABORT TASK SET, CLEAR ACA,
 * CLEAR TASK SET, LU RESET and QUERY TASK TMFs.
 *
 * No SCBs should be queued to the I_T_L nexus when this SCB is
 * pending.
 *
 * Returns: TMF response code (see sas_task.h or the SAS spec)
 */
static int asd_initiate_ssp_tmf(struct domain_device *dev, u8 *lun,
				int tmf, int index)
{
	struct asd_ha_struct *asd_ha = dev->port->ha->lldd_ha;
	struct asd_ascb *ascb;
	int res = 1;
	struct scb *scb;
	DECLARE_COMPLETION_ONSTACK(completion);
	DECLARE_TCS(tcs);

	if (!(dev->tproto & SAS_PROTOCOL_SSP))
		return TMF_RESP_FUNC_ESUPP;

	ascb = asd_ascb_alloc_list(asd_ha, &res, GFP_KERNEL);
	if (!ascb)
		return -ENOMEM;

	ascb->completion = &completion;
	ascb->uldd_task = &tcs;
	scb = ascb->scb;

	if (tmf == TMF_QUERY_TASK)
		scb->header.opcode = QUERY_SSP_TASK;
	else
		scb->header.opcode = INITIATE_SSP_TMF;

	scb->ssp_tmf.proto_conn_rate  = (1 << 4); /* SSP */
	scb->ssp_tmf.proto_conn_rate |= dev->linkrate;
	/* SSP frame header */
	scb->ssp_tmf.ssp_frame.frame_type = SSP_TASK;
	memcpy(scb->ssp_tmf.ssp_frame.hashed_dest_addr,
	       dev->hashed_sas_addr, HASHED_SAS_ADDR_SIZE);
	memcpy(scb->ssp_tmf.ssp_frame.hashed_src_addr,
	       dev->port->ha->hashed_sas_addr, HASHED_SAS_ADDR_SIZE);
	scb->ssp_tmf.ssp_frame.tptt = cpu_to_be16(0xFFFF);
	/* SSP Task IU */
	memcpy(scb->ssp_tmf.ssp_task.lun, lun, 8);
	scb->ssp_tmf.ssp_task.tmf = tmf;

	scb->ssp_tmf.sister_scb = cpu_to_le16(0xFFFF);
	scb->ssp_tmf.conn_handle = cpu_to_le16((u16)(unsigned long)
					       dev->lldd_dev);
	scb->ssp_tmf.retry_count = 1;
	scb->ssp_tmf.itnl_to = cpu_to_le16(ITNL_TIMEOUT_CONST);
	if (tmf == TMF_QUERY_TASK)
		scb->ssp_tmf.index = cpu_to_le16(index);

	res = asd_enqueue_internal(ascb, asd_tmf_tasklet_complete,
				   asd_tmf_timedout);
	if (res)
		goto out_err;
	wait_for_completion(&completion);

	switch (tcs.dl_opcode) {
	case TC_NO_ERROR:
		res = TMF_RESP_FUNC_COMPLETE;
		break;
	case TF_NAK_RECV:
		res = TMF_RESP_INVALID_FRAME;
		break;
	case TF_TMF_TASK_DONE:
		res = TMF_RESP_FUNC_FAILED;
		break;
	case TF_TMF_NO_TAG:
	case TF_TMF_TAG_FREE: /* the tag is in the free list */
	case TF_TMF_NO_CONN_HANDLE: /* no such device */
		res = TMF_RESP_FUNC_COMPLETE;
		break;
	case TF_TMF_NO_CTX: /* not in seq, or proto != SSP */
		res = TMF_RESP_FUNC_ESUPP;
		break;
	default:
		/* Allow TMF response codes to propagate upwards */
		res = tcs.dl_opcode;
		break;
	}
	return res;
out_err:
	asd_ascb_free(ascb);
	return res;
}

int asd_abort_task_set(struct domain_device *dev, u8 *lun)
{
	int res = asd_initiate_ssp_tmf(dev, lun, TMF_ABORT_TASK_SET, 0);

	if (res == TMF_RESP_FUNC_COMPLETE)
		asd_clear_nexus_I_T_L(dev, lun);
	return res;
}

int asd_clear_task_set(struct domain_device *dev, u8 *lun)
{
	int res = asd_initiate_ssp_tmf(dev, lun, TMF_CLEAR_TASK_SET, 0);

	if (res == TMF_RESP_FUNC_COMPLETE)
		asd_clear_nexus_I_T_L(dev, lun);
	return res;
}

int asd_lu_reset(struct domain_device *dev, u8 *lun)
{
	int res = asd_initiate_ssp_tmf(dev, lun, TMF_LU_RESET, 0);

	if (res == TMF_RESP_FUNC_COMPLETE)
		asd_clear_nexus_I_T_L(dev, lun);
	return res;
}
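
/*
 * asd_initiate_ssp_tmf() can also carry a CLEAR ACA TMF, for which this
 * file provides no wrapper.  A hypothetical one would follow the same
 * pattern as the wrappers above, assuming TMF_CLEAR_ACA from the SAS TMF
 * opcode list; whether a clear-nexus step is needed afterwards is not
 * established here, so none is issued.  Illustrative only, hence #if 0;
 * asd_clear_aca is not part of the driver.
 */
#if 0
static int asd_clear_aca(struct domain_device *dev, u8 *lun)
{
	/* TMF_CLEAR_ACA is assumed to come from the SAS TMF opcode list */
	return asd_initiate_ssp_tmf(dev, lun, TMF_CLEAR_ACA, 0);
}
#endif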

/**
 * asd_query_task -- send a QUERY TASK TMF to an I_T_L_Q nexus
 * @task: pointer to sas_task struct of interest
 *
 * Returns: TMF_RESP_FUNC_COMPLETE if the task is not in the task set,
 * or TMF_RESP_FUNC_SUCC if the task is in the task set.
 *
 * Normally the management layer sets the task to the aborted state,
 * then calls QUERY TASK followed by ABORT TASK.
 */
int asd_query_task(struct sas_task *task)
{
	struct asd_ascb *ascb = task->lldd_task;
	int index;

	if (ascb) {
		index = ascb->tc_index;
		return asd_initiate_ssp_tmf(task->dev, task->ssp_task.LUN,
					    TMF_QUERY_TASK, index);
	}
	return TMF_RESP_FUNC_COMPLETE;
}
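
/*
 * Example: the "query, then abort" sequence described above, as a
 * management layer might drive it.  Illustrative only (#if 0):
 * example_recover_task is hypothetical, and real recovery paths
 * (e.g. libsas error handling) carry more state than shown here.
 */
#if 0
static int example_recover_task(struct sas_task *task)
{
	unsigned long flags;
	int res;

	/* Mark the task aborted under the task state lock. */
	spin_lock_irqsave(&task->task_state_lock, flags);
	task->task_state_flags |= SAS_TASK_STATE_ABORTED;
	spin_unlock_irqrestore(&task->task_state_lock, flags);

	/* Is the task still known to the target? */
	res = asd_query_task(task);
	if (res == TMF_RESP_FUNC_COMPLETE)
		return res;	/* not in the task set: nothing to abort */

	/* Otherwise (e.g. TMF_RESP_FUNC_SUCC) the task is still in the
	 * task set, so try to abort it.
	 */
	return asd_abort_task(task);
}
#endif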