/*
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 * The full GNU General Public License is included in this distribution
 * in the file called LICENSE.GPL.
 *
 * BSD LICENSE
 *
 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *   * Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *   * Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in
 *     the documentation and/or other materials provided with the
 *     distribution.
 *   * Neither the name of Intel Corporation nor the names of its
 *     contributors may be used to endorse or promote products derived
 *     from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <scsi/sas.h>
#include <linux/bitops.h>
#include "isci.h"
#include "port.h"
#include "remote_device.h"
#include "request.h"
#include "remote_node_context.h"
#include "scu_event_codes.h"
#include "task.h"

#undef C
#define C(a) (#a)
const char *dev_state_name(enum sci_remote_device_states state)
{
	static const char * const strings[] = REMOTE_DEV_STATES;

	return strings[state];
}
#undef C

enum sci_status sci_remote_device_suspend(struct isci_remote_device *idev,
					  enum sci_remote_node_suspension_reasons reason)
{
	return sci_remote_node_context_suspend(&idev->rnc, reason,
					       SCI_SOFTWARE_SUSPEND_EXPECTED_EVENT);
}

/**
 * isci_remote_device_ready() - This function is called by the ihost when the
 *    remote device is ready. We mark the isci device as ready and signal the
 *    waiting process.
 * @ihost: our valid isci_host
 * @idev: remote device
 *
 */
static void isci_remote_device_ready(struct isci_host *ihost, struct isci_remote_device *idev)
{
	dev_dbg(&ihost->pdev->dev,
		"%s: idev = %p\n", __func__, idev);

	clear_bit(IDEV_IO_NCQERROR, &idev->flags);
	set_bit(IDEV_IO_READY, &idev->flags);
	if (test_and_clear_bit(IDEV_START_PENDING, &idev->flags))
		wake_up(&ihost->eventq);
}

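/* Terminate a single request on this device.  The request is skipped if it
 * is not active, does not belong to this device, or (when check_abort is
 * set) is not flagged for abort; otherwise the abort path is marked active
 * and the controller is asked to terminate the request.
 */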
static enum sci_status sci_remote_device_terminate_req(
	struct isci_host *ihost,
	struct isci_remote_device *idev,
	int check_abort,
	struct isci_request *ireq)
{
	if (!test_bit(IREQ_ACTIVE, &ireq->flags) ||
	    (ireq->target_device != idev) ||
	    (check_abort && !test_bit(IREQ_PENDING_ABORT, &ireq->flags)))
		return SCI_SUCCESS;

	dev_dbg(&ihost->pdev->dev,
		"%s: idev=%p; flags=%lx; req=%p; req target=%p\n",
		__func__, idev, idev->flags, ireq, ireq->target_device);

	set_bit(IREQ_ABORT_PATH_ACTIVE, &ireq->flags);

	return sci_controller_terminate_request(ihost, idev, ireq);
}

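/* Walk every request slot on the controller and terminate the requests that
 * belong to this device, optionally restricting the sweep to requests already
 * flagged for abort.  The last non-success status seen is returned.
 */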
static enum sci_status sci_remote_device_terminate_reqs_checkabort(
	struct isci_remote_device *idev,
	int chk)
{
	struct isci_host *ihost = idev->owning_port->owning_controller;
	enum sci_status status  = SCI_SUCCESS;
	u32 i;

	for (i = 0; i < SCI_MAX_IO_REQUESTS; i++) {
		struct isci_request *ireq = ihost->reqs[i];
		enum sci_status s;

		s = sci_remote_device_terminate_req(ihost, idev, chk, ireq);
		if (s != SCI_SUCCESS)
			status = s;
	}
	return status;
}

static bool isci_compare_suspendcount(
	struct isci_remote_device *idev,
	u32 localcount)
{
	smp_rmb();

	/* Check for a change in the suspend count, or the RNC
	 * being destroyed.
	 */
	return (localcount != idev->rnc.suspend_count)
	    || sci_remote_node_context_is_being_destroyed(&idev->rnc);
}

static bool isci_check_reqterm(
	struct isci_host *ihost,
	struct isci_remote_device *idev,
	struct isci_request *ireq,
	u32 localcount)
{
	unsigned long flags;
	bool res;

	spin_lock_irqsave(&ihost->scic_lock, flags);
	res = isci_compare_suspendcount(idev, localcount)
		&& !test_bit(IREQ_ABORT_PATH_ACTIVE, &ireq->flags);
	spin_unlock_irqrestore(&ihost->scic_lock, flags);

	return res;
}

static bool isci_check_devempty(
	struct isci_host *ihost,
	struct isci_remote_device *idev,
	u32 localcount)
{
	unsigned long flags;
	bool res;

	spin_lock_irqsave(&ihost->scic_lock, flags);
	res = isci_compare_suspendcount(idev, localcount)
		&& idev->started_request_count == 0;
	spin_unlock_irqrestore(&ihost->scic_lock, flags);

	return res;
}

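/* Terminate either a single request (when ireq is non-NULL) or all requests
 * on the device, then wait up to MAX_SUSPEND_MSECS for the terminations to
 * complete, as signalled by a change in the RNC suspend count or by the RNC
 * being destroyed.  A reference on the device is held for the duration.
 */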
enum sci_status isci_remote_device_terminate_requests(
	struct isci_host *ihost,
	struct isci_remote_device *idev,
	struct isci_request *ireq)
{
	enum sci_status status = SCI_SUCCESS;
	unsigned long flags;
	u32 rnc_suspend_count;

	spin_lock_irqsave(&ihost->scic_lock, flags);

	if (isci_get_device(idev) == NULL) {
		dev_dbg(&ihost->pdev->dev, "%s: failed isci_get_device(idev=%p)\n",
			__func__, idev);
		spin_unlock_irqrestore(&ihost->scic_lock, flags);
		status = SCI_FAILURE;
	} else {
		/* If already suspended, don't wait for another suspension. */
		smp_rmb();
		rnc_suspend_count
			= sci_remote_node_context_is_suspended(&idev->rnc)
				? 0 : idev->rnc.suspend_count;

		dev_dbg(&ihost->pdev->dev,
			"%s: idev=%p, ireq=%p; started_request_count=%d, "
				"rnc_suspend_count=%d, rnc.suspend_count=%d; "
				"about to wait\n",
			__func__, idev, ireq, idev->started_request_count,
			rnc_suspend_count, idev->rnc.suspend_count);

		#define MAX_SUSPEND_MSECS 10000
		if (ireq) {
			/* Terminate a specific TC. */
			set_bit(IREQ_NO_AUTO_FREE_TAG, &ireq->flags);
			sci_remote_device_terminate_req(ihost, idev, 0, ireq);
			spin_unlock_irqrestore(&ihost->scic_lock, flags);
			if (!wait_event_timeout(ihost->eventq,
						isci_check_reqterm(ihost, idev, ireq,
								   rnc_suspend_count),
						msecs_to_jiffies(MAX_SUSPEND_MSECS))) {

				dev_warn(&ihost->pdev->dev, "%s host%d timeout single\n",
					 __func__, ihost->id);
				dev_dbg(&ihost->pdev->dev,
					 "%s: ******* Timeout waiting for "
					 "suspend; idev=%p, current state %s; "
					 "started_request_count=%d, flags=%lx\n\t"
					 "rnc_suspend_count=%d, rnc.suspend_count=%d "
					 "RNC: current state %s, current "
					 "suspend_type %x dest state %d;\n"
					 "ireq=%p, ireq->flags = %lx\n",
					 __func__, idev,
					 dev_state_name(idev->sm.current_state_id),
					 idev->started_request_count, idev->flags,
					 rnc_suspend_count, idev->rnc.suspend_count,
					 rnc_state_name(idev->rnc.sm.current_state_id),
					 idev->rnc.suspend_type,
					 idev->rnc.destination_state,
					 ireq, ireq->flags);
			}
			spin_lock_irqsave(&ihost->scic_lock, flags);
			clear_bit(IREQ_NO_AUTO_FREE_TAG, &ireq->flags);
			if (!test_bit(IREQ_ABORT_PATH_ACTIVE, &ireq->flags))
				isci_free_tag(ihost, ireq->io_tag);
			spin_unlock_irqrestore(&ihost->scic_lock, flags);
		} else {
			/* Terminate all TCs. */
			sci_remote_device_terminate_requests(idev);
			spin_unlock_irqrestore(&ihost->scic_lock, flags);
			if (!wait_event_timeout(ihost->eventq,
						isci_check_devempty(ihost, idev,
								    rnc_suspend_count),
						msecs_to_jiffies(MAX_SUSPEND_MSECS))) {

				dev_warn(&ihost->pdev->dev, "%s host%d timeout all\n",
					 __func__, ihost->id);
				dev_dbg(&ihost->pdev->dev,
					"%s: ******* Timeout waiting for "
					"suspend; idev=%p, current state %s; "
					"started_request_count=%d, flags=%lx\n\t"
					"rnc_suspend_count=%d, "
					"RNC: current state %s, "
					"rnc.suspend_count=%d, current "
					"suspend_type %x dest state %d\n",
					__func__, idev,
					dev_state_name(idev->sm.current_state_id),
					idev->started_request_count, idev->flags,
					rnc_suspend_count,
					rnc_state_name(idev->rnc.sm.current_state_id),
					idev->rnc.suspend_count,
					idev->rnc.suspend_type,
					idev->rnc.destination_state);
			}
		}
		dev_dbg(&ihost->pdev->dev, "%s: idev=%p, wait done\n",
			__func__, idev);
		isci_put_device(idev);
	}
	return status;
}

/**
 * isci_remote_device_not_ready() - This function is called by the ihost when
 *    the remote device is not ready. We mark the isci device as not ready for
 *    I/O (clearing "ready_for_io").
 * @ihost: This parameter specifies the isci host object.
 * @idev: This parameter specifies the remote device
 * @reason: The reason for the not-ready notification
 *
 * sci_lock is held on entrance to this function.
 */
static void isci_remote_device_not_ready(struct isci_host *ihost,
					 struct isci_remote_device *idev,
					 u32 reason)
{
	dev_dbg(&ihost->pdev->dev,
		"%s: isci_device = %p; reason = %d\n", __func__, idev, reason);

	switch (reason) {
	case SCIC_REMOTE_DEVICE_NOT_READY_SATA_SDB_ERROR_FIS_RECEIVED:
		set_bit(IDEV_IO_NCQERROR, &idev->flags);

		/* Suspend the remote device so the I/O can be terminated. */
		sci_remote_device_suspend(idev, SCI_SW_SUSPEND_NORMAL);

		/* Kill all outstanding requests for the device. */
		sci_remote_device_terminate_requests(idev);

		fallthrough;	/* into the default case */
	default:
		clear_bit(IDEV_IO_READY, &idev->flags);
		break;
	}
}

/* Called once the remote node context is ready to be freed.
 * The remote device can now report that its stop operation is complete.
 */
static void rnc_destruct_done(void *_dev)
{
	struct isci_remote_device *idev = _dev;

	BUG_ON(idev->started_request_count != 0);
	sci_change_state(&idev->sm, SCI_DEV_STOPPED);
}

enum sci_status sci_remote_device_terminate_requests(
	struct isci_remote_device *idev)
{
	return sci_remote_device_terminate_reqs_checkabort(idev, 0);
}

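/* Request that the remote device stop.  Depending on the current state this
 * either destructs the remote node context immediately or transitions to
 * SCI_DEV_STOPPING and terminates any started requests first; the final
 * transition to SCI_DEV_STOPPED happens from rnc_destruct_done().
 */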
enum sci_status sci_remote_device_stop(struct isci_remote_device *idev,
					u32 timeout)
{
	struct sci_base_state_machine *sm = &idev->sm;
	enum sci_remote_device_states state = sm->current_state_id;

	switch (state) {
	case SCI_DEV_INITIAL:
	case SCI_DEV_FAILED:
	case SCI_DEV_FINAL:
	default:
		dev_warn(scirdev_to_dev(idev), "%s: in wrong state: %s\n",
			 __func__, dev_state_name(state));
		return SCI_FAILURE_INVALID_STATE;
	case SCI_DEV_STOPPED:
		return SCI_SUCCESS;
	case SCI_DEV_STARTING:
		/* device not started so there had better be no requests */
		BUG_ON(idev->started_request_count != 0);
		sci_remote_node_context_destruct(&idev->rnc,
						      rnc_destruct_done, idev);
		/* Transition to the stopping state and wait for the
		 * remote node to complete being posted and invalidated.
		 */
		sci_change_state(sm, SCI_DEV_STOPPING);
		return SCI_SUCCESS;
	case SCI_DEV_READY:
	case SCI_STP_DEV_IDLE:
	case SCI_STP_DEV_CMD:
	case SCI_STP_DEV_NCQ:
	case SCI_STP_DEV_NCQ_ERROR:
	case SCI_STP_DEV_AWAIT_RESET:
	case SCI_SMP_DEV_IDLE:
	case SCI_SMP_DEV_CMD:
		sci_change_state(sm, SCI_DEV_STOPPING);
		if (idev->started_request_count == 0)
			sci_remote_node_context_destruct(&idev->rnc,
							 rnc_destruct_done,
							 idev);
		else {
			sci_remote_device_suspend(
				idev, SCI_SW_SUSPEND_LINKHANG_DETECT);
			sci_remote_device_terminate_requests(idev);
		}
		return SCI_SUCCESS;
	case SCI_DEV_STOPPING:
		/* All requests should have been terminated, but if there is an
		 * attempt to stop a device already in the stopping state, then
		 * try again to terminate.
		 */
		return sci_remote_device_terminate_requests(idev);
	case SCI_DEV_RESETTING:
		sci_change_state(sm, SCI_DEV_STOPPING);
		return SCI_SUCCESS;
	}
}

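/* Begin a reset of the remote device.  Only devices in the ready state or an
 * STP substate may transition to SCI_DEV_RESETTING; the reset is finished
 * later via sci_remote_device_reset_complete().
 */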
enum sci_status sci_remote_device_reset(struct isci_remote_device *idev)
{
	struct sci_base_state_machine *sm = &idev->sm;
	enum sci_remote_device_states state = sm->current_state_id;

	switch (state) {
	case SCI_DEV_INITIAL:
	case SCI_DEV_STOPPED:
	case SCI_DEV_STARTING:
	case SCI_SMP_DEV_IDLE:
	case SCI_SMP_DEV_CMD:
	case SCI_DEV_STOPPING:
	case SCI_DEV_FAILED:
	case SCI_DEV_RESETTING:
	case SCI_DEV_FINAL:
	default:
		dev_warn(scirdev_to_dev(idev), "%s: in wrong state: %s\n",
			 __func__, dev_state_name(state));
		return SCI_FAILURE_INVALID_STATE;
	case SCI_DEV_READY:
	case SCI_STP_DEV_IDLE:
	case SCI_STP_DEV_CMD:
	case SCI_STP_DEV_NCQ:
	case SCI_STP_DEV_NCQ_ERROR:
	case SCI_STP_DEV_AWAIT_RESET:
		sci_change_state(sm, SCI_DEV_RESETTING);
		return SCI_SUCCESS;
	}
}

enum sci_status sci_remote_device_reset_complete(struct isci_remote_device *idev)
{
	struct sci_base_state_machine *sm = &idev->sm;
	enum sci_remote_device_states state = sm->current_state_id;

	if (state != SCI_DEV_RESETTING) {
		dev_warn(scirdev_to_dev(idev), "%s: in wrong state: %s\n",
			 __func__, dev_state_name(state));
		return SCI_FAILURE_INVALID_STATE;
	}

	sci_change_state(sm, SCI_DEV_READY);
	return SCI_SUCCESS;
}

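/* Dispatch an unsolicited frame to the device.  SSP frames are matched to an
 * active request by tag; Set Device Bits and D2H FISes with the error bit set
 * move an NCQ device into the NCQ error substate; frames that cannot be
 * handled are released back to the controller.
 */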
enum sci_status sci_remote_device_frame_handler(struct isci_remote_device *idev,
						     u32 frame_index)
{
	struct sci_base_state_machine *sm = &idev->sm;
	enum sci_remote_device_states state = sm->current_state_id;
	struct isci_host *ihost = idev->owning_port->owning_controller;
	enum sci_status status;

	switch (state) {
	case SCI_DEV_INITIAL:
	case SCI_DEV_STOPPED:
	case SCI_DEV_STARTING:
	case SCI_STP_DEV_IDLE:
	case SCI_SMP_DEV_IDLE:
	case SCI_DEV_FINAL:
	default:
		dev_warn(scirdev_to_dev(idev), "%s: in wrong state: %s\n",
			 __func__, dev_state_name(state));
		/* Return the frame back to the controller */
		sci_controller_release_frame(ihost, frame_index);
		return SCI_FAILURE_INVALID_STATE;
	case SCI_DEV_READY:
	case SCI_STP_DEV_NCQ_ERROR:
	case SCI_STP_DEV_AWAIT_RESET:
	case SCI_DEV_STOPPING:
	case SCI_DEV_FAILED:
	case SCI_DEV_RESETTING: {
		struct isci_request *ireq;
		struct ssp_frame_hdr hdr;
		void *frame_header;
		ssize_t word_cnt;

		status = sci_unsolicited_frame_control_get_header(&ihost->uf_control,
								       frame_index,
								       &frame_header);
		if (status != SCI_SUCCESS)
			return status;

		word_cnt = sizeof(hdr) / sizeof(u32);
		sci_swab32_cpy(&hdr, frame_header, word_cnt);

		ireq = sci_request_by_tag(ihost, be16_to_cpu(hdr.tag));
		if (ireq && ireq->target_device == idev) {
			/* The IO request is now in charge of releasing the frame */
			status = sci_io_request_frame_handler(ireq, frame_index);
		} else {
			/* We could not map this tag to a valid IO
			 * request.  Just toss the frame and continue.
			 */
			sci_controller_release_frame(ihost, frame_index);
		}
		break;
	}
	case SCI_STP_DEV_NCQ: {
		struct dev_to_host_fis *hdr;

		status = sci_unsolicited_frame_control_get_header(&ihost->uf_control,
								       frame_index,
								       (void **)&hdr);
		if (status != SCI_SUCCESS)
			return status;

		if (hdr->fis_type == FIS_SETDEVBITS &&
		    (hdr->status & ATA_ERR)) {
			idev->not_ready_reason = SCIC_REMOTE_DEVICE_NOT_READY_SATA_SDB_ERROR_FIS_RECEIVED;

			/* TODO Check sactive and complete associated IO if any. */
			sci_change_state(sm, SCI_STP_DEV_NCQ_ERROR);
		} else if (hdr->fis_type == FIS_REGD2H &&
			   (hdr->status & ATA_ERR)) {
			/*
			 * Some devices return a D2H FIS when an NCQ error is
			 * detected.  Treat it the same as receiving an SDB
			 * error FIS.
			 */
			idev->not_ready_reason = SCIC_REMOTE_DEVICE_NOT_READY_SATA_SDB_ERROR_FIS_RECEIVED;
			sci_change_state(&idev->sm, SCI_STP_DEV_NCQ_ERROR);
		} else
			status = SCI_FAILURE;

		sci_controller_release_frame(ihost, frame_index);
		break;
	}
	case SCI_STP_DEV_CMD:
	case SCI_SMP_DEV_CMD:
		/* The device does not process any UF received from the hardware while
		 * in this state.  All unsolicited frames are forwarded to the io request
		 * object.
		 */
		status = sci_io_request_frame_handler(idev->working_request, frame_index);
		break;
	}

	return status;
}

static bool is_remote_device_ready(struct isci_remote_device *idev)
{

	struct sci_base_state_machine *sm = &idev->sm;
	enum sci_remote_device_states state = sm->current_state_id;

	switch (state) {
	case SCI_DEV_READY:
	case SCI_STP_DEV_IDLE:
	case SCI_STP_DEV_CMD:
	case SCI_STP_DEV_NCQ:
	case SCI_STP_DEV_NCQ_ERROR:
	case SCI_STP_DEV_AWAIT_RESET:
	case SCI_SMP_DEV_IDLE:
	case SCI_SMP_DEV_CMD:
		return true;
	default:
		return false;
	}
}

/*
 * Called once the remote node context has transitioned to a ready
 * state (after suspending RX and/or TX due to an early D2H FIS).
 */
static void atapi_remote_device_resume_done(void *_dev)
{
	struct isci_remote_device *idev = _dev;
	struct isci_request *ireq = idev->working_request;

	sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
}

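/* Handle an SCU event for this device.  RNC suspend/resume events are
 * forwarded to the remote node context, an I_T nexus timeout suspends the
 * RNC, and in the STP idle and ATAPI error substates a suspension event
 * triggers an immediate RNC resume.
 */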
enum sci_status sci_remote_device_event_handler(struct isci_remote_device *idev,
						     u32 event_code)
{
	enum sci_status status;
	struct sci_base_state_machine *sm = &idev->sm;
	enum sci_remote_device_states state = sm->current_state_id;

	switch (scu_get_event_type(event_code)) {
	case SCU_EVENT_TYPE_RNC_OPS_MISC:
	case SCU_EVENT_TYPE_RNC_SUSPEND_TX:
	case SCU_EVENT_TYPE_RNC_SUSPEND_TX_RX:
		status = sci_remote_node_context_event_handler(&idev->rnc, event_code);
		break;
	case SCU_EVENT_TYPE_PTX_SCHEDULE_EVENT:
		if (scu_get_event_code(event_code) == SCU_EVENT_IT_NEXUS_TIMEOUT) {
			status = SCI_SUCCESS;

			/* Suspend the associated RNC */
			sci_remote_device_suspend(idev, SCI_SW_SUSPEND_NORMAL);

			dev_dbg(scirdev_to_dev(idev),
				"%s: device: %p event code: %x: %s\n",
				__func__, idev, event_code,
				is_remote_device_ready(idev)
				? "I_T_Nexus_Timeout event"
				: "I_T_Nexus_Timeout event in wrong state");

			break;
		}
		fallthrough;	/* and treat as unhandled */
	default:
		dev_dbg(scirdev_to_dev(idev),
			"%s: device: %p event code: %x: %s\n",
			__func__, idev, event_code,
			is_remote_device_ready(idev)
			? "unexpected event"
			: "unexpected event in wrong state");
		status = SCI_FAILURE_INVALID_STATE;
		break;
	}

	if (status != SCI_SUCCESS)
		return status;

	/* Decode device-specific states that may require an RNC resume during
	 * normal operation.  When the abort path is active, these resumes are
	 * managed when the abort path exits.
	 */
	if (state == SCI_STP_DEV_ATAPI_ERROR) {
		/* For ATAPI error state resume the RNC right away. */
		if (scu_get_event_type(event_code) == SCU_EVENT_TYPE_RNC_SUSPEND_TX ||
		    scu_get_event_type(event_code) == SCU_EVENT_TYPE_RNC_SUSPEND_TX_RX) {
			return sci_remote_node_context_resume(&idev->rnc,
							      atapi_remote_device_resume_done,
							      idev);
		}
	}

	if (state == SCI_STP_DEV_IDLE) {

		/* Suspension events are handled specifically in this state.
		 * Resume the RNC right away.
		 */
		if (scu_get_event_type(event_code) == SCU_EVENT_TYPE_RNC_SUSPEND_TX ||
		    scu_get_event_type(event_code) == SCU_EVENT_TYPE_RNC_SUSPEND_TX_RX)
			status = sci_remote_node_context_resume(&idev->rnc, NULL, NULL);
	}

	return status;
}

static void sci_remote_device_start_request(struct isci_remote_device *idev,
						 struct isci_request *ireq,
						 enum sci_status status)
{
	struct isci_port *iport = idev->owning_port;

	/* cleanup requests that failed after starting on the port */
	if (status != SCI_SUCCESS)
		sci_port_complete_io(iport, idev, ireq);
	else {
		kref_get(&idev->kref);
		idev->started_request_count++;
	}
}

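/* Start an I/O request on the device.  The request is started on the owning
 * port and the remote node context; SATA devices additionally move into the
 * NCQ or command substate and SMP devices into the command substate.  On
 * success the device's started request count is incremented.
 */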
enum sci_status sci_remote_device_start_io(struct isci_host *ihost,
						struct isci_remote_device *idev,
						struct isci_request *ireq)
{
	struct sci_base_state_machine *sm = &idev->sm;
	enum sci_remote_device_states state = sm->current_state_id;
	struct isci_port *iport = idev->owning_port;
	enum sci_status status;

	switch (state) {
	case SCI_DEV_INITIAL:
	case SCI_DEV_STOPPED:
	case SCI_DEV_STARTING:
	case SCI_STP_DEV_NCQ_ERROR:
	case SCI_DEV_STOPPING:
	case SCI_DEV_FAILED:
	case SCI_DEV_RESETTING:
	case SCI_DEV_FINAL:
	default:
		dev_warn(scirdev_to_dev(idev), "%s: in wrong state: %s\n",
			 __func__, dev_state_name(state));
		return SCI_FAILURE_INVALID_STATE;
	case SCI_DEV_READY:
		/* Attempt to start an io request for this device object.  The
		 * remote device object will issue the start request for the io
		 * and, if successful, will start the request on the port
		 * object and then increment its own request count.
		 */
		status = sci_port_start_io(iport, idev, ireq);
		if (status != SCI_SUCCESS)
			return status;

		status = sci_remote_node_context_start_io(&idev->rnc, ireq);
		if (status != SCI_SUCCESS)
			break;

		status = sci_request_start(ireq);
		break;
	case SCI_STP_DEV_IDLE: {
		/* Handle the start io operation for a sata device that is in
		 * the command idle state:
		 * - Evaluate the type of IO request to be started.
		 * - If it is an NCQ request, change to the NCQ substate.
		 * - If it is any other command, change to the CMD substate.
		 *
		 * If this is a softreset we may want to have a different
		 * substate.
		 */
		enum sci_remote_device_states new_state;
		struct sas_task *task = isci_request_access_task(ireq);

		status = sci_port_start_io(iport, idev, ireq);
		if (status != SCI_SUCCESS)
			return status;

		status = sci_remote_node_context_start_io(&idev->rnc, ireq);
		if (status != SCI_SUCCESS)
			break;

		status = sci_request_start(ireq);
		if (status != SCI_SUCCESS)
			break;

		if (task->ata_task.use_ncq)
			new_state = SCI_STP_DEV_NCQ;
		else {
			idev->working_request = ireq;
			new_state = SCI_STP_DEV_CMD;
		}
		sci_change_state(sm, new_state);
		break;
	}
	case SCI_STP_DEV_NCQ: {
		struct sas_task *task = isci_request_access_task(ireq);

		if (task->ata_task.use_ncq) {
			status = sci_port_start_io(iport, idev, ireq);
			if (status != SCI_SUCCESS)
				return status;

			status = sci_remote_node_context_start_io(&idev->rnc, ireq);
			if (status != SCI_SUCCESS)
				break;

			status = sci_request_start(ireq);
		} else
			return SCI_FAILURE_INVALID_STATE;
		break;
	}
	case SCI_STP_DEV_AWAIT_RESET:
		return SCI_FAILURE_REMOTE_DEVICE_RESET_REQUIRED;
	case SCI_SMP_DEV_IDLE:
		status = sci_port_start_io(iport, idev, ireq);
		if (status != SCI_SUCCESS)
			return status;

		status = sci_remote_node_context_start_io(&idev->rnc, ireq);
		if (status != SCI_SUCCESS)
			break;

		status = sci_request_start(ireq);
		if (status != SCI_SUCCESS)
			break;

		idev->working_request = ireq;
		sci_change_state(&idev->sm, SCI_SMP_DEV_CMD);
		break;
	case SCI_STP_DEV_CMD:
	case SCI_SMP_DEV_CMD:
		/* The device is already handling a command; it cannot accept
		 * new commands until this one is complete.
		 */
		return SCI_FAILURE_INVALID_STATE;
	}

	sci_remote_device_start_request(idev, ireq, status);
	return status;
}

static enum sci_status common_complete_io(struct isci_port *iport,
					  struct isci_remote_device *idev,
					  struct isci_request *ireq)
{
	enum sci_status status;

	status = sci_request_complete(ireq);
	if (status != SCI_SUCCESS)
		return status;

	status = sci_port_complete_io(iport, idev, ireq);
	if (status != SCI_SUCCESS)
		return status;

	sci_remote_device_decrement_request_count(idev);
	return status;
}

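/* Complete an I/O request: the request and the owning port are completed and
 * the started request count is decremented.  STP devices return to the idle
 * substate (or await a reset if the request failed with a reset-required
 * status), SMP devices return to idle, and a stopping device destructs its
 * RNC once the last request completes.
 */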
enum sci_status sci_remote_device_complete_io(struct isci_host *ihost,
						   struct isci_remote_device *idev,
						   struct isci_request *ireq)
{
	struct sci_base_state_machine *sm = &idev->sm;
	enum sci_remote_device_states state = sm->current_state_id;
	struct isci_port *iport = idev->owning_port;
	enum sci_status status;

	switch (state) {
	case SCI_DEV_INITIAL:
	case SCI_DEV_STOPPED:
	case SCI_DEV_STARTING:
	case SCI_STP_DEV_IDLE:
	case SCI_SMP_DEV_IDLE:
	case SCI_DEV_FAILED:
	case SCI_DEV_FINAL:
	default:
		dev_warn(scirdev_to_dev(idev), "%s: in wrong state: %s\n",
			 __func__, dev_state_name(state));
		return SCI_FAILURE_INVALID_STATE;
	case SCI_DEV_READY:
	case SCI_STP_DEV_AWAIT_RESET:
	case SCI_DEV_RESETTING:
		status = common_complete_io(iport, idev, ireq);
		break;
	case SCI_STP_DEV_CMD:
	case SCI_STP_DEV_NCQ:
	case SCI_STP_DEV_NCQ_ERROR:
	case SCI_STP_DEV_ATAPI_ERROR:
		status = common_complete_io(iport, idev, ireq);
		if (status != SCI_SUCCESS)
			break;

		if (ireq->sci_status == SCI_FAILURE_REMOTE_DEVICE_RESET_REQUIRED) {
			/* This request caused a hardware error, so the device
			 * needs a LUN reset.  Force the state machine out of
			 * the command state so that the remaining I/Os reach
			 * the RNC state handler and are completed with a
			 * status of "DEVICE_RESET_REQUIRED" instead of
			 * "INVALID STATE".
			 */
			sci_change_state(sm, SCI_STP_DEV_AWAIT_RESET);
		} else if (idev->started_request_count == 0)
			sci_change_state(sm, SCI_STP_DEV_IDLE);
		break;
	case SCI_SMP_DEV_CMD:
		status = common_complete_io(iport, idev, ireq);
		if (status != SCI_SUCCESS)
			break;
		sci_change_state(sm, SCI_SMP_DEV_IDLE);
		break;
	case SCI_DEV_STOPPING:
		status = common_complete_io(iport, idev, ireq);
		if (status != SCI_SUCCESS)
			break;

		if (idev->started_request_count == 0)
			sci_remote_node_context_destruct(&idev->rnc,
							 rnc_destruct_done,
							 idev);
		break;
	}

	if (status != SCI_SUCCESS)
		dev_err(scirdev_to_dev(idev),
			"%s: Port:0x%p Device:0x%p Request:0x%p Status:0x%x "
			"could not complete\n", __func__, iport,
			idev, ireq, status);
	else
		isci_put_device(idev);

	return status;
}

static void sci_remote_device_continue_request(void *dev)
{
	struct isci_remote_device *idev = dev;

	/* we need to check if this request is still valid to continue. */
	if (idev->working_request)
		sci_controller_continue_io(idev->working_request);
}

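/* Start a task management request.  For STP devices the RNC is suspended so
 * the TCi-to-NCQ mapping can be cleaned up, and the task context is posted
 * only after the RNC is resumed via the continue-request callback; SSP
 * devices start the task through the RNC directly.
 */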
enum sci_status sci_remote_device_start_task(struct isci_host *ihost,
						  struct isci_remote_device *idev,
						  struct isci_request *ireq)
{
	struct sci_base_state_machine *sm = &idev->sm;
	enum sci_remote_device_states state = sm->current_state_id;
	struct isci_port *iport = idev->owning_port;
	enum sci_status status;

	switch (state) {
	case SCI_DEV_INITIAL:
	case SCI_DEV_STOPPED:
	case SCI_DEV_STARTING:
	case SCI_SMP_DEV_IDLE:
	case SCI_SMP_DEV_CMD:
	case SCI_DEV_STOPPING:
	case SCI_DEV_FAILED:
	case SCI_DEV_RESETTING:
	case SCI_DEV_FINAL:
	default:
		dev_warn(scirdev_to_dev(idev), "%s: in wrong state: %s\n",
			 __func__, dev_state_name(state));
		return SCI_FAILURE_INVALID_STATE;
	case SCI_STP_DEV_IDLE:
	case SCI_STP_DEV_CMD:
	case SCI_STP_DEV_NCQ:
	case SCI_STP_DEV_NCQ_ERROR:
	case SCI_STP_DEV_AWAIT_RESET:
		status = sci_port_start_io(iport, idev, ireq);
		if (status != SCI_SUCCESS)
			return status;

		status = sci_request_start(ireq);
		if (status != SCI_SUCCESS)
			goto out;

		/* Note: If the remote device state is not IDLE this will
		 * replace the request that probably resulted in the task
		 * management request.
		 */
		idev->working_request = ireq;
		sci_change_state(sm, SCI_STP_DEV_CMD);

		/* The remote node context must cleanup the TCi to NCQ mapping
		 * table.  The only way to do this correctly is to either write
		 * to the TLCR register or to invalidate and repost the RNC. In
		 * either case the remote node context state machine will take
		 * the correct action when the remote node context is suspended
		 * and later resumed.
		 */
		sci_remote_device_suspend(idev,
					  SCI_SW_SUSPEND_LINKHANG_DETECT);

		status = sci_remote_node_context_start_task(&idev->rnc, ireq,
				sci_remote_device_continue_request, idev);

	out:
		sci_remote_device_start_request(idev, ireq, status);
		/* Let the controller's start-request handler know that it
		 * cannot post the TC yet; the callback above posts the TC
		 * when the RNC is resumed.
		 */
		return SCI_FAILURE_RESET_DEVICE_PARTIAL_SUCCESS;
	case SCI_DEV_READY:
		status = sci_port_start_io(iport, idev, ireq);
		if (status != SCI_SUCCESS)
			return status;

		/* Resume the RNC as needed: */
		status = sci_remote_node_context_start_task(&idev->rnc, ireq,
							    NULL, NULL);
		if (status != SCI_SUCCESS)
			break;

		status = sci_request_start(ireq);
		break;
	}
	sci_remote_device_start_request(idev, ireq, status);

	return status;
}

void sci_remote_device_post_request(struct isci_remote_device *idev, u32 request)
{
	struct isci_port *iport = idev->owning_port;
	u32 context;

	context = request |
		  (ISCI_PEG << SCU_CONTEXT_COMMAND_PROTOCOL_ENGINE_GROUP_SHIFT) |
		  (iport->physical_port_index << SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT) |
		  idev->rnc.remote_node_index;

	sci_controller_post_request(iport->owning_controller, context);
}

/* Called once the remote node context has transitioned to a
 * ready state.  This is the indication that the remote device object can also
 * transition to ready.
 */
static void remote_device_resume_done(void *_dev)
{
	struct isci_remote_device *idev = _dev;

	if (is_remote_device_ready(idev))
		return;

	/* go 'ready' if we are not already in a ready state */
	sci_change_state(&idev->sm, SCI_DEV_READY);
}

static void sci_stp_remote_device_ready_idle_substate_resume_complete_handler(void *_dev)
{
	struct isci_remote_device *idev = _dev;
	struct isci_host *ihost = idev->owning_port->owning_controller;

	/* For NCQ operation we do not issue an isci_remote_device_not_ready().
	 * As a result, avoid sending the ready notification.
	 */
	if (idev->sm.previous_state_id != SCI_STP_DEV_NCQ)
		isci_remote_device_ready(ihost, idev);
}

static void sci_remote_device_initial_state_enter(struct sci_base_state_machine *sm)
{
	struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm);

	/* Initial state is a transitional state to the stopped state */
	sci_change_state(&idev->sm, SCI_DEV_STOPPED);
}

/**
 * sci_remote_device_destruct() - free remote node context and destruct
 * @idev: This parameter specifies the remote device to be destructed.
 *
 * Remote device objects are a limited resource.  As such, they must be
 * protected.  Thus calls to construct and destruct are mutually exclusive and
 * non-reentrant.  The return value indicates whether the device was
 * successfully destructed or if some failure occurred.  SCI_SUCCESS is
 * returned if the device is successfully destructed.
 * SCI_FAILURE_INVALID_REMOTE_DEVICE is returned if the supplied device isn't
 * valid (e.g. it has already been destroyed, the handle isn't valid, etc.).
 */
static enum sci_status sci_remote_device_destruct(struct isci_remote_device *idev)
{
	struct sci_base_state_machine *sm = &idev->sm;
	enum sci_remote_device_states state = sm->current_state_id;
	struct isci_host *ihost;

	if (state != SCI_DEV_STOPPED) {
		dev_warn(scirdev_to_dev(idev), "%s: in wrong state: %s\n",
			 __func__, dev_state_name(state));
		return SCI_FAILURE_INVALID_STATE;
	}

	ihost = idev->owning_port->owning_controller;
	sci_controller_free_remote_node_context(ihost, idev,
						     idev->rnc.remote_node_index);
	idev->rnc.remote_node_index = SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX;
	sci_change_state(sm, SCI_DEV_FINAL);

	return SCI_SUCCESS;
}

/**
 * isci_remote_device_deconstruct() - This function frees an isci_remote_device.
 * @ihost: This parameter specifies the isci host object.
 * @idev: This parameter specifies the remote device to be freed.
 *
 */
static void isci_remote_device_deconstruct(struct isci_host *ihost, struct isci_remote_device *idev)
{
	dev_dbg(&ihost->pdev->dev,
		"%s: isci_device = %p\n", __func__, idev);

	/* There should not be any outstanding I/Os.  All paths to
	 * here should go through isci_remote_device_nuke_requests.
	 * If we hit this condition, we will need a way to complete
	 * in-flight io requests. */
	BUG_ON(idev->started_request_count > 0);

	sci_remote_device_destruct(idev);
	list_del_init(&idev->node);
	isci_put_device(idev);
}

static void sci_remote_device_stopped_state_enter(struct sci_base_state_machine *sm)
{
	struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm);
	struct isci_host *ihost = idev->owning_port->owning_controller;
	u32 prev_state;

	/* If we are entering from the stopping state let the SCI User know that
	 * the stop operation has completed.
	 */
	prev_state = idev->sm.previous_state_id;
	if (prev_state == SCI_DEV_STOPPING)
		isci_remote_device_deconstruct(ihost, idev);

	sci_controller_remote_device_stopped(ihost, idev);
}

static void sci_remote_device_starting_state_enter(struct sci_base_state_machine *sm)
{
	struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm);
	struct isci_host *ihost = idev->owning_port->owning_controller;

	isci_remote_device_not_ready(ihost, idev,
				     SCIC_REMOTE_DEVICE_NOT_READY_START_REQUESTED);
}

static void sci_remote_device_ready_state_enter(struct sci_base_state_machine *sm)
{
	struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm);
	struct isci_host *ihost = idev->owning_port->owning_controller;
	struct domain_device *dev = idev->domain_dev;

	if (dev->dev_type == SAS_SATA_DEV || (dev->tproto & SAS_PROTOCOL_SATA)) {
		sci_change_state(&idev->sm, SCI_STP_DEV_IDLE);
	} else if (dev_is_expander(dev->dev_type)) {
		sci_change_state(&idev->sm, SCI_SMP_DEV_IDLE);
	} else
		isci_remote_device_ready(ihost, idev);
}

static void sci_remote_device_ready_state_exit(struct sci_base_state_machine *sm)
{
	struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm);
	struct domain_device *dev = idev->domain_dev;

	if (dev->dev_type == SAS_END_DEVICE) {
		struct isci_host *ihost = idev->owning_port->owning_controller;

		isci_remote_device_not_ready(ihost, idev,
					     SCIC_REMOTE_DEVICE_NOT_READY_STOP_REQUESTED);
	}
}

static void sci_remote_device_resetting_state_enter(struct sci_base_state_machine *sm)
{
	struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm);
	struct isci_host *ihost = idev->owning_port->owning_controller;

	dev_dbg(&ihost->pdev->dev,
		"%s: isci_device = %p\n", __func__, idev);

	sci_remote_device_suspend(idev, SCI_SW_SUSPEND_LINKHANG_DETECT);
}

static void sci_remote_device_resetting_state_exit(struct sci_base_state_machine *sm)
{
	struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm);
	struct isci_host *ihost = idev->owning_port->owning_controller;

	dev_dbg(&ihost->pdev->dev,
		"%s: isci_device = %p\n", __func__, idev);

	sci_remote_node_context_resume(&idev->rnc, NULL, NULL);
}

static void sci_stp_remote_device_ready_idle_substate_enter(struct sci_base_state_machine *sm)
{
	struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm);

	idev->working_request = NULL;
	if (sci_remote_node_context_is_ready(&idev->rnc)) {
		/*
		 * Since the RNC is ready, it's alright to finish completion
		 * processing (e.g. signal the remote device is ready). */
		sci_stp_remote_device_ready_idle_substate_resume_complete_handler(idev);
	} else {
		sci_remote_node_context_resume(&idev->rnc,
			sci_stp_remote_device_ready_idle_substate_resume_complete_handler,
			idev);
	}
}

static void sci_stp_remote_device_ready_cmd_substate_enter(struct sci_base_state_machine *sm)
{
	struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm);
	struct isci_host *ihost = idev->owning_port->owning_controller;

	BUG_ON(idev->working_request == NULL);

	isci_remote_device_not_ready(ihost, idev,
				     SCIC_REMOTE_DEVICE_NOT_READY_SATA_REQUEST_STARTED);
}

static void sci_stp_remote_device_ready_ncq_error_substate_enter(struct sci_base_state_machine *sm)
{
	struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm);
	struct isci_host *ihost = idev->owning_port->owning_controller;

	if (idev->not_ready_reason == SCIC_REMOTE_DEVICE_NOT_READY_SATA_SDB_ERROR_FIS_RECEIVED)
		isci_remote_device_not_ready(ihost, idev,
					     idev->not_ready_reason);
}

static void sci_smp_remote_device_ready_idle_substate_enter(struct sci_base_state_machine *sm)
{
	struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm);
	struct isci_host *ihost = idev->owning_port->owning_controller;

	isci_remote_device_ready(ihost, idev);
}

static void sci_smp_remote_device_ready_cmd_substate_enter(struct sci_base_state_machine *sm)
{
	struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm);
	struct isci_host *ihost = idev->owning_port->owning_controller;

	BUG_ON(idev->working_request == NULL);

	isci_remote_device_not_ready(ihost, idev,
				     SCIC_REMOTE_DEVICE_NOT_READY_SMP_REQUEST_STARTED);
}

static void sci_smp_remote_device_ready_cmd_substate_exit(struct sci_base_state_machine *sm)
{
	struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm);

	idev->working_request = NULL;
}

static const struct sci_base_state sci_remote_device_state_table[] = {
	[SCI_DEV_INITIAL] = {
		.enter_state = sci_remote_device_initial_state_enter,
	},
	[SCI_DEV_STOPPED] = {
		.enter_state = sci_remote_device_stopped_state_enter,
	},
	[SCI_DEV_STARTING] = {
		.enter_state = sci_remote_device_starting_state_enter,
	},
	[SCI_DEV_READY] = {
		.enter_state = sci_remote_device_ready_state_enter,
		.exit_state  = sci_remote_device_ready_state_exit
	},
	[SCI_STP_DEV_IDLE] = {
		.enter_state = sci_stp_remote_device_ready_idle_substate_enter,
	},
	[SCI_STP_DEV_CMD] = {
		.enter_state = sci_stp_remote_device_ready_cmd_substate_enter,
	},
	[SCI_STP_DEV_NCQ] = { },
	[SCI_STP_DEV_NCQ_ERROR] = {
		.enter_state = sci_stp_remote_device_ready_ncq_error_substate_enter,
	},
	[SCI_STP_DEV_ATAPI_ERROR] = { },
	[SCI_STP_DEV_AWAIT_RESET] = { },
	[SCI_SMP_DEV_IDLE] = {
		.enter_state = sci_smp_remote_device_ready_idle_substate_enter,
	},
	[SCI_SMP_DEV_CMD] = {
		.enter_state = sci_smp_remote_device_ready_cmd_substate_enter,
		.exit_state  = sci_smp_remote_device_ready_cmd_substate_exit,
	},
	[SCI_DEV_STOPPING] = { },
	[SCI_DEV_FAILED] = { },
	[SCI_DEV_RESETTING] = {
		.enter_state = sci_remote_device_resetting_state_enter,
		.exit_state  = sci_remote_device_resetting_state_exit
	},
	[SCI_DEV_FINAL] = { },
};

/**
 * sci_remote_device_construct() - common construction
 * @iport: SAS/SATA port through which this device is accessed.
 * @idev: remote device to construct
 *
 * This routine just performs benign initialization and does not
 * allocate the remote_node_context which is left to
 * sci_remote_device_[de]a_construct().  sci_remote_device_destruct()
 * frees the remote_node_context(s) for the device.
 */
static void sci_remote_device_construct(struct isci_port *iport,
				  struct isci_remote_device *idev)
{
	idev->owning_port = iport;
	idev->started_request_count = 0;

	sci_init_sm(&idev->sm, sci_remote_device_state_table, SCI_DEV_INITIAL);

	sci_remote_node_context_construct(&idev->rnc,
					       SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX);
}

/*
 * sci_remote_device_da_construct() - construct direct attached device.
 *
 * The information (e.g. IAF, Signature FIS, etc.) necessary to build
 * the device is known to the SCI Core since it is contained in the
 * sci_phy object.  Remote node context(s) is/are a global resource
 * allocated by this routine, freed by sci_remote_device_destruct().
 *
 * Returns:
 * SCI_FAILURE_DEVICE_EXISTS - device has already been constructed.
 * SCI_FAILURE_UNSUPPORTED_PROTOCOL - e.g. sas device attached to
 * sata-only controller instance.
 * SCI_FAILURE_INSUFFICIENT_RESOURCES - remote node contexts exhausted.
 */
static enum sci_status sci_remote_device_da_construct(struct isci_port *iport,
						       struct isci_remote_device *idev)
{
	enum sci_status status;
	struct sci_port_properties properties;

	sci_remote_device_construct(iport, idev);

	sci_port_get_properties(iport, &properties);
	/* Get accurate port width from port's phy mask for a DA device. */
	idev->device_port_width = hweight32(properties.phy_mask);

	status = sci_controller_allocate_remote_node_context(iport->owning_controller,
							     idev,
							     &idev->rnc.remote_node_index);

	if (status != SCI_SUCCESS)
		return status;

	idev->connection_rate = sci_port_get_max_allowed_speed(iport);

	return SCI_SUCCESS;
}

/*
 * sci_remote_device_ea_construct() - construct expander attached device
 *
 * Remote node context(s) is/are a global resource allocated by this
 * routine, freed by sci_remote_device_destruct().
 *
 * Returns:
 * SCI_FAILURE_DEVICE_EXISTS - device has already been constructed.
 * SCI_FAILURE_UNSUPPORTED_PROTOCOL - e.g. sas device attached to
 * sata-only controller instance.
 * SCI_FAILURE_INSUFFICIENT_RESOURCES - remote node contexts exhausted.
 */
static enum sci_status sci_remote_device_ea_construct(struct isci_port *iport,
						       struct isci_remote_device *idev)
{
	struct domain_device *dev = idev->domain_dev;
	enum sci_status status;

	sci_remote_device_construct(iport, idev);

	status = sci_controller_allocate_remote_node_context(iport->owning_controller,
								  idev,
								  &idev->rnc.remote_node_index);
	if (status != SCI_SUCCESS)
		return status;

	/* For SAS-2 the physical link rate is actually a logical link
	 * rate that incorporates multiplexing.  The SCU doesn't
	 * incorporate multiplexing and for the purposes of the
	 * connection the logical link rate is the same as the
	 * physical.  Furthermore, the SAS-2 and SAS-1.1 fields overlay
	 * one another, so this code works for both situations.
	 */
	idev->connection_rate = min_t(u16, sci_port_get_max_allowed_speed(iport),
					 dev->linkrate);

	/* @todo Should I assign the port width by reading all of the phys on the port? */
	idev->device_port_width = 1;

	return SCI_SUCCESS;
}

enum sci_status sci_remote_device_resume(
	struct isci_remote_device *idev,
	scics_sds_remote_node_context_callback cb_fn,
	void *cb_p)
{
	enum sci_status status;

	status = sci_remote_node_context_resume(&idev->rnc, cb_fn, cb_p);
	if (status != SCI_SUCCESS)
		dev_dbg(scirdev_to_dev(idev), "%s: failed to resume: %d\n",
			__func__, status);
	return status;
}

static void isci_remote_device_resume_from_abort_complete(void *cbparam)
{
	struct isci_remote_device *idev = cbparam;
	struct isci_host *ihost = idev->owning_port->owning_controller;
	scics_sds_remote_node_context_callback abort_resume_cb =
		idev->abort_resume_cb;

	dev_dbg(scirdev_to_dev(idev), "%s: passing-along resume: %p\n",
		__func__, abort_resume_cb);

	if (abort_resume_cb != NULL) {
		idev->abort_resume_cb = NULL;
		abort_resume_cb(idev->abort_resume_cbparam);
	}
	clear_bit(IDEV_ABORT_PATH_RESUME_PENDING, &idev->flags);
	wake_up(&ihost->eventq);
}

static bool isci_remote_device_test_resume_done(
	struct isci_host *ihost,
	struct isci_remote_device *idev)
{
	unsigned long flags;
	bool done;

	spin_lock_irqsave(&ihost->scic_lock, flags);
	done = !test_bit(IDEV_ABORT_PATH_RESUME_PENDING, &idev->flags)
		|| test_bit(IDEV_STOP_PENDING, &idev->flags)
		|| sci_remote_node_context_is_being_destroyed(&idev->rnc);
	spin_unlock_irqrestore(&ihost->scic_lock, flags);

	return done;
}

static void isci_remote_device_wait_for_resume_from_abort(
	struct isci_host *ihost,
	struct isci_remote_device *idev)
{
	dev_dbg(&ihost->pdev->dev, "%s: starting resume wait: %p\n",
		 __func__, idev);

	#define MAX_RESUME_MSECS 10000
	if (!wait_event_timeout(ihost->eventq,
				isci_remote_device_test_resume_done(ihost, idev),
				msecs_to_jiffies(MAX_RESUME_MSECS))) {

		dev_warn(&ihost->pdev->dev, "%s: #### Timeout waiting for "
			 "resume: %p\n", __func__, idev);
	}
	clear_bit(IDEV_ABORT_PATH_RESUME_PENDING, &idev->flags);

	dev_dbg(&ihost->pdev->dev, "%s: resume wait done: %p\n",
		 __func__, idev);
}

enum sci_status isci_remote_device_resume_from_abort(
	struct isci_host *ihost,
	struct isci_remote_device *idev)
{
	unsigned long flags;
	enum sci_status status = SCI_SUCCESS;
	int destroyed;

	spin_lock_irqsave(&ihost->scic_lock, flags);
	/* Preserve any current resume callbacks, for instance from other
	 * resumptions.
	 */
	idev->abort_resume_cb = idev->rnc.user_callback;
	idev->abort_resume_cbparam = idev->rnc.user_cookie;
	set_bit(IDEV_ABORT_PATH_RESUME_PENDING, &idev->flags);
	clear_bit(IDEV_ABORT_PATH_ACTIVE, &idev->flags);
	destroyed = sci_remote_node_context_is_being_destroyed(&idev->rnc);
	if (!destroyed)
		status = sci_remote_device_resume(
			idev, isci_remote_device_resume_from_abort_complete,
			idev);
	spin_unlock_irqrestore(&ihost->scic_lock, flags);
	if (!destroyed && (status == SCI_SUCCESS))
		isci_remote_device_wait_for_resume_from_abort(ihost, idev);
	else
		clear_bit(IDEV_ABORT_PATH_RESUME_PENDING, &idev->flags);

	return status;
}

/**
 * sci_remote_device_start() - This method starts the supplied remote device,
 *    enabling normal IO requests to flow through to it.
 * @idev: This parameter specifies the device to be started.
 * @timeout: This parameter specifies the number of milliseconds in which the
 *    start operation should complete.
 *
 * An indication of whether the device was successfully started. SCI_SUCCESS
 * This value is returned if the device was successfully started.
 * SCI_FAILURE_INVALID_PHY This value is returned if the user attempts to start
 * the device when there have been no phys added to it.
 */
static enum sci_status sci_remote_device_start(struct isci_remote_device *idev,
					       u32 timeout)
{
	struct sci_base_state_machine *sm = &idev->sm;
	enum sci_remote_device_states state = sm->current_state_id;
	enum sci_status status;

	if (state != SCI_DEV_STOPPED) {
		dev_warn(scirdev_to_dev(idev), "%s: in wrong state: %s\n",
			 __func__, dev_state_name(state));
		return SCI_FAILURE_INVALID_STATE;
	}

	status = sci_remote_device_resume(idev, remote_device_resume_done,
					  idev);
	if (status != SCI_SUCCESS)
		return status;

	sci_change_state(sm, SCI_DEV_STARTING);

	return SCI_SUCCESS;
}

static enum sci_status isci_remote_device_construct(struct isci_port *iport,
						    struct isci_remote_device *idev)
{
	struct isci_host *ihost = iport->isci_host;
	struct domain_device *dev = idev->domain_dev;
	enum sci_status status;

	if (dev->parent && dev_is_expander(dev->parent->dev_type))
		status = sci_remote_device_ea_construct(iport, idev);
	else
		status = sci_remote_device_da_construct(iport, idev);

	if (status != SCI_SUCCESS) {
		dev_dbg(&ihost->pdev->dev, "%s: construct failed: %d\n",
			__func__, status);

		return status;
	}

	/* start the device. */
	status = sci_remote_device_start(idev, ISCI_REMOTE_DEVICE_START_TIMEOUT);

	if (status != SCI_SUCCESS)
		dev_warn(&ihost->pdev->dev, "remote device start failed: %d\n",
			 status);

	return status;
}

/**
 * isci_remote_device_alloc() - This function builds the isci_remote_device
 *    when a libsas dev_found message is received.
 * @ihost: This parameter specifies the isci host object.
 * @iport: This parameter specifies the isci_port connected to this device.
 *
 * pointer to new isci_remote_device.
 */
static struct isci_remote_device *
isci_remote_device_alloc(struct isci_host *ihost, struct isci_port *iport)
{
	struct isci_remote_device *idev;
	int i;

	for (i = 0; i < SCI_MAX_REMOTE_DEVICES; i++) {
		idev = &ihost->devices[i];
		if (!test_and_set_bit(IDEV_ALLOCATED, &idev->flags))
			break;
	}

	if (i >= SCI_MAX_REMOTE_DEVICES) {
		dev_warn(&ihost->pdev->dev, "%s: failed\n", __func__);
		return NULL;
	}
	if (WARN_ONCE(!list_empty(&idev->node), "found non-idle remote device\n"))
		return NULL;

	return idev;
}

void isci_remote_device_release(struct kref *kref)
{
	struct isci_remote_device *idev = container_of(kref, typeof(*idev), kref);
	struct isci_host *ihost = idev->isci_port->isci_host;

	idev->domain_dev = NULL;
	idev->isci_port = NULL;
	clear_bit(IDEV_START_PENDING, &idev->flags);
	clear_bit(IDEV_STOP_PENDING, &idev->flags);
	clear_bit(IDEV_IO_READY, &idev->flags);
	clear_bit(IDEV_GONE, &idev->flags);
	smp_mb__before_atomic();
	clear_bit(IDEV_ALLOCATED, &idev->flags);
	wake_up(&ihost->eventq);
}

/**
 * isci_remote_device_stop() - This function is called internally to stop the
 *    remote device.
 * @ihost: This parameter specifies the isci host object.
 * @idev: This parameter specifies the remote device.
 *
 * The status of the ihost request to stop.
 */
enum sci_status isci_remote_device_stop(struct isci_host *ihost, struct isci_remote_device *idev)
{
	enum sci_status status;
	unsigned long flags;

	dev_dbg(&ihost->pdev->dev,
		"%s: isci_device = %p\n", __func__, idev);

	spin_lock_irqsave(&ihost->scic_lock, flags);
	idev->domain_dev->lldd_dev = NULL; /* disable new lookups */
	set_bit(IDEV_GONE, &idev->flags);

	set_bit(IDEV_STOP_PENDING, &idev->flags);
	status = sci_remote_device_stop(idev, 50);
	spin_unlock_irqrestore(&ihost->scic_lock, flags);

	/* Wait for the stop complete callback. */
	if (WARN_ONCE(status != SCI_SUCCESS, "failed to stop device\n"))
		/* nothing to wait for */;
	else
		wait_for_device_stop(ihost, idev);

	dev_dbg(&ihost->pdev->dev,
		"%s: isci_device = %p, waiting done.\n", __func__, idev);

	return status;
}

/**
 * isci_remote_device_gone() - This function is called by libsas when a domain
 *    device is removed.
 * @dev: This parameter specifies the libsas domain device.
 */
void isci_remote_device_gone(struct domain_device *dev)
{
	struct isci_host *ihost = dev_to_ihost(dev);
	struct isci_remote_device *idev = dev->lldd_dev;

	dev_dbg(&ihost->pdev->dev,
		"%s: domain_device = %p, isci_device = %p, isci_port = %p\n",
		__func__, dev, idev, idev->isci_port);

	isci_remote_device_stop(ihost, idev);
}

/**
 * isci_remote_device_found() - This function is called by libsas when a remote
 *    device is discovered. A remote device object is created and started. The
 *    function then sleeps until the sci core device started message is
 *    received.
 * @dev: This parameter specifies the libsas domain device.
 *
 * status, zero indicates success.
 */
int isci_remote_device_found(struct domain_device *dev)
{
	struct isci_host *isci_host = dev_to_ihost(dev);
	struct isci_port *isci_port = dev->port->lldd_port;
	struct isci_remote_device *isci_device;
	enum sci_status status;

	dev_dbg(&isci_host->pdev->dev,
		"%s: domain_device = %p\n", __func__, dev);

	if (!isci_port)
		return -ENODEV;

	isci_device = isci_remote_device_alloc(isci_host, isci_port);
	if (!isci_device)
		return -ENODEV;

	kref_init(&isci_device->kref);
	INIT_LIST_HEAD(&isci_device->node);

	spin_lock_irq(&isci_host->scic_lock);
	isci_device->domain_dev = dev;
	isci_device->isci_port = isci_port;
	list_add_tail(&isci_device->node, &isci_port->remote_dev_list);

	set_bit(IDEV_START_PENDING, &isci_device->flags);
	status = isci_remote_device_construct(isci_port, isci_device);

	dev_dbg(&isci_host->pdev->dev,
		"%s: isci_device = %p\n",
		__func__, isci_device);

	if (status == SCI_SUCCESS) {
		/* device came up, advertise it to the world */
		dev->lldd_dev = isci_device;
	} else
		isci_put_device(isci_device);
	spin_unlock_irq(&isci_host->scic_lock);

	/* wait for the device ready callback. */
	wait_for_device_start(isci_host, isci_device);

	return status == SCI_SUCCESS ? 0 : -ENODEV;
}

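/* Suspend the device's RNC, terminate the given request (or all requests when
 * ireq is NULL) and wait for the terminations to complete.  The RNC is left
 * suspended; resumption is the caller's responsibility.
 */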
enum sci_status isci_remote_device_suspend_terminate(
	struct isci_host *ihost,
	struct isci_remote_device *idev,
	struct isci_request *ireq)
{
	unsigned long flags;
	enum sci_status status;

	/* Put the device into suspension. */
	spin_lock_irqsave(&ihost->scic_lock, flags);
	set_bit(IDEV_ABORT_PATH_ACTIVE, &idev->flags);
	sci_remote_device_suspend(idev, SCI_SW_SUSPEND_LINKHANG_DETECT);
	spin_unlock_irqrestore(&ihost->scic_lock, flags);

	/* Terminate and wait for the completions. */
	status = isci_remote_device_terminate_requests(ihost, idev, ireq);
	if (status != SCI_SUCCESS)
		dev_dbg(&ihost->pdev->dev,
			"%s: isci_remote_device_terminate_requests(%p) "
				"returned %d!\n",
			__func__, idev, status);

	/* NOTE: RNC resumption is left to the caller! */
	return status;
}

int isci_remote_device_is_safe_to_abort(
	struct isci_remote_device *idev)
{
	return sci_remote_node_context_is_safe_to_abort(&idev->rnc);
}

enum sci_status sci_remote_device_abort_requests_pending_abort(
	struct isci_remote_device *idev)
{
	return sci_remote_device_terminate_reqs_checkabort(idev, 1);
}

enum sci_status isci_remote_device_reset_complete(
	struct isci_host *ihost,
	struct isci_remote_device *idev)
{
	unsigned long flags;
	enum sci_status status;

	spin_lock_irqsave(&ihost->scic_lock, flags);
	status = sci_remote_device_reset_complete(idev);
	spin_unlock_irqrestore(&ihost->scic_lock, flags);

	return status;
}

void isci_dev_set_hang_detection_timeout(
	struct isci_remote_device *idev,
	u32 timeout)
{
	if (dev_is_sata(idev->domain_dev)) {
		if (timeout) {
			if (test_and_set_bit(IDEV_RNC_LLHANG_ENABLED,
					     &idev->flags))
				return;  /* Already enabled. */
		} else if (!test_and_clear_bit(IDEV_RNC_LLHANG_ENABLED,
					       &idev->flags))
			return;  /* Not enabled. */

		sci_port_set_hang_detection_timeout(idev->owning_port,
						    timeout);
	}
}