/* ghd.c revision 1709:39a1331cb1e3 */
1/*
2 * CDDL HEADER START
3 *
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
7 *
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
12 *
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 */
21
22/*
23 * Copyright 2004 Sun Microsystems, Inc.  All rights reserved.
24 * Use is subject to license terms.
25 */
26
27#pragma ident	"%Z%%M%	%I%	%E% SMI"
28
29#include <sys/types.h>
30#include <sys/kmem.h>
31#include <sys/debug.h>
32#include <sys/scsi/scsi.h>
33
34#include "ghd.h"
35
/* ghd_poll() function codes: what condition ghd_poll() busy-waits for */
typedef enum {
	GHD_POLL_REQUEST,	/* wait for a specific request */
	GHD_POLL_DEVICE,	/* wait for a specific device to idle */
	GHD_POLL_ALL		/* wait for the whole bus to idle */
} gpoll_t;
42
/*
 * Local functions:
 */
static	gcmd_t	*ghd_doneq_get(ccc_t *cccp);
static	void	 ghd_doneq_pollmode_enter(ccc_t *cccp);
static	void	 ghd_doneq_pollmode_exit(ccc_t *cccp);
static	uint_t	 ghd_doneq_process(caddr_t arg);
static	void	 ghd_do_reset_notify_callbacks(ccc_t *cccp);

static	uint_t	 ghd_dummy_intr(caddr_t arg);
static	int	 ghd_poll(ccc_t *cccp, gpoll_t polltype, ulong_t polltime,
			gcmd_t *poll_gcmdp, gtgt_t *gtgtp, void *intr_status);
55
56
/*
 * Local configuration variables
 */

/*
 * Give-up limits passed as the polltime argument to ghd_poll() by the
 * ghd_tran_*() entry points below.  ghd_poll() compares polltime
 * directly against an elapsed ddi_get_lbolt() delta, i.e. clock ticks.
 * NOTE(review): the value 5 reads like "5 seconds" -- confirm whether
 * ticks or seconds is the intended unit.
 */
ulong_t	ghd_tran_abort_timeout = 5;
ulong_t	ghd_tran_abort_lun_timeout = 5;
ulong_t	ghd_tran_reset_target_timeout = 5;
ulong_t	ghd_tran_reset_bus_timeout = 5;
65
/*
 * ghd_doneq_init():
 *
 *	Set up the done queue and its soft interrupt.  The doneq starts
 *	in poll mode (ccc_hba_pollmode == TRUE) so the soft interrupt
 *	handler will not run completion callbacks until
 *	ghd_doneq_pollmode_exit() below switches it off.  The doneq
 *	mutex is initialized with the soft interrupt's iblock cookie so
 *	it can be safely taken from the softintr handler.
 *
 *	Returns TRUE on success, FALSE if the softintr can't be added.
 */
static int
ghd_doneq_init(ccc_t *cccp)
{
	ddi_iblock_cookie_t iblock;

	L2_INIT(&cccp->ccc_doneq);
	cccp->ccc_hba_pollmode = TRUE;

	/* register ghd_doneq_process() as a low-priority soft interrupt */
	if (ddi_add_softintr(cccp->ccc_hba_dip, DDI_SOFTINT_LOW,
	    &cccp->ccc_doneq_softid, &iblock, NULL,
	    ghd_doneq_process, (caddr_t)cccp) != DDI_SUCCESS) {
		GDBG_ERROR(("ghd_doneq_init: add softintr failed cccp 0x%p\n",
		    (void *)cccp));
		return (FALSE);
	}

	mutex_init(&cccp->ccc_doneq_mutex, NULL, MUTEX_DRIVER, iblock);
	/* leave poll mode; kicks the softintr if anything is already queued */
	ghd_doneq_pollmode_exit(cccp);
	return (TRUE);
}
86
87/*
88 * ghd_complete():
89 *
90 *	The HBA driver calls this entry point when it's completely
91 *	done processing a request.
92 *
93 *	See the GHD_COMPLETE_INLINE() macro in ghd.h for the actual code.
94 */
95
void
ghd_complete(ccc_t *cccp, gcmd_t *gcmdp)
{
	/* caller (the HBA driver) must hold the HBA mutex */
	ASSERT(mutex_owned(&cccp->ccc_hba_mutex));
	GHD_COMPLETE_INLINE(cccp, gcmdp);
}
102
103
104/*
105 * ghd_doneq_put_head():
106 *
107 *	Mark the request done and prepend it to the doneq.
108 *	See the GHD_DONEQ_PUT_HEAD_INLINE() macros in ghd.h for
109 *	the actual code.
110 */
void
ghd_doneq_put_head(ccc_t *cccp, gcmd_t *gcmdp)
{
	/* entire body is the macro from ghd.h (no trailing ';' needed) */
	GHD_DONEQ_PUT_HEAD_INLINE(cccp, gcmdp)
}
116
117/*
118 * ghd_doneq_put_tail():
119 *
120 *	Mark the request done and append it to the doneq.
121 *	See the GHD_DONEQ_PUT_TAIL_INLINE() macros in ghd.h for
122 *	the actual code.
123 */
void
ghd_doneq_put_tail(ccc_t *cccp, gcmd_t *gcmdp)
{
	/* entire body is the macro from ghd.h (no trailing ';' needed) */
	GHD_DONEQ_PUT_TAIL_INLINE(cccp, gcmdp)
}
129
130static gcmd_t	*
131ghd_doneq_get(ccc_t *cccp)
132{
133	kmutex_t *doneq_mutexp = &cccp->ccc_doneq_mutex;
134	gcmd_t	 *gcmdp;
135
136	mutex_enter(doneq_mutexp);
137	if ((gcmdp = L2_next(&cccp->ccc_doneq)) != NULL)
138		L2_delete(&gcmdp->cmd_q);
139	mutex_exit(doneq_mutexp);
140	return (gcmdp);
141}
142
143
144static void
145ghd_doneq_pollmode_enter(ccc_t *cccp)
146{
147	kmutex_t *doneq_mutexp = &cccp->ccc_doneq_mutex;
148
149	mutex_enter(doneq_mutexp);
150	cccp->ccc_hba_pollmode = TRUE;
151	mutex_exit(doneq_mutexp);
152}
153
154
155static void
156ghd_doneq_pollmode_exit(ccc_t *cccp)
157{
158	kmutex_t *doneq_mutexp = &cccp->ccc_doneq_mutex;
159
160	mutex_enter(doneq_mutexp);
161	cccp->ccc_hba_pollmode = FALSE;
162	mutex_exit(doneq_mutexp);
163
164	/* trigger software interrupt for the completion callbacks */
165	if (!L2_EMPTY(&cccp->ccc_doneq))
166		ddi_trigger_softintr(cccp->ccc_doneq_softid);
167}
168
169
170/* ***************************************************************** */
171
172/*
173 *
174 * ghd_doneq_process()
175 *
176 *	This function is called directly from the software interrupt
177 *	handler.
178 *
179 *	The doneq is protected by a separate mutex than the
180 *	HBA mutex in order to avoid mutex contention on MP systems.
181 *
182 */
183
static uint_t
ghd_doneq_process(caddr_t arg)
{
	ccc_t *cccp =	(ccc_t *)arg;
	kmutex_t	*doneq_mutexp;
	gcmd_t		*gcmdp;
	int			rc = DDI_INTR_UNCLAIMED;

	doneq_mutexp = &cccp->ccc_doneq_mutex;

	for (;;) {
		mutex_enter(doneq_mutexp);
		/* skip if FLAG_NOINTR request in progress */
		if (cccp->ccc_hba_pollmode)
			break;	/* NB: leaves the loop with the mutex held */
		/* pop the first one from the done Q */
		if ((gcmdp = L2_next(&cccp->ccc_doneq)) == NULL)
			break;	/* queue drained; mutex still held */
		L2_delete(&gcmdp->cmd_q);

		if (gcmdp->cmd_flags & GCMDFLG_RESET_NOTIFY) {
			/* special request; processed here and discarded */
			ghd_do_reset_notify_callbacks(cccp);
			ghd_gcmd_free(gcmdp);
			mutex_exit(doneq_mutexp);
			continue;
		}

		/*
		 * drop the mutex since completion
		 * function can re-enter the top half via
		 * ghd_transport()
		 */
		mutex_exit(doneq_mutexp);
		gcmdp->cmd_state = GCMD_STATE_IDLE;
		(*cccp->ccc_hba_complete)(cccp->ccc_hba_handle, gcmdp, TRUE);
#ifdef notyet
		/* I don't think this is ever necessary */
		rc = DDI_INTR_CLAIMED;
#endif
	}
	/* both break paths above arrive here with the mutex held */
	mutex_exit(doneq_mutexp);
	return (rc);
}
228
229static void
230ghd_do_reset_notify_callbacks(ccc_t *cccp)
231{
232	ghd_reset_notify_list_t *rnp;
233	L2el_t *rnl = &cccp->ccc_reset_notify_list;
234
235	ASSERT(mutex_owned(&cccp->ccc_doneq_mutex));
236
237	/* lock the reset notify list while we operate on it */
238	mutex_enter(&cccp->ccc_reset_notify_mutex);
239
240	for (rnp = (ghd_reset_notify_list_t *)L2_next(rnl);
241	    rnp != NULL;
242	    rnp = (ghd_reset_notify_list_t *)L2_next(&rnp->l2_link)) {
243
244		/* don't call if HBA driver didn't set it */
245		if (cccp->ccc_hba_reset_notify_callback) {
246			(*cccp->ccc_hba_reset_notify_callback)(rnp->gtgtp,
247			    rnp->callback, rnp->arg);
248		}
249	}
250	mutex_exit(&cccp->ccc_reset_notify_mutex);
251}
252
253
254/* ***************************************************************** */
255
256
257
258/*
259 * Autovector Interrupt Entry Point
260 *
261 *	Dummy return to be used before mutexes has been initialized
262 *	guard against interrupts from drivers sharing the same irq line
263 */
264
/*ARGSUSED*/
static uint_t
ghd_dummy_intr(caddr_t arg)
{
	/* placeholder used only during setup; never claims the interrupt */
	return (DDI_INTR_UNCLAIMED);
}
271
272
273/*
274 * ghd_register()
275 *
276 *	Do the usual interrupt handler setup stuff.
277 *
278 *	Also, set up three mutexes: the wait queue mutex, the HBA
279 *	mutex, and the done queue mutex. The permitted locking
280 *	orders are:
281 *
282 *		1. enter(waitq)
283 *		2. enter(activel)
284 *		3. enter(doneq)
285 *		4. enter(HBA) then enter(activel)
286 *		5. enter(HBA) then enter(doneq)
287 *		6. enter(HBA) then enter(waitq)
288 *		7. enter(waitq) then tryenter(HBA)
289 *
290 *	Note: cases 6 and 7 won't deadlock because case 7 is always
291 *	mutex_tryenter() call.
292 *
293 */
294
295
296int
297ghd_register(char *labelp,
298	ccc_t	*cccp,
299	dev_info_t *dip,
300	int	inumber,
301	void	*hba_handle,
302	int	(*ccballoc)(gtgt_t *, gcmd_t *, int, int, int, int),
303	void	(*ccbfree)(gcmd_t *),
304	void	(*sg_func)(gcmd_t *, ddi_dma_cookie_t *, int, int),
305	int	(*hba_start)(void *, gcmd_t *),
306	void    (*hba_complete)(void *, gcmd_t *, int),
307	uint_t	(*int_handler)(caddr_t),
308	int	(*get_status)(void *, void *),
309	void	(*process_intr)(void *, void *),
310	int	(*timeout_func)(void *, gcmd_t *, gtgt_t *, gact_t, int),
311	tmr_t	*tmrp,
312	void 	(*hba_reset_notify_callback)(gtgt_t *,
313			void (*)(caddr_t), caddr_t))
314{
315
316	cccp->ccc_label = labelp;
317	cccp->ccc_hba_dip = dip;
318	cccp->ccc_ccballoc = ccballoc;
319	cccp->ccc_ccbfree = ccbfree;
320	cccp->ccc_sg_func = sg_func;
321	cccp->ccc_hba_start = hba_start;
322	cccp->ccc_hba_complete = hba_complete;
323	cccp->ccc_process_intr = process_intr;
324	cccp->ccc_get_status = get_status;
325	cccp->ccc_hba_handle = hba_handle;
326	cccp->ccc_hba_reset_notify_callback = hba_reset_notify_callback;
327
328	/* initialize the HBA's list headers */
329	CCCP_INIT(cccp);
330
331	/*
332	 *	Establish initial dummy interrupt handler
333	 *	get iblock cookie to initialize mutexes used in the
334	 *	real interrupt handler
335	 */
336	if (ddi_add_intr(dip, inumber, &cccp->ccc_iblock, NULL,
337	    ghd_dummy_intr, hba_handle) != DDI_SUCCESS) {
338		return (FALSE);
339	}
340	mutex_init(&cccp->ccc_hba_mutex, NULL, MUTEX_DRIVER, cccp->ccc_iblock);
341	ddi_remove_intr(dip, inumber, cccp->ccc_iblock);
342
343	/* Establish real interrupt handler */
344	if (ddi_add_intr(dip, inumber, &cccp->ccc_iblock, NULL,
345	    int_handler, (caddr_t)hba_handle) != DDI_SUCCESS) {
346		mutex_destroy(&cccp->ccc_hba_mutex);
347		return (FALSE);
348	}
349
350	mutex_init(&cccp->ccc_waitq_mutex, NULL,
351	    MUTEX_DRIVER, cccp->ccc_iblock);
352
353	mutex_init(&cccp->ccc_reset_notify_mutex, NULL,
354	    MUTEX_DRIVER, cccp->ccc_iblock);
355
356	if (ghd_timer_attach(cccp, tmrp, timeout_func) == FALSE) {
357		ddi_remove_intr(cccp->ccc_hba_dip, 0, cccp->ccc_iblock);
358		mutex_destroy(&cccp->ccc_hba_mutex);
359		mutex_destroy(&cccp->ccc_waitq_mutex);
360		return (FALSE);
361	}
362
363	if (ghd_doneq_init(cccp)) {
364		return (TRUE);
365	}
366
367	ghd_timer_detach(cccp);
368	ddi_remove_intr(cccp->ccc_hba_dip, 0, cccp->ccc_iblock);
369	mutex_destroy(&cccp->ccc_hba_mutex);
370	mutex_destroy(&cccp->ccc_waitq_mutex);
371	return (FALSE);
372
373}
374
375
376void
377ghd_unregister(ccc_t *cccp)
378{
379	ghd_timer_detach(cccp);
380	ddi_remove_intr(cccp->ccc_hba_dip, 0, cccp->ccc_iblock);
381	ddi_remove_softintr(cccp->ccc_doneq_softid);
382	mutex_destroy(&cccp->ccc_hba_mutex);
383	mutex_destroy(&cccp->ccc_waitq_mutex);
384	mutex_destroy(&cccp->ccc_doneq_mutex);
385}
386
387
388
/*
 * ghd_intr()
 *
 *	Main interrupt service routine.  Repeatedly drains the HBA's
 *	interrupt status via the driver-supplied get_status/process_intr
 *	functions, then restarts requests from the wait queue, looping
 *	until neither pass makes progress.  Returns DDI_INTR_CLAIMED if
 *	any status was processed, DDI_INTR_UNCLAIMED otherwise.
 */
int
ghd_intr(ccc_t *cccp, void *intr_status)
{
	int (*statfunc)(void *, void *) = cccp->ccc_get_status;
	void (*processfunc)(void *, void *) = cccp->ccc_process_intr;
	kmutex_t *waitq_mutexp = &cccp->ccc_waitq_mutex;
	kmutex_t *hba_mutexp = &cccp->ccc_hba_mutex;
	void		  *handle = cccp->ccc_hba_handle;
	int		   rc = DDI_INTR_UNCLAIMED;
	int		   more;


	mutex_enter(hba_mutexp);

	GDBG_INTR(("ghd_intr(): cccp=0x%p status=0x%p\n",
		cccp, intr_status));

	for (;;) {
		more = FALSE;

		/* process the interrupt status */
		while ((*statfunc)(handle, intr_status)) {
			(*processfunc)(handle, intr_status);
			rc = DDI_INTR_CLAIMED;
			more = TRUE;
		}
		mutex_enter(waitq_mutexp);
		/* if the waitq made progress, go re-check the status */
		if (ghd_waitq_process_and_mutex_hold(cccp)) {
			ASSERT(mutex_owned(hba_mutexp));
			mutex_exit(waitq_mutexp);
			continue;
		}
		/* status was processed this pass; poll the HBA once more */
		if (more) {
			mutex_exit(waitq_mutexp);
			continue;
		}
		GDBG_INTR(("ghd_intr(): done cccp=0x%p status=0x%p rc %d\n",
			cccp, intr_status, rc));
		/*
		 * Release the mutexes in the opposite order that they
		 * were acquired to prevent requests queued by
		 * ghd_transport() from getting hung up in the wait queue.
		 */
		mutex_exit(hba_mutexp);
		mutex_exit(waitq_mutexp);
		return (rc);
	}
}
437
/*
 * ghd_poll()
 *
 *	Busy-wait (for FLAG_NOINTR-style requests) until the condition
 *	selected by polltype is met:
 *
 *	GHD_POLL_REQUEST - poll_gcmdp has completed
 *	GHD_POLL_DEVICE	 - gtgtp's device has no active requests
 *	GHD_POLL_ALL	 - the whole HBA has no active requests
 *
 *	polltime is the give-up limit, compared directly against the
 *	elapsed ddi_get_lbolt() delta (clock ticks); zero means poll
 *	forever.  NOTE(review): the ghd_tran_*_timeout values fed in
 *	here are small integers (5) -- confirm ticks vs. seconds is
 *	the intended unit.
 *
 *	Must be called with the HBA mutex held and the doneq switched
 *	to poll mode (ghd_doneq_pollmode_enter()).  Completions other
 *	than the one being waited for are parked on a local list and
 *	put back on the doneq afterwards so callback order is kept.
 *
 *	Returns TRUE if the poll condition was met, FALSE on timeout.
 */
static int
ghd_poll(ccc_t	*cccp,
	gpoll_t	 polltype,
	ulong_t	 polltime,
	gcmd_t	*poll_gcmdp,
	gtgt_t	*gtgtp,
	void	*intr_status)
{
	gcmd_t	*gcmdp;
	L2el_t	 gcmd_hold_queue;
	int	 got_it = FALSE;
	clock_t	 start_lbolt;
	clock_t	 current_lbolt;


	ASSERT(mutex_owned(&cccp->ccc_hba_mutex));
	L2_INIT(&gcmd_hold_queue);

	/* Que hora es? */
	start_lbolt = ddi_get_lbolt();

	/* unqueue and save all CMD/CCBs until I find the right one */
	while (!got_it) {

		/* Give up yet? */
		current_lbolt = ddi_get_lbolt();
		if (polltime && (current_lbolt - start_lbolt >= polltime))
			break;

		/*
		 * delay 1 msec each time around the loop (this is an
		 * arbitrary delay value, any value should work) except
		 * zero because some devices don't like being polled too
		 * fast and it saturates the bus on an MP system.
		 */
		drv_usecwait(1000);

		/*
		 * check for any new device status
		 */
		if ((*cccp->ccc_get_status)(cccp->ccc_hba_handle, intr_status))
			(*cccp->ccc_process_intr)(cccp->ccc_hba_handle,
			    intr_status);

		/*
		 * If something completed then try to start the
		 * next request from the wait queue. Don't release
		 * the HBA mutex because I don't know whether my
		 * request(s) is/are on the done queue yet.
		 */
		mutex_enter(&cccp->ccc_waitq_mutex);
		(void) ghd_waitq_process_and_mutex_hold(cccp);
		mutex_exit(&cccp->ccc_waitq_mutex);

		/*
		 * Process the first of any timed-out requests.
		 */
		ghd_timer_poll(cccp, GHD_TIMER_POLL_ONE);

		/*
		 * Unqueue all the completed requests, look for mine
		 */
		while (gcmdp = ghd_doneq_get(cccp)) {
			/*
			 * If we got one and it's my request, then
			 * we're done.
			 */
			if (gcmdp == poll_gcmdp) {
				poll_gcmdp->cmd_state = GCMD_STATE_IDLE;
				got_it = TRUE;
				/* keep draining; the rest go on hold */
				continue;
			}
			/* fifo queue the other cmds on my local list */
			L2_add(&gcmd_hold_queue, &gcmdp->cmd_q, gcmdp);
		}


		/*
		 * Check whether we're done yet.
		 */
		switch (polltype) {
		case GHD_POLL_DEVICE:
			/*
			 * wait for everything queued on a specific device
			 */
			if (GDEV_NACTIVE(gtgtp->gt_gdevp) == 0)
				got_it = TRUE;
			break;

		case GHD_POLL_ALL:
			/*
			 * if waiting for all outstanding requests and
			 * if active list is now empty then exit
			 */
			if (GHBA_NACTIVE(cccp) == 0)
				got_it = TRUE;
			break;

		case GHD_POLL_REQUEST:
			/* got_it was set in the doneq drain loop above */
			break;

		}
	}

	if (L2_EMPTY(&gcmd_hold_queue)) {
		ASSERT(!mutex_owned(&cccp->ccc_waitq_mutex));
		ASSERT(mutex_owned(&cccp->ccc_hba_mutex));
		return (got_it);
	}

	/*
	 * copy the local gcmd_hold_queue back to the doneq so
	 * that the order of completion callbacks is preserved
	 */
	while (gcmdp = L2_next(&gcmd_hold_queue)) {
		L2_delete(&gcmdp->cmd_q);
		GHD_DONEQ_PUT_TAIL(cccp, gcmdp);
	}

	ASSERT(!mutex_owned(&cccp->ccc_waitq_mutex));
	ASSERT(mutex_owned(&cccp->ccc_hba_mutex));
	return (got_it);
}
561
562
563/*
564 * ghd_tran_abort()
565 *
566 *	Abort specific command on a target.
567 *
568 */
569
/*
 *	Returns TRUE if the abort completed within the timeout, FALSE
 *	if the request was in neither the waitq nor the active state
 *	(probably already being aborted) or if the poll timed out.
 */
int
ghd_tran_abort(ccc_t *cccp, gcmd_t *gcmdp, gtgt_t *gtgtp, void *intr_status)
{
	gact_t	 action;
	int	 rc;

	/*
	 * call the driver's abort_cmd function
	 */

	mutex_enter(&cccp->ccc_hba_mutex);
	ghd_doneq_pollmode_enter(cccp);

	/* pick the abort action based on how far the request has gotten */
	switch (gcmdp->cmd_state) {
	case GCMD_STATE_WAITQ:
		/* not yet started */
		action = GACTION_EARLY_ABORT;
		break;

	case GCMD_STATE_ACTIVE:
		/* in progress */
		action = GACTION_ABORT_CMD;
		break;

	default:
		/* everything else, probably already being aborted */
		rc = FALSE;
		goto exit;
	}

	/* stop the timer and remove it from the active list */
	GHD_TIMER_STOP(cccp, gcmdp);

	/* start a new timer and send out the abort command */
	ghd_timer_newstate(cccp, gcmdp, gtgtp, action, GHD_TGTREQ);

	/* wait for the abort to complete */
	if (rc = ghd_poll(cccp, GHD_POLL_REQUEST, ghd_tran_abort_timeout,
	    gcmdp, gtgtp, intr_status)) {
		/* it completed; hand it back through the done queue */
		gcmdp->cmd_state = GCMD_STATE_DONEQ;
		GHD_DONEQ_PUT_TAIL(cccp, gcmdp);
	}

exit:
	ghd_doneq_pollmode_exit(cccp);

	/* restart any requests still queued on the waitq */
	mutex_enter(&cccp->ccc_waitq_mutex);
	ghd_waitq_process_and_mutex_exit(cccp);

	return (rc);
}
621
622
623/*
624 * ghd_tran_abort_lun()
625 *
626 *	Abort all commands on a specific target.
627 *
628 */
629
/*
 *	Returns the result of ghd_poll(): TRUE if the device went idle
 *	within ghd_tran_abort_lun_timeout, FALSE otherwise.
 */
int
ghd_tran_abort_lun(ccc_t *cccp,	gtgt_t *gtgtp, void *intr_status)
{
	int	 rc;

	/*
	 * call the HBA driver's abort_device function
	 */

	mutex_enter(&cccp->ccc_hba_mutex);
	ghd_doneq_pollmode_enter(cccp);

	/* send out the abort device request */
	ghd_timer_newstate(cccp, NULL, gtgtp, GACTION_ABORT_DEV, GHD_TGTREQ);

	/* wait for the device to go idle */
	rc = ghd_poll(cccp, GHD_POLL_DEVICE, ghd_tran_abort_lun_timeout,
		NULL, gtgtp, intr_status);

	ghd_doneq_pollmode_exit(cccp);

	/* restart any requests still queued on the waitq */
	mutex_enter(&cccp->ccc_waitq_mutex);
	ghd_waitq_process_and_mutex_exit(cccp);

	return (rc);
}
656
657
658
659/*
660 * ghd_tran_reset_target()
661 *
662 *	reset the target device
663 *
664 *
665 */
666
667int
668ghd_tran_reset_target(ccc_t *cccp, gtgt_t *gtgtp, void *intr_status)
669{
670	int rc = TRUE;
671
672
673	mutex_enter(&cccp->ccc_hba_mutex);
674	ghd_doneq_pollmode_enter(cccp);
675
676	/* send out the device reset request */
677	ghd_timer_newstate(cccp, NULL, gtgtp, GACTION_RESET_TARGET, GHD_TGTREQ);
678
679	/* wait for the device to reset */
680	rc = ghd_poll(cccp, GHD_POLL_DEVICE, ghd_tran_reset_target_timeout,
681		NULL, gtgtp, intr_status);
682
683	ghd_doneq_pollmode_exit(cccp);
684
685	mutex_enter(&cccp->ccc_waitq_mutex);
686	ghd_waitq_process_and_mutex_exit(cccp);
687
688	return (rc);
689}
690
691
692
693/*
694 * ghd_tran_reset_bus()
695 *
696 *	reset the scsi bus
697 *
698 */
699
/*
 *	Returns the result of ghd_poll(): TRUE if all active requests
 *	drained within ghd_tran_reset_bus_timeout, FALSE otherwise.
 */
int
ghd_tran_reset_bus(ccc_t *cccp, gtgt_t *gtgtp, void *intr_status)
{
	int	rc;

	mutex_enter(&cccp->ccc_hba_mutex);
	ghd_doneq_pollmode_enter(cccp);

	/* send out the bus reset request */
	ghd_timer_newstate(cccp, NULL, gtgtp, GACTION_RESET_BUS, GHD_TGTREQ);

	/*
	 * Wait for all active requests on this HBA to complete
	 */
	rc = ghd_poll(cccp, GHD_POLL_ALL, ghd_tran_reset_bus_timeout,
		NULL, NULL, intr_status);


	ghd_doneq_pollmode_exit(cccp);

	/* restart any requests still queued on the waitq */
	mutex_enter(&cccp->ccc_waitq_mutex);
	ghd_waitq_process_and_mutex_exit(cccp);

	return (rc);
}
725
726
/*
 * ghd_transport()
 *
 *	Queue a request for execution.  The request is timestamped, put
 *	on the target device's wait queue, and shuffled toward the HBA.
 *
 *	Non-polled: start the request asynchronously if the HBA mutex is
 *	available (tryenter, to honor lock-order rule 7 above) and return
 *	immediately.
 *
 *	Polled (FLAG_NOINTR): hold the HBA mutex and the doneq in poll
 *	mode, busy-wait via ghd_poll() until the request completes or
 *	its packet timer aborts it, then run the HBA completion function
 *	without the target callback.
 *
 *	Always returns TRAN_ACCEPT.
 */
int
ghd_transport(ccc_t	*cccp,
		gcmd_t	*gcmdp,
		gtgt_t	*gtgtp,
		ulong_t	 timeout,
		int	 polled,
		void	*intr_status)
{
	gdev_t	*gdevp = gtgtp->gt_gdevp;

	ASSERT(!mutex_owned(&cccp->ccc_hba_mutex));
	ASSERT(!mutex_owned(&cccp->ccc_waitq_mutex));

	if (polled) {
		/*
		 * Grab the HBA mutex so no other requests are started
		 * until after this one completes.
		 */
		mutex_enter(&cccp->ccc_hba_mutex);

		GDBG_START(("ghd_transport: polled"
			" cccp 0x%p gdevp 0x%p gtgtp 0x%p gcmdp 0x%p\n",
				cccp, gdevp, gtgtp, gcmdp));

		/*
		 * Lock the doneq so no other thread flushes the Q.
		 */
		ghd_doneq_pollmode_enter(cccp);
	}
#if defined(GHD_DEBUG) || defined(__lint)
	else {
		GDBG_START(("ghd_transport: non-polled"
			" cccp 0x%p gdevp 0x%p gtgtp 0x%p gcmdp 0x%p\n",
				cccp, gdevp, gtgtp, gcmdp));
	}
#endif
	/*
	 * add this request to the tail of the waitq
	 */
	gcmdp->cmd_waitq_level = 1;
	mutex_enter(&cccp->ccc_waitq_mutex);
	L2_add(&GDEV_QHEAD(gdevp), &gcmdp->cmd_q, gcmdp);

	/*
	 * Add this request to the packet timer active list and start its
	 * abort timer.
	 */
	gcmdp->cmd_state = GCMD_STATE_WAITQ;
	ghd_timer_start(cccp, gcmdp, timeout);


	/*
	 * Check the device wait queue throttle and perhaps move
	 * some requests to the end of the HBA wait queue.
	 */
	ghd_waitq_shuffle_up(cccp, gdevp);

	if (!polled) {
		/*
		 * See if the HBA mutex is available but use the
		 * tryenter so I don't deadlock.
		 */
		if (!mutex_tryenter(&cccp->ccc_hba_mutex)) {
			/* The HBA mutex isn't available */
			GDBG_START(("ghd_transport: !mutex cccp 0x%p\n", cccp));
			mutex_exit(&cccp->ccc_waitq_mutex);
			/* the request stays queued; it starts later */
			return (TRAN_ACCEPT);
		}
		GDBG_START(("ghd_transport: got mutex cccp 0x%p\n", cccp));

		/*
		 * start as many requests as possible from the head
		 * of the HBA wait queue
		 */

		ghd_waitq_process_and_mutex_exit(cccp);

		ASSERT(!mutex_owned(&cccp->ccc_hba_mutex));
		ASSERT(!mutex_owned(&cccp->ccc_waitq_mutex));

		return (TRAN_ACCEPT);
	}


	/*
	 * If polled mode (FLAG_NOINTR specified in scsi_pkt flags),
	 * then ghd_poll() waits until the request completes or times out
	 * before returning.
	 */

	mutex_exit(&cccp->ccc_waitq_mutex);
	(void) ghd_poll(cccp, GHD_POLL_REQUEST, 0, gcmdp, gtgtp, intr_status);
	ghd_doneq_pollmode_exit(cccp);

	mutex_enter(&cccp->ccc_waitq_mutex);
	ghd_waitq_process_and_mutex_exit(cccp);

	/* call HBA's completion function but don't do callback to target */
	(*cccp->ccc_hba_complete)(cccp->ccc_hba_handle, gcmdp, FALSE);

	GDBG_START(("ghd_transport: polled done cccp 0x%p\n", cccp));
	return (TRAN_ACCEPT);
}
830
831int ghd_reset_notify(ccc_t 	*cccp,
832			gtgt_t *gtgtp,
833			int 	flag,
834			void 	(*callback)(caddr_t),
835			caddr_t arg)
836{
837	ghd_reset_notify_list_t *rnp;
838	int rc = FALSE;
839
840	switch (flag) {
841
842	case SCSI_RESET_NOTIFY:
843
844		rnp = (ghd_reset_notify_list_t *)kmem_zalloc(sizeof (*rnp),
845		    KM_SLEEP);
846		rnp->gtgtp = gtgtp;
847		rnp->callback = callback;
848		rnp->arg = arg;
849
850		mutex_enter(&cccp->ccc_reset_notify_mutex);
851		L2_add(&cccp->ccc_reset_notify_list, &rnp->l2_link,
852		    (void *)rnp);
853		mutex_exit(&cccp->ccc_reset_notify_mutex);
854
855		rc = TRUE;
856
857		break;
858
859	case SCSI_RESET_CANCEL:
860
861		mutex_enter(&cccp->ccc_reset_notify_mutex);
862		for (rnp = (ghd_reset_notify_list_t *)
863			L2_next(&cccp->ccc_reset_notify_list);
864		    rnp != NULL;
865		    rnp = (ghd_reset_notify_list_t *)L2_next(&rnp->l2_link)) {
866			if (rnp->gtgtp == gtgtp &&
867			    rnp->callback == callback &&
868			    rnp->arg == arg) {
869				L2_delete(&rnp->l2_link);
870				kmem_free(rnp, sizeof (*rnp));
871				rc = TRUE;
872			}
873		}
874		mutex_exit(&cccp->ccc_reset_notify_mutex);
875		break;
876
877	default:
878		rc = FALSE;
879		break;
880	}
881
882	return (rc);
883}
884
885/*
886 * freeze the HBA waitq output (see ghd_waitq_process_and_mutex_hold),
887 * presumably because of a SCSI reset, for delay milliseconds.
888 */
889
void
ghd_freeze_waitq(ccc_t *cccp, int delay)
{
	ASSERT(mutex_owned(&cccp->ccc_hba_mutex));

	/* freeze the waitq for delay milliseconds */

	/*
	 * Record the freeze start (a lbolt timestamp) and duration; the
	 * waitq-processing code (not in this file) uses these fields to
	 * decide when to thaw.
	 */
	mutex_enter(&cccp->ccc_waitq_mutex);
	cccp->ccc_waitq_freezetime = ddi_get_lbolt();
	cccp->ccc_waitq_freezedelay = delay;
	cccp->ccc_waitq_frozen = 1;
	mutex_exit(&cccp->ccc_waitq_mutex);
}
903
/*
 * Hold the wait queue: set the flag (under the waitq mutex) that stops
 * waitq output until ghd_queue_unhold() clears it.
 */
void
ghd_queue_hold(ccc_t *cccp)
{
	ASSERT(mutex_owned(&cccp->ccc_hba_mutex));

	mutex_enter(&cccp->ccc_waitq_mutex);
	cccp->ccc_waitq_held = 1;
	mutex_exit(&cccp->ccc_waitq_mutex);
}
913
/*
 * Release a hold placed by ghd_queue_hold().
 */
void
ghd_queue_unhold(ccc_t *cccp)
{
	ASSERT(mutex_owned(&cccp->ccc_hba_mutex));

	mutex_enter(&cccp->ccc_waitq_mutex);
	cccp->ccc_waitq_held = 0;
	mutex_exit(&cccp->ccc_waitq_mutex);
}
923
924
925
926/*
927 * Trigger previously-registered reset notifications
928 */
929
void
ghd_trigger_reset_notify(ccc_t *cccp)
{
	gcmd_t *gcmdp;

	ASSERT(mutex_owned(&cccp->ccc_hba_mutex));

	/* create magic doneq entry */

	gcmdp = ghd_gcmd_alloc((gtgt_t *)NULL, 0, TRUE);
	/* flag makes ghd_doneq_process() run the notify callbacks */
	gcmdp->cmd_flags = GCMDFLG_RESET_NOTIFY;

	/* put at head of doneq so it's processed ASAP */

	GHD_DONEQ_PUT_HEAD(cccp, gcmdp);
}
946