1/*	$NetBSD: aic7xxx_osm.c,v 1.43 2023/08/01 21:26:28 andvar Exp $	*/
2
3/*
4 * Bus independent FreeBSD shim for the aic7xxx based adaptec SCSI controllers
5 *
6 * Copyright (c) 1994-2001 Justin T. Gibbs.
7 * All rights reserved.
8 *
9 * Redistribution and use in source and binary forms, with or without
10 * modification, are permitted provided that the following conditions
11 * are met:
12 * 1. Redistributions of source code must retain the above copyright
13 *    notice, this list of conditions, and the following disclaimer,
14 *    without modification.
15 * 2. The name of the author may not be used to endorse or promote products
16 *    derived from this software without specific prior written permission.
17 *
18 * Alternatively, this software may be distributed under the terms of the
19 * GNU Public License ("GPL").
20 *
21 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
22 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
23 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
24 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
25 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
26 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
27 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
28 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
29 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
30 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
31 * SUCH DAMAGE.
32 *
33 * //depot/aic7xxx/freebsd/dev/aic7xxx/aic7xxx_osm.c#12 $
34 *
35 * $FreeBSD: src/sys/dev/aic7xxx/aic7xxx_osm.c,v 1.31 2002/11/30 19:08:58 scottl Exp $
36 */
37/*
38 * Ported from FreeBSD by Pascal Renauld, Network Storage Solutions, Inc. - April 2003
39 */
40
41#include <sys/cdefs.h>
42__KERNEL_RCSID(0, "$NetBSD: aic7xxx_osm.c,v 1.43 2023/08/01 21:26:28 andvar Exp $");
43
44#include <dev/ic/aic7xxx_osm.h>
45#include <dev/ic/aic7xxx_inline.h>
46
47#ifndef AHC_TMODE_ENABLE
48#define AHC_TMODE_ENABLE 0
49#endif
50
51
52static void	ahc_action(struct scsipi_channel *chan,
53			   scsipi_adapter_req_t req, void *arg);
54static void	ahc_execute_scb(void *arg, bus_dma_segment_t *dm_segs,
55				int nsegments);
56static int	ahc_poll(struct ahc_softc *ahc, int wait);
57static void	ahc_setup_data(struct ahc_softc *ahc,
58			       struct scsipi_xfer *xs, struct scb *scb);
59static void	ahc_set_recoveryscb(struct ahc_softc *ahc, struct scb *scb);
60static int	ahc_ioctl(struct scsipi_channel *channel, u_long cmd,
61			  void *addr, int flag, struct proc *p);
62
63static bool	ahc_pmf_suspend(device_t, const pmf_qual_t *);
64static bool	ahc_pmf_resume(device_t, const pmf_qual_t *);
65static bool	ahc_pmf_shutdown(device_t, int);
66
67
68/*
69 * Attach all the sub-devices we can find
70 */
int
ahc_attach(struct ahc_softc *ahc)
{
	u_long 	s;
	int i;
	char ahc_info[256];

	/* No transactions in flight yet; one untagged queue per target. */
	LIST_INIT(&ahc->pending_scbs);
	for (i = 0; i < AHC_NUM_TARGETS; i++)
		TAILQ_INIT(&ahc->untagged_queues[i]);

	ahc_lock(ahc, &s);

	/*
	 * Describe the adapter to the scsipi layer.  Twin-channel
	 * controllers present two scsipi channels.
	 */
	ahc->sc_adapter.adapt_dev = ahc->sc_dev;
	ahc->sc_adapter.adapt_nchannels = (ahc->features & AHC_TWIN) ? 2 : 1;

	/* Advertise one opening less than the number of allocated SCBs. */
	ahc->sc_adapter.adapt_openings = ahc->scb_data->numscbs - 1;
	ahc->sc_adapter.adapt_max_periph = 16;

	ahc->sc_adapter.adapt_ioctl = ahc_ioctl;
	ahc->sc_adapter.adapt_minphys = ahc_minphys;
	ahc->sc_adapter.adapt_request = ahc_action;

	/* Channel A description. */
	ahc->sc_channel.chan_adapter = &ahc->sc_adapter;
	ahc->sc_channel.chan_bustype = &scsi_bustype;
	ahc->sc_channel.chan_channel = 0;
	ahc->sc_channel.chan_ntargets = (ahc->features & AHC_WIDE) ? 16 : 8;
	ahc->sc_channel.chan_nluns = 8 /*AHC_NUM_LUNS*/;
	ahc->sc_channel.chan_id = ahc->our_id;
	/* More SCBs can be allocated on demand; see ahc_action(). */
	ahc->sc_channel.chan_flags |= SCSIPI_CHAN_CANGROW;

	/* Channel B of a twin controller is a copy with its own id. */
	if (ahc->features & AHC_TWIN) {
		ahc->sc_channel_b = ahc->sc_channel;
		ahc->sc_channel_b.chan_id = ahc->our_id_b;
		ahc->sc_channel_b.chan_channel = 1;
	}

	ahc_controller_info(ahc, ahc_info, sizeof(ahc_info));
	printf("%s: %s\n", device_xname(ahc->sc_dev), ahc_info);

	/*
	 * Attach the child SCSI busses.  When AHC_PRIMARY_CHANNEL is
	 * set, channel B is attached first.
	 */
	if ((ahc->flags & AHC_PRIMARY_CHANNEL) == 0) {
		ahc->sc_child = config_found(ahc->sc_dev,
		    &ahc->sc_channel, scsiprint, CFARGS_NONE);
		if (ahc->features & AHC_TWIN)
			ahc->sc_child_b = config_found(ahc->sc_dev,
			    &ahc->sc_channel_b, scsiprint, CFARGS_NONE);
	} else {
		if (ahc->features & AHC_TWIN)
			ahc->sc_child = config_found(ahc->sc_dev,
			    &ahc->sc_channel_b, scsiprint, CFARGS_NONE);
		ahc->sc_child_b = config_found(ahc->sc_dev,
		    &ahc->sc_channel, scsiprint, CFARGS_NONE);
	}

	ahc_intr_enable(ahc, TRUE);

	/* Reset the busses if requested by the configuration flags. */
	if (ahc->flags & AHC_RESET_BUS_A)
		ahc_reset_channel(ahc, 'A', TRUE);
	if ((ahc->features & AHC_TWIN) && ahc->flags & AHC_RESET_BUS_B)
		ahc_reset_channel(ahc, 'B', TRUE);

	if (!pmf_device_register1(ahc->sc_dev,
	    ahc_pmf_suspend, ahc_pmf_resume, ahc_pmf_shutdown))
		aprint_error_dev(ahc->sc_dev,
		    "couldn't establish power handler\n");

	ahc_unlock(ahc, &s);
	return (1);
}
140
141/*
142 * XXX we should call the real suspend and resume functions here
143 *     but pmf(9) stuff on cardbus backend is untested yet
144 */
145
static bool
ahc_pmf_suspend(device_t dev, const pmf_qual_t *qual)
{
	struct ahc_softc *sc = device_private(dev);
#if 0
	/* Preferred implementation, disabled; see XXX comment above. */
	return (ahc_suspend(sc) == 0);
#else
	/* For now simply reset the chip to quiesce it. */
	ahc_shutdown(sc);
	return true;
#endif
}
157
static bool
ahc_pmf_resume(device_t dev, const pmf_qual_t *qual)
{
#if 0
	/* Preferred implementation, disabled; see XXX comment above. */
	struct ahc_softc *sc = device_private(dev);

	return (ahc_resume(sc) == 0);
#else
	/* Real resume is untested on some backends; report success. */
	return true;
#endif
}
169
170static bool
171ahc_pmf_shutdown(device_t dev, int howto)
172{
173	struct ahc_softc *sc = device_private(dev);
174
175	/* Disable all interrupt sources by resetting the controller */
176	ahc_shutdown(sc);
177
178	return true;
179}
180
181/*
182 * Catch an interrupt from the adapter
183 */
/*
 * Interrupt handler registered by the bus front-ends; simply hand
 * the interrupt to the bus-independent core.
 */
void
ahc_platform_intr(void *arg)
{
	struct ahc_softc *ahc = arg;

	ahc_intr(ahc);
}
192
193/*
194 * We have an scb which has been processed by the
195 * adaptor, now we look to see how the operation
196 * went.
197 */
void
ahc_done(struct ahc_softc *ahc, struct scb *scb)
{
	struct scsipi_xfer *xs;
	struct scsipi_periph *periph;
	u_long s;

	xs = scb->xs;
	periph = xs->xs_periph;
	LIST_REMOVE(scb, pending_links);
	/*
	 * If this was an untagged transaction, take it off the
	 * per-target untagged queue and kick off the next one.
	 */
	if ((scb->flags & SCB_UNTAGGEDQ) != 0) {
		struct scb_tailq *untagged_q;
		int target_offset;

		target_offset = SCB_GET_TARGET_OFFSET(ahc, scb);
		untagged_q = &ahc->untagged_queues[target_offset];
		TAILQ_REMOVE(untagged_q, scb, links.tqe);
		scb->flags &= ~SCB_UNTAGGEDQ;
		ahc_run_untagged_queue(ahc, untagged_q);
	}

	/* The command completed; cancel its timeout. */
	callout_stop(&scb->xs->xs_callout);

	/* Sync and unload the data DMA map, if a transfer took place. */
	if (xs->datalen) {
		int op;

		if (xs->xs_control & XS_CTL_DATA_IN)
			op = BUS_DMASYNC_POSTREAD;
		else
			op = BUS_DMASYNC_POSTWRITE;
		bus_dmamap_sync(ahc->parent_dmat, scb->dmamap, 0,
				scb->dmamap->dm_mapsize, op);
		bus_dmamap_unload(ahc->parent_dmat, scb->dmamap);
	}

	/*
	 * If the recovery SCB completes, we have to be
	 * out of our timeout.
	 */
	if ((scb->flags & SCB_RECOVERY_SCB) != 0) {
		struct	scb *list_scb;

		/*
		 * We were able to complete the command successfully,
		 * so reinstate the timeouts for all other pending
		 * commands.
		 */
		LIST_FOREACH(list_scb, &ahc->pending_scbs, pending_links) {
			if (!(list_scb->xs->xs_control & XS_CTL_POLL)) {
				/*
				 * Convert ms to ticks, dividing first
				 * for very large timeouts to avoid
				 * overflowing the multiplication.
				 */
				callout_reset(&list_scb->xs->xs_callout,
				    (list_scb->xs->timeout > 1000000) ?
				    (list_scb->xs->timeout / 1000) * hz :
				    (list_scb->xs->timeout * hz) / 1000,
				    ahc_timeout, list_scb);
			}
		}

		/*
		 * A BDR or abort sent during recovery means the
		 * original command timed out.
		 */
		if (ahc_get_transaction_status(scb) == CAM_BDR_SENT
		 || ahc_get_transaction_status(scb) == CAM_REQ_ABORTED)
			ahc_set_transaction_status(scb, CAM_CMD_TIMEOUT);
		scsipi_printaddr(xs->xs_periph);
		printf("%s: no longer in timeout, status = %x\n",
		       ahc_name(ahc), xs->status);

		/* Undo the freeze done by ahc_set_recoveryscb(). */
		scsipi_channel_thaw(&ahc->sc_channel, 1);
		if (ahc->features & AHC_TWIN)
			scsipi_channel_thaw(&ahc->sc_channel_b, 1);
	}

	/* Don't clobber any existing error state */
	if (xs->error != XS_NOERROR) {
	  /* An error was recorded earlier; keep it as is. */
	} else if ((scb->flags & SCB_SENSE) != 0) {
		/*
		 * We performed autosense retrieval.
		 *
		 * Zero any sense not transferred by the
		 * device.  The SCSI spec mandates that any
		 * untransferred data should be assumed to be
		 * zero.  Complete the 'bounce' of sense information
		 * through buffers accessible via bus-space by
		 * copying it into the clients csio.
		 */
		memset(&xs->sense.scsi_sense, 0, sizeof(xs->sense.scsi_sense));
		memcpy(&xs->sense.scsi_sense,
		       ahc_get_sense_buf(ahc, scb),
		       sizeof(xs->sense.scsi_sense));
		xs->error = XS_SENSE;
	}
	/* Release a queue freeze this command was responsible for. */
	if (scb->flags & SCB_FREEZE_QUEUE) {
		scsipi_periph_thaw(periph, 1);
		scb->flags &= ~SCB_FREEZE_QUEUE;
	}

	ahc_lock(ahc, &s);
	ahc_free_scb(ahc, scb);
	ahc_unlock(ahc, &s);

	scsipi_done(xs);
}
298
299static int
300ahc_ioctl(struct scsipi_channel *channel, u_long cmd, void *addr,
301    int flag, struct proc *p)
302{
303	struct ahc_softc *ahc;
304	int s, ret = ENOTTY;
305
306	ahc = device_private(channel->chan_adapter->adapt_dev);
307
308	switch (cmd) {
309	case SCBUSIORESET:
310		s = splbio();
311		ahc_reset_channel(ahc, channel->chan_channel == 1 ? 'B' : 'A',
312		    TRUE);
313		splx(s);
314		ret = 0;
315		break;
316	default:
317		break;
318	}
319
320	return ret;
321}
322
/*
 * scsipi "request" entry point: run transfers, grow SCB resources
 * and set per-target transfer modes on behalf of the midlayer.
 */
static void
ahc_action(struct scsipi_channel *chan, scsipi_adapter_req_t req, void *arg)
{
	struct ahc_softc *ahc;
	int s;
	struct ahc_initiator_tinfo *tinfo;
	struct ahc_tmode_tstate *tstate;

	ahc  = device_private(chan->chan_adapter->adapt_dev);

	switch (req) {

	case ADAPTER_REQ_RUN_XFER:
	  {
		struct scsipi_xfer *xs;
		struct scsipi_periph *periph;
		struct scb *scb;
		struct hardware_scb *hscb;
		u_int target_id;
		u_int our_id;
		u_long ss;

		xs = arg;
		periph = xs->xs_periph;

		target_id = periph->periph_target;
		our_id = ahc->our_id;

		SC_DEBUG(xs->xs_periph, SCSIPI_DB3, ("ahc_action\n"));

		/*
		 * get an scb to use.
		 */
		ahc_lock(ahc, &ss);
		if ((scb = ahc_get_scb(ahc)) == NULL) {
			/* Out of SCBs; ask the midlayer to retry later. */
			xs->error = XS_RESOURCE_SHORTAGE;
			ahc_unlock(ahc, &ss);
			scsipi_done(xs);
			return;
		}
		ahc_unlock(ahc, &ss);

		hscb = scb->hscb;

		SC_DEBUG(periph, SCSIPI_DB3, ("start scb(%p)\n", scb));
		scb->xs = xs;

		/*
		 * Put all the arguments for the xfer in the scb
		 */
		hscb->control = 0;
		hscb->scsiid = BUILD_SCSIID(ahc, 0, target_id, our_id);
		hscb->lun = periph->periph_lun;
		if (xs->xs_control & XS_CTL_RESET) {
			hscb->cdb_len = 0;
			scb->flags |= SCB_DEVICE_RESET;
			hscb->control |= MK_MESSAGE;
			/*
			 * NOTE(review): after queuing the reset here,
			 * control falls through to ahc_setup_data()
			 * below, which calls ahc_execute_scb() again
			 * for the same scb.  A "return" after this
			 * call looks intended — confirm before
			 * changing.
			 */
			ahc_execute_scb(scb, NULL, 0);
		}

		ahc_setup_data(ahc, xs, scb);

		break;
	  }
	case ADAPTER_REQ_GROW_RESOURCES:
#ifdef AHC_DEBUG
		printf("%s: ADAPTER_REQ_GROW_RESOURCES\n", ahc_name(ahc));
#endif
		/* Allocate more SCBs and report the new opening count. */
		chan->chan_adapter->adapt_openings += ahc_alloc_scbs(ahc);
		if (ahc->scb_data->numscbs >= AHC_SCB_MAX_ALLOC)
			chan->chan_flags &= ~SCSIPI_CHAN_CANGROW;
		return;

	case ADAPTER_REQ_SET_XFER_MODE:
	    {
		struct scsipi_xfer_mode *xm = arg;
		struct ahc_devinfo devinfo;
		int target_id, our_id, first;
		u_int width;
		char channel;
		u_int ppr_options = 0, period, offset;
		struct ahc_syncrate *syncrate;
		uint16_t old_autoneg;

		target_id = xm->xm_target;
		our_id = chan->chan_id;
		channel = (chan->chan_channel == 1) ? 'B' : 'A';
		s = splbio();
		tinfo = ahc_fetch_transinfo(ahc, channel, our_id, target_id,
		    &tstate);
		ahc_compile_devinfo(&devinfo, our_id, target_id,
		    0, channel, ROLE_INITIATOR);

		old_autoneg = tstate->auto_negotiate;

		/*
		 * XXX since the period and offset are not provided here,
		 * fake things by forcing a renegotiation using the user
		 * settings if this is called for the first time (i.e.
		 * during probe). Also, cap various values at the user
		 * values, assuming that the user set it up that way.
		 *
		 * NOTE(review): when this is NOT the first call and the
		 * peripheral advertises sync capability, "period" and
		 * "offset" reach ahc_find_syncrate()/ahc_validate_offset()
		 * below without being initialized here — presumably those
		 * helpers clamp them, but verify.
		 */
		if (ahc->inited_target[target_id] == 0) {
			period = tinfo->user.period;
			offset = tinfo->user.offset;
			ppr_options = tinfo->user.ppr_options;
			width = tinfo->user.width;
			tstate->tagenable |=
			    (ahc->user_tagenable & devinfo.target_mask);
			tstate->discenable |=
			    (ahc->user_discenable & devinfo.target_mask);
			ahc->inited_target[target_id] = 1;
			first = 1;
		} else
			first = 0;

		/* Choose the widest width both sides support. */
		if (xm->xm_mode & (PERIPH_CAP_WIDE16 | PERIPH_CAP_DT))
			width = MSG_EXT_WDTR_BUS_16_BIT;
		else
			width = MSG_EXT_WDTR_BUS_8_BIT;

		ahc_validate_width(ahc, NULL, &width, ROLE_UNKNOWN);
		if (width > tinfo->user.width)
			width = tinfo->user.width;
		ahc_set_width(ahc, &devinfo, width, AHC_TRANS_GOAL, FALSE);

		/* Fall back to async if sync transfers are unsupported. */
		if (!(xm->xm_mode & (PERIPH_CAP_SYNC | PERIPH_CAP_DT))) {
			period = 0;
			offset = 0;
			ppr_options = 0;
		}

		if ((xm->xm_mode & PERIPH_CAP_DT) &&
		    (ppr_options & MSG_EXT_PPR_DT_REQ))
			ppr_options |= MSG_EXT_PPR_DT_REQ;
		else
			ppr_options &= ~MSG_EXT_PPR_DT_REQ;
		/* IU requires both disconnection and tagged queuing. */
		if ((tstate->discenable & devinfo.target_mask) == 0 ||
		    (tstate->tagenable & devinfo.target_mask) == 0)
			ppr_options &= ~MSG_EXT_PPR_IU_REQ;

		if ((xm->xm_mode & PERIPH_CAP_TQING) &&
		    (ahc->user_tagenable & devinfo.target_mask))
			tstate->tagenable |= devinfo.target_mask;
		else
			tstate->tagenable &= ~devinfo.target_mask;

		/* Clamp period/offset to what the hardware can do. */
		syncrate = ahc_find_syncrate(ahc, &period, &ppr_options,
		    AHC_SYNCRATE_MAX);
		ahc_validate_offset(ahc, NULL, syncrate, &offset,
		    width, ROLE_UNKNOWN);

		if (offset == 0) {
			period = 0;
			ppr_options = 0;
		}

		/* PPR negotiation needs SPI transport version >= 3. */
		if (ppr_options != 0
		    && tinfo->user.transport_version >= 3) {
			tinfo->goal.transport_version =
			    tinfo->user.transport_version;
			tinfo->curr.transport_version =
			    tinfo->user.transport_version;
		}

		ahc_set_syncrate(ahc, &devinfo, syncrate, period, offset,
		    ppr_options, AHC_TRANS_GOAL, FALSE);

		/*
		 * If this is the first request, and no negotiation is
		 * needed, just confirm the state to the scsipi layer,
		 * so that it can print a message.
		 */
		if (old_autoneg == tstate->auto_negotiate && first) {
			xm->xm_mode = 0;
			xm->xm_period = tinfo->curr.period;
			xm->xm_offset = tinfo->curr.offset;
			if (tinfo->curr.width == MSG_EXT_WDTR_BUS_16_BIT)
				xm->xm_mode |= PERIPH_CAP_WIDE16;
			if (tinfo->curr.period)
				xm->xm_mode |= PERIPH_CAP_SYNC;
			if (tstate->tagenable & devinfo.target_mask)
				xm->xm_mode |= PERIPH_CAP_TQING;
			if (tinfo->curr.ppr_options & MSG_EXT_PPR_DT_REQ)
				xm->xm_mode |= PERIPH_CAP_DT;
			scsipi_async_event(chan, ASYNC_EVENT_XFER_MODE, xm);
		}
		splx(s);
	    }
	}

	return;
}
516
/*
 * DMA-map callback: copy the segment list into the scb's SG list,
 * fill in negotiation state and queue the scb to the controller.
 * Polled transfers are run to completion here.
 */
static void
ahc_execute_scb(void *arg, bus_dma_segment_t *dm_segs, int nsegments)
{
	struct	scb *scb;
	struct scsipi_xfer *xs;
	struct	ahc_softc *ahc;
	struct	ahc_initiator_tinfo *tinfo;
	struct	ahc_tmode_tstate *tstate;

	u_int	mask;
	u_long	s;

	scb = (struct scb *)arg;
	xs = scb->xs;
	xs->error = 0;
	xs->status = 0;
	xs->xs_status = 0;
	ahc = device_private(
	    xs->xs_periph->periph_channel->chan_adapter->adapt_dev);

	if (nsegments != 0) {
		struct ahc_dma_seg *sg;
		bus_dma_segment_t *end_seg;
		int op;

		end_seg = dm_segs + nsegments;

		/* Copy the segments into our SG list */
		sg = scb->sg_list;
		while (dm_segs < end_seg) {
			uint32_t len;

			sg->addr = ahc_htole32(dm_segs->ds_addr);
			/* High address bits share the length word. */
			len = dm_segs->ds_len
			    | ((dm_segs->ds_addr >> 8) & AHC_SG_HIGH_ADDR_MASK);
			sg->len = ahc_htole32(len);
			sg++;
			dm_segs++;
		}

		/*
		 * Note where to find the SG entries in bus space.
		 * We also set the full residual flag which the
		 * sequencer will clear as soon as a data transfer
		 * occurs.
		 */
		scb->hscb->sgptr = ahc_htole32(scb->sg_list_phys|SG_FULL_RESID);

		if (xs->xs_control & XS_CTL_DATA_IN)
			op = BUS_DMASYNC_PREREAD;
		else
			op = BUS_DMASYNC_PREWRITE;

		bus_dmamap_sync(ahc->parent_dmat, scb->dmamap, 0,
				scb->dmamap->dm_mapsize, op);

		/* Mark the final segment so the sequencer stops there. */
		sg--;
		sg->len |= ahc_htole32(AHC_DMA_LAST_SEG);

		/* Copy the first SG into the "current" data pointer area */
		scb->hscb->dataptr = scb->sg_list->addr;
		scb->hscb->datacnt = scb->sg_list->len;
	} else {
		/* No data phase for this command. */
		scb->hscb->sgptr = ahc_htole32(SG_LIST_NULL);
		scb->hscb->dataptr = 0;
		scb->hscb->datacnt = 0;
	}

	scb->sg_count = nsegments;

	ahc_lock(ahc, &s);

	/*
	 * Last time we need to check if this SCB needs to
	 * be aborted.
	 */
	if (xs->xs_status & XS_STS_DONE) {
		if (nsegments != 0)
			/*
			 * NOTE(review): the map was loaded with
			 * ahc->parent_dmat in ahc_setup_data() but is
			 * unloaded with ahc->buffer_dmat here; confirm
			 * the two tags are interchangeable.
			 */
			bus_dmamap_unload(ahc->buffer_dmat, scb->dmamap);
		ahc_free_scb(ahc, scb);
		ahc_unlock(ahc, &s);
		scsipi_done(xs);
		return;
	}

	/* Pick up the current negotiation state for this target. */
	tinfo = ahc_fetch_transinfo(ahc, ahc->channel,
				    SCSIID_OUR_ID(scb->hscb->scsiid),
				    SCSIID_TARGET(ahc, scb->hscb->scsiid),
				    &tstate);

	mask = SCB_GET_TARGET_MASK(ahc, scb);
	scb->hscb->scsirate = tinfo->scsirate;
	scb->hscb->scsioffset = tinfo->curr.offset;

	if ((tstate->ultraenb & mask) != 0)
		scb->hscb->control |= ULTRAENB;

	if ((tstate->discenable & mask) != 0)
		scb->hscb->control |= DISCENB;

	if (xs->xs_tag_type)
		scb->hscb->control |= xs->xs_tag_type;

#if 1	/* This looks like it makes sense at first, but it can loop */
	if ((xs->xs_control & XS_CTL_DISCOVERY) && (tinfo->goal.width == 0
	     && tinfo->goal.offset == 0
	     && tinfo->goal.ppr_options == 0)) {
		scb->flags |= SCB_NEGOTIATE;
		scb->hscb->control |= MK_MESSAGE;
	} else
#endif
	if ((tstate->auto_negotiate & mask) != 0) {
		scb->flags |= SCB_AUTO_NEGOTIATE;
		scb->hscb->control |= MK_MESSAGE;
	}

	LIST_INSERT_HEAD(&ahc->pending_scbs, scb, pending_links);

	/* Arm the command's timeout (ms -> ticks, overflow-safe). */
	if (!(xs->xs_control & XS_CTL_POLL)) {
		callout_reset(&scb->xs->xs_callout, xs->timeout > 1000000 ?
			      (xs->timeout / 1000) * hz : (xs->timeout * hz) / 1000,
			      ahc_timeout, scb);
	}

	/*
	 * We only allow one untagged transaction
	 * per target in the initiator role unless
	 * we are storing a full busy target *lun*
	 * table in SCB space.
	 */
	if ((scb->hscb->control & (TARGET_SCB|TAG_ENB)) == 0
	    && (ahc->flags & AHC_SCB_BTT) == 0) {
		struct scb_tailq *untagged_q;
		int target_offset;

		target_offset = SCB_GET_TARGET_OFFSET(ahc, scb);
		untagged_q = &(ahc->untagged_queues[target_offset]);
		TAILQ_INSERT_TAIL(untagged_q, scb, links.tqe);
		scb->flags |= SCB_UNTAGGEDQ;
		/* Another untagged command is active; ours must wait. */
		if (TAILQ_FIRST(untagged_q) != scb) {
			ahc_unlock(ahc, &s);
			return;
		}
	}
	scb->flags |= SCB_ACTIVE;

	if ((scb->flags & SCB_TARGET_IMMEDIATE) != 0) {
		/* Define a mapping from our tag to the SCB. */
		ahc->scb_data->scbindex[scb->hscb->tag] = scb;
		ahc_pause(ahc);
		if ((ahc->flags & AHC_PAGESCBS) == 0)
			ahc_outb(ahc, SCBPTR, scb->hscb->tag);
		ahc_outb(ahc, TARG_IMMEDIATE_SCB, scb->hscb->tag);
		ahc_unpause(ahc);
	} else {
		ahc_queue_scb(ahc, scb);
	}

	if (!(xs->xs_control & XS_CTL_POLL)) {
		ahc_unlock(ahc, &s);
		return;
	}

	/*
	 * If we can't use interrupts, poll for completion
	 */

	SC_DEBUG(xs->xs_periph, SCSIPI_DB3, ("cmd_poll\n"));
	do {
		if (ahc_poll(ahc, xs->timeout)) {
			if (!(xs->xs_control & XS_CTL_SILENT))
				printf("cmd fail\n");
			ahc_timeout(scb);
			break;
		}
	} while (!(xs->xs_status & XS_STS_DONE));
	ahc_unlock(ahc, &s);

	return;
}
697
698static int
699ahc_poll(struct ahc_softc *ahc, int wait)
700{
701	while (--wait) {
702		DELAY(1000);
703		if (ahc_inb(ahc, INTSTAT) & INT_PEND)
704			break;
705	}
706
707	if (wait == 0) {
708		printf("%s: board is not responding\n", ahc_name(ahc));
709		return (EIO);
710	}
711
712	ahc_intr(ahc);
713	return (0);
714}
715
716static void
717ahc_setup_data(struct ahc_softc *ahc, struct scsipi_xfer *xs,
718	       struct scb *scb)
719{
720	struct hardware_scb *hscb;
721
722	hscb = scb->hscb;
723	xs->resid = xs->status = 0;
724
725	hscb->cdb_len = xs->cmdlen;
726	if (hscb->cdb_len > sizeof(hscb->cdb32)) {
727		u_long s;
728
729		ahc_set_transaction_status(scb, CAM_REQ_INVALID);
730		ahc_lock(ahc, &s);
731		ahc_free_scb(ahc, scb);
732		ahc_unlock(ahc, &s);
733		scsipi_done(xs);
734		return;
735	}
736
737	if (hscb->cdb_len > 12) {
738		memcpy(hscb->cdb32, xs->cmd, hscb->cdb_len);
739		scb->flags |= SCB_CDB32_PTR;
740	} else {
741		memcpy(hscb->shared_data.cdb, xs->cmd, hscb->cdb_len);
742	}
743
744	/* Only use S/G if there is a transfer */
745	if (xs->datalen) {
746		int error;
747
748		error = bus_dmamap_load(ahc->parent_dmat,
749					scb->dmamap, xs->data,
750					xs->datalen, NULL,
751					((xs->xs_control & XS_CTL_NOSLEEP) ?
752					 BUS_DMA_NOWAIT : BUS_DMA_WAITOK) |
753					BUS_DMA_STREAMING |
754					((xs->xs_control & XS_CTL_DATA_IN) ?
755					 BUS_DMA_READ : BUS_DMA_WRITE));
756		if (error) {
757#ifdef AHC_DEBUG
758			printf("%s: in ahc_setup_data(): bus_dmamap_load() "
759			       "= %d\n",
760			       ahc_name(ahc), error);
761#endif
762			xs->error = XS_RESOURCE_SHORTAGE;
763			scsipi_done(xs);
764			return;
765		}
766		ahc_execute_scb(scb,
767				scb->dmamap->dm_segs,
768				scb->dmamap->dm_nsegs);
769	} else {
770		ahc_execute_scb(scb, NULL, 0);
771	}
772}
773
774static void
775ahc_set_recoveryscb(struct ahc_softc *ahc, struct scb *scb) {
776
777	if ((scb->flags & SCB_RECOVERY_SCB) == 0) {
778		struct scb *list_scb;
779
780		scb->flags |= SCB_RECOVERY_SCB;
781
782		/*
783		 * Take all queued, but not sent SCBs out of the equation.
784		 * Also ensure that no new CCBs are queued to us while we
785		 * try to fix this problem.
786		 */
787		scsipi_channel_freeze(&ahc->sc_channel, 1);
788		if (ahc->features & AHC_TWIN)
789			scsipi_channel_freeze(&ahc->sc_channel_b, 1);
790
791		/*
792		 * Go through all of our pending SCBs and remove
793		 * any scheduled timeouts for them.  We will reschedule
794		 * them after we've successfully fixed this problem.
795		 */
796		LIST_FOREACH(list_scb, &ahc->pending_scbs, pending_links) {
797			callout_stop(&list_scb->xs->xs_callout);
798		}
799	}
800}
801
/*
 * Command timeout handler.  Depending on bus state, this either
 * waits for the currently-active command, queues a bus device reset
 * to the timed-out target, or resets the whole bus as a last resort.
 */
void
ahc_timeout(void *arg)
{
	struct	scb *scb;
	struct	ahc_softc *ahc;
	u_long	s;
	int	found;
	u_int	last_phase;
	int	target;
	int	lun;
	int	i;
	char	channel;

	scb = arg;
	ahc = scb->ahc_softc;

	ahc_lock(ahc, &s);

	/* Stop the sequencer and drain any posted completions. */
	ahc_pause_and_flushwork(ahc);

	if ((scb->flags & SCB_ACTIVE) == 0) {
		/* Previous timeout took care of me already */
		printf("%s: Timedout SCB already complete. "
		       "Interrupts may not be functioning.\n", ahc_name(ahc));
		ahc_unpause(ahc);
		ahc_unlock(ahc, &s);
		return;
	}

	target = SCB_GET_TARGET(ahc, scb);
	channel = SCB_GET_CHANNEL(ahc, scb);
	lun = SCB_GET_LUN(scb);

	/* Log diagnostic state for the timed-out command. */
	ahc_print_path(ahc, scb);
	printf("SCB 0x%x - timed out\n", scb->hscb->tag);
	ahc_dump_card_state(ahc);
	last_phase = ahc_inb(ahc, LASTPHASE);
	if (scb->sg_count > 0) {
		for (i = 0; i < scb->sg_count; i++) {
			printf("sg[%d] - Addr 0x%x : Length %d\n",
			       i,
			       scb->sg_list[i].addr,
			       scb->sg_list[i].len & AHC_SG_LEN_MASK);
		}
	}
	if (scb->flags & (SCB_DEVICE_RESET|SCB_ABORT)) {
		/*
		 * Been down this road before.
		 * Do a full bus reset.
		 */
bus_reset:
		ahc_set_transaction_status(scb, CAM_CMD_TIMEOUT);
		found = ahc_reset_channel(ahc, channel, /*Initiate Reset*/TRUE);
		printf("%s: Issued Channel %c Bus Reset. "
		       "%d SCBs aborted\n", ahc_name(ahc), channel, found);
	} else {
		/*
		 * If we are a target, transition to bus free and report
		 * the timeout.
		 *
		 * The target/initiator that is holding up the bus may not
		 * be the same as the one that triggered this timeout
		 * (different commands have different timeout lengths).
		 * If the bus is idle and we are acting as the initiator
		 * for this request, queue a BDR message to the timed out
		 * target.  Otherwise, if the timed out transaction is
		 * active:
		 *   Initiator transaction:
		 *	Stuff the message buffer with a BDR message and assert
		 *	ATN in the hopes that the target will let go of the bus
		 *	and go to the mesgout phase.  If this fails, we'll
		 *	get another timeout 2 seconds later which will attempt
		 *	a bus reset.
		 *
		 *   Target transaction:
		 *	Transition to BUS FREE and report the error.
		 *	It's good to be the target!
		 */
		u_int active_scb_index;
		u_int saved_scbptr;

		saved_scbptr = ahc_inb(ahc, SCBPTR);
		active_scb_index = ahc_inb(ahc, SCB_TAG);

		if ((ahc_inb(ahc, SEQ_FLAGS) & NOT_IDENTIFIED) == 0
		  && (active_scb_index < ahc->scb_data->numscbs)) {
			struct scb *active_scb;

			/*
			 * If the active SCB is not us, assume that
			 * the active SCB has a longer timeout than
			 * the timedout SCB, and wait for the active
			 * SCB to timeout.
			 */
			active_scb = ahc_lookup_scb(ahc, active_scb_index);
			if (active_scb != scb) {
				uint64_t newtimeout;

				ahc_print_path(ahc, scb);
				printf("Other SCB Timeout%s",
				       (scb->flags & SCB_OTHERTCL_TIMEOUT) != 0
				       ? " again\n" : "\n");
				scb->flags |= SCB_OTHERTCL_TIMEOUT;
				/* Re-arm with the longer of both timeouts. */
				newtimeout = MAX(active_scb->xs->timeout,
						 scb->xs->timeout);
				callout_reset(&scb->xs->xs_callout,
				    newtimeout > 1000000 ?
				    (newtimeout / 1000) * hz :
				    (newtimeout * hz) / 1000,
				    ahc_timeout, scb);
				ahc_unpause(ahc);
				ahc_unlock(ahc, &s);
				return;
			}

			/* It's us */
			if ((scb->flags & SCB_TARGET_SCB) != 0) {

				/*
				 * Send back any queued up transactions
				 * and properly record the error condition.
				 */
				ahc_abort_scbs(ahc, SCB_GET_TARGET(ahc, scb),
					       SCB_GET_CHANNEL(ahc, scb),
					       SCB_GET_LUN(scb),
					       scb->hscb->tag,
					       ROLE_TARGET,
					       CAM_CMD_TIMEOUT);

				/* Will clear us from the bus */
				ahc_restart(ahc);
				ahc_unlock(ahc, &s);
				return;
			}

			/* Ask the target to release the bus via a BDR. */
			ahc_set_recoveryscb(ahc, active_scb);
			ahc_outb(ahc, MSG_OUT, HOST_MSG);
			ahc_outb(ahc, SCSISIGO, last_phase|ATNO);
			ahc_print_path(ahc, active_scb);
			printf("BDR message in message buffer\n");
			active_scb->flags |= SCB_DEVICE_RESET;
			/* Give the BDR two seconds before a bus reset. */
			callout_reset(&active_scb->xs->xs_callout,
				      2 * hz, ahc_timeout, active_scb);
			ahc_unpause(ahc);
		} else {
			int disconnected;

			/* XXX Shouldn't panic.  Just punt instead? */
			if ((scb->flags & SCB_TARGET_SCB) != 0)
				panic("Timed-out target SCB but bus idle");

			if (last_phase != P_BUSFREE
			 && (ahc_inb(ahc, SSTAT0) & TARGET) != 0) {
				/* XXX What happened to the SCB? */
				/* Hung target selection.  Goto busfree */
				printf("%s: Hung target selection\n",
				       ahc_name(ahc));
				ahc_restart(ahc);
				ahc_unlock(ahc, &s);
				return;
			}

			/* Still in the qinfifo means not yet disconnected. */
			if (ahc_search_qinfifo(ahc, target, channel, lun,
					       scb->hscb->tag, ROLE_INITIATOR,
					       /*status*/0, SEARCH_COUNT) > 0) {
				disconnected = FALSE;
			} else {
				disconnected = TRUE;
			}

			if (disconnected) {

				ahc_set_recoveryscb(ahc, scb);
				/*
				 * Actually re-queue this SCB in an attempt
				 * to select the device before it reconnects.
				 * In either case (selection or reselection),
				 * we will now issue a target reset to the
				 * timed-out device.
				 *
				 * Set the MK_MESSAGE control bit indicating
				 * that we desire to send a message.  We
				 * also set the disconnected flag since
				 * in the paging case there is no guarantee
				 * that our SCB control byte matches the
				 * version on the card.  We don't want the
				 * sequencer to abort the command thinking
				 * an unsolicited reselection occurred.
				 */
				scb->hscb->control |= MK_MESSAGE|DISCONNECTED;
				scb->flags |= SCB_DEVICE_RESET;

				/*
				 * Remove any cached copy of this SCB in the
				 * disconnected list in preparation for the
				 * queuing of our abort SCB.  We use the
				 * same element in the SCB, SCB_NEXT, for
				 * both the qinfifo and the disconnected list.
				 */
				ahc_search_disc_list(ahc, target, channel,
						     lun, scb->hscb->tag,
						     /*stop_on_first*/TRUE,
						     /*remove*/TRUE,
						     /*save_state*/FALSE);

				/*
				 * In the non-paging case, the sequencer will
				 * never re-reference the in-core SCB.
				 * To make sure we are notified during
				 * reselection, set the MK_MESSAGE flag in
				 * the card's copy of the SCB.
				 */
				if ((ahc->flags & AHC_PAGESCBS) == 0) {
					ahc_outb(ahc, SCBPTR, scb->hscb->tag);
					ahc_outb(ahc, SCB_CONTROL,
						 ahc_inb(ahc, SCB_CONTROL)
						| MK_MESSAGE);
				}

				/*
				 * Clear out any entries in the QINFIFO first
				 * so we are the next SCB for this target
				 * to run.
				 */
				ahc_search_qinfifo(ahc,
						   SCB_GET_TARGET(ahc, scb),
						   channel, SCB_GET_LUN(scb),
						   SCB_LIST_NULL,
						   ROLE_INITIATOR,
						   CAM_REQUEUE_REQ,
						   SEARCH_COMPLETE);
				ahc_print_path(ahc, scb);
				printf("Queuing a BDR SCB\n");
				ahc_qinfifo_requeue_tail(ahc, scb);
				ahc_outb(ahc, SCBPTR, saved_scbptr);
				/* Give the BDR two seconds to work. */
				callout_reset(&scb->xs->xs_callout, 2 * hz,
					      ahc_timeout, scb);
				ahc_unpause(ahc);
			} else {
				/* Go "immediately" to the bus reset */
				/* This shouldn't happen */
				ahc_set_recoveryscb(ahc, scb);
				ahc_print_path(ahc, scb);
				printf("SCB %d: Immediate reset.  "
					"Flags = 0x%x\n", scb->hscb->tag,
					scb->flags);
				goto bus_reset;
			}
		}
	}
	ahc_unlock(ahc, &s);
}
1054
1055void
1056ahc_platform_set_tags(struct ahc_softc *ahc,
1057		      struct ahc_devinfo *devinfo, int enable)
1058{
1059	struct ahc_tmode_tstate *tstate;
1060
1061	ahc_fetch_transinfo(ahc, devinfo->channel, devinfo->our_scsiid,
1062			    devinfo->target, &tstate);
1063
1064	if (enable)
1065		tstate->tagenable |= devinfo->target_mask;
1066	else
1067		tstate->tagenable &= ~devinfo->target_mask;
1068}
1069
1070int
1071ahc_platform_alloc(struct ahc_softc *ahc, void *platform_arg)
1072{
1073	if (sizeof(struct ahc_platform_data) == 0)
1074		return 0;
1075	ahc->platform_data = malloc(sizeof(struct ahc_platform_data), M_DEVBUF,
1076				    M_WAITOK);
1077	return (0);
1078}
1079
1080void
1081ahc_platform_free(struct ahc_softc *ahc)
1082{
1083	if (sizeof(struct ahc_platform_data) == 0)
1084		return;
1085	free(ahc->platform_data, M_DEVBUF);
1086}
1087
/*
 * Compare two softcs for ordering during controller registration.
 * This platform imposes no particular ordering, so all controllers
 * compare equal.
 */
int
ahc_softc_comp(struct ahc_softc *lahc, struct ahc_softc *rahc)
{
	return 0;
}
1093
1094int
1095ahc_detach(struct ahc_softc *ahc, int flags)
1096{
1097	int rv = 0;
1098
1099	ahc_intr_enable(ahc, FALSE);
1100	if (ahc->sc_child != NULL)
1101		rv = config_detach(ahc->sc_child, flags);
1102	if (rv == 0 && ahc->sc_child_b != NULL)
1103		rv = config_detach(ahc->sc_child_b, flags);
1104
1105	pmf_device_deregister(ahc->sc_dev);
1106	ahc_free(ahc);
1107
1108	return (rv);
1109}
1110
1111
/*
 * Forward asynchronous controller events (transfer negotiation
 * results, bus resets) to the scsipi layer.
 */
void
ahc_send_async(struct ahc_softc *ahc, char channel, u_int target, u_int lun,
	       ac_code code, void *opt_arg)
{
	struct ahc_tmode_tstate *tstate;
	struct ahc_initiator_tinfo *tinfo;
	struct ahc_devinfo devinfo;
	struct scsipi_channel *chan;
	struct scsipi_xfer_mode xm;

	chan = channel == 'B' ? &ahc->sc_channel_b : &ahc->sc_channel;
	switch (code) {
	case AC_TRANSFER_NEG:
		tinfo = ahc_fetch_transinfo(ahc, channel, ahc->our_id, target,
			    &tstate);
		ahc_compile_devinfo(&devinfo, ahc->our_id, target, lun,
		    channel, ROLE_UNKNOWN);
		/*
		 * Don't bother if negotiating. XXX?
		 */
		if (tinfo->curr.period != tinfo->goal.period
		    || tinfo->curr.width != tinfo->goal.width
		    || tinfo->curr.offset != tinfo->goal.offset
		    || tinfo->curr.ppr_options != tinfo->goal.ppr_options)
			break;
		/* Report the now-current transfer parameters. */
		xm.xm_target = target;
		xm.xm_mode = 0;
		xm.xm_period = tinfo->curr.period;
		xm.xm_offset = tinfo->curr.offset;
		if (tinfo->curr.width == MSG_EXT_WDTR_BUS_16_BIT)
			xm.xm_mode |= PERIPH_CAP_WIDE16;
		if (tinfo->curr.period)
			xm.xm_mode |= PERIPH_CAP_SYNC;
		if (tstate->tagenable & devinfo.target_mask)
			xm.xm_mode |= PERIPH_CAP_TQING;
		if (tinfo->curr.ppr_options & MSG_EXT_PPR_DT_REQ)
			xm.xm_mode |= PERIPH_CAP_DT;
		scsipi_async_event(chan, ASYNC_EVENT_XFER_MODE, &xm);
		break;
	case AC_BUS_RESET:
		scsipi_async_event(chan, ASYNC_EVENT_RESET, NULL);
		/* FALLTHROUGH */
	case AC_SENT_BDR:
	default:
		break;
	}
}
1158