/*	$NetBSD: aic79xx_osm.c,v 1.36 2021/08/07 16:19:12 thorpej Exp $	*/

/*
 * Bus independent NetBSD shim for the aic7xxx based adaptec SCSI controllers
 *
 * Copyright (c) 1994-2002 Justin T. Gibbs.
 * Copyright (c) 2001-2002 Adaptec Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification.
 * 2. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU Public License ("GPL").
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * //depot/aic7xxx/freebsd/dev/aic7xxx/aic79xx_osm.c#26 $
 *
 * $FreeBSD: src/sys/dev/aic7xxx/aic79xx_osm.c,v 1.11 2003/05/04 00:20:07 gibbs Exp $
 */
/*
 * Ported from FreeBSD by Pascal Renauld, Network Storage Solutions, Inc.
 * - April 2003
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: aic79xx_osm.c,v 1.36 2021/08/07 16:19:12 thorpej Exp $");

#include <dev/ic/aic79xx_osm.h>
#include <dev/ic/aic79xx_inline.h>

#ifndef AHD_TMODE_ENABLE
#define AHD_TMODE_ENABLE 0
#endif

static int	ahd_ioctl(struct scsipi_channel *channel, u_long cmd,
			  void *addr, int flag, struct proc *p);
static void	ahd_action(struct scsipi_channel *chan,
			   scsipi_adapter_req_t req, void *arg);
static void	ahd_execute_scb(void *arg, bus_dma_segment_t *dm_segs,
				int nsegments);
static int	ahd_poll(struct ahd_softc *ahd, int wait);
static void	ahd_setup_data(struct ahd_softc *ahd, struct scsipi_xfer *xs,
			       struct scb *scb);

#if NOT_YET
static void	ahd_set_recoveryscb(struct ahd_softc *ahd, struct scb *scb);
#endif

static bool	ahd_pmf_suspend(device_t, const pmf_qual_t *);
static bool	ahd_pmf_resume(device_t, const pmf_qual_t *);
static bool	ahd_pmf_shutdown(device_t, int);

/*
 * Attach all the sub-devices we can find
 */
int
ahd_attach(struct ahd_softc *ahd)
{
	int	s;
	char	ahd_info[256];

	ahd_controller_info(ahd, ahd_info, sizeof(ahd_info));
	aprint_normal("%s: %s\n", ahd_name(ahd), ahd_info);

	ahd_lock(ahd, &s);

	ahd->sc_adapter.adapt_dev = ahd->sc_dev;
	ahd->sc_adapter.adapt_nchannels = 1;

	ahd->sc_adapter.adapt_openings = ahd->scb_data.numscbs - 1;
	ahd->sc_adapter.adapt_max_periph = 32;

	ahd->sc_adapter.adapt_ioctl = ahd_ioctl;
	ahd->sc_adapter.adapt_minphys = ahd_minphys;
	ahd->sc_adapter.adapt_request = ahd_action;

	ahd->sc_channel.chan_adapter = &ahd->sc_adapter;
	ahd->sc_channel.chan_bustype = &scsi_bustype;
	ahd->sc_channel.chan_channel = 0;
	ahd->sc_channel.chan_ntargets = AHD_NUM_TARGETS;
	ahd->sc_channel.chan_nluns = 8 /*AHD_NUM_LUNS*/;
	ahd->sc_channel.chan_id = ahd->our_id;
	ahd->sc_channel.chan_flags |= SCSIPI_CHAN_CANGROW;

	ahd->sc_child = config_found(ahd->sc_dev, &ahd->sc_channel, scsiprint,
	    CFARGS_NONE);

	ahd_intr_enable(ahd, TRUE);

	if (ahd->flags & AHD_RESET_BUS_A)
		ahd_reset_channel(ahd, 'A', TRUE);

	if (!pmf_device_register1(ahd->sc_dev,
	    ahd_pmf_suspend, ahd_pmf_resume, ahd_pmf_shutdown))
		aprint_error_dev(ahd->sc_dev,
		    "couldn't establish power handler\n");

	ahd_unlock(ahd, &s);

	return (1);
}

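/*
 * Power management hooks.  Suspend and shutdown both quiesce the
 * controller via ahd_shutdown(); a real suspend/resume path is not
 * implemented yet (see the #if 0 blocks below).
 */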
static bool
ahd_pmf_suspend(device_t dev, const pmf_qual_t *qual)
{
	struct ahd_softc *sc = device_private(dev);
#if 0
	return (ahd_suspend(sc) == 0);
#else
	ahd_shutdown(sc);
	return true;
#endif
}

static bool
ahd_pmf_resume(device_t dev, const pmf_qual_t *qual)
{
#if 0
	struct ahd_softc *sc = device_private(dev);

	return (ahd_resume(sc) == 0);
#else
	return true;
#endif
}

static bool
ahd_pmf_shutdown(device_t dev, int howto)
{
	struct ahd_softc *sc = device_private(dev);

	/* Disable all interrupt sources by resetting the controller */
	ahd_shutdown(sc);

	return true;
}

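/*
 * Adapter ioctl entry point; only SCBUSIORESET (SCSI bus reset) is
 * supported.
 */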
static int
ahd_ioctl(struct scsipi_channel *channel, u_long cmd,
	  void *addr, int flag, struct proc *p)
{
	struct ahd_softc *ahd;
	int s, ret = ENOTTY;

	ahd = device_private(channel->chan_adapter->adapt_dev);

	switch (cmd) {
	case SCBUSIORESET:
		s = splbio();
		ahd_reset_channel(ahd, channel->chan_channel == 1 ? 'B' : 'A', TRUE);
		splx(s);
		ret = 0;
		break;
	default:
		break;
	}

	return ret;
}

/*
 * Catch an interrupt from the adapter
 */
void
ahd_platform_intr(void *arg)
{
	struct	ahd_softc *ahd;

	ahd = arg;

	printf("%s: ahd_platform_intr\n", ahd_name(ahd));

	ahd_intr(ahd);
}

/*
 * We have an scb which has been processed by the
 * adaptor, now we look to see how the operation
 * went.
 */
void
ahd_done(struct ahd_softc *ahd, struct scb *scb)
{
	struct scsipi_xfer	*xs;
	struct scsipi_periph	*periph;
	int			s;

	LIST_REMOVE(scb, pending_links);

	xs = scb->xs;
	periph = xs->xs_periph;

	callout_stop(&scb->xs->xs_callout);

	if (xs->datalen) {
		int op;

		if (xs->xs_control & XS_CTL_DATA_IN)
			op = BUS_DMASYNC_POSTREAD;
		else
			op = BUS_DMASYNC_POSTWRITE;

		bus_dmamap_sync(ahd->parent_dmat, scb->dmamap, 0,
				scb->dmamap->dm_mapsize, op);
		bus_dmamap_unload(ahd->parent_dmat, scb->dmamap);
	}

	/*
	 * If the recovery SCB completes, we have to be
	 * out of our timeout.
	 */
	if ((scb->flags & SCB_RECOVERY_SCB) != 0) {
		struct	scb *list_scb;

		/*
		 * We were able to complete the command successfully,
		 * so reinstate the timeouts for all other pending
		 * commands.
		 */
		LIST_FOREACH(list_scb, &ahd->pending_scbs, pending_links) {
			struct scsipi_xfer	*txs = list_scb->xs;

			if (!(txs->xs_control & XS_CTL_POLL)) {
				callout_reset(&txs->xs_callout,
				    (txs->timeout > 1000000) ?
				    (txs->timeout / 1000) * hz :
				    (txs->timeout * hz) / 1000,
				    ahd_timeout, list_scb);
			}
		}

		if (ahd_get_transaction_status(scb) != XS_NOERROR)
			ahd_set_transaction_status(scb, XS_TIMEOUT);
		scsipi_printaddr(xs->xs_periph);
		printf("%s: no longer in timeout, status = %x\n",
		       ahd_name(ahd), xs->status);
	}

	if (xs->error != XS_NOERROR) {
		/* Don't clobber any existing error state */
	} else if ((xs->status == SCSI_STATUS_BUSY) ||
		   (xs->status == SCSI_STATUS_QUEUE_FULL)) {
		ahd_set_transaction_status(scb, XS_BUSY);
		printf("%s: drive (ID %d, LUN %d) queue full (SCB 0x%x)\n",
		       ahd_name(ahd), SCB_GET_TARGET(ahd,scb), SCB_GET_LUN(scb), SCB_GET_TAG(scb));
	} else if ((scb->flags & SCB_SENSE) != 0) {
		/*
		 * We performed autosense retrieval.
		 *
		 * Zero the sense data before having the drive fill it.
		 * The SCSI spec mandates that any untransferred data
		 * should be assumed to be zero.  Complete the 'bounce'
		 * of sense information through buffers accessible via
		 * bus-space by copying it into the client's xs.
		 */
		memset(&xs->sense.scsi_sense, 0, sizeof(xs->sense.scsi_sense));
		memcpy(&xs->sense.scsi_sense, ahd_get_sense_buf(ahd, scb),
		       sizeof(struct scsi_sense_data));

		ahd_set_transaction_status(scb, XS_SENSE);
	} else if ((scb->flags & SCB_PKT_SENSE) != 0) {
		struct scsi_status_iu_header *siu;
		u_int sense_len;
#ifdef AHD_DEBUG
		int i;
#endif
		/*
		 * Copy only the sense data into the provided buffer.
		 */
		siu = (struct scsi_status_iu_header *)scb->sense_data;
		sense_len = MIN(scsi_4btoul(siu->sense_length),
				sizeof(xs->sense.scsi_sense));
		memset(&xs->sense.scsi_sense, 0, sizeof(xs->sense.scsi_sense));
		memcpy(&xs->sense.scsi_sense,
		       scb->sense_data + SIU_SENSE_OFFSET(siu), sense_len);
#ifdef AHD_DEBUG
		printf("Copied %d bytes of sense data offset %d:", sense_len,
		       SIU_SENSE_OFFSET(siu));
		for (i = 0; i < sense_len; i++)
			printf(" 0x%x", ((uint8_t *)&xs->sense.scsi_sense)[i]);
		printf("\n");
#endif
		ahd_set_transaction_status(scb, XS_SENSE);
	}

	if (scb->flags & SCB_FREEZE_QUEUE) {
		scsipi_periph_thaw(periph, 1);
		scb->flags &= ~SCB_FREEZE_QUEUE;
	}

	if (scb->flags & SCB_REQUEUE)
		ahd_set_transaction_status(scb, XS_REQUEUE);

	ahd_lock(ahd, &s);
	ahd_free_scb(ahd, scb);
	ahd_unlock(ahd, &s);

	scsipi_done(xs);
}

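/*
 * scsipi adapter request entry point: start a transfer, grow the SCB
 * pool, or (re)negotiate transfer parameters for a target.
 */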
static void
ahd_action(struct scsipi_channel *chan, scsipi_adapter_req_t req, void *arg)
{
	struct ahd_softc *ahd;
	struct ahd_initiator_tinfo *tinfo;
	struct ahd_tmode_tstate *tstate;

	ahd = device_private(chan->chan_adapter->adapt_dev);

	switch(req) {

	case ADAPTER_REQ_RUN_XFER:
	  {
		struct scsipi_xfer *xs;
		struct scsipi_periph *periph;
		struct scb *scb;
		struct hardware_scb *hscb;
		u_int target_id;
		u_int our_id;
		u_int col_idx;
		char channel;
		int s;

		xs = arg;
		periph = xs->xs_periph;

		SC_DEBUG(periph, SCSIPI_DB3, ("ahd_action\n"));

		target_id = periph->periph_target;
		our_id = ahd->our_id;
		channel = (chan->chan_channel == 1) ? 'B' : 'A';

		/*
		 * get an scb to use.
		 */
		ahd_lock(ahd, &s);
		tinfo = ahd_fetch_transinfo(ahd, channel, our_id,
					    target_id, &tstate);

		if (xs->xs_tag_type != 0 ||
		    (tinfo->curr.ppr_options & MSG_EXT_PPR_IU_REQ) != 0)
			col_idx = AHD_NEVER_COL_IDX;
		else
			col_idx = AHD_BUILD_COL_IDX(target_id,
			    periph->periph_lun);

		if ((scb = ahd_get_scb(ahd, col_idx)) == NULL) {
			xs->error = XS_RESOURCE_SHORTAGE;
			ahd_unlock(ahd, &s);
			scsipi_done(xs);
			return;
		}
		ahd_unlock(ahd, &s);

		hscb = scb->hscb;

		SC_DEBUG(periph, SCSIPI_DB3, ("start scb(%p)\n", scb));
		scb->xs = xs;

		/*
		 * Put all the arguments for the xfer in the scb
		 */
		hscb->control = 0;
		hscb->scsiid = BUILD_SCSIID(ahd, sim, target_id, our_id);
		hscb->lun = periph->periph_lun;
		if (xs->xs_control & XS_CTL_RESET) {
			hscb->cdb_len = 0;
			scb->flags |= SCB_DEVICE_RESET;
			hscb->control |= MK_MESSAGE;
			hscb->task_management = SIU_TASKMGMT_LUN_RESET;
			ahd_execute_scb(scb, NULL, 0);
		} else {
			hscb->task_management = 0;
		}

		ahd_setup_data(ahd, xs, scb);
		break;
	  }

	case ADAPTER_REQ_GROW_RESOURCES:
#ifdef AHD_DEBUG
		printf("%s: ADAPTER_REQ_GROW_RESOURCES\n", ahd_name(ahd));
#endif
		chan->chan_adapter->adapt_openings += ahd_alloc_scbs(ahd);
		if (ahd->scb_data.numscbs >= AHD_SCB_MAX_ALLOC)
			chan->chan_flags &= ~SCSIPI_CHAN_CANGROW;
		break;

	case ADAPTER_REQ_SET_XFER_MODE:
	    {
		struct scsipi_xfer_mode *xm = arg;
		struct ahd_devinfo devinfo;
		int target_id, our_id, first;
		u_int width;
		int s;
		char channel;
		u_int ppr_options = 0, period, offset;
		uint16_t old_autoneg;

		target_id = xm->xm_target;
		our_id = chan->chan_id;
		channel = 'A';
		s = splbio();
		tinfo = ahd_fetch_transinfo(ahd, channel, our_id, target_id,
		    &tstate);
		ahd_compile_devinfo(&devinfo, our_id, target_id,
		    0, channel, ROLE_INITIATOR);

		old_autoneg = tstate->auto_negotiate;

		/*
		 * XXX since the period and offset are not provided here,
		 * fake things by forcing a renegotiation using the user
		 * settings if this is called for the first time (i.e.
		 * during probe). Also, cap various values at the user
		 * values, assuming that the user set it up that way.
		 */
		if (ahd->inited_target[target_id] == 0) {
			period = tinfo->user.period;
			offset = tinfo->user.offset;
			ppr_options = tinfo->user.ppr_options;
			width = tinfo->user.width;
			tstate->tagenable |=
			    (ahd->user_tagenable & devinfo.target_mask);
			tstate->discenable |=
			    (ahd->user_discenable & devinfo.target_mask);
			ahd->inited_target[target_id] = 1;
			first = 1;
		} else
			first = 0;

		if (xm->xm_mode & (PERIPH_CAP_WIDE16 | PERIPH_CAP_DT))
			width = MSG_EXT_WDTR_BUS_16_BIT;
		else
			width = MSG_EXT_WDTR_BUS_8_BIT;

		ahd_validate_width(ahd, NULL, &width, ROLE_UNKNOWN);
		if (width > tinfo->user.width)
			width = tinfo->user.width;
		ahd_set_width(ahd, &devinfo, width, AHD_TRANS_GOAL, FALSE);

		if (!(xm->xm_mode & (PERIPH_CAP_SYNC | PERIPH_CAP_DT))) {
			period = 0;
			offset = 0;
			ppr_options = 0;
		}

		if ((xm->xm_mode & PERIPH_CAP_DT) &&
		    (tinfo->user.ppr_options & MSG_EXT_PPR_DT_REQ))
			ppr_options |= MSG_EXT_PPR_DT_REQ;
		else
			ppr_options &= ~MSG_EXT_PPR_DT_REQ;

		if ((tstate->discenable & devinfo.target_mask) == 0 ||
		    (tstate->tagenable & devinfo.target_mask) == 0)
			ppr_options &= ~MSG_EXT_PPR_IU_REQ;

		if ((xm->xm_mode & PERIPH_CAP_TQING) &&
		    (ahd->user_tagenable & devinfo.target_mask))
			tstate->tagenable |= devinfo.target_mask;
		else
			tstate->tagenable &= ~devinfo.target_mask;

		ahd_find_syncrate(ahd, &period, &ppr_options, AHD_SYNCRATE_MAX);
		ahd_validate_offset(ahd, NULL, period, &offset,
		    MSG_EXT_WDTR_BUS_8_BIT, ROLE_UNKNOWN);
		if (offset == 0) {
			period = 0;
			ppr_options = 0;
		}
		if (ppr_options != 0
		    && tinfo->user.transport_version >= 3) {
			tinfo->goal.transport_version =
			    tinfo->user.transport_version;
			tinfo->curr.transport_version =
			    tinfo->user.transport_version;
		}

		ahd_set_syncrate(ahd, &devinfo, period, offset,
		    ppr_options, AHD_TRANS_GOAL, FALSE);

		/*
		 * If this is the first request, and no negotiation is
		 * needed, just confirm the state to the scsipi layer,
		 * so that it can print a message.
		 */
		if (old_autoneg == tstate->auto_negotiate && first) {
			xm->xm_mode = 0;
			xm->xm_period = tinfo->curr.period;
			xm->xm_offset = tinfo->curr.offset;
			if (tinfo->curr.width == MSG_EXT_WDTR_BUS_16_BIT)
				xm->xm_mode |= PERIPH_CAP_WIDE16;
			if (tinfo->curr.period)
				xm->xm_mode |= PERIPH_CAP_SYNC;
			if (tstate->tagenable & devinfo.target_mask)
				xm->xm_mode |= PERIPH_CAP_TQING;
			if (tinfo->curr.ppr_options & MSG_EXT_PPR_DT_REQ)
				xm->xm_mode |= PERIPH_CAP_DT;
			scsipi_async_event(chan, ASYNC_EVENT_XFER_MODE, xm);
		}
		splx(s);
	    }
	}

	return;
}

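/*
 * Finish setting up an SCB once its data (if any) has been DMA-mapped,
 * then hand it to the sequencer, or poll for completion when interrupts
 * cannot be used.
 */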
static void
ahd_execute_scb(void *arg, bus_dma_segment_t *dm_segs, int nsegments)
{
	struct scb *scb;
	struct scsipi_xfer *xs;
	struct ahd_softc *ahd;
	struct ahd_initiator_tinfo *tinfo;
	struct ahd_tmode_tstate *tstate;
	u_int  mask;
	int    s;

	scb = arg;
	xs = scb->xs;
	xs->error = 0;
	xs->status = 0;
	xs->xs_status = 0;
	ahd = device_private(
	    xs->xs_periph->periph_channel->chan_adapter->adapt_dev);

	scb->sg_count = 0;
	if (nsegments != 0) {
		void *sg;
		int op;
		u_int i;

		ahd_setup_data_scb(ahd, scb);

		/* Copy the segments into our SG list */
		for (i = nsegments, sg = scb->sg_list; i > 0; i--) {

			sg = ahd_sg_setup(ahd, scb, sg, dm_segs->ds_addr,
					  dm_segs->ds_len,
					  /*last*/i == 1);
			dm_segs++;
		}

		if (xs->xs_control & XS_CTL_DATA_IN)
			op = BUS_DMASYNC_PREREAD;
		else
			op = BUS_DMASYNC_PREWRITE;

		bus_dmamap_sync(ahd->parent_dmat, scb->dmamap, 0,
				scb->dmamap->dm_mapsize, op);
	}

	ahd_lock(ahd, &s);

	/*
	 * Last time we need to check if this SCB needs to
	 * be aborted.
	 */
	if (ahd_get_scsi_status(scb) == XS_STS_DONE) {
		if (nsegments != 0)
			bus_dmamap_unload(ahd->parent_dmat,
					  scb->dmamap);
		ahd_free_scb(ahd, scb);
		ahd_unlock(ahd, &s);
		return;
	}

	tinfo = ahd_fetch_transinfo(ahd, SCSIID_CHANNEL(ahd, scb->hscb->scsiid),
				    SCSIID_OUR_ID(scb->hscb->scsiid),
				    SCSIID_TARGET(ahd, scb->hscb->scsiid),
				    &tstate);

	mask = SCB_GET_TARGET_MASK(ahd, scb);

	if ((tstate->discenable & mask) != 0)
		scb->hscb->control |= DISCENB;

	if ((tstate->tagenable & mask) != 0)
		scb->hscb->control |= xs->xs_tag_type|TAG_ENB;

	if ((tinfo->curr.ppr_options & MSG_EXT_PPR_IU_REQ) != 0) {
		scb->flags |= SCB_PACKETIZED;
		if (scb->hscb->task_management != 0)
			scb->hscb->control &= ~MK_MESSAGE;
	}

#if 0	/* This looks like it makes sense at first, but it can loop */
	if ((xs->xs_control & XS_CTL_DISCOVERY) &&
	    (tinfo->goal.width != 0
	     || tinfo->goal.period != 0
	     || tinfo->goal.ppr_options != 0)) {
		scb->flags |= SCB_NEGOTIATE;
		scb->hscb->control |= MK_MESSAGE;
	} else
#endif
	if ((tstate->auto_negotiate & mask) != 0) {
		scb->flags |= SCB_AUTO_NEGOTIATE;
		scb->hscb->control |= MK_MESSAGE;
	}

	LIST_INSERT_HEAD(&ahd->pending_scbs, scb, pending_links);

	scb->flags |= SCB_ACTIVE;

	if (!(xs->xs_control & XS_CTL_POLL)) {
		callout_reset(&scb->xs->xs_callout, xs->timeout > 1000000 ?
			      (xs->timeout / 1000) * hz : (xs->timeout * hz) / 1000,
			      ahd_timeout, scb);
	}

	if ((scb->flags & SCB_TARGET_IMMEDIATE) != 0) {
		/* Define a mapping from our tag to the SCB. */
		ahd->scb_data.scbindex[SCB_GET_TAG(scb)] = scb;
		ahd_pause(ahd);
		ahd_set_scbptr(ahd, SCB_GET_TAG(scb));
		ahd_outb(ahd, RETURN_1, CONT_MSG_LOOP_TARG);
		ahd_unpause(ahd);
	} else {
		ahd_queue_scb(ahd, scb);
	}

	if (!(xs->xs_control & XS_CTL_POLL)) {
		ahd_unlock(ahd, &s);
		return;
	}
	/*
	 * If we can't use interrupts, poll for completion
	 */
	SC_DEBUG(xs->xs_periph, SCSIPI_DB3, ("cmd_poll\n"));
	do {
		if (ahd_poll(ahd, xs->timeout)) {
			if (!(xs->xs_control & XS_CTL_SILENT))
				printf("cmd fail\n");
			ahd_timeout(scb);
			break;
		}
	} while (!(xs->xs_status & XS_STS_DONE));

	ahd_unlock(ahd, &s);
}

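/*
 * Busy-wait up to 'wait' milliseconds for the controller to post an
 * interrupt, then run the interrupt handler by hand.
 */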
static int
ahd_poll(struct ahd_softc *ahd, int wait)
{

	while (--wait) {
		DELAY(1000);
		if (ahd_inb(ahd, INTSTAT) & INT_PEND)
			break;
	}

	if (wait == 0) {
		printf("%s: board is not responding\n", ahd_name(ahd));
		return (EIO);
	}

	ahd_intr(ahd);
	return (0);
}


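/*
 * Copy the CDB into the hardware SCB and DMA-map the data transfer
 * (if any) before passing the SCB on to ahd_execute_scb().
 */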
static void
ahd_setup_data(struct ahd_softc *ahd, struct scsipi_xfer *xs,
	       struct scb *scb)
{
	struct hardware_scb *hscb;

	hscb = scb->hscb;
	xs->resid = xs->status = 0;

	hscb->cdb_len = xs->cmdlen;
	if (hscb->cdb_len > MAX_CDB_LEN) {
		int s;
		/*
		 * Should CAM start to support CDB sizes
		 * greater than 16 bytes, we could use
		 * the sense buffer to store the CDB.
		 */
		ahd_set_transaction_status(scb,
					   XS_DRIVER_STUFFUP);

		ahd_lock(ahd, &s);
		ahd_free_scb(ahd, scb);
		ahd_unlock(ahd, &s);
		scsipi_done(xs);
		/* The SCB has been freed; don't touch it again. */
		return;
	}
	memcpy(hscb->shared_data.idata.cdb, xs->cmd, hscb->cdb_len);

	/* Only use S/G if there is a transfer */
	if (xs->datalen) {
		int error;

		error = bus_dmamap_load(ahd->parent_dmat,
					scb->dmamap, xs->data,
					xs->datalen, NULL,
					((xs->xs_control & XS_CTL_NOSLEEP) ?
					 BUS_DMA_NOWAIT : BUS_DMA_WAITOK) |
					BUS_DMA_STREAMING |
					((xs->xs_control & XS_CTL_DATA_IN) ?
					 BUS_DMA_READ : BUS_DMA_WRITE));
		if (error) {
#ifdef AHD_DEBUG
			printf("%s: in ahd_setup_data(): bus_dmamap_load() "
			       "= %d\n",
			       ahd_name(ahd), error);
#endif
			xs->error = XS_RESOURCE_SHORTAGE;
			scsipi_done(xs);
			return;
		}
		ahd_execute_scb(scb,
				scb->dmamap->dm_segs,
				scb->dmamap->dm_nsegs);
	} else {
		ahd_execute_scb(scb, NULL, 0);
	}
}

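/*
 * Per-command timeout handler: if the SCB is still active, dump the
 * controller state and reset the bus to recover.
 */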
void
ahd_timeout(void *arg)
{
	struct	scb	  *scb;
	struct	ahd_softc *ahd;
	int		   s;

	scb = arg;
	ahd = scb->ahd_softc;

	printf("%s: ahd_timeout\n", ahd_name(ahd));

	ahd_lock(ahd, &s);

	ahd_pause_and_flushwork(ahd);
	(void)ahd_save_modes(ahd);
#if 0
	ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI);
	ahd_outb(ahd, SCSISIGO, ACKO);
	printf("set ACK\n");
	ahd_outb(ahd, SCSISIGO, 0);
	printf("clearing Ack\n");
	ahd_restore_modes(ahd, saved_modes);
#endif
	if ((scb->flags & SCB_ACTIVE) == 0) {
		/* Previous timeout took care of me already */
		printf("%s: Timedout SCB already complete. "
		       "Interrupts may not be functioning.\n", ahd_name(ahd));
		ahd_unpause(ahd);
		ahd_unlock(ahd, &s);
		return;
	}

	ahd_print_path(ahd, scb);
	printf("SCB 0x%x - timed out\n", SCB_GET_TAG(scb));
	ahd_dump_card_state(ahd);
	ahd_reset_channel(ahd, SIM_CHANNEL(ahd, sim),
			  /*initiate reset*/TRUE);
	ahd_unlock(ahd, &s);
	return;
}

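/*
 * Allocate and release the platform-specific portion of the softc.
 */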
int
ahd_platform_alloc(struct ahd_softc *ahd, void *platform_arg)
{
	ahd->platform_data = malloc(sizeof(struct ahd_platform_data), M_DEVBUF,
				    M_WAITOK | M_ZERO);
	return (0);
}

void
ahd_platform_free(struct ahd_softc *ahd)
{
	free(ahd->platform_data, M_DEVBUF);
}

int
ahd_softc_comp(struct ahd_softc *lahd, struct ahd_softc *rahd)
{
	/* We don't sort softcs under NetBSD so report equal always */
	return (0);
}

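/*
 * Detach the scsibus child, deregister the power handler and free the
 * controller state.
 */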
int
ahd_detach(struct ahd_softc *ahd, int flags)
{
	int rv = 0;

	if (ahd->sc_child != NULL)
		rv = config_detach(ahd->sc_child, flags);

	pmf_device_deregister(ahd->sc_dev);

	ahd_free(ahd);

	return rv;
}

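/*
 * Record whether tagged queueing is enabled for the given target in the
 * transfer negotiation state.
 */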
void
ahd_platform_set_tags(struct ahd_softc *ahd,
		      struct ahd_devinfo *devinfo, ahd_queue_alg alg)
{
	struct ahd_tmode_tstate *tstate;

	ahd_fetch_transinfo(ahd, devinfo->channel, devinfo->our_scsiid,
			    devinfo->target, &tstate);

	if (alg != AHD_QUEUE_NONE)
		tstate->tagenable |= devinfo->target_mask;
	else
		tstate->tagenable &= ~devinfo->target_mask;
}

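/*
 * Notify the scsipi layer of asynchronous events (completed transfer
 * negotiations, bus resets) coming from the core driver.
 */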
void
ahd_send_async(struct ahd_softc *ahd, char channel, u_int target, u_int lun,
	       ac_code code, void *opt_arg)
{
	struct ahd_tmode_tstate *tstate;
	struct ahd_initiator_tinfo *tinfo;
	struct ahd_devinfo devinfo;
	struct scsipi_channel *chan;
	struct scsipi_xfer_mode xm;

#ifdef DIAGNOSTIC
	if (channel != 'A')
		panic("ahd_send_async: not channel A");
#endif
	chan = &ahd->sc_channel;
	switch (code) {
	case AC_TRANSFER_NEG:
		tinfo = ahd_fetch_transinfo(ahd, channel, ahd->our_id, target,
			    &tstate);
		ahd_compile_devinfo(&devinfo, ahd->our_id, target, lun,
		    channel, ROLE_UNKNOWN);
		/*
		 * Don't bother if negotiating. XXX?
		 */
		if (tinfo->curr.period != tinfo->goal.period
		    || tinfo->curr.width != tinfo->goal.width
		    || tinfo->curr.offset != tinfo->goal.offset
		    || tinfo->curr.ppr_options != tinfo->goal.ppr_options)
			break;
		xm.xm_target = target;
		xm.xm_mode = 0;
		xm.xm_period = tinfo->curr.period;
		xm.xm_offset = tinfo->curr.offset;
		if (tinfo->goal.ppr_options & MSG_EXT_PPR_DT_REQ)
			xm.xm_mode |= PERIPH_CAP_DT;
		if (tinfo->curr.width == MSG_EXT_WDTR_BUS_16_BIT)
			xm.xm_mode |= PERIPH_CAP_WIDE16;
		if (tinfo->curr.period)
			xm.xm_mode |= PERIPH_CAP_SYNC;
		if (tstate->tagenable & devinfo.target_mask)
			xm.xm_mode |= PERIPH_CAP_TQING;
		scsipi_async_event(chan, ASYNC_EVENT_XFER_MODE, &xm);
		break;
	case AC_BUS_RESET:
		scsipi_async_event(chan, ASYNC_EVENT_RESET, NULL);
		/* FALLTHROUGH */
	case AC_SENT_BDR:
	default:
		break;
	}
}