/*-
 *********************************************************************
 *	FILE NAME  : amd.c
 *	     BY    : C.L. Huang 	(ching@tekram.com.tw)
 *		     Erich Chen     (erich@tekram.com.tw)
 *	Description: Device Driver for the amd53c974 PCI Bus Master
 *		     SCSI Host adapter found on cards such as
 *		     the Tekram DC-390(T).
 * (C)Copyright 1995-1999 Tekram Technology Co., Ltd.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *********************************************************************
 * $FreeBSD$
 */

/*
 *********************************************************************
 *	HISTORY:
 *
 *	REV#	DATE	NAME    	DESCRIPTION
 *	1.00  07/02/96	CLH	        First release for RELEASE-2.1.0
 *	1.01  08/20/96	CLH	        Update for RELEASE-2.1.5
 *	1.02  11/06/96	CLH	        Fixed more than 1 LUN scanning
 *	1.03  12/20/96	CLH	        Modify to support 2.2-ALPHA
 *	1.04  12/26/97	CLH	        Modify to support RELEASE-2.2.5
 *	1.05  01/01/99  ERICH CHEN	Modify to support RELEASE-3.0.x (CAM)
 *********************************************************************
 */
/* #define AMD_DEBUG0		*/
/* #define AMD_DEBUG_SCSI_PHASE */

#include <sys/param.h>

#include <sys/systm.h>
#include <sys/queue.h>
#include <sys/kernel.h>
#include <sys/module.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/malloc.h>

#include <vm/vm.h>
#include <vm/pmap.h>

#include <machine/bus.h>
#include <machine/resource.h>
#include <sys/bus.h>
#include <sys/rman.h>

#include <cam/cam.h>
#include <cam/cam_ccb.h>
#include <cam/cam_sim.h>
#include <cam/cam_xpt_sim.h>
#include <cam/cam_debug.h>

#include <cam/scsi/scsi_all.h>
#include <cam/scsi/scsi_message.h>

#include <dev/pci/pcivar.h>
#include <dev/pci/pcireg.h>
#include <dev/amd/amd.h>
#define PCI_DEVICE_ID_AMD53C974	0x20201022ul
#define PCI_BASE_ADDR0		0x10

typedef u_int (phase_handler_t)(struct amd_softc *, struct amd_srb *, u_int);
typedef phase_handler_t *phase_handler_func_t;

static void amd_intr(void *vamd);
static int amdstart(struct amd_softc *amd, struct amd_srb * pSRB);
static phase_handler_t amd_NopPhase;

static phase_handler_t amd_DataOutPhase0;
static phase_handler_t amd_DataInPhase0;
#define amd_CommandPhase0 amd_NopPhase
static phase_handler_t amd_StatusPhase0;
static phase_handler_t amd_MsgOutPhase0;
static phase_handler_t amd_MsgInPhase0;
static phase_handler_t amd_DataOutPhase1;
static phase_handler_t amd_DataInPhase1;
static phase_handler_t amd_CommandPhase1;
static phase_handler_t amd_StatusPhase1;
static phase_handler_t amd_MsgOutPhase1;
static phase_handler_t amd_MsgInPhase1;

static void	amdsetupcommand(struct amd_softc *amd, struct amd_srb *srb);
static int	amdparsemsg(struct amd_softc *amd);
static int	amdhandlemsgreject(struct amd_softc *amd);
static void	amdconstructsdtr(struct amd_softc *amd,
				 u_int period, u_int offset);
static u_int	amdfindclockrate(struct amd_softc *amd, u_int *period);
static int	amdsentmsg(struct amd_softc *amd, u_int msgtype, int full);

static void DataIO_Comm(struct amd_softc *amd, struct amd_srb *pSRB, u_int dir);
static void amd_Disconnect(struct amd_softc *amd);
static void amd_Reselect(struct amd_softc *amd);
static void SRBdone(struct amd_softc *amd, struct amd_srb *pSRB);
static void amd_ScsiRstDetect(struct amd_softc *amd);
static void amd_ResetSCSIBus(struct amd_softc *amd);
static void RequestSense(struct amd_softc *amd, struct amd_srb *pSRB);
static void amd_InvalidCmd(struct amd_softc *amd);

static void amd_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nsegs,
			  int error);

#if 0
static void amd_timeout(void *arg1);
static void amd_reset(struct amd_softc *amd);
#endif
static u_int8_t * phystovirt(struct amd_srb *pSRB, u_int32_t xferCnt);

void    amd_linkSRB(struct amd_softc *amd);
static int amd_init(device_t);
static void amd_load_defaults(struct amd_softc *amd);
static void amd_load_eeprom_or_defaults(struct amd_softc *amd);
static int amd_EEpromInDO(struct amd_softc *amd);
static u_int16_t EEpromGetData1(struct amd_softc *amd);
static void amd_EnDisableCE(struct amd_softc *amd, int mode, int *regval);
static void amd_EEpromOutDI(struct amd_softc *amd, int *regval, int Carry);
static void amd_Prepare(struct amd_softc *amd, int *regval, u_int8_t EEpromCmd);
static void amd_ReadEEprom(struct amd_softc *amd);

static int amd_probe(device_t);
static int amd_attach(device_t);
static void amdcompletematch(struct amd_softc *amd, target_id_t target,
			     lun_id_t lun, u_int tag, struct srb_queue *queue,
			     cam_status status);
static void amdsetsync(struct amd_softc *amd, u_int target, u_int clockrate,
		       u_int period, u_int offset, u_int type);
static void amdsettags(struct amd_softc *amd, u_int target, int tagenb);

static __inline void amd_clear_msg_state(struct amd_softc *amd);

static __inline void
amd_clear_msg_state(struct amd_softc *amd)
{
	amd->msgout_len = 0;
	amd->msgout_index = 0;
	amd->msgin_index = 0;
}

static __inline uint32_t
amd_get_sense_bufaddr(struct amd_softc *amd, struct amd_srb *pSRB)
{
	int offset;

	offset = pSRB->TagNumber;
	return (amd->sense_busaddr + (offset * sizeof(struct scsi_sense_data)));
}

static __inline struct scsi_sense_data *
amd_get_sense_buf(struct amd_softc *amd, struct amd_srb *pSRB)
{
	int offset;

	offset = pSRB->TagNumber;
	return (&amd->sense_buffers[offset]);
}

static __inline uint32_t
amd_get_sense_bufsize(struct amd_softc *amd, struct amd_srb *pSRB)
{
	return (sizeof(struct scsi_sense_data));
}
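
/*
 * The three helpers above rely on the sense buffers being one contiguous
 * array indexed by SRB tag number, so each SRB's buffer sits at a fixed
 * offset from sense_busaddr.  For example, for TagNumber 3:
 *
 *	CPU pointer : &amd->sense_buffers[3]
 *	bus address : amd->sense_busaddr + 3 * sizeof(struct scsi_sense_data)
 */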

/* CAM SIM entry points */
#define ccb_srb_ptr spriv_ptr0
#define ccb_amd_ptr spriv_ptr1
static void	amd_action(struct cam_sim *sim, union ccb *ccb);
static void	amd_poll(struct cam_sim *sim);

/*
 * State engine function tables indexed by SCSI phase number
 */
phase_handler_func_t amd_SCSI_phase0[] = {
	amd_DataOutPhase0,
	amd_DataInPhase0,
	amd_CommandPhase0,
	amd_StatusPhase0,
	amd_NopPhase,
	amd_NopPhase,
	amd_MsgOutPhase0,
	amd_MsgInPhase0
};

phase_handler_func_t amd_SCSI_phase1[] = {
	amd_DataOutPhase1,
	amd_DataInPhase1,
	amd_CommandPhase1,
	amd_StatusPhase1,
	amd_NopPhase,
	amd_NopPhase,
	amd_MsgOutPhase1,
	amd_MsgInPhase1
};

/*
 * EEProm/BIOS negotiation periods
 */
u_int8_t   eeprom_period[] = {
	 25,	/* 10.0MHz */
	 32,	/*  8.0MHz */
	 38,	/*  6.6MHz */
	 44,	/*  5.7MHz */
	 50,	/*  5.0MHz */
	 63,	/*  4.0MHz */
	 83,	/*  3.0MHz */
	125	/*  2.0MHz */
};

/*
 * chip clock setting to SCSI specified sync parameter table.
 */
u_int8_t tinfo_sync_period[] = {
	25,	/* 10.0 */
	32,	/* 8.0 */
	38,	/* 6.6 */
	44,	/* 5.7 */
	50,	/* 5.0 */
	57,	/* 4.4 */
	63,	/* 4.0 */
	70,	/* 3.6 */
	76,	/* 3.3 */
	83	/* 3.0 */
};
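
/*
 * Both tables hold SCSI synchronous period factors in 4ns units, so
 * entry 25 means 25 * 4ns = 100ns per transfer, i.e. 10.0MHz.
 * amdfindclockrate() below maps a requested period to an index into
 * tinfo_sync_period[] and returns (index + 4) as the chip clock
 * setting: 100ns -> index 0 -> setting 4; 200ns -> index 4 -> setting 8.
 */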

static __inline struct amd_srb *
amdgetsrb(struct amd_softc * amd)
{
	int     intflag;
	struct amd_srb *    pSRB;

	intflag = splcam();
	pSRB = TAILQ_FIRST(&amd->free_srbs);
	if (pSRB)
		TAILQ_REMOVE(&amd->free_srbs, pSRB, links);
	splx(intflag);
	return (pSRB);
}

static void
amdsetupcommand(struct amd_softc *amd, struct amd_srb *srb)
{
	struct scsi_request_sense sense_cmd;
	u_int8_t *cdb;
	u_int cdb_len;

	if (srb->SRBFlag & AUTO_REQSENSE) {
		sense_cmd.opcode = REQUEST_SENSE;
		sense_cmd.byte2 = srb->pccb->ccb_h.target_lun << 5;
		sense_cmd.unused[0] = 0;
		sense_cmd.unused[1] = 0;
		sense_cmd.length = sizeof(struct scsi_sense_data);
		sense_cmd.control = 0;
		cdb = &sense_cmd.opcode;
		cdb_len = sizeof(sense_cmd);
	} else {
		cdb = &srb->CmdBlock[0];
		cdb_len = srb->ScsiCmdLen;
	}
	amd_write8_multi(amd, SCSIFIFOREG, cdb, cdb_len);
}

/*
 * Attempt to start a waiting transaction.  Interrupts must be disabled
 * upon entry to this function.
 */
static void
amdrunwaiting(struct amd_softc *amd)
{
	struct amd_srb *srb;

	if (amd->last_phase != SCSI_BUS_FREE)
		return;

	srb = TAILQ_FIRST(&amd->waiting_srbs);
	if (srb == NULL)
		return;

	if (amdstart(amd, srb) == 0) {
		TAILQ_REMOVE(&amd->waiting_srbs, srb, links);
		TAILQ_INSERT_HEAD(&amd->running_srbs, srb, links);
	}
}
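
/*
 * An SRB's life cycle through the queues manipulated above and in
 * amdexecutesrb()/SRBdone() below is:
 *
 *	free_srbs -> waiting_srbs -> running_srbs -> free_srbs
 *	(amd_action)  (amdstart OK)    (SRBdone)
 *
 * amdrunwaiting() only starts a transaction when the bus is free, so
 * at most one selection is in progress at any time.
 */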

static void
amdexecutesrb(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error)
{
	struct	 amd_srb *srb;
	union	 ccb *ccb;
	struct	 amd_softc *amd;
	int	 s;

	srb = (struct amd_srb *)arg;
	ccb = srb->pccb;
	amd = (struct amd_softc *)ccb->ccb_h.ccb_amd_ptr;

	if (error != 0) {
		if (error != EFBIG)
			printf("amd%d: Unexpected error 0x%x returned from "
			       "bus_dmamap_load\n", amd->unit, error);
		if (ccb->ccb_h.status == CAM_REQ_INPROG) {
			xpt_freeze_devq(ccb->ccb_h.path, /*count*/1);
			ccb->ccb_h.status = CAM_REQ_TOO_BIG|CAM_DEV_QFRZN;
		}
		TAILQ_INSERT_HEAD(&amd->free_srbs, srb, links);
		xpt_done(ccb);
		return;
	}

	if (nseg != 0) {
		struct amd_sg *sg;
		bus_dma_segment_t *end_seg;
		bus_dmasync_op_t op;

		end_seg = dm_segs + nseg;

		/* Copy the segments into our SG list */
		srb->pSGlist = &srb->SGsegment[0];
		sg = srb->pSGlist;
		while (dm_segs < end_seg) {
			sg->SGXLen = dm_segs->ds_len;
			sg->SGXPtr = dm_segs->ds_addr;
			sg++;
			dm_segs++;
		}

		if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN)
			op = BUS_DMASYNC_PREREAD;
		else
			op = BUS_DMASYNC_PREWRITE;

		bus_dmamap_sync(amd->buffer_dmat, srb->dmamap, op);
	}
	srb->SGcount = nseg;
	srb->SGIndex = 0;
	srb->AdaptStatus = 0;
	srb->TargetStatus = 0;
	srb->MsgCnt = 0;
	srb->SRBStatus = 0;
	srb->SRBFlag = 0;
	srb->SRBState = 0;
	srb->TotalXferredLen = 0;
	srb->SGPhysAddr = 0;
	srb->SGToBeXferLen = 0;
	srb->EndMessage = 0;

	s = splcam();

	/*
	 * Last chance to check whether this CCB needs to be aborted.
	 */
	if (ccb->ccb_h.status != CAM_REQ_INPROG) {
		if (nseg != 0)
			bus_dmamap_unload(amd->buffer_dmat, srb->dmamap);
		TAILQ_INSERT_HEAD(&amd->free_srbs, srb, links);
		xpt_done(ccb);
		splx(s);
		return;
	}
	ccb->ccb_h.status |= CAM_SIM_QUEUED;
#if 0
	/* XXX Need a timeout handler */
	ccb->ccb_h.timeout_ch =
	    timeout(amdtimeout, (caddr_t)srb,
		    (ccb->ccb_h.timeout * hz) / 1000);
#endif
	TAILQ_INSERT_TAIL(&amd->waiting_srbs, srb, links);
	amdrunwaiting(amd);
	splx(s);
}

static void
amd_action(struct cam_sim * psim, union ccb * pccb)
{
	struct amd_softc *    amd;
	u_int   target_id;

	CAM_DEBUG(pccb->ccb_h.path, CAM_DEBUG_TRACE, ("amd_action\n"));

	amd = (struct amd_softc *) cam_sim_softc(psim);
	target_id = pccb->ccb_h.target_id;

	switch (pccb->ccb_h.func_code) {
	case XPT_SCSI_IO:
	{
		struct amd_srb *    pSRB;
		struct ccb_scsiio *pcsio;

		pcsio = &pccb->csio;

		/*
		 * Assign an SRB and connect it with this ccb.
		 */
		pSRB = amdgetsrb(amd);

		if (!pSRB) {
			/* Freeze SIMQ */
			pccb->ccb_h.status = CAM_RESRC_UNAVAIL;
			xpt_done(pccb);
			return;
		}
		pSRB->pccb = pccb;
		pccb->ccb_h.ccb_srb_ptr = pSRB;
		pccb->ccb_h.ccb_amd_ptr = amd;
		pSRB->ScsiCmdLen = pcsio->cdb_len;
		bcopy(pcsio->cdb_io.cdb_bytes, pSRB->CmdBlock, pcsio->cdb_len);
		if ((pccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE) {
			int error;

			error = bus_dmamap_load_ccb(amd->buffer_dmat,
						    pSRB->dmamap,
						    pccb,
						    amdexecutesrb,
						    pSRB, /*flags*/0);
			if (error == EINPROGRESS) {
				/*
				 * So as to maintain ordering,
				 * freeze the controller queue
				 * until our mapping is
				 * returned.
				 */
				xpt_freeze_simq(psim, 1);
				pccb->ccb_h.status |= CAM_RELEASE_SIMQ;
			}
		} else
			amdexecutesrb(pSRB, NULL, 0, 0);
		break;
	}
	case XPT_PATH_INQ:
	{
		struct ccb_pathinq *cpi = &pccb->cpi;

		cpi->version_num = 1;
		cpi->hba_inquiry = PI_SDTR_ABLE | PI_TAG_ABLE;
		cpi->target_sprt = 0;
		cpi->hba_misc = 0;
		cpi->hba_eng_cnt = 0;
		cpi->max_target = 7;
		cpi->max_lun = amd->max_lun;	/* 7 or 0 */
		cpi->initiator_id = amd->AdaptSCSIID;
		cpi->bus_id = cam_sim_bus(psim);
		strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
		strncpy(cpi->hba_vid, "TRM-AMD", HBA_IDLEN);
		strncpy(cpi->dev_name, cam_sim_name(psim), DEV_IDLEN);
		cpi->unit_number = cam_sim_unit(psim);
		cpi->transport = XPORT_SPI;
		cpi->transport_version = 2;
		cpi->protocol = PROTO_SCSI;
		cpi->protocol_version = SCSI_REV_2;
		cpi->ccb_h.status = CAM_REQ_CMP;
		xpt_done(pccb);
		break;
	}
	case XPT_ABORT:
		pccb->ccb_h.status = CAM_REQ_INVALID;
		xpt_done(pccb);
		break;
	case XPT_RESET_BUS:
	{
		int     i;

		amd_ResetSCSIBus(amd);
		amd->ACBFlag = 0;

		for (i = 0; i < 500; i++) {
			DELAY(1000);	/* Wait until our interrupt
					 * handler sees it */
		}

		pccb->ccb_h.status = CAM_REQ_CMP;
		xpt_done(pccb);
		break;
	}
	case XPT_RESET_DEV:
		pccb->ccb_h.status = CAM_REQ_INVALID;
		xpt_done(pccb);
		break;
	case XPT_TERM_IO:
		pccb->ccb_h.status = CAM_REQ_INVALID;
		xpt_done(pccb);
		break;
	case XPT_GET_TRAN_SETTINGS:
	{
		struct ccb_trans_settings *cts = &pccb->cts;
		struct amd_target_info *targ_info = &amd->tinfo[target_id];
		struct amd_transinfo *tinfo;
		int     intflag;
		struct ccb_trans_settings_scsi *scsi =
		    &cts->proto_specific.scsi;
		struct ccb_trans_settings_spi *spi =
		    &cts->xport_specific.spi;

		cts->protocol = PROTO_SCSI;
		cts->protocol_version = SCSI_REV_2;
		cts->transport = XPORT_SPI;
		cts->transport_version = 2;

		intflag = splcam();
		if (cts->type == CTS_TYPE_CURRENT_SETTINGS) {
			/* current transfer settings */
			if (targ_info->disc_tag & AMD_CUR_DISCENB) {
				spi->flags = CTS_SPI_FLAGS_DISC_ENB;
			} else {
				spi->flags = 0;
			}
			if (targ_info->disc_tag & AMD_CUR_TAGENB) {
				scsi->flags = CTS_SCSI_FLAGS_TAG_ENB;
			} else {
				scsi->flags = 0;
			}
			tinfo = &targ_info->current;
		} else {
			/* default (user) transfer settings */
			if (targ_info->disc_tag & AMD_USR_DISCENB) {
				spi->flags = CTS_SPI_FLAGS_DISC_ENB;
			} else {
				spi->flags = 0;
			}
			if (targ_info->disc_tag & AMD_USR_TAGENB) {
				scsi->flags = CTS_SCSI_FLAGS_TAG_ENB;
			} else {
				scsi->flags = 0;
			}
			tinfo = &targ_info->user;
		}
		spi->sync_period = tinfo->period;
		spi->sync_offset = tinfo->offset;
		splx(intflag);

		spi->bus_width = MSG_EXT_WDTR_BUS_8_BIT;
		spi->valid = CTS_SPI_VALID_SYNC_RATE
			   | CTS_SPI_VALID_SYNC_OFFSET
			   | CTS_SPI_VALID_BUS_WIDTH
			   | CTS_SPI_VALID_DISC;
		scsi->valid = CTS_SCSI_VALID_TQ;
		pccb->ccb_h.status = CAM_REQ_CMP;
		xpt_done(pccb);
		break;
	}
#define	IS_CURRENT_SETTINGS(c)	(c->type == CTS_TYPE_CURRENT_SETTINGS)
#define	IS_USER_SETTINGS(c)	(c->type == CTS_TYPE_USER_SETTINGS)
	case XPT_SET_TRAN_SETTINGS:
	{
		struct ccb_trans_settings *cts = &pccb->cts;
		struct amd_target_info *targ_info;
		u_int  update_type = 0;
		int    intflag;
		int    last_entry;
		struct ccb_trans_settings_scsi *scsi =
		    &cts->proto_specific.scsi;
		struct ccb_trans_settings_spi *spi =
		    &cts->xport_specific.spi;
		if (IS_CURRENT_SETTINGS(cts)) {
			update_type |= AMD_TRANS_GOAL;
		} else if (IS_USER_SETTINGS(cts)) {
			update_type |= AMD_TRANS_USER;
		}
		if (update_type == 0
		 || update_type == (AMD_TRANS_USER|AMD_TRANS_GOAL)) {
			cts->ccb_h.status = CAM_REQ_INVALID;
			xpt_done(pccb);
			break;
		}

		intflag = splcam();
		targ_info = &amd->tinfo[target_id];

		if ((spi->valid & CTS_SPI_VALID_DISC) != 0) {
			if (update_type & AMD_TRANS_GOAL) {
				if ((spi->flags & CTS_SPI_FLAGS_DISC_ENB)
				   != 0) {
					targ_info->disc_tag |= AMD_CUR_DISCENB;
				} else {
					targ_info->disc_tag &= ~AMD_CUR_DISCENB;
				}
			}
			if (update_type & AMD_TRANS_USER) {
				if ((spi->flags & CTS_SPI_FLAGS_DISC_ENB)
				   != 0) {
					targ_info->disc_tag |= AMD_USR_DISCENB;
				} else {
					targ_info->disc_tag &= ~AMD_USR_DISCENB;
				}
			}
		}
		if ((scsi->valid & CTS_SCSI_VALID_TQ) != 0) {
			if (update_type & AMD_TRANS_GOAL) {
				if ((scsi->flags & CTS_SCSI_FLAGS_TAG_ENB)
				   != 0) {
					targ_info->disc_tag |= AMD_CUR_TAGENB;
				} else {
					targ_info->disc_tag &= ~AMD_CUR_TAGENB;
				}
			}
			if (update_type & AMD_TRANS_USER) {
				if ((scsi->flags & CTS_SCSI_FLAGS_TAG_ENB)
				    != 0) {
					targ_info->disc_tag |= AMD_USR_TAGENB;
				} else {
					targ_info->disc_tag &= ~AMD_USR_TAGENB;
				}
			}
		}

		if ((spi->valid & CTS_SPI_VALID_SYNC_OFFSET) == 0) {
			if (update_type & AMD_TRANS_GOAL)
				spi->sync_offset = targ_info->goal.offset;
			else
				spi->sync_offset = targ_info->user.offset;
		}

		if (spi->sync_offset > AMD_MAX_SYNC_OFFSET)
			spi->sync_offset = AMD_MAX_SYNC_OFFSET;

		if ((spi->valid & CTS_SPI_VALID_SYNC_RATE) == 0) {
			if (update_type & AMD_TRANS_GOAL)
				spi->sync_period = targ_info->goal.period;
			else
				spi->sync_period = targ_info->user.period;
		}

		last_entry = sizeof(tinfo_sync_period) - 1;
		if ((spi->sync_period != 0)
		 && (spi->sync_period < tinfo_sync_period[0]))
			spi->sync_period = tinfo_sync_period[0];
		if (spi->sync_period > tinfo_sync_period[last_entry])
			spi->sync_period = 0;
		if (spi->sync_offset == 0)
			spi->sync_period = 0;

		if ((update_type & AMD_TRANS_USER) != 0) {
			targ_info->user.period = spi->sync_period;
			targ_info->user.offset = spi->sync_offset;
		}
		if ((update_type & AMD_TRANS_GOAL) != 0) {
			targ_info->goal.period = spi->sync_period;
			targ_info->goal.offset = spi->sync_offset;
		}
		splx(intflag);
		pccb->ccb_h.status = CAM_REQ_CMP;
		xpt_done(pccb);
		break;
	}
	case XPT_CALC_GEOMETRY:
	{
		int     extended;

		extended = (amd->eepromBuf[EE_MODE2] & GREATER_1G) != 0;
		cam_calc_geometry(&pccb->ccg, extended);
		xpt_done(pccb);
		break;
	}
	default:
		pccb->ccb_h.status = CAM_REQ_INVALID;
		xpt_done(pccb);
		break;
	}
}

static void
amd_poll(struct cam_sim * psim)
{
	amd_intr(cam_sim_softc(psim));
}

static u_int8_t *
phystovirt(struct amd_srb * pSRB, u_int32_t xferCnt)
{
	intptr_t   dataPtr;
	struct ccb_scsiio *pcsio;
	u_int8_t   i;
	struct amd_sg *    pseg;

	pcsio = &pSRB->pccb->csio;

	dataPtr = (intptr_t) pcsio->data_ptr;
	pseg = pSRB->SGsegment;
	for (i = 0; i < pSRB->SGIndex; i++) {
		dataPtr += (int) pseg->SGXLen;
		pseg++;
	}
	dataPtr += (int) xferCnt;
	return ((u_int8_t *) dataPtr);
}

static void
amd_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
	bus_addr_t *baddr;

	baddr = (bus_addr_t *)arg;
	*baddr = segs->ds_addr;
}
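
/*
 * amd_dmamap_cb() follows the usual idiom for memory obtained with
 * bus_dmamem_alloc(): such memory is physically contiguous, so only
 * segs[0].ds_addr is of interest and the segment count can be ignored.
 * Presumably it is used to capture the bus address of contiguous
 * per-driver areas such as the sense buffer array.
 */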

static void
ResetDevParam(struct amd_softc * amd)
{
	u_int target;

	for (target = 0; target <= amd->max_id; target++) {
		if (amd->AdaptSCSIID != target) {
			amdsetsync(amd, target, /*clockrate*/0,
				   /*period*/0, /*offset*/0, AMD_TRANS_CUR);
		}
	}
}

static void
amdcompletematch(struct amd_softc *amd, target_id_t target, lun_id_t lun,
		 u_int tag, struct srb_queue *queue, cam_status status)
{
	struct amd_srb *srb;
	struct amd_srb *next_srb;

	for (srb = TAILQ_FIRST(queue); srb != NULL; srb = next_srb) {
		union ccb *ccb;

		next_srb = TAILQ_NEXT(srb, links);
		if (srb->pccb->ccb_h.target_id != target
		 && target != CAM_TARGET_WILDCARD)
			continue;

		if (srb->pccb->ccb_h.target_lun != lun
		 && lun != CAM_LUN_WILDCARD)
			continue;

		if (srb->TagNumber != tag
		 && tag != AMD_TAG_WILDCARD)
			continue;

		ccb = srb->pccb;
		TAILQ_REMOVE(queue, srb, links);
		TAILQ_INSERT_HEAD(&amd->free_srbs, srb, links);
		if ((ccb->ccb_h.status & CAM_DEV_QFRZN) == 0
		 && (status & CAM_DEV_QFRZN) != 0)
			xpt_freeze_devq(ccb->ccb_h.path, /*count*/1);
		ccb->ccb_h.status = status;
		xpt_done(ccb);
	}
}

static void
amdsetsync(struct amd_softc *amd, u_int target, u_int clockrate,
	   u_int period, u_int offset, u_int type)
{
	struct amd_target_info *tinfo;
	u_int old_period;
	u_int old_offset;

	tinfo = &amd->tinfo[target];
	old_period = tinfo->current.period;
	old_offset = tinfo->current.offset;
	if ((type & AMD_TRANS_CUR) != 0
	 && (old_period != period || old_offset != offset)) {
		struct cam_path *path;

		tinfo->current.period = period;
		tinfo->current.offset = offset;
		tinfo->sync_period_reg = clockrate;
		tinfo->sync_offset_reg = offset;
		tinfo->CtrlR3 &= ~FAST_SCSI;
		tinfo->CtrlR4 &= ~EATER_25NS;
		if (clockrate > 7)
			tinfo->CtrlR4 |= EATER_25NS;
		else
			tinfo->CtrlR3 |= FAST_SCSI;

		if ((type & AMD_TRANS_ACTIVE) == AMD_TRANS_ACTIVE) {
			amd_write8(amd, SYNCPERIOREG, tinfo->sync_period_reg);
			amd_write8(amd, SYNCOFFREG, tinfo->sync_offset_reg);
			amd_write8(amd, CNTLREG3, tinfo->CtrlR3);
			amd_write8(amd, CNTLREG4, tinfo->CtrlR4);
		}
		/* If possible, update the XPT's notion of our transfer rate */
		if (xpt_create_path(&path, /*periph*/NULL,
				    cam_sim_path(amd->psim), target,
				    CAM_LUN_WILDCARD) == CAM_REQ_CMP) {
			struct ccb_trans_settings neg;
			struct ccb_trans_settings_spi *spi =
			    &neg.xport_specific.spi;
			memset(&neg, 0, sizeof (neg));
			xpt_setup_ccb(&neg.ccb_h, path, /*priority*/1);
			spi->sync_period = period;
			spi->sync_offset = offset;
			spi->valid = CTS_SPI_VALID_SYNC_RATE
				  | CTS_SPI_VALID_SYNC_OFFSET;
			xpt_async(AC_TRANSFER_NEG, path, &neg);
			xpt_free_path(path);
		}
	}
	if ((type & AMD_TRANS_GOAL) != 0) {
		tinfo->goal.period = period;
		tinfo->goal.offset = offset;
	}

	if ((type & AMD_TRANS_USER) != 0) {
		tinfo->user.period = period;
		tinfo->user.offset = offset;
	}
}

static void
amdsettags(struct amd_softc *amd, u_int target, int tagenb)
{
	panic("Implement me!\n");
}

#if 0
/*
 **********************************************************************
 * Function : amd_reset (struct amd_softc * amd)
 * Purpose  : perform a hard reset on the SCSI bus (and AMD chip).
 * Inputs   : cmd - command which caused the SCSI RESET
 **********************************************************************
 */
static void
amd_reset(struct amd_softc * amd)
{
	int	   intflag;
	u_int8_t   bval;
	u_int16_t  i;

#ifdef AMD_DEBUG0
	printf("DC390: RESET");
#endif

	intflag = splcam();
	bval = amd_read8(amd, CNTLREG1);
	bval |= DIS_INT_ON_SCSI_RST;
	amd_write8(amd, CNTLREG1, bval);	/* disable interrupt */
	amd_ResetSCSIBus(amd);

	for (i = 0; i < 500; i++) {
		DELAY(1000);
	}

	bval = amd_read8(amd, CNTLREG1);
	bval &= ~DIS_INT_ON_SCSI_RST;
	amd_write8(amd, CNTLREG1, bval);	/* re-enable interrupt */

	amd_write8(amd, DMA_Cmd, DMA_IDLE_CMD);
	amd_write8(amd, SCSICMDREG, CLEAR_FIFO_CMD);

	ResetDevParam(amd);
	amdcompletematch(amd, CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD,
			 AMD_TAG_WILDCARD, &amd->running_srbs,
			 CAM_DEV_QFRZN|CAM_SCSI_BUS_RESET);
	amdcompletematch(amd, CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD,
			 AMD_TAG_WILDCARD, &amd->waiting_srbs,
			 CAM_DEV_QFRZN|CAM_SCSI_BUS_RESET);
	amd->active_srb = NULL;
	amd->ACBFlag = 0;
	splx(intflag);
	return;
}

void
amd_timeout(void *arg1)
{
	struct amd_srb *    pSRB;

	pSRB = (struct amd_srb *) arg1;
}
#endif

static int
amdstart(struct amd_softc *amd, struct amd_srb *pSRB)
{
	union ccb *pccb;
	struct ccb_scsiio *pcsio;
	struct amd_target_info *targ_info;
	u_int identify_msg;
	u_int command;
	u_int target;
	u_int lun;

	pccb = pSRB->pccb;
	pcsio = &pccb->csio;
	target = pccb->ccb_h.target_id;
	lun = pccb->ccb_h.target_lun;
	targ_info = &amd->tinfo[target];

	amd_clear_msg_state(amd);
	amd_write8(amd, SCSIDESTIDREG, target);
	amd_write8(amd, SYNCPERIOREG, targ_info->sync_period_reg);
	amd_write8(amd, SYNCOFFREG, targ_info->sync_offset_reg);
	amd_write8(amd, CNTLREG1, targ_info->CtrlR1);
	amd_write8(amd, CNTLREG3, targ_info->CtrlR3);
	amd_write8(amd, CNTLREG4, targ_info->CtrlR4);
	amd_write8(amd, SCSICMDREG, CLEAR_FIFO_CMD);

	identify_msg = MSG_IDENTIFYFLAG | lun;
	if ((targ_info->disc_tag & AMD_CUR_DISCENB) != 0
	  && (pccb->ccb_h.flags & CAM_DIS_DISCONNECT) == 0
	  && (pSRB->CmdBlock[0] != REQUEST_SENSE)
	  && (pSRB->SRBFlag & AUTO_REQSENSE) == 0)
		identify_msg |= MSG_IDENTIFY_DISCFLAG;

	amd_write8(amd, SCSIFIFOREG, identify_msg);
	if ((targ_info->disc_tag & AMD_CUR_TAGENB) == 0
	  || (identify_msg & MSG_IDENTIFY_DISCFLAG) == 0)
		pccb->ccb_h.flags &= ~CAM_TAG_ACTION_VALID;
	if (targ_info->current.period != targ_info->goal.period
	 || targ_info->current.offset != targ_info->goal.offset) {
		command = SEL_W_ATN_STOP;
		amdconstructsdtr(amd, targ_info->goal.period,
				 targ_info->goal.offset);
	} else if ((pccb->ccb_h.flags & CAM_TAG_ACTION_VALID) != 0) {
		command = SEL_W_ATN2;
		pSRB->SRBState = SRB_START;
		amd_write8(amd, SCSIFIFOREG, pcsio->tag_action);
		amd_write8(amd, SCSIFIFOREG, pSRB->TagNumber);
	} else {
		command = SEL_W_ATN;
		pSRB->SRBState = SRB_START;
	}
	if (command != SEL_W_ATN_STOP)
		amdsetupcommand(amd, pSRB);

	if (amd_read8(amd, SCSISTATREG) & INTERRUPT) {
		pSRB->SRBState = SRB_READY;
		return (1);
	} else {
		amd->last_phase = SCSI_ARBITRATING;
		amd_write8(amd, SCSICMDREG, command);
		amd->active_srb = pSRB;
		amd->cur_target = target;
		amd->cur_lun = lun;
		return (0);
	}
}
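
/*
 * Selection commands issued above, in order of preference:
 *
 *	SEL_W_ATN_STOP	a sync negotiation is pending; stop after the
 *			identify message so the SDTR built by
 *			amdconstructsdtr() can go out in MSG OUT phase
 *	SEL_W_ATN2	tagged command; the identify plus two queue-tag
 *			bytes were loaded into the FIFO ahead of the CDB
 *	SEL_W_ATN	plain untagged selection
 *
 * Returning 1 backs the start out (the chip already had an interrupt
 * pending) and leaves the SRB on waiting_srbs for a later retry.
 */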

/*
 *  Catch an interrupt from the adapter.
 *  Process pending device interrupts.
 */
static void
amd_intr(void *arg)
{
	struct amd_softc *amd;
	struct amd_srb *pSRB;
	u_int  internstat = 0;
	u_int  scsistat;
	u_int  intstat;

	amd = (struct amd_softc *)arg;

	if (amd == NULL) {
#ifdef AMD_DEBUG0
		printf("amd_intr: amd NULL, return......");
#endif
		return;
	}

	scsistat = amd_read8(amd, SCSISTATREG);
	if (!(scsistat & INTERRUPT)) {
#ifdef AMD_DEBUG0
		printf("amd_intr: no interrupt pending, return......");
#endif
		return;
	}
#ifdef AMD_DEBUG_SCSI_PHASE
	printf("scsistat=%2x,", scsistat);
#endif

	internstat = amd_read8(amd, INTERNSTATREG);
	intstat = amd_read8(amd, INTSTATREG);

#ifdef AMD_DEBUG_SCSI_PHASE
	printf("intstat=%2x,", intstat);
#endif

	if (intstat & DISCONNECTED) {
		amd_Disconnect(amd);
		return;
	}
	if (intstat & RESELECTED) {
		amd_Reselect(amd);
		return;
	}
	if (intstat & INVALID_CMD) {
		amd_InvalidCmd(amd);
		return;
	}
	if (intstat & SCSI_RESET_) {
		amd_ScsiRstDetect(amd);
		return;
	}
	if (intstat & (SUCCESSFUL_OP | SERVICE_REQUEST)) {
		pSRB = amd->active_srb;
		/*
		 * Run our state engine.  First perform
		 * post processing for the last phase we
		 * were in, followed by any processing
		 * required to handle the current phase.
		 */
		scsistat =
		    amd_SCSI_phase0[amd->last_phase](amd, pSRB, scsistat);
		amd->last_phase = scsistat & SCSI_PHASE_MASK;
		(void)amd_SCSI_phase1[amd->last_phase](amd, pSRB, scsistat);
	}
}
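
/*
 * The amd_SCSI_phase0[]/amd_SCSI_phase1[] tables give every SCSI phase
 * a "leaving" and an "entering" handler.  On each interrupt the old
 * phase's phase0 handler runs first (post-processing such as residual
 * accounting), the phase bits of its return value select the new phase,
 * and that phase's phase1 handler then sets the chip up for the
 * transfer about to take place.
 */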

static u_int
amd_DataOutPhase0(struct amd_softc *amd, struct amd_srb *pSRB, u_int scsistat)
{
	struct amd_sg *psgl;
	u_int32_t   ResidCnt, xferCnt;

	if (!(pSRB->SRBState & SRB_XFERPAD)) {
		if (scsistat & PARITY_ERR) {
			pSRB->SRBStatus |= PARITY_ERROR;
		}
		if (scsistat & COUNT_2_ZERO) {
			while ((amd_read8(amd, DMA_Status)&DMA_XFER_DONE) == 0)
				;
			pSRB->TotalXferredLen += pSRB->SGToBeXferLen;
			pSRB->SGIndex++;
			if (pSRB->SGIndex < pSRB->SGcount) {
				pSRB->pSGlist++;
				psgl = pSRB->pSGlist;
				pSRB->SGPhysAddr = psgl->SGXPtr;
				pSRB->SGToBeXferLen = psgl->SGXLen;
			} else {
				pSRB->SGToBeXferLen = 0;
			}
		} else {
			ResidCnt = amd_read8(amd, CURRENTFIFOREG) & 0x1f;
			ResidCnt += amd_read8(amd, CTCREG_LOW)
				  | (amd_read8(amd, CTCREG_MID) << 8)
				  | (amd_read8(amd, CURTXTCNTREG) << 16);

			xferCnt = pSRB->SGToBeXferLen - ResidCnt;
			pSRB->SGPhysAddr += xferCnt;
			pSRB->TotalXferredLen += xferCnt;
			pSRB->SGToBeXferLen = ResidCnt;
		}
	}
	amd_write8(amd, DMA_Cmd, WRITE_DIRECTION | DMA_IDLE_CMD);
	return (scsistat);
}
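
/*
 * Residual arithmetic used above when the target leaves DATA OUT in
 * mid-segment: the 24-bit transfer counter is reassembled from
 * CTCREG_LOW/CTCREG_MID/CURTXTCNTREG, and any bytes still sitting in
 * the FIFO (low 5 bits of CURRENTFIFOREG) are added back.  For a 4096
 * byte segment with a counter of 0x100 and 3 bytes left in the FIFO:
 *
 *	ResidCnt = 3 + 0x100  = 259
 *	xferCnt  = 4096 - 259 = 3837 bytes actually transferred
 */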

static u_int
amd_DataInPhase0(struct amd_softc *amd, struct amd_srb *pSRB, u_int scsistat)
{
	u_int8_t bval;
	u_int16_t  i, residual;
	struct amd_sg *psgl;
	u_int32_t   ResidCnt, xferCnt;
	u_int8_t *  ptr;

	if (!(pSRB->SRBState & SRB_XFERPAD)) {
		if (scsistat & PARITY_ERR) {
			pSRB->SRBStatus |= PARITY_ERROR;
		}
		if (scsistat & COUNT_2_ZERO) {
			while (1) {
				bval = amd_read8(amd, DMA_Status);
				if ((bval & DMA_XFER_DONE) != 0)
					break;
			}
			amd_write8(amd, DMA_Cmd, READ_DIRECTION|DMA_IDLE_CMD);

			pSRB->TotalXferredLen += pSRB->SGToBeXferLen;
			pSRB->SGIndex++;
			if (pSRB->SGIndex < pSRB->SGcount) {
				pSRB->pSGlist++;
				psgl = pSRB->pSGlist;
				pSRB->SGPhysAddr = psgl->SGXPtr;
				pSRB->SGToBeXferLen = psgl->SGXLen;
			} else {
				pSRB->SGToBeXferLen = 0;
			}
		} else {	/* phase changed */
			residual = 0;
			bval = amd_read8(amd, CURRENTFIFOREG);
			while (bval & 0x1f) {
				if ((bval & 0x1f) == 1) {
					for (i = 0; i < 0x100; i++) {
						bval = amd_read8(amd, CURRENTFIFOREG);
						if (!(bval & 0x1f)) {
							goto din_1;
						} else if (i == 0x0ff) {
							residual = 1;
							goto din_1;
						}
					}
				} else {
					bval = amd_read8(amd, CURRENTFIFOREG);
				}
			}
	din_1:
			amd_write8(amd, DMA_Cmd, READ_DIRECTION|DMA_BLAST_CMD);
			for (i = 0; i < 0x8000; i++) {
				if ((amd_read8(amd, DMA_Status)&BLAST_COMPLETE))
					break;
			}
			amd_write8(amd, DMA_Cmd, READ_DIRECTION|DMA_IDLE_CMD);

			ResidCnt = amd_read8(amd, CTCREG_LOW)
				 | (amd_read8(amd, CTCREG_MID) << 8)
				 | (amd_read8(amd, CURTXTCNTREG) << 16);
			xferCnt = pSRB->SGToBeXferLen - ResidCnt;
			pSRB->SGPhysAddr += xferCnt;
			pSRB->TotalXferredLen += xferCnt;
			pSRB->SGToBeXferLen = ResidCnt;
			if (residual) {
				/* get residual byte */
				bval = amd_read8(amd, SCSIFIFOREG);
				ptr = phystovirt(pSRB, xferCnt);
				*ptr = bval;
				pSRB->SGPhysAddr++;
				pSRB->TotalXferredLen++;
				pSRB->SGToBeXferLen--;
			}
		}
	}
	return (scsistat);
}

static u_int
amd_StatusPhase0(struct amd_softc *amd, struct amd_srb *pSRB, u_int scsistat)
{
	pSRB->TargetStatus = amd_read8(amd, SCSIFIFOREG);
	/* get message */
	pSRB->EndMessage = amd_read8(amd, SCSIFIFOREG);
	pSRB->SRBState = SRB_COMPLETED;
	amd_write8(amd, SCSICMDREG, MSG_ACCEPTED_CMD);
	return (SCSI_NOP0);
}

static u_int
amd_MsgOutPhase0(struct amd_softc *amd, struct amd_srb *pSRB, u_int scsistat)
{
	if (pSRB->SRBState & (SRB_UNEXPECT_RESEL | SRB_ABORT_SENT)) {
		scsistat = SCSI_NOP0;
	}
	return (scsistat);
}

static u_int
amd_MsgInPhase0(struct amd_softc *amd, struct amd_srb *pSRB, u_int scsistat)
{
	int done;

	amd->msgin_buf[amd->msgin_index] = amd_read8(amd, SCSIFIFOREG);

	done = amdparsemsg(amd);
	if (done)
		amd->msgin_index = 0;
	else
		amd->msgin_index++;
	return (SCSI_NOP0);
}

static int
amdparsemsg(struct amd_softc *amd)
{
	int	reject;
	int	done;
	int	response;

	done = FALSE;
	response = FALSE;
	reject = FALSE;

	/*
	 * Parse as much of the message as is available,
	 * rejecting it if we don't support it.  When
	 * the entire message is available and has been
	 * handled, return TRUE indicating that we have
	 * parsed an entire message.
	 */
	switch (amd->msgin_buf[0]) {
	case MSG_DISCONNECT:
		amd->active_srb->SRBState = SRB_DISCONNECT;
		amd->disc_count[amd->cur_target][amd->cur_lun]++;
		done = TRUE;
		break;
	case MSG_SIMPLE_Q_TAG:
	{
		struct amd_srb *disc_srb;

		if (amd->msgin_index < 1)
			break;
		disc_srb = &amd->SRB_array[amd->msgin_buf[1]];
		if (amd->active_srb != NULL
		 || disc_srb->SRBState != SRB_DISCONNECT
		 || disc_srb->pccb->ccb_h.target_id != amd->cur_target
		 || disc_srb->pccb->ccb_h.target_lun != amd->cur_lun) {
			printf("amd%d: Unexpected tagged reselection "
			       "for target %d, Issuing Abort\n", amd->unit,
			       amd->cur_target);
			amd->msgout_buf[0] = MSG_ABORT;
			amd->msgout_len = 1;
			response = TRUE;
			break;
		}
		amd->active_srb = disc_srb;
		amd->disc_count[amd->cur_target][amd->cur_lun]--;
		done = TRUE;
		break;
	}
	case MSG_MESSAGE_REJECT:
		response = amdhandlemsgreject(amd);
		if (response == FALSE)
			amd_write8(amd, SCSICMDREG, RESET_ATN_CMD);
		/* FALLTHROUGH */
	case MSG_NOOP:
		done = TRUE;
		break;
	case MSG_EXTENDED:
	{
		u_int clockrate;
		u_int period;
		u_int offset;
		u_int saved_offset;

		/* Wait for enough of the message to begin validation */
		if (amd->msgin_index < 1)
			break;
		if (amd->msgin_buf[1] != MSG_EXT_SDTR_LEN) {
			reject = TRUE;
			break;
		}

		/* Wait for opcode */
		if (amd->msgin_index < 2)
			break;

		if (amd->msgin_buf[2] != MSG_EXT_SDTR) {
			reject = TRUE;
			break;
		}

		/*
		 * Wait until we have both args before validating
		 * and acting on this message.
		 *
		 * Add one to MSG_EXT_SDTR_LEN to account for
		 * the extended message preamble.
		 */
		if (amd->msgin_index < (MSG_EXT_SDTR_LEN + 1))
			break;

		period = amd->msgin_buf[3];
		saved_offset = offset = amd->msgin_buf[4];
		clockrate = amdfindclockrate(amd, &period);
		if (offset > AMD_MAX_SYNC_OFFSET)
			offset = AMD_MAX_SYNC_OFFSET;
		if (period == 0 || offset == 0) {
			offset = 0;
			period = 0;
			clockrate = 0;
		}
		amdsetsync(amd, amd->cur_target, clockrate, period, offset,
			   AMD_TRANS_ACTIVE|AMD_TRANS_GOAL);

		/*
		 * See if we initiated Sync Negotiation
		 * and didn't have to fall down to async
		 * transfers.
		 */
		if (amdsentmsg(amd, MSG_EXT_SDTR, /*full*/TRUE)) {
			/* We started it */
			if (saved_offset != offset) {
				/* Went too low - force async */
				reject = TRUE;
			}
		} else {
			/*
			 * Send our own SDTR in reply
			 */
			if (bootverbose)
				printf("Sending SDTR!\n");
			amd->msgout_index = 0;
			amd->msgout_len = 0;
			amdconstructsdtr(amd, period, offset);
			amd->msgout_index = 0;
			response = TRUE;
		}
		done = TRUE;
		break;
	}
	case MSG_SAVEDATAPOINTER:
	case MSG_RESTOREPOINTERS:
		/* XXX Implement!!! */
		done = TRUE;
		break;
	default:
		reject = TRUE;
		break;
	}

	if (reject) {
		amd->msgout_index = 0;
		amd->msgout_len = 1;
		amd->msgout_buf[0] = MSG_MESSAGE_REJECT;
		done = TRUE;
		response = TRUE;
	}

	if (response)
		amd_write8(amd, SCSICMDREG, SET_ATN_CMD);

	if (done && !response)
		/* Clear the outgoing message buffer */
		amd->msgout_len = 0;

	/* Drop Ack */
	amd_write8(amd, SCSICMDREG, MSG_ACCEPTED_CMD);

	return (done);
}

static u_int
amdfindclockrate(struct amd_softc *amd, u_int *period)
{
	u_int i;
	u_int clockrate;

	for (i = 0; i < sizeof(tinfo_sync_period); i++) {
		u_int8_t *table_entry;

		table_entry = &tinfo_sync_period[i];
		if (*period <= *table_entry) {
			/*
			 * When responding to a target that requests
			 * sync, the requested rate may fall between
			 * two rates that we can output, but still be
			 * a rate that we can receive.  Because of this,
			 * we want to respond to the target with
			 * the same rate that it sent to us even
			 * if the period we use to send data to it
			 * is lower.  Only lower the response period
			 * if we must.
			 */
			if (i == 0) {
				*period = *table_entry;
			}
			break;
		}
	}

	if (i == sizeof(tinfo_sync_period)) {
		/* Too slow for us.  Use async transfers. */
		*period = 0;
		clockrate = 0;
	} else
		clockrate = i + 4;

	return (clockrate);
}
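
/*
 * Worked example: a target requests a 200ns period (SDTR period factor
 * 50).  The first table entry >= 50 is tinfo_sync_period[4], so the
 * returned clock setting is 4 + 4 = 8, which amdsetsync() programs as a
 * slow transfer (clockrate > 7 selects EATER_25NS instead of FAST_SCSI).
 */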

/*
 * See if we sent a particular extended message to the target.
 * If "full" is true, the target saw the full message.
 * If "full" is false, the target saw at least the first
 * byte of the message.
 */
static int
amdsentmsg(struct amd_softc *amd, u_int msgtype, int full)
{
	int found;
	int index;

	found = FALSE;
	index = 0;

	while (index < amd->msgout_len) {
		if ((amd->msgout_buf[index] & MSG_IDENTIFYFLAG) != 0
		 || amd->msgout_buf[index] == MSG_MESSAGE_REJECT)
			index++;
		else if (amd->msgout_buf[index] >= MSG_SIMPLE_Q_TAG
		      && amd->msgout_buf[index] < MSG_IGN_WIDE_RESIDUE) {
			/* Skip tag type and tag id */
			index += 2;
		} else if (amd->msgout_buf[index] == MSG_EXTENDED) {
			/* Found a candidate */
			if (amd->msgout_buf[index+2] == msgtype) {
				u_int end_index;

				end_index = index + 1
					  + amd->msgout_buf[index + 1];
				if (full) {
					if (amd->msgout_index > end_index)
						found = TRUE;
				} else if (amd->msgout_index > index)
					found = TRUE;
			}
			break;
		} else {
			panic("amdsentmsg: Inconsistent msg buffer");
		}
	}
	return (found);
}

static void
amdconstructsdtr(struct amd_softc *amd, u_int period, u_int offset)
{
	amd->msgout_buf[amd->msgout_index++] = MSG_EXTENDED;
	amd->msgout_buf[amd->msgout_index++] = MSG_EXT_SDTR_LEN;
	amd->msgout_buf[amd->msgout_index++] = MSG_EXT_SDTR;
	amd->msgout_buf[amd->msgout_index++] = period;
	amd->msgout_buf[amd->msgout_index++] = offset;
	amd->msgout_len += 5;
}
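
/*
 * The five bytes assembled above form a standard SCSI-2 SDTR message;
 * for a 100ns period at offset 15 the target would see:
 *
 *	0x01	MSG_EXTENDED
 *	0x03	MSG_EXT_SDTR_LEN
 *	0x01	MSG_EXT_SDTR
 *	0x19	period factor (25 * 4ns = 100ns)
 *	0x0f	offset
 */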

static int
amdhandlemsgreject(struct amd_softc *amd)
{
	/*
	 * If we had an outstanding SDTR for this
	 * target, this is a signal that the target
	 * is refusing negotiation.  Also watch out
	 * for rejected tag messages.
	 */
	struct	amd_srb *srb;
	struct	amd_target_info *targ_info;
	int	response = FALSE;

	srb = amd->active_srb;
	targ_info = &amd->tinfo[amd->cur_target];
	if (amdsentmsg(amd, MSG_EXT_SDTR, /*full*/FALSE)) {
		/* note asynch xfers and clear flag */
		amdsetsync(amd, amd->cur_target, /*clockrate*/0,
			   /*period*/0, /*offset*/0,
			   AMD_TRANS_ACTIVE|AMD_TRANS_GOAL);
		printf("amd%d:%d: refuses synchronous negotiation. "
		       "Using asynchronous transfers\n",
		       amd->unit, amd->cur_target);
	} else if ((srb != NULL)
		&& (srb->pccb->ccb_h.flags & CAM_TAG_ACTION_VALID) != 0) {
		struct  ccb_trans_settings neg;
		struct ccb_trans_settings_scsi *scsi = &neg.proto_specific.scsi;

		printf("amd%d:%d: refuses tagged commands.  Performing "
		       "non-tagged I/O\n", amd->unit, amd->cur_target);

		amdsettags(amd, amd->cur_target, FALSE);
		memset(&neg, 0, sizeof (neg));
		scsi->valid = CTS_SCSI_VALID_TQ;
		xpt_setup_ccb(&neg.ccb_h, srb->pccb->ccb_h.path, /*priority*/1);
		xpt_async(AC_TRANSFER_NEG, srb->pccb->ccb_h.path, &neg);

		/*
		 * Resend the identify for this CCB as the target
		 * may believe that the selection is invalid otherwise.
		 */
		if (amd->msgout_len != 0)
			bcopy(&amd->msgout_buf[0], &amd->msgout_buf[1],
			      amd->msgout_len);
		amd->msgout_buf[0] = MSG_IDENTIFYFLAG
				    | srb->pccb->ccb_h.target_lun;
		amd->msgout_len++;
		if ((targ_info->disc_tag & AMD_CUR_DISCENB) != 0
		  && (srb->pccb->ccb_h.flags & CAM_DIS_DISCONNECT) == 0)
			amd->msgout_buf[0] |= MSG_IDENTIFY_DISCFLAG;

		srb->pccb->ccb_h.flags &= ~CAM_TAG_ACTION_VALID;

		/*
		 * Requeue all tagged commands for this target
		 * currently in our possession so they can be
		 * converted to untagged commands.
		 */
		amdcompletematch(amd, amd->cur_target, amd->cur_lun,
				 AMD_TAG_WILDCARD, &amd->waiting_srbs,
				 CAM_DEV_QFRZN|CAM_REQUEUE_REQ);
	} else {
		/*
		 * Otherwise, we ignore it.
		 */
		printf("amd%d:%d: Message reject received -- ignored\n",
		       amd->unit, amd->cur_target);
	}
	return (response);
}

#if 0
	if (!(pSRB->SRBState & SRB_MSGIN_MULTI)) {
		if (bval == MSG_DISCONNECT) {
			pSRB->SRBState = SRB_DISCONNECT;
		} else if (bval == MSG_SAVEDATAPOINTER) {
			goto min6;
		} else if ((bval == MSG_EXTENDED)
			|| ((bval >= MSG_SIMPLE_Q_TAG)
			 && (bval <= MSG_ORDERED_Q_TAG))) {
			pSRB->SRBState |= SRB_MSGIN_MULTI;
			pSRB->MsgInBuf[0] = bval;
			pSRB->MsgCnt = 1;
			pSRB->pMsgPtr = &pSRB->MsgInBuf[1];
		} else if (bval == MSG_MESSAGE_REJECT) {
			amd_write8(amd, SCSICMDREG, RESET_ATN_CMD);

			if (pSRB->SRBState & DO_SYNC_NEGO) {
				goto set_async;
			}
		} else if (bval == MSG_RESTOREPOINTERS) {
			goto min6;
		} else {
			goto min6;
		}
	} else {		/* minx: */
		*pSRB->pMsgPtr = bval;
		pSRB->MsgCnt++;
		pSRB->pMsgPtr++;
		if ((pSRB->MsgInBuf[0] >= MSG_SIMPLE_Q_TAG)
		 && (pSRB->MsgInBuf[0] <= MSG_ORDERED_Q_TAG)) {
			if (pSRB->MsgCnt == 2) {
				pSRB->SRBState = 0;
				pSRB = &amd->SRB_array[pSRB->MsgInBuf[1]];
				if ((pSRB->SRBState & SRB_DISCONNECT) == 0) {
					pSRB = amd->pTmpSRB;
					pSRB->SRBState = SRB_UNEXPECT_RESEL;
					pDCB->pActiveSRB = pSRB;
					pSRB->MsgOutBuf[0] = MSG_ABORT_TAG;
					EnableMsgOut2(amd, pSRB);
				} else {
					if (pDCB->DCBFlag & ABORT_DEV_) {
						pSRB->SRBState = SRB_ABORT_SENT;
						EnableMsgOut1(amd, pSRB);
					}
					pDCB->pActiveSRB = pSRB;
					pSRB->SRBState = SRB_DATA_XFER;
				}
			}
		} else if ((pSRB->MsgInBuf[0] == MSG_EXTENDED)
			&& (pSRB->MsgCnt == 5)) {
			pSRB->SRBState &= ~(SRB_MSGIN_MULTI | DO_SYNC_NEGO);
			if ((pSRB->MsgInBuf[1] != 3)
			 || (pSRB->MsgInBuf[2] != 1)) {	/* reject_msg: */
				pSRB->MsgCnt = 1;
				pSRB->MsgInBuf[0] = MSG_MESSAGE_REJECT;
				amd_write8(amd, SCSICMDREG, SET_ATN_CMD);
			} else if (!(pSRB->MsgInBuf[3])
				|| !(pSRB->MsgInBuf[4])) {
		set_async:	/* set async */

				pDCB = pSRB->pSRBDCB;
				/* disable sync & sync nego */
				pDCB->SyncMode &= ~(SYNC_ENABLE|SYNC_NEGO_DONE);
				pDCB->SyncPeriod = 0;
				pDCB->SyncOffset = 0;

				pDCB->tinfo.goal.period = 0;
				pDCB->tinfo.goal.offset = 0;

				pDCB->tinfo.current.period = 0;
				pDCB->tinfo.current.offset = 0;
				pDCB->tinfo.current.width =
				    MSG_EXT_WDTR_BUS_8_BIT;

				pDCB->CtrlR3 = FAST_CLK; /* non_fast */
				pDCB->CtrlR4 &= 0x3f;
				pDCB->CtrlR4 |= EATER_25NS;
				goto re_prog;
			} else {/* set sync */

				pDCB = pSRB->pSRBDCB;
				/* enable sync & sync nego */
				pDCB->SyncMode |= SYNC_ENABLE|SYNC_NEGO_DONE;

				/* set sync offset */
				pDCB->SyncOffset &= 0x0f0;
				pDCB->SyncOffset |= pSRB->MsgInBuf[4];

				/* set sync period */
				pDCB->MaxNegoPeriod = pSRB->MsgInBuf[3];

				wval = (u_int16_t) pSRB->MsgInBuf[3];
				wval = wval << 2;
				wval--;
				wval1 = wval / 25;
				if ((wval1 * 25) != wval) {
					wval1++;
				}
				bval = FAST_CLK|FAST_SCSI;
				pDCB->CtrlR4 &= 0x3f;
				if (wval1 >= 8) {
					/* Fast SCSI */
					wval1--;
					bval = FAST_CLK;
					pDCB->CtrlR4 |= EATER_25NS;
				}
				pDCB->CtrlR3 = bval;
				pDCB->SyncPeriod = (u_int8_t) wval1;

				pDCB->tinfo.goal.period =
				    tinfo_sync_period[pDCB->SyncPeriod - 4];
				pDCB->tinfo.goal.offset = pDCB->SyncOffset;
				pDCB->tinfo.current.period =
				    tinfo_sync_period[pDCB->SyncPeriod - 4];
				pDCB->tinfo.current.offset = pDCB->SyncOffset;

				/*
				 * program SCSI control register
				 */
		re_prog:
				amd_write8(amd, SYNCPERIOREG, pDCB->SyncPeriod);
				amd_write8(amd, SYNCOFFREG, pDCB->SyncOffset);
				amd_write8(amd, CNTLREG3, pDCB->CtrlR3);
				amd_write8(amd, CNTLREG4, pDCB->CtrlR4);
			}
		}
	}
min6:
	amd_write8(amd, SCSICMDREG, MSG_ACCEPTED_CMD);
	return (SCSI_NOP0);
}
#endif

static u_int
amd_DataOutPhase1(struct amd_softc *amd, struct amd_srb *pSRB, u_int scsistat)
{
	DataIO_Comm(amd, pSRB, WRITE_DIRECTION);
	return (scsistat);
}

static u_int
amd_DataInPhase1(struct amd_softc *amd, struct amd_srb *pSRB, u_int scsistat)
{
	DataIO_Comm(amd, pSRB, READ_DIRECTION);
	return (scsistat);
}

static void
DataIO_Comm(struct amd_softc *amd, struct amd_srb *pSRB, u_int ioDir)
{
	struct amd_sg *    psgl;
	u_int32_t   lval;

	if (pSRB->SGIndex < pSRB->SGcount) {
		amd_write8(amd, DMA_Cmd, DMA_IDLE_CMD|ioDir);/* |EN_DMA_INT */

		if (!pSRB->SGToBeXferLen) {
			psgl = pSRB->pSGlist;
			pSRB->SGPhysAddr = psgl->SGXPtr;
			pSRB->SGToBeXferLen = psgl->SGXLen;
		}
		lval = pSRB->SGToBeXferLen;
		amd_write8(amd, CTCREG_LOW, lval);
		amd_write8(amd, CTCREG_MID, lval >> 8);
		amd_write8(amd, CURTXTCNTREG, lval >> 16);

		amd_write32(amd, DMA_XferCnt, pSRB->SGToBeXferLen);

		amd_write32(amd, DMA_XferAddr, pSRB->SGPhysAddr);

		pSRB->SRBState = SRB_DATA_XFER;

		amd_write8(amd, SCSICMDREG, DMA_COMMAND|INFO_XFER_CMD);

		amd_write8(amd, DMA_Cmd, DMA_IDLE_CMD|ioDir); /* |EN_DMA_INT */

		amd_write8(amd, DMA_Cmd, DMA_START_CMD|ioDir);/* |EN_DMA_INT */
	} else {		/* xfer pad */
		if (pSRB->SGcount) {
			pSRB->AdaptStatus = H_OVER_UNDER_RUN;
			pSRB->SRBStatus |= OVER_RUN;
		}
		amd_write8(amd, CTCREG_LOW, 0);
		amd_write8(amd, CTCREG_MID, 0);
		amd_write8(amd, CURTXTCNTREG, 0);

		pSRB->SRBState |= SRB_XFERPAD;
		amd_write8(amd, SCSICMDREG, DMA_COMMAND|XFER_PAD_BYTE);
	}
}

static u_int
amd_CommandPhase1(struct amd_softc *amd, struct amd_srb *srb, u_int scsistat)
{
	amd_write8(amd, SCSICMDREG, RESET_ATN_CMD);
	amd_write8(amd, SCSICMDREG, CLEAR_FIFO_CMD);

	amdsetupcommand(amd, srb);

	srb->SRBState = SRB_COMMAND;
	amd_write8(amd, SCSICMDREG, INFO_XFER_CMD);
	return (scsistat);
}

static u_int
amd_StatusPhase1(struct amd_softc *amd, struct amd_srb *pSRB, u_int scsistat)
{
	amd_write8(amd, SCSICMDREG, CLEAR_FIFO_CMD);
	pSRB->SRBState = SRB_STATUS;
	amd_write8(amd, SCSICMDREG, INITIATOR_CMD_CMPLTE);
	return (scsistat);
}

static u_int
amd_MsgOutPhase1(struct amd_softc *amd, struct amd_srb *pSRB, u_int scsistat)
{
	amd_write8(amd, SCSICMDREG, CLEAR_FIFO_CMD);

	if (amd->msgout_len == 0) {
		amd->msgout_buf[0] = MSG_NOOP;
		amd->msgout_len = 1;
	}
	amd_write8_multi(amd, SCSIFIFOREG, amd->msgout_buf, amd->msgout_len);
	amd_write8(amd, SCSICMDREG, INFO_XFER_CMD);
	return (scsistat);
}

static u_int
amd_MsgInPhase1(struct amd_softc *amd, struct amd_srb *pSRB, u_int scsistat)
{
	amd_write8(amd, SCSICMDREG, CLEAR_FIFO_CMD);
	amd_write8(amd, SCSICMDREG, INFO_XFER_CMD);
	return (scsistat);
}

static u_int
amd_NopPhase(struct amd_softc *amd, struct amd_srb *pSRB, u_int scsistat)
{
	return (scsistat);
}

static void
amd_Disconnect(struct amd_softc * amd)
{
	struct	amd_srb *srb;
	int	target;
	int	lun;

	srb = amd->active_srb;
	amd->active_srb = NULL;
	amd->last_phase = SCSI_BUS_FREE;
	amd_write8(amd, SCSICMDREG, EN_SEL_RESEL);
	target = amd->cur_target;
	lun = amd->cur_lun;

	if (srb == NULL) {
		/* Invalid reselection */
		amdrunwaiting(amd);
	} else if (srb->SRBState & SRB_ABORT_SENT) {
		/* Clean up and complete this SRB */
#if 0
		while ((pSRB = TAILQ_FIRST(&amd->running_srbs)) != NULL) {
			/* XXX What about "done'ing" these srbs??? */
			if (pSRB->pSRBDCB == pDCB) {
				TAILQ_REMOVE(&amd->running_srbs, pSRB, links);
				TAILQ_INSERT_HEAD(&amd->free_srbs, pSRB, links);
			}
		}
		amdrunwaiting(amd);
#endif
	} else {
		if ((srb->SRBState & (SRB_START | SRB_MSGOUT))
		 || !(srb->SRBState & (SRB_DISCONNECT | SRB_COMPLETED))) {
			srb->TargetStatus = AMD_SCSI_STAT_SEL_TIMEOUT;
			goto disc1;
		} else if (srb->SRBState & SRB_DISCONNECT) {
			if (!(srb->pccb->ccb_h.flags & CAM_TAG_ACTION_VALID))
				amd->untagged_srbs[target][lun] = srb;
			amdrunwaiting(amd);
		} else if (srb->SRBState & SRB_COMPLETED) {
	disc1:
			srb->SRBState = SRB_FREE;
			SRBdone(amd, srb);
		}
	}
	return;
}

static void
amd_Reselect(struct amd_softc *amd)
{
	struct amd_target_info *tinfo;
	u_int16_t disc_count;

	amd_clear_msg_state(amd);
	if (amd->active_srb != NULL) {
		/* Requeue the SRB for our attempted Selection */
		TAILQ_REMOVE(&amd->running_srbs, amd->active_srb, links);
		TAILQ_INSERT_HEAD(&amd->waiting_srbs, amd->active_srb, links);
		amd->active_srb = NULL;
	}
	/* get ID */
	amd->cur_target = amd_read8(amd, SCSIFIFOREG);
	amd->cur_target ^= amd->HostID_Bit;
	amd->cur_target = ffs(amd->cur_target) - 1;
	amd->cur_lun = amd_read8(amd, SCSIFIFOREG) & 7;
	tinfo = &amd->tinfo[amd->cur_target];
	amd->active_srb = amd->untagged_srbs[amd->cur_target][amd->cur_lun];
	disc_count = amd->disc_count[amd->cur_target][amd->cur_lun];
	if (disc_count == 0) {
		printf("amd%d: Unexpected reselection for target %d, "
		       "Issuing Abort\n", amd->unit, amd->cur_target);
		amd->msgout_buf[0] = MSG_ABORT;
		amd->msgout_len = 1;
		amd_write8(amd, SCSICMDREG, SET_ATN_CMD);
	}
	if (amd->active_srb != NULL) {
		amd->disc_count[amd->cur_target][amd->cur_lun]--;
		amd->untagged_srbs[amd->cur_target][amd->cur_lun] = NULL;
	}

	amd_write8(amd, SCSIDESTIDREG, amd->cur_target);
	amd_write8(amd, SYNCPERIOREG, tinfo->sync_period_reg);
	amd_write8(amd, SYNCOFFREG, tinfo->sync_offset_reg);
	amd_write8(amd, CNTLREG1, tinfo->CtrlR1);
	amd_write8(amd, CNTLREG3, tinfo->CtrlR3);
	amd_write8(amd, CNTLREG4, tinfo->CtrlR4);
	amd_write8(amd, SCSICMDREG, MSG_ACCEPTED_CMD);/* drop /ACK */
	amd->last_phase = SCSI_NOP0;
}
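
/*
 * Reselection ID decoding above: the first FIFO byte is the bus ID
 * bitmap with both the reselecting target's bit and our own bit set.
 * XORing out HostID_Bit leaves only the target's bit, and ffs() - 1
 * turns that bit back into a target number; the second FIFO byte is
 * the identify message, whose low three bits carry the LUN.
 */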
1838
1839static void
1840SRBdone(struct amd_softc *amd, struct amd_srb *pSRB)
1841{
1842	u_int8_t   bval, i, status;
1843	union ccb *pccb;
1844	struct ccb_scsiio *pcsio;
1845	int	   intflag;
1846	struct amd_sg *ptr2;
1847	u_int32_t   swlval;
1848
1849	pccb = pSRB->pccb;
1850	pcsio = &pccb->csio;
1851
1852	CAM_DEBUG(pccb->ccb_h.path, CAM_DEBUG_TRACE,
1853		  ("SRBdone - TagNumber %d\n", pSRB->TagNumber));
1854
1855	if ((pccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE) {
1856		bus_dmasync_op_t op;
1857
1858		if ((pccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN)
1859			op = BUS_DMASYNC_POSTREAD;
1860		else
1861			op = BUS_DMASYNC_POSTWRITE;
1862		bus_dmamap_sync(amd->buffer_dmat, pSRB->dmamap, op);
1863		bus_dmamap_unload(amd->buffer_dmat, pSRB->dmamap);
1864	}
1865
1866	status = pSRB->TargetStatus;
1867	pccb->ccb_h.status = CAM_REQ_CMP;
1868	if (pSRB->SRBFlag & AUTO_REQSENSE) {
1869		pSRB->SRBFlag &= ~AUTO_REQSENSE;
1870		pSRB->AdaptStatus = 0;
1871		pSRB->TargetStatus = SCSI_STATUS_CHECK_COND;
1872
1873		if (status == SCSI_STATUS_CHECK_COND) {
1874			pccb->ccb_h.status = CAM_AUTOSENSE_FAIL;
1875			goto ckc_e;
1876		}
1877		*((u_int32_t *)&(pSRB->CmdBlock[0])) = pSRB->Segment0[0];
1878
1879		pcsio->sense_resid = pcsio->sense_len
1880				   - pSRB->TotalXferredLen;
1881		pSRB->TotalXferredLen = pSRB->Segment1[1];
1882		if (pSRB->TotalXferredLen) {
1883			/* ???? */
1884			pcsio->resid = pcsio->dxfer_len
1885				     - pSRB->TotalXferredLen;
1886			/* The resid field contains valid data	 */
1887			/* Flush resid bytes on complete        */
1888		} else {
1889			pcsio->scsi_status = SCSI_STATUS_CHECK_COND;
1890		}
1891		bzero(&pcsio->sense_data, pcsio->sense_len);
1892		bcopy(amd_get_sense_buf(amd, pSRB), &pcsio->sense_data,
1893		      pcsio->sense_len);
1894		pccb->ccb_h.status = CAM_AUTOSNS_VALID;
1895		goto ckc_e;
1896	}
	if (status) {
		if (status == SCSI_STATUS_CHECK_COND) {

			if ((pSRB->SGIndex < pSRB->SGcount)
			 && (pSRB->SGcount) && (pSRB->SGToBeXferLen)) {
				bval = pSRB->SGcount;
				swlval = pSRB->SGToBeXferLen;
				ptr2 = pSRB->pSGlist;
				ptr2++;
				for (i = pSRB->SGIndex + 1; i < bval; i++) {
					swlval += ptr2->SGXLen;
					ptr2++;
				}
				/*
				 * The transfer stopped early; the residual
				 * is the remainder of the current segment
				 * plus all segments not yet started.
				 */
				pcsio->resid = (u_int32_t) swlval;

#ifdef	AMD_DEBUG0
				printf("XferredLen=%8x,NotYetXferLen=%8x,",
					pSRB->TotalXferredLen, swlval);
#endif
			}
			if ((pcsio->ccb_h.flags & CAM_DIS_AUTOSENSE) == 0) {
#ifdef	AMD_DEBUG0
				printf("RequestSense..................\n");
#endif
				RequestSense(amd, pSRB);
				return;
			}
			pcsio->scsi_status = SCSI_STATUS_CHECK_COND;
			pccb->ccb_h.status = CAM_SCSI_STATUS_ERROR;
			goto ckc_e;
		} else if (status == SCSI_STATUS_QUEUE_FULL) {
			pSRB->AdaptStatus = 0;
			pSRB->TargetStatus = 0;
			pcsio->scsi_status = SCSI_STATUS_QUEUE_FULL;
			pccb->ccb_h.status = CAM_SCSI_STATUS_ERROR;
			goto ckc_e;
		} else if (status == AMD_SCSI_STAT_SEL_TIMEOUT) {
			pSRB->AdaptStatus = H_SEL_TIMEOUT;
			pSRB->TargetStatus = 0;

			pcsio->scsi_status = AMD_SCSI_STAT_SEL_TIMEOUT;
			pccb->ccb_h.status = CAM_SEL_TIMEOUT;
		} else if (status == SCSI_STATUS_BUSY) {
#ifdef AMD_DEBUG0
			printf("DC390: target busy at %s %d\n",
			       __FILE__, __LINE__);
#endif
			pcsio->scsi_status = SCSI_STATUS_BUSY;
			pccb->ccb_h.status = CAM_SCSI_BUSY;
		} else if (status == SCSI_STATUS_RESERV_CONFLICT) {
#ifdef AMD_DEBUG0
			printf("DC390: target reserved at %s %d\n",
			       __FILE__, __LINE__);
#endif
			pcsio->scsi_status = SCSI_STATUS_RESERV_CONFLICT;
			pccb->ccb_h.status = CAM_SCSI_STATUS_ERROR; /* XXX */
		} else {
			pSRB->AdaptStatus = 0;
#ifdef AMD_DEBUG0
			printf("DC390: driver stuffup at %s %d\n",
			       __FILE__, __LINE__);
#endif
			pccb->ccb_h.status = CAM_SCSI_STATUS_ERROR;
		}
	} else {
		status = pSRB->AdaptStatus;
		if (status & H_OVER_UNDER_RUN) {
			pSRB->TargetStatus = 0;
			pccb->ccb_h.status = CAM_DATA_RUN_ERR;
		} else if (pSRB->SRBStatus & PARITY_ERROR) {
#ifdef AMD_DEBUG0
			printf("DC390: driver stuffup %s %d\n",
			       __FILE__, __LINE__);
#endif
			/* The controller flagged a SCSI parity error. */
			pccb->ccb_h.status = CAM_UNCOR_PARITY;
		} else {	/* No error */
			pSRB->AdaptStatus = 0;
			pSRB->TargetStatus = 0;
			pcsio->resid = 0;
			/* No error; the sense data is not valid. */
		}
	}
ckc_e:
	intflag = splcam();
	if ((pccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
		/* The request did not complete cleanly: freeze the devq. */
		xpt_freeze_devq(pccb->ccb_h.path, 1);
		pccb->ccb_h.status |= CAM_DEV_QFRZN;
	}
	TAILQ_REMOVE(&amd->running_srbs, pSRB, links);
	TAILQ_INSERT_HEAD(&amd->free_srbs, pSRB, links);
	amdrunwaiting(amd);
	splx(intflag);
	xpt_done(pccb);
}

static void
amd_ResetSCSIBus(struct amd_softc *amd)
{
	int     intflag;

	intflag = splcam();
	amd->ACBFlag |= RESET_DEV;
	amd_write8(amd, DMA_Cmd, DMA_IDLE_CMD);
	amd_write8(amd, SCSICMDREG, RST_SCSI_BUS_CMD);
	splx(intflag);
}

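/*
 * amd_ScsiRstDetect - handle a SCSI bus reset seen on the wire, whether
 * initiated by amd_ResetSCSIBus() above or by a third party.
 */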
static void
amd_ScsiRstDetect(struct amd_softc *amd)
{
	int     intflag;
	u_int32_t   wlval;

#ifdef AMD_DEBUG0
	printf("amd_ScsiRstDetect\n");
#endif

	wlval = 1000;
	while (--wlval) {	/* ~1 second settle time */
		DELAY(1000);
	}
	intflag = splcam();

	amd_write8(amd, DMA_Cmd, DMA_IDLE_CMD);
	amd_write8(amd, SCSICMDREG, CLEAR_FIFO_CMD);

	if (amd->ACBFlag & RESET_DEV) {
		/* We initiated this reset ourselves. */
		amd->ACBFlag |= RESET_DONE;
	} else {
		/*
		 * A third party reset the bus: forget all negotiated
		 * parameters and complete every queued SRB with a
		 * bus-reset status.
		 */
		amd->ACBFlag |= RESET_DETECT;
		ResetDevParam(amd);
		amdcompletematch(amd, CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD,
				 AMD_TAG_WILDCARD, &amd->running_srbs,
				 CAM_DEV_QFRZN|CAM_SCSI_BUS_RESET);
		amdcompletematch(amd, CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD,
				 AMD_TAG_WILDCARD, &amd->waiting_srbs,
				 CAM_DEV_QFRZN|CAM_SCSI_BUS_RESET);
		amd->active_srb = NULL;
		amd->ACBFlag = 0;
		amdrunwaiting(amd);
	}
	splx(intflag);
}

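/*
 * RequestSense - convert the active SRB into a driver-generated
 * REQUEST SENSE command.  The original CDB and transfer counts are
 * stashed in Segment0[]/Segment1[] so SRBdone() can restore them once
 * the sense data has been fetched.
 */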
static void
RequestSense(struct amd_softc *amd, struct amd_srb *pSRB)
{
	union ccb *pccb;
	struct ccb_scsiio *pcsio;

	pccb = pSRB->pccb;
	pcsio = &pccb->csio;

	pSRB->SRBFlag |= AUTO_REQSENSE;
	pSRB->Segment0[0] = *((u_int32_t *)&(pSRB->CmdBlock[0]));
	pSRB->Segment0[1] = *((u_int32_t *)&(pSRB->CmdBlock[4]));
	pSRB->Segment1[0] = (pSRB->ScsiCmdLen << 8) + pSRB->SGcount;
	pSRB->Segment1[1] = pSRB->TotalXferredLen;

	pSRB->AdaptStatus = 0;
	pSRB->TargetStatus = 0;

	pSRB->Segmentx.SGXPtr = amd_get_sense_bufaddr(amd, pSRB);
	pSRB->Segmentx.SGXLen = amd_get_sense_bufsize(amd, pSRB);

	pSRB->pSGlist = &pSRB->Segmentx;
	pSRB->SGcount = 1;
	pSRB->SGIndex = 0;

	pSRB->CmdBlock[0] = REQUEST_SENSE;
	pSRB->CmdBlock[1] = pSRB->pccb->ccb_h.target_lun << 5;
	pSRB->CmdBlock[2] = 0;
	pSRB->CmdBlock[3] = 0;
	pSRB->CmdBlock[4] = pcsio->sense_len;
	pSRB->CmdBlock[5] = 0;
	pSRB->ScsiCmdLen = 6;

	pSRB->TotalXferredLen = 0;
	pSRB->SGToBeXferLen = 0;
	if (amdstart(amd, pSRB) != 0) {
		TAILQ_REMOVE(&amd->running_srbs, pSRB, links);
		TAILQ_INSERT_HEAD(&amd->waiting_srbs, pSRB, links);
	}
}

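/*
 * amd_InvalidCmd - the chip flagged an invalid command; if we were in
 * the middle of a selection or message-out, flush the FIFO so the next
 * command sequence starts clean.
 */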
static void
amd_InvalidCmd(struct amd_softc *amd)
{
	struct amd_srb *srb;

	srb = amd->active_srb;
	if (srb->SRBState & (SRB_START|SRB_MSGOUT))
		amd_write8(amd, SCSICMDREG, CLEAR_FIFO_CMD);
}

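/*
 * amd_linkSRB - assign a tag number to every SRB in the static pool,
 * create its buffer DMA map, and place it on the free list.
 */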
void
amd_linkSRB(struct amd_softc *amd)
{
	u_int16_t  count, i;
	struct amd_srb *psrb;
	int error;

	count = amd->SRBCount;

	for (i = 0; i < count; i++) {
		psrb = (struct amd_srb *)&amd->SRB_array[i];
		psrb->TagNumber = i;

		/*
		 * Create the dmamap.  This is no longer optional!
		 *
		 * XXX Since there is no detach method in this driver,
		 * this does not get freed!
		 */
		if ((error = bus_dmamap_create(amd->buffer_dmat, 0,
					       &psrb->dmamap)) != 0) {
			device_printf(amd->dev, "Error %d creating buffer "
				      "dmamap!\n", error);
			return;
		}
		TAILQ_INSERT_TAIL(&amd->free_srbs, psrb, links);
	}
}

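/*
 * Serial EEPROM access.  The DC-390 stores its configuration in a
 * 93C46-style serial EEPROM that the routines below bit-bang through
 * vendor-specific PCI configuration space registers.  Judging from the
 * access pattern (the register layout is not documented here), config
 * offsets 0xc0/0x80 gate the chip enable, and bits 0x40 and 0x80
 * written to offset 0x80 drive the EEPROM's data-in and clock lines.
 */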
static void
amd_EnDisableCE(struct amd_softc *amd, int mode, int *regval)
{
	if (mode == ENABLE_CE) {
		*regval = 0xc0;
	} else {
		*regval = 0x80;
	}
	pci_write_config(amd->dev, *regval, 0, /*bytes*/1);
	if (mode == DISABLE_CE) {
		pci_write_config(amd->dev, *regval, 0, /*bytes*/1);
	}
	DELAY(160);
}

static void
amd_EEpromOutDI(struct amd_softc *amd, int *regval, int Carry)
{
	u_int bval;

	bval = 0;
	if (Carry) {
		bval = 0x40;	/* data bit (see the note above) */
		*regval = 0x80;
		pci_write_config(amd->dev, *regval, bval, /*bytes*/1);
	}
	DELAY(160);
	bval |= 0x80;		/* raise the clock with data held */
	pci_write_config(amd->dev, *regval, bval, /*bytes*/1);
	DELAY(160);
	pci_write_config(amd->dev, *regval, 0, /*bytes*/1);
	DELAY(160);
}

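/*
 * amd_EEpromInDO - clock once and sample the EEPROM's data-out line; a
 * readback of 0x22 apparently denotes a logic one (again inferred from
 * the access pattern rather than from documentation).
 */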
static int
amd_EEpromInDO(struct amd_softc *amd)
{
	pci_write_config(amd->dev, 0x80, 0x80, /*bytes*/1);
	DELAY(160);
	pci_write_config(amd->dev, 0x80, 0x40, /*bytes*/1);
	DELAY(160);
	if (pci_read_config(amd->dev, 0, /*bytes*/1) == 0x22)
		return (1);
	return (0);
}

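/*
 * EEpromGetData1 - shift in one 16-bit word from the EEPROM, most
 * significant bit first.
 */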
static u_int16_t
EEpromGetData1(struct amd_softc *amd)
{
	u_int	  i;
	u_int	  carryFlag;
	u_int16_t wval;

	wval = 0;
	for (i = 0; i < 16; i++) {
		wval <<= 1;
		carryFlag = amd_EEpromInDO(amd);
		wval |= carryFlag;
	}
	return (wval);
}

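/*
 * amd_Prepare - clock out the 9-bit start/opcode/address sequence for
 * the given EEPROM command, most significant bit first.
 */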
static void
amd_Prepare(struct amd_softc *amd, int *regval, u_int8_t EEpromCmd)
{
	u_int i, j;
	int carryFlag;

	carryFlag = 1;
	j = 0x80;
	for (i = 0; i < 9; i++) {
		amd_EEpromOutDI(amd, regval, carryFlag);
		carryFlag = (EEpromCmd & j) ? 1 : 0;
		j >>= 1;
	}
}

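/*
 * amd_ReadEEprom - fetch all 0x40 words (128 bytes) of configuration
 * data into eepromBuf, issuing one READ command per word.
 */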
static void
amd_ReadEEprom(struct amd_softc *amd)
{
	int	   regval;
	u_int	   i;
	u_int16_t *ptr;
	u_int8_t   cmd;

	ptr = (u_int16_t *)&amd->eepromBuf[0];
	cmd = EEPROM_READ;
	for (i = 0; i < 0x40; i++) {
		amd_EnDisableCE(amd, ENABLE_CE, &regval);
		amd_Prepare(amd, &regval, cmd);
		*ptr = EEpromGetData1(amd);
		ptr++;
		cmd++;
		amd_EnDisableCE(amd, DISABLE_CE, &regval);
	}
}

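/*
 * amd_load_defaults - synthesize a sane configuration when no valid
 * SEEPROM image is found: tagged queueing, disconnection, sync
 * negotiation, and parity checking for every target, with host ID 7.
 */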
static void
amd_load_defaults(struct amd_softc *amd)
{
	int target;

	bzero(&amd->eepromBuf, sizeof amd->eepromBuf);
	for (target = 0; target < MAX_SCSI_ID; target++)
		amd->eepromBuf[target << 2] =
		    (TAG_QUEUING|EN_DISCONNECT|SYNC_NEGO|PARITY_CHK);
	amd->eepromBuf[EE_ADAPT_SCSI_ID] = 7;
	amd->eepromBuf[EE_MODE2] = ACTIVE_NEGATION|LUN_CHECK|GREATER_1G;
	amd->eepromBuf[EE_TAG_CMD_NUM] = 4;
}

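/*
 * amd_load_eeprom_or_defaults - read the SEEPROM and validate it by
 * summing its contents as 16-bit words; if the sum does not equal
 * EE_CHECKSUM, fall back on amd_load_defaults().
 */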
static void
amd_load_eeprom_or_defaults(struct amd_softc *amd)
{
	u_int16_t  wval, *ptr;
	u_int8_t   i;

	amd_ReadEEprom(amd);
	wval = 0;
	ptr = (u_int16_t *)&amd->eepromBuf[0];
	for (i = 0; i < EE_DATA_SIZE; i += 2, ptr++)
		wval += *ptr;

	if (wval != EE_CHECKSUM) {
		if (bootverbose)
			printf("amd%d: SEEPROM data unavailable.  "
			       "Using default device parameters.\n",
			       amd->unit);
		amd_load_defaults(amd);
	}
}

/*
 **********************************************************************
 * Function      : amd_init
 * Purpose       : initialize the internal structures and program the
 *		   controller registers for a given host adapter
 * Inputs        : dev - the newbus device handle for this adapter
 **********************************************************************
 */
static int
amd_init(device_t dev)
{
	struct amd_softc *amd = device_get_softc(dev);
	struct resource	*iores;
	int	i, rid;
	u_int	bval;

	rid = PCI_BASE_ADDR0;
	iores = bus_alloc_resource_any(dev, SYS_RES_IOPORT, &rid, RF_ACTIVE);
	if (iores == NULL) {
		if (bootverbose)
			printf("amd_init: bus_alloc_resource failure!\n");
		return ENXIO;
	}
	amd->tag = rman_get_bustag(iores);
	amd->bsh = rman_get_bushandle(iores);

	/* DMA tag for mapping buffers into device visible space. */
	if (bus_dma_tag_create(/*parent_dmat*/NULL, /*alignment*/1,
			       /*boundary*/0,
			       /*lowaddr*/BUS_SPACE_MAXADDR_32BIT,
			       /*highaddr*/BUS_SPACE_MAXADDR,
			       /*filter*/NULL, /*filterarg*/NULL,
			       /*maxsize*/MAXBSIZE, /*nsegments*/AMD_NSEG,
			       /*maxsegsz*/AMD_MAXTRANSFER_SIZE,
			       /*flags*/BUS_DMA_ALLOCNOW,
			       /*lockfunc*/busdma_lock_mutex,
			       /*lockarg*/&Giant,
			       &amd->buffer_dmat) != 0) {
		if (bootverbose)
			printf("amd_init: bus_dma_tag_create failure!\n");
		return ENXIO;
	}

	/* Create, allocate, and map DMA buffers for autosense data. */
	if (bus_dma_tag_create(/*parent_dmat*/NULL, /*alignment*/1,
			       /*boundary*/0,
			       /*lowaddr*/BUS_SPACE_MAXADDR_32BIT,
			       /*highaddr*/BUS_SPACE_MAXADDR,
			       /*filter*/NULL, /*filterarg*/NULL,
			       sizeof(struct scsi_sense_data) * MAX_SRB_CNT,
			       /*nsegments*/1,
			       /*maxsegsz*/AMD_MAXTRANSFER_SIZE,
			       /*flags*/0,
			       /*lockfunc*/busdma_lock_mutex,
			       /*lockarg*/&Giant, &amd->sense_dmat) != 0) {
		if (bootverbose)
			device_printf(dev, "cannot create sense buffer dmat\n");
		return (ENXIO);
	}

	if (bus_dmamem_alloc(amd->sense_dmat, (void **)&amd->sense_buffers,
			     BUS_DMA_NOWAIT, &amd->sense_dmamap) != 0)
		return (ENOMEM);

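	/*
	 * amd_dmamap_cb records the pool's single-segment bus address
	 * in sense_busaddr.
	 */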
	bus_dmamap_load(amd->sense_dmat, amd->sense_dmamap,
		       amd->sense_buffers,
		       sizeof(struct scsi_sense_data) * MAX_SRB_CNT,
		       amd_dmamap_cb, &amd->sense_busaddr, /*flags*/0);

	TAILQ_INIT(&amd->free_srbs);
	TAILQ_INIT(&amd->running_srbs);
	TAILQ_INIT(&amd->waiting_srbs);
	amd->last_phase = SCSI_BUS_FREE;
	amd->dev = dev;
	amd->unit = device_get_unit(dev);
	amd->SRBCount = MAX_SRB_CNT;
	amd->status = 0;
	amd_load_eeprom_or_defaults(amd);
	amd->max_id = 7;
	if (amd->eepromBuf[EE_MODE2] & LUN_CHECK) {
		amd->max_lun = 7;
	} else {
		amd->max_lun = 0;
	}
	amd->AdaptSCSIID = amd->eepromBuf[EE_ADAPT_SCSI_ID];
	amd->HostID_Bit = (1 << amd->AdaptSCSIID);
	amd->AdaptSCSILUN = 0;
	/* (eepromBuf[EE_TAG_CMD_NUM]) << 2; */
	amd->ACBFlag = 0;
	amd->Gmode2 = amd->eepromBuf[EE_MODE2];
	amd_linkSRB(amd);
	for (i = 0; i <= amd->max_id; i++) {
		if (amd->AdaptSCSIID != i) {
			struct amd_target_info *tinfo;
			PEEprom prom;

			tinfo = &amd->tinfo[i];
			prom = (PEEprom)&amd->eepromBuf[i << 2];
			if ((prom->EE_MODE1 & EN_DISCONNECT) != 0) {
				tinfo->disc_tag |= AMD_USR_DISCENB;
				if ((prom->EE_MODE1 & TAG_QUEUING) != 0)
					tinfo->disc_tag |= AMD_USR_TAGENB;
			}
			if ((prom->EE_MODE1 & SYNC_NEGO) != 0) {
				tinfo->user.period =
				    eeprom_period[prom->EE_SPEED];
				tinfo->user.offset = AMD_MAX_SYNC_OFFSET;
			}
			tinfo->CtrlR1 = amd->AdaptSCSIID;
			if ((prom->EE_MODE1 & PARITY_CHK) != 0)
				tinfo->CtrlR1 |= PARITY_ERR_REPO;
			tinfo->CtrlR3 = FAST_CLK;
			tinfo->CtrlR4 = EATER_25NS;
			if ((amd->eepromBuf[EE_MODE2] & ACTIVE_NEGATION) != 0)
				tinfo->CtrlR4 |= NEGATE_REQACKDATA;
		}
	}
	amd_write8(amd, SCSITIMEOUTREG, 153); /* 250ms selection timeout */
	/* Conversion factor = 0, 40MHz clock */
	amd_write8(amd, CLKFACTREG, CLK_FREQ_40MHZ);
	/* NOP cmd - clear command register */
	amd_write8(amd, SCSICMDREG, NOP_CMD);
	amd_write8(amd, CNTLREG2, EN_FEATURE|EN_SCSI2_CMD);
	amd_write8(amd, CNTLREG3, FAST_CLK);
	bval = EATER_25NS;
	if (amd->eepromBuf[EE_MODE2] & ACTIVE_NEGATION) {
		bval |= NEGATE_REQACKDATA;
	}
	amd_write8(amd, CNTLREG4, bval);

	/* Disable SCSI bus reset interrupt. */
	amd_write8(amd, CNTLREG1, DIS_INT_ON_SCSI_RST);

	return 0;
}

/*
 * Attach and initialize a host adapter.
 */
static int
amd_attach(device_t dev)
{
	struct cam_devq	*devq;	/* Device Queue to use for this SIM */
	u_int8_t	intstat;
	struct amd_softc *amd = device_get_softc(dev);
	int		unit = device_get_unit(dev);
	int		rid;
	void		*ih;
	struct resource	*irqres;

	if (amd_init(dev)) {
		if (bootverbose)
			printf("amd_attach: amd_init failure!\n");
		return ENXIO;
	}

	/* Reading the interrupt status register clears any pending INT. */
	intstat = amd_read8(amd, INTSTATREG);

	/* After setting up the adapter, map our interrupt. */
	rid = 0;
	irqres = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
					RF_SHAREABLE | RF_ACTIVE);
	if (irqres == NULL ||
	    bus_setup_intr(dev, irqres, INTR_TYPE_CAM | INTR_ENTROPY,
	    NULL, amd_intr, amd, &ih)) {
		if (bootverbose)
			printf("amd%d: unable to register interrupt handler!\n",
			       unit);
		return ENXIO;
	}

	/*
	 * Now let the CAM generic SCSI layer find the SCSI devices on
	 * the bus and start the queues.  The SIM device queue holds up
	 * to (MAX_START_JOB - 1) simultaneous transactions.
	 */
	devq = cam_simq_alloc(MAX_START_JOB);
	if (devq == NULL) {
		if (bootverbose)
			printf("amd_attach: cam_simq_alloc failure!\n");
		return ENXIO;
	}

	amd->psim = cam_sim_alloc(amd_action, amd_poll, "amd",
				  amd, amd->unit, &Giant,
				  1, MAX_TAGS_CMD_QUEUE, devq);
	if (amd->psim == NULL) {
		cam_simq_free(devq);
		if (bootverbose)
			printf("amd_attach: cam_sim_alloc failure!\n");
		return ENXIO;
	}

	if (xpt_bus_register(amd->psim, dev, 0) != CAM_SUCCESS) {
		cam_sim_free(amd->psim, /*free_devq*/TRUE);
		if (bootverbose)
			printf("amd_attach: xpt_bus_register failure!\n");
		return ENXIO;
	}

	if (xpt_create_path(&amd->ppath, /* periph */ NULL,
			    cam_sim_path(amd->psim), CAM_TARGET_WILDCARD,
			    CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
		xpt_bus_deregister(cam_sim_path(amd->psim));
		cam_sim_free(amd->psim, /*free_devq*/TRUE);
		if (bootverbose)
			printf("amd_attach: xpt_create_path failure!\n");
		return ENXIO;
	}

	return 0;
}

static int
amd_probe(device_t dev)
{
	if (pci_get_devid(dev) == PCI_DEVICE_ID_AMD53C974) {
		device_set_desc(dev,
			"Tekram DC390(T)/AMD53c974 SCSI Host Adapter");
		return BUS_PROBE_DEFAULT;
	}
	return ENXIO;
}

static device_method_t amd_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		amd_probe),
	DEVMETHOD(device_attach,	amd_attach),
	{ 0, 0 }
};

static driver_t amd_driver = {
	"amd", amd_methods, sizeof(struct amd_softc)
};

static devclass_t amd_devclass;
DRIVER_MODULE(amd, pci, amd_driver, amd_devclass, 0, 0);
MODULE_DEPEND(amd, pci, 1, 1, 1);
MODULE_DEPEND(amd, cam, 1, 1, 1);