/* advlib.c revision 163816 */
1/*-
2 * Low level routines for the Advanced Systems Inc. SCSI controllers chips
3 *
4 * Copyright (c) 1996-1997, 1999-2000 Justin Gibbs.
5 * All rights reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 * 1. Redistributions of source code must retain the above copyright
11 *    notice, this list of conditions, and the following disclaimer,
12 *    without modification, immediately at the beginning of the file.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 *    notice, this list of conditions and the following disclaimer in the
15 *    documentation and/or other materials provided with the distribution.
16 * 3. The name of the author may not be used to endorse or promote products
17 *    derived from this software without specific prior written permission.
18 *
19 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
20 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
22 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
23 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
24 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
25 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
26 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
27 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
28 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
29 * SUCH DAMAGE.
30 */
31/*-
32 * Ported from:
33 * advansys.c - Linux Host Driver for AdvanSys SCSI Adapters
34 *
35 * Copyright (c) 1995-1996 Advanced System Products, Inc.
36 * All Rights Reserved.
37 *
38 * Redistribution and use in source and binary forms, with or without
39 * modification, are permitted provided that redistributions of source
40 * code retain the above copyright notice and this comment without
41 * modification.
42 */
43
44#include <sys/cdefs.h>
45__FBSDID("$FreeBSD: head/sys/dev/advansys/advlib.c 163816 2006-10-31 05:53:29Z mjacob $");
46
47#include <sys/param.h>
48#include <sys/kernel.h>
49#include <sys/systm.h>
50
51#include <machine/bus.h>
52#include <machine/resource.h>
53#include <sys/bus.h>
54#include <sys/rman.h>
55
56#include <cam/cam.h>
57#include <cam/cam_ccb.h>
58#include <cam/cam_sim.h>
59#include <cam/cam_xpt_sim.h>
60
61#include <cam/scsi/scsi_all.h>
62#include <cam/scsi/scsi_message.h>
63#include <cam/scsi/scsi_da.h>
64#include <cam/scsi/scsi_cd.h>
65
66#include <vm/vm.h>
67#include <vm/vm_param.h>
68#include <vm/pmap.h>
69
70#include <dev/advansys/advansys.h>
71#include <dev/advansys/advmcode.h>
72
/*
 * A quirk-table entry: an inquiry match pattern plus the quirk flags
 * applied to devices whose inquiry data matches it.
 */
struct adv_quirk_entry {
	struct scsi_inquiry_pattern inq_pat;	/* type/vendor/product/rev */
	u_int8_t quirks;			/* ADV_QUIRK_* flags below */
#define ADV_QUIRK_FIX_ASYN_XFER_ALWAYS	0x01
#define ADV_QUIRK_FIX_ASYN_XFER		0x02
};
79
/*
 * Device quirk table consulted from advasync() on AC_FOUND_DEVICE.
 * Entries are matched in order; the trailing wildcard entry
 * guarantees cam_quirkmatch() always finds a match.
 */
static struct adv_quirk_entry adv_quirk_table[] =
{
	{
		/* HP CD-ROMs always require the async transfer fix. */
		{ T_CDROM, SIP_MEDIA_REMOVABLE, "HP", "*", "*" },
		ADV_QUIRK_FIX_ASYN_XFER_ALWAYS|ADV_QUIRK_FIX_ASYN_XFER
	},
	{
		/* Devices below are exonerated from the async fix. */
		{ T_CDROM, SIP_MEDIA_REMOVABLE, "NEC", "CD-ROM DRIVE", "*" },
		0
	},
	{
		{
		  T_SEQUENTIAL, SIP_MEDIA_REMOVABLE,
		  "TANDBERG", " TDC 36", "*"
		},
		0
	},
	{
		{ T_SEQUENTIAL, SIP_MEDIA_REMOVABLE, "WANGTEK", "*", "*" },
		0
	},
	{
		{
		  T_PROCESSOR, SIP_MEDIA_REMOVABLE|SIP_MEDIA_FIXED,
		  "*", "*", "*"
		},
		0
	},
	{
		{
		  T_SCANNER, SIP_MEDIA_REMOVABLE|SIP_MEDIA_FIXED,
		  "*", "*", "*"
		},
		0
	},
	{
		/* Default quirk entry */
		{
		  T_ANY, SIP_MEDIA_REMOVABLE|SIP_MEDIA_FIXED,
		  /*vendor*/"*", /*product*/"*", /*revision*/"*"
                },
                ADV_QUIRK_FIX_ASYN_XFER,
	}
};
124
/*
 * Allowable periods in ns, indexed by the 4bit period code carried in
 * the high nibble of the SDTR data byte (see
 * adv_sdtr_to_period_offset()).  This table is for non-Ultra chips.
 */
static u_int8_t adv_sdtr_period_tbl[] =
{
	25,
	30,
	35,
	40,
	50,
	60,
	70,
	85
};
139
/*
 * Period table used instead of adv_sdtr_period_tbl on Ultra-capable
 * chips (selected in adv_lib_init()); same indexing scheme.
 */
static u_int8_t adv_sdtr_period_tbl_ultra[] =
{
	12,
	19,
	25,
	32,
	38,
	44,
	50,
	57,
	63,
	69,
	75,
	82,
	88,
	94,
	100,
	107
};
159
/*
 * In-memory image of a SCSI extended message as exchanged with the
 * microcode message buffers (read via adv_read_lram_16_multi() in
 * adv_isr_chip_halted()).
 */
struct ext_msg {
	u_int8_t msg_type;	/* MSG_EXTENDED */
	u_int8_t msg_len;	/* length of the extended message body */
	u_int8_t msg_req;	/* extended message code (e.g. MSG_EXT_SDTR) */
	union {
		struct {
			u_int8_t sdtr_xfer_period;	/* period code */
			u_int8_t sdtr_req_ack_offset;	/* REQ/ACK offset */
		} sdtr;
		struct {
       			u_int8_t wdtr_width;	/* wide transfer width */
		} wdtr;
		struct {
			u_int8_t mdp[4];	/* modify-data-pointer bytes */
		} mdp;
	} u_ext_msg;
	u_int8_t res;		/* reserved/pad byte */
};
178
/* Convenience accessors for the ext_msg union payload. */
#define	xfer_period	u_ext_msg.sdtr.sdtr_xfer_period
#define	req_ack_offset	u_ext_msg.sdtr.sdtr_req_ack_offset
#define	wdtr_width	u_ext_msg.wdtr.wdtr_width
/*
 * The modify-data-pointer argument is transferred most-significant
 * byte first, so mdp[0] holds byte 3.  The previous definitions
 * referenced non-existent u_ext_msg.mdp_bN members and could never
 * have compiled if used.  NOTE(review): byte ordering assumed from
 * the SCSI-2 Modify Data Pointer message format -- confirm.
 */
#define	mdp_b3		u_ext_msg.mdp.mdp[0]
#define	mdp_b2		u_ext_msg.mdp.mdp[1]
#define	mdp_b1		u_ext_msg.mdp.mdp[2]
#define	mdp_b0		u_ext_msg.mdp.mdp[3]

/*
 * Some of the early PCI adapters have problems with
 * async transfers.  Instead use an offset of 1.
 */
#define ASYN_SDTR_DATA_FIX_PCI_REV_AB 0x41
192
193/* LRAM routines */
194static void	 adv_read_lram_16_multi(struct adv_softc *adv, u_int16_t s_addr,
195					u_int16_t *buffer, int count);
196static void	 adv_write_lram_16_multi(struct adv_softc *adv,
197					 u_int16_t s_addr, u_int16_t *buffer,
198					 int count);
199static void	 adv_mset_lram_16(struct adv_softc *adv, u_int16_t s_addr,
200				  u_int16_t set_value, int count);
201static u_int32_t adv_msum_lram_16(struct adv_softc *adv, u_int16_t s_addr,
202				  int count);
203
204static int	 adv_write_and_verify_lram_16(struct adv_softc *adv,
205					      u_int16_t addr, u_int16_t value);
206static u_int32_t adv_read_lram_32(struct adv_softc *adv, u_int16_t addr);
207
208
209static void	 adv_write_lram_32(struct adv_softc *adv, u_int16_t addr,
210				   u_int32_t value);
211static void	 adv_write_lram_32_multi(struct adv_softc *adv,
212					 u_int16_t s_addr, u_int32_t *buffer,
213					 int count);
214
215/* EEPROM routines */
216static u_int16_t adv_read_eeprom_16(struct adv_softc *adv, u_int8_t addr);
217static u_int16_t adv_write_eeprom_16(struct adv_softc *adv, u_int8_t addr,
218				     u_int16_t value);
219static int	 adv_write_eeprom_cmd_reg(struct adv_softc *adv,
220					  u_int8_t cmd_reg);
221static int	 adv_set_eeprom_config_once(struct adv_softc *adv,
222					    struct adv_eeprom_config *eeconfig);
223
224/* Initialization */
225static u_int32_t adv_load_microcode(struct adv_softc *adv, u_int16_t s_addr,
226				    u_int16_t *mcode_buf, u_int16_t mcode_size);
227
228static void	 adv_reinit_lram(struct adv_softc *adv);
229static void	 adv_init_lram(struct adv_softc *adv);
230static int	 adv_init_microcode_var(struct adv_softc *adv);
231static void	 adv_init_qlink_var(struct adv_softc *adv);
232
233/* Interrupts */
234static void	 adv_disable_interrupt(struct adv_softc *adv);
235static void	 adv_enable_interrupt(struct adv_softc *adv);
236static void	 adv_toggle_irq_act(struct adv_softc *adv);
237
238/* Chip Control */
239static int	 adv_host_req_chip_halt(struct adv_softc *adv);
240static void	 adv_set_chip_ih(struct adv_softc *adv, u_int16_t ins_code);
241#if 0
242static u_int8_t  adv_get_chip_scsi_ctrl(struct adv_softc *adv);
243#endif
244
245/* Queue handling and execution */
246static __inline int
247		 adv_sgcount_to_qcount(int sgcount);
248
249static __inline int
250adv_sgcount_to_qcount(int sgcount)
251{
252	int	n_sg_list_qs;
253
254	n_sg_list_qs = ((sgcount - 1) / ADV_SG_LIST_PER_Q);
255	if (((sgcount - 1) % ADV_SG_LIST_PER_Q) != 0)
256		n_sg_list_qs++;
257	return (n_sg_list_qs + 1);
258}
259
260#if BYTE_ORDER == BIG_ENDIAN
261static void	 adv_adj_endian_qdone_info(struct adv_q_done_info *);
262static void	 adv_adj_scsiq_endian(struct adv_scsi_q *);
263#endif
264static void	 adv_get_q_info(struct adv_softc *adv, u_int16_t s_addr,
265				u_int16_t *inbuf, int words);
266static u_int	 adv_get_num_free_queues(struct adv_softc *adv, u_int8_t n_qs);
267static u_int8_t  adv_alloc_free_queues(struct adv_softc *adv,
268				       u_int8_t free_q_head, u_int8_t n_free_q);
269static u_int8_t  adv_alloc_free_queue(struct adv_softc *adv,
270				      u_int8_t free_q_head);
271static int	 adv_send_scsi_queue(struct adv_softc *adv,
272				     struct adv_scsi_q *scsiq,
273				     u_int8_t n_q_required);
274static void	 adv_put_ready_sg_list_queue(struct adv_softc *adv,
275					     struct adv_scsi_q *scsiq,
276					     u_int q_no);
277static void	 adv_put_ready_queue(struct adv_softc *adv,
278				     struct adv_scsi_q *scsiq, u_int q_no);
279static void	 adv_put_scsiq(struct adv_softc *adv, u_int16_t s_addr,
280			       u_int16_t *buffer, int words);
281
282/* Messages */
283static void	 adv_handle_extmsg_in(struct adv_softc *adv,
284				      u_int16_t halt_q_addr, u_int8_t q_cntl,
285				      target_bit_vector target_id,
286				      int tid);
287static void	 adv_msgout_sdtr(struct adv_softc *adv, u_int8_t sdtr_period,
288				 u_int8_t sdtr_offset);
289static void	 adv_set_sdtr_reg_at_id(struct adv_softc *adv, int id,
290					u_int8_t sdtr_data);
291
292
293/* Exported functions first */
294
/*
 * CAM async event callback.  On device arrival, apply quirk-table
 * entries and re-validate the current sync rate; on device loss,
 * re-arm the async-transfer fix for that target and revert it to
 * async transfers.
 */
void
advasync(void *callback_arg, u_int32_t code, struct cam_path *path, void *arg)
{
	struct adv_softc *adv;

	adv = (struct adv_softc *)callback_arg;
	switch (code) {
	case AC_FOUND_DEVICE:
	{
		struct ccb_getdev *cgd;
		target_bit_vector target_mask;
		int num_entries;
        	caddr_t match;
		struct adv_quirk_entry *entry;
		struct adv_target_transinfo* tinfo;

		cgd = (struct ccb_getdev *)arg;

		target_mask = ADV_TID_TO_TARGET_MASK(cgd->ccb_h.target_id);

		/*
		 * Find the first quirk entry matching this device's
		 * inquiry data.  The table ends in a full wildcard,
		 * so a match is guaranteed.
		 */
		num_entries = sizeof(adv_quirk_table)/sizeof(*adv_quirk_table);
		match = cam_quirkmatch((caddr_t)&cgd->inq_data,
				       (caddr_t)adv_quirk_table,
				       num_entries, sizeof(*adv_quirk_table),
				       scsi_inquiry_match);

		if (match == NULL)
			panic("advasync: device didn't match wildcard entry!!");

		entry = (struct adv_quirk_entry *)match;

		if (adv->bug_fix_control & ADV_BUG_FIX_ASYN_USE_SYN) {
			if ((entry->quirks & ADV_QUIRK_FIX_ASYN_XFER_ALWAYS)!=0)
				adv->fix_asyn_xfer_always |= target_mask;
			else
				adv->fix_asyn_xfer_always &= ~target_mask;
			/*
			 * We start out life with all bits set and clear them
			 * after we've determined that the fix isn't necessary.
			 * It may well be that we've already cleared a target
			 * before the full inquiry session completes, so don't
			 * gratuitously set a target bit even if it has this
			 * quirk.  But, if the quirk exonerates a device, clear
			 * the bit now.
			 */
			if ((entry->quirks & ADV_QUIRK_FIX_ASYN_XFER) == 0)
				adv->fix_asyn_xfer &= ~target_mask;
		}
		/*
		 * Reset our sync settings now that we've determined
		 * what quirks are in effect for the device.
		 */
		tinfo = &adv->tinfo[cgd->ccb_h.target_id];
		adv_set_syncrate(adv, cgd->ccb_h.path,
				 cgd->ccb_h.target_id,
				 tinfo->current.period,
				 tinfo->current.offset,
				 ADV_TRANS_CUR);
		break;
	}
	case AC_LOST_DEVICE:
	{
		u_int target_mask;

		if (adv->bug_fix_control & ADV_BUG_FIX_ASYN_USE_SYN) {
			target_mask = 0x01 << xpt_path_target_id(path);
			adv->fix_asyn_xfer |= target_mask;
		}

		/*
		 * Revert to async transfers
		 * for the next device.
		 */
		adv_set_syncrate(adv, /*path*/NULL,
				 xpt_path_target_id(path),
				 /*period*/0,
				 /*offset*/0,
				 ADV_TRANS_GOAL|ADV_TRANS_CUR);
	}
	/* FALLTHROUGH (harmless; default only breaks) */
	default:
		break;
	}
}
378
379void
380adv_set_bank(struct adv_softc *adv, u_int8_t bank)
381{
382	u_int8_t control;
383
384	/*
385	 * Start out with the bank reset to 0
386	 */
387	control = ADV_INB(adv, ADV_CHIP_CTRL)
388		  &  (~(ADV_CC_SINGLE_STEP | ADV_CC_TEST
389			| ADV_CC_DIAG | ADV_CC_SCSI_RESET
390			| ADV_CC_CHIP_RESET | ADV_CC_BANK_ONE));
391	if (bank == 1) {
392		control |= ADV_CC_BANK_ONE;
393	} else if (bank == 2) {
394		control |= ADV_CC_DIAG | ADV_CC_BANK_ONE;
395	}
396	ADV_OUTB(adv, ADV_CHIP_CTRL, control);
397}
398
399u_int8_t
400adv_read_lram_8(struct adv_softc *adv, u_int16_t addr)
401{
402	u_int8_t   byte_data;
403	u_int16_t  word_data;
404
405	/*
406	 * LRAM is accessed on 16bit boundaries.
407	 */
408	ADV_OUTW(adv, ADV_LRAM_ADDR, addr & 0xFFFE);
409	word_data = ADV_INW(adv, ADV_LRAM_DATA);
410	if (addr & 1) {
411#if BYTE_ORDER == BIG_ENDIAN
412		byte_data = (u_int8_t)(word_data & 0xFF);
413#else
414		byte_data = (u_int8_t)((word_data >> 8) & 0xFF);
415#endif
416	} else {
417#if BYTE_ORDER == BIG_ENDIAN
418		byte_data = (u_int8_t)((word_data >> 8) & 0xFF);
419#else
420		byte_data = (u_int8_t)(word_data & 0xFF);
421#endif
422	}
423	return (byte_data);
424}
425
426void
427adv_write_lram_8(struct adv_softc *adv, u_int16_t addr, u_int8_t value)
428{
429	u_int16_t word_data;
430
431	word_data = adv_read_lram_16(adv, addr & 0xFFFE);
432	if (addr & 1) {
433		word_data &= 0x00FF;
434		word_data |= (((u_int8_t)value << 8) & 0xFF00);
435	} else {
436		word_data &= 0xFF00;
437		word_data |= ((u_int8_t)value & 0x00FF);
438	}
439	adv_write_lram_16(adv, addr & 0xFFFE, word_data);
440}
441
442
/*
 * Read a 16bit word from LRAM: latch the address, then read the
 * data port.
 */
u_int16_t
adv_read_lram_16(struct adv_softc *adv, u_int16_t addr)
{
	ADV_OUTW(adv, ADV_LRAM_ADDR, addr);
	return (ADV_INW(adv, ADV_LRAM_DATA));
}
449
/*
 * Write a 16bit word to LRAM: latch the address, then write the
 * data port.
 */
void
adv_write_lram_16(struct adv_softc *adv, u_int16_t addr, u_int16_t value)
{
	ADV_OUTW(adv, ADV_LRAM_ADDR, addr);
	ADV_OUTW(adv, ADV_LRAM_DATA, value);
}
456
457/*
458 * Determine if there is a board at "iobase" by looking
459 * for the AdvanSys signatures.  Return 1 if a board is
460 * found, 0 otherwise.
461 */
462int
463adv_find_signature(bus_space_tag_t tag, bus_space_handle_t bsh)
464{
465	u_int16_t signature;
466
467	if (bus_space_read_1(tag, bsh, ADV_SIGNATURE_BYTE) == ADV_1000_ID1B) {
468		signature = bus_space_read_2(tag, bsh, ADV_SIGNATURE_WORD);
469		if ((signature == ADV_1000_ID0W)
470		 || (signature == ADV_1000_ID0W_FIX))
471			return (1);
472	}
473	return (0);
474}
475
476void
477adv_lib_init(struct adv_softc *adv)
478{
479	if ((adv->type & ADV_ULTRA) != 0) {
480		adv->sdtr_period_tbl = adv_sdtr_period_tbl_ultra;
481		adv->sdtr_period_tbl_size = sizeof(adv_sdtr_period_tbl_ultra);
482	} else {
483		adv->sdtr_period_tbl = adv_sdtr_period_tbl;
484		adv->sdtr_period_tbl_size = sizeof(adv_sdtr_period_tbl);
485	}
486}
487
488u_int16_t
489adv_get_eeprom_config(struct adv_softc *adv, struct
490		      adv_eeprom_config  *eeprom_config)
491{
492	u_int16_t	sum;
493	u_int16_t	*wbuf;
494	u_int8_t	cfg_beg;
495	u_int8_t	cfg_end;
496	u_int8_t	s_addr;
497
498	wbuf = (u_int16_t *)eeprom_config;
499	sum = 0;
500
501	for (s_addr = 0; s_addr < 2; s_addr++, wbuf++) {
502		*wbuf = adv_read_eeprom_16(adv, s_addr);
503		sum += *wbuf;
504	}
505
506	if (adv->type & ADV_VL) {
507		cfg_beg = ADV_EEPROM_CFG_BEG_VL;
508		cfg_end = ADV_EEPROM_MAX_ADDR_VL;
509	} else {
510		cfg_beg = ADV_EEPROM_CFG_BEG;
511		cfg_end = ADV_EEPROM_MAX_ADDR;
512	}
513
514	for (s_addr = cfg_beg; s_addr <= (cfg_end - 1); s_addr++, wbuf++) {
515		*wbuf = adv_read_eeprom_16(adv, s_addr);
516		sum += *wbuf;
517#ifdef ADV_DEBUG_EEPROM
518		printf("Addr 0x%x: 0x%04x\n", s_addr, *wbuf);
519#endif
520	}
521	*wbuf = adv_read_eeprom_16(adv, s_addr);
522	return (sum);
523}
524
525int
526adv_set_eeprom_config(struct adv_softc *adv,
527		      struct adv_eeprom_config *eeprom_config)
528{
529	int	retry;
530
531	retry = 0;
532	while (1) {
533		if (adv_set_eeprom_config_once(adv, eeprom_config) == 0) {
534			break;
535		}
536		if (++retry > ADV_EEPROM_MAX_RETRY) {
537			break;
538		}
539	}
540	return (retry > ADV_EEPROM_MAX_RETRY);
541}
542
/*
 * Reset the chip (and, if reset_bus is set, the SCSI bus too),
 * leaving the sequencer halted.  Returns non-zero if the chip is
 * halted afterwards, as expected.  The register write ordering below
 * is a fixed hardware sequence -- do not reorder.
 */
int
adv_reset_chip(struct adv_softc *adv, int reset_bus)
{
	adv_stop_chip(adv);
	/* Assert chip reset while halted; add SCSI reset if requested. */
	ADV_OUTB(adv, ADV_CHIP_CTRL, ADV_CC_CHIP_RESET | ADV_CC_HALT
				     | (reset_bus ? ADV_CC_SCSI_RESET : 0));
	DELAY(60);

	adv_set_chip_ih(adv, ADV_INS_RFLAG_WTM);
	adv_set_chip_ih(adv, ADV_INS_HALT);

	if (reset_bus)
		/* Deassert SCSI reset while keeping chip reset asserted. */
		ADV_OUTB(adv, ADV_CHIP_CTRL, ADV_CC_CHIP_RESET | ADV_CC_HALT);

	/* Release chip reset, leaving the sequencer halted. */
	ADV_OUTB(adv, ADV_CHIP_CTRL, ADV_CC_HALT);
	if (reset_bus)
		/* Let the bus settle after the SCSI reset (200ms). */
		DELAY(200 * 1000);

	/* Clear any latched SCSI-reset interrupt and the status word. */
	ADV_OUTW(adv, ADV_CHIP_STATUS, ADV_CIW_CLR_SCSI_RESET_INT);
	ADV_OUTW(adv, ADV_CHIP_STATUS, 0);
	return (adv_is_chip_halted(adv));
}
565
566int
567adv_test_external_lram(struct adv_softc* adv)
568{
569	u_int16_t	q_addr;
570	u_int16_t	saved_value;
571	int		success;
572
573	success = 0;
574
575	q_addr = ADV_QNO_TO_QADDR(241);
576	saved_value = adv_read_lram_16(adv, q_addr);
577	if (adv_write_and_verify_lram_16(adv, q_addr, 0x55AA) == 0) {
578		success = 1;
579		adv_write_lram_16(adv, q_addr, saved_value);
580	}
581	return (success);
582}
583
584
/*
 * Initialize LRAM and download the sequencer microcode.
 * Returns 0 on success, 1 on failure.  Note interrupts remain
 * disabled on the failure paths.
 */
int
adv_init_lram_and_mcode(struct adv_softc *adv)
{
	u_int32_t	retval;

	adv_disable_interrupt(adv);

	adv_init_lram(adv);

	/* adv_load_microcode() returns the checksum it computed. */
	retval = adv_load_microcode(adv, 0, (u_int16_t *)adv_mcode,
				    adv_mcode_size);
	if (retval != adv_mcode_chksum) {
		printf("adv%d: Microcode download failed checksum!\n",
		       adv->unit);
		return (1);
	}

	if (adv_init_microcode_var(adv) != 0)
		return (1);

	adv_enable_interrupt(adv);
	return (0);
}
608
609u_int8_t
610adv_get_chip_irq(struct adv_softc *adv)
611{
612	u_int16_t	cfg_lsw;
613	u_int8_t	chip_irq;
614
615	cfg_lsw = ADV_INW(adv, ADV_CONFIG_LSW);
616
617	if ((adv->type & ADV_VL) != 0) {
618		chip_irq = (u_int8_t)(((cfg_lsw >> 2) & 0x07));
619		if ((chip_irq == 0) ||
620		    (chip_irq == 4) ||
621		    (chip_irq == 7)) {
622			return (0);
623		}
624		return (chip_irq + (ADV_MIN_IRQ_NO - 1));
625	}
626	chip_irq = (u_int8_t)(((cfg_lsw >> 2) & 0x03));
627	if (chip_irq == 3)
628		chip_irq += 2;
629	return (chip_irq + ADV_MIN_IRQ_NO);
630}
631
/*
 * Program the adapter's IRQ selection (VL and ISA adapters only) and
 * return the IRQ the hardware reports afterwards via
 * adv_get_chip_irq().
 */
u_int8_t
adv_set_chip_irq(struct adv_softc *adv, u_int8_t irq_no)
{
	u_int16_t	cfg_lsw;

	if ((adv->type & ADV_VL) != 0) {
		if (irq_no != 0) {
			if ((irq_no < ADV_MIN_IRQ_NO)
			 || (irq_no > ADV_MAX_IRQ_NO)) {
				/* Out of range: disable the IRQ. */
				irq_no = 0;
			} else {
				/* Convert to the chip's encoding. */
				irq_no -= ADV_MIN_IRQ_NO - 1;
			}
		}
		/*
		 * NOTE(review): the intermediate write of code 0x0010
		 * followed by an IRQ-act toggle before programming the
		 * real value is an undocumented hardware sequence
		 * inherited from the vendor driver -- do not reorder.
		 */
		cfg_lsw = ADV_INW(adv, ADV_CONFIG_LSW) & 0xFFE3;
		cfg_lsw |= 0x0010;
		ADV_OUTW(adv, ADV_CONFIG_LSW, cfg_lsw);
		adv_toggle_irq_act(adv);

		cfg_lsw = ADV_INW(adv, ADV_CONFIG_LSW) & 0xFFE0;
		cfg_lsw |= (irq_no & 0x07) << 2;
		ADV_OUTW(adv, ADV_CONFIG_LSW, cfg_lsw);
		adv_toggle_irq_act(adv);
	} else if ((adv->type & ADV_ISA) != 0) {
		/* IRQ 15 shares an encoding slot; see adv_get_chip_irq(). */
		if (irq_no == 15)
			irq_no -= 2;
		irq_no -= ADV_MIN_IRQ_NO;
		cfg_lsw = ADV_INW(adv, ADV_CONFIG_LSW) & 0xFFF3;
		cfg_lsw |= (irq_no & 0x03) << 2;
		ADV_OUTW(adv, ADV_CONFIG_LSW, cfg_lsw);
	}
	return (adv_get_chip_irq(adv));
}
665
666void
667adv_set_chip_scsiid(struct adv_softc *adv, int new_id)
668{
669	u_int16_t cfg_lsw;
670
671	cfg_lsw = ADV_INW(adv, ADV_CONFIG_LSW);
672	if (ADV_CONFIG_SCSIID(cfg_lsw) == new_id)
673		return;
674    	cfg_lsw &= ~ADV_CFG_LSW_SCSIID;
675	cfg_lsw |= (new_id & ADV_MAX_TID) << ADV_CFG_LSW_SCSIID_SHIFT;
676	ADV_OUTW(adv, ADV_CONFIG_LSW, cfg_lsw);
677}
678
/*
 * Prepare a SCSI request for the microcode, apply the various
 * chip-bug workarounds, and dispatch it if enough microcode queues
 * are free.  Returns 0 once sent, 1 if the request could not be
 * queued (caller must retry later).
 */
int
adv_execute_scsi_queue(struct adv_softc *adv, struct adv_scsi_q *scsiq,
		       u_int32_t datalen)
{
	struct		adv_target_transinfo* tinfo;
	u_int32_t	*p_data_addr;
	u_int32_t	*p_data_bcount;
	int		disable_syn_offset_one_fix;
	int		retval;
	u_int		n_q_required;
	u_int32_t	addr;
	u_int8_t	sg_entry_cnt;
	u_int8_t	target_ix;
	u_int8_t	sg_entry_cnt_minus_one;
	u_int8_t	tid_no;

	scsiq->q1.q_no = 0;
	retval = 1;  /* Default to error case */
	target_ix = scsiq->q2.target_ix;
	tid_no = ADV_TIX_TO_TID(target_ix);
	tinfo = &adv->tinfo[tid_no];

	if (scsiq->cdbptr[0] == REQUEST_SENSE) {
		/* Renegotiate if appropriate. */
		adv_set_syncrate(adv, /*struct cam_path */NULL,
				 tid_no, /*period*/0, /*offset*/0,
				 ADV_TRANS_CUR);
		if (tinfo->current.period != tinfo->goal.period) {
			adv_msgout_sdtr(adv, tinfo->goal.period,
					tinfo->goal.offset);
			scsiq->q1.cntl |= (QC_MSG_OUT | QC_URGENT);
		}
	}

	if ((scsiq->q1.cntl & QC_SG_HEAD) != 0) {
		/* S/G request: queue allocation scales with entry count. */
		sg_entry_cnt = scsiq->sg_head->entry_cnt;
		sg_entry_cnt_minus_one = sg_entry_cnt - 1;

#ifdef DIAGNOSTIC
		if (sg_entry_cnt <= 1)
			panic("adv_execute_scsi_queue: Queue "
			      "with QC_SG_HEAD set but %d segs.", sg_entry_cnt);

		if (sg_entry_cnt > ADV_MAX_SG_LIST)
			panic("adv_execute_scsi_queue: "
			      "Queue with too many segs.");

		if ((adv->type & (ADV_ISA | ADV_VL | ADV_EISA)) != 0) {
			int i;

			/* Bus-master limitation: every S/G element but
			 * the last must end on a 4-byte boundary. */
			for (i = 0; i < sg_entry_cnt_minus_one; i++) {
				addr = scsiq->sg_head->sg_list[i].addr +
				       scsiq->sg_head->sg_list[i].bytes;

				if ((addr & 0x0003) != 0)
					panic("adv_execute_scsi_queue: SG "
					      "with odd address or byte count");
			}
		}
#endif
		/* Point at the final S/G element for the alignment
		 * fixup below. */
		p_data_addr =
		    &scsiq->sg_head->sg_list[sg_entry_cnt_minus_one].addr;
		p_data_bcount =
		    &scsiq->sg_head->sg_list[sg_entry_cnt_minus_one].bytes;

		n_q_required = adv_sgcount_to_qcount(sg_entry_cnt);
		scsiq->sg_head->queue_cnt = n_q_required - 1;
	} else {
		p_data_addr = &scsiq->q1.data_addr;
		p_data_bcount = &scsiq->q1.data_cnt;
		n_q_required = 1;
	}

	disable_syn_offset_one_fix = FALSE;

	/*
	 * The "offset one" async fix must be disabled for transfers
	 * that are too short or for commands known to misbehave with
	 * it; see ASYN_SDTR_DATA_FIX_PCI_REV_AB.
	 */
	if ((adv->fix_asyn_xfer & scsiq->q1.target_id) != 0
	 && (adv->fix_asyn_xfer_always & scsiq->q1.target_id) == 0) {

		if (datalen != 0) {
			if (datalen < 512) {
				disable_syn_offset_one_fix = TRUE;
			} else {
				if (scsiq->cdbptr[0] == INQUIRY
				 || scsiq->cdbptr[0] == REQUEST_SENSE
				 || scsiq->cdbptr[0] == READ_CAPACITY
				 || scsiq->cdbptr[0] == MODE_SELECT_6
				 || scsiq->cdbptr[0] == MODE_SENSE_6
				 || scsiq->cdbptr[0] == MODE_SENSE_10
				 || scsiq->cdbptr[0] == MODE_SELECT_10
				 || scsiq->cdbptr[0] == READ_TOC) {
					disable_syn_offset_one_fix = TRUE;
				}
			}
		}
	}

	if (disable_syn_offset_one_fix) {
		/* Untagged, non-disconnecting execution for this request. */
		scsiq->q2.tag_code &=
		    ~(MSG_SIMPLE_Q_TAG|MSG_HEAD_OF_Q_TAG|MSG_ORDERED_Q_TAG);
		scsiq->q2.tag_code |= (ADV_TAG_FLAG_DISABLE_ASYN_USE_SYN_FIX
				     | ADV_TAG_FLAG_DISABLE_DISCONNECT);
	}

	/*
	 * Work around a DMA-write bug on reads whose final element
	 * does not end on a 4-byte boundary: trim the trailing bytes
	 * here and let the completion path account for them.
	 */
	if ((adv->bug_fix_control & ADV_BUG_FIX_IF_NOT_DWB) != 0
	 && (scsiq->cdbptr[0] == READ_10 || scsiq->cdbptr[0] == READ_6)) {
		u_int8_t extra_bytes;

		addr = *p_data_addr + *p_data_bcount;
		extra_bytes = addr & 0x0003;
		if (extra_bytes != 0
		 && ((scsiq->q1.cntl & QC_SG_HEAD) != 0
		  || (scsiq->q1.data_cnt & 0x01FF) == 0)) {
			scsiq->q2.tag_code |= ADV_TAG_FLAG_EXTRA_BYTES;
			scsiq->q1.extra_bytes = extra_bytes;
			*p_data_bcount -= extra_bytes;
		}
	}

	/* Dispatch if enough queues are free; QC_URGENT bypasses the
	 * free-queue check. */
	if ((adv_get_num_free_queues(adv, n_q_required) >= n_q_required)
	 || ((scsiq->q1.cntl & QC_URGENT) != 0))
		retval = adv_send_scsi_queue(adv, scsiq, n_q_required);

	return (retval);
}
803
804
/*
 * Copy a completed request's status information out of LRAM into
 * *scsiq.  Returns the number of S/G queues the request consumed so
 * the caller can free them.
 */
u_int8_t
adv_copy_lram_doneq(struct adv_softc *adv, u_int16_t q_addr,
		    struct adv_q_done_info *scsiq, u_int32_t max_dma_count)
{
	u_int16_t val;
	u_int8_t  sg_queue_cnt;

	/* Bulk-copy the d2/d3 portions of the done info. */
	adv_get_q_info(adv, q_addr + ADV_SCSIQ_DONE_INFO_BEG,
		       (u_int16_t *)scsiq,
		       (sizeof(scsiq->d2) + sizeof(scsiq->d3)) / 2);

#if BYTE_ORDER == BIG_ENDIAN
	adv_adj_endian_qdone_info(scsiq);
#endif

	/* Each LRAM word packs two byte-sized fields. */
	val = adv_read_lram_16(adv, q_addr + ADV_SCSIQ_B_STATUS);
	scsiq->q_status = val & 0xFF;
	scsiq->q_no = (val >> 8) & 0XFF;

	val = adv_read_lram_16(adv, q_addr + ADV_SCSIQ_B_CNTL);
	scsiq->cntl = val & 0xFF;
	sg_queue_cnt = (val >> 8) & 0xFF;

	val = adv_read_lram_16(adv,q_addr + ADV_SCSIQ_B_SENSE_LEN);
	scsiq->sense_len = val & 0xFF;
	scsiq->extra_bytes = (val >> 8) & 0xFF;

	/*
	 * Due to a bug in accessing LRAM on the 940UA, the residual
	 * is split into separate high and low 16bit quantities.
	 */
	scsiq->remain_bytes =
	    adv_read_lram_16(adv, q_addr + ADV_SCSIQ_DW_REMAIN_XFER_CNT);
	scsiq->remain_bytes |=
	    adv_read_lram_16(adv, q_addr + ADV_SCSIQ_W_ALT_DC1) << 16;

	/*
	 * XXX Is this just a safeguard or will the counter really
	 * have bogus upper bits?
	 */
	scsiq->remain_bytes &= max_dma_count;

	return (sg_queue_cnt);
}
849
850int
851adv_start_chip(struct adv_softc *adv)
852{
853	ADV_OUTB(adv, ADV_CHIP_CTRL, 0);
854	if ((ADV_INW(adv, ADV_CHIP_STATUS) & ADV_CSW_HALTED) != 0)
855		return (0);
856	return (1);
857}
858
859int
860adv_stop_execution(struct adv_softc *adv)
861{
862	int count;
863
864	count = 0;
865	if (adv_read_lram_8(adv, ADV_STOP_CODE_B) == 0) {
866		adv_write_lram_8(adv, ADV_STOP_CODE_B,
867				 ADV_STOP_REQ_RISC_STOP);
868		do {
869			if (adv_read_lram_8(adv, ADV_STOP_CODE_B) &
870				ADV_STOP_ACK_RISC_STOP) {
871				return (1);
872			}
873			DELAY(1000);
874		} while (count++ < 20);
875	}
876	return (0);
877}
878
879int
880adv_is_chip_halted(struct adv_softc *adv)
881{
882	if ((ADV_INW(adv, ADV_CHIP_STATUS) & ADV_CSW_HALTED) != 0) {
883		if ((ADV_INB(adv, ADV_CHIP_CTRL) & ADV_CC_HALT) != 0) {
884			return (1);
885		}
886	}
887	return (0);
888}
889
/*
 * Acknowledge an interrupt from the RISC.
 *
 * XXX The numeric constants (the 0x7FFF spin bound and the 4-write
 * ack limit) are inherited from the vendor driver and undocumented;
 * presumably they bound worst-case hardware settle times -- confirm.
 */
void
adv_ack_interrupt(struct adv_softc *adv)
{
	u_int8_t	host_flag;
	u_int8_t	risc_flag;
	int		loop;

	/* Wait for the microcode to finish raising the interrupt. */
	loop = 0;
	do {
		risc_flag = adv_read_lram_8(adv, ADVV_RISC_FLAG_B);
		if (loop++ > 0x7FFF) {
			break;
		}
	} while ((risc_flag & ADV_RISC_FLAG_GEN_INT) != 0);

	/* Tell the microcode we are servicing the interrupt. */
	host_flag = adv_read_lram_8(adv, ADVV_HOST_FLAG_B);
	adv_write_lram_8(adv, ADVV_HOST_FLAG_B,
			 host_flag | ADV_HOST_FLAG_ACK_INT);

	/* Ack at the chip until the pending bit clears (bounded). */
	ADV_OUTW(adv, ADV_CHIP_STATUS, ADV_CIW_INT_ACK);
	loop = 0;
	while (ADV_INW(adv, ADV_CHIP_STATUS) & ADV_CSW_INT_PENDING) {
		ADV_OUTW(adv, ADV_CHIP_STATUS, ADV_CIW_INT_ACK);
		if (loop++ > 3) {
			break;
		}
	}

	/* Restore the original host flags. */
	adv_write_lram_8(adv, ADVV_HOST_FLAG_B, host_flag);
}
924
/*
 * Handle all conditions that may halt the chip waiting
 * for us to intervene.  The halt code in LRAM identifies the
 * condition; the halted queue's LRAM address supplies per-request
 * context.  The halt code is cleared on exit to resume the chip.
 */
void
adv_isr_chip_halted(struct adv_softc *adv)
{
	u_int16_t	  int_halt_code;
	u_int16_t	  halt_q_addr;
	target_bit_vector target_mask;
	target_bit_vector scsi_busy;
	u_int8_t	  halt_qp;
	u_int8_t	  target_ix;
	u_int8_t	  q_cntl;
	u_int8_t	  tid_no;

	/* Gather the halt reason and the halted request's context. */
	int_halt_code = adv_read_lram_16(adv, ADVV_HALTCODE_W);
	halt_qp = adv_read_lram_8(adv, ADVV_CURCDB_B);
	halt_q_addr = ADV_QNO_TO_QADDR(halt_qp);
	target_ix = adv_read_lram_8(adv, halt_q_addr + ADV_SCSIQ_B_TARGET_IX);
	q_cntl = adv_read_lram_8(adv, halt_q_addr + ADV_SCSIQ_B_CNTL);
	tid_no = ADV_TIX_TO_TID(target_ix);
	target_mask = ADV_TID_TO_TARGET_MASK(tid_no);
	if (int_halt_code == ADV_HALT_DISABLE_ASYN_USE_SYN_FIX) {
		/*
		 * Temporarily disable the async fix by removing
		 * this target from the list of affected targets,
		 * setting our async rate, and then putting us
		 * back into the mask.
		 */
		adv->fix_asyn_xfer &= ~target_mask;
		adv_set_syncrate(adv, /*struct cam_path */NULL,
				 tid_no, /*period*/0, /*offset*/0,
				 ADV_TRANS_ACTIVE);
		adv->fix_asyn_xfer |= target_mask;
	} else if (int_halt_code == ADV_HALT_ENABLE_ASYN_USE_SYN_FIX) {
		/* Re-apply the async fix rate for this target. */
		adv_set_syncrate(adv, /*struct cam_path */NULL,
				 tid_no, /*period*/0, /*offset*/0,
				 ADV_TRANS_ACTIVE);
	} else if (int_halt_code == ADV_HALT_EXTMSG_IN) {
		adv_handle_extmsg_in(adv, halt_q_addr, q_cntl,
				     target_mask, tid_no);
	} else if (int_halt_code == ADV_HALT_CHK_CONDITION) {
		/* Target returned CHECK CONDITION: set up an automatic
		 * REQUEST SENSE and requeue everything else. */
		struct	  adv_target_transinfo* tinfo;
		union	  ccb *ccb;
		u_int32_t cinfo_index;
		u_int8_t  tag_code;
		u_int8_t  q_status;

		tinfo = &adv->tinfo[tid_no];
		q_cntl |= QC_REQ_SENSE;

		/* Renegotiate if appropriate. */
		adv_set_syncrate(adv, /*struct cam_path */NULL,
				 tid_no, /*period*/0, /*offset*/0,
				 ADV_TRANS_CUR);
		if (tinfo->current.period != tinfo->goal.period) {
			adv_msgout_sdtr(adv, tinfo->goal.period,
					tinfo->goal.offset);
			q_cntl |= QC_MSG_OUT;
		}
		adv_write_lram_8(adv, halt_q_addr + ADV_SCSIQ_B_CNTL, q_cntl);

		/* Don't tag request sense commands */
		tag_code = adv_read_lram_8(adv,
					   halt_q_addr + ADV_SCSIQ_B_TAG_CODE);
		tag_code &=
		    ~(MSG_SIMPLE_Q_TAG|MSG_HEAD_OF_Q_TAG|MSG_ORDERED_Q_TAG);

		if ((adv->fix_asyn_xfer & target_mask) != 0
		 && (adv->fix_asyn_xfer_always & target_mask) == 0) {
			tag_code |= (ADV_TAG_FLAG_DISABLE_DISCONNECT
				 | ADV_TAG_FLAG_DISABLE_ASYN_USE_SYN_FIX);
		}
		adv_write_lram_8(adv, halt_q_addr + ADV_SCSIQ_B_TAG_CODE,
				 tag_code);
		/* Mark the queue ready so the sense fetch runs next. */
		q_status = adv_read_lram_8(adv,
					   halt_q_addr + ADV_SCSIQ_B_STATUS);
		q_status |= (QS_READY | QS_BUSY);
		adv_write_lram_8(adv, halt_q_addr + ADV_SCSIQ_B_STATUS,
				 q_status);
		/*
		 * Freeze the devq until we can handle the sense condition.
		 */
		cinfo_index =
		    adv_read_lram_32(adv, halt_q_addr + ADV_SCSIQ_D_CINFO_IDX);
		ccb = adv->ccb_infos[cinfo_index].ccb;
		xpt_freeze_devq(ccb->ccb_h.path, /*count*/1);
		ccb->ccb_h.status |= CAM_DEV_QFRZN;
		adv_abort_ccb(adv, tid_no, ADV_TIX_TO_LUN(target_ix),
			      /*ccb*/NULL, CAM_REQUEUE_REQ,
			      /*queued_only*/TRUE);
		scsi_busy = adv_read_lram_8(adv, ADVV_SCSIBUSY_B);
		scsi_busy &= ~target_mask;
		adv_write_lram_8(adv, ADVV_SCSIBUSY_B, scsi_busy);
		/*
		 * Ensure we have enough time to actually
		 * retrieve the sense.
		 */
		untimeout(adv_timeout, (caddr_t)ccb, ccb->ccb_h.timeout_ch);
		ccb->ccb_h.timeout_ch =
		    timeout(adv_timeout, (caddr_t)ccb, 5 * hz);
	} else if (int_halt_code == ADV_HALT_SDTR_REJECTED) {
		/* Target rejected our SDTR message: fall back to async. */
		struct	ext_msg out_msg;

		adv_read_lram_16_multi(adv, ADVV_MSGOUT_BEG,
				       (u_int16_t *) &out_msg,
				       sizeof(out_msg)/2);

		if ((out_msg.msg_type == MSG_EXTENDED)
		 && (out_msg.msg_len == MSG_EXT_SDTR_LEN)
		 && (out_msg.msg_req == MSG_EXT_SDTR)) {

			/* Revert to Async */
			adv_set_syncrate(adv, /*struct cam_path */NULL,
					 tid_no, /*period*/0, /*offset*/0,
					 ADV_TRANS_GOAL|ADV_TRANS_ACTIVE);
		}
		q_cntl &= ~QC_MSG_OUT;
		adv_write_lram_8(adv, halt_q_addr + ADV_SCSIQ_B_CNTL, q_cntl);
	} else if (int_halt_code == ADV_HALT_SS_QUEUE_FULL) {
		/* Target reported QUEUE FULL: freeze and requeue. */
		u_int8_t scsi_status;
		union ccb *ccb;
		u_int32_t cinfo_index;

		scsi_status = adv_read_lram_8(adv, halt_q_addr
					      + ADV_SCSIQ_SCSI_STATUS);
		cinfo_index =
		    adv_read_lram_32(adv, halt_q_addr + ADV_SCSIQ_D_CINFO_IDX);
		ccb = adv->ccb_infos[cinfo_index].ccb;
		xpt_freeze_devq(ccb->ccb_h.path, /*count*/1);
		ccb->ccb_h.status |= CAM_DEV_QFRZN|CAM_SCSI_STATUS_ERROR;
		ccb->csio.scsi_status = SCSI_STATUS_QUEUE_FULL;
		adv_abort_ccb(adv, tid_no, ADV_TIX_TO_LUN(target_ix),
			      /*ccb*/NULL, CAM_REQUEUE_REQ,
			      /*queued_only*/TRUE);
		scsi_busy = adv_read_lram_8(adv, ADVV_SCSIBUSY_B);
		scsi_busy &= ~target_mask;
		adv_write_lram_8(adv, ADVV_SCSIBUSY_B, scsi_busy);
	} else {
		printf("Unhandled Halt Code %x\n", int_halt_code);
	}
	/* Clearing the halt code lets the microcode continue. */
	adv_write_lram_16(adv, ADVV_HALTCODE_W, 0);
}
1069
1070void
1071adv_sdtr_to_period_offset(struct adv_softc *adv,
1072			  u_int8_t sync_data, u_int8_t *period,
1073			  u_int8_t *offset, int tid)
1074{
1075	if (adv->fix_asyn_xfer & ADV_TID_TO_TARGET_MASK(tid)
1076	 && (sync_data == ASYN_SDTR_DATA_FIX_PCI_REV_AB)) {
1077		*period = *offset = 0;
1078	} else {
1079		*period = adv->sdtr_period_tbl[((sync_data >> 4) & 0xF)];
1080		*offset = sync_data & 0xF;
1081	}
1082}
1083
1084void
1085adv_set_syncrate(struct adv_softc *adv, struct cam_path *path,
1086		 u_int tid, u_int period, u_int offset, u_int type)
1087{
1088	struct adv_target_transinfo* tinfo;
1089	u_int old_period;
1090	u_int old_offset;
1091	u_int8_t sdtr_data;
1092
1093	tinfo = &adv->tinfo[tid];
1094
1095	/* Filter our input */
1096	sdtr_data = adv_period_offset_to_sdtr(adv, &period,
1097					      &offset, tid);
1098
1099	old_period = tinfo->current.period;
1100	old_offset = tinfo->current.offset;
1101
1102	if ((type & ADV_TRANS_CUR) != 0
1103	 && ((old_period != period || old_offset != offset)
1104	  || period == 0 || offset == 0) /*Changes in asyn fix settings*/) {
1105		int s;
1106		int halted;
1107
1108		s = splcam();
1109		halted = adv_is_chip_halted(adv);
1110		if (halted == 0)
1111			/* Must halt the chip first */
1112			adv_host_req_chip_halt(adv);
1113
1114		/* Update current hardware settings */
1115		adv_set_sdtr_reg_at_id(adv, tid, sdtr_data);
1116
1117		/*
1118		 * If a target can run in sync mode, we don't need
1119		 * to check it for sync problems.
1120		 */
1121		if (offset != 0)
1122			adv->fix_asyn_xfer &= ~ADV_TID_TO_TARGET_MASK(tid);
1123
1124		if (halted == 0)
1125			/* Start the chip again */
1126			adv_start_chip(adv);
1127
1128		splx(s);
1129		tinfo->current.period = period;
1130		tinfo->current.offset = offset;
1131
1132		if (path != NULL) {
1133			/*
1134			 * Tell the SCSI layer about the
1135			 * new transfer parameters.
1136			 */
1137			struct	ccb_trans_settings neg;
1138			memset(&neg, 0, sizeof (neg));
1139#ifdef	CAM_NEW_TRAN_CODE
1140			struct ccb_trans_settings_spi *spi =
1141			    &neg.xport_specific.spi;
1142
1143			neg.protocol = PROTO_SCSI;
1144			neg.protocol_version = SCSI_REV_2;
1145			neg.transport = XPORT_SPI;
1146			neg.transport_version = 2;
1147
1148			spi->sync_offset = offset;
1149			spi->sync_period = period;
1150			spi->valid |= CTS_SPI_VALID_SYNC_OFFSET;
1151			spi->valid |= CTS_SPI_VALID_SYNC_RATE;
1152#else
1153			neg.sync_period = period;
1154			neg.sync_offset = offset;
1155			neg.valid = CCB_TRANS_SYNC_RATE_VALID
1156				  | CCB_TRANS_SYNC_OFFSET_VALID;
1157#endif
1158			xpt_setup_ccb(&neg.ccb_h, path, /*priority*/1);
1159			xpt_async(AC_TRANSFER_NEG, path, &neg);
1160		}
1161	}
1162
1163	if ((type & ADV_TRANS_GOAL) != 0) {
1164		tinfo->goal.period = period;
1165		tinfo->goal.offset = offset;
1166	}
1167
1168	if ((type & ADV_TRANS_USER) != 0) {
1169		tinfo->user.period = period;
1170		tinfo->user.offset = offset;
1171	}
1172}
1173
1174u_int8_t
1175adv_period_offset_to_sdtr(struct adv_softc *adv, u_int *period,
1176			  u_int *offset, int tid)
1177{
1178	u_int i;
1179	u_int dummy_offset;
1180	u_int dummy_period;
1181
1182	if (offset == NULL) {
1183		dummy_offset = 0;
1184		offset = &dummy_offset;
1185	}
1186
1187	if (period == NULL) {
1188		dummy_period = 0;
1189		period = &dummy_period;
1190	}
1191
1192	*offset = MIN(ADV_SYN_MAX_OFFSET, *offset);
1193	if (*period != 0 && *offset != 0) {
1194		for (i = 0; i < adv->sdtr_period_tbl_size; i++) {
1195			if (*period <= adv->sdtr_period_tbl[i]) {
1196				/*
1197				 * When responding to a target that requests
1198				 * sync, the requested  rate may fall between
1199				 * two rates that we can output, but still be
1200				 * a rate that we can receive.  Because of this,
1201				 * we want to respond to the target with
1202				 * the same rate that it sent to us even
1203				 * if the period we use to send data to it
1204				 * is lower.  Only lower the response period
1205				 * if we must.
1206				 */
1207				if (i == 0 /* Our maximum rate */)
1208					*period = adv->sdtr_period_tbl[0];
1209				return ((i << 4) | *offset);
1210			}
1211		}
1212	}
1213
1214	/* Must go async */
1215	*period = 0;
1216	*offset = 0;
1217	if (adv->fix_asyn_xfer & ADV_TID_TO_TARGET_MASK(tid))
1218		return (ASYN_SDTR_DATA_FIX_PCI_REV_AB);
1219	return (0);
1220}
1221
1222/* Internal Routines */
1223
/*
 * Read "count" 16-bit words from chip LRAM starting at s_addr into
 * buffer.  The data port streams consecutive words once the address
 * register has been loaded.
 */
static void
adv_read_lram_16_multi(struct adv_softc *adv, u_int16_t s_addr,
		       u_int16_t *buffer, int count)
{
	ADV_OUTW(adv, ADV_LRAM_ADDR, s_addr);
	ADV_INSW(adv, ADV_LRAM_DATA, buffer, count);
}
1231
/*
 * Write "count" 16-bit words from buffer into chip LRAM starting at
 * s_addr, streaming through the auto-advancing data port.
 */
static void
adv_write_lram_16_multi(struct adv_softc *adv, u_int16_t s_addr,
			u_int16_t *buffer, int count)
{
	ADV_OUTW(adv, ADV_LRAM_ADDR, s_addr);
	ADV_OUTSW(adv, ADV_LRAM_DATA, buffer, count);
}
1239
/*
 * Fill "count" consecutive 16-bit LRAM words, starting at s_addr,
 * with set_value.
 */
static void
adv_mset_lram_16(struct adv_softc *adv, u_int16_t s_addr,
		 u_int16_t set_value, int count)
{
	ADV_OUTW(adv, ADV_LRAM_ADDR, s_addr);
	bus_space_set_multi_2(adv->tag, adv->bsh, ADV_LRAM_DATA,
			      set_value, count);
}
1248
1249static u_int32_t
1250adv_msum_lram_16(struct adv_softc *adv, u_int16_t s_addr, int count)
1251{
1252	u_int32_t	sum;
1253	int		i;
1254
1255	sum = 0;
1256	ADV_OUTW(adv, ADV_LRAM_ADDR, s_addr);
1257	for (i = 0; i < count; i++)
1258		sum += ADV_INW(adv, ADV_LRAM_DATA);
1259	return (sum);
1260}
1261
1262static int
1263adv_write_and_verify_lram_16(struct adv_softc *adv, u_int16_t addr,
1264			     u_int16_t value)
1265{
1266	int	retval;
1267
1268	retval = 0;
1269	ADV_OUTW(adv, ADV_LRAM_ADDR, addr);
1270	ADV_OUTW(adv, ADV_LRAM_DATA, value);
1271	DELAY(10000);
1272	ADV_OUTW(adv, ADV_LRAM_ADDR, addr);
1273	if (value != ADV_INW(adv, ADV_LRAM_DATA))
1274		retval = 1;
1275	return (retval);
1276}
1277
/*
 * Read a 32-bit value from LRAM at addr via two consecutive 16-bit
 * reads of the data port.  The read order is swapped on big-endian
 * hosts so the reassembled value comes out the same either way.
 */
static u_int32_t
adv_read_lram_32(struct adv_softc *adv, u_int16_t addr)
{
	u_int16_t           val_low, val_high;

	ADV_OUTW(adv, ADV_LRAM_ADDR, addr);

#if BYTE_ORDER == BIG_ENDIAN
	val_high = ADV_INW(adv, ADV_LRAM_DATA);
	val_low = ADV_INW(adv, ADV_LRAM_DATA);
#else
	val_low = ADV_INW(adv, ADV_LRAM_DATA);
	val_high = ADV_INW(adv, ADV_LRAM_DATA);
#endif

	return (((u_int32_t)val_high << 16) | (u_int32_t)val_low);
}
1295
/*
 * Write a 32-bit value to LRAM at addr as two consecutive 16-bit
 * writes.  The halfword order mirrors adv_read_lram_32().
 */
static void
adv_write_lram_32(struct adv_softc *adv, u_int16_t addr, u_int32_t value)
{
	ADV_OUTW(adv, ADV_LRAM_ADDR, addr);

#if BYTE_ORDER == BIG_ENDIAN
	ADV_OUTW(adv, ADV_LRAM_DATA, (u_int16_t)((value >> 16) & 0xFFFF));
	ADV_OUTW(adv, ADV_LRAM_DATA, (u_int16_t)(value & 0xFFFF));
#else
	ADV_OUTW(adv, ADV_LRAM_DATA, (u_int16_t)(value & 0xFFFF));
	ADV_OUTW(adv, ADV_LRAM_DATA, (u_int16_t)((value >> 16) & 0xFFFF));
#endif
}
1309
/*
 * Write "count" 32-bit values to LRAM starting at s_addr.  The
 * buffer is streamed out as count * 2 16-bit words, so the 32-bit
 * values land in LRAM in host halfword order.
 */
static void
adv_write_lram_32_multi(struct adv_softc *adv, u_int16_t s_addr,
			u_int32_t *buffer, int count)
{
	ADV_OUTW(adv, ADV_LRAM_ADDR, s_addr);
	ADV_OUTSW(adv, ADV_LRAM_DATA, (u_int16_t *)buffer, count * 2);
}
1317
/*
 * Read one 16-bit word from the serial EEPROM at word address
 * "addr".  Writes are disabled first, and fixed 1ms delays pace
 * each step of the command sequence.
 */
static u_int16_t
adv_read_eeprom_16(struct adv_softc *adv, u_int8_t addr)
{
	u_int16_t read_wval;
	u_int8_t  cmd_reg;

	adv_write_eeprom_cmd_reg(adv, ADV_EEPROM_CMD_WRITE_DISABLE);
	DELAY(1000);
	cmd_reg = addr | ADV_EEPROM_CMD_READ;
	adv_write_eeprom_cmd_reg(adv, cmd_reg);
	DELAY(1000);
	read_wval = ADV_INW(adv, ADV_EEPROM_DATA);
	DELAY(1000);
	return (read_wval);
}
1333
/*
 * Write a 16-bit word to the serial EEPROM at word address "addr".
 * The write is skipped when the EEPROM already holds the desired
 * value (EEPROM cells have limited write endurance).  Returns the
 * value read back afterwards so the caller can verify the update.
 */
static u_int16_t
adv_write_eeprom_16(struct adv_softc *adv, u_int8_t addr, u_int16_t value)
{
	u_int16_t	read_value;

	read_value = adv_read_eeprom_16(adv, addr);
	if (read_value != value) {
		adv_write_eeprom_cmd_reg(adv, ADV_EEPROM_CMD_WRITE_ENABLE);
		DELAY(1000);

		ADV_OUTW(adv, ADV_EEPROM_DATA, value);
		DELAY(1000);

		adv_write_eeprom_cmd_reg(adv, ADV_EEPROM_CMD_WRITE | addr);
		/* Allow the EEPROM's internal write cycle to complete. */
		DELAY(20 * 1000);

		adv_write_eeprom_cmd_reg(adv, ADV_EEPROM_CMD_WRITE_DISABLE);
		DELAY(1000);
		read_value = adv_read_eeprom_16(adv, addr);
	}
	return (read_value);
}
1356
1357static int
1358adv_write_eeprom_cmd_reg(struct adv_softc *adv, u_int8_t cmd_reg)
1359{
1360	u_int8_t read_back;
1361	int	 retry;
1362
1363	retry = 0;
1364	while (1) {
1365		ADV_OUTB(adv, ADV_EEPROM_CMD, cmd_reg);
1366		DELAY(1000);
1367		read_back = ADV_INB(adv, ADV_EEPROM_CMD);
1368		if (read_back == cmd_reg) {
1369			return (1);
1370		}
1371		if (retry++ > ADV_EEPROM_MAX_RETRY) {
1372			return (0);
1373		}
1374	}
1375}
1376
/*
 * Write eeprom_config out to the EEPROM, accumulating a checksum
 * over the ID words and the configuration region, store the
 * checksum in the last slot, then read everything back to verify.
 * Returns the number of words that failed to write or verify
 * (0 on complete success).
 */
static int
adv_set_eeprom_config_once(struct adv_softc *adv,
			   struct adv_eeprom_config *eeprom_config)
{
	int		n_error;
	u_int16_t	*wbuf;
	u_int16_t	sum;
	u_int8_t	s_addr;
	u_int8_t	cfg_beg;
	u_int8_t	cfg_end;

	wbuf = (u_int16_t *)eeprom_config;
	n_error = 0;
	sum = 0;
	/* Write the first two words of the config image. */
	for (s_addr = 0; s_addr < 2; s_addr++, wbuf++) {
		sum += *wbuf;
		if (*wbuf != adv_write_eeprom_16(adv, s_addr, *wbuf)) {
			n_error++;
		}
	}
	/* The config region lives at a different address on VL cards. */
	if (adv->type & ADV_VL) {
		cfg_beg = ADV_EEPROM_CFG_BEG_VL;
		cfg_end = ADV_EEPROM_MAX_ADDR_VL;
	} else {
		cfg_beg = ADV_EEPROM_CFG_BEG;
		cfg_end = ADV_EEPROM_MAX_ADDR;
	}

	/* Write the config words, leaving the final slot for the checksum. */
	for (s_addr = cfg_beg; s_addr <= (cfg_end - 1); s_addr++, wbuf++) {
		sum += *wbuf;
		if (*wbuf != adv_write_eeprom_16(adv, s_addr, *wbuf)) {
			n_error++;
		}
	}
	/* Store the checksum both in memory and in the EEPROM. */
	*wbuf = sum;
	if (sum != adv_write_eeprom_16(adv, s_addr, sum)) {
		n_error++;
	}
	/* Read everything back (checksum word included) and compare. */
	wbuf = (u_int16_t *)eeprom_config;
	for (s_addr = 0; s_addr < 2; s_addr++, wbuf++) {
		if (*wbuf != adv_read_eeprom_16(adv, s_addr)) {
			n_error++;
		}
	}
	for (s_addr = cfg_beg; s_addr <= cfg_end; s_addr++, wbuf++) {
		if (*wbuf != adv_read_eeprom_16(adv, s_addr)) {
			n_error++;
		}
	}
	return (n_error);
}
1428
/*
 * Copy the microcode image (mcode_size bytes) into LRAM at s_addr
 * and record its size and 16-bit checksum where the microcode
 * expects them.  Returns the 16-bit-word sum of the data written so
 * the caller can compare it against the image's expected checksum.
 */
static u_int32_t
adv_load_microcode(struct adv_softc *adv, u_int16_t s_addr,
		   u_int16_t *mcode_buf, u_int16_t mcode_size)
{
	u_int32_t chksum;
	u_int16_t mcode_lram_size;
	u_int16_t mcode_chksum;

	/* Convert the byte count to a 16-bit-word count. */
	mcode_lram_size = mcode_size >> 1;
	/* XXX Why zero the memory just before you write the whole thing?? */
	adv_mset_lram_16(adv, s_addr, 0, mcode_lram_size);
	adv_write_lram_16_multi(adv, s_addr, mcode_buf, mcode_lram_size);

	/* Checksum of everything written, plus the code-section checksum. */
	chksum = adv_msum_lram_16(adv, s_addr, mcode_lram_size);
	mcode_chksum = (u_int16_t)adv_msum_lram_16(adv, ADV_CODE_SEC_BEG,
						   ((mcode_size - s_addr
						     - ADV_CODE_SEC_BEG) >> 1));
	adv_write_lram_16(adv, ADVV_MCODE_CHKSUM_W, mcode_chksum);
	adv_write_lram_16(adv, ADVV_MCODE_SIZE_W, mcode_size);
	return (chksum);
}
1450
/*
 * Rebuild the LRAM queue structures and the microcode's queueing
 * variables (used after a chip reset).
 */
static void
adv_reinit_lram(struct adv_softc *adv)
{
	adv_init_lram(adv);
	adv_init_qlink_var(adv);
}
1456
/*
 * Build the queue-block structures in LRAM.  Queues 1 through
 * max_openings are chained via their forward/backward links; the
 * blocks beyond max_openings (up to max_openings + 3) are set to
 * point at themselves.
 */
static void
adv_init_lram(struct adv_softc *adv)
{
	u_int8_t  i;
	u_int16_t s_addr;

	/* Clear the whole queue area first. */
	adv_mset_lram_16(adv, ADV_QADR_BEG, 0,
			 (((adv->max_openings + 2 + 1) * 64) >> 1));

	i = ADV_MIN_ACTIVE_QNO;
	s_addr = ADV_QADR_BEG + ADV_QBLK_SIZE;

	/* First queue: backward link wraps to the last queue. */
	adv_write_lram_8(adv, s_addr + ADV_SCSIQ_B_FWD,	i + 1);
	adv_write_lram_8(adv, s_addr + ADV_SCSIQ_B_BWD, adv->max_openings);
	adv_write_lram_8(adv, s_addr + ADV_SCSIQ_B_QNO, i);
	i++;
	s_addr += ADV_QBLK_SIZE;
	/* Middle queues link to their neighbors. */
	for (; i < adv->max_openings; i++, s_addr += ADV_QBLK_SIZE) {
		adv_write_lram_8(adv, s_addr + ADV_SCSIQ_B_FWD, i + 1);
		adv_write_lram_8(adv, s_addr + ADV_SCSIQ_B_BWD, i - 1);
		adv_write_lram_8(adv, s_addr + ADV_SCSIQ_B_QNO, i);
	}

	/* Last active queue terminates the forward chain. */
	adv_write_lram_8(adv, s_addr + ADV_SCSIQ_B_FWD, ADV_QLINK_END);
	adv_write_lram_8(adv, s_addr + ADV_SCSIQ_B_BWD, adv->max_openings - 1);
	adv_write_lram_8(adv, s_addr + ADV_SCSIQ_B_QNO, adv->max_openings);
	i++;
	s_addr += ADV_QBLK_SIZE;

	/* Trailing queues are self-referential. */
	for (; i <= adv->max_openings + 3; i++, s_addr += ADV_QBLK_SIZE) {
		adv_write_lram_8(adv, s_addr + ADV_SCSIQ_B_FWD, i);
		adv_write_lram_8(adv, s_addr + ADV_SCSIQ_B_BWD, i);
		adv_write_lram_8(adv, s_addr + ADV_SCSIQ_B_QNO, i);
	}
}
1492
/*
 * Initialize the microcode's variable area: start every target
 * async, set up the queue-list variables, program the disconnect
 * enable bits, host SCSI ID, and the overrun buffer address/size,
 * then point the chip's program counter at the microcode entry.
 * Returns 0 on success, 1 if the program counter could not be set.
 */
static int
adv_init_microcode_var(struct adv_softc *adv)
{
	int	 i;

	for (i = 0; i <= ADV_MAX_TID; i++) {

		/* Start out async all around */
		adv_set_syncrate(adv, /*path*/NULL,
				 i, 0, 0,
				 ADV_TRANS_GOAL|ADV_TRANS_CUR);
	}

	adv_init_qlink_var(adv);

	adv_write_lram_8(adv, ADVV_DISC_ENABLE_B, adv->disc_enable);
	adv_write_lram_8(adv, ADVV_HOSTSCSI_ID_B, 0x01 << adv->scsi_id);

	adv_write_lram_32(adv, ADVV_OVERRUN_PADDR_D, adv->overrun_physbase);

	adv_write_lram_32(adv, ADVV_OVERRUN_BSIZE_D, ADV_OVERRUN_BSIZE);

	/* Verify the program counter write took effect. */
	ADV_OUTW(adv, ADV_REG_PROG_COUNTER, ADV_MCODE_START_ADDR);
	if (ADV_INW(adv, ADV_REG_PROG_COUNTER) != ADV_MCODE_START_ADDR) {
		printf("adv%d: Unable to set program counter. Aborting.\n",
		       adv->unit);
		return (1);
	}
	return (0);
}
1523
/*
 * Reset the microcode's queue-list bookkeeping variables in LRAM:
 * free/done list heads and tails, the busy/disconnect queue heads,
 * ready count, and the error/halt/stop/busy flag bytes.
 */
static void
adv_init_qlink_var(struct adv_softc *adv)
{
	int	  i;
	u_int16_t lram_addr;

	adv_write_lram_8(adv, ADVV_NEXTRDY_B, 1);
	adv_write_lram_8(adv, ADVV_DONENEXT_B, adv->max_openings);

	adv_write_lram_16(adv, ADVV_FREE_Q_HEAD_W, 1);
	adv_write_lram_16(adv, ADVV_DONE_Q_TAIL_W, adv->max_openings);

	/* The busy and disconnect heads live just past the last queue. */
	adv_write_lram_8(adv, ADVV_BUSY_QHEAD_B,
			 (u_int8_t)((int) adv->max_openings + 1));
	adv_write_lram_8(adv, ADVV_DISC1_QHEAD_B,
			 (u_int8_t)((int) adv->max_openings + 2));

	adv_write_lram_8(adv, ADVV_TOTAL_READY_Q_B, adv->max_openings);

	/* Clear the error, halt, stop, busy, and progress flags. */
	adv_write_lram_16(adv, ADVV_ASCDVC_ERR_CODE_W, 0);
	adv_write_lram_16(adv, ADVV_HALTCODE_W, 0);
	adv_write_lram_8(adv, ADVV_STOP_CODE_B, 0);
	adv_write_lram_8(adv, ADVV_SCSIBUSY_B, 0);
	adv_write_lram_8(adv, ADVV_WTM_FLAG_B, 0);
	adv_write_lram_8(adv, ADVV_Q_DONE_IN_PROGRESS_B, 0);

	/* Zero the first 32 words of the queue address region. */
	lram_addr = ADV_QADR_BEG;
	for (i = 0; i < 32; i++, lram_addr += 2)
		adv_write_lram_16(adv, lram_addr, 0);
}
1554
1555static void
1556adv_disable_interrupt(struct adv_softc *adv)
1557{
1558	u_int16_t cfg;
1559
1560	cfg = ADV_INW(adv, ADV_CONFIG_LSW);
1561	ADV_OUTW(adv, ADV_CONFIG_LSW, cfg & ~ADV_CFG_LSW_HOST_INT_ON);
1562}
1563
1564static void
1565adv_enable_interrupt(struct adv_softc *adv)
1566{
1567	u_int16_t cfg;
1568
1569	cfg = ADV_INW(adv, ADV_CONFIG_LSW);
1570	ADV_OUTW(adv, ADV_CONFIG_LSW, cfg | ADV_CFG_LSW_HOST_INT_ON);
1571}
1572
/*
 * Pulse the IRQ-active bit in the chip status register (set, then
 * clear), toggling the interrupt line state.
 */
static void
adv_toggle_irq_act(struct adv_softc *adv)
{
	ADV_OUTW(adv, ADV_CHIP_STATUS, ADV_CIW_IRQ_ACT);
	ADV_OUTW(adv, ADV_CHIP_STATUS, 0);
}
1579
/*
 * Let the microcode resume by clearing any stop code it is
 * currently honoring.
 *
 * NOTE(review): this routine uses ADV_STOP_CODE_B while the other
 * routines in this file (e.g. adv_init_qlink_var and
 * adv_host_req_chip_halt) use ADVV_STOP_CODE_B — confirm both
 * names refer to the same LRAM location.
 */
void
adv_start_execution(struct adv_softc *adv)
{
	if (adv_read_lram_8(adv, ADV_STOP_CODE_B) != 0) {
		adv_write_lram_8(adv, ADV_STOP_CODE_B, 0);
	}
}
1587
1588int
1589adv_stop_chip(struct adv_softc *adv)
1590{
1591	u_int8_t cc_val;
1592
1593	cc_val = ADV_INB(adv, ADV_CHIP_CTRL)
1594		 & (~(ADV_CC_SINGLE_STEP | ADV_CC_TEST | ADV_CC_DIAG));
1595	ADV_OUTB(adv, ADV_CHIP_CTRL, cc_val | ADV_CC_HALT);
1596	adv_set_chip_ih(adv, ADV_INS_HALT);
1597	adv_set_chip_ih(adv, ADV_INS_RFLAG_WTM);
1598	if ((ADV_INW(adv, ADV_CHIP_STATUS) & ADV_CSW_HALTED) == 0) {
1599		return (0);
1600	}
1601	return (1);
1602}
1603
1604static int
1605adv_host_req_chip_halt(struct adv_softc *adv)
1606{
1607	int	 count;
1608	u_int8_t saved_stop_code;
1609
1610	if (adv_is_chip_halted(adv))
1611		return (1);
1612
1613	count = 0;
1614	saved_stop_code = adv_read_lram_8(adv, ADVV_STOP_CODE_B);
1615	adv_write_lram_8(adv, ADVV_STOP_CODE_B,
1616			 ADV_STOP_HOST_REQ_RISC_HALT | ADV_STOP_REQ_RISC_STOP);
1617	while (adv_is_chip_halted(adv) == 0
1618	    && count++ < 2000)
1619		;
1620
1621	adv_write_lram_8(adv, ADVV_STOP_CODE_B, saved_stop_code);
1622	return (count < 2000);
1623}
1624
/*
 * Write an instruction code to the chip's IH register, which lives
 * in register bank 1.  Bank 0 is restored before returning.
 */
static void
adv_set_chip_ih(struct adv_softc *adv, u_int16_t ins_code)
{
	adv_set_bank(adv, 1);
	ADV_OUTW(adv, ADV_REG_IH, ins_code);
	adv_set_bank(adv, 0);
}
1632
#if 0
/*
 * Read the SCSI control register from register bank 1, restoring
 * bank 0 afterwards.  (Currently compiled out — no callers.)
 */
static u_int8_t
adv_get_chip_scsi_ctrl(struct adv_softc *adv)
{
	u_int8_t scsi_ctrl;

	adv_set_bank(adv, 1);
	scsi_ctrl = ADV_INB(adv, ADV_REG_SC);
	adv_set_bank(adv, 0);
	return (scsi_ctrl);
}
#endif
1645
1646/*
1647 * XXX Looks like more padding issues in this routine as well.
1648 *     There has to be a way to turn this into an insw.
1649 */
1650static void
1651adv_get_q_info(struct adv_softc *adv, u_int16_t s_addr,
1652	       u_int16_t *inbuf, int words)
1653{
1654	int	i;
1655
1656	ADV_OUTW(adv, ADV_LRAM_ADDR, s_addr);
1657	for (i = 0; i < words; i++, inbuf++) {
1658		if (i == 5) {
1659			continue;
1660		}
1661		*inbuf = ADV_INW(adv, ADV_LRAM_DATA);
1662	}
1663}
1664
1665static u_int
1666adv_get_num_free_queues(struct adv_softc *adv, u_int8_t n_qs)
1667{
1668	u_int	  cur_used_qs;
1669	u_int	  cur_free_qs;
1670
1671	cur_used_qs = adv->cur_active + ADV_MIN_FREE_Q;
1672
1673	if ((cur_used_qs + n_qs) <= adv->max_openings) {
1674		cur_free_qs = adv->max_openings - cur_used_qs;
1675		return (cur_free_qs);
1676	}
1677	adv->openings_needed = n_qs;
1678	return (0);
1679}
1680
1681static u_int8_t
1682adv_alloc_free_queues(struct adv_softc *adv, u_int8_t free_q_head,
1683		      u_int8_t n_free_q)
1684{
1685	int i;
1686
1687	for (i = 0; i < n_free_q; i++) {
1688		free_q_head = adv_alloc_free_queue(adv, free_q_head);
1689		if (free_q_head == ADV_QLINK_END)
1690			break;
1691	}
1692	return (free_q_head);
1693}
1694
1695static u_int8_t
1696adv_alloc_free_queue(struct adv_softc *adv, u_int8_t free_q_head)
1697{
1698	u_int16_t	q_addr;
1699	u_int8_t	next_qp;
1700	u_int8_t	q_status;
1701
1702	next_qp = ADV_QLINK_END;
1703	q_addr = ADV_QNO_TO_QADDR(free_q_head);
1704	q_status = adv_read_lram_8(adv,	q_addr + ADV_SCSIQ_B_STATUS);
1705
1706	if ((q_status & QS_READY) == 0)
1707		next_qp = adv_read_lram_8(adv, q_addr + ADV_SCSIQ_B_FWD);
1708
1709	return (next_qp);
1710}
1711
1712static int
1713adv_send_scsi_queue(struct adv_softc *adv, struct adv_scsi_q *scsiq,
1714		    u_int8_t n_q_required)
1715{
1716	u_int8_t	free_q_head;
1717	u_int8_t	next_qp;
1718	u_int8_t	tid_no;
1719	u_int8_t	target_ix;
1720	int		retval;
1721
1722	retval = 1;
1723	target_ix = scsiq->q2.target_ix;
1724	tid_no = ADV_TIX_TO_TID(target_ix);
1725	free_q_head = adv_read_lram_16(adv, ADVV_FREE_Q_HEAD_W) & 0xFF;
1726	if ((next_qp = adv_alloc_free_queues(adv, free_q_head, n_q_required))
1727	    != ADV_QLINK_END) {
1728		scsiq->q1.q_no = free_q_head;
1729
1730		/*
1731		 * Now that we know our Q number, point our sense
1732		 * buffer pointer to a bus dma mapped area where
1733		 * we can dma the data to.
1734		 */
1735		scsiq->q1.sense_addr = adv->sense_physbase
1736		    + ((free_q_head - 1) * sizeof(struct scsi_sense_data));
1737		adv_put_ready_sg_list_queue(adv, scsiq, free_q_head);
1738		adv_write_lram_16(adv, ADVV_FREE_Q_HEAD_W, next_qp);
1739		adv->cur_active += n_q_required;
1740		retval = 0;
1741	}
1742	return (retval);
1743}
1744
1745
/*
 * Stage the scatter/gather continuation queues for a request in
 * LRAM, then hand the request itself to the microcode via
 * adv_put_ready_queue().  Each continuation queue holds up to
 * ADV_SG_LIST_PER_Q entries; the chain of queue blocks was
 * allocated by the caller starting at q_no.
 */
static void
adv_put_ready_sg_list_queue(struct adv_softc *adv, struct adv_scsi_q *scsiq,
			    u_int q_no)
{
	u_int8_t	sg_list_dwords;
	u_int8_t	sg_index, i;
	u_int8_t	sg_entry_cnt;
	u_int8_t	next_qp;
	u_int16_t	q_addr;
	struct		adv_sg_head *sg_head;
	struct		adv_sg_list_q scsi_sg_q;

	sg_head = scsiq->sg_head;

	if (sg_head) {
		/* The first SG entry travels with the base queue. */
		sg_entry_cnt = sg_head->entry_cnt - 1;
#ifdef DIAGNOSTIC
		if (sg_entry_cnt == 0)
			panic("adv_put_ready_sg_list_queue: ScsiQ with "
			      "a SG list but only one element");
		if ((scsiq->q1.cntl & QC_SG_HEAD) == 0)
			panic("adv_put_ready_sg_list_queue: ScsiQ with "
			      "a SG list but QC_SG_HEAD not set");
#endif
		q_addr = ADV_QNO_TO_QADDR(q_no);
		sg_index = 1;
		scsiq->q1.sg_queue_cnt = sg_head->queue_cnt;
		scsi_sg_q.sg_head_qp = q_no;
		scsi_sg_q.cntl = QCSG_SG_XFER_LIST;
		/* Emit one continuation queue per chunk of SG entries. */
		for (i = 0; i < sg_head->queue_cnt; i++) {
			u_int8_t segs_this_q;

			if (sg_entry_cnt > ADV_SG_LIST_PER_Q)
				segs_this_q = ADV_SG_LIST_PER_Q;
			else {
				/* This will be the last segment then */
				segs_this_q = sg_entry_cnt;
				scsi_sg_q.cntl |= QCSG_SG_XFER_END;
			}
			scsi_sg_q.seq_no = i + 1;
			/* Two 32-bit dwords (addr + count) per SG entry. */
			sg_list_dwords = segs_this_q << 1;
			if (i == 0) {
				scsi_sg_q.sg_list_cnt = segs_this_q;
				scsi_sg_q.sg_cur_list_cnt = segs_this_q;
			} else {
				scsi_sg_q.sg_list_cnt = segs_this_q - 1;
				scsi_sg_q.sg_cur_list_cnt = segs_this_q - 1;
			}
			/* Follow the queue chain to the next block. */
			next_qp = adv_read_lram_8(adv, q_addr + ADV_SCSIQ_B_FWD);
			scsi_sg_q.q_no = next_qp;
			q_addr = ADV_QNO_TO_QADDR(next_qp);

			adv_write_lram_16_multi(adv,
						q_addr + ADV_SCSIQ_SGHD_CPY_BEG,
						(u_int16_t *)&scsi_sg_q,
						sizeof(scsi_sg_q) >> 1);
			adv_write_lram_32_multi(adv, q_addr + ADV_SGQ_LIST_BEG,
						(u_int32_t *)&sg_head->sg_list[sg_index],
						sg_list_dwords);
			sg_entry_cnt -= segs_this_q;
			sg_index += ADV_SG_LIST_PER_Q;
		}
	}
	adv_put_ready_queue(adv, scsiq, q_no);
}
1811
/*
 * Copy scsiq into the LRAM queue block q_no and mark it ready for
 * the microcode.  If the target still needs sync negotiation
 * (current settings differ from the goal), an SDTR message-out is
 * staged and QC_MSG_OUT set so the microcode sends it with this
 * command.
 */
static void
adv_put_ready_queue(struct adv_softc *adv, struct adv_scsi_q *scsiq,
		    u_int q_no)
{
	struct		adv_target_transinfo* tinfo;
	u_int		q_addr;
	u_int		tid_no;

	tid_no = ADV_TIX_TO_TID(scsiq->q2.target_ix);
	tinfo = &adv->tinfo[tid_no];
	if ((tinfo->current.period != tinfo->goal.period)
	 || (tinfo->current.offset != tinfo->goal.offset)) {

		/* Negotiation pending; piggyback an SDTR message. */
		adv_msgout_sdtr(adv, tinfo->goal.period, tinfo->goal.offset);
		scsiq->q1.cntl |= QC_MSG_OUT;
	}
	q_addr = ADV_QNO_TO_QADDR(q_no);

	scsiq->q1.status = QS_FREE;

	adv_write_lram_16_multi(adv, q_addr + ADV_SCSIQ_CDB_BEG,
				(u_int16_t *)scsiq->cdbptr,
				scsiq->q2.cdb_len >> 1);

#if BYTE_ORDER == BIG_ENDIAN
	adv_adj_scsiq_endian(scsiq);
#endif

	adv_put_scsiq(adv, q_addr + ADV_SCSIQ_CPY_BEG,
		      (u_int16_t *) &scsiq->q1.cntl,
		      ((sizeof(scsiq->q1) + sizeof(scsiq->q2)) / 2) - 1);

#ifdef CC_WRITE_IO_COUNT
	adv_write_lram_16(adv, q_addr + ADV_SCSIQ_W_REQ_COUNT,
			  adv->req_count);
#endif

#ifdef CC_CLEAR_DMA_REMAIN

	adv_write_lram_32(adv, q_addr + ADV_SCSIQ_DW_REMAIN_XFER_ADDR, 0);
	adv_write_lram_32(adv, q_addr + ADV_SCSIQ_DW_REMAIN_XFER_CNT, 0);
#endif

	/* Setting QS_READY hands the queue to the microcode. */
	adv_write_lram_16(adv, q_addr + ADV_SCSIQ_B_STATUS,
			  (scsiq->q1.q_no << 8) | QS_READY);
}
1858
1859static void
1860adv_put_scsiq(struct adv_softc *adv, u_int16_t s_addr,
1861	      u_int16_t *buffer, int words)
1862{
1863	int	i;
1864
1865	/*
1866	 * XXX This routine makes *gross* assumptions
1867	 * about padding in the data structures.
1868	 * Either the data structures should have explicit
1869	 * padding members added, or they should have padding
1870	 * turned off via compiler attributes depending on
1871	 * which yields better overall performance.  My hunch
1872	 * would be that turning off padding would be the
1873	 * faster approach as an outsw is much faster than
1874	 * this crude loop and accessing un-aligned data
1875	 * members isn't *that* expensive.  The other choice
1876	 * would be to modify the ASC script so that the
1877	 * the adv_scsiq_1 structure can be re-arranged so
1878	 * padding isn't required.
1879	 */
1880	ADV_OUTW(adv, ADV_LRAM_ADDR, s_addr);
1881	for (i = 0; i < words; i++, buffer++) {
1882		if (i == 2 || i == 10) {
1883			continue;
1884		}
1885		ADV_OUTW(adv, ADV_LRAM_DATA, *buffer);
1886	}
1887}
1888
#if BYTE_ORDER == BIG_ENDIAN
/*
 * Byte-order fixup stubs.  The real conversions have never been
 * implemented; big-endian hosts panic immediately.
 */
void
adv_adj_endian_qdone_info(struct adv_q_done_info *scsiq)
{

	panic("adv(4) not supported on big-endian machines.\n");
}

void
adv_adj_scsiq_endian(struct adv_scsi_q *scsiq)
{

	panic("adv(4) not supported on big-endian machines.\n");
}
#endif
1904
/*
 * Handle an extended message received from the target while the
 * microcode is halted.  SDTR requests are range-checked and either
 * accepted (completing our own negotiation) or answered with our
 * counter-proposal; WDTR requests are answered with a zero width;
 * anything else gets MESSAGE REJECT.  The updated queue control
 * flags are written back to the halted queue.
 */
static void
adv_handle_extmsg_in(struct adv_softc *adv, u_int16_t halt_q_addr,
		     u_int8_t q_cntl, target_bit_vector target_mask,
		     int tid_no)
{
	struct	ext_msg ext_msg;

	adv_read_lram_16_multi(adv, ADVV_MSGIN_BEG, (u_int16_t *) &ext_msg,
			       sizeof(ext_msg) >> 1);
	if ((ext_msg.msg_type == MSG_EXTENDED)
	 && (ext_msg.msg_req == MSG_EXT_SDTR)
	 && (ext_msg.msg_len == MSG_EXT_SDTR_LEN)) {
		union	  ccb *ccb;
		struct	  adv_target_transinfo* tinfo;
		u_int32_t cinfo_index;
		u_int	 period;
		u_int	 offset;
		int	 sdtr_accept;
		u_int8_t orig_offset;

		cinfo_index =
		    adv_read_lram_32(adv, halt_q_addr + ADV_SCSIQ_D_CINFO_IDX);
		ccb = adv->ccb_infos[cinfo_index].ccb;
		tinfo = &adv->tinfo[tid_no];
		sdtr_accept = TRUE;

		orig_offset = ext_msg.req_ack_offset;
		/* Never negotiate a period faster than our goal. */
		if (ext_msg.xfer_period < tinfo->goal.period) {
                	sdtr_accept = FALSE;
			ext_msg.xfer_period = tinfo->goal.period;
		}

		/* Perform range checking */
		period = ext_msg.xfer_period;
		offset = ext_msg.req_ack_offset;
		adv_period_offset_to_sdtr(adv, &period,  &offset, tid_no);
		ext_msg.xfer_period = period;
		ext_msg.req_ack_offset = offset;

		/* Record our current sync settings */
		adv_set_syncrate(adv, ccb->ccb_h.path,
				 tid_no, ext_msg.xfer_period,
				 ext_msg.req_ack_offset,
				 ADV_TRANS_GOAL|ADV_TRANS_ACTIVE);

		/* Offset too high or large period forced async */
		if (orig_offset != ext_msg.req_ack_offset)
			sdtr_accept = FALSE;

		if (sdtr_accept && (q_cntl & QC_MSG_OUT)) {
			/* Valid response to our requested negotiation */
			q_cntl &= ~QC_MSG_OUT;
		} else {
			/* Must Respond */
			q_cntl |= QC_MSG_OUT;
			adv_msgout_sdtr(adv, ext_msg.xfer_period,
					ext_msg.req_ack_offset);
		}

	} else if (ext_msg.msg_type == MSG_EXTENDED
		&& ext_msg.msg_req == MSG_EXT_WDTR
		&& ext_msg.msg_len == MSG_EXT_WDTR_LEN) {

		/* We only support narrow transfers; respond with width 0. */
		ext_msg.wdtr_width = 0;
		adv_write_lram_16_multi(adv, ADVV_MSGOUT_BEG,
					(u_int16_t *)&ext_msg,
					sizeof(ext_msg) >> 1);
		q_cntl |= QC_MSG_OUT;
        } else {

		/* Unrecognized extended message; reject it. */
		ext_msg.msg_type = MSG_MESSAGE_REJECT;
		adv_write_lram_16_multi(adv, ADVV_MSGOUT_BEG,
					(u_int16_t *)&ext_msg,
					sizeof(ext_msg) >> 1);
		q_cntl |= QC_MSG_OUT;
        }
	adv_write_lram_8(adv, halt_q_addr + ADV_SCSIQ_B_CNTL, q_cntl);
}
1983
1984static void
1985adv_msgout_sdtr(struct adv_softc *adv, u_int8_t sdtr_period,
1986		u_int8_t sdtr_offset)
1987{
1988	struct	 ext_msg sdtr_buf;
1989
1990	sdtr_buf.msg_type = MSG_EXTENDED;
1991	sdtr_buf.msg_len = MSG_EXT_SDTR_LEN;
1992	sdtr_buf.msg_req = MSG_EXT_SDTR;
1993	sdtr_buf.xfer_period = sdtr_period;
1994	sdtr_offset &= ADV_SYN_MAX_OFFSET;
1995	sdtr_buf.req_ack_offset = sdtr_offset;
1996	adv_write_lram_16_multi(adv, ADVV_MSGOUT_BEG,
1997				(u_int16_t *) &sdtr_buf,
1998				sizeof(sdtr_buf) / 2);
1999}
2000
/*
 * Mark as aborted every active queue that matches the given
 * target/lun (and, when ccb is non-NULL, that specific ccb),
 * applying "status" to requests still in progress.  With
 * queued_only set, requests that have disconnected, are busy, or
 * are done are left alone.  Returns the number of requests marked.
 */
int
adv_abort_ccb(struct adv_softc *adv, int target, int lun, union ccb *ccb,
	      u_int32_t status, int queued_only)
{
	u_int16_t q_addr;
	u_int8_t  q_no;
	struct adv_q_done_info scsiq_buf;
	struct adv_q_done_info *scsiq;
	u_int8_t  target_ix;
	int	  count;

	scsiq = &scsiq_buf;
	target_ix = ADV_TIDLUN_TO_IX(target, lun);
	count = 0;
	/* Walk every queue block looking for matching requests. */
	for (q_no = ADV_MIN_ACTIVE_QNO; q_no <= adv->max_openings; q_no++) {
		struct adv_ccb_info *ccb_info;
		q_addr = ADV_QNO_TO_QADDR(q_no);

		adv_copy_lram_doneq(adv, q_addr, scsiq, adv->max_dma_count);
		ccb_info = &adv->ccb_infos[scsiq->d2.ccb_index];
		if (((scsiq->q_status & QS_READY) != 0)
		 && ((scsiq->q_status & QS_ABORTED) == 0)
		 && ((scsiq->cntl & QCSG_SG_XFER_LIST) == 0)
		 && (scsiq->d2.target_ix == target_ix)
		 && (queued_only == 0
		  || !(scsiq->q_status & (QS_DISC1|QS_DISC2|QS_BUSY|QS_DONE)))
		 && (ccb == NULL || (ccb == ccb_info->ccb))) {
			union ccb *aborted_ccb;
			struct adv_ccb_info *cinfo;

			/* Tell the microcode this queue is dead. */
			scsiq->q_status |= QS_ABORTED;
			adv_write_lram_8(adv, q_addr + ADV_SCSIQ_B_STATUS,
					 scsiq->q_status);
			aborted_ccb = ccb_info->ccb;
			/* Don't clobber earlier error codes */
			if ((aborted_ccb->ccb_h.status & CAM_STATUS_MASK)
			  == CAM_REQ_INPROG)
				aborted_ccb->ccb_h.status |= status;
			cinfo = (struct adv_ccb_info *)
			    aborted_ccb->ccb_h.ccb_cinfo_ptr;
			cinfo->state |= ACCB_ABORT_QUEUED;
			count++;
		}
	}
	return (count);
}
2047
/*
 * Reset the SCSI bus (optionally driving the reset ourselves),
 * reinitialize LRAM and all transfer settings, notify CAM, and
 * complete every pending CCB as aborted by the host.  Returns the
 * number of CCBs terminated.
 */
int
adv_reset_bus(struct adv_softc *adv, int initiate_bus_reset)
{
	int count;
	int i;
	union ccb *ccb;

	/* Wait (bounded) for any in-progress bus reset to clear. */
	i = 200;
	while ((ADV_INW(adv, ADV_CHIP_STATUS) & ADV_CSW_SCSI_RESET_ACTIVE) != 0
	    && i--)
		DELAY(1000);
	adv_reset_chip(adv, initiate_bus_reset);
	adv_reinit_lram(adv);
	/* Drop every target back to async until renegotiated. */
	for (i = 0; i <= ADV_MAX_TID; i++)
		adv_set_syncrate(adv, NULL, i, /*period*/0,
				 /*offset*/0, ADV_TRANS_CUR);
	ADV_OUTW(adv, ADV_REG_PROG_COUNTER, ADV_MCODE_START_ADDR);

	/* Tell the XPT layer that a bus reset occurred */
	if (adv->path != NULL)
		xpt_async(AC_BUS_RESET, adv->path, NULL);

	count = 0;
	while ((ccb = (union ccb *)LIST_FIRST(&adv->pending_ccbs)) != NULL) {
		if ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_INPROG)
			ccb->ccb_h.status |= CAM_SCSI_BUS_RESET;
		adv_done(adv, ccb, QD_ABORTED_BY_HOST, 0, 0, 0);
		count++;
	}

	adv_start_chip(adv);
	return (count);
}
2081
2082static void
2083adv_set_sdtr_reg_at_id(struct adv_softc *adv, int tid, u_int8_t sdtr_data)
2084{
2085	int orig_id;
2086
2087    	adv_set_bank(adv, 1);
2088    	orig_id = ffs(ADV_INB(adv, ADV_HOST_SCSIID)) - 1;
2089    	ADV_OUTB(adv, ADV_HOST_SCSIID, tid);
2090	if (ADV_INB(adv, ADV_HOST_SCSIID) == (0x01 << tid)) {
2091		adv_set_bank(adv, 0);
2092		ADV_OUTB(adv, ADV_SYN_OFFSET, sdtr_data);
2093	}
2094    	adv_set_bank(adv, 1);
2095    	ADV_OUTB(adv, ADV_HOST_SCSIID, orig_id);
2096	adv_set_bank(adv, 0);
2097}
2098