/* advlib.c, FreeBSD revision 55945 */
133965Sjdp/*
233965Sjdp * Low level routines for the Advanced Systems Inc. SCSI controllers chips
333965Sjdp *
433965Sjdp * Copyright (c) 1996-1997, 1999-2000 Justin Gibbs.
533965Sjdp * All rights reserved.
633965Sjdp *
733965Sjdp * Redistribution and use in source and binary forms, with or without
833965Sjdp * modification, are permitted provided that the following conditions
933965Sjdp * are met:
1033965Sjdp * 1. Redistributions of source code must retain the above copyright
1133965Sjdp *    notice, this list of conditions, and the following disclaimer,
1233965Sjdp *    without modification, immediately at the beginning of the file.
1333965Sjdp * 2. Redistributions in binary form must reproduce the above copyright
1433965Sjdp *    notice, this list of conditions and the following disclaimer in the
1533965Sjdp *    documentation and/or other materials provided with the distribution.
1633965Sjdp * 3. The name of the author may not be used to endorse or promote products
1733965Sjdp *    derived from this software without specific prior written permission.
1833965Sjdp *
1933965Sjdp * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
2033965Sjdp * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
2133965Sjdp * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
2233965Sjdp * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
2333965Sjdp * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
2433965Sjdp * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
2533965Sjdp * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
2633965Sjdp * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
2733965Sjdp * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
2833965Sjdp * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
2933965Sjdp * SUCH DAMAGE.
3033965Sjdp *
3133965Sjdp * $FreeBSD: head/sys/dev/advansys/advlib.c 55945 2000-01-14 03:33:38Z gibbs $
3233965Sjdp */
3333965Sjdp/*
3433965Sjdp * Ported from:
3533965Sjdp * advansys.c - Linux Host Driver for AdvanSys SCSI Adapters
3633965Sjdp *
3733965Sjdp * Copyright (c) 1995-1996 Advanced System Products, Inc.
3833965Sjdp * All Rights Reserved.
3933965Sjdp *
4033965Sjdp * Redistribution and use in source and binary forms, with or without
4133965Sjdp * modification, are permitted provided that redistributions of source
4233965Sjdp * code retain the above copyright notice and this comment without
4333965Sjdp * modification.
4433965Sjdp */
4533965Sjdp
4633965Sjdp#include <sys/param.h>
4733965Sjdp#include <sys/kernel.h>
4833965Sjdp#include <sys/systm.h>
4933965Sjdp
5033965Sjdp#include <machine/bus_pio.h>
5133965Sjdp#include <machine/bus.h>
5233965Sjdp#include <machine/clock.h>
5333965Sjdp
5433965Sjdp#include <cam/cam.h>
5533965Sjdp#include <cam/cam_ccb.h>
5633965Sjdp#include <cam/cam_sim.h>
5733965Sjdp#include <cam/cam_xpt_sim.h>
5833965Sjdp
5933965Sjdp#include <cam/scsi/scsi_all.h>
6033965Sjdp#include <cam/scsi/scsi_message.h>
6133965Sjdp#include <cam/scsi/scsi_da.h>
6233965Sjdp#include <cam/scsi/scsi_cd.h>
6333965Sjdp
6433965Sjdp#include <vm/vm.h>
6533965Sjdp#include <vm/vm_param.h>
6633965Sjdp#include <vm/pmap.h>
6733965Sjdp
6833965Sjdp#include <dev/advansys/advansys.h>
6933965Sjdp#include <dev/advansys/advmcode.h>
7033965Sjdp
/*
 * Inquiry-driven quirk entry: a device whose inquiry data matches
 * inq_pat has the given quirk flags applied (see advasync()).
 */
struct adv_quirk_entry {
	struct scsi_inquiry_pattern inq_pat;
	u_int8_t quirks;
#define ADV_QUIRK_FIX_ASYN_XFER_ALWAYS	0x01
#define ADV_QUIRK_FIX_ASYN_XFER		0x02
};
7733965Sjdp
/*
 * Quirk table searched with cam_quirkmatch(); the first matching
 * entry wins, so the full-wildcard default entry must stay last.
 */
static struct adv_quirk_entry adv_quirk_table[] =
{
	{
		{ T_CDROM, SIP_MEDIA_REMOVABLE, "HP", "*", "*" },
		ADV_QUIRK_FIX_ASYN_XFER_ALWAYS|ADV_QUIRK_FIX_ASYN_XFER
	},
	{
		{ T_CDROM, SIP_MEDIA_REMOVABLE, "NEC", "CD-ROM DRIVE", "*" },
		0
	},
	{
		{
		  T_SEQUENTIAL, SIP_MEDIA_REMOVABLE,
		  "TANDBERG", " TDC 36", "*"
		},
		0
	},
	{
		{ T_SEQUENTIAL, SIP_MEDIA_REMOVABLE, "WANGTEK", "*", "*" },
		0
	},
	{
		{
		  T_PROCESSOR, SIP_MEDIA_REMOVABLE|SIP_MEDIA_FIXED,
		  "*", "*", "*"
		},
		0
	},
	{
		{
		  T_SCANNER, SIP_MEDIA_REMOVABLE|SIP_MEDIA_FIXED,
		  "*", "*", "*"
		},
		0
	},
	{
		/* Default quirk entry */
		{
		  T_ANY, SIP_MEDIA_REMOVABLE|SIP_MEDIA_FIXED,
		  /*vendor*/"*", /*product*/"*", /*revision*/"*"
                },
                ADV_QUIRK_FIX_ASYN_XFER,
	}
};
122
123/*
124 * Allowable periods in ns
125 */
126static u_int8_t adv_sdtr_period_tbl[] =
127{
128	25,
129	30,
130	35,
131	40,
132	50,
133	60,
134	70,
135	85
136};
137
138static u_int8_t adv_sdtr_period_tbl_ultra[] =
139{
140	12,
141	19,
142	25,
143	32,
144	38,
145	44,
146	50,
147	57,
148	63,
149	69,
150	75,
151	82,
152	88,
153	94,
154	100,
155	107
156};
157
158struct ext_msg {
159	u_int8_t msg_type;
160	u_int8_t msg_len;
161	u_int8_t msg_req;
162	union {
163		struct {
164			u_int8_t sdtr_xfer_period;
165			u_int8_t sdtr_req_ack_offset;
166		} sdtr;
167		struct {
168       			u_int8_t wdtr_width;
169		} wdtr;
170		struct {
171			u_int8_t mdp[4];
172		} mdp;
173	} u_ext_msg;
174	u_int8_t res;
175};
176
177#define	xfer_period	u_ext_msg.sdtr.sdtr_xfer_period
178#define	req_ack_offset	u_ext_msg.sdtr.sdtr_req_ack_offset
179#define	wdtr_width	u_ext_msg.wdtr.wdtr_width
180#define	mdp_b3		u_ext_msg.mdp_b3
181#define	mdp_b2		u_ext_msg.mdp_b2
182#define	mdp_b1		u_ext_msg.mdp_b1
183#define	mdp_b0		u_ext_msg.mdp_b0
184
185/*
186 * Some of the early PCI adapters have problems with
187 * async transfers.  Instead use an offset of 1.
188 */
189#define ASYN_SDTR_DATA_FIX_PCI_REV_AB 0x41
190
191/* LRAM routines */
192static void	 adv_read_lram_16_multi(struct adv_softc *adv, u_int16_t s_addr,
193					u_int16_t *buffer, int count);
194static void	 adv_write_lram_16_multi(struct adv_softc *adv,
195					 u_int16_t s_addr, u_int16_t *buffer,
196					 int count);
197static void	 adv_mset_lram_16(struct adv_softc *adv, u_int16_t s_addr,
198				  u_int16_t set_value, int count);
199static u_int32_t adv_msum_lram_16(struct adv_softc *adv, u_int16_t s_addr,
200				  int count);
201
202static int	 adv_write_and_verify_lram_16(struct adv_softc *adv,
203					      u_int16_t addr, u_int16_t value);
204static u_int32_t adv_read_lram_32(struct adv_softc *adv, u_int16_t addr);
205
206
207static void	 adv_write_lram_32(struct adv_softc *adv, u_int16_t addr,
208				   u_int32_t value);
209static void	 adv_write_lram_32_multi(struct adv_softc *adv,
210					 u_int16_t s_addr, u_int32_t *buffer,
211					 int count);
212
213/* EEPROM routines */
214static u_int16_t adv_read_eeprom_16(struct adv_softc *adv, u_int8_t addr);
215static u_int16_t adv_write_eeprom_16(struct adv_softc *adv, u_int8_t addr,
216				     u_int16_t value);
217static int	 adv_write_eeprom_cmd_reg(struct adv_softc *adv,
218					  u_int8_t cmd_reg);
219static int	 adv_set_eeprom_config_once(struct adv_softc *adv,
220					    struct adv_eeprom_config *eeconfig);
221
222/* Initialization */
223static u_int32_t adv_load_microcode(struct adv_softc *adv, u_int16_t s_addr,
224				    u_int16_t *mcode_buf, u_int16_t mcode_size);
225
226static void	 adv_reinit_lram(struct adv_softc *adv);
227static void	 adv_init_lram(struct adv_softc *adv);
228static int	 adv_init_microcode_var(struct adv_softc *adv);
229static void	 adv_init_qlink_var(struct adv_softc *adv);
230
231/* Interrupts */
232static void	 adv_disable_interrupt(struct adv_softc *adv);
233static void	 adv_enable_interrupt(struct adv_softc *adv);
234static void	 adv_toggle_irq_act(struct adv_softc *adv);
235
236/* Chip Control */
237static int	 adv_host_req_chip_halt(struct adv_softc *adv);
238static void	 adv_set_chip_ih(struct adv_softc *adv, u_int16_t ins_code);
239#if UNUSED
240static u_int8_t  adv_get_chip_scsi_ctrl(struct adv_softc *adv);
241#endif
242
243/* Queue handling and execution */
244static __inline int
245		 adv_sgcount_to_qcount(int sgcount);
246
247static __inline int
248adv_sgcount_to_qcount(int sgcount)
249{
250	int	n_sg_list_qs;
251
252	n_sg_list_qs = ((sgcount - 1) / ADV_SG_LIST_PER_Q);
253	if (((sgcount - 1) % ADV_SG_LIST_PER_Q) != 0)
254		n_sg_list_qs++;
255	return (n_sg_list_qs + 1);
256}
257
/* Queue allocation, submission, and message handling helpers. */
static void	 adv_get_q_info(struct adv_softc *adv, u_int16_t s_addr,
				u_int16_t *inbuf, int words);
static u_int	 adv_get_num_free_queues(struct adv_softc *adv, u_int8_t n_qs);
static u_int8_t  adv_alloc_free_queues(struct adv_softc *adv,
				       u_int8_t free_q_head, u_int8_t n_free_q);
static u_int8_t  adv_alloc_free_queue(struct adv_softc *adv,
				      u_int8_t free_q_head);
static int	 adv_send_scsi_queue(struct adv_softc *adv,
				     struct adv_scsi_q *scsiq,
				     u_int8_t n_q_required);
static void	 adv_put_ready_sg_list_queue(struct adv_softc *adv,
					     struct adv_scsi_q *scsiq,
					     u_int q_no);
static void	 adv_put_ready_queue(struct adv_softc *adv,
				     struct adv_scsi_q *scsiq, u_int q_no);
static void	 adv_put_scsiq(struct adv_softc *adv, u_int16_t s_addr,
			       u_int16_t *buffer, int words);

/* Messages */
static void	 adv_handle_extmsg_in(struct adv_softc *adv,
				      u_int16_t halt_q_addr, u_int8_t q_cntl,
				      target_bit_vector target_id,
				      int tid);
static void	 adv_msgout_sdtr(struct adv_softc *adv, u_int8_t sdtr_period,
				 u_int8_t sdtr_offset);
static void	 adv_set_sdtr_reg_at_id(struct adv_softc *adv, int id,
					u_int8_t sdtr_data);
285
286
287/* Exported functions first */
288
/*
 * CAM async event callback.  On device discovery, match the inquiry
 * data against adv_quirk_table and update the per-target async-fix
 * masks, then re-assert the current sync rate.  On device loss,
 * restore the fix bit and revert the target to async transfers.
 */
void
advasync(void *callback_arg, u_int32_t code, struct cam_path *path, void *arg)
{
	struct adv_softc *adv;

	adv = (struct adv_softc *)callback_arg;
	switch (code) {
	case AC_FOUND_DEVICE:
	{
		struct ccb_getdev *cgd;
		target_bit_vector target_mask;
		int num_entries;
        	caddr_t match;
		struct adv_quirk_entry *entry;
		struct adv_target_transinfo* tinfo;

		cgd = (struct ccb_getdev *)arg;

		target_mask = ADV_TID_TO_TARGET_MASK(cgd->ccb_h.target_id);

		num_entries = sizeof(adv_quirk_table)/sizeof(*adv_quirk_table);
		match = cam_quirkmatch((caddr_t)&cgd->inq_data,
				       (caddr_t)adv_quirk_table,
				       num_entries, sizeof(*adv_quirk_table),
				       scsi_inquiry_match);

		/* The table ends with a full wildcard, so NULL is a bug. */
		if (match == NULL)
			panic("advasync: device didn't match wildcard entry!!");

		entry = (struct adv_quirk_entry *)match;

		if (adv->bug_fix_control & ADV_BUG_FIX_ASYN_USE_SYN) {
			if ((entry->quirks & ADV_QUIRK_FIX_ASYN_XFER_ALWAYS)!=0)
				adv->fix_asyn_xfer_always |= target_mask;
			else
				adv->fix_asyn_xfer_always &= ~target_mask;
			/*
			 * We start out life with all bits set and clear them
			 * after we've determined that the fix isn't necessary.
			 * It may well be that we've already cleared a target
			 * before the full inquiry session completes, so don't
			 * gratuitously set a target bit even if it has this
			 * quirk.  But, if the quirk exonerates a device, clear
			 * the bit now.
			 */
			if ((entry->quirks & ADV_QUIRK_FIX_ASYN_XFER) == 0)
				adv->fix_asyn_xfer &= ~target_mask;
		}
		/*
		 * Reset our sync settings now that we've determined
		 * what quirks are in effect for the device.
		 */
		tinfo = &adv->tinfo[cgd->ccb_h.target_id];
		adv_set_syncrate(adv, cgd->ccb_h.path,
				 cgd->ccb_h.target_id,
				 tinfo->current.period,
				 tinfo->current.offset,
				 ADV_TRANS_CUR);
		break;
	}
	case AC_LOST_DEVICE:
	{
		u_int target_mask;

		if (adv->bug_fix_control & ADV_BUG_FIX_ASYN_USE_SYN) {
			target_mask = 0x01 << xpt_path_target_id(path);
			adv->fix_asyn_xfer |= target_mask;
		}

		/*
		 * Revert to async transfers
		 * for the next device.
		 */
		adv_set_syncrate(adv, /*path*/NULL,
				 xpt_path_target_id(path),
				 /*period*/0,
				 /*offset*/0,
				 ADV_TRANS_GOAL|ADV_TRANS_CUR);
	}
	/* FALLTHROUGH -- AC_LOST_DEVICE has no break; default is a no-op. */
	default:
		break;
	}
}
372
373void
374adv_set_bank(struct adv_softc *adv, u_int8_t bank)
375{
376	u_int8_t control;
377
378	/*
379	 * Start out with the bank reset to 0
380	 */
381	control = ADV_INB(adv, ADV_CHIP_CTRL)
382		  &  (~(ADV_CC_SINGLE_STEP | ADV_CC_TEST
383			| ADV_CC_DIAG | ADV_CC_SCSI_RESET
384			| ADV_CC_CHIP_RESET | ADV_CC_BANK_ONE));
385	if (bank == 1) {
386		control |= ADV_CC_BANK_ONE;
387	} else if (bank == 2) {
388		control |= ADV_CC_DIAG | ADV_CC_BANK_ONE;
389	}
390	ADV_OUTB(adv, ADV_CHIP_CTRL, control);
391}
392
/*
 * Read one byte from LRAM.  LRAM is only word addressable, so the
 * containing 16bit word is fetched and the requested half extracted;
 * which half holds the odd byte depends on host byte order.
 */
u_int8_t
adv_read_lram_8(struct adv_softc *adv, u_int16_t addr)
{
	u_int8_t   byte_data;
	u_int16_t  word_data;

	/*
	 * LRAM is accessed on 16bit boundaries.
	 */
	ADV_OUTW(adv, ADV_LRAM_ADDR, addr & 0xFFFE);
	word_data = ADV_INW(adv, ADV_LRAM_DATA);
	if (addr & 1) {
		/* Odd byte: high half on little-endian hosts. */
#if BYTE_ORDER == BIG_ENDIAN
		byte_data = (u_int8_t)(word_data & 0xFF);
#else
		byte_data = (u_int8_t)((word_data >> 8) & 0xFF);
#endif
	} else {
		/* Even byte: low half on little-endian hosts. */
#if BYTE_ORDER == BIG_ENDIAN
		byte_data = (u_int8_t)((word_data >> 8) & 0xFF);
#else
		byte_data = (u_int8_t)(word_data & 0xFF);
#endif
	}
	return (byte_data);
}
419
420void
421adv_write_lram_8(struct adv_softc *adv, u_int16_t addr, u_int8_t value)
422{
423	u_int16_t word_data;
424
425	word_data = adv_read_lram_16(adv, addr & 0xFFFE);
426	if (addr & 1) {
427		word_data &= 0x00FF;
428		word_data |= (((u_int8_t)value << 8) & 0xFF00);
429	} else {
430		word_data &= 0xFF00;
431		word_data |= ((u_int8_t)value & 0x00FF);
432	}
433	adv_write_lram_16(adv, addr & 0xFFFE, word_data);
434}
435
436
/* Read a 16bit LRAM word: latch the address, then read the data port. */
u_int16_t
adv_read_lram_16(struct adv_softc *adv, u_int16_t addr)
{
	ADV_OUTW(adv, ADV_LRAM_ADDR, addr);
	return (ADV_INW(adv, ADV_LRAM_DATA));
}
443
/* Write a 16bit LRAM word: latch the address, then write the data port. */
void
adv_write_lram_16(struct adv_softc *adv, u_int16_t addr, u_int16_t value)
{
	ADV_OUTW(adv, ADV_LRAM_ADDR, addr);
	ADV_OUTW(adv, ADV_LRAM_DATA, value);
}
450
451/*
452 * Determine if there is a board at "iobase" by looking
453 * for the AdvanSys signatures.  Return 1 if a board is
454 * found, 0 otherwise.
455 */
456int
457adv_find_signature(bus_space_tag_t tag, bus_space_handle_t bsh)
458{
459	u_int16_t signature;
460
461	if (bus_space_read_1(tag, bsh, ADV_SIGNATURE_BYTE) == ADV_1000_ID1B) {
462		signature = bus_space_read_2(tag, bsh, ADV_SIGNATURE_WORD);
463		if ((signature == ADV_1000_ID0W)
464		 || (signature == ADV_1000_ID0W_FIX))
465			return (1);
466	}
467	return (0);
468}
469
470void
471adv_lib_init(struct adv_softc *adv)
472{
473	if ((adv->type & ADV_ULTRA) != 0) {
474		adv->sdtr_period_tbl = adv_sdtr_period_tbl_ultra;
475		adv->sdtr_period_tbl_size = sizeof(adv_sdtr_period_tbl_ultra);
476	} else {
477		adv->sdtr_period_tbl = adv_sdtr_period_tbl;
478		adv->sdtr_period_tbl_size = sizeof(adv_sdtr_period_tbl);
479	}
480}
481
/*
 * Read the EEPROM-resident configuration into *eeprom_config and
 * return the 16bit sum of the words read -- excluding the final
 * word, which is the stored checksum the caller compares against.
 */
u_int16_t
adv_get_eeprom_config(struct adv_softc *adv, struct
		      adv_eeprom_config  *eeprom_config)
{
	u_int16_t	sum;
	u_int16_t	*wbuf;
	u_int8_t	cfg_beg;
	u_int8_t	cfg_end;
	u_int8_t	s_addr;

	wbuf = (u_int16_t *)eeprom_config;
	sum = 0;

	/* The first two words always live at EEPROM addresses 0 and 1. */
	for (s_addr = 0; s_addr < 2; s_addr++, wbuf++) {
		*wbuf = adv_read_eeprom_16(adv, s_addr);
		sum += *wbuf;
	}

	/* VL cards use a different configuration window. */
	if (adv->type & ADV_VL) {
		cfg_beg = ADV_EEPROM_CFG_BEG_VL;
		cfg_end = ADV_EEPROM_MAX_ADDR_VL;
	} else {
		cfg_beg = ADV_EEPROM_CFG_BEG;
		cfg_end = ADV_EEPROM_MAX_ADDR;
	}

	for (s_addr = cfg_beg; s_addr <= (cfg_end - 1); s_addr++, wbuf++) {
		*wbuf = adv_read_eeprom_16(adv, s_addr);
		sum += *wbuf;
#if ADV_DEBUG_EEPROM
		printf("Addr 0x%x: 0x%04x\n", s_addr, *wbuf);
#endif
	}
	/* Read the stored checksum word; deliberately not added to sum. */
	*wbuf = adv_read_eeprom_16(adv, s_addr);
	return (sum);
}
518
519int
520adv_set_eeprom_config(struct adv_softc *adv,
521		      struct adv_eeprom_config *eeprom_config)
522{
523	int	retry;
524
525	retry = 0;
526	while (1) {
527		if (adv_set_eeprom_config_once(adv, eeprom_config) == 0) {
528			break;
529		}
530		if (++retry > ADV_EEPROM_MAX_RETRY) {
531			break;
532		}
533	}
534	return (retry > ADV_EEPROM_MAX_RETRY);
535}
536
/*
 * Reset the controller (and optionally the SCSI bus) and leave the
 * chip halted.  The register sequence and delays follow the vendor's
 * reset recipe.  Returns non-zero if the chip reports halted.
 */
int
adv_reset_chip(struct adv_softc *adv, int reset_bus)
{
	adv_stop_chip(adv);
	ADV_OUTB(adv, ADV_CHIP_CTRL, ADV_CC_CHIP_RESET | ADV_CC_HALT
				     | (reset_bus ? ADV_CC_SCSI_RESET : 0));
	DELAY(60);

	adv_set_chip_ih(adv, ADV_INS_RFLAG_WTM);
	adv_set_chip_ih(adv, ADV_INS_HALT);

	if (reset_bus)
		ADV_OUTB(adv, ADV_CHIP_CTRL, ADV_CC_CHIP_RESET | ADV_CC_HALT);

	ADV_OUTB(adv, ADV_CHIP_CTRL, ADV_CC_HALT);
	if (reset_bus)
		/* Let the SCSI bus reset settle. */
		DELAY(200 * 1000);

	/* Clear any latched SCSI reset interrupt, then the status word. */
	ADV_OUTW(adv, ADV_CHIP_STATUS, ADV_CIW_CLR_SCSI_RESET_INT);
	ADV_OUTW(adv, ADV_CHIP_STATUS, 0);
	return (adv_is_chip_halted(adv));
}
559
560int
561adv_test_external_lram(struct adv_softc* adv)
562{
563	u_int16_t	q_addr;
564	u_int16_t	saved_value;
565	int		success;
566
567	success = 0;
568
569	q_addr = ADV_QNO_TO_QADDR(241);
570	saved_value = adv_read_lram_16(adv, q_addr);
571	if (adv_write_and_verify_lram_16(adv, q_addr, 0x55AA) == 0) {
572		success = 1;
573		adv_write_lram_16(adv, q_addr, saved_value);
574	}
575	return (success);
576}
577
578
/*
 * Initialize LRAM and download the RISC microcode.  Interrupts are
 * disabled for the download and only re-enabled on success.
 * Returns 0 on success, 1 on checksum mismatch or microcode
 * variable initialization failure.
 */
int
adv_init_lram_and_mcode(struct adv_softc *adv)
{
	u_int32_t	retval;

	adv_disable_interrupt(adv);

	adv_init_lram(adv);

	/* The loader returns a checksum of the downloaded image. */
	retval = adv_load_microcode(adv, 0, (u_int16_t *)adv_mcode,
				    adv_mcode_size);
	if (retval != adv_mcode_chksum) {
		printf("adv%d: Microcode download failed checksum!\n",
		       adv->unit);
		return (1);
	}

	if (adv_init_microcode_var(adv) != 0)
		return (1);

	adv_enable_interrupt(adv);
	return (0);
}
602
603u_int8_t
604adv_get_chip_irq(struct adv_softc *adv)
605{
606	u_int16_t	cfg_lsw;
607	u_int8_t	chip_irq;
608
609	cfg_lsw = ADV_INW(adv, ADV_CONFIG_LSW);
610
611	if ((adv->type & ADV_VL) != 0) {
612		chip_irq = (u_int8_t)(((cfg_lsw >> 2) & 0x07));
613		if ((chip_irq == 0) ||
614		    (chip_irq == 4) ||
615		    (chip_irq == 7)) {
616			return (0);
617		}
618		return (chip_irq + (ADV_MIN_IRQ_NO - 1));
619	}
620	chip_irq = (u_int8_t)(((cfg_lsw >> 2) & 0x03));
621	if (chip_irq == 3)
622		chip_irq += 2;
623	return (chip_irq + ADV_MIN_IRQ_NO);
624}
625
/*
 * Program the board's interrupt line selection and return the IRQ
 * actually in effect (re-read via adv_get_chip_irq()).  Only VL and
 * ISA variants are programmable here; other types fall through.
 */
u_int8_t
adv_set_chip_irq(struct adv_softc *adv, u_int8_t irq_no)
{
	u_int16_t	cfg_lsw;

	if ((adv->type & ADV_VL) != 0) {
		if (irq_no != 0) {
			if ((irq_no < ADV_MIN_IRQ_NO)
			 || (irq_no > ADV_MAX_IRQ_NO)) {
				/* Out of range: disable the IRQ. */
				irq_no = 0;
			} else {
				/* Convert to the chip's encoding. */
				irq_no -= ADV_MIN_IRQ_NO - 1;
			}
		}
		/*
		 * Two write/strobe passes; the masks and the 0x0010 bit
		 * follow the vendor programming sequence.
		 */
		cfg_lsw = ADV_INW(adv, ADV_CONFIG_LSW) & 0xFFE3;
		cfg_lsw |= 0x0010;
		ADV_OUTW(adv, ADV_CONFIG_LSW, cfg_lsw);
		adv_toggle_irq_act(adv);

		cfg_lsw = ADV_INW(adv, ADV_CONFIG_LSW) & 0xFFE0;
		cfg_lsw |= (irq_no & 0x07) << 2;
		ADV_OUTW(adv, ADV_CONFIG_LSW, cfg_lsw);
		adv_toggle_irq_act(adv);
	} else if ((adv->type & ADV_ISA) != 0) {
		/*
		 * NOTE(review): IRQ 15 is mapped down by two before the
		 * 2-bit encoding -- presumably the hardware encodes 15 in
		 * the slot after 12; confirm against the data book.
		 */
		if (irq_no == 15)
			irq_no -= 2;
		irq_no -= ADV_MIN_IRQ_NO;
		cfg_lsw = ADV_INW(adv, ADV_CONFIG_LSW) & 0xFFF3;
		cfg_lsw |= (irq_no & 0x03) << 2;
		ADV_OUTW(adv, ADV_CONFIG_LSW, cfg_lsw);
	}
	return (adv_get_chip_irq(adv));
}
659
/*
 * Set the controller's own SCSI ID in the config register.  The
 * write is skipped when the register already holds new_id.
 */
void
adv_set_chip_scsiid(struct adv_softc *adv, int new_id)
{
	u_int16_t cfg_lsw;

	cfg_lsw = ADV_INW(adv, ADV_CONFIG_LSW);
	if (ADV_CONFIG_SCSIID(cfg_lsw) == new_id)
		return;
    	cfg_lsw &= ~ADV_CFG_LSW_SCSIID;
	cfg_lsw |= (new_id & ADV_MAX_TID) << ADV_CFG_LSW_SCSIID_SHIFT;
	ADV_OUTW(adv, ADV_CONFIG_LSW, cfg_lsw);
}
672
/*
 * Stage a SCSI request for execution by the microcode.  Returns 0
 * if the request was handed to the chip, 1 if there were not enough
 * free microcode queues (urgent requests bypass that check).
 */
int
adv_execute_scsi_queue(struct adv_softc *adv, struct adv_scsi_q *scsiq,
		       u_int32_t datalen)
{
	struct		adv_target_transinfo* tinfo;
	u_int32_t	*p_data_addr;
	u_int32_t	*p_data_bcount;
	int		disable_syn_offset_one_fix;
	int		retval;
	u_int		n_q_required;
	u_int32_t	addr;
	u_int8_t	sg_entry_cnt;
	u_int8_t	target_ix;
	u_int8_t	sg_entry_cnt_minus_one;
	u_int8_t	tid_no;

	scsiq->q1.q_no = 0;
	retval = 1;  /* Default to error case */
	target_ix = scsiq->q2.target_ix;
	tid_no = ADV_TIX_TO_TID(target_ix);
	tinfo = &adv->tinfo[tid_no];

	if (scsiq->cdbptr[0] == REQUEST_SENSE) {
		/* Renegotiate if appropriate. */
		adv_set_syncrate(adv, /*struct cam_path */NULL,
				 tid_no, /*period*/0, /*offset*/0,
				 ADV_TRANS_CUR);
		if (tinfo->current.period != tinfo->goal.period) {
			adv_msgout_sdtr(adv, tinfo->goal.period,
					tinfo->goal.offset);
			scsiq->q1.cntl |= (QC_MSG_OUT | QC_URGENT);
		}
	}

	if ((scsiq->q1.cntl & QC_SG_HEAD) != 0) {
		sg_entry_cnt = scsiq->sg_head->entry_cnt;
		sg_entry_cnt_minus_one = sg_entry_cnt - 1;

#ifdef DIAGNOSTIC
		if (sg_entry_cnt <= 1)
			panic("adv_execute_scsi_queue: Queue "
			      "with QC_SG_HEAD set but %d segs.", sg_entry_cnt);

		if (sg_entry_cnt > ADV_MAX_SG_LIST)
			panic("adv_execute_scsi_queue: "
			      "Queue with too many segs.");

		if ((adv->type & (ADV_ISA | ADV_VL | ADV_EISA)) != 0) {
			int i;

			/* All but the last segment must end word aligned. */
			for (i = 0; i < sg_entry_cnt_minus_one; i++) {
				addr = scsiq->sg_head->sg_list[i].addr +
				       scsiq->sg_head->sg_list[i].bytes;

				if ((addr & 0x0003) != 0)
					panic("adv_execute_scsi_queue: SG "
					      "with odd address or byte count");
			}
		}
#endif
		/* Track the final S/G element for the fixup below. */
		p_data_addr =
		    &scsiq->sg_head->sg_list[sg_entry_cnt_minus_one].addr;
		p_data_bcount =
		    &scsiq->sg_head->sg_list[sg_entry_cnt_minus_one].bytes;

		n_q_required = adv_sgcount_to_qcount(sg_entry_cnt);
		scsiq->sg_head->queue_cnt = n_q_required - 1;
	} else {
		p_data_addr = &scsiq->q1.data_addr;
		p_data_bcount = &scsiq->q1.data_cnt;
		n_q_required = 1;
	}

	disable_syn_offset_one_fix = FALSE;

	/*
	 * Small transfers and common "control" commands must not use
	 * the asyn-via-syn offset-one workaround.
	 */
	if ((adv->fix_asyn_xfer & scsiq->q1.target_id) != 0
	 && (adv->fix_asyn_xfer_always & scsiq->q1.target_id) == 0) {

		if (datalen != 0) {
			if (datalen < 512) {
				disable_syn_offset_one_fix = TRUE;
			} else {
				if (scsiq->cdbptr[0] == INQUIRY
				 || scsiq->cdbptr[0] == REQUEST_SENSE
				 || scsiq->cdbptr[0] == READ_CAPACITY
				 || scsiq->cdbptr[0] == MODE_SELECT_6
				 || scsiq->cdbptr[0] == MODE_SENSE_6
				 || scsiq->cdbptr[0] == MODE_SENSE_10
				 || scsiq->cdbptr[0] == MODE_SELECT_10
				 || scsiq->cdbptr[0] == READ_TOC) {
					disable_syn_offset_one_fix = TRUE;
				}
			}
		}
	}

	if (disable_syn_offset_one_fix) {
		/* Untagged only; tell the microcode to skip the fix. */
		scsiq->q2.tag_code &=
		    ~(MSG_SIMPLE_Q_TAG|MSG_HEAD_OF_Q_TAG|MSG_ORDERED_Q_TAG);
		scsiq->q2.tag_code |= (ADV_TAG_FLAG_DISABLE_ASYN_USE_SYN_FIX
				     | ADV_TAG_FLAG_DISABLE_DISCONNECT);
	}

	/*
	 * NOTE(review): looks like a workaround for chips that cannot
	 * write back the final sub-word bytes of a read (flag name
	 * suggests "if not DWB"): trim the transfer to a word boundary
	 * and record the trimmed byte count -- confirm with the errata.
	 */
	if ((adv->bug_fix_control & ADV_BUG_FIX_IF_NOT_DWB) != 0
	 && (scsiq->cdbptr[0] == READ_10 || scsiq->cdbptr[0] == READ_6)) {
		u_int8_t extra_bytes;

		addr = *p_data_addr + *p_data_bcount;
		extra_bytes = addr & 0x0003;
		if (extra_bytes != 0
		 && ((scsiq->q1.cntl & QC_SG_HEAD) != 0
		  || (scsiq->q1.data_cnt & 0x01FF) == 0)) {
			scsiq->q2.tag_code |= ADV_TAG_FLAG_EXTRA_BYTES;
			scsiq->q1.extra_bytes = extra_bytes;
			*p_data_bcount -= extra_bytes;
		}
	}

	/* Submit if enough queues are free, or unconditionally if urgent. */
	if ((adv_get_num_free_queues(adv, n_q_required) >= n_q_required)
	 || ((scsiq->q1.cntl & QC_URGENT) != 0))
		retval = adv_send_scsi_queue(adv, scsiq, n_q_required);

	return (retval);
}
797
798
/*
 * Copy the completion information for a finished request out of
 * LRAM at q_addr into *scsiq.  Returns the number of S/G queues
 * that were chained to the request.
 */
u_int8_t
adv_copy_lram_doneq(struct adv_softc *adv, u_int16_t q_addr,
		    struct adv_q_done_info *scsiq, u_int32_t max_dma_count)
{
	u_int16_t val;
	u_int8_t  sg_queue_cnt;

	/* Bulk-copy the d2/d3 portions directly from LRAM. */
	adv_get_q_info(adv, q_addr + ADV_SCSIQ_DONE_INFO_BEG,
		       (u_int16_t *)scsiq,
		       (sizeof(scsiq->d2) + sizeof(scsiq->d3)) / 2);

#if BYTE_ORDER == BIG_ENDIAN
	adv_adj_endian_qdone_info(scsiq);
#endif

	/* The remaining fields are packed two-per-word. */
	val = adv_read_lram_16(adv, q_addr + ADV_SCSIQ_B_STATUS);
	scsiq->q_status = val & 0xFF;
	scsiq->q_no = (val >> 8) & 0XFF;

	val = adv_read_lram_16(adv, q_addr + ADV_SCSIQ_B_CNTL);
	scsiq->cntl = val & 0xFF;
	sg_queue_cnt = (val >> 8) & 0xFF;

	val = adv_read_lram_16(adv,q_addr + ADV_SCSIQ_B_SENSE_LEN);
	scsiq->sense_len = val & 0xFF;
	scsiq->extra_bytes = (val >> 8) & 0xFF;

	/*
	 * Due to a bug in accessing LRAM on the 940UA, the residual
	 * is split into separate high and low 16bit quantities.
	 */
	scsiq->remain_bytes =
	    adv_read_lram_16(adv, q_addr + ADV_SCSIQ_DW_REMAIN_XFER_CNT);
	scsiq->remain_bytes |=
	    adv_read_lram_16(adv, q_addr + ADV_SCSIQ_W_ALT_DC1) << 16;

	/*
	 * XXX Is this just a safeguard or will the counter really
	 * have bogus upper bits?
	 */
	scsiq->remain_bytes &= max_dma_count;

	return (sg_queue_cnt);
}
843
844int
845adv_start_chip(struct adv_softc *adv)
846{
847	ADV_OUTB(adv, ADV_CHIP_CTRL, 0);
848	if ((ADV_INW(adv, ADV_CHIP_STATUS) & ADV_CSW_HALTED) != 0)
849		return (0);
850	return (1);
851}
852
/*
 * Request that the RISC stop execution and poll (up to ~20ms in 1ms
 * steps) for its acknowledgement.  Returns 1 once the RISC
 * acknowledges; 0 on timeout or when a stop request was already
 * outstanding (stop code non-zero).
 */
int
adv_stop_execution(struct adv_softc *adv)
{
	int count;

	count = 0;
	if (adv_read_lram_8(adv, ADV_STOP_CODE_B) == 0) {
		adv_write_lram_8(adv, ADV_STOP_CODE_B,
				 ADV_STOP_REQ_RISC_STOP);
		do {
			if (adv_read_lram_8(adv, ADV_STOP_CODE_B) &
				ADV_STOP_ACK_RISC_STOP) {
				return (1);
			}
			DELAY(1000);
		} while (count++ < 20);
	}
	return (0);
}
872
873int
874adv_is_chip_halted(struct adv_softc *adv)
875{
876	if ((ADV_INW(adv, ADV_CHIP_STATUS) & ADV_CSW_HALTED) != 0) {
877		if ((ADV_INB(adv, ADV_CHIP_CTRL) & ADV_CC_HALT) != 0) {
878			return (1);
879		}
880	}
881	return (0);
882}
883
/*
 * Acknowledge an interrupt: wait (bounded spin) for the microcode's
 * "generating interrupt" flag to drop, set the host ack flag, then
 * pulse the INT_ACK strobe until the pending bit clears (bounded
 * retries), and finally restore the host flag.
 *
 * XXX The numeric constants and the loops in this routine
 * need to be documented.
 */
void
adv_ack_interrupt(struct adv_softc *adv)
{
	u_int8_t	host_flag;
	u_int8_t	risc_flag;
	int		loop;

	/* Wait for the RISC to finish posting its interrupt state. */
	loop = 0;
	do {
		risc_flag = adv_read_lram_8(adv, ADVV_RISC_FLAG_B);
		if (loop++ > 0x7FFF) {
			/* NOTE(review): bail-out bound is undocumented. */
			break;
		}
	} while ((risc_flag & ADV_RISC_FLAG_GEN_INT) != 0);

	host_flag = adv_read_lram_8(adv, ADVV_HOST_FLAG_B);
	adv_write_lram_8(adv, ADVV_HOST_FLAG_B,
			 host_flag | ADV_HOST_FLAG_ACK_INT);

	ADV_OUTW(adv, ADV_CHIP_STATUS, ADV_CIW_INT_ACK);
	loop = 0;
	while (ADV_INW(adv, ADV_CHIP_STATUS) & ADV_CSW_INT_PENDING) {
		ADV_OUTW(adv, ADV_CHIP_STATUS, ADV_CIW_INT_ACK);
		if (loop++ > 3) {
			break;
		}
	}

	/* Restore the original host flag value. */
	adv_write_lram_8(adv, ADVV_HOST_FLAG_B, host_flag);
}
918
/*
 * Handle all conditions that may halt the chip waiting
 * for us to intervene.  The halt code and the queue that was
 * executing are fetched from microcode variables in LRAM; the
 * halt code is cleared on exit to resume the microcode.
 */
void
adv_isr_chip_halted(struct adv_softc *adv)
{
	u_int16_t	  int_halt_code;
	u_int16_t	  halt_q_addr;
	target_bit_vector target_mask;
	target_bit_vector scsi_busy;
	u_int8_t	  halt_qp;
	u_int8_t	  target_ix;
	u_int8_t	  q_cntl;
	u_int8_t	  tid_no;

	/* Fetch the halt reason and the queue being executed. */
	int_halt_code = adv_read_lram_16(adv, ADVV_HALTCODE_W);
	halt_qp = adv_read_lram_8(adv, ADVV_CURCDB_B);
	halt_q_addr = ADV_QNO_TO_QADDR(halt_qp);
	target_ix = adv_read_lram_8(adv, halt_q_addr + ADV_SCSIQ_B_TARGET_IX);
	q_cntl = adv_read_lram_8(adv, halt_q_addr + ADV_SCSIQ_B_CNTL);
	tid_no = ADV_TIX_TO_TID(target_ix);
	target_mask = ADV_TID_TO_TARGET_MASK(tid_no);
	if (int_halt_code == ADV_HALT_DISABLE_ASYN_USE_SYN_FIX) {
		/*
		 * Temporarily disable the async fix by removing
		 * this target from the list of affected targets,
		 * setting our async rate, and then putting us
		 * back into the mask.
		 */
		adv->fix_asyn_xfer &= ~target_mask;
		adv_set_syncrate(adv, /*struct cam_path */NULL,
				 tid_no, /*period*/0, /*offset*/0,
				 ADV_TRANS_ACTIVE);
		adv->fix_asyn_xfer |= target_mask;
	} else if (int_halt_code == ADV_HALT_ENABLE_ASYN_USE_SYN_FIX) {
		/* Re-apply the async-fix rate for this target. */
		adv_set_syncrate(adv, /*struct cam_path */NULL,
				 tid_no, /*period*/0, /*offset*/0,
				 ADV_TRANS_ACTIVE);
	} else if (int_halt_code == ADV_HALT_EXTMSG_IN) {
		/* Incoming extended message (e.g. SDTR) to parse. */
		adv_handle_extmsg_in(adv, halt_q_addr, q_cntl,
				     target_mask, tid_no);
	} else if (int_halt_code == ADV_HALT_CHK_CONDITION) {
		struct	  adv_target_transinfo* tinfo;
		union	  ccb *ccb;
		u_int32_t cinfo_index;
		u_int8_t  tag_code;
		u_int8_t  q_status;

		tinfo = &adv->tinfo[tid_no];
		q_cntl |= QC_REQ_SENSE;

		/* Renegotiate if appropriate. */
		adv_set_syncrate(adv, /*struct cam_path */NULL,
				 tid_no, /*period*/0, /*offset*/0,
				 ADV_TRANS_CUR);
		if (tinfo->current.period != tinfo->goal.period) {
			adv_msgout_sdtr(adv, tinfo->goal.period,
					tinfo->goal.offset);
			q_cntl |= QC_MSG_OUT;
		}
		adv_write_lram_8(adv, halt_q_addr + ADV_SCSIQ_B_CNTL, q_cntl);

		/* Don't tag request sense commands */
		tag_code = adv_read_lram_8(adv,
					   halt_q_addr + ADV_SCSIQ_B_TAG_CODE);
		tag_code &=
		    ~(MSG_SIMPLE_Q_TAG|MSG_HEAD_OF_Q_TAG|MSG_ORDERED_Q_TAG);

		if ((adv->fix_asyn_xfer & target_mask) != 0
		 && (adv->fix_asyn_xfer_always & target_mask) == 0) {
			tag_code |= (ADV_TAG_FLAG_DISABLE_DISCONNECT
				 | ADV_TAG_FLAG_DISABLE_ASYN_USE_SYN_FIX);
		}
		adv_write_lram_8(adv, halt_q_addr + ADV_SCSIQ_B_TAG_CODE,
				 tag_code);
		/* Requeue the command for the sense phase. */
		q_status = adv_read_lram_8(adv,
					   halt_q_addr + ADV_SCSIQ_B_STATUS);
		q_status |= (QS_READY | QS_BUSY);
		adv_write_lram_8(adv, halt_q_addr + ADV_SCSIQ_B_STATUS,
				 q_status);
		/*
		 * Freeze the devq until we can handle the sense condition.
		 */
		cinfo_index =
		    adv_read_lram_32(adv, halt_q_addr + ADV_SCSIQ_D_CINFO_IDX);
		ccb = adv->ccb_infos[cinfo_index].ccb;
		xpt_freeze_devq(ccb->ccb_h.path, /*count*/1);
		ccb->ccb_h.status |= CAM_DEV_QFRZN;
		adv_abort_ccb(adv, tid_no, ADV_TIX_TO_LUN(target_ix),
			      /*ccb*/NULL, CAM_REQUEUE_REQ,
			      /*queued_only*/TRUE);
		scsi_busy = adv_read_lram_8(adv, ADVV_SCSIBUSY_B);
		scsi_busy &= ~target_mask;
		adv_write_lram_8(adv, ADVV_SCSIBUSY_B, scsi_busy);
		/*
		 * Ensure we have enough time to actually
		 * retrieve the sense.
		 */
		untimeout(adv_timeout, (caddr_t)ccb, ccb->ccb_h.timeout_ch);
		ccb->ccb_h.timeout_ch =
		    timeout(adv_timeout, (caddr_t)ccb, 5 * hz);
	} else if (int_halt_code == ADV_HALT_SDTR_REJECTED) {
		struct	ext_msg out_msg;

		adv_read_lram_16_multi(adv, ADVV_MSGOUT_BEG,
				       (u_int16_t *) &out_msg,
				       sizeof(out_msg)/2);

		if ((out_msg.msg_type == MSG_EXTENDED)
		 && (out_msg.msg_len == MSG_EXT_SDTR_LEN)
		 && (out_msg.msg_req == MSG_EXT_SDTR)) {

			/* Revert to Async */
			adv_set_syncrate(adv, /*struct cam_path */NULL,
					 tid_no, /*period*/0, /*offset*/0,
					 ADV_TRANS_GOAL|ADV_TRANS_ACTIVE);
		}
		q_cntl &= ~QC_MSG_OUT;
		adv_write_lram_8(adv, halt_q_addr + ADV_SCSIQ_B_CNTL, q_cntl);
	} else if (int_halt_code == ADV_HALT_SS_QUEUE_FULL) {
		u_int8_t scsi_status;
		union ccb *ccb;
		u_int32_t cinfo_index;

		/* Target reported QUEUE FULL: requeue and freeze. */
		scsi_status = adv_read_lram_8(adv, halt_q_addr
					      + ADV_SCSIQ_SCSI_STATUS);
		cinfo_index =
		    adv_read_lram_32(adv, halt_q_addr + ADV_SCSIQ_D_CINFO_IDX);
		ccb = adv->ccb_infos[cinfo_index].ccb;
		xpt_freeze_devq(ccb->ccb_h.path, /*count*/1);
		ccb->ccb_h.status |= CAM_DEV_QFRZN|CAM_SCSI_STATUS_ERROR;
		ccb->csio.scsi_status = SCSI_STATUS_QUEUE_FULL;
		adv_abort_ccb(adv, tid_no, ADV_TIX_TO_LUN(target_ix),
			      /*ccb*/NULL, CAM_REQUEUE_REQ,
			      /*queued_only*/TRUE);
		scsi_busy = adv_read_lram_8(adv, ADVV_SCSIBUSY_B);
		scsi_busy &= ~target_mask;
		adv_write_lram_8(adv, ADVV_SCSIBUSY_B, scsi_busy);
	} else {
		printf("Unhandled Halt Code %x\n", int_halt_code);
	}
	/* Clear the halt code so the microcode resumes. */
	adv_write_lram_16(adv, ADVV_HALTCODE_W, 0);
}
1063
1064void
1065adv_sdtr_to_period_offset(struct adv_softc *adv,
1066			  u_int8_t sync_data, u_int8_t *period,
1067			  u_int8_t *offset, int tid)
1068{
1069	if (adv->fix_asyn_xfer & ADV_TID_TO_TARGET_MASK(tid)
1070	 && (sync_data == ASYN_SDTR_DATA_FIX_PCI_REV_AB)) {
1071		*period = *offset = 0;
1072	} else {
1073		*period = adv->sdtr_period_tbl[((sync_data >> 4) & 0xF)];
1074		*offset = sync_data & 0xF;
1075	}
1076}
1077
/*
 * Program the controller's synchronous transfer settings for target
 * 'tid' and record the new period/offset in whichever transfer-info
 * categories 'type' selects (current, goal, user).  When the active
 * settings change, the chip is halted around the register update and,
 * if a CAM path was supplied, the XPT layer is told about the newly
 * negotiated parameters.
 */
void
adv_set_syncrate(struct adv_softc *adv, struct cam_path *path,
		 u_int tid, u_int period, u_int offset, u_int type)
{
	struct adv_target_transinfo* tinfo;
	u_int old_period;
	u_int old_offset;
	u_int8_t sdtr_data;

	tinfo = &adv->tinfo[tid];

	/* Filter our input */
	/* Clamps period/offset in place to what the chip supports. */
	sdtr_data = adv_period_offset_to_sdtr(adv, &period,
					      &offset, tid);

	old_period = tinfo->current.period;
	old_offset = tinfo->current.offset;

	if ((type & ADV_TRANS_CUR) != 0
	 && ((old_period != period || old_offset != offset)
	  || period == 0 || offset == 0) /*Changes in asyn fix settings*/) {
		int s;
		int halted;

		/* Block CAM interrupts while we touch chip registers. */
		s = splcam();
		halted = adv_is_chip_halted(adv);
		if (halted == 0)
			/* Must halt the chip first */
			adv_host_req_chip_halt(adv);

		/* Update current hardware settings */
		adv_set_sdtr_reg_at_id(adv, tid, sdtr_data);

		/*
		 * If a target can run in sync mode, we don't need
		 * to check it for sync problems.
		 */
		if (offset != 0)
			adv->fix_asyn_xfer &= ~ADV_TID_TO_TARGET_MASK(tid);

		if (halted == 0)
			/* Start the chip again */
			adv_start_chip(adv);

		splx(s);
		tinfo->current.period = period;
		tinfo->current.offset = offset;

		if (path != NULL) {
			/*
			 * Tell the SCSI layer about the
			 * new transfer parameters.
			 */
			struct	ccb_trans_settings neg;

			/*
			 * NOTE(review): only the sync fields and 'valid'
			 * are initialized; the rest of 'neg' is
			 * indeterminate stack data.  Presumably the
			 * consumers honor the 'valid' bits -- confirm.
			 */
			neg.sync_period = period;
			neg.sync_offset = offset;
			neg.valid = CCB_TRANS_SYNC_RATE_VALID
				  | CCB_TRANS_SYNC_OFFSET_VALID;
			xpt_setup_ccb(&neg.ccb_h, path, /*priority*/1);
			xpt_async(AC_TRANSFER_NEG, path, &neg);
		}
	}

	if ((type & ADV_TRANS_GOAL) != 0) {
		tinfo->goal.period = period;
		tinfo->goal.offset = offset;
	}

	if ((type & ADV_TRANS_USER) != 0) {
		tinfo->user.period = period;
		tinfo->user.offset = offset;
	}
}
1152
/*
 * Convert a requested period/offset pair into the hardware SDTR byte,
 * clamping both values (in place) to what the controller supports.
 * Either pointer may be NULL when the caller does not care about that
 * half of the negotiation.  Returns the encoded SDTR byte, or an
 * "async" encoding (possibly the PCI rev A/B fixup magic value) when
 * the request cannot be satisfied synchronously.
 */
u_int8_t
adv_period_offset_to_sdtr(struct adv_softc *adv, u_int *period,
			  u_int *offset, int tid)
{
	u_int i;
	u_int dummy_offset;
	u_int dummy_period;

	/* Substitute local scratch for any NULL output pointers. */
	if (offset == NULL) {
		dummy_offset = 0;
		offset = &dummy_offset;
	}

	if (period == NULL) {
		dummy_period = 0;
		period = &dummy_period;
	}

#define MIN(a,b) (((a) < (b)) ? (a) : (b))

	*offset = MIN(ADV_SYN_MAX_OFFSET, *offset);
	if (*period != 0 && *offset != 0) {
		/* Table is ordered fastest (smallest period) first. */
		for (i = 0; i < adv->sdtr_period_tbl_size; i++) {
			if (*period <= adv->sdtr_period_tbl[i]) {
				/*
				 * When responding to a target that requests
				 * sync, the requested  rate may fall between
				 * two rates that we can output, but still be
				 * a rate that we can receive.  Because of this,
				 * we want to respond to the target with
				 * the same rate that it sent to us even
				 * if the period we use to send data to it
				 * is lower.  Only lower the response period
				 * if we must.
				 */
				if (i == 0 /* Our maximum rate */)
					*period = adv->sdtr_period_tbl[0];
				return ((i << 4) | *offset);
			}
		}
	}

	/* Must go async */
	*period = 0;
	*offset = 0;
	if (adv->fix_asyn_xfer & ADV_TID_TO_TARGET_MASK(tid))
		return (ASYN_SDTR_DATA_FIX_PCI_REV_AB);
	return (0);
}
1202
1203/* Internal Routines */
1204
/*
 * Read 'count' consecutive 16-bit words of chip local RAM starting at
 * s_addr into 'buffer'.  The address latch is set once; repeated reads
 * of the data port stream successive words.
 */
static void
adv_read_lram_16_multi(struct adv_softc *adv, u_int16_t s_addr,
		       u_int16_t *buffer, int count)
{
	ADV_OUTW(adv, ADV_LRAM_ADDR, s_addr);
	ADV_INSW(adv, ADV_LRAM_DATA, buffer, count);
}
1212
/*
 * Write 'count' consecutive 16-bit words from 'buffer' into chip local
 * RAM starting at s_addr.
 */
static void
adv_write_lram_16_multi(struct adv_softc *adv, u_int16_t s_addr,
			u_int16_t *buffer, int count)
{
	ADV_OUTW(adv, ADV_LRAM_ADDR, s_addr);
	ADV_OUTSW(adv, ADV_LRAM_DATA, buffer, count);
}
1220
/*
 * Fill 'count' consecutive 16-bit words of local RAM, starting at
 * s_addr, with 'set_value'.
 */
static void
adv_mset_lram_16(struct adv_softc *adv, u_int16_t s_addr,
		 u_int16_t set_value, int count)
{
	ADV_OUTW(adv, ADV_LRAM_ADDR, s_addr);
	bus_space_set_multi_2(adv->tag, adv->bsh, ADV_LRAM_DATA,
			      set_value, count);
}
1229
1230static u_int32_t
1231adv_msum_lram_16(struct adv_softc *adv, u_int16_t s_addr, int count)
1232{
1233	u_int32_t	sum;
1234	int		i;
1235
1236	sum = 0;
1237	ADV_OUTW(adv, ADV_LRAM_ADDR, s_addr);
1238	for (i = 0; i < count; i++)
1239		sum += ADV_INW(adv, ADV_LRAM_DATA);
1240	return (sum);
1241}
1242
1243static int
1244adv_write_and_verify_lram_16(struct adv_softc *adv, u_int16_t addr,
1245			     u_int16_t value)
1246{
1247	int	retval;
1248
1249	retval = 0;
1250	ADV_OUTW(adv, ADV_LRAM_ADDR, addr);
1251	ADV_OUTW(adv, ADV_LRAM_DATA, value);
1252	DELAY(10000);
1253	ADV_OUTW(adv, ADV_LRAM_ADDR, addr);
1254	if (value != ADV_INW(adv, ADV_LRAM_DATA))
1255		retval = 1;
1256	return (retval);
1257}
1258
/*
 * Read a 32-bit value stored as two consecutive 16-bit words of local
 * RAM at 'addr'.  The order in which the two halves arrive from the
 * data port depends on host byte order; see the matching
 * adv_write_lram_32().
 */
static u_int32_t
adv_read_lram_32(struct adv_softc *adv, u_int16_t addr)
{
	u_int16_t           val_low, val_high;

	ADV_OUTW(adv, ADV_LRAM_ADDR, addr);

#if BYTE_ORDER == BIG_ENDIAN
	val_high = ADV_INW(adv, ADV_LRAM_DATA);
	val_low = ADV_INW(adv, ADV_LRAM_DATA);
#else
	val_low = ADV_INW(adv, ADV_LRAM_DATA);
	val_high = ADV_INW(adv, ADV_LRAM_DATA);
#endif

	return (((u_int32_t)val_high << 16) | (u_int32_t)val_low);
}
1276
/*
 * Store a 32-bit value as two consecutive 16-bit words of local RAM at
 * 'addr', emitting the halves in the order the host's byte order
 * requires (mirror of adv_read_lram_32()).
 */
static void
adv_write_lram_32(struct adv_softc *adv, u_int16_t addr, u_int32_t value)
{
	ADV_OUTW(adv, ADV_LRAM_ADDR, addr);

#if BYTE_ORDER == BIG_ENDIAN
	ADV_OUTW(adv, ADV_LRAM_DATA, (u_int16_t)((value >> 16) & 0xFFFF));
	ADV_OUTW(adv, ADV_LRAM_DATA, (u_int16_t)(value & 0xFFFF));
#else
	ADV_OUTW(adv, ADV_LRAM_DATA, (u_int16_t)(value & 0xFFFF));
	ADV_OUTW(adv, ADV_LRAM_DATA, (u_int16_t)((value >> 16) & 0xFFFF));
#endif
}
1290
/*
 * Stream 'count' 32-bit values into local RAM starting at s_addr by
 * pushing the buffer out as count * 2 16-bit words.
 *
 * NOTE(review): unlike adv_write_lram_32(), no byte-order swizzling is
 * performed here -- presumably only correct for little-endian hosts;
 * confirm before relying on this on big-endian.
 */
static void
adv_write_lram_32_multi(struct adv_softc *adv, u_int16_t s_addr,
			u_int32_t *buffer, int count)
{
	ADV_OUTW(adv, ADV_LRAM_ADDR, s_addr);
	ADV_OUTSW(adv, ADV_LRAM_DATA, (u_int16_t *)buffer, count * 2);
}
1298
/*
 * Read one 16-bit word from the serial EEPROM at 'addr'.  Writes are
 * disabled first, then a READ command is issued; each step is given
 * 1ms to settle before the data register is sampled.
 */
static u_int16_t
adv_read_eeprom_16(struct adv_softc *adv, u_int8_t addr)
{
	u_int16_t read_wval;
	u_int8_t  cmd_reg;

	adv_write_eeprom_cmd_reg(adv, ADV_EEPROM_CMD_WRITE_DISABLE);
	DELAY(1000);
	cmd_reg = addr | ADV_EEPROM_CMD_READ;
	adv_write_eeprom_cmd_reg(adv, cmd_reg);
	DELAY(1000);
	read_wval = ADV_INW(adv, ADV_EEPROM_DATA);
	DELAY(1000);
	return (read_wval);
}
1314
/*
 * Write 'value' to EEPROM word 'addr', but only when it differs from
 * the current contents (avoids needless EEPROM write cycles).  Returns
 * the word read back afterwards, which equals 'value' on success.
 */
static u_int16_t
adv_write_eeprom_16(struct adv_softc *adv, u_int8_t addr, u_int16_t value)
{
	u_int16_t	read_value;

	read_value = adv_read_eeprom_16(adv, addr);
	if (read_value != value) {
		adv_write_eeprom_cmd_reg(adv, ADV_EEPROM_CMD_WRITE_ENABLE);
		DELAY(1000);

		ADV_OUTW(adv, ADV_EEPROM_DATA, value);
		DELAY(1000);

		/* The write cycle itself needs a longer (20ms) settle. */
		adv_write_eeprom_cmd_reg(adv, ADV_EEPROM_CMD_WRITE | addr);
		DELAY(20 * 1000);

		adv_write_eeprom_cmd_reg(adv, ADV_EEPROM_CMD_WRITE_DISABLE);
		DELAY(1000);
		read_value = adv_read_eeprom_16(adv, addr);
	}
	return (read_value);
}
1337
1338static int
1339adv_write_eeprom_cmd_reg(struct adv_softc *adv, u_int8_t cmd_reg)
1340{
1341	u_int8_t read_back;
1342	int	 retry;
1343
1344	retry = 0;
1345	while (1) {
1346		ADV_OUTB(adv, ADV_EEPROM_CMD, cmd_reg);
1347		DELAY(1000);
1348		read_back = ADV_INB(adv, ADV_EEPROM_CMD);
1349		if (read_back == cmd_reg) {
1350			return (1);
1351		}
1352		if (retry++ > ADV_EEPROM_MAX_RETRY) {
1353			return (0);
1354		}
1355	}
1356}
1357
/*
 * Write the supplied EEPROM configuration image to the device once and
 * verify it by reading everything back.  Image layout: words 0-1, then
 * the config region (cfg_beg .. cfg_end - 1), then a running checksum
 * stored in the word at cfg_end.  Returns the number of words that
 * failed to write or verify; 0 means complete success.
 */
static int
adv_set_eeprom_config_once(struct adv_softc *adv,
			   struct adv_eeprom_config *eeprom_config)
{
	int		n_error;
	u_int16_t	*wbuf;
	u_int16_t	sum;
	u_int8_t	s_addr;
	u_int8_t	cfg_beg;
	u_int8_t	cfg_end;

	wbuf = (u_int16_t *)eeprom_config;
	n_error = 0;
	sum = 0;
	/* The first two words are written unconditionally and summed. */
	for (s_addr = 0; s_addr < 2; s_addr++, wbuf++) {
		sum += *wbuf;
		if (*wbuf != adv_write_eeprom_16(adv, s_addr, *wbuf)) {
			n_error++;
		}
	}
	/* VL cards keep their config in a different EEPROM window. */
	if (adv->type & ADV_VL) {
		cfg_beg = ADV_EEPROM_CFG_BEG_VL;
		cfg_end = ADV_EEPROM_MAX_ADDR_VL;
	} else {
		cfg_beg = ADV_EEPROM_CFG_BEG;
		cfg_end = ADV_EEPROM_MAX_ADDR;
	}

	for (s_addr = cfg_beg; s_addr <= (cfg_end - 1); s_addr++, wbuf++) {
		sum += *wbuf;
		if (*wbuf != adv_write_eeprom_16(adv, s_addr, *wbuf)) {
			n_error++;
		}
	}
	/* Store the accumulated checksum in the final config word. */
	*wbuf = sum;
	if (sum != adv_write_eeprom_16(adv, s_addr, sum)) {
		n_error++;
	}
	/* Read-back verification pass over the whole image. */
	wbuf = (u_int16_t *)eeprom_config;
	for (s_addr = 0; s_addr < 2; s_addr++, wbuf++) {
		if (*wbuf != adv_read_eeprom_16(adv, s_addr)) {
			n_error++;
		}
	}
	for (s_addr = cfg_beg; s_addr <= cfg_end; s_addr++, wbuf++) {
		if (*wbuf != adv_read_eeprom_16(adv, s_addr)) {
			n_error++;
		}
	}
	return (n_error);
}
1409
/*
 * Load the microcode image into chip local RAM at s_addr and record
 * its size and checksum in the microcode variables.  Returns the
 * 32-bit word sum of what was written so the caller can verify the
 * download.
 */
static u_int32_t
adv_load_microcode(struct adv_softc *adv, u_int16_t s_addr,
		   u_int16_t *mcode_buf, u_int16_t mcode_size)
{
	u_int32_t chksum;
	u_int16_t mcode_lram_size;
	u_int16_t mcode_chksum;

	/* mcode_size is in bytes; LRAM transfers are 16-bit words. */
	mcode_lram_size = mcode_size >> 1;
	/* XXX Why zero the memory just before you write the whole thing?? */
	adv_mset_lram_16(adv, s_addr, 0, mcode_lram_size);
	adv_write_lram_16_multi(adv, s_addr, mcode_buf, mcode_lram_size);

	chksum = adv_msum_lram_16(adv, s_addr, mcode_lram_size);
	/*
	 * The microcode's own checksum covers only the code section
	 * starting at ADV_CODE_SEC_BEG.
	 */
	mcode_chksum = (u_int16_t)adv_msum_lram_16(adv, ADV_CODE_SEC_BEG,
						   ((mcode_size - s_addr
						     - ADV_CODE_SEC_BEG) >> 1));
	adv_write_lram_16(adv, ADVV_MCODE_CHKSUM_W, mcode_chksum);
	adv_write_lram_16(adv, ADVV_MCODE_SIZE_W, mcode_size);
	return (chksum);
}
1431
/*
 * Re-initialize the local RAM queue blocks and the microcode's queue
 * linkage variables (used after a bus reset).
 */
static void
adv_reinit_lram(struct adv_softc *adv) {
	adv_init_lram(adv);
	adv_init_qlink_var(adv);
}
1437
/*
 * Carve local RAM into queue blocks and chain them together.  Queues
 * ADV_MIN_ACTIVE_QNO through max_openings are linked via forward and
 * backward byte pointers; the last queue's forward link terminates at
 * ADV_QLINK_END, and the few blocks past max_openings are initialized
 * to point at themselves.
 */
static void
adv_init_lram(struct adv_softc *adv)
{
	u_int8_t  i;
	u_int16_t s_addr;

	/* Zero the whole queue block region first. */
	adv_mset_lram_16(adv, ADV_QADR_BEG, 0,
			 (((adv->max_openings + 2 + 1) * 64) >> 1));

	i = ADV_MIN_ACTIVE_QNO;
	s_addr = ADV_QADR_BEG + ADV_QBLK_SIZE;

	/* First active queue: backward link wraps to the last queue. */
	adv_write_lram_8(adv, s_addr + ADV_SCSIQ_B_FWD,	i + 1);
	adv_write_lram_8(adv, s_addr + ADV_SCSIQ_B_BWD, adv->max_openings);
	adv_write_lram_8(adv, s_addr + ADV_SCSIQ_B_QNO, i);
	i++;
	s_addr += ADV_QBLK_SIZE;
	/* Middle queues link to their immediate neighbors. */
	for (; i < adv->max_openings; i++, s_addr += ADV_QBLK_SIZE) {
		adv_write_lram_8(adv, s_addr + ADV_SCSIQ_B_FWD, i + 1);
		adv_write_lram_8(adv, s_addr + ADV_SCSIQ_B_BWD, i - 1);
		adv_write_lram_8(adv, s_addr + ADV_SCSIQ_B_QNO, i);
	}

	/* Last queue terminates the forward chain. */
	adv_write_lram_8(adv, s_addr + ADV_SCSIQ_B_FWD, ADV_QLINK_END);
	adv_write_lram_8(adv, s_addr + ADV_SCSIQ_B_BWD, adv->max_openings - 1);
	adv_write_lram_8(adv, s_addr + ADV_SCSIQ_B_QNO, adv->max_openings);
	i++;
	s_addr += ADV_QBLK_SIZE;

	/* Trailing blocks are self-referential placeholders. */
	for (; i <= adv->max_openings + 3; i++, s_addr += ADV_QBLK_SIZE) {
		adv_write_lram_8(adv, s_addr + ADV_SCSIQ_B_FWD, i);
		adv_write_lram_8(adv, s_addr + ADV_SCSIQ_B_BWD, i);
		adv_write_lram_8(adv, s_addr + ADV_SCSIQ_B_QNO, i);
	}
}
1473
/*
 * Initialize the microcode's operating variables: force every target
 * to async, set up the queue linkage, program disconnect enable, the
 * host SCSI ID, and the overrun buffer, then point the chip's program
 * counter at the microcode entry point.  Returns 0 on success, 1 when
 * the program counter refuses to take the new value.
 */
static int
adv_init_microcode_var(struct adv_softc *adv)
{
	int	 i;

	for (i = 0; i <= ADV_MAX_TID; i++) {

		/* Start out async all around */
		adv_set_syncrate(adv, /*path*/NULL,
				 i, 0, 0,
				 ADV_TRANS_GOAL|ADV_TRANS_CUR);
	}

	adv_init_qlink_var(adv);

	adv_write_lram_8(adv, ADVV_DISC_ENABLE_B, adv->disc_enable);
	/* The host ID is stored as a bit mask, not an id number. */
	adv_write_lram_8(adv, ADVV_HOSTSCSI_ID_B, 0x01 << adv->scsi_id);

	adv_write_lram_32(adv, ADVV_OVERRUN_PADDR_D, adv->overrun_physbase);

	adv_write_lram_32(adv, ADVV_OVERRUN_BSIZE_D, ADV_OVERRUN_BSIZE);

	/* Verify the program counter sticks before declaring success. */
	ADV_OUTW(adv, ADV_REG_PROG_COUNTER, ADV_MCODE_START_ADDR);
	if (ADV_INW(adv, ADV_REG_PROG_COUNTER) != ADV_MCODE_START_ADDR) {
		printf("adv%d: Unable to set program counter. Aborting.\n",
		       adv->unit);
		return (1);
	}
	return (0);
}
1504
/*
 * Reset the microcode's queue bookkeeping: free/done list heads and
 * tails, the busy/disconnect queue heads, error/halt/stop codes, and
 * the first 32 words of the queue address region.
 */
static void
adv_init_qlink_var(struct adv_softc *adv)
{
	int	  i;
	u_int16_t lram_addr;

	adv_write_lram_8(adv, ADVV_NEXTRDY_B, 1);
	adv_write_lram_8(adv, ADVV_DONENEXT_B, adv->max_openings);

	adv_write_lram_16(adv, ADVV_FREE_Q_HEAD_W, 1);
	adv_write_lram_16(adv, ADVV_DONE_Q_TAIL_W, adv->max_openings);

	/* Busy and disconnect heads start just past the last queue. */
	adv_write_lram_8(adv, ADVV_BUSY_QHEAD_B,
			 (u_int8_t)((int) adv->max_openings + 1));
	adv_write_lram_8(adv, ADVV_DISC1_QHEAD_B,
			 (u_int8_t)((int) adv->max_openings + 2));

	adv_write_lram_8(adv, ADVV_TOTAL_READY_Q_B, adv->max_openings);

	/* Clear all status/flag variables shared with the microcode. */
	adv_write_lram_16(adv, ADVV_ASCDVC_ERR_CODE_W, 0);
	adv_write_lram_16(adv, ADVV_HALTCODE_W, 0);
	adv_write_lram_8(adv, ADVV_STOP_CODE_B, 0);
	adv_write_lram_8(adv, ADVV_SCSIBUSY_B, 0);
	adv_write_lram_8(adv, ADVV_WTM_FLAG_B, 0);
	adv_write_lram_8(adv, ADVV_Q_DONE_IN_PROGRESS_B, 0);

	/* Zero the 32-word table at the start of the queue region. */
	lram_addr = ADV_QADR_BEG;
	for (i = 0; i < 32; i++, lram_addr += 2)
		adv_write_lram_16(adv, lram_addr, 0);
}
1535
1536static void
1537adv_disable_interrupt(struct adv_softc *adv)
1538{
1539	u_int16_t cfg;
1540
1541	cfg = ADV_INW(adv, ADV_CONFIG_LSW);
1542	ADV_OUTW(adv, ADV_CONFIG_LSW, cfg & ~ADV_CFG_LSW_HOST_INT_ON);
1543}
1544
1545static void
1546adv_enable_interrupt(struct adv_softc *adv)
1547{
1548	u_int16_t cfg;
1549
1550	cfg = ADV_INW(adv, ADV_CONFIG_LSW);
1551	ADV_OUTW(adv, ADV_CONFIG_LSW, cfg | ADV_CFG_LSW_HOST_INT_ON);
1552}
1553
/*
 * Pulse ADV_CIW_IRQ_ACT in the chip status register (set, then clear).
 * Presumably this acknowledges/re-arms the interrupt latch -- the
 * register's write semantics are not visible here.
 */
static void
adv_toggle_irq_act(struct adv_softc *adv)
{
	ADV_OUTW(adv, ADV_CHIP_STATUS, ADV_CIW_IRQ_ACT);
	ADV_OUTW(adv, ADV_CHIP_STATUS, 0);
}
1560
/*
 * Clear the microcode stop code so the RISC resumes execution if it
 * had previously been asked to stop.
 *
 * NOTE(review): this uses ADV_STOP_CODE_B while every other access to
 * the stop code in this file uses ADVV_STOP_CODE_B -- confirm the two
 * constants name the same LRAM address.
 */
void
adv_start_execution(struct adv_softc *adv)
{
	if (adv_read_lram_8(adv, ADV_STOP_CODE_B) != 0) {
		adv_write_lram_8(adv, ADV_STOP_CODE_B, 0);
	}
}
1568
1569int
1570adv_stop_chip(struct adv_softc *adv)
1571{
1572	u_int8_t cc_val;
1573
1574	cc_val = ADV_INB(adv, ADV_CHIP_CTRL)
1575		 & (~(ADV_CC_SINGLE_STEP | ADV_CC_TEST | ADV_CC_DIAG));
1576	ADV_OUTB(adv, ADV_CHIP_CTRL, cc_val | ADV_CC_HALT);
1577	adv_set_chip_ih(adv, ADV_INS_HALT);
1578	adv_set_chip_ih(adv, ADV_INS_RFLAG_WTM);
1579	if ((ADV_INW(adv, ADV_CHIP_STATUS) & ADV_CSW_HALTED) == 0) {
1580		return (0);
1581	}
1582	return (1);
1583}
1584
/*
 * Ask the microcode to halt the RISC on our behalf and busy-wait for
 * it to take effect.  The previous stop code is restored before
 * returning.  Returns non-zero when the chip halted within the polling
 * budget (or was already halted), 0 on timeout.
 */
static int
adv_host_req_chip_halt(struct adv_softc *adv)
{
	int	 count;
	u_int8_t saved_stop_code;

	if (adv_is_chip_halted(adv))
		return (1);

	count = 0;
	saved_stop_code = adv_read_lram_8(adv, ADVV_STOP_CODE_B);
	adv_write_lram_8(adv, ADVV_STOP_CODE_B,
			 ADV_STOP_HOST_REQ_RISC_HALT | ADV_STOP_REQ_RISC_STOP);
	/* Spin up to 2000 polls waiting for the microcode to comply. */
	while (adv_is_chip_halted(adv) == 0
	    && count++ < 2000)
		;

	adv_write_lram_8(adv, ADVV_STOP_CODE_B, saved_stop_code);
	/* On timeout count has been post-incremented past 2000. */
	return (count < 2000);
}
1605
/*
 * Write an instruction code to the chip's IH register.  The register
 * lives in register bank 1; bank 0 is restored before returning.
 */
static void
adv_set_chip_ih(struct adv_softc *adv, u_int16_t ins_code)
{
	adv_set_bank(adv, 1);
	ADV_OUTW(adv, ADV_REG_IH, ins_code);
	adv_set_bank(adv, 0);
}
1613
1614#if UNUSED
/*
 * Fetch the chip's SCSI control register, which lives in register
 * bank 1; bank 0 is restored before returning.  (Compiled out unless
 * UNUSED is defined.)
 */
static u_int8_t
adv_get_chip_scsi_ctrl(struct adv_softc *adv)
{
	u_int8_t scsi_ctrl;

	adv_set_bank(adv, 1);
	scsi_ctrl = ADV_INB(adv, ADV_REG_SC);
	adv_set_bank(adv, 0);
	return (scsi_ctrl);
}
1625#endif
1626
1627/*
1628 * XXX Looks like more padding issues in this routine as well.
1629 *     There has to be a way to turn this into an insw.
1630 */
/*
 * Copy 'words' 16-bit words of queue information from local RAM at
 * s_addr into 'inbuf'.
 */
static void
adv_get_q_info(struct adv_softc *adv, u_int16_t s_addr,
	       u_int16_t *inbuf, int words)
{
	int	i;

	ADV_OUTW(adv, ADV_LRAM_ADDR, s_addr);
	for (i = 0; i < words; i++, inbuf++) {
		/*
		 * Destination word 5 is skipped (structure padding per
		 * the XXX note above).  NOTE(review): the data-port
		 * read is skipped too -- confirm the intended stream
		 * alignment with the in-memory structure layout.
		 */
		if (i == 5) {
			continue;
		}
		*inbuf = ADV_INW(adv, ADV_LRAM_DATA);
	}
}
1645
1646static u_int
1647adv_get_num_free_queues(struct adv_softc *adv, u_int8_t n_qs)
1648{
1649	u_int	  cur_used_qs;
1650	u_int	  cur_free_qs;
1651
1652	cur_used_qs = adv->cur_active + ADV_MIN_FREE_Q;
1653
1654	if ((cur_used_qs + n_qs) <= adv->max_openings) {
1655		cur_free_qs = adv->max_openings - cur_used_qs;
1656		return (cur_free_qs);
1657	}
1658	adv->openings_needed = n_qs;
1659	return (0);
1660}
1661
1662static u_int8_t
1663adv_alloc_free_queues(struct adv_softc *adv, u_int8_t free_q_head,
1664		      u_int8_t n_free_q)
1665{
1666	int i;
1667
1668	for (i = 0; i < n_free_q; i++) {
1669		free_q_head = adv_alloc_free_queue(adv, free_q_head);
1670		if (free_q_head == ADV_QLINK_END)
1671			break;
1672	}
1673	return (free_q_head);
1674}
1675
1676static u_int8_t
1677adv_alloc_free_queue(struct adv_softc *adv, u_int8_t free_q_head)
1678{
1679	u_int16_t	q_addr;
1680	u_int8_t	next_qp;
1681	u_int8_t	q_status;
1682
1683	next_qp = ADV_QLINK_END;
1684	q_addr = ADV_QNO_TO_QADDR(free_q_head);
1685	q_status = adv_read_lram_8(adv,	q_addr + ADV_SCSIQ_B_STATUS);
1686
1687	if ((q_status & QS_READY) == 0)
1688		next_qp = adv_read_lram_8(adv, q_addr + ADV_SCSIQ_B_FWD);
1689
1690	return (next_qp);
1691}
1692
/*
 * Try to claim n_q_required free queue blocks for 'scsiq' and hand the
 * request to the microcode.  Returns 0 when the request was queued,
 * 1 when not enough free queues were available.
 */
static int
adv_send_scsi_queue(struct adv_softc *adv, struct adv_scsi_q *scsiq,
		    u_int8_t n_q_required)
{
	u_int8_t	free_q_head;
	u_int8_t	next_qp;
	u_int8_t	tid_no;
	u_int8_t	target_ix;
	int		retval;

	retval = 1;
	target_ix = scsiq->q2.target_ix;
	tid_no = ADV_TIX_TO_TID(target_ix);
	/* The free list head occupies the low byte of the word. */
	free_q_head = adv_read_lram_16(adv, ADVV_FREE_Q_HEAD_W) & 0xFF;
	if ((next_qp = adv_alloc_free_queues(adv, free_q_head, n_q_required))
	    != ADV_QLINK_END) {
		scsiq->q1.q_no = free_q_head;

		/*
		 * Now that we know our Q number, point our sense
		 * buffer pointer to a bus dma mapped area where
		 * we can dma the data to.
		 */
		scsiq->q1.sense_addr = adv->sense_physbase
		    + ((free_q_head - 1) * sizeof(struct scsi_sense_data));
		adv_put_ready_sg_list_queue(adv, scsiq, free_q_head);
		/* Commit: advance the free list and account for usage. */
		adv_write_lram_16(adv, ADVV_FREE_Q_HEAD_W, next_qp);
		adv->cur_active += n_q_required;
		retval = 0;
	}
	return (retval);
}
1725
1726
/*
 * Copy the scatter/gather list for 'scsiq' into the chain of queue
 * blocks following q_no -- one adv_sg_list_q header plus up to
 * ADV_SG_LIST_PER_Q entries per block -- and then stage the head queue
 * itself via adv_put_ready_queue().
 */
static void
adv_put_ready_sg_list_queue(struct adv_softc *adv, struct adv_scsi_q *scsiq,
			    u_int q_no)
{
	u_int8_t	sg_list_dwords;
	u_int8_t	sg_index, i;
	u_int8_t	sg_entry_cnt;
	u_int8_t	next_qp;
	u_int16_t	q_addr;
	struct		adv_sg_head *sg_head;
	struct		adv_sg_list_q scsi_sg_q;

	sg_head = scsiq->sg_head;

	if (sg_head) {
		/* The first SG entry travels with the head queue. */
		sg_entry_cnt = sg_head->entry_cnt - 1;
#ifdef DIAGNOSTIC
		if (sg_entry_cnt == 0)
			panic("adv_put_ready_sg_list_queue: ScsiQ with "
			      "a SG list but only one element");
		if ((scsiq->q1.cntl & QC_SG_HEAD) == 0)
			panic("adv_put_ready_sg_list_queue: ScsiQ with "
			      "a SG list but QC_SG_HEAD not set");
#endif
		q_addr = ADV_QNO_TO_QADDR(q_no);
		sg_index = 1;
		scsiq->q1.sg_queue_cnt = sg_head->queue_cnt;
		scsi_sg_q.sg_head_qp = q_no;
		scsi_sg_q.cntl = QCSG_SG_XFER_LIST;
		for (i = 0; i < sg_head->queue_cnt; i++) {
			u_int8_t segs_this_q;

			if (sg_entry_cnt > ADV_SG_LIST_PER_Q)
				segs_this_q = ADV_SG_LIST_PER_Q;
			else {
				/* This will be the last segment then */
				segs_this_q = sg_entry_cnt;
				scsi_sg_q.cntl |= QCSG_SG_XFER_END;
			}
			scsi_sg_q.seq_no = i + 1;
			/* Each SG entry is two 32-bit words. */
			sg_list_dwords = segs_this_q << 1;
			/*
			 * NOTE(review): the first SG queue advertises
			 * segs_this_q entries while later queues
			 * advertise one fewer -- presumably this
			 * matches the microcode's expectations; confirm
			 * against the firmware interface.
			 */
			if (i == 0) {
				scsi_sg_q.sg_list_cnt = segs_this_q;
				scsi_sg_q.sg_cur_list_cnt = segs_this_q;
			} else {
				scsi_sg_q.sg_list_cnt = segs_this_q - 1;
				scsi_sg_q.sg_cur_list_cnt = segs_this_q - 1;
			}
			/* Follow the free list to the next queue block. */
			next_qp = adv_read_lram_8(adv, q_addr + ADV_SCSIQ_B_FWD);
			scsi_sg_q.q_no = next_qp;
			q_addr = ADV_QNO_TO_QADDR(next_qp);

			adv_write_lram_16_multi(adv,
						q_addr + ADV_SCSIQ_SGHD_CPY_BEG,
						(u_int16_t *)&scsi_sg_q,
						sizeof(scsi_sg_q) >> 1);
			adv_write_lram_32_multi(adv, q_addr + ADV_SGQ_LIST_BEG,
						(u_int32_t *)&sg_head->sg_list[sg_index],
						sg_list_dwords);
			sg_entry_cnt -= segs_this_q;
			sg_index += ADV_SG_LIST_PER_Q;
		}
	}
	adv_put_ready_queue(adv, scsiq, q_no);
}
1792
/*
 * Finish staging 'scsiq' in queue block q_no: start an SDTR message if
 * the target's goal transfer settings differ from the current ones,
 * copy the CDB and queue control structures into local RAM, and
 * finally mark the queue QS_READY for the microcode.
 */
static void
adv_put_ready_queue(struct adv_softc *adv, struct adv_scsi_q *scsiq,
		    u_int q_no)
{
	struct		adv_target_transinfo* tinfo;
	u_int		q_addr;
	u_int		tid_no;

	tid_no = ADV_TIX_TO_TID(scsiq->q2.target_ix);
	tinfo = &adv->tinfo[tid_no];
	/* Kick off (re)negotiation whenever goal != current. */
	if ((tinfo->current.period != tinfo->goal.period)
	 || (tinfo->current.offset != tinfo->goal.offset)) {

		adv_msgout_sdtr(adv, tinfo->goal.period, tinfo->goal.offset);
		scsiq->q1.cntl |= QC_MSG_OUT;
	}
	q_addr = ADV_QNO_TO_QADDR(q_no);

	scsiq->q1.status = QS_FREE;

	adv_write_lram_16_multi(adv, q_addr + ADV_SCSIQ_CDB_BEG,
				(u_int16_t *)scsiq->cdbptr,
				scsiq->q2.cdb_len >> 1);

#if BYTE_ORDER == BIG_ENDIAN
	adv_adj_scsiq_endian(scsiq);
#endif

	adv_put_scsiq(adv, q_addr + ADV_SCSIQ_CPY_BEG,
		      (u_int16_t *) &scsiq->q1.cntl,
		      ((sizeof(scsiq->q1) + sizeof(scsiq->q2)) / 2) - 1);

#if CC_WRITE_IO_COUNT
	adv_write_lram_16(adv, q_addr + ADV_SCSIQ_W_REQ_COUNT,
			  adv->req_count);
#endif

#if CC_CLEAR_DMA_REMAIN

	adv_write_lram_32(adv, q_addr + ADV_SCSIQ_DW_REMAIN_XFER_ADDR, 0);
	adv_write_lram_32(adv, q_addr + ADV_SCSIQ_DW_REMAIN_XFER_CNT, 0);
#endif

	/*
	 * Write q_no and QS_READY in a single 16-bit store -- done last,
	 * since setting QS_READY presumably hands the block to the
	 * microcode.
	 */
	adv_write_lram_16(adv, q_addr + ADV_SCSIQ_B_STATUS,
			  (scsiq->q1.q_no << 8) | QS_READY);
}
1839
/*
 * Copy 'words' 16-bit words from 'buffer' into local RAM at s_addr,
 * skipping source words 2 and 10, which correspond to compiler padding
 * in the scsiq structures (see the XXX note below).
 */
static void
adv_put_scsiq(struct adv_softc *adv, u_int16_t s_addr,
	      u_int16_t *buffer, int words)
{
	int	i;

	/*
	 * XXX This routine makes *gross* assumptions
	 * about padding in the data structures.
	 * Either the data structures should have explicit
	 * padding members added, or they should have padding
	 * turned off via compiler attributes depending on
	 * which yields better overall performance.  My hunch
	 * would be that turning off padding would be the
	 * faster approach as an outsw is much faster than
	 * this crude loop and accessing un-aligned data
	 * members isn't *that* expensive.  The other choice
	 * would be to modify the ASC script so that the
	 * the adv_scsiq_1 structure can be re-arranged so
	 * padding isn't required.
	 */
	ADV_OUTW(adv, ADV_LRAM_ADDR, s_addr);
	for (i = 0; i < words; i++, buffer++) {
		/* Padding words in the source structure: don't emit. */
		if (i == 2 || i == 10) {
			continue;
		}
		ADV_OUTW(adv, ADV_LRAM_DATA, *buffer);
	}
}
1869
/*
 * Process an extended message received from a target while the chip is
 * halted.  SDTR messages are range-checked and either accepted or
 * answered with our counter-proposal; WDTR requests are answered with
 * a zero-width response; anything else gets MESSAGE REJECT.  The
 * updated queue control byte is written back so the microcode knows
 * whether a message-out phase is required.
 */
static void
adv_handle_extmsg_in(struct adv_softc *adv, u_int16_t halt_q_addr,
		     u_int8_t q_cntl, target_bit_vector target_mask,
		     int tid_no)
{
	struct	ext_msg ext_msg;

	adv_read_lram_16_multi(adv, ADVV_MSGIN_BEG, (u_int16_t *) &ext_msg,
			       sizeof(ext_msg) >> 1);
	if ((ext_msg.msg_type == MSG_EXTENDED)
	 && (ext_msg.msg_req == MSG_EXT_SDTR)
	 && (ext_msg.msg_len == MSG_EXT_SDTR_LEN)) {
		union	  ccb *ccb;
		struct	  adv_target_transinfo* tinfo;
		u_int32_t cinfo_index;
		u_int	 period;
		u_int	 offset;
		int	 sdtr_accept;
		u_int8_t orig_offset;

		cinfo_index =
		    adv_read_lram_32(adv, halt_q_addr + ADV_SCSIQ_D_CINFO_IDX);
		ccb = adv->ccb_infos[cinfo_index].ccb;
		tinfo = &adv->tinfo[tid_no];
		sdtr_accept = TRUE;

		orig_offset = ext_msg.req_ack_offset;
		/* Never agree to run faster than our goal period. */
		if (ext_msg.xfer_period < tinfo->goal.period) {
                	sdtr_accept = FALSE;
			ext_msg.xfer_period = tinfo->goal.period;
		}

		/* Perform range checking */
		period = ext_msg.xfer_period;
		offset = ext_msg.req_ack_offset;
		adv_period_offset_to_sdtr(adv, &period,  &offset, tid_no);
		ext_msg.xfer_period = period;
		ext_msg.req_ack_offset = offset;

		/* Record our current sync settings */
		adv_set_syncrate(adv, ccb->ccb_h.path,
				 tid_no, ext_msg.xfer_period,
				 ext_msg.req_ack_offset,
				 ADV_TRANS_GOAL|ADV_TRANS_ACTIVE);

		/* Offset too high or large period forced async */
		if (orig_offset != ext_msg.req_ack_offset)
			sdtr_accept = FALSE;

		if (sdtr_accept && (q_cntl & QC_MSG_OUT)) {
			/* Valid response to our requested negotiation */
			q_cntl &= ~QC_MSG_OUT;
		} else {
			/* Must Respond */
			q_cntl |= QC_MSG_OUT;
			adv_msgout_sdtr(adv, ext_msg.xfer_period,
					ext_msg.req_ack_offset);
		}

	} else if (ext_msg.msg_type == MSG_EXTENDED
		&& ext_msg.msg_req == MSG_EXT_WDTR
		&& ext_msg.msg_len == MSG_EXT_WDTR_LEN) {

		/* We only do narrow transfers: respond with width 0. */
		ext_msg.wdtr_width = 0;
		adv_write_lram_16_multi(adv, ADVV_MSGOUT_BEG,
					(u_int16_t *)&ext_msg,
					sizeof(ext_msg) >> 1);
		q_cntl |= QC_MSG_OUT;
        } else {

		/* Unrecognized extended message: reject it. */
		ext_msg.msg_type = MSG_MESSAGE_REJECT;
		adv_write_lram_16_multi(adv, ADVV_MSGOUT_BEG,
					(u_int16_t *)&ext_msg,
					sizeof(ext_msg) >> 1);
		q_cntl |= QC_MSG_OUT;
        }
	adv_write_lram_8(adv, halt_q_addr + ADV_SCSIQ_B_CNTL, q_cntl);
}
1948
/*
 * Stage an SDTR extended message in the microcode's message-out
 * buffer.
 *
 * NOTE(review): only the SDTR fields of sdtr_buf are initialized, yet
 * sizeof(sdtr_buf)/2 words are copied to LRAM, so trailing bytes of
 * the ext_msg union are indeterminate -- presumably the microcode
 * transmits only msg_len bytes; confirm.
 */
static void
adv_msgout_sdtr(struct adv_softc *adv, u_int8_t sdtr_period,
		u_int8_t sdtr_offset)
{
	struct	 ext_msg sdtr_buf;

	sdtr_buf.msg_type = MSG_EXTENDED;
	sdtr_buf.msg_len = MSG_EXT_SDTR_LEN;
	sdtr_buf.msg_req = MSG_EXT_SDTR;
	sdtr_buf.xfer_period = sdtr_period;
	sdtr_offset &= ADV_SYN_MAX_OFFSET;
	sdtr_buf.req_ack_offset = sdtr_offset;
	adv_write_lram_16_multi(adv, ADVV_MSGOUT_BEG,
				(u_int16_t *) &sdtr_buf,
				sizeof(sdtr_buf) / 2);
}
1965
/*
 * Walk every queue block and abort those matching the given target/lun
 * (and, when 'ccb' is non-NULL, that specific CCB only), marking them
 * QS_ABORTED and tagging the owning CCB with 'status' unless it
 * already carries an error.  With 'queued_only' set, queues that have
 * progressed on the bus (disconnected/busy/done) are left alone.
 * Returns the number of queues aborted.
 */
int
adv_abort_ccb(struct adv_softc *adv, int target, int lun, union ccb *ccb,
	      u_int32_t status, int queued_only)
{
	u_int16_t q_addr;
	u_int8_t  q_no;
	struct adv_q_done_info scsiq_buf;
	struct adv_q_done_info *scsiq;
	u_int8_t  target_ix;
	int	  count;

	scsiq = &scsiq_buf;
	target_ix = ADV_TIDLUN_TO_IX(target, lun);
	count = 0;
	for (q_no = ADV_MIN_ACTIVE_QNO; q_no <= adv->max_openings; q_no++) {
		struct adv_ccb_info *ccb_info;
		q_addr = ADV_QNO_TO_QADDR(q_no);

		adv_copy_lram_doneq(adv, q_addr, scsiq, adv->max_dma_count);
		ccb_info = &adv->ccb_infos[scsiq->d2.ccb_index];
		/* Only touch ready, unaborted head queues for our target. */
		if (((scsiq->q_status & QS_READY) != 0)
		 && ((scsiq->q_status & QS_ABORTED) == 0)
		 && ((scsiq->cntl & QCSG_SG_XFER_LIST) == 0)
		 && (scsiq->d2.target_ix == target_ix)
		 && (queued_only == 0
		  || !(scsiq->q_status & (QS_DISC1|QS_DISC2|QS_BUSY|QS_DONE)))
		 && (ccb == NULL || (ccb == ccb_info->ccb))) {
			union ccb *aborted_ccb;
			struct adv_ccb_info *cinfo;

			scsiq->q_status |= QS_ABORTED;
			adv_write_lram_8(adv, q_addr + ADV_SCSIQ_B_STATUS,
					 scsiq->q_status);
			aborted_ccb = ccb_info->ccb;
			/* Don't clobber earlier error codes */
			if ((aborted_ccb->ccb_h.status & CAM_STATUS_MASK)
			  == CAM_REQ_INPROG)
				aborted_ccb->ccb_h.status |= status;
			cinfo = (struct adv_ccb_info *)
			    aborted_ccb->ccb_h.ccb_cinfo_ptr;
			cinfo->state |= ACCB_ABORT_QUEUED;
			count++;
		}
	}
	return (count);
}
2012
/*
 * Reset the SCSI bus (optionally initiating the reset ourselves),
 * re-initialize chip local RAM and transfer settings, complete all
 * pending CCBs as aborted-by-host, and restart the chip.  Returns the
 * number of CCBs terminated.
 */
int
adv_reset_bus(struct adv_softc *adv, int initiate_bus_reset)
{
	int count;
	int i;
	union ccb *ccb;

	/* Wait (up to ~200ms) for any in-progress reset to clear. */
	i = 200;
	while ((ADV_INW(adv, ADV_CHIP_STATUS) & ADV_CSW_SCSI_RESET_ACTIVE) != 0
	    && i--)
		DELAY(1000);
	adv_reset_chip(adv, initiate_bus_reset);
	adv_reinit_lram(adv);
	/* All negotiated transfer settings are invalid after a reset. */
	for (i = 0; i <= ADV_MAX_TID; i++)
		adv_set_syncrate(adv, NULL, i, /*period*/0,
				 /*offset*/0, ADV_TRANS_CUR);
	ADV_OUTW(adv, ADV_REG_PROG_COUNTER, ADV_MCODE_START_ADDR);

	/* Tell the XPT layer that a bus reset occurred */
	if (adv->path != NULL)
		xpt_async(AC_BUS_RESET, adv->path, NULL);

	/* Terminate every request that was pending at reset time. */
	count = 0;
	while ((ccb = (union ccb *)LIST_FIRST(&adv->pending_ccbs)) != NULL) {
		if ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_INPROG)
			ccb->ccb_h.status |= CAM_SCSI_BUS_RESET;
		adv_done(adv, ccb, QD_ABORTED_BY_HOST, 0, 0, 0);
		count++;
	}

	adv_start_chip(adv);
	return (count);
}
2046
/*
 * Program the per-target synchronous transfer register.  The target
 * select register lives in bank 1 and the SYN_OFFSET register in
 * bank 0; the previously selected target id is restored afterwards.
 */
static void
adv_set_sdtr_reg_at_id(struct adv_softc *adv, int tid, u_int8_t sdtr_data)
{
	int orig_id;

    	adv_set_bank(adv, 1);
    	orig_id = ffs(ADV_INB(adv, ADV_HOST_SCSIID)) - 1;
    	ADV_OUTB(adv, ADV_HOST_SCSIID, tid);
	/* Only write the rate if the target select actually took. */
	if (ADV_INB(adv, ADV_HOST_SCSIID) == (0x01 << tid)) {
		adv_set_bank(adv, 0);
		ADV_OUTB(adv, ADV_SYN_OFFSET, sdtr_data);
	}
    	adv_set_bank(adv, 1);
    	ADV_OUTB(adv, ADV_HOST_SCSIID, orig_id);
	adv_set_bank(adv, 0);
}
2063