advlib.c revision 39505
1/*
2 * Low level routines for the Advanced Systems Inc. SCSI controllers chips
3 *
4 * Copyright (c) 1996-1997 Justin Gibbs.
5 * All rights reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 * 1. Redistributions of source code must retain the above copyright
11 *    notice, this list of conditions, and the following disclaimer,
12 *    without modification, immediately at the beginning of the file.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 *    notice, this list of conditions and the following disclaimer in the
15 *    documentation and/or other materials provided with the distribution.
16 * 3. The name of the author may not be used to endorse or promote products
17 *    derived from this software without specific prior written permission.
18 *
19 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
20 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
22 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
23 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
24 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
25 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
26 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
27 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
28 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
29 * SUCH DAMAGE.
30 *
31 *      $Id: advlib.c,v 1.5 1998/09/15 07:03:33 gibbs Exp $
32 */
33/*
34 * Ported from:
35 * advansys.c - Linux Host Driver for AdvanSys SCSI Adapters
36 *
37 * Copyright (c) 1995-1996 Advanced System Products, Inc.
38 * All Rights Reserved.
39 *
40 * Redistribution and use in source and binary forms, with or without
41 * modification, are permitted provided that redistributions of source
42 * code retain the above copyright notice and this comment without
43 * modification.
44 */
45
46#include <sys/param.h>
47#include <sys/systm.h>
48
49#include <machine/bus_pio.h>
50#include <machine/bus.h>
51#include <machine/clock.h>
52
53#include <cam/cam.h>
54#include <cam/cam_ccb.h>
55#include <cam/cam_sim.h>
56#include <cam/cam_xpt_sim.h>
57
58#include <cam/scsi/scsi_all.h>
59#include <cam/scsi/scsi_message.h>
60#include <cam/scsi/scsi_da.h>
61#include <cam/scsi/scsi_cd.h>
62
63#include <vm/vm.h>
64#include <vm/vm_param.h>
65#include <vm/pmap.h>
66
67#include <dev/advansys/advansys.h>
68#include <dev/advansys/advmcode.h>
69
/*
 * Quirk table entry: an inquiry match pattern plus the set of
 * workaround flags that apply to matching devices.
 */
struct adv_quirk_entry {
	struct scsi_inquiry_pattern inq_pat;	/* device match pattern */
	u_int8_t quirks;			/* OR of ADV_QUIRK_* flags */
#define ADV_QUIRK_FIX_ASYN_XFER_ALWAYS	0x01	/* always apply async fix */
#define ADV_QUIRK_FIX_ASYN_XFER		0x02	/* apply fix when async */
};
76
/*
 * Per-device quirk table, searched first-match by cam_quirkmatch().
 * The final wildcard entry supplies the default (async fix enabled),
 * so a lookup can never fail.
 */
static struct adv_quirk_entry adv_quirk_table[] =
{
	{
		{ T_CDROM, SIP_MEDIA_REMOVABLE, "HP", "*", "*" },
		ADV_QUIRK_FIX_ASYN_XFER_ALWAYS|ADV_QUIRK_FIX_ASYN_XFER
	},
	{
		{ T_CDROM, SIP_MEDIA_REMOVABLE, "NEC", "CD-ROM DRIVE", "*" },
		0
	},
	{
		{
		  T_SEQUENTIAL, SIP_MEDIA_REMOVABLE,
		  "TANDBERG", " TDC 36", "*"
		},
		0
	},
	{
		{ T_SEQUENTIAL, SIP_MEDIA_REMOVABLE, "WANGTEK", "*", "*" },
		0
	},
	{
		{
		  T_PROCESSOR, SIP_MEDIA_REMOVABLE|SIP_MEDIA_FIXED,
		  "*", "*", "*"
		},
		0
	},
	{
		{
		  T_SCANNER, SIP_MEDIA_REMOVABLE|SIP_MEDIA_FIXED,
		  "*", "*", "*"
		},
		0
	},
	{
		/* Default quirk entry */
		{
		  T_ANY, SIP_MEDIA_REMOVABLE|SIP_MEDIA_FIXED,
		  /*vendor*/"*", /*product*/"*", /*revision*/"*"
                },
                ADV_QUIRK_FIX_ASYN_XFER,
	}
};
121
/*
 * Allowable synchronous transfer periods in ns for non-Ultra chips.
 * The table index (0..7) is what gets encoded into the chip's SDTR
 * data; see adv_period_offset_to_sdtr()/adv_sdtr_to_period_offset().
 */
u_int8_t adv_sdtr_period_tbl[] =
{
	25,
	30,
	35,
	40,
	50,
	60,
	70,
	85
};
136
/*
 * Allowable synchronous transfer periods in ns for Ultra chips
 * (16 entries, index 0..15 encoded into SDTR data).
 */
u_int8_t adv_sdtr_period_tbl_ultra[] =
{
	12,
	19,
	25,
	32,
	38,
	44,
	50,
	57,
	63,
	69,
	75,
	82,
	88,
	94,
	100,
	107
};
156
/*
 * In-memory layout of a SCSI extended message as exchanged with the
 * microcode message buffers (SDTR, WDTR, and modify-data-pointer).
 */
struct ext_msg {
	u_int8_t msg_type;	/* MSG_EXTENDED */
	u_int8_t msg_len;	/* length of the extended message */
	u_int8_t msg_req;	/* extended message code (e.g. MSG_EXT_SDTR) */
	union {
		struct {
			u_int8_t sdtr_xfer_period;
			u_int8_t sdtr_req_ack_offset;
		} sdtr;
		struct {
       			u_int8_t wdtr_width;
		} wdtr;
		struct {
			u_int8_t mdp[4];
		} mdp;
	} u_ext_msg;
	u_int8_t res;
};

#define	xfer_period	u_ext_msg.sdtr.sdtr_xfer_period
#define	req_ack_offset	u_ext_msg.sdtr.sdtr_req_ack_offset
#define	wdtr_width	u_ext_msg.wdtr.wdtr_width
/*
 * Modify-data-pointer argument bytes, most significant first, so
 * b3 is mdp[0].  The previous definitions referenced nonexistent
 * u_ext_msg.mdp_b[0-3] members and would not have compiled if used.
 */
#define	mdp_b3		u_ext_msg.mdp.mdp[0]
#define	mdp_b2		u_ext_msg.mdp.mdp[1]
#define	mdp_b1		u_ext_msg.mdp.mdp[2]
#define	mdp_b0		u_ext_msg.mdp.mdp[3]
183
184/*
185 * Some of the early PCI adapters have problems with
186 * async transfers.  Instead use an offset of 1.
187 */
188#define ASYN_SDTR_DATA_FIX_PCI_REV_AB 0x41
189
/*
 * Forward declarations for all file-local helpers, grouped by
 * functional area.
 */

/* LRAM routines */
static void	 adv_read_lram_16_multi(struct adv_softc *adv, u_int16_t s_addr,
					u_int16_t *buffer, int count);
static void	 adv_write_lram_16_multi(struct adv_softc *adv,
					 u_int16_t s_addr, u_int16_t *buffer,
					 int count);
static void	 adv_mset_lram_16(struct adv_softc *adv, u_int16_t s_addr,
				  u_int16_t set_value, int count);
static u_int32_t adv_msum_lram_16(struct adv_softc *adv, u_int16_t s_addr,
				  int count);

static int	 adv_write_and_verify_lram_16(struct adv_softc *adv,
					      u_int16_t addr, u_int16_t value);
static u_int32_t adv_read_lram_32(struct adv_softc *adv, u_int16_t addr);


static void	 adv_write_lram_32(struct adv_softc *adv, u_int16_t addr,
				   u_int32_t value);
static void	 adv_write_lram_32_multi(struct adv_softc *adv,
					 u_int16_t s_addr, u_int32_t *buffer,
					 int count);

/* EEPROM routines */
static u_int16_t adv_read_eeprom_16(struct adv_softc *adv, u_int8_t addr);
static u_int16_t adv_write_eeprom_16(struct adv_softc *adv, u_int8_t addr,
				     u_int16_t value);
static int	 adv_write_eeprom_cmd_reg(struct adv_softc *adv,
					  u_int8_t cmd_reg);
static int	 adv_set_eeprom_config_once(struct adv_softc *adv,
					    struct adv_eeprom_config *eeconfig);

/* Initialization */
static u_int32_t adv_load_microcode(struct adv_softc *adv, u_int16_t s_addr,
				    u_int16_t *mcode_buf, u_int16_t mcode_size);

static void	 adv_reinit_lram(struct adv_softc *adv);
static void	 adv_init_lram(struct adv_softc *adv);
static int	 adv_init_microcode_var(struct adv_softc *adv);
static void	 adv_init_qlink_var(struct adv_softc *adv);

/* Interrupts */
static void	 adv_disable_interrupt(struct adv_softc *adv);
static void	 adv_enable_interrupt(struct adv_softc *adv);
static void	 adv_toggle_irq_act(struct adv_softc *adv);

/* Chip Control */
static int	 adv_stop_chip(struct adv_softc *adv);
static int	 adv_host_req_chip_halt(struct adv_softc *adv);
static void	 adv_set_chip_ih(struct adv_softc *adv, u_int16_t ins_code);
/* NOTE(review): "#if UNUSED" relies on the undefined macro evaluating to 0. */
#if UNUSED
static u_int8_t  adv_get_chip_scsi_ctrl(struct adv_softc *adv);
#endif

/* Queue handling and execution */
static int	 adv_sgcount_to_qcount(int sgcount);
static void	 adv_get_q_info(struct adv_softc *adv, u_int16_t s_addr,
				u_int16_t *inbuf, int words);
static u_int	 adv_get_num_free_queues(struct adv_softc *adv, u_int8_t n_qs);
static u_int8_t  adv_alloc_free_queues(struct adv_softc *adv,
				       u_int8_t free_q_head, u_int8_t n_free_q);
static u_int8_t  adv_alloc_free_queue(struct adv_softc *adv,
				      u_int8_t free_q_head);
static int	 adv_send_scsi_queue(struct adv_softc *adv,
				     struct adv_scsi_q *scsiq,
				     u_int8_t n_q_required);
static void	 adv_put_ready_sg_list_queue(struct adv_softc *adv,
					     struct adv_scsi_q *scsiq,
					     u_int q_no);
static void	 adv_put_ready_queue(struct adv_softc *adv,
				     struct adv_scsi_q *scsiq, u_int q_no);
static void	 adv_put_scsiq(struct adv_softc *adv, u_int16_t s_addr,
			       u_int16_t *buffer, int words);

/* Messages */
static void	 adv_handle_extmsg_in(struct adv_softc *adv,
				      u_int16_t halt_q_addr, u_int8_t q_cntl,
				      target_bit_vector target_id,
				      int tid);
static void	 adv_msgout_sdtr(struct adv_softc *adv, u_int8_t sdtr_period,
				 u_int8_t sdtr_offset);
static void	 adv_set_sdtr_reg_at_id(struct adv_softc *adv, int id,
					u_int8_t sdtr_data);
272
273
274/* Exported functions first */
275
/*
 * CAM asynchronous event callback.  On device arrival, look the
 * device up in the quirk table and apply/clear the async-transfer
 * bug-fix masks; on device departure, re-arm the fix and revert the
 * target to asynchronous transfers.
 */
void
advasync(void *callback_arg, u_int32_t code, struct cam_path *path, void *arg)
{
	struct adv_softc *adv;

	adv = (struct adv_softc *)callback_arg;
	switch (code) {
	case AC_FOUND_DEVICE:
	{
		struct ccb_getdev *cgd;
		target_bit_vector target_mask;
		int num_entries;
        	caddr_t match;
		struct adv_quirk_entry *entry;
		struct adv_target_transinfo* tinfo;

		cgd = (struct ccb_getdev *)arg;

		target_mask = ADV_TID_TO_TARGET_MASK(cgd->ccb_h.target_id);

		/* Match inquiry data against the quirk table. */
		num_entries = sizeof(adv_quirk_table)/sizeof(*adv_quirk_table);
		match = cam_quirkmatch((caddr_t)&cgd->inq_data,
				       (caddr_t)adv_quirk_table,
				       num_entries, sizeof(*adv_quirk_table),
				       scsi_inquiry_match);

		/* The table ends with a T_ANY wildcard, so this can't fail. */
		if (match == NULL)
			panic("advasync: device didn't match wildcard entry!!");

		entry = (struct adv_quirk_entry *)match;

		if (adv->bug_fix_control & ADV_BUG_FIX_ASYN_USE_SYN) {
			if ((entry->quirks & ADV_QUIRK_FIX_ASYN_XFER_ALWAYS)!=0)
				adv->fix_asyn_xfer_always |= target_mask;
			else
				adv->fix_asyn_xfer_always &= ~target_mask;
			/*
			 * We start out life with all bits set and clear them
			 * after we've determined that the fix isn't necessary.
			 * It may well be that we've already cleared a target
			 * before the full inquiry session completes, so don't
			 * gratuitously set a target bit even if it has this
			 * quirk.  But, if the quirk exonerates a device, clear
			 * the bit now.
			 */
			if ((entry->quirks & ADV_QUIRK_FIX_ASYN_XFER) == 0)
				adv->fix_asyn_xfer &= ~target_mask;
		}
		/*
		 * Reset our sync settings now that we've determined
		 * what quirks are in effect for the device.
		 */
		tinfo = &adv->tinfo[cgd->ccb_h.target_id];
		adv_set_syncrate(adv, cgd->ccb_h.path,
				 cgd->ccb_h.target_id,
				 tinfo->current.period,
				 tinfo->current.offset,
				 ADV_TRANS_CUR);
		break;
	}
	case AC_LOST_DEVICE:
	{
		u_int target_mask;

		if (adv->bug_fix_control & ADV_BUG_FIX_ASYN_USE_SYN) {
			target_mask = 0x01 << xpt_path_target_id(path);
			adv->fix_asyn_xfer |= target_mask;
		}

		/*
		 * Revert to async transfers
		 * for the next device.
		 */
		adv_set_syncrate(adv, /*path*/NULL,
				 xpt_path_target_id(path),
				 /*period*/0,
				 /*offset*/0,
				 ADV_TRANS_GOAL|ADV_TRANS_CUR);
	}
	/* FALLTHROUGH -- AC_LOST_DEVICE has no break; falls into default. */
	default:
		break;
	}
}
359
360void
361adv_set_bank(struct adv_softc *adv, u_int8_t bank)
362{
363	u_int8_t control;
364
365	/*
366	 * Start out with the bank reset to 0
367	 */
368	control = ADV_INB(adv, ADV_CHIP_CTRL)
369		  &  (~(ADV_CC_SINGLE_STEP | ADV_CC_TEST
370			| ADV_CC_DIAG | ADV_CC_SCSI_RESET
371			| ADV_CC_CHIP_RESET | ADV_CC_BANK_ONE));
372	if (bank == 1) {
373		control |= ADV_CC_BANK_ONE;
374	} else if (bank == 2) {
375		control |= ADV_CC_DIAG | ADV_CC_BANK_ONE;
376	}
377	ADV_OUTB(adv, ADV_CHIP_CTRL, control);
378}
379
380u_int8_t
381adv_read_lram_8(struct adv_softc *adv, u_int16_t addr)
382{
383	u_int8_t   byte_data;
384	u_int16_t  word_data;
385
386	/*
387	 * LRAM is accessed on 16bit boundaries.
388	 */
389	ADV_OUTW(adv, ADV_LRAM_ADDR, addr & 0xFFFE);
390	word_data = ADV_INW(adv, ADV_LRAM_DATA);
391	if (addr & 1) {
392#if BYTE_ORDER == BIG_ENDIAN
393		byte_data = (u_int8_t)(word_data & 0xFF);
394#else
395		byte_data = (u_int8_t)((word_data >> 8) & 0xFF);
396#endif
397	} else {
398#if BYTE_ORDER == BIG_ENDIAN
399		byte_data = (u_int8_t)((word_data >> 8) & 0xFF);
400#else
401		byte_data = (u_int8_t)(word_data & 0xFF);
402#endif
403	}
404	return (byte_data);
405}
406
407void
408adv_write_lram_8(struct adv_softc *adv, u_int16_t addr, u_int8_t value)
409{
410	u_int16_t word_data;
411
412	word_data = adv_read_lram_16(adv, addr & 0xFFFE);
413	if (addr & 1) {
414		word_data &= 0x00FF;
415		word_data |= (((u_int8_t)value << 8) & 0xFF00);
416	} else {
417		word_data &= 0xFF00;
418		word_data |= ((u_int8_t)value & 0x00FF);
419	}
420	adv_write_lram_16(adv, addr & 0xFFFE, word_data);
421}
422
423
424u_int16_t
425adv_read_lram_16(struct adv_softc *adv, u_int16_t addr)
426{
427	ADV_OUTW(adv, ADV_LRAM_ADDR, addr);
428	return (ADV_INW(adv, ADV_LRAM_DATA));
429}
430
/* Write a 16-bit word to local RAM at the given address. */
void
adv_write_lram_16(struct adv_softc *adv, u_int16_t addr, u_int16_t value)
{
	/* Latch the address register, then write the data register. */
	ADV_OUTW(adv, ADV_LRAM_ADDR, addr);
	ADV_OUTW(adv, ADV_LRAM_DATA, value);
}
437
438/*
439 * Determine if there is a board at "iobase" by looking
440 * for the AdvanSys signatures.  Return 1 if a board is
441 * found, 0 otherwise.
442 */
443int
444adv_find_signature(bus_space_tag_t tag, bus_space_handle_t bsh)
445{
446	u_int16_t signature;
447
448	if (bus_space_read_1(tag, bsh, ADV_SIGNATURE_BYTE) == ADV_1000_ID1B) {
449		signature = bus_space_read_2(tag, bsh, ADV_SIGNATURE_WORD);
450		if ((signature == ADV_1000_ID0W)
451		 || (signature == ADV_1000_ID0W_FIX))
452			return (1);
453	}
454	return (0);
455}
456
457void
458adv_lib_init(struct adv_softc *adv)
459{
460	if ((adv->type & ADV_ULTRA) != 0) {
461		adv->sdtr_period_tbl = adv_sdtr_period_tbl_ultra;
462		adv->sdtr_period_tbl_size = sizeof(adv_sdtr_period_tbl_ultra);
463	} else {
464		adv->sdtr_period_tbl = adv_sdtr_period_tbl;
465		adv->sdtr_period_tbl_size = sizeof(adv_sdtr_period_tbl);
466	}
467}
468
/*
 * Read the EEPROM configuration into *eeprom_config, treating the
 * structure as an array of 16-bit words that is filled in EEPROM
 * address order.  Returns the running 16-bit sum of all words read
 * except the final one (which holds the stored checksum).
 */
u_int16_t
adv_get_eeprom_config(struct adv_softc *adv, struct
		      adv_eeprom_config  *eeprom_config)
{
	u_int16_t	sum;
	u_int16_t	*wbuf;		/* walks eeprom_config word by word */
	u_int8_t	cfg_beg;
	u_int8_t	cfg_end;
	u_int8_t	s_addr;

	wbuf = (u_int16_t *)eeprom_config;
	sum = 0;

	/* The first two words are common to all board types. */
	for (s_addr = 0; s_addr < 2; s_addr++, wbuf++) {
		*wbuf = adv_read_eeprom_16(adv, s_addr);
		sum += *wbuf;
	}

	/* VL boards use a different config window in the EEPROM. */
	if (adv->type & ADV_VL) {
		cfg_beg = ADV_EEPROM_CFG_BEG_VL;
		cfg_end = ADV_EEPROM_MAX_ADDR_VL;
	} else {
		cfg_beg = ADV_EEPROM_CFG_BEG;
		cfg_end = ADV_EEPROM_MAX_ADDR;
	}

	for (s_addr = cfg_beg; s_addr <= (cfg_end - 1); s_addr++, wbuf++) {
		*wbuf = adv_read_eeprom_16(adv, s_addr);
		sum += *wbuf;
#if ADV_DEBUG_EEPROM
		printf("Addr 0x%x: 0x%04x\n", s_addr, *wbuf);
#endif
	}
	/* Final word (the stored checksum) is read but not summed. */
	*wbuf = adv_read_eeprom_16(adv, s_addr);
	return (sum);
}
505
506int
507adv_set_eeprom_config(struct adv_softc *adv,
508		      struct adv_eeprom_config *eeprom_config)
509{
510	int	retry;
511
512	retry = 0;
513	while (1) {
514		if (adv_set_eeprom_config_once(adv, eeprom_config) == 0) {
515			break;
516		}
517		if (++retry > ADV_EEPROM_MAX_RETRY) {
518			break;
519		}
520	}
521	return (retry > ADV_EEPROM_MAX_RETRY);
522}
523
/*
 * Reset the chip and the SCSI bus.  The register write sequence and
 * delays follow the vendor-prescribed procedure; do not reorder.
 * Returns nonzero if the chip ends up halted as expected.
 */
int
adv_reset_chip_and_scsi_bus(struct adv_softc *adv)
{
	adv_stop_chip(adv);
	/* Assert chip reset and SCSI bus reset together, then wait. */
	ADV_OUTB(adv, ADV_CHIP_CTRL,
		 ADV_CC_CHIP_RESET | ADV_CC_SCSI_RESET | ADV_CC_HALT);
	DELAY(200 * 1000);

	adv_set_chip_ih(adv, ADV_INS_RFLAG_WTM);
	adv_set_chip_ih(adv, ADV_INS_HALT);

	/* Drop SCSI reset first, then chip reset, leaving HALT set. */
	ADV_OUTB(adv, ADV_CHIP_CTRL, ADV_CC_CHIP_RESET | ADV_CC_HALT);
	ADV_OUTB(adv, ADV_CHIP_CTRL, ADV_CC_HALT);
	DELAY(200 * 1000);
	return (adv_is_chip_halted(adv));
}
540
541int
542adv_test_external_lram(struct adv_softc* adv)
543{
544	u_int16_t	q_addr;
545	u_int16_t	saved_value;
546	int		success;
547
548	success = 0;
549
550	q_addr = ADV_QNO_TO_QADDR(241);
551	saved_value = adv_read_lram_16(adv, q_addr);
552	if (adv_write_and_verify_lram_16(adv, q_addr, 0x55AA) == 0) {
553		success = 1;
554		adv_write_lram_16(adv, q_addr, saved_value);
555	}
556	return (success);
557}
558
559
/*
 * Initialize local RAM, download the RISC microcode, verify its
 * checksum, and bring up the microcode's variables.  Returns 0 on
 * success, 1 on failure.  Interrupts are disabled for the duration
 * and only re-enabled on the success path.
 */
int
adv_init_lram_and_mcode(struct adv_softc *adv)
{
	u_int32_t	retval;

	adv_disable_interrupt(adv);

	adv_init_lram(adv);

	/* adv_load_microcode returns the checksum of what was written. */
	retval = adv_load_microcode(adv, 0, (u_int16_t *)adv_mcode,
				    adv_mcode_size);
	if (retval != adv_mcode_chksum) {
		printf("adv%d: Microcode download failed checksum!\n",
		       adv->unit);
		return (1);
	}

	if (adv_init_microcode_var(adv) != 0)
		return (1);

	adv_enable_interrupt(adv);
	return (0);
}
583
584u_int8_t
585adv_get_chip_irq(struct adv_softc *adv)
586{
587	u_int16_t	cfg_lsw;
588	u_int8_t	chip_irq;
589
590	cfg_lsw = ADV_INW(adv, ADV_CONFIG_LSW);
591
592	if ((adv->type & ADV_VL) != 0) {
593		chip_irq = (u_int8_t)(((cfg_lsw >> 2) & 0x07));
594		if ((chip_irq == 0) ||
595		    (chip_irq == 4) ||
596		    (chip_irq == 7)) {
597			return (0);
598		}
599		return (chip_irq + (ADV_MIN_IRQ_NO - 1));
600	}
601	chip_irq = (u_int8_t)(((cfg_lsw >> 2) & 0x03));
602	if (chip_irq == 3)
603		chip_irq += 2;
604	return (chip_irq + ADV_MIN_IRQ_NO);
605}
606
/*
 * Program the chip's IRQ selection for VL and ISA boards.  The VL
 * path performs the vendor-prescribed two-step write with IRQ-act
 * toggles between steps; do not reorder.  Returns the IRQ actually
 * decoded back from the hardware.
 */
u_int8_t
adv_set_chip_irq(struct adv_softc *adv, u_int8_t irq_no)
{
	u_int16_t	cfg_lsw;

	if ((adv->type & ADV_VL) != 0) {
		/* Translate host IRQ to the chip's 3-bit encoding. */
		if (irq_no != 0) {
			if ((irq_no < ADV_MIN_IRQ_NO)
			 || (irq_no > ADV_MAX_IRQ_NO)) {
				irq_no = 0;	/* out of range: disable */
			} else {
				irq_no -= ADV_MIN_IRQ_NO - 1;
			}
		}
		/* Step 1: clear the field and set the 0x0010 mode bit. */
		cfg_lsw = ADV_INW(adv, ADV_CONFIG_LSW) & 0xFFE3;
		cfg_lsw |= 0x0010;
		ADV_OUTW(adv, ADV_CONFIG_LSW, cfg_lsw);
		adv_toggle_irq_act(adv);

		/* Step 2: write the new IRQ encoding into bits 2-4. */
		cfg_lsw = ADV_INW(adv, ADV_CONFIG_LSW) & 0xFFE0;
		cfg_lsw |= (irq_no & 0x07) << 2;
		ADV_OUTW(adv, ADV_CONFIG_LSW, cfg_lsw);
		adv_toggle_irq_act(adv);
	} else if ((adv->type & ADV_ISA) != 0) {
		/* ISA: IRQ 15 encodes as 13's slot (2-bit field). */
		if (irq_no == 15)
			irq_no -= 2;
		irq_no -= ADV_MIN_IRQ_NO;
		cfg_lsw = ADV_INW(adv, ADV_CONFIG_LSW) & 0xFFF3;
		cfg_lsw |= (irq_no & 0x03) << 2;
		ADV_OUTW(adv, ADV_CONFIG_LSW, cfg_lsw);
	}
	return (adv_get_chip_irq(adv));
}
640
641void
642adv_set_chip_scsiid(struct adv_softc *adv, int new_id)
643{
644	u_int16_t cfg_lsw;
645
646	cfg_lsw = ADV_INW(adv, ADV_CONFIG_LSW);
647	if (ADV_CONFIG_SCSIID(cfg_lsw) == new_id)
648		return;
649    	cfg_lsw &= ~ADV_CFG_LSW_SCSIID;
650	cfg_lsw |= (new_id & ADV_MAX_TID) << ADV_CFG_LSW_SCSIID_SHIFT;
651	ADV_OUTW(adv, ADV_CONFIG_LSW, cfg_lsw);
652}
653
/*
 * Prepare and submit a SCSI request to the microcode's queue system.
 * Computes how many hardware queues the request needs (one, or more
 * for S/G lists), applies the async-transfer and data-width
 * workarounds, and sends the queue if enough free queues exist (or
 * unconditionally for urgent requests).  Returns 0 if the request
 * was sent, 1 otherwise.
 */
int
adv_execute_scsi_queue(struct adv_softc *adv, struct adv_scsi_q *scsiq,
		       u_int32_t datalen)
{
	struct		adv_target_transinfo* tinfo;
	u_int32_t	*p_data_addr;
	u_int32_t	*p_data_bcount;
	int		disable_syn_offset_one_fix;
	int		retval;
	u_int		n_q_required;
	u_int32_t	addr;
	u_int8_t	sg_entry_cnt;
	u_int8_t	target_ix;
	u_int8_t	sg_entry_cnt_minus_one;
	u_int8_t	tid_no;

	scsiq->q1.q_no = 0;
	retval = 1;  /* Default to error case */
	target_ix = scsiq->q2.target_ix;
	tid_no = ADV_TIX_TO_TID(target_ix);
	tinfo = &adv->tinfo[tid_no];

	if (scsiq->cdbptr[0] == REQUEST_SENSE) {
		/* Renegotiate if appropriate. */
		adv_set_syncrate(adv, /*struct cam_path */NULL,
				 tid_no, /*period*/0, /*offset*/0,
				 ADV_TRANS_CUR);
		if (tinfo->current.period != tinfo->goal.period) {
			adv_msgout_sdtr(adv, tinfo->goal.period,
					tinfo->goal.offset);
			scsiq->q1.cntl |= (QC_MSG_OUT | QC_URGENT);
		}
	}

	if ((scsiq->q1.cntl & QC_SG_HEAD) != 0) {
		/* Scatter/gather request: size the queue requirement. */
		sg_entry_cnt = scsiq->sg_head->entry_cnt;
		sg_entry_cnt_minus_one = sg_entry_cnt - 1;

#ifdef DIAGNOSTIC
		if (sg_entry_cnt <= 1)
			panic("adv_execute_scsi_queue: Queue "
			      "with QC_SG_HEAD set but %d segs.", sg_entry_cnt);

		if (sg_entry_cnt > ADV_MAX_SG_LIST)
			panic("adv_execute_scsi_queue: "
			      "Queue with too many segs.");

		/* ISA/VL/EISA require 4-byte aligned segment boundaries. */
		if (adv->type & (ADV_ISA | ADV_VL | ADV_EISA)) {
			int i;

			for (i = 0; i < sg_entry_cnt_minus_one; i++) {
				addr = scsiq->sg_head->sg_list[i].addr +
				       scsiq->sg_head->sg_list[i].bytes;

				if ((addr & 0x0003) != 0)
					panic("adv_execute_scsi_queue: SG "
					      "with odd address or byte count");
			}
		}
#endif
		/* The workarounds below operate on the last S/G segment. */
		p_data_addr =
		    &scsiq->sg_head->sg_list[sg_entry_cnt_minus_one].addr;
		p_data_bcount =
		    &scsiq->sg_head->sg_list[sg_entry_cnt_minus_one].bytes;

		n_q_required = adv_sgcount_to_qcount(sg_entry_cnt);
		scsiq->sg_head->queue_cnt = n_q_required - 1;
	} else {
		/* Single-segment transfer. */
		p_data_addr = &scsiq->q1.data_addr;
		p_data_bcount = &scsiq->q1.data_cnt;
		n_q_required = 1;
	}

	disable_syn_offset_one_fix = FALSE;

	/*
	 * Decide whether to suspend the async-transfer fix for this
	 * command: short transfers and certain CDBs are exempt.
	 */
	if ((adv->fix_asyn_xfer & scsiq->q1.target_id) != 0
	 && (adv->fix_asyn_xfer_always & scsiq->q1.target_id) == 0) {

		if (datalen != 0) {
			if (datalen < 512) {
				disable_syn_offset_one_fix = TRUE;
			} else {
				if (scsiq->cdbptr[0] == INQUIRY
				 || scsiq->cdbptr[0] == REQUEST_SENSE
				 || scsiq->cdbptr[0] == READ_CAPACITY
				 || scsiq->cdbptr[0] == MODE_SELECT_6
				 || scsiq->cdbptr[0] == MODE_SENSE_6
				 || scsiq->cdbptr[0] == MODE_SENSE_10
				 || scsiq->cdbptr[0] == MODE_SELECT_10
				 || scsiq->cdbptr[0] == READ_TOC) {
					disable_syn_offset_one_fix = TRUE;
				}
			}
		}
	}

	if (disable_syn_offset_one_fix) {
		/* The fix precludes tagged queuing and disconnects. */
		scsiq->q2.tag_code &=
		    ~(MSG_SIMPLE_Q_TAG|MSG_HEAD_OF_Q_TAG|MSG_ORDERED_Q_TAG);
		scsiq->q2.tag_code |= (ADV_TAG_FLAG_DISABLE_ASYN_USE_SYN_FIX
				     | ADV_TAG_FLAG_DISABLE_DISCONNECT);
	}

	/*
	 * "Not double-word boundary" workaround for reads: shave the
	 * trailing unaligned bytes off the count and record them so
	 * the completion path can account for them.
	 */
	if ((adv->bug_fix_control & ADV_BUG_FIX_IF_NOT_DWB) != 0
	 && (scsiq->cdbptr[0] == READ_10 || scsiq->cdbptr[0] == READ_6)) {
		u_int8_t extra_bytes;

		addr = *p_data_addr + *p_data_bcount;
		extra_bytes = addr & 0x0003;
		if (extra_bytes != 0
		 && ((scsiq->q1.cntl & QC_SG_HEAD) != 0
		  || (scsiq->q1.data_cnt & 0x01FF) == 0)) {
			scsiq->q2.tag_code |= ADV_TAG_FLAG_EXTRA_BYTES;
			scsiq->q1.extra_bytes = extra_bytes;
			*p_data_bcount -= extra_bytes;
		}
	}

	/* Urgent requests bypass the free-queue availability check. */
	if ((adv_get_num_free_queues(adv, n_q_required) >= n_q_required)
	 || ((scsiq->q1.cntl & QC_URGENT) != 0))
		retval = adv_send_scsi_queue(adv, scsiq, n_q_required);

	return (retval);
}
778
779
/*
 * Copy a completed queue's status information out of local RAM into
 * *scsiq.  Returns the number of S/G queues that were chained to
 * this request so the caller can free them too.
 */
u_int8_t
adv_copy_lram_doneq(struct adv_softc *adv, u_int16_t q_addr,
		    struct adv_q_done_info *scsiq, u_int32_t max_dma_count)
{
	u_int16_t val;
	u_int8_t  sg_queue_cnt;

	/* Bulk-copy the d2/d3 portions of the done info. */
	adv_get_q_info(adv, q_addr + ADV_SCSIQ_DONE_INFO_BEG,
		       (u_int16_t *)scsiq,
		       (sizeof(scsiq->d2) + sizeof(scsiq->d3)) / 2);

#if BYTE_ORDER == BIG_ENDIAN
	adv_adj_endian_qdone_info(scsiq);
#endif

	/* Each LRAM word packs two byte-sized fields. */
	val = adv_read_lram_16(adv, q_addr + ADV_SCSIQ_B_STATUS);
	scsiq->q_status = val & 0xFF;
	scsiq->q_no = (val >> 8) & 0XFF;

	val = adv_read_lram_16(adv, q_addr + ADV_SCSIQ_B_CNTL);
	scsiq->cntl = val & 0xFF;
	sg_queue_cnt = (val >> 8) & 0xFF;

	val = adv_read_lram_16(adv,q_addr + ADV_SCSIQ_B_SENSE_LEN);
	scsiq->sense_len = val & 0xFF;
	scsiq->extra_bytes = (val >> 8) & 0xFF;

	scsiq->remain_bytes =
	    adv_read_lram_32(adv, q_addr + ADV_SCSIQ_DW_REMAIN_XFER_CNT);
	/*
	 * XXX Is this just a safeguard or will the counter really
	 * have bogus upper bits?
	 */
	scsiq->remain_bytes &= max_dma_count;

	return (sg_queue_cnt);
}
817
818int
819adv_start_chip(struct adv_softc *adv)
820{
821	ADV_OUTB(adv, ADV_CHIP_CTRL, 0);
822	if ((ADV_INW(adv, ADV_CHIP_STATUS) & ADV_CSW_HALTED) != 0)
823		return (0);
824	return (1);
825}
826
827int
828adv_stop_execution(struct adv_softc *adv)
829{
830	int count;
831
832	count = 0;
833	if (adv_read_lram_8(adv, ADV_STOP_CODE_B) == 0) {
834		adv_write_lram_8(adv, ADV_STOP_CODE_B,
835				 ADV_STOP_REQ_RISC_STOP);
836		do {
837			if (adv_read_lram_8(adv, ADV_STOP_CODE_B) &
838				ADV_STOP_ACK_RISC_STOP) {
839				return (1);
840			}
841			DELAY(1000);
842		} while (count++ < 20);
843	}
844	return (0);
845}
846
847int
848adv_is_chip_halted(struct adv_softc *adv)
849{
850	if ((ADV_INW(adv, ADV_CHIP_STATUS) & ADV_CSW_HALTED) != 0) {
851		if ((ADV_INB(adv, ADV_CHIP_CTRL) & ADV_CC_HALT) != 0) {
852			return (1);
853		}
854	}
855	return (0);
856}
857
/*
 * XXX The numeric constants and the loops in this routine
 * need to be documented.
 *
 * Acknowledge a chip interrupt: wait for the microcode to drop its
 * GEN_INT flag, set the host ACK flag, and pulse INT_ACK until the
 * pending bit clears.  Both loops are bounded to avoid hanging on
 * wedged hardware.
 */
void
adv_ack_interrupt(struct adv_softc *adv)
{
	u_int8_t	host_flag;
	u_int8_t	risc_flag;
	int		loop;

	/* Wait (bounded) for the RISC to finish raising the interrupt. */
	loop = 0;
	do {
		risc_flag = adv_read_lram_8(adv, ADVV_RISC_FLAG_B);
		if (loop++ > 0x7FFF) {
			break;
		}
	} while ((risc_flag & ADV_RISC_FLAG_GEN_INT) != 0);

	/* Tell the microcode we are acknowledging the interrupt. */
	host_flag = adv_read_lram_8(adv, ADVV_HOST_FLAG_B);
	adv_write_lram_8(adv, ADVV_HOST_FLAG_B,
			 host_flag | ADV_HOST_FLAG_ACK_INT);

	/* Pulse INT_ACK until the pending bit clears (max 4 extra tries). */
	ADV_OUTW(adv, ADV_CHIP_STATUS, ADV_CIW_INT_ACK);
	loop = 0;
	while (ADV_INW(adv, ADV_CHIP_STATUS) & ADV_CSW_INT_PENDING) {
		ADV_OUTW(adv, ADV_CHIP_STATUS, ADV_CIW_INT_ACK);
		if (loop++ > 3) {
			break;
		}
	}

	/* Restore the original host flag (drops ACK_INT). */
	adv_write_lram_8(adv, ADVV_HOST_FLAG_B, host_flag);
}
892
/*
 * Handle all conditions that may halt the chip waiting
 * for us to intervene.
 *
 * The microcode halts and posts a halt code in LRAM when it needs
 * host help: toggling the async-transfer fix, extended message
 * negotiation, check conditions, rejected SDTRs, and queue-full
 * conditions.  Dispatch on the code, then clear it to resume.
 */
void
adv_isr_chip_halted(struct adv_softc *adv)
{
	u_int16_t	  int_halt_code;
	u_int16_t	  halt_q_addr;
	target_bit_vector target_mask;
	target_bit_vector scsi_busy;
	u_int8_t	  halt_qp;
	u_int8_t	  target_ix;
	u_int8_t	  q_cntl;
	u_int8_t	  tid_no;

	/* Fetch the halt code and the queue the microcode was working on. */
	int_halt_code = adv_read_lram_16(adv, ADVV_HALTCODE_W);
	halt_qp = adv_read_lram_8(adv, ADVV_CURCDB_B);
	halt_q_addr = ADV_QNO_TO_QADDR(halt_qp);
	target_ix = adv_read_lram_8(adv, halt_q_addr + ADV_SCSIQ_B_TARGET_IX);
	q_cntl = adv_read_lram_8(adv, halt_q_addr + ADV_SCSIQ_B_CNTL);
	tid_no = ADV_TIX_TO_TID(target_ix);
	target_mask = ADV_TID_TO_TARGET_MASK(tid_no);
	if (int_halt_code == ADV_HALT_DISABLE_ASYN_USE_SYN_FIX) {
		/*
		 * Temporarily disable the async fix by removing
		 * this target from the list of affected targets,
		 * setting our async rate, and then putting us
		 * back into the mask.
		 */
		adv->fix_asyn_xfer &= ~target_mask;
		adv_set_syncrate(adv, /*struct cam_path */NULL,
				 tid_no, /*period*/0, /*offset*/0,
				 ADV_TRANS_ACTIVE);
		adv->fix_asyn_xfer |= target_mask;
	} else if (int_halt_code == ADV_HALT_ENABLE_ASYN_USE_SYN_FIX) {
		/* Re-apply the async fix rate for this target. */
		adv_set_syncrate(adv, /*struct cam_path */NULL,
				 tid_no, /*period*/0, /*offset*/0,
				 ADV_TRANS_ACTIVE);
	} else if (int_halt_code == ADV_HALT_EXTMSG_IN) {
		/* Target sent us an extended message (SDTR/WDTR/MDP). */
		adv_handle_extmsg_in(adv, halt_q_addr, q_cntl,
				     target_mask, tid_no);
	} else if (int_halt_code == ADV_HALT_CHK_CONDITION) {
		struct	 adv_target_transinfo* tinfo;
		union	 ccb *ccb;
		u_int8_t tag_code;
		u_int8_t q_status;

		tinfo = &adv->tinfo[tid_no];
		q_cntl |= QC_REQ_SENSE;

		/* Renegotiate if appropriate. */
		adv_set_syncrate(adv, /*struct cam_path */NULL,
				 tid_no, /*period*/0, /*offset*/0,
				 ADV_TRANS_CUR);
		if (tinfo->current.period != tinfo->goal.period) {
			adv_msgout_sdtr(adv, tinfo->goal.period,
					tinfo->goal.offset);
			q_cntl |= QC_MSG_OUT;
		}
		adv_write_lram_8(adv, halt_q_addr + ADV_SCSIQ_B_CNTL, q_cntl);

		/* Don't tag request sense commands */
		tag_code = adv_read_lram_8(adv,
					   halt_q_addr + ADV_SCSIQ_B_TAG_CODE);
		tag_code &=
		    ~(MSG_SIMPLE_Q_TAG|MSG_HEAD_OF_Q_TAG|MSG_ORDERED_Q_TAG);

		if ((adv->fix_asyn_xfer & target_mask) != 0
		 && (adv->fix_asyn_xfer_always & target_mask) == 0) {
			tag_code |= (ADV_TAG_FLAG_DISABLE_DISCONNECT
				 | ADV_TAG_FLAG_DISABLE_ASYN_USE_SYN_FIX);
		}
		adv_write_lram_8(adv, halt_q_addr + ADV_SCSIQ_B_TAG_CODE,
				 tag_code);
		/* Requeue this command as a request-sense. */
		q_status = adv_read_lram_8(adv,
					   halt_q_addr + ADV_SCSIQ_B_STATUS);
		q_status |= (QS_READY | QS_BUSY);
		adv_write_lram_8(adv, halt_q_addr + ADV_SCSIQ_B_STATUS,
				 q_status);
		/*
		 * Freeze the devq until we can handle the sense condition.
		 */
		ccb = (union ccb *) adv_read_lram_32(adv, halt_q_addr
							 + ADV_SCSIQ_D_CCBPTR);
		xpt_freeze_devq(ccb->ccb_h.path, /*count*/1);
		ccb->ccb_h.status |= CAM_DEV_QFRZN;
		adv_abort_ccb(adv, tid_no, ADV_TIX_TO_LUN(target_ix),
			      /*ccb*/NULL, CAM_REQUEUE_REQ,
			      /*queued_only*/TRUE);
		/* Mark this target as no longer busy on the bus. */
		scsi_busy = adv_read_lram_8(adv, ADVV_SCSIBUSY_B);
		scsi_busy &= ~target_mask;
		adv_write_lram_8(adv, ADVV_SCSIBUSY_B, scsi_busy);
	} else if (int_halt_code == ADV_HALT_SDTR_REJECTED) {
		struct	ext_msg out_msg;

		adv_read_lram_16_multi(adv, ADVV_MSGOUT_BEG,
				       (u_int16_t *) &out_msg,
				       sizeof(out_msg)/2);

		/* Only act if the rejected message was really our SDTR. */
		if ((out_msg.msg_type == MSG_EXTENDED)
		 && (out_msg.msg_len == MSG_EXT_SDTR_LEN)
		 && (out_msg.msg_req == MSG_EXT_SDTR)) {

			/* Revert to Async */
			adv_set_syncrate(adv, /*struct cam_path */NULL,
					 tid_no, /*period*/0, /*offset*/0,
					 ADV_TRANS_GOAL|ADV_TRANS_ACTIVE);
		}
		q_cntl &= ~QC_MSG_OUT;
		adv_write_lram_8(adv, halt_q_addr + ADV_SCSIQ_B_CNTL, q_cntl);
	} else if (int_halt_code == ADV_HALT_SS_QUEUE_FULL) {
		u_int8_t scsi_status;
		union ccb *ccb;

		/* Target reported queue full; freeze and requeue. */
		scsi_status = adv_read_lram_8(adv, halt_q_addr
					      + ADV_SCSIQ_SCSI_STATUS);
		ccb = (union ccb *) adv_read_lram_32(adv, halt_q_addr
						     + ADV_SCSIQ_D_CCBPTR);
		xpt_freeze_devq(ccb->ccb_h.path, /*count*/1);
		ccb->ccb_h.status |= CAM_DEV_QFRZN;
		adv_abort_ccb(adv, tid_no, ADV_TIX_TO_LUN(target_ix),
			      /*ccb*/NULL, CAM_REQUEUE_REQ,
			      /*queued_only*/TRUE);
		scsi_busy = adv_read_lram_8(adv, ADVV_SCSIBUSY_B);
		scsi_busy &= ~target_mask;
		adv_write_lram_8(adv, ADVV_SCSIBUSY_B, scsi_busy);
	}
	/* Clearing the halt code lets the microcode resume. */
	adv_write_lram_16(adv, ADVV_HALTCODE_W, 0);
}
1023
1024void
1025adv_sdtr_to_period_offset(struct adv_softc *adv,
1026			  u_int8_t sync_data, u_int8_t *period,
1027			  u_int8_t *offset, int tid)
1028{
1029	if (adv->fix_asyn_xfer & ADV_TID_TO_TARGET_MASK(tid)
1030	 && (sync_data == ASYN_SDTR_DATA_FIX_PCI_REV_AB)) {
1031		*period = *offset = 0;
1032	} else {
1033		*period = adv->sdtr_period_tbl[((sync_data >> 4) & 0xF)];
1034		*offset = sync_data & 0xF;
1035	}
1036}
1037
/*
 * Update the synchronous transfer settings for a target.  "type" is
 * a mask of ADV_TRANS_CUR/GOAL/USER selecting which copies of the
 * transfer info to update.  When the current settings change, the
 * chip is briefly halted (at splcam) so the hardware SDTR register
 * can be rewritten, and CAM is notified if a path was supplied.
 */
void
adv_set_syncrate(struct adv_softc *adv, struct cam_path *path,
		 u_int tid, u_int period, u_int offset, u_int type)
{
	struct adv_target_transinfo* tinfo;
	u_int old_period;
	u_int old_offset;
	u_int8_t sdtr_data;

	tinfo = &adv->tinfo[tid];

	/* Filter our input */
	sdtr_data = adv_period_offset_to_sdtr(adv, &period,
					      &offset, tid);

	old_period = tinfo->current.period;
	old_offset = tinfo->current.offset;

	if ((type & ADV_TRANS_CUR) != 0
	 && ((old_period != period || old_offset != offset)
	  || period == 0 || offset == 0) /*Changes in asyn fix settings*/) {
		int s;
		int halted;

		s = splcam();
		halted = adv_is_chip_halted(adv);
		if (halted == 0)
			/* Must halt the chip first */
			adv_host_req_chip_halt(adv);

		/* Update current hardware settings */
		adv_set_sdtr_reg_at_id(adv, tid, sdtr_data);

		/*
		 * If a target can run in sync mode, we don't need
		 * to check it for sync problems.
		 */
		if (offset != 0)
			adv->fix_asyn_xfer &= ~ADV_TID_TO_TARGET_MASK(tid);

		if (halted == 0)
			/* Start the chip again */
			adv_start_chip(adv);

		splx(s);
		tinfo->current.period = period;
		tinfo->current.offset = offset;

		if (path != NULL) {
			/*
			 * Tell the SCSI layer about the
			 * new transfer parameters.
			 */
			struct	ccb_trans_settings neg;

			neg.sync_period = period;
			neg.sync_offset = offset;
			neg.valid = CCB_TRANS_SYNC_RATE_VALID
				  | CCB_TRANS_SYNC_OFFSET_VALID;
			xpt_setup_ccb(&neg.ccb_h, path, /*priority*/1);
			xpt_async(AC_TRANSFER_NEG, path, &neg);
		}
	}

	if ((type & ADV_TRANS_GOAL) != 0) {
		tinfo->goal.period = period;
		tinfo->goal.offset = offset;
	}

	if ((type & ADV_TRANS_USER) != 0) {
		tinfo->user.period = period;
		tinfo->user.offset = offset;
	}
}
1112
/*
 * Clip *period and *offset to values the chip can actually generate
 * and return the corresponding SDTR data byte: the period-table index
 * in the high nibble, the REQ/ACK offset in the low nibble.  Either
 * pointer may be NULL when the caller does not care about that value.
 * If no synchronous rate can satisfy the request, both are forced to
 * 0 (async) and 0 is returned -- or the magic fixup value for targets
 * subject to the PCI rev A/B async workaround.
 */
u_int8_t
adv_period_offset_to_sdtr(struct adv_softc *adv, u_int *period,
			  u_int *offset, int tid)
{
	u_int i;
	u_int dummy_offset;
	u_int dummy_period;

	/* Tolerate NULL pointers by substituting local dummies */
	if (offset == NULL) {
		dummy_offset = 0;
		offset = &dummy_offset;
	}

	if (period == NULL) {
		dummy_period = 0;
		period = &dummy_period;
	}

#define MIN(a,b) (((a) < (b)) ? (a) : (b))

	*offset = MIN(ADV_SYN_MAX_OFFSET, *offset);
	if (*period != 0 && *offset != 0) {
		/* Table entry 0 is our fastest (smallest period) rate */
		for (i = 0; i < adv->sdtr_period_tbl_size; i++) {
			if (*period <= adv->sdtr_period_tbl[i]) {
				/*
				 * When responding to a target that requests
				 * sync, the requested  rate may fall between
				 * two rates that we can output, but still be
				 * a rate that we can receive.  Because of this,
				 * we want to respond to the target with
				 * the same rate that it sent to us even
				 * if the period we use to send data to it
				 * is lower.  Only lower the response period
				 * if we must.
				 */
				if (i == 0 /* Our maximum rate */)
					*period = adv->sdtr_period_tbl[0];
				return ((i << 4) | *offset);
			}
		}
	}

	/* Must go async */
	*period = 0;
	*offset = 0;
	if (adv->fix_asyn_xfer & ADV_TID_TO_TARGET_MASK(tid))
		return (ASYN_SDTR_DATA_FIX_PCI_REV_AB);
	return (0);
}
1162
1163/* Internal Routines */
1164
/*
 * Read 'count' 16-bit words of chip local RAM, starting at 's_addr',
 * into 'buffer'.  A single address write suffices: successive
 * accesses to the LRAM data port step through consecutive words.
 */
static void
adv_read_lram_16_multi(struct adv_softc *adv, u_int16_t s_addr,
		       u_int16_t *buffer, int count)
{
	ADV_OUTW(adv, ADV_LRAM_ADDR, s_addr);
	ADV_INSW(adv, ADV_LRAM_DATA, buffer, count);
}
1172
/*
 * Write 'count' 16-bit words from 'buffer' into chip local RAM
 * starting at 's_addr', streaming through the LRAM data port.
 */
static void
adv_write_lram_16_multi(struct adv_softc *adv, u_int16_t s_addr,
			u_int16_t *buffer, int count)
{
	ADV_OUTW(adv, ADV_LRAM_ADDR, s_addr);
	ADV_OUTSW(adv, ADV_LRAM_DATA, buffer, count);
}
1180
/*
 * Fill 'count' 16-bit words of chip local RAM, starting at 's_addr',
 * with the constant 'set_value'.
 */
static void
adv_mset_lram_16(struct adv_softc *adv, u_int16_t s_addr,
		 u_int16_t set_value, int count)
{
	ADV_OUTW(adv, ADV_LRAM_ADDR, s_addr);
	bus_space_set_multi_2(adv->tag, adv->bsh, ADV_LRAM_DATA,
			      set_value, count);
}
1189
/*
 * Sum 'count' 16-bit words of local RAM starting at 's_addr'.  Used
 * by adv_load_microcode() to checksum the downloaded microcode.
 */
static u_int32_t
adv_msum_lram_16(struct adv_softc *adv, u_int16_t s_addr, int count)
{
	u_int32_t	sum;
	int		i;

	sum = 0;
	ADV_OUTW(adv, ADV_LRAM_ADDR, s_addr);
	for (i = 0; i < count; i++)
		sum += ADV_INW(adv, ADV_LRAM_DATA);
	return (sum);
}
1202
/*
 * Write a 16-bit word to local RAM and read it back to confirm it
 * stuck.  Returns 0 on success, 1 if the readback did not match.
 */
static int
adv_write_and_verify_lram_16(struct adv_softc *adv, u_int16_t addr,
			     u_int16_t value)
{
	int	retval;

	retval = 0;
	ADV_OUTW(adv, ADV_LRAM_ADDR, addr);
	ADV_OUTW(adv, ADV_LRAM_DATA, value);
	/* Give the write time to settle before verifying */
	DELAY(10000);
	ADV_OUTW(adv, ADV_LRAM_ADDR, addr);
	if (value != ADV_INW(adv, ADV_LRAM_DATA))
		retval = 1;
	return (retval);
}
1218
/*
 * Read a 32-bit value from local RAM at 'addr'.  The value occupies
 * two consecutive 16-bit LRAM words; the order in which the halves
 * stream out of the data port depends on host byte order, so read
 * them accordingly and reassemble.
 */
static u_int32_t
adv_read_lram_32(struct adv_softc *adv, u_int16_t addr)
{
	u_int16_t           val_low, val_high;

	ADV_OUTW(adv, ADV_LRAM_ADDR, addr);

#if BYTE_ORDER == BIG_ENDIAN
	val_high = ADV_INW(adv, ADV_LRAM_DATA);
	val_low = ADV_INW(adv, ADV_LRAM_DATA);
#else
	val_low = ADV_INW(adv, ADV_LRAM_DATA);
	val_high = ADV_INW(adv, ADV_LRAM_DATA);
#endif

	return (((u_int32_t)val_high << 16) | (u_int32_t)val_low);
}
1236
/*
 * Write a 32-bit value to local RAM at 'addr' as two consecutive
 * 16-bit words, ordered to match what adv_read_lram_32() expects for
 * this host's byte order.
 */
static void
adv_write_lram_32(struct adv_softc *adv, u_int16_t addr, u_int32_t value)
{
	ADV_OUTW(adv, ADV_LRAM_ADDR, addr);

#if BYTE_ORDER == BIG_ENDIAN
	ADV_OUTW(adv, ADV_LRAM_DATA, (u_int16_t)((value >> 16) & 0xFFFF));
	ADV_OUTW(adv, ADV_LRAM_DATA, (u_int16_t)(value & 0xFFFF));
#else
	ADV_OUTW(adv, ADV_LRAM_DATA, (u_int16_t)(value & 0xFFFF));
	ADV_OUTW(adv, ADV_LRAM_DATA, (u_int16_t)((value >> 16) & 0xFFFF));
#endif
}
1250
/*
 * Write 'count' 32-bit values to local RAM starting at 's_addr' by
 * streaming them out as 2 * count 16-bit words.
 */
static void
adv_write_lram_32_multi(struct adv_softc *adv, u_int16_t s_addr,
			u_int32_t *buffer, int count)
{
	ADV_OUTW(adv, ADV_LRAM_ADDR, s_addr);
	ADV_OUTSW(adv, ADV_LRAM_DATA, (u_int16_t *)buffer, count * 2);
}
1258
/*
 * Read the 16-bit EEPROM word at 'addr'.  Writes are disabled first
 * so the read cannot disturb the EEPROM contents; the DELAYs give the
 * EEPROM time to complete each command.
 */
static u_int16_t
adv_read_eeprom_16(struct adv_softc *adv, u_int8_t addr)
{
	u_int16_t read_wval;
	u_int8_t  cmd_reg;

	adv_write_eeprom_cmd_reg(adv, ADV_EEPROM_CMD_WRITE_DISABLE);
	DELAY(1000);
	cmd_reg = addr | ADV_EEPROM_CMD_READ;
	adv_write_eeprom_cmd_reg(adv, cmd_reg);
	DELAY(1000);
	read_wval = ADV_INW(adv, ADV_EEPROM_DATA);
	DELAY(1000);
	return (read_wval);
}
1274
/*
 * Write 'value' to EEPROM word 'addr' and return the word read back
 * afterwards (callers compare against 'value' to detect failure).
 * The write is skipped entirely when the EEPROM already holds the
 * desired value, sparing unnecessary write cycles.
 */
static u_int16_t
adv_write_eeprom_16(struct adv_softc *adv, u_int8_t addr, u_int16_t value)
{
	u_int16_t	read_value;

	read_value = adv_read_eeprom_16(adv, addr);
	if (read_value != value) {
		adv_write_eeprom_cmd_reg(adv, ADV_EEPROM_CMD_WRITE_ENABLE);
		DELAY(1000);

		ADV_OUTW(adv, ADV_EEPROM_DATA, value);
		DELAY(1000);

		adv_write_eeprom_cmd_reg(adv, ADV_EEPROM_CMD_WRITE | addr);
		/* EEPROM write cycles are slow; wait 20ms before disabling */
		DELAY(20 * 1000);

		adv_write_eeprom_cmd_reg(adv, ADV_EEPROM_CMD_WRITE_DISABLE);
		DELAY(1000);
		read_value = adv_read_eeprom_16(adv, addr);
	}
	return (read_value);
}
1297
1298static int
1299adv_write_eeprom_cmd_reg(struct adv_softc *adv, u_int8_t cmd_reg)
1300{
1301	u_int8_t read_back;
1302	int	 retry;
1303
1304	retry = 0;
1305	while (1) {
1306		ADV_OUTB(adv, ADV_EEPROM_CMD, cmd_reg);
1307		DELAY(1000);
1308		read_back = ADV_INB(adv, ADV_EEPROM_CMD);
1309		if (read_back == cmd_reg) {
1310			return (1);
1311		}
1312		if (retry++ > ADV_EEPROM_MAX_RETRY) {
1313			return (0);
1314		}
1315	}
1316}
1317
/*
 * Write 'eeprom_config' to the EEPROM and verify it: the first two
 * words, then the bus-type dependent config region, then a checksum
 * word covering everything written.  All words are then read back
 * and compared.  Returns the number of write/verify failures
 * (0 == success).  The word pointer 'wbuf' walks the config struct
 * in lockstep with the EEPROM address 's_addr'.
 */
static int
adv_set_eeprom_config_once(struct adv_softc *adv,
			   struct adv_eeprom_config *eeprom_config)
{
	int		n_error;
	u_int16_t	*wbuf;
	u_int16_t	sum;
	u_int8_t	s_addr;
	u_int8_t	cfg_beg;
	u_int8_t	cfg_end;

	wbuf = (u_int16_t *)eeprom_config;
	n_error = 0;
	sum = 0;
	/* The first two words live at EEPROM addresses 0 and 1 */
	for (s_addr = 0; s_addr < 2; s_addr++, wbuf++) {
		sum += *wbuf;
		if (*wbuf != adv_write_eeprom_16(adv, s_addr, *wbuf)) {
			n_error++;
		}
	}
	/* The config region's bounds depend on the bus attachment */
	if (adv->type & ADV_VL) {
		cfg_beg = ADV_EEPROM_CFG_BEG_VL;
		cfg_end = ADV_EEPROM_MAX_ADDR_VL;
	} else {
		cfg_beg = ADV_EEPROM_CFG_BEG;
		cfg_end = ADV_EEPROM_MAX_ADDR;
	}

	for (s_addr = cfg_beg; s_addr <= (cfg_end - 1); s_addr++, wbuf++) {
		sum += *wbuf;
		if (*wbuf != adv_write_eeprom_16(adv, s_addr, *wbuf)) {
			n_error++;
		}
	}
	/* The final word (at cfg_end) holds the checksum */
	*wbuf = sum;
	if (sum != adv_write_eeprom_16(adv, s_addr, sum)) {
		n_error++;
	}
	/* Read everything back, including the checksum word */
	wbuf = (u_int16_t *)eeprom_config;
	for (s_addr = 0; s_addr < 2; s_addr++, wbuf++) {
		if (*wbuf != adv_read_eeprom_16(adv, s_addr)) {
			n_error++;
		}
	}
	for (s_addr = cfg_beg; s_addr <= cfg_end; s_addr++, wbuf++) {
		if (*wbuf != adv_read_eeprom_16(adv, s_addr)) {
			n_error++;
		}
	}
	return (n_error);
}
1369
/*
 * Copy the microcode image ('mcode_size' bytes at 'mcode_buf') into
 * local RAM at 's_addr', record the checksum and size in the LRAM
 * variables the microcode expects, and return the checksum of the
 * words actually written so the caller can validate the download.
 */
static u_int32_t
adv_load_microcode(struct adv_softc *adv, u_int16_t s_addr,
		   u_int16_t *mcode_buf, u_int16_t mcode_size)
{
	u_int32_t chksum;
	u_int16_t mcode_lram_size;
	u_int16_t mcode_chksum;

	/* Sizes are in bytes; LRAM transfers are in 16-bit words */
	mcode_lram_size = mcode_size >> 1;
	/* XXX Why zero the memory just before you write the whole thing?? */
	adv_mset_lram_16(adv, s_addr, 0, mcode_lram_size);
	adv_write_lram_16_multi(adv, s_addr, mcode_buf, mcode_lram_size);

	/* Full-image checksum returned to the caller... */
	chksum = adv_msum_lram_16(adv, s_addr, mcode_lram_size);
	/* ...and the code-section checksum the microcode checks itself */
	mcode_chksum = (u_int16_t)adv_msum_lram_16(adv, ADV_CODE_SEC_BEG,
						   ((mcode_size - s_addr
						     - ADV_CODE_SEC_BEG) >> 1));
	adv_write_lram_16(adv, ADVV_MCODE_CHKSUM_W, mcode_chksum);
	adv_write_lram_16(adv, ADVV_MCODE_SIZE_W, mcode_size);
	return (chksum);
}
1391
/*
 * Re-establish the chip's local RAM state after a reset: rebuild the
 * queue blocks, then the queue-linkage bookkeeping variables.
 */
static void
adv_reinit_lram(struct adv_softc *adv)
{
	adv_init_lram(adv);
	adv_init_qlink_var(adv);
}
1397
/*
 * Build the queue-block structures in chip local RAM: zero the whole
 * queue region, then link queues 1..max_openings into a doubly-linked
 * free list.  The few entries past max_openings are written with
 * forward/backward links pointing at themselves.
 */
static void
adv_init_lram(struct adv_softc *adv)
{
	u_int8_t  i;
	u_int16_t s_addr;

	/* Zero (max_openings + 3) 64-byte queue blocks, counted in words */
	adv_mset_lram_16(adv, ADV_QADR_BEG, 0,
			 (((adv->max_openings + 2 + 1) * 64) >> 1));

	i = ADV_MIN_ACTIVE_QNO;
	s_addr = ADV_QADR_BEG + ADV_QBLK_SIZE;

	/* First queue: backward link wraps around to the last queue */
	adv_write_lram_8(adv, s_addr + ADV_SCSIQ_B_FWD,	i + 1);
	adv_write_lram_8(adv, s_addr + ADV_SCSIQ_B_BWD, adv->max_openings);
	adv_write_lram_8(adv, s_addr + ADV_SCSIQ_B_QNO, i);
	i++;
	s_addr += ADV_QBLK_SIZE;
	/* Middle queues link to their immediate neighbors */
	for (; i < adv->max_openings; i++, s_addr += ADV_QBLK_SIZE) {
		adv_write_lram_8(adv, s_addr + ADV_SCSIQ_B_FWD, i + 1);
		adv_write_lram_8(adv, s_addr + ADV_SCSIQ_B_BWD, i - 1);
		adv_write_lram_8(adv, s_addr + ADV_SCSIQ_B_QNO, i);
	}

	/* Last queue: forward link terminates the chain */
	adv_write_lram_8(adv, s_addr + ADV_SCSIQ_B_FWD, ADV_QLINK_END);
	adv_write_lram_8(adv, s_addr + ADV_SCSIQ_B_BWD, adv->max_openings - 1);
	adv_write_lram_8(adv, s_addr + ADV_SCSIQ_B_QNO, adv->max_openings);
	i++;
	s_addr += ADV_QBLK_SIZE;

	/* Trailing entries are self-linked */
	for (; i <= adv->max_openings + 3; i++, s_addr += ADV_QBLK_SIZE) {
		adv_write_lram_8(adv, s_addr + ADV_SCSIQ_B_FWD, i);
		adv_write_lram_8(adv, s_addr + ADV_SCSIQ_B_BWD, i);
		adv_write_lram_8(adv, s_addr + ADV_SCSIQ_B_QNO, i);
	}
}
1433
/*
 * Initialize the LRAM variables consumed by the microcode and aim the
 * chip's program counter at the microcode entry point.  Returns 0 on
 * success or 1 if the program counter did not take the new value.
 */
static int
adv_init_microcode_var(struct adv_softc *adv)
{
	int	 i;

	for (i = 0; i <= ADV_MAX_TID; i++) {

		/* Start out async all around */
		adv_set_syncrate(adv, /*path*/NULL,
				 i, 0, 0,
				 ADV_TRANS_GOAL|ADV_TRANS_CUR);
	}

	adv_init_qlink_var(adv);

	adv_write_lram_8(adv, ADVV_DISC_ENABLE_B, adv->disc_enable);
	/* Our SCSI id, expressed as a bit mask */
	adv_write_lram_8(adv, ADVV_HOSTSCSI_ID_B, 0x01 << adv->scsi_id);

	/* Physical address and size of the data-overrun catch buffer */
	adv_write_lram_32(adv, ADVV_OVERRUN_PADDR_D, adv->overrun_physbase);

	adv_write_lram_32(adv, ADVV_OVERRUN_BSIZE_D, ADV_OVERRUN_BSIZE);

	ADV_OUTW(adv, ADV_REG_PROG_COUNTER, ADV_MCODE_START_ADDR);
	if (ADV_INW(adv, ADV_REG_PROG_COUNTER) != ADV_MCODE_START_ADDR) {
		printf("adv%d: Unable to set program counter. Aborting.\n",
		       adv->unit);
		return (1);
	}
	return (0);
}
1464
/*
 * Initialize the queue-linkage variables in local RAM: free/done list
 * heads and tails, the busy and disconnected queue heads, and the
 * assorted error/halt/stop/busy flag bytes.  The first 32 words of
 * the queue address region are cleared as well.
 */
static void
adv_init_qlink_var(struct adv_softc *adv)
{
	int	  i;
	u_int16_t lram_addr;

	adv_write_lram_8(adv, ADVV_NEXTRDY_B, 1);
	adv_write_lram_8(adv, ADVV_DONENEXT_B, adv->max_openings);

	adv_write_lram_16(adv, ADVV_FREE_Q_HEAD_W, 1);
	adv_write_lram_16(adv, ADVV_DONE_Q_TAIL_W, adv->max_openings);

	/* The two entries past max_openings serve as busy/disc heads */
	adv_write_lram_8(adv, ADVV_BUSY_QHEAD_B,
			 (u_int8_t)((int) adv->max_openings + 1));
	adv_write_lram_8(adv, ADVV_DISC1_QHEAD_B,
			 (u_int8_t)((int) adv->max_openings + 2));

	adv_write_lram_8(adv, ADVV_TOTAL_READY_Q_B, adv->max_openings);

	/* Clear error/halt/stop and busy-tracking state */
	adv_write_lram_16(adv, ADVV_ASCDVC_ERR_CODE_W, 0);
	adv_write_lram_16(adv, ADVV_HALTCODE_W, 0);
	adv_write_lram_8(adv, ADVV_STOP_CODE_B, 0);
	adv_write_lram_8(adv, ADVV_SCSIBUSY_B, 0);
	adv_write_lram_8(adv, ADVV_WTM_FLAG_B, 0);
	adv_write_lram_8(adv, ADVV_Q_DONE_IN_PROGRESS_B, 0);

	lram_addr = ADV_QADR_BEG;
	for (i = 0; i < 32; i++, lram_addr += 2)
		adv_write_lram_16(adv, lram_addr, 0);
}
1495
/*
 * Mask host interrupts by clearing the interrupt-enable bit in the
 * low configuration word.
 */
static void
adv_disable_interrupt(struct adv_softc *adv)
{
	u_int16_t cfg;

	cfg = ADV_INW(adv, ADV_CONFIG_LSW);
	ADV_OUTW(adv, ADV_CONFIG_LSW, cfg & ~ADV_CFG_LSW_HOST_INT_ON);
}
1504
/*
 * Unmask host interrupts by setting the interrupt-enable bit in the
 * low configuration word.
 */
static void
adv_enable_interrupt(struct adv_softc *adv)
{
	u_int16_t cfg;

	cfg = ADV_INW(adv, ADV_CONFIG_LSW);
	ADV_OUTW(adv, ADV_CONFIG_LSW, cfg | ADV_CFG_LSW_HOST_INT_ON);
}
1513
/*
 * Pulse the interrupt-active bit in the chip status register (write
 * it high, then low).  NOTE(review): presumably this acknowledges/
 * clears the chip's IRQ latch -- confirm against the chip docs.
 */
static void
adv_toggle_irq_act(struct adv_softc *adv)
{
	ADV_OUTW(adv, ADV_CHIP_STATUS, ADV_CIW_IRQ_ACT);
	ADV_OUTW(adv, ADV_CHIP_STATUS, 0);
}
1520
1521void
1522adv_start_execution(struct adv_softc *adv)
1523{
1524	if (adv_read_lram_8(adv, ADV_STOP_CODE_B) != 0) {
1525		adv_write_lram_8(adv, ADV_STOP_CODE_B, 0);
1526	}
1527}
1528
/*
 * Force the RISC processor to halt: drop any single-step/test/diag
 * mode bits, assert the halt bit in the chip control register, and
 * issue HALT and RFLAG_WTM instructions through the IH register.
 * Returns 1 if the status register reports the chip halted, else 0.
 */
static int
adv_stop_chip(struct adv_softc *adv)
{
	u_int8_t cc_val;

	cc_val = ADV_INB(adv, ADV_CHIP_CTRL)
		 & (~(ADV_CC_SINGLE_STEP | ADV_CC_TEST | ADV_CC_DIAG));
	ADV_OUTB(adv, ADV_CHIP_CTRL, cc_val | ADV_CC_HALT);
	adv_set_chip_ih(adv, ADV_INS_HALT);
	adv_set_chip_ih(adv, ADV_INS_RFLAG_WTM);
	if ((ADV_INW(adv, ADV_CHIP_STATUS) & ADV_CSW_HALTED) == 0) {
		return (0);
	}
	return (1);
}
1544
/*
 * Ask the running microcode to halt by posting a stop code in LRAM,
 * then spin (bounded) until the chip reports halted.  The previous
 * stop code is restored before returning.  Returns non-zero if the
 * chip ended up halted (immediately or within the spin limit).
 */
static int
adv_host_req_chip_halt(struct adv_softc *adv)
{
	int	 count;
	u_int8_t saved_stop_code;

	if (adv_is_chip_halted(adv))
		return (1);

	count = 0;
	saved_stop_code = adv_read_lram_8(adv, ADVV_STOP_CODE_B);
	adv_write_lram_8(adv, ADVV_STOP_CODE_B,
			 ADV_STOP_HOST_REQ_RISC_HALT | ADV_STOP_REQ_RISC_STOP);
	/* Bounded busy-wait for the microcode to acknowledge the halt */
	while (adv_is_chip_halted(adv) == 0
	    && count++ < 2000)
		;

	adv_write_lram_8(adv, ADVV_STOP_CODE_B, saved_stop_code);
	return (count < 2000);
}
1565
/*
 * Write an instruction code to the chip's IH register, which lives in
 * register bank 1.  Bank 0 is restored before returning.
 */
static void
adv_set_chip_ih(struct adv_softc *adv, u_int16_t ins_code)
{
	adv_set_bank(adv, 1);
	ADV_OUTW(adv, ADV_REG_IH, ins_code);
	adv_set_bank(adv, 0);
}
1573
#if UNUSED
/*
 * Fetch the SCSI control register from register bank 1 (bank 0 is
 * restored before returning).  Currently unreferenced.
 */
static u_int8_t
adv_get_chip_scsi_ctrl(struct adv_softc *adv)
{
	u_int8_t scsi_ctrl;

	adv_set_bank(adv, 1);
	scsi_ctrl = ADV_INB(adv, ADV_REG_SC);
	adv_set_bank(adv, 0);
	return (scsi_ctrl);
}
#endif
1586
1587static int
1588adv_sgcount_to_qcount(int sgcount)
1589{
1590	int	n_sg_list_qs;
1591
1592	n_sg_list_qs = ((sgcount - 1) / ADV_SG_LIST_PER_Q);
1593	if (((sgcount - 1) % ADV_SG_LIST_PER_Q) != 0)
1594		n_sg_list_qs++;
1595	return (n_sg_list_qs + 1);
1596}
1597
1598/*
1599 * XXX Looks like more padding issues in this routine as well.
1600 *     There has to be a way to turn this into an insw.
1601 */
1602static void
1603adv_get_q_info(struct adv_softc *adv, u_int16_t s_addr,
1604	       u_int16_t *inbuf, int words)
1605{
1606	int	i;
1607
1608	ADV_OUTW(adv, ADV_LRAM_ADDR, s_addr);
1609	for (i = 0; i < words; i++, inbuf++) {
1610		if (i == 5) {
1611			continue;
1612		}
1613		*inbuf = ADV_INW(adv, ADV_LRAM_DATA);
1614	}
1615}
1616
1617static u_int
1618adv_get_num_free_queues(struct adv_softc *adv, u_int8_t n_qs)
1619{
1620	u_int	  cur_used_qs;
1621	u_int	  cur_free_qs;
1622
1623	cur_used_qs = adv->cur_active + ADV_MIN_FREE_Q;
1624
1625	if ((cur_used_qs + n_qs) <= adv->max_openings) {
1626		cur_free_qs = adv->max_openings - cur_used_qs;
1627		return (cur_free_qs);
1628	}
1629	adv->openings_needed = n_qs;
1630	return (0);
1631}
1632
1633static u_int8_t
1634adv_alloc_free_queues(struct adv_softc *adv, u_int8_t free_q_head,
1635		      u_int8_t n_free_q)
1636{
1637	int i;
1638
1639	for (i = 0; i < n_free_q; i++) {
1640		free_q_head = adv_alloc_free_queue(adv, free_q_head);
1641		if (free_q_head == ADV_QLINK_END)
1642			break;
1643	}
1644	return (free_q_head);
1645}
1646
/*
 * Attempt to claim the queue at 'free_q_head'.  If that queue's
 * status shows it is not in use (QS_READY clear), return the next
 * queue number from its forward link; otherwise return ADV_QLINK_END
 * to indicate no queue was available.
 */
static u_int8_t
adv_alloc_free_queue(struct adv_softc *adv, u_int8_t free_q_head)
{
	u_int16_t	q_addr;
	u_int8_t	next_qp;
	u_int8_t	q_status;

	next_qp = ADV_QLINK_END;
	q_addr = ADV_QNO_TO_QADDR(free_q_head);
	q_status = adv_read_lram_8(adv,	q_addr + ADV_SCSIQ_B_STATUS);

	if ((q_status & QS_READY) == 0)
		next_qp = adv_read_lram_8(adv, q_addr + ADV_SCSIQ_B_FWD);

	return (next_qp);
}
1663
/*
 * Try to claim 'n_q_required' entries from the free-queue list and
 * hand 'scsiq' to the microcode.  On success the request's sense
 * pointer is aimed at the per-queue DMA-able sense slot, the free
 * list head is advanced, cur_active is bumped, and 0 is returned.
 * Returns 1 if enough free queues were not available.
 */
static int
adv_send_scsi_queue(struct adv_softc *adv, struct adv_scsi_q *scsiq,
		    u_int8_t n_q_required)
{
	u_int8_t	free_q_head;
	u_int8_t	next_qp;
	u_int8_t	tid_no;
	u_int8_t	target_ix;
	int		retval;

	retval = 1;
	target_ix = scsiq->q2.target_ix;
	tid_no = ADV_TIX_TO_TID(target_ix);
	free_q_head = adv_read_lram_16(adv, ADVV_FREE_Q_HEAD_W) & 0xFF;
	if ((next_qp = adv_alloc_free_queues(adv, free_q_head, n_q_required))
	    != ADV_QLINK_END) {
		scsiq->q1.q_no = free_q_head;

		/*
		 * Now that we know our Q number, point our sense
		 * buffer pointer to a bus dma mapped area where
		 * we can dma the data to.
		 */
		scsiq->q1.sense_addr = adv->sense_physbase
		    + ((free_q_head - 1) * sizeof(struct scsi_sense_data));
		adv_put_ready_sg_list_queue(adv, scsiq, free_q_head);
		adv_write_lram_16(adv, ADVV_FREE_Q_HEAD_W, next_qp);
		adv->cur_active += n_q_required;
		retval = 0;
	}
	return (retval);
}
1696
1697
/*
 * Stage 'scsiq' and its scatter/gather list into the queue entries
 * rooted at 'q_no'.  The first SG element travels with the head queue
 * (sg_index starts at 1); each continuation queue holds up to
 * ADV_SG_LIST_PER_Q elements, chained through the queue forward
 * links, with the final chunk flagged QCSG_SG_XFER_END.  Finishes by
 * writing the head queue itself via adv_put_ready_queue().
 */
static void
adv_put_ready_sg_list_queue(struct adv_softc *adv, struct adv_scsi_q *scsiq,
			    u_int q_no)
{
	u_int8_t	sg_list_dwords;
	u_int8_t	sg_index, i;
	u_int8_t	sg_entry_cnt;
	u_int8_t	next_qp;
	u_int16_t	q_addr;
	struct		adv_sg_head *sg_head;
	struct		adv_sg_list_q scsi_sg_q;

	sg_head = scsiq->sg_head;

	if (sg_head) {
		/* Entries remaining after the one carried by the head queue */
		sg_entry_cnt = sg_head->entry_cnt - 1;
#ifdef DIAGNOSTIC
		if (sg_entry_cnt == 0)
			panic("adv_put_ready_sg_list_queue: ScsiQ with "
			      "a SG list but only one element");
		if ((scsiq->q1.cntl & QC_SG_HEAD) == 0)
			panic("adv_put_ready_sg_list_queue: ScsiQ with "
			      "a SG list but QC_SG_HEAD not set");
#endif
		q_addr = ADV_QNO_TO_QADDR(q_no);
		sg_index = 1;
		scsiq->q1.sg_queue_cnt = sg_head->queue_cnt;
		scsi_sg_q.sg_head_qp = q_no;
		scsi_sg_q.cntl = QCSG_SG_XFER_LIST;
		for (i = 0; i < sg_head->queue_cnt; i++) {
			u_int8_t segs_this_q;

			if (sg_entry_cnt > ADV_SG_LIST_PER_Q)
				segs_this_q = ADV_SG_LIST_PER_Q;
			else {
				/* This will be the last segment then */
				segs_this_q = sg_entry_cnt;
				scsi_sg_q.cntl |= QCSG_SG_XFER_END;
			}
			scsi_sg_q.seq_no = i + 1;
			/* Each SG element is two 32-bit dwords */
			sg_list_dwords = segs_this_q << 1;
			if (i == 0) {
				scsi_sg_q.sg_list_cnt = segs_this_q;
				scsi_sg_q.sg_cur_list_cnt = segs_this_q;
			} else {
				scsi_sg_q.sg_list_cnt = segs_this_q - 1;
				scsi_sg_q.sg_cur_list_cnt = segs_this_q - 1;
			}
			/* Follow the queue chain to the next entry */
			next_qp = adv_read_lram_8(adv, q_addr + ADV_SCSIQ_B_FWD);
			scsi_sg_q.q_no = next_qp;
			q_addr = ADV_QNO_TO_QADDR(next_qp);

			adv_write_lram_16_multi(adv,
						q_addr + ADV_SCSIQ_SGHD_CPY_BEG,
						(u_int16_t *)&scsi_sg_q,
						sizeof(scsi_sg_q) >> 1);
			adv_write_lram_32_multi(adv, q_addr + ADV_SGQ_LIST_BEG,
						(u_int32_t *)&sg_head->sg_list[sg_index],
						sg_list_dwords);
			sg_entry_cnt -= segs_this_q;
			sg_index += ADV_SG_LIST_PER_Q;
		}
	}
	adv_put_ready_queue(adv, scsiq, q_no);
}
1763
/*
 * Write the request's control blocks into queue 'q_no''s LRAM area
 * and mark the queue ready for the microcode.  If a sync
 * renegotiation is pending for the target (current != goal period),
 * an SDTR message is staged and QC_MSG_OUT set so the microcode
 * sends it with this command.
 */
static void
adv_put_ready_queue(struct adv_softc *adv, struct adv_scsi_q *scsiq,
		    u_int q_no)
{
	struct		adv_target_transinfo* tinfo;
	u_int		q_addr;
	u_int		tid_no;

	tid_no = ADV_TIX_TO_TID(scsiq->q2.target_ix);
	tinfo = &adv->tinfo[tid_no];
	if (tinfo->current.period != tinfo->goal.period) {

		/* Renegotiate: queue an SDTR message with this request */
		adv_msgout_sdtr(adv, tinfo->goal.period, tinfo->goal.offset);
		scsiq->q1.cntl |= QC_MSG_OUT;
	}
	q_addr = ADV_QNO_TO_QADDR(q_no);

	scsiq->q1.status = QS_FREE;

	adv_write_lram_16_multi(adv, q_addr + ADV_SCSIQ_CDB_BEG,
				(u_int16_t *)scsiq->cdbptr,
				scsiq->q2.cdb_len >> 1);

#if BYTE_ORDER == BIG_ENDIAN
	adv_adj_scsiq_endian(scsiq);
#endif

	adv_put_scsiq(adv, q_addr + ADV_SCSIQ_CPY_BEG,
		      (u_int16_t *) &scsiq->q1.cntl,
		      ((sizeof(scsiq->q1) + sizeof(scsiq->q2)) / 2) - 1);

#if CC_WRITE_IO_COUNT
	adv_write_lram_16(adv, q_addr + ADV_SCSIQ_W_REQ_COUNT,
			  adv->req_count);
#endif

#if CC_CLEAR_DMA_REMAIN

	adv_write_lram_32(adv, q_addr + ADV_SCSIQ_DW_REMAIN_XFER_ADDR, 0);
	adv_write_lram_32(adv, q_addr + ADV_SCSIQ_DW_REMAIN_XFER_CNT, 0);
#endif

	/* Setting QS_READY hands the queue to the microcode */
	adv_write_lram_16(adv, q_addr + ADV_SCSIQ_B_STATUS,
			  (scsiq->q1.q_no << 8) | QS_READY);
}
1809
/*
 * Copy 'words' 16-bit words of a SCSI queue structure to LRAM at
 * 's_addr', skipping source word indices 2 and 10 -- apparently
 * structure padding not present in the microcode's LRAM layout
 * (see the XXX below).
 */
static void
adv_put_scsiq(struct adv_softc *adv, u_int16_t s_addr,
	      u_int16_t *buffer, int words)
{
	int	i;

	/*
	 * XXX This routine makes *gross* assumptions
	 * about padding in the data structures.
	 * Either the data structures should have explicit
	 * padding members added, or they should have padding
	 * turned off via compiler attributes depending on
	 * which yields better overall performance.  My hunch
	 * would be that turning off padding would be the
	 * faster approach as an outsw is much faster than
	 * this crude loop and accessing un-aligned data
	 * members isn't *that* expensive.  The other choice
	 * would be to modify the ASC script so that the
	 * the adv_scsiq_1 structure can be re-arranged so
	 * padding isn't required.
	 */
	ADV_OUTW(adv, ADV_LRAM_ADDR, s_addr);
	for (i = 0; i < words; i++, buffer++) {
		if (i == 2 || i == 10) {
			continue;
		}
		ADV_OUTW(adv, ADV_LRAM_DATA, *buffer);
	}
}
1839
/*
 * Process an extended message received from a target while the chip
 * is halted.  SDTR requests/responses are range-checked, recorded via
 * adv_set_syncrate(), and either accepted (if they answer our own
 * outstanding SDTR) or answered with our counter-proposal.  WDTR
 * requests are answered with a width of 0 (8-bit); any other extended
 * message is rejected.  The possibly-updated queue control byte is
 * written back to the halted queue.
 */
static void
adv_handle_extmsg_in(struct adv_softc *adv, u_int16_t halt_q_addr,
		     u_int8_t q_cntl, target_bit_vector target_mask,
		     int tid_no)
{
	struct	ext_msg ext_msg;

	adv_read_lram_16_multi(adv, ADVV_MSGIN_BEG, (u_int16_t *) &ext_msg,
			       sizeof(ext_msg) >> 1);
	if ((ext_msg.msg_type == MSG_EXTENDED)
	 && (ext_msg.msg_req == MSG_EXT_SDTR)
	 && (ext_msg.msg_len == MSG_EXT_SDTR_LEN)) {
		union	 ccb *ccb;
		struct	 adv_target_transinfo* tinfo;
		u_int	 period;
		u_int	 offset;
		int	 sdtr_accept;
		u_int8_t orig_offset;

		ccb = (union ccb *) adv_read_lram_32(adv, halt_q_addr
							 + ADV_SCSIQ_D_CCBPTR);
		tinfo = &adv->tinfo[tid_no];
		sdtr_accept = TRUE;

		orig_offset = ext_msg.req_ack_offset;
		/* Never agree to run faster than our negotiation goal */
		if (ext_msg.xfer_period < tinfo->goal.period) {
                	sdtr_accept = FALSE;
			ext_msg.xfer_period = tinfo->goal.period;
		}

		/* Perform range checking */
		period = ext_msg.xfer_period;
		offset = ext_msg.req_ack_offset;
		adv_period_offset_to_sdtr(adv, &period,  &offset, tid_no);
		ext_msg.xfer_period = period;
		ext_msg.req_ack_offset = offset;

		/* Record our current sync settings */
		adv_set_syncrate(adv, ccb->ccb_h.path,
				 tid_no, ext_msg.xfer_period,
				 ext_msg.req_ack_offset,
				 ADV_TRANS_GOAL|ADV_TRANS_ACTIVE);

		/* Offset too high or large period forced async */
		if (orig_offset != ext_msg.req_ack_offset)
			sdtr_accept = FALSE;

		if (sdtr_accept && (q_cntl & QC_MSG_OUT)) {
			/* Valid response to our requested negotiation */
			q_cntl &= ~QC_MSG_OUT;
		} else {
			/* Must Respond */
			q_cntl |= QC_MSG_OUT;
			adv_msgout_sdtr(adv, ext_msg.xfer_period,
					ext_msg.req_ack_offset);
		}

	} else if (ext_msg.msg_type == MSG_EXTENDED
		&& ext_msg.msg_req == MSG_EXT_WDTR
		&& ext_msg.msg_len == MSG_EXT_WDTR_LEN) {

		/* Only 8-bit transfers are supported; answer width 0 */
		ext_msg.wdtr_width = 0;
		adv_write_lram_16_multi(adv, ADVV_MSGOUT_BEG,
					(u_int16_t *)&ext_msg,
					sizeof(ext_msg) >> 1);
		q_cntl |= QC_MSG_OUT;
        } else {

		/* Unknown extended message; reject it */
		ext_msg.msg_type = MSG_MESSAGE_REJECT;
		adv_write_lram_16_multi(adv, ADVV_MSGOUT_BEG,
					(u_int16_t *)&ext_msg,
					sizeof(ext_msg) >> 1);
		q_cntl |= QC_MSG_OUT;
        }
	adv_write_lram_8(adv, halt_q_addr + ADV_SCSIQ_B_CNTL, q_cntl);
}
1916
/*
 * Stage an SDTR extended message in the LRAM message-out buffer so
 * the microcode can transmit it during the next message-out phase.
 */
static void
adv_msgout_sdtr(struct adv_softc *adv, u_int8_t sdtr_period,
		u_int8_t sdtr_offset)
{
	struct	 ext_msg sdtr_buf;

	sdtr_buf.msg_type = MSG_EXTENDED;
	sdtr_buf.msg_len = MSG_EXT_SDTR_LEN;
	sdtr_buf.msg_req = MSG_EXT_SDTR;
	sdtr_buf.xfer_period = sdtr_period;
	/* Clamp the offset to what the chip supports */
	sdtr_offset &= ADV_SYN_MAX_OFFSET;
	sdtr_buf.req_ack_offset = sdtr_offset;
	adv_write_lram_16_multi(adv, ADVV_MSGOUT_BEG,
				(u_int16_t *) &sdtr_buf,
				sizeof(sdtr_buf) / 2);
}
1933
/*
 * Walk every queue entry and abort those matching target/lun (and
 * 'ccb', when non-NULL).  With 'queued_only' set, entries already
 * active on the bus (disconnected/busy/done) are spared.  Matching
 * queues are marked QS_ABORTED, their CCB receives 'status' unless an
 * earlier error was already recorded, and the CCB's cinfo is flagged
 * ACCB_ABORT_QUEUED.  Returns the number of entries aborted.
 */
int
adv_abort_ccb(struct adv_softc *adv, int target, int lun, union ccb *ccb,
	      u_int32_t status, int queued_only)
{
	u_int16_t q_addr;
	u_int8_t  q_no;
	struct adv_q_done_info scsiq_buf;
	struct adv_q_done_info *scsiq;
	u_int8_t  target_ix;
	int	  count;

	scsiq = &scsiq_buf;
	target_ix = ADV_TIDLUN_TO_IX(target, lun);
	count = 0;
	for (q_no = ADV_MIN_ACTIVE_QNO; q_no <= adv->max_openings; q_no++) {
		q_addr = ADV_QNO_TO_QADDR(q_no);

		adv_copy_lram_doneq(adv, q_addr, scsiq, adv->max_dma_count);
		if (((scsiq->q_status & QS_READY) != 0)
		 && ((scsiq->q_status & QS_ABORTED) == 0)
		 && ((scsiq->cntl & QCSG_SG_XFER_LIST) == 0)
		 && (scsiq->d2.target_ix == target_ix)
		 && (queued_only == 0
		  || !(scsiq->q_status & (QS_DISC1|QS_DISC2|QS_BUSY|QS_DONE)))
		 && (ccb == NULL || (ccb == (union ccb *)scsiq->d2.ccb_ptr))) {
			union ccb *aborted_ccb;
			struct adv_ccb_info *cinfo;

			scsiq->q_status |= QS_ABORTED;
			scsiq->d3.done_stat = QD_ABORTED_BY_HOST;
			adv_write_lram_8(adv, q_addr + ADV_SCSIQ_B_STATUS,
					 scsiq->q_status);
			aborted_ccb = (union ccb *)scsiq->d2.ccb_ptr;
			/* Don't clobber earlier error codes */
			if ((aborted_ccb->ccb_h.status & CAM_STATUS_MASK)
			  == CAM_REQ_INPROG)
				aborted_ccb->ccb_h.status |= status;
			cinfo = (struct adv_ccb_info *)
			    aborted_ccb->ccb_h.ccb_cinfo_ptr;
			cinfo->state |= ACCB_ABORT_QUEUED;
			count++;
		}
	}
	return (count);
}
1979
1980int
1981adv_reset_bus(struct adv_softc *adv)
1982{
1983	int count;
1984	int i;
1985	union ccb *ccb;
1986
1987	adv_reset_chip_and_scsi_bus(adv);
1988	adv_reinit_lram(adv);
1989	for (i = 0; i <= ADV_MAX_TID; i++) {
1990		if (adv->fix_asyn_xfer & (0x01 << i))
1991			adv_set_sdtr_reg_at_id(adv, i,
1992					       ASYN_SDTR_DATA_FIX_PCI_REV_AB);
1993        }
1994	ADV_OUTW(adv, ADV_REG_PROG_COUNTER, ADV_MCODE_START_ADDR);
1995
1996	/* Tell the XPT layer that a bus reset occured */
1997	if (adv->path != NULL)
1998		xpt_async(AC_BUS_RESET, adv->path, NULL);
1999
2000	count = 0;
2001	while ((ccb = (union ccb *)LIST_FIRST(&adv->pending_ccbs)) != NULL) {
2002		struct	adv_ccb_info *cinfo;
2003
2004		if ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_INPROG)
2005			ccb->ccb_h.status |= CAM_SCSI_BUS_RESET;
2006		adv_done(adv, ccb, QD_ABORTED_BY_HOST, 0, 0, 0);
2007		count++;
2008	}
2009
2010	adv_start_chip(adv);
2011	return (count);
2012}
2013
/*
 * Write 'sdtr_data' into the chip's per-target sync register file.
 * The target slot is selected through the HOST_SCSIID register in
 * bank 1 (verified by readback as a bit mask) and the data is written
 * via ADV_SYN_OFFSET in bank 0.  The originally selected id is
 * restored before returning.
 */
static void
adv_set_sdtr_reg_at_id(struct adv_softc *adv, int tid, u_int8_t sdtr_data)
{
	int orig_id;

    	adv_set_bank(adv, 1);
    	orig_id = ffs(ADV_INB(adv, ADV_HOST_SCSIID)) - 1;
    	ADV_OUTB(adv, ADV_HOST_SCSIID, tid);
	if (ADV_INB(adv, ADV_HOST_SCSIID) == (0x01 << tid)) {
		adv_set_bank(adv, 0);
		ADV_OUTB(adv, ADV_SYN_OFFSET, sdtr_data);
	}
    	adv_set_bank(adv, 1);
    	ADV_OUTB(adv, ADV_HOST_SCSIID, orig_id);
	adv_set_bank(adv, 0);
}
2030