/*
 * Low level routines for the Advanced Systems Inc. SCSI controller chips
 *
 * Copyright (c) 1996-1997, 1999-2000 Justin Gibbs.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification, immediately at the beginning of the file.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: head/sys/dev/advansys/advlib.c 111409 2003-02-24 04:44:53Z obrien $
 */
/*
 * Ported from:
 * advansys.c - Linux Host Driver for AdvanSys SCSI Adapters
 *
 * Copyright (c) 1995-1996 Advanced System Products, Inc.
 * All Rights Reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that redistributions of source
 * code retain the above copyright notice and this comment without
 * modification.
 */

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/systm.h>

#include <machine/bus_pio.h>
#include <machine/bus.h>
#include <machine/resource.h>
#include <sys/bus.h>
#include <sys/rman.h>

#include <cam/cam.h>
#include <cam/cam_ccb.h>
#include <cam/cam_sim.h>
#include <cam/cam_xpt_sim.h>

#include <cam/scsi/scsi_all.h>
#include <cam/scsi/scsi_message.h>
#include <cam/scsi/scsi_da.h>
#include <cam/scsi/scsi_cd.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>

#include <dev/advansys/advansys.h>
#include <dev/advansys/advmcode.h>

struct adv_quirk_entry {
	struct scsi_inquiry_pattern inq_pat;
	u_int8_t quirks;
#define ADV_QUIRK_FIX_ASYN_XFER_ALWAYS	0x01
#define ADV_QUIRK_FIX_ASYN_XFER		0x02
};

static struct adv_quirk_entry adv_quirk_table[] =
{
	{
		{ T_CDROM, SIP_MEDIA_REMOVABLE, "HP", "*", "*" },
		ADV_QUIRK_FIX_ASYN_XFER_ALWAYS|ADV_QUIRK_FIX_ASYN_XFER
	},
	{
		{ T_CDROM, SIP_MEDIA_REMOVABLE, "NEC", "CD-ROM DRIVE", "*" },
		0
	},
	{
		{
		  T_SEQUENTIAL, SIP_MEDIA_REMOVABLE,
		  "TANDBERG", " TDC 36", "*"
		},
		0
	},
	{
		{ T_SEQUENTIAL, SIP_MEDIA_REMOVABLE, "WANGTEK", "*", "*" },
		0
	},
	{
		{
		  T_PROCESSOR, SIP_MEDIA_REMOVABLE|SIP_MEDIA_FIXED,
		  "*", "*", "*"
		},
		0
	},
	{
		{
		  T_SCANNER, SIP_MEDIA_REMOVABLE|SIP_MEDIA_FIXED,
		  "*", "*", "*"
		},
		0
	},
	{
		/* Default quirk entry */
		{
		  T_ANY, SIP_MEDIA_REMOVABLE|SIP_MEDIA_FIXED,
		  /*vendor*/"*", /*product*/"*", /*revision*/"*"
		},
		ADV_QUIRK_FIX_ASYN_XFER,
	}
};

/*
 * Allowable periods in ns
 */
static u_int8_t adv_sdtr_period_tbl[] =
{
	25,
	30,
	35,
	40,
	50,
	60,
	70,
	85
};

static u_int8_t adv_sdtr_period_tbl_ultra[] =
{
	12,
	19,
	25,
	32,
	38,
	44,
	50,
	57,
	63,
	69,
	75,
	82,
	88,
	94,
	100,
	107
};
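
/*
 * Note: the driver stores a synchronous period as an index into the
 * appropriate table above.  adv_period_offset_to_sdtr() packs that index
 * into the high nibble of the SDTR data byte and the REQ/ACK offset into
 * the low nibble (sdtr_data = (index << 4) | offset);
 * adv_sdtr_to_period_offset() performs the reverse conversion.
 */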

struct ext_msg {
	u_int8_t msg_type;
	u_int8_t msg_len;
	u_int8_t msg_req;
	union {
		struct {
			u_int8_t sdtr_xfer_period;
			u_int8_t sdtr_req_ack_offset;
		} sdtr;
		struct {
			u_int8_t wdtr_width;
		} wdtr;
		struct {
			u_int8_t mdp[4];
		} mdp;
	} u_ext_msg;
	u_int8_t res;
};

#define	xfer_period	u_ext_msg.sdtr.sdtr_xfer_period
#define	req_ack_offset	u_ext_msg.sdtr.sdtr_req_ack_offset
#define	wdtr_width	u_ext_msg.wdtr.wdtr_width
/*
 * The modify data pointer argument bytes are unused in this file; the
 * mapping below assumes the MDP argument is stored most significant
 * byte first.
 */
#define	mdp_b3		u_ext_msg.mdp.mdp[0]
#define	mdp_b2		u_ext_msg.mdp.mdp[1]
#define	mdp_b1		u_ext_msg.mdp.mdp[2]
#define	mdp_b0		u_ext_msg.mdp.mdp[3]

/*
 * Some of the early PCI adapters have problems with
 * async transfers.  Instead use an offset of 1.
 */
#define ASYN_SDTR_DATA_FIX_PCI_REV_AB 0x41

/* LRAM routines */
static void	 adv_read_lram_16_multi(struct adv_softc *adv, u_int16_t s_addr,
					u_int16_t *buffer, int count);
static void	 adv_write_lram_16_multi(struct adv_softc *adv,
					 u_int16_t s_addr, u_int16_t *buffer,
					 int count);
static void	 adv_mset_lram_16(struct adv_softc *adv, u_int16_t s_addr,
				  u_int16_t set_value, int count);
static u_int32_t adv_msum_lram_16(struct adv_softc *adv, u_int16_t s_addr,
				  int count);

static int	 adv_write_and_verify_lram_16(struct adv_softc *adv,
					      u_int16_t addr, u_int16_t value);
static u_int32_t adv_read_lram_32(struct adv_softc *adv, u_int16_t addr);

static void	 adv_write_lram_32(struct adv_softc *adv, u_int16_t addr,
				   u_int32_t value);
static void	 adv_write_lram_32_multi(struct adv_softc *adv,
					 u_int16_t s_addr, u_int32_t *buffer,
					 int count);

/* EEPROM routines */
static u_int16_t adv_read_eeprom_16(struct adv_softc *adv, u_int8_t addr);
static u_int16_t adv_write_eeprom_16(struct adv_softc *adv, u_int8_t addr,
				     u_int16_t value);
static int	 adv_write_eeprom_cmd_reg(struct adv_softc *adv,
					  u_int8_t cmd_reg);
static int	 adv_set_eeprom_config_once(struct adv_softc *adv,
					    struct adv_eeprom_config *eeconfig);

/* Initialization */
static u_int32_t adv_load_microcode(struct adv_softc *adv, u_int16_t s_addr,
				    u_int16_t *mcode_buf, u_int16_t mcode_size);

static void	 adv_reinit_lram(struct adv_softc *adv);
static void	 adv_init_lram(struct adv_softc *adv);
static int	 adv_init_microcode_var(struct adv_softc *adv);
static void	 adv_init_qlink_var(struct adv_softc *adv);

/* Interrupts */
static void	 adv_disable_interrupt(struct adv_softc *adv);
static void	 adv_enable_interrupt(struct adv_softc *adv);
static void	 adv_toggle_irq_act(struct adv_softc *adv);

/* Chip Control */
static int	 adv_host_req_chip_halt(struct adv_softc *adv);
static void	 adv_set_chip_ih(struct adv_softc *adv, u_int16_t ins_code);
#if UNUSED
static u_int8_t  adv_get_chip_scsi_ctrl(struct adv_softc *adv);
#endif

/* Queue handling and execution */
static __inline int
		 adv_sgcount_to_qcount(int sgcount);

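/*
 * A request consumes one queue for the command itself plus enough SG
 * list queues to hold its scatter/gather elements.  The first SG
 * element rides in the command queue (see adv_put_ready_sg_list_queue()),
 * so only the remaining (sgcount - 1) elements, ADV_SG_LIST_PER_Q per
 * queue, need SG list queues; round their count up and add one.
 */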
static __inline int
adv_sgcount_to_qcount(int sgcount)
{
	int	n_sg_list_qs;

	n_sg_list_qs = ((sgcount - 1) / ADV_SG_LIST_PER_Q);
	if (((sgcount - 1) % ADV_SG_LIST_PER_Q) != 0)
		n_sg_list_qs++;
	return (n_sg_list_qs + 1);
}

#if BYTE_ORDER == BIG_ENDIAN
static void	 adv_adj_endian_qdone_info(struct adv_q_done_info *);
static void	 adv_adj_scsiq_endian(struct adv_scsi_q *);
#endif
static void	 adv_get_q_info(struct adv_softc *adv, u_int16_t s_addr,
				u_int16_t *inbuf, int words);
static u_int	 adv_get_num_free_queues(struct adv_softc *adv, u_int8_t n_qs);
static u_int8_t  adv_alloc_free_queues(struct adv_softc *adv,
				       u_int8_t free_q_head, u_int8_t n_free_q);
static u_int8_t  adv_alloc_free_queue(struct adv_softc *adv,
				      u_int8_t free_q_head);
static int	 adv_send_scsi_queue(struct adv_softc *adv,
				     struct adv_scsi_q *scsiq,
				     u_int8_t n_q_required);
static void	 adv_put_ready_sg_list_queue(struct adv_softc *adv,
					     struct adv_scsi_q *scsiq,
					     u_int q_no);
static void	 adv_put_ready_queue(struct adv_softc *adv,
				     struct adv_scsi_q *scsiq, u_int q_no);
static void	 adv_put_scsiq(struct adv_softc *adv, u_int16_t s_addr,
			       u_int16_t *buffer, int words);

/* Messages */
static void	 adv_handle_extmsg_in(struct adv_softc *adv,
				      u_int16_t halt_q_addr, u_int8_t q_cntl,
				      target_bit_vector target_id,
				      int tid);
static void	 adv_msgout_sdtr(struct adv_softc *adv, u_int8_t sdtr_period,
				 u_int8_t sdtr_offset);
static void	 adv_set_sdtr_reg_at_id(struct adv_softc *adv, int id,
					u_int8_t sdtr_data);

/* Exported functions first */

void
advasync(void *callback_arg, u_int32_t code, struct cam_path *path, void *arg)
{
	struct adv_softc *adv;

	adv = (struct adv_softc *)callback_arg;
	switch (code) {
	case AC_FOUND_DEVICE:
	{
		struct ccb_getdev *cgd;
		target_bit_vector target_mask;
		int num_entries;
		caddr_t match;
		struct adv_quirk_entry *entry;
		struct adv_target_transinfo* tinfo;

		cgd = (struct ccb_getdev *)arg;

		target_mask = ADV_TID_TO_TARGET_MASK(cgd->ccb_h.target_id);

		num_entries = sizeof(adv_quirk_table)/sizeof(*adv_quirk_table);
		match = cam_quirkmatch((caddr_t)&cgd->inq_data,
				       (caddr_t)adv_quirk_table,
				       num_entries, sizeof(*adv_quirk_table),
				       scsi_inquiry_match);

		if (match == NULL)
			panic("advasync: device didn't match wildcard entry!!");

		entry = (struct adv_quirk_entry *)match;

		if (adv->bug_fix_control & ADV_BUG_FIX_ASYN_USE_SYN) {
			if ((entry->quirks & ADV_QUIRK_FIX_ASYN_XFER_ALWAYS)!=0)
				adv->fix_asyn_xfer_always |= target_mask;
			else
				adv->fix_asyn_xfer_always &= ~target_mask;
			/*
			 * We start out life with all bits set and clear them
			 * after we've determined that the fix isn't necessary.
			 * It may well be that we've already cleared a target
			 * before the full inquiry session completes, so don't
			 * gratuitously set a target bit even if it has this
			 * quirk.  But, if the quirk exonerates a device, clear
			 * the bit now.
			 */
			if ((entry->quirks & ADV_QUIRK_FIX_ASYN_XFER) == 0)
				adv->fix_asyn_xfer &= ~target_mask;
		}
		/*
		 * Reset our sync settings now that we've determined
		 * what quirks are in effect for the device.
		 */
		tinfo = &adv->tinfo[cgd->ccb_h.target_id];
		adv_set_syncrate(adv, cgd->ccb_h.path,
				 cgd->ccb_h.target_id,
				 tinfo->current.period,
				 tinfo->current.offset,
				 ADV_TRANS_CUR);
		break;
	}
	case AC_LOST_DEVICE:
	{
		u_int target_mask;

		if (adv->bug_fix_control & ADV_BUG_FIX_ASYN_USE_SYN) {
			target_mask = 0x01 << xpt_path_target_id(path);
			adv->fix_asyn_xfer |= target_mask;
		}

		/*
		 * Revert to async transfers
		 * for the next device.
		 */
		adv_set_syncrate(adv, /*path*/NULL,
				 xpt_path_target_id(path),
				 /*period*/0,
				 /*offset*/0,
				 ADV_TRANS_GOAL|ADV_TRANS_CUR);
		break;
	}
	default:
		break;
	}
}

void
adv_set_bank(struct adv_softc *adv, u_int8_t bank)
{
	u_int8_t control;

	/*
	 * Start out with the bank reset to 0
	 */
	control = ADV_INB(adv, ADV_CHIP_CTRL)
		  & (~(ADV_CC_SINGLE_STEP | ADV_CC_TEST
		       | ADV_CC_DIAG | ADV_CC_SCSI_RESET
		       | ADV_CC_CHIP_RESET | ADV_CC_BANK_ONE));
	if (bank == 1) {
		control |= ADV_CC_BANK_ONE;
	} else if (bank == 2) {
		control |= ADV_CC_DIAG | ADV_CC_BANK_ONE;
	}
	ADV_OUTB(adv, ADV_CHIP_CTRL, control);
}

u_int8_t
adv_read_lram_8(struct adv_softc *adv, u_int16_t addr)
{
	u_int8_t   byte_data;
	u_int16_t  word_data;

	/*
	 * LRAM is accessed on 16bit boundaries.
	 */
	ADV_OUTW(adv, ADV_LRAM_ADDR, addr & 0xFFFE);
	word_data = ADV_INW(adv, ADV_LRAM_DATA);
	if (addr & 1) {
#if BYTE_ORDER == BIG_ENDIAN
		byte_data = (u_int8_t)(word_data & 0xFF);
#else
		byte_data = (u_int8_t)((word_data >> 8) & 0xFF);
#endif
	} else {
#if BYTE_ORDER == BIG_ENDIAN
		byte_data = (u_int8_t)((word_data >> 8) & 0xFF);
#else
		byte_data = (u_int8_t)(word_data & 0xFF);
#endif
	}
	return (byte_data);
}

void
adv_write_lram_8(struct adv_softc *adv, u_int16_t addr, u_int8_t value)
{
	u_int16_t word_data;

	word_data = adv_read_lram_16(adv, addr & 0xFFFE);
	if (addr & 1) {
		word_data &= 0x00FF;
		word_data |= (((u_int8_t)value << 8) & 0xFF00);
	} else {
		word_data &= 0xFF00;
		word_data |= ((u_int8_t)value & 0x00FF);
	}
	adv_write_lram_16(adv, addr & 0xFFFE, word_data);
}

u_int16_t
adv_read_lram_16(struct adv_softc *adv, u_int16_t addr)
{
	ADV_OUTW(adv, ADV_LRAM_ADDR, addr);
	return (ADV_INW(adv, ADV_LRAM_DATA));
}

void
adv_write_lram_16(struct adv_softc *adv, u_int16_t addr, u_int16_t value)
{
	ADV_OUTW(adv, ADV_LRAM_ADDR, addr);
	ADV_OUTW(adv, ADV_LRAM_DATA, value);
}

/*
 * Determine if there is a board at "iobase" by looking
 * for the AdvanSys signatures.  Return 1 if a board is
 * found, 0 otherwise.
 */
int
adv_find_signature(bus_space_tag_t tag, bus_space_handle_t bsh)
{
	u_int16_t signature;

	if (bus_space_read_1(tag, bsh, ADV_SIGNATURE_BYTE) == ADV_1000_ID1B) {
		signature = bus_space_read_2(tag, bsh, ADV_SIGNATURE_WORD);
		if ((signature == ADV_1000_ID0W)
		 || (signature == ADV_1000_ID0W_FIX))
			return (1);
	}
	return (0);
}

void
adv_lib_init(struct adv_softc *adv)
{
	if ((adv->type & ADV_ULTRA) != 0) {
		adv->sdtr_period_tbl = adv_sdtr_period_tbl_ultra;
		adv->sdtr_period_tbl_size = sizeof(adv_sdtr_period_tbl_ultra);
	} else {
		adv->sdtr_period_tbl = adv_sdtr_period_tbl;
		adv->sdtr_period_tbl_size = sizeof(adv_sdtr_period_tbl);
	}
}

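/*
 * Read the EEPROM configuration into "eeprom_config" and return the
 * 16 bit sum of the words read.  The final word read is the stored
 * checksum, which the caller is expected to compare against the
 * returned sum.
 */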
u_int16_t
adv_get_eeprom_config(struct adv_softc *adv,
		      struct adv_eeprom_config *eeprom_config)
{
	u_int16_t	sum;
	u_int16_t	*wbuf;
	u_int8_t	cfg_beg;
	u_int8_t	cfg_end;
	u_int8_t	s_addr;

	wbuf = (u_int16_t *)eeprom_config;
	sum = 0;

	for (s_addr = 0; s_addr < 2; s_addr++, wbuf++) {
		*wbuf = adv_read_eeprom_16(adv, s_addr);
		sum += *wbuf;
	}

	if (adv->type & ADV_VL) {
		cfg_beg = ADV_EEPROM_CFG_BEG_VL;
		cfg_end = ADV_EEPROM_MAX_ADDR_VL;
	} else {
		cfg_beg = ADV_EEPROM_CFG_BEG;
		cfg_end = ADV_EEPROM_MAX_ADDR;
	}

	for (s_addr = cfg_beg; s_addr <= (cfg_end - 1); s_addr++, wbuf++) {
		*wbuf = adv_read_eeprom_16(adv, s_addr);
		sum += *wbuf;
#if ADV_DEBUG_EEPROM
		printf("Addr 0x%x: 0x%04x\n", s_addr, *wbuf);
#endif
	}
	*wbuf = adv_read_eeprom_16(adv, s_addr);
	return (sum);
}

int
adv_set_eeprom_config(struct adv_softc *adv,
		      struct adv_eeprom_config *eeprom_config)
{
	int	retry;

	retry = 0;
	while (1) {
		if (adv_set_eeprom_config_once(adv, eeprom_config) == 0) {
			break;
		}
		if (++retry > ADV_EEPROM_MAX_RETRY) {
			break;
		}
	}
	return (retry > ADV_EEPROM_MAX_RETRY);
}

int
adv_reset_chip(struct adv_softc *adv, int reset_bus)
{
	adv_stop_chip(adv);
	ADV_OUTB(adv, ADV_CHIP_CTRL, ADV_CC_CHIP_RESET | ADV_CC_HALT
				     | (reset_bus ? ADV_CC_SCSI_RESET : 0));
	DELAY(60);

	adv_set_chip_ih(adv, ADV_INS_RFLAG_WTM);
	adv_set_chip_ih(adv, ADV_INS_HALT);

	if (reset_bus)
		ADV_OUTB(adv, ADV_CHIP_CTRL, ADV_CC_CHIP_RESET | ADV_CC_HALT);

	ADV_OUTB(adv, ADV_CHIP_CTRL, ADV_CC_HALT);
	if (reset_bus)
		DELAY(200 * 1000);

	ADV_OUTW(adv, ADV_CHIP_STATUS, ADV_CIW_CLR_SCSI_RESET_INT);
	ADV_OUTW(adv, ADV_CHIP_STATUS, 0);
	return (adv_is_chip_halted(adv));
}

int
adv_test_external_lram(struct adv_softc* adv)
{
	u_int16_t	q_addr;
	u_int16_t	saved_value;
	int		success;

	success = 0;

	q_addr = ADV_QNO_TO_QADDR(241);
	saved_value = adv_read_lram_16(adv, q_addr);
	if (adv_write_and_verify_lram_16(adv, q_addr, 0x55AA) == 0) {
		success = 1;
		adv_write_lram_16(adv, q_addr, saved_value);
	}
	return (success);
}

int
adv_init_lram_and_mcode(struct adv_softc *adv)
{
	u_int32_t	retval;

	adv_disable_interrupt(adv);

	adv_init_lram(adv);

	retval = adv_load_microcode(adv, 0, (u_int16_t *)adv_mcode,
				    adv_mcode_size);
	if (retval != adv_mcode_chksum) {
		printf("adv%d: Microcode checksum verification failed!\n",
		       adv->unit);
		return (1);
	}

	if (adv_init_microcode_var(adv) != 0)
		return (1);

	adv_enable_interrupt(adv);
	return (0);
}

u_int8_t
adv_get_chip_irq(struct adv_softc *adv)
{
	u_int16_t	cfg_lsw;
	u_int8_t	chip_irq;

	cfg_lsw = ADV_INW(adv, ADV_CONFIG_LSW);

	if ((adv->type & ADV_VL) != 0) {
		chip_irq = (u_int8_t)(((cfg_lsw >> 2) & 0x07));
		if ((chip_irq == 0) ||
		    (chip_irq == 4) ||
		    (chip_irq == 7)) {
			return (0);
		}
		return (chip_irq + (ADV_MIN_IRQ_NO - 1));
	}
	chip_irq = (u_int8_t)(((cfg_lsw >> 2) & 0x03));
	if (chip_irq == 3)
		chip_irq += 2;
	return (chip_irq + ADV_MIN_IRQ_NO);
}

u_int8_t
adv_set_chip_irq(struct adv_softc *adv, u_int8_t irq_no)
{
	u_int16_t	cfg_lsw;

	if ((adv->type & ADV_VL) != 0) {
		if (irq_no != 0) {
			if ((irq_no < ADV_MIN_IRQ_NO)
			 || (irq_no > ADV_MAX_IRQ_NO)) {
				irq_no = 0;
			} else {
				irq_no -= ADV_MIN_IRQ_NO - 1;
			}
		}
		cfg_lsw = ADV_INW(adv, ADV_CONFIG_LSW) & 0xFFE3;
		cfg_lsw |= 0x0010;
		ADV_OUTW(adv, ADV_CONFIG_LSW, cfg_lsw);
		adv_toggle_irq_act(adv);

		cfg_lsw = ADV_INW(adv, ADV_CONFIG_LSW) & 0xFFE0;
		cfg_lsw |= (irq_no & 0x07) << 2;
		ADV_OUTW(adv, ADV_CONFIG_LSW, cfg_lsw);
		adv_toggle_irq_act(adv);
	} else if ((adv->type & ADV_ISA) != 0) {
		if (irq_no == 15)
			irq_no -= 2;
		irq_no -= ADV_MIN_IRQ_NO;
		cfg_lsw = ADV_INW(adv, ADV_CONFIG_LSW) & 0xFFF3;
		cfg_lsw |= (irq_no & 0x03) << 2;
		ADV_OUTW(adv, ADV_CONFIG_LSW, cfg_lsw);
	}
	return (adv_get_chip_irq(adv));
}

void
adv_set_chip_scsiid(struct adv_softc *adv, int new_id)
{
	u_int16_t cfg_lsw;

	cfg_lsw = ADV_INW(adv, ADV_CONFIG_LSW);
	if (ADV_CONFIG_SCSIID(cfg_lsw) == new_id)
		return;
	cfg_lsw &= ~ADV_CFG_LSW_SCSIID;
	cfg_lsw |= (new_id & ADV_MAX_TID) << ADV_CFG_LSW_SCSIID_SHIFT;
	ADV_OUTW(adv, ADV_CONFIG_LSW, cfg_lsw);
}

int
adv_execute_scsi_queue(struct adv_softc *adv, struct adv_scsi_q *scsiq,
		       u_int32_t datalen)
{
	struct		adv_target_transinfo* tinfo;
	u_int32_t	*p_data_addr;
	u_int32_t	*p_data_bcount;
	int		disable_syn_offset_one_fix;
	int		retval;
	u_int		n_q_required;
	u_int32_t	addr;
	u_int8_t	sg_entry_cnt;
	u_int8_t	target_ix;
	u_int8_t	sg_entry_cnt_minus_one;
	u_int8_t	tid_no;

	scsiq->q1.q_no = 0;
	retval = 1;  /* Default to error case */
	target_ix = scsiq->q2.target_ix;
	tid_no = ADV_TIX_TO_TID(target_ix);
	tinfo = &adv->tinfo[tid_no];

	if (scsiq->cdbptr[0] == REQUEST_SENSE) {
		/* Renegotiate if appropriate. */
		adv_set_syncrate(adv, /*struct cam_path */NULL,
				 tid_no, /*period*/0, /*offset*/0,
				 ADV_TRANS_CUR);
		if (tinfo->current.period != tinfo->goal.period) {
			adv_msgout_sdtr(adv, tinfo->goal.period,
					tinfo->goal.offset);
			scsiq->q1.cntl |= (QC_MSG_OUT | QC_URGENT);
		}
	}

	if ((scsiq->q1.cntl & QC_SG_HEAD) != 0) {
		sg_entry_cnt = scsiq->sg_head->entry_cnt;
		sg_entry_cnt_minus_one = sg_entry_cnt - 1;

#ifdef DIAGNOSTIC
		if (sg_entry_cnt <= 1)
			panic("adv_execute_scsi_queue: Queue "
			      "with QC_SG_HEAD set but %d segs.", sg_entry_cnt);

		if (sg_entry_cnt > ADV_MAX_SG_LIST)
			panic("adv_execute_scsi_queue: "
			      "Queue with too many segs.");

		if ((adv->type & (ADV_ISA | ADV_VL | ADV_EISA)) != 0) {
			int i;

			for (i = 0; i < sg_entry_cnt_minus_one; i++) {
				addr = scsiq->sg_head->sg_list[i].addr +
				       scsiq->sg_head->sg_list[i].bytes;

				if ((addr & 0x0003) != 0)
					panic("adv_execute_scsi_queue: SG "
					      "with odd address or byte count");
			}
		}
#endif
		p_data_addr =
		    &scsiq->sg_head->sg_list[sg_entry_cnt_minus_one].addr;
		p_data_bcount =
		    &scsiq->sg_head->sg_list[sg_entry_cnt_minus_one].bytes;

		n_q_required = adv_sgcount_to_qcount(sg_entry_cnt);
		scsiq->sg_head->queue_cnt = n_q_required - 1;
	} else {
		p_data_addr = &scsiq->q1.data_addr;
		p_data_bcount = &scsiq->q1.data_cnt;
		n_q_required = 1;
	}

	disable_syn_offset_one_fix = FALSE;

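	/*
	 * The "offset 1" async workaround apparently misbehaves on short
	 * transfers and on commands, such as inquiry and mode sense, whose
	 * returned data length may be less than requested, so disable it
	 * for those cases.
	 */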
	if ((adv->fix_asyn_xfer & scsiq->q1.target_id) != 0
	 && (adv->fix_asyn_xfer_always & scsiq->q1.target_id) == 0) {

		if (datalen != 0) {
			if (datalen < 512) {
				disable_syn_offset_one_fix = TRUE;
			} else {
				if (scsiq->cdbptr[0] == INQUIRY
				 || scsiq->cdbptr[0] == REQUEST_SENSE
				 || scsiq->cdbptr[0] == READ_CAPACITY
				 || scsiq->cdbptr[0] == MODE_SELECT_6
				 || scsiq->cdbptr[0] == MODE_SENSE_6
				 || scsiq->cdbptr[0] == MODE_SENSE_10
				 || scsiq->cdbptr[0] == MODE_SELECT_10
				 || scsiq->cdbptr[0] == READ_TOC) {
					disable_syn_offset_one_fix = TRUE;
				}
			}
		}
	}

	if (disable_syn_offset_one_fix) {
		scsiq->q2.tag_code &=
		    ~(MSG_SIMPLE_Q_TAG|MSG_HEAD_OF_Q_TAG|MSG_ORDERED_Q_TAG);
		scsiq->q2.tag_code |= (ADV_TAG_FLAG_DISABLE_ASYN_USE_SYN_FIX
				     | ADV_TAG_FLAG_DISABLE_DISCONNECT);
	}

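	/*
	 * Apparently a workaround for reads that do not end on a dword
	 * boundary: trim the trailing odd bytes from the count and flag
	 * them (ADV_TAG_FLAG_EXTRA_BYTES) so they can be handled
	 * separately.
	 */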
	if ((adv->bug_fix_control & ADV_BUG_FIX_IF_NOT_DWB) != 0
	 && (scsiq->cdbptr[0] == READ_10 || scsiq->cdbptr[0] == READ_6)) {
		u_int8_t extra_bytes;

		addr = *p_data_addr + *p_data_bcount;
		extra_bytes = addr & 0x0003;
		if (extra_bytes != 0
		 && ((scsiq->q1.cntl & QC_SG_HEAD) != 0
		  || (scsiq->q1.data_cnt & 0x01FF) == 0)) {
			scsiq->q2.tag_code |= ADV_TAG_FLAG_EXTRA_BYTES;
			scsiq->q1.extra_bytes = extra_bytes;
			*p_data_bcount -= extra_bytes;
		}
	}

	if ((adv_get_num_free_queues(adv, n_q_required) >= n_q_required)
	 || ((scsiq->q1.cntl & QC_URGENT) != 0))
		retval = adv_send_scsi_queue(adv, scsiq, n_q_required);

	return (retval);
}

u_int8_t
adv_copy_lram_doneq(struct adv_softc *adv, u_int16_t q_addr,
		    struct adv_q_done_info *scsiq, u_int32_t max_dma_count)
{
	u_int16_t val;
	u_int8_t  sg_queue_cnt;

	adv_get_q_info(adv, q_addr + ADV_SCSIQ_DONE_INFO_BEG,
		       (u_int16_t *)scsiq,
		       (sizeof(scsiq->d2) + sizeof(scsiq->d3)) / 2);

#if BYTE_ORDER == BIG_ENDIAN
	adv_adj_endian_qdone_info(scsiq);
#endif

	val = adv_read_lram_16(adv, q_addr + ADV_SCSIQ_B_STATUS);
	scsiq->q_status = val & 0xFF;
	scsiq->q_no = (val >> 8) & 0xFF;

	val = adv_read_lram_16(adv, q_addr + ADV_SCSIQ_B_CNTL);
	scsiq->cntl = val & 0xFF;
	sg_queue_cnt = (val >> 8) & 0xFF;

	val = adv_read_lram_16(adv, q_addr + ADV_SCSIQ_B_SENSE_LEN);
	scsiq->sense_len = val & 0xFF;
	scsiq->extra_bytes = (val >> 8) & 0xFF;

	/*
	 * Due to a bug in accessing LRAM on the 940UA, the residual
	 * is split into separate high and low 16bit quantities.
	 */
	scsiq->remain_bytes =
	    adv_read_lram_16(adv, q_addr + ADV_SCSIQ_DW_REMAIN_XFER_CNT);
	scsiq->remain_bytes |=
	    adv_read_lram_16(adv, q_addr + ADV_SCSIQ_W_ALT_DC1) << 16;

	/*
	 * XXX Is this just a safeguard or will the counter really
	 * have bogus upper bits?
	 */
	scsiq->remain_bytes &= max_dma_count;

	return (sg_queue_cnt);
}

int
adv_start_chip(struct adv_softc *adv)
{
	ADV_OUTB(adv, ADV_CHIP_CTRL, 0);
	if ((ADV_INW(adv, ADV_CHIP_STATUS) & ADV_CSW_HALTED) != 0)
		return (0);
	return (1);
}

int
adv_stop_execution(struct adv_softc *adv)
{
	int count;

	count = 0;
	if (adv_read_lram_8(adv, ADV_STOP_CODE_B) == 0) {
		adv_write_lram_8(adv, ADV_STOP_CODE_B,
				 ADV_STOP_REQ_RISC_STOP);
		do {
			if (adv_read_lram_8(adv, ADV_STOP_CODE_B) &
				ADV_STOP_ACK_RISC_STOP) {
				return (1);
			}
			DELAY(1000);
		} while (count++ < 20);
	}
	return (0);
}

int
adv_is_chip_halted(struct adv_softc *adv)
{
	if ((ADV_INW(adv, ADV_CHIP_STATUS) & ADV_CSW_HALTED) != 0) {
		if ((ADV_INB(adv, ADV_CHIP_CTRL) & ADV_CC_HALT) != 0) {
			return (1);
		}
	}
	return (0);
}

/*
 * XXX The numeric constants and the loops in this routine
 * need to be documented.
 */
void
adv_ack_interrupt(struct adv_softc *adv)
{
	u_int8_t	host_flag;
	u_int8_t	risc_flag;
	int		loop;

	loop = 0;
	do {
		risc_flag = adv_read_lram_8(adv, ADVV_RISC_FLAG_B);
		if (loop++ > 0x7FFF) {
			break;
		}
	} while ((risc_flag & ADV_RISC_FLAG_GEN_INT) != 0);

	host_flag = adv_read_lram_8(adv, ADVV_HOST_FLAG_B);
	adv_write_lram_8(adv, ADVV_HOST_FLAG_B,
			 host_flag | ADV_HOST_FLAG_ACK_INT);

	ADV_OUTW(adv, ADV_CHIP_STATUS, ADV_CIW_INT_ACK);
	loop = 0;
	while (ADV_INW(adv, ADV_CHIP_STATUS) & ADV_CSW_INT_PENDING) {
		ADV_OUTW(adv, ADV_CHIP_STATUS, ADV_CIW_INT_ACK);
		if (loop++ > 3) {
			break;
		}
	}

	adv_write_lram_8(adv, ADVV_HOST_FLAG_B, host_flag);
}

/*
 * Handle all conditions that may halt the chip waiting
 * for us to intervene.
 */
void
adv_isr_chip_halted(struct adv_softc *adv)
{
	u_int16_t	  int_halt_code;
	u_int16_t	  halt_q_addr;
	target_bit_vector target_mask;
	target_bit_vector scsi_busy;
	u_int8_t	  halt_qp;
	u_int8_t	  target_ix;
	u_int8_t	  q_cntl;
	u_int8_t	  tid_no;

	int_halt_code = adv_read_lram_16(adv, ADVV_HALTCODE_W);
	halt_qp = adv_read_lram_8(adv, ADVV_CURCDB_B);
	halt_q_addr = ADV_QNO_TO_QADDR(halt_qp);
	target_ix = adv_read_lram_8(adv, halt_q_addr + ADV_SCSIQ_B_TARGET_IX);
	q_cntl = adv_read_lram_8(adv, halt_q_addr + ADV_SCSIQ_B_CNTL);
	tid_no = ADV_TIX_TO_TID(target_ix);
	target_mask = ADV_TID_TO_TARGET_MASK(tid_no);
	if (int_halt_code == ADV_HALT_DISABLE_ASYN_USE_SYN_FIX) {
		/*
		 * Temporarily disable the async fix by removing
		 * this target from the list of affected targets,
		 * setting our async rate, and then putting us
		 * back into the mask.
		 */
		adv->fix_asyn_xfer &= ~target_mask;
		adv_set_syncrate(adv, /*struct cam_path */NULL,
				 tid_no, /*period*/0, /*offset*/0,
				 ADV_TRANS_ACTIVE);
		adv->fix_asyn_xfer |= target_mask;
	} else if (int_halt_code == ADV_HALT_ENABLE_ASYN_USE_SYN_FIX) {
		adv_set_syncrate(adv, /*struct cam_path */NULL,
				 tid_no, /*period*/0, /*offset*/0,
				 ADV_TRANS_ACTIVE);
	} else if (int_halt_code == ADV_HALT_EXTMSG_IN) {
		adv_handle_extmsg_in(adv, halt_q_addr, q_cntl,
				     target_mask, tid_no);
	} else if (int_halt_code == ADV_HALT_CHK_CONDITION) {
		struct	  adv_target_transinfo* tinfo;
		union	  ccb *ccb;
		u_int32_t cinfo_index;
		u_int8_t  tag_code;
		u_int8_t  q_status;

		tinfo = &adv->tinfo[tid_no];
		q_cntl |= QC_REQ_SENSE;

		/* Renegotiate if appropriate. */
		adv_set_syncrate(adv, /*struct cam_path */NULL,
				 tid_no, /*period*/0, /*offset*/0,
				 ADV_TRANS_CUR);
		if (tinfo->current.period != tinfo->goal.period) {
			adv_msgout_sdtr(adv, tinfo->goal.period,
					tinfo->goal.offset);
			q_cntl |= QC_MSG_OUT;
		}
		adv_write_lram_8(adv, halt_q_addr + ADV_SCSIQ_B_CNTL, q_cntl);

		/* Don't tag request sense commands */
		tag_code = adv_read_lram_8(adv,
					   halt_q_addr + ADV_SCSIQ_B_TAG_CODE);
		tag_code &=
		    ~(MSG_SIMPLE_Q_TAG|MSG_HEAD_OF_Q_TAG|MSG_ORDERED_Q_TAG);

		if ((adv->fix_asyn_xfer & target_mask) != 0
		 && (adv->fix_asyn_xfer_always & target_mask) == 0) {
			tag_code |= (ADV_TAG_FLAG_DISABLE_DISCONNECT
				 | ADV_TAG_FLAG_DISABLE_ASYN_USE_SYN_FIX);
		}
		adv_write_lram_8(adv, halt_q_addr + ADV_SCSIQ_B_TAG_CODE,
				 tag_code);
		q_status = adv_read_lram_8(adv,
					   halt_q_addr + ADV_SCSIQ_B_STATUS);
		q_status |= (QS_READY | QS_BUSY);
		adv_write_lram_8(adv, halt_q_addr + ADV_SCSIQ_B_STATUS,
				 q_status);
		/*
		 * Freeze the devq until we can handle the sense condition.
		 */
		cinfo_index =
		    adv_read_lram_32(adv, halt_q_addr + ADV_SCSIQ_D_CINFO_IDX);
		ccb = adv->ccb_infos[cinfo_index].ccb;
		xpt_freeze_devq(ccb->ccb_h.path, /*count*/1);
		ccb->ccb_h.status |= CAM_DEV_QFRZN;
		adv_abort_ccb(adv, tid_no, ADV_TIX_TO_LUN(target_ix),
			      /*ccb*/NULL, CAM_REQUEUE_REQ,
			      /*queued_only*/TRUE);
		scsi_busy = adv_read_lram_8(adv, ADVV_SCSIBUSY_B);
		scsi_busy &= ~target_mask;
		adv_write_lram_8(adv, ADVV_SCSIBUSY_B, scsi_busy);
		/*
		 * Ensure we have enough time to actually
		 * retrieve the sense.
		 */
		untimeout(adv_timeout, (caddr_t)ccb, ccb->ccb_h.timeout_ch);
		ccb->ccb_h.timeout_ch =
		    timeout(adv_timeout, (caddr_t)ccb, 5 * hz);
	} else if (int_halt_code == ADV_HALT_SDTR_REJECTED) {
		struct	ext_msg out_msg;

		adv_read_lram_16_multi(adv, ADVV_MSGOUT_BEG,
				       (u_int16_t *) &out_msg,
				       sizeof(out_msg)/2);

		if ((out_msg.msg_type == MSG_EXTENDED)
		 && (out_msg.msg_len == MSG_EXT_SDTR_LEN)
		 && (out_msg.msg_req == MSG_EXT_SDTR)) {

			/* Revert to Async */
			adv_set_syncrate(adv, /*struct cam_path */NULL,
					 tid_no, /*period*/0, /*offset*/0,
					 ADV_TRANS_GOAL|ADV_TRANS_ACTIVE);
		}
		q_cntl &= ~QC_MSG_OUT;
		adv_write_lram_8(adv, halt_q_addr + ADV_SCSIQ_B_CNTL, q_cntl);
	} else if (int_halt_code == ADV_HALT_SS_QUEUE_FULL) {
		u_int8_t scsi_status;
		union ccb *ccb;
		u_int32_t cinfo_index;

		scsi_status = adv_read_lram_8(adv, halt_q_addr
					      + ADV_SCSIQ_SCSI_STATUS);
		cinfo_index =
		    adv_read_lram_32(adv, halt_q_addr + ADV_SCSIQ_D_CINFO_IDX);
		ccb = adv->ccb_infos[cinfo_index].ccb;
		xpt_freeze_devq(ccb->ccb_h.path, /*count*/1);
		ccb->ccb_h.status |= CAM_DEV_QFRZN|CAM_SCSI_STATUS_ERROR;
		ccb->csio.scsi_status = SCSI_STATUS_QUEUE_FULL;
		adv_abort_ccb(adv, tid_no, ADV_TIX_TO_LUN(target_ix),
			      /*ccb*/NULL, CAM_REQUEUE_REQ,
			      /*queued_only*/TRUE);
		scsi_busy = adv_read_lram_8(adv, ADVV_SCSIBUSY_B);
		scsi_busy &= ~target_mask;
		adv_write_lram_8(adv, ADVV_SCSIBUSY_B, scsi_busy);
	} else {
		printf("Unhandled Halt Code %x\n", int_halt_code);
	}
	adv_write_lram_16(adv, ADVV_HALTCODE_W, 0);
}

void
adv_sdtr_to_period_offset(struct adv_softc *adv,
			  u_int8_t sync_data, u_int8_t *period,
			  u_int8_t *offset, int tid)
{
	if (adv->fix_asyn_xfer & ADV_TID_TO_TARGET_MASK(tid)
	 && (sync_data == ASYN_SDTR_DATA_FIX_PCI_REV_AB)) {
		*period = *offset = 0;
	} else {
		*period = adv->sdtr_period_tbl[((sync_data >> 4) & 0xF)];
		*offset = sync_data & 0xF;
	}
}

void
adv_set_syncrate(struct adv_softc *adv, struct cam_path *path,
		 u_int tid, u_int period, u_int offset, u_int type)
{
	struct adv_target_transinfo* tinfo;
	u_int old_period;
	u_int old_offset;
	u_int8_t sdtr_data;

	tinfo = &adv->tinfo[tid];

	/* Filter our input */
	sdtr_data = adv_period_offset_to_sdtr(adv, &period,
					      &offset, tid);

	old_period = tinfo->current.period;
	old_offset = tinfo->current.offset;

	if ((type & ADV_TRANS_CUR) != 0
	 && ((old_period != period || old_offset != offset)
	  || period == 0 || offset == 0) /* Changes in async fix settings */) {
		int s;
		int halted;

		s = splcam();
		halted = adv_is_chip_halted(adv);
		if (halted == 0)
			/* Must halt the chip first */
			adv_host_req_chip_halt(adv);

		/* Update current hardware settings */
		adv_set_sdtr_reg_at_id(adv, tid, sdtr_data);

		/*
		 * If a target can run in sync mode, we don't need
		 * to check it for sync problems.
		 */
		if (offset != 0)
			adv->fix_asyn_xfer &= ~ADV_TID_TO_TARGET_MASK(tid);

		if (halted == 0)
			/* Start the chip again */
			adv_start_chip(adv);

		splx(s);
		tinfo->current.period = period;
		tinfo->current.offset = offset;

		if (path != NULL) {
			/*
			 * Tell the SCSI layer about the
			 * new transfer parameters.
			 */
			struct	ccb_trans_settings neg;

			neg.sync_period = period;
			neg.sync_offset = offset;
			neg.valid = CCB_TRANS_SYNC_RATE_VALID
				  | CCB_TRANS_SYNC_OFFSET_VALID;
			xpt_setup_ccb(&neg.ccb_h, path, /*priority*/1);
			xpt_async(AC_TRANSFER_NEG, path, &neg);
		}
	}

	if ((type & ADV_TRANS_GOAL) != 0) {
		tinfo->goal.period = period;
		tinfo->goal.offset = offset;
	}

	if ((type & ADV_TRANS_USER) != 0) {
		tinfo->user.period = period;
		tinfo->user.offset = offset;
	}
}

u_int8_t
adv_period_offset_to_sdtr(struct adv_softc *adv, u_int *period,
			  u_int *offset, int tid)
{
	u_int i;
	u_int dummy_offset;
	u_int dummy_period;

	if (offset == NULL) {
		dummy_offset = 0;
		offset = &dummy_offset;
	}

	if (period == NULL) {
		dummy_period = 0;
		period = &dummy_period;
	}

	*offset = MIN(ADV_SYN_MAX_OFFSET, *offset);
	if (*period != 0 && *offset != 0) {
		for (i = 0; i < adv->sdtr_period_tbl_size; i++) {
			if (*period <= adv->sdtr_period_tbl[i]) {
				/*
				 * When responding to a target that requests
				 * sync, the requested rate may fall between
				 * two rates that we can output, but still be
				 * a rate that we can receive.  Because of this,
				 * we want to respond to the target with
				 * the same rate that it sent to us even
				 * if the period we use to send data to it
				 * is lower.  Only lower the response period
				 * if we must.
				 */
				if (i == 0 /* Our maximum rate */)
					*period = adv->sdtr_period_tbl[0];
				return ((i << 4) | *offset);
			}
		}
	}

	/* Must go async */
	*period = 0;
	*offset = 0;
	if (adv->fix_asyn_xfer & ADV_TID_TO_TARGET_MASK(tid))
		return (ASYN_SDTR_DATA_FIX_PCI_REV_AB);
	return (0);
}

/* Internal Routines */

static void
adv_read_lram_16_multi(struct adv_softc *adv, u_int16_t s_addr,
		       u_int16_t *buffer, int count)
{
	ADV_OUTW(adv, ADV_LRAM_ADDR, s_addr);
	ADV_INSW(adv, ADV_LRAM_DATA, buffer, count);
}

static void
adv_write_lram_16_multi(struct adv_softc *adv, u_int16_t s_addr,
			u_int16_t *buffer, int count)
{
	ADV_OUTW(adv, ADV_LRAM_ADDR, s_addr);
	ADV_OUTSW(adv, ADV_LRAM_DATA, buffer, count);
}

static void
adv_mset_lram_16(struct adv_softc *adv, u_int16_t s_addr,
		 u_int16_t set_value, int count)
{
	ADV_OUTW(adv, ADV_LRAM_ADDR, s_addr);
	bus_space_set_multi_2(adv->tag, adv->bsh, ADV_LRAM_DATA,
			      set_value, count);
}

static u_int32_t
adv_msum_lram_16(struct adv_softc *adv, u_int16_t s_addr, int count)
{
	u_int32_t	sum;
	int		i;

	sum = 0;
	ADV_OUTW(adv, ADV_LRAM_ADDR, s_addr);
	for (i = 0; i < count; i++)
		sum += ADV_INW(adv, ADV_LRAM_DATA);
	return (sum);
}

static int
adv_write_and_verify_lram_16(struct adv_softc *adv, u_int16_t addr,
			     u_int16_t value)
{
	int	retval;

	retval = 0;
	ADV_OUTW(adv, ADV_LRAM_ADDR, addr);
	ADV_OUTW(adv, ADV_LRAM_DATA, value);
	DELAY(10000);
	ADV_OUTW(adv, ADV_LRAM_ADDR, addr);
	if (value != ADV_INW(adv, ADV_LRAM_DATA))
		retval = 1;
	return (retval);
}

static u_int32_t
adv_read_lram_32(struct adv_softc *adv, u_int16_t addr)
{
	u_int16_t val_low, val_high;

	ADV_OUTW(adv, ADV_LRAM_ADDR, addr);

#if BYTE_ORDER == BIG_ENDIAN
	val_high = ADV_INW(adv, ADV_LRAM_DATA);
	val_low = ADV_INW(adv, ADV_LRAM_DATA);
#else
	val_low = ADV_INW(adv, ADV_LRAM_DATA);
	val_high = ADV_INW(adv, ADV_LRAM_DATA);
#endif

	return (((u_int32_t)val_high << 16) | (u_int32_t)val_low);
}

static void
adv_write_lram_32(struct adv_softc *adv, u_int16_t addr, u_int32_t value)
{
	ADV_OUTW(adv, ADV_LRAM_ADDR, addr);

#if BYTE_ORDER == BIG_ENDIAN
	ADV_OUTW(adv, ADV_LRAM_DATA, (u_int16_t)((value >> 16) & 0xFFFF));
	ADV_OUTW(adv, ADV_LRAM_DATA, (u_int16_t)(value & 0xFFFF));
#else
	ADV_OUTW(adv, ADV_LRAM_DATA, (u_int16_t)(value & 0xFFFF));
	ADV_OUTW(adv, ADV_LRAM_DATA, (u_int16_t)((value >> 16) & 0xFFFF));
#endif
}

static void
adv_write_lram_32_multi(struct adv_softc *adv, u_int16_t s_addr,
			u_int32_t *buffer, int count)
{
	ADV_OUTW(adv, ADV_LRAM_ADDR, s_addr);
	ADV_OUTSW(adv, ADV_LRAM_DATA, (u_int16_t *)buffer, count * 2);
}

static u_int16_t
adv_read_eeprom_16(struct adv_softc *adv, u_int8_t addr)
{
	u_int16_t read_wval;
	u_int8_t  cmd_reg;

	adv_write_eeprom_cmd_reg(adv, ADV_EEPROM_CMD_WRITE_DISABLE);
	DELAY(1000);
	cmd_reg = addr | ADV_EEPROM_CMD_READ;
	adv_write_eeprom_cmd_reg(adv, cmd_reg);
	DELAY(1000);
	read_wval = ADV_INW(adv, ADV_EEPROM_DATA);
	DELAY(1000);
	return (read_wval);
}

static u_int16_t
adv_write_eeprom_16(struct adv_softc *adv, u_int8_t addr, u_int16_t value)
{
	u_int16_t	read_value;

	read_value = adv_read_eeprom_16(adv, addr);
	if (read_value != value) {
		adv_write_eeprom_cmd_reg(adv, ADV_EEPROM_CMD_WRITE_ENABLE);
		DELAY(1000);

		ADV_OUTW(adv, ADV_EEPROM_DATA, value);
		DELAY(1000);

		adv_write_eeprom_cmd_reg(adv, ADV_EEPROM_CMD_WRITE | addr);
		DELAY(20 * 1000);

		adv_write_eeprom_cmd_reg(adv, ADV_EEPROM_CMD_WRITE_DISABLE);
		DELAY(1000);
		read_value = adv_read_eeprom_16(adv, addr);
	}
	return (read_value);
}

static int
adv_write_eeprom_cmd_reg(struct adv_softc *adv, u_int8_t cmd_reg)
{
	u_int8_t read_back;
	int	 retry;

	retry = 0;
	while (1) {
		ADV_OUTB(adv, ADV_EEPROM_CMD, cmd_reg);
		DELAY(1000);
		read_back = ADV_INB(adv, ADV_EEPROM_CMD);
		if (read_back == cmd_reg) {
			return (1);
		}
		if (retry++ > ADV_EEPROM_MAX_RETRY) {
			return (0);
		}
	}
}

static int
adv_set_eeprom_config_once(struct adv_softc *adv,
			   struct adv_eeprom_config *eeprom_config)
{
	int		n_error;
	u_int16_t	*wbuf;
	u_int16_t	sum;
	u_int8_t	s_addr;
	u_int8_t	cfg_beg;
	u_int8_t	cfg_end;

	wbuf = (u_int16_t *)eeprom_config;
	n_error = 0;
	sum = 0;
	for (s_addr = 0; s_addr < 2; s_addr++, wbuf++) {
		sum += *wbuf;
		if (*wbuf != adv_write_eeprom_16(adv, s_addr, *wbuf)) {
			n_error++;
		}
	}
	if (adv->type & ADV_VL) {
		cfg_beg = ADV_EEPROM_CFG_BEG_VL;
		cfg_end = ADV_EEPROM_MAX_ADDR_VL;
	} else {
		cfg_beg = ADV_EEPROM_CFG_BEG;
		cfg_end = ADV_EEPROM_MAX_ADDR;
	}

	for (s_addr = cfg_beg; s_addr <= (cfg_end - 1); s_addr++, wbuf++) {
		sum += *wbuf;
		if (*wbuf != adv_write_eeprom_16(adv, s_addr, *wbuf)) {
			n_error++;
		}
	}
	*wbuf = sum;
	if (sum != adv_write_eeprom_16(adv, s_addr, sum)) {
		n_error++;
	}
	wbuf = (u_int16_t *)eeprom_config;
	for (s_addr = 0; s_addr < 2; s_addr++, wbuf++) {
		if (*wbuf != adv_read_eeprom_16(adv, s_addr)) {
			n_error++;
		}
	}
	for (s_addr = cfg_beg; s_addr <= cfg_end; s_addr++, wbuf++) {
		if (*wbuf != adv_read_eeprom_16(adv, s_addr)) {
			n_error++;
		}
	}
	return (n_error);
}

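/*
 * Copy the microcode image into LRAM and return the 32 bit sum of the
 * words written; the caller compares this against adv_mcode_chksum.
 * The 16 bit sum stored at ADVV_MCODE_CHKSUM_W covers only the code
 * section starting at ADV_CODE_SEC_BEG and is presumably re-verified
 * by the microcode itself.
 */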
static u_int32_t
adv_load_microcode(struct adv_softc *adv, u_int16_t s_addr,
		   u_int16_t *mcode_buf, u_int16_t mcode_size)
{
	u_int32_t chksum;
	u_int16_t mcode_lram_size;
	u_int16_t mcode_chksum;

	mcode_lram_size = mcode_size >> 1;
	/* XXX Why zero the memory just before you write the whole thing?? */
	adv_mset_lram_16(adv, s_addr, 0, mcode_lram_size);
	adv_write_lram_16_multi(adv, s_addr, mcode_buf, mcode_lram_size);

	chksum = adv_msum_lram_16(adv, s_addr, mcode_lram_size);
	mcode_chksum = (u_int16_t)adv_msum_lram_16(adv, ADV_CODE_SEC_BEG,
						   ((mcode_size - s_addr
						     - ADV_CODE_SEC_BEG) >> 1));
	adv_write_lram_16(adv, ADVV_MCODE_CHKSUM_W, mcode_chksum);
	adv_write_lram_16(adv, ADVV_MCODE_SIZE_W, mcode_size);
	return (chksum);
}

static void
adv_reinit_lram(struct adv_softc *adv)
{
	adv_init_lram(adv);
	adv_init_qlink_var(adv);
}

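/*
 * Carve LRAM into queue blocks and link them together: blocks
 * ADV_MIN_ACTIVE_QNO through max_openings form the free list,
 * terminated by ADV_QLINK_END, while the few blocks above
 * max_openings (used as list heads by adv_init_qlink_var()) simply
 * point to themselves.
 */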
static void
adv_init_lram(struct adv_softc *adv)
{
	u_int8_t  i;
	u_int16_t s_addr;

	adv_mset_lram_16(adv, ADV_QADR_BEG, 0,
			 (((adv->max_openings + 2 + 1) * 64) >> 1));

	i = ADV_MIN_ACTIVE_QNO;
	s_addr = ADV_QADR_BEG + ADV_QBLK_SIZE;

	adv_write_lram_8(adv, s_addr + ADV_SCSIQ_B_FWD, i + 1);
	adv_write_lram_8(adv, s_addr + ADV_SCSIQ_B_BWD, adv->max_openings);
	adv_write_lram_8(adv, s_addr + ADV_SCSIQ_B_QNO, i);
	i++;
	s_addr += ADV_QBLK_SIZE;
	for (; i < adv->max_openings; i++, s_addr += ADV_QBLK_SIZE) {
		adv_write_lram_8(adv, s_addr + ADV_SCSIQ_B_FWD, i + 1);
		adv_write_lram_8(adv, s_addr + ADV_SCSIQ_B_BWD, i - 1);
		adv_write_lram_8(adv, s_addr + ADV_SCSIQ_B_QNO, i);
	}

	adv_write_lram_8(adv, s_addr + ADV_SCSIQ_B_FWD, ADV_QLINK_END);
	adv_write_lram_8(adv, s_addr + ADV_SCSIQ_B_BWD, adv->max_openings - 1);
	adv_write_lram_8(adv, s_addr + ADV_SCSIQ_B_QNO, adv->max_openings);
	i++;
	s_addr += ADV_QBLK_SIZE;

	for (; i <= adv->max_openings + 3; i++, s_addr += ADV_QBLK_SIZE) {
		adv_write_lram_8(adv, s_addr + ADV_SCSIQ_B_FWD, i);
		adv_write_lram_8(adv, s_addr + ADV_SCSIQ_B_BWD, i);
		adv_write_lram_8(adv, s_addr + ADV_SCSIQ_B_QNO, i);
	}
}

static int
adv_init_microcode_var(struct adv_softc *adv)
{
	int	 i;

	for (i = 0; i <= ADV_MAX_TID; i++) {

		/* Start out async all around */
		adv_set_syncrate(adv, /*path*/NULL,
				 i, 0, 0,
				 ADV_TRANS_GOAL|ADV_TRANS_CUR);
	}

	adv_init_qlink_var(adv);

	adv_write_lram_8(adv, ADVV_DISC_ENABLE_B, adv->disc_enable);
	adv_write_lram_8(adv, ADVV_HOSTSCSI_ID_B, 0x01 << adv->scsi_id);

	adv_write_lram_32(adv, ADVV_OVERRUN_PADDR_D, adv->overrun_physbase);

	adv_write_lram_32(adv, ADVV_OVERRUN_BSIZE_D, ADV_OVERRUN_BSIZE);

	ADV_OUTW(adv, ADV_REG_PROG_COUNTER, ADV_MCODE_START_ADDR);
	if (ADV_INW(adv, ADV_REG_PROG_COUNTER) != ADV_MCODE_START_ADDR) {
		printf("adv%d: Unable to set program counter. Aborting.\n",
		       adv->unit);
		return (1);
	}
	return (0);
}

static void
adv_init_qlink_var(struct adv_softc *adv)
{
	int	  i;
	u_int16_t lram_addr;

	adv_write_lram_8(adv, ADVV_NEXTRDY_B, 1);
	adv_write_lram_8(adv, ADVV_DONENEXT_B, adv->max_openings);

	adv_write_lram_16(adv, ADVV_FREE_Q_HEAD_W, 1);
	adv_write_lram_16(adv, ADVV_DONE_Q_TAIL_W, adv->max_openings);

	adv_write_lram_8(adv, ADVV_BUSY_QHEAD_B,
			 (u_int8_t)((int) adv->max_openings + 1));
	adv_write_lram_8(adv, ADVV_DISC1_QHEAD_B,
			 (u_int8_t)((int) adv->max_openings + 2));

	adv_write_lram_8(adv, ADVV_TOTAL_READY_Q_B, adv->max_openings);

	adv_write_lram_16(adv, ADVV_ASCDVC_ERR_CODE_W, 0);
	adv_write_lram_16(adv, ADVV_HALTCODE_W, 0);
	adv_write_lram_8(adv, ADVV_STOP_CODE_B, 0);
	adv_write_lram_8(adv, ADVV_SCSIBUSY_B, 0);
	adv_write_lram_8(adv, ADVV_WTM_FLAG_B, 0);
	adv_write_lram_8(adv, ADVV_Q_DONE_IN_PROGRESS_B, 0);

	lram_addr = ADV_QADR_BEG;
	for (i = 0; i < 32; i++, lram_addr += 2)
		adv_write_lram_16(adv, lram_addr, 0);
}

static void
adv_disable_interrupt(struct adv_softc *adv)
{
	u_int16_t cfg;

	cfg = ADV_INW(adv, ADV_CONFIG_LSW);
	ADV_OUTW(adv, ADV_CONFIG_LSW, cfg & ~ADV_CFG_LSW_HOST_INT_ON);
}

static void
adv_enable_interrupt(struct adv_softc *adv)
{
	u_int16_t cfg;

	cfg = ADV_INW(adv, ADV_CONFIG_LSW);
	ADV_OUTW(adv, ADV_CONFIG_LSW, cfg | ADV_CFG_LSW_HOST_INT_ON);
}

static void
adv_toggle_irq_act(struct adv_softc *adv)
{
	ADV_OUTW(adv, ADV_CHIP_STATUS, ADV_CIW_IRQ_ACT);
	ADV_OUTW(adv, ADV_CHIP_STATUS, 0);
}

void
adv_start_execution(struct adv_softc *adv)
{
	if (adv_read_lram_8(adv, ADV_STOP_CODE_B) != 0) {
		adv_write_lram_8(adv, ADV_STOP_CODE_B, 0);
	}
}

int
adv_stop_chip(struct adv_softc *adv)
{
	u_int8_t cc_val;

	cc_val = ADV_INB(adv, ADV_CHIP_CTRL)
		 & (~(ADV_CC_SINGLE_STEP | ADV_CC_TEST | ADV_CC_DIAG));
	ADV_OUTB(adv, ADV_CHIP_CTRL, cc_val | ADV_CC_HALT);
	adv_set_chip_ih(adv, ADV_INS_HALT);
	adv_set_chip_ih(adv, ADV_INS_RFLAG_WTM);
	if ((ADV_INW(adv, ADV_CHIP_STATUS) & ADV_CSW_HALTED) == 0) {
		return (0);
	}
	return (1);
}

static int
adv_host_req_chip_halt(struct adv_softc *adv)
{
	int	 count;
	u_int8_t saved_stop_code;

	if (adv_is_chip_halted(adv))
		return (1);

	count = 0;
	saved_stop_code = adv_read_lram_8(adv, ADVV_STOP_CODE_B);
	adv_write_lram_8(adv, ADVV_STOP_CODE_B,
			 ADV_STOP_HOST_REQ_RISC_HALT | ADV_STOP_REQ_RISC_STOP);
	while (adv_is_chip_halted(adv) == 0
	    && count++ < 2000)
		;

	adv_write_lram_8(adv, ADVV_STOP_CODE_B, saved_stop_code);
	return (count < 2000);
}

static void
adv_set_chip_ih(struct adv_softc *adv, u_int16_t ins_code)
{
	adv_set_bank(adv, 1);
	ADV_OUTW(adv, ADV_REG_IH, ins_code);
	adv_set_bank(adv, 0);
}

#if UNUSED
static u_int8_t
adv_get_chip_scsi_ctrl(struct adv_softc *adv)
{
	u_int8_t scsi_ctrl;

	adv_set_bank(adv, 1);
	scsi_ctrl = ADV_INB(adv, ADV_REG_SC);
	adv_set_bank(adv, 0);
	return (scsi_ctrl);
}
#endif

/*
 * XXX Looks like more padding issues in this routine as well.
 *     There has to be a way to turn this into an insw.
 */
static void
adv_get_q_info(struct adv_softc *adv, u_int16_t s_addr,
	       u_int16_t *inbuf, int words)
{
	int	i;

	ADV_OUTW(adv, ADV_LRAM_ADDR, s_addr);
	for (i = 0; i < words; i++, inbuf++) {
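		/* Word 5 is apparently structure padding; skip it. */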
		if (i == 5) {
			continue;
		}
		*inbuf = ADV_INW(adv, ADV_LRAM_DATA);
	}
}

static u_int
adv_get_num_free_queues(struct adv_softc *adv, u_int8_t n_qs)
{
	u_int	  cur_used_qs;
	u_int	  cur_free_qs;

	cur_used_qs = adv->cur_active + ADV_MIN_FREE_Q;

	if ((cur_used_qs + n_qs) <= adv->max_openings) {
		cur_free_qs = adv->max_openings - cur_used_qs;
		return (cur_free_qs);
	}
	adv->openings_needed = n_qs;
	return (0);
}

static u_int8_t
adv_alloc_free_queues(struct adv_softc *adv, u_int8_t free_q_head,
		      u_int8_t n_free_q)
{
	int i;

	for (i = 0; i < n_free_q; i++) {
		free_q_head = adv_alloc_free_queue(adv, free_q_head);
		if (free_q_head == ADV_QLINK_END)
			break;
	}
	return (free_q_head);
}

static u_int8_t
adv_alloc_free_queue(struct adv_softc *adv, u_int8_t free_q_head)
{
	u_int16_t	q_addr;
	u_int8_t	next_qp;
	u_int8_t	q_status;

	next_qp = ADV_QLINK_END;
	q_addr = ADV_QNO_TO_QADDR(free_q_head);
	q_status = adv_read_lram_8(adv, q_addr + ADV_SCSIQ_B_STATUS);

	if ((q_status & QS_READY) == 0)
		next_qp = adv_read_lram_8(adv, q_addr + ADV_SCSIQ_B_FWD);

	return (next_qp);
}

static int
adv_send_scsi_queue(struct adv_softc *adv, struct adv_scsi_q *scsiq,
		    u_int8_t n_q_required)
{
	u_int8_t	free_q_head;
	u_int8_t	next_qp;
	u_int8_t	tid_no;
	u_int8_t	target_ix;
	int		retval;

	retval = 1;
	target_ix = scsiq->q2.target_ix;
	tid_no = ADV_TIX_TO_TID(target_ix);
	free_q_head = adv_read_lram_16(adv, ADVV_FREE_Q_HEAD_W) & 0xFF;
	if ((next_qp = adv_alloc_free_queues(adv, free_q_head, n_q_required))
	    != ADV_QLINK_END) {
		scsiq->q1.q_no = free_q_head;

		/*
		 * Now that we know our Q number, point our sense
		 * buffer pointer to a bus dma mapped area where
		 * we can dma the data to.
		 */
		scsiq->q1.sense_addr = adv->sense_physbase
		    + ((free_q_head - 1) * sizeof(struct scsi_sense_data));
		adv_put_ready_sg_list_queue(adv, scsiq, free_q_head);
		adv_write_lram_16(adv, ADVV_FREE_Q_HEAD_W, next_qp);
		adv->cur_active += n_q_required;
		retval = 0;
	}
	return (retval);
}

static void
adv_put_ready_sg_list_queue(struct adv_softc *adv, struct adv_scsi_q *scsiq,
			    u_int q_no)
{
	u_int8_t	sg_list_dwords;
	u_int8_t	sg_index, i;
	u_int8_t	sg_entry_cnt;
	u_int8_t	next_qp;
	u_int16_t	q_addr;
	struct		adv_sg_head *sg_head;
	struct		adv_sg_list_q scsi_sg_q;

	sg_head = scsiq->sg_head;

	if (sg_head) {
		sg_entry_cnt = sg_head->entry_cnt - 1;
#ifdef DIAGNOSTIC
		if (sg_entry_cnt == 0)
			panic("adv_put_ready_sg_list_queue: ScsiQ with "
			      "a SG list but only one element");
		if ((scsiq->q1.cntl & QC_SG_HEAD) == 0)
			panic("adv_put_ready_sg_list_queue: ScsiQ with "
			      "a SG list but QC_SG_HEAD not set");
#endif
		q_addr = ADV_QNO_TO_QADDR(q_no);
		sg_index = 1;
		scsiq->q1.sg_queue_cnt = sg_head->queue_cnt;
		scsi_sg_q.sg_head_qp = q_no;
		scsi_sg_q.cntl = QCSG_SG_XFER_LIST;
		for (i = 0; i < sg_head->queue_cnt; i++) {
			u_int8_t segs_this_q;

			if (sg_entry_cnt > ADV_SG_LIST_PER_Q)
				segs_this_q = ADV_SG_LIST_PER_Q;
			else {
				/* This will be the last segment then */
				segs_this_q = sg_entry_cnt;
				scsi_sg_q.cntl |= QCSG_SG_XFER_END;
			}
			scsi_sg_q.seq_no = i + 1;
			sg_list_dwords = segs_this_q << 1;
			if (i == 0) {
				scsi_sg_q.sg_list_cnt = segs_this_q;
				scsi_sg_q.sg_cur_list_cnt = segs_this_q;
			} else {
				scsi_sg_q.sg_list_cnt = segs_this_q - 1;
				scsi_sg_q.sg_cur_list_cnt = segs_this_q - 1;
			}
			next_qp = adv_read_lram_8(adv, q_addr + ADV_SCSIQ_B_FWD);
			scsi_sg_q.q_no = next_qp;
			q_addr = ADV_QNO_TO_QADDR(next_qp);

			adv_write_lram_16_multi(adv,
						q_addr + ADV_SCSIQ_SGHD_CPY_BEG,
						(u_int16_t *)&scsi_sg_q,
						sizeof(scsi_sg_q) >> 1);
			adv_write_lram_32_multi(adv, q_addr + ADV_SGQ_LIST_BEG,
						(u_int32_t *)&sg_head->sg_list[sg_index],
						sg_list_dwords);
			sg_entry_cnt -= segs_this_q;
			sg_index += ADV_SG_LIST_PER_Q;
		}
	}
	adv_put_ready_queue(adv, scsiq, q_no);
}

static void
adv_put_ready_queue(struct adv_softc *adv, struct adv_scsi_q *scsiq,
		    u_int q_no)
{
	struct		adv_target_transinfo* tinfo;
	u_int		q_addr;
	u_int		tid_no;

	tid_no = ADV_TIX_TO_TID(scsiq->q2.target_ix);
	tinfo = &adv->tinfo[tid_no];
	if ((tinfo->current.period != tinfo->goal.period)
	 || (tinfo->current.offset != tinfo->goal.offset)) {

		adv_msgout_sdtr(adv, tinfo->goal.period, tinfo->goal.offset);
		scsiq->q1.cntl |= QC_MSG_OUT;
	}
	q_addr = ADV_QNO_TO_QADDR(q_no);

	scsiq->q1.status = QS_FREE;

	adv_write_lram_16_multi(adv, q_addr + ADV_SCSIQ_CDB_BEG,
				(u_int16_t *)scsiq->cdbptr,
				scsiq->q2.cdb_len >> 1);

#if BYTE_ORDER == BIG_ENDIAN
	adv_adj_scsiq_endian(scsiq);
#endif

	adv_put_scsiq(adv, q_addr + ADV_SCSIQ_CPY_BEG,
		      (u_int16_t *) &scsiq->q1.cntl,
		      ((sizeof(scsiq->q1) + sizeof(scsiq->q2)) / 2) - 1);

#if CC_WRITE_IO_COUNT
	adv_write_lram_16(adv, q_addr + ADV_SCSIQ_W_REQ_COUNT,
			  adv->req_count);
#endif

#if CC_CLEAR_DMA_REMAIN
	adv_write_lram_32(adv, q_addr + ADV_SCSIQ_DW_REMAIN_XFER_ADDR, 0);
	adv_write_lram_32(adv, q_addr + ADV_SCSIQ_DW_REMAIN_XFER_CNT, 0);
#endif

	adv_write_lram_16(adv, q_addr + ADV_SCSIQ_B_STATUS,
			  (scsiq->q1.q_no << 8) | QS_READY);
}

static void
adv_put_scsiq(struct adv_softc *adv, u_int16_t s_addr,
	      u_int16_t *buffer, int words)
{
	int	i;

	/*
	 * XXX This routine makes *gross* assumptions
	 * about padding in the data structures.
	 * Either the data structures should have explicit
	 * padding members added, or they should have padding
	 * turned off via compiler attributes depending on
	 * which yields better overall performance.  My hunch
	 * would be that turning off padding would be the
	 * faster approach as an outsw is much faster than
	 * this crude loop and accessing un-aligned data
	 * members isn't *that* expensive.  The other choice
	 * would be to modify the ASC script so that the
	 * adv_scsiq_1 structure can be rearranged so
	 * padding isn't required.
	 */
	ADV_OUTW(adv, ADV_LRAM_ADDR, s_addr);
	for (i = 0; i < words; i++, buffer++) {
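		/* Words 2 and 10 are apparently padding; skip them. */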
		if (i == 2 || i == 10) {
			continue;
		}
		ADV_OUTW(adv, ADV_LRAM_DATA, *buffer);
	}
}

#if BYTE_ORDER == BIG_ENDIAN
void
adv_adj_endian_qdone_info(struct adv_q_done_info *scsiq)
{

	panic("adv(4) not supported on big-endian machines.\n");
}

void
adv_adj_scsiq_endian(struct adv_scsi_q *scsiq)
{

	panic("adv(4) not supported on big-endian machines.\n");
}
#endif

static void
adv_handle_extmsg_in(struct adv_softc *adv, u_int16_t halt_q_addr,
		     u_int8_t q_cntl, target_bit_vector target_mask,
		     int tid_no)
{
	struct	ext_msg ext_msg;

	adv_read_lram_16_multi(adv, ADVV_MSGIN_BEG, (u_int16_t *) &ext_msg,
			       sizeof(ext_msg) >> 1);
	if ((ext_msg.msg_type == MSG_EXTENDED)
	 && (ext_msg.msg_req == MSG_EXT_SDTR)
	 && (ext_msg.msg_len == MSG_EXT_SDTR_LEN)) {
		union	  ccb *ccb;
		struct	  adv_target_transinfo* tinfo;
		u_int32_t cinfo_index;
		u_int	 period;
		u_int	 offset;
		int	 sdtr_accept;
		u_int8_t orig_offset;

		cinfo_index =
		    adv_read_lram_32(adv, halt_q_addr + ADV_SCSIQ_D_CINFO_IDX);
		ccb = adv->ccb_infos[cinfo_index].ccb;
		tinfo = &adv->tinfo[tid_no];
		sdtr_accept = TRUE;

		orig_offset = ext_msg.req_ack_offset;
		if (ext_msg.xfer_period < tinfo->goal.period) {
			sdtr_accept = FALSE;
			ext_msg.xfer_period = tinfo->goal.period;
		}

		/* Perform range checking */
		period = ext_msg.xfer_period;
		offset = ext_msg.req_ack_offset;
		adv_period_offset_to_sdtr(adv, &period, &offset, tid_no);
		ext_msg.xfer_period = period;
		ext_msg.req_ack_offset = offset;

		/* Record our current sync settings */
		adv_set_syncrate(adv, ccb->ccb_h.path,
				 tid_no, ext_msg.xfer_period,
				 ext_msg.req_ack_offset,
				 ADV_TRANS_GOAL|ADV_TRANS_ACTIVE);

		/* Offset too high or large period forced async */
		if (orig_offset != ext_msg.req_ack_offset)
			sdtr_accept = FALSE;

		if (sdtr_accept && (q_cntl & QC_MSG_OUT)) {
			/* Valid response to our requested negotiation */
			q_cntl &= ~QC_MSG_OUT;
		} else {
			/* Must Respond */
			q_cntl |= QC_MSG_OUT;
			adv_msgout_sdtr(adv, ext_msg.xfer_period,
					ext_msg.req_ack_offset);
		}

	} else if (ext_msg.msg_type == MSG_EXTENDED
		&& ext_msg.msg_req == MSG_EXT_WDTR
		&& ext_msg.msg_len == MSG_EXT_WDTR_LEN) {

		ext_msg.wdtr_width = 0;
		adv_write_lram_16_multi(adv, ADVV_MSGOUT_BEG,
					(u_int16_t *)&ext_msg,
					sizeof(ext_msg) >> 1);
		q_cntl |= QC_MSG_OUT;
	} else {

		ext_msg.msg_type = MSG_MESSAGE_REJECT;
		adv_write_lram_16_multi(adv, ADVV_MSGOUT_BEG,
					(u_int16_t *)&ext_msg,
					sizeof(ext_msg) >> 1);
		q_cntl |= QC_MSG_OUT;
	}
	adv_write_lram_8(adv, halt_q_addr + ADV_SCSIQ_B_CNTL, q_cntl);
}

static void
adv_msgout_sdtr(struct adv_softc *adv, u_int8_t sdtr_period,
		u_int8_t sdtr_offset)
{
	struct	 ext_msg sdtr_buf;

	sdtr_buf.msg_type = MSG_EXTENDED;
	sdtr_buf.msg_len = MSG_EXT_SDTR_LEN;
	sdtr_buf.msg_req = MSG_EXT_SDTR;
	sdtr_buf.xfer_period = sdtr_period;
	sdtr_offset &= ADV_SYN_MAX_OFFSET;
	sdtr_buf.req_ack_offset = sdtr_offset;
	adv_write_lram_16_multi(adv, ADVV_MSGOUT_BEG,
				(u_int16_t *) &sdtr_buf,
				sizeof(sdtr_buf) / 2);
}

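/*
 * Walk every queue block in LRAM and flag as aborted any active
 * request that matches the given target/lun (and CCB, if one is
 * supplied).  Returns the number of requests aborted.
 */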
int
adv_abort_ccb(struct adv_softc *adv, int target, int lun, union ccb *ccb,
	      u_int32_t status, int queued_only)
{
	u_int16_t q_addr;
	u_int8_t  q_no;
	struct adv_q_done_info scsiq_buf;
	struct adv_q_done_info *scsiq;
	u_int8_t  target_ix;
	int	  count;

	scsiq = &scsiq_buf;
	target_ix = ADV_TIDLUN_TO_IX(target, lun);
	count = 0;
	for (q_no = ADV_MIN_ACTIVE_QNO; q_no <= adv->max_openings; q_no++) {
		struct adv_ccb_info *ccb_info;
		q_addr = ADV_QNO_TO_QADDR(q_no);

		adv_copy_lram_doneq(adv, q_addr, scsiq, adv->max_dma_count);
		ccb_info = &adv->ccb_infos[scsiq->d2.ccb_index];
		if (((scsiq->q_status & QS_READY) != 0)
		 && ((scsiq->q_status & QS_ABORTED) == 0)
		 && ((scsiq->cntl & QCSG_SG_XFER_LIST) == 0)
		 && (scsiq->d2.target_ix == target_ix)
		 && (queued_only == 0
		  || !(scsiq->q_status & (QS_DISC1|QS_DISC2|QS_BUSY|QS_DONE)))
		 && (ccb == NULL || (ccb == ccb_info->ccb))) {
			union ccb *aborted_ccb;
			struct adv_ccb_info *cinfo;

			scsiq->q_status |= QS_ABORTED;
			adv_write_lram_8(adv, q_addr + ADV_SCSIQ_B_STATUS,
					 scsiq->q_status);
			aborted_ccb = ccb_info->ccb;
			/* Don't clobber earlier error codes */
			if ((aborted_ccb->ccb_h.status & CAM_STATUS_MASK)
			  == CAM_REQ_INPROG)
				aborted_ccb->ccb_h.status |= status;
			cinfo = (struct adv_ccb_info *)
			    aborted_ccb->ccb_h.ccb_cinfo_ptr;
			cinfo->state |= ACCB_ABORT_QUEUED;
			count++;
		}
	}
	return (count);
}

int
adv_reset_bus(struct adv_softc *adv, int initiate_bus_reset)
{
	int count;
	int i;
	union ccb *ccb;

	i = 200;
	while ((ADV_INW(adv, ADV_CHIP_STATUS) & ADV_CSW_SCSI_RESET_ACTIVE) != 0
	    && i--)
		DELAY(1000);
	adv_reset_chip(adv, initiate_bus_reset);
	adv_reinit_lram(adv);
	for (i = 0; i <= ADV_MAX_TID; i++)
		adv_set_syncrate(adv, NULL, i, /*period*/0,
				 /*offset*/0, ADV_TRANS_CUR);
	ADV_OUTW(adv, ADV_REG_PROG_COUNTER, ADV_MCODE_START_ADDR);

	/* Tell the XPT layer that a bus reset occurred */
	if (adv->path != NULL)
		xpt_async(AC_BUS_RESET, adv->path, NULL);

	count = 0;
	while ((ccb = (union ccb *)LIST_FIRST(&adv->pending_ccbs)) != NULL) {
		if ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_INPROG)
			ccb->ccb_h.status |= CAM_SCSI_BUS_RESET;
		adv_done(adv, ccb, QD_ABORTED_BY_HOST, 0, 0, 0);
		count++;
	}

	adv_start_chip(adv);
	return (count);
}

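/*
 * The synchronous offset register is per-target: selecting a target id
 * via ADV_HOST_SCSIID in register bank 1 apparently routes the bank 0
 * ADV_SYN_OFFSET register to that target's settings.  Restore the
 * originally selected id when done.
 */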
static void
adv_set_sdtr_reg_at_id(struct adv_softc *adv, int tid, u_int8_t sdtr_data)
{
	int orig_id;

	adv_set_bank(adv, 1);
	orig_id = ffs(ADV_INB(adv, ADV_HOST_SCSIID)) - 1;
	ADV_OUTB(adv, ADV_HOST_SCSIID, tid);
	if (ADV_INB(adv, ADV_HOST_SCSIID) == (0x01 << tid)) {
		adv_set_bank(adv, 0);
		ADV_OUTB(adv, ADV_SYN_OFFSET, sdtr_data);
	}
	adv_set_bank(adv, 1);
	ADV_OUTB(adv, ADV_HOST_SCSIID, orig_id);
	adv_set_bank(adv, 0);
}