/* advlib.c — FreeBSD revision 59082 */
1/*
2 * Low level routines for the Advanced Systems Inc. SCSI controllers chips
3 *
4 * Copyright (c) 1996-1997, 1999-2000 Justin Gibbs.
5 * All rights reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 * 1. Redistributions of source code must retain the above copyright
11 *    notice, this list of conditions, and the following disclaimer,
12 *    without modification, immediately at the beginning of the file.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 *    notice, this list of conditions and the following disclaimer in the
15 *    documentation and/or other materials provided with the distribution.
16 * 3. The name of the author may not be used to endorse or promote products
17 *    derived from this software without specific prior written permission.
18 *
19 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
20 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
22 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
23 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
24 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
25 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
26 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
27 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
28 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
29 * SUCH DAMAGE.
30 *
31 * $FreeBSD: head/sys/dev/advansys/advlib.c 59082 2000-04-07 11:32:42Z nyan $
32 */
33/*
34 * Ported from:
35 * advansys.c - Linux Host Driver for AdvanSys SCSI Adapters
36 *
37 * Copyright (c) 1995-1996 Advanced System Products, Inc.
38 * All Rights Reserved.
39 *
40 * Redistribution and use in source and binary forms, with or without
41 * modification, are permitted provided that redistributions of source
42 * code retain the above copyright notice and this comment without
43 * modification.
44 */
45
46#include <sys/param.h>
47#include <sys/kernel.h>
48#include <sys/systm.h>
49
50#include <machine/bus_pio.h>
51#include <machine/bus.h>
52#include <machine/clock.h>
53#include <machine/resource.h>
54#include <sys/bus.h>
55#include <sys/rman.h>
56
57#include <cam/cam.h>
58#include <cam/cam_ccb.h>
59#include <cam/cam_sim.h>
60#include <cam/cam_xpt_sim.h>
61
62#include <cam/scsi/scsi_all.h>
63#include <cam/scsi/scsi_message.h>
64#include <cam/scsi/scsi_da.h>
65#include <cam/scsi/scsi_cd.h>
66
67#include <vm/vm.h>
68#include <vm/vm_param.h>
69#include <vm/pmap.h>
70
71#include <dev/advansys/advansys.h>
72#include <dev/advansys/advmcode.h>
73
/*
 * Quirk table entry: an inquiry pattern to match against a device's
 * INQUIRY data, plus a bitmask of ADV_QUIRK_* flags controlling the
 * async transfer fix for that device.
 */
struct adv_quirk_entry {
	struct scsi_inquiry_pattern inq_pat;
	u_int8_t quirks;
#define ADV_QUIRK_FIX_ASYN_XFER_ALWAYS	0x01
#define ADV_QUIRK_FIX_ASYN_XFER		0x02
};
80
/*
 * Devices with known transfer quirks.  Matched against inquiry data
 * in advasync(); the final wildcard entry guarantees every device
 * matches something (advasync() panics otherwise).
 */
static struct adv_quirk_entry adv_quirk_table[] =
{
	{
		/* HP CD-ROMs always require the async transfer fix. */
		{ T_CDROM, SIP_MEDIA_REMOVABLE, "HP", "*", "*" },
		ADV_QUIRK_FIX_ASYN_XFER_ALWAYS|ADV_QUIRK_FIX_ASYN_XFER
	},
	{
		/* NEC CD-ROMs: no fix needed. */
		{ T_CDROM, SIP_MEDIA_REMOVABLE, "NEC", "CD-ROM DRIVE", "*" },
		0
	},
	{
		{
		  T_SEQUENTIAL, SIP_MEDIA_REMOVABLE,
		  "TANDBERG", " TDC 36", "*"
		},
		0
	},
	{
		{ T_SEQUENTIAL, SIP_MEDIA_REMOVABLE, "WANGTEK", "*", "*" },
		0
	},
	{
		/* All processor devices are exonerated. */
		{
		  T_PROCESSOR, SIP_MEDIA_REMOVABLE|SIP_MEDIA_FIXED,
		  "*", "*", "*"
		},
		0
	},
	{
		/* All scanners are exonerated. */
		{
		  T_SCANNER, SIP_MEDIA_REMOVABLE|SIP_MEDIA_FIXED,
		  "*", "*", "*"
		},
		0
	},
	{
		/* Default quirk entry */
		{
		  T_ANY, SIP_MEDIA_REMOVABLE|SIP_MEDIA_FIXED,
		  /*vendor*/"*", /*product*/"*", /*revision*/"*"
                },
                ADV_QUIRK_FIX_ASYN_XFER,
	}
};
125
126/*
127 * Allowable periods in ns
128 */
/*
 * Allowable periods in ns
 *
 * Synchronous transfer period table for non-Ultra chips; indexed by
 * the high nibble of the chip's SDTR data value (see
 * adv_sdtr_to_period_offset()).
 */
static u_int8_t adv_sdtr_period_tbl[] =
{
	25,
	30,
	35,
	40,
	50,
	60,
	70,
	85
};
140
/*
 * Synchronous transfer period table (in ns) for Ultra capable chips;
 * selected in adv_lib_init() when the ADV_ULTRA type flag is set.
 */
static u_int8_t adv_sdtr_period_tbl_ultra[] =
{
	12,
	19,
	25,
	32,
	38,
	44,
	50,
	57,
	63,
	69,
	75,
	82,
	88,
	94,
	100,
	107
};
160
/*
 * Layout of a SCSI extended message as exchanged with the chip's
 * LRAM message buffers (used for SDTR/WDTR negotiation and
 * modify-data-pointer messages).
 */
struct ext_msg {
	u_int8_t msg_type;	/* MSG_EXTENDED */
	u_int8_t msg_len;	/* length of the extended message body */
	u_int8_t msg_req;	/* extended message code, e.g. MSG_EXT_SDTR */
	union {
		struct {
			u_int8_t sdtr_xfer_period;
			u_int8_t sdtr_req_ack_offset;
		} sdtr;
		struct {
			u_int8_t wdtr_width;
		} wdtr;
		struct {
			u_int8_t mdp[4];
		} mdp;
	} u_ext_msg;
	u_int8_t res;		/* pad/reserved byte */
};
179
/*
 * Shorthand accessors for the ext_msg union members.
 * NOTE(review): the mdp_b3..mdp_b0 macros expand to u_ext_msg.mdp_bN,
 * but struct ext_msg only declares u_ext_msg.mdp.mdp[4] — these four
 * macros do not match the structure layout and appear unused in this
 * file; confirm before relying on them.
 */
#define	xfer_period	u_ext_msg.sdtr.sdtr_xfer_period
#define	req_ack_offset	u_ext_msg.sdtr.sdtr_req_ack_offset
#define	wdtr_width	u_ext_msg.wdtr.wdtr_width
#define	mdp_b3		u_ext_msg.mdp_b3
#define	mdp_b2		u_ext_msg.mdp_b2
#define	mdp_b1		u_ext_msg.mdp_b1
#define	mdp_b0		u_ext_msg.mdp_b0

/*
 * Some of the early PCI adapters have problems with
 * async transfers.  Instead use an offset of 1.
 */
#define ASYN_SDTR_DATA_FIX_PCI_REV_AB 0x41
193
194/* LRAM routines */
195static void	 adv_read_lram_16_multi(struct adv_softc *adv, u_int16_t s_addr,
196					u_int16_t *buffer, int count);
197static void	 adv_write_lram_16_multi(struct adv_softc *adv,
198					 u_int16_t s_addr, u_int16_t *buffer,
199					 int count);
200static void	 adv_mset_lram_16(struct adv_softc *adv, u_int16_t s_addr,
201				  u_int16_t set_value, int count);
202static u_int32_t adv_msum_lram_16(struct adv_softc *adv, u_int16_t s_addr,
203				  int count);
204
205static int	 adv_write_and_verify_lram_16(struct adv_softc *adv,
206					      u_int16_t addr, u_int16_t value);
207static u_int32_t adv_read_lram_32(struct adv_softc *adv, u_int16_t addr);
208
209
210static void	 adv_write_lram_32(struct adv_softc *adv, u_int16_t addr,
211				   u_int32_t value);
212static void	 adv_write_lram_32_multi(struct adv_softc *adv,
213					 u_int16_t s_addr, u_int32_t *buffer,
214					 int count);
215
216/* EEPROM routines */
217static u_int16_t adv_read_eeprom_16(struct adv_softc *adv, u_int8_t addr);
218static u_int16_t adv_write_eeprom_16(struct adv_softc *adv, u_int8_t addr,
219				     u_int16_t value);
220static int	 adv_write_eeprom_cmd_reg(struct adv_softc *adv,
221					  u_int8_t cmd_reg);
222static int	 adv_set_eeprom_config_once(struct adv_softc *adv,
223					    struct adv_eeprom_config *eeconfig);
224
225/* Initialization */
226static u_int32_t adv_load_microcode(struct adv_softc *adv, u_int16_t s_addr,
227				    u_int16_t *mcode_buf, u_int16_t mcode_size);
228
229static void	 adv_reinit_lram(struct adv_softc *adv);
230static void	 adv_init_lram(struct adv_softc *adv);
231static int	 adv_init_microcode_var(struct adv_softc *adv);
232static void	 adv_init_qlink_var(struct adv_softc *adv);
233
234/* Interrupts */
235static void	 adv_disable_interrupt(struct adv_softc *adv);
236static void	 adv_enable_interrupt(struct adv_softc *adv);
237static void	 adv_toggle_irq_act(struct adv_softc *adv);
238
239/* Chip Control */
240static int	 adv_host_req_chip_halt(struct adv_softc *adv);
241static void	 adv_set_chip_ih(struct adv_softc *adv, u_int16_t ins_code);
242#if UNUSED
243static u_int8_t  adv_get_chip_scsi_ctrl(struct adv_softc *adv);
244#endif
245
246/* Queue handling and execution */
247static __inline int
248		 adv_sgcount_to_qcount(int sgcount);
249
250static __inline int
251adv_sgcount_to_qcount(int sgcount)
252{
253	int	n_sg_list_qs;
254
255	n_sg_list_qs = ((sgcount - 1) / ADV_SG_LIST_PER_Q);
256	if (((sgcount - 1) % ADV_SG_LIST_PER_Q) != 0)
257		n_sg_list_qs++;
258	return (n_sg_list_qs + 1);
259}
260
261static void	 adv_get_q_info(struct adv_softc *adv, u_int16_t s_addr,
262				u_int16_t *inbuf, int words);
263static u_int	 adv_get_num_free_queues(struct adv_softc *adv, u_int8_t n_qs);
264static u_int8_t  adv_alloc_free_queues(struct adv_softc *adv,
265				       u_int8_t free_q_head, u_int8_t n_free_q);
266static u_int8_t  adv_alloc_free_queue(struct adv_softc *adv,
267				      u_int8_t free_q_head);
268static int	 adv_send_scsi_queue(struct adv_softc *adv,
269				     struct adv_scsi_q *scsiq,
270				     u_int8_t n_q_required);
271static void	 adv_put_ready_sg_list_queue(struct adv_softc *adv,
272					     struct adv_scsi_q *scsiq,
273					     u_int q_no);
274static void	 adv_put_ready_queue(struct adv_softc *adv,
275				     struct adv_scsi_q *scsiq, u_int q_no);
276static void	 adv_put_scsiq(struct adv_softc *adv, u_int16_t s_addr,
277			       u_int16_t *buffer, int words);
278
279/* Messages */
280static void	 adv_handle_extmsg_in(struct adv_softc *adv,
281				      u_int16_t halt_q_addr, u_int8_t q_cntl,
282				      target_bit_vector target_id,
283				      int tid);
284static void	 adv_msgout_sdtr(struct adv_softc *adv, u_int8_t sdtr_period,
285				 u_int8_t sdtr_offset);
286static void	 adv_set_sdtr_reg_at_id(struct adv_softc *adv, int id,
287					u_int8_t sdtr_data);
288
289
290/* Exported functions first */
291
292void
293advasync(void *callback_arg, u_int32_t code, struct cam_path *path, void *arg)
294{
295	struct adv_softc *adv;
296
297	adv = (struct adv_softc *)callback_arg;
298	switch (code) {
299	case AC_FOUND_DEVICE:
300	{
301		struct ccb_getdev *cgd;
302		target_bit_vector target_mask;
303		int num_entries;
304        	caddr_t match;
305		struct adv_quirk_entry *entry;
306		struct adv_target_transinfo* tinfo;
307
308		cgd = (struct ccb_getdev *)arg;
309
310		target_mask = ADV_TID_TO_TARGET_MASK(cgd->ccb_h.target_id);
311
312		num_entries = sizeof(adv_quirk_table)/sizeof(*adv_quirk_table);
313		match = cam_quirkmatch((caddr_t)&cgd->inq_data,
314				       (caddr_t)adv_quirk_table,
315				       num_entries, sizeof(*adv_quirk_table),
316				       scsi_inquiry_match);
317
318		if (match == NULL)
319			panic("advasync: device didn't match wildcard entry!!");
320
321		entry = (struct adv_quirk_entry *)match;
322
323		if (adv->bug_fix_control & ADV_BUG_FIX_ASYN_USE_SYN) {
324			if ((entry->quirks & ADV_QUIRK_FIX_ASYN_XFER_ALWAYS)!=0)
325				adv->fix_asyn_xfer_always |= target_mask;
326			else
327				adv->fix_asyn_xfer_always &= ~target_mask;
328			/*
329			 * We start out life with all bits set and clear them
330			 * after we've determined that the fix isn't necessary.
331			 * It may well be that we've already cleared a target
332			 * before the full inquiry session completes, so don't
333			 * gratuitously set a target bit even if it has this
334			 * quirk.  But, if the quirk exonerates a device, clear
335			 * the bit now.
336			 */
337			if ((entry->quirks & ADV_QUIRK_FIX_ASYN_XFER) == 0)
338				adv->fix_asyn_xfer &= ~target_mask;
339		}
340		/*
341		 * Reset our sync settings now that we've determined
342		 * what quirks are in effect for the device.
343		 */
344		tinfo = &adv->tinfo[cgd->ccb_h.target_id];
345		adv_set_syncrate(adv, cgd->ccb_h.path,
346				 cgd->ccb_h.target_id,
347				 tinfo->current.period,
348				 tinfo->current.offset,
349				 ADV_TRANS_CUR);
350		break;
351	}
352	case AC_LOST_DEVICE:
353	{
354		u_int target_mask;
355
356		if (adv->bug_fix_control & ADV_BUG_FIX_ASYN_USE_SYN) {
357			target_mask = 0x01 << xpt_path_target_id(path);
358			adv->fix_asyn_xfer |= target_mask;
359		}
360
361		/*
362		 * Revert to async transfers
363		 * for the next device.
364		 */
365		adv_set_syncrate(adv, /*path*/NULL,
366				 xpt_path_target_id(path),
367				 /*period*/0,
368				 /*offset*/0,
369				 ADV_TRANS_GOAL|ADV_TRANS_CUR);
370	}
371	default:
372		break;
373	}
374}
375
376void
377adv_set_bank(struct adv_softc *adv, u_int8_t bank)
378{
379	u_int8_t control;
380
381	/*
382	 * Start out with the bank reset to 0
383	 */
384	control = ADV_INB(adv, ADV_CHIP_CTRL)
385		  &  (~(ADV_CC_SINGLE_STEP | ADV_CC_TEST
386			| ADV_CC_DIAG | ADV_CC_SCSI_RESET
387			| ADV_CC_CHIP_RESET | ADV_CC_BANK_ONE));
388	if (bank == 1) {
389		control |= ADV_CC_BANK_ONE;
390	} else if (bank == 2) {
391		control |= ADV_CC_DIAG | ADV_CC_BANK_ONE;
392	}
393	ADV_OUTB(adv, ADV_CHIP_CTRL, control);
394}
395
/*
 * Read a single byte from chip local RAM (LRAM).
 * LRAM is only word addressable, so fetch the containing 16bit word
 * and extract the requested byte according to host byte order.
 */
u_int8_t
adv_read_lram_8(struct adv_softc *adv, u_int16_t addr)
{
	u_int8_t   byte_data;
	u_int16_t  word_data;

	/*
	 * LRAM is accessed on 16bit boundaries.
	 */
	ADV_OUTW(adv, ADV_LRAM_ADDR, addr & 0xFFFE);
	word_data = ADV_INW(adv, ADV_LRAM_DATA);
	if (addr & 1) {
		/* Odd address: take the high byte (low byte on BE hosts). */
#if BYTE_ORDER == BIG_ENDIAN
		byte_data = (u_int8_t)(word_data & 0xFF);
#else
		byte_data = (u_int8_t)((word_data >> 8) & 0xFF);
#endif
	} else {
		/* Even address: take the low byte (high byte on BE hosts). */
#if BYTE_ORDER == BIG_ENDIAN
		byte_data = (u_int8_t)((word_data >> 8) & 0xFF);
#else
		byte_data = (u_int8_t)(word_data & 0xFF);
#endif
	}
	return (byte_data);
}
422
423void
424adv_write_lram_8(struct adv_softc *adv, u_int16_t addr, u_int8_t value)
425{
426	u_int16_t word_data;
427
428	word_data = adv_read_lram_16(adv, addr & 0xFFFE);
429	if (addr & 1) {
430		word_data &= 0x00FF;
431		word_data |= (((u_int8_t)value << 8) & 0xFF00);
432	} else {
433		word_data &= 0xFF00;
434		word_data |= ((u_int8_t)value & 0x00FF);
435	}
436	adv_write_lram_16(adv, addr & 0xFFFE, word_data);
437}
438
439
440u_int16_t
441adv_read_lram_16(struct adv_softc *adv, u_int16_t addr)
442{
443	ADV_OUTW(adv, ADV_LRAM_ADDR, addr);
444	return (ADV_INW(adv, ADV_LRAM_DATA));
445}
446
/*
 * Write a 16bit word to chip local RAM: latch the address, then
 * store the value via the data port.
 */
void
adv_write_lram_16(struct adv_softc *adv, u_int16_t addr, u_int16_t value)
{
	ADV_OUTW(adv, ADV_LRAM_ADDR, addr);
	ADV_OUTW(adv, ADV_LRAM_DATA, value);
}
453
454/*
455 * Determine if there is a board at "iobase" by looking
456 * for the AdvanSys signatures.  Return 1 if a board is
457 * found, 0 otherwise.
458 */
459int
460adv_find_signature(bus_space_tag_t tag, bus_space_handle_t bsh)
461{
462	u_int16_t signature;
463
464	if (bus_space_read_1(tag, bsh, ADV_SIGNATURE_BYTE) == ADV_1000_ID1B) {
465		signature = bus_space_read_2(tag, bsh, ADV_SIGNATURE_WORD);
466		if ((signature == ADV_1000_ID0W)
467		 || (signature == ADV_1000_ID0W_FIX))
468			return (1);
469	}
470	return (0);
471}
472
473void
474adv_lib_init(struct adv_softc *adv)
475{
476	if ((adv->type & ADV_ULTRA) != 0) {
477		adv->sdtr_period_tbl = adv_sdtr_period_tbl_ultra;
478		adv->sdtr_period_tbl_size = sizeof(adv_sdtr_period_tbl_ultra);
479	} else {
480		adv->sdtr_period_tbl = adv_sdtr_period_tbl;
481		adv->sdtr_period_tbl_size = sizeof(adv_sdtr_period_tbl);
482	}
483}
484
/*
 * Read the controller's EEPROM configuration into *eeprom_config,
 * walking the structure as an array of 16bit words.  Returns the
 * running sum of all configuration words; the final word read
 * (the stored checksum itself) is copied out but not summed, so the
 * caller can compare the return value against it.
 */
u_int16_t
adv_get_eeprom_config(struct adv_softc *adv, struct
		      adv_eeprom_config  *eeprom_config)
{
	u_int16_t	sum;
	u_int16_t	*wbuf;
	u_int8_t	cfg_beg;
	u_int8_t	cfg_end;
	u_int8_t	s_addr;

	wbuf = (u_int16_t *)eeprom_config;
	sum = 0;

	/* The first two words are always at addresses 0 and 1. */
	for (s_addr = 0; s_addr < 2; s_addr++, wbuf++) {
		*wbuf = adv_read_eeprom_16(adv, s_addr);
		sum += *wbuf;
	}

	/* VL cards use a different EEPROM layout than ISA/PCI. */
	if (adv->type & ADV_VL) {
		cfg_beg = ADV_EEPROM_CFG_BEG_VL;
		cfg_end = ADV_EEPROM_MAX_ADDR_VL;
	} else {
		cfg_beg = ADV_EEPROM_CFG_BEG;
		cfg_end = ADV_EEPROM_MAX_ADDR;
	}

	for (s_addr = cfg_beg; s_addr <= (cfg_end - 1); s_addr++, wbuf++) {
		*wbuf = adv_read_eeprom_16(adv, s_addr);
		sum += *wbuf;
#if ADV_DEBUG_EEPROM
		printf("Addr 0x%x: 0x%04x\n", s_addr, *wbuf);
#endif
	}
	/* Fetch the checksum word without including it in the sum. */
	*wbuf = adv_read_eeprom_16(adv, s_addr);
	return (sum);
}
521
522int
523adv_set_eeprom_config(struct adv_softc *adv,
524		      struct adv_eeprom_config *eeprom_config)
525{
526	int	retry;
527
528	retry = 0;
529	while (1) {
530		if (adv_set_eeprom_config_once(adv, eeprom_config) == 0) {
531			break;
532		}
533		if (++retry > ADV_EEPROM_MAX_RETRY) {
534			break;
535		}
536	}
537	return (retry > ADV_EEPROM_MAX_RETRY);
538}
539
/*
 * Reset the chip (and optionally the SCSI bus), leaving the chip
 * halted.  The register write/delay ordering below is part of the
 * documented reset sequence for these controllers — do not reorder.
 * Returns non-zero if the chip reports halted afterwards.
 */
int
adv_reset_chip(struct adv_softc *adv, int reset_bus)
{
	adv_stop_chip(adv);
	ADV_OUTB(adv, ADV_CHIP_CTRL, ADV_CC_CHIP_RESET | ADV_CC_HALT
				     | (reset_bus ? ADV_CC_SCSI_RESET : 0));
	DELAY(60);

	/* Re-arm the instruction hardware with a halt instruction. */
	adv_set_chip_ih(adv, ADV_INS_RFLAG_WTM);
	adv_set_chip_ih(adv, ADV_INS_HALT);

	if (reset_bus)
		ADV_OUTB(adv, ADV_CHIP_CTRL, ADV_CC_CHIP_RESET | ADV_CC_HALT);

	ADV_OUTB(adv, ADV_CHIP_CTRL, ADV_CC_HALT);
	if (reset_bus)
		/* Allow the bus reset to settle (200ms). */
		DELAY(200 * 1000);

	/* Clear any latched SCSI reset interrupt. */
	ADV_OUTW(adv, ADV_CHIP_STATUS, ADV_CIW_CLR_SCSI_RESET_INT);
	ADV_OUTW(adv, ADV_CHIP_STATUS, 0);
	return (adv_is_chip_halted(adv));
}
562
563int
564adv_test_external_lram(struct adv_softc* adv)
565{
566	u_int16_t	q_addr;
567	u_int16_t	saved_value;
568	int		success;
569
570	success = 0;
571
572	q_addr = ADV_QNO_TO_QADDR(241);
573	saved_value = adv_read_lram_16(adv, q_addr);
574	if (adv_write_and_verify_lram_16(adv, q_addr, 0x55AA) == 0) {
575		success = 1;
576		adv_write_lram_16(adv, q_addr, saved_value);
577	}
578	return (success);
579}
580
581
/*
 * Initialize local RAM and download the RISC microcode, verifying
 * its checksum.  Returns 0 on success, 1 on failure.
 * NOTE(review): interrupts are left disabled on the failure paths —
 * presumably intentional since the controller is unusable without
 * valid microcode, but confirm against callers.
 */
int
adv_init_lram_and_mcode(struct adv_softc *adv)
{
	u_int32_t	retval;

	adv_disable_interrupt(adv);

	adv_init_lram(adv);

	/* adv_load_microcode() returns the checksum of what it wrote. */
	retval = adv_load_microcode(adv, 0, (u_int16_t *)adv_mcode,
				    adv_mcode_size);
	if (retval != adv_mcode_chksum) {
		printf("adv%d: Microcode download failed checksum!\n",
		       adv->unit);
		return (1);
	}

	if (adv_init_microcode_var(adv) != 0)
		return (1);

	adv_enable_interrupt(adv);
	return (0);
}
605
/*
 * Decode the IRQ number from the chip's config register.
 * VL boards encode the IRQ in 3 bits (config bits 2-4); encodings
 * 0, 4 and 7 are invalid and yield 0.  Other (ISA) boards use a
 * 2 bit encoding where value 3 maps two IRQs higher.
 * Returns the system IRQ number, or 0 when none is configured.
 */
u_int8_t
adv_get_chip_irq(struct adv_softc *adv)
{
	u_int16_t	cfg_lsw;
	u_int8_t	chip_irq;

	cfg_lsw = ADV_INW(adv, ADV_CONFIG_LSW);

	if ((adv->type & ADV_VL) != 0) {
		chip_irq = (u_int8_t)(((cfg_lsw >> 2) & 0x07));
		if ((chip_irq == 0) ||
		    (chip_irq == 4) ||
		    (chip_irq == 7)) {
			return (0);
		}
		/* Encoded value 1 corresponds to ADV_MIN_IRQ_NO. */
		return (chip_irq + (ADV_MIN_IRQ_NO - 1));
	}
	chip_irq = (u_int8_t)(((cfg_lsw >> 2) & 0x03));
	if (chip_irq == 3)
		/* Encoding 3 skips two IRQ numbers (e.g. for IRQ 15). */
		chip_irq += 2;
	return (chip_irq + ADV_MIN_IRQ_NO);
}
628
/*
 * Program the chip's IRQ selection bits in the config register,
 * using the inverse of the encoding in adv_get_chip_irq().
 * Out-of-range requests on VL boards are coerced to "no IRQ" (0).
 * Returns the IRQ the chip actually ended up with.
 */
u_int8_t
adv_set_chip_irq(struct adv_softc *adv, u_int8_t irq_no)
{
	u_int16_t	cfg_lsw;

	if ((adv->type & ADV_VL) != 0) {
		if (irq_no != 0) {
			if ((irq_no < ADV_MIN_IRQ_NO)
			 || (irq_no > ADV_MAX_IRQ_NO)) {
				irq_no = 0;
			} else {
				/* Convert to the chip's 1-based encoding. */
				irq_no -= ADV_MIN_IRQ_NO - 1;
			}
		}
		/*
		 * Two-step update: first clear the IRQ field (bits 2-4)
		 * while setting bit 4, then write the new encoding.  An
		 * IRQ-act toggle follows each write to latch it.
		 */
		cfg_lsw = ADV_INW(adv, ADV_CONFIG_LSW) & 0xFFE3;
		cfg_lsw |= 0x0010;
		ADV_OUTW(adv, ADV_CONFIG_LSW, cfg_lsw);
		adv_toggle_irq_act(adv);

		cfg_lsw = ADV_INW(adv, ADV_CONFIG_LSW) & 0xFFE0;
		cfg_lsw |= (irq_no & 0x07) << 2;
		ADV_OUTW(adv, ADV_CONFIG_LSW, cfg_lsw);
		adv_toggle_irq_act(adv);
	} else if ((adv->type & ADV_ISA) != 0) {
		/* IRQ 15 uses the same 2-bit encoding as IRQ 13. */
		if (irq_no == 15)
			irq_no -= 2;
		irq_no -= ADV_MIN_IRQ_NO;
		cfg_lsw = ADV_INW(adv, ADV_CONFIG_LSW) & 0xFFF3;
		cfg_lsw |= (irq_no & 0x03) << 2;
		ADV_OUTW(adv, ADV_CONFIG_LSW, cfg_lsw);
	}
	return (adv_get_chip_irq(adv));
}
662
663void
664adv_set_chip_scsiid(struct adv_softc *adv, int new_id)
665{
666	u_int16_t cfg_lsw;
667
668	cfg_lsw = ADV_INW(adv, ADV_CONFIG_LSW);
669	if (ADV_CONFIG_SCSIID(cfg_lsw) == new_id)
670		return;
671    	cfg_lsw &= ~ADV_CFG_LSW_SCSIID;
672	cfg_lsw |= (new_id & ADV_MAX_TID) << ADV_CFG_LSW_SCSIID_SHIFT;
673	ADV_OUTW(adv, ADV_CONFIG_LSW, cfg_lsw);
674}
675
/*
 * Prepare and submit a SCSI request to the controller's queue
 * machinery.  Applies any per-target bug-fix tag flags, computes the
 * number of controller queues the request needs (one plus extra
 * queues for S/G lists), and hands it off via adv_send_scsi_queue()
 * if enough free queues are available (urgent requests go regardless).
 * Returns 0 if the request was accepted, non-zero otherwise.
 */
int
adv_execute_scsi_queue(struct adv_softc *adv, struct adv_scsi_q *scsiq,
		       u_int32_t datalen)
{
	struct		adv_target_transinfo* tinfo;
	u_int32_t	*p_data_addr;
	u_int32_t	*p_data_bcount;
	int		disable_syn_offset_one_fix;
	int		retval;
	u_int		n_q_required;
	u_int32_t	addr;
	u_int8_t	sg_entry_cnt;
	u_int8_t	target_ix;
	u_int8_t	sg_entry_cnt_minus_one;
	u_int8_t	tid_no;

	scsiq->q1.q_no = 0;
	retval = 1;  /* Default to error case */
	target_ix = scsiq->q2.target_ix;
	tid_no = ADV_TIX_TO_TID(target_ix);
	tinfo = &adv->tinfo[tid_no];

	if (scsiq->cdbptr[0] == REQUEST_SENSE) {
		/* Renegotiate if appropriate. */
		adv_set_syncrate(adv, /*struct cam_path */NULL,
				 tid_no, /*period*/0, /*offset*/0,
				 ADV_TRANS_CUR);
		if (tinfo->current.period != tinfo->goal.period) {
			adv_msgout_sdtr(adv, tinfo->goal.period,
					tinfo->goal.offset);
			scsiq->q1.cntl |= (QC_MSG_OUT | QC_URGENT);
		}
	}

	if ((scsiq->q1.cntl & QC_SG_HEAD) != 0) {
		/* Scatter/gather request: count required S/G queues. */
		sg_entry_cnt = scsiq->sg_head->entry_cnt;
		sg_entry_cnt_minus_one = sg_entry_cnt - 1;

#ifdef DIAGNOSTIC
		if (sg_entry_cnt <= 1)
			panic("adv_execute_scsi_queue: Queue "
			      "with QC_SG_HEAD set but %d segs.", sg_entry_cnt);

		if (sg_entry_cnt > ADV_MAX_SG_LIST)
			panic("adv_execute_scsi_queue: "
			      "Queue with too many segs.");

		if ((adv->type & (ADV_ISA | ADV_VL | ADV_EISA)) != 0) {
			int i;

			/* Non-PCI chips need dword-aligned segment ends. */
			for (i = 0; i < sg_entry_cnt_minus_one; i++) {
				addr = scsiq->sg_head->sg_list[i].addr +
				       scsiq->sg_head->sg_list[i].bytes;

				if ((addr & 0x0003) != 0)
					panic("adv_execute_scsi_queue: SG "
					      "with odd address or byte count");
			}
		}
#endif
		/* Track the final segment for the DWB fix-up below. */
		p_data_addr =
		    &scsiq->sg_head->sg_list[sg_entry_cnt_minus_one].addr;
		p_data_bcount =
		    &scsiq->sg_head->sg_list[sg_entry_cnt_minus_one].bytes;

		n_q_required = adv_sgcount_to_qcount(sg_entry_cnt);
		scsiq->sg_head->queue_cnt = n_q_required - 1;
	} else {
		/* Single segment request fits in one queue. */
		p_data_addr = &scsiq->q1.data_addr;
		p_data_bcount = &scsiq->q1.data_cnt;
		n_q_required = 1;
	}

	disable_syn_offset_one_fix = FALSE;

	/*
	 * The offset-one async fix must be disabled for short transfers
	 * and for commands whose transfer length is unpredictable.
	 */
	if ((adv->fix_asyn_xfer & scsiq->q1.target_id) != 0
	 && (adv->fix_asyn_xfer_always & scsiq->q1.target_id) == 0) {

		if (datalen != 0) {
			if (datalen < 512) {
				disable_syn_offset_one_fix = TRUE;
			} else {
				if (scsiq->cdbptr[0] == INQUIRY
				 || scsiq->cdbptr[0] == REQUEST_SENSE
				 || scsiq->cdbptr[0] == READ_CAPACITY
				 || scsiq->cdbptr[0] == MODE_SELECT_6
				 || scsiq->cdbptr[0] == MODE_SENSE_6
				 || scsiq->cdbptr[0] == MODE_SENSE_10
				 || scsiq->cdbptr[0] == MODE_SELECT_10
				 || scsiq->cdbptr[0] == READ_TOC) {
					disable_syn_offset_one_fix = TRUE;
				}
			}
		}
	}

	if (disable_syn_offset_one_fix) {
		/* Tagged queuing is incompatible with the fix-disable flags. */
		scsiq->q2.tag_code &=
		    ~(MSG_SIMPLE_Q_TAG|MSG_HEAD_OF_Q_TAG|MSG_ORDERED_Q_TAG);
		scsiq->q2.tag_code |= (ADV_TAG_FLAG_DISABLE_ASYN_USE_SYN_FIX
				     | ADV_TAG_FLAG_DISABLE_DISCONNECT);
	}

	/*
	 * Work around the "if not dword boundary" chip bug for reads by
	 * shaving the unaligned tail bytes off the final segment; the
	 * firmware transfers them separately (ADV_TAG_FLAG_EXTRA_BYTES).
	 */
	if ((adv->bug_fix_control & ADV_BUG_FIX_IF_NOT_DWB) != 0
	 && (scsiq->cdbptr[0] == READ_10 || scsiq->cdbptr[0] == READ_6)) {
		u_int8_t extra_bytes;

		addr = *p_data_addr + *p_data_bcount;
		extra_bytes = addr & 0x0003;
		if (extra_bytes != 0
		 && ((scsiq->q1.cntl & QC_SG_HEAD) != 0
		  || (scsiq->q1.data_cnt & 0x01FF) == 0)) {
			scsiq->q2.tag_code |= ADV_TAG_FLAG_EXTRA_BYTES;
			scsiq->q1.extra_bytes = extra_bytes;
			*p_data_bcount -= extra_bytes;
		}
	}

	/* Submit only when enough queues are free, or when urgent. */
	if ((adv_get_num_free_queues(adv, n_q_required) >= n_q_required)
	 || ((scsiq->q1.cntl & QC_URGENT) != 0))
		retval = adv_send_scsi_queue(adv, scsiq, n_q_required);

	return (retval);
}
800
801
/*
 * Copy the completion ("done") information for a finished queue out
 * of LRAM into *scsiq.  Returns the number of S/G queues that were
 * chained to the request.  Each LRAM word read below packs two
 * logically separate byte-wide fields.
 */
u_int8_t
adv_copy_lram_doneq(struct adv_softc *adv, u_int16_t q_addr,
		    struct adv_q_done_info *scsiq, u_int32_t max_dma_count)
{
	u_int16_t val;
	u_int8_t  sg_queue_cnt;

	/* Burst-copy the d2/d3 sections of the queue structure. */
	adv_get_q_info(adv, q_addr + ADV_SCSIQ_DONE_INFO_BEG,
		       (u_int16_t *)scsiq,
		       (sizeof(scsiq->d2) + sizeof(scsiq->d3)) / 2);

#if BYTE_ORDER == BIG_ENDIAN
	adv_adj_endian_qdone_info(scsiq);
#endif

	val = adv_read_lram_16(adv, q_addr + ADV_SCSIQ_B_STATUS);
	scsiq->q_status = val & 0xFF;
	scsiq->q_no = (val >> 8) & 0XFF;

	val = adv_read_lram_16(adv, q_addr + ADV_SCSIQ_B_CNTL);
	scsiq->cntl = val & 0xFF;
	sg_queue_cnt = (val >> 8) & 0xFF;

	val = adv_read_lram_16(adv,q_addr + ADV_SCSIQ_B_SENSE_LEN);
	scsiq->sense_len = val & 0xFF;
	scsiq->extra_bytes = (val >> 8) & 0xFF;

	/*
	 * Due to a bug in accessing LRAM on the 940UA, the residual
	 * is split into separate high and low 16bit quantities.
	 */
	scsiq->remain_bytes =
	    adv_read_lram_16(adv, q_addr + ADV_SCSIQ_DW_REMAIN_XFER_CNT);
	scsiq->remain_bytes |=
	    adv_read_lram_16(adv, q_addr + ADV_SCSIQ_W_ALT_DC1) << 16;

	/*
	 * XXX Is this just a safeguard or will the counter really
	 * have bogus upper bits?
	 */
	scsiq->remain_bytes &= max_dma_count;

	return (sg_queue_cnt);
}
846
847int
848adv_start_chip(struct adv_softc *adv)
849{
850	ADV_OUTB(adv, ADV_CHIP_CTRL, 0);
851	if ((ADV_INW(adv, ADV_CHIP_STATUS) & ADV_CSW_HALTED) != 0)
852		return (0);
853	return (1);
854}
855
856int
857adv_stop_execution(struct adv_softc *adv)
858{
859	int count;
860
861	count = 0;
862	if (adv_read_lram_8(adv, ADV_STOP_CODE_B) == 0) {
863		adv_write_lram_8(adv, ADV_STOP_CODE_B,
864				 ADV_STOP_REQ_RISC_STOP);
865		do {
866			if (adv_read_lram_8(adv, ADV_STOP_CODE_B) &
867				ADV_STOP_ACK_RISC_STOP) {
868				return (1);
869			}
870			DELAY(1000);
871		} while (count++ < 20);
872	}
873	return (0);
874}
875
876int
877adv_is_chip_halted(struct adv_softc *adv)
878{
879	if ((ADV_INW(adv, ADV_CHIP_STATUS) & ADV_CSW_HALTED) != 0) {
880		if ((ADV_INB(adv, ADV_CHIP_CTRL) & ADV_CC_HALT) != 0) {
881			return (1);
882		}
883	}
884	return (0);
885}
886
887/*
888 * XXX The numeric constants and the loops in this routine
889 * need to be documented.
890 */
/*
 * XXX The numeric constants and the loops in this routine
 * need to be documented.
 *
 * Acknowledge an interrupt from the controller: wait for any
 * firmware-generated interrupt flag to clear (bounded spin), set the
 * ACK flag in the host flag byte, then pulse the INT_ACK command
 * until the pending bit drops (a few retries at most).
 */
void
adv_ack_interrupt(struct adv_softc *adv)
{
	u_int8_t	host_flag;
	u_int8_t	risc_flag;
	int		loop;

	/* Spin until the firmware drops its interrupt-generate flag. */
	loop = 0;
	do {
		risc_flag = adv_read_lram_8(adv, ADVV_RISC_FLAG_B);
		if (loop++ > 0x7FFF) {
			break;
		}
	} while ((risc_flag & ADV_RISC_FLAG_GEN_INT) != 0);

	/* Tell the firmware we are acknowledging the interrupt. */
	host_flag = adv_read_lram_8(adv, ADVV_HOST_FLAG_B);
	adv_write_lram_8(adv, ADVV_HOST_FLAG_B,
			 host_flag | ADV_HOST_FLAG_ACK_INT);

	ADV_OUTW(adv, ADV_CHIP_STATUS, ADV_CIW_INT_ACK);
	loop = 0;
	while (ADV_INW(adv, ADV_CHIP_STATUS) & ADV_CSW_INT_PENDING) {
		ADV_OUTW(adv, ADV_CHIP_STATUS, ADV_CIW_INT_ACK);
		if (loop++ > 3) {
			break;
		}
	}

	/* Restore the original host flag byte. */
	adv_write_lram_8(adv, ADVV_HOST_FLAG_B, host_flag);
}
921
922/*
923 * Handle all conditions that may halt the chip waiting
924 * for us to intervene.
925 */
926void
927adv_isr_chip_halted(struct adv_softc *adv)
928{
929	u_int16_t	  int_halt_code;
930	u_int16_t	  halt_q_addr;
931	target_bit_vector target_mask;
932	target_bit_vector scsi_busy;
933	u_int8_t	  halt_qp;
934	u_int8_t	  target_ix;
935	u_int8_t	  q_cntl;
936	u_int8_t	  tid_no;
937
938	int_halt_code = adv_read_lram_16(adv, ADVV_HALTCODE_W);
939	halt_qp = adv_read_lram_8(adv, ADVV_CURCDB_B);
940	halt_q_addr = ADV_QNO_TO_QADDR(halt_qp);
941	target_ix = adv_read_lram_8(adv, halt_q_addr + ADV_SCSIQ_B_TARGET_IX);
942	q_cntl = adv_read_lram_8(adv, halt_q_addr + ADV_SCSIQ_B_CNTL);
943	tid_no = ADV_TIX_TO_TID(target_ix);
944	target_mask = ADV_TID_TO_TARGET_MASK(tid_no);
945	if (int_halt_code == ADV_HALT_DISABLE_ASYN_USE_SYN_FIX) {
946		/*
947		 * Temporarily disable the async fix by removing
948		 * this target from the list of affected targets,
949		 * setting our async rate, and then putting us
950		 * back into the mask.
951		 */
952		adv->fix_asyn_xfer &= ~target_mask;
953		adv_set_syncrate(adv, /*struct cam_path */NULL,
954				 tid_no, /*period*/0, /*offset*/0,
955				 ADV_TRANS_ACTIVE);
956		adv->fix_asyn_xfer |= target_mask;
957	} else if (int_halt_code == ADV_HALT_ENABLE_ASYN_USE_SYN_FIX) {
958		adv_set_syncrate(adv, /*struct cam_path */NULL,
959				 tid_no, /*period*/0, /*offset*/0,
960				 ADV_TRANS_ACTIVE);
961	} else if (int_halt_code == ADV_HALT_EXTMSG_IN) {
962		adv_handle_extmsg_in(adv, halt_q_addr, q_cntl,
963				     target_mask, tid_no);
964	} else if (int_halt_code == ADV_HALT_CHK_CONDITION) {
965		struct	  adv_target_transinfo* tinfo;
966		union	  ccb *ccb;
967		u_int32_t cinfo_index;
968		u_int8_t  tag_code;
969		u_int8_t  q_status;
970
971		tinfo = &adv->tinfo[tid_no];
972		q_cntl |= QC_REQ_SENSE;
973
974		/* Renegotiate if appropriate. */
975		adv_set_syncrate(adv, /*struct cam_path */NULL,
976				 tid_no, /*period*/0, /*offset*/0,
977				 ADV_TRANS_CUR);
978		if (tinfo->current.period != tinfo->goal.period) {
979			adv_msgout_sdtr(adv, tinfo->goal.period,
980					tinfo->goal.offset);
981			q_cntl |= QC_MSG_OUT;
982		}
983		adv_write_lram_8(adv, halt_q_addr + ADV_SCSIQ_B_CNTL, q_cntl);
984
985		/* Don't tag request sense commands */
986		tag_code = adv_read_lram_8(adv,
987					   halt_q_addr + ADV_SCSIQ_B_TAG_CODE);
988		tag_code &=
989		    ~(MSG_SIMPLE_Q_TAG|MSG_HEAD_OF_Q_TAG|MSG_ORDERED_Q_TAG);
990
991		if ((adv->fix_asyn_xfer & target_mask) != 0
992		 && (adv->fix_asyn_xfer_always & target_mask) == 0) {
993			tag_code |= (ADV_TAG_FLAG_DISABLE_DISCONNECT
994				 | ADV_TAG_FLAG_DISABLE_ASYN_USE_SYN_FIX);
995		}
996		adv_write_lram_8(adv, halt_q_addr + ADV_SCSIQ_B_TAG_CODE,
997				 tag_code);
998		q_status = adv_read_lram_8(adv,
999					   halt_q_addr + ADV_SCSIQ_B_STATUS);
1000		q_status |= (QS_READY | QS_BUSY);
1001		adv_write_lram_8(adv, halt_q_addr + ADV_SCSIQ_B_STATUS,
1002				 q_status);
1003		/*
1004		 * Freeze the devq until we can handle the sense condition.
1005		 */
1006		cinfo_index =
1007		    adv_read_lram_32(adv, halt_q_addr + ADV_SCSIQ_D_CINFO_IDX);
1008		ccb = adv->ccb_infos[cinfo_index].ccb;
1009		xpt_freeze_devq(ccb->ccb_h.path, /*count*/1);
1010		ccb->ccb_h.status |= CAM_DEV_QFRZN;
1011		adv_abort_ccb(adv, tid_no, ADV_TIX_TO_LUN(target_ix),
1012			      /*ccb*/NULL, CAM_REQUEUE_REQ,
1013			      /*queued_only*/TRUE);
1014		scsi_busy = adv_read_lram_8(adv, ADVV_SCSIBUSY_B);
1015		scsi_busy &= ~target_mask;
1016		adv_write_lram_8(adv, ADVV_SCSIBUSY_B, scsi_busy);
1017		/*
1018		 * Ensure we have enough time to actually
1019		 * retrieve the sense.
1020		 */
1021		untimeout(adv_timeout, (caddr_t)ccb, ccb->ccb_h.timeout_ch);
1022		ccb->ccb_h.timeout_ch =
1023		    timeout(adv_timeout, (caddr_t)ccb, 5 * hz);
1024	} else if (int_halt_code == ADV_HALT_SDTR_REJECTED) {
1025		struct	ext_msg out_msg;
1026
1027		adv_read_lram_16_multi(adv, ADVV_MSGOUT_BEG,
1028				       (u_int16_t *) &out_msg,
1029				       sizeof(out_msg)/2);
1030
1031		if ((out_msg.msg_type == MSG_EXTENDED)
1032		 && (out_msg.msg_len == MSG_EXT_SDTR_LEN)
1033		 && (out_msg.msg_req == MSG_EXT_SDTR)) {
1034
1035			/* Revert to Async */
1036			adv_set_syncrate(adv, /*struct cam_path */NULL,
1037					 tid_no, /*period*/0, /*offset*/0,
1038					 ADV_TRANS_GOAL|ADV_TRANS_ACTIVE);
1039		}
1040		q_cntl &= ~QC_MSG_OUT;
1041		adv_write_lram_8(adv, halt_q_addr + ADV_SCSIQ_B_CNTL, q_cntl);
1042	} else if (int_halt_code == ADV_HALT_SS_QUEUE_FULL) {
1043		u_int8_t scsi_status;
1044		union ccb *ccb;
1045		u_int32_t cinfo_index;
1046
1047		scsi_status = adv_read_lram_8(adv, halt_q_addr
1048					      + ADV_SCSIQ_SCSI_STATUS);
1049		cinfo_index =
1050		    adv_read_lram_32(adv, halt_q_addr + ADV_SCSIQ_D_CINFO_IDX);
1051		ccb = adv->ccb_infos[cinfo_index].ccb;
1052		xpt_freeze_devq(ccb->ccb_h.path, /*count*/1);
1053		ccb->ccb_h.status |= CAM_DEV_QFRZN|CAM_SCSI_STATUS_ERROR;
1054		ccb->csio.scsi_status = SCSI_STATUS_QUEUE_FULL;
1055		adv_abort_ccb(adv, tid_no, ADV_TIX_TO_LUN(target_ix),
1056			      /*ccb*/NULL, CAM_REQUEUE_REQ,
1057			      /*queued_only*/TRUE);
1058		scsi_busy = adv_read_lram_8(adv, ADVV_SCSIBUSY_B);
1059		scsi_busy &= ~target_mask;
1060		adv_write_lram_8(adv, ADVV_SCSIBUSY_B, scsi_busy);
1061	} else {
1062		printf("Unhandled Halt Code %x\n", int_halt_code);
1063	}
1064	adv_write_lram_16(adv, ADVV_HALTCODE_W, 0);
1065}
1066
1067void
1068adv_sdtr_to_period_offset(struct adv_softc *adv,
1069			  u_int8_t sync_data, u_int8_t *period,
1070			  u_int8_t *offset, int tid)
1071{
1072	if (adv->fix_asyn_xfer & ADV_TID_TO_TARGET_MASK(tid)
1073	 && (sync_data == ASYN_SDTR_DATA_FIX_PCI_REV_AB)) {
1074		*period = *offset = 0;
1075	} else {
1076		*period = adv->sdtr_period_tbl[((sync_data >> 4) & 0xF)];
1077		*offset = sync_data & 0xF;
1078	}
1079}
1080
/*
 * Update the transfer negotiation settings for target `tid`.  `type`
 * selects which settings (current/goal/user) are updated.  When the
 * active (current) settings change, the chip is halted (if it is not
 * already), the new SDTR data is programmed into its registers, the
 * chip is restarted, and - if a path was supplied - CAM is notified
 * of the new transfer parameters.
 */
void
adv_set_syncrate(struct adv_softc *adv, struct cam_path *path,
		 u_int tid, u_int period, u_int offset, u_int type)
{
	struct adv_target_transinfo* tinfo;
	u_int old_period;
	u_int old_offset;
	u_int8_t sdtr_data;

	tinfo = &adv->tinfo[tid];

	/* Filter our input */
	sdtr_data = adv_period_offset_to_sdtr(adv, &period,
					      &offset, tid);

	old_period = tinfo->current.period;
	old_offset = tinfo->current.offset;

	if ((type & ADV_TRANS_CUR) != 0
	 && ((old_period != period || old_offset != offset)
	  || period == 0 || offset == 0) /*Changes in asyn fix settings*/) {
		int s;
		int halted;

		/* Block interrupts while touching chip state */
		s = splcam();
		halted = adv_is_chip_halted(adv);
		if (halted == 0)
			/* Must halt the chip first */
			adv_host_req_chip_halt(adv);

		/* Update current hardware settings */
		adv_set_sdtr_reg_at_id(adv, tid, sdtr_data);

		/*
		 * If a target can run in sync mode, we don't need
		 * to check it for sync problems.
		 */
		if (offset != 0)
			adv->fix_asyn_xfer &= ~ADV_TID_TO_TARGET_MASK(tid);

		if (halted == 0)
			/* Start the chip again */
			adv_start_chip(adv);

		splx(s);
		tinfo->current.period = period;
		tinfo->current.offset = offset;

		if (path != NULL) {
			/*
			 * Tell the SCSI layer about the
			 * new transfer parameters.
			 */
			struct	ccb_trans_settings neg;

			neg.sync_period = period;
			neg.sync_offset = offset;
			neg.valid = CCB_TRANS_SYNC_RATE_VALID
				  | CCB_TRANS_SYNC_OFFSET_VALID;
			xpt_setup_ccb(&neg.ccb_h, path, /*priority*/1);
			xpt_async(AC_TRANSFER_NEG, path, &neg);
		}
	}

	/* Record the rate we will negotiate toward */
	if ((type & ADV_TRANS_GOAL) != 0) {
		tinfo->goal.period = period;
		tinfo->goal.offset = offset;
	}

	/* Record the user's (e.g. EEPROM configured) limit */
	if ((type & ADV_TRANS_USER) != 0) {
		tinfo->user.period = period;
		tinfo->user.offset = offset;
	}
}
1155
/*
 * Clamp *period and *offset to values this chip can do and return the
 * corresponding SDTR register encoding (period table index in the high
 * nibble, REQ/ACK offset in the low nibble).  Callers may pass NULL
 * for period or offset; a local zero is substituted.  If no period
 * table entry is slow enough, both values are forced to 0 (async);
 * targets needing the PCI rev A/B async fix get the special fix
 * encoding instead of 0.
 */
u_int8_t
adv_period_offset_to_sdtr(struct adv_softc *adv, u_int *period,
			  u_int *offset, int tid)
{
	u_int i;
	u_int dummy_offset;
	u_int dummy_period;

	if (offset == NULL) {
		dummy_offset = 0;
		offset = &dummy_offset;
	}

	if (period == NULL) {
		dummy_period = 0;
		period = &dummy_period;
	}

#define MIN(a,b) (((a) < (b)) ? (a) : (b))

	/* The chip cannot exceed its maximum REQ/ACK offset */
	*offset = MIN(ADV_SYN_MAX_OFFSET, *offset);
	if (*period != 0 && *offset != 0) {
		/* Table is ordered fastest (index 0) to slowest */
		for (i = 0; i < adv->sdtr_period_tbl_size; i++) {
			if (*period <= adv->sdtr_period_tbl[i]) {
				/*
				 * When responding to a target that requests
				 * sync, the requested  rate may fall between
				 * two rates that we can output, but still be
				 * a rate that we can receive.  Because of this,
				 * we want to respond to the target with
				 * the same rate that it sent to us even
				 * if the period we use to send data to it
				 * is lower.  Only lower the response period
				 * if we must.
				 */
				if (i == 0 /* Our maximum rate */)
					*period = adv->sdtr_period_tbl[0];
				return ((i << 4) | *offset);
			}
		}
	}

	/* Must go async */
	*period = 0;
	*offset = 0;
	if (adv->fix_asyn_xfer & ADV_TID_TO_TARGET_MASK(tid))
		return (ASYN_SDTR_DATA_FIX_PCI_REV_AB);
	return (0);
}
1205
1206/* Internal Routines */
1207
/*
 * Read `count` 16-bit words of LRAM starting at s_addr into buffer.
 * The data port auto-increments once the address register is latched.
 */
static void
adv_read_lram_16_multi(struct adv_softc *adv, u_int16_t s_addr,
		       u_int16_t *buffer, int count)
{
	ADV_OUTW(adv, ADV_LRAM_ADDR, s_addr);
	ADV_INSW(adv, ADV_LRAM_DATA, buffer, count);
}
1215
/*
 * Write `count` 16-bit words from buffer into LRAM starting at s_addr.
 * The data port auto-increments once the address register is latched.
 */
static void
adv_write_lram_16_multi(struct adv_softc *adv, u_int16_t s_addr,
			u_int16_t *buffer, int count)
{
	ADV_OUTW(adv, ADV_LRAM_ADDR, s_addr);
	ADV_OUTSW(adv, ADV_LRAM_DATA, buffer, count);
}
1223
/*
 * Fill `count` consecutive 16-bit words of LRAM, starting at s_addr,
 * with set_value.
 */
static void
adv_mset_lram_16(struct adv_softc *adv, u_int16_t s_addr,
		 u_int16_t set_value, int count)
{
	ADV_OUTW(adv, ADV_LRAM_ADDR, s_addr);
	bus_space_set_multi_2(adv->tag, adv->bsh, ADV_LRAM_DATA,
			      set_value, count);
}
1232
1233static u_int32_t
1234adv_msum_lram_16(struct adv_softc *adv, u_int16_t s_addr, int count)
1235{
1236	u_int32_t	sum;
1237	int		i;
1238
1239	sum = 0;
1240	ADV_OUTW(adv, ADV_LRAM_ADDR, s_addr);
1241	for (i = 0; i < count; i++)
1242		sum += ADV_INW(adv, ADV_LRAM_DATA);
1243	return (sum);
1244}
1245
1246static int
1247adv_write_and_verify_lram_16(struct adv_softc *adv, u_int16_t addr,
1248			     u_int16_t value)
1249{
1250	int	retval;
1251
1252	retval = 0;
1253	ADV_OUTW(adv, ADV_LRAM_ADDR, addr);
1254	ADV_OUTW(adv, ADV_LRAM_DATA, value);
1255	DELAY(10000);
1256	ADV_OUTW(adv, ADV_LRAM_ADDR, addr);
1257	if (value != ADV_INW(adv, ADV_LRAM_DATA))
1258		retval = 1;
1259	return (retval);
1260}
1261
/*
 * Read a 32-bit value from LRAM at addr as two back-to-back 16-bit
 * port reads.  The order in which the chip delivers the low and high
 * halves depends on host byte order, hence the conditional below.
 */
static u_int32_t
adv_read_lram_32(struct adv_softc *adv, u_int16_t addr)
{
	u_int16_t           val_low, val_high;

	ADV_OUTW(adv, ADV_LRAM_ADDR, addr);

#if BYTE_ORDER == BIG_ENDIAN
	val_high = ADV_INW(adv, ADV_LRAM_DATA);
	val_low = ADV_INW(adv, ADV_LRAM_DATA);
#else
	val_low = ADV_INW(adv, ADV_LRAM_DATA);
	val_high = ADV_INW(adv, ADV_LRAM_DATA);
#endif

	return (((u_int32_t)val_high << 16) | (u_int32_t)val_low);
}
1279
/*
 * Write a 32-bit value to LRAM at addr as two back-to-back 16-bit
 * port writes, mirroring the half-word ordering used by
 * adv_read_lram_32() for this host's byte order.
 */
static void
adv_write_lram_32(struct adv_softc *adv, u_int16_t addr, u_int32_t value)
{
	ADV_OUTW(adv, ADV_LRAM_ADDR, addr);

#if BYTE_ORDER == BIG_ENDIAN
	ADV_OUTW(adv, ADV_LRAM_DATA, (u_int16_t)((value >> 16) & 0xFFFF));
	ADV_OUTW(adv, ADV_LRAM_DATA, (u_int16_t)(value & 0xFFFF));
#else
	ADV_OUTW(adv, ADV_LRAM_DATA, (u_int16_t)(value & 0xFFFF));
	ADV_OUTW(adv, ADV_LRAM_DATA, (u_int16_t)((value >> 16) & 0xFFFF));
#endif
}
1293
/*
 * Write `count` 32-bit entries from buffer into LRAM starting at
 * s_addr.  Each entry is two 16-bit port words, hence count * 2.
 */
static void
adv_write_lram_32_multi(struct adv_softc *adv, u_int16_t s_addr,
			u_int32_t *buffer, int count)
{
	ADV_OUTW(adv, ADV_LRAM_ADDR, s_addr);
	ADV_OUTSW(adv, ADV_LRAM_DATA, (u_int16_t *)buffer, count * 2);
}
1301
/*
 * Read the 16-bit EEPROM word at addr.  Writes are disabled first,
 * then a read command is latched.  The 1ms delays give the serial
 * EEPROM time to complete each command phase.
 */
static u_int16_t
adv_read_eeprom_16(struct adv_softc *adv, u_int8_t addr)
{
	u_int16_t read_wval;
	u_int8_t  cmd_reg;

	adv_write_eeprom_cmd_reg(adv, ADV_EEPROM_CMD_WRITE_DISABLE);
	DELAY(1000);
	cmd_reg = addr | ADV_EEPROM_CMD_READ;
	adv_write_eeprom_cmd_reg(adv, cmd_reg);
	DELAY(1000);
	read_wval = ADV_INW(adv, ADV_EEPROM_DATA);
	DELAY(1000);
	return (read_wval);
}
1317
/*
 * Program the 16-bit EEPROM word at addr with value, skipping the
 * slow program cycle entirely if the EEPROM already holds the value.
 * Returns the word read back afterwards; callers compare it against
 * `value` to detect a failed write.
 */
static u_int16_t
adv_write_eeprom_16(struct adv_softc *adv, u_int8_t addr, u_int16_t value)
{
	u_int16_t	read_value;

	read_value = adv_read_eeprom_16(adv, addr);
	if (read_value != value) {
		adv_write_eeprom_cmd_reg(adv, ADV_EEPROM_CMD_WRITE_ENABLE);
		DELAY(1000);

		ADV_OUTW(adv, ADV_EEPROM_DATA, value);
		DELAY(1000);

		/* The program cycle itself needs a much longer settle */
		adv_write_eeprom_cmd_reg(adv, ADV_EEPROM_CMD_WRITE | addr);
		DELAY(20 * 1000);

		adv_write_eeprom_cmd_reg(adv, ADV_EEPROM_CMD_WRITE_DISABLE);
		DELAY(1000);
		read_value = adv_read_eeprom_16(adv, addr);
	}
	return (read_value);
}
1340
1341static int
1342adv_write_eeprom_cmd_reg(struct adv_softc *adv, u_int8_t cmd_reg)
1343{
1344	u_int8_t read_back;
1345	int	 retry;
1346
1347	retry = 0;
1348	while (1) {
1349		ADV_OUTB(adv, ADV_EEPROM_CMD, cmd_reg);
1350		DELAY(1000);
1351		read_back = ADV_INB(adv, ADV_EEPROM_CMD);
1352		if (read_back == cmd_reg) {
1353			return (1);
1354		}
1355		if (retry++ > ADV_EEPROM_MAX_RETRY) {
1356			return (0);
1357		}
1358	}
1359}
1360
/*
 * Write eeprom_config out to the EEPROM and verify it.  Words 0-1 and
 * the configuration region cfg_beg..cfg_end-1 are programmed, a
 * 16-bit checksum over those words is stored at cfg_end, and the
 * whole range is then read back and compared.  The buffer is walked
 * linearly while the EEPROM addresses jump from word 1 to cfg_beg.
 * Returns the number of words that failed to program or verify
 * (0 on complete success).
 */
static int
adv_set_eeprom_config_once(struct adv_softc *adv,
			   struct adv_eeprom_config *eeprom_config)
{
	int		n_error;
	u_int16_t	*wbuf;
	u_int16_t	sum;
	u_int8_t	s_addr;
	u_int8_t	cfg_beg;
	u_int8_t	cfg_end;

	wbuf = (u_int16_t *)eeprom_config;
	n_error = 0;
	sum = 0;
	/* First two words live at EEPROM addresses 0 and 1 */
	for (s_addr = 0; s_addr < 2; s_addr++, wbuf++) {
		sum += *wbuf;
		if (*wbuf != adv_write_eeprom_16(adv, s_addr, *wbuf)) {
			n_error++;
		}
	}
	/* VL cards use a different configuration window */
	if (adv->type & ADV_VL) {
		cfg_beg = ADV_EEPROM_CFG_BEG_VL;
		cfg_end = ADV_EEPROM_MAX_ADDR_VL;
	} else {
		cfg_beg = ADV_EEPROM_CFG_BEG;
		cfg_end = ADV_EEPROM_MAX_ADDR;
	}

	for (s_addr = cfg_beg; s_addr <= (cfg_end - 1); s_addr++, wbuf++) {
		sum += *wbuf;
		if (*wbuf != adv_write_eeprom_16(adv, s_addr, *wbuf)) {
			n_error++;
		}
	}
	/* Checksum word goes at cfg_end and into the config image */
	*wbuf = sum;
	if (sum != adv_write_eeprom_16(adv, s_addr, sum)) {
		n_error++;
	}
	/* Verify pass: re-read everything, same address pattern */
	wbuf = (u_int16_t *)eeprom_config;
	for (s_addr = 0; s_addr < 2; s_addr++, wbuf++) {
		if (*wbuf != adv_read_eeprom_16(adv, s_addr)) {
			n_error++;
		}
	}
	for (s_addr = cfg_beg; s_addr <= cfg_end; s_addr++, wbuf++) {
		if (*wbuf != adv_read_eeprom_16(adv, s_addr)) {
			n_error++;
		}
	}
	return (n_error);
}
1412
/*
 * Copy the microcode image (mcode_size bytes) into LRAM at s_addr and
 * store the image size and a 16-bit checksum of its code section in
 * the microcode variables for the firmware's self-check.  Returns the
 * 32-bit sum of the LRAM words just written.
 */
static u_int32_t
adv_load_microcode(struct adv_softc *adv, u_int16_t s_addr,
		   u_int16_t *mcode_buf, u_int16_t mcode_size)
{
	u_int32_t chksum;
	u_int16_t mcode_lram_size;
	u_int16_t mcode_chksum;

	mcode_lram_size = mcode_size >> 1;
	/* XXX Why zero the memory just before you write the whole thing?? */
	adv_mset_lram_16(adv, s_addr, 0, mcode_lram_size);
	adv_write_lram_16_multi(adv, s_addr, mcode_buf, mcode_lram_size);

	/* Checksum of the full image, computed from what landed in LRAM */
	chksum = adv_msum_lram_16(adv, s_addr, mcode_lram_size);
	/* 16-bit checksum over the code section only */
	mcode_chksum = (u_int16_t)adv_msum_lram_16(adv, ADV_CODE_SEC_BEG,
						   ((mcode_size - s_addr
						     - ADV_CODE_SEC_BEG) >> 1));
	adv_write_lram_16(adv, ADVV_MCODE_CHKSUM_W, mcode_chksum);
	adv_write_lram_16(adv, ADVV_MCODE_SIZE_W, mcode_size);
	return (chksum);
}
1434
/*
 * Rebuild the LRAM queue blocks and the microcode queue-link
 * variables (used after a chip reset).
 */
static void
adv_reinit_lram(struct adv_softc *adv)
{
	adv_init_lram(adv);
	adv_init_qlink_var(adv);
}
1440
/*
 * Lay out the request queue blocks in LRAM.  Queues ADV_MIN_ACTIVE_QNO
 * through max_openings are chained into a doubly-linked list via their
 * forward/backward/queue-number bytes; the extra blocks beyond
 * max_openings are written self-referencing.
 */
static void
adv_init_lram(struct adv_softc *adv)
{
	u_int8_t  i;
	u_int16_t s_addr;

	/* Zero the whole queue area first */
	adv_mset_lram_16(adv, ADV_QADR_BEG, 0,
			 (((adv->max_openings + 2 + 1) * 64) >> 1));

	i = ADV_MIN_ACTIVE_QNO;
	s_addr = ADV_QADR_BEG + ADV_QBLK_SIZE;

	/* First queue: backward link wraps to the last queue */
	adv_write_lram_8(adv, s_addr + ADV_SCSIQ_B_FWD,	i + 1);
	adv_write_lram_8(adv, s_addr + ADV_SCSIQ_B_BWD, adv->max_openings);
	adv_write_lram_8(adv, s_addr + ADV_SCSIQ_B_QNO, i);
	i++;
	s_addr += ADV_QBLK_SIZE;
	/* Middle queues link to their immediate neighbors */
	for (; i < adv->max_openings; i++, s_addr += ADV_QBLK_SIZE) {
		adv_write_lram_8(adv, s_addr + ADV_SCSIQ_B_FWD, i + 1);
		adv_write_lram_8(adv, s_addr + ADV_SCSIQ_B_BWD, i - 1);
		adv_write_lram_8(adv, s_addr + ADV_SCSIQ_B_QNO, i);
	}

	/* Last queue: forward link is the end-of-list marker */
	adv_write_lram_8(adv, s_addr + ADV_SCSIQ_B_FWD, ADV_QLINK_END);
	adv_write_lram_8(adv, s_addr + ADV_SCSIQ_B_BWD, adv->max_openings - 1);
	adv_write_lram_8(adv, s_addr + ADV_SCSIQ_B_QNO, adv->max_openings);
	i++;
	s_addr += ADV_QBLK_SIZE;

	/* Trailing blocks are self-linked */
	for (; i <= adv->max_openings + 3; i++, s_addr += ADV_QBLK_SIZE) {
		adv_write_lram_8(adv, s_addr + ADV_SCSIQ_B_FWD, i);
		adv_write_lram_8(adv, s_addr + ADV_SCSIQ_B_BWD, i);
		adv_write_lram_8(adv, s_addr + ADV_SCSIQ_B_QNO, i);
	}
}
1476
/*
 * Initialize the microcode's LRAM variables after the microcode has
 * been loaded: revert all targets to async, set up the queue links,
 * publish the host SCSI ID, disconnect mask, and overrun buffer, and
 * point the chip's program counter at the microcode entry point.
 * Returns 0 on success, 1 if the program counter could not be set.
 */
static int
adv_init_microcode_var(struct adv_softc *adv)
{
	int	 i;

	for (i = 0; i <= ADV_MAX_TID; i++) {

		/* Start out async all around */
		adv_set_syncrate(adv, /*path*/NULL,
				 i, 0, 0,
				 ADV_TRANS_GOAL|ADV_TRANS_CUR);
	}

	adv_init_qlink_var(adv);

	adv_write_lram_8(adv, ADVV_DISC_ENABLE_B, adv->disc_enable);
	adv_write_lram_8(adv, ADVV_HOSTSCSI_ID_B, 0x01 << adv->scsi_id);

	/* Physical address and size of the data overrun buffer */
	adv_write_lram_32(adv, ADVV_OVERRUN_PADDR_D, adv->overrun_physbase);

	adv_write_lram_32(adv, ADVV_OVERRUN_BSIZE_D, ADV_OVERRUN_BSIZE);

	ADV_OUTW(adv, ADV_REG_PROG_COUNTER, ADV_MCODE_START_ADDR);
	if (ADV_INW(adv, ADV_REG_PROG_COUNTER) != ADV_MCODE_START_ADDR) {
		printf("adv%d: Unable to set program counter. Aborting.\n",
		       adv->unit);
		return (1);
	}
	return (0);
}
1507
/*
 * Reset the microcode's queue bookkeeping variables: free/done list
 * heads and tails, the busy and disconnect queue heads (which sit in
 * the blocks just past max_openings), ready-queue count, and the
 * error/halt/stop/busy flags.  The first 32 words of the queue
 * address area are cleared as well.
 */
static void
adv_init_qlink_var(struct adv_softc *adv)
{
	int	  i;
	u_int16_t lram_addr;

	adv_write_lram_8(adv, ADVV_NEXTRDY_B, 1);
	adv_write_lram_8(adv, ADVV_DONENEXT_B, adv->max_openings);

	adv_write_lram_16(adv, ADVV_FREE_Q_HEAD_W, 1);
	adv_write_lram_16(adv, ADVV_DONE_Q_TAIL_W, adv->max_openings);

	/* The two queues past max_openings serve as busy/disc heads */
	adv_write_lram_8(adv, ADVV_BUSY_QHEAD_B,
			 (u_int8_t)((int) adv->max_openings + 1));
	adv_write_lram_8(adv, ADVV_DISC1_QHEAD_B,
			 (u_int8_t)((int) adv->max_openings + 2));

	adv_write_lram_8(adv, ADVV_TOTAL_READY_Q_B, adv->max_openings);

	adv_write_lram_16(adv, ADVV_ASCDVC_ERR_CODE_W, 0);
	adv_write_lram_16(adv, ADVV_HALTCODE_W, 0);
	adv_write_lram_8(adv, ADVV_STOP_CODE_B, 0);
	adv_write_lram_8(adv, ADVV_SCSIBUSY_B, 0);
	adv_write_lram_8(adv, ADVV_WTM_FLAG_B, 0);
	adv_write_lram_8(adv, ADVV_Q_DONE_IN_PROGRESS_B, 0);

	lram_addr = ADV_QADR_BEG;
	for (i = 0; i < 32; i++, lram_addr += 2)
		adv_write_lram_16(adv, lram_addr, 0);
}
1538
1539static void
1540adv_disable_interrupt(struct adv_softc *adv)
1541{
1542	u_int16_t cfg;
1543
1544	cfg = ADV_INW(adv, ADV_CONFIG_LSW);
1545	ADV_OUTW(adv, ADV_CONFIG_LSW, cfg & ~ADV_CFG_LSW_HOST_INT_ON);
1546}
1547
1548static void
1549adv_enable_interrupt(struct adv_softc *adv)
1550{
1551	u_int16_t cfg;
1552
1553	cfg = ADV_INW(adv, ADV_CONFIG_LSW);
1554	ADV_OUTW(adv, ADV_CONFIG_LSW, cfg | ADV_CFG_LSW_HOST_INT_ON);
1555}
1556
/*
 * Pulse the IRQ-active bit in the chip status register (set it, then
 * clear the register) to toggle the interrupt line.
 */
static void
adv_toggle_irq_act(struct adv_softc *adv)
{
	ADV_OUTW(adv, ADV_CHIP_STATUS, ADV_CIW_IRQ_ACT);
	ADV_OUTW(adv, ADV_CHIP_STATUS, 0);
}
1563
/*
 * Clear any pending stop code so the microcode resumes execution.
 *
 * NOTE(review): this uses ADV_STOP_CODE_B while the rest of this file
 * uses ADVV_STOP_CODE_B for the stop-code LRAM variable — confirm in
 * the header that both names refer to the same location.
 */
void
adv_start_execution(struct adv_softc *adv)
{
	if (adv_read_lram_8(adv, ADV_STOP_CODE_B) != 0) {
		adv_write_lram_8(adv, ADV_STOP_CODE_B, 0);
	}
}
1571
1572int
1573adv_stop_chip(struct adv_softc *adv)
1574{
1575	u_int8_t cc_val;
1576
1577	cc_val = ADV_INB(adv, ADV_CHIP_CTRL)
1578		 & (~(ADV_CC_SINGLE_STEP | ADV_CC_TEST | ADV_CC_DIAG));
1579	ADV_OUTB(adv, ADV_CHIP_CTRL, cc_val | ADV_CC_HALT);
1580	adv_set_chip_ih(adv, ADV_INS_HALT);
1581	adv_set_chip_ih(adv, ADV_INS_RFLAG_WTM);
1582	if ((ADV_INW(adv, ADV_CHIP_STATUS) & ADV_CSW_HALTED) == 0) {
1583		return (0);
1584	}
1585	return (1);
1586}
1587
/*
 * Ask the running microcode to halt the RISC engine by posting a stop
 * request in the stop-code variable, then spin (bounded at 2000
 * polls) until the chip reports halted.  The previous stop code is
 * restored before returning.  Returns non-zero if the chip is halted
 * (including the already-halted case), 0 if the spin limit expired.
 */
static int
adv_host_req_chip_halt(struct adv_softc *adv)
{
	int	 count;
	u_int8_t saved_stop_code;

	if (adv_is_chip_halted(adv))
		return (1);

	count = 0;
	saved_stop_code = adv_read_lram_8(adv, ADVV_STOP_CODE_B);
	adv_write_lram_8(adv, ADVV_STOP_CODE_B,
			 ADV_STOP_HOST_REQ_RISC_HALT | ADV_STOP_REQ_RISC_STOP);
	while (adv_is_chip_halted(adv) == 0
	    && count++ < 2000)
		;

	adv_write_lram_8(adv, ADVV_STOP_CODE_B, saved_stop_code);
	return (count < 2000);
}
1608
/*
 * Feed an instruction code to the chip via the IH register, which
 * lives in register bank 1.  Bank 0 is restored before returning.
 */
static void
adv_set_chip_ih(struct adv_softc *adv, u_int16_t ins_code)
{
	adv_set_bank(adv, 1);
	ADV_OUTW(adv, ADV_REG_IH, ins_code);
	adv_set_bank(adv, 0);
}
1616
#if UNUSED
/*
 * Read the SCSI control register, which lives in register bank 1.
 * Bank 0 is restored before returning.  (Currently compiled out.)
 */
static u_int8_t
adv_get_chip_scsi_ctrl(struct adv_softc *adv)
{
	u_int8_t scsi_ctrl;

	adv_set_bank(adv, 1);
	scsi_ctrl = ADV_INB(adv, ADV_REG_SC);
	adv_set_bank(adv, 0);
	return (scsi_ctrl);
}
#endif
1629
1630/*
1631 * XXX Looks like more padding issues in this routine as well.
1632 *     There has to be a way to turn this into an insw.
1633 */
1634static void
1635adv_get_q_info(struct adv_softc *adv, u_int16_t s_addr,
1636	       u_int16_t *inbuf, int words)
1637{
1638	int	i;
1639
1640	ADV_OUTW(adv, ADV_LRAM_ADDR, s_addr);
1641	for (i = 0; i < words; i++, inbuf++) {
1642		if (i == 5) {
1643			continue;
1644		}
1645		*inbuf = ADV_INW(adv, ADV_LRAM_DATA);
1646	}
1647}
1648
1649static u_int
1650adv_get_num_free_queues(struct adv_softc *adv, u_int8_t n_qs)
1651{
1652	u_int	  cur_used_qs;
1653	u_int	  cur_free_qs;
1654
1655	cur_used_qs = adv->cur_active + ADV_MIN_FREE_Q;
1656
1657	if ((cur_used_qs + n_qs) <= adv->max_openings) {
1658		cur_free_qs = adv->max_openings - cur_used_qs;
1659		return (cur_free_qs);
1660	}
1661	adv->openings_needed = n_qs;
1662	return (0);
1663}
1664
1665static u_int8_t
1666adv_alloc_free_queues(struct adv_softc *adv, u_int8_t free_q_head,
1667		      u_int8_t n_free_q)
1668{
1669	int i;
1670
1671	for (i = 0; i < n_free_q; i++) {
1672		free_q_head = adv_alloc_free_queue(adv, free_q_head);
1673		if (free_q_head == ADV_QLINK_END)
1674			break;
1675	}
1676	return (free_q_head);
1677}
1678
1679static u_int8_t
1680adv_alloc_free_queue(struct adv_softc *adv, u_int8_t free_q_head)
1681{
1682	u_int16_t	q_addr;
1683	u_int8_t	next_qp;
1684	u_int8_t	q_status;
1685
1686	next_qp = ADV_QLINK_END;
1687	q_addr = ADV_QNO_TO_QADDR(free_q_head);
1688	q_status = adv_read_lram_8(adv,	q_addr + ADV_SCSIQ_B_STATUS);
1689
1690	if ((q_status & QS_READY) == 0)
1691		next_qp = adv_read_lram_8(adv, q_addr + ADV_SCSIQ_B_FWD);
1692
1693	return (next_qp);
1694}
1695
/*
 * Hand scsiq to the microcode.  n_q_required LRAM queues are claimed
 * from the free list; on success the request (and any SG chain) is
 * copied into them, the free-list head is advanced, and cur_active is
 * bumped.  Returns 0 on success, 1 if not enough free queues were
 * available.
 */
static int
adv_send_scsi_queue(struct adv_softc *adv, struct adv_scsi_q *scsiq,
		    u_int8_t n_q_required)
{
	u_int8_t	free_q_head;
	u_int8_t	next_qp;
	u_int8_t	tid_no;
	u_int8_t	target_ix;
	int		retval;

	retval = 1;
	target_ix = scsiq->q2.target_ix;
	tid_no = ADV_TIX_TO_TID(target_ix);
	free_q_head = adv_read_lram_16(adv, ADVV_FREE_Q_HEAD_W) & 0xFF;
	if ((next_qp = adv_alloc_free_queues(adv, free_q_head, n_q_required))
	    != ADV_QLINK_END) {
		scsiq->q1.q_no = free_q_head;

		/*
		 * Now that we know our Q number, point our sense
		 * buffer pointer to a bus dma mapped area where
		 * we can dma the data to.
		 */
		scsiq->q1.sense_addr = adv->sense_physbase
		    + ((free_q_head - 1) * sizeof(struct scsi_sense_data));
		adv_put_ready_sg_list_queue(adv, scsiq, free_q_head);
		adv_write_lram_16(adv, ADVV_FREE_Q_HEAD_W, next_qp);
		adv->cur_active += n_q_required;
		retval = 0;
	}
	return (retval);
}
1728
1729
/*
 * Stage the scatter/gather continuation queues for scsiq into the
 * chain of LRAM queues linked from q_no, then pass the head queue to
 * adv_put_ready_queue() to be marked ready.  The copy starts at
 * sg_list[1] with entry_cnt - 1 elements — the first SG element is
 * presumably carried in the head queue itself (set up elsewhere;
 * confirm against adv_put_ready_queue/caller).
 */
static void
adv_put_ready_sg_list_queue(struct adv_softc *adv, struct adv_scsi_q *scsiq,
			    u_int q_no)
{
	u_int8_t	sg_list_dwords;
	u_int8_t	sg_index, i;
	u_int8_t	sg_entry_cnt;
	u_int8_t	next_qp;
	u_int16_t	q_addr;
	struct		adv_sg_head *sg_head;
	struct		adv_sg_list_q scsi_sg_q;

	sg_head = scsiq->sg_head;

	if (sg_head) {
		sg_entry_cnt = sg_head->entry_cnt - 1;
#ifdef DIAGNOSTIC
		if (sg_entry_cnt == 0)
			panic("adv_put_ready_sg_list_queue: ScsiQ with "
			      "a SG list but only one element");
		if ((scsiq->q1.cntl & QC_SG_HEAD) == 0)
			panic("adv_put_ready_sg_list_queue: ScsiQ with "
			      "a SG list but QC_SG_HEAD not set");
#endif
		q_addr = ADV_QNO_TO_QADDR(q_no);
		sg_index = 1;
		scsiq->q1.sg_queue_cnt = sg_head->queue_cnt;
		scsi_sg_q.sg_head_qp = q_no;
		scsi_sg_q.cntl = QCSG_SG_XFER_LIST;
		/* One continuation queue per ADV_SG_LIST_PER_Q elements */
		for (i = 0; i < sg_head->queue_cnt; i++) {
			u_int8_t segs_this_q;

			if (sg_entry_cnt > ADV_SG_LIST_PER_Q)
				segs_this_q = ADV_SG_LIST_PER_Q;
			else {
				/* This will be the last segment then */
				segs_this_q = sg_entry_cnt;
				scsi_sg_q.cntl |= QCSG_SG_XFER_END;
			}
			scsi_sg_q.seq_no = i + 1;
			/* Each SG element is two 32-bit dwords */
			sg_list_dwords = segs_this_q << 1;
			if (i == 0) {
				scsi_sg_q.sg_list_cnt = segs_this_q;
				scsi_sg_q.sg_cur_list_cnt = segs_this_q;
			} else {
				scsi_sg_q.sg_list_cnt = segs_this_q - 1;
				scsi_sg_q.sg_cur_list_cnt = segs_this_q - 1;
			}
			/* Follow the free-list link to the next claimed queue */
			next_qp = adv_read_lram_8(adv, q_addr + ADV_SCSIQ_B_FWD);
			scsi_sg_q.q_no = next_qp;
			q_addr = ADV_QNO_TO_QADDR(next_qp);

			adv_write_lram_16_multi(adv,
						q_addr + ADV_SCSIQ_SGHD_CPY_BEG,
						(u_int16_t *)&scsi_sg_q,
						sizeof(scsi_sg_q) >> 1);
			adv_write_lram_32_multi(adv, q_addr + ADV_SGQ_LIST_BEG,
						(u_int32_t *)&sg_head->sg_list[sg_index],
						sg_list_dwords);
			sg_entry_cnt -= segs_this_q;
			sg_index += ADV_SG_LIST_PER_Q;
		}
	}
	adv_put_ready_queue(adv, scsiq, q_no);
}
1795
/*
 * Finish staging scsiq in LRAM queue q_no and mark it QS_READY for
 * the microcode.  If the target's current sync parameters do not
 * match our negotiation goal, an SDTR message-out is staged and
 * QC_MSG_OUT set so negotiation rides along with this command.
 */
static void
adv_put_ready_queue(struct adv_softc *adv, struct adv_scsi_q *scsiq,
		    u_int q_no)
{
	struct		adv_target_transinfo* tinfo;
	u_int		q_addr;
	u_int		tid_no;

	tid_no = ADV_TIX_TO_TID(scsiq->q2.target_ix);
	tinfo = &adv->tinfo[tid_no];
	if ((tinfo->current.period != tinfo->goal.period)
	 || (tinfo->current.offset != tinfo->goal.offset)) {

		/* Piggy-back an SDTR negotiation on this request */
		adv_msgout_sdtr(adv, tinfo->goal.period, tinfo->goal.offset);
		scsiq->q1.cntl |= QC_MSG_OUT;
	}
	q_addr = ADV_QNO_TO_QADDR(q_no);

	scsiq->q1.status = QS_FREE;

	/* CDB is copied separately from the rest of the request */
	adv_write_lram_16_multi(adv, q_addr + ADV_SCSIQ_CDB_BEG,
				(u_int16_t *)scsiq->cdbptr,
				scsiq->q2.cdb_len >> 1);

#if BYTE_ORDER == BIG_ENDIAN
	adv_adj_scsiq_endian(scsiq);
#endif

	/* Copy q1 (from cntl on) and q2; adv_put_scsiq skips pad words */
	adv_put_scsiq(adv, q_addr + ADV_SCSIQ_CPY_BEG,
		      (u_int16_t *) &scsiq->q1.cntl,
		      ((sizeof(scsiq->q1) + sizeof(scsiq->q2)) / 2) - 1);

#if CC_WRITE_IO_COUNT
	adv_write_lram_16(adv, q_addr + ADV_SCSIQ_W_REQ_COUNT,
			  adv->req_count);
#endif

#if CC_CLEAR_DMA_REMAIN

	adv_write_lram_32(adv, q_addr + ADV_SCSIQ_DW_REMAIN_XFER_ADDR, 0);
	adv_write_lram_32(adv, q_addr + ADV_SCSIQ_DW_REMAIN_XFER_CNT, 0);
#endif

	/* Setting QS_READY last hands the queue to the microcode */
	adv_write_lram_16(adv, q_addr + ADV_SCSIQ_B_STATUS,
			  (scsiq->q1.q_no << 8) | QS_READY);
}
1842
/*
 * Copy `words` 16-bit words from buffer into LRAM at s_addr, skipping
 * source words 2 and 10, which correspond to compiler padding in the
 * adv_scsiq_1/adv_scsiq_2 structures (see the XXX note below).
 */
static void
adv_put_scsiq(struct adv_softc *adv, u_int16_t s_addr,
	      u_int16_t *buffer, int words)
{
	int	i;

	/*
	 * XXX This routine makes *gross* assumptions
	 * about padding in the data structures.
	 * Either the data structures should have explicit
	 * padding members added, or they should have padding
	 * turned off via compiler attributes depending on
	 * which yields better overall performance.  My hunch
	 * would be that turning off padding would be the
	 * faster approach as an outsw is much faster than
	 * this crude loop and accessing un-aligned data
	 * members isn't *that* expensive.  The other choice
	 * would be to modify the ASC script so that the
	 * the adv_scsiq_1 structure can be re-arranged so
	 * padding isn't required.
	 */
	ADV_OUTW(adv, ADV_LRAM_ADDR, s_addr);
	for (i = 0; i < words; i++, buffer++) {
		if (i == 2 || i == 10) {
			continue;
		}
		ADV_OUTW(adv, ADV_LRAM_DATA, *buffer);
	}
}
1872
/*
 * Handle an extended message received from a target while the request
 * at halt_q_addr was active.  An SDTR is range-checked, recorded via
 * adv_set_syncrate(), and either accepted silently (if it matches a
 * negotiation we initiated) or answered with our own SDTR.  A WDTR is
 * answered with a zero width (narrow only).  Anything else is
 * answered with MESSAGE REJECT.  The (possibly updated) q_cntl is
 * written back to the queue's control byte.
 */
static void
adv_handle_extmsg_in(struct adv_softc *adv, u_int16_t halt_q_addr,
		     u_int8_t q_cntl, target_bit_vector target_mask,
		     int tid_no)
{
	struct	ext_msg ext_msg;

	adv_read_lram_16_multi(adv, ADVV_MSGIN_BEG, (u_int16_t *) &ext_msg,
			       sizeof(ext_msg) >> 1);
	if ((ext_msg.msg_type == MSG_EXTENDED)
	 && (ext_msg.msg_req == MSG_EXT_SDTR)
	 && (ext_msg.msg_len == MSG_EXT_SDTR_LEN)) {
		union	  ccb *ccb;
		struct	  adv_target_transinfo* tinfo;
		u_int32_t cinfo_index;
		u_int	 period;
		u_int	 offset;
		int	 sdtr_accept;
		u_int8_t orig_offset;

		cinfo_index =
		    adv_read_lram_32(adv, halt_q_addr + ADV_SCSIQ_D_CINFO_IDX);
		ccb = adv->ccb_infos[cinfo_index].ccb;
		tinfo = &adv->tinfo[tid_no];
		sdtr_accept = TRUE;

		orig_offset = ext_msg.req_ack_offset;
		/* Don't go faster than our negotiation goal allows */
		if (ext_msg.xfer_period < tinfo->goal.period) {
                	sdtr_accept = FALSE;
			ext_msg.xfer_period = tinfo->goal.period;
		}

		/* Perform range checking */
		period = ext_msg.xfer_period;
		offset = ext_msg.req_ack_offset;
		adv_period_offset_to_sdtr(adv, &period,  &offset, tid_no);
		ext_msg.xfer_period = period;
		ext_msg.req_ack_offset = offset;

		/* Record our current sync settings */
		adv_set_syncrate(adv, ccb->ccb_h.path,
				 tid_no, ext_msg.xfer_period,
				 ext_msg.req_ack_offset,
				 ADV_TRANS_GOAL|ADV_TRANS_ACTIVE);

		/* Offset too high or large period forced async */
		if (orig_offset != ext_msg.req_ack_offset)
			sdtr_accept = FALSE;

		if (sdtr_accept && (q_cntl & QC_MSG_OUT)) {
			/* Valid response to our requested negotiation */
			q_cntl &= ~QC_MSG_OUT;
		} else {
			/* Must Respond */
			q_cntl |= QC_MSG_OUT;
			adv_msgout_sdtr(adv, ext_msg.xfer_period,
					ext_msg.req_ack_offset);
		}

	} else if (ext_msg.msg_type == MSG_EXTENDED
		&& ext_msg.msg_req == MSG_EXT_WDTR
		&& ext_msg.msg_len == MSG_EXT_WDTR_LEN) {

		/* We only do narrow transfers; answer with width 0 */
		ext_msg.wdtr_width = 0;
		adv_write_lram_16_multi(adv, ADVV_MSGOUT_BEG,
					(u_int16_t *)&ext_msg,
					sizeof(ext_msg) >> 1);
		q_cntl |= QC_MSG_OUT;
        } else {

		/* Unknown extended message: reject it */
		ext_msg.msg_type = MSG_MESSAGE_REJECT;
		adv_write_lram_16_multi(adv, ADVV_MSGOUT_BEG,
					(u_int16_t *)&ext_msg,
					sizeof(ext_msg) >> 1);
		q_cntl |= QC_MSG_OUT;
        }
	adv_write_lram_8(adv, halt_q_addr + ADV_SCSIQ_B_CNTL, q_cntl);
}
1951
1952static void
1953adv_msgout_sdtr(struct adv_softc *adv, u_int8_t sdtr_period,
1954		u_int8_t sdtr_offset)
1955{
1956	struct	 ext_msg sdtr_buf;
1957
1958	sdtr_buf.msg_type = MSG_EXTENDED;
1959	sdtr_buf.msg_len = MSG_EXT_SDTR_LEN;
1960	sdtr_buf.msg_req = MSG_EXT_SDTR;
1961	sdtr_buf.xfer_period = sdtr_period;
1962	sdtr_offset &= ADV_SYN_MAX_OFFSET;
1963	sdtr_buf.req_ack_offset = sdtr_offset;
1964	adv_write_lram_16_multi(adv, ADVV_MSGOUT_BEG,
1965				(u_int16_t *) &sdtr_buf,
1966				sizeof(sdtr_buf) / 2);
1967}
1968
/*
 * Abort LRAM queues matching target/lun (and, if ccb is non-NULL, only
 * that specific CCB).  Matching queues are marked QS_ABORTED, `status`
 * is recorded in the owning CCB unless it already carries an error,
 * and the ccb info is flagged so completion processing knows an abort
 * was queued.  With queued_only set, requests that have already
 * disconnected, are busy, or are done are left alone.  SG continuation
 * queues are never touched directly.  Returns the number of queues
 * aborted.
 */
int
adv_abort_ccb(struct adv_softc *adv, int target, int lun, union ccb *ccb,
	      u_int32_t status, int queued_only)
{
	u_int16_t q_addr;
	u_int8_t  q_no;
	struct adv_q_done_info scsiq_buf;
	struct adv_q_done_info *scsiq;
	u_int8_t  target_ix;
	int	  count;

	scsiq = &scsiq_buf;
	target_ix = ADV_TIDLUN_TO_IX(target, lun);
	count = 0;
	for (q_no = ADV_MIN_ACTIVE_QNO; q_no <= adv->max_openings; q_no++) {
		struct adv_ccb_info *ccb_info;
		q_addr = ADV_QNO_TO_QADDR(q_no);

		adv_copy_lram_doneq(adv, q_addr, scsiq, adv->max_dma_count);
		ccb_info = &adv->ccb_infos[scsiq->d2.ccb_index];
		if (((scsiq->q_status & QS_READY) != 0)
		 && ((scsiq->q_status & QS_ABORTED) == 0)
		 && ((scsiq->cntl & QCSG_SG_XFER_LIST) == 0)
		 && (scsiq->d2.target_ix == target_ix)
		 && (queued_only == 0
		  || !(scsiq->q_status & (QS_DISC1|QS_DISC2|QS_BUSY|QS_DONE)))
		 && (ccb == NULL || (ccb == ccb_info->ccb))) {
			union ccb *aborted_ccb;
			struct adv_ccb_info *cinfo;

			scsiq->q_status |= QS_ABORTED;
			adv_write_lram_8(adv, q_addr + ADV_SCSIQ_B_STATUS,
					 scsiq->q_status);
			aborted_ccb = ccb_info->ccb;
			/* Don't clobber earlier error codes */
			if ((aborted_ccb->ccb_h.status & CAM_STATUS_MASK)
			  == CAM_REQ_INPROG)
				aborted_ccb->ccb_h.status |= status;
			cinfo = (struct adv_ccb_info *)
			    aborted_ccb->ccb_h.ccb_cinfo_ptr;
			cinfo->state |= ACCB_ABORT_QUEUED;
			count++;
		}
	}
	return (count);
}
2015
/*
 * Reset the chip (optionally also asserting SCSI bus reset), rebuild
 * LRAM state, revert every target to async transfers, notify CAM that
 * the bus was reset, and complete all pending CCBs as aborted before
 * restarting the chip.  Returns the number of CCBs terminated.
 */
int
adv_reset_bus(struct adv_softc *adv, int initiate_bus_reset)
{
	int count;
	int i;
	union ccb *ccb;

	/* Wait (up to ~200ms) for any reset already on the bus to clear */
	i = 200;
	while ((ADV_INW(adv, ADV_CHIP_STATUS) & ADV_CSW_SCSI_RESET_ACTIVE) != 0
	    && i--)
		DELAY(1000);
	adv_reset_chip(adv, initiate_bus_reset);
	adv_reinit_lram(adv);
	for (i = 0; i <= ADV_MAX_TID; i++)
		adv_set_syncrate(adv, NULL, i, /*period*/0,
				 /*offset*/0, ADV_TRANS_CUR);
	ADV_OUTW(adv, ADV_REG_PROG_COUNTER, ADV_MCODE_START_ADDR);

	/* Tell the XPT layer that a bus reset occured */
	if (adv->path != NULL)
		xpt_async(AC_BUS_RESET, adv->path, NULL);

	/* Complete every pending request as aborted-by-host */
	count = 0;
	while ((ccb = (union ccb *)LIST_FIRST(&adv->pending_ccbs)) != NULL) {
		if ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_INPROG)
			ccb->ccb_h.status |= CAM_SCSI_BUS_RESET;
		adv_done(adv, ccb, QD_ABORTED_BY_HOST, 0, 0, 0);
		count++;
	}

	adv_start_chip(adv);
	return (count);
}
2049
/*
 * Program the chip's synchronous transfer (SYN_OFFSET) register for
 * target `tid`.  The HOST_SCSIID register in bank 1 selects which
 * target the bank-0 SYN_OFFSET write applies to, so the current
 * selection is saved, retargeted to tid, and restored afterwards.
 * The write is skipped if the selection readback doesn't stick.
 */
static void
adv_set_sdtr_reg_at_id(struct adv_softc *adv, int tid, u_int8_t sdtr_data)
{
	int orig_id;

    	adv_set_bank(adv, 1);
    	orig_id = ffs(ADV_INB(adv, ADV_HOST_SCSIID)) - 1;
    	ADV_OUTB(adv, ADV_HOST_SCSIID, tid);
	if (ADV_INB(adv, ADV_HOST_SCSIID) == (0x01 << tid)) {
		adv_set_bank(adv, 0);
		ADV_OUTB(adv, ADV_SYN_OFFSET, sdtr_data);
	}
    	adv_set_bank(adv, 1);
    	ADV_OUTB(adv, ADV_HOST_SCSIID, orig_id);
	adv_set_bank(adv, 0);
}
2066