/*
 * Low level routines for the Advanced Systems Inc. SCSI controller chips
 *
 * Copyright (c) 1996-1997, 1999-2000 Justin Gibbs.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification, immediately at the beginning of the file.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: head/sys/dev/advansys/advlib.c 111342 2003-02-23 19:16:53Z obrien $
 */
/*
 * Ported from:
 * advansys.c - Linux Host Driver for AdvanSys SCSI Adapters
 *
 * Copyright (c) 1995-1996 Advanced System Products, Inc.
 * All Rights Reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that redistributions of source
 * code retain the above copyright notice and this comment without
 * modification.
 */

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/systm.h>

#include <machine/bus_pio.h>
#include <machine/bus.h>
#include <machine/resource.h>
#include <sys/bus.h>
#include <sys/rman.h>

#include <cam/cam.h>
#include <cam/cam_ccb.h>
#include <cam/cam_sim.h>
#include <cam/cam_xpt_sim.h>

#include <cam/scsi/scsi_all.h>
#include <cam/scsi/scsi_message.h>
#include <cam/scsi/scsi_da.h>
#include <cam/scsi/scsi_cd.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>

#include <dev/advansys/advansys.h>
#include <dev/advansys/advmcode.h>

struct adv_quirk_entry {
	struct scsi_inquiry_pattern inq_pat;
	u_int8_t quirks;
#define ADV_QUIRK_FIX_ASYN_XFER_ALWAYS	0x01
#define ADV_QUIRK_FIX_ASYN_XFER		0x02
};

static struct adv_quirk_entry adv_quirk_table[] =
{
	{
		{ T_CDROM, SIP_MEDIA_REMOVABLE, "HP", "*", "*" },
		ADV_QUIRK_FIX_ASYN_XFER_ALWAYS|ADV_QUIRK_FIX_ASYN_XFER
	},
	{
		{ T_CDROM, SIP_MEDIA_REMOVABLE, "NEC", "CD-ROM DRIVE", "*" },
		0
	},
	{
		{
		  T_SEQUENTIAL, SIP_MEDIA_REMOVABLE,
		  "TANDBERG", " TDC 36", "*"
		},
		0
	},
	{
		{ T_SEQUENTIAL, SIP_MEDIA_REMOVABLE, "WANGTEK", "*", "*" },
		0
	},
	{
		{
		  T_PROCESSOR, SIP_MEDIA_REMOVABLE|SIP_MEDIA_FIXED,
		  "*", "*", "*"
		},
		0
	},
	{
		{
		  T_SCANNER, SIP_MEDIA_REMOVABLE|SIP_MEDIA_FIXED,
		  "*", "*", "*"
		},
		0
	},
	{
		/* Default quirk entry */
		{
		  T_ANY, SIP_MEDIA_REMOVABLE|SIP_MEDIA_FIXED,
		  /*vendor*/"*", /*product*/"*", /*revision*/"*"
		},
		ADV_QUIRK_FIX_ASYN_XFER,
	}
};

/*
 * Allowable periods in ns
 */
static u_int8_t adv_sdtr_period_tbl[] =
{
	25,
	30,
	35,
	40,
	50,
	60,
	70,
	85
};

static u_int8_t adv_sdtr_period_tbl_ultra[] =
{
	12,
	19,
	25,
	32,
	38,
	44,
	50,
	57,
	63,
	69,
	75,
	82,
	88,
	94,
	100,
	107
};
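
/*
 * Editorial note: a synchronous agreement is packed into a single byte
 * ("sdtr_data") whose high nibble indexes one of the period tables
 * above and whose low nibble is the REQ/ACK offset; see
 * adv_period_offset_to_sdtr() and adv_sdtr_to_period_offset() below.
 * Judging by the values (e.g. 12 for Ultra boards), the entries appear
 * to be SDTR transfer period factors in 4 ns units rather than raw
 * nanoseconds.
 */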

struct ext_msg {
	u_int8_t msg_type;
	u_int8_t msg_len;
	u_int8_t msg_req;
	union {
		struct {
			u_int8_t sdtr_xfer_period;
			u_int8_t sdtr_req_ack_offset;
		} sdtr;
		struct {
			u_int8_t wdtr_width;
		} wdtr;
		struct {
			u_int8_t mdp[4];
		} mdp;
	} u_ext_msg;
	u_int8_t res;
};

#define	xfer_period	u_ext_msg.sdtr.sdtr_xfer_period
#define	req_ack_offset	u_ext_msg.sdtr.sdtr_req_ack_offset
#define	wdtr_width	u_ext_msg.wdtr.wdtr_width
#define	mdp_b3		u_ext_msg.mdp.mdp[3]
#define	mdp_b2		u_ext_msg.mdp.mdp[2]
#define	mdp_b1		u_ext_msg.mdp.mdp[1]
#define	mdp_b0		u_ext_msg.mdp.mdp[0]

/*
 * Some of the early PCI adapters have problems with
 * async transfers.  Instead use an offset of 1.
 */
#define ASYN_SDTR_DATA_FIX_PCI_REV_AB 0x41

/* LRAM routines */
static void	 adv_read_lram_16_multi(struct adv_softc *adv, u_int16_t s_addr,
					u_int16_t *buffer, int count);
static void	 adv_write_lram_16_multi(struct adv_softc *adv,
					 u_int16_t s_addr, u_int16_t *buffer,
					 int count);
static void	 adv_mset_lram_16(struct adv_softc *adv, u_int16_t s_addr,
				  u_int16_t set_value, int count);
static u_int32_t adv_msum_lram_16(struct adv_softc *adv, u_int16_t s_addr,
				  int count);

static int	 adv_write_and_verify_lram_16(struct adv_softc *adv,
					      u_int16_t addr, u_int16_t value);
static u_int32_t adv_read_lram_32(struct adv_softc *adv, u_int16_t addr);

static void	 adv_write_lram_32(struct adv_softc *adv, u_int16_t addr,
				   u_int32_t value);
static void	 adv_write_lram_32_multi(struct adv_softc *adv,
					 u_int16_t s_addr, u_int32_t *buffer,
					 int count);

/* EEPROM routines */
static u_int16_t adv_read_eeprom_16(struct adv_softc *adv, u_int8_t addr);
static u_int16_t adv_write_eeprom_16(struct adv_softc *adv, u_int8_t addr,
				     u_int16_t value);
static int	 adv_write_eeprom_cmd_reg(struct adv_softc *adv,
					  u_int8_t cmd_reg);
static int	 adv_set_eeprom_config_once(struct adv_softc *adv,
					    struct adv_eeprom_config *eeconfig);

/* Initialization */
static u_int32_t adv_load_microcode(struct adv_softc *adv, u_int16_t s_addr,
				    u_int16_t *mcode_buf, u_int16_t mcode_size);

static void	 adv_reinit_lram(struct adv_softc *adv);
static void	 adv_init_lram(struct adv_softc *adv);
static int	 adv_init_microcode_var(struct adv_softc *adv);
static void	 adv_init_qlink_var(struct adv_softc *adv);

/* Interrupts */
static void	 adv_disable_interrupt(struct adv_softc *adv);
static void	 adv_enable_interrupt(struct adv_softc *adv);
static void	 adv_toggle_irq_act(struct adv_softc *adv);

/* Chip Control */
static int	 adv_host_req_chip_halt(struct adv_softc *adv);
static void	 adv_set_chip_ih(struct adv_softc *adv, u_int16_t ins_code);
#if UNUSED
static u_int8_t  adv_get_chip_scsi_ctrl(struct adv_softc *adv);
#endif

/* Queue handling and execution */
static __inline int
		 adv_sgcount_to_qcount(int sgcount);

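/*
 * Convert an S/G element count into the number of queue blocks needed
 * to hold the request: one head block, which also carries the first
 * S/G element, plus enough list blocks for the remaining elements.
 * Worked example (assuming ADV_SG_LIST_PER_Q is 7, per advansys.h):
 * 15 elements leave 14 after the head, 14 / 7 = 2 list blocks with no
 * remainder, so 3 queue blocks in total.
 */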
static __inline int
adv_sgcount_to_qcount(int sgcount)
{
	int	n_sg_list_qs;

	n_sg_list_qs = ((sgcount - 1) / ADV_SG_LIST_PER_Q);
	if (((sgcount - 1) % ADV_SG_LIST_PER_Q) != 0)
		n_sg_list_qs++;
	return (n_sg_list_qs + 1);
}

static void	 adv_adj_endian_qdone_info(struct adv_q_done_info *);
static void	 adv_adj_scsiq_endian(struct adv_scsi_q *);
static void	 adv_get_q_info(struct adv_softc *adv, u_int16_t s_addr,
				u_int16_t *inbuf, int words);
static u_int	 adv_get_num_free_queues(struct adv_softc *adv, u_int8_t n_qs);
static u_int8_t  adv_alloc_free_queues(struct adv_softc *adv,
				       u_int8_t free_q_head, u_int8_t n_free_q);
static u_int8_t  adv_alloc_free_queue(struct adv_softc *adv,
				      u_int8_t free_q_head);
static int	 adv_send_scsi_queue(struct adv_softc *adv,
				     struct adv_scsi_q *scsiq,
				     u_int8_t n_q_required);
static void	 adv_put_ready_sg_list_queue(struct adv_softc *adv,
					     struct adv_scsi_q *scsiq,
					     u_int q_no);
static void	 adv_put_ready_queue(struct adv_softc *adv,
				     struct adv_scsi_q *scsiq, u_int q_no);
static void	 adv_put_scsiq(struct adv_softc *adv, u_int16_t s_addr,
			       u_int16_t *buffer, int words);

/* Messages */
static void	 adv_handle_extmsg_in(struct adv_softc *adv,
				      u_int16_t halt_q_addr, u_int8_t q_cntl,
				      target_bit_vector target_id,
				      int tid);
static void	 adv_msgout_sdtr(struct adv_softc *adv, u_int8_t sdtr_period,
				 u_int8_t sdtr_offset);
static void	 adv_set_sdtr_reg_at_id(struct adv_softc *adv, int id,
					u_int8_t sdtr_data);

/* Exported functions first */

void
advasync(void *callback_arg, u_int32_t code, struct cam_path *path, void *arg)
{
	struct adv_softc *adv;

	adv = (struct adv_softc *)callback_arg;
	switch (code) {
	case AC_FOUND_DEVICE:
	{
		struct ccb_getdev *cgd;
		target_bit_vector target_mask;
		int num_entries;
		caddr_t match;
		struct adv_quirk_entry *entry;
		struct adv_target_transinfo* tinfo;

		cgd = (struct ccb_getdev *)arg;

		target_mask = ADV_TID_TO_TARGET_MASK(cgd->ccb_h.target_id);

		num_entries = sizeof(adv_quirk_table)/sizeof(*adv_quirk_table);
		match = cam_quirkmatch((caddr_t)&cgd->inq_data,
				       (caddr_t)adv_quirk_table,
				       num_entries, sizeof(*adv_quirk_table),
				       scsi_inquiry_match);

		if (match == NULL)
			panic("advasync: device didn't match wildcard entry!!");

		entry = (struct adv_quirk_entry *)match;

		if (adv->bug_fix_control & ADV_BUG_FIX_ASYN_USE_SYN) {
			if ((entry->quirks & ADV_QUIRK_FIX_ASYN_XFER_ALWAYS)!=0)
				adv->fix_asyn_xfer_always |= target_mask;
			else
				adv->fix_asyn_xfer_always &= ~target_mask;
			/*
			 * We start out life with all bits set and clear them
			 * after we've determined that the fix isn't necessary.
			 * It may well be that we've already cleared a target
			 * before the full inquiry session completes, so don't
			 * gratuitously set a target bit even if it has this
			 * quirk.  But, if the quirk exonerates a device, clear
			 * the bit now.
			 */
			if ((entry->quirks & ADV_QUIRK_FIX_ASYN_XFER) == 0)
				adv->fix_asyn_xfer &= ~target_mask;
		}
		/*
		 * Reset our sync settings now that we've determined
		 * what quirks are in effect for the device.
		 */
		tinfo = &adv->tinfo[cgd->ccb_h.target_id];
		adv_set_syncrate(adv, cgd->ccb_h.path,
				 cgd->ccb_h.target_id,
				 tinfo->current.period,
				 tinfo->current.offset,
				 ADV_TRANS_CUR);
		break;
	}
	case AC_LOST_DEVICE:
	{
		u_int target_mask;

		if (adv->bug_fix_control & ADV_BUG_FIX_ASYN_USE_SYN) {
			target_mask = 0x01 << xpt_path_target_id(path);
			adv->fix_asyn_xfer |= target_mask;
		}

		/*
		 * Revert to async transfers
		 * for the next device.
		 */
		adv_set_syncrate(adv, /*path*/NULL,
				 xpt_path_target_id(path),
				 /*period*/0,
				 /*offset*/0,
				 ADV_TRANS_GOAL|ADV_TRANS_CUR);
		break;
	}
	default:
		break;
	}
}

void
adv_set_bank(struct adv_softc *adv, u_int8_t bank)
{
	u_int8_t control;

	/*
	 * Start out with the bank reset to 0
	 */
	control = ADV_INB(adv, ADV_CHIP_CTRL)
		  &  (~(ADV_CC_SINGLE_STEP | ADV_CC_TEST
			| ADV_CC_DIAG | ADV_CC_SCSI_RESET
			| ADV_CC_CHIP_RESET | ADV_CC_BANK_ONE));
	if (bank == 1) {
		control |= ADV_CC_BANK_ONE;
	} else if (bank == 2) {
		control |= ADV_CC_DIAG | ADV_CC_BANK_ONE;
	}
	ADV_OUTB(adv, ADV_CHIP_CTRL, control);
}

u_int8_t
adv_read_lram_8(struct adv_softc *adv, u_int16_t addr)
{
	u_int8_t   byte_data;
	u_int16_t  word_data;

	/*
	 * LRAM is accessed on 16-bit boundaries.
	 */
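	/*
	 * The word at the even address is fetched and the requested
	 * byte is picked out of the proper lane.  On a little-endian
	 * host the odd byte is the high half: reading LRAM address
	 * 0x1001, for example, fetches the word at 0x1000 and returns
	 * bits 15:8.
	 */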
	ADV_OUTW(adv, ADV_LRAM_ADDR, addr & 0xFFFE);
	word_data = ADV_INW(adv, ADV_LRAM_DATA);
	if (addr & 1) {
#if BYTE_ORDER == BIG_ENDIAN
		byte_data = (u_int8_t)(word_data & 0xFF);
#else
		byte_data = (u_int8_t)((word_data >> 8) & 0xFF);
#endif
	} else {
#if BYTE_ORDER == BIG_ENDIAN
		byte_data = (u_int8_t)((word_data >> 8) & 0xFF);
#else
		byte_data = (u_int8_t)(word_data & 0xFF);
#endif
	}
	return (byte_data);
}

void
adv_write_lram_8(struct adv_softc *adv, u_int16_t addr, u_int8_t value)
{
	u_int16_t word_data;

	word_data = adv_read_lram_16(adv, addr & 0xFFFE);
	if (addr & 1) {
		word_data &= 0x00FF;
		word_data |= (((u_int8_t)value << 8) & 0xFF00);
	} else {
		word_data &= 0xFF00;
		word_data |= ((u_int8_t)value & 0x00FF);
	}
	adv_write_lram_16(adv, addr & 0xFFFE, word_data);
}

u_int16_t
adv_read_lram_16(struct adv_softc *adv, u_int16_t addr)
{
	ADV_OUTW(adv, ADV_LRAM_ADDR, addr);
	return (ADV_INW(adv, ADV_LRAM_DATA));
}

void
adv_write_lram_16(struct adv_softc *adv, u_int16_t addr, u_int16_t value)
{
	ADV_OUTW(adv, ADV_LRAM_ADDR, addr);
	ADV_OUTW(adv, ADV_LRAM_DATA, value);
}

/*
 * Determine if there is a board at "iobase" by looking
 * for the AdvanSys signatures.  Return 1 if a board is
 * found, 0 otherwise.
 */
int
adv_find_signature(bus_space_tag_t tag, bus_space_handle_t bsh)
{
	u_int16_t signature;

	if (bus_space_read_1(tag, bsh, ADV_SIGNATURE_BYTE) == ADV_1000_ID1B) {
		signature = bus_space_read_2(tag, bsh, ADV_SIGNATURE_WORD);
		if ((signature == ADV_1000_ID0W)
		 || (signature == ADV_1000_ID0W_FIX))
			return (1);
	}
	return (0);
}

void
adv_lib_init(struct adv_softc *adv)
{
	if ((adv->type & ADV_ULTRA) != 0) {
		adv->sdtr_period_tbl = adv_sdtr_period_tbl_ultra;
		adv->sdtr_period_tbl_size = sizeof(adv_sdtr_period_tbl_ultra);
	} else {
		adv->sdtr_period_tbl = adv_sdtr_period_tbl;
		adv->sdtr_period_tbl_size = sizeof(adv_sdtr_period_tbl);
	}
}

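/*
 * Editorial note: the EEPROM image is read as 16-bit words.  The first
 * two words and the configuration body are summed as they are copied
 * into *eeprom_config; the word following the body (read into the last
 * buffer slot) is the stored checksum, so the caller can compare the
 * returned sum against it to validate the contents.
 */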
u_int16_t
adv_get_eeprom_config(struct adv_softc *adv,
		      struct adv_eeprom_config *eeprom_config)
{
	u_int16_t	sum;
	u_int16_t	*wbuf;
	u_int8_t	cfg_beg;
	u_int8_t	cfg_end;
	u_int8_t	s_addr;

	wbuf = (u_int16_t *)eeprom_config;
	sum = 0;

	for (s_addr = 0; s_addr < 2; s_addr++, wbuf++) {
		*wbuf = adv_read_eeprom_16(adv, s_addr);
		sum += *wbuf;
	}

	if (adv->type & ADV_VL) {
		cfg_beg = ADV_EEPROM_CFG_BEG_VL;
		cfg_end = ADV_EEPROM_MAX_ADDR_VL;
	} else {
		cfg_beg = ADV_EEPROM_CFG_BEG;
		cfg_end = ADV_EEPROM_MAX_ADDR;
	}

	for (s_addr = cfg_beg; s_addr <= (cfg_end - 1); s_addr++, wbuf++) {
		*wbuf = adv_read_eeprom_16(adv, s_addr);
		sum += *wbuf;
#if ADV_DEBUG_EEPROM
		printf("Addr 0x%x: 0x%04x\n", s_addr, *wbuf);
#endif
	}
	*wbuf = adv_read_eeprom_16(adv, s_addr);
	return (sum);
}

int
adv_set_eeprom_config(struct adv_softc *adv,
		      struct adv_eeprom_config *eeprom_config)
{
	int	retry;

	retry = 0;
	while (1) {
		if (adv_set_eeprom_config_once(adv, eeprom_config) == 0) {
			break;
		}
		if (++retry > ADV_EEPROM_MAX_RETRY) {
			break;
		}
	}
	return (retry > ADV_EEPROM_MAX_RETRY);
}

int
adv_reset_chip(struct adv_softc *adv, int reset_bus)
{
	adv_stop_chip(adv);
	ADV_OUTB(adv, ADV_CHIP_CTRL, ADV_CC_CHIP_RESET | ADV_CC_HALT
				     | (reset_bus ? ADV_CC_SCSI_RESET : 0));
	DELAY(60);

	adv_set_chip_ih(adv, ADV_INS_RFLAG_WTM);
	adv_set_chip_ih(adv, ADV_INS_HALT);

	if (reset_bus)
		ADV_OUTB(adv, ADV_CHIP_CTRL, ADV_CC_CHIP_RESET | ADV_CC_HALT);

	ADV_OUTB(adv, ADV_CHIP_CTRL, ADV_CC_HALT);
	if (reset_bus)
		DELAY(200 * 1000);

	ADV_OUTW(adv, ADV_CHIP_STATUS, ADV_CIW_CLR_SCSI_RESET_INT);
	ADV_OUTW(adv, ADV_CHIP_STATUS, 0);
	return (adv_is_chip_halted(adv));
}

int
adv_test_external_lram(struct adv_softc* adv)
{
	u_int16_t	q_addr;
	u_int16_t	saved_value;
	int		success;

	success = 0;

	q_addr = ADV_QNO_TO_QADDR(241);
	saved_value = adv_read_lram_16(adv, q_addr);
	if (adv_write_and_verify_lram_16(adv, q_addr, 0x55AA) == 0) {
		success = 1;
		adv_write_lram_16(adv, q_addr, saved_value);
	}
	return (success);
}

int
adv_init_lram_and_mcode(struct adv_softc *adv)
{
	u_int32_t	retval;

	adv_disable_interrupt(adv);

	adv_init_lram(adv);

	retval = adv_load_microcode(adv, 0, (u_int16_t *)adv_mcode,
				    adv_mcode_size);
	if (retval != adv_mcode_chksum) {
		printf("adv%d: Microcode download failed checksum!\n",
		       adv->unit);
		return (1);
	}

	if (adv_init_microcode_var(adv) != 0)
		return (1);

	adv_enable_interrupt(adv);
	return (0);
}

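/*
 * Editorial note (hedged): on VL boards the IRQ select occupies bits
 * 4:2 of the low config word; the encodings 0, 4 and 7 mean "no IRQ",
 * and the rest map onto IRQ 10..15 assuming ADV_MIN_IRQ_NO is 10 as in
 * advansys.h.  Non-VL (ISA) boards use only bits 3:2, with the value 3
 * standing for IRQ 15 rather than 13.
 */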
u_int8_t
adv_get_chip_irq(struct adv_softc *adv)
{
	u_int16_t	cfg_lsw;
	u_int8_t	chip_irq;

	cfg_lsw = ADV_INW(adv, ADV_CONFIG_LSW);

	if ((adv->type & ADV_VL) != 0) {
		chip_irq = (u_int8_t)(((cfg_lsw >> 2) & 0x07));
		if ((chip_irq == 0)
		 || (chip_irq == 4)
		 || (chip_irq == 7)) {
			return (0);
		}
		return (chip_irq + (ADV_MIN_IRQ_NO - 1));
	}
	chip_irq = (u_int8_t)(((cfg_lsw >> 2) & 0x03));
	if (chip_irq == 3)
		chip_irq += 2;
	return (chip_irq + ADV_MIN_IRQ_NO);
}

u_int8_t
adv_set_chip_irq(struct adv_softc *adv, u_int8_t irq_no)
{
	u_int16_t	cfg_lsw;

	if ((adv->type & ADV_VL) != 0) {
		if (irq_no != 0) {
			if ((irq_no < ADV_MIN_IRQ_NO)
			 || (irq_no > ADV_MAX_IRQ_NO)) {
				irq_no = 0;
			} else {
				irq_no -= ADV_MIN_IRQ_NO - 1;
			}
		}
		cfg_lsw = ADV_INW(adv, ADV_CONFIG_LSW) & 0xFFE3;
		cfg_lsw |= 0x0010;
		ADV_OUTW(adv, ADV_CONFIG_LSW, cfg_lsw);
		adv_toggle_irq_act(adv);

		cfg_lsw = ADV_INW(adv, ADV_CONFIG_LSW) & 0xFFE0;
		cfg_lsw |= (irq_no & 0x07) << 2;
		ADV_OUTW(adv, ADV_CONFIG_LSW, cfg_lsw);
		adv_toggle_irq_act(adv);
	} else if ((adv->type & ADV_ISA) != 0) {
		if (irq_no == 15)
			irq_no -= 2;
		irq_no -= ADV_MIN_IRQ_NO;
		cfg_lsw = ADV_INW(adv, ADV_CONFIG_LSW) & 0xFFF3;
		cfg_lsw |= (irq_no & 0x03) << 2;
		ADV_OUTW(adv, ADV_CONFIG_LSW, cfg_lsw);
	}
	return (adv_get_chip_irq(adv));
}

void
adv_set_chip_scsiid(struct adv_softc *adv, int new_id)
{
	u_int16_t cfg_lsw;

	cfg_lsw = ADV_INW(adv, ADV_CONFIG_LSW);
	if (ADV_CONFIG_SCSIID(cfg_lsw) == new_id)
		return;
	cfg_lsw &= ~ADV_CFG_LSW_SCSIID;
	cfg_lsw |= (new_id & ADV_MAX_TID) << ADV_CFG_LSW_SCSIID_SHIFT;
	ADV_OUTW(adv, ADV_CONFIG_LSW, cfg_lsw);
}

int
adv_execute_scsi_queue(struct adv_softc *adv, struct adv_scsi_q *scsiq,
		       u_int32_t datalen)
{
	struct		adv_target_transinfo* tinfo;
	u_int32_t	*p_data_addr;
	u_int32_t	*p_data_bcount;
	int		disable_syn_offset_one_fix;
	int		retval;
	u_int		n_q_required;
	u_int32_t	addr;
	u_int8_t	sg_entry_cnt;
	u_int8_t	target_ix;
	u_int8_t	sg_entry_cnt_minus_one;
	u_int8_t	tid_no;

	scsiq->q1.q_no = 0;
	retval = 1;  /* Default to error case */
	target_ix = scsiq->q2.target_ix;
	tid_no = ADV_TIX_TO_TID(target_ix);
	tinfo = &adv->tinfo[tid_no];

	if (scsiq->cdbptr[0] == REQUEST_SENSE) {
		/* Renegotiate if appropriate. */
		adv_set_syncrate(adv, /*struct cam_path */NULL,
				 tid_no, /*period*/0, /*offset*/0,
				 ADV_TRANS_CUR);
		if (tinfo->current.period != tinfo->goal.period) {
			adv_msgout_sdtr(adv, tinfo->goal.period,
					tinfo->goal.offset);
			scsiq->q1.cntl |= (QC_MSG_OUT | QC_URGENT);
		}
	}

	if ((scsiq->q1.cntl & QC_SG_HEAD) != 0) {
		sg_entry_cnt = scsiq->sg_head->entry_cnt;
		sg_entry_cnt_minus_one = sg_entry_cnt - 1;

#ifdef DIAGNOSTIC
		if (sg_entry_cnt <= 1)
			panic("adv_execute_scsi_queue: Queue "
			      "with QC_SG_HEAD set but %d segs.", sg_entry_cnt);

		if (sg_entry_cnt > ADV_MAX_SG_LIST)
			panic("adv_execute_scsi_queue: "
			      "Queue with too many segs.");

		if ((adv->type & (ADV_ISA | ADV_VL | ADV_EISA)) != 0) {
			int i;

			for (i = 0; i < sg_entry_cnt_minus_one; i++) {
				addr = scsiq->sg_head->sg_list[i].addr +
				       scsiq->sg_head->sg_list[i].bytes;

				if ((addr & 0x0003) != 0)
					panic("adv_execute_scsi_queue: SG "
					      "with odd address or byte count");
			}
		}
#endif
		p_data_addr =
		    &scsiq->sg_head->sg_list[sg_entry_cnt_minus_one].addr;
		p_data_bcount =
		    &scsiq->sg_head->sg_list[sg_entry_cnt_minus_one].bytes;

		n_q_required = adv_sgcount_to_qcount(sg_entry_cnt);
		scsiq->sg_head->queue_cnt = n_q_required - 1;
	} else {
		p_data_addr = &scsiq->q1.data_addr;
		p_data_bcount = &scsiq->q1.data_cnt;
		n_q_required = 1;
	}

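	/*
	 * Editorial note: the "asyn fix" forces a sync offset of one on
	 * targets that misbehave when running fully asynchronous.  The
	 * workaround is suppressed below for transfers under 512 bytes
	 * and for a handful of commands whose payloads are typically
	 * short, presumably because the fix itself corrupts such
	 * transfers on the affected controllers.
	 */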
	disable_syn_offset_one_fix = FALSE;

	if ((adv->fix_asyn_xfer & scsiq->q1.target_id) != 0
	 && (adv->fix_asyn_xfer_always & scsiq->q1.target_id) == 0) {
		if (datalen != 0) {
			if (datalen < 512) {
				disable_syn_offset_one_fix = TRUE;
			} else {
				if (scsiq->cdbptr[0] == INQUIRY
				 || scsiq->cdbptr[0] == REQUEST_SENSE
				 || scsiq->cdbptr[0] == READ_CAPACITY
				 || scsiq->cdbptr[0] == MODE_SELECT_6
				 || scsiq->cdbptr[0] == MODE_SENSE_6
				 || scsiq->cdbptr[0] == MODE_SENSE_10
				 || scsiq->cdbptr[0] == MODE_SELECT_10
				 || scsiq->cdbptr[0] == READ_TOC) {
					disable_syn_offset_one_fix = TRUE;
				}
			}
		}
	}

	if (disable_syn_offset_one_fix) {
		scsiq->q2.tag_code &=
		    ~(MSG_SIMPLE_Q_TAG|MSG_HEAD_OF_Q_TAG|MSG_ORDERED_Q_TAG);
		scsiq->q2.tag_code |= (ADV_TAG_FLAG_DISABLE_ASYN_USE_SYN_FIX
				     | ADV_TAG_FLAG_DISABLE_DISCONNECT);
	}

	if ((adv->bug_fix_control & ADV_BUG_FIX_IF_NOT_DWB) != 0
	 && (scsiq->cdbptr[0] == READ_10 || scsiq->cdbptr[0] == READ_6)) {
		u_int8_t extra_bytes;

		addr = *p_data_addr + *p_data_bcount;
		extra_bytes = addr & 0x0003;
		if (extra_bytes != 0
		 && ((scsiq->q1.cntl & QC_SG_HEAD) != 0
		  || (scsiq->q1.data_cnt & 0x01FF) == 0)) {
			scsiq->q2.tag_code |= ADV_TAG_FLAG_EXTRA_BYTES;
			scsiq->q1.extra_bytes = extra_bytes;
			*p_data_bcount -= extra_bytes;
		}
	}

	if ((adv_get_num_free_queues(adv, n_q_required) >= n_q_required)
	 || ((scsiq->q1.cntl & QC_URGENT) != 0))
		retval = adv_send_scsi_queue(adv, scsiq, n_q_required);

	return (retval);
}

u_int8_t
adv_copy_lram_doneq(struct adv_softc *adv, u_int16_t q_addr,
		    struct adv_q_done_info *scsiq, u_int32_t max_dma_count)
{
	u_int16_t val;
	u_int8_t  sg_queue_cnt;

	adv_get_q_info(adv, q_addr + ADV_SCSIQ_DONE_INFO_BEG,
		       (u_int16_t *)scsiq,
		       (sizeof(scsiq->d2) + sizeof(scsiq->d3)) / 2);

#if BYTE_ORDER == BIG_ENDIAN
	adv_adj_endian_qdone_info(scsiq);
#endif

	val = adv_read_lram_16(adv, q_addr + ADV_SCSIQ_B_STATUS);
	scsiq->q_status = val & 0xFF;
	scsiq->q_no = (val >> 8) & 0xFF;

	val = adv_read_lram_16(adv, q_addr + ADV_SCSIQ_B_CNTL);
	scsiq->cntl = val & 0xFF;
	sg_queue_cnt = (val >> 8) & 0xFF;

	val = adv_read_lram_16(adv, q_addr + ADV_SCSIQ_B_SENSE_LEN);
	scsiq->sense_len = val & 0xFF;
	scsiq->extra_bytes = (val >> 8) & 0xFF;

	/*
	 * Due to a bug in accessing LRAM on the 940UA, the residual
	 * is split into separate high and low 16-bit quantities.
	 */
	scsiq->remain_bytes =
	    adv_read_lram_16(adv, q_addr + ADV_SCSIQ_DW_REMAIN_XFER_CNT);
	scsiq->remain_bytes |=
	    adv_read_lram_16(adv, q_addr + ADV_SCSIQ_W_ALT_DC1) << 16;

	/*
	 * XXX Is this just a safeguard or will the counter really
	 * have bogus upper bits?
	 */
	scsiq->remain_bytes &= max_dma_count;

	return (sg_queue_cnt);
}

int
adv_start_chip(struct adv_softc *adv)
{
	ADV_OUTB(adv, ADV_CHIP_CTRL, 0);
	if ((ADV_INW(adv, ADV_CHIP_STATUS) & ADV_CSW_HALTED) != 0)
		return (0);
	return (1);
}

int
adv_stop_execution(struct adv_softc *adv)
{
	int count;

	count = 0;
	if (adv_read_lram_8(adv, ADV_STOP_CODE_B) == 0) {
		adv_write_lram_8(adv, ADV_STOP_CODE_B,
				 ADV_STOP_REQ_RISC_STOP);
		do {
			if (adv_read_lram_8(adv, ADV_STOP_CODE_B) &
				ADV_STOP_ACK_RISC_STOP) {
				return (1);
			}
			DELAY(1000);
		} while (count++ < 20);
	}
	return (0);
}

int
adv_is_chip_halted(struct adv_softc *adv)
{
	if ((ADV_INW(adv, ADV_CHIP_STATUS) & ADV_CSW_HALTED) != 0) {
		if ((ADV_INB(adv, ADV_CHIP_CTRL) & ADV_CC_HALT) != 0) {
			return (1);
		}
	}
	return (0);
}

/*
 * XXX The numeric constants and the loops in this routine
 * need to be documented.
 */
void
adv_ack_interrupt(struct adv_softc *adv)
{
	u_int8_t	host_flag;
	u_int8_t	risc_flag;
	int		loop;

	loop = 0;
	do {
		risc_flag = adv_read_lram_8(adv, ADVV_RISC_FLAG_B);
		if (loop++ > 0x7FFF) {
			break;
		}
	} while ((risc_flag & ADV_RISC_FLAG_GEN_INT) != 0);

	host_flag = adv_read_lram_8(adv, ADVV_HOST_FLAG_B);
	adv_write_lram_8(adv, ADVV_HOST_FLAG_B,
			 host_flag | ADV_HOST_FLAG_ACK_INT);

	ADV_OUTW(adv, ADV_CHIP_STATUS, ADV_CIW_INT_ACK);
	loop = 0;
	while (ADV_INW(adv, ADV_CHIP_STATUS) & ADV_CSW_INT_PENDING) {
		ADV_OUTW(adv, ADV_CHIP_STATUS, ADV_CIW_INT_ACK);
		if (loop++ > 3) {
			break;
		}
	}

	adv_write_lram_8(adv, ADVV_HOST_FLAG_B, host_flag);
}

/*
 * Handle all conditions that may halt the chip waiting
 * for us to intervene.
 */
void
adv_isr_chip_halted(struct adv_softc *adv)
{
	u_int16_t	  int_halt_code;
	u_int16_t	  halt_q_addr;
	target_bit_vector target_mask;
	target_bit_vector scsi_busy;
	u_int8_t	  halt_qp;
	u_int8_t	  target_ix;
	u_int8_t	  q_cntl;
	u_int8_t	  tid_no;

	int_halt_code = adv_read_lram_16(adv, ADVV_HALTCODE_W);
	halt_qp = adv_read_lram_8(adv, ADVV_CURCDB_B);
	halt_q_addr = ADV_QNO_TO_QADDR(halt_qp);
	target_ix = adv_read_lram_8(adv, halt_q_addr + ADV_SCSIQ_B_TARGET_IX);
	q_cntl = adv_read_lram_8(adv, halt_q_addr + ADV_SCSIQ_B_CNTL);
	tid_no = ADV_TIX_TO_TID(target_ix);
	target_mask = ADV_TID_TO_TARGET_MASK(tid_no);
	if (int_halt_code == ADV_HALT_DISABLE_ASYN_USE_SYN_FIX) {
		/*
		 * Temporarily disable the async fix by removing
		 * this target from the list of affected targets,
		 * setting our async rate, and then putting us
		 * back into the mask.
		 */
		adv->fix_asyn_xfer &= ~target_mask;
		adv_set_syncrate(adv, /*struct cam_path */NULL,
				 tid_no, /*period*/0, /*offset*/0,
				 ADV_TRANS_ACTIVE);
		adv->fix_asyn_xfer |= target_mask;
	} else if (int_halt_code == ADV_HALT_ENABLE_ASYN_USE_SYN_FIX) {
		adv_set_syncrate(adv, /*struct cam_path */NULL,
				 tid_no, /*period*/0, /*offset*/0,
				 ADV_TRANS_ACTIVE);
	} else if (int_halt_code == ADV_HALT_EXTMSG_IN) {
		adv_handle_extmsg_in(adv, halt_q_addr, q_cntl,
				     target_mask, tid_no);
	} else if (int_halt_code == ADV_HALT_CHK_CONDITION) {
		struct	  adv_target_transinfo* tinfo;
		union	  ccb *ccb;
		u_int32_t cinfo_index;
		u_int8_t  tag_code;
		u_int8_t  q_status;

		tinfo = &adv->tinfo[tid_no];
		q_cntl |= QC_REQ_SENSE;

		/* Renegotiate if appropriate. */
		adv_set_syncrate(adv, /*struct cam_path */NULL,
				 tid_no, /*period*/0, /*offset*/0,
				 ADV_TRANS_CUR);
		if (tinfo->current.period != tinfo->goal.period) {
			adv_msgout_sdtr(adv, tinfo->goal.period,
					tinfo->goal.offset);
			q_cntl |= QC_MSG_OUT;
		}
		adv_write_lram_8(adv, halt_q_addr + ADV_SCSIQ_B_CNTL, q_cntl);

		/* Don't tag request sense commands */
		tag_code = adv_read_lram_8(adv,
					   halt_q_addr + ADV_SCSIQ_B_TAG_CODE);
		tag_code &=
		    ~(MSG_SIMPLE_Q_TAG|MSG_HEAD_OF_Q_TAG|MSG_ORDERED_Q_TAG);

		if ((adv->fix_asyn_xfer & target_mask) != 0
		 && (adv->fix_asyn_xfer_always & target_mask) == 0) {
			tag_code |= (ADV_TAG_FLAG_DISABLE_DISCONNECT
				 | ADV_TAG_FLAG_DISABLE_ASYN_USE_SYN_FIX);
		}
		adv_write_lram_8(adv, halt_q_addr + ADV_SCSIQ_B_TAG_CODE,
				 tag_code);
		q_status = adv_read_lram_8(adv,
					   halt_q_addr + ADV_SCSIQ_B_STATUS);
		q_status |= (QS_READY | QS_BUSY);
		adv_write_lram_8(adv, halt_q_addr + ADV_SCSIQ_B_STATUS,
				 q_status);
		/*
		 * Freeze the devq until we can handle the sense condition.
		 */
		cinfo_index =
		    adv_read_lram_32(adv, halt_q_addr + ADV_SCSIQ_D_CINFO_IDX);
		ccb = adv->ccb_infos[cinfo_index].ccb;
		xpt_freeze_devq(ccb->ccb_h.path, /*count*/1);
		ccb->ccb_h.status |= CAM_DEV_QFRZN;
		adv_abort_ccb(adv, tid_no, ADV_TIX_TO_LUN(target_ix),
			      /*ccb*/NULL, CAM_REQUEUE_REQ,
			      /*queued_only*/TRUE);
		scsi_busy = adv_read_lram_8(adv, ADVV_SCSIBUSY_B);
		scsi_busy &= ~target_mask;
		adv_write_lram_8(adv, ADVV_SCSIBUSY_B, scsi_busy);
		/*
		 * Ensure we have enough time to actually
		 * retrieve the sense.
		 */
		untimeout(adv_timeout, (caddr_t)ccb, ccb->ccb_h.timeout_ch);
		ccb->ccb_h.timeout_ch =
		    timeout(adv_timeout, (caddr_t)ccb, 5 * hz);
	} else if (int_halt_code == ADV_HALT_SDTR_REJECTED) {
		struct	ext_msg out_msg;

		adv_read_lram_16_multi(adv, ADVV_MSGOUT_BEG,
				       (u_int16_t *) &out_msg,
				       sizeof(out_msg)/2);

		if ((out_msg.msg_type == MSG_EXTENDED)
		 && (out_msg.msg_len == MSG_EXT_SDTR_LEN)
		 && (out_msg.msg_req == MSG_EXT_SDTR)) {
			/* Revert to Async */
			adv_set_syncrate(adv, /*struct cam_path */NULL,
					 tid_no, /*period*/0, /*offset*/0,
					 ADV_TRANS_GOAL|ADV_TRANS_ACTIVE);
		}
		q_cntl &= ~QC_MSG_OUT;
		adv_write_lram_8(adv, halt_q_addr + ADV_SCSIQ_B_CNTL, q_cntl);
	} else if (int_halt_code == ADV_HALT_SS_QUEUE_FULL) {
		u_int8_t scsi_status;
		union ccb *ccb;
		u_int32_t cinfo_index;

		scsi_status = adv_read_lram_8(adv, halt_q_addr
					      + ADV_SCSIQ_SCSI_STATUS);
		cinfo_index =
		    adv_read_lram_32(adv, halt_q_addr + ADV_SCSIQ_D_CINFO_IDX);
		ccb = adv->ccb_infos[cinfo_index].ccb;
		xpt_freeze_devq(ccb->ccb_h.path, /*count*/1);
		ccb->ccb_h.status |= CAM_DEV_QFRZN|CAM_SCSI_STATUS_ERROR;
		ccb->csio.scsi_status = SCSI_STATUS_QUEUE_FULL;
		adv_abort_ccb(adv, tid_no, ADV_TIX_TO_LUN(target_ix),
			      /*ccb*/NULL, CAM_REQUEUE_REQ,
			      /*queued_only*/TRUE);
		scsi_busy = adv_read_lram_8(adv, ADVV_SCSIBUSY_B);
		scsi_busy &= ~target_mask;
		adv_write_lram_8(adv, ADVV_SCSIBUSY_B, scsi_busy);
	} else {
		printf("Unhandled Halt Code %x\n", int_halt_code);
	}
	adv_write_lram_16(adv, ADVV_HALTCODE_W, 0);
}

void
adv_sdtr_to_period_offset(struct adv_softc *adv,
			  u_int8_t sync_data, u_int8_t *period,
			  u_int8_t *offset, int tid)
{
	if (adv->fix_asyn_xfer & ADV_TID_TO_TARGET_MASK(tid)
	 && (sync_data == ASYN_SDTR_DATA_FIX_PCI_REV_AB)) {
		*period = *offset = 0;
	} else {
		*period = adv->sdtr_period_tbl[((sync_data >> 4) & 0xF)];
		*offset = sync_data & 0xF;
	}
}

void
adv_set_syncrate(struct adv_softc *adv, struct cam_path *path,
		 u_int tid, u_int period, u_int offset, u_int type)
{
	struct adv_target_transinfo* tinfo;
	u_int old_period;
	u_int old_offset;
	u_int8_t sdtr_data;

	tinfo = &adv->tinfo[tid];

	/* Filter our input */
	sdtr_data = adv_period_offset_to_sdtr(adv, &period,
					      &offset, tid);

	old_period = tinfo->current.period;
	old_offset = tinfo->current.offset;

	if ((type & ADV_TRANS_CUR) != 0
	 && ((old_period != period || old_offset != offset)
	  || period == 0 || offset == 0) /* Changes in asyn fix settings */) {
		int s;
		int halted;

		s = splcam();
		halted = adv_is_chip_halted(adv);
		if (halted == 0)
			/* Must halt the chip first */
			adv_host_req_chip_halt(adv);

		/* Update current hardware settings */
		adv_set_sdtr_reg_at_id(adv, tid, sdtr_data);

		/*
		 * If a target can run in sync mode, we don't need
		 * to check it for sync problems.
		 */
		if (offset != 0)
			adv->fix_asyn_xfer &= ~ADV_TID_TO_TARGET_MASK(tid);

		if (halted == 0)
			/* Start the chip again */
			adv_start_chip(adv);

		splx(s);
		tinfo->current.period = period;
		tinfo->current.offset = offset;

		if (path != NULL) {
			/*
			 * Tell the SCSI layer about the
			 * new transfer parameters.
			 */
			struct	ccb_trans_settings neg;

			neg.sync_period = period;
			neg.sync_offset = offset;
			neg.valid = CCB_TRANS_SYNC_RATE_VALID
				  | CCB_TRANS_SYNC_OFFSET_VALID;
			xpt_setup_ccb(&neg.ccb_h, path, /*priority*/1);
			xpt_async(AC_TRANSFER_NEG, path, &neg);
		}
	}

	if ((type & ADV_TRANS_GOAL) != 0) {
		tinfo->goal.period = period;
		tinfo->goal.offset = offset;
	}

	if ((type & ADV_TRANS_USER) != 0) {
		tinfo->user.period = period;
		tinfo->user.offset = offset;
	}
}

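/*
 * Editorial note: the returned SDTR byte packs the period-table index
 * into the high nibble and the clamped offset into the low nibble.
 * Example with the non-ultra table: a request for period 45, offset 8
 * matches table entry 4 (50) and yields 0x48; *period is left at the
 * requester's value unless it is faster than our fastest rate, per the
 * comment below.
 */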
u_int8_t
adv_period_offset_to_sdtr(struct adv_softc *adv, u_int *period,
			  u_int *offset, int tid)
{
	u_int i;
	u_int dummy_offset;
	u_int dummy_period;

	if (offset == NULL) {
		dummy_offset = 0;
		offset = &dummy_offset;
	}

	if (period == NULL) {
		dummy_period = 0;
		period = &dummy_period;
	}

	*offset = MIN(ADV_SYN_MAX_OFFSET, *offset);
	if (*period != 0 && *offset != 0) {
		for (i = 0; i < adv->sdtr_period_tbl_size; i++) {
			if (*period <= adv->sdtr_period_tbl[i]) {
				/*
				 * When responding to a target that requests
				 * sync, the requested rate may fall between
				 * two rates that we can output, but still be
				 * a rate that we can receive.  Because of this,
				 * we want to respond to the target with
				 * the same rate that it sent to us even
				 * if the period we use to send data to it
				 * is lower.  Only lower the response period
				 * if we must.
				 */
				if (i == 0 /* Our maximum rate */)
					*period = adv->sdtr_period_tbl[0];
				return ((i << 4) | *offset);
			}
		}
	}

	/* Must go async */
	*period = 0;
	*offset = 0;
	if (adv->fix_asyn_xfer & ADV_TID_TO_TARGET_MASK(tid))
		return (ASYN_SDTR_DATA_FIX_PCI_REV_AB);
	return (0);
}

/* Internal Routines */

static void
adv_read_lram_16_multi(struct adv_softc *adv, u_int16_t s_addr,
		       u_int16_t *buffer, int count)
{
	ADV_OUTW(adv, ADV_LRAM_ADDR, s_addr);
	ADV_INSW(adv, ADV_LRAM_DATA, buffer, count);
}

static void
adv_write_lram_16_multi(struct adv_softc *adv, u_int16_t s_addr,
			u_int16_t *buffer, int count)
{
	ADV_OUTW(adv, ADV_LRAM_ADDR, s_addr);
	ADV_OUTSW(adv, ADV_LRAM_DATA, buffer, count);
}

static void
adv_mset_lram_16(struct adv_softc *adv, u_int16_t s_addr,
		 u_int16_t set_value, int count)
{
	ADV_OUTW(adv, ADV_LRAM_ADDR, s_addr);
	bus_space_set_multi_2(adv->tag, adv->bsh, ADV_LRAM_DATA,
			      set_value, count);
}

static u_int32_t
adv_msum_lram_16(struct adv_softc *adv, u_int16_t s_addr, int count)
{
	u_int32_t	sum;
	int		i;

	sum = 0;
	ADV_OUTW(adv, ADV_LRAM_ADDR, s_addr);
	for (i = 0; i < count; i++)
		sum += ADV_INW(adv, ADV_LRAM_DATA);
	return (sum);
}

static int
adv_write_and_verify_lram_16(struct adv_softc *adv, u_int16_t addr,
			     u_int16_t value)
{
	int	retval;

	retval = 0;
	ADV_OUTW(adv, ADV_LRAM_ADDR, addr);
	ADV_OUTW(adv, ADV_LRAM_DATA, value);
	DELAY(10000);
	ADV_OUTW(adv, ADV_LRAM_ADDR, addr);
	if (value != ADV_INW(adv, ADV_LRAM_DATA))
		retval = 1;
	return (retval);
}

static u_int32_t
adv_read_lram_32(struct adv_softc *adv, u_int16_t addr)
{
	u_int16_t	val_low, val_high;

	ADV_OUTW(adv, ADV_LRAM_ADDR, addr);

#if BYTE_ORDER == BIG_ENDIAN
	val_high = ADV_INW(adv, ADV_LRAM_DATA);
	val_low = ADV_INW(adv, ADV_LRAM_DATA);
#else
	val_low = ADV_INW(adv, ADV_LRAM_DATA);
	val_high = ADV_INW(adv, ADV_LRAM_DATA);
#endif

	return (((u_int32_t)val_high << 16) | (u_int32_t)val_low);
}

static void
adv_write_lram_32(struct adv_softc *adv, u_int16_t addr, u_int32_t value)
{
	ADV_OUTW(adv, ADV_LRAM_ADDR, addr);

#if BYTE_ORDER == BIG_ENDIAN
	ADV_OUTW(adv, ADV_LRAM_DATA, (u_int16_t)((value >> 16) & 0xFFFF));
	ADV_OUTW(adv, ADV_LRAM_DATA, (u_int16_t)(value & 0xFFFF));
#else
	ADV_OUTW(adv, ADV_LRAM_DATA, (u_int16_t)(value & 0xFFFF));
	ADV_OUTW(adv, ADV_LRAM_DATA, (u_int16_t)((value >> 16) & 0xFFFF));
#endif
}

static void
adv_write_lram_32_multi(struct adv_softc *adv, u_int16_t s_addr,
			u_int32_t *buffer, int count)
{
	ADV_OUTW(adv, ADV_LRAM_ADDR, s_addr);
	ADV_OUTSW(adv, ADV_LRAM_DATA, (u_int16_t *)buffer, count * 2);
}

static u_int16_t
adv_read_eeprom_16(struct adv_softc *adv, u_int8_t addr)
{
	u_int16_t read_wval;
	u_int8_t  cmd_reg;

	adv_write_eeprom_cmd_reg(adv, ADV_EEPROM_CMD_WRITE_DISABLE);
	DELAY(1000);
	cmd_reg = addr | ADV_EEPROM_CMD_READ;
	adv_write_eeprom_cmd_reg(adv, cmd_reg);
	DELAY(1000);
	read_wval = ADV_INW(adv, ADV_EEPROM_DATA);
	DELAY(1000);
	return (read_wval);
}

static u_int16_t
adv_write_eeprom_16(struct adv_softc *adv, u_int8_t addr, u_int16_t value)
{
	u_int16_t	read_value;

	read_value = adv_read_eeprom_16(adv, addr);
	if (read_value != value) {
		adv_write_eeprom_cmd_reg(adv, ADV_EEPROM_CMD_WRITE_ENABLE);
		DELAY(1000);

		ADV_OUTW(adv, ADV_EEPROM_DATA, value);
		DELAY(1000);

		adv_write_eeprom_cmd_reg(adv, ADV_EEPROM_CMD_WRITE | addr);
		DELAY(20 * 1000);

		adv_write_eeprom_cmd_reg(adv, ADV_EEPROM_CMD_WRITE_DISABLE);
		DELAY(1000);
		read_value = adv_read_eeprom_16(adv, addr);
	}
	return (read_value);
}

static int
adv_write_eeprom_cmd_reg(struct adv_softc *adv, u_int8_t cmd_reg)
{
	u_int8_t read_back;
	int	 retry;

	retry = 0;
	while (1) {
		ADV_OUTB(adv, ADV_EEPROM_CMD, cmd_reg);
		DELAY(1000);
		read_back = ADV_INB(adv, ADV_EEPROM_CMD);
		if (read_back == cmd_reg) {
			return (1);
		}
		if (retry++ > ADV_EEPROM_MAX_RETRY) {
			return (0);
		}
	}
}

static int
adv_set_eeprom_config_once(struct adv_softc *adv,
			   struct adv_eeprom_config *eeprom_config)
{
	int		n_error;
	u_int16_t	*wbuf;
	u_int16_t	sum;
	u_int8_t	s_addr;
	u_int8_t	cfg_beg;
	u_int8_t	cfg_end;

	wbuf = (u_int16_t *)eeprom_config;
	n_error = 0;
	sum = 0;
	for (s_addr = 0; s_addr < 2; s_addr++, wbuf++) {
		sum += *wbuf;
		if (*wbuf != adv_write_eeprom_16(adv, s_addr, *wbuf)) {
			n_error++;
		}
	}
	if (adv->type & ADV_VL) {
		cfg_beg = ADV_EEPROM_CFG_BEG_VL;
		cfg_end = ADV_EEPROM_MAX_ADDR_VL;
	} else {
		cfg_beg = ADV_EEPROM_CFG_BEG;
		cfg_end = ADV_EEPROM_MAX_ADDR;
	}

	for (s_addr = cfg_beg; s_addr <= (cfg_end - 1); s_addr++, wbuf++) {
		sum += *wbuf;
		if (*wbuf != adv_write_eeprom_16(adv, s_addr, *wbuf)) {
			n_error++;
		}
	}
	*wbuf = sum;
	if (sum != adv_write_eeprom_16(adv, s_addr, sum)) {
		n_error++;
	}
	wbuf = (u_int16_t *)eeprom_config;
	for (s_addr = 0; s_addr < 2; s_addr++, wbuf++) {
		if (*wbuf != adv_read_eeprom_16(adv, s_addr)) {
			n_error++;
		}
	}
	for (s_addr = cfg_beg; s_addr <= cfg_end; s_addr++, wbuf++) {
		if (*wbuf != adv_read_eeprom_16(adv, s_addr)) {
			n_error++;
		}
	}
	return (n_error);
}

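/*
 * Editorial note: the value returned is the 32-bit sum of every word
 * just written, which the caller compares against adv_mcode_chksum.
 * The separate 16-bit sum stored at ADVV_MCODE_CHKSUM_W covers only
 * the code section beginning at ADV_CODE_SEC_BEG, presumably for the
 * microcode's own self-check.
 */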
static u_int32_t
adv_load_microcode(struct adv_softc *adv, u_int16_t s_addr,
		   u_int16_t *mcode_buf, u_int16_t mcode_size)
{
	u_int32_t chksum;
	u_int16_t mcode_lram_size;
	u_int16_t mcode_chksum;

	mcode_lram_size = mcode_size >> 1;
	/* XXX Why zero the memory just before you write the whole thing?? */
	adv_mset_lram_16(adv, s_addr, 0, mcode_lram_size);
	adv_write_lram_16_multi(adv, s_addr, mcode_buf, mcode_lram_size);

	chksum = adv_msum_lram_16(adv, s_addr, mcode_lram_size);
	mcode_chksum = (u_int16_t)adv_msum_lram_16(adv, ADV_CODE_SEC_BEG,
						   ((mcode_size - s_addr
						     - ADV_CODE_SEC_BEG) >> 1));
	adv_write_lram_16(adv, ADVV_MCODE_CHKSUM_W, mcode_chksum);
	adv_write_lram_16(adv, ADVV_MCODE_SIZE_W, mcode_size);
	return (chksum);
}

static void
adv_reinit_lram(struct adv_softc *adv)
{
	adv_init_lram(adv);
	adv_init_qlink_var(adv);
}

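/*
 * Editorial sketch of the layout built below: each queue occupies an
 * ADV_QBLK_SIZE block starting at ADV_QADR_BEG.  Blocks 1 through
 * max_openings are chained through per-block forward/backward queue
 * numbers, with the forward chain terminated by ADV_QLINK_END; these
 * form the free queue list.  The few blocks past max_openings are
 * self-linked and serve as the busy/disconnect list heads installed by
 * adv_init_qlink_var().
 */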
static void
adv_init_lram(struct adv_softc *adv)
{
	u_int8_t  i;
	u_int16_t s_addr;

	adv_mset_lram_16(adv, ADV_QADR_BEG, 0,
			 (((adv->max_openings + 2 + 1) * 64) >> 1));

	i = ADV_MIN_ACTIVE_QNO;
	s_addr = ADV_QADR_BEG + ADV_QBLK_SIZE;

	adv_write_lram_8(adv, s_addr + ADV_SCSIQ_B_FWD,	i + 1);
	adv_write_lram_8(adv, s_addr + ADV_SCSIQ_B_BWD, adv->max_openings);
	adv_write_lram_8(adv, s_addr + ADV_SCSIQ_B_QNO, i);
	i++;
	s_addr += ADV_QBLK_SIZE;
	for (; i < adv->max_openings; i++, s_addr += ADV_QBLK_SIZE) {
		adv_write_lram_8(adv, s_addr + ADV_SCSIQ_B_FWD, i + 1);
		adv_write_lram_8(adv, s_addr + ADV_SCSIQ_B_BWD, i - 1);
		adv_write_lram_8(adv, s_addr + ADV_SCSIQ_B_QNO, i);
	}

	adv_write_lram_8(adv, s_addr + ADV_SCSIQ_B_FWD, ADV_QLINK_END);
	adv_write_lram_8(adv, s_addr + ADV_SCSIQ_B_BWD, adv->max_openings - 1);
	adv_write_lram_8(adv, s_addr + ADV_SCSIQ_B_QNO, adv->max_openings);
	i++;
	s_addr += ADV_QBLK_SIZE;

	for (; i <= adv->max_openings + 3; i++, s_addr += ADV_QBLK_SIZE) {
		adv_write_lram_8(adv, s_addr + ADV_SCSIQ_B_FWD, i);
		adv_write_lram_8(adv, s_addr + ADV_SCSIQ_B_BWD, i);
		adv_write_lram_8(adv, s_addr + ADV_SCSIQ_B_QNO, i);
	}
}

static int
adv_init_microcode_var(struct adv_softc *adv)
{
	int	 i;

	for (i = 0; i <= ADV_MAX_TID; i++) {
		/* Start out async all around */
		adv_set_syncrate(adv, /*path*/NULL,
				 i, 0, 0,
				 ADV_TRANS_GOAL|ADV_TRANS_CUR);
	}

	adv_init_qlink_var(adv);

	adv_write_lram_8(adv, ADVV_DISC_ENABLE_B, adv->disc_enable);
	adv_write_lram_8(adv, ADVV_HOSTSCSI_ID_B, 0x01 << adv->scsi_id);

	adv_write_lram_32(adv, ADVV_OVERRUN_PADDR_D, adv->overrun_physbase);

	adv_write_lram_32(adv, ADVV_OVERRUN_BSIZE_D, ADV_OVERRUN_BSIZE);

	ADV_OUTW(adv, ADV_REG_PROG_COUNTER, ADV_MCODE_START_ADDR);
	if (ADV_INW(adv, ADV_REG_PROG_COUNTER) != ADV_MCODE_START_ADDR) {
		printf("adv%d: Unable to set program counter. Aborting.\n",
		       adv->unit);
		return (1);
	}
	return (0);
}

static void
adv_init_qlink_var(struct adv_softc *adv)
{
	int	  i;
	u_int16_t lram_addr;

	adv_write_lram_8(adv, ADVV_NEXTRDY_B, 1);
	adv_write_lram_8(adv, ADVV_DONENEXT_B, adv->max_openings);

	adv_write_lram_16(adv, ADVV_FREE_Q_HEAD_W, 1);
	adv_write_lram_16(adv, ADVV_DONE_Q_TAIL_W, adv->max_openings);

	adv_write_lram_8(adv, ADVV_BUSY_QHEAD_B,
			 (u_int8_t)((int) adv->max_openings + 1));
	adv_write_lram_8(adv, ADVV_DISC1_QHEAD_B,
			 (u_int8_t)((int) adv->max_openings + 2));

	adv_write_lram_8(adv, ADVV_TOTAL_READY_Q_B, adv->max_openings);

	adv_write_lram_16(adv, ADVV_ASCDVC_ERR_CODE_W, 0);
	adv_write_lram_16(adv, ADVV_HALTCODE_W, 0);
	adv_write_lram_8(adv, ADVV_STOP_CODE_B, 0);
	adv_write_lram_8(adv, ADVV_SCSIBUSY_B, 0);
	adv_write_lram_8(adv, ADVV_WTM_FLAG_B, 0);
	adv_write_lram_8(adv, ADVV_Q_DONE_IN_PROGRESS_B, 0);

	lram_addr = ADV_QADR_BEG;
	for (i = 0; i < 32; i++, lram_addr += 2)
		adv_write_lram_16(adv, lram_addr, 0);
}

static void
adv_disable_interrupt(struct adv_softc *adv)
{
	u_int16_t cfg;

	cfg = ADV_INW(adv, ADV_CONFIG_LSW);
	ADV_OUTW(adv, ADV_CONFIG_LSW, cfg & ~ADV_CFG_LSW_HOST_INT_ON);
}

static void
adv_enable_interrupt(struct adv_softc *adv)
{
	u_int16_t cfg;

	cfg = ADV_INW(adv, ADV_CONFIG_LSW);
	ADV_OUTW(adv, ADV_CONFIG_LSW, cfg | ADV_CFG_LSW_HOST_INT_ON);
}

static void
adv_toggle_irq_act(struct adv_softc *adv)
{
	ADV_OUTW(adv, ADV_CHIP_STATUS, ADV_CIW_IRQ_ACT);
	ADV_OUTW(adv, ADV_CHIP_STATUS, 0);
}

void
adv_start_execution(struct adv_softc *adv)
{
	if (adv_read_lram_8(adv, ADV_STOP_CODE_B) != 0) {
		adv_write_lram_8(adv, ADV_STOP_CODE_B, 0);
	}
}

int
adv_stop_chip(struct adv_softc *adv)
{
	u_int8_t cc_val;

	cc_val = ADV_INB(adv, ADV_CHIP_CTRL)
		 & (~(ADV_CC_SINGLE_STEP | ADV_CC_TEST | ADV_CC_DIAG));
	ADV_OUTB(adv, ADV_CHIP_CTRL, cc_val | ADV_CC_HALT);
	adv_set_chip_ih(adv, ADV_INS_HALT);
	adv_set_chip_ih(adv, ADV_INS_RFLAG_WTM);
	if ((ADV_INW(adv, ADV_CHIP_STATUS) & ADV_CSW_HALTED) == 0) {
		return (0);
	}
	return (1);
}

static int
adv_host_req_chip_halt(struct adv_softc *adv)
{
	int	 count;
	u_int8_t saved_stop_code;

	if (adv_is_chip_halted(adv))
		return (1);

	count = 0;
	saved_stop_code = adv_read_lram_8(adv, ADVV_STOP_CODE_B);
	adv_write_lram_8(adv, ADVV_STOP_CODE_B,
			 ADV_STOP_HOST_REQ_RISC_HALT | ADV_STOP_REQ_RISC_STOP);
	while (adv_is_chip_halted(adv) == 0
	    && count++ < 2000)
		;

	adv_write_lram_8(adv, ADVV_STOP_CODE_B, saved_stop_code);
	return (count < 2000);
}

static void
adv_set_chip_ih(struct adv_softc *adv, u_int16_t ins_code)
{
	adv_set_bank(adv, 1);
	ADV_OUTW(adv, ADV_REG_IH, ins_code);
	adv_set_bank(adv, 0);
}

#if UNUSED
static u_int8_t
adv_get_chip_scsi_ctrl(struct adv_softc *adv)
{
	u_int8_t scsi_ctrl;

	adv_set_bank(adv, 1);
	scsi_ctrl = ADV_INB(adv, ADV_REG_SC);
	adv_set_bank(adv, 0);
	return (scsi_ctrl);
}
#endif

/*
 * XXX Looks like more padding issues in this routine as well.
 *     There has to be a way to turn this into an insw.
 */
static void
adv_get_q_info(struct adv_softc *adv, u_int16_t s_addr,
	       u_int16_t *inbuf, int words)
{
	int	i;

	ADV_OUTW(adv, ADV_LRAM_ADDR, s_addr);
	for (i = 0; i < words; i++, inbuf++) {
		if (i == 5) {
			continue;
		}
		*inbuf = ADV_INW(adv, ADV_LRAM_DATA);
	}
}

static u_int
adv_get_num_free_queues(struct adv_softc *adv, u_int8_t n_qs)
{
	u_int	  cur_used_qs;
	u_int	  cur_free_qs;

	cur_used_qs = adv->cur_active + ADV_MIN_FREE_Q;

	if ((cur_used_qs + n_qs) <= adv->max_openings) {
		cur_free_qs = adv->max_openings - cur_used_qs;
		return (cur_free_qs);
	}
	adv->openings_needed = n_qs;
	return (0);
}

static u_int8_t
adv_alloc_free_queues(struct adv_softc *adv, u_int8_t free_q_head,
		      u_int8_t n_free_q)
{
	int i;

	for (i = 0; i < n_free_q; i++) {
		free_q_head = adv_alloc_free_queue(adv, free_q_head);
		if (free_q_head == ADV_QLINK_END)
			break;
	}
	return (free_q_head);
}

static u_int8_t
adv_alloc_free_queue(struct adv_softc *adv, u_int8_t free_q_head)
{
	u_int16_t	q_addr;
	u_int8_t	next_qp;
	u_int8_t	q_status;

	next_qp = ADV_QLINK_END;
	q_addr = ADV_QNO_TO_QADDR(free_q_head);
	q_status = adv_read_lram_8(adv, q_addr + ADV_SCSIQ_B_STATUS);

	if ((q_status & QS_READY) == 0)
		next_qp = adv_read_lram_8(adv, q_addr + ADV_SCSIQ_B_FWD);

	return (next_qp);
}

static int
adv_send_scsi_queue(struct adv_softc *adv, struct adv_scsi_q *scsiq,
		    u_int8_t n_q_required)
{
	u_int8_t	free_q_head;
	u_int8_t	next_qp;
	u_int8_t	tid_no;
	u_int8_t	target_ix;
	int		retval;

	retval = 1;
	target_ix = scsiq->q2.target_ix;
	tid_no = ADV_TIX_TO_TID(target_ix);
	free_q_head = adv_read_lram_16(adv, ADVV_FREE_Q_HEAD_W) & 0xFF;
	if ((next_qp = adv_alloc_free_queues(adv, free_q_head, n_q_required))
	    != ADV_QLINK_END) {
		scsiq->q1.q_no = free_q_head;

		/*
		 * Now that we know our Q number, point our sense
		 * buffer pointer to a bus dma mapped area where
		 * we can dma the data to.
		 */
		scsiq->q1.sense_addr = adv->sense_physbase
		    + ((free_q_head - 1) * sizeof(struct scsi_sense_data));
		adv_put_ready_sg_list_queue(adv, scsiq, free_q_head);
		adv_write_lram_16(adv, ADVV_FREE_Q_HEAD_W, next_qp);
		adv->cur_active += n_q_required;
		retval = 0;
	}
	return (retval);
}

static void
adv_put_ready_sg_list_queue(struct adv_softc *adv, struct adv_scsi_q *scsiq,
			    u_int q_no)
{
	u_int8_t	sg_list_dwords;
	u_int8_t	sg_index, i;
	u_int8_t	sg_entry_cnt;
	u_int8_t	next_qp;
	u_int16_t	q_addr;
	struct		adv_sg_head *sg_head;
	struct		adv_sg_list_q scsi_sg_q;

	sg_head = scsiq->sg_head;

	if (sg_head) {
		sg_entry_cnt = sg_head->entry_cnt - 1;
#ifdef DIAGNOSTIC
		if (sg_entry_cnt == 0)
			panic("adv_put_ready_sg_list_queue: ScsiQ with "
			      "a SG list but only one element");
		if ((scsiq->q1.cntl & QC_SG_HEAD) == 0)
			panic("adv_put_ready_sg_list_queue: ScsiQ with "
			      "a SG list but QC_SG_HEAD not set");
#endif
		q_addr = ADV_QNO_TO_QADDR(q_no);
		sg_index = 1;
		scsiq->q1.sg_queue_cnt = sg_head->queue_cnt;
		scsi_sg_q.sg_head_qp = q_no;
		scsi_sg_q.cntl = QCSG_SG_XFER_LIST;
		for (i = 0; i < sg_head->queue_cnt; i++) {
			u_int8_t segs_this_q;

			if (sg_entry_cnt > ADV_SG_LIST_PER_Q)
				segs_this_q = ADV_SG_LIST_PER_Q;
			else {
				/* This will be the last segment then */
				segs_this_q = sg_entry_cnt;
				scsi_sg_q.cntl |= QCSG_SG_XFER_END;
			}
			scsi_sg_q.seq_no = i + 1;
			sg_list_dwords = segs_this_q << 1;
			if (i == 0) {
				scsi_sg_q.sg_list_cnt = segs_this_q;
				scsi_sg_q.sg_cur_list_cnt = segs_this_q;
			} else {
				scsi_sg_q.sg_list_cnt = segs_this_q - 1;
				scsi_sg_q.sg_cur_list_cnt = segs_this_q - 1;
			}
			next_qp = adv_read_lram_8(adv, q_addr + ADV_SCSIQ_B_FWD);
			scsi_sg_q.q_no = next_qp;
			q_addr = ADV_QNO_TO_QADDR(next_qp);

			adv_write_lram_16_multi(adv,
						q_addr + ADV_SCSIQ_SGHD_CPY_BEG,
						(u_int16_t *)&scsi_sg_q,
						sizeof(scsi_sg_q) >> 1);
			adv_write_lram_32_multi(adv, q_addr + ADV_SGQ_LIST_BEG,
						(u_int32_t *)&sg_head->sg_list[sg_index],
						sg_list_dwords);
			sg_entry_cnt -= segs_this_q;
			sg_index += ADV_SG_LIST_PER_Q;
		}
	}
	adv_put_ready_queue(adv, scsiq, q_no);
}

static void
adv_put_ready_queue(struct adv_softc *adv, struct adv_scsi_q *scsiq,
		    u_int q_no)
{
	struct		adv_target_transinfo* tinfo;
	u_int		q_addr;
	u_int		tid_no;

	tid_no = ADV_TIX_TO_TID(scsiq->q2.target_ix);
	tinfo = &adv->tinfo[tid_no];
	if ((tinfo->current.period != tinfo->goal.period)
	 || (tinfo->current.offset != tinfo->goal.offset)) {
		adv_msgout_sdtr(adv, tinfo->goal.period, tinfo->goal.offset);
		scsiq->q1.cntl |= QC_MSG_OUT;
	}
	q_addr = ADV_QNO_TO_QADDR(q_no);

	scsiq->q1.status = QS_FREE;

	adv_write_lram_16_multi(adv, q_addr + ADV_SCSIQ_CDB_BEG,
				(u_int16_t *)scsiq->cdbptr,
				scsiq->q2.cdb_len >> 1);

#if BYTE_ORDER == BIG_ENDIAN
	adv_adj_scsiq_endian(scsiq);
#endif

	adv_put_scsiq(adv, q_addr + ADV_SCSIQ_CPY_BEG,
		      (u_int16_t *) &scsiq->q1.cntl,
		      ((sizeof(scsiq->q1) + sizeof(scsiq->q2)) / 2) - 1);

#if CC_WRITE_IO_COUNT
	adv_write_lram_16(adv, q_addr + ADV_SCSIQ_W_REQ_COUNT,
			  adv->req_count);
#endif

#if CC_CLEAR_DMA_REMAIN
	adv_write_lram_32(adv, q_addr + ADV_SCSIQ_DW_REMAIN_XFER_ADDR, 0);
	adv_write_lram_32(adv, q_addr + ADV_SCSIQ_DW_REMAIN_XFER_CNT, 0);
#endif

	adv_write_lram_16(adv, q_addr + ADV_SCSIQ_B_STATUS,
			  (scsiq->q1.q_no << 8) | QS_READY);
}

static void
adv_put_scsiq(struct adv_softc *adv, u_int16_t s_addr,
	      u_int16_t *buffer, int words)
{
	int	i;

	/*
	 * XXX This routine makes *gross* assumptions
	 * about padding in the data structures.
	 * Either the data structures should have explicit
	 * padding members added, or they should have padding
	 * turned off via compiler attributes depending on
	 * which yields better overall performance.  My hunch
	 * would be that turning off padding would be the
	 * faster approach as an outsw is much faster than
	 * this crude loop and accessing un-aligned data
	 * members isn't *that* expensive.  The other choice
	 * would be to modify the ASC script so that
	 * the adv_scsiq_1 structure can be re-arranged so
	 * padding isn't required.
	 */
	ADV_OUTW(adv, ADV_LRAM_ADDR, s_addr);
	for (i = 0; i < words; i++, buffer++) {
		if (i == 2 || i == 10) {
			continue;
		}
		ADV_OUTW(adv, ADV_LRAM_DATA, *buffer);
	}
}

#if BYTE_ORDER == BIG_ENDIAN
void
adv_adj_endian_qdone_info(struct adv_q_done_info *scsiq)
{

	panic("adv(4) not supported on big-endian machines.\n");
}

void
adv_adj_scsiq_endian(struct adv_scsi_q *scsiq)
{

	panic("adv(4) not supported on big-endian machines.\n");
}
#endif

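/*
 * Editorial summary: process an extended message that halted the
 * microcode.  An SDTR is range-checked and accepted silently if it is
 * a valid reply to our own outgoing SDTR that survived filtering
 * unchanged; otherwise we queue our counter-proposal.  A WDTR is
 * answered with zero (8-bit) width, and any other extended message is
 * answered with MSG_MESSAGE_REJECT.
 */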
static void
adv_handle_extmsg_in(struct adv_softc *adv, u_int16_t halt_q_addr,
		     u_int8_t q_cntl, target_bit_vector target_mask,
		     int tid_no)
{
	struct	ext_msg ext_msg;

	adv_read_lram_16_multi(adv, ADVV_MSGIN_BEG, (u_int16_t *) &ext_msg,
			       sizeof(ext_msg) >> 1);
	if ((ext_msg.msg_type == MSG_EXTENDED)
	 && (ext_msg.msg_req == MSG_EXT_SDTR)
	 && (ext_msg.msg_len == MSG_EXT_SDTR_LEN)) {
		union	  ccb *ccb;
		struct	  adv_target_transinfo* tinfo;
		u_int32_t cinfo_index;
		u_int	 period;
		u_int	 offset;
		int	 sdtr_accept;
		u_int8_t orig_offset;

		cinfo_index =
		    adv_read_lram_32(adv, halt_q_addr + ADV_SCSIQ_D_CINFO_IDX);
		ccb = adv->ccb_infos[cinfo_index].ccb;
		tinfo = &adv->tinfo[tid_no];
		sdtr_accept = TRUE;

		orig_offset = ext_msg.req_ack_offset;
		if (ext_msg.xfer_period < tinfo->goal.period) {
			sdtr_accept = FALSE;
			ext_msg.xfer_period = tinfo->goal.period;
		}

		/* Perform range checking */
		period = ext_msg.xfer_period;
		offset = ext_msg.req_ack_offset;
		adv_period_offset_to_sdtr(adv, &period, &offset, tid_no);
		ext_msg.xfer_period = period;
		ext_msg.req_ack_offset = offset;

		/* Record our current sync settings */
		adv_set_syncrate(adv, ccb->ccb_h.path,
				 tid_no, ext_msg.xfer_period,
				 ext_msg.req_ack_offset,
				 ADV_TRANS_GOAL|ADV_TRANS_ACTIVE);

		/* Offset too high or large period forced async */
		if (orig_offset != ext_msg.req_ack_offset)
			sdtr_accept = FALSE;

		if (sdtr_accept && (q_cntl & QC_MSG_OUT)) {
			/* Valid response to our requested negotiation */
			q_cntl &= ~QC_MSG_OUT;
		} else {
			/* Must Respond */
			q_cntl |= QC_MSG_OUT;
			adv_msgout_sdtr(adv, ext_msg.xfer_period,
					ext_msg.req_ack_offset);
		}
	} else if (ext_msg.msg_type == MSG_EXTENDED
		&& ext_msg.msg_req == MSG_EXT_WDTR
		&& ext_msg.msg_len == MSG_EXT_WDTR_LEN) {
		ext_msg.wdtr_width = 0;
		adv_write_lram_16_multi(adv, ADVV_MSGOUT_BEG,
					(u_int16_t *)&ext_msg,
					sizeof(ext_msg) >> 1);
		q_cntl |= QC_MSG_OUT;
	} else {
		ext_msg.msg_type = MSG_MESSAGE_REJECT;
		adv_write_lram_16_multi(adv, ADVV_MSGOUT_BEG,
					(u_int16_t *)&ext_msg,
					sizeof(ext_msg) >> 1);
		q_cntl |= QC_MSG_OUT;
	}
	adv_write_lram_8(adv, halt_q_addr + ADV_SCSIQ_B_CNTL, q_cntl);
}

static void
adv_msgout_sdtr(struct adv_softc *adv, u_int8_t sdtr_period,
		u_int8_t sdtr_offset)
{
	struct	 ext_msg sdtr_buf;

	sdtr_buf.msg_type = MSG_EXTENDED;
	sdtr_buf.msg_len = MSG_EXT_SDTR_LEN;
	sdtr_buf.msg_req = MSG_EXT_SDTR;
	sdtr_buf.xfer_period = sdtr_period;
	sdtr_offset &= ADV_SYN_MAX_OFFSET;
	sdtr_buf.req_ack_offset = sdtr_offset;
	adv_write_lram_16_multi(adv, ADVV_MSGOUT_BEG,
				(u_int16_t *) &sdtr_buf,
				sizeof(sdtr_buf) / 2);
}

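/*
 * Editorial summary: walk every queue block in LRAM and mark as
 * aborted those that are ready, match the given target/lun, and (when
 * queued_only is set) have not yet been claimed by the microcode.  The
 * supplied CAM status is merged into each victim CCB unless an earlier
 * error was already recorded.  Returns the number of queues aborted.
 */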
int
adv_abort_ccb(struct adv_softc *adv, int target, int lun, union ccb *ccb,
	      u_int32_t status, int queued_only)
{
	u_int16_t q_addr;
	u_int8_t  q_no;
	struct adv_q_done_info scsiq_buf;
	struct adv_q_done_info *scsiq;
	u_int8_t  target_ix;
	int	  count;

	scsiq = &scsiq_buf;
	target_ix = ADV_TIDLUN_TO_IX(target, lun);
	count = 0;
	for (q_no = ADV_MIN_ACTIVE_QNO; q_no <= adv->max_openings; q_no++) {
		struct adv_ccb_info *ccb_info;
		q_addr = ADV_QNO_TO_QADDR(q_no);

		adv_copy_lram_doneq(adv, q_addr, scsiq, adv->max_dma_count);
		ccb_info = &adv->ccb_infos[scsiq->d2.ccb_index];
		if (((scsiq->q_status & QS_READY) != 0)
		 && ((scsiq->q_status & QS_ABORTED) == 0)
		 && ((scsiq->cntl & QCSG_SG_XFER_LIST) == 0)
		 && (scsiq->d2.target_ix == target_ix)
		 && (queued_only == 0
		  || !(scsiq->q_status & (QS_DISC1|QS_DISC2|QS_BUSY|QS_DONE)))
		 && (ccb == NULL || (ccb == ccb_info->ccb))) {
			union ccb *aborted_ccb;
			struct adv_ccb_info *cinfo;

			scsiq->q_status |= QS_ABORTED;
			adv_write_lram_8(adv, q_addr + ADV_SCSIQ_B_STATUS,
					 scsiq->q_status);
			aborted_ccb = ccb_info->ccb;
			/* Don't clobber earlier error codes */
			if ((aborted_ccb->ccb_h.status & CAM_STATUS_MASK)
			  == CAM_REQ_INPROG)
				aborted_ccb->ccb_h.status |= status;
			cinfo = (struct adv_ccb_info *)
			    aborted_ccb->ccb_h.ccb_cinfo_ptr;
			cinfo->state |= ACCB_ABORT_QUEUED;
			count++;
		}
	}
	return (count);
}

int
adv_reset_bus(struct adv_softc *adv, int initiate_bus_reset)
{
	int count;
	int i;
	union ccb *ccb;

	i = 200;
	while ((ADV_INW(adv, ADV_CHIP_STATUS) & ADV_CSW_SCSI_RESET_ACTIVE) != 0
	    && i--)
		DELAY(1000);
	adv_reset_chip(adv, initiate_bus_reset);
	adv_reinit_lram(adv);
	for (i = 0; i <= ADV_MAX_TID; i++)
		adv_set_syncrate(adv, NULL, i, /*period*/0,
				 /*offset*/0, ADV_TRANS_CUR);
	ADV_OUTW(adv, ADV_REG_PROG_COUNTER, ADV_MCODE_START_ADDR);

	/* Tell the XPT layer that a bus reset occurred */
	if (adv->path != NULL)
		xpt_async(AC_BUS_RESET, adv->path, NULL);

	count = 0;
	while ((ccb = (union ccb *)LIST_FIRST(&adv->pending_ccbs)) != NULL) {
		if ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_INPROG)
			ccb->ccb_h.status |= CAM_SCSI_BUS_RESET;
		adv_done(adv, ccb, QD_ABORTED_BY_HOST, 0, 0, 0);
		count++;
	}

	adv_start_chip(adv);
	return (count);
}

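/*
 * Editorial note (hedged): the per-target synchronous transfer setting
 * appears to be programmed by first selecting the target through the
 * bank-1 host SCSI ID register; the readback check guards against
 * touching ADV_SYN_OFFSET while the expected target bit is not
 * latched.  The original ID is restored before returning to bank 0.
 */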
static void
adv_set_sdtr_reg_at_id(struct adv_softc *adv, int tid, u_int8_t sdtr_data)
{
	int orig_id;

	adv_set_bank(adv, 1);
	orig_id = ffs(ADV_INB(adv, ADV_HOST_SCSIID)) - 1;
	ADV_OUTB(adv, ADV_HOST_SCSIID, tid);
	if (ADV_INB(adv, ADV_HOST_SCSIID) == (0x01 << tid)) {
		adv_set_bank(adv, 0);
		ADV_OUTB(adv, ADV_SYN_OFFSET, sdtr_data);
	}
	adv_set_bank(adv, 1);
	ADV_OUTB(adv, ADV_HOST_SCSIID, orig_id);
	adv_set_bank(adv, 0);
}
2081