advlib.c revision 41591
/*
 * Low level routines for the Advanced Systems Inc. SCSI controller chips
 *
 * Copyright (c) 1996-1997 Justin Gibbs.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification, immediately at the beginning of the file.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *      $Id: advlib.c,v 1.9 1998/10/29 17:41:34 gibbs Exp $
 */
/*
 * Ported from:
 * advansys.c - Linux Host Driver for AdvanSys SCSI Adapters
 *
 * Copyright (c) 1995-1996 Advanced System Products, Inc.
 * All Rights Reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that redistributions of source
 * code retain the above copyright notice and this comment without
 * modification.
 */

#include <sys/param.h>
#include <sys/systm.h>

#include <machine/bus_pio.h>
#include <machine/bus.h>
#include <machine/clock.h>

#include <cam/cam.h>
#include <cam/cam_ccb.h>
#include <cam/cam_sim.h>
#include <cam/cam_xpt_sim.h>

#include <cam/scsi/scsi_all.h>
#include <cam/scsi/scsi_message.h>
#include <cam/scsi/scsi_da.h>
#include <cam/scsi/scsi_cd.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>

#include <dev/advansys/advansys.h>
#include <dev/advansys/advmcode.h>

struct adv_quirk_entry {
	struct scsi_inquiry_pattern inq_pat;
	u_int8_t quirks;
#define ADV_QUIRK_FIX_ASYN_XFER_ALWAYS	0x01
#define ADV_QUIRK_FIX_ASYN_XFER		0x02
};
76
77static struct adv_quirk_entry adv_quirk_table[] =
78{
79	{
80		{ T_CDROM, SIP_MEDIA_REMOVABLE, "HP", "*", "*" },
81		ADV_QUIRK_FIX_ASYN_XFER_ALWAYS|ADV_QUIRK_FIX_ASYN_XFER
82	},
83	{
84		{ T_CDROM, SIP_MEDIA_REMOVABLE, "NEC", "CD-ROM DRIVE", "*" },
85		0
86	},
87	{
88		{
89		  T_SEQUENTIAL, SIP_MEDIA_REMOVABLE,
90		  "TANDBERG", " TDC 36", "*"
91		},
92		0
93	},
94	{
95		{ T_SEQUENTIAL, SIP_MEDIA_REMOVABLE, "WANGTEK", "*", "*" },
96		0
97	},
98	{
99		{
100		  T_PROCESSOR, SIP_MEDIA_REMOVABLE|SIP_MEDIA_FIXED,
101		  "*", "*", "*"
102		},
103		0
104	},
105	{
106		{
107		  T_SCANNER, SIP_MEDIA_REMOVABLE|SIP_MEDIA_FIXED,
108		  "*", "*", "*"
109		},
110		0
111	},
112	{
113		/* Default quirk entry */
114		{
115		  T_ANY, SIP_MEDIA_REMOVABLE|SIP_MEDIA_FIXED,
116		  /*vendor*/"*", /*product*/"*", /*revision*/"*"
117                },
118                ADV_QUIRK_FIX_ASYN_XFER,
119	}
120};
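
/*
 * Note that cam_quirkmatch() returns the first matching entry, so the
 * more specific entries must precede the catch-all wildcard entry at
 * the end of the table above.
 */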

/*
 * Allowable periods, expressed as SCSI SDTR period factors
 * (units of 4 ns).
 */
u_int8_t adv_sdtr_period_tbl[] =
{
	25,
	30,
	35,
	40,
	50,
	60,
	70,
	85
};

u_int8_t adv_sdtr_period_tbl_ultra[] =
{
	12,
	19,
	25,
	32,
	38,
	44,
	50,
	57,
	63,
	69,
	75,
	82,
	88,
	94,
	100,
	107
};
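
/*
 * An SDTR data byte packs a period table index into its upper nibble
 * and a REQ/ACK offset into its lower nibble; adv_sdtr_to_period_offset()
 * and adv_period_offset_to_sdtr() below perform the conversions.  For
 * example, on a non-ultra board, sdtr_data 0x28 decodes to a period of
 * adv_sdtr_period_tbl[2] (35) with an offset of 8.
 */
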
156
157struct ext_msg {
158	u_int8_t msg_type;
159	u_int8_t msg_len;
160	u_int8_t msg_req;
161	union {
162		struct {
163			u_int8_t sdtr_xfer_period;
164			u_int8_t sdtr_req_ack_offset;
165		} sdtr;
166		struct {
167       			u_int8_t wdtr_width;
168		} wdtr;
169		struct {
170			u_int8_t mdp[4];
171		} mdp;
172	} u_ext_msg;
173	u_int8_t res;
174};
175
176#define	xfer_period	u_ext_msg.sdtr.sdtr_xfer_period
177#define	req_ack_offset	u_ext_msg.sdtr.sdtr_req_ack_offset
178#define	wdtr_width	u_ext_msg.wdtr.wdtr_width
179#define	mdp_b3		u_ext_msg.mdp_b3
180#define	mdp_b2		u_ext_msg.mdp_b2
181#define	mdp_b1		u_ext_msg.mdp_b1
182#define	mdp_b0		u_ext_msg.mdp_b0

/*
 * Some of the early PCI adapters have problems with
 * async transfers.  Instead use an offset of 1.
 */
#define ASYN_SDTR_DATA_FIX_PCI_REV_AB 0x41

/* LRAM routines */
static void	 adv_read_lram_16_multi(struct adv_softc *adv, u_int16_t s_addr,
					u_int16_t *buffer, int count);
static void	 adv_write_lram_16_multi(struct adv_softc *adv,
					 u_int16_t s_addr, u_int16_t *buffer,
					 int count);
static void	 adv_mset_lram_16(struct adv_softc *adv, u_int16_t s_addr,
				  u_int16_t set_value, int count);
static u_int32_t adv_msum_lram_16(struct adv_softc *adv, u_int16_t s_addr,
				  int count);

static int	 adv_write_and_verify_lram_16(struct adv_softc *adv,
					      u_int16_t addr, u_int16_t value);
static u_int32_t adv_read_lram_32(struct adv_softc *adv, u_int16_t addr);

static void	 adv_write_lram_32(struct adv_softc *adv, u_int16_t addr,
				   u_int32_t value);
static void	 adv_write_lram_32_multi(struct adv_softc *adv,
					 u_int16_t s_addr, u_int32_t *buffer,
					 int count);

/* EEPROM routines */
static u_int16_t adv_read_eeprom_16(struct adv_softc *adv, u_int8_t addr);
static u_int16_t adv_write_eeprom_16(struct adv_softc *adv, u_int8_t addr,
				     u_int16_t value);
static int	 adv_write_eeprom_cmd_reg(struct adv_softc *adv,
					  u_int8_t cmd_reg);
static int	 adv_set_eeprom_config_once(struct adv_softc *adv,
					    struct adv_eeprom_config *eeconfig);

/* Initialization */
static u_int32_t adv_load_microcode(struct adv_softc *adv, u_int16_t s_addr,
				    u_int16_t *mcode_buf, u_int16_t mcode_size);

static void	 adv_reinit_lram(struct adv_softc *adv);
static void	 adv_init_lram(struct adv_softc *adv);
static int	 adv_init_microcode_var(struct adv_softc *adv);
static void	 adv_init_qlink_var(struct adv_softc *adv);

/* Interrupts */
static void	 adv_disable_interrupt(struct adv_softc *adv);
static void	 adv_enable_interrupt(struct adv_softc *adv);
static void	 adv_toggle_irq_act(struct adv_softc *adv);

/* Chip Control */
static int	 adv_stop_chip(struct adv_softc *adv);
static int	 adv_host_req_chip_halt(struct adv_softc *adv);
static void	 adv_set_chip_ih(struct adv_softc *adv, u_int16_t ins_code);
#if UNUSED
static u_int8_t  adv_get_chip_scsi_ctrl(struct adv_softc *adv);
#endif

/* Queue handling and execution */
static __inline int
		 adv_sgcount_to_qcount(int sgcount);

static __inline int
adv_sgcount_to_qcount(int sgcount)
{
	int	n_sg_list_qs;

	n_sg_list_qs = ((sgcount - 1) / ADV_SG_LIST_PER_Q);
	if (((sgcount - 1) % ADV_SG_LIST_PER_Q) != 0)
		n_sg_list_qs++;
	return (n_sg_list_qs + 1);
}
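
/*
 * For example, assuming ADV_SG_LIST_PER_Q were 7, a 15 element S/G
 * list would need ceil(14 / 7) = 2 S/G list queues in addition to the
 * request's head queue, so adv_sgcount_to_qcount(15) would return 3.
 */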

static void	 adv_get_q_info(struct adv_softc *adv, u_int16_t s_addr,
				u_int16_t *inbuf, int words);
static u_int	 adv_get_num_free_queues(struct adv_softc *adv, u_int8_t n_qs);
static u_int8_t  adv_alloc_free_queues(struct adv_softc *adv,
				       u_int8_t free_q_head, u_int8_t n_free_q);
static u_int8_t  adv_alloc_free_queue(struct adv_softc *adv,
				      u_int8_t free_q_head);
static int	 adv_send_scsi_queue(struct adv_softc *adv,
				     struct adv_scsi_q *scsiq,
				     u_int8_t n_q_required);
static void	 adv_put_ready_sg_list_queue(struct adv_softc *adv,
					     struct adv_scsi_q *scsiq,
					     u_int q_no);
static void	 adv_put_ready_queue(struct adv_softc *adv,
				     struct adv_scsi_q *scsiq, u_int q_no);
static void	 adv_put_scsiq(struct adv_softc *adv, u_int16_t s_addr,
			       u_int16_t *buffer, int words);

/* Messages */
static void	 adv_handle_extmsg_in(struct adv_softc *adv,
				      u_int16_t halt_q_addr, u_int8_t q_cntl,
				      target_bit_vector target_id,
				      int tid);
static void	 adv_msgout_sdtr(struct adv_softc *adv, u_int8_t sdtr_period,
				 u_int8_t sdtr_offset);
static void	 adv_set_sdtr_reg_at_id(struct adv_softc *adv, int id,
					u_int8_t sdtr_data);

/* Exported functions first */

void
advasync(void *callback_arg, u_int32_t code, struct cam_path *path, void *arg)
{
	struct adv_softc *adv;

	adv = (struct adv_softc *)callback_arg;
	switch (code) {
	case AC_FOUND_DEVICE:
	{
		struct ccb_getdev *cgd;
		target_bit_vector target_mask;
		int num_entries;
		caddr_t match;
		struct adv_quirk_entry *entry;
		struct adv_target_transinfo* tinfo;

		cgd = (struct ccb_getdev *)arg;

		target_mask = ADV_TID_TO_TARGET_MASK(cgd->ccb_h.target_id);

		num_entries = sizeof(adv_quirk_table)/sizeof(*adv_quirk_table);
		match = cam_quirkmatch((caddr_t)&cgd->inq_data,
				       (caddr_t)adv_quirk_table,
				       num_entries, sizeof(*adv_quirk_table),
				       scsi_inquiry_match);

		if (match == NULL)
			panic("advasync: device didn't match wildcard entry!!");

		entry = (struct adv_quirk_entry *)match;

		if (adv->bug_fix_control & ADV_BUG_FIX_ASYN_USE_SYN) {
			if ((entry->quirks & ADV_QUIRK_FIX_ASYN_XFER_ALWAYS)!=0)
				adv->fix_asyn_xfer_always |= target_mask;
			else
				adv->fix_asyn_xfer_always &= ~target_mask;
			/*
			 * We start out life with all bits set and clear them
			 * after we've determined that the fix isn't necessary.
			 * It may well be that we've already cleared a target
			 * before the full inquiry session completes, so don't
			 * gratuitously set a target bit even if it has this
			 * quirk.  But, if the quirk exonerates a device, clear
			 * the bit now.
			 */
			if ((entry->quirks & ADV_QUIRK_FIX_ASYN_XFER) == 0)
				adv->fix_asyn_xfer &= ~target_mask;
		}
		/*
		 * Reset our sync settings now that we've determined
		 * what quirks are in effect for the device.
		 */
		tinfo = &adv->tinfo[cgd->ccb_h.target_id];
		adv_set_syncrate(adv, cgd->ccb_h.path,
				 cgd->ccb_h.target_id,
				 tinfo->current.period,
				 tinfo->current.offset,
				 ADV_TRANS_CUR);
		break;
	}
	case AC_LOST_DEVICE:
	{
		u_int target_mask;

		if (adv->bug_fix_control & ADV_BUG_FIX_ASYN_USE_SYN) {
			target_mask = 0x01 << xpt_path_target_id(path);
			adv->fix_asyn_xfer |= target_mask;
		}

		/*
		 * Revert to async transfers
		 * for the next device.
		 */
		adv_set_syncrate(adv, /*path*/NULL,
				 xpt_path_target_id(path),
				 /*period*/0,
				 /*offset*/0,
				 ADV_TRANS_GOAL|ADV_TRANS_CUR);
		break;
	}
	default:
		break;
	}
}

void
adv_set_bank(struct adv_softc *adv, u_int8_t bank)
{
	u_int8_t control;

	/*
	 * Start out with the bank reset to 0
	 */
	control = ADV_INB(adv, ADV_CHIP_CTRL)
		  &  (~(ADV_CC_SINGLE_STEP | ADV_CC_TEST
			| ADV_CC_DIAG | ADV_CC_SCSI_RESET
			| ADV_CC_CHIP_RESET | ADV_CC_BANK_ONE));
	if (bank == 1) {
		control |= ADV_CC_BANK_ONE;
	} else if (bank == 2) {
		control |= ADV_CC_DIAG | ADV_CC_BANK_ONE;
	}
	ADV_OUTB(adv, ADV_CHIP_CTRL, control);
}

u_int8_t
adv_read_lram_8(struct adv_softc *adv, u_int16_t addr)
{
	u_int8_t   byte_data;
	u_int16_t  word_data;

	/*
	 * LRAM is accessed on 16-bit boundaries.
	 */
	ADV_OUTW(adv, ADV_LRAM_ADDR, addr & 0xFFFE);
	word_data = ADV_INW(adv, ADV_LRAM_DATA);
	if (addr & 1) {
#if BYTE_ORDER == BIG_ENDIAN
		byte_data = (u_int8_t)(word_data & 0xFF);
#else
		byte_data = (u_int8_t)((word_data >> 8) & 0xFF);
#endif
	} else {
#if BYTE_ORDER == BIG_ENDIAN
		byte_data = (u_int8_t)((word_data >> 8) & 0xFF);
#else
		byte_data = (u_int8_t)(word_data & 0xFF);
#endif
	}
	return (byte_data);
}

void
adv_write_lram_8(struct adv_softc *adv, u_int16_t addr, u_int8_t value)
{
	u_int16_t word_data;

	word_data = adv_read_lram_16(adv, addr & 0xFFFE);
	if (addr & 1) {
		word_data &= 0x00FF;
		word_data |= (((u_int8_t)value << 8) & 0xFF00);
	} else {
		word_data &= 0xFF00;
		word_data |= ((u_int8_t)value & 0x00FF);
	}
	adv_write_lram_16(adv, addr & 0xFFFE, word_data);
}

u_int16_t
adv_read_lram_16(struct adv_softc *adv, u_int16_t addr)
{
	ADV_OUTW(adv, ADV_LRAM_ADDR, addr);
	return (ADV_INW(adv, ADV_LRAM_DATA));
}

void
adv_write_lram_16(struct adv_softc *adv, u_int16_t addr, u_int16_t value)
{
	ADV_OUTW(adv, ADV_LRAM_ADDR, addr);
	ADV_OUTW(adv, ADV_LRAM_DATA, value);
}

/*
 * Determine if there is a board at "iobase" by looking
 * for the AdvanSys signatures.  Return 1 if a board is
 * found, 0 otherwise.
 */
int
adv_find_signature(bus_space_tag_t tag, bus_space_handle_t bsh)
{
	u_int16_t signature;

	if (bus_space_read_1(tag, bsh, ADV_SIGNATURE_BYTE) == ADV_1000_ID1B) {
		signature = bus_space_read_2(tag, bsh, ADV_SIGNATURE_WORD);
		if ((signature == ADV_1000_ID0W)
		 || (signature == ADV_1000_ID0W_FIX))
			return (1);
	}
	return (0);
}
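
/*
 * A probe routine is expected to map the candidate I/O range and call
 * adv_find_signature() before committing to an attach.  A minimal,
 * hypothetical sketch (adv_alloc() stands in for whatever softc
 * allocator the bus front end uses):
 *
 *	if (adv_find_signature(tag, bsh)) {
 *		adv = adv_alloc(unit, tag, bsh);
 *		adv_lib_init(adv);
 *	}
 */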

void
adv_lib_init(struct adv_softc *adv)
{
	if ((adv->type & ADV_ULTRA) != 0) {
		adv->sdtr_period_tbl = adv_sdtr_period_tbl_ultra;
		adv->sdtr_period_tbl_size = sizeof(adv_sdtr_period_tbl_ultra);
	} else {
		adv->sdtr_period_tbl = adv_sdtr_period_tbl;
		adv->sdtr_period_tbl_size = sizeof(adv_sdtr_period_tbl);
	}
}

u_int16_t
adv_get_eeprom_config(struct adv_softc *adv,
		      struct adv_eeprom_config *eeprom_config)
{
	u_int16_t	sum;
	u_int16_t	*wbuf;
	u_int8_t	cfg_beg;
	u_int8_t	cfg_end;
	u_int8_t	s_addr;

	wbuf = (u_int16_t *)eeprom_config;
	sum = 0;

	for (s_addr = 0; s_addr < 2; s_addr++, wbuf++) {
		*wbuf = adv_read_eeprom_16(adv, s_addr);
		sum += *wbuf;
	}

	if (adv->type & ADV_VL) {
		cfg_beg = ADV_EEPROM_CFG_BEG_VL;
		cfg_end = ADV_EEPROM_MAX_ADDR_VL;
	} else {
		cfg_beg = ADV_EEPROM_CFG_BEG;
		cfg_end = ADV_EEPROM_MAX_ADDR;
	}

	for (s_addr = cfg_beg; s_addr <= (cfg_end - 1); s_addr++, wbuf++) {
		*wbuf = adv_read_eeprom_16(adv, s_addr);
		sum += *wbuf;
#if ADV_DEBUG_EEPROM
		printf("Addr 0x%x: 0x%04x\n", s_addr, *wbuf);
#endif
	}
	*wbuf = adv_read_eeprom_16(adv, s_addr);
	return (sum);
}
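
/*
 * Note that the final word read above (presumably the stored checksum)
 * is copied into the config structure but excluded from the running
 * sum, so the caller can validate the EEPROM contents by comparing the
 * returned sum against that word.
 */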

int
adv_set_eeprom_config(struct adv_softc *adv,
		      struct adv_eeprom_config *eeprom_config)
{
	int	retry;

	retry = 0;
	while (1) {
		if (adv_set_eeprom_config_once(adv, eeprom_config) == 0) {
			break;
		}
		if (++retry > ADV_EEPROM_MAX_RETRY) {
			break;
		}
	}
	return (retry > ADV_EEPROM_MAX_RETRY);
}

int
adv_reset_chip_and_scsi_bus(struct adv_softc *adv)
{
	adv_stop_chip(adv);
	ADV_OUTB(adv, ADV_CHIP_CTRL,
		 ADV_CC_CHIP_RESET | ADV_CC_SCSI_RESET | ADV_CC_HALT);
	DELAY(200 * 1000);

	adv_set_chip_ih(adv, ADV_INS_RFLAG_WTM);
	adv_set_chip_ih(adv, ADV_INS_HALT);

	ADV_OUTB(adv, ADV_CHIP_CTRL, ADV_CC_CHIP_RESET | ADV_CC_HALT);
	ADV_OUTB(adv, ADV_CHIP_CTRL, ADV_CC_HALT);
	DELAY(200 * 1000);
	return (adv_is_chip_halted(adv));
}

int
adv_test_external_lram(struct adv_softc* adv)
{
	u_int16_t	q_addr;
	u_int16_t	saved_value;
	int		success;

	success = 0;

	q_addr = ADV_QNO_TO_QADDR(241);
	saved_value = adv_read_lram_16(adv, q_addr);
	if (adv_write_and_verify_lram_16(adv, q_addr, 0x55AA) == 0) {
		success = 1;
		adv_write_lram_16(adv, q_addr, saved_value);
	}
	return (success);
}

int
adv_init_lram_and_mcode(struct adv_softc *adv)
{
	u_int32_t	retval;

	adv_disable_interrupt(adv);

	adv_init_lram(adv);

	retval = adv_load_microcode(adv, 0, (u_int16_t *)adv_mcode,
				    adv_mcode_size);
	if (retval != adv_mcode_chksum) {
		printf("adv%d: Microcode download failed checksum!\n",
		       adv->unit);
		return (1);
	}

	if (adv_init_microcode_var(adv) != 0)
		return (1);

	adv_enable_interrupt(adv);
	return (0);
}

u_int8_t
adv_get_chip_irq(struct adv_softc *adv)
{
	u_int16_t	cfg_lsw;
	u_int8_t	chip_irq;

	cfg_lsw = ADV_INW(adv, ADV_CONFIG_LSW);

	if ((adv->type & ADV_VL) != 0) {
		chip_irq = (u_int8_t)(((cfg_lsw >> 2) & 0x07));
		if ((chip_irq == 0) ||
		    (chip_irq == 4) ||
		    (chip_irq == 7)) {
			return (0);
		}
		return (chip_irq + (ADV_MIN_IRQ_NO - 1));
	}
	chip_irq = (u_int8_t)(((cfg_lsw >> 2) & 0x03));
	if (chip_irq == 3)
		chip_irq += 2;
	return (chip_irq + ADV_MIN_IRQ_NO);
}
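
/*
 * Worked example for the VL decode above: the IRQ is taken from bits
 * 2-4 of the config LSW; encodings of 0, 4, and 7 mean "no IRQ", and,
 * assuming ADV_MIN_IRQ_NO is 10, an encoding of 2 maps to IRQ 11.
 */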

u_int8_t
adv_set_chip_irq(struct adv_softc *adv, u_int8_t irq_no)
{
	u_int16_t	cfg_lsw;

	if ((adv->type & ADV_VL) != 0) {
		if (irq_no != 0) {
			if ((irq_no < ADV_MIN_IRQ_NO)
			 || (irq_no > ADV_MAX_IRQ_NO)) {
				irq_no = 0;
			} else {
				irq_no -= ADV_MIN_IRQ_NO - 1;
			}
		}
		cfg_lsw = ADV_INW(adv, ADV_CONFIG_LSW) & 0xFFE3;
		cfg_lsw |= 0x0010;
		ADV_OUTW(adv, ADV_CONFIG_LSW, cfg_lsw);
		adv_toggle_irq_act(adv);

		cfg_lsw = ADV_INW(adv, ADV_CONFIG_LSW) & 0xFFE0;
		cfg_lsw |= (irq_no & 0x07) << 2;
		ADV_OUTW(adv, ADV_CONFIG_LSW, cfg_lsw);
		adv_toggle_irq_act(adv);
	} else if ((adv->type & ADV_ISA) != 0) {
		if (irq_no == 15)
			irq_no -= 2;
		irq_no -= ADV_MIN_IRQ_NO;
		cfg_lsw = ADV_INW(adv, ADV_CONFIG_LSW) & 0xFFF3;
		cfg_lsw |= (irq_no & 0x03) << 2;
		ADV_OUTW(adv, ADV_CONFIG_LSW, cfg_lsw);
	}
	return (adv_get_chip_irq(adv));
}

void
adv_set_chip_scsiid(struct adv_softc *adv, int new_id)
{
	u_int16_t cfg_lsw;

	cfg_lsw = ADV_INW(adv, ADV_CONFIG_LSW);
	if (ADV_CONFIG_SCSIID(cfg_lsw) == new_id)
		return;
	cfg_lsw &= ~ADV_CFG_LSW_SCSIID;
	cfg_lsw |= (new_id & ADV_MAX_TID) << ADV_CFG_LSW_SCSIID_SHIFT;
	ADV_OUTW(adv, ADV_CONFIG_LSW, cfg_lsw);
}

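/*
 * Build a request for the microcode and, if resources permit, hand it
 * to the chip.  Returns 0 if the request was sent and non-zero if
 * there were not enough free microcode queues to hold it; QC_URGENT
 * requests are sent regardless of the free queue count.
 */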
int
adv_execute_scsi_queue(struct adv_softc *adv, struct adv_scsi_q *scsiq,
		       u_int32_t datalen)
{
	struct		adv_target_transinfo* tinfo;
	u_int32_t	*p_data_addr;
	u_int32_t	*p_data_bcount;
	int		disable_syn_offset_one_fix;
	int		retval;
	u_int		n_q_required;
	u_int32_t	addr;
	u_int8_t	sg_entry_cnt;
	u_int8_t	target_ix;
	u_int8_t	sg_entry_cnt_minus_one;
	u_int8_t	tid_no;

	scsiq->q1.q_no = 0;
	retval = 1;  /* Default to error case */
	target_ix = scsiq->q2.target_ix;
	tid_no = ADV_TIX_TO_TID(target_ix);
	tinfo = &adv->tinfo[tid_no];

	if (scsiq->cdbptr[0] == REQUEST_SENSE) {
		/* Renegotiate if appropriate. */
		adv_set_syncrate(adv, /*struct cam_path */NULL,
				 tid_no, /*period*/0, /*offset*/0,
				 ADV_TRANS_CUR);
		if (tinfo->current.period != tinfo->goal.period) {
			adv_msgout_sdtr(adv, tinfo->goal.period,
					tinfo->goal.offset);
			scsiq->q1.cntl |= (QC_MSG_OUT | QC_URGENT);
		}
	}

	if ((scsiq->q1.cntl & QC_SG_HEAD) != 0) {
		sg_entry_cnt = scsiq->sg_head->entry_cnt;
		sg_entry_cnt_minus_one = sg_entry_cnt - 1;

#ifdef DIAGNOSTIC
		if (sg_entry_cnt <= 1)
			panic("adv_execute_scsi_queue: Queue "
			      "with QC_SG_HEAD set but %d segs.", sg_entry_cnt);

		if (sg_entry_cnt > ADV_MAX_SG_LIST)
			panic("adv_execute_scsi_queue: "
			      "Queue with too many segs.");

		if (adv->type & (ADV_ISA | ADV_VL | ADV_EISA)) {
			int i;

			for (i = 0; i < sg_entry_cnt_minus_one; i++) {
				addr = scsiq->sg_head->sg_list[i].addr +
				       scsiq->sg_head->sg_list[i].bytes;

				if ((addr & 0x0003) != 0)
					panic("adv_execute_scsi_queue: SG "
					      "with odd address or byte count");
			}
		}
#endif
		p_data_addr =
		    &scsiq->sg_head->sg_list[sg_entry_cnt_minus_one].addr;
		p_data_bcount =
		    &scsiq->sg_head->sg_list[sg_entry_cnt_minus_one].bytes;

		n_q_required = adv_sgcount_to_qcount(sg_entry_cnt);
		scsiq->sg_head->queue_cnt = n_q_required - 1;
	} else {
		p_data_addr = &scsiq->q1.data_addr;
		p_data_bcount = &scsiq->q1.data_cnt;
		n_q_required = 1;
	}

	disable_syn_offset_one_fix = FALSE;

	if ((adv->fix_asyn_xfer & scsiq->q1.target_id) != 0
	 && (adv->fix_asyn_xfer_always & scsiq->q1.target_id) == 0) {
		if (datalen != 0) {
			if (datalen < 512) {
				disable_syn_offset_one_fix = TRUE;
			} else {
				if (scsiq->cdbptr[0] == INQUIRY
				 || scsiq->cdbptr[0] == REQUEST_SENSE
				 || scsiq->cdbptr[0] == READ_CAPACITY
				 || scsiq->cdbptr[0] == MODE_SELECT_6
				 || scsiq->cdbptr[0] == MODE_SENSE_6
				 || scsiq->cdbptr[0] == MODE_SENSE_10
				 || scsiq->cdbptr[0] == MODE_SELECT_10
				 || scsiq->cdbptr[0] == READ_TOC) {
					disable_syn_offset_one_fix = TRUE;
				}
			}
		}
	}

	if (disable_syn_offset_one_fix) {
		scsiq->q2.tag_code &=
		    ~(MSG_SIMPLE_Q_TAG|MSG_HEAD_OF_Q_TAG|MSG_ORDERED_Q_TAG);
		scsiq->q2.tag_code |= (ADV_TAG_FLAG_DISABLE_ASYN_USE_SYN_FIX
				     | ADV_TAG_FLAG_DISABLE_DISCONNECT);
	}

	if ((adv->bug_fix_control & ADV_BUG_FIX_IF_NOT_DWB) != 0
	 && (scsiq->cdbptr[0] == READ_10 || scsiq->cdbptr[0] == READ_6)) {
		u_int8_t extra_bytes;

		addr = *p_data_addr + *p_data_bcount;
		extra_bytes = addr & 0x0003;
		if (extra_bytes != 0
		 && ((scsiq->q1.cntl & QC_SG_HEAD) != 0
		  || (scsiq->q1.data_cnt & 0x01FF) == 0)) {
			scsiq->q2.tag_code |= ADV_TAG_FLAG_EXTRA_BYTES;
			scsiq->q1.extra_bytes = extra_bytes;
			*p_data_bcount -= extra_bytes;
		}
	}

	if ((adv_get_num_free_queues(adv, n_q_required) >= n_q_required)
	 || ((scsiq->q1.cntl & QC_URGENT) != 0))
		retval = adv_send_scsi_queue(adv, scsiq, n_q_required);

	return (retval);
}

u_int8_t
adv_copy_lram_doneq(struct adv_softc *adv, u_int16_t q_addr,
		    struct adv_q_done_info *scsiq, u_int32_t max_dma_count)
{
	u_int16_t val;
	u_int8_t  sg_queue_cnt;

	adv_get_q_info(adv, q_addr + ADV_SCSIQ_DONE_INFO_BEG,
		       (u_int16_t *)scsiq,
		       (sizeof(scsiq->d2) + sizeof(scsiq->d3)) / 2);

#if BYTE_ORDER == BIG_ENDIAN
	adv_adj_endian_qdone_info(scsiq);
#endif

	val = adv_read_lram_16(adv, q_addr + ADV_SCSIQ_B_STATUS);
	scsiq->q_status = val & 0xFF;
	scsiq->q_no = (val >> 8) & 0xFF;

	val = adv_read_lram_16(adv, q_addr + ADV_SCSIQ_B_CNTL);
	scsiq->cntl = val & 0xFF;
	sg_queue_cnt = (val >> 8) & 0xFF;

	val = adv_read_lram_16(adv, q_addr + ADV_SCSIQ_B_SENSE_LEN);
	scsiq->sense_len = val & 0xFF;
	scsiq->extra_bytes = (val >> 8) & 0xFF;

	/*
	 * XXX
	 * Due to a bug in accessing LRAM on the 940UA, we only pull
	 * the low 16 bits of residual information.  In the future, we'll
	 * want to allow transfers larger than 64K, but hopefully we'll
	 * get a new firmware revision from AdvanSys that addresses this
	 * problem before we up the transfer size.
	 */
	scsiq->remain_bytes =
	    adv_read_lram_16(adv, q_addr + ADV_SCSIQ_DW_REMAIN_XFER_CNT);
	/*
	 * XXX Is this just a safeguard or will the counter really
	 * have bogus upper bits?
	 */
	scsiq->remain_bytes &= max_dma_count;

	return (sg_queue_cnt);
}

int
adv_start_chip(struct adv_softc *adv)
{
	ADV_OUTB(adv, ADV_CHIP_CTRL, 0);
	if ((ADV_INW(adv, ADV_CHIP_STATUS) & ADV_CSW_HALTED) != 0)
		return (0);
	return (1);
}

int
adv_stop_execution(struct adv_softc *adv)
{
	int count;

	count = 0;
	if (adv_read_lram_8(adv, ADV_STOP_CODE_B) == 0) {
		adv_write_lram_8(adv, ADV_STOP_CODE_B,
				 ADV_STOP_REQ_RISC_STOP);
		do {
			if (adv_read_lram_8(adv, ADV_STOP_CODE_B) &
				ADV_STOP_ACK_RISC_STOP) {
				return (1);
			}
			DELAY(1000);
		} while (count++ < 20);
	}
	return (0);
}

int
adv_is_chip_halted(struct adv_softc *adv)
{
	if ((ADV_INW(adv, ADV_CHIP_STATUS) & ADV_CSW_HALTED) != 0) {
		if ((ADV_INB(adv, ADV_CHIP_CTRL) & ADV_CC_HALT) != 0) {
			return (1);
		}
	}
	return (0);
}

/*
 * XXX The numeric constants and the loops in this routine
 * need to be documented.
 */
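/*
 * As best as can be inferred: the first loop below bounds the wait
 * (0x7FFF iterations) for the microcode to finish raising its
 * interrupt, and the second loop retries the acknowledgement up to
 * four times while an interrupt remains pending.
 */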
void
adv_ack_interrupt(struct adv_softc *adv)
{
	u_int8_t	host_flag;
	u_int8_t	risc_flag;
	int		loop;

	loop = 0;
	do {
		risc_flag = adv_read_lram_8(adv, ADVV_RISC_FLAG_B);
		if (loop++ > 0x7FFF) {
			break;
		}
	} while ((risc_flag & ADV_RISC_FLAG_GEN_INT) != 0);

	host_flag = adv_read_lram_8(adv, ADVV_HOST_FLAG_B);
	adv_write_lram_8(adv, ADVV_HOST_FLAG_B,
			 host_flag | ADV_HOST_FLAG_ACK_INT);

	ADV_OUTW(adv, ADV_CHIP_STATUS, ADV_CIW_INT_ACK);
	loop = 0;
	while (ADV_INW(adv, ADV_CHIP_STATUS) & ADV_CSW_INT_PENDING) {
		ADV_OUTW(adv, ADV_CHIP_STATUS, ADV_CIW_INT_ACK);
		if (loop++ > 3) {
			break;
		}
	}

	adv_write_lram_8(adv, ADVV_HOST_FLAG_B, host_flag);
}

/*
 * Handle all conditions that may halt the chip waiting
 * for us to intervene.
 */
void
adv_isr_chip_halted(struct adv_softc *adv)
{
	u_int16_t	  int_halt_code;
	u_int16_t	  halt_q_addr;
	target_bit_vector target_mask;
	target_bit_vector scsi_busy;
	u_int8_t	  halt_qp;
	u_int8_t	  target_ix;
	u_int8_t	  q_cntl;
	u_int8_t	  tid_no;

	int_halt_code = adv_read_lram_16(adv, ADVV_HALTCODE_W);
	halt_qp = adv_read_lram_8(adv, ADVV_CURCDB_B);
	halt_q_addr = ADV_QNO_TO_QADDR(halt_qp);
	target_ix = adv_read_lram_8(adv, halt_q_addr + ADV_SCSIQ_B_TARGET_IX);
	q_cntl = adv_read_lram_8(adv, halt_q_addr + ADV_SCSIQ_B_CNTL);
	tid_no = ADV_TIX_TO_TID(target_ix);
	target_mask = ADV_TID_TO_TARGET_MASK(tid_no);
	if (int_halt_code == ADV_HALT_DISABLE_ASYN_USE_SYN_FIX) {
		/*
		 * Temporarily disable the async fix by removing
		 * this target from the list of affected targets,
		 * setting our async rate, and then putting us
		 * back into the mask.
		 */
		adv->fix_asyn_xfer &= ~target_mask;
		adv_set_syncrate(adv, /*struct cam_path */NULL,
				 tid_no, /*period*/0, /*offset*/0,
				 ADV_TRANS_ACTIVE);
		adv->fix_asyn_xfer |= target_mask;
	} else if (int_halt_code == ADV_HALT_ENABLE_ASYN_USE_SYN_FIX) {
		adv_set_syncrate(adv, /*struct cam_path */NULL,
				 tid_no, /*period*/0, /*offset*/0,
				 ADV_TRANS_ACTIVE);
	} else if (int_halt_code == ADV_HALT_EXTMSG_IN) {
		adv_handle_extmsg_in(adv, halt_q_addr, q_cntl,
				     target_mask, tid_no);
	} else if (int_halt_code == ADV_HALT_CHK_CONDITION) {
		struct	 adv_target_transinfo* tinfo;
		union	 ccb *ccb;
		u_int8_t tag_code;
		u_int8_t q_status;

		tinfo = &adv->tinfo[tid_no];
		q_cntl |= QC_REQ_SENSE;

		/* Renegotiate if appropriate. */
		adv_set_syncrate(adv, /*struct cam_path */NULL,
				 tid_no, /*period*/0, /*offset*/0,
				 ADV_TRANS_CUR);
		if (tinfo->current.period != tinfo->goal.period) {
			adv_msgout_sdtr(adv, tinfo->goal.period,
					tinfo->goal.offset);
			q_cntl |= QC_MSG_OUT;
		}
		adv_write_lram_8(adv, halt_q_addr + ADV_SCSIQ_B_CNTL, q_cntl);

		/* Don't tag request sense commands */
		tag_code = adv_read_lram_8(adv,
					   halt_q_addr + ADV_SCSIQ_B_TAG_CODE);
		tag_code &=
		    ~(MSG_SIMPLE_Q_TAG|MSG_HEAD_OF_Q_TAG|MSG_ORDERED_Q_TAG);

		if ((adv->fix_asyn_xfer & target_mask) != 0
		 && (adv->fix_asyn_xfer_always & target_mask) == 0) {
			tag_code |= (ADV_TAG_FLAG_DISABLE_DISCONNECT
				 | ADV_TAG_FLAG_DISABLE_ASYN_USE_SYN_FIX);
		}
		adv_write_lram_8(adv, halt_q_addr + ADV_SCSIQ_B_TAG_CODE,
				 tag_code);
		q_status = adv_read_lram_8(adv,
					   halt_q_addr + ADV_SCSIQ_B_STATUS);
		q_status |= (QS_READY | QS_BUSY);
		adv_write_lram_8(adv, halt_q_addr + ADV_SCSIQ_B_STATUS,
				 q_status);
		/*
		 * Freeze the devq until we can handle the sense condition.
		 */
		ccb = (union ccb *) adv_read_lram_32(adv, halt_q_addr
							 + ADV_SCSIQ_D_CCBPTR);
		xpt_freeze_devq(ccb->ccb_h.path, /*count*/1);
		ccb->ccb_h.status |= CAM_DEV_QFRZN;
		adv_abort_ccb(adv, tid_no, ADV_TIX_TO_LUN(target_ix),
			      /*ccb*/NULL, CAM_REQUEUE_REQ,
			      /*queued_only*/TRUE);
		scsi_busy = adv_read_lram_8(adv, ADVV_SCSIBUSY_B);
		scsi_busy &= ~target_mask;
		adv_write_lram_8(adv, ADVV_SCSIBUSY_B, scsi_busy);
	} else if (int_halt_code == ADV_HALT_SDTR_REJECTED) {
		struct	ext_msg out_msg;

		adv_read_lram_16_multi(adv, ADVV_MSGOUT_BEG,
				       (u_int16_t *) &out_msg,
				       sizeof(out_msg)/2);

		if ((out_msg.msg_type == MSG_EXTENDED)
		 && (out_msg.msg_len == MSG_EXT_SDTR_LEN)
		 && (out_msg.msg_req == MSG_EXT_SDTR)) {
			/* Revert to Async */
			adv_set_syncrate(adv, /*struct cam_path */NULL,
					 tid_no, /*period*/0, /*offset*/0,
					 ADV_TRANS_GOAL|ADV_TRANS_ACTIVE);
		}
		q_cntl &= ~QC_MSG_OUT;
		adv_write_lram_8(adv, halt_q_addr + ADV_SCSIQ_B_CNTL, q_cntl);
	} else if (int_halt_code == ADV_HALT_SS_QUEUE_FULL) {
		u_int8_t scsi_status;
		union ccb *ccb;

		scsi_status = adv_read_lram_8(adv, halt_q_addr
					      + ADV_SCSIQ_SCSI_STATUS);
		ccb = (union ccb *) adv_read_lram_32(adv, halt_q_addr
						     + ADV_SCSIQ_D_CCBPTR);
		xpt_freeze_devq(ccb->ccb_h.path, /*count*/1);
		ccb->ccb_h.status |= CAM_DEV_QFRZN|CAM_SCSI_STATUS_ERROR;
		ccb->csio.scsi_status = SCSI_STATUS_QUEUE_FULL;
		adv_abort_ccb(adv, tid_no, ADV_TIX_TO_LUN(target_ix),
			      /*ccb*/NULL, CAM_REQUEUE_REQ,
			      /*queued_only*/TRUE);
		scsi_busy = adv_read_lram_8(adv, ADVV_SCSIBUSY_B);
		scsi_busy &= ~target_mask;
		adv_write_lram_8(adv, ADVV_SCSIBUSY_B, scsi_busy);
	}
	adv_write_lram_16(adv, ADVV_HALTCODE_W, 0);
}

void
adv_sdtr_to_period_offset(struct adv_softc *adv,
			  u_int8_t sync_data, u_int8_t *period,
			  u_int8_t *offset, int tid)
{
	if ((adv->fix_asyn_xfer & ADV_TID_TO_TARGET_MASK(tid)) != 0
	 && (sync_data == ASYN_SDTR_DATA_FIX_PCI_REV_AB)) {
		*period = *offset = 0;
	} else {
		*period = adv->sdtr_period_tbl[((sync_data >> 4) & 0xF)];
		*offset = sync_data & 0xF;
	}
}

void
adv_set_syncrate(struct adv_softc *adv, struct cam_path *path,
		 u_int tid, u_int period, u_int offset, u_int type)
{
	struct adv_target_transinfo* tinfo;
	u_int old_period;
	u_int old_offset;
	u_int8_t sdtr_data;

	tinfo = &adv->tinfo[tid];

	/* Filter our input */
	sdtr_data = adv_period_offset_to_sdtr(adv, &period,
					      &offset, tid);

	old_period = tinfo->current.period;
	old_offset = tinfo->current.offset;

	if ((type & ADV_TRANS_CUR) != 0
	 && ((old_period != period || old_offset != offset)
	  || period == 0 || offset == 0) /*Changes in asyn fix settings*/) {
		int s;
		int halted;

		s = splcam();
		halted = adv_is_chip_halted(adv);
		if (halted == 0)
			/* Must halt the chip first */
			adv_host_req_chip_halt(adv);

		/* Update current hardware settings */
		adv_set_sdtr_reg_at_id(adv, tid, sdtr_data);

		/*
		 * If a target can run in sync mode, we don't need
		 * to check it for sync problems.
		 */
		if (offset != 0)
			adv->fix_asyn_xfer &= ~ADV_TID_TO_TARGET_MASK(tid);

		if (halted == 0)
			/* Start the chip again */
			adv_start_chip(adv);

		splx(s);
		tinfo->current.period = period;
		tinfo->current.offset = offset;

		if (path != NULL) {
			/*
			 * Tell the SCSI layer about the
			 * new transfer parameters.
			 */
			struct	ccb_trans_settings neg;

			neg.sync_period = period;
			neg.sync_offset = offset;
			neg.valid = CCB_TRANS_SYNC_RATE_VALID
				  | CCB_TRANS_SYNC_OFFSET_VALID;
			xpt_setup_ccb(&neg.ccb_h, path, /*priority*/1);
			xpt_async(AC_TRANSFER_NEG, path, &neg);
		}
	}

	if ((type & ADV_TRANS_GOAL) != 0) {
		tinfo->goal.period = period;
		tinfo->goal.offset = offset;
	}

	if ((type & ADV_TRANS_USER) != 0) {
		tinfo->user.period = period;
		tinfo->user.offset = offset;
	}
}

u_int8_t
adv_period_offset_to_sdtr(struct adv_softc *adv, u_int *period,
			  u_int *offset, int tid)
{
	u_int i;
	u_int dummy_offset;
	u_int dummy_period;

	if (offset == NULL) {
		dummy_offset = 0;
		offset = &dummy_offset;
	}

	if (period == NULL) {
		dummy_period = 0;
		period = &dummy_period;
	}

#define MIN(a,b) (((a) < (b)) ? (a) : (b))

	*offset = MIN(ADV_SYN_MAX_OFFSET, *offset);
	if (*period != 0 && *offset != 0) {
		for (i = 0; i < adv->sdtr_period_tbl_size; i++) {
			if (*period <= adv->sdtr_period_tbl[i]) {
				/*
				 * When responding to a target that requests
				 * sync, the requested rate may fall between
				 * two rates that we can output, but still be
				 * a rate that we can receive.  Because of this,
				 * we want to respond to the target with
				 * the same rate that it sent to us even
				 * if the period we use to send data to it
				 * is lower.  Only lower the response period
				 * if we must.
				 */
				if (i == 0 /* Our maximum rate */)
					*period = adv->sdtr_period_tbl[0];
				return ((i << 4) | *offset);
			}
		}
	}

	/* Must go async */
	*period = 0;
	*offset = 0;
	if (adv->fix_asyn_xfer & ADV_TID_TO_TARGET_MASK(tid))
		return (ASYN_SDTR_DATA_FIX_PCI_REV_AB);
	return (0);
}
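
/*
 * Worked example: on a non-ultra board, a request for period 30 and
 * offset 8 matches adv_sdtr_period_tbl[1], so the routine above
 * returns sdtr_data 0x18 and leaves the period at 30.
 */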

/* Internal Routines */

static void
adv_read_lram_16_multi(struct adv_softc *adv, u_int16_t s_addr,
		       u_int16_t *buffer, int count)
{
	ADV_OUTW(adv, ADV_LRAM_ADDR, s_addr);
	ADV_INSW(adv, ADV_LRAM_DATA, buffer, count);
}

static void
adv_write_lram_16_multi(struct adv_softc *adv, u_int16_t s_addr,
			u_int16_t *buffer, int count)
{
	ADV_OUTW(adv, ADV_LRAM_ADDR, s_addr);
	ADV_OUTSW(adv, ADV_LRAM_DATA, buffer, count);
}

static void
adv_mset_lram_16(struct adv_softc *adv, u_int16_t s_addr,
		 u_int16_t set_value, int count)
{
	ADV_OUTW(adv, ADV_LRAM_ADDR, s_addr);
	bus_space_set_multi_2(adv->tag, adv->bsh, ADV_LRAM_DATA,
			      set_value, count);
}

static u_int32_t
adv_msum_lram_16(struct adv_softc *adv, u_int16_t s_addr, int count)
{
	u_int32_t	sum;
	int		i;

	sum = 0;
	ADV_OUTW(adv, ADV_LRAM_ADDR, s_addr);
	for (i = 0; i < count; i++)
		sum += ADV_INW(adv, ADV_LRAM_DATA);
	return (sum);
}

static int
adv_write_and_verify_lram_16(struct adv_softc *adv, u_int16_t addr,
			     u_int16_t value)
{
	int	retval;

	retval = 0;
	ADV_OUTW(adv, ADV_LRAM_ADDR, addr);
	ADV_OUTW(adv, ADV_LRAM_DATA, value);
	DELAY(10000);
	ADV_OUTW(adv, ADV_LRAM_ADDR, addr);
	if (value != ADV_INW(adv, ADV_LRAM_DATA))
		retval = 1;
	return (retval);
}

static u_int32_t
adv_read_lram_32(struct adv_softc *adv, u_int16_t addr)
{
	u_int16_t	val_low, val_high;

	ADV_OUTW(adv, ADV_LRAM_ADDR, addr);

#if BYTE_ORDER == BIG_ENDIAN
	val_high = ADV_INW(adv, ADV_LRAM_DATA);
	val_low = ADV_INW(adv, ADV_LRAM_DATA);
#else
	val_low = ADV_INW(adv, ADV_LRAM_DATA);
	val_high = ADV_INW(adv, ADV_LRAM_DATA);
#endif

	return (((u_int32_t)val_high << 16) | (u_int32_t)val_low);
}

static void
adv_write_lram_32(struct adv_softc *adv, u_int16_t addr, u_int32_t value)
{
	ADV_OUTW(adv, ADV_LRAM_ADDR, addr);

#if BYTE_ORDER == BIG_ENDIAN
	ADV_OUTW(adv, ADV_LRAM_DATA, (u_int16_t)((value >> 16) & 0xFFFF));
	ADV_OUTW(adv, ADV_LRAM_DATA, (u_int16_t)(value & 0xFFFF));
#else
	ADV_OUTW(adv, ADV_LRAM_DATA, (u_int16_t)(value & 0xFFFF));
	ADV_OUTW(adv, ADV_LRAM_DATA, (u_int16_t)((value >> 16) & 0xFFFF));
#endif
}

static void
adv_write_lram_32_multi(struct adv_softc *adv, u_int16_t s_addr,
			u_int32_t *buffer, int count)
{
	ADV_OUTW(adv, ADV_LRAM_ADDR, s_addr);
	ADV_OUTSW(adv, ADV_LRAM_DATA, (u_int16_t *)buffer, count * 2);
}

static u_int16_t
adv_read_eeprom_16(struct adv_softc *adv, u_int8_t addr)
{
	u_int16_t read_wval;
	u_int8_t  cmd_reg;

	adv_write_eeprom_cmd_reg(adv, ADV_EEPROM_CMD_WRITE_DISABLE);
	DELAY(1000);
	cmd_reg = addr | ADV_EEPROM_CMD_READ;
	adv_write_eeprom_cmd_reg(adv, cmd_reg);
	DELAY(1000);
	read_wval = ADV_INW(adv, ADV_EEPROM_DATA);
	DELAY(1000);
	return (read_wval);
}

static u_int16_t
adv_write_eeprom_16(struct adv_softc *adv, u_int8_t addr, u_int16_t value)
{
	u_int16_t	read_value;

	read_value = adv_read_eeprom_16(adv, addr);
	if (read_value != value) {
		adv_write_eeprom_cmd_reg(adv, ADV_EEPROM_CMD_WRITE_ENABLE);
		DELAY(1000);

		ADV_OUTW(adv, ADV_EEPROM_DATA, value);
		DELAY(1000);

		adv_write_eeprom_cmd_reg(adv, ADV_EEPROM_CMD_WRITE | addr);
		DELAY(20 * 1000);

		adv_write_eeprom_cmd_reg(adv, ADV_EEPROM_CMD_WRITE_DISABLE);
		DELAY(1000);
		read_value = adv_read_eeprom_16(adv, addr);
	}
	return (read_value);
}

static int
adv_write_eeprom_cmd_reg(struct adv_softc *adv, u_int8_t cmd_reg)
{
	u_int8_t read_back;
	int	 retry;

	retry = 0;
	while (1) {
		ADV_OUTB(adv, ADV_EEPROM_CMD, cmd_reg);
		DELAY(1000);
		read_back = ADV_INB(adv, ADV_EEPROM_CMD);
		if (read_back == cmd_reg) {
			return (1);
		}
		if (retry++ > ADV_EEPROM_MAX_RETRY) {
			return (0);
		}
	}
}

static int
adv_set_eeprom_config_once(struct adv_softc *adv,
			   struct adv_eeprom_config *eeprom_config)
{
	int		n_error;
	u_int16_t	*wbuf;
	u_int16_t	sum;
	u_int8_t	s_addr;
	u_int8_t	cfg_beg;
	u_int8_t	cfg_end;

	wbuf = (u_int16_t *)eeprom_config;
	n_error = 0;
	sum = 0;
	for (s_addr = 0; s_addr < 2; s_addr++, wbuf++) {
		sum += *wbuf;
		if (*wbuf != adv_write_eeprom_16(adv, s_addr, *wbuf)) {
			n_error++;
		}
	}
	if (adv->type & ADV_VL) {
		cfg_beg = ADV_EEPROM_CFG_BEG_VL;
		cfg_end = ADV_EEPROM_MAX_ADDR_VL;
	} else {
		cfg_beg = ADV_EEPROM_CFG_BEG;
		cfg_end = ADV_EEPROM_MAX_ADDR;
	}

	for (s_addr = cfg_beg; s_addr <= (cfg_end - 1); s_addr++, wbuf++) {
		sum += *wbuf;
		if (*wbuf != adv_write_eeprom_16(adv, s_addr, *wbuf)) {
			n_error++;
		}
	}
	*wbuf = sum;
	if (sum != adv_write_eeprom_16(adv, s_addr, sum)) {
		n_error++;
	}
	wbuf = (u_int16_t *)eeprom_config;
	for (s_addr = 0; s_addr < 2; s_addr++, wbuf++) {
		if (*wbuf != adv_read_eeprom_16(adv, s_addr)) {
			n_error++;
		}
	}
	for (s_addr = cfg_beg; s_addr <= cfg_end; s_addr++, wbuf++) {
		if (*wbuf != adv_read_eeprom_16(adv, s_addr)) {
			n_error++;
		}
	}
	return (n_error);
}

static u_int32_t
adv_load_microcode(struct adv_softc *adv, u_int16_t s_addr,
		   u_int16_t *mcode_buf, u_int16_t mcode_size)
{
	u_int32_t chksum;
	u_int16_t mcode_lram_size;
	u_int16_t mcode_chksum;

	mcode_lram_size = mcode_size >> 1;
	/* XXX Why zero the memory just before you write the whole thing?? */
	adv_mset_lram_16(adv, s_addr, 0, mcode_lram_size);
	adv_write_lram_16_multi(adv, s_addr, mcode_buf, mcode_lram_size);

	chksum = adv_msum_lram_16(adv, s_addr, mcode_lram_size);
	mcode_chksum = (u_int16_t)adv_msum_lram_16(adv, ADV_CODE_SEC_BEG,
						   ((mcode_size - s_addr
						     - ADV_CODE_SEC_BEG) >> 1));
	adv_write_lram_16(adv, ADVV_MCODE_CHKSUM_W, mcode_chksum);
	adv_write_lram_16(adv, ADVV_MCODE_SIZE_W, mcode_size);
	return (chksum);
}
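
/*
 * The checksum returned above covers the entire download and is
 * compared against adv_mcode_chksum by adv_init_lram_and_mcode().  The
 * separate mcode_chksum stored at ADVV_MCODE_CHKSUM_W presumably lets
 * the microcode validate its own code section at run time.
 */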

static void
adv_reinit_lram(struct adv_softc *adv)
{
	adv_init_lram(adv);
	adv_init_qlink_var(adv);
}

static void
adv_init_lram(struct adv_softc *adv)
{
	u_int8_t  i;
	u_int16_t s_addr;

	adv_mset_lram_16(adv, ADV_QADR_BEG, 0,
			 (((adv->max_openings + 2 + 1) * 64) >> 1));

	i = ADV_MIN_ACTIVE_QNO;
	s_addr = ADV_QADR_BEG + ADV_QBLK_SIZE;

	adv_write_lram_8(adv, s_addr + ADV_SCSIQ_B_FWD, i + 1);
	adv_write_lram_8(adv, s_addr + ADV_SCSIQ_B_BWD, adv->max_openings);
	adv_write_lram_8(adv, s_addr + ADV_SCSIQ_B_QNO, i);
	i++;
	s_addr += ADV_QBLK_SIZE;
	for (; i < adv->max_openings; i++, s_addr += ADV_QBLK_SIZE) {
		adv_write_lram_8(adv, s_addr + ADV_SCSIQ_B_FWD, i + 1);
		adv_write_lram_8(adv, s_addr + ADV_SCSIQ_B_BWD, i - 1);
		adv_write_lram_8(adv, s_addr + ADV_SCSIQ_B_QNO, i);
	}

	adv_write_lram_8(adv, s_addr + ADV_SCSIQ_B_FWD, ADV_QLINK_END);
	adv_write_lram_8(adv, s_addr + ADV_SCSIQ_B_BWD, adv->max_openings - 1);
	adv_write_lram_8(adv, s_addr + ADV_SCSIQ_B_QNO, adv->max_openings);
	i++;
	s_addr += ADV_QBLK_SIZE;

	for (; i <= adv->max_openings + 3; i++, s_addr += ADV_QBLK_SIZE) {
		adv_write_lram_8(adv, s_addr + ADV_SCSIQ_B_FWD, i);
		adv_write_lram_8(adv, s_addr + ADV_SCSIQ_B_BWD, i);
		adv_write_lram_8(adv, s_addr + ADV_SCSIQ_B_QNO, i);
	}
}
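
/*
 * The loops above build the initial queue lists: blocks 1 through
 * max_openings form a doubly linked free list terminated with
 * ADV_QLINK_END, while the three blocks past max_openings are
 * self-linked placeholders used by adv_init_qlink_var() as the busy
 * and disconnect queue heads.
 */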

static int
adv_init_microcode_var(struct adv_softc *adv)
{
	int	 i;

	for (i = 0; i <= ADV_MAX_TID; i++) {
		/* Start out async all around */
		adv_set_syncrate(adv, /*path*/NULL,
				 i, 0, 0,
				 ADV_TRANS_GOAL|ADV_TRANS_CUR);
	}

	adv_init_qlink_var(adv);

	adv_write_lram_8(adv, ADVV_DISC_ENABLE_B, adv->disc_enable);
	adv_write_lram_8(adv, ADVV_HOSTSCSI_ID_B, 0x01 << adv->scsi_id);

	adv_write_lram_32(adv, ADVV_OVERRUN_PADDR_D, adv->overrun_physbase);

	adv_write_lram_32(adv, ADVV_OVERRUN_BSIZE_D, ADV_OVERRUN_BSIZE);

	ADV_OUTW(adv, ADV_REG_PROG_COUNTER, ADV_MCODE_START_ADDR);
	if (ADV_INW(adv, ADV_REG_PROG_COUNTER) != ADV_MCODE_START_ADDR) {
		printf("adv%d: Unable to set program counter. Aborting.\n",
		       adv->unit);
		return (1);
	}
	return (0);
}

static void
adv_init_qlink_var(struct adv_softc *adv)
{
	int	  i;
	u_int16_t lram_addr;

	adv_write_lram_8(adv, ADVV_NEXTRDY_B, 1);
	adv_write_lram_8(adv, ADVV_DONENEXT_B, adv->max_openings);

	adv_write_lram_16(adv, ADVV_FREE_Q_HEAD_W, 1);
	adv_write_lram_16(adv, ADVV_DONE_Q_TAIL_W, adv->max_openings);

	adv_write_lram_8(adv, ADVV_BUSY_QHEAD_B,
			 (u_int8_t)((int) adv->max_openings + 1));
	adv_write_lram_8(adv, ADVV_DISC1_QHEAD_B,
			 (u_int8_t)((int) adv->max_openings + 2));

	adv_write_lram_8(adv, ADVV_TOTAL_READY_Q_B, adv->max_openings);

	adv_write_lram_16(adv, ADVV_ASCDVC_ERR_CODE_W, 0);
	adv_write_lram_16(adv, ADVV_HALTCODE_W, 0);
	adv_write_lram_8(adv, ADVV_STOP_CODE_B, 0);
	adv_write_lram_8(adv, ADVV_SCSIBUSY_B, 0);
	adv_write_lram_8(adv, ADVV_WTM_FLAG_B, 0);
	adv_write_lram_8(adv, ADVV_Q_DONE_IN_PROGRESS_B, 0);

	lram_addr = ADV_QADR_BEG;
	for (i = 0; i < 32; i++, lram_addr += 2)
		adv_write_lram_16(adv, lram_addr, 0);
}

static void
adv_disable_interrupt(struct adv_softc *adv)
{
	u_int16_t cfg;

	cfg = ADV_INW(adv, ADV_CONFIG_LSW);
	ADV_OUTW(adv, ADV_CONFIG_LSW, cfg & ~ADV_CFG_LSW_HOST_INT_ON);
}

static void
adv_enable_interrupt(struct adv_softc *adv)
{
	u_int16_t cfg;

	cfg = ADV_INW(adv, ADV_CONFIG_LSW);
	ADV_OUTW(adv, ADV_CONFIG_LSW, cfg | ADV_CFG_LSW_HOST_INT_ON);
}

static void
adv_toggle_irq_act(struct adv_softc *adv)
{
	ADV_OUTW(adv, ADV_CHIP_STATUS, ADV_CIW_IRQ_ACT);
	ADV_OUTW(adv, ADV_CHIP_STATUS, 0);
}

void
adv_start_execution(struct adv_softc *adv)
{
	if (adv_read_lram_8(adv, ADV_STOP_CODE_B) != 0) {
		adv_write_lram_8(adv, ADV_STOP_CODE_B, 0);
	}
}

static int
adv_stop_chip(struct adv_softc *adv)
{
	u_int8_t cc_val;

	cc_val = ADV_INB(adv, ADV_CHIP_CTRL)
		 & (~(ADV_CC_SINGLE_STEP | ADV_CC_TEST | ADV_CC_DIAG));
	ADV_OUTB(adv, ADV_CHIP_CTRL, cc_val | ADV_CC_HALT);
	adv_set_chip_ih(adv, ADV_INS_HALT);
	adv_set_chip_ih(adv, ADV_INS_RFLAG_WTM);
	if ((ADV_INW(adv, ADV_CHIP_STATUS) & ADV_CSW_HALTED) == 0) {
		return (0);
	}
	return (1);
}

static int
adv_host_req_chip_halt(struct adv_softc *adv)
{
	int	 count;
	u_int8_t saved_stop_code;

	if (adv_is_chip_halted(adv))
		return (1);

	count = 0;
	saved_stop_code = adv_read_lram_8(adv, ADVV_STOP_CODE_B);
	adv_write_lram_8(adv, ADVV_STOP_CODE_B,
			 ADV_STOP_HOST_REQ_RISC_HALT | ADV_STOP_REQ_RISC_STOP);
	while (adv_is_chip_halted(adv) == 0
	    && count++ < 2000)
		;

	adv_write_lram_8(adv, ADVV_STOP_CODE_B, saved_stop_code);
	return (count < 2000);
}

static void
adv_set_chip_ih(struct adv_softc *adv, u_int16_t ins_code)
{
	adv_set_bank(adv, 1);
	ADV_OUTW(adv, ADV_REG_IH, ins_code);
	adv_set_bank(adv, 0);
}

#if UNUSED
static u_int8_t
adv_get_chip_scsi_ctrl(struct adv_softc *adv)
{
	u_int8_t scsi_ctrl;

	adv_set_bank(adv, 1);
	scsi_ctrl = ADV_INB(adv, ADV_REG_SC);
	adv_set_bank(adv, 0);
	return (scsi_ctrl);
}
#endif

/*
 * XXX Looks like more padding issues in this routine as well.
 *     There has to be a way to turn this into an insw.
 */
static void
adv_get_q_info(struct adv_softc *adv, u_int16_t s_addr,
	       u_int16_t *inbuf, int words)
{
	int	i;

	ADV_OUTW(adv, ADV_LRAM_ADDR, s_addr);
	for (i = 0; i < words; i++, inbuf++) {
		if (i == 5) {
			continue;
		}
		*inbuf = ADV_INW(adv, ADV_LRAM_DATA);
	}
}

static u_int
adv_get_num_free_queues(struct adv_softc *adv, u_int8_t n_qs)
{
	u_int	  cur_used_qs;
	u_int	  cur_free_qs;

	cur_used_qs = adv->cur_active + ADV_MIN_FREE_Q;

	if ((cur_used_qs + n_qs) <= adv->max_openings) {
		cur_free_qs = adv->max_openings - cur_used_qs;
		return (cur_free_qs);
	}
	adv->openings_needed = n_qs;
	return (0);
}

static u_int8_t
adv_alloc_free_queues(struct adv_softc *adv, u_int8_t free_q_head,
		      u_int8_t n_free_q)
{
	int i;

	for (i = 0; i < n_free_q; i++) {
		free_q_head = adv_alloc_free_queue(adv, free_q_head);
		if (free_q_head == ADV_QLINK_END)
			break;
	}
	return (free_q_head);
}

static u_int8_t
adv_alloc_free_queue(struct adv_softc *adv, u_int8_t free_q_head)
{
	u_int16_t	q_addr;
	u_int8_t	next_qp;
	u_int8_t	q_status;

	next_qp = ADV_QLINK_END;
	q_addr = ADV_QNO_TO_QADDR(free_q_head);
	q_status = adv_read_lram_8(adv, q_addr + ADV_SCSIQ_B_STATUS);

	if ((q_status & QS_READY) == 0)
		next_qp = adv_read_lram_8(adv, q_addr + ADV_SCSIQ_B_FWD);

	return (next_qp);
}

static int
adv_send_scsi_queue(struct adv_softc *adv, struct adv_scsi_q *scsiq,
		    u_int8_t n_q_required)
{
	u_int8_t	free_q_head;
	u_int8_t	next_qp;
	u_int8_t	tid_no;
	u_int8_t	target_ix;
	int		retval;

	retval = 1;
	target_ix = scsiq->q2.target_ix;
	tid_no = ADV_TIX_TO_TID(target_ix);
	free_q_head = adv_read_lram_16(adv, ADVV_FREE_Q_HEAD_W) & 0xFF;
	if ((next_qp = adv_alloc_free_queues(adv, free_q_head, n_q_required))
	    != ADV_QLINK_END) {
		scsiq->q1.q_no = free_q_head;

		/*
		 * Now that we know our queue number, point our sense
		 * buffer pointer at a bus-dma mapped area into which
		 * the controller can DMA the sense data.
		 */
		scsiq->q1.sense_addr = adv->sense_physbase
		    + ((free_q_head - 1) * sizeof(struct scsi_sense_data));
		adv_put_ready_sg_list_queue(adv, scsiq, free_q_head);
		adv_write_lram_16(adv, ADVV_FREE_Q_HEAD_W, next_qp);
		adv->cur_active += n_q_required;
		retval = 0;
	}
	return (retval);
}

static void
adv_put_ready_sg_list_queue(struct adv_softc *adv, struct adv_scsi_q *scsiq,
			    u_int q_no)
{
	u_int8_t	sg_list_dwords;
	u_int8_t	sg_index, i;
	u_int8_t	sg_entry_cnt;
	u_int8_t	next_qp;
	u_int16_t	q_addr;
	struct		adv_sg_head *sg_head;
	struct		adv_sg_list_q scsi_sg_q;

	sg_head = scsiq->sg_head;

	if (sg_head) {
		sg_entry_cnt = sg_head->entry_cnt - 1;
#ifdef DIAGNOSTIC
		if (sg_entry_cnt == 0)
			panic("adv_put_ready_sg_list_queue: ScsiQ with "
			      "a SG list but only one element");
		if ((scsiq->q1.cntl & QC_SG_HEAD) == 0)
			panic("adv_put_ready_sg_list_queue: ScsiQ with "
			      "a SG list but QC_SG_HEAD not set");
#endif
		q_addr = ADV_QNO_TO_QADDR(q_no);
		sg_index = 1;
		scsiq->q1.sg_queue_cnt = sg_head->queue_cnt;
		scsi_sg_q.sg_head_qp = q_no;
		scsi_sg_q.cntl = QCSG_SG_XFER_LIST;
		for (i = 0; i < sg_head->queue_cnt; i++) {
			u_int8_t segs_this_q;

			if (sg_entry_cnt > ADV_SG_LIST_PER_Q)
				segs_this_q = ADV_SG_LIST_PER_Q;
			else {
				/* This will be the last segment then */
				segs_this_q = sg_entry_cnt;
				scsi_sg_q.cntl |= QCSG_SG_XFER_END;
			}
			scsi_sg_q.seq_no = i + 1;
			sg_list_dwords = segs_this_q << 1;
			if (i == 0) {
				scsi_sg_q.sg_list_cnt = segs_this_q;
				scsi_sg_q.sg_cur_list_cnt = segs_this_q;
			} else {
				scsi_sg_q.sg_list_cnt = segs_this_q - 1;
				scsi_sg_q.sg_cur_list_cnt = segs_this_q - 1;
			}
			next_qp = adv_read_lram_8(adv, q_addr + ADV_SCSIQ_B_FWD);
			scsi_sg_q.q_no = next_qp;
			q_addr = ADV_QNO_TO_QADDR(next_qp);

			adv_write_lram_16_multi(adv,
						q_addr + ADV_SCSIQ_SGHD_CPY_BEG,
						(u_int16_t *)&scsi_sg_q,
						sizeof(scsi_sg_q) >> 1);
			adv_write_lram_32_multi(adv, q_addr + ADV_SGQ_LIST_BEG,
						(u_int32_t *)&sg_head->sg_list[sg_index],
						sg_list_dwords);
			sg_entry_cnt -= segs_this_q;
			sg_index += ADV_SG_LIST_PER_Q;
		}
	}
	adv_put_ready_queue(adv, scsiq, q_no);
}

static void
adv_put_ready_queue(struct adv_softc *adv, struct adv_scsi_q *scsiq,
		    u_int q_no)
{
	struct		adv_target_transinfo* tinfo;
	u_int		q_addr;
	u_int		tid_no;

	tid_no = ADV_TIX_TO_TID(scsiq->q2.target_ix);
	tinfo = &adv->tinfo[tid_no];
	if (tinfo->current.period != tinfo->goal.period) {
		adv_msgout_sdtr(adv, tinfo->goal.period, tinfo->goal.offset);
		scsiq->q1.cntl |= QC_MSG_OUT;
	}
	q_addr = ADV_QNO_TO_QADDR(q_no);

	scsiq->q1.status = QS_FREE;

	adv_write_lram_16_multi(adv, q_addr + ADV_SCSIQ_CDB_BEG,
				(u_int16_t *)scsiq->cdbptr,
				scsiq->q2.cdb_len >> 1);

#if BYTE_ORDER == BIG_ENDIAN
	adv_adj_scsiq_endian(scsiq);
#endif

	adv_put_scsiq(adv, q_addr + ADV_SCSIQ_CPY_BEG,
		      (u_int16_t *) &scsiq->q1.cntl,
		      ((sizeof(scsiq->q1) + sizeof(scsiq->q2)) / 2) - 1);

#if CC_WRITE_IO_COUNT
	adv_write_lram_16(adv, q_addr + ADV_SCSIQ_W_REQ_COUNT,
			  adv->req_count);
#endif

#if CC_CLEAR_DMA_REMAIN
	adv_write_lram_32(adv, q_addr + ADV_SCSIQ_DW_REMAIN_XFER_ADDR, 0);
	adv_write_lram_32(adv, q_addr + ADV_SCSIQ_DW_REMAIN_XFER_CNT, 0);
#endif

	adv_write_lram_16(adv, q_addr + ADV_SCSIQ_B_STATUS,
			  (scsiq->q1.q_no << 8) | QS_READY);
}

static void
adv_put_scsiq(struct adv_softc *adv, u_int16_t s_addr,
	      u_int16_t *buffer, int words)
{
	int	i;

	/*
	 * XXX This routine makes *gross* assumptions
	 * about padding in the data structures.
	 * Either the data structures should have explicit
	 * padding members added, or they should have padding
	 * turned off via compiler attributes depending on
	 * which yields better overall performance.  My hunch
	 * would be that turning off padding would be the
	 * faster approach as an outsw is much faster than
	 * this crude loop and accessing un-aligned data
	 * members isn't *that* expensive.  The other choice
	 * would be to modify the ASC script so that the
	 * adv_scsiq_1 structure can be re-arranged so
	 * padding isn't required.
	 */
	ADV_OUTW(adv, ADV_LRAM_ADDR, s_addr);
	for (i = 0; i < words; i++, buffer++) {
		if (i == 2 || i == 10) {
			continue;
		}
		ADV_OUTW(adv, ADV_LRAM_DATA, *buffer);
	}
}

static void
adv_handle_extmsg_in(struct adv_softc *adv, u_int16_t halt_q_addr,
		     u_int8_t q_cntl, target_bit_vector target_mask,
		     int tid_no)
{
	struct	ext_msg ext_msg;

	adv_read_lram_16_multi(adv, ADVV_MSGIN_BEG, (u_int16_t *) &ext_msg,
			       sizeof(ext_msg) >> 1);
	if ((ext_msg.msg_type == MSG_EXTENDED)
	 && (ext_msg.msg_req == MSG_EXT_SDTR)
	 && (ext_msg.msg_len == MSG_EXT_SDTR_LEN)) {
		union	 ccb *ccb;
		struct	 adv_target_transinfo* tinfo;
		u_int	 period;
		u_int	 offset;
		int	 sdtr_accept;
		u_int8_t orig_offset;

		ccb = (union ccb *) adv_read_lram_32(adv, halt_q_addr
							 + ADV_SCSIQ_D_CCBPTR);
		tinfo = &adv->tinfo[tid_no];
		sdtr_accept = TRUE;

		orig_offset = ext_msg.req_ack_offset;
		if (ext_msg.xfer_period < tinfo->goal.period) {
			sdtr_accept = FALSE;
			ext_msg.xfer_period = tinfo->goal.period;
		}

		/* Perform range checking */
		period = ext_msg.xfer_period;
		offset = ext_msg.req_ack_offset;
		adv_period_offset_to_sdtr(adv, &period, &offset, tid_no);
		ext_msg.xfer_period = period;
		ext_msg.req_ack_offset = offset;

		/* Record our current sync settings */
		adv_set_syncrate(adv, ccb->ccb_h.path,
				 tid_no, ext_msg.xfer_period,
				 ext_msg.req_ack_offset,
				 ADV_TRANS_GOAL|ADV_TRANS_ACTIVE);

		/* Offset too high or large period forced async */
		if (orig_offset != ext_msg.req_ack_offset)
			sdtr_accept = FALSE;

		if (sdtr_accept && (q_cntl & QC_MSG_OUT)) {
			/* Valid response to our requested negotiation */
			q_cntl &= ~QC_MSG_OUT;
		} else {
			/* Must Respond */
			q_cntl |= QC_MSG_OUT;
			adv_msgout_sdtr(adv, ext_msg.xfer_period,
					ext_msg.req_ack_offset);
		}
	} else if (ext_msg.msg_type == MSG_EXTENDED
		&& ext_msg.msg_req == MSG_EXT_WDTR
		&& ext_msg.msg_len == MSG_EXT_WDTR_LEN) {
		ext_msg.wdtr_width = 0;
		adv_write_lram_16_multi(adv, ADVV_MSGOUT_BEG,
					(u_int16_t *)&ext_msg,
					sizeof(ext_msg) >> 1);
		q_cntl |= QC_MSG_OUT;
	} else {
		ext_msg.msg_type = MSG_MESSAGE_REJECT;
		adv_write_lram_16_multi(adv, ADVV_MSGOUT_BEG,
					(u_int16_t *)&ext_msg,
					sizeof(ext_msg) >> 1);
		q_cntl |= QC_MSG_OUT;
	}
	adv_write_lram_8(adv, halt_q_addr + ADV_SCSIQ_B_CNTL, q_cntl);
}

static void
adv_msgout_sdtr(struct adv_softc *adv, u_int8_t sdtr_period,
		u_int8_t sdtr_offset)
{
	struct	 ext_msg sdtr_buf;

	sdtr_buf.msg_type = MSG_EXTENDED;
	sdtr_buf.msg_len = MSG_EXT_SDTR_LEN;
	sdtr_buf.msg_req = MSG_EXT_SDTR;
	sdtr_buf.xfer_period = sdtr_period;
	sdtr_offset &= ADV_SYN_MAX_OFFSET;
	sdtr_buf.req_ack_offset = sdtr_offset;
	adv_write_lram_16_multi(adv, ADVV_MSGOUT_BEG,
				(u_int16_t *) &sdtr_buf,
				sizeof(sdtr_buf) / 2);
}
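
/*
 * Example: for a period factor of 25 (0x19) and an offset of 15, the
 * bytes staged above form the standard SDTR extended message sequence
 * 0x01 0x03 0x01 0x19 0x0F (assuming ADV_SYN_MAX_OFFSET permits an
 * offset of 15).
 */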

int
adv_abort_ccb(struct adv_softc *adv, int target, int lun, union ccb *ccb,
	      u_int32_t status, int queued_only)
{
	u_int16_t q_addr;
	u_int8_t  q_no;
	struct adv_q_done_info scsiq_buf;
	struct adv_q_done_info *scsiq;
	u_int8_t  target_ix;
	int	  count;

	scsiq = &scsiq_buf;
	target_ix = ADV_TIDLUN_TO_IX(target, lun);
	count = 0;
	for (q_no = ADV_MIN_ACTIVE_QNO; q_no <= adv->max_openings; q_no++) {
		q_addr = ADV_QNO_TO_QADDR(q_no);

		adv_copy_lram_doneq(adv, q_addr, scsiq, adv->max_dma_count);
		if (((scsiq->q_status & QS_READY) != 0)
		 && ((scsiq->q_status & QS_ABORTED) == 0)
		 && ((scsiq->cntl & QCSG_SG_XFER_LIST) == 0)
		 && (scsiq->d2.target_ix == target_ix)
		 && (queued_only == 0
		  || !(scsiq->q_status & (QS_DISC1|QS_DISC2|QS_BUSY|QS_DONE)))
		 && (ccb == NULL || (ccb == (union ccb *)scsiq->d2.ccb_ptr))) {
			union ccb *aborted_ccb;
			struct adv_ccb_info *cinfo;

			scsiq->q_status |= QS_ABORTED;
			adv_write_lram_8(adv, q_addr + ADV_SCSIQ_B_STATUS,
					 scsiq->q_status);
			aborted_ccb = (union ccb *)scsiq->d2.ccb_ptr;
			/* Don't clobber earlier error codes */
			if ((aborted_ccb->ccb_h.status & CAM_STATUS_MASK)
			  == CAM_REQ_INPROG)
				aborted_ccb->ccb_h.status |= status;
			cinfo = (struct adv_ccb_info *)
			    aborted_ccb->ccb_h.ccb_cinfo_ptr;
			cinfo->state |= ACCB_ABORT_QUEUED;
			count++;
		}
	}
	return (count);
}

int
adv_reset_bus(struct adv_softc *adv)
{
	int count;
	int i;
	union ccb *ccb;

	adv_reset_chip_and_scsi_bus(adv);
	adv_reinit_lram(adv);
	for (i = 0; i <= ADV_MAX_TID; i++) {
		if (adv->fix_asyn_xfer & (0x01 << i))
			adv_set_sdtr_reg_at_id(adv, i,
					       ASYN_SDTR_DATA_FIX_PCI_REV_AB);
	}
	ADV_OUTW(adv, ADV_REG_PROG_COUNTER, ADV_MCODE_START_ADDR);

	/* Tell the XPT layer that a bus reset occurred */
	if (adv->path != NULL)
		xpt_async(AC_BUS_RESET, adv->path, NULL);

	count = 0;
	while ((ccb = (union ccb *)LIST_FIRST(&adv->pending_ccbs)) != NULL) {
		if ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_INPROG)
			ccb->ccb_h.status |= CAM_SCSI_BUS_RESET;
		adv_done(adv, ccb, QD_ABORTED_BY_HOST, 0, 0, 0);
		count++;
	}

	adv_start_chip(adv);
	return (count);
}

static void
adv_set_sdtr_reg_at_id(struct adv_softc *adv, int tid, u_int8_t sdtr_data)
{
	int orig_id;

	adv_set_bank(adv, 1);
	orig_id = ffs(ADV_INB(adv, ADV_HOST_SCSIID)) - 1;
	ADV_OUTB(adv, ADV_HOST_SCSIID, tid);
	if (ADV_INB(adv, ADV_HOST_SCSIID) == (0x01 << tid)) {
		adv_set_bank(adv, 0);
		ADV_OUTB(adv, ADV_SYN_OFFSET, sdtr_data);
	}
	adv_set_bank(adv, 1);
	ADV_OUTB(adv, ADV_HOST_SCSIID, orig_id);
	adv_set_bank(adv, 0);
}
2038