/*-
 * Low level routines for the Advanced Systems Inc. SCSI controller chips
 *
 * Copyright (c) 1996-1997, 1999-2000 Justin Gibbs.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification, immediately at the beginning of the file.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
/*-
 * Ported from:
 * advansys.c - Linux Host Driver for AdvanSys SCSI Adapters
 *
 * Copyright (c) 1995-1996 Advanced System Products, Inc.
 * All Rights Reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that redistributions of source
 * code retain the above copyright notice and this comment without
 * modification.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/conf.h>
#include <sys/lock.h>
#include <sys/kernel.h>
#include <sys/mutex.h>
#include <sys/systm.h>

#include <machine/bus.h>
#include <machine/resource.h>
#include <sys/bus.h>
#include <sys/rman.h>

#include <cam/cam.h>
#include <cam/cam_ccb.h>
#include <cam/cam_sim.h>
#include <cam/cam_xpt_sim.h>

#include <cam/scsi/scsi_all.h>
#include <cam/scsi/scsi_message.h>
#include <cam/scsi/scsi_da.h>
#include <cam/scsi/scsi_cd.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>

#include <dev/advansys/advansys.h>
#include <dev/advansys/advmcode.h>

struct adv_quirk_entry {
	struct scsi_inquiry_pattern inq_pat;
	u_int8_t quirks;
#define ADV_QUIRK_FIX_ASYN_XFER_ALWAYS	0x01
#define ADV_QUIRK_FIX_ASYN_XFER		0x02
};

static struct adv_quirk_entry adv_quirk_table[] =
{
	{
		{ T_CDROM, SIP_MEDIA_REMOVABLE, "HP", "*", "*" },
		ADV_QUIRK_FIX_ASYN_XFER_ALWAYS|ADV_QUIRK_FIX_ASYN_XFER
	},
	{
		{ T_CDROM, SIP_MEDIA_REMOVABLE, "NEC", "CD-ROM DRIVE", "*" },
		0
	},
	{
		{
		  T_SEQUENTIAL, SIP_MEDIA_REMOVABLE,
		  "TANDBERG", " TDC 36", "*"
		},
		0
	},
	{
		{ T_SEQUENTIAL, SIP_MEDIA_REMOVABLE, "WANGTEK", "*", "*" },
		0
	},
	{
		{
		  T_PROCESSOR, SIP_MEDIA_REMOVABLE|SIP_MEDIA_FIXED,
		  "*", "*", "*"
		},
		0
	},
	{
		{
		  T_SCANNER, SIP_MEDIA_REMOVABLE|SIP_MEDIA_FIXED,
		  "*", "*", "*"
		},
		0
	},
	{
		/* Default quirk entry */
		{
		  T_ANY, SIP_MEDIA_REMOVABLE|SIP_MEDIA_FIXED,
		  /*vendor*/"*", /*product*/"*", /*revision*/"*"
		},
		ADV_QUIRK_FIX_ASYN_XFER,
	}
};

/*
 * Allowable periods, expressed as SDTR transfer period factors
 * (units of 4ns).
 */
static u_int8_t adv_sdtr_period_tbl[] =
{
	25,
	30,
	35,
	40,
	50,
	60,
	70,
	85
};

static u_int8_t adv_sdtr_period_tbl_ultra[] =
{
	12,
	19,
	25,
	32,
	38,
	44,
	50,
	57,
	63,
	69,
	75,
	82,
	88,
	94,
	100,
	107
};

struct ext_msg {
	u_int8_t msg_type;
	u_int8_t msg_len;
	u_int8_t msg_req;
	union {
		struct {
			u_int8_t sdtr_xfer_period;
			u_int8_t sdtr_req_ack_offset;
		} sdtr;
		struct {
			u_int8_t wdtr_width;
		} wdtr;
		struct {
			u_int8_t mdp[4];
		} mdp;
	} u_ext_msg;
	u_int8_t res;
};

#define	xfer_period	u_ext_msg.sdtr.sdtr_xfer_period
#define	req_ack_offset	u_ext_msg.sdtr.sdtr_req_ack_offset
#define	wdtr_width	u_ext_msg.wdtr.wdtr_width
/* Currently unused; mdp[0] holds the most significant (b3) byte. */
#define	mdp_b3		u_ext_msg.mdp.mdp[0]
#define	mdp_b2		u_ext_msg.mdp.mdp[1]
#define	mdp_b1		u_ext_msg.mdp.mdp[2]
#define	mdp_b0		u_ext_msg.mdp.mdp[3]

/*
 * Some of the early PCI adapters have problems with
 * async transfers.  Instead use an offset of 1.
 */
#define ASYN_SDTR_DATA_FIX_PCI_REV_AB 0x41
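
/*
 * Note: as decoded by adv_sdtr_to_period_offset() below, 0x41 selects
 * entry 4 of the active SDTR period table (high nibble) with a REQ/ACK
 * offset of 1 (low nibble), rather than a true zero-offset async
 * agreement.
 */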

/* LRAM routines */
static void	 adv_read_lram_16_multi(struct adv_softc *adv, u_int16_t s_addr,
					u_int16_t *buffer, int count);
static void	 adv_write_lram_16_multi(struct adv_softc *adv,
					 u_int16_t s_addr, u_int16_t *buffer,
					 int count);
static void	 adv_mset_lram_16(struct adv_softc *adv, u_int16_t s_addr,
				  u_int16_t set_value, int count);
static u_int32_t adv_msum_lram_16(struct adv_softc *adv, u_int16_t s_addr,
				  int count);

static int	 adv_write_and_verify_lram_16(struct adv_softc *adv,
					      u_int16_t addr, u_int16_t value);
static u_int32_t adv_read_lram_32(struct adv_softc *adv, u_int16_t addr);

static void	 adv_write_lram_32(struct adv_softc *adv, u_int16_t addr,
				   u_int32_t value);
static void	 adv_write_lram_32_multi(struct adv_softc *adv,
					 u_int16_t s_addr, u_int32_t *buffer,
					 int count);

/* EEPROM routines */
static u_int16_t adv_read_eeprom_16(struct adv_softc *adv, u_int8_t addr);
static u_int16_t adv_write_eeprom_16(struct adv_softc *adv, u_int8_t addr,
				     u_int16_t value);
static int	 adv_write_eeprom_cmd_reg(struct adv_softc *adv,
					  u_int8_t cmd_reg);
static int	 adv_set_eeprom_config_once(struct adv_softc *adv,
					    struct adv_eeprom_config *eeconfig);

/* Initialization */
static u_int32_t adv_load_microcode(struct adv_softc *adv, u_int16_t s_addr,
				    u_int16_t *mcode_buf, u_int16_t mcode_size);

static void	 adv_reinit_lram(struct adv_softc *adv);
static void	 adv_init_lram(struct adv_softc *adv);
static int	 adv_init_microcode_var(struct adv_softc *adv);
static void	 adv_init_qlink_var(struct adv_softc *adv);

/* Interrupts */
static void	 adv_disable_interrupt(struct adv_softc *adv);
static void	 adv_enable_interrupt(struct adv_softc *adv);
static void	 adv_toggle_irq_act(struct adv_softc *adv);

/* Chip Control */
static int	 adv_host_req_chip_halt(struct adv_softc *adv);
static void	 adv_set_chip_ih(struct adv_softc *adv, u_int16_t ins_code);
#if 0
static u_int8_t  adv_get_chip_scsi_ctrl(struct adv_softc *adv);
#endif

/* Queue handling and execution */
static __inline int
		 adv_sgcount_to_qcount(int sgcount);

static __inline int
adv_sgcount_to_qcount(int sgcount)
{
	int	n_sg_list_qs;

	n_sg_list_qs = ((sgcount - 1) / ADV_SG_LIST_PER_Q);
	if (((sgcount - 1) % ADV_SG_LIST_PER_Q) != 0)
		n_sg_list_qs++;
	return (n_sg_list_qs + 1);
}
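
/*
 * Worked example (assuming ADV_SG_LIST_PER_Q is 7): a 15 element S/G
 * list needs (15 - 1) / 7 = 2 full S/G continuation queues with no
 * remainder, so adv_sgcount_to_qcount(15) returns 3, i.e. two S/G
 * queues plus the head queue, which carries the first element itself.
 */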

#if BYTE_ORDER == BIG_ENDIAN
static void	 adv_adj_endian_qdone_info(struct adv_q_done_info *);
static void	 adv_adj_scsiq_endian(struct adv_scsi_q *);
#endif
static void	 adv_get_q_info(struct adv_softc *adv, u_int16_t s_addr,
				u_int16_t *inbuf, int words);
static u_int	 adv_get_num_free_queues(struct adv_softc *adv, u_int8_t n_qs);
static u_int8_t  adv_alloc_free_queues(struct adv_softc *adv,
				       u_int8_t free_q_head, u_int8_t n_free_q);
static u_int8_t  adv_alloc_free_queue(struct adv_softc *adv,
				      u_int8_t free_q_head);
static int	 adv_send_scsi_queue(struct adv_softc *adv,
				     struct adv_scsi_q *scsiq,
				     u_int8_t n_q_required);
static void	 adv_put_ready_sg_list_queue(struct adv_softc *adv,
					     struct adv_scsi_q *scsiq,
					     u_int q_no);
static void	 adv_put_ready_queue(struct adv_softc *adv,
				     struct adv_scsi_q *scsiq, u_int q_no);
static void	 adv_put_scsiq(struct adv_softc *adv, u_int16_t s_addr,
			       u_int16_t *buffer, int words);

/* Messages */
static void	 adv_handle_extmsg_in(struct adv_softc *adv,
				      u_int16_t halt_q_addr, u_int8_t q_cntl,
				      target_bit_vector target_id,
				      int tid);
static void	 adv_msgout_sdtr(struct adv_softc *adv, u_int8_t sdtr_period,
				 u_int8_t sdtr_offset);
static void	 adv_set_sdtr_reg_at_id(struct adv_softc *adv, int id,
					u_int8_t sdtr_data);

/* Exported functions first */

void
advasync(void *callback_arg, u_int32_t code, struct cam_path *path, void *arg)
{
	struct adv_softc *adv;

	adv = (struct adv_softc *)callback_arg;
	mtx_assert(&adv->lock, MA_OWNED);
	switch (code) {
	case AC_FOUND_DEVICE:
	{
		struct ccb_getdev *cgd;
		target_bit_vector target_mask;
		int num_entries;
		caddr_t match;
		struct adv_quirk_entry *entry;
		struct adv_target_transinfo* tinfo;

		cgd = (struct ccb_getdev *)arg;

		target_mask = ADV_TID_TO_TARGET_MASK(cgd->ccb_h.target_id);

		num_entries = sizeof(adv_quirk_table)/sizeof(*adv_quirk_table);
		match = cam_quirkmatch((caddr_t)&cgd->inq_data,
				       (caddr_t)adv_quirk_table,
				       num_entries, sizeof(*adv_quirk_table),
				       scsi_inquiry_match);

		if (match == NULL)
			panic("advasync: device didn't match wildcard entry!!");

		entry = (struct adv_quirk_entry *)match;

		if (adv->bug_fix_control & ADV_BUG_FIX_ASYN_USE_SYN) {
			if ((entry->quirks & ADV_QUIRK_FIX_ASYN_XFER_ALWAYS) != 0)
				adv->fix_asyn_xfer_always |= target_mask;
			else
				adv->fix_asyn_xfer_always &= ~target_mask;
			/*
			 * We start out life with all bits set and clear them
			 * after we've determined that the fix isn't necessary.
			 * It may well be that we've already cleared a target
			 * before the full inquiry session completes, so don't
			 * gratuitously set a target bit even if it has this
			 * quirk.  But, if the quirk exonerates a device, clear
			 * the bit now.
			 */
			if ((entry->quirks & ADV_QUIRK_FIX_ASYN_XFER) == 0)
				adv->fix_asyn_xfer &= ~target_mask;
		}
		/*
		 * Reset our sync settings now that we've determined
		 * what quirks are in effect for the device.
		 */
		tinfo = &adv->tinfo[cgd->ccb_h.target_id];
		adv_set_syncrate(adv, cgd->ccb_h.path,
				 cgd->ccb_h.target_id,
				 tinfo->current.period,
				 tinfo->current.offset,
				 ADV_TRANS_CUR);
		break;
	}
	case AC_LOST_DEVICE:
	{
		u_int target_mask;

		if (adv->bug_fix_control & ADV_BUG_FIX_ASYN_USE_SYN) {
			target_mask = 0x01 << xpt_path_target_id(path);
			adv->fix_asyn_xfer |= target_mask;
		}

		/*
		 * Revert to async transfers
		 * for the next device.
		 */
		adv_set_syncrate(adv, /*path*/NULL,
				 xpt_path_target_id(path),
				 /*period*/0,
				 /*offset*/0,
				 ADV_TRANS_GOAL|ADV_TRANS_CUR);
	}
	default:
		break;
	}
}

void
adv_set_bank(struct adv_softc *adv, u_int8_t bank)
{
	u_int8_t control;

	/*
	 * Start out with the bank reset to 0
	 */
	control = ADV_INB(adv, ADV_CHIP_CTRL)
		  &  (~(ADV_CC_SINGLE_STEP | ADV_CC_TEST
			| ADV_CC_DIAG | ADV_CC_SCSI_RESET
			| ADV_CC_CHIP_RESET | ADV_CC_BANK_ONE));
	if (bank == 1) {
		control |= ADV_CC_BANK_ONE;
	} else if (bank == 2) {
		control |= ADV_CC_DIAG | ADV_CC_BANK_ONE;
	}
	ADV_OUTB(adv, ADV_CHIP_CTRL, control);
}

u_int8_t
adv_read_lram_8(struct adv_softc *adv, u_int16_t addr)
{
	u_int8_t   byte_data;
	u_int16_t  word_data;

	/*
	 * LRAM is accessed on 16-bit boundaries.
	 */
	ADV_OUTW(adv, ADV_LRAM_ADDR, addr & 0xFFFE);
	word_data = ADV_INW(adv, ADV_LRAM_DATA);
	if (addr & 1) {
#if BYTE_ORDER == BIG_ENDIAN
		byte_data = (u_int8_t)(word_data & 0xFF);
#else
		byte_data = (u_int8_t)((word_data >> 8) & 0xFF);
#endif
	} else {
#if BYTE_ORDER == BIG_ENDIAN
		byte_data = (u_int8_t)((word_data >> 8) & 0xFF);
#else
		byte_data = (u_int8_t)(word_data & 0xFF);
#endif
	}
	return (byte_data);
}
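
/*
 * Example: on a little-endian host, reading LRAM byte address 0x41
 * fetches the word at 0x40 and returns its high byte, while address
 * 0x40 returns the low byte; the BYTE_ORDER conditionals above swap
 * the byte lanes on big-endian hosts.
 */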

void
adv_write_lram_8(struct adv_softc *adv, u_int16_t addr, u_int8_t value)
{
	u_int16_t word_data;

	word_data = adv_read_lram_16(adv, addr & 0xFFFE);
	if (addr & 1) {
		word_data &= 0x00FF;
		word_data |= (((u_int8_t)value << 8) & 0xFF00);
	} else {
		word_data &= 0xFF00;
		word_data |= ((u_int8_t)value & 0x00FF);
	}
	adv_write_lram_16(adv, addr & 0xFFFE, word_data);
}

u_int16_t
adv_read_lram_16(struct adv_softc *adv, u_int16_t addr)
{
	ADV_OUTW(adv, ADV_LRAM_ADDR, addr);
	return (ADV_INW(adv, ADV_LRAM_DATA));
}

void
adv_write_lram_16(struct adv_softc *adv, u_int16_t addr, u_int16_t value)
{
	ADV_OUTW(adv, ADV_LRAM_ADDR, addr);
	ADV_OUTW(adv, ADV_LRAM_DATA, value);
}
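
/*
 * All of the LRAM accessors share this two-step protocol: latch a word
 * address into ADV_LRAM_ADDR, then transfer through ADV_LRAM_DATA.  A
 * hypothetical read-modify-write of a 16-bit LRAM variable would look
 * like (flag is a placeholder):
 *
 *	val = adv_read_lram_16(adv, ADVV_HALTCODE_W);
 *	adv_write_lram_16(adv, ADVV_HALTCODE_W, val | flag);
 */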

/*
 * Determine if there is a board at "iobase" by looking
 * for the AdvanSys signatures.  Return 1 if a board is
 * found, 0 otherwise.
 */
int
adv_find_signature(struct resource *res)
{
	u_int16_t signature;

	if (bus_read_1(res, ADV_SIGNATURE_BYTE) == ADV_1000_ID1B) {
		signature = bus_read_2(res, ADV_SIGNATURE_WORD);
		if ((signature == ADV_1000_ID0W)
		 || (signature == ADV_1000_ID0W_FIX))
			return (1);
	}
	return (0);
}

void
adv_lib_init(struct adv_softc *adv)
{
	if ((adv->type & ADV_ULTRA) != 0) {
		adv->sdtr_period_tbl = adv_sdtr_period_tbl_ultra;
		adv->sdtr_period_tbl_size = sizeof(adv_sdtr_period_tbl_ultra);
	} else {
		adv->sdtr_period_tbl = adv_sdtr_period_tbl;
		adv->sdtr_period_tbl_size = sizeof(adv_sdtr_period_tbl);
	}
}

u_int16_t
adv_get_eeprom_config(struct adv_softc *adv,
		      struct adv_eeprom_config *eeprom_config)
{
	u_int16_t	sum;
	u_int16_t	*wbuf;
	u_int8_t	cfg_beg;
	u_int8_t	cfg_end;
	u_int8_t	s_addr;

	wbuf = (u_int16_t *)eeprom_config;
	sum = 0;

	for (s_addr = 0; s_addr < 2; s_addr++, wbuf++) {
		*wbuf = adv_read_eeprom_16(adv, s_addr);
		sum += *wbuf;
	}

	if (adv->type & ADV_VL) {
		cfg_beg = ADV_EEPROM_CFG_BEG_VL;
		cfg_end = ADV_EEPROM_MAX_ADDR_VL;
	} else {
		cfg_beg = ADV_EEPROM_CFG_BEG;
		cfg_end = ADV_EEPROM_MAX_ADDR;
	}

	for (s_addr = cfg_beg; s_addr <= (cfg_end - 1); s_addr++, wbuf++) {
		*wbuf = adv_read_eeprom_16(adv, s_addr);
		sum += *wbuf;
#ifdef ADV_DEBUG_EEPROM
		printf("Addr 0x%x: 0x%04x\n", s_addr, *wbuf);
#endif
	}
	*wbuf = adv_read_eeprom_16(adv, s_addr);
	return (sum);
}
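
/*
 * Sketch of the intended caller usage: the returned sum covers every
 * word except the trailing checksum word, which is read into the final
 * struct member, so (assuming that member is named chksum, as in
 * advlib.h) a verification looks like:
 *
 *	if (adv_get_eeprom_config(adv, &eeconfig) != eeconfig.chksum)
 *		printf("EEPROM checksum mismatch\n");
 */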

int
adv_set_eeprom_config(struct adv_softc *adv,
		      struct adv_eeprom_config *eeprom_config)
{
	int	retry;

	retry = 0;
	while (1) {
		if (adv_set_eeprom_config_once(adv, eeprom_config) == 0) {
			break;
		}
		if (++retry > ADV_EEPROM_MAX_RETRY) {
			break;
		}
	}
	return (retry > ADV_EEPROM_MAX_RETRY);
}

int
adv_reset_chip(struct adv_softc *adv, int reset_bus)
{
	adv_stop_chip(adv);
	ADV_OUTB(adv, ADV_CHIP_CTRL, ADV_CC_CHIP_RESET | ADV_CC_HALT
				     | (reset_bus ? ADV_CC_SCSI_RESET : 0));
	DELAY(60);

	adv_set_chip_ih(adv, ADV_INS_RFLAG_WTM);
	adv_set_chip_ih(adv, ADV_INS_HALT);

	if (reset_bus)
		ADV_OUTB(adv, ADV_CHIP_CTRL, ADV_CC_CHIP_RESET | ADV_CC_HALT);

	ADV_OUTB(adv, ADV_CHIP_CTRL, ADV_CC_HALT);
	if (reset_bus)
		DELAY(200 * 1000);

	ADV_OUTW(adv, ADV_CHIP_STATUS, ADV_CIW_CLR_SCSI_RESET_INT);
	ADV_OUTW(adv, ADV_CHIP_STATUS, 0);
	return (adv_is_chip_halted(adv));
}

int
adv_test_external_lram(struct adv_softc* adv)
{
	u_int16_t	q_addr;
	u_int16_t	saved_value;
	int		success;

	success = 0;

	q_addr = ADV_QNO_TO_QADDR(241);
	saved_value = adv_read_lram_16(adv, q_addr);
	if (adv_write_and_verify_lram_16(adv, q_addr, 0x55AA) == 0) {
		success = 1;
		adv_write_lram_16(adv, q_addr, saved_value);
	}
	return (success);
}

int
adv_init_lram_and_mcode(struct adv_softc *adv)
{
	u_int32_t	retval;

	adv_disable_interrupt(adv);

	adv_init_lram(adv);

	retval = adv_load_microcode(adv, 0, (u_int16_t *)adv_mcode,
				    adv_mcode_size);
	if (retval != adv_mcode_chksum) {
		device_printf(adv->dev,
		    "Microcode download failed checksum!\n");
		return (1);
	}

	if (adv_init_microcode_var(adv) != 0)
		return (1);

	adv_enable_interrupt(adv);
	return (0);
}

u_int8_t
adv_get_chip_irq(struct adv_softc *adv)
{
	u_int16_t	cfg_lsw;
	u_int8_t	chip_irq;

	cfg_lsw = ADV_INW(adv, ADV_CONFIG_LSW);

	if ((adv->type & ADV_VL) != 0) {
		chip_irq = (u_int8_t)(((cfg_lsw >> 2) & 0x07));
		if ((chip_irq == 0) ||
		    (chip_irq == 4) ||
		    (chip_irq == 7)) {
			return (0);
		}
		return (chip_irq + (ADV_MIN_IRQ_NO - 1));
	}
	chip_irq = (u_int8_t)(((cfg_lsw >> 2) & 0x03));
	if (chip_irq == 3)
		chip_irq += 2;
	return (chip_irq + ADV_MIN_IRQ_NO);
}
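
/*
 * Worked example (assuming ADV_MIN_IRQ_NO is 10): on a non-VL board,
 * config bits 2-3 equal to 3 become chip_irq = 3 + 2 = 5, reported as
 * IRQ 15, while a value of 1 maps to IRQ 11.  VL boards use a three
 * bit field and reserve the encodings 0, 4 and 7.
 */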

u_int8_t
adv_set_chip_irq(struct adv_softc *adv, u_int8_t irq_no)
{
	u_int16_t	cfg_lsw;

	if ((adv->type & ADV_VL) != 0) {
		if (irq_no != 0) {
			if ((irq_no < ADV_MIN_IRQ_NO)
			 || (irq_no > ADV_MAX_IRQ_NO)) {
				irq_no = 0;
			} else {
				irq_no -= ADV_MIN_IRQ_NO - 1;
			}
		}
		cfg_lsw = ADV_INW(adv, ADV_CONFIG_LSW) & 0xFFE3;
		cfg_lsw |= 0x0010;
		ADV_OUTW(adv, ADV_CONFIG_LSW, cfg_lsw);
		adv_toggle_irq_act(adv);

		cfg_lsw = ADV_INW(adv, ADV_CONFIG_LSW) & 0xFFE0;
		cfg_lsw |= (irq_no & 0x07) << 2;
		ADV_OUTW(adv, ADV_CONFIG_LSW, cfg_lsw);
		adv_toggle_irq_act(adv);
	} else if ((adv->type & ADV_ISA) != 0) {
		if (irq_no == 15)
			irq_no -= 2;
		irq_no -= ADV_MIN_IRQ_NO;
		cfg_lsw = ADV_INW(adv, ADV_CONFIG_LSW) & 0xFFF3;
		cfg_lsw |= (irq_no & 0x03) << 2;
		ADV_OUTW(adv, ADV_CONFIG_LSW, cfg_lsw);
	}
	return (adv_get_chip_irq(adv));
}

void
adv_set_chip_scsiid(struct adv_softc *adv, int new_id)
{
	u_int16_t cfg_lsw;

	cfg_lsw = ADV_INW(adv, ADV_CONFIG_LSW);
	if (ADV_CONFIG_SCSIID(cfg_lsw) == new_id)
		return;
	cfg_lsw &= ~ADV_CFG_LSW_SCSIID;
	cfg_lsw |= (new_id & ADV_MAX_TID) << ADV_CFG_LSW_SCSIID_SHIFT;
	ADV_OUTW(adv, ADV_CONFIG_LSW, cfg_lsw);
}

int
adv_execute_scsi_queue(struct adv_softc *adv, struct adv_scsi_q *scsiq,
		       u_int32_t datalen)
{
	struct		adv_target_transinfo* tinfo;
	u_int32_t	*p_data_addr;
	u_int32_t	*p_data_bcount;
	int		disable_syn_offset_one_fix;
	int		retval;
	u_int		n_q_required;
	u_int32_t	addr;
	u_int8_t	sg_entry_cnt;
	u_int8_t	target_ix;
	u_int8_t	sg_entry_cnt_minus_one;
	u_int8_t	tid_no;

	if (!dumping)
		mtx_assert(&adv->lock, MA_OWNED);
	scsiq->q1.q_no = 0;
	retval = 1;  /* Default to error case */
	target_ix = scsiq->q2.target_ix;
	tid_no = ADV_TIX_TO_TID(target_ix);
	tinfo = &adv->tinfo[tid_no];

	if (scsiq->cdbptr[0] == REQUEST_SENSE) {
		/* Renegotiate if appropriate. */
		adv_set_syncrate(adv, /*struct cam_path */NULL,
				 tid_no, /*period*/0, /*offset*/0,
				 ADV_TRANS_CUR);
		if (tinfo->current.period != tinfo->goal.period) {
			adv_msgout_sdtr(adv, tinfo->goal.period,
					tinfo->goal.offset);
			scsiq->q1.cntl |= (QC_MSG_OUT | QC_URGENT);
		}
	}

	if ((scsiq->q1.cntl & QC_SG_HEAD) != 0) {
		sg_entry_cnt = scsiq->sg_head->entry_cnt;
		sg_entry_cnt_minus_one = sg_entry_cnt - 1;

#ifdef DIAGNOSTIC
		if (sg_entry_cnt <= 1)
			panic("adv_execute_scsi_queue: Queue "
			      "with QC_SG_HEAD set but %d segs.", sg_entry_cnt);

		if (sg_entry_cnt > ADV_MAX_SG_LIST)
			panic("adv_execute_scsi_queue: "
			      "Queue with too many segs.");

		if ((adv->type & (ADV_ISA | ADV_VL | ADV_EISA)) != 0) {
			int i;

			for (i = 0; i < sg_entry_cnt_minus_one; i++) {
				addr = scsiq->sg_head->sg_list[i].addr +
				       scsiq->sg_head->sg_list[i].bytes;

				if ((addr & 0x0003) != 0)
					panic("adv_execute_scsi_queue: SG "
					      "with odd address or byte count");
			}
		}
#endif
		p_data_addr =
		    &scsiq->sg_head->sg_list[sg_entry_cnt_minus_one].addr;
		p_data_bcount =
		    &scsiq->sg_head->sg_list[sg_entry_cnt_minus_one].bytes;

		n_q_required = adv_sgcount_to_qcount(sg_entry_cnt);
		scsiq->sg_head->queue_cnt = n_q_required - 1;
	} else {
		p_data_addr = &scsiq->q1.data_addr;
		p_data_bcount = &scsiq->q1.data_cnt;
		n_q_required = 1;
	}

	disable_syn_offset_one_fix = FALSE;

	if ((adv->fix_asyn_xfer & scsiq->q1.target_id) != 0
	 && (adv->fix_asyn_xfer_always & scsiq->q1.target_id) == 0) {
		if (datalen != 0) {
			if (datalen < 512) {
				disable_syn_offset_one_fix = TRUE;
			} else {
				if (scsiq->cdbptr[0] == INQUIRY
				 || scsiq->cdbptr[0] == REQUEST_SENSE
				 || scsiq->cdbptr[0] == READ_CAPACITY
				 || scsiq->cdbptr[0] == MODE_SELECT_6
				 || scsiq->cdbptr[0] == MODE_SENSE_6
				 || scsiq->cdbptr[0] == MODE_SENSE_10
				 || scsiq->cdbptr[0] == MODE_SELECT_10
				 || scsiq->cdbptr[0] == READ_TOC) {
					disable_syn_offset_one_fix = TRUE;
				}
			}
		}
	}

	if (disable_syn_offset_one_fix) {
		scsiq->q2.tag_code &=
		    ~(MSG_SIMPLE_Q_TAG|MSG_HEAD_OF_Q_TAG|MSG_ORDERED_Q_TAG);
		scsiq->q2.tag_code |= (ADV_TAG_FLAG_DISABLE_ASYN_USE_SYN_FIX
				     | ADV_TAG_FLAG_DISABLE_DISCONNECT);
	}

	if ((adv->bug_fix_control & ADV_BUG_FIX_IF_NOT_DWB) != 0
	 && (scsiq->cdbptr[0] == READ_10 || scsiq->cdbptr[0] == READ_6)) {
		u_int8_t extra_bytes;

		addr = *p_data_addr + *p_data_bcount;
		extra_bytes = addr & 0x0003;
		if (extra_bytes != 0
		 && ((scsiq->q1.cntl & QC_SG_HEAD) != 0
		  || (scsiq->q1.data_cnt & 0x01FF) == 0)) {
			scsiq->q2.tag_code |= ADV_TAG_FLAG_EXTRA_BYTES;
			scsiq->q1.extra_bytes = extra_bytes;
			*p_data_bcount -= extra_bytes;
		}
	}

	if ((adv_get_num_free_queues(adv, n_q_required) >= n_q_required)
	 || ((scsiq->q1.cntl & QC_URGENT) != 0))
		retval = adv_send_scsi_queue(adv, scsiq, n_q_required);

	return (retval);
}
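
/*
 * Callers treat a non-zero return as "no queue space": unless the
 * request was marked QC_URGENT, the command was not posted and must be
 * resubmitted once adv_get_num_free_queues() can cover n_q_required
 * (adv->openings_needed records the shortfall).
 */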

u_int8_t
adv_copy_lram_doneq(struct adv_softc *adv, u_int16_t q_addr,
		    struct adv_q_done_info *scsiq, u_int32_t max_dma_count)
{
	u_int16_t val;
	u_int8_t  sg_queue_cnt;

	adv_get_q_info(adv, q_addr + ADV_SCSIQ_DONE_INFO_BEG,
		       (u_int16_t *)scsiq,
		       (sizeof(scsiq->d2) + sizeof(scsiq->d3)) / 2);

#if BYTE_ORDER == BIG_ENDIAN
	adv_adj_endian_qdone_info(scsiq);
#endif

	val = adv_read_lram_16(adv, q_addr + ADV_SCSIQ_B_STATUS);
	scsiq->q_status = val & 0xFF;
	scsiq->q_no = (val >> 8) & 0xFF;

	val = adv_read_lram_16(adv, q_addr + ADV_SCSIQ_B_CNTL);
	scsiq->cntl = val & 0xFF;
	sg_queue_cnt = (val >> 8) & 0xFF;

	val = adv_read_lram_16(adv, q_addr + ADV_SCSIQ_B_SENSE_LEN);
	scsiq->sense_len = val & 0xFF;
	scsiq->extra_bytes = (val >> 8) & 0xFF;

	/*
	 * Due to a bug in accessing LRAM on the 940UA, the residual
	 * is split into separate high and low 16-bit quantities.
	 */
	scsiq->remain_bytes =
	    adv_read_lram_16(adv, q_addr + ADV_SCSIQ_DW_REMAIN_XFER_CNT);
	scsiq->remain_bytes |=
	    adv_read_lram_16(adv, q_addr + ADV_SCSIQ_W_ALT_DC1) << 16;

	/*
	 * XXX Is this just a safeguard or will the counter really
	 * have bogus upper bits?
	 */
	scsiq->remain_bytes &= max_dma_count;

	return (sg_queue_cnt);
}

int
adv_start_chip(struct adv_softc *adv)
{
	ADV_OUTB(adv, ADV_CHIP_CTRL, 0);
	if ((ADV_INW(adv, ADV_CHIP_STATUS) & ADV_CSW_HALTED) != 0)
		return (0);
	return (1);
}

int
adv_stop_execution(struct adv_softc *adv)
{
	int count;

	count = 0;
	if (adv_read_lram_8(adv, ADV_STOP_CODE_B) == 0) {
		adv_write_lram_8(adv, ADV_STOP_CODE_B,
				 ADV_STOP_REQ_RISC_STOP);
		do {
			if (adv_read_lram_8(adv, ADV_STOP_CODE_B) &
				ADV_STOP_ACK_RISC_STOP) {
				return (1);
			}
			DELAY(1000);
		} while (count++ < 20);
	}
	return (0);
}

int
adv_is_chip_halted(struct adv_softc *adv)
{
	if ((ADV_INW(adv, ADV_CHIP_STATUS) & ADV_CSW_HALTED) != 0) {
		if ((ADV_INB(adv, ADV_CHIP_CTRL) & ADV_CC_HALT) != 0) {
			return (1);
		}
	}
	return (0);
}

/*
 * XXX The numeric constants and the loops in this routine
 * need to be documented.
 */
void
adv_ack_interrupt(struct adv_softc *adv)
{
	u_int8_t	host_flag;
	u_int8_t	risc_flag;
	int		loop;

	loop = 0;
	do {
		risc_flag = adv_read_lram_8(adv, ADVV_RISC_FLAG_B);
		if (loop++ > 0x7FFF) {
			break;
		}
	} while ((risc_flag & ADV_RISC_FLAG_GEN_INT) != 0);

	host_flag = adv_read_lram_8(adv, ADVV_HOST_FLAG_B);
	adv_write_lram_8(adv, ADVV_HOST_FLAG_B,
			 host_flag | ADV_HOST_FLAG_ACK_INT);

	ADV_OUTW(adv, ADV_CHIP_STATUS, ADV_CIW_INT_ACK);
	loop = 0;
	while (ADV_INW(adv, ADV_CHIP_STATUS) & ADV_CSW_INT_PENDING) {
		ADV_OUTW(adv, ADV_CHIP_STATUS, ADV_CIW_INT_ACK);
		if (loop++ > 3) {
			break;
		}
	}

	adv_write_lram_8(adv, ADVV_HOST_FLAG_B, host_flag);
}

/*
 * Handle all conditions that may halt the chip waiting
 * for us to intervene.
 */
void
adv_isr_chip_halted(struct adv_softc *adv)
{
	u_int16_t	  int_halt_code;
	u_int16_t	  halt_q_addr;
	target_bit_vector target_mask;
	target_bit_vector scsi_busy;
	u_int8_t	  halt_qp;
	u_int8_t	  target_ix;
	u_int8_t	  q_cntl;
	u_int8_t	  tid_no;

	if (!dumping)
		mtx_assert(&adv->lock, MA_OWNED);
	int_halt_code = adv_read_lram_16(adv, ADVV_HALTCODE_W);
	halt_qp = adv_read_lram_8(adv, ADVV_CURCDB_B);
	halt_q_addr = ADV_QNO_TO_QADDR(halt_qp);
	target_ix = adv_read_lram_8(adv, halt_q_addr + ADV_SCSIQ_B_TARGET_IX);
	q_cntl = adv_read_lram_8(adv, halt_q_addr + ADV_SCSIQ_B_CNTL);
	tid_no = ADV_TIX_TO_TID(target_ix);
	target_mask = ADV_TID_TO_TARGET_MASK(tid_no);
	if (int_halt_code == ADV_HALT_DISABLE_ASYN_USE_SYN_FIX) {
		/*
		 * Temporarily disable the async fix by removing
		 * this target from the list of affected targets,
		 * setting our async rate, and then putting us
		 * back into the mask.
		 */
		adv->fix_asyn_xfer &= ~target_mask;
		adv_set_syncrate(adv, /*struct cam_path */NULL,
				 tid_no, /*period*/0, /*offset*/0,
				 ADV_TRANS_ACTIVE);
		adv->fix_asyn_xfer |= target_mask;
	} else if (int_halt_code == ADV_HALT_ENABLE_ASYN_USE_SYN_FIX) {
		adv_set_syncrate(adv, /*struct cam_path */NULL,
				 tid_no, /*period*/0, /*offset*/0,
				 ADV_TRANS_ACTIVE);
	} else if (int_halt_code == ADV_HALT_EXTMSG_IN) {
		adv_handle_extmsg_in(adv, halt_q_addr, q_cntl,
				     target_mask, tid_no);
	} else if (int_halt_code == ADV_HALT_CHK_CONDITION) {
		struct	  adv_target_transinfo* tinfo;
		struct	  adv_ccb_info *cinfo;
		union	  ccb *ccb;
		u_int32_t cinfo_index;
		u_int8_t  tag_code;
		u_int8_t  q_status;

		tinfo = &adv->tinfo[tid_no];
		q_cntl |= QC_REQ_SENSE;

		/* Renegotiate if appropriate. */
		adv_set_syncrate(adv, /*struct cam_path */NULL,
				 tid_no, /*period*/0, /*offset*/0,
				 ADV_TRANS_CUR);
		if (tinfo->current.period != tinfo->goal.period) {
			adv_msgout_sdtr(adv, tinfo->goal.period,
					tinfo->goal.offset);
			q_cntl |= QC_MSG_OUT;
		}
		adv_write_lram_8(adv, halt_q_addr + ADV_SCSIQ_B_CNTL, q_cntl);

		/* Don't tag request sense commands */
		tag_code = adv_read_lram_8(adv,
					   halt_q_addr + ADV_SCSIQ_B_TAG_CODE);
		tag_code &=
		    ~(MSG_SIMPLE_Q_TAG|MSG_HEAD_OF_Q_TAG|MSG_ORDERED_Q_TAG);

		if ((adv->fix_asyn_xfer & target_mask) != 0
		 && (adv->fix_asyn_xfer_always & target_mask) == 0) {
			tag_code |= (ADV_TAG_FLAG_DISABLE_DISCONNECT
				 | ADV_TAG_FLAG_DISABLE_ASYN_USE_SYN_FIX);
		}
		adv_write_lram_8(adv, halt_q_addr + ADV_SCSIQ_B_TAG_CODE,
				 tag_code);
		q_status = adv_read_lram_8(adv,
					   halt_q_addr + ADV_SCSIQ_B_STATUS);
		q_status |= (QS_READY | QS_BUSY);
		adv_write_lram_8(adv, halt_q_addr + ADV_SCSIQ_B_STATUS,
				 q_status);
		/*
		 * Freeze the devq until we can handle the sense condition.
		 */
		cinfo_index =
		    adv_read_lram_32(adv, halt_q_addr + ADV_SCSIQ_D_CINFO_IDX);
		cinfo = &adv->ccb_infos[cinfo_index];
		ccb = adv->ccb_infos[cinfo_index].ccb;
		xpt_freeze_devq(ccb->ccb_h.path, /*count*/1);
		ccb->ccb_h.status |= CAM_DEV_QFRZN;
		adv_abort_ccb(adv, tid_no, ADV_TIX_TO_LUN(target_ix),
			      /*ccb*/NULL, CAM_REQUEUE_REQ,
			      /*queued_only*/TRUE);
		scsi_busy = adv_read_lram_8(adv, ADVV_SCSIBUSY_B);
		scsi_busy &= ~target_mask;
		adv_write_lram_8(adv, ADVV_SCSIBUSY_B, scsi_busy);
		/*
		 * Ensure we have enough time to actually
		 * retrieve the sense.
		 */
		callout_reset(&cinfo->timer, 5 * hz, adv_timeout, ccb);
	} else if (int_halt_code == ADV_HALT_SDTR_REJECTED) {
		struct	ext_msg out_msg;

		adv_read_lram_16_multi(adv, ADVV_MSGOUT_BEG,
				       (u_int16_t *) &out_msg,
				       sizeof(out_msg)/2);

		if ((out_msg.msg_type == MSG_EXTENDED)
		 && (out_msg.msg_len == MSG_EXT_SDTR_LEN)
		 && (out_msg.msg_req == MSG_EXT_SDTR)) {
			/* Revert to Async */
			adv_set_syncrate(adv, /*struct cam_path */NULL,
					 tid_no, /*period*/0, /*offset*/0,
					 ADV_TRANS_GOAL|ADV_TRANS_ACTIVE);
		}
		q_cntl &= ~QC_MSG_OUT;
		adv_write_lram_8(adv, halt_q_addr + ADV_SCSIQ_B_CNTL, q_cntl);
	} else if (int_halt_code == ADV_HALT_SS_QUEUE_FULL) {
		u_int8_t scsi_status;
		union ccb *ccb;
		u_int32_t cinfo_index;

		scsi_status = adv_read_lram_8(adv, halt_q_addr
					      + ADV_SCSIQ_SCSI_STATUS);
		cinfo_index =
		    adv_read_lram_32(adv, halt_q_addr + ADV_SCSIQ_D_CINFO_IDX);
		ccb = adv->ccb_infos[cinfo_index].ccb;
		xpt_freeze_devq(ccb->ccb_h.path, /*count*/1);
		ccb->ccb_h.status |= CAM_DEV_QFRZN|CAM_SCSI_STATUS_ERROR;
		ccb->csio.scsi_status = SCSI_STATUS_QUEUE_FULL;
		adv_abort_ccb(adv, tid_no, ADV_TIX_TO_LUN(target_ix),
			      /*ccb*/NULL, CAM_REQUEUE_REQ,
			      /*queued_only*/TRUE);
		scsi_busy = adv_read_lram_8(adv, ADVV_SCSIBUSY_B);
		scsi_busy &= ~target_mask;
		adv_write_lram_8(adv, ADVV_SCSIBUSY_B, scsi_busy);
	} else {
		printf("Unhandled Halt Code %x\n", int_halt_code);
	}
	adv_write_lram_16(adv, ADVV_HALTCODE_W, 0);
}

void
adv_sdtr_to_period_offset(struct adv_softc *adv,
			  u_int8_t sync_data, u_int8_t *period,
			  u_int8_t *offset, int tid)
{
	if (adv->fix_asyn_xfer & ADV_TID_TO_TARGET_MASK(tid)
	 && (sync_data == ASYN_SDTR_DATA_FIX_PCI_REV_AB)) {
		*period = *offset = 0;
	} else {
		*period = adv->sdtr_period_tbl[((sync_data >> 4) & 0xF)];
		*offset = sync_data & 0xF;
	}
}
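
/*
 * Example: sync_data 0x24 on an Ultra board decodes to
 * sdtr_period_tbl_ultra[2], a transfer period factor of 25 (100ns,
 * 10MT/s), with a REQ/ACK offset of 4.
 */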

void
adv_set_syncrate(struct adv_softc *adv, struct cam_path *path,
		 u_int tid, u_int period, u_int offset, u_int type)
{
	struct adv_target_transinfo* tinfo;
	u_int old_period;
	u_int old_offset;
	u_int8_t sdtr_data;

	mtx_assert(&adv->lock, MA_OWNED);
	tinfo = &adv->tinfo[tid];

	/* Filter our input */
	sdtr_data = adv_period_offset_to_sdtr(adv, &period,
					      &offset, tid);

	old_period = tinfo->current.period;
	old_offset = tinfo->current.offset;

	if ((type & ADV_TRANS_CUR) != 0
	 && ((old_period != period || old_offset != offset)
	  || period == 0 || offset == 0) /*Changes in asyn fix settings*/) {
		int halted;

		halted = adv_is_chip_halted(adv);
		if (halted == 0)
			/* Must halt the chip first */
			adv_host_req_chip_halt(adv);

		/* Update current hardware settings */
		adv_set_sdtr_reg_at_id(adv, tid, sdtr_data);

		/*
		 * If a target can run in sync mode, we don't need
		 * to check it for sync problems.
		 */
		if (offset != 0)
			adv->fix_asyn_xfer &= ~ADV_TID_TO_TARGET_MASK(tid);

		if (halted == 0)
			/* Start the chip again */
			adv_start_chip(adv);

		tinfo->current.period = period;
		tinfo->current.offset = offset;

		if (path != NULL) {
			/*
			 * Tell the SCSI layer about the
			 * new transfer parameters.
			 */
			struct	ccb_trans_settings neg;
			struct	ccb_trans_settings_spi *spi;

			memset(&neg, 0, sizeof(neg));
			spi = &neg.xport_specific.spi;
			neg.protocol = PROTO_SCSI;
			neg.protocol_version = SCSI_REV_2;
			neg.transport = XPORT_SPI;
			neg.transport_version = 2;

			spi->sync_offset = offset;
			spi->sync_period = period;
			spi->valid |= CTS_SPI_VALID_SYNC_OFFSET;
			spi->valid |= CTS_SPI_VALID_SYNC_RATE;
			xpt_setup_ccb(&neg.ccb_h, path, /*priority*/1);
			xpt_async(AC_TRANSFER_NEG, path, &neg);
		}
	}

	if ((type & ADV_TRANS_GOAL) != 0) {
		tinfo->goal.period = period;
		tinfo->goal.offset = offset;
	}

	if ((type & ADV_TRANS_USER) != 0) {
		tinfo->user.period = period;
		tinfo->user.offset = offset;
	}
}

u_int8_t
adv_period_offset_to_sdtr(struct adv_softc *adv, u_int *period,
			  u_int *offset, int tid)
{
	u_int i;
	u_int dummy_offset;
	u_int dummy_period;

	if (offset == NULL) {
		dummy_offset = 0;
		offset = &dummy_offset;
	}

	if (period == NULL) {
		dummy_period = 0;
		period = &dummy_period;
	}

	*offset = MIN(ADV_SYN_MAX_OFFSET, *offset);
	if (*period != 0 && *offset != 0) {
		for (i = 0; i < adv->sdtr_period_tbl_size; i++) {
			if (*period <= adv->sdtr_period_tbl[i]) {
				/*
				 * When responding to a target that requests
				 * sync, the requested rate may fall between
				 * two rates that we can output, but still be
				 * a rate that we can receive.  Because of this,
				 * we want to respond to the target with
				 * the same rate that it sent to us even
				 * if the period we use to send data to it
				 * is lower.  Only lower the response period
				 * if we must.
				 */
				if (i == 0 /* Our maximum rate */)
					*period = adv->sdtr_period_tbl[0];
				return ((i << 4) | *offset);
			}
		}
	}

	/* Must go async */
	*period = 0;
	*offset = 0;
	if (adv->fix_asyn_xfer & ADV_TID_TO_TARGET_MASK(tid))
		return (ASYN_SDTR_DATA_FIX_PCI_REV_AB);
	return (0);
}
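
/*
 * Example: a request for period factor 28 with offset 8 on a non-Ultra
 * board scans 25, 30, ... and stops at 30 (i == 1), returning
 * (1 << 4) | 8 = 0x18 while leaving *period at the requested 28, so a
 * target-initiated rate we can receive is echoed back unchanged.
 */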

/* Internal Routines */

static void
adv_read_lram_16_multi(struct adv_softc *adv, u_int16_t s_addr,
		       u_int16_t *buffer, int count)
{
	ADV_OUTW(adv, ADV_LRAM_ADDR, s_addr);
	ADV_INSW(adv, ADV_LRAM_DATA, buffer, count);
}

static void
adv_write_lram_16_multi(struct adv_softc *adv, u_int16_t s_addr,
			u_int16_t *buffer, int count)
{
	ADV_OUTW(adv, ADV_LRAM_ADDR, s_addr);
	ADV_OUTSW(adv, ADV_LRAM_DATA, buffer, count);
}

static void
adv_mset_lram_16(struct adv_softc *adv, u_int16_t s_addr,
		 u_int16_t set_value, int count)
{
	ADV_OUTW(adv, ADV_LRAM_ADDR, s_addr);
	bus_set_multi_2(adv->res, adv->reg_off + ADV_LRAM_DATA,
	    set_value, count);
}

static u_int32_t
adv_msum_lram_16(struct adv_softc *adv, u_int16_t s_addr, int count)
{
	u_int32_t	sum;
	int		i;

	sum = 0;
	ADV_OUTW(adv, ADV_LRAM_ADDR, s_addr);
	for (i = 0; i < count; i++)
		sum += ADV_INW(adv, ADV_LRAM_DATA);
	return (sum);
}

static int
adv_write_and_verify_lram_16(struct adv_softc *adv, u_int16_t addr,
			     u_int16_t value)
{
	int	retval;

	retval = 0;
	ADV_OUTW(adv, ADV_LRAM_ADDR, addr);
	ADV_OUTW(adv, ADV_LRAM_DATA, value);
	DELAY(10000);
	ADV_OUTW(adv, ADV_LRAM_ADDR, addr);
	if (value != ADV_INW(adv, ADV_LRAM_DATA))
		retval = 1;
	return (retval);
}

static u_int32_t
adv_read_lram_32(struct adv_softc *adv, u_int16_t addr)
{
	u_int16_t           val_low, val_high;

	ADV_OUTW(adv, ADV_LRAM_ADDR, addr);

#if BYTE_ORDER == BIG_ENDIAN
	val_high = ADV_INW(adv, ADV_LRAM_DATA);
	val_low = ADV_INW(adv, ADV_LRAM_DATA);
#else
	val_low = ADV_INW(adv, ADV_LRAM_DATA);
	val_high = ADV_INW(adv, ADV_LRAM_DATA);
#endif

	return (((u_int32_t)val_high << 16) | (u_int32_t)val_low);
}
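
/*
 * The two back-to-back reads work because ADV_LRAM_DATA auto-increments
 * the latched word address after each access; the same property lets
 * the _multi routines above issue a single address setup before their
 * string I/O transfers.
 */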

static void
adv_write_lram_32(struct adv_softc *adv, u_int16_t addr, u_int32_t value)
{
	ADV_OUTW(adv, ADV_LRAM_ADDR, addr);

#if BYTE_ORDER == BIG_ENDIAN
	ADV_OUTW(adv, ADV_LRAM_DATA, (u_int16_t)((value >> 16) & 0xFFFF));
	ADV_OUTW(adv, ADV_LRAM_DATA, (u_int16_t)(value & 0xFFFF));
#else
	ADV_OUTW(adv, ADV_LRAM_DATA, (u_int16_t)(value & 0xFFFF));
	ADV_OUTW(adv, ADV_LRAM_DATA, (u_int16_t)((value >> 16) & 0xFFFF));
#endif
}

static void
adv_write_lram_32_multi(struct adv_softc *adv, u_int16_t s_addr,
			u_int32_t *buffer, int count)
{
	ADV_OUTW(adv, ADV_LRAM_ADDR, s_addr);
	ADV_OUTSW(adv, ADV_LRAM_DATA, (u_int16_t *)buffer, count * 2);
}

static u_int16_t
adv_read_eeprom_16(struct adv_softc *adv, u_int8_t addr)
{
	u_int16_t read_wval;
	u_int8_t  cmd_reg;

	adv_write_eeprom_cmd_reg(adv, ADV_EEPROM_CMD_WRITE_DISABLE);
	DELAY(1000);
	cmd_reg = addr | ADV_EEPROM_CMD_READ;
	adv_write_eeprom_cmd_reg(adv, cmd_reg);
	DELAY(1000);
	read_wval = ADV_INW(adv, ADV_EEPROM_DATA);
	DELAY(1000);
	return (read_wval);
}

static u_int16_t
adv_write_eeprom_16(struct adv_softc *adv, u_int8_t addr, u_int16_t value)
{
	u_int16_t	read_value;

	read_value = adv_read_eeprom_16(adv, addr);
	if (read_value != value) {
		adv_write_eeprom_cmd_reg(adv, ADV_EEPROM_CMD_WRITE_ENABLE);
		DELAY(1000);

		ADV_OUTW(adv, ADV_EEPROM_DATA, value);
		DELAY(1000);

		adv_write_eeprom_cmd_reg(adv, ADV_EEPROM_CMD_WRITE | addr);
		DELAY(20 * 1000);

		adv_write_eeprom_cmd_reg(adv, ADV_EEPROM_CMD_WRITE_DISABLE);
		DELAY(1000);
		read_value = adv_read_eeprom_16(adv, addr);
	}
	return (read_value);
}

static int
adv_write_eeprom_cmd_reg(struct adv_softc *adv, u_int8_t cmd_reg)
{
	u_int8_t read_back;
	int	 retry;

	retry = 0;
	while (1) {
		ADV_OUTB(adv, ADV_EEPROM_CMD, cmd_reg);
		DELAY(1000);
		read_back = ADV_INB(adv, ADV_EEPROM_CMD);
		if (read_back == cmd_reg) {
			return (1);
		}
		if (retry++ > ADV_EEPROM_MAX_RETRY) {
			return (0);
		}
	}
}

static int
adv_set_eeprom_config_once(struct adv_softc *adv,
			   struct adv_eeprom_config *eeprom_config)
{
	int		n_error;
	u_int16_t	*wbuf;
	u_int16_t	sum;
	u_int8_t	s_addr;
	u_int8_t	cfg_beg;
	u_int8_t	cfg_end;

	wbuf = (u_int16_t *)eeprom_config;
	n_error = 0;
	sum = 0;
	for (s_addr = 0; s_addr < 2; s_addr++, wbuf++) {
		sum += *wbuf;
		if (*wbuf != adv_write_eeprom_16(adv, s_addr, *wbuf)) {
			n_error++;
		}
	}
	if (adv->type & ADV_VL) {
		cfg_beg = ADV_EEPROM_CFG_BEG_VL;
		cfg_end = ADV_EEPROM_MAX_ADDR_VL;
	} else {
		cfg_beg = ADV_EEPROM_CFG_BEG;
		cfg_end = ADV_EEPROM_MAX_ADDR;
	}

	for (s_addr = cfg_beg; s_addr <= (cfg_end - 1); s_addr++, wbuf++) {
		sum += *wbuf;
		if (*wbuf != adv_write_eeprom_16(adv, s_addr, *wbuf)) {
			n_error++;
		}
	}
	*wbuf = sum;
	if (sum != adv_write_eeprom_16(adv, s_addr, sum)) {
		n_error++;
	}
	wbuf = (u_int16_t *)eeprom_config;
	for (s_addr = 0; s_addr < 2; s_addr++, wbuf++) {
		if (*wbuf != adv_read_eeprom_16(adv, s_addr)) {
			n_error++;
		}
	}
	for (s_addr = cfg_beg; s_addr <= cfg_end; s_addr++, wbuf++) {
		if (*wbuf != adv_read_eeprom_16(adv, s_addr)) {
			n_error++;
		}
	}
	return (n_error);
}

static u_int32_t
adv_load_microcode(struct adv_softc *adv, u_int16_t s_addr,
		   u_int16_t *mcode_buf, u_int16_t mcode_size)
{
	u_int32_t chksum;
	u_int16_t mcode_lram_size;
	u_int16_t mcode_chksum;

	mcode_lram_size = mcode_size >> 1;
	/* XXX Why zero the memory just before you write the whole thing?? */
	adv_mset_lram_16(adv, s_addr, 0, mcode_lram_size);
	adv_write_lram_16_multi(adv, s_addr, mcode_buf, mcode_lram_size);

	chksum = adv_msum_lram_16(adv, s_addr, mcode_lram_size);
	mcode_chksum = (u_int16_t)adv_msum_lram_16(adv, ADV_CODE_SEC_BEG,
						   ((mcode_size - s_addr
						     - ADV_CODE_SEC_BEG) >> 1));
	adv_write_lram_16(adv, ADVV_MCODE_CHKSUM_W, mcode_chksum);
	adv_write_lram_16(adv, ADVV_MCODE_SIZE_W, mcode_size);
	return (chksum);
}
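
/*
 * The caller validates the download by comparing the returned sum with
 * the precomputed adv_mcode_chksum (see adv_init_lram_and_mcode()
 * above).  The separate mcode_chksum written to ADVV_MCODE_CHKSUM_W
 * covers only the section starting at ADV_CODE_SEC_BEG, apparently for
 * the microcode's own consumption.
 */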

static void
adv_reinit_lram(struct adv_softc *adv)
{
	adv_init_lram(adv);
	adv_init_qlink_var(adv);
}

static void
adv_init_lram(struct adv_softc *adv)
{
	u_int8_t  i;
	u_int16_t s_addr;

	adv_mset_lram_16(adv, ADV_QADR_BEG, 0,
			 (((adv->max_openings + 2 + 1) * 64) >> 1));

	i = ADV_MIN_ACTIVE_QNO;
	s_addr = ADV_QADR_BEG + ADV_QBLK_SIZE;

	adv_write_lram_8(adv, s_addr + ADV_SCSIQ_B_FWD, i + 1);
	adv_write_lram_8(adv, s_addr + ADV_SCSIQ_B_BWD, adv->max_openings);
	adv_write_lram_8(adv, s_addr + ADV_SCSIQ_B_QNO, i);
	i++;
	s_addr += ADV_QBLK_SIZE;
	for (; i < adv->max_openings; i++, s_addr += ADV_QBLK_SIZE) {
		adv_write_lram_8(adv, s_addr + ADV_SCSIQ_B_FWD, i + 1);
		adv_write_lram_8(adv, s_addr + ADV_SCSIQ_B_BWD, i - 1);
		adv_write_lram_8(adv, s_addr + ADV_SCSIQ_B_QNO, i);
	}

	adv_write_lram_8(adv, s_addr + ADV_SCSIQ_B_FWD, ADV_QLINK_END);
	adv_write_lram_8(adv, s_addr + ADV_SCSIQ_B_BWD, adv->max_openings - 1);
	adv_write_lram_8(adv, s_addr + ADV_SCSIQ_B_QNO, adv->max_openings);
	i++;
	s_addr += ADV_QBLK_SIZE;

	for (; i <= adv->max_openings + 3; i++, s_addr += ADV_QBLK_SIZE) {
		adv_write_lram_8(adv, s_addr + ADV_SCSIQ_B_FWD, i);
		adv_write_lram_8(adv, s_addr + ADV_SCSIQ_B_BWD, i);
		adv_write_lram_8(adv, s_addr + ADV_SCSIQ_B_QNO, i);
	}
}

static int
adv_init_microcode_var(struct adv_softc *adv)
{
	int	 i;

	for (i = 0; i <= ADV_MAX_TID; i++) {
		/* Start out async all around */
		adv_set_syncrate(adv, /*path*/NULL,
				 i, 0, 0,
				 ADV_TRANS_GOAL|ADV_TRANS_CUR);
	}

	adv_init_qlink_var(adv);

	adv_write_lram_8(adv, ADVV_DISC_ENABLE_B, adv->disc_enable);
	adv_write_lram_8(adv, ADVV_HOSTSCSI_ID_B, 0x01 << adv->scsi_id);

	adv_write_lram_32(adv, ADVV_OVERRUN_PADDR_D, adv->overrun_physbase);

	adv_write_lram_32(adv, ADVV_OVERRUN_BSIZE_D, ADV_OVERRUN_BSIZE);

	ADV_OUTW(adv, ADV_REG_PROG_COUNTER, ADV_MCODE_START_ADDR);
	if (ADV_INW(adv, ADV_REG_PROG_COUNTER) != ADV_MCODE_START_ADDR) {
		device_printf(adv->dev,
		    "Unable to set program counter. Aborting.\n");
		return (1);
	}
	return (0);
}

static void
adv_init_qlink_var(struct adv_softc *adv)
{
	int	  i;
	u_int16_t lram_addr;

	adv_write_lram_8(adv, ADVV_NEXTRDY_B, 1);
	adv_write_lram_8(adv, ADVV_DONENEXT_B, adv->max_openings);

	adv_write_lram_16(adv, ADVV_FREE_Q_HEAD_W, 1);
	adv_write_lram_16(adv, ADVV_DONE_Q_TAIL_W, adv->max_openings);

	adv_write_lram_8(adv, ADVV_BUSY_QHEAD_B,
			 (u_int8_t)((int) adv->max_openings + 1));
	adv_write_lram_8(adv, ADVV_DISC1_QHEAD_B,
			 (u_int8_t)((int) adv->max_openings + 2));

	adv_write_lram_8(adv, ADVV_TOTAL_READY_Q_B, adv->max_openings);

	adv_write_lram_16(adv, ADVV_ASCDVC_ERR_CODE_W, 0);
	adv_write_lram_16(adv, ADVV_HALTCODE_W, 0);
	adv_write_lram_8(adv, ADVV_STOP_CODE_B, 0);
	adv_write_lram_8(adv, ADVV_SCSIBUSY_B, 0);
	adv_write_lram_8(adv, ADVV_WTM_FLAG_B, 0);
	adv_write_lram_8(adv, ADVV_Q_DONE_IN_PROGRESS_B, 0);

	lram_addr = ADV_QADR_BEG;
	for (i = 0; i < 32; i++, lram_addr += 2)
		adv_write_lram_16(adv, lram_addr, 0);
}

static void
adv_disable_interrupt(struct adv_softc *adv)
{
	u_int16_t cfg;

	cfg = ADV_INW(adv, ADV_CONFIG_LSW);
	ADV_OUTW(adv, ADV_CONFIG_LSW, cfg & ~ADV_CFG_LSW_HOST_INT_ON);
}

static void
adv_enable_interrupt(struct adv_softc *adv)
{
	u_int16_t cfg;

	cfg = ADV_INW(adv, ADV_CONFIG_LSW);
	ADV_OUTW(adv, ADV_CONFIG_LSW, cfg | ADV_CFG_LSW_HOST_INT_ON);
}

static void
adv_toggle_irq_act(struct adv_softc *adv)
{
	ADV_OUTW(adv, ADV_CHIP_STATUS, ADV_CIW_IRQ_ACT);
	ADV_OUTW(adv, ADV_CHIP_STATUS, 0);
}

void
adv_start_execution(struct adv_softc *adv)
{
	if (adv_read_lram_8(adv, ADV_STOP_CODE_B) != 0) {
		adv_write_lram_8(adv, ADV_STOP_CODE_B, 0);
	}
}

int
adv_stop_chip(struct adv_softc *adv)
{
	u_int8_t cc_val;

	cc_val = ADV_INB(adv, ADV_CHIP_CTRL)
		 & (~(ADV_CC_SINGLE_STEP | ADV_CC_TEST | ADV_CC_DIAG));
	ADV_OUTB(adv, ADV_CHIP_CTRL, cc_val | ADV_CC_HALT);
	adv_set_chip_ih(adv, ADV_INS_HALT);
	adv_set_chip_ih(adv, ADV_INS_RFLAG_WTM);
	if ((ADV_INW(adv, ADV_CHIP_STATUS) & ADV_CSW_HALTED) == 0) {
		return (0);
	}
	return (1);
}

static int
adv_host_req_chip_halt(struct adv_softc *adv)
{
	int	 count;
	u_int8_t saved_stop_code;

	if (adv_is_chip_halted(adv))
		return (1);

	count = 0;
	saved_stop_code = adv_read_lram_8(adv, ADVV_STOP_CODE_B);
	adv_write_lram_8(adv, ADVV_STOP_CODE_B,
			 ADV_STOP_HOST_REQ_RISC_HALT | ADV_STOP_REQ_RISC_STOP);
	while (adv_is_chip_halted(adv) == 0
	    && count++ < 2000)
		;

	adv_write_lram_8(adv, ADVV_STOP_CODE_B, saved_stop_code);
	return (count < 2000);
}

static void
adv_set_chip_ih(struct adv_softc *adv, u_int16_t ins_code)
{
	adv_set_bank(adv, 1);
	ADV_OUTW(adv, ADV_REG_IH, ins_code);
	adv_set_bank(adv, 0);
}

#if 0
static u_int8_t
adv_get_chip_scsi_ctrl(struct adv_softc *adv)
{
	u_int8_t scsi_ctrl;

	adv_set_bank(adv, 1);
	scsi_ctrl = ADV_INB(adv, ADV_REG_SC);
	adv_set_bank(adv, 0);
	return (scsi_ctrl);
}
#endif

/*
 * XXX Looks like more padding issues in this routine as well.
 *     There has to be a way to turn this into an insw.
 */
static void
adv_get_q_info(struct adv_softc *adv, u_int16_t s_addr,
	       u_int16_t *inbuf, int words)
{
	int	i;

	ADV_OUTW(adv, ADV_LRAM_ADDR, s_addr);
	for (i = 0; i < words; i++, inbuf++) {
		if (i == 5) {
			continue;
		}
		*inbuf = ADV_INW(adv, ADV_LRAM_DATA);
	}
}

static u_int
adv_get_num_free_queues(struct adv_softc *adv, u_int8_t n_qs)
{
	u_int	  cur_used_qs;
	u_int	  cur_free_qs;

	cur_used_qs = adv->cur_active + ADV_MIN_FREE_Q;

	if ((cur_used_qs + n_qs) <= adv->max_openings) {
		cur_free_qs = adv->max_openings - cur_used_qs;
		return (cur_free_qs);
	}
	adv->openings_needed = n_qs;
	return (0);
}
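
/*
 * Worked example (assuming ADV_MIN_FREE_Q is 2): with max_openings of
 * 240 and 200 queues active, cur_used_qs is 202, so a request for 8
 * queues returns 38.  At 237 active the same request overflows, returns
 * 0, and records openings_needed = 8 for the requeue path.
 */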

static u_int8_t
adv_alloc_free_queues(struct adv_softc *adv, u_int8_t free_q_head,
		      u_int8_t n_free_q)
{
	int i;

	for (i = 0; i < n_free_q; i++) {
		free_q_head = adv_alloc_free_queue(adv, free_q_head);
		if (free_q_head == ADV_QLINK_END)
			break;
	}
	return (free_q_head);
}

static u_int8_t
adv_alloc_free_queue(struct adv_softc *adv, u_int8_t free_q_head)
{
	u_int16_t	q_addr;
	u_int8_t	next_qp;
	u_int8_t	q_status;

	next_qp = ADV_QLINK_END;
	q_addr = ADV_QNO_TO_QADDR(free_q_head);
	q_status = adv_read_lram_8(adv, q_addr + ADV_SCSIQ_B_STATUS);

	if ((q_status & QS_READY) == 0)
		next_qp = adv_read_lram_8(adv, q_addr + ADV_SCSIQ_B_FWD);

	return (next_qp);
}

static int
adv_send_scsi_queue(struct adv_softc *adv, struct adv_scsi_q *scsiq,
		    u_int8_t n_q_required)
{
	u_int8_t	free_q_head;
	u_int8_t	next_qp;
	u_int8_t	tid_no;
	u_int8_t	target_ix;
	int		retval;

	retval = 1;
	target_ix = scsiq->q2.target_ix;
	tid_no = ADV_TIX_TO_TID(target_ix);
	free_q_head = adv_read_lram_16(adv, ADVV_FREE_Q_HEAD_W) & 0xFF;
	if ((next_qp = adv_alloc_free_queues(adv, free_q_head, n_q_required))
	    != ADV_QLINK_END) {
		scsiq->q1.q_no = free_q_head;

		/*
		 * Now that we know our Q number, point our sense
		 * buffer pointer to a bus dma mapped area where
		 * we can dma the data to.
		 */
		scsiq->q1.sense_addr = adv->sense_physbase
		    + ((free_q_head - 1) * sizeof(struct scsi_sense_data));
		adv_put_ready_sg_list_queue(adv, scsiq, free_q_head);
		adv_write_lram_16(adv, ADVV_FREE_Q_HEAD_W, next_qp);
		adv->cur_active += n_q_required;
		retval = 0;
	}
	return (retval);
}

static void
adv_put_ready_sg_list_queue(struct adv_softc *adv, struct adv_scsi_q *scsiq,
			    u_int q_no)
{
	u_int8_t	sg_list_dwords;
	u_int8_t	sg_index, i;
	u_int8_t	sg_entry_cnt;
	u_int8_t	next_qp;
	u_int16_t	q_addr;
	struct		adv_sg_head *sg_head;
	struct		adv_sg_list_q scsi_sg_q;

	sg_head = scsiq->sg_head;

	if (sg_head) {
		sg_entry_cnt = sg_head->entry_cnt - 1;
#ifdef DIAGNOSTIC
		if (sg_entry_cnt == 0)
			panic("adv_put_ready_sg_list_queue: ScsiQ with "
			      "a SG list but only one element");
		if ((scsiq->q1.cntl & QC_SG_HEAD) == 0)
			panic("adv_put_ready_sg_list_queue: ScsiQ with "
			      "a SG list but QC_SG_HEAD not set");
#endif
		q_addr = ADV_QNO_TO_QADDR(q_no);
		sg_index = 1;
		scsiq->q1.sg_queue_cnt = sg_head->queue_cnt;
		scsi_sg_q.sg_head_qp = q_no;
		scsi_sg_q.cntl = QCSG_SG_XFER_LIST;
		for (i = 0; i < sg_head->queue_cnt; i++) {
			u_int8_t segs_this_q;

			if (sg_entry_cnt > ADV_SG_LIST_PER_Q)
				segs_this_q = ADV_SG_LIST_PER_Q;
			else {
				/* This will be the last segment then */
				segs_this_q = sg_entry_cnt;
				scsi_sg_q.cntl |= QCSG_SG_XFER_END;
			}
			scsi_sg_q.seq_no = i + 1;
			sg_list_dwords = segs_this_q << 1;
			if (i == 0) {
				scsi_sg_q.sg_list_cnt = segs_this_q;
				scsi_sg_q.sg_cur_list_cnt = segs_this_q;
			} else {
				scsi_sg_q.sg_list_cnt = segs_this_q - 1;
				scsi_sg_q.sg_cur_list_cnt = segs_this_q - 1;
			}
			next_qp = adv_read_lram_8(adv, q_addr + ADV_SCSIQ_B_FWD);
			scsi_sg_q.q_no = next_qp;
			q_addr = ADV_QNO_TO_QADDR(next_qp);

			adv_write_lram_16_multi(adv,
						q_addr + ADV_SCSIQ_SGHD_CPY_BEG,
						(u_int16_t *)&scsi_sg_q,
						sizeof(scsi_sg_q) >> 1);
			adv_write_lram_32_multi(adv, q_addr + ADV_SGQ_LIST_BEG,
						(u_int32_t *)&sg_head->sg_list[sg_index],
						sg_list_dwords);
			sg_entry_cnt -= segs_this_q;
			sg_index += ADV_SG_LIST_PER_Q;
		}
	}
	adv_put_ready_queue(adv, scsiq, q_no);
}

static void
adv_put_ready_queue(struct adv_softc *adv, struct adv_scsi_q *scsiq,
		    u_int q_no)
{
	struct		adv_target_transinfo* tinfo;
	u_int		q_addr;
	u_int		tid_no;

	tid_no = ADV_TIX_TO_TID(scsiq->q2.target_ix);
	tinfo = &adv->tinfo[tid_no];
	if ((tinfo->current.period != tinfo->goal.period)
	 || (tinfo->current.offset != tinfo->goal.offset)) {
		adv_msgout_sdtr(adv, tinfo->goal.period, tinfo->goal.offset);
		scsiq->q1.cntl |= QC_MSG_OUT;
	}
	q_addr = ADV_QNO_TO_QADDR(q_no);

	scsiq->q1.status = QS_FREE;

	adv_write_lram_16_multi(adv, q_addr + ADV_SCSIQ_CDB_BEG,
				(u_int16_t *)scsiq->cdbptr,
				scsiq->q2.cdb_len >> 1);

#if BYTE_ORDER == BIG_ENDIAN
	adv_adj_scsiq_endian(scsiq);
#endif

	adv_put_scsiq(adv, q_addr + ADV_SCSIQ_CPY_BEG,
		      (u_int16_t *) &scsiq->q1.cntl,
		      ((sizeof(scsiq->q1) + sizeof(scsiq->q2)) / 2) - 1);

#ifdef CC_WRITE_IO_COUNT
	adv_write_lram_16(adv, q_addr + ADV_SCSIQ_W_REQ_COUNT,
			  adv->req_count);
#endif

#ifdef CC_CLEAR_DMA_REMAIN
	adv_write_lram_32(adv, q_addr + ADV_SCSIQ_DW_REMAIN_XFER_ADDR, 0);
	adv_write_lram_32(adv, q_addr + ADV_SCSIQ_DW_REMAIN_XFER_CNT, 0);
#endif

	adv_write_lram_16(adv, q_addr + ADV_SCSIQ_B_STATUS,
			  (scsiq->q1.q_no << 8) | QS_READY);
}

static void
adv_put_scsiq(struct adv_softc *adv, u_int16_t s_addr,
	      u_int16_t *buffer, int words)
{
	int	i;

	/*
	 * XXX This routine makes *gross* assumptions
	 * about padding in the data structures.
	 * Either the data structures should have explicit
	 * padding members added, or they should have padding
	 * turned off via compiler attributes depending on
	 * which yields better overall performance.  My hunch
	 * would be that turning off padding would be the
	 * faster approach as an outsw is much faster than
	 * this crude loop and accessing un-aligned data
	 * members isn't *that* expensive.  The other choice
	 * would be to modify the ASC script so that the
	 * adv_scsiq_1 structure can be re-arranged so
	 * padding isn't required.
	 */
	ADV_OUTW(adv, ADV_LRAM_ADDR, s_addr);
	for (i = 0; i < words; i++, buffer++) {
		if (i == 2 || i == 10) {
			continue;
		}
		ADV_OUTW(adv, ADV_LRAM_DATA, *buffer);
	}
}

#if BYTE_ORDER == BIG_ENDIAN
static void
adv_adj_endian_qdone_info(struct adv_q_done_info *scsiq)
{

	panic("adv(4) not supported on big-endian machines.\n");
}

static void
adv_adj_scsiq_endian(struct adv_scsi_q *scsiq)
{

	panic("adv(4) not supported on big-endian machines.\n");
}
#endif

static void
adv_handle_extmsg_in(struct adv_softc *adv, u_int16_t halt_q_addr,
		     u_int8_t q_cntl, target_bit_vector target_mask,
		     int tid_no)
{
	struct	ext_msg ext_msg;

	adv_read_lram_16_multi(adv, ADVV_MSGIN_BEG, (u_int16_t *) &ext_msg,
			       sizeof(ext_msg) >> 1);
	if ((ext_msg.msg_type == MSG_EXTENDED)
	 && (ext_msg.msg_req == MSG_EXT_SDTR)
	 && (ext_msg.msg_len == MSG_EXT_SDTR_LEN)) {
		union	  ccb *ccb;
		struct	  adv_target_transinfo* tinfo;
		u_int32_t cinfo_index;
		u_int	 period;
		u_int	 offset;
		int	 sdtr_accept;
		u_int8_t orig_offset;

		cinfo_index =
		    adv_read_lram_32(adv, halt_q_addr + ADV_SCSIQ_D_CINFO_IDX);
		ccb = adv->ccb_infos[cinfo_index].ccb;
		tinfo = &adv->tinfo[tid_no];
		sdtr_accept = TRUE;

		orig_offset = ext_msg.req_ack_offset;
		if (ext_msg.xfer_period < tinfo->goal.period) {
			sdtr_accept = FALSE;
			ext_msg.xfer_period = tinfo->goal.period;
		}

		/* Perform range checking */
		period = ext_msg.xfer_period;
		offset = ext_msg.req_ack_offset;
		adv_period_offset_to_sdtr(adv, &period, &offset, tid_no);
		ext_msg.xfer_period = period;
		ext_msg.req_ack_offset = offset;

		/* Record our current sync settings */
		adv_set_syncrate(adv, ccb->ccb_h.path,
				 tid_no, ext_msg.xfer_period,
				 ext_msg.req_ack_offset,
				 ADV_TRANS_GOAL|ADV_TRANS_ACTIVE);

		/* Offset too high or large period forced async */
		if (orig_offset != ext_msg.req_ack_offset)
			sdtr_accept = FALSE;

		if (sdtr_accept && (q_cntl & QC_MSG_OUT)) {
			/* Valid response to our requested negotiation */
			q_cntl &= ~QC_MSG_OUT;
		} else {
			/* Must Respond */
			q_cntl |= QC_MSG_OUT;
			adv_msgout_sdtr(adv, ext_msg.xfer_period,
					ext_msg.req_ack_offset);
		}
	} else if (ext_msg.msg_type == MSG_EXTENDED
		&& ext_msg.msg_req == MSG_EXT_WDTR
		&& ext_msg.msg_len == MSG_EXT_WDTR_LEN) {
		ext_msg.wdtr_width = 0;
		adv_write_lram_16_multi(adv, ADVV_MSGOUT_BEG,
					(u_int16_t *)&ext_msg,
					sizeof(ext_msg) >> 1);
		q_cntl |= QC_MSG_OUT;
	} else {
		ext_msg.msg_type = MSG_MESSAGE_REJECT;
		adv_write_lram_16_multi(adv, ADVV_MSGOUT_BEG,
					(u_int16_t *)&ext_msg,
					sizeof(ext_msg) >> 1);
		q_cntl |= QC_MSG_OUT;
	}
	adv_write_lram_8(adv, halt_q_addr + ADV_SCSIQ_B_CNTL, q_cntl);
}

static void
adv_msgout_sdtr(struct adv_softc *adv, u_int8_t sdtr_period,
		u_int8_t sdtr_offset)
{
	struct	 ext_msg sdtr_buf;

	sdtr_buf.msg_type = MSG_EXTENDED;
	sdtr_buf.msg_len = MSG_EXT_SDTR_LEN;
	sdtr_buf.msg_req = MSG_EXT_SDTR;
	sdtr_buf.xfer_period = sdtr_period;
	sdtr_offset &= ADV_SYN_MAX_OFFSET;
	sdtr_buf.req_ack_offset = sdtr_offset;
	adv_write_lram_16_multi(adv, ADVV_MSGOUT_BEG,
				(u_int16_t *) &sdtr_buf,
				sizeof(sdtr_buf) / 2);
}
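
/*
 * On the wire this is the standard SCSI-2 SDTR sequence: 0x01
 * (EXTENDED MESSAGE), 0x03 (length), 0x01 (SDTR), the transfer period
 * factor, then the REQ/ACK offset.  For example, a factor of 25 with
 * offset 15 requests 10MT/s fast SCSI at the maximum offset this
 * driver advertises.
 */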

int
adv_abort_ccb(struct adv_softc *adv, int target, int lun, union ccb *ccb,
	      u_int32_t status, int queued_only)
{
	u_int16_t q_addr;
	u_int8_t  q_no;
	struct adv_q_done_info scsiq_buf;
	struct adv_q_done_info *scsiq;
	u_int8_t  target_ix;
	int	  count;

	if (!dumping)
		mtx_assert(&adv->lock, MA_OWNED);
	scsiq = &scsiq_buf;
	target_ix = ADV_TIDLUN_TO_IX(target, lun);
	count = 0;
	for (q_no = ADV_MIN_ACTIVE_QNO; q_no <= adv->max_openings; q_no++) {
		struct adv_ccb_info *ccb_info;

		q_addr = ADV_QNO_TO_QADDR(q_no);
		adv_copy_lram_doneq(adv, q_addr, scsiq, adv->max_dma_count);
		ccb_info = &adv->ccb_infos[scsiq->d2.ccb_index];
		if (((scsiq->q_status & QS_READY) != 0)
		 && ((scsiq->q_status & QS_ABORTED) == 0)
		 && ((scsiq->cntl & QCSG_SG_XFER_LIST) == 0)
		 && (scsiq->d2.target_ix == target_ix)
		 && (queued_only == 0
		  || !(scsiq->q_status & (QS_DISC1|QS_DISC2|QS_BUSY|QS_DONE)))
		 && (ccb == NULL || (ccb == ccb_info->ccb))) {
			union ccb *aborted_ccb;
			struct adv_ccb_info *cinfo;

			scsiq->q_status |= QS_ABORTED;
			adv_write_lram_8(adv, q_addr + ADV_SCSIQ_B_STATUS,
					 scsiq->q_status);
			aborted_ccb = ccb_info->ccb;
			/* Don't clobber earlier error codes */
			if ((aborted_ccb->ccb_h.status & CAM_STATUS_MASK)
			  == CAM_REQ_INPROG)
				aborted_ccb->ccb_h.status |= status;
			cinfo = (struct adv_ccb_info *)
			    aborted_ccb->ccb_h.ccb_cinfo_ptr;
			cinfo->state |= ACCB_ABORT_QUEUED;
			count++;
		}
	}
	return (count);
}

int
adv_reset_bus(struct adv_softc *adv, int initiate_bus_reset)
{
	int count;
	int i;
	union ccb *ccb;

	if (!dumping)
		mtx_assert(&adv->lock, MA_OWNED);
	i = 200;
	while ((ADV_INW(adv, ADV_CHIP_STATUS) & ADV_CSW_SCSI_RESET_ACTIVE) != 0
	    && i--)
		DELAY(1000);
	adv_reset_chip(adv, initiate_bus_reset);
	adv_reinit_lram(adv);
	for (i = 0; i <= ADV_MAX_TID; i++)
		adv_set_syncrate(adv, NULL, i, /*period*/0,
				 /*offset*/0, ADV_TRANS_CUR);
	ADV_OUTW(adv, ADV_REG_PROG_COUNTER, ADV_MCODE_START_ADDR);

	/* Tell the XPT layer that a bus reset occurred */
	if (adv->path != NULL)
		xpt_async(AC_BUS_RESET, adv->path, NULL);

	count = 0;
	while ((ccb = (union ccb *)LIST_FIRST(&adv->pending_ccbs)) != NULL) {
		if ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_INPROG)
			ccb->ccb_h.status |= CAM_SCSI_BUS_RESET;
		adv_done(adv, ccb, QD_ABORTED_BY_HOST, 0, 0, 0);
		count++;
	}

	adv_start_chip(adv);
	return (count);
}

static void
adv_set_sdtr_reg_at_id(struct adv_softc *adv, int tid, u_int8_t sdtr_data)
{
	int orig_id;

	adv_set_bank(adv, 1);
	orig_id = ffs(ADV_INB(adv, ADV_HOST_SCSIID)) - 1;
	ADV_OUTB(adv, ADV_HOST_SCSIID, tid);
	if (ADV_INB(adv, ADV_HOST_SCSIID) == (0x01 << tid)) {
		adv_set_bank(adv, 0);
		ADV_OUTB(adv, ADV_SYN_OFFSET, sdtr_data);
	}
	adv_set_bank(adv, 1);
	ADV_OUTB(adv, ADV_HOST_SCSIID, orig_id);
	adv_set_bank(adv, 0);
}