advlib.c revision 39217
1/*
2 * Low level routines for the Advanced Systems Inc. SCSI controllers chips
3 *
4 * Copyright (c) 1996-1997 Justin Gibbs.
5 * All rights reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 * 1. Redistributions of source code must retain the above copyright
11 *    notice, this list of conditions, and the following disclaimer,
12 *    without modification, immediately at the beginning of the file.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 *    notice, this list of conditions and the following disclaimer in the
15 *    documentation and/or other materials provided with the distribution.
16 * 3. The name of the author may not be used to endorse or promote products
17 *    derived from this software without specific prior written permission.
18 *
19 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
20 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
22 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
23 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
24 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
25 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
26 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
27 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
28 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
29 * SUCH DAMAGE.
30 *
31 *      $Id$
32 */
33/*
34 * Ported from:
35 * advansys.c - Linux Host Driver for AdvanSys SCSI Adapters
36 *
37 * Copyright (c) 1995-1996 Advanced System Products, Inc.
38 * All Rights Reserved.
39 *
40 * Redistribution and use in source and binary forms, with or without
41 * modification, are permitted provided that redistributions of source
42 * code retain the above copyright notice and this comment without
43 * modification.
44 */
45
46#include <sys/param.h>
47#include <sys/systm.h>
48
49#include <machine/bus_pio.h>
50#include <machine/bus.h>
51#include <machine/clock.h>
52
53#include <cam/cam.h>
54#include <cam/cam_ccb.h>
55#include <cam/cam_sim.h>
56#include <cam/cam_xpt_sim.h>
57
58#include <cam/scsi/scsi_all.h>
59#include <cam/scsi/scsi_message.h>
60#include <cam/scsi/scsi_da.h>
61#include <cam/scsi/scsi_cd.h>
62
63#include <vm/vm.h>
64#include <vm/vm_param.h>
65#include <vm/pmap.h>
66
67#include <dev/advansys/advansys.h>
68#include <dev/advansys/advmcode.h>
69
70struct adv_quirk_entry {
71	struct scsi_inquiry_pattern inq_pat;
72	u_int8_t quirks;
73#define ADV_QUIRK_FIX_ASYN_XFER_ALWAYS	0x01
74#define ADV_QUIRK_FIX_ASYN_XFER		0x02
75};
76
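/*
 * Inquiry data is matched against these entries first to last, so the
 * wildcard "default" entry must remain last.
 */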
77static struct adv_quirk_entry adv_quirk_table[] =
78{
79	{
80		{ T_CDROM, SIP_MEDIA_REMOVABLE, "HP", "*", "*" },
81		ADV_QUIRK_FIX_ASYN_XFER_ALWAYS|ADV_QUIRK_FIX_ASYN_XFER
82	},
83	{
84		{ T_CDROM, SIP_MEDIA_REMOVABLE, "NEC", "CD-ROM DRIVE", "*" },
85		0
86	},
87	{
88		{
89		  T_SEQUENTIAL, SIP_MEDIA_REMOVABLE,
90		  "TANDBERG", " TDC 36", "*"
91		},
92		0
93	},
94	{
95		{ T_SEQUENTIAL, SIP_MEDIA_REMOVABLE, "WANGTEK", "*", "*" },
96		0
97	},
98	{
99		{
100		  T_PROCESSOR, SIP_MEDIA_REMOVABLE|SIP_MEDIA_FIXED,
101		  "*", "*", "*"
102		},
103		0
104	},
105	{
106		{
107		  T_SCANNER, SIP_MEDIA_REMOVABLE|SIP_MEDIA_FIXED,
108		  "*", "*", "*"
109		},
110		0
111	},
112	{
113		/* Default quirk entry */
114		{
115		  T_ANY, SIP_MEDIA_REMOVABLE|SIP_MEDIA_FIXED,
116		  /*vendor*/"*", /*product*/"*", /*revision*/"*"
117		},
118		ADV_QUIRK_FIX_ASYN_XFER
119	}
120};
121
122/*
123 * Allowable sync periods, in 4ns SDTR units (e.g. 25 == 100ns)
124 */
125u_int8_t adv_sdtr_period_tbl[] =
126{
127	25,
128	30,
129	35,
130	40,
131	50,
132	60,
133	70,
134	85
135};
136
137u_int8_t adv_sdtr_period_tbl_ultra[] =
138{
139	12,
140	19,
141	25,
142	32,
143	38,
144	44,
145	50,
146	57,
147	63,
148	69,
149	75,
150	82,
151	88,
152	94,
153	100,
154	107
155};
156
157struct ext_msg {
158	u_int8_t msg_type;
159	u_int8_t msg_len;
160	u_int8_t msg_req;
161	union {
162		struct {
163			u_int8_t sdtr_xfer_period;
164			u_int8_t sdtr_req_ack_offset;
165		} sdtr;
166		struct {
167       			u_int8_t wdtr_width;
168		} wdtr;
169		struct {
170			u_int8_t mdp[4];
171		} mdp;
172	} u_ext_msg;
173	u_int8_t res;
174};
175
176#define	xfer_period	u_ext_msg.sdtr.sdtr_xfer_period
177#define	req_ack_offset	u_ext_msg.sdtr.sdtr_req_ack_offset
178#define	wdtr_width	u_ext_msg.wdtr.wdtr_width
179#define	mdp_b3		u_ext_msg.mdp.mdp[3]
180#define	mdp_b2		u_ext_msg.mdp.mdp[2]
181#define	mdp_b1		u_ext_msg.mdp.mdp[1]
182#define	mdp_b0		u_ext_msg.mdp.mdp[0]
183
184/*
185 * Some of the early PCI adapters have problems with
186 * async transfers.  Instead use an offset of 1.
187 */
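/*
 * 0x41 encodes period table entry 4 in the high nibble and a REQ/ACK
 * offset of 1 in the low nibble (see adv_period_offset_to_sdtr).
 */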
188#define ASYN_SDTR_DATA_FIX_PCI_REV_AB 0x41
189
190/* LRAM routines */
191static void	 adv_read_lram_16_multi(struct adv_softc *adv, u_int16_t s_addr,
192					u_int16_t *buffer, int count);
193static void	 adv_write_lram_16_multi(struct adv_softc *adv,
194					 u_int16_t s_addr, u_int16_t *buffer,
195					 int count);
196static void	 adv_mset_lram_16(struct adv_softc *adv, u_int16_t s_addr,
197				  u_int16_t set_value, int count);
198static u_int32_t adv_msum_lram_16(struct adv_softc *adv, u_int16_t s_addr,
199				  int count);
200
201static int	 adv_write_and_verify_lram_16(struct adv_softc *adv,
202					      u_int16_t addr, u_int16_t value);
203static u_int32_t adv_read_lram_32(struct adv_softc *adv, u_int16_t addr);
204
205
206static void	 adv_write_lram_32(struct adv_softc *adv, u_int16_t addr,
207				   u_int32_t value);
208static void	 adv_write_lram_32_multi(struct adv_softc *adv,
209					 u_int16_t s_addr, u_int32_t *buffer,
210					 int count);
211
212/* EEPROM routines */
213static u_int16_t adv_read_eeprom_16(struct adv_softc *adv, u_int8_t addr);
214static u_int16_t adv_write_eeprom_16(struct adv_softc *adv, u_int8_t addr,
215				     u_int16_t value);
216static int	 adv_write_eeprom_cmd_reg(struct adv_softc *adv,
217					  u_int8_t cmd_reg);
218static int	 adv_set_eeprom_config_once(struct adv_softc *adv,
219					    struct adv_eeprom_config *eeconfig);
220
221/* Initialization */
222static u_int32_t adv_load_microcode(struct adv_softc *adv, u_int16_t s_addr,
223				    u_int16_t *mcode_buf, u_int16_t mcode_size);
224
225static void	 adv_reinit_lram(struct adv_softc *adv);
226static void	 adv_init_lram(struct adv_softc *adv);
227static int	 adv_init_microcode_var(struct adv_softc *adv);
228static void	 adv_init_qlink_var(struct adv_softc *adv);
229
230/* Interrupts */
231static void	 adv_disable_interrupt(struct adv_softc *adv);
232static void	 adv_enable_interrupt(struct adv_softc *adv);
233static void	 adv_toggle_irq_act(struct adv_softc *adv);
234
235/* Chip Control */
236static int	 adv_stop_chip(struct adv_softc *adv);
237static int	 adv_host_req_chip_halt(struct adv_softc *adv);
238static void	 adv_set_chip_ih(struct adv_softc *adv, u_int16_t ins_code);
239#if UNUSED
240static u_int8_t  adv_get_chip_scsi_ctrl(struct adv_softc *adv);
241#endif
242
243/* Queue handling and execution */
244static int	 adv_sgcount_to_qcount(int sgcount);
245static void	 adv_get_q_info(struct adv_softc *adv, u_int16_t s_addr,
246				u_int16_t *inbuf, int words);
247static u_int	 adv_get_num_free_queues(struct adv_softc *adv, u_int8_t n_qs);
248static u_int8_t  adv_alloc_free_queues(struct adv_softc *adv,
249				       u_int8_t free_q_head, u_int8_t n_free_q);
250static u_int8_t  adv_alloc_free_queue(struct adv_softc *adv,
251				      u_int8_t free_q_head);
252static int	 adv_send_scsi_queue(struct adv_softc *adv,
253				     struct adv_scsi_q *scsiq,
254				     u_int8_t n_q_required);
255static void	 adv_put_ready_sg_list_queue(struct adv_softc *adv,
256					     struct adv_scsi_q *scsiq,
257					     u_int q_no);
258static void	 adv_put_ready_queue(struct adv_softc *adv,
259				     struct adv_scsi_q *scsiq, u_int q_no);
260static void	 adv_put_scsiq(struct adv_softc *adv, u_int16_t s_addr,
261			       u_int16_t *buffer, int words);
262
263/* Messages */
264static void	 adv_handle_extmsg_in(struct adv_softc *adv,
265				      u_int16_t halt_q_addr, u_int8_t q_cntl,
266				      target_bit_vector target_id,
267				      int tid);
268static void	 adv_msgout_sdtr(struct adv_softc *adv, u_int8_t sdtr_period,
269				 u_int8_t sdtr_offset);
270static void	 adv_set_sdtr_reg_at_id(struct adv_softc *adv, int id,
271					u_int8_t sdtr_data);
272
273
274/* Exported functions first */
275
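/*
 * Handle asynchronous CAM events.  On device discovery we apply the
 * quirk entry matching the device's inquiry data and refresh our sync
 * settings; on device loss we revert the target to async transfers.
 */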
276void
277advasync(void *callback_arg, u_int32_t code, struct cam_path *path, void *arg)
278{
279	struct adv_softc *adv;
280
281	adv = (struct adv_softc *)callback_arg;
282	switch (code) {
283	case AC_FOUND_DEVICE:
284	{
285		struct ccb_getdev *cgd;
286		target_bit_vector target_mask;
287		int num_entries;
288        	caddr_t match;
289		struct adv_quirk_entry *entry;
290		struct adv_target_transinfo* tinfo;
291
292		cgd = (struct ccb_getdev *)arg;
293
294		target_mask = ADV_TID_TO_TARGET_MASK(cgd->ccb_h.target_id);
295
296		num_entries = sizeof(adv_quirk_table)/sizeof(*adv_quirk_table);
297		match = cam_quirkmatch((caddr_t)&cgd->inq_data,
298				       (caddr_t)adv_quirk_table,
299				       num_entries, sizeof(*adv_quirk_table),
300				       scsi_inquiry_match);
301
302		if (match == NULL)
303			panic("advasync: device didn't match wildcard entry!!");
304
305		entry = (struct adv_quirk_entry *)match;
306
307		if (adv->bug_fix_control & ADV_BUG_FIX_ASYN_USE_SYN) {
308			if ((entry->quirks & ADV_QUIRK_FIX_ASYN_XFER_ALWAYS)!=0)
309				adv->fix_asyn_xfer_always |= target_mask;
310			else
311				adv->fix_asyn_xfer_always &= ~target_mask;
312			/*
313			 * We start out life with all bits set and clear them
314			 * after we've determined that the fix isn't necessary.
315			 * It may well be that we've already cleared a target
316			 * before the full inquiry session completes, so don't
317			 * gratuitously set a target bit even if it has this
318			 * quirk.  But, if the quirk exonerates a device, clear
319			 * the bit now.
320			 */
321			if ((entry->quirks & ADV_QUIRK_FIX_ASYN_XFER) == 0)
322				adv->fix_asyn_xfer &= ~target_mask;
323		}
324		/*
325		 * Reset our sync settings now that we've determined
326		 * what quirks are in effect for the device.
327		 */
328		tinfo = &adv->tinfo[cgd->ccb_h.target_id];
329		adv_set_syncrate(adv, cgd->ccb_h.path,
330				 cgd->ccb_h.target_id,
331				 tinfo->current.period,
332				 tinfo->current.offset,
333				 ADV_TRANS_CUR);
334		break;
335	}
336	case AC_LOST_DEVICE:
337	{
338		u_int target_mask;
339
340		if (adv->bug_fix_control & ADV_BUG_FIX_ASYN_USE_SYN) {
341			target_mask = 0x01 << xpt_path_target_id(path);
342			adv->fix_asyn_xfer |= target_mask;
343		}
344
345		/*
346		 * Revert to async transfers
347		 * for the next device.
348		 */
349		adv_set_syncrate(adv, /*path*/NULL,
350				 xpt_path_target_id(path),
351				 /*period*/0,
352				 /*offset*/0,
353				 ADV_TRANS_GOAL|ADV_TRANS_CUR);
		break;
354	}
355	default:
356		break;
357	}
358}
359
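/*
 * Select one of the chip's register banks.  Bank 1 is reached with
 * ADV_CC_BANK_ONE alone; bank 2 additionally requires ADV_CC_DIAG.
 */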
360void
361adv_set_bank(struct adv_softc *adv, u_int8_t bank)
362{
363	u_int8_t control;
364
365	/*
366	 * Start out with the bank reset to 0
367	 */
368	control = ADV_INB(adv, ADV_CHIP_CTRL)
369		  &  (~(ADV_CC_SINGLE_STEP | ADV_CC_TEST
370			| ADV_CC_DIAG | ADV_CC_SCSI_RESET
371			| ADV_CC_CHIP_RESET | ADV_CC_BANK_ONE));
372	if (bank == 1) {
373		control |= ADV_CC_BANK_ONE;
374	} else if (bank == 2) {
375		control |= ADV_CC_DIAG | ADV_CC_BANK_ONE;
376	}
377	ADV_OUTB(adv, ADV_CHIP_CTRL, control);
378}
379
380u_int8_t
381adv_read_lram_8(struct adv_softc *adv, u_int16_t addr)
382{
383	u_int8_t   byte_data;
384	u_int16_t  word_data;
385
386	/*
387	 * LRAM is accessed on 16bit boundaries.
388	 */
389	ADV_OUTW(adv, ADV_LRAM_ADDR, addr & 0xFFFE);
390	word_data = ADV_INW(adv, ADV_LRAM_DATA);
391	if (addr & 1) {
392#if BYTE_ORDER == BIG_ENDIAN
393		byte_data = (u_int8_t)(word_data & 0xFF);
394#else
395		byte_data = (u_int8_t)((word_data >> 8) & 0xFF);
396#endif
397	} else {
398#if BYTE_ORDER == BIG_ENDIAN
399		byte_data = (u_int8_t)((word_data >> 8) & 0xFF);
400#else
401		byte_data = (u_int8_t)(word_data & 0xFF);
402#endif
403	}
404	return (byte_data);
405}
406
407void
408adv_write_lram_8(struct adv_softc *adv, u_int16_t addr, u_int8_t value)
409{
410	u_int16_t word_data;
411
412	word_data = adv_read_lram_16(adv, addr & 0xFFFE);
413	if (addr & 1) {
414		word_data &= 0x00FF;
415		word_data |= (((u_int8_t)value << 8) & 0xFF00);
416	} else {
417		word_data &= 0xFF00;
418		word_data |= ((u_int8_t)value & 0x00FF);
419	}
420	adv_write_lram_16(adv, addr & 0xFFFE, word_data);
421}
422
423
424u_int16_t
425adv_read_lram_16(struct adv_softc *adv, u_int16_t addr)
426{
427	ADV_OUTW(adv, ADV_LRAM_ADDR, addr);
428	return (ADV_INW(adv, ADV_LRAM_DATA));
429}
430
431void
432adv_write_lram_16(struct adv_softc *adv, u_int16_t addr, u_int16_t value)
433{
434	ADV_OUTW(adv, ADV_LRAM_ADDR, addr);
435	ADV_OUTW(adv, ADV_LRAM_DATA, value);
436}
437
438/*
439 * Determine if there is a board at "iobase" by looking
440 * for the AdvanSys signatures.  Return 1 if a board is
441 * found, 0 otherwise.
442 */
443int
444adv_find_signature(bus_space_tag_t tag, bus_space_handle_t bsh)
445{
446	u_int16_t signature;
447
448	if (bus_space_read_1(tag, bsh, ADV_SIGNATURE_BYTE) == ADV_1000_ID1B) {
449		signature = bus_space_read_2(tag, bsh, ADV_SIGNATURE_WORD);
450		if ((signature == ADV_1000_ID0W)
451		 || (signature == ADV_1000_ID0W_FIX))
452			return (1);
453	}
454	return (0);
455}
456
457void
458adv_lib_init(struct adv_softc *adv)
459{
460	if ((adv->type & ADV_ULTRA) != 0) {
461		adv->sdtr_period_tbl = adv_sdtr_period_tbl_ultra;
462		adv->sdtr_period_tbl_size = sizeof(adv_sdtr_period_tbl_ultra);
463	} else {
464		adv->sdtr_period_tbl = adv_sdtr_period_tbl;
465		adv->sdtr_period_tbl_size = sizeof(adv_sdtr_period_tbl);
466	}
467}
468
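/*
 * Read the EEPROM contents into *eeprom_config and return the sum of
 * the config words read.  The caller compares this sum against the
 * stored checksum word (the last word read) to validate the data.
 */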
469u_int16_t
470adv_get_eeprom_config(struct adv_softc *adv,
471		      struct adv_eeprom_config *eeprom_config)
472{
473	u_int16_t	sum;
474	u_int16_t	*wbuf;
475	u_int8_t	cfg_beg;
476	u_int8_t	cfg_end;
477	u_int8_t	s_addr;
478
479	wbuf = (u_int16_t *)eeprom_config;
480	sum = 0;
481
482	for (s_addr = 0; s_addr < 2; s_addr++, wbuf++) {
483		*wbuf = adv_read_eeprom_16(adv, s_addr);
484		sum += *wbuf;
485	}
486
487	if (adv->type & ADV_VL) {
488		cfg_beg = ADV_EEPROM_CFG_BEG_VL;
489		cfg_end = ADV_EEPROM_MAX_ADDR_VL;
490	} else {
491		cfg_beg = ADV_EEPROM_CFG_BEG;
492		cfg_end = ADV_EEPROM_MAX_ADDR;
493	}
494
495	for (s_addr = cfg_beg; s_addr <= (cfg_end - 1); s_addr++, wbuf++) {
496		*wbuf = adv_read_eeprom_16(adv, s_addr);
497		sum += *wbuf;
498#if ADV_DEBUG_EEPROM
499		printf("Addr 0x%x: 0x%04x\n", s_addr, *wbuf);
500#endif
501	}
502	*wbuf = adv_read_eeprom_16(adv, s_addr);
503	return (sum);
504}
505
506int
507adv_set_eeprom_config(struct adv_softc *adv,
508		      struct adv_eeprom_config *eeprom_config)
509{
510	int	retry;
511
512	retry = 0;
513	while (1) {
514		if (adv_set_eeprom_config_once(adv, eeprom_config) == 0) {
515			break;
516		}
517		if (++retry > ADV_EEPROM_MAX_RETRY) {
518			break;
519		}
520	}
521	return (retry > ADV_EEPROM_MAX_RETRY);
522}
523
524int
525adv_reset_chip_and_scsi_bus(struct adv_softc *adv)
526{
527	adv_stop_chip(adv);
528	ADV_OUTB(adv, ADV_CHIP_CTRL,
529		 ADV_CC_CHIP_RESET | ADV_CC_SCSI_RESET | ADV_CC_HALT);
530	DELAY(200 * 1000);
531
532	adv_set_chip_ih(adv, ADV_INS_RFLAG_WTM);
533	adv_set_chip_ih(adv, ADV_INS_HALT);
534
535	ADV_OUTB(adv, ADV_CHIP_CTRL, ADV_CC_CHIP_RESET | ADV_CC_HALT);
536	ADV_OUTB(adv, ADV_CHIP_CTRL, ADV_CC_HALT);
537	DELAY(200 * 1000);
538	return (adv_is_chip_halted(adv));
539}
540
541int
542adv_test_external_lram(struct adv_softc* adv)
543{
544	u_int16_t	q_addr;
545	u_int16_t	saved_value;
546	int		success;
547
548	success = 0;
549
550	q_addr = ADV_QNO_TO_QADDR(241);
551	saved_value = adv_read_lram_16(adv, q_addr);
552	if (adv_write_and_verify_lram_16(adv, q_addr, 0x55AA) == 0) {
553		success = 1;
554		adv_write_lram_16(adv, q_addr, saved_value);
555	}
556	return (success);
557}
558
559
560int
561adv_init_lram_and_mcode(struct adv_softc *adv)
562{
563	u_int32_t	retval;
564
565	adv_disable_interrupt(adv);
566
567	adv_init_lram(adv);
568
569	retval = adv_load_microcode(adv, 0, (u_int16_t *)adv_mcode,
570				    adv_mcode_size);
571	if (retval != adv_mcode_chksum) {
572		printf("adv%d: Microcode download failed checksum!\n",
573		       adv->unit);
574		return (1);
575	}
576
577	if (adv_init_microcode_var(adv) != 0)
578		return (1);
579
580	adv_enable_interrupt(adv);
581	return (0);
582}
583
584u_int8_t
585adv_get_chip_irq(struct adv_softc *adv)
586{
587	u_int16_t	cfg_lsw;
588	u_int8_t	chip_irq;
589
590	cfg_lsw = ADV_INW(adv, ADV_CONFIG_LSW);
591
592	if ((adv->type & ADV_VL) != 0) {
593		chip_irq = (u_int8_t)(((cfg_lsw >> 2) & 0x07));
594		if ((chip_irq == 0) ||
595		    (chip_irq == 4) ||
596		    (chip_irq == 7)) {
597			return (0);
598		}
599		return (chip_irq + (ADV_MIN_IRQ_NO - 1));
600	}
601	chip_irq = (u_int8_t)(((cfg_lsw >> 2) & 0x03));
602	if (chip_irq == 3)
603		chip_irq += 2;
604	return (chip_irq + ADV_MIN_IRQ_NO);
605}
606
607u_int8_t
608adv_set_chip_irq(struct adv_softc *adv, u_int8_t irq_no)
609{
610	u_int16_t	cfg_lsw;
611
612	if ((adv->type & ADV_VL) != 0) {
613		if (irq_no != 0) {
614			if ((irq_no < ADV_MIN_IRQ_NO)
615			 || (irq_no > ADV_MAX_IRQ_NO)) {
616				irq_no = 0;
617			} else {
618				irq_no -= ADV_MIN_IRQ_NO - 1;
619			}
620		}
621		cfg_lsw = ADV_INW(adv, ADV_CONFIG_LSW) & 0xFFE3;
622		cfg_lsw |= 0x0010;
623		ADV_OUTW(adv, ADV_CONFIG_LSW, cfg_lsw);
624		adv_toggle_irq_act(adv);
625
626		cfg_lsw = ADV_INW(adv, ADV_CONFIG_LSW) & 0xFFE0;
627		cfg_lsw |= (irq_no & 0x07) << 2;
628		ADV_OUTW(adv, ADV_CONFIG_LSW, cfg_lsw);
629		adv_toggle_irq_act(adv);
630	} else if ((adv->type & ADV_ISA) != 0) {
631		if (irq_no == 15)
632			irq_no -= 2;
633		irq_no -= ADV_MIN_IRQ_NO;
634		cfg_lsw = ADV_INW(adv, ADV_CONFIG_LSW) & 0xFFF3;
635		cfg_lsw |= (irq_no & 0x03) << 2;
636		ADV_OUTW(adv, ADV_CONFIG_LSW, cfg_lsw);
637	}
638	return (adv_get_chip_irq(adv));
639}
640
641void
642adv_set_chip_scsiid(struct adv_softc *adv, int new_id)
643{
644	u_int16_t cfg_lsw;
645
646	cfg_lsw = ADV_INW(adv, ADV_CONFIG_LSW);
647	if (ADV_CONFIG_SCSIID(cfg_lsw) == new_id)
648		return;
649    	cfg_lsw &= ~ADV_CFG_LSW_SCSIID;
650	cfg_lsw |= (new_id & ADV_MAX_TID) << ADV_CFG_LSW_SCSIID_SHIFT;
651	ADV_OUTW(adv, ADV_CONFIG_LSW, cfg_lsw);
652}
653
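/*
 * Prepare a SCSI request for the microcode and, if enough queue blocks
 * are free (or the request is urgent), hand it to the chip.  Returns 0
 * on success and 1 if the request could not be posted.
 */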
654int
655adv_execute_scsi_queue(struct adv_softc *adv, struct adv_scsi_q *scsiq,
656		       u_int32_t datalen)
657{
658	struct		adv_target_transinfo* tinfo;
659	u_int32_t	*p_data_addr;
660	u_int32_t	*p_data_bcount;
661	int		disable_syn_offset_one_fix;
662	int		retval;
663	u_int		n_q_required;
664	u_int32_t	addr;
665	u_int8_t	sg_entry_cnt;
666	u_int8_t	target_ix;
667	u_int8_t	sg_entry_cnt_minus_one;
668	u_int8_t	tid_no;
669
670	scsiq->q1.q_no = 0;
671	retval = 1;  /* Default to error case */
672	target_ix = scsiq->q2.target_ix;
673	tid_no = ADV_TIX_TO_TID(target_ix);
674	tinfo = &adv->tinfo[tid_no];
675
676	if (scsiq->cdbptr[0] == REQUEST_SENSE) {
677		/* Renegotiate if appropriate. */
678		adv_set_syncrate(adv, /*struct cam_path */NULL,
679				 tid_no, /*period*/0, /*offset*/0,
680				 ADV_TRANS_CUR);
681		if (tinfo->current.period != tinfo->goal.period) {
682			adv_msgout_sdtr(adv, tinfo->goal.period,
683					tinfo->goal.offset);
684			scsiq->q1.cntl |= (QC_MSG_OUT | QC_URGENT);
685		}
686	}
687
688	if ((scsiq->q1.cntl & QC_SG_HEAD) != 0) {
689		sg_entry_cnt = scsiq->sg_head->entry_cnt;
690		sg_entry_cnt_minus_one = sg_entry_cnt - 1;
691
692#ifdef DIAGNOSTIC
693		if (sg_entry_cnt <= 1)
694			panic("adv_execute_scsi_queue: Queue "
695			      "with QC_SG_HEAD set but %d segs.", sg_entry_cnt);
696
697		if (sg_entry_cnt > ADV_MAX_SG_LIST)
698			panic("adv_execute_scsi_queue: "
699			      "Queue with too many segs.");
700
701		if (adv->type & (ADV_ISA | ADV_VL | ADV_EISA)) {
			int i;

702			for (i = 0; i < sg_entry_cnt_minus_one; i++) {
703				addr = scsiq->sg_head->sg_list[i].addr +
704				       scsiq->sg_head->sg_list[i].bytes;
705
706				if ((addr & 0x0003) != 0)
707					panic("adv_execute_scsi_queue: SG "
708					      "with odd address or byte count");
709			}
710		}
711#endif
712		p_data_addr =
713		    &scsiq->sg_head->sg_list[sg_entry_cnt_minus_one].addr;
714		p_data_bcount =
715		    &scsiq->sg_head->sg_list[sg_entry_cnt_minus_one].bytes;
716
717		n_q_required = adv_sgcount_to_qcount(sg_entry_cnt);
718		scsiq->sg_head->queue_cnt = n_q_required - 1;
719	} else {
720		p_data_addr = &scsiq->q1.data_addr;
721		p_data_bcount = &scsiq->q1.data_cnt;
722		n_q_required = 1;
723	}
724
725	disable_syn_offset_one_fix = FALSE;
726
727	if ((adv->fix_asyn_xfer & scsiq->q1.target_id) != 0
728	 && (adv->fix_asyn_xfer_always & scsiq->q1.target_id) == 0) {
729
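		/*
		 * Transfers shorter than 512 bytes, and several common
		 * control commands, are flagged below to run with the
		 * fix (and disconnection) disabled.
		 */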
730		if (datalen != 0) {
731			if (datalen < 512) {
732				disable_syn_offset_one_fix = TRUE;
733			} else {
734				if (scsiq->cdbptr[0] == INQUIRY
735				 || scsiq->cdbptr[0] == REQUEST_SENSE
736				 || scsiq->cdbptr[0] == READ_CAPACITY
737				 || scsiq->cdbptr[0] == MODE_SELECT_6
738				 || scsiq->cdbptr[0] == MODE_SENSE_6
739				 || scsiq->cdbptr[0] == MODE_SENSE_10
740				 || scsiq->cdbptr[0] == MODE_SELECT_10
741				 || scsiq->cdbptr[0] == READ_TOC) {
742					disable_syn_offset_one_fix = TRUE;
743				}
744			}
745		}
746	}
747
748	if (disable_syn_offset_one_fix) {
749		scsiq->q2.tag_code &=
750		    ~(MSG_SIMPLE_Q_TAG|MSG_HEAD_OF_Q_TAG|MSG_ORDERED_Q_TAG);
751		scsiq->q2.tag_code |= (ADV_TAG_FLAG_DISABLE_ASYN_USE_SYN_FIX
752				     | ADV_TAG_FLAG_DISABLE_DISCONNECT);
753	}
754
755	if ((adv->bug_fix_control & ADV_BUG_FIX_IF_NOT_DWB) != 0
756	 && (scsiq->cdbptr[0] == READ_10 || scsiq->cdbptr[0] == READ_6)) {
757		u_int8_t extra_bytes;
758
759		addr = *p_data_addr + *p_data_bcount;
760		extra_bytes = addr & 0x0003;
761		if (extra_bytes != 0
762		 && ((scsiq->q1.cntl & QC_SG_HEAD) != 0
763		  || (scsiq->q1.data_cnt & 0x01FF) == 0)) {
764			scsiq->q2.tag_code |= ADV_TAG_FLAG_EXTRA_BYTES;
765			scsiq->q1.extra_bytes = extra_bytes;
766			*p_data_bcount -= extra_bytes;
767		}
768	}
769
770	if ((adv_get_num_free_queues(adv, n_q_required) >= n_q_required)
771	 || ((scsiq->q1.cntl & QC_URGENT) != 0))
772		retval = adv_send_scsi_queue(adv, scsiq, n_q_required);
773
774	return (retval);
775}
776
777
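/*
 * Copy the completion information for the queue at q_addr out of LRAM.
 * Returns the number of S/G queues chained to the request.
 */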
778u_int8_t
779adv_copy_lram_doneq(struct adv_softc *adv, u_int16_t q_addr,
780		    struct adv_q_done_info *scsiq, u_int32_t max_dma_count)
781{
782	u_int16_t val;
783	u_int8_t  sg_queue_cnt;
784
785	adv_get_q_info(adv, q_addr + ADV_SCSIQ_DONE_INFO_BEG,
786		       (u_int16_t *)scsiq,
787		       (sizeof(scsiq->d2) + sizeof(scsiq->d3)) / 2);
788
789#if BYTE_ORDER == BIG_ENDIAN
790	adv_adj_endian_qdone_info(scsiq);
791#endif
792
793	val = adv_read_lram_16(adv, q_addr + ADV_SCSIQ_B_STATUS);
794	scsiq->q_status = val & 0xFF;
795	scsiq->q_no = (val >> 8) & 0xFF;
796
797	val = adv_read_lram_16(adv, q_addr + ADV_SCSIQ_B_CNTL);
798	scsiq->cntl = val & 0xFF;
799	sg_queue_cnt = (val >> 8) & 0xFF;
800
801	val = adv_read_lram_16(adv,q_addr + ADV_SCSIQ_B_SENSE_LEN);
802	scsiq->sense_len = val & 0xFF;
803	scsiq->extra_bytes = (val >> 8) & 0xFF;
804
805	scsiq->remain_bytes =
806	    adv_read_lram_32(adv, q_addr + ADV_SCSIQ_DW_REMAIN_XFER_CNT);
807	/*
808	 * XXX Is this just a safeguard or will the counter really
809	 * have bogus upper bits?
810	 */
811	scsiq->remain_bytes &= max_dma_count;
812
813	return (sg_queue_cnt);
814}
815
816int
817adv_start_chip(struct adv_softc *adv)
818{
819	ADV_OUTB(adv, ADV_CHIP_CTRL, 0);
820	if ((ADV_INW(adv, ADV_CHIP_STATUS) & ADV_CSW_HALTED) != 0)
821		return (0);
822	return (1);
823}
824
825int
826adv_stop_execution(struct adv_softc *adv)
827{
828	int count;
829
830	count = 0;
831	if (adv_read_lram_8(adv, ADVV_STOP_CODE_B) == 0) {
832		adv_write_lram_8(adv, ADVV_STOP_CODE_B,
833				 ADV_STOP_REQ_RISC_STOP);
834		do {
835			if (adv_read_lram_8(adv, ADVV_STOP_CODE_B) &
836				ADV_STOP_ACK_RISC_STOP) {
837				return (1);
838			}
839			DELAY(1000);
840		} while (count++ < 20);
841	}
842	return (0);
843}
844
845int
846adv_is_chip_halted(struct adv_softc *adv)
847{
848	if ((ADV_INW(adv, ADV_CHIP_STATUS) & ADV_CSW_HALTED) != 0) {
849		if ((ADV_INB(adv, ADV_CHIP_CTRL) & ADV_CC_HALT) != 0) {
850			return (1);
851		}
852	}
853	return (0);
854}
855
856/*
857 * XXX The numeric constants and the loops in this routine
858 * need to be documented.
859 */
860void
861adv_ack_interrupt(struct adv_softc *adv)
862{
863	u_int8_t	host_flag;
864	u_int8_t	risc_flag;
865	int		loop;
866
867	loop = 0;
868	do {
869		risc_flag = adv_read_lram_8(adv, ADVV_RISC_FLAG_B);
870		if (loop++ > 0x7FFF) {
871			break;
872		}
873	} while ((risc_flag & ADV_RISC_FLAG_GEN_INT) != 0);
874
875	host_flag = adv_read_lram_8(adv, ADVV_HOST_FLAG_B);
876	adv_write_lram_8(adv, ADVV_HOST_FLAG_B,
877			 host_flag | ADV_HOST_FLAG_ACK_INT);
878
879	ADV_OUTW(adv, ADV_CHIP_STATUS, ADV_CIW_INT_ACK);
880	loop = 0;
881	while (ADV_INW(adv, ADV_CHIP_STATUS) & ADV_CSW_INT_PENDING) {
882		ADV_OUTW(adv, ADV_CHIP_STATUS, ADV_CIW_INT_ACK);
883		if (loop++ > 3) {
884			break;
885		}
886	}
887
888	adv_write_lram_8(adv, ADVV_HOST_FLAG_B, host_flag);
889}
890
891/*
892 * Handle all conditions that may halt the chip waiting
893 * for us to intervene.
894 */
895void
896adv_isr_chip_halted(struct adv_softc *adv)
897{
898	u_int16_t	  int_halt_code;
899	u_int16_t	  halt_q_addr;
900	target_bit_vector target_mask;
901	target_bit_vector scsi_busy;
902	u_int8_t	  halt_qp;
903	u_int8_t	  target_ix;
904	u_int8_t	  q_cntl;
905	u_int8_t	  tid_no;
906
907	int_halt_code = adv_read_lram_16(adv, ADVV_HALTCODE_W);
908	halt_qp = adv_read_lram_8(adv, ADVV_CURCDB_B);
909	halt_q_addr = ADV_QNO_TO_QADDR(halt_qp);
910	target_ix = adv_read_lram_8(adv, halt_q_addr + ADV_SCSIQ_B_TARGET_IX);
911	q_cntl = adv_read_lram_8(adv, halt_q_addr + ADV_SCSIQ_B_CNTL);
912	tid_no = ADV_TIX_TO_TID(target_ix);
913	target_mask = ADV_TID_TO_TARGET_MASK(tid_no);
914	if (int_halt_code == ADV_HALT_DISABLE_ASYN_USE_SYN_FIX) {
915		/*
916		 * Temporarily disable the async fix by removing
917		 * this target from the list of affected targets,
918		 * setting our async rate, and then putting us
919		 * back into the mask.
920		 */
921		adv->fix_asyn_xfer &= ~target_mask;
922		adv_set_syncrate(adv, /*struct cam_path */NULL,
923				 tid_no, /*period*/0, /*offset*/0,
924				 ADV_TRANS_ACTIVE);
925		adv->fix_asyn_xfer |= target_mask;
926	} else if (int_halt_code == ADV_HALT_ENABLE_ASYN_USE_SYN_FIX) {
927		adv_set_syncrate(adv, /*struct cam_path */NULL,
928				 tid_no, /*period*/0, /*offset*/0,
929				 ADV_TRANS_ACTIVE);
930	} else if (int_halt_code == ADV_HALT_EXTMSG_IN) {
931		adv_handle_extmsg_in(adv, halt_q_addr, q_cntl,
932				     target_mask, tid_no);
933	} else if (int_halt_code == ADV_HALT_CHK_CONDITION) {
934		struct	 adv_target_transinfo* tinfo;
935		union	 ccb *ccb;
936		u_int8_t tag_code;
937		u_int8_t q_status;
938
939		tinfo = &adv->tinfo[tid_no];
940		q_cntl |= QC_REQ_SENSE;
941
942		/* Renegotiate if appropriate. */
943		adv_set_syncrate(adv, /*struct cam_path */NULL,
944				 tid_no, /*period*/0, /*offset*/0,
945				 ADV_TRANS_CUR);
946		if (tinfo->current.period != tinfo->goal.period) {
947			adv_msgout_sdtr(adv, tinfo->goal.period,
948					tinfo->goal.offset);
949			q_cntl |= QC_MSG_OUT;
950		}
951		adv_write_lram_8(adv, halt_q_addr + ADV_SCSIQ_B_CNTL, q_cntl);
952
953		/* Don't tag request sense commands */
954		tag_code = adv_read_lram_8(adv,
955					   halt_q_addr + ADV_SCSIQ_B_TAG_CODE);
956		tag_code &=
957		    ~(MSG_SIMPLE_Q_TAG|MSG_HEAD_OF_Q_TAG|MSG_ORDERED_Q_TAG);
958
959		if ((adv->fix_asyn_xfer & target_mask) != 0
960		 && (adv->fix_asyn_xfer_always & target_mask) == 0) {
961			tag_code |= (ADV_TAG_FLAG_DISABLE_DISCONNECT
962				 | ADV_TAG_FLAG_DISABLE_ASYN_USE_SYN_FIX);
963		}
964		adv_write_lram_8(adv, halt_q_addr + ADV_SCSIQ_B_TAG_CODE,
965				 tag_code);
966		q_status = adv_read_lram_8(adv,
967					   halt_q_addr + ADV_SCSIQ_B_STATUS);
968		q_status |= (QS_READY | QS_BUSY);
969		adv_write_lram_8(adv, halt_q_addr + ADV_SCSIQ_B_STATUS,
970				 q_status);
971		/*
972		 * Freeze the devq until we can handle the sense condition.
973		 */
974		ccb = (union ccb *) adv_read_lram_32(adv, halt_q_addr
975							 + ADV_SCSIQ_D_CCBPTR);
976		xpt_freeze_devq(ccb->ccb_h.path, /*count*/1);
977		ccb->ccb_h.status |= CAM_DEV_QFRZN;
978		adv_abort_ccb(adv, tid_no, ADV_TIX_TO_LUN(target_ix),
979			      /*ccb*/NULL, CAM_REQUEUE_REQ,
980			      /*queued_only*/TRUE);
981		scsi_busy = adv_read_lram_8(adv, ADVV_SCSIBUSY_B);
982		scsi_busy &= ~target_mask;
983		adv_write_lram_8(adv, ADVV_SCSIBUSY_B, scsi_busy);
984	} else if (int_halt_code == ADV_HALT_SDTR_REJECTED) {
985		struct	ext_msg out_msg;
986
987		adv_read_lram_16_multi(adv, ADVV_MSGOUT_BEG,
988				       (u_int16_t *) &out_msg,
989				       sizeof(out_msg)/2);
990
991		if ((out_msg.msg_type == MSG_EXTENDED)
992		 && (out_msg.msg_len == MSG_EXT_SDTR_LEN)
993		 && (out_msg.msg_req == MSG_EXT_SDTR)) {
994
995			/* Revert to Async */
996			adv_set_syncrate(adv, /*struct cam_path */NULL,
997					 tid_no, /*period*/0, /*offset*/0,
998					 ADV_TRANS_GOAL|ADV_TRANS_ACTIVE);
999		}
1000		q_cntl &= ~QC_MSG_OUT;
1001		adv_write_lram_8(adv, halt_q_addr + ADV_SCSIQ_B_CNTL, q_cntl);
1002	} else if (int_halt_code == ADV_HALT_SS_QUEUE_FULL) {
1003		u_int8_t scsi_status;
1004		union ccb *ccb;
1005
1006		scsi_status = adv_read_lram_8(adv, halt_q_addr
1007					      + ADV_SCSIQ_SCSI_STATUS);
1008		ccb = (union ccb *) adv_read_lram_32(adv, halt_q_addr
1009						     + ADV_SCSIQ_D_CCBPTR);
1010		xpt_freeze_devq(ccb->ccb_h.path, /*count*/1);
1011		ccb->ccb_h.status |= CAM_DEV_QFRZN;
1012		adv_abort_ccb(adv, tid_no, ADV_TIX_TO_LUN(target_ix),
1013			      /*ccb*/NULL, CAM_REQUEUE_REQ,
1014			      /*queued_only*/TRUE);
1015		scsi_busy = adv_read_lram_8(adv, ADVV_SCSIBUSY_B);
1016		scsi_busy &= ~target_mask;
1017		adv_write_lram_8(adv, ADVV_SCSIBUSY_B, scsi_busy);
1018	}
1019	adv_write_lram_16(adv, ADVV_HALTCODE_W, 0);
1020}
1021
1022void
1023adv_sdtr_to_period_offset(struct adv_softc *adv,
1024			  u_int8_t sync_data, u_int8_t *period,
1025			  u_int8_t *offset, int tid)
1026{
1027	if (adv->fix_asyn_xfer & ADV_TID_TO_TARGET_MASK(tid)
1028	 && (sync_data == ASYN_SDTR_DATA_FIX_PCI_REV_AB)) {
1029		*period = *offset = 0;
1030	} else {
1031		*period = adv->sdtr_period_tbl[((sync_data >> 4) & 0xF)];
1032		*offset = sync_data & 0xF;
1033	}
1034}
1035
1036void
1037adv_set_syncrate(struct adv_softc *adv, struct cam_path *path,
1038		 u_int tid, u_int period, u_int offset, u_int type)
1039{
1040	struct adv_target_transinfo* tinfo;
1041	u_int old_period;
1042	u_int old_offset;
1043	u_int8_t sdtr_data;
1044
1045	tinfo = &adv->tinfo[tid];
1046
1047	/* Filter our input */
1048	sdtr_data = adv_period_offset_to_sdtr(adv, &period,
1049					      &offset, tid);
1050
1051	old_period = tinfo->current.period;
1052	old_offset = tinfo->current.offset;
1053
1054	if ((type & ADV_TRANS_CUR) != 0
1055	 && ((old_period != period || old_offset != offset)
1056	  || period == 0 || offset == 0) /*Changes in asyn fix settings*/) {
1057		int s;
1058		int halted;
1059
1060		s = splcam();
1061		halted = adv_is_chip_halted(adv);
1062		if (halted == 0)
1063			/* Must halt the chip first */
1064			adv_host_req_chip_halt(adv);
1065
1066		/* Update current hardware settings */
1067		adv_set_sdtr_reg_at_id(adv, tid, sdtr_data);
1068
1069		/*
1070		 * If a target can run in sync mode, we don't need
1071		 * to check it for sync problems.
1072		 */
1073		if (offset != 0)
1074			adv->fix_asyn_xfer &= ~ADV_TID_TO_TARGET_MASK(tid);
1075
1076		if (halted == 0)
1077			/* Start the chip again */
1078			adv_start_chip(adv);
1079
1080		splx(s);
1081		tinfo->current.period = period;
1082		tinfo->current.offset = offset;
1083
1084		if (path != NULL) {
1085			/*
1086			 * Tell the SCSI layer about the
1087			 * new transfer parameters.
1088			 */
1089			struct	ccb_trans_settings neg;
1090
1091			neg.sync_period = period;
1092			neg.sync_offset = offset;
1093			neg.valid = CCB_TRANS_SYNC_RATE_VALID
1094				  | CCB_TRANS_SYNC_OFFSET_VALID;
1095			xpt_setup_ccb(&neg.ccb_h, path, /*priority*/1);
1096			xpt_async(AC_TRANSFER_NEG, path, &neg);
1097		}
1098	}
1099
1100	if ((type & ADV_TRANS_GOAL) != 0) {
1101		tinfo->goal.period = period;
1102		tinfo->goal.offset = offset;
1103	}
1104
1105	if ((type & ADV_TRANS_USER) != 0) {
1106		tinfo->user.period = period;
1107		tinfo->user.offset = offset;
1108	}
1109}
1110
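/*
 * Clamp a period/offset pair to the chip's capabilities and return the
 * equivalent SDTR register value: the period table index in the high
 * nibble and the REQ/ACK offset in the low nibble.  A pair we cannot
 * support degrades to async (zero).
 */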
1111u_int8_t
1112adv_period_offset_to_sdtr(struct adv_softc *adv, u_int *period,
1113			  u_int *offset, int tid)
1114{
1115	u_int i;
1116	u_int dummy_offset;
1117	u_int dummy_period;
1118
1119	if (offset == NULL) {
1120		dummy_offset = 0;
1121		offset = &dummy_offset;
1122	}
1123
1124	if (period == NULL) {
1125		dummy_period = 0;
1126		period = &dummy_period;
1127	}
1128
1129#define MIN(a,b) (((a) < (b)) ? (a) : (b))
1130
1131	*offset = MIN(ADV_SYN_MAX_OFFSET, *offset);
1132	if (*period != 0 && *offset != 0) {
1133		for (i = 0; i < adv->sdtr_period_tbl_size; i++) {
1134			if (*period <= adv->sdtr_period_tbl[i]) {
1135				/*
1136				 * When responding to a target that requests
1137				 * sync, the requested  rate may fall between
1138				 * two rates that we can output, but still be
1139				 * a rate that we can receive.  Because of this,
1140				 * we want to respond to the target with
1141				 * the same rate that it sent to us even
1142				 * if the period we use to send data to it
1143				 * is lower.  Only lower the response period
1144				 * if we must.
1145				 */
1146				if (i == 0 /* Our maximum rate */)
1147					*period = adv->sdtr_period_tbl[0];
1148				return ((i << 4) | *offset);
1149			}
1150		}
1151	}
1152
1153	/* Must go async */
1154	*period = 0;
1155	*offset = 0;
1156	if (adv->fix_asyn_xfer & ADV_TID_TO_TARGET_MASK(tid))
1157		return (ASYN_SDTR_DATA_FIX_PCI_REV_AB);
1158	return (0);
1159}
1160
1161/* Internal Routines */
1162
1163static void
1164adv_read_lram_16_multi(struct adv_softc *adv, u_int16_t s_addr,
1165		       u_int16_t *buffer, int count)
1166{
1167	ADV_OUTW(adv, ADV_LRAM_ADDR, s_addr);
1168	ADV_INSW(adv, ADV_LRAM_DATA, buffer, count);
1169}
1170
1171static void
1172adv_write_lram_16_multi(struct adv_softc *adv, u_int16_t s_addr,
1173			u_int16_t *buffer, int count)
1174{
1175	ADV_OUTW(adv, ADV_LRAM_ADDR, s_addr);
1176	ADV_OUTSW(adv, ADV_LRAM_DATA, buffer, count);
1177}
1178
1179static void
1180adv_mset_lram_16(struct adv_softc *adv, u_int16_t s_addr,
1181		 u_int16_t set_value, int count)
1182{
1183	ADV_OUTW(adv, ADV_LRAM_ADDR, s_addr);
1184	bus_space_set_multi_2(adv->tag, adv->bsh, ADV_LRAM_DATA,
1185			      set_value, count);
1186}
1187
1188static u_int32_t
1189adv_msum_lram_16(struct adv_softc *adv, u_int16_t s_addr, int count)
1190{
1191	u_int32_t	sum;
1192	int		i;
1193
1194	sum = 0;
1195	ADV_OUTW(adv, ADV_LRAM_ADDR, s_addr);
1196	for (i = 0; i < count; i++)
1197		sum += ADV_INW(adv, ADV_LRAM_DATA);
1198	return (sum);
1199}
1200
1201static int
1202adv_write_and_verify_lram_16(struct adv_softc *adv, u_int16_t addr,
1203			     u_int16_t value)
1204{
1205	int	retval;
1206
1207	retval = 0;
1208	ADV_OUTW(adv, ADV_LRAM_ADDR, addr);
1209	ADV_OUTW(adv, ADV_LRAM_DATA, value);
1210	DELAY(10000);
1211	ADV_OUTW(adv, ADV_LRAM_ADDR, addr);
1212	if (value != ADV_INW(adv, ADV_LRAM_DATA))
1213		retval = 1;
1214	return (retval);
1215}
1216
1217static u_int32_t
1218adv_read_lram_32(struct adv_softc *adv, u_int16_t addr)
1219{
1220	u_int16_t           val_low, val_high;
1221
1222	ADV_OUTW(adv, ADV_LRAM_ADDR, addr);
1223
1224#if BYTE_ORDER == BIG_ENDIAN
1225	val_high = ADV_INW(adv, ADV_LRAM_DATA);
1226	val_low = ADV_INW(adv, ADV_LRAM_DATA);
1227#else
1228	val_low = ADV_INW(adv, ADV_LRAM_DATA);
1229	val_high = ADV_INW(adv, ADV_LRAM_DATA);
1230#endif
1231
1232	return (((u_int32_t)val_high << 16) | (u_int32_t)val_low);
1233}
1234
1235static void
1236adv_write_lram_32(struct adv_softc *adv, u_int16_t addr, u_int32_t value)
1237{
1238	ADV_OUTW(adv, ADV_LRAM_ADDR, addr);
1239
1240#if BYTE_ORDER == BIG_ENDIAN
1241	ADV_OUTW(adv, ADV_LRAM_DATA, (u_int16_t)((value >> 16) & 0xFFFF));
1242	ADV_OUTW(adv, ADV_LRAM_DATA, (u_int16_t)(value & 0xFFFF));
1243#else
1244	ADV_OUTW(adv, ADV_LRAM_DATA, (u_int16_t)(value & 0xFFFF));
1245	ADV_OUTW(adv, ADV_LRAM_DATA, (u_int16_t)((value >> 16) & 0xFFFF));
1246#endif
1247}
1248
1249static void
1250adv_write_lram_32_multi(struct adv_softc *adv, u_int16_t s_addr,
1251			u_int32_t *buffer, int count)
1252{
1253	ADV_OUTW(adv, ADV_LRAM_ADDR, s_addr);
1254	ADV_OUTSW(adv, ADV_LRAM_DATA, (u_int16_t *)buffer, count * 2);
1255}
1256
1257static u_int16_t
1258adv_read_eeprom_16(struct adv_softc *adv, u_int8_t addr)
1259{
1260	u_int16_t read_wval;
1261	u_int8_t  cmd_reg;
1262
1263	adv_write_eeprom_cmd_reg(adv, ADV_EEPROM_CMD_WRITE_DISABLE);
1264	DELAY(1000);
1265	cmd_reg = addr | ADV_EEPROM_CMD_READ;
1266	adv_write_eeprom_cmd_reg(adv, cmd_reg);
1267	DELAY(1000);
1268	read_wval = ADV_INW(adv, ADV_EEPROM_DATA);
1269	DELAY(1000);
1270	return (read_wval);
1271}
1272
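/*
 * Update a single EEPROM word, skipping the slow write cycle when the
 * current contents already match.  Returns the final read-back value
 * so callers can verify the update.
 */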
1273static u_int16_t
1274adv_write_eeprom_16(struct adv_softc *adv, u_int8_t addr, u_int16_t value)
1275{
1276	u_int16_t	read_value;
1277
1278	read_value = adv_read_eeprom_16(adv, addr);
1279	if (read_value != value) {
1280		adv_write_eeprom_cmd_reg(adv, ADV_EEPROM_CMD_WRITE_ENABLE);
1281		DELAY(1000);
1282
1283		ADV_OUTW(adv, ADV_EEPROM_DATA, value);
1284		DELAY(1000);
1285
1286		adv_write_eeprom_cmd_reg(adv, ADV_EEPROM_CMD_WRITE | addr);
1287		DELAY(20 * 1000);
1288
1289		adv_write_eeprom_cmd_reg(adv, ADV_EEPROM_CMD_WRITE_DISABLE);
1290		DELAY(1000);
1291		read_value = adv_read_eeprom_16(adv, addr);
1292	}
1293	return (read_value);
1294}
1295
1296static int
1297adv_write_eeprom_cmd_reg(struct adv_softc *adv, u_int8_t cmd_reg)
1298{
1299	u_int8_t read_back;
1300	int	 retry;
1301
1302	retry = 0;
1303	while (1) {
1304		ADV_OUTB(adv, ADV_EEPROM_CMD, cmd_reg);
1305		DELAY(1000);
1306		read_back = ADV_INB(adv, ADV_EEPROM_CMD);
1307		if (read_back == cmd_reg) {
1308			return (1);
1309		}
1310		if (retry++ > ADV_EEPROM_MAX_RETRY) {
1311			return (0);
1312		}
1313	}
1314}
1315
1316static int
1317adv_set_eeprom_config_once(struct adv_softc *adv,
1318			   struct adv_eeprom_config *eeprom_config)
1319{
1320	int		n_error;
1321	u_int16_t	*wbuf;
1322	u_int16_t	sum;
1323	u_int8_t	s_addr;
1324	u_int8_t	cfg_beg;
1325	u_int8_t	cfg_end;
1326
1327	wbuf = (u_int16_t *)eeprom_config;
1328	n_error = 0;
1329	sum = 0;
1330	for (s_addr = 0; s_addr < 2; s_addr++, wbuf++) {
1331		sum += *wbuf;
1332		if (*wbuf != adv_write_eeprom_16(adv, s_addr, *wbuf)) {
1333			n_error++;
1334		}
1335	}
1336	if (adv->type & ADV_VL) {
1337		cfg_beg = ADV_EEPROM_CFG_BEG_VL;
1338		cfg_end = ADV_EEPROM_MAX_ADDR_VL;
1339	} else {
1340		cfg_beg = ADV_EEPROM_CFG_BEG;
1341		cfg_end = ADV_EEPROM_MAX_ADDR;
1342	}
1343
1344	for (s_addr = cfg_beg; s_addr <= (cfg_end - 1); s_addr++, wbuf++) {
1345		sum += *wbuf;
1346		if (*wbuf != adv_write_eeprom_16(adv, s_addr, *wbuf)) {
1347			n_error++;
1348		}
1349	}
1350	*wbuf = sum;
1351	if (sum != adv_write_eeprom_16(adv, s_addr, sum)) {
1352		n_error++;
1353	}
1354	wbuf = (u_int16_t *)eeprom_config;
1355	for (s_addr = 0; s_addr < 2; s_addr++, wbuf++) {
1356		if (*wbuf != adv_read_eeprom_16(adv, s_addr)) {
1357			n_error++;
1358		}
1359	}
1360	for (s_addr = cfg_beg; s_addr <= cfg_end; s_addr++, wbuf++) {
1361		if (*wbuf != adv_read_eeprom_16(adv, s_addr)) {
1362			n_error++;
1363		}
1364	}
1365	return (n_error);
1366}
1367
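/*
 * Copy the microcode image into LRAM and return the 32 bit sum of the
 * words written.  The 16 bit code section checksum and the image size
 * are also stored at their LRAM variable locations.
 */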
1368static u_int32_t
1369adv_load_microcode(struct adv_softc *adv, u_int16_t s_addr,
1370		   u_int16_t *mcode_buf, u_int16_t mcode_size)
1371{
1372	u_int32_t chksum;
1373	u_int16_t mcode_lram_size;
1374	u_int16_t mcode_chksum;
1375
1376	mcode_lram_size = mcode_size >> 1;
1377	/* XXX Why zero the memory just before you write the whole thing?? */
1378	adv_mset_lram_16(adv, s_addr, 0, mcode_lram_size);
1379	adv_write_lram_16_multi(adv, s_addr, mcode_buf, mcode_lram_size);
1380
1381	chksum = adv_msum_lram_16(adv, s_addr, mcode_lram_size);
1382	mcode_chksum = (u_int16_t)adv_msum_lram_16(adv, ADV_CODE_SEC_BEG,
1383						   ((mcode_size - s_addr
1384						     - ADV_CODE_SEC_BEG) >> 1));
1385	adv_write_lram_16(adv, ADVV_MCODE_CHKSUM_W, mcode_chksum);
1386	adv_write_lram_16(adv, ADVV_MCODE_SIZE_W, mcode_size);
1387	return (chksum);
1388}
1389
1390static void
1391adv_reinit_lram(struct adv_softc *adv)
{
1392	adv_init_lram(adv);
1393	adv_init_qlink_var(adv);
1394}
1395
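/*
 * Initialize the queue block headers in LRAM: queues 1 through
 * max_openings are chained into a linked list, and the few blocks past
 * the end are linked to themselves.
 */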
1396static void
1397adv_init_lram(struct adv_softc *adv)
1398{
1399	u_int8_t  i;
1400	u_int16_t s_addr;
1401
1402	adv_mset_lram_16(adv, ADV_QADR_BEG, 0,
1403			 (((adv->max_openings + 2 + 1) * 64) >> 1));
1404
1405	i = ADV_MIN_ACTIVE_QNO;
1406	s_addr = ADV_QADR_BEG + ADV_QBLK_SIZE;
1407
1408	adv_write_lram_8(adv, s_addr + ADV_SCSIQ_B_FWD,	i + 1);
1409	adv_write_lram_8(adv, s_addr + ADV_SCSIQ_B_BWD, adv->max_openings);
1410	adv_write_lram_8(adv, s_addr + ADV_SCSIQ_B_QNO, i);
1411	i++;
1412	s_addr += ADV_QBLK_SIZE;
1413	for (; i < adv->max_openings; i++, s_addr += ADV_QBLK_SIZE) {
1414		adv_write_lram_8(adv, s_addr + ADV_SCSIQ_B_FWD, i + 1);
1415		adv_write_lram_8(adv, s_addr + ADV_SCSIQ_B_BWD, i - 1);
1416		adv_write_lram_8(adv, s_addr + ADV_SCSIQ_B_QNO, i);
1417	}
1418
1419	adv_write_lram_8(adv, s_addr + ADV_SCSIQ_B_FWD, ADV_QLINK_END);
1420	adv_write_lram_8(adv, s_addr + ADV_SCSIQ_B_BWD, adv->max_openings - 1);
1421	adv_write_lram_8(adv, s_addr + ADV_SCSIQ_B_QNO, adv->max_openings);
1422	i++;
1423	s_addr += ADV_QBLK_SIZE;
1424
1425	for (; i <= adv->max_openings + 3; i++, s_addr += ADV_QBLK_SIZE) {
1426		adv_write_lram_8(adv, s_addr + ADV_SCSIQ_B_FWD, i);
1427		adv_write_lram_8(adv, s_addr + ADV_SCSIQ_B_BWD, i);
1428		adv_write_lram_8(adv, s_addr + ADV_SCSIQ_B_QNO, i);
1429	}
1430}
1431
1432static int
1433adv_init_microcode_var(struct adv_softc *adv)
1434{
1435	int	 i;
1436
1437	for (i = 0; i <= ADV_MAX_TID; i++) {
1438
1439		/* Start out async all around */
1440		adv_set_syncrate(adv, /*path*/NULL,
1441				 i, 0, 0,
1442				 ADV_TRANS_GOAL|ADV_TRANS_CUR);
1443	}
1444
1445	adv_init_qlink_var(adv);
1446
1447	adv_write_lram_8(adv, ADVV_DISC_ENABLE_B, adv->disc_enable);
1448	adv_write_lram_8(adv, ADVV_HOSTSCSI_ID_B, 0x01 << adv->scsi_id);
1449
1450	adv_write_lram_32(adv, ADVV_OVERRUN_PADDR_D, adv->overrun_physbase);
1451
1452	adv_write_lram_32(adv, ADVV_OVERRUN_BSIZE_D, ADV_OVERRUN_BSIZE);
1453
1454	ADV_OUTW(adv, ADV_REG_PROG_COUNTER, ADV_MCODE_START_ADDR);
1455	if (ADV_INW(adv, ADV_REG_PROG_COUNTER) != ADV_MCODE_START_ADDR) {
1456		printf("adv%d: Unable to set program counter. Aborting.\n",
1457		       adv->unit);
1458		return (1);
1459	}
1460	return (0);
1461}
1462
1463static void
1464adv_init_qlink_var(struct adv_softc *adv)
1465{
1466	int	  i;
1467	u_int16_t lram_addr;
1468
1469	adv_write_lram_8(adv, ADVV_NEXTRDY_B, 1);
1470	adv_write_lram_8(adv, ADVV_DONENEXT_B, adv->max_openings);
1471
1472	adv_write_lram_16(adv, ADVV_FREE_Q_HEAD_W, 1);
1473	adv_write_lram_16(adv, ADVV_DONE_Q_TAIL_W, adv->max_openings);
1474
1475	adv_write_lram_8(adv, ADVV_BUSY_QHEAD_B,
1476			 (u_int8_t)((int) adv->max_openings + 1));
1477	adv_write_lram_8(adv, ADVV_DISC1_QHEAD_B,
1478			 (u_int8_t)((int) adv->max_openings + 2));
1479
1480	adv_write_lram_8(adv, ADVV_TOTAL_READY_Q_B, adv->max_openings);
1481
1482	adv_write_lram_16(adv, ADVV_ASCDVC_ERR_CODE_W, 0);
1483	adv_write_lram_16(adv, ADVV_HALTCODE_W, 0);
1484	adv_write_lram_8(adv, ADVV_STOP_CODE_B, 0);
1485	adv_write_lram_8(adv, ADVV_SCSIBUSY_B, 0);
1486	adv_write_lram_8(adv, ADVV_WTM_FLAG_B, 0);
1487	adv_write_lram_8(adv, ADVV_Q_DONE_IN_PROGRESS_B, 0);
1488
1489	lram_addr = ADV_QADR_BEG;
1490	for (i = 0; i < 32; i++, lram_addr += 2)
1491		adv_write_lram_16(adv, lram_addr, 0);
1492}
1493
1494static void
1495adv_disable_interrupt(struct adv_softc *adv)
1496{
1497	u_int16_t cfg;
1498
1499	cfg = ADV_INW(adv, ADV_CONFIG_LSW);
1500	ADV_OUTW(adv, ADV_CONFIG_LSW, cfg & ~ADV_CFG_LSW_HOST_INT_ON);
1501}
1502
1503static void
1504adv_enable_interrupt(struct adv_softc *adv)
1505{
1506	u_int16_t cfg;
1507
1508	cfg = ADV_INW(adv, ADV_CONFIG_LSW);
1509	ADV_OUTW(adv, ADV_CONFIG_LSW, cfg | ADV_CFG_LSW_HOST_INT_ON);
1510}
1511
1512static void
1513adv_toggle_irq_act(struct adv_softc *adv)
1514{
1515	ADV_OUTW(adv, ADV_CHIP_STATUS, ADV_CIW_IRQ_ACT);
1516	ADV_OUTW(adv, ADV_CHIP_STATUS, 0);
1517}
1518
1519void
1520adv_start_execution(struct adv_softc *adv)
1521{
1522	if (adv_read_lram_8(adv, ADVV_STOP_CODE_B) != 0) {
1523		adv_write_lram_8(adv, ADVV_STOP_CODE_B, 0);
1524	}
1525}
1526
1527static int
1528adv_stop_chip(struct adv_softc *adv)
1529{
1530	u_int8_t cc_val;
1531
1532	cc_val = ADV_INB(adv, ADV_CHIP_CTRL)
1533		 & (~(ADV_CC_SINGLE_STEP | ADV_CC_TEST | ADV_CC_DIAG));
1534	ADV_OUTB(adv, ADV_CHIP_CTRL, cc_val | ADV_CC_HALT);
1535	adv_set_chip_ih(adv, ADV_INS_HALT);
1536	adv_set_chip_ih(adv, ADV_INS_RFLAG_WTM);
1537	if ((ADV_INW(adv, ADV_CHIP_STATUS) & ADV_CSW_HALTED) == 0) {
1538		return (0);
1539	}
1540	return (1);
1541}
1542
1543static int
1544adv_host_req_chip_halt(struct adv_softc *adv)
1545{
1546	int	 count;
1547	u_int8_t saved_stop_code;
1548
1549	if (adv_is_chip_halted(adv))
1550		return (1);
1551
1552	count = 0;
1553	saved_stop_code = adv_read_lram_8(adv, ADVV_STOP_CODE_B);
1554	adv_write_lram_8(adv, ADVV_STOP_CODE_B,
1555			 ADV_STOP_HOST_REQ_RISC_HALT | ADV_STOP_REQ_RISC_STOP);
1556	while (adv_is_chip_halted(adv) == 0
1557	    && count++ < 2000)
1558		;
1559
1560	adv_write_lram_8(adv, ADVV_STOP_CODE_B, saved_stop_code);
1561	return (count < 2000);
1562}
1563
1564static void
1565adv_set_chip_ih(struct adv_softc *adv, u_int16_t ins_code)
1566{
1567	adv_set_bank(adv, 1);
1568	ADV_OUTW(adv, ADV_REG_IH, ins_code);
1569	adv_set_bank(adv, 0);
1570}
1571
1572#if UNUSED
1573static u_int8_t
1574adv_get_chip_scsi_ctrl(struct adv_softc *adv)
1575{
1576	u_int8_t scsi_ctrl;
1577
1578	adv_set_bank(adv, 1);
1579	scsi_ctrl = ADV_INB(adv, ADV_REG_SC);
1580	adv_set_bank(adv, 0);
1581	return (scsi_ctrl);
1582}
1583#endif
1584
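/*
 * Convert a S/G element count into the number of queue blocks required:
 * one for the request itself plus enough S/G list queues to hold the
 * remaining entries, ADV_SG_LIST_PER_Q apiece.  For example, were
 * ADV_SG_LIST_PER_Q 7, a 15 element list would need 1 + ceil(14/7) = 3
 * queues.
 */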
1585static int
1586adv_sgcount_to_qcount(int sgcount)
1587{
1588	int	n_sg_list_qs;
1589
1590	n_sg_list_qs = ((sgcount - 1) / ADV_SG_LIST_PER_Q);
1591	if (((sgcount - 1) % ADV_SG_LIST_PER_Q) != 0)
1592		n_sg_list_qs++;
1593	return (n_sg_list_qs + 1);
1594}
1595
1596/*
1597 * XXX Looks like more padding issues in this routine as well.
1598 *     There has to be a way to turn this into an insw.
1599 */
1600static void
1601adv_get_q_info(struct adv_softc *adv, u_int16_t s_addr,
1602	       u_int16_t *inbuf, int words)
1603{
1604	int	i;
1605
1606	ADV_OUTW(adv, ADV_LRAM_ADDR, s_addr);
1607	for (i = 0; i < words; i++, inbuf++) {
1608		if (i == 5) {
1609			continue;
1610		}
1611		*inbuf = ADV_INW(adv, ADV_LRAM_DATA);
1612	}
1613}
1614
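/*
 * Report how many queue blocks are available once ADV_MIN_FREE_Q are
 * held in reserve.  If n_qs blocks cannot be had, record the request
 * in openings_needed and return 0.
 */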
1615static u_int
1616adv_get_num_free_queues(struct adv_softc *adv, u_int8_t n_qs)
1617{
1618	u_int	  cur_used_qs;
1619	u_int	  cur_free_qs;
1620
1621	cur_used_qs = adv->cur_active + ADV_MIN_FREE_Q;
1622
1623	if ((cur_used_qs + n_qs) <= adv->max_openings) {
1624		cur_free_qs = adv->max_openings - cur_used_qs;
1625		return (cur_free_qs);
1626	}
1627	adv->openings_needed = n_qs;
1628	return (0);
1629}
1630
1631static u_int8_t
1632adv_alloc_free_queues(struct adv_softc *adv, u_int8_t free_q_head,
1633		      u_int8_t n_free_q)
1634{
1635	int i;
1636
1637	for (i = 0; i < n_free_q; i++) {
1638		free_q_head = adv_alloc_free_queue(adv, free_q_head);
1639		if (free_q_head == ADV_QLINK_END)
1640			break;
1641	}
1642	return (free_q_head);
1643}
1644
1645static u_int8_t
1646adv_alloc_free_queue(struct adv_softc *adv, u_int8_t free_q_head)
1647{
1648	u_int16_t	q_addr;
1649	u_int8_t	next_qp;
1650	u_int8_t	q_status;
1651
1652	next_qp = ADV_QLINK_END;
1653	q_addr = ADV_QNO_TO_QADDR(free_q_head);
1654	q_status = adv_read_lram_8(adv,	q_addr + ADV_SCSIQ_B_STATUS);
1655
1656	if ((q_status & QS_READY) == 0)
1657		next_qp = adv_read_lram_8(adv, q_addr + ADV_SCSIQ_B_FWD);
1658
1659	return (next_qp);
1660}
1661
1662static int
1663adv_send_scsi_queue(struct adv_softc *adv, struct adv_scsi_q *scsiq,
1664		    u_int8_t n_q_required)
1665{
1666	u_int8_t	free_q_head;
1667	u_int8_t	next_qp;
1668	u_int8_t	tid_no;
1669	u_int8_t	target_ix;
1670	int		retval;
1671
1672	retval = 1;
1673	target_ix = scsiq->q2.target_ix;
1674	tid_no = ADV_TIX_TO_TID(target_ix);
1675	free_q_head = adv_read_lram_16(adv, ADVV_FREE_Q_HEAD_W) & 0xFF;
1676	if ((next_qp = adv_alloc_free_queues(adv, free_q_head, n_q_required))
1677	    != ADV_QLINK_END) {
1678		scsiq->q1.q_no = free_q_head;
1679
1680		/*
1681		 * Now that we know our Q number, point our sense
1682		 * buffer pointer to a bus dma mapped area where
1683		 * we can dma the data to.
1684		 */
1685		scsiq->q1.sense_addr = adv->sense_physbase
1686		    + ((free_q_head - 1) * sizeof(struct scsi_sense_data));
1687		adv_put_ready_sg_list_queue(adv, scsiq, free_q_head);
1688		adv_write_lram_16(adv, ADVV_FREE_Q_HEAD_W, next_qp);
1689		adv->cur_active += n_q_required;
1690		retval = 0;
1691	}
1692	return (retval);
1693}
1694
1695
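/*
 * Copy a request's S/G continuation queues into LRAM.  Each queue
 * block after the head gets an adv_sg_list_q header followed by up to
 * ADV_SG_LIST_PER_Q entries, with the final block marked
 * QCSG_SG_XFER_END.
 */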
1696static void
1697adv_put_ready_sg_list_queue(struct adv_softc *adv, struct adv_scsi_q *scsiq,
1698			    u_int q_no)
1699{
1700	u_int8_t	sg_list_dwords;
1701	u_int8_t	sg_index, i;
1702	u_int8_t	sg_entry_cnt;
1703	u_int8_t	next_qp;
1704	u_int16_t	q_addr;
1705	struct		adv_sg_head *sg_head;
1706	struct		adv_sg_list_q scsi_sg_q;
1707
1708	sg_head = scsiq->sg_head;
1709
1710	if (sg_head) {
1711		sg_entry_cnt = sg_head->entry_cnt - 1;
1712#ifdef DIAGNOSTIC
1713		if (sg_entry_cnt == 0)
1714			panic("adv_put_ready_sg_list_queue: ScsiQ with "
1715			      "a SG list but only one element");
1716		if ((scsiq->q1.cntl & QC_SG_HEAD) == 0)
1717			panic("adv_put_ready_sg_list_queue: ScsiQ with "
1718			      "a SG list but QC_SG_HEAD not set");
1719#endif
1720		q_addr = ADV_QNO_TO_QADDR(q_no);
1721		sg_index = 1;
1722		scsiq->q1.sg_queue_cnt = sg_head->queue_cnt;
1723		scsi_sg_q.sg_head_qp = q_no;
1724		scsi_sg_q.cntl = QCSG_SG_XFER_LIST;
1725		for (i = 0; i < sg_head->queue_cnt; i++) {
1726			u_int8_t segs_this_q;
1727
1728			if (sg_entry_cnt > ADV_SG_LIST_PER_Q)
1729				segs_this_q = ADV_SG_LIST_PER_Q;
1730			else {
1731				/* This will be the last segment then */
1732				segs_this_q = sg_entry_cnt;
1733				scsi_sg_q.cntl |= QCSG_SG_XFER_END;
1734			}
1735			scsi_sg_q.seq_no = i + 1;
1736			sg_list_dwords = segs_this_q << 1;
1737			if (i == 0) {
1738				scsi_sg_q.sg_list_cnt = segs_this_q;
1739				scsi_sg_q.sg_cur_list_cnt = segs_this_q;
1740			} else {
1741				scsi_sg_q.sg_list_cnt = segs_this_q - 1;
1742				scsi_sg_q.sg_cur_list_cnt = segs_this_q - 1;
1743			}
1744			next_qp = adv_read_lram_8(adv, q_addr + ADV_SCSIQ_B_FWD);
1745			scsi_sg_q.q_no = next_qp;
1746			q_addr = ADV_QNO_TO_QADDR(next_qp);
1747
1748			adv_write_lram_16_multi(adv,
1749						q_addr + ADV_SCSIQ_SGHD_CPY_BEG,
1750						(u_int16_t *)&scsi_sg_q,
1751						sizeof(scsi_sg_q) >> 1);
1752			adv_write_lram_32_multi(adv, q_addr + ADV_SGQ_LIST_BEG,
1753						(u_int32_t *)&sg_head->sg_list[sg_index],
1754						sg_list_dwords);
1755			sg_entry_cnt -= segs_this_q;
1756			sg_index += ADV_SG_LIST_PER_Q;
1757		}
1758	}
1759	adv_put_ready_queue(adv, scsiq, q_no);
1760}
1761
1762static void
1763adv_put_ready_queue(struct adv_softc *adv, struct adv_scsi_q *scsiq,
1764		    u_int q_no)
1765{
1766	struct		adv_target_transinfo* tinfo;
1767	u_int		q_addr;
1768	u_int		tid_no;
1769
1770	tid_no = ADV_TIX_TO_TID(scsiq->q2.target_ix);
1771	tinfo = &adv->tinfo[tid_no];
1772	if (tinfo->current.period != tinfo->goal.period) {
1773
1774		adv_msgout_sdtr(adv, tinfo->goal.period, tinfo->goal.offset);
1775		scsiq->q1.cntl |= QC_MSG_OUT;
1776	}
1777	q_addr = ADV_QNO_TO_QADDR(q_no);
1778
1779	scsiq->q1.status = QS_FREE;
1780
1781	adv_write_lram_16_multi(adv, q_addr + ADV_SCSIQ_CDB_BEG,
1782				(u_int16_t *)scsiq->cdbptr,
1783				scsiq->q2.cdb_len >> 1);
1784
1785#if BYTE_ORDER == BIG_ENDIAN
1786	adv_adj_scsiq_endian(scsiq);
1787#endif
1788
1789	adv_put_scsiq(adv, q_addr + ADV_SCSIQ_CPY_BEG,
1790		      (u_int16_t *) &scsiq->q1.cntl,
1791		      ((sizeof(scsiq->q1) + sizeof(scsiq->q2)) / 2) - 1);
1792
1793#if CC_WRITE_IO_COUNT
1794	adv_write_lram_16(adv, q_addr + ADV_SCSIQ_W_REQ_COUNT,
1795			  adv->req_count);
1796#endif
1797
1798#if CC_CLEAR_DMA_REMAIN
1799
1800	adv_write_lram_32(adv, q_addr + ADV_SCSIQ_DW_REMAIN_XFER_ADDR, 0);
1801	adv_write_lram_32(adv, q_addr + ADV_SCSIQ_DW_REMAIN_XFER_CNT, 0);
1802#endif
1803
1804	adv_write_lram_16(adv, q_addr + ADV_SCSIQ_B_STATUS,
1805			  (scsiq->q1.q_no << 8) | QS_READY);
1806}
1807
1808static void
1809adv_put_scsiq(struct adv_softc *adv, u_int16_t s_addr,
1810	      u_int16_t *buffer, int words)
1811{
1812	int	i;
1813
1814	/*
1815	 * XXX This routine makes *gross* assumptions
1816	 * about padding in the data structures.
1817	 * Either the data structures should have explicit
1818	 * padding members added, or they should have padding
1819	 * turned off via compiler attributes depending on
1820	 * which yields better overall performance.  My hunch
1821	 * would be that turning off padding would be the
1822	 * faster approach as an outsw is much faster than
1823	 * this crude loop and accessing un-aligned data
1824	 * members isn't *that* expensive.  The other choice
1825	 * would be to modify the ASC script so that the
1826	 * the adv_scsiq_1 structure can be re-arranged so
1827	 * padding isn't required.
1828	 */
1829	ADV_OUTW(adv, ADV_LRAM_ADDR, s_addr);
1830	for (i = 0; i < words; i++, buffer++) {
1831		if (i == 2 || i == 10) {
1832			continue;
1833		}
1834		ADV_OUTW(adv, ADV_LRAM_DATA, *buffer);
1835	}
1836}
1837
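/*
 * Respond to an extended message from the target: accept or counter an
 * SDTR, answer WDTR with a zero (8 bit) width, and reject anything
 * else with MESSAGE REJECT.
 */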
1838static void
1839adv_handle_extmsg_in(struct adv_softc *adv, u_int16_t halt_q_addr,
1840		     u_int8_t q_cntl, target_bit_vector target_mask,
1841		     int tid_no)
1842{
1843	struct	ext_msg ext_msg;
1844
1845	adv_read_lram_16_multi(adv, ADVV_MSGIN_BEG, (u_int16_t *) &ext_msg,
1846			       sizeof(ext_msg) >> 1);
1847	if ((ext_msg.msg_type == MSG_EXTENDED)
1848	 && (ext_msg.msg_req == MSG_EXT_SDTR)
1849	 && (ext_msg.msg_len == MSG_EXT_SDTR_LEN)) {
1850		union	 ccb *ccb;
1851		struct	 adv_target_transinfo* tinfo;
1852		u_int	 period;
1853		u_int	 offset;
1854		int	 sdtr_accept;
1855		u_int8_t orig_offset;
1856
1857		ccb = (union ccb *) adv_read_lram_32(adv, halt_q_addr
1858							 + ADV_SCSIQ_D_CCBPTR);
1859		tinfo = &adv->tinfo[tid_no];
1860		sdtr_accept = TRUE;
1861
1862		orig_offset = ext_msg.req_ack_offset;
1863		if (ext_msg.xfer_period < tinfo->goal.period) {
1864                	sdtr_accept = FALSE;
1865			ext_msg.xfer_period = tinfo->goal.period;
1866		}
1867
1868		/* Perform range checking */
1869		period = ext_msg.xfer_period;
1870		offset = ext_msg.req_ack_offset;
1871		adv_period_offset_to_sdtr(adv, &period,  &offset, tid_no);
1872		ext_msg.xfer_period = period;
1873		ext_msg.req_ack_offset = offset;
1874
1875		/* Record our current sync settings */
1876		adv_set_syncrate(adv, ccb->ccb_h.path,
1877				 tid_no, ext_msg.xfer_period,
1878				 ext_msg.req_ack_offset,
1879				 ADV_TRANS_GOAL|ADV_TRANS_ACTIVE);
1880
1881		/* Offset too high or large period forced async */
1882		if (orig_offset != ext_msg.req_ack_offset)
1883			sdtr_accept = FALSE;
1884
1885		if (sdtr_accept && (q_cntl & QC_MSG_OUT)) {
1886			/* Valid response to our requested negotiation */
1887			q_cntl &= ~QC_MSG_OUT;
1888		} else {
1889			/* Must Respond */
1890			q_cntl |= QC_MSG_OUT;
1891			adv_msgout_sdtr(adv, ext_msg.xfer_period,
1892					ext_msg.req_ack_offset);
1893		}
1894
1895	} else if (ext_msg.msg_type == MSG_EXTENDED
1896		&& ext_msg.msg_req == MSG_EXT_WDTR
1897		&& ext_msg.msg_len == MSG_EXT_WDTR_LEN) {
1898
1899		ext_msg.wdtr_width = 0;
1900		adv_write_lram_16_multi(adv, ADVV_MSGOUT_BEG,
1901					(u_int16_t *)&ext_msg,
1902					sizeof(ext_msg) >> 1);
1903		q_cntl |= QC_MSG_OUT;
1904        } else {
1905
1906		ext_msg.msg_type = MSG_MESSAGE_REJECT;
1907		adv_write_lram_16_multi(adv, ADVV_MSGOUT_BEG,
1908					(u_int16_t *)&ext_msg,
1909					sizeof(ext_msg) >> 1);
1910		q_cntl |= QC_MSG_OUT;
1911        }
1912	adv_write_lram_8(adv, halt_q_addr + ADV_SCSIQ_B_CNTL, q_cntl);
1913}
1914
1915static void
1916adv_msgout_sdtr(struct adv_softc *adv, u_int8_t sdtr_period,
1917		u_int8_t sdtr_offset)
1918{
1919	struct	 ext_msg sdtr_buf;
1920
1921	sdtr_buf.msg_type = MSG_EXTENDED;
1922	sdtr_buf.msg_len = MSG_EXT_SDTR_LEN;
1923	sdtr_buf.msg_req = MSG_EXT_SDTR;
1924	sdtr_buf.xfer_period = sdtr_period;
1925	sdtr_offset &= ADV_SYN_MAX_OFFSET;
1926	sdtr_buf.req_ack_offset = sdtr_offset;
1927	adv_write_lram_16_multi(adv, ADVV_MSGOUT_BEG,
1928				(u_int16_t *) &sdtr_buf,
1929				sizeof(sdtr_buf) / 2);
1930}
1931
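/*
 * Walk every queue block in LRAM and mark matching queued requests as
 * aborted, recording the given status in any CCB still in progress.
 * Returns the number of requests aborted.
 */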
1932int
1933adv_abort_ccb(struct adv_softc *adv, int target, int lun, union ccb *ccb,
1934	      u_int32_t status, int queued_only)
1935{
1936	u_int16_t q_addr;
1937	u_int8_t  q_no;
1938	struct adv_q_done_info scsiq_buf;
1939	struct adv_q_done_info *scsiq;
1940	u_int8_t  target_ix;
1941	int	  count;
1942
1943	scsiq = &scsiq_buf;
1944	target_ix = ADV_TIDLUN_TO_IX(target, lun);
1945	count = 0;
1946	for (q_no = ADV_MIN_ACTIVE_QNO; q_no <= adv->max_openings; q_no++) {
1947		q_addr = ADV_QNO_TO_QADDR(q_no);
1948
1949		adv_copy_lram_doneq(adv, q_addr, scsiq, adv->max_dma_count);
1950		if (((scsiq->q_status & QS_READY) != 0)
1951		 && ((scsiq->q_status & QS_ABORTED) == 0)
1952		 && ((scsiq->cntl & QCSG_SG_XFER_LIST) == 0)
1953		 && (scsiq->d2.target_ix == target_ix)
1954		 && (queued_only == 0
1955		  || !(scsiq->q_status & (QS_DISC1|QS_DISC2|QS_BUSY|QS_DONE)))
1956		 && (ccb == NULL || (ccb == (union ccb *)scsiq->d2.ccb_ptr))) {
1957			union ccb *aborted_ccb;
1958			struct adv_ccb_info *cinfo;
1959
1960			scsiq->q_status |= QS_ABORTED;
1961			scsiq->d3.done_stat = QD_ABORTED_BY_HOST;
1962			adv_write_lram_8(adv, q_addr + ADV_SCSIQ_B_STATUS,
1963					 scsiq->q_status);
1964			aborted_ccb = (union ccb *)scsiq->d2.ccb_ptr;
1965			/* Don't clobber earlier error codes */
1966			if ((aborted_ccb->ccb_h.status & CAM_STATUS_MASK)
1967			  == CAM_REQ_INPROG)
1968				aborted_ccb->ccb_h.status |= status;
1969			cinfo = (struct adv_ccb_info *)
1970			    aborted_ccb->ccb_h.ccb_cinfo_ptr;
1971			cinfo->state |= ACCB_ABORT_QUEUED;
1972			count++;
1973		}
1974	}
1975	return (count);
1976}
1977
1978int
1979adv_reset_bus(struct adv_softc *adv)
1980{
1981	int count;
1982	int i;
1983	union ccb *ccb;
1984
1985	adv_reset_chip_and_scsi_bus(adv);
1986	adv_reinit_lram(adv);
1987	for (i = 0; i <= ADV_MAX_TID; i++) {
1988		if (adv->fix_asyn_xfer & (0x01 << i))
1989			adv_set_sdtr_reg_at_id(adv, i,
1990					       ASYN_SDTR_DATA_FIX_PCI_REV_AB);
1991        }
1992	ADV_OUTW(adv, ADV_REG_PROG_COUNTER, ADV_MCODE_START_ADDR);
1993
1994	/* Tell the XPT layer that a bus reset occurred */
1995	if (adv->path != NULL)
1996		xpt_async(AC_BUS_RESET, adv->path, NULL);
1997
1998	count = 0;
1999	while ((ccb = (union ccb *)LIST_FIRST(&adv->pending_ccbs)) != NULL) {
2002		if ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_INPROG)
2003			ccb->ccb_h.status |= CAM_SCSI_BUS_RESET;
2004		adv_done(adv, ccb, QD_ABORTED_BY_HOST, 0, 0, 0);
2005		count++;
2006	}
2007
2008	adv_start_chip(adv);
2009	return (count);
2010}
2011
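/*
 * Program the sync rate for one target: point the bank 1 host ID
 * register at the target (verifying that the chip latched it), write
 * the SDTR data via the bank 0 offset register, and restore the
 * original ID.
 */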
2012static void
2013adv_set_sdtr_reg_at_id(struct adv_softc *adv, int tid, u_int8_t sdtr_data)
2014{
2015	int orig_id;
2016
2017    	adv_set_bank(adv, 1);
2018    	orig_id = ffs(ADV_INB(adv, ADV_HOST_SCSIID)) - 1;
2019    	ADV_OUTB(adv, ADV_HOST_SCSIID, tid);
2020	if (ADV_INB(adv, ADV_HOST_SCSIID) == (0x01 << tid)) {
2021		adv_set_bank(adv, 0);
2022		ADV_OUTB(adv, ADV_SYN_OFFSET, sdtr_data);
2023	}
2024    	adv_set_bank(adv, 1);
2025    	ADV_OUTB(adv, ADV_HOST_SCSIID, orig_id);
2026	adv_set_bank(adv, 0);
2027}
2028