/* advlib.c revision 18782 */
1/*
2 * Low level routines for the Advanced Systems Inc. SCSI controllers chips
3 *
4 * Copyright (c) 1996 Justin T. Gibbs.
5 * All rights reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 * 1. Redistributions of source code must retain the above copyright
11 *    notice immediately at the beginning of the file, without modification,
12 *    this list of conditions, and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 *    notice, this list of conditions and the following disclaimer in the
15 *    documentation and/or other materials provided with the distribution.
16 * 3. The name of the author may not be used to endorse or promote products
17 *    derived from this software without specific prior written permission.
18 *
19 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
20 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
22 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
23 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
24 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
25 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
26 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
27 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
28 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
29 * SUCH DAMAGE.
30 *
31 *      $Id$
32 */
33/*
34 * Ported from:
35 * advansys.c - Linux Host Driver for AdvanSys SCSI Adapters
36 *
37 * Copyright (c) 1995-1996 Advanced System Products, Inc.
38 * All Rights Reserved.
39 *
40 * Redistribution and use in source and binary forms, with or without
41 * modification, are permitted provided that redistributions of source
42 * code retain the above copyright notice and this comment without
43 * modification.
44 */
45
46#include <sys/param.h>
47#include <sys/systm.h>
48
49#include <machine/clock.h>
50
51#include <scsi/scsi_all.h>
52#include <scsi/scsi_message.h>
53#include <scsi/scsi_disk.h>
54
55#include <vm/vm.h>
56#include <vm/vm_param.h>
57#include <vm/pmap.h>
58
59#include <dev/advansys/advlib.h>
60#include <dev/advansys/advmcode.h>
61
/*
 * Allowable synchronous transfer periods in ns.
 *
 * The upper nibble of a target's SDTR data byte is used as an index
 * into this table (see the `sdtr_data >> 4` extractions elsewhere in
 * this file), so the table must stay exactly eight entries long and
 * in this order.
 */
u_int8_t adv_sdtr_period_tbl[] =
{
	25,
	30,
	35,
	40,
	50,
	60,
	70,
	85
};
76
/*
 * In-memory image of an extended SDTR message as read from/written to
 * the chip's LRAM message areas (ADVV_MSGIN_BEG / ADVV_MSGOUT_BEG).
 * Transferred as 16 bit words, hence the trailing pad byte.
 */
struct sdtr_xmsg {
	u_int8_t	msg_type;	/* MSG_EXTENDED for an SDTR message */
	u_int8_t	msg_len;	/* compared against MSG_EXT_SDTR_LEN */
	u_int8_t	msg_req;	/* extended message code (MSG_EXT_SDTR) */
	u_int8_t	xfer_period;	/* negotiated transfer period */
	u_int8_t	req_ack_offset;	/* REQ/ACK offset; 0 means async */
	u_int8_t	res;		/* pad to an even number of bytes */
};
85
86/*
87 * Some of the early PCI adapters have problems with
88 * async transfers.  Instead try to use an offset of
89 * 1.
90 */
91#define ASYN_SDTR_DATA_FIX 0x41
92
93/* LRAM routines */
94static void	 adv_read_lram_16_multi __P((struct adv_softc *adv, u_int16_t s_addr,
95					     u_int16_t *buffer, int count));
96static void	 adv_write_lram_16_multi __P((struct adv_softc *adv,
97					      u_int16_t s_addr, u_int16_t *buffer,
98					      int count));
99static void	 adv_mset_lram_16 __P((struct adv_softc *adv,
100					u_int16_t s_addr, u_int16_t set_value,
101				       int count));
102static u_int32_t adv_msum_lram_16 __P((struct adv_softc *adv, u_int16_t s_addr, int count));
103
104static int	 adv_write_and_verify_lram_16 __P((struct adv_softc *adv,
105						   u_int16_t addr, u_int16_t value));
106static u_int32_t adv_read_lram_32 __P((struct adv_softc *adv, u_int16_t addr));
107
108
109static void	 adv_write_lram_32 __P((struct adv_softc *adv, u_int16_t addr,
110					u_int32_t value));
111static void	 adv_write_lram_32_multi __P((struct adv_softc *adv, u_int16_t s_addr,
112					      u_int32_t *buffer, int count));
113
114/* EEPROM routines */
115static u_int16_t adv_read_eeprom_16 __P((struct adv_softc *adv, u_int8_t addr));
116static u_int16_t adv_write_eeprom_16 __P((struct adv_softc *adv, u_int8_t addr, u_int16_t value));
117static int	 adv_write_eeprom_cmd_reg __P((struct adv_softc *adv, 	u_int8_t cmd_reg));
118static int	 adv_set_eeprom_config_once __P((struct adv_softc *adv,
119						 struct adv_eeprom_config *eeprom_config));
120
121/* Initialization */
122static u_int32_t adv_load_microcode __P((struct adv_softc *adv, u_int16_t s_addr,
123					 u_int16_t *mcode_buf, u_int16_t mcode_size));
124static void	 adv_init_lram __P((struct adv_softc *adv));
125static int	 adv_init_microcode_var __P((struct adv_softc *adv));
126static void	 adv_init_qlink_var __P((struct adv_softc *adv));
127
128/* Interrupts */
129static void	 adv_disable_interrupt __P((struct adv_softc *adv));
130static void	 adv_enable_interrupt __P((struct adv_softc *adv));
131static void	 adv_toggle_irq_act __P((struct adv_softc *adv));
132
133/* Chip Control */
134#if UNUSED
135static void	 adv_start_execution __P((struct adv_softc *adv));
136#endif
137static int	 adv_start_chip __P((struct adv_softc *adv));
138static int	 adv_stop_chip __P((struct adv_softc *adv));
139static void	 adv_set_chip_ih __P((struct adv_softc *adv, u_int16_t ins_code));
140static void	 adv_set_bank __P((struct adv_softc *adv, u_int8_t bank));
141#if UNUSED
142static u_int8_t  adv_get_chip_scsi_ctrl __P((struct adv_softc *adv));
143#endif
144
145/* Queue handling and execution */
146static int	 adv_sgcount_to_qcount __P((int sgcount));
147static void	 adv_get_q_info __P((struct adv_softc *adv, u_int16_t s_addr, 	u_int16_t *inbuf,
148				     int words));
149static u_int	 adv_get_num_free_queues __P((struct adv_softc *adv, u_int8_t n_qs));
150static u_int8_t  adv_alloc_free_queues __P((struct adv_softc *adv, u_int8_t free_q_head,
151					    u_int8_t n_free_q));
152static u_int8_t  adv_alloc_free_queue __P((struct adv_softc *adv, u_int8_t free_q_head));
153static int	 adv_send_scsi_queue __P((struct adv_softc *adv, struct adv_scsi_q *scsiq,
154					  u_int8_t n_q_required));
155static void	 adv_put_ready_sg_list_queue __P((struct adv_softc *adv, struct adv_scsi_q *scsiq,
156						  u_int8_t q_no));
157static void	 adv_put_ready_queue __P((struct adv_softc *adv, struct adv_scsi_q *scsiq, u_int8_t q_no));
158static void	 adv_put_scsiq __P((struct adv_softc *adv, u_int16_t s_addr, u_int16_t *buffer, int words));
159
160/* SDTR */
161static u_int8_t  adv_msgout_sdtr __P((struct adv_softc *adv, u_int8_t sdtr_period, u_int8_t sdtr_offset));
162static u_int8_t  adv_get_card_sync_setting __P((u_int8_t period, u_int8_t offset));
163static void	 adv_set_chip_sdtr __P((struct adv_softc *adv, u_int8_t sdtr_data,
164					u_int8_t tid_no));
165
166
167/* Exported Function first */
168
169u_int8_t
170adv_read_lram_8(adv, addr)
171	struct adv_softc *adv;
172	u_int16_t addr;
173
174{
175	u_int8_t   byte_data;
176	u_int16_t  word_data;
177
178	/*
179	 * LRAM is accessed on 16bit boundaries.
180	 */
181	ADV_OUTW(adv, ADV_LRAM_ADDR, addr & 0xFFFE);
182	word_data = ADV_INW(adv, ADV_LRAM_DATA);
183	if (addr & 1) {
184#if BYTE_ORDER == BIG_ENDIAN
185		byte_data = (u_int8_t)(word_data & 0xFF);
186#else
187		byte_data = (u_int8_t)((word_data >> 8) & 0xFF);
188#endif
189	} else {
190#if BYTE_ORDER == BIG_ENDIAN
191		byte_data = (u_int8_t)((word_data >> 8) & 0xFF);
192#else
193		byte_data = (u_int8_t)(word_data & 0xFF);
194#endif
195	}
196	return (byte_data);
197}
198
199void
200adv_write_lram_8(adv, addr, value)
201	struct adv_softc *adv;
202	u_int16_t addr;
203	u_int8_t value;
204{
205	u_int16_t word_data;
206
207	word_data = adv_read_lram_16(adv, addr & 0xFFFE);
208	if (addr & 1) {
209		word_data &= 0x00FF;
210		word_data |= (((u_int8_t)value << 8) & 0xFF00);
211	} else {
212		word_data &= 0xFF00;
213		word_data |= ((u_int8_t)value & 0x00FF);
214	}
215	adv_write_lram_16(adv, addr & 0xFFFE, word_data);
216}
217
218
/*
 * Read the 16 bit word at LRAM address addr.  The address is latched
 * into the LRAM address register and the data register is read back.
 */
u_int16_t
adv_read_lram_16(adv, addr)
	struct adv_softc *adv;
	u_int16_t addr;
{
	ADV_OUTW(adv, ADV_LRAM_ADDR, addr);
	return (ADV_INW(adv, ADV_LRAM_DATA));
}
227
/*
 * Write the 16 bit word value to LRAM address addr.
 */
void
adv_write_lram_16(adv, addr, value)
	struct adv_softc *adv;
	u_int16_t addr;
	u_int16_t value;
{
	ADV_OUTW(adv, ADV_LRAM_ADDR, addr);
	ADV_OUTW(adv, ADV_LRAM_DATA, value);
}
237
238
239/*
240 * Return the fully qualified board type for the adapter.
241 * The chip_revision must be set before this function is called.
242 */
243void
244adv_get_board_type(adv)
245	struct adv_softc *adv;
246{
247	if ((adv->chip_version >= ADV_CHIP_MIN_VER_VL) &&
248	    (adv->chip_version <= ADV_CHIP_MAX_VER_VL)) {
249		if (((adv->iobase & 0x0C30) == 0x0C30) ||
250			((adv->iobase & 0x0C50) == 0x0C50)) {
251			adv->type = ADV_EISA;
252		} else
253			adv->type = ADV_VL;
254	} else if ((adv->chip_version >= ADV_CHIP_MIN_VER_ISA) &&
255		   (adv->chip_version <= ADV_CHIP_MAX_VER_ISA)) {
256		if (adv->chip_version >= ADV_CHIP_MIN_VER_ISA_PNP) {
257			adv->type = ADV_ISAPNP;
258		} else
259			adv->type = ADV_ISA;
260	} else if ((adv->chip_version >= ADV_CHIP_MIN_VER_PCI) &&
261		   (adv->chip_version <= ADV_CHIP_MAX_VER_PCI)) {
262		adv->type = ADV_PCI;
263	} else
264		panic("adv_get_board_type: Unknown board type encountered");
265}
266
267u_int16_t
268adv_get_eeprom_config(adv, eeprom_config)
269	struct adv_softc *adv;
270	struct	  adv_eeprom_config  *eeprom_config;
271{
272	u_int16_t	sum;
273	u_int16_t	*wbuf;
274	u_int8_t	cfg_beg;
275	u_int8_t	cfg_end;
276	u_int8_t	s_addr;
277
278	wbuf = (u_int16_t *)eeprom_config;
279	sum = 0;
280
281	for (s_addr = 0; s_addr < 2; s_addr++, wbuf++) {
282		*wbuf = adv_read_eeprom_16(adv, s_addr);
283		sum += *wbuf;
284	}
285
286	if (adv->type & ADV_VL) {
287		cfg_beg = ADV_EEPROM_CFG_BEG_VL;
288		cfg_end = ADV_EEPROM_MAX_ADDR_VL;
289	} else {
290		cfg_beg = ADV_EEPROM_CFG_BEG;
291		cfg_end = ADV_EEPROM_MAX_ADDR;
292	}
293
294	for (s_addr = cfg_beg; s_addr <= (cfg_end - 1); s_addr++, wbuf++) {
295		*wbuf = adv_read_eeprom_16(adv, s_addr);
296		sum += *wbuf;
297#if ADV_DEBUG_EEPROM
298		printf("Addr 0x%x: 0x%04x\n", s_addr, *wbuf);
299#endif
300	}
301	*wbuf = adv_read_eeprom_16(adv, s_addr);
302	return (sum);
303}
304
305int
306adv_set_eeprom_config(adv, eeprom_config)
307	struct adv_softc *adv;
308	struct adv_eeprom_config *eeprom_config;
309{
310	int	retry;
311
312	retry = 0;
313	while (1) {
314		if (adv_set_eeprom_config_once(adv, eeprom_config) == 0) {
315			break;
316		}
317		if (++retry > ADV_EEPROM_MAX_RETRY) {
318			break;
319		}
320	}
321	return (retry > ADV_EEPROM_MAX_RETRY);
322}
323
/*
 * Reset both the controller chip and the SCSI bus.
 *
 * The chip is stopped, chip+bus reset is asserted, and the chip is
 * then brought back out of reset with the HALT bit held.  Returns
 * non-zero if the chip reports halted afterwards.  The ordering and
 * delays below follow the original AdvanSys sequence; do not reorder.
 */
int
adv_reset_chip_and_scsi_bus(adv)
	struct adv_softc *adv;
{
	adv_stop_chip(adv);
	/* Assert chip and SCSI bus reset; give devices time to notice. */
	ADV_OUTB(adv, ADV_CHIP_CTRL, ADV_CC_CHIP_RESET | ADV_CC_SCSI_RESET | ADV_CC_HALT);
	DELAY(200 * 1000);

	adv_set_chip_ih(adv, ADV_INS_RFLAG_WTM);
	adv_set_chip_ih(adv, ADV_INS_HALT);

	/* Release SCSI reset first, then chip reset, keeping HALT set. */
	ADV_OUTB(adv, ADV_CHIP_CTRL, ADV_CC_CHIP_RESET | ADV_CC_HALT);
	ADV_OUTB(adv, ADV_CHIP_CTRL, ADV_CC_HALT);
	DELAY(200 * 1000);
	return (adv_is_chip_halted(adv));
}
340
341int
342adv_test_external_lram(adv)
343	struct adv_softc* adv;
344{
345	u_int16_t	q_addr;
346	u_int16_t	saved_value;
347	int		success;
348
349	success = 0;
350
351	/* XXX Why 241? */
352	q_addr = ADV_QNO_TO_QADDR(241);
353	saved_value = adv_read_lram_16(adv, q_addr);
354	if (adv_write_and_verify_lram_16(adv, q_addr, 0x55AA) == 0) {
355		success = 1;
356		adv_write_lram_16(adv, q_addr, saved_value);
357	}
358	return (success);
359}
360
361
362int
363adv_init_lram_and_mcode(adv)
364	struct adv_softc *adv;
365{
366	u_int32_t	retval;
367	adv_disable_interrupt(adv);
368
369	adv_init_lram(adv);
370
371	retval = adv_load_microcode(adv, 0, (u_int16_t *)adv_mcode, adv_mcode_size);
372	if (retval != adv_mcode_chksum) {
373		printf("adv%d: Microcode download failed checksum!\n",
374		       adv->unit);
375		return (1);
376	}
377
378	if (adv_init_microcode_var(adv) != 0)
379		return (1);
380
381	adv_enable_interrupt(adv);
382	return (0);
383}
384
385u_int8_t
386adv_get_chip_irq(adv)
387	struct adv_softc *adv;
388{
389	u_int16_t	cfg_lsw;
390	u_int8_t	chip_irq;
391
392	cfg_lsw = ADV_INW(adv, ADV_CONFIG_LSW);
393
394	if ((adv->type & ADV_VL) != 0) {
395		chip_irq = (u_int8_t)(((cfg_lsw >> 2) & 0x07));
396		if ((chip_irq == 0) ||
397		    (chip_irq == 4) ||
398		    (chip_irq == 7)) {
399			return (0);
400		}
401		return (chip_irq + (ADV_MIN_IRQ_NO - 1));
402	}
403	chip_irq = (u_int8_t)(((cfg_lsw >> 2) & 0x03));
404	if (chip_irq == 3)
405		chip_irq += 2;
406	return (chip_irq + ADV_MIN_IRQ_NO);
407}
408
/*
 * Program the chip's IRQ selection and return the IRQ that is
 * actually in effect afterwards (re-read via adv_get_chip_irq()).
 *
 * Only VL and ISA boards are programmable here; other board types
 * fall straight through to the read-back.
 */
u_int8_t
adv_set_chip_irq(adv, irq_no)
	struct adv_softc *adv;
	u_int8_t irq_no;
{
	u_int16_t	cfg_lsw;

	if ((adv->type & ADV_VL) != 0) {
		/* Convert host IRQ number to the chip's 3 bit encoding. */
		if (irq_no != 0) {
			if ((irq_no < ADV_MIN_IRQ_NO) || (irq_no > ADV_MAX_IRQ_NO)) {
				irq_no = 0;
			} else {
				irq_no -= ADV_MIN_IRQ_NO - 1;
			}
		}
		/*
		 * Clear the IRQ field first (0xFFE3 mask), with bit 0x0010
		 * set, then write the new IRQ code into bits 4:2.  Each
		 * write is followed by an interrupt-line toggle.
		 * NOTE(review): the exact meaning of bit 0x0010 is not
		 * visible here -- presumably an IRQ-update enable; confirm
		 * against the AdvanSys register documentation.
		 */
		cfg_lsw = ADV_INW(adv, ADV_CONFIG_LSW) & 0xFFE3;
		cfg_lsw |= 0x0010;
		ADV_OUTW(adv, ADV_CONFIG_LSW, cfg_lsw);
		adv_toggle_irq_act(adv);

		cfg_lsw = ADV_INW(adv, ADV_CONFIG_LSW) & 0xFFE0;
		cfg_lsw |= (irq_no & 0x07) << 2;
		ADV_OUTW(adv, ADV_CONFIG_LSW, cfg_lsw);
		adv_toggle_irq_act(adv);
	} else if ((adv->type & ADV_ISA) != 0) {
		/* ISA boards: IRQ 15 maps onto the IRQ 13 encoding. */
		if (irq_no == 15)
			irq_no -= 2;
		irq_no -= ADV_MIN_IRQ_NO;
		cfg_lsw = ADV_INW(adv, ADV_CONFIG_LSW) & 0xFFF3;
		cfg_lsw |= (irq_no & 0x03) << 2;
		ADV_OUTW(adv, ADV_CONFIG_LSW, cfg_lsw);
	}
	return (adv_get_chip_irq(adv));
}
443
444int
445adv_execute_scsi_queue(adv, scsiq)
446	struct adv_softc *adv;
447	struct adv_scsi_q *scsiq;
448{
449	int		retval;
450	u_int		n_q_required;
451	int		s;
452	u_int32_t	addr;
453	u_int8_t	sg_entry_cnt;
454	u_int8_t	target_ix;
455	u_int8_t	sg_entry_cnt_minus_one;
456	u_int8_t	tid_no;
457	u_int8_t	sdtr_data;
458	u_int32_t	*p_data_addr;
459	u_int32_t	*p_data_bcount;
460
461	scsiq->q1.q_no = 0;
462	retval = 1;  /* Default to error case */
463	target_ix = scsiq->q2.target_ix;
464	tid_no = ADV_TIX_TO_TID(target_ix);
465
466	n_q_required = 1;
467
468	s = splbio();
469	if (scsiq->cdbptr->opcode == REQUEST_SENSE) {
470		if (((adv->initiate_sdtr & scsiq->q1.target_id) != 0)
471		    && ((adv->sdtr_done & scsiq->q1.target_id) != 0)) {
472			int sdtr_index;
473
474			sdtr_data = adv_read_lram_8(adv, ADVV_SDTR_DATA_BEG + tid_no);
475			sdtr_index = (sdtr_data >> 4);
476			adv_msgout_sdtr(adv, adv_sdtr_period_tbl[sdtr_index],
477					 (sdtr_data & ADV_SYN_MAX_OFFSET));
478			scsiq->q1.cntl |= (QC_MSG_OUT | QC_URGENT);
479		}
480	}
481
482	if ((scsiq->q1.cntl & QC_SG_HEAD) != 0) {
483		sg_entry_cnt = scsiq->sg_head->entry_cnt;
484		sg_entry_cnt_minus_one = sg_entry_cnt - 1;
485
486#ifdef DIAGNOSTIC
487		if (sg_entry_cnt <= 1)
488			panic("adv_execute_scsi_queue: Queue with QC_SG_HEAD set but %d segs.", sg_entry_cnt);
489
490		if (sg_entry_cnt > ADV_MAX_SG_LIST)
491			panic("adv_execute_scsi_queue: Queue with too many segs.");
492
493		if (adv->type & (ADV_ISA | ADV_VL | ADV_EISA)) {
494			for (i = 0; i < sg_entry_cnt_minus_one; i++) {
495				addr = scsiq->sg_head->sg_list[i].addr +
496				       scsiq->sg_head->sg_list[i].bytes;
497
498				if ((addr & 0x0003) != 0)
499					panic("adv_execute_scsi_queue: SG with odd address or byte count");
500			}
501		}
502#endif
503		p_data_addr = &scsiq->sg_head->sg_list[sg_entry_cnt_minus_one].addr;
504		p_data_bcount = &scsiq->sg_head->sg_list[sg_entry_cnt_minus_one].bytes;
505
506		n_q_required = adv_sgcount_to_qcount(sg_entry_cnt);
507		scsiq->sg_head->queue_cnt = n_q_required - 1;
508	} else {
509		p_data_addr = &scsiq->q1.data_addr;
510		p_data_bcount = &scsiq->q1.data_cnt;
511		n_q_required = 1;
512	}
513
514	if (adv->bug_fix_control & ADV_BUG_FIX_ADD_ONE_BYTE) {
515		addr = *p_data_addr + *p_data_bcount;
516		if ((addr & 0x0003) != 0) {
517			/*
518			 * XXX Is this extra test (the one on data_cnt) really only supposed to apply
519			 * to the non SG case or was it a bug due to code duplication?
520			 */
521			if ((scsiq->q1.cntl & QC_SG_HEAD) != 0 || (scsiq->q1.data_cnt & 0x01FF) == 0) {
522				if ((scsiq->cdbptr->opcode == READ_COMMAND) ||
523				    (scsiq->cdbptr->opcode == READ_BIG)) {
524					if ((scsiq->q2.tag_code & ADV_TAG_FLAG_ADD_ONE_BYTE) == 0) {
525						(*p_data_bcount)++;
526						scsiq->q2.tag_code |= ADV_TAG_FLAG_ADD_ONE_BYTE;
527					}
528				}
529
530			}
531		}
532	}
533
534	if ((adv_get_num_free_queues(adv, n_q_required) >= n_q_required)
535	    || ((scsiq->q1.cntl & QC_URGENT) != 0))
536		retval = adv_send_scsi_queue(adv, scsiq, n_q_required);
537
538	splx(s);
539	return (retval);
540}
541
542
/*
 * Copy the completion information for a finished request out of the
 * microcode queue at q_addr into *scsiq.
 *
 * Returns the count of scatter/gather queues associated with the
 * request, as recorded by the microcode in the queue's control word.
 */
u_int8_t
adv_copy_lram_doneq(adv, q_addr, scsiq, max_dma_count)
	struct adv_softc *adv;
	u_int16_t q_addr;
	struct adv_q_done_info *scsiq;
	u_int32_t max_dma_count;
{
	u_int16_t	val;
	u_int8_t	sg_queue_cnt;

	/* Bulk-copy the d2/d3 portions of the done info. */
	adv_get_q_info(adv, q_addr + ADV_SCSIQ_DONE_INFO_BEG,
		       (u_int16_t *)scsiq,
		       (sizeof(scsiq->d2) + sizeof(scsiq->d3)) / 2);

#if BYTE_ORDER == BIG_ENDIAN
	adv_adj_endian_qdone_info(scsiq);
#endif

	/* The remaining fields are packed two per LRAM word. */
	val = adv_read_lram_16(adv, q_addr + ADV_SCSIQ_B_STATUS);
	scsiq->q_status = val & 0xFF;
	scsiq->q_no = (val >> 8) & 0XFF;

	val = adv_read_lram_16(adv, q_addr + ADV_SCSIQ_B_CNTL);
	scsiq->cntl = val & 0xFF;
	sg_queue_cnt = (val >> 8) & 0xFF;

	val = adv_read_lram_16(adv,q_addr + ADV_SCSIQ_B_SENSE_LEN);
	scsiq->sense_len = val & 0xFF;
	scsiq->user_def = (val >> 8) & 0xFF;

	scsiq->remain_bytes = adv_read_lram_32(adv,
					       q_addr + ADV_SCSIQ_DW_REMAIN_XFER_CNT);
	/*
	 * XXX Is this just a safeguard or will the counter really
	 * have bogus upper bits?
	 */
	scsiq->remain_bytes &= max_dma_count;

	return (sg_queue_cnt);
}
583
584int
585adv_stop_execution(adv)
586	struct	adv_softc *adv;
587{
588	int count;
589
590	count = 0;
591	if (adv_read_lram_8(adv, ADV_STOP_CODE_B) == 0) {
592		adv_write_lram_8(adv, ADV_STOP_CODE_B,
593				 ADV_STOP_REQ_RISC_STOP);
594		do {
595			if (adv_read_lram_8(adv, ADV_STOP_CODE_B) &
596				ADV_STOP_ACK_RISC_STOP) {
597				return (1);
598			}
599			DELAY(1000);
600		} while (count++ < 20);
601	}
602	return (0);
603}
604
605int
606adv_is_chip_halted(adv)
607	struct adv_softc *adv;
608{
609	if ((ADV_INW(adv, ADV_CHIP_STATUS) & ADV_CSW_HALTED) != 0) {
610		if ((ADV_INB(adv, ADV_CHIP_CTRL) & ADV_CC_HALT) != 0) {
611			return (1);
612		}
613	}
614	return (0);
615}
616
/*
 * Acknowledge an interrupt from the chip.
 *
 * Sequence: wait for the microcode to finish generating interrupts
 * (RISC flag), set the host's ACK flag, then hammer the INT_ACK bit
 * until the pending-interrupt status clears, and finally restore the
 * host flag byte.
 *
 * XXX The numeric loop bounds (0x7FFF and 3) are inherited from the
 *     original AdvanSys driver and remain undocumented; they appear
 *     to be simple bail-out limits rather than timed values.
 */
void
adv_ack_interrupt(adv)
	struct adv_softc *adv;
{
	u_int8_t	host_flag;
	u_int8_t	risc_flag;
	int		loop;

	/* Wait (bounded) for the microcode to stop generating interrupts. */
	loop = 0;
	do {
		risc_flag = adv_read_lram_8(adv, ADVV_RISC_FLAG_B);
		if (loop++ > 0x7FFF) {
			break;
		}
	} while ((risc_flag & ADV_RISC_FLAG_GEN_INT) != 0);

	/* Tell the microcode we are acknowledging the interrupt. */
	host_flag = adv_read_lram_8(adv, ADVV_HOST_FLAG_B);
	adv_write_lram_8(adv, ADVV_HOST_FLAG_B,
			 host_flag | ADV_HOST_FLAG_ACK_INT);

	/* Ack until the chip deasserts its pending status (bounded). */
	ADV_OUTW(adv, ADV_CHIP_STATUS, ADV_CIW_INT_ACK);
	loop = 0;
	while (ADV_INW(adv, ADV_CHIP_STATUS) & ADV_CSW_INT_PENDING) {
		ADV_OUTW(adv, ADV_CHIP_STATUS, ADV_CIW_INT_ACK);
		if (loop++ > 3) {
			break;
		}
	}

	/* Restore the original host flag byte. */
	adv_write_lram_8(adv, ADVV_HOST_FLAG_B, host_flag);
}
652
/*
 * Handle all conditions that may halt the chip waiting
 * for us to intervene:
 *
 *   ADV_HALT_EXTMSG_IN     - an extended (SDTR) message was received
 *   ADV_HALT_CHK_CONDITION - a target returned CHECK CONDITION
 *   ADV_HALT_SDTR_REJECTED - a target rejected our SDTR message
 *   ADV_HALT_SS_QUEUE_FULL - a target returned QUEUE FULL
 *
 * In every case the halt code word is cleared at the end, which
 * un-halts the microcode.
 */
void
adv_isr_chip_halted(adv)
	struct adv_softc *adv;
{
	u_int16_t	  int_halt_code;
	u_int8_t	  halt_qp;
	u_int16_t	  halt_q_addr;
	u_int8_t	  target_ix;
	u_int8_t	  q_cntl;
	u_int8_t	  tid_no;
	target_bit_vector target_id;
	target_bit_vector scsi_busy;
	u_int8_t	  asyn_sdtr;
	u_int8_t	  sdtr_data;

	/* Gather the halt reason and the queue/target it applies to. */
	int_halt_code = adv_read_lram_16(adv, ADVV_HALTCODE_W);
	halt_qp = adv_read_lram_8(adv, ADVV_CURCDB_B);
	halt_q_addr = ADV_QNO_TO_QADDR(halt_qp);
	target_ix = adv_read_lram_8(adv, halt_q_addr + ADV_SCSIQ_B_TARGET_IX);
	q_cntl = adv_read_lram_8(adv, halt_q_addr + ADV_SCSIQ_B_CNTL);
	tid_no = ADV_TIX_TO_TID(target_ix);
	target_id = ADV_TID_TO_TARGET_ID(tid_no);
	/*
	 * Fall-back "async" SDTR data: targets needing the async bug
	 * workaround use an offset of 1 instead of true async.
	 */
	if (adv->needs_async_bug_fix & target_id)
		asyn_sdtr = ASYN_SDTR_DATA_FIX;
	else
		asyn_sdtr = 0;
	if (int_halt_code == ADV_HALT_EXTMSG_IN) {
		/*
		 * A target sent us an extended message.  The only one we
		 * negotiate is SDTR; respond or accept as appropriate.
		 */
		struct	sdtr_xmsg sdtr_xmsg;
		int	sdtr_accept;

		adv_read_lram_16_multi(adv, ADVV_MSGIN_BEG,
					(u_int16_t *) &sdtr_xmsg,
					sizeof(sdtr_xmsg) >> 1);
		if ((sdtr_xmsg.msg_type == MSG_EXTENDED) &&
		    (sdtr_xmsg.msg_len == MSG_EXT_SDTR_LEN)) {
			sdtr_accept = TRUE;
			if (sdtr_xmsg.msg_req == MSG_EXT_SDTR) {
				/* Clamp the offset to what we support. */
				if (sdtr_xmsg.req_ack_offset > ADV_SYN_MAX_OFFSET) {

					sdtr_accept = FALSE;
					sdtr_xmsg.req_ack_offset = ADV_SYN_MAX_OFFSET;
				}
				sdtr_data = adv_get_card_sync_setting(sdtr_xmsg.xfer_period,
								      sdtr_xmsg.req_ack_offset);
				if (sdtr_xmsg.req_ack_offset == 0) {
					/* Target wants async transfers. */
					q_cntl &= ~QC_MSG_OUT;
					adv->initiate_sdtr &= ~target_id;
					adv->sdtr_done &= ~target_id;
					adv_set_chip_sdtr(adv, asyn_sdtr, tid_no);
				} else if (sdtr_data == 0) {
					/* We can't match the request; counter with async. */
					q_cntl |= QC_MSG_OUT;
					adv->initiate_sdtr &= ~target_id;
					adv->sdtr_done &= ~target_id;
					adv_set_chip_sdtr(adv, asyn_sdtr, tid_no);
				} else {
					if (sdtr_accept && (q_cntl & QC_MSG_OUT)) {
						/*
						 * This was the target's reply to our
						 * SDTR; negotiation is complete.
						 */
						q_cntl &= ~QC_MSG_OUT;
						adv->sdtr_done |= target_id;
						adv->initiate_sdtr |= target_id;
						adv->needs_async_bug_fix &= ~target_id;
						adv_set_chip_sdtr(adv, sdtr_data, tid_no);
					} else {
						/*
						 * Target-initiated (or modified)
						 * negotiation; send our counter-offer.
						 */
						q_cntl |= QC_MSG_OUT;

						adv_msgout_sdtr(adv,
								sdtr_xmsg.xfer_period,
								sdtr_xmsg.req_ack_offset);
						adv->needs_async_bug_fix &= ~target_id;
						adv_set_chip_sdtr(adv, sdtr_data, tid_no);
						adv->sdtr_done |= target_id;
						adv->initiate_sdtr |= target_id;
					}
				}

				adv_write_lram_8(adv, halt_q_addr + ADV_SCSIQ_B_CNTL, q_cntl);
			}
		}
		/*
		 * XXX Hey, shouldn't we be rejecting any messages we don't understand?
		 *     The old code also did not un-halt the processor if it received
		 *     an extended message that it didn't understand.  That didn't
		 *     seem right, so I changed this routine to always un-halt the
		 *     processor at the end.
		 */
	} else if (int_halt_code == ADV_HALT_CHK_CONDITION) {
		/*
		 * Prepare the queue to be re-used for the automatic
		 * request sense: renegotiate sync if needed, strip any
		 * tag message, mark the queue ready, and un-busy the
		 * target so the microcode will run it.
		 */
		u_int8_t	tag_code;
		u_int8_t	q_status;

		q_cntl |= QC_REQ_SENSE;
		if (((adv->initiate_sdtr & target_id) != 0) &&
			((adv->sdtr_done & target_id) != 0)) {

			sdtr_data = adv_read_lram_8(adv, ADVV_SDTR_DATA_BEG + tid_no);
			/* XXX Macrotize the extraction of the index from sdtr_data ??? */
			adv_msgout_sdtr(adv, adv_sdtr_period_tbl[(sdtr_data >> 4) & 0x0F],
					sdtr_data & ADV_SYN_MAX_OFFSET);
			q_cntl |= QC_MSG_OUT;
		}
		adv_write_lram_8(adv, halt_q_addr + ADV_SCSIQ_B_CNTL, q_cntl);

		/* Don't tag request sense commands */
		tag_code = adv_read_lram_8(adv, halt_q_addr + ADV_SCSIQ_B_TAG_CODE);
		tag_code &= ~(MSG_SIMPLE_Q_TAG|MSG_HEAD_OF_Q_TAG|MSG_ORDERED_Q_TAG);
		adv_write_lram_8(adv, halt_q_addr + ADV_SCSIQ_B_TAG_CODE, tag_code);

		q_status = adv_read_lram_8(adv, halt_q_addr + ADV_SCSIQ_B_STATUS);
		q_status |= (QS_READY | QS_BUSY);
		adv_write_lram_8(adv, halt_q_addr + ADV_SCSIQ_B_STATUS, q_status);

		scsi_busy = adv_read_lram_8(adv, ADVV_SCSIBUSY_B);
		scsi_busy &= ~target_id;
		adv_write_lram_8(adv, ADVV_SCSIBUSY_B, scsi_busy);
	} else if (int_halt_code == ADV_HALT_SDTR_REJECTED) {
		/*
		 * The target rejected our SDTR message; fall back to
		 * async and stop trying to send the message.
		 */
		struct	sdtr_xmsg out_msg;

		adv_read_lram_16_multi(adv, ADVV_MSGOUT_BEG,
				       (u_int16_t *) &out_msg,
				       sizeof(out_msg)/2);

		if ((out_msg.msg_type == MSG_EXTENDED) &&
			(out_msg.msg_len == MSG_EXT_SDTR_LEN) &&
			(out_msg.msg_req == MSG_EXT_SDTR)) {

			adv->initiate_sdtr &= ~target_id;
			adv->sdtr_done &= ~target_id;
			adv_set_chip_sdtr(adv, asyn_sdtr, tid_no);
		}
		q_cntl &= ~QC_MSG_OUT;
		adv_write_lram_8(adv, halt_q_addr + ADV_SCSIQ_B_CNTL, q_cntl);
	} else if (int_halt_code == ADV_HALT_SS_QUEUE_FULL) {
		u_int8_t	cur_dvc_qng;
		u_int8_t	scsi_status;

		/*
		 * XXX It would be nice if we could push the responsibility for handling
		 *     this situation onto the generic SCSI layer as other drivers do.
		 *     This would be done by completing the command with the status byte
		 *     set to QUEUE_FULL, whereupon it will request that any transactions
		 *     pending on the target that where scheduled after this one be aborted
		 *     (so as to maintain queue ordering) and the number of requests the
		 *     upper level will attempt to send this target will be reduced.
		 *
		 *     With this current strategy, am I guaranteed that once I unbusy the
		 *     target the queued up transactions will be sent in the order they
		 *     were queued?  If the ASC chip does a round-robin on all queued
		 *     transactions looking for queues to run, the order is not guaranteed.
		 */
		scsi_status = adv_read_lram_8(adv, halt_q_addr + ADV_SCSIQ_SCSI_STATUS);
		cur_dvc_qng = adv_read_lram_8(adv, ADV_QADR_BEG + target_ix);
		printf("adv%d: Queue full - target %d, active transactions %d\n", adv->unit,
		       tid_no, cur_dvc_qng);
#if 0
		/* XXX FIX LATER */
		if ((cur_dvc_qng > 0) && (adv->cur_dvc_qng[tid_no] > 0)) {
			scsi_busy = adv_read_lram_8(adv, ADVV_SCSIBUSY_B);
			scsi_busy |= target_id;
			adv_write_lram_8(adv, ADVV_SCSIBUSY_B, scsi_busy);
			asc_dvc->queue_full_or_busy |= target_id;

			if (scsi_status == SS_QUEUE_FULL) {
				if (cur_dvc_qng > ASC_MIN_TAGGED_CMD) {
					cur_dvc_qng -= 1;
					asc_dvc->max_dvc_qng[tid_no] = cur_dvc_qng;

					adv_write_lram_8(adv, ADVV_MAX_DVC_QNG_BEG + tid_no,
							 cur_dvc_qng);
				}
			}
		}
#endif
	}
	/* Clearing the halt code lets the microcode resume. */
	adv_write_lram_16(adv, ADVV_HALTCODE_W, 0);
}
831
832/* Internal Routines */
833
/*
 * Stream count 16 bit words from LRAM, starting at s_addr, into
 * buffer.  The data port auto-increments the LRAM address.
 */
static void
adv_read_lram_16_multi(adv, s_addr, buffer, count)
	struct adv_softc *adv;
	u_int16_t	 s_addr;
	u_int16_t	 *buffer;
	int		 count;
{
	ADV_OUTW(adv, ADV_LRAM_ADDR, s_addr);
	ADV_INSW(adv, ADV_LRAM_DATA, buffer, count);
}
844
/*
 * Stream count 16 bit words from buffer into LRAM, starting at
 * s_addr.  The data port auto-increments the LRAM address.
 */
static void
adv_write_lram_16_multi(adv, s_addr, buffer, count)
	struct adv_softc *adv;
	u_int16_t	 s_addr;
	u_int16_t	 *buffer;
	int		 count;
{
	ADV_OUTW(adv, ADV_LRAM_ADDR, s_addr);
	ADV_OUTSW(adv, ADV_LRAM_DATA, buffer, count);
}
855
856static void
857adv_mset_lram_16(adv, s_addr, set_value, count)
858	struct adv_softc *adv;
859	u_int16_t s_addr;
860	u_int16_t set_value;
861	int count;
862{
863	int	i;
864
865	ADV_OUTW(adv, ADV_LRAM_ADDR, s_addr);
866	for (i = 0; i < count; i++)
867		ADV_OUTW(adv, ADV_LRAM_DATA, set_value);
868}
869
870static u_int32_t
871adv_msum_lram_16(adv, s_addr, count)
872	struct adv_softc *adv;
873	u_int16_t	 s_addr;
874	int		 count;
875{
876	u_int32_t	sum;
877	int		i;
878
879	sum = 0;
880	for (i = 0; i < count; i++, s_addr += 2)
881		sum += adv_read_lram_16(adv, s_addr);
882	return (sum);
883}
884
885static int
886adv_write_and_verify_lram_16(adv, addr, value)
887	struct adv_softc *adv;
888	u_int16_t addr;
889	u_int16_t value;
890{
891	int	retval;
892
893	retval = 0;
894	ADV_OUTW(adv, ADV_LRAM_ADDR, addr);
895	ADV_OUTW(adv, ADV_LRAM_DATA, value);
896	ADV_OUTW(adv, ADV_LRAM_ADDR, addr);
897	if (value != ADV_INW(adv, ADV_LRAM_DATA))
898		retval = 1;
899	return (retval);
900}
901
/*
 * Read a 32 bit value from LRAM address addr.
 *
 * The value occupies two consecutive 16 bit LRAM words; the order in
 * which the high and low halves stream out of the auto-incrementing
 * data port depends on host endianness.
 */
static u_int32_t
adv_read_lram_32(adv, addr)
	struct adv_softc *adv;
	u_int16_t addr;
{
	u_int16_t           val_low, val_high;

	ADV_OUTW(adv, ADV_LRAM_ADDR, addr);

#if BYTE_ORDER == BIG_ENDIAN
	val_high = ADV_INW(adv, ADV_LRAM_DATA);
	val_low = ADV_INW(adv, ADV_LRAM_DATA);
#else
	val_low = ADV_INW(adv, ADV_LRAM_DATA);
	val_high = ADV_INW(adv, ADV_LRAM_DATA);
#endif

	return (((u_int32_t)val_high << 16) | (u_int32_t)val_low);
}
921
/*
 * Write a 32 bit value to LRAM address addr as two consecutive 16 bit
 * words.  The half written first depends on host endianness, mirroring
 * adv_read_lram_32().
 */
static void
adv_write_lram_32(adv, addr, value)
	struct adv_softc *adv;
	u_int16_t addr;
	u_int32_t value;
{
	ADV_OUTW(adv, ADV_LRAM_ADDR, addr);

#if BYTE_ORDER == BIG_ENDIAN
	ADV_OUTW(adv, ADV_LRAM_DATA, (u_int16_t)((value >> 16) & 0xFFFF));
	ADV_OUTW(adv, ADV_LRAM_DATA, (u_int16_t)(value & 0xFFFF));
#else
	ADV_OUTW(adv, ADV_LRAM_DATA, (u_int16_t)(value & 0xFFFF));
	ADV_OUTW(adv, ADV_LRAM_DATA, (u_int16_t)((value >> 16) & 0xFFFF));
#endif
}
938
/*
 * Stream count 32 bit values from buffer into LRAM starting at
 * s_addr.  The transfer is done as count * 2 16 bit words through the
 * auto-incrementing data port.
 */
static void
adv_write_lram_32_multi(adv, s_addr, buffer, count)
	struct adv_softc *adv;
	u_int16_t s_addr;
	u_int32_t *buffer;
	int count;
{
	ADV_OUTW(adv, ADV_LRAM_ADDR, s_addr);
	ADV_OUTSW(adv, ADV_LRAM_DATA, buffer, count * 2);
}
949
/*
 * Read the 16 bit EEPROM word at addr.
 *
 * Write access is disabled first, then a read command for the given
 * address is issued.  The 1ms delays between steps give the serial
 * EEPROM time to complete each operation.
 */
static u_int16_t
adv_read_eeprom_16(adv, addr)
	struct adv_softc *adv;
	u_int8_t addr;
{
	u_int16_t read_wval;
	u_int8_t  cmd_reg;

	adv_write_eeprom_cmd_reg(adv, ADV_EEPROM_CMD_WRITE_DISABLE);
	DELAY(1000);
	cmd_reg = addr | ADV_EEPROM_CMD_READ;
	adv_write_eeprom_cmd_reg(adv, cmd_reg);
	DELAY(1000);
	read_wval = ADV_INW(adv, ADV_EEPROM_DATA);
	DELAY(1000);
	return (read_wval);
}
967
968static u_int16_t
969adv_write_eeprom_16(adv, addr, value)
970	struct adv_softc *adv;
971	u_int8_t addr;
972	u_int16_t value;
973{
974	u_int16_t	read_value;
975
976	read_value = adv_read_eeprom_16(adv, addr);
977	if (read_value != value) {
978		adv_write_eeprom_cmd_reg(adv, ADV_EEPROM_CMD_WRITE_ENABLE);
979		DELAY(1000);
980
981		ADV_OUTW(adv, ADV_EEPROM_DATA, value);
982		DELAY(1000);
983
984		adv_write_eeprom_cmd_reg(adv, ADV_EEPROM_CMD_WRITE | addr);
985		DELAY(20 * 1000);
986
987		adv_write_eeprom_cmd_reg(adv, ADV_EEPROM_CMD_WRITE_DISABLE);
988		DELAY(1000);
989		read_value = adv_read_eeprom_16(adv, addr);
990	}
991	return (read_value);
992}
993
994static int
995adv_write_eeprom_cmd_reg(adv, cmd_reg)
996	struct adv_softc *adv;
997	u_int8_t cmd_reg;
998{
999	u_int8_t read_back;
1000	int	 retry;
1001
1002	retry = 0;
1003	while (1) {
1004		ADV_OUTB(adv, ADV_EEPROM_CMD, cmd_reg);
1005		DELAY(1000);
1006		read_back = ADV_INB(adv, ADV_EEPROM_CMD);
1007		if (read_back == cmd_reg) {
1008			return (1);
1009		}
1010		if (retry++ > ADV_EEPROM_MAX_RETRY) {
1011			return (0);
1012		}
1013	}
1014}
1015
/*
 * Perform one attempt at writing *eeprom_config to the EEPROM.
 *
 * The configuration words are written (each write is individually
 * verified by adv_write_eeprom_16()), the computed checksum word is
 * stored into the structure and the EEPROM, and then the whole image
 * is read back and compared.  Returns the number of mismatches seen;
 * 0 means the attempt succeeded.
 */
static int
adv_set_eeprom_config_once(adv, eeprom_config)
	struct adv_softc *adv;
	struct adv_eeprom_config *eeprom_config;
{
	int		n_error;
	u_int16_t	*wbuf;
	u_int16_t	sum;
	u_int8_t	s_addr;
	u_int8_t	cfg_beg;
	u_int8_t	cfg_end;

	wbuf = (u_int16_t *)eeprom_config;
	n_error = 0;
	sum = 0;
	/* The first two words are common to all board types. */
	for (s_addr = 0; s_addr < 2; s_addr++, wbuf++) {
		sum += *wbuf;
		if (*wbuf != adv_write_eeprom_16(adv, s_addr, *wbuf)) {
			n_error++;
		}
	}
	/* VL boards keep their config block at a different offset. */
	if (adv->type & ADV_VL) {
		cfg_beg = ADV_EEPROM_CFG_BEG_VL;
		cfg_end = ADV_EEPROM_MAX_ADDR_VL;
	} else {
		cfg_beg = ADV_EEPROM_CFG_BEG;
		cfg_end = ADV_EEPROM_MAX_ADDR;
	}

	for (s_addr = cfg_beg; s_addr <= (cfg_end - 1); s_addr++, wbuf++) {
		sum += *wbuf;
		if (*wbuf != adv_write_eeprom_16(adv, s_addr, *wbuf)) {
			n_error++;
		}
	}
	/* Store the checksum both in the structure and in the EEPROM. */
	*wbuf = sum;
	if (sum != adv_write_eeprom_16(adv, s_addr, sum)) {
		n_error++;
	}
	/*
	 * Verify pass: re-read everything and compare.  The buffer
	 * walk mirrors the write pass above -- the first two words,
	 * then the config block (including the checksum word).
	 */
	wbuf = (u_int16_t *)eeprom_config;
	for (s_addr = 0; s_addr < 2; s_addr++, wbuf++) {
		if (*wbuf != adv_read_eeprom_16(adv, s_addr)) {
			n_error++;
		}
	}
	for (s_addr = cfg_beg; s_addr <= cfg_end; s_addr++, wbuf++) {
		if (*wbuf != adv_read_eeprom_16(adv, s_addr)) {
			n_error++;
		}
	}
	return (n_error);
}
1068
/*
 * Download mcode_size bytes of microcode from mcode_buf into LRAM at
 * s_addr.
 *
 * Two checksums are computed from the LRAM contents after the
 * download: the full-image sum, which is returned to the caller for
 * comparison against the expected value, and the code-section sum,
 * which is stored in LRAM (along with the size) for the microcode's
 * own use.
 */
static u_int32_t
adv_load_microcode(adv, s_addr, mcode_buf, mcode_size)
	struct adv_softc *adv;
	u_int16_t	 s_addr;
	u_int16_t	 *mcode_buf;
	u_int16_t	 mcode_size;
{
	u_int32_t	chksum;
	u_int16_t	mcode_lram_size;
	u_int16_t	mcode_chksum;

	mcode_lram_size = mcode_size >> 1;	/* bytes -> 16 bit words */
	/* XXX Why zero the memory just before you write the whole thing?? */
	/* adv_mset_lram_16(adv, s_addr, 0, mcode_lram_size);*/
	adv_write_lram_16_multi(adv, s_addr, mcode_buf, mcode_lram_size);

	chksum = adv_msum_lram_16(adv, s_addr, mcode_lram_size);
	mcode_chksum = (u_int16_t)adv_msum_lram_16(adv, ADV_CODE_SEC_BEG,
					  ((mcode_size - s_addr - ADV_CODE_SEC_BEG) >> 1));
	adv_write_lram_16(adv, ADVV_MCODE_CHKSUM_W, mcode_chksum);
	adv_write_lram_16(adv, ADVV_MCODE_SIZE_W, mcode_size);
	return (chksum);
}
1092
1093static void
1094adv_init_lram(adv)
1095	struct adv_softc *adv;
1096{
1097	u_int8_t	i;
1098	u_int16_t	s_addr;
1099
1100	adv_mset_lram_16(adv, ADV_QADR_BEG, 0,
1101			 (u_int16_t)((((int)adv->max_openings + 2 + 1) * 64) >> 1));
1102
1103	i = ADV_MIN_ACTIVE_QNO;
1104	s_addr = ADV_QADR_BEG + ADV_QBLK_SIZE;
1105
1106	adv_write_lram_8(adv, s_addr + ADV_SCSIQ_B_FWD,	i + 1);
1107	adv_write_lram_8(adv, s_addr + ADV_SCSIQ_B_BWD, adv->max_openings);
1108	adv_write_lram_8(adv, s_addr + ADV_SCSIQ_B_QNO, i);
1109	i++;
1110	s_addr += ADV_QBLK_SIZE;
1111	for (; i < adv->max_openings; i++, s_addr += ADV_QBLK_SIZE) {
1112		adv_write_lram_8(adv, s_addr + ADV_SCSIQ_B_FWD, i + 1);
1113		adv_write_lram_8(adv, s_addr + ADV_SCSIQ_B_BWD, i - 1);
1114		adv_write_lram_8(adv, s_addr + ADV_SCSIQ_B_QNO, i);
1115	}
1116
1117	adv_write_lram_8(adv, s_addr + ADV_SCSIQ_B_FWD, ADV_QLINK_END);
1118	adv_write_lram_8(adv, s_addr + ADV_SCSIQ_B_BWD, adv->max_openings - 1);
1119	adv_write_lram_8(adv, s_addr + ADV_SCSIQ_B_QNO, adv->max_openings);
1120	i++;
1121	s_addr += ADV_QBLK_SIZE;
1122
1123	for (; i <= adv->max_openings + 3; i++, s_addr += ADV_QBLK_SIZE) {
1124		adv_write_lram_8(adv, s_addr + ADV_SCSIQ_B_FWD, i);
1125		adv_write_lram_8(adv, s_addr + ADV_SCSIQ_B_BWD, i);
1126		adv_write_lram_8(adv, s_addr + ADV_SCSIQ_B_QNO, i);
1127	}
1128}
1129
1130static int
1131adv_init_microcode_var(adv)
1132	struct adv_softc *adv;
1133{
1134	int       i;
1135
1136	for (i = 0; i <= ADV_MAX_TID; i++) {
1137		adv_write_lram_8(adv, ADVV_SDTR_DATA_BEG + i,
1138				 adv->sdtr_data[i]);
1139	}
1140
1141	adv_init_qlink_var(adv);
1142
1143	/* XXX Again, what about wide busses??? */
1144	adv_write_lram_8(adv, ADVV_DISC_ENABLE_B, adv->disc_enable);
1145	adv_write_lram_8(adv, ADVV_HOSTSCSI_ID_B, 0x01 << adv->scsi_id);
1146
1147	/* What are the extra 8 bytes for?? */
1148	adv_write_lram_32(adv, ADVV_OVERRUN_PADDR_D, vtophys(&(adv->overrun_buf[0])) + 8);
1149
1150	adv_write_lram_32(adv, ADVV_OVERRUN_BSIZE_D, ADV_OVERRUN_BSIZE - 8);
1151
1152#if 0
1153	/* If we're going to print anything, RCS ids are more meaningful */
1154	mcode_date = adv_read_lram_16(adv, ADVV_MC_DATE_W);
1155	mcode_version = adv_read_lram_16(adv, ADVV_MC_VER_W);
1156#endif
1157	ADV_OUTW(adv, ADV_REG_PROG_COUNTER, ADV_MCODE_START_ADDR);
1158	if (ADV_INW(adv, ADV_REG_PROG_COUNTER) != ADV_MCODE_START_ADDR) {
1159		printf("adv%d: Unable to set program counter. Aborting.\n", adv->unit);
1160		return (1);
1161	}
1162	if (adv_start_chip(adv) != 1) {
1163		printf("adv%d: Unable to start on board processor. Aborting.\n",
1164		       adv->unit);
1165		return (1);
1166	}
1167	return (0);
1168}
1169
1170static void
1171adv_init_qlink_var(adv)
1172	struct adv_softc *adv;
1173{
1174	int	  i;
1175	u_int16_t lram_addr;
1176
1177	adv_write_lram_8(adv, ADVV_NEXTRDY_B, 1);
1178	adv_write_lram_8(adv, ADVV_DONENEXT_B, adv->max_openings);
1179
1180	adv_write_lram_16(adv, ADVV_FREE_Q_HEAD_W, 1);
1181	adv_write_lram_16(adv, ADVV_DONE_Q_TAIL_W, adv->max_openings);
1182
1183	adv_write_lram_8(adv, ADVV_BUSY_QHEAD_B,
1184			 (u_int8_t)((int) adv->max_openings + 1));
1185	adv_write_lram_8(adv, ADVV_DISC1_QHEAD_B,
1186			 (u_int8_t)((int) adv->max_openings + 2));
1187
1188	adv_write_lram_8(adv, ADVV_TOTAL_READY_Q_B, adv->max_openings);
1189
1190	adv_write_lram_16(adv, ADVV_ASCDVC_ERR_CODE_W, 0);
1191	adv_write_lram_16(adv, ADVV_HALTCODE_W, 0);
1192	adv_write_lram_8(adv, ADVV_STOP_CODE_B, 0);
1193	adv_write_lram_8(adv, ADVV_SCSIBUSY_B, 0);
1194	adv_write_lram_8(adv, ADVV_WTM_FLAG_B, 0);
1195
1196	adv_write_lram_8(adv, ADVV_CDBCNT_B, 0);
1197
1198	lram_addr = ADV_QADR_BEG;
1199	for (i = 0; i < 32; i++, lram_addr += 2)
1200		adv_write_lram_16(adv, lram_addr, 0);
1201}
1202static void
1203adv_disable_interrupt(adv)
1204	struct adv_softc *adv;
1205{
1206	u_int16_t cfg;
1207
1208	cfg = ADV_INW(adv, ADV_CONFIG_LSW);
1209	ADV_OUTW(adv, ADV_CONFIG_LSW, cfg & ~ADV_CFG_LSW_HOST_INT_ON);
1210}
1211
1212static void
1213adv_enable_interrupt(adv)
1214	struct adv_softc *adv;
1215{
1216	u_int16_t cfg;
1217
1218	cfg = ADV_INW(adv, ADV_CONFIG_LSW);
1219	ADV_OUTW(adv, ADV_CONFIG_LSW, cfg | ADV_CFG_LSW_HOST_INT_ON);
1220}
1221
static void
adv_toggle_irq_act(adv)
	struct adv_softc *adv;
{
	/*
	 * Pulse the IRQ ACT bit in the chip status register:
	 * assert it, then clear it.
	 */
	ADV_OUTW(adv, ADV_CHIP_STATUS, ADV_CIW_IRQ_ACT);
	ADV_OUTW(adv, ADV_CHIP_STATUS, 0);
}
1229
#if UNUSED
/*
 * Restart the on-board processor if it was paused via the microcode's
 * stop-code variable.
 */
static void
adv_start_execution(adv)
	struct adv_softc *adv;
{
	/*
	 * The rest of this file accesses the stop-code variable as
	 * ADVV_STOP_CODE_B (see adv_init_qlink_var); the previous
	 * ADV_STOP_CODE_B spelling was inconsistent with that usage.
	 */
	if (adv_read_lram_8(adv, ADVV_STOP_CODE_B) != 0) {
		adv_write_lram_8(adv, ADVV_STOP_CODE_B, 0);
	}
}
#endif
1240
1241static int
1242adv_start_chip(adv)
1243	struct adv_softc *adv;
1244{
1245	ADV_OUTB(adv, ADV_CHIP_CTRL, 0);
1246	if ((ADV_INW(adv, ADV_CHIP_STATUS) & ADV_CSW_HALTED) != 0)
1247		return (0);
1248	return (1);
1249}
1250
1251static int
1252adv_stop_chip(adv)
1253	struct adv_softc *adv;
1254{
1255	u_int8_t cc_val;
1256
1257	cc_val = ADV_INB(adv, ADV_CHIP_CTRL)
1258		 & (~(ADV_CC_SINGLE_STEP | ADV_CC_TEST | ADV_CC_DIAG));
1259	ADV_OUTB(adv, ADV_CHIP_CTRL, cc_val | ADV_CC_HALT);
1260	adv_set_chip_ih(adv, ADV_INS_HALT);
1261	adv_set_chip_ih(adv, ADV_INS_RFLAG_WTM);
1262	if ((ADV_INW(adv, ADV_CHIP_STATUS) & ADV_CSW_HALTED) == 0) {
1263		return (0);
1264	}
1265	return (1);
1266}
1267
/*
 * Feed a single instruction to the chip's instruction holding
 * register.  The IH register lives in bank 1, so switch banks
 * around the write and restore bank 0 afterwards.
 */
static void
adv_set_chip_ih(adv, ins_code)
	struct adv_softc *adv;
	u_int16_t ins_code;
{
	adv_set_bank(adv, 1);
	ADV_OUTW(adv, ADV_REG_IH, ins_code);
	adv_set_bank(adv, 0);
}
1277
1278static void
1279adv_set_bank(adv, bank)
1280	struct adv_softc *adv;
1281	u_int8_t bank;
1282{
1283	u_int8_t control;
1284
1285	/*
1286	 * Start out with the bank reset to 0
1287	 */
1288	control = ADV_INB(adv, ADV_CHIP_CTRL)
1289		  &  (~(ADV_CC_SINGLE_STEP | ADV_CC_TEST
1290			| ADV_CC_DIAG | ADV_CC_SCSI_RESET
1291			| ADV_CC_CHIP_RESET | ADV_CC_BANK_ONE));
1292	if (bank == 1) {
1293		control |= ADV_CC_BANK_ONE;
1294	} else if (bank == 2) {
1295		control |= ADV_CC_DIAG | ADV_CC_BANK_ONE;
1296	}
1297	ADV_OUTB(adv, ADV_CHIP_CTRL, control);
1298}
1299
#if UNUSED
/*
 * Read the SCSI control register, which lives in bank 1, restoring
 * bank 0 before returning the value.
 */
static u_int8_t
adv_get_chip_scsi_ctrl(adv)
	struct	adv_softc *adv;
{
	u_int8_t scsi_ctrl;

	adv_set_bank(adv, 1);
	scsi_ctrl = ADV_INB(adv, ADV_REG_SC);
	adv_set_bank(adv, 0);
	return (scsi_ctrl);
}
#endif
1313
1314static int
1315adv_sgcount_to_qcount(sgcount)
1316	int sgcount;
1317{
1318	int	n_sg_list_qs;
1319
1320	n_sg_list_qs = ((sgcount - 1) / ADV_SG_LIST_PER_Q);
1321	if (((sgcount - 1) % ADV_SG_LIST_PER_Q) != 0)
1322		n_sg_list_qs++;
1323	return (n_sg_list_qs + 1);
1324}
1325
1326/*
1327 * XXX Looks like more padding issues in this routine as well.
1328 *     There has to be a way to turn this into an insw.
1329 */
static void
adv_get_q_info(adv, s_addr, inbuf, words)
	struct adv_softc *adv;
	u_int16_t s_addr;
	u_int16_t *inbuf;
	int words;
{
	int	i;

	/*
	 * Copy `words` 16-bit words from LRAM starting at s_addr into
	 * inbuf.  Only the starting address is programmed; presumably
	 * the LRAM data port advances on each access -- see the XXX
	 * above about padding.
	 */
	ADV_OUTW(adv, ADV_LRAM_ADDR, s_addr);
	for (i = 0; i < words; i++, inbuf++) {
		if (i == 5) {
			/*
			 * Word 5 of the host structure is skipped
			 * (padding).  NOTE(review): the LRAM read is
			 * skipped too, so this assumes the device-side
			 * layout also has a hole here -- confirm.
			 */
			continue;
		}
		*inbuf = ADV_INW(adv, ADV_LRAM_DATA);
	}
}
1347
1348static u_int
1349adv_get_num_free_queues(adv, n_qs)
1350	struct adv_softc *adv;
1351	u_int8_t n_qs;
1352{
1353	u_int	  cur_used_qs;
1354	u_int	  cur_free_qs;
1355
1356	if (n_qs == 1)
1357		cur_used_qs = adv->cur_active +
1358			      adv->openings_needed +
1359			      ADV_MIN_FREE_Q;
1360	else
1361		cur_used_qs = adv->cur_active +
1362			      ADV_MIN_FREE_Q;
1363
1364	if ((cur_used_qs + n_qs) <= adv->max_openings) {
1365		cur_free_qs = adv->max_openings - cur_used_qs;
1366		return (cur_free_qs);
1367	}
1368	if (n_qs > 1)
1369		if (n_qs > adv->openings_needed)
1370			adv->openings_needed = n_qs;
1371	return (0);
1372}
1373
1374static u_int8_t
1375adv_alloc_free_queues(adv, free_q_head, n_free_q)
1376	struct adv_softc *adv;
1377	u_int8_t free_q_head;
1378	u_int8_t n_free_q;
1379{
1380	int i;
1381
1382	for (i = 0; i < n_free_q; i++) {
1383		free_q_head = adv_alloc_free_queue(adv, free_q_head);
1384		if (free_q_head == ADV_QLINK_END)
1385			break;
1386	}
1387	return (free_q_head);
1388}
1389
1390static u_int8_t
1391adv_alloc_free_queue(adv, free_q_head)
1392	struct adv_softc *adv;
1393	u_int8_t free_q_head;
1394{
1395	u_int16_t	q_addr;
1396	u_int8_t	next_qp;
1397	u_int8_t	q_status;
1398
1399	next_qp = ADV_QLINK_END;
1400	q_addr = ADV_QNO_TO_QADDR(free_q_head);
1401	q_status = adv_read_lram_8(adv,	q_addr + ADV_SCSIQ_B_STATUS);
1402
1403	if ((q_status & QS_READY) == 0)
1404		next_qp = adv_read_lram_8(adv, q_addr + ADV_SCSIQ_B_FWD);
1405
1406	return (next_qp);
1407}
1408
1409static int
1410adv_send_scsi_queue(adv, scsiq, n_q_required)
1411	struct adv_softc *adv;
1412	struct adv_scsi_q *scsiq;
1413	u_int8_t n_q_required;
1414{
1415	u_int8_t	free_q_head;
1416	u_int8_t	next_qp;
1417	u_int8_t	tid_no;
1418	u_int8_t	target_ix;
1419	int		retval;
1420
1421	retval = 1;
1422	target_ix = scsiq->q2.target_ix;
1423	tid_no = ADV_TIX_TO_TID(target_ix);
1424	free_q_head = adv_read_lram_16(adv, ADVV_FREE_Q_HEAD_W) & 0xFF;
1425	if ((next_qp = adv_alloc_free_queues(adv, free_q_head, n_q_required))
1426	    != ADV_QLINK_END) {
1427		if (n_q_required > 1) {
1428			/*
1429			 * Only reset the shortage value when processing
1430			 * a "normal" request and not error recovery or
1431			 * other requests that dip into our reserved queues.
1432			 * Generally speaking, a normal request will need more
1433			 * than one queue.
1434			 */
1435			adv->openings_needed = 0;
1436		}
1437		scsiq->q1.q_no = free_q_head;
1438
1439		/*
1440		 * Now that we know our Q number, point our sense
1441		 * buffer pointer to an area below 16M if we are
1442		 * an ISA adapter.
1443		 */
1444		if (adv->sense_buffers != NULL)
1445			scsiq->q1.sense_addr = (u_int32_t)vtophys(&(adv->sense_buffers[free_q_head]));
1446		adv_put_ready_sg_list_queue(adv, scsiq, free_q_head);
1447		adv_write_lram_16(adv, ADVV_FREE_Q_HEAD_W, next_qp);
1448		adv->cur_active += n_q_required;
1449		retval = 0;
1450	}
1451	return (retval);
1452}
1453
1454
/*
 * Copy a request's scatter/gather list into the chain of queue blocks
 * allocated for it, then hand the head queue to adv_put_ready_queue()
 * for final setup.  The first SG element travels with the head queue
 * itself; each continuation queue carries up to ADV_SG_LIST_PER_Q
 * further elements.
 */
static void
adv_put_ready_sg_list_queue(adv, scsiq, q_no)
	struct	adv_softc *adv;
	struct 	adv_scsi_q *scsiq;
	u_int8_t q_no;
{
	u_int8_t	sg_list_dwords;
	u_int8_t	sg_index, i;
	u_int8_t	sg_entry_cnt;
	u_int8_t	next_qp;
	u_int16_t	q_addr;
	struct		adv_sg_head *sg_head;
	struct		adv_sg_list_q scsi_sg_q;

	sg_head = scsiq->sg_head;

	if (sg_head) {
		/* Element 0 is handled by the head queue. */
		sg_entry_cnt = sg_head->entry_cnt - 1;
#ifdef DIAGNOSTIC
		if (sg_entry_cnt == 0)
			panic("adv_put_ready_sg_list_queue: ScsiQ with a SG list but only one element");
		if ((scsiq->q1.cntl & QC_SG_HEAD) == 0)
			panic("adv_put_ready_sg_list_queue: ScsiQ with a SG list but QC_SG_HEAD not set");
#endif
		q_addr = ADV_QNO_TO_QADDR(q_no);
		sg_index = 1;
		scsiq->q1.sg_queue_cnt = sg_head->queue_cnt;
		scsi_sg_q.sg_head_qp = q_no;
		scsi_sg_q.cntl = QCSG_SG_XFER_LIST;
		for (i = 0; i < sg_head->queue_cnt; i++) {
			u_int8_t segs_this_q;

			if (sg_entry_cnt > ADV_SG_LIST_PER_Q)
				segs_this_q = ADV_SG_LIST_PER_Q;
			else {
				/* This will be the last segment then */
				segs_this_q = sg_entry_cnt;
				scsi_sg_q.cntl |= QCSG_SG_XFER_END;
			}
			scsi_sg_q.seq_no = i + 1;
			/* Two dwords are copied per SG element. */
			sg_list_dwords = segs_this_q * 2;
			if (i == 0) {
				scsi_sg_q.sg_list_cnt = segs_this_q;
				scsi_sg_q.sg_cur_list_cnt = segs_this_q;
			} else {
				scsi_sg_q.sg_list_cnt = segs_this_q - 1;
				scsi_sg_q.sg_cur_list_cnt = segs_this_q - 1;
			}
			/* Follow the forward link to the next queue block. */
			next_qp = adv_read_lram_8(adv, q_addr + ADV_SCSIQ_B_FWD);
			scsi_sg_q.q_no = next_qp;
			q_addr = ADV_QNO_TO_QADDR(next_qp);

			adv_write_lram_16_multi(adv, q_addr + ADV_SCSIQ_SGHD_CPY_BEG,
						(u_int16_t *)&scsi_sg_q,
						sizeof(scsi_sg_q) >> 1);
			adv_write_lram_32_multi(adv, q_addr + ADV_SGQ_LIST_BEG,
						(u_int32_t *)&sg_head->sg_list[sg_index],
						sg_list_dwords);
			sg_entry_cnt -= segs_this_q;
			sg_index += ADV_SG_LIST_PER_Q;
		}
	}
	adv_put_ready_queue(adv, scsiq, q_no);
}
1519
/*
 * Finish setting up queue q_no for the given request and mark it
 * ready for the microcode.  If synchronous negotiation is wanted for
 * the target but has not yet completed, an SDTR message-out is staged
 * first.
 */
static void
adv_put_ready_queue(adv, scsiq, q_no)
	struct adv_softc *adv;
	struct adv_scsi_q *scsiq;
	u_int8_t q_no;
{
	u_int16_t	q_addr;
	u_int8_t	tid_no;
	u_int8_t	sdtr_data;
	u_int8_t	syn_period_ix;
	u_int8_t	syn_offset;

	if (((adv->initiate_sdtr & scsiq->q1.target_id) != 0) &&
	    ((adv->sdtr_done & scsiq->q1.target_id) == 0)) {

		tid_no = ADV_TIX_TO_TID(scsiq->q2.target_ix);

		/*
		 * The per-target SDTR byte packs a period-table index
		 * in its high nibble and the REQ/ACK offset in its
		 * low nibble.
		 */
		sdtr_data = adv_read_lram_8(adv, ADVV_SDTR_DATA_BEG + tid_no);
		syn_period_ix = (sdtr_data >> 4) & (ADV_SYN_XFER_NO - 1);
		syn_offset = sdtr_data & ADV_SYN_MAX_OFFSET;
		adv_msgout_sdtr(adv, adv_sdtr_period_tbl[syn_period_ix],
				 syn_offset);

		scsiq->q1.cntl |= QC_MSG_OUT;
	}
	q_addr = ADV_QNO_TO_QADDR(q_no);

	scsiq->q1.status = QS_FREE;

	/* Copy the CDB into the queue block. */
	adv_write_lram_16_multi(adv, q_addr + ADV_SCSIQ_CDB_BEG,
				(u_int16_t *)scsiq->cdbptr,
				scsiq->q2.cdb_len >> 1);

#if BYTE_ORDER == BIG_ENDIAN
	adv_adj_scsiq_endian(scsiq);
#endif

	/* Copy the q1/q2 control blocks, starting at q1.cntl, into LRAM. */
	adv_put_scsiq(adv, q_addr + ADV_SCSIQ_CPY_BEG,
		      (u_int16_t *) &scsiq->q1.cntl,
		      ((sizeof(scsiq->q1) + sizeof(scsiq->q2)) / 2) - 1);

#if CC_WRITE_IO_COUNT
	adv_write_lram_16(adv, q_addr + ADV_SCSIQ_W_REQ_COUNT,
			  adv->req_count);
#endif

#if CC_CLEAR_DMA_REMAIN

	adv_write_lram_32(adv, q_addr + ADV_SCSIQ_DW_REMAIN_XFER_ADDR, 0);
	adv_write_lram_32(adv, q_addr + ADV_SCSIQ_DW_REMAIN_XFER_CNT, 0);
#endif

	/* Writing QS_READY last makes the queue visible to the microcode. */
	adv_write_lram_16(adv, q_addr + ADV_SCSIQ_B_STATUS,
			  (scsiq->q1.q_no << 8) | QS_READY);
}
1575
/*
 * Copy `words` 16-bit words from buffer into LRAM at s_addr, skipping
 * buffer words 2 and 10 -- apparently host structure padding with no
 * LRAM counterpart; see the XXX note below.
 */
static void
adv_put_scsiq(adv, s_addr, buffer, words)
	struct adv_softc *adv;
	u_int16_t s_addr;
	u_int16_t *buffer;
	int words;
{
	int	i;

	/*
	 * XXX This routine makes *gross* assumptions
	 * about padding in the data structures.
	 * Either the data structures should have explicit
	 * padding members added, or they should have padding
	 * turned off via compiler attributes depending on
	 * which yields better overall performance.  My hunch
	 * would be that turning off padding would be the
	 * faster approach as an outsw is much faster than
	 * this crude loop and accessing un-aligned data
	 * members isn't *that* expensive.  The other choice
	 * would be to modify the ASC script so that the
	 * the adv_scsiq_1 structure can be re-arranged so
	 * padding isn't required.
	 */
	ADV_OUTW(adv, ADV_LRAM_ADDR, s_addr);
	for (i = 0; i < words; i++, buffer++) {
		if (i == 2 || i == 10) {
			continue;
		}
		ADV_OUTW(adv, ADV_LRAM_DATA, *buffer);
	}
}
1608
1609static u_int8_t
1610adv_msgout_sdtr(adv, sdtr_period, sdtr_offset)
1611	struct adv_softc *adv;
1612	u_int8_t sdtr_period;
1613	u_int8_t sdtr_offset;
1614{
1615	struct	 sdtr_xmsg sdtr_buf;
1616
1617	sdtr_buf.msg_type = MSG_EXTENDED;
1618	sdtr_buf.msg_len = MSG_EXT_SDTR_LEN;
1619	sdtr_buf.msg_req = MSG_EXT_SDTR;
1620	sdtr_buf.xfer_period = sdtr_period;
1621	sdtr_offset &= ADV_SYN_MAX_OFFSET;
1622	sdtr_buf.req_ack_offset = sdtr_offset;
1623	adv_write_lram_16_multi(adv, ADVV_MSGOUT_BEG,
1624				(u_int16_t *) &sdtr_buf,
1625				sizeof(sdtr_buf) / 2);
1626
1627	return (adv_get_card_sync_setting(sdtr_period, sdtr_offset));
1628}
1629
1630static u_int8_t
1631adv_get_card_sync_setting(period, offset)
1632	u_int8_t period;
1633	u_int8_t offset;
1634{
1635	u_int i;
1636
1637	if (period >= adv_sdtr_period_tbl[0]) {
1638		for (i = 0; i < sizeof(adv_sdtr_period_tbl); i++) {
1639			if (period <= adv_sdtr_period_tbl[i])
1640				return ((adv_sdtr_period_tbl[i] << 4) | offset);
1641		}
1642	}
1643	return (0);
1644}
1645
/*
 * Commit a negotiated synchronous transfer setting: program the
 * chip's synchronous offset register and record the value in the
 * per-target SDTR-done area of LRAM for the microcode.
 */
static void
adv_set_chip_sdtr(adv, sdtr_data, tid_no)
	struct adv_softc *adv;
	u_int8_t sdtr_data;
	u_int8_t tid_no;
{
	ADV_OUTB(adv, ADV_SYN_OFFSET, sdtr_data);
	adv_write_lram_8(adv, ADVV_SDTR_DONE_BEG + tid_no, sdtr_data);
}
1655