advlib.c revision 40027
119370Spst/*
219370Spst * Low level routines for the Advanced Systems Inc. SCSI controllers chips
3130803Smarcel *
4130803Smarcel * Copyright (c) 1996-1997 Justin Gibbs.
5130803Smarcel * All rights reserved.
698944Sobrien *
719370Spst * Redistribution and use in source and binary forms, with or without
898944Sobrien * modification, are permitted provided that the following conditions
998944Sobrien * are met:
1098944Sobrien * 1. Redistributions of source code must retain the above copyright
1198944Sobrien *    notice, this list of conditions, and the following disclaimer,
1219370Spst *    without modification, immediately at the beginning of the file.
1398944Sobrien * 2. Redistributions in binary form must reproduce the above copyright
1498944Sobrien *    notice, this list of conditions and the following disclaimer in the
1598944Sobrien *    documentation and/or other materials provided with the distribution.
1698944Sobrien * 3. The name of the author may not be used to endorse or promote products
1719370Spst *    derived from this software without specific prior written permission.
1898944Sobrien *
1998944Sobrien * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
2098944Sobrien * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
2198944Sobrien * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
2219370Spst * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
2319370Spst * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
24130803Smarcel * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
2519370Spst * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
2619370Spst * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
2719370Spst * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
2819370Spst * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
2919370Spst * SUCH DAMAGE.
3019370Spst *
3119370Spst *      $Id: advlib.c,v 1.6 1998/09/20 05:04:05 gibbs Exp $
3298944Sobrien */
33130803Smarcel/*
3498944Sobrien * Ported from:
3519370Spst * advansys.c - Linux Host Driver for AdvanSys SCSI Adapters
3698944Sobrien *
3798944Sobrien * Copyright (c) 1995-1996 Advanced System Products, Inc.
3898944Sobrien * All Rights Reserved.
3998944Sobrien *
4098944Sobrien * Redistribution and use in source and binary forms, with or without
4198944Sobrien * modification, are permitted provided that redistributions of source
42130803Smarcel * code retain the above copyright notice and this comment without
43130803Smarcel * modification.
44130803Smarcel */
4598944Sobrien
4698944Sobrien#include <sys/param.h>
4798944Sobrien#include <sys/systm.h>
4898944Sobrien
4998944Sobrien#include <machine/bus_pio.h>
5098944Sobrien#include <machine/bus.h>
5198944Sobrien#include <machine/clock.h>
5298944Sobrien
5398944Sobrien#include <cam/cam.h>
5498944Sobrien#include <cam/cam_ccb.h>
5598944Sobrien#include <cam/cam_sim.h>
5698944Sobrien#include <cam/cam_xpt_sim.h>
5798944Sobrien
5819370Spst#include <cam/scsi/scsi_all.h>
5919370Spst#include <cam/scsi/scsi_message.h>
6019370Spst#include <cam/scsi/scsi_da.h>
6119370Spst#include <cam/scsi/scsi_cd.h>
6219370Spst
6319370Spst#include <vm/vm.h>
6419370Spst#include <vm/vm_param.h>
6519370Spst#include <vm/pmap.h>
6619370Spst
6719370Spst#include <dev/advansys/advansys.h>
6819370Spst#include <dev/advansys/advmcode.h>
6919370Spst
7019370Spststruct adv_quirk_entry {
7119370Spst	struct scsi_inquiry_pattern inq_pat;
7298944Sobrien	u_int8_t quirks;
7398944Sobrien#define ADV_QUIRK_FIX_ASYN_XFER_ALWAYS	0x01
7498944Sobrien#define ADV_QUIRK_FIX_ASYN_XFER		0x02
7519370Spst};
76130803Smarcel
7719370Spststatic struct adv_quirk_entry adv_quirk_table[] =
7819370Spst{
7919370Spst	{
8019370Spst		{ T_CDROM, SIP_MEDIA_REMOVABLE, "HP", "*", "*" },
8119370Spst		ADV_QUIRK_FIX_ASYN_XFER_ALWAYS|ADV_QUIRK_FIX_ASYN_XFER
8219370Spst	},
8319370Spst	{
8419370Spst		{ T_CDROM, SIP_MEDIA_REMOVABLE, "NEC", "CD-ROM DRIVE", "*" },
8519370Spst		0
8619370Spst	},
8746283Sdfr	{
8819370Spst		{
8919370Spst		  T_SEQUENTIAL, SIP_MEDIA_REMOVABLE,
9019370Spst		  "TANDBERG", " TDC 36", "*"
9119370Spst		},
9219370Spst		0
9319370Spst	},
9419370Spst	{
9519370Spst		{ T_SEQUENTIAL, SIP_MEDIA_REMOVABLE, "WANGTEK", "*", "*" },
9619370Spst		0
9719370Spst	},
9819370Spst	{
9919370Spst		{
10019370Spst		  T_PROCESSOR, SIP_MEDIA_REMOVABLE|SIP_MEDIA_FIXED,
10119370Spst		  "*", "*", "*"
10219370Spst		},
10319370Spst		0
10498944Sobrien	},
10519370Spst	{
10619370Spst		{
10798944Sobrien		  T_SCANNER, SIP_MEDIA_REMOVABLE|SIP_MEDIA_FIXED,
10898944Sobrien		  "*", "*", "*"
10919370Spst		},
11019370Spst		0
11146283Sdfr	},
11219370Spst	{
11319370Spst		/* Default quirk entry */
11419370Spst		{
11519370Spst		  T_ANY, SIP_MEDIA_REMOVABLE|SIP_MEDIA_FIXED,
11698944Sobrien		  /*vendor*/"*", /*product*/"*", /*revision*/"*"
11746283Sdfr                },
11819370Spst                ADV_QUIRK_FIX_ASYN_XFER,
11919370Spst	}
12019370Spst};
12119370Spst
12219370Spst/*
12319370Spst * Allowable periods in ns
12498944Sobrien */
12519370Spstu_int8_t adv_sdtr_period_tbl[] =
12619370Spst{
12719370Spst	25,
12819370Spst	30,
12919370Spst	35,
13019370Spst	40,
13119370Spst	50,
13219370Spst	60,
13319370Spst	70,
13446283Sdfr	85
13598944Sobrien};
13619370Spst
13719370Spstu_int8_t adv_sdtr_period_tbl_ultra[] =
13819370Spst{
13919370Spst	12,
14019370Spst	19,
14119370Spst	25,
14219370Spst	32,
14319370Spst	38,
14419370Spst	44,
14519370Spst	50,
14619370Spst	57,
14746283Sdfr	63,
14819370Spst	69,
14919370Spst	75,
15098944Sobrien	82,
15119370Spst	88,
15298944Sobrien	94,
15319370Spst	100,
15419370Spst	107
15598944Sobrien};
15698944Sobrien
15798944Sobrienstruct ext_msg {
15819370Spst	u_int8_t msg_type;
15919370Spst	u_int8_t msg_len;
16019370Spst	u_int8_t msg_req;
16119370Spst	union {
16219370Spst		struct {
16346283Sdfr			u_int8_t sdtr_xfer_period;
16419370Spst			u_int8_t sdtr_req_ack_offset;
16519370Spst		} sdtr;
16619370Spst		struct {
16746283Sdfr       			u_int8_t wdtr_width;
16819370Spst		} wdtr;
16919370Spst		struct {
17019370Spst			u_int8_t mdp[4];
17119370Spst		} mdp;
17219370Spst	} u_ext_msg;
17346283Sdfr	u_int8_t res;
17419370Spst};
17519370Spst
17619370Spst#define	xfer_period	u_ext_msg.sdtr.sdtr_xfer_period
17719370Spst#define	req_ack_offset	u_ext_msg.sdtr.sdtr_req_ack_offset
17819370Spst#define	wdtr_width	u_ext_msg.wdtr.wdtr_width
17998944Sobrien#define	mdp_b3		u_ext_msg.mdp_b3
18019370Spst#define	mdp_b2		u_ext_msg.mdp_b2
18119370Spst#define	mdp_b1		u_ext_msg.mdp_b1
18219370Spst#define	mdp_b0		u_ext_msg.mdp_b0
18319370Spst
18419370Spst/*
18519370Spst * Some of the early PCI adapters have problems with
18619370Spst * async transfers.  Instead use an offset of 1.
18719370Spst */
18819370Spst#define ASYN_SDTR_DATA_FIX_PCI_REV_AB 0x41
18919370Spst
19019370Spst/* LRAM routines */
19146283Sdfrstatic void	 adv_read_lram_16_multi(struct adv_softc *adv, u_int16_t s_addr,
19246283Sdfr					u_int16_t *buffer, int count);
19319370Spststatic void	 adv_write_lram_16_multi(struct adv_softc *adv,
19419370Spst					 u_int16_t s_addr, u_int16_t *buffer,
19519370Spst					 int count);
19619370Spststatic void	 adv_mset_lram_16(struct adv_softc *adv, u_int16_t s_addr,
19719370Spst				  u_int16_t set_value, int count);
19846283Sdfrstatic u_int32_t adv_msum_lram_16(struct adv_softc *adv, u_int16_t s_addr,
19919370Spst				  int count);
20098944Sobrien
20198944Sobrienstatic int	 adv_write_and_verify_lram_16(struct adv_softc *adv,
20219370Spst					      u_int16_t addr, u_int16_t value);
20346283Sdfrstatic u_int32_t adv_read_lram_32(struct adv_softc *adv, u_int16_t addr);
20419370Spst
20519370Spst
20698944Sobrienstatic void	 adv_write_lram_32(struct adv_softc *adv, u_int16_t addr,
20719370Spst				   u_int32_t value);
20819370Spststatic void	 adv_write_lram_32_multi(struct adv_softc *adv,
20919370Spst					 u_int16_t s_addr, u_int32_t *buffer,
21019370Spst					 int count);
211130803Smarcel
21219370Spst/* EEPROM routines */
21319370Spststatic u_int16_t adv_read_eeprom_16(struct adv_softc *adv, u_int8_t addr);
21419370Spststatic u_int16_t adv_write_eeprom_16(struct adv_softc *adv, u_int8_t addr,
21598944Sobrien				     u_int16_t value);
21698944Sobrienstatic int	 adv_write_eeprom_cmd_reg(struct adv_softc *adv,
21798944Sobrien					  u_int8_t cmd_reg);
21898944Sobrienstatic int	 adv_set_eeprom_config_once(struct adv_softc *adv,
21998944Sobrien					    struct adv_eeprom_config *eeconfig);
22019370Spst
22119370Spst/* Initialization */
22219370Spststatic u_int32_t adv_load_microcode(struct adv_softc *adv, u_int16_t s_addr,
223130803Smarcel				    u_int16_t *mcode_buf, u_int16_t mcode_size);
224130803Smarcel
22598944Sobrienstatic void	 adv_reinit_lram(struct adv_softc *adv);
22619370Spststatic void	 adv_init_lram(struct adv_softc *adv);
22719370Spststatic int	 adv_init_microcode_var(struct adv_softc *adv);
22898944Sobrienstatic void	 adv_init_qlink_var(struct adv_softc *adv);
22919370Spst
23019370Spst/* Interrupts */
23119370Spststatic void	 adv_disable_interrupt(struct adv_softc *adv);
23298944Sobrienstatic void	 adv_enable_interrupt(struct adv_softc *adv);
23319370Spststatic void	 adv_toggle_irq_act(struct adv_softc *adv);
23446283Sdfr
235242936Semaste/* Chip Control */
236242936Semastestatic int	 adv_stop_chip(struct adv_softc *adv);
23719370Spststatic int	 adv_host_req_chip_halt(struct adv_softc *adv);
23819370Spststatic void	 adv_set_chip_ih(struct adv_softc *adv, u_int16_t ins_code);
23919370Spst#if UNUSED
24019370Spststatic u_int8_t  adv_get_chip_scsi_ctrl(struct adv_softc *adv);
24119370Spst#endif
24298944Sobrien
24398944Sobrien/* Queue handling and execution */
24419370Spststatic __inline int
24519370Spst		 adv_sgcount_to_qcount(int sgcount);
24619370Spst
24719370Spststatic __inline int
24819370Spstadv_sgcount_to_qcount(int sgcount)
24919370Spst{
25019370Spst	int	n_sg_list_qs;
25119370Spst
25219370Spst	n_sg_list_qs = ((sgcount - 1) / ADV_SG_LIST_PER_Q);
25319370Spst	if (((sgcount - 1) % ADV_SG_LIST_PER_Q) != 0)
25419370Spst		n_sg_list_qs++;
25519370Spst	return (n_sg_list_qs + 1);
25619370Spst}
25719370Spst
25819370Spststatic void	 adv_get_q_info(struct adv_softc *adv, u_int16_t s_addr,
25998944Sobrien				u_int16_t *inbuf, int words);
26046283Sdfrstatic u_int	 adv_get_num_free_queues(struct adv_softc *adv, u_int8_t n_qs);
26119370Spststatic u_int8_t  adv_alloc_free_queues(struct adv_softc *adv,
26219370Spst				       u_int8_t free_q_head, u_int8_t n_free_q);
26319370Spststatic u_int8_t  adv_alloc_free_queue(struct adv_softc *adv,
26419370Spst				      u_int8_t free_q_head);
26519370Spststatic int	 adv_send_scsi_queue(struct adv_softc *adv,
26698944Sobrien				     struct adv_scsi_q *scsiq,
26798944Sobrien				     u_int8_t n_q_required);
26898944Sobrienstatic void	 adv_put_ready_sg_list_queue(struct adv_softc *adv,
26919370Spst					     struct adv_scsi_q *scsiq,
27098944Sobrien					     u_int q_no);
27119370Spststatic void	 adv_put_ready_queue(struct adv_softc *adv,
27219370Spst				     struct adv_scsi_q *scsiq, u_int q_no);
27398944Sobrienstatic void	 adv_put_scsiq(struct adv_softc *adv, u_int16_t s_addr,
27419370Spst			       u_int16_t *buffer, int words);
27519370Spst
27619370Spst/* Messages */
27719370Spststatic void	 adv_handle_extmsg_in(struct adv_softc *adv,
27819370Spst				      u_int16_t halt_q_addr, u_int8_t q_cntl,
27998944Sobrien				      target_bit_vector target_id,
28098944Sobrien				      int tid);
28198944Sobrienstatic void	 adv_msgout_sdtr(struct adv_softc *adv, u_int8_t sdtr_period,
28298944Sobrien				 u_int8_t sdtr_offset);
28398944Sobrienstatic void	 adv_set_sdtr_reg_at_id(struct adv_softc *adv, int id,
28498944Sobrien					u_int8_t sdtr_data);
285242936Semaste
286242936Semaste
28719370Spst/* Exported functions first */
28819370Spst
28919370Spstvoid
29019370Spstadvasync(void *callback_arg, u_int32_t code, struct cam_path *path, void *arg)
29119370Spst{
29219370Spst	struct adv_softc *adv;
29319370Spst
29419370Spst	adv = (struct adv_softc *)callback_arg;
29519370Spst	switch (code) {
29619370Spst	case AC_FOUND_DEVICE:
29719370Spst	{
29819370Spst		struct ccb_getdev *cgd;
29919370Spst		target_bit_vector target_mask;
30019370Spst		int num_entries;
30198944Sobrien        	caddr_t match;
30298944Sobrien		struct adv_quirk_entry *entry;
30319370Spst		struct adv_target_transinfo* tinfo;
30498944Sobrien
30519370Spst		cgd = (struct ccb_getdev *)arg;
30619370Spst
30798944Sobrien		target_mask = ADV_TID_TO_TARGET_MASK(cgd->ccb_h.target_id);
30898944Sobrien
30998944Sobrien		num_entries = sizeof(adv_quirk_table)/sizeof(*adv_quirk_table);
31098944Sobrien		match = cam_quirkmatch((caddr_t)&cgd->inq_data,
31198944Sobrien				       (caddr_t)adv_quirk_table,
31298944Sobrien				       num_entries, sizeof(*adv_quirk_table),
31398944Sobrien				       scsi_inquiry_match);
31419370Spst
31519370Spst		if (match == NULL)
31646283Sdfr			panic("advasync: device didn't match wildcard entry!!");
31719370Spst
31819370Spst		entry = (struct adv_quirk_entry *)match;
31919370Spst
32019370Spst		if (adv->bug_fix_control & ADV_BUG_FIX_ASYN_USE_SYN) {
32119370Spst			if ((entry->quirks & ADV_QUIRK_FIX_ASYN_XFER_ALWAYS)!=0)
32219370Spst				adv->fix_asyn_xfer_always |= target_mask;
32346283Sdfr			else
32419370Spst				adv->fix_asyn_xfer_always &= ~target_mask;
32519370Spst			/*
32619370Spst			 * We start out life with all bits set and clear them
32746283Sdfr			 * after we've determined that the fix isn't necessary.
32819370Spst			 * It may well be that we've already cleared a target
32919370Spst			 * before the full inquiry session completes, so don't
33019370Spst			 * gratuitously set a target bit even if it has this
33119370Spst			 * quirk.  But, if the quirk exonerates a device, clear
33219370Spst			 * the bit now.
33319370Spst			 */
33419370Spst			if ((entry->quirks & ADV_QUIRK_FIX_ASYN_XFER) == 0)
33519370Spst				adv->fix_asyn_xfer &= ~target_mask;
33619370Spst		}
33719370Spst		/*
33819370Spst		 * Reset our sync settings now that we've determined
33919370Spst		 * what quirks are in effect for the device.
34019370Spst		 */
34119370Spst		tinfo = &adv->tinfo[cgd->ccb_h.target_id];
34219370Spst		adv_set_syncrate(adv, cgd->ccb_h.path,
34319370Spst				 cgd->ccb_h.target_id,
34419370Spst				 tinfo->current.period,
34519370Spst				 tinfo->current.offset,
34619370Spst				 ADV_TRANS_CUR);
34719370Spst		break;
34819370Spst	}
34946283Sdfr	case AC_LOST_DEVICE:
35019370Spst	{
35119370Spst		u_int target_mask;
35219370Spst
35398944Sobrien		if (adv->bug_fix_control & ADV_BUG_FIX_ASYN_USE_SYN) {
35419370Spst			target_mask = 0x01 << xpt_path_target_id(path);
35519370Spst			adv->fix_asyn_xfer |= target_mask;
35619370Spst		}
35719370Spst
35819370Spst		/*
35919370Spst		 * Revert to async transfers
36019370Spst		 * for the next device.
36119370Spst		 */
36219370Spst		adv_set_syncrate(adv, /*path*/NULL,
36319370Spst				 xpt_path_target_id(path),
36446283Sdfr				 /*period*/0,
36519370Spst				 /*offset*/0,
36619370Spst				 ADV_TRANS_GOAL|ADV_TRANS_CUR);
36746283Sdfr	}
36819370Spst	default:
36919370Spst		break;
37019370Spst	}
37119370Spst}
37219370Spst
37319370Spstvoid
37419370Spstadv_set_bank(struct adv_softc *adv, u_int8_t bank)
37519370Spst{
37619370Spst	u_int8_t control;
37719370Spst
37819370Spst	/*
37998944Sobrien	 * Start out with the bank reset to 0
38098944Sobrien	 */
38198944Sobrien	control = ADV_INB(adv, ADV_CHIP_CTRL)
38219370Spst		  &  (~(ADV_CC_SINGLE_STEP | ADV_CC_TEST
38398944Sobrien			| ADV_CC_DIAG | ADV_CC_SCSI_RESET
38498944Sobrien			| ADV_CC_CHIP_RESET | ADV_CC_BANK_ONE));
38519370Spst	if (bank == 1) {
38619370Spst		control |= ADV_CC_BANK_ONE;
38719370Spst	} else if (bank == 2) {
38819370Spst		control |= ADV_CC_DIAG | ADV_CC_BANK_ONE;
38919370Spst	}
39019370Spst	ADV_OUTB(adv, ADV_CHIP_CTRL, control);
39146283Sdfr}
39219370Spst
39319370Spstu_int8_t
39419370Spstadv_read_lram_8(struct adv_softc *adv, u_int16_t addr)
39546283Sdfr{
39619370Spst	u_int8_t   byte_data;
39719370Spst	u_int16_t  word_data;
39819370Spst
39919370Spst	/*
40019370Spst	 * LRAM is accessed on 16bit boundaries.
40119370Spst	 */
40219370Spst	ADV_OUTW(adv, ADV_LRAM_ADDR, addr & 0xFFFE);
40346283Sdfr	word_data = ADV_INW(adv, ADV_LRAM_DATA);
40419370Spst	if (addr & 1) {
40519370Spst#if BYTE_ORDER == BIG_ENDIAN
40619370Spst		byte_data = (u_int8_t)(word_data & 0xFF);
40719370Spst#else
40819370Spst		byte_data = (u_int8_t)((word_data >> 8) & 0xFF);
40919370Spst#endif
41019370Spst	} else {
41119370Spst#if BYTE_ORDER == BIG_ENDIAN
41219370Spst		byte_data = (u_int8_t)((word_data >> 8) & 0xFF);
41346283Sdfr#else
41419370Spst		byte_data = (u_int8_t)(word_data & 0xFF);
41519370Spst#endif
41619370Spst	}
41798944Sobrien	return (byte_data);
41898944Sobrien}
41998944Sobrien
42098944Sobrienvoid
42198944Sobrienadv_write_lram_8(struct adv_softc *adv, u_int16_t addr, u_int8_t value)
42219370Spst{
42398944Sobrien	u_int16_t word_data;
42419370Spst
42519370Spst	word_data = adv_read_lram_16(adv, addr & 0xFFFE);
42619370Spst	if (addr & 1) {
42719370Spst		word_data &= 0x00FF;
42819370Spst		word_data |= (((u_int8_t)value << 8) & 0xFF00);
42919370Spst	} else {
43046283Sdfr		word_data &= 0xFF00;
43119370Spst		word_data |= ((u_int8_t)value & 0x00FF);
43219370Spst	}
43319370Spst	adv_write_lram_16(adv, addr & 0xFFFE, word_data);
43446283Sdfr}
43519370Spst
43619370Spst
43719370Spstu_int16_t
43846283Sdfradv_read_lram_16(struct adv_softc *adv, u_int16_t addr)
43998944Sobrien{
44098944Sobrien	ADV_OUTW(adv, ADV_LRAM_ADDR, addr);
44198944Sobrien	return (ADV_INW(adv, ADV_LRAM_DATA));
44298944Sobrien}
44398944Sobrien
44498944Sobrienvoid
44546283Sdfradv_write_lram_16(struct adv_softc *adv, u_int16_t addr, u_int16_t value)
44619370Spst{
44719370Spst	ADV_OUTW(adv, ADV_LRAM_ADDR, addr);
44819370Spst	ADV_OUTW(adv, ADV_LRAM_DATA, value);
44919370Spst}
45019370Spst
45119370Spst/*
45219370Spst * Determine if there is a board at "iobase" by looking
45319370Spst * for the AdvanSys signatures.  Return 1 if a board is
45419370Spst * found, 0 otherwise.
45519370Spst */
45698944Sobrienint
45798944Sobrienadv_find_signature(bus_space_tag_t tag, bus_space_handle_t bsh)
45819370Spst{
45919370Spst	u_int16_t signature;
46019370Spst
46198944Sobrien	if (bus_space_read_1(tag, bsh, ADV_SIGNATURE_BYTE) == ADV_1000_ID1B) {
46298944Sobrien		signature = bus_space_read_2(tag, bsh, ADV_SIGNATURE_WORD);
46398944Sobrien		if ((signature == ADV_1000_ID0W)
46498944Sobrien		 || (signature == ADV_1000_ID0W_FIX))
46598944Sobrien			return (1);
46698944Sobrien	}
46798944Sobrien	return (0);
46898944Sobrien}
46998944Sobrien
47098944Sobrienvoid
47198944Sobrienadv_lib_init(struct adv_softc *adv)
47298944Sobrien{
47398944Sobrien	if ((adv->type & ADV_ULTRA) != 0) {
47498944Sobrien		adv->sdtr_period_tbl = adv_sdtr_period_tbl_ultra;
47598944Sobrien		adv->sdtr_period_tbl_size = sizeof(adv_sdtr_period_tbl_ultra);
47698944Sobrien	} else {
47798944Sobrien		adv->sdtr_period_tbl = adv_sdtr_period_tbl;
47898944Sobrien		adv->sdtr_period_tbl_size = sizeof(adv_sdtr_period_tbl);
47998944Sobrien	}
48098944Sobrien}
48198944Sobrien
48298944Sobrienu_int16_t
48319370Spstadv_get_eeprom_config(struct adv_softc *adv, struct
48419370Spst		      adv_eeprom_config  *eeprom_config)
48519370Spst{
48619370Spst	u_int16_t	sum;
48719370Spst	u_int16_t	*wbuf;
48819370Spst	u_int8_t	cfg_beg;
48919370Spst	u_int8_t	cfg_end;
49019370Spst	u_int8_t	s_addr;
49198944Sobrien
49298944Sobrien	wbuf = (u_int16_t *)eeprom_config;
49319370Spst	sum = 0;
49419370Spst
49598944Sobrien	for (s_addr = 0; s_addr < 2; s_addr++, wbuf++) {
49646283Sdfr		*wbuf = adv_read_eeprom_16(adv, s_addr);
49798944Sobrien		sum += *wbuf;
49819370Spst	}
49919370Spst
50019370Spst	if (adv->type & ADV_VL) {
50119370Spst		cfg_beg = ADV_EEPROM_CFG_BEG_VL;
50219370Spst		cfg_end = ADV_EEPROM_MAX_ADDR_VL;
50319370Spst	} else {
50419370Spst		cfg_beg = ADV_EEPROM_CFG_BEG;
50519370Spst		cfg_end = ADV_EEPROM_MAX_ADDR;
50619370Spst	}
50719370Spst
50898944Sobrien	for (s_addr = cfg_beg; s_addr <= (cfg_end - 1); s_addr++, wbuf++) {
50919370Spst		*wbuf = adv_read_eeprom_16(adv, s_addr);
51019370Spst		sum += *wbuf;
51119370Spst#if ADV_DEBUG_EEPROM
512130803Smarcel		printf("Addr 0x%x: 0x%04x\n", s_addr, *wbuf);
51319370Spst#endif
51419370Spst	}
51519370Spst	*wbuf = adv_read_eeprom_16(adv, s_addr);
51646283Sdfr	return (sum);
51798944Sobrien}
51898944Sobrien
51998944Sobrienint
52098944Sobrienadv_set_eeprom_config(struct adv_softc *adv,
52198944Sobrien		      struct adv_eeprom_config *eeprom_config)
52298944Sobrien{
52398944Sobrien	int	retry;
52498944Sobrien
52598944Sobrien	retry = 0;
52698944Sobrien	while (1) {
52798944Sobrien		if (adv_set_eeprom_config_once(adv, eeprom_config) == 0) {
52898944Sobrien			break;
52998944Sobrien		}
53046283Sdfr		if (++retry > ADV_EEPROM_MAX_RETRY) {
53198944Sobrien			break;
53298944Sobrien		}
53398944Sobrien	}
53498944Sobrien	return (retry > ADV_EEPROM_MAX_RETRY);
53598944Sobrien}
53698944Sobrien
53798944Sobrienint
53898944Sobrienadv_reset_chip_and_scsi_bus(struct adv_softc *adv)
53998944Sobrien{
54098944Sobrien	adv_stop_chip(adv);
54198944Sobrien	ADV_OUTB(adv, ADV_CHIP_CTRL,
54298944Sobrien		 ADV_CC_CHIP_RESET | ADV_CC_SCSI_RESET | ADV_CC_HALT);
54398944Sobrien	DELAY(200 * 1000);
54498944Sobrien
54598944Sobrien	adv_set_chip_ih(adv, ADV_INS_RFLAG_WTM);
54698944Sobrien	adv_set_chip_ih(adv, ADV_INS_HALT);
54798944Sobrien
54898944Sobrien	ADV_OUTB(adv, ADV_CHIP_CTRL, ADV_CC_CHIP_RESET | ADV_CC_HALT);
54998944Sobrien	ADV_OUTB(adv, ADV_CHIP_CTRL, ADV_CC_HALT);
55046283Sdfr	DELAY(200 * 1000);
55198944Sobrien	return (adv_is_chip_halted(adv));
55246283Sdfr}
55398944Sobrien
55419370Spstint
55519370Spstadv_test_external_lram(struct adv_softc* adv)
55698944Sobrien{
55719370Spst	u_int16_t	q_addr;
55819370Spst	u_int16_t	saved_value;
55919370Spst	int		success;
56019370Spst
56119370Spst	success = 0;
56246283Sdfr
56346283Sdfr	q_addr = ADV_QNO_TO_QADDR(241);
56446283Sdfr	saved_value = adv_read_lram_16(adv, q_addr);
56546283Sdfr	if (adv_write_and_verify_lram_16(adv, q_addr, 0x55AA) == 0) {
56698944Sobrien		success = 1;
56798944Sobrien		adv_write_lram_16(adv, q_addr, saved_value);
56898944Sobrien	}
56998944Sobrien	return (success);
57098944Sobrien}
57198944Sobrien
57298944Sobrien
57398944Sobrienint
57498944Sobrienadv_init_lram_and_mcode(struct adv_softc *adv)
57598944Sobrien{
57698944Sobrien	u_int32_t	retval;
57798944Sobrien
57898944Sobrien	adv_disable_interrupt(adv);
57946283Sdfr
58098944Sobrien	adv_init_lram(adv);
58198944Sobrien
58298944Sobrien	retval = adv_load_microcode(adv, 0, (u_int16_t *)adv_mcode,
58398944Sobrien				    adv_mcode_size);
58498944Sobrien	if (retval != adv_mcode_chksum) {
58598944Sobrien		printf("adv%d: Microcode download failed checksum!\n",
58698944Sobrien		       adv->unit);
58746283Sdfr		return (1);
58846283Sdfr	}
58998944Sobrien
590130803Smarcel	if (adv_init_microcode_var(adv) != 0)
591130803Smarcel		return (1);
592130803Smarcel
59346283Sdfr	adv_enable_interrupt(adv);
59419370Spst	return (0);
595}
596
597u_int8_t
598adv_get_chip_irq(struct adv_softc *adv)
599{
600	u_int16_t	cfg_lsw;
601	u_int8_t	chip_irq;
602
603	cfg_lsw = ADV_INW(adv, ADV_CONFIG_LSW);
604
605	if ((adv->type & ADV_VL) != 0) {
606		chip_irq = (u_int8_t)(((cfg_lsw >> 2) & 0x07));
607		if ((chip_irq == 0) ||
608		    (chip_irq == 4) ||
609		    (chip_irq == 7)) {
610			return (0);
611		}
612		return (chip_irq + (ADV_MIN_IRQ_NO - 1));
613	}
614	chip_irq = (u_int8_t)(((cfg_lsw >> 2) & 0x03));
615	if (chip_irq == 3)
616		chip_irq += 2;
617	return (chip_irq + ADV_MIN_IRQ_NO);
618}
619
/*
 * Program the chip's IRQ selection.  VL cards require the two-step
 * write-then-toggle sequence below; ISA cards use a simple 2-bit
 * field.  Returns the IRQ the chip reports after programming, so the
 * caller can verify the setting took effect.
 */
u_int8_t
adv_set_chip_irq(struct adv_softc *adv, u_int8_t irq_no)
{
	u_int16_t	cfg_lsw;

	if ((adv->type & ADV_VL) != 0) {
		/* Translate the IRQ to the chip's encoding; 0 disables. */
		if (irq_no != 0) {
			if ((irq_no < ADV_MIN_IRQ_NO)
			 || (irq_no > ADV_MAX_IRQ_NO)) {
				irq_no = 0;
			} else {
				irq_no -= ADV_MIN_IRQ_NO - 1;
			}
		}
		/* First pass: clear the field and set bit 0x0010. */
		cfg_lsw = ADV_INW(adv, ADV_CONFIG_LSW) & 0xFFE3;
		cfg_lsw |= 0x0010;
		ADV_OUTW(adv, ADV_CONFIG_LSW, cfg_lsw);
		adv_toggle_irq_act(adv);

		/* Second pass: write the actual IRQ encoding. */
		cfg_lsw = ADV_INW(adv, ADV_CONFIG_LSW) & 0xFFE0;
		cfg_lsw |= (irq_no & 0x07) << 2;
		ADV_OUTW(adv, ADV_CONFIG_LSW, cfg_lsw);
		adv_toggle_irq_act(adv);
	} else if ((adv->type & ADV_ISA) != 0) {
		/* ISA: IRQ 15 encodes as 13's slot. */
		if (irq_no == 15)
			irq_no -= 2;
		irq_no -= ADV_MIN_IRQ_NO;
		cfg_lsw = ADV_INW(adv, ADV_CONFIG_LSW) & 0xFFF3;
		cfg_lsw |= (irq_no & 0x03) << 2;
		ADV_OUTW(adv, ADV_CONFIG_LSW, cfg_lsw);
	}
	return (adv_get_chip_irq(adv));
}
653
654void
655adv_set_chip_scsiid(struct adv_softc *adv, int new_id)
656{
657	u_int16_t cfg_lsw;
658
659	cfg_lsw = ADV_INW(adv, ADV_CONFIG_LSW);
660	if (ADV_CONFIG_SCSIID(cfg_lsw) == new_id)
661		return;
662    	cfg_lsw &= ~ADV_CFG_LSW_SCSIID;
663	cfg_lsw |= (new_id & ADV_MAX_TID) << ADV_CFG_LSW_SCSIID_SHIFT;
664	ADV_OUTW(adv, ADV_CONFIG_LSW, cfg_lsw);
665}
666
/*
 * Prepare and submit a SCSI request to the firmware's queue machinery.
 *
 * Handles renegotiation before REQUEST SENSE, scatter/gather queue
 * accounting, the "async transfer with offset one" workaround for
 * quirky devices, and the odd-address read padding workaround.
 * Returns 0 when the request was queued, 1 when it could not be
 * (insufficient free firmware queues and not urgent).
 */
int
adv_execute_scsi_queue(struct adv_softc *adv, struct adv_scsi_q *scsiq,
		       u_int32_t datalen)
{
	struct		adv_target_transinfo* tinfo;
	u_int32_t	*p_data_addr;
	u_int32_t	*p_data_bcount;
	int		disable_syn_offset_one_fix;
	int		retval;
	u_int		n_q_required;
	u_int32_t	addr;
	u_int8_t	sg_entry_cnt;
	u_int8_t	target_ix;
	u_int8_t	sg_entry_cnt_minus_one;
	u_int8_t	tid_no;

	scsiq->q1.q_no = 0;
	retval = 1;  /* Default to error case */
	target_ix = scsiq->q2.target_ix;
	tid_no = ADV_TIX_TO_TID(target_ix);
	tinfo = &adv->tinfo[tid_no];

	if (scsiq->cdbptr[0] == REQUEST_SENSE) {
		/* Renegotiate if appropriate. */
		adv_set_syncrate(adv, /*struct cam_path */NULL,
				 tid_no, /*period*/0, /*offset*/0,
				 ADV_TRANS_CUR);
		if (tinfo->current.period != tinfo->goal.period) {
			adv_msgout_sdtr(adv, tinfo->goal.period,
					tinfo->goal.offset);
			scsiq->q1.cntl |= (QC_MSG_OUT | QC_URGENT);
		}
	}

	if ((scsiq->q1.cntl & QC_SG_HEAD) != 0) {
		/* S/G request: account for the extra S/G list queues. */
		sg_entry_cnt = scsiq->sg_head->entry_cnt;
		sg_entry_cnt_minus_one = sg_entry_cnt - 1;

#ifdef DIAGNOSTIC
		if (sg_entry_cnt <= 1)
			panic("adv_execute_scsi_queue: Queue "
			      "with QC_SG_HEAD set but %d segs.", sg_entry_cnt);

		if (sg_entry_cnt > ADV_MAX_SG_LIST)
			panic("adv_execute_scsi_queue: "
			      "Queue with too many segs.");

		/* These bus types require 4-byte aligned segment ends. */
		if (adv->type & (ADV_ISA | ADV_VL | ADV_EISA)) {
			int i;

			for (i = 0; i < sg_entry_cnt_minus_one; i++) {
				addr = scsiq->sg_head->sg_list[i].addr +
				       scsiq->sg_head->sg_list[i].bytes;

				if ((addr & 0x0003) != 0)
					panic("adv_execute_scsi_queue: SG "
					      "with odd address or byte count");
			}
		}
#endif
		/* The fix-ups below operate on the final segment. */
		p_data_addr =
		    &scsiq->sg_head->sg_list[sg_entry_cnt_minus_one].addr;
		p_data_bcount =
		    &scsiq->sg_head->sg_list[sg_entry_cnt_minus_one].bytes;

		n_q_required = adv_sgcount_to_qcount(sg_entry_cnt);
		scsiq->sg_head->queue_cnt = n_q_required - 1;
	} else {
		/* Single-segment request: one queue suffices. */
		p_data_addr = &scsiq->q1.data_addr;
		p_data_bcount = &scsiq->q1.data_cnt;
		n_q_required = 1;
	}

	disable_syn_offset_one_fix = FALSE;

	/*
	 * The offset-one async fix is suppressed for short transfers
	 * and for commands whose transfer length the target, not the
	 * host, effectively controls.
	 */
	if ((adv->fix_asyn_xfer & scsiq->q1.target_id) != 0
	 && (adv->fix_asyn_xfer_always & scsiq->q1.target_id) == 0) {

		if (datalen != 0) {
			if (datalen < 512) {
				disable_syn_offset_one_fix = TRUE;
			} else {
				if (scsiq->cdbptr[0] == INQUIRY
				 || scsiq->cdbptr[0] == REQUEST_SENSE
				 || scsiq->cdbptr[0] == READ_CAPACITY
				 || scsiq->cdbptr[0] == MODE_SELECT_6
				 || scsiq->cdbptr[0] == MODE_SENSE_6
				 || scsiq->cdbptr[0] == MODE_SENSE_10
				 || scsiq->cdbptr[0] == MODE_SELECT_10
				 || scsiq->cdbptr[0] == READ_TOC) {
					disable_syn_offset_one_fix = TRUE;
				}
			}
		}
	}

	if (disable_syn_offset_one_fix) {
		/* Tagged queuing is incompatible with the workaround. */
		scsiq->q2.tag_code &=
		    ~(MSG_SIMPLE_Q_TAG|MSG_HEAD_OF_Q_TAG|MSG_ORDERED_Q_TAG);
		scsiq->q2.tag_code |= (ADV_TAG_FLAG_DISABLE_ASYN_USE_SYN_FIX
				     | ADV_TAG_FLAG_DISABLE_DISCONNECT);
	}

	/*
	 * Odd-aligned read workaround: shave the unaligned tail bytes
	 * off the final segment; the firmware transfers them separately
	 * (ADV_TAG_FLAG_EXTRA_BYTES).
	 */
	if ((adv->bug_fix_control & ADV_BUG_FIX_IF_NOT_DWB) != 0
	 && (scsiq->cdbptr[0] == READ_10 || scsiq->cdbptr[0] == READ_6)) {
		u_int8_t extra_bytes;

		addr = *p_data_addr + *p_data_bcount;
		extra_bytes = addr & 0x0003;
		if (extra_bytes != 0
		 && ((scsiq->q1.cntl & QC_SG_HEAD) != 0
		  || (scsiq->q1.data_cnt & 0x01FF) == 0)) {
			scsiq->q2.tag_code |= ADV_TAG_FLAG_EXTRA_BYTES;
			scsiq->q1.extra_bytes = extra_bytes;
			*p_data_bcount -= extra_bytes;
		}
	}

	/* Submit only if enough firmware queues are free (or urgent). */
	if ((adv_get_num_free_queues(adv, n_q_required) >= n_q_required)
	 || ((scsiq->q1.cntl & QC_URGENT) != 0))
		retval = adv_send_scsi_queue(adv, scsiq, n_q_required);

	return (retval);
}
791
792
/*
 * Copy a completed request's status information out of the firmware
 * queue at q_addr into *scsiq.  Returns the count of S/G queues the
 * request consumed (packed in the high byte of the CNTL word) so the
 * caller can free them.
 */
u_int8_t
adv_copy_lram_doneq(struct adv_softc *adv, u_int16_t q_addr,
		    struct adv_q_done_info *scsiq, u_int32_t max_dma_count)
{
	u_int16_t val;
	u_int8_t  sg_queue_cnt;

	/* Bulk-copy the d2/d3 done-info region of the queue. */
	adv_get_q_info(adv, q_addr + ADV_SCSIQ_DONE_INFO_BEG,
		       (u_int16_t *)scsiq,
		       (sizeof(scsiq->d2) + sizeof(scsiq->d3)) / 2);

#if BYTE_ORDER == BIG_ENDIAN
	adv_adj_endian_qdone_info(scsiq);
#endif

	/* Each word below packs two byte-sized fields. */
	val = adv_read_lram_16(adv, q_addr + ADV_SCSIQ_B_STATUS);
	scsiq->q_status = val & 0xFF;
	scsiq->q_no = (val >> 8) & 0XFF;

	val = adv_read_lram_16(adv, q_addr + ADV_SCSIQ_B_CNTL);
	scsiq->cntl = val & 0xFF;
	sg_queue_cnt = (val >> 8) & 0xFF;

	val = adv_read_lram_16(adv,q_addr + ADV_SCSIQ_B_SENSE_LEN);
	scsiq->sense_len = val & 0xFF;
	scsiq->extra_bytes = (val >> 8) & 0xFF;

	scsiq->remain_bytes =
	    adv_read_lram_32(adv, q_addr + ADV_SCSIQ_DW_REMAIN_XFER_CNT);
	/*
	 * XXX Is this just a safeguard or will the counter really
	 * have bogus upper bits?
	 */
	scsiq->remain_bytes &= max_dma_count;

	return (sg_queue_cnt);
}
830
831int
832adv_start_chip(struct adv_softc *adv)
833{
834	ADV_OUTB(adv, ADV_CHIP_CTRL, 0);
835	if ((ADV_INW(adv, ADV_CHIP_STATUS) & ADV_CSW_HALTED) != 0)
836		return (0);
837	return (1);
838}
839
840int
841adv_stop_execution(struct adv_softc *adv)
842{
843	int count;
844
845	count = 0;
846	if (adv_read_lram_8(adv, ADV_STOP_CODE_B) == 0) {
847		adv_write_lram_8(adv, ADV_STOP_CODE_B,
848				 ADV_STOP_REQ_RISC_STOP);
849		do {
850			if (adv_read_lram_8(adv, ADV_STOP_CODE_B) &
851				ADV_STOP_ACK_RISC_STOP) {
852				return (1);
853			}
854			DELAY(1000);
855		} while (count++ < 20);
856	}
857	return (0);
858}
859
860int
861adv_is_chip_halted(struct adv_softc *adv)
862{
863	if ((ADV_INW(adv, ADV_CHIP_STATUS) & ADV_CSW_HALTED) != 0) {
864		if ((ADV_INB(adv, ADV_CHIP_CTRL) & ADV_CC_HALT) != 0) {
865			return (1);
866		}
867	}
868	return (0);
869}
870
871/*
872 * XXX The numeric constants and the loops in this routine
873 * need to be documented.
874 */
/*
 * Acknowledge a chip interrupt.  Waits (bounded) for the RISC to
 * drop its interrupt-generation flag, sets the host ACK flag, then
 * writes the ACK command until the pending bit clears (at most four
 * retries).  The XXX above stands: the 0x7FFF and 3 loop bounds are
 * inherited from the vendor driver and otherwise undocumented.
 */
void
adv_ack_interrupt(struct adv_softc *adv)
{
	u_int8_t	host_flag;
	u_int8_t	risc_flag;
	int		loop;

	/* Wait for the RISC to finish raising the interrupt. */
	loop = 0;
	do {
		risc_flag = adv_read_lram_8(adv, ADVV_RISC_FLAG_B);
		if (loop++ > 0x7FFF) {
			break;
		}
	} while ((risc_flag & ADV_RISC_FLAG_GEN_INT) != 0);

	/* Tell the firmware we are acknowledging. */
	host_flag = adv_read_lram_8(adv, ADVV_HOST_FLAG_B);
	adv_write_lram_8(adv, ADVV_HOST_FLAG_B,
			 host_flag | ADV_HOST_FLAG_ACK_INT);

	ADV_OUTW(adv, ADV_CHIP_STATUS, ADV_CIW_INT_ACK);
	loop = 0;
	while (ADV_INW(adv, ADV_CHIP_STATUS) & ADV_CSW_INT_PENDING) {
		ADV_OUTW(adv, ADV_CHIP_STATUS, ADV_CIW_INT_ACK);
		if (loop++ > 3) {
			break;
		}
	}

	/* Restore the original host flag byte. */
	adv_write_lram_8(adv, ADVV_HOST_FLAG_B, host_flag);
}
905
906/*
907 * Handle all conditions that may halt the chip waiting
908 * for us to intervene.
909 */
910void
911adv_isr_chip_halted(struct adv_softc *adv)
912{
913	u_int16_t	  int_halt_code;
914	u_int16_t	  halt_q_addr;
915	target_bit_vector target_mask;
916	target_bit_vector scsi_busy;
917	u_int8_t	  halt_qp;
918	u_int8_t	  target_ix;
919	u_int8_t	  q_cntl;
920	u_int8_t	  tid_no;
921
922	int_halt_code = adv_read_lram_16(adv, ADVV_HALTCODE_W);
923	halt_qp = adv_read_lram_8(adv, ADVV_CURCDB_B);
924	halt_q_addr = ADV_QNO_TO_QADDR(halt_qp);
925	target_ix = adv_read_lram_8(adv, halt_q_addr + ADV_SCSIQ_B_TARGET_IX);
926	q_cntl = adv_read_lram_8(adv, halt_q_addr + ADV_SCSIQ_B_CNTL);
927	tid_no = ADV_TIX_TO_TID(target_ix);
928	target_mask = ADV_TID_TO_TARGET_MASK(tid_no);
929	if (int_halt_code == ADV_HALT_DISABLE_ASYN_USE_SYN_FIX) {
930		/*
931		 * Temporarily disable the async fix by removing
932		 * this target from the list of affected targets,
933		 * setting our async rate, and then putting us
934		 * back into the mask.
935		 */
936		adv->fix_asyn_xfer &= ~target_mask;
937		adv_set_syncrate(adv, /*struct cam_path */NULL,
938				 tid_no, /*period*/0, /*offset*/0,
939				 ADV_TRANS_ACTIVE);
940		adv->fix_asyn_xfer |= target_mask;
941	} else if (int_halt_code == ADV_HALT_ENABLE_ASYN_USE_SYN_FIX) {
942		adv_set_syncrate(adv, /*struct cam_path */NULL,
943				 tid_no, /*period*/0, /*offset*/0,
944				 ADV_TRANS_ACTIVE);
945	} else if (int_halt_code == ADV_HALT_EXTMSG_IN) {
946		adv_handle_extmsg_in(adv, halt_q_addr, q_cntl,
947				     target_mask, tid_no);
948	} else if (int_halt_code == ADV_HALT_CHK_CONDITION) {
949		struct	 adv_target_transinfo* tinfo;
950		union	 ccb *ccb;
951		u_int8_t tag_code;
952		u_int8_t q_status;
953
954		tinfo = &adv->tinfo[tid_no];
955		q_cntl |= QC_REQ_SENSE;
956
957		/* Renegotiate if appropriate. */
958		adv_set_syncrate(adv, /*struct cam_path */NULL,
959				 tid_no, /*period*/0, /*offset*/0,
960				 ADV_TRANS_CUR);
961		if (tinfo->current.period != tinfo->goal.period) {
962			adv_msgout_sdtr(adv, tinfo->goal.period,
963					tinfo->goal.offset);
964			q_cntl |= QC_MSG_OUT;
965		}
966		adv_write_lram_8(adv, halt_q_addr + ADV_SCSIQ_B_CNTL, q_cntl);
967
968		/* Don't tag request sense commands */
969		tag_code = adv_read_lram_8(adv,
970					   halt_q_addr + ADV_SCSIQ_B_TAG_CODE);
971		tag_code &=
972		    ~(MSG_SIMPLE_Q_TAG|MSG_HEAD_OF_Q_TAG|MSG_ORDERED_Q_TAG);
973
974		if ((adv->fix_asyn_xfer & target_mask) != 0
975		 && (adv->fix_asyn_xfer_always & target_mask) == 0) {
976			tag_code |= (ADV_TAG_FLAG_DISABLE_DISCONNECT
977				 | ADV_TAG_FLAG_DISABLE_ASYN_USE_SYN_FIX);
978		}
979		adv_write_lram_8(adv, halt_q_addr + ADV_SCSIQ_B_TAG_CODE,
980				 tag_code);
981		q_status = adv_read_lram_8(adv,
982					   halt_q_addr + ADV_SCSIQ_B_STATUS);
983		q_status |= (QS_READY | QS_BUSY);
984		adv_write_lram_8(adv, halt_q_addr + ADV_SCSIQ_B_STATUS,
985				 q_status);
986		/*
987		 * Freeze the devq until we can handle the sense condition.
988		 */
989		ccb = (union ccb *) adv_read_lram_32(adv, halt_q_addr
990							 + ADV_SCSIQ_D_CCBPTR);
991		xpt_freeze_devq(ccb->ccb_h.path, /*count*/1);
992		ccb->ccb_h.status |= CAM_DEV_QFRZN;
993		adv_abort_ccb(adv, tid_no, ADV_TIX_TO_LUN(target_ix),
994			      /*ccb*/NULL, CAM_REQUEUE_REQ,
995			      /*queued_only*/TRUE);
996		scsi_busy = adv_read_lram_8(adv, ADVV_SCSIBUSY_B);
997		scsi_busy &= ~target_mask;
998		adv_write_lram_8(adv, ADVV_SCSIBUSY_B, scsi_busy);
999	} else if (int_halt_code == ADV_HALT_SDTR_REJECTED) {
1000		struct	ext_msg out_msg;
1001
1002		adv_read_lram_16_multi(adv, ADVV_MSGOUT_BEG,
1003				       (u_int16_t *) &out_msg,
1004				       sizeof(out_msg)/2);
1005
1006		if ((out_msg.msg_type == MSG_EXTENDED)
1007		 && (out_msg.msg_len == MSG_EXT_SDTR_LEN)
1008		 && (out_msg.msg_req == MSG_EXT_SDTR)) {
1009
1010			/* Revert to Async */
1011			adv_set_syncrate(adv, /*struct cam_path */NULL,
1012					 tid_no, /*period*/0, /*offset*/0,
1013					 ADV_TRANS_GOAL|ADV_TRANS_ACTIVE);
1014		}
1015		q_cntl &= ~QC_MSG_OUT;
1016		adv_write_lram_8(adv, halt_q_addr + ADV_SCSIQ_B_CNTL, q_cntl);
1017	} else if (int_halt_code == ADV_HALT_SS_QUEUE_FULL) {
1018		u_int8_t scsi_status;
1019		union ccb *ccb;
1020
1021		scsi_status = adv_read_lram_8(adv, halt_q_addr
1022					      + ADV_SCSIQ_SCSI_STATUS);
1023		ccb = (union ccb *) adv_read_lram_32(adv, halt_q_addr
1024						     + ADV_SCSIQ_D_CCBPTR);
1025		xpt_freeze_devq(ccb->ccb_h.path, /*count*/1);
1026		ccb->ccb_h.status |= CAM_DEV_QFRZN;
1027		adv_abort_ccb(adv, tid_no, ADV_TIX_TO_LUN(target_ix),
1028			      /*ccb*/NULL, CAM_REQUEUE_REQ,
1029			      /*queued_only*/TRUE);
1030		scsi_busy = adv_read_lram_8(adv, ADVV_SCSIBUSY_B);
1031		scsi_busy &= ~target_mask;
1032		adv_write_lram_8(adv, ADVV_SCSIBUSY_B, scsi_busy);
1033	}
1034	adv_write_lram_16(adv, ADVV_HALTCODE_W, 0);
1035}
1036
1037void
1038adv_sdtr_to_period_offset(struct adv_softc *adv,
1039			  u_int8_t sync_data, u_int8_t *period,
1040			  u_int8_t *offset, int tid)
1041{
1042	if (adv->fix_asyn_xfer & ADV_TID_TO_TARGET_MASK(tid)
1043	 && (sync_data == ASYN_SDTR_DATA_FIX_PCI_REV_AB)) {
1044		*period = *offset = 0;
1045	} else {
1046		*period = adv->sdtr_period_tbl[((sync_data >> 4) & 0xF)];
1047		*offset = sync_data & 0xF;
1048	}
1049}
1050
/*
 * Set the synchronous transfer rate for a target.
 *
 * 'type' is a mask of ADV_TRANS_CUR/GOAL/USER selecting which of the
 * per-target transfer-info records to update; ADV_TRANS_CUR also
 * programs the hardware.  'path', when non-NULL, is used to notify the
 * CAM layer of the new negotiated parameters.
 *
 * When the current hardware settings change, the chip must be halted
 * around the SDTR register update; the halt/restart is skipped if the
 * chip was already halted on entry.  The splcam()/splx() pair keeps
 * the sequence atomic with respect to the interrupt handler.
 */
void
adv_set_syncrate(struct adv_softc *adv, struct cam_path *path,
		 u_int tid, u_int period, u_int offset, u_int type)
{
	struct adv_target_transinfo* tinfo;
	u_int old_period;
	u_int old_offset;
	u_int8_t sdtr_data;

	tinfo = &adv->tinfo[tid];

	/* Filter our input */
	sdtr_data = adv_period_offset_to_sdtr(adv, &period,
					      &offset, tid);

	old_period = tinfo->current.period;
	old_offset = tinfo->current.offset;

	/*
	 * The period==0 || offset==0 clause forces an update even when
	 * the values look unchanged, to catch changes in the async-fix
	 * settings.
	 */
	if ((type & ADV_TRANS_CUR) != 0
	 && ((old_period != period || old_offset != offset)
	  || period == 0 || offset == 0) /*Changes in asyn fix settings*/) {
		int s;
		int halted;

		s = splcam();
		halted = adv_is_chip_halted(adv);
		if (halted == 0)
			/* Must halt the chip first */
			adv_host_req_chip_halt(adv);

		/* Update current hardware settings */
		adv_set_sdtr_reg_at_id(adv, tid, sdtr_data);

		/*
		 * If a target can run in sync mode, we don't need
		 * to check it for sync problems.
		 */
		if (offset != 0)
			adv->fix_asyn_xfer &= ~ADV_TID_TO_TARGET_MASK(tid);

		if (halted == 0)
			/* Start the chip again */
			adv_start_chip(adv);

		splx(s);
		tinfo->current.period = period;
		tinfo->current.offset = offset;

		if (path != NULL) {
			/*
			 * Tell the SCSI layer about the
			 * new transfer parameters.
			 */
			struct	ccb_trans_settings neg;

			neg.sync_period = period;
			neg.sync_offset = offset;
			neg.valid = CCB_TRANS_SYNC_RATE_VALID
				  | CCB_TRANS_SYNC_OFFSET_VALID;
			xpt_setup_ccb(&neg.ccb_h, path, /*priority*/1);
			xpt_async(AC_TRANSFER_NEG, path, &neg);
		}
	}

	if ((type & ADV_TRANS_GOAL) != 0) {
		tinfo->goal.period = period;
		tinfo->goal.offset = offset;
	}

	if ((type & ADV_TRANS_USER) != 0) {
		tinfo->user.period = period;
		tinfo->user.offset = offset;
	}
}
1125
1126u_int8_t
1127adv_period_offset_to_sdtr(struct adv_softc *adv, u_int *period,
1128			  u_int *offset, int tid)
1129{
1130	u_int i;
1131	u_int dummy_offset;
1132	u_int dummy_period;
1133
1134	if (offset == NULL) {
1135		dummy_offset = 0;
1136		offset = &dummy_offset;
1137	}
1138
1139	if (period == NULL) {
1140		dummy_period = 0;
1141		period = &dummy_period;
1142	}
1143
1144#define MIN(a,b) (((a) < (b)) ? (a) : (b))
1145
1146	*offset = MIN(ADV_SYN_MAX_OFFSET, *offset);
1147	if (*period != 0 && *offset != 0) {
1148		for (i = 0; i < adv->sdtr_period_tbl_size; i++) {
1149			if (*period <= adv->sdtr_period_tbl[i]) {
1150				/*
1151				 * When responding to a target that requests
1152				 * sync, the requested  rate may fall between
1153				 * two rates that we can output, but still be
1154				 * a rate that we can receive.  Because of this,
1155				 * we want to respond to the target with
1156				 * the same rate that it sent to us even
1157				 * if the period we use to send data to it
1158				 * is lower.  Only lower the response period
1159				 * if we must.
1160				 */
1161				if (i == 0 /* Our maximum rate */)
1162					*period = adv->sdtr_period_tbl[0];
1163				return ((i << 4) | *offset);
1164			}
1165		}
1166	}
1167
1168	/* Must go async */
1169	*period = 0;
1170	*offset = 0;
1171	if (adv->fix_asyn_xfer & ADV_TID_TO_TARGET_MASK(tid))
1172		return (ASYN_SDTR_DATA_FIX_PCI_REV_AB);
1173	return (0);
1174}
1175
1176/* Internal Routines */
1177
/*
 * Read 'count' 16-bit words from local RAM starting at 's_addr'
 * into 'buffer'.  The LRAM data port auto-increments after the
 * address register is loaded once.
 */
static void
adv_read_lram_16_multi(struct adv_softc *adv, u_int16_t s_addr,
		       u_int16_t *buffer, int count)
{
	ADV_OUTW(adv, ADV_LRAM_ADDR, s_addr);
	ADV_INSW(adv, ADV_LRAM_DATA, buffer, count);
}
1185
/*
 * Write 'count' 16-bit words from 'buffer' to local RAM starting
 * at 's_addr'.
 */
static void
adv_write_lram_16_multi(struct adv_softc *adv, u_int16_t s_addr,
			u_int16_t *buffer, int count)
{
	ADV_OUTW(adv, ADV_LRAM_ADDR, s_addr);
	ADV_OUTSW(adv, ADV_LRAM_DATA, buffer, count);
}
1193
/*
 * Fill 'count' 16-bit words of local RAM, starting at 's_addr',
 * with 'set_value'.
 */
static void
adv_mset_lram_16(struct adv_softc *adv, u_int16_t s_addr,
		 u_int16_t set_value, int count)
{
	ADV_OUTW(adv, ADV_LRAM_ADDR, s_addr);
	bus_space_set_multi_2(adv->tag, adv->bsh, ADV_LRAM_DATA,
			      set_value, count);
}
1202
1203static u_int32_t
1204adv_msum_lram_16(struct adv_softc *adv, u_int16_t s_addr, int count)
1205{
1206	u_int32_t	sum;
1207	int		i;
1208
1209	sum = 0;
1210	ADV_OUTW(adv, ADV_LRAM_ADDR, s_addr);
1211	for (i = 0; i < count; i++)
1212		sum += ADV_INW(adv, ADV_LRAM_DATA);
1213	return (sum);
1214}
1215
1216static int
1217adv_write_and_verify_lram_16(struct adv_softc *adv, u_int16_t addr,
1218			     u_int16_t value)
1219{
1220	int	retval;
1221
1222	retval = 0;
1223	ADV_OUTW(adv, ADV_LRAM_ADDR, addr);
1224	ADV_OUTW(adv, ADV_LRAM_DATA, value);
1225	DELAY(10000);
1226	ADV_OUTW(adv, ADV_LRAM_ADDR, addr);
1227	if (value != ADV_INW(adv, ADV_LRAM_DATA))
1228		retval = 1;
1229	return (retval);
1230}
1231
/*
 * Read a 32-bit value from local RAM as two consecutive 16-bit
 * accesses.  The LRAM stores the halves in bus order, so the read
 * order of the low/high words depends on host endianness.
 */
static u_int32_t
adv_read_lram_32(struct adv_softc *adv, u_int16_t addr)
{
	u_int16_t           val_low, val_high;

	ADV_OUTW(adv, ADV_LRAM_ADDR, addr);

#if BYTE_ORDER == BIG_ENDIAN
	val_high = ADV_INW(adv, ADV_LRAM_DATA);
	val_low = ADV_INW(adv, ADV_LRAM_DATA);
#else
	val_low = ADV_INW(adv, ADV_LRAM_DATA);
	val_high = ADV_INW(adv, ADV_LRAM_DATA);
#endif

	return (((u_int32_t)val_high << 16) | (u_int32_t)val_low);
}
1249
/*
 * Write a 32-bit value to local RAM as two consecutive 16-bit
 * accesses, mirroring the word order used by adv_read_lram_32().
 */
static void
adv_write_lram_32(struct adv_softc *adv, u_int16_t addr, u_int32_t value)
{
	ADV_OUTW(adv, ADV_LRAM_ADDR, addr);

#if BYTE_ORDER == BIG_ENDIAN
	ADV_OUTW(adv, ADV_LRAM_DATA, (u_int16_t)((value >> 16) & 0xFFFF));
	ADV_OUTW(adv, ADV_LRAM_DATA, (u_int16_t)(value & 0xFFFF));
#else
	ADV_OUTW(adv, ADV_LRAM_DATA, (u_int16_t)(value & 0xFFFF));
	ADV_OUTW(adv, ADV_LRAM_DATA, (u_int16_t)((value >> 16) & 0xFFFF));
#endif
}
1263
/*
 * Write 'count' 32-bit values to local RAM as a stream of 16-bit
 * words (count * 2 word writes).
 */
static void
adv_write_lram_32_multi(struct adv_softc *adv, u_int16_t s_addr,
			u_int32_t *buffer, int count)
{
	ADV_OUTW(adv, ADV_LRAM_ADDR, s_addr);
	ADV_OUTSW(adv, ADV_LRAM_DATA, (u_int16_t *)buffer, count * 2);
}
1271
/*
 * Read one 16-bit word from the serial EEPROM at 'addr'.
 *
 * Writes are disabled first, then a READ command is issued for the
 * given address.  The 1ms delays give the EEPROM time to complete
 * each step.
 */
static u_int16_t
adv_read_eeprom_16(struct adv_softc *adv, u_int8_t addr)
{
	u_int16_t read_wval;
	u_int8_t  cmd_reg;

	adv_write_eeprom_cmd_reg(adv, ADV_EEPROM_CMD_WRITE_DISABLE);
	DELAY(1000);
	cmd_reg = addr | ADV_EEPROM_CMD_READ;
	adv_write_eeprom_cmd_reg(adv, cmd_reg);
	DELAY(1000);
	read_wval = ADV_INW(adv, ADV_EEPROM_DATA);
	DELAY(1000);
	return (read_wval);
}
1287
/*
 * Write a 16-bit word to the serial EEPROM at 'addr', skipping the
 * write entirely if the EEPROM already holds that value (EEPROMs
 * have limited write endurance).
 *
 * Returns the value read back after the operation; the caller
 * compares it against the requested value to detect failure.
 */
static u_int16_t
adv_write_eeprom_16(struct adv_softc *adv, u_int8_t addr, u_int16_t value)
{
	u_int16_t	read_value;

	read_value = adv_read_eeprom_16(adv, addr);
	if (read_value != value) {
		adv_write_eeprom_cmd_reg(adv, ADV_EEPROM_CMD_WRITE_ENABLE);
		DELAY(1000);

		ADV_OUTW(adv, ADV_EEPROM_DATA, value);
		DELAY(1000);

		adv_write_eeprom_cmd_reg(adv, ADV_EEPROM_CMD_WRITE | addr);
		/* EEPROM write cycles are slow; allow 20ms to complete. */
		DELAY(20 * 1000);

		adv_write_eeprom_cmd_reg(adv, ADV_EEPROM_CMD_WRITE_DISABLE);
		DELAY(1000);
		read_value = adv_read_eeprom_16(adv, addr);
	}
	return (read_value);
}
1310
1311static int
1312adv_write_eeprom_cmd_reg(struct adv_softc *adv, u_int8_t cmd_reg)
1313{
1314	u_int8_t read_back;
1315	int	 retry;
1316
1317	retry = 0;
1318	while (1) {
1319		ADV_OUTB(adv, ADV_EEPROM_CMD, cmd_reg);
1320		DELAY(1000);
1321		read_back = ADV_INB(adv, ADV_EEPROM_CMD);
1322		if (read_back == cmd_reg) {
1323			return (1);
1324		}
1325		if (retry++ > ADV_EEPROM_MAX_RETRY) {
1326			return (0);
1327		}
1328	}
1329}
1330
/*
 * Write an EEPROM configuration image and verify it, one pass.
 *
 * Layout handled here: the first two words of the image go to EEPROM
 * addresses 0 and 1; the configuration body goes to [cfg_beg,
 * cfg_end - 1]; a 16-bit sum of everything written is stored at
 * cfg_end.  The image is then read back and compared word-for-word.
 *
 * Returns the number of mismatches/failures observed (0 == success).
 *
 * NOTE(review): 'wbuf' advances sequentially through eeprom_config
 * while 's_addr' jumps from 2 to cfg_beg -- the buffer is laid out
 * to match the EEPROM address map; confirm against the
 * adv_eeprom_config definition.
 */
static int
adv_set_eeprom_config_once(struct adv_softc *adv,
			   struct adv_eeprom_config *eeprom_config)
{
	int		n_error;
	u_int16_t	*wbuf;
	u_int16_t	sum;
	u_int8_t	s_addr;
	u_int8_t	cfg_beg;
	u_int8_t	cfg_end;

	wbuf = (u_int16_t *)eeprom_config;
	n_error = 0;
	sum = 0;
	/* Write the two fixed header words at EEPROM addresses 0 and 1. */
	for (s_addr = 0; s_addr < 2; s_addr++, wbuf++) {
		sum += *wbuf;
		if (*wbuf != adv_write_eeprom_16(adv, s_addr, *wbuf)) {
			n_error++;
		}
	}
	/* VL cards use a different config window than ISA/PCI. */
	if (adv->type & ADV_VL) {
		cfg_beg = ADV_EEPROM_CFG_BEG_VL;
		cfg_end = ADV_EEPROM_MAX_ADDR_VL;
	} else {
		cfg_beg = ADV_EEPROM_CFG_BEG;
		cfg_end = ADV_EEPROM_MAX_ADDR;
	}

	/* Write the configuration body, accumulating the checksum. */
	for (s_addr = cfg_beg; s_addr <= (cfg_end - 1); s_addr++, wbuf++) {
		sum += *wbuf;
		if (*wbuf != adv_write_eeprom_16(adv, s_addr, *wbuf)) {
			n_error++;
		}
	}
	/* Store the checksum in the image and in the last EEPROM word. */
	*wbuf = sum;
	if (sum != adv_write_eeprom_16(adv, s_addr, sum)) {
		n_error++;
	}
	/* Verify pass: re-read everything, including the checksum word. */
	wbuf = (u_int16_t *)eeprom_config;
	for (s_addr = 0; s_addr < 2; s_addr++, wbuf++) {
		if (*wbuf != adv_read_eeprom_16(adv, s_addr)) {
			n_error++;
		}
	}
	for (s_addr = cfg_beg; s_addr <= cfg_end; s_addr++, wbuf++) {
		if (*wbuf != adv_read_eeprom_16(adv, s_addr)) {
			n_error++;
		}
	}
	return (n_error);
}
1382
/*
 * Copy the microcode image into local RAM and record its checksum
 * and size where the microcode expects to find them.
 *
 * Returns the 32-bit sum of the words written (the caller compares
 * this against the expected image checksum).  A second, 16-bit
 * checksum over the code section (starting at ADV_CODE_SEC_BEG) is
 * stored in LRAM for the microcode's own self-check.
 */
static u_int32_t
adv_load_microcode(struct adv_softc *adv, u_int16_t s_addr,
		   u_int16_t *mcode_buf, u_int16_t mcode_size)
{
	u_int32_t chksum;
	u_int16_t mcode_lram_size;
	u_int16_t mcode_chksum;

	mcode_lram_size = mcode_size >> 1;
	/* XXX Why zero the memory just before you write the whole thing?? */
	adv_mset_lram_16(adv, s_addr, 0, mcode_lram_size);
	adv_write_lram_16_multi(adv, s_addr, mcode_buf, mcode_lram_size);

	chksum = adv_msum_lram_16(adv, s_addr, mcode_lram_size);
	mcode_chksum = (u_int16_t)adv_msum_lram_16(adv, ADV_CODE_SEC_BEG,
						   ((mcode_size - s_addr
						     - ADV_CODE_SEC_BEG) >> 1));
	adv_write_lram_16(adv, ADVV_MCODE_CHKSUM_W, mcode_chksum);
	adv_write_lram_16(adv, ADVV_MCODE_SIZE_W, mcode_size);
	return (chksum);
}
1404
/*
 * Re-initialize local RAM state after a chip reset: rebuild the
 * queue blocks and then the queue-link bookkeeping variables.
 */
static void
adv_reinit_lram(struct adv_softc *adv)
{
	adv_init_lram(adv);
	adv_init_qlink_var(adv);
}
1410
/*
 * Build the queue-block area in local RAM.
 *
 * Zeroes the region, then constructs a doubly linked circular list of
 * max_openings queue blocks (forward/backward/own queue number bytes
 * in each block), followed by a few extra blocks that link to
 * themselves.  Block 1..max_openings-1 link forward/backward normally;
 * the first block's backward link and the last block's forward link
 * close/terminate the chain.
 */
static void
adv_init_lram(struct adv_softc *adv)
{
	u_int8_t  i;
	u_int16_t s_addr;

	/* Zero the whole queue area: (max_openings + 3) blocks of 64 bytes. */
	adv_mset_lram_16(adv, ADV_QADR_BEG, 0,
			 (((adv->max_openings + 2 + 1) * 64) >> 1));

	i = ADV_MIN_ACTIVE_QNO;
	s_addr = ADV_QADR_BEG + ADV_QBLK_SIZE;

	/* First block: backward link wraps to the last queue. */
	adv_write_lram_8(adv, s_addr + ADV_SCSIQ_B_FWD,	i + 1);
	adv_write_lram_8(adv, s_addr + ADV_SCSIQ_B_BWD, adv->max_openings);
	adv_write_lram_8(adv, s_addr + ADV_SCSIQ_B_QNO, i);
	i++;
	s_addr += ADV_QBLK_SIZE;
	/* Middle blocks link to their immediate neighbors. */
	for (; i < adv->max_openings; i++, s_addr += ADV_QBLK_SIZE) {
		adv_write_lram_8(adv, s_addr + ADV_SCSIQ_B_FWD, i + 1);
		adv_write_lram_8(adv, s_addr + ADV_SCSIQ_B_BWD, i - 1);
		adv_write_lram_8(adv, s_addr + ADV_SCSIQ_B_QNO, i);
	}

	/* Last real block: forward link terminates the free list. */
	adv_write_lram_8(adv, s_addr + ADV_SCSIQ_B_FWD, ADV_QLINK_END);
	adv_write_lram_8(adv, s_addr + ADV_SCSIQ_B_BWD, adv->max_openings - 1);
	adv_write_lram_8(adv, s_addr + ADV_SCSIQ_B_QNO, adv->max_openings);
	i++;
	s_addr += ADV_QBLK_SIZE;

	/* Extra (busy/disconnect) blocks point at themselves. */
	for (; i <= adv->max_openings + 3; i++, s_addr += ADV_QBLK_SIZE) {
		adv_write_lram_8(adv, s_addr + ADV_SCSIQ_B_FWD, i);
		adv_write_lram_8(adv, s_addr + ADV_SCSIQ_B_BWD, i);
		adv_write_lram_8(adv, s_addr + ADV_SCSIQ_B_QNO, i);
	}
}
1446
/*
 * Initialize the variables the microcode reads from local RAM and
 * prime the chip's program counter.
 *
 * All targets start out asynchronous; the caller renegotiates later.
 * Returns 0 on success, 1 if the program counter could not be set.
 */
static int
adv_init_microcode_var(struct adv_softc *adv)
{
	int	 i;

	for (i = 0; i <= ADV_MAX_TID; i++) {

		/* Start out async all around */
		adv_set_syncrate(adv, /*path*/NULL,
				 i, 0, 0,
				 ADV_TRANS_GOAL|ADV_TRANS_CUR);
	}

	adv_init_qlink_var(adv);

	adv_write_lram_8(adv, ADVV_DISC_ENABLE_B, adv->disc_enable);
	/* Host SCSI ID is stored as a one-hot bit mask. */
	adv_write_lram_8(adv, ADVV_HOSTSCSI_ID_B, 0x01 << adv->scsi_id);

	/* Physical address and size of the data-overrun bounce buffer. */
	adv_write_lram_32(adv, ADVV_OVERRUN_PADDR_D, adv->overrun_physbase);

	adv_write_lram_32(adv, ADVV_OVERRUN_BSIZE_D, ADV_OVERRUN_BSIZE);

	/* Point the RISC engine at the microcode entry point and verify. */
	ADV_OUTW(adv, ADV_REG_PROG_COUNTER, ADV_MCODE_START_ADDR);
	if (ADV_INW(adv, ADV_REG_PROG_COUNTER) != ADV_MCODE_START_ADDR) {
		printf("adv%d: Unable to set program counter. Aborting.\n",
		       adv->unit);
		return (1);
	}
	return (0);
}
1477
/*
 * Reset the queue-link bookkeeping variables in local RAM to their
 * initial (empty) state: free list starts at queue 1, the done queue
 * tail at max_openings, the busy/disconnect list heads at the extra
 * queue blocks beyond max_openings, and all error/halt/busy flags
 * cleared.  The final loop zeroes the 32-word scratch area at
 * ADV_QADR_BEG.
 */
static void
adv_init_qlink_var(struct adv_softc *adv)
{
	int	  i;
	u_int16_t lram_addr;

	adv_write_lram_8(adv, ADVV_NEXTRDY_B, 1);
	adv_write_lram_8(adv, ADVV_DONENEXT_B, adv->max_openings);

	adv_write_lram_16(adv, ADVV_FREE_Q_HEAD_W, 1);
	adv_write_lram_16(adv, ADVV_DONE_Q_TAIL_W, adv->max_openings);

	adv_write_lram_8(adv, ADVV_BUSY_QHEAD_B,
			 (u_int8_t)((int) adv->max_openings + 1));
	adv_write_lram_8(adv, ADVV_DISC1_QHEAD_B,
			 (u_int8_t)((int) adv->max_openings + 2));

	adv_write_lram_8(adv, ADVV_TOTAL_READY_Q_B, adv->max_openings);

	adv_write_lram_16(adv, ADVV_ASCDVC_ERR_CODE_W, 0);
	adv_write_lram_16(adv, ADVV_HALTCODE_W, 0);
	adv_write_lram_8(adv, ADVV_STOP_CODE_B, 0);
	adv_write_lram_8(adv, ADVV_SCSIBUSY_B, 0);
	adv_write_lram_8(adv, ADVV_WTM_FLAG_B, 0);
	adv_write_lram_8(adv, ADVV_Q_DONE_IN_PROGRESS_B, 0);

	lram_addr = ADV_QADR_BEG;
	for (i = 0; i < 32; i++, lram_addr += 2)
		adv_write_lram_16(adv, lram_addr, 0);
}
1508
1509static void
1510adv_disable_interrupt(struct adv_softc *adv)
1511{
1512	u_int16_t cfg;
1513
1514	cfg = ADV_INW(adv, ADV_CONFIG_LSW);
1515	ADV_OUTW(adv, ADV_CONFIG_LSW, cfg & ~ADV_CFG_LSW_HOST_INT_ON);
1516}
1517
1518static void
1519adv_enable_interrupt(struct adv_softc *adv)
1520{
1521	u_int16_t cfg;
1522
1523	cfg = ADV_INW(adv, ADV_CONFIG_LSW);
1524	ADV_OUTW(adv, ADV_CONFIG_LSW, cfg | ADV_CFG_LSW_HOST_INT_ON);
1525}
1526
/*
 * Pulse the IRQ-active bit in the chip status register
 * (assert, then deassert).
 */
static void
adv_toggle_irq_act(struct adv_softc *adv)
{
	ADV_OUTW(adv, ADV_CHIP_STATUS, ADV_CIW_IRQ_ACT);
	ADV_OUTW(adv, ADV_CHIP_STATUS, 0);
}
1533
/*
 * Allow the RISC engine to resume microcode execution by clearing
 * the stop-code byte (only if a stop was actually requested).
 *
 * NOTE(review): uses ADV_STOP_CODE_B; adv_host_req_chip_halt() uses
 * ADVV_STOP_CODE_B -- presumably aliases for the same LRAM byte.
 */
void
adv_start_execution(struct adv_softc *adv)
{
	if (adv_read_lram_8(adv, ADV_STOP_CODE_B) != 0) {
		adv_write_lram_8(adv, ADV_STOP_CODE_B, 0);
	}
}
1541
/*
 * Halt the RISC engine directly via the chip control register.
 *
 * Clears the single-step/test/diag bits, sets the halt bit, and
 * loads HALT / RFLAG_WTM instructions into the instruction holding
 * register.  Returns 1 if the chip reports halted afterwards,
 * 0 otherwise.
 */
static int
adv_stop_chip(struct adv_softc *adv)
{
	u_int8_t cc_val;

	cc_val = ADV_INB(adv, ADV_CHIP_CTRL)
		 & (~(ADV_CC_SINGLE_STEP | ADV_CC_TEST | ADV_CC_DIAG));
	ADV_OUTB(adv, ADV_CHIP_CTRL, cc_val | ADV_CC_HALT);
	adv_set_chip_ih(adv, ADV_INS_HALT);
	adv_set_chip_ih(adv, ADV_INS_RFLAG_WTM);
	if ((ADV_INW(adv, ADV_CHIP_STATUS) & ADV_CSW_HALTED) == 0) {
		return (0);
	}
	return (1);
}
1557
/*
 * Ask the microcode to halt the RISC engine and wait for it to do so.
 *
 * Posts a halt request in the stop-code byte, spins until the chip
 * reports halted or 2000 polls elapse, then restores the previous
 * stop-code value.  Returns non-zero on success (including the case
 * where the chip was already halted), 0 on timeout.
 *
 * NOTE(review): the wait loop has no DELAY() between polls, so the
 * 2000-iteration bound is in units of LRAM read round-trips, not
 * wall-clock time -- confirm this is intentional.
 */
static int
adv_host_req_chip_halt(struct adv_softc *adv)
{
	int	 count;
	u_int8_t saved_stop_code;

	if (adv_is_chip_halted(adv))
		return (1);

	count = 0;
	saved_stop_code = adv_read_lram_8(adv, ADVV_STOP_CODE_B);
	adv_write_lram_8(adv, ADVV_STOP_CODE_B,
			 ADV_STOP_HOST_REQ_RISC_HALT | ADV_STOP_REQ_RISC_STOP);
	while (adv_is_chip_halted(adv) == 0
	    && count++ < 2000)
		;

	adv_write_lram_8(adv, ADVV_STOP_CODE_B, saved_stop_code);
	return (count < 2000);
}
1578
/*
 * Load an instruction into the chip's instruction holding register.
 * The register lives in bank 1; bank 0 is restored before returning.
 */
static void
adv_set_chip_ih(struct adv_softc *adv, u_int16_t ins_code)
{
	adv_set_bank(adv, 1);
	ADV_OUTW(adv, ADV_REG_IH, ins_code);
	adv_set_bank(adv, 0);
}
1586
#if UNUSED
/*
 * Read the SCSI control register (bank 1), restoring bank 0 before
 * returning.  Currently compiled out via the UNUSED guard.
 */
static u_int8_t
adv_get_chip_scsi_ctrl(struct adv_softc *adv)
{
	u_int8_t scsi_ctrl;

	adv_set_bank(adv, 1);
	scsi_ctrl = ADV_INB(adv, ADV_REG_SC);
	adv_set_bank(adv, 0);
	return (scsi_ctrl);
}
#endif
1599
1600/*
1601 * XXX Looks like more padding issues in this routine as well.
1602 *     There has to be a way to turn this into an insw.
1603 */
1604static void
1605adv_get_q_info(struct adv_softc *adv, u_int16_t s_addr,
1606	       u_int16_t *inbuf, int words)
1607{
1608	int	i;
1609
1610	ADV_OUTW(adv, ADV_LRAM_ADDR, s_addr);
1611	for (i = 0; i < words; i++, inbuf++) {
1612		if (i == 5) {
1613			continue;
1614		}
1615		*inbuf = ADV_INW(adv, ADV_LRAM_DATA);
1616	}
1617}
1618
1619static u_int
1620adv_get_num_free_queues(struct adv_softc *adv, u_int8_t n_qs)
1621{
1622	u_int	  cur_used_qs;
1623	u_int	  cur_free_qs;
1624
1625	cur_used_qs = adv->cur_active + ADV_MIN_FREE_Q;
1626
1627	if ((cur_used_qs + n_qs) <= adv->max_openings) {
1628		cur_free_qs = adv->max_openings - cur_used_qs;
1629		return (cur_free_qs);
1630	}
1631	adv->openings_needed = n_qs;
1632	return (0);
1633}
1634
1635static u_int8_t
1636adv_alloc_free_queues(struct adv_softc *adv, u_int8_t free_q_head,
1637		      u_int8_t n_free_q)
1638{
1639	int i;
1640
1641	for (i = 0; i < n_free_q; i++) {
1642		free_q_head = adv_alloc_free_queue(adv, free_q_head);
1643		if (free_q_head == ADV_QLINK_END)
1644			break;
1645	}
1646	return (free_q_head);
1647}
1648
1649static u_int8_t
1650adv_alloc_free_queue(struct adv_softc *adv, u_int8_t free_q_head)
1651{
1652	u_int16_t	q_addr;
1653	u_int8_t	next_qp;
1654	u_int8_t	q_status;
1655
1656	next_qp = ADV_QLINK_END;
1657	q_addr = ADV_QNO_TO_QADDR(free_q_head);
1658	q_status = adv_read_lram_8(adv,	q_addr + ADV_SCSIQ_B_STATUS);
1659
1660	if ((q_status & QS_READY) == 0)
1661		next_qp = adv_read_lram_8(adv, q_addr + ADV_SCSIQ_B_FWD);
1662
1663	return (next_qp);
1664}
1665
/*
 * Allocate 'n_q_required' queues from the free list and, if enough
 * are available, hand the SCSI request to the microcode.
 *
 * Returns 0 on success, 1 if the free list could not supply enough
 * queues (the caller should retry later).
 */
static int
adv_send_scsi_queue(struct adv_softc *adv, struct adv_scsi_q *scsiq,
		    u_int8_t n_q_required)
{
	u_int8_t	free_q_head;
	u_int8_t	next_qp;
	u_int8_t	tid_no;
	u_int8_t	target_ix;
	int		retval;

	retval = 1;
	target_ix = scsiq->q2.target_ix;
	tid_no = ADV_TIX_TO_TID(target_ix);
	free_q_head = adv_read_lram_16(adv, ADVV_FREE_Q_HEAD_W) & 0xFF;
	if ((next_qp = adv_alloc_free_queues(adv, free_q_head, n_q_required))
	    != ADV_QLINK_END) {
		scsiq->q1.q_no = free_q_head;

		/*
		 * Now that we know our Q number, point our sense
		 * buffer pointer to a bus dma mapped area where
		 * we can dma the data to.
		 */
		scsiq->q1.sense_addr = adv->sense_physbase
		    + ((free_q_head - 1) * sizeof(struct scsi_sense_data));
		adv_put_ready_sg_list_queue(adv, scsiq, free_q_head);
		/* Advance the free list past the queues we just claimed. */
		adv_write_lram_16(adv, ADVV_FREE_Q_HEAD_W, next_qp);
		adv->cur_active += n_q_required;
		retval = 0;
	}
	return (retval);
}
1698
1699
/*
 * Queue a request that may carry a scatter/gather list.
 *
 * When a SG head is present, the SG entries (beyond the first, which
 * lives in the main queue block) are distributed across the chained
 * queue blocks allocated by the caller, ADV_SG_LIST_PER_Q entries per
 * block, each prefixed with an adv_sg_list_q header.  The last block
 * is marked QCSG_SG_XFER_END.  Finally the main queue block itself is
 * made ready via adv_put_ready_queue().
 */
static void
adv_put_ready_sg_list_queue(struct adv_softc *adv, struct adv_scsi_q *scsiq,
			    u_int q_no)
{
	u_int8_t	sg_list_dwords;
	u_int8_t	sg_index, i;
	u_int8_t	sg_entry_cnt;
	u_int8_t	next_qp;
	u_int16_t	q_addr;
	struct		adv_sg_head *sg_head;
	struct		adv_sg_list_q scsi_sg_q;

	sg_head = scsiq->sg_head;

	if (sg_head) {
		/* First SG entry travels in the main queue block. */
		sg_entry_cnt = sg_head->entry_cnt - 1;
#ifdef DIAGNOSTIC
		if (sg_entry_cnt == 0)
			panic("adv_put_ready_sg_list_queue: ScsiQ with "
			      "a SG list but only one element");
		if ((scsiq->q1.cntl & QC_SG_HEAD) == 0)
			panic("adv_put_ready_sg_list_queue: ScsiQ with "
			      "a SG list but QC_SG_HEAD not set");
#endif
		q_addr = ADV_QNO_TO_QADDR(q_no);
		sg_index = 1;
		scsiq->q1.sg_queue_cnt = sg_head->queue_cnt;
		scsi_sg_q.sg_head_qp = q_no;
		scsi_sg_q.cntl = QCSG_SG_XFER_LIST;
		for (i = 0; i < sg_head->queue_cnt; i++) {
			u_int8_t segs_this_q;

			if (sg_entry_cnt > ADV_SG_LIST_PER_Q)
				segs_this_q = ADV_SG_LIST_PER_Q;
			else {
				/* This will be the last segment then */
				segs_this_q = sg_entry_cnt;
				scsi_sg_q.cntl |= QCSG_SG_XFER_END;
			}
			scsi_sg_q.seq_no = i + 1;
			/* Each SG entry is two 32-bit words. */
			sg_list_dwords = segs_this_q << 1;
			if (i == 0) {
				scsi_sg_q.sg_list_cnt = segs_this_q;
				scsi_sg_q.sg_cur_list_cnt = segs_this_q;
			} else {
				scsi_sg_q.sg_list_cnt = segs_this_q - 1;
				scsi_sg_q.sg_cur_list_cnt = segs_this_q - 1;
			}
			/* Follow the queue chain to the next block. */
			next_qp = adv_read_lram_8(adv, q_addr + ADV_SCSIQ_B_FWD);
			scsi_sg_q.q_no = next_qp;
			q_addr = ADV_QNO_TO_QADDR(next_qp);

			adv_write_lram_16_multi(adv,
						q_addr + ADV_SCSIQ_SGHD_CPY_BEG,
						(u_int16_t *)&scsi_sg_q,
						sizeof(scsi_sg_q) >> 1);
			adv_write_lram_32_multi(adv, q_addr + ADV_SGQ_LIST_BEG,
						(u_int32_t *)&sg_head->sg_list[sg_index],
						sg_list_dwords);
			sg_entry_cnt -= segs_this_q;
			sg_index += ADV_SG_LIST_PER_Q;
		}
	}
	adv_put_ready_queue(adv, scsiq, q_no);
}
1765
/*
 * Copy a request into its queue block in local RAM and mark it ready
 * for the microcode.
 *
 * If the target's negotiated transfer settings differ from the goal,
 * an SDTR message-out is staged first.  The CDB and the q1/q2 headers
 * are copied in, and finally the status byte is set to QS_READY in the
 * same 16-bit write that stores the queue number -- the microcode may
 * start processing as soon as QS_READY appears, so that write must be
 * last.
 */
static void
adv_put_ready_queue(struct adv_softc *adv, struct adv_scsi_q *scsiq,
		    u_int q_no)
{
	struct		adv_target_transinfo* tinfo;
	u_int		q_addr;
	u_int		tid_no;

	tid_no = ADV_TIX_TO_TID(scsiq->q2.target_ix);
	tinfo = &adv->tinfo[tid_no];
	if (tinfo->current.period != tinfo->goal.period) {

		adv_msgout_sdtr(adv, tinfo->goal.period, tinfo->goal.offset);
		scsiq->q1.cntl |= QC_MSG_OUT;
	}
	q_addr = ADV_QNO_TO_QADDR(q_no);

	scsiq->q1.status = QS_FREE;

	adv_write_lram_16_multi(adv, q_addr + ADV_SCSIQ_CDB_BEG,
				(u_int16_t *)scsiq->cdbptr,
				scsiq->q2.cdb_len >> 1);

#if BYTE_ORDER == BIG_ENDIAN
	adv_adj_scsiq_endian(scsiq);
#endif

	/* Copy q1/q2 headers; adv_put_scsiq skips the padding words. */
	adv_put_scsiq(adv, q_addr + ADV_SCSIQ_CPY_BEG,
		      (u_int16_t *) &scsiq->q1.cntl,
		      ((sizeof(scsiq->q1) + sizeof(scsiq->q2)) / 2) - 1);

#if CC_WRITE_IO_COUNT
	adv_write_lram_16(adv, q_addr + ADV_SCSIQ_W_REQ_COUNT,
			  adv->req_count);
#endif

#if CC_CLEAR_DMA_REMAIN

	adv_write_lram_32(adv, q_addr + ADV_SCSIQ_DW_REMAIN_XFER_ADDR, 0);
	adv_write_lram_32(adv, q_addr + ADV_SCSIQ_DW_REMAIN_XFER_CNT, 0);
#endif

	adv_write_lram_16(adv, q_addr + ADV_SCSIQ_B_STATUS,
			  (scsiq->q1.q_no << 8) | QS_READY);
}
1811
/*
 * Copy 'words' 16-bit words from 'buffer' into local RAM at 's_addr',
 * skipping word indices 2 and 10, which correspond to compiler
 * padding inside the adv_scsiq structures and must not land in LRAM.
 */
static void
adv_put_scsiq(struct adv_softc *adv, u_int16_t s_addr,
	      u_int16_t *buffer, int words)
{
	int	i;

	/*
	 * XXX This routine makes *gross* assumptions
	 * about padding in the data structures.
	 * Either the data structures should have explicit
	 * padding members added, or they should have padding
	 * turned off via compiler attributes depending on
	 * which yields better overall performance.  My hunch
	 * would be that turning off padding would be the
	 * faster approach as an outsw is much faster than
	 * this crude loop and accessing un-aligned data
	 * members isn't *that* expensive.  The other choice
	 * would be to modify the ASC script so that the
	 * the adv_scsiq_1 structure can be re-arranged so
	 * padding isn't required.
	 */
	ADV_OUTW(adv, ADV_LRAM_ADDR, s_addr);
	for (i = 0; i < words; i++, buffer++) {
		if (i == 2 || i == 10) {
			continue;
		}
		ADV_OUTW(adv, ADV_LRAM_DATA, *buffer);
	}
}
1841
/*
 * Process an extended message received from the target while the
 * chip is halted.
 *
 * SDTR: clamp the proposed period/offset to what we support, record
 * the negotiated rate, and either accept the target's response (when
 * we initiated and nothing was clamped) or send our counter-proposal.
 * WDTR: respond with narrow (width 0).  Anything else: MESSAGE REJECT.
 * The queue's control byte is updated last to reflect whether a
 * message-out is pending.
 */
static void
adv_handle_extmsg_in(struct adv_softc *adv, u_int16_t halt_q_addr,
		     u_int8_t q_cntl, target_bit_vector target_mask,
		     int tid_no)
{
	struct	ext_msg ext_msg;

	adv_read_lram_16_multi(adv, ADVV_MSGIN_BEG, (u_int16_t *) &ext_msg,
			       sizeof(ext_msg) >> 1);
	if ((ext_msg.msg_type == MSG_EXTENDED)
	 && (ext_msg.msg_req == MSG_EXT_SDTR)
	 && (ext_msg.msg_len == MSG_EXT_SDTR_LEN)) {
		union	 ccb *ccb;
		struct	 adv_target_transinfo* tinfo;
		u_int	 period;
		u_int	 offset;
		int	 sdtr_accept;
		u_int8_t orig_offset;

		ccb = (union ccb *) adv_read_lram_32(adv, halt_q_addr
							 + ADV_SCSIQ_D_CCBPTR);
		tinfo = &adv->tinfo[tid_no];
		sdtr_accept = TRUE;

		orig_offset = ext_msg.req_ack_offset;
		/* Never go faster than our negotiation goal. */
		if (ext_msg.xfer_period < tinfo->goal.period) {
                	sdtr_accept = FALSE;
			ext_msg.xfer_period = tinfo->goal.period;
		}

		/* Perform range checking */
		period = ext_msg.xfer_period;
		offset = ext_msg.req_ack_offset;
		adv_period_offset_to_sdtr(adv, &period,  &offset, tid_no);
		ext_msg.xfer_period = period;
		ext_msg.req_ack_offset = offset;

		/* Record our current sync settings */
		adv_set_syncrate(adv, ccb->ccb_h.path,
				 tid_no, ext_msg.xfer_period,
				 ext_msg.req_ack_offset,
				 ADV_TRANS_GOAL|ADV_TRANS_ACTIVE);

		/* Offset too high or large period forced async */
		if (orig_offset != ext_msg.req_ack_offset)
			sdtr_accept = FALSE;

		if (sdtr_accept && (q_cntl & QC_MSG_OUT)) {
			/* Valid response to our requested negotiation */
			q_cntl &= ~QC_MSG_OUT;
		} else {
			/* Must Respond */
			q_cntl |= QC_MSG_OUT;
			adv_msgout_sdtr(adv, ext_msg.xfer_period,
					ext_msg.req_ack_offset);
		}

	} else if (ext_msg.msg_type == MSG_EXTENDED
		&& ext_msg.msg_req == MSG_EXT_WDTR
		&& ext_msg.msg_len == MSG_EXT_WDTR_LEN) {

		/* We only do narrow transfers; answer with width 0. */
		ext_msg.wdtr_width = 0;
		adv_write_lram_16_multi(adv, ADVV_MSGOUT_BEG,
					(u_int16_t *)&ext_msg,
					sizeof(ext_msg) >> 1);
		q_cntl |= QC_MSG_OUT;
        } else {

		/* Unsupported extended message: reject it. */
		ext_msg.msg_type = MSG_MESSAGE_REJECT;
		adv_write_lram_16_multi(adv, ADVV_MSGOUT_BEG,
					(u_int16_t *)&ext_msg,
					sizeof(ext_msg) >> 1);
		q_cntl |= QC_MSG_OUT;
        }
	adv_write_lram_8(adv, halt_q_addr + ADV_SCSIQ_B_CNTL, q_cntl);
}
1918
/*
 * Stage an SDTR (synchronous data transfer request) message in the
 * LRAM message-out area so the microcode sends it on the next
 * message-out phase.  The offset is clamped to the chip maximum.
 */
static void
adv_msgout_sdtr(struct adv_softc *adv, u_int8_t sdtr_period,
		u_int8_t sdtr_offset)
{
	struct	 ext_msg sdtr_buf;

	sdtr_buf.msg_type = MSG_EXTENDED;
	sdtr_buf.msg_len = MSG_EXT_SDTR_LEN;
	sdtr_buf.msg_req = MSG_EXT_SDTR;
	sdtr_buf.xfer_period = sdtr_period;
	sdtr_offset &= ADV_SYN_MAX_OFFSET;
	sdtr_buf.req_ack_offset = sdtr_offset;
	adv_write_lram_16_multi(adv, ADVV_MSGOUT_BEG,
				(u_int16_t *) &sdtr_buf,
				sizeof(sdtr_buf) / 2);
}
1935
/*
 * Abort queued requests matching a target/lun (and optionally a
 * specific ccb).
 *
 * Scans every queue block; a queue is aborted when it is ready, not
 * already aborted, not a SG continuation block, addressed to the
 * given target/lun, passes the queued_only filter (not yet started
 * on the bus), and matches 'ccb' if one was given.  Aborted queues
 * are marked QS_ABORTED/QD_ABORTED_BY_HOST and their CCB gets
 * 'status' (unless an earlier error is already recorded) plus the
 * ACCB_ABORT_QUEUED flag.
 *
 * Returns the number of queues aborted.
 */
int
adv_abort_ccb(struct adv_softc *adv, int target, int lun, union ccb *ccb,
	      u_int32_t status, int queued_only)
{
	u_int16_t q_addr;
	u_int8_t  q_no;
	struct adv_q_done_info scsiq_buf;
	struct adv_q_done_info *scsiq;
	u_int8_t  target_ix;
	int	  count;

	scsiq = &scsiq_buf;
	target_ix = ADV_TIDLUN_TO_IX(target, lun);
	count = 0;
	for (q_no = ADV_MIN_ACTIVE_QNO; q_no <= adv->max_openings; q_no++) {
		q_addr = ADV_QNO_TO_QADDR(q_no);

		adv_copy_lram_doneq(adv, q_addr, scsiq, adv->max_dma_count);
		if (((scsiq->q_status & QS_READY) != 0)
		 && ((scsiq->q_status & QS_ABORTED) == 0)
		 && ((scsiq->cntl & QCSG_SG_XFER_LIST) == 0)
		 && (scsiq->d2.target_ix == target_ix)
		 && (queued_only == 0
		  || !(scsiq->q_status & (QS_DISC1|QS_DISC2|QS_BUSY|QS_DONE)))
		 && (ccb == NULL || (ccb == (union ccb *)scsiq->d2.ccb_ptr))) {
			union ccb *aborted_ccb;
			struct adv_ccb_info *cinfo;

			scsiq->q_status |= QS_ABORTED;
			scsiq->d3.done_stat = QD_ABORTED_BY_HOST;
			adv_write_lram_8(adv, q_addr + ADV_SCSIQ_B_STATUS,
					 scsiq->q_status);
			aborted_ccb = (union ccb *)scsiq->d2.ccb_ptr;
			/* Don't clobber earlier error codes */
			if ((aborted_ccb->ccb_h.status & CAM_STATUS_MASK)
			  == CAM_REQ_INPROG)
				aborted_ccb->ccb_h.status |= status;
			cinfo = (struct adv_ccb_info *)
			    aborted_ccb->ccb_h.ccb_cinfo_ptr;
			cinfo->state |= ACCB_ABORT_QUEUED;
			count++;
		}
	}
	return (count);
}
1981
1982int
1983adv_reset_bus(struct adv_softc *adv)
1984{
1985	int count;
1986	int i;
1987	union ccb *ccb;
1988
1989	adv_reset_chip_and_scsi_bus(adv);
1990	adv_reinit_lram(adv);
1991	for (i = 0; i <= ADV_MAX_TID; i++) {
1992		if (adv->fix_asyn_xfer & (0x01 << i))
1993			adv_set_sdtr_reg_at_id(adv, i,
1994					       ASYN_SDTR_DATA_FIX_PCI_REV_AB);
1995        }
1996	ADV_OUTW(adv, ADV_REG_PROG_COUNTER, ADV_MCODE_START_ADDR);
1997
1998	/* Tell the XPT layer that a bus reset occured */
1999	if (adv->path != NULL)
2000		xpt_async(AC_BUS_RESET, adv->path, NULL);
2001
2002	count = 0;
2003	while ((ccb = (union ccb *)LIST_FIRST(&adv->pending_ccbs)) != NULL) {
2004		struct	adv_ccb_info *cinfo;
2005
2006		if ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_INPROG)
2007			ccb->ccb_h.status |= CAM_SCSI_BUS_RESET;
2008		adv_done(adv, ccb, QD_ABORTED_BY_HOST, 0, 0, 0);
2009		count++;
2010	}
2011
2012	adv_start_chip(adv);
2013	return (count);
2014}
2015
/*
 * Program the SDTR (sync offset/rate) register for one target ID.
 *
 * The per-target SYN_OFFSET register is addressed by first selecting
 * the target in the bank-1 HOST_SCSIID register (written as the raw
 * tid, read back as a one-hot mask).  The previously selected ID is
 * recovered with ffs() and restored afterwards, and the chip is left
 * in bank 0.
 */
static void
adv_set_sdtr_reg_at_id(struct adv_softc *adv, int tid, u_int8_t sdtr_data)
{
	int orig_id;

    	adv_set_bank(adv, 1);
    	orig_id = ffs(ADV_INB(adv, ADV_HOST_SCSIID)) - 1;
    	ADV_OUTB(adv, ADV_HOST_SCSIID, tid);
	/* Only write the data if the ID select actually latched. */
	if (ADV_INB(adv, ADV_HOST_SCSIID) == (0x01 << tid)) {
		adv_set_bank(adv, 0);
		ADV_OUTB(adv, ADV_SYN_OFFSET, sdtr_data);
	}
    	adv_set_bank(adv, 1);
    	ADV_OUTB(adv, ADV_HOST_SCSIID, orig_id);
	adv_set_bank(adv, 0);
}
2032