/* advlib.c revision 21673 */
/*
 * Low level routines for the Advanced Systems Inc. SCSI controllers chips
 *
 * Copyright (c) 1996 Justin T. Gibbs.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice immediately at the beginning of the file, without modification,
 *    this list of conditions, and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: head/sys/dev/advansys/advlib.c 21673 1997-01-14 07:20:47Z jkh $
 */
/*
 * Ported from:
 * advansys.c - Linux Host Driver for AdvanSys SCSI Adapters
 *
 * Copyright (c) 1995-1996 Advanced System Products, Inc.
 * All Rights Reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that redistributions of source
 * code retain the above copyright notice and this comment without
 * modification.
 */

#include <sys/param.h>
#include <sys/systm.h>

#include <machine/clock.h>

#include <scsi/scsi_all.h>
#include <scsi/scsi_message.h>
#include <scsi/scsi_disk.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>

#include <dev/advansys/advlib.h>
#include <dev/advansys/advmcode.h>

/*
 * Allowable periods in ns
 */
u_int8_t adv_sdtr_period_tbl[] =
{
	25,
	30,
	35,
	40,
	50,
	60,
	70,
	85
};

/*
 * In-memory image of an extended SDTR (synchronous data transfer request)
 * message as exchanged with the microcode via LRAM.
 */
struct sdtr_xmsg {
	u_int8_t msg_type;
	u_int8_t msg_len;
	u_int8_t msg_req;
	u_int8_t xfer_period;
	u_int8_t req_ack_offset;
	u_int8_t res;
};

/*
 * Some of the early PCI adapters have problems with
 * async transfers. Instead try to use an offset of
 * 1.
 */
#define ASYN_SDTR_DATA_FIX 0x41

/* LRAM routines */
static void	adv_read_lram_16_multi __P((struct adv_softc *adv, u_int16_t s_addr,
					    u_int16_t *buffer, int count));
static void	adv_write_lram_16_multi __P((struct adv_softc *adv,
					     u_int16_t s_addr, u_int16_t *buffer,
					     int count));
static void	adv_mset_lram_16 __P((struct adv_softc *adv,
				      u_int16_t s_addr, u_int16_t set_value,
				      int count));
static u_int32_t adv_msum_lram_16 __P((struct adv_softc *adv, u_int16_t s_addr, int count));

static int	adv_write_and_verify_lram_16 __P((struct adv_softc *adv,
						  u_int16_t addr, u_int16_t value));
static u_int32_t adv_read_lram_32 __P((struct adv_softc *adv, u_int16_t addr));


static void	adv_write_lram_32 __P((struct adv_softc *adv, u_int16_t addr,
				       u_int32_t value));
static void	adv_write_lram_32_multi __P((struct adv_softc *adv, u_int16_t s_addr,
					     u_int32_t *buffer, int count));

/* EEPROM routines */
static u_int16_t adv_read_eeprom_16 __P((struct adv_softc *adv, u_int8_t addr));
static u_int16_t adv_write_eeprom_16 __P((struct adv_softc *adv, u_int8_t addr, u_int16_t value));
static int	adv_write_eeprom_cmd_reg __P((struct adv_softc *adv, u_int8_t cmd_reg));
static int	adv_set_eeprom_config_once __P((struct adv_softc *adv,
						struct adv_eeprom_config *eeprom_config));

/* Initialization */
static u_int32_t adv_load_microcode __P((struct adv_softc *adv,
					 u_int16_t s_addr, u_int16_t *mcode_buf, u_int16_t mcode_size));
static void	adv_init_lram __P((struct adv_softc *adv));
static int	adv_init_microcode_var __P((struct adv_softc *adv));
static void	adv_init_qlink_var __P((struct adv_softc *adv));

/* Interrupts */
static void	adv_disable_interrupt __P((struct adv_softc *adv));
static void	adv_enable_interrupt __P((struct adv_softc *adv));
static void	adv_toggle_irq_act __P((struct adv_softc *adv));

/* Chip Control */
#if UNUSED
static void	adv_start_execution __P((struct adv_softc *adv));
#endif
static int	adv_start_chip __P((struct adv_softc *adv));
static int	adv_stop_chip __P((struct adv_softc *adv));
static void	adv_set_chip_ih __P((struct adv_softc *adv,
				     u_int16_t ins_code));
static void	adv_set_bank __P((struct adv_softc *adv, u_int8_t bank));
#if UNUSED
static u_int8_t	adv_get_chip_scsi_ctrl __P((struct adv_softc *adv));
#endif

/* Queue handling and execution */
static int	adv_sgcount_to_qcount __P((int sgcount));
static void	adv_get_q_info __P((struct adv_softc *adv, u_int16_t s_addr, u_int16_t *inbuf,
				    int words));
static u_int	adv_get_num_free_queues __P((struct adv_softc *adv,
					     u_int8_t n_qs));
static u_int8_t	adv_alloc_free_queues __P((struct adv_softc *adv,
					   u_int8_t free_q_head,
					   u_int8_t n_free_q));
static u_int8_t	adv_alloc_free_queue __P((struct adv_softc *adv,
					  u_int8_t free_q_head));
static int	adv_send_scsi_queue __P((struct adv_softc *adv,
					 struct adv_scsi_q *scsiq,
					 u_int8_t n_q_required));
static void	adv_put_ready_sg_list_queue __P((struct adv_softc *adv,
						 struct adv_scsi_q *scsiq,
						 u_int8_t q_no));
static void	adv_put_ready_queue __P((struct adv_softc *adv,
					 struct adv_scsi_q *scsiq,
					 u_int8_t q_no));
static void	adv_put_scsiq __P((struct adv_softc *adv, u_int16_t s_addr,
				   u_int16_t *buffer, int words));

/* SDTR */
static u_int8_t adv_msgout_sdtr __P((struct adv_softc *adv,
				     u_int8_t sdtr_period,
				     u_int8_t sdtr_offset));
static u_int8_t adv_get_card_sync_setting __P((u_int8_t period,
					       u_int8_t offset));
static void	adv_set_chip_sdtr __P((struct adv_softc *adv,
				       u_int8_t sdtr_data,
				       u_int8_t tid_no));


/* Exported functions first */

/*
 * Read a single byte of controller LRAM at `addr`.
 * The hardware only addresses LRAM on 16 bit boundaries, so the
 * containing word is fetched and the requested byte extracted;
 * which half holds the byte depends on host byte order.
 */
u_int8_t
adv_read_lram_8(adv, addr)
	struct adv_softc *adv;
	u_int16_t addr;

{
	u_int8_t byte_data;
	u_int16_t word_data;

	/*
	 * LRAM is accessed on 16bit boundaries.
	 */
	ADV_OUTW(adv, ADV_LRAM_ADDR, addr & 0xFFFE);
	word_data = ADV_INW(adv, ADV_LRAM_DATA);
	if (addr & 1) {
#if BYTE_ORDER == BIG_ENDIAN
		byte_data = (u_int8_t)(word_data & 0xFF);
#else
		byte_data = (u_int8_t)((word_data >> 8) & 0xFF);
#endif
	} else {
#if BYTE_ORDER == BIG_ENDIAN
		byte_data = (u_int8_t)((word_data >> 8) & 0xFF);
#else
		byte_data = (u_int8_t)(word_data & 0xFF);
#endif
	}
	return (byte_data);
}

/*
 * Write a single byte of controller LRAM at `addr` via a
 * read-modify-write of the containing 16 bit word.
 * NOTE(review): byte placement here assumes little-endian word layout;
 * unlike adv_read_lram_8 there is no BYTE_ORDER conditional — confirm
 * on big-endian hosts.
 */
void
adv_write_lram_8(adv, addr, value)
	struct adv_softc *adv;
	u_int16_t addr;
	u_int8_t value;
{
	u_int16_t word_data;

	word_data = adv_read_lram_16(adv, addr & 0xFFFE);
	if (addr & 1) {
		/* Odd address: replace the high byte of the word. */
		word_data &= 0x00FF;
		word_data |= (((u_int8_t)value << 8) & 0xFF00);
	} else {
		/* Even address: replace the low byte of the word. */
		word_data &= 0xFF00;
		word_data |= ((u_int8_t)value & 0x00FF);
	}
	adv_write_lram_16(adv, addr & 0xFFFE, word_data);
}


/*
 * Read a 16 bit word of controller LRAM at `addr`.
 */
u_int16_t
adv_read_lram_16(adv, addr)
	struct adv_softc *adv;
	u_int16_t addr;
{
	ADV_OUTW(adv, ADV_LRAM_ADDR, addr);
	return (ADV_INW(adv, ADV_LRAM_DATA));
}

/*
 * Write a 16 bit word of controller LRAM at `addr`.
 */
void
adv_write_lram_16(adv, addr, value)
	struct adv_softc *adv;
	u_int16_t addr;
	u_int16_t value;
{
	ADV_OUTW(adv, ADV_LRAM_ADDR, addr);
	ADV_OUTW(adv, ADV_LRAM_DATA, value);
}


/*
 * Return the fully qualified board type for the adapter.
 * The chip_revision must be set before this function is called.
255 */ 256void 257adv_get_board_type(adv) 258 struct adv_softc *adv; 259{ 260 if ((adv->chip_version >= ADV_CHIP_MIN_VER_VL) && 261 (adv->chip_version <= ADV_CHIP_MAX_VER_VL)) { 262 if (((adv->iobase & 0x0C30) == 0x0C30) || 263 ((adv->iobase & 0x0C50) == 0x0C50)) { 264 adv->type = ADV_EISA; 265 } else 266 adv->type = ADV_VL; 267 } else if ((adv->chip_version >= ADV_CHIP_MIN_VER_ISA) && 268 (adv->chip_version <= ADV_CHIP_MAX_VER_ISA)) { 269 if (adv->chip_version >= ADV_CHIP_MIN_VER_ISA_PNP) { 270 adv->type = ADV_ISAPNP; 271 } else 272 adv->type = ADV_ISA; 273 } else if ((adv->chip_version >= ADV_CHIP_MIN_VER_PCI) && 274 (adv->chip_version <= ADV_CHIP_MAX_VER_PCI)) { 275 adv->type = ADV_PCI; 276 } else 277 panic("adv_get_board_type: Unknown board type encountered"); 278} 279 280u_int16_t 281adv_get_eeprom_config(adv, eeprom_config) 282 struct adv_softc *adv; 283 struct adv_eeprom_config *eeprom_config; 284{ 285 u_int16_t sum; 286 u_int16_t *wbuf; 287 u_int8_t cfg_beg; 288 u_int8_t cfg_end; 289 u_int8_t s_addr; 290 291 wbuf = (u_int16_t *)eeprom_config; 292 sum = 0; 293 294 for (s_addr = 0; s_addr < 2; s_addr++, wbuf++) { 295 *wbuf = adv_read_eeprom_16(adv, s_addr); 296 sum += *wbuf; 297 } 298 299 if (adv->type & ADV_VL) { 300 cfg_beg = ADV_EEPROM_CFG_BEG_VL; 301 cfg_end = ADV_EEPROM_MAX_ADDR_VL; 302 } else { 303 cfg_beg = ADV_EEPROM_CFG_BEG; 304 cfg_end = ADV_EEPROM_MAX_ADDR; 305 } 306 307 for (s_addr = cfg_beg; s_addr <= (cfg_end - 1); s_addr++, wbuf++) { 308 *wbuf = adv_read_eeprom_16(adv, s_addr); 309 sum += *wbuf; 310#if ADV_DEBUG_EEPROM 311 printf("Addr 0x%x: 0x%04x\n", s_addr, *wbuf); 312#endif 313 } 314 *wbuf = adv_read_eeprom_16(adv, s_addr); 315 return (sum); 316} 317 318int 319adv_set_eeprom_config(adv, eeprom_config) 320 struct adv_softc *adv; 321 struct adv_eeprom_config *eeprom_config; 322{ 323 int retry; 324 325 retry = 0; 326 while (1) { 327 if (adv_set_eeprom_config_once(adv, eeprom_config) == 0) { 328 break; 329 } 330 if (++retry > 
ADV_EEPROM_MAX_RETRY) { 331 break; 332 } 333 } 334 return (retry > ADV_EEPROM_MAX_RETRY); 335} 336 337int 338adv_reset_chip_and_scsi_bus(adv) 339 struct adv_softc *adv; 340{ 341 adv_stop_chip(adv); 342 ADV_OUTB(adv, ADV_CHIP_CTRL, ADV_CC_CHIP_RESET | ADV_CC_SCSI_RESET | ADV_CC_HALT); 343 DELAY(200 * 1000); 344 345 adv_set_chip_ih(adv, ADV_INS_RFLAG_WTM); 346 adv_set_chip_ih(adv, ADV_INS_HALT); 347 348 ADV_OUTB(adv, ADV_CHIP_CTRL, ADV_CC_CHIP_RESET | ADV_CC_HALT); 349 ADV_OUTB(adv, ADV_CHIP_CTRL, ADV_CC_HALT); 350 DELAY(200 * 1000); 351 return (adv_is_chip_halted(adv)); 352} 353 354int 355adv_test_external_lram(adv) 356 struct adv_softc* adv; 357{ 358 u_int16_t q_addr; 359 u_int16_t saved_value; 360 int success; 361 362 success = 0; 363 364 /* XXX Why 241? */ 365 q_addr = ADV_QNO_TO_QADDR(241); 366 saved_value = adv_read_lram_16(adv, q_addr); 367 if (adv_write_and_verify_lram_16(adv, q_addr, 0x55AA) == 0) { 368 success = 1; 369 adv_write_lram_16(adv, q_addr, saved_value); 370 } 371 return (success); 372} 373 374 375int 376adv_init_lram_and_mcode(adv) 377 struct adv_softc *adv; 378{ 379 u_int32_t retval; 380 adv_disable_interrupt(adv); 381 382 adv_init_lram(adv); 383 384 retval = adv_load_microcode(adv, 0, (u_int16_t *)adv_mcode, adv_mcode_size); 385 if (retval != adv_mcode_chksum) { 386 printf("adv%d: Microcode download failed checksum!\n", 387 adv->unit); 388 return (1); 389 } 390 391 if (adv_init_microcode_var(adv) != 0) 392 return (1); 393 394 adv_enable_interrupt(adv); 395 return (0); 396} 397 398u_int8_t 399adv_get_chip_irq(adv) 400 struct adv_softc *adv; 401{ 402 u_int16_t cfg_lsw; 403 u_int8_t chip_irq; 404 405 cfg_lsw = ADV_INW(adv, ADV_CONFIG_LSW); 406 407 if ((adv->type & ADV_VL) != 0) { 408 chip_irq = (u_int8_t)(((cfg_lsw >> 2) & 0x07)); 409 if ((chip_irq == 0) || 410 (chip_irq == 4) || 411 (chip_irq == 7)) { 412 return (0); 413 } 414 return (chip_irq + (ADV_MIN_IRQ_NO - 1)); 415 } 416 chip_irq = (u_int8_t)(((cfg_lsw >> 2) & 0x03)); 417 if (chip_irq == 3) 
418 chip_irq += 2; 419 return (chip_irq + ADV_MIN_IRQ_NO); 420} 421 422u_int8_t 423adv_set_chip_irq(adv, irq_no) 424 struct adv_softc *adv; 425 u_int8_t irq_no; 426{ 427 u_int16_t cfg_lsw; 428 429 if ((adv->type & ADV_VL) != 0) { 430 if (irq_no != 0) { 431 if ((irq_no < ADV_MIN_IRQ_NO) || (irq_no > ADV_MAX_IRQ_NO)) { 432 irq_no = 0; 433 } else { 434 irq_no -= ADV_MIN_IRQ_NO - 1; 435 } 436 } 437 cfg_lsw = ADV_INW(adv, ADV_CONFIG_LSW) & 0xFFE3; 438 cfg_lsw |= 0x0010; 439 ADV_OUTW(adv, ADV_CONFIG_LSW, cfg_lsw); 440 adv_toggle_irq_act(adv); 441 442 cfg_lsw = ADV_INW(adv, ADV_CONFIG_LSW) & 0xFFE0; 443 cfg_lsw |= (irq_no & 0x07) << 2; 444 ADV_OUTW(adv, ADV_CONFIG_LSW, cfg_lsw); 445 adv_toggle_irq_act(adv); 446 } else if ((adv->type & ADV_ISA) != 0) { 447 if (irq_no == 15) 448 irq_no -= 2; 449 irq_no -= ADV_MIN_IRQ_NO; 450 cfg_lsw = ADV_INW(adv, ADV_CONFIG_LSW) & 0xFFF3; 451 cfg_lsw |= (irq_no & 0x03) << 2; 452 ADV_OUTW(adv, ADV_CONFIG_LSW, cfg_lsw); 453 } 454 return (adv_get_chip_irq(adv)); 455} 456 457int 458adv_execute_scsi_queue(adv, scsiq) 459 struct adv_softc *adv; 460 struct adv_scsi_q *scsiq; 461{ 462 int retval; 463 u_int n_q_required; 464 int s; 465 u_int32_t addr; 466 u_int8_t sg_entry_cnt; 467 u_int8_t target_ix; 468 u_int8_t sg_entry_cnt_minus_one; 469 u_int8_t tid_no; 470 u_int8_t sdtr_data; 471 u_int32_t *p_data_addr; 472 u_int32_t *p_data_bcount; 473 474 scsiq->q1.q_no = 0; 475 retval = 1; /* Default to error case */ 476 target_ix = scsiq->q2.target_ix; 477 tid_no = ADV_TIX_TO_TID(target_ix); 478 479 n_q_required = 1; 480 481 s = splbio(); 482 if (scsiq->cdbptr->opcode == REQUEST_SENSE) { 483 if (((adv->initiate_sdtr & scsiq->q1.target_id) != 0) 484 && ((adv->sdtr_done & scsiq->q1.target_id) != 0)) { 485 int sdtr_index; 486 487 sdtr_data = adv_read_lram_8(adv, ADVV_SDTR_DATA_BEG + tid_no); 488 sdtr_index = (sdtr_data >> 4); 489 adv_msgout_sdtr(adv, adv_sdtr_period_tbl[sdtr_index], 490 (sdtr_data & ADV_SYN_MAX_OFFSET)); 491 scsiq->q1.cntl |= (QC_MSG_OUT | 
QC_URGENT); 492 } 493 } 494 495 if ((scsiq->q1.cntl & QC_SG_HEAD) != 0) { 496 sg_entry_cnt = scsiq->sg_head->entry_cnt; 497 sg_entry_cnt_minus_one = sg_entry_cnt - 1; 498 499#ifdef DIAGNOSTIC 500 if (sg_entry_cnt <= 1) 501 panic("adv_execute_scsi_queue: Queue with QC_SG_HEAD set but %d segs.", sg_entry_cnt); 502 503 if (sg_entry_cnt > ADV_MAX_SG_LIST) 504 panic("adv_execute_scsi_queue: Queue with too many segs."); 505 506 if (adv->type & (ADV_ISA | ADV_VL | ADV_EISA)) { 507 for (i = 0; i < sg_entry_cnt_minus_one; i++) { 508 addr = scsiq->sg_head->sg_list[i].addr + 509 scsiq->sg_head->sg_list[i].bytes; 510 511 if ((addr & 0x0003) != 0) 512 panic("adv_execute_scsi_queue: SG with odd address or byte count"); 513 } 514 } 515#endif 516 p_data_addr = &scsiq->sg_head->sg_list[sg_entry_cnt_minus_one].addr; 517 p_data_bcount = &scsiq->sg_head->sg_list[sg_entry_cnt_minus_one].bytes; 518 519 n_q_required = adv_sgcount_to_qcount(sg_entry_cnt); 520 scsiq->sg_head->queue_cnt = n_q_required - 1; 521 } else { 522 p_data_addr = &scsiq->q1.data_addr; 523 p_data_bcount = &scsiq->q1.data_cnt; 524 n_q_required = 1; 525 } 526 527 if (adv->bug_fix_control & ADV_BUG_FIX_ADD_ONE_BYTE) { 528 addr = *p_data_addr + *p_data_bcount; 529 if ((addr & 0x0003) != 0) { 530 /* 531 * XXX Is this extra test (the one on data_cnt) really only supposed to apply 532 * to the non SG case or was it a bug due to code duplication? 
533 */ 534 if ((scsiq->q1.cntl & QC_SG_HEAD) != 0 || (scsiq->q1.data_cnt & 0x01FF) == 0) { 535 if ((scsiq->cdbptr->opcode == READ_COMMAND) || 536 (scsiq->cdbptr->opcode == READ_BIG)) { 537 if ((scsiq->q2.tag_code & ADV_TAG_FLAG_ADD_ONE_BYTE) == 0) { 538 (*p_data_bcount)++; 539 scsiq->q2.tag_code |= ADV_TAG_FLAG_ADD_ONE_BYTE; 540 } 541 } 542 543 } 544 } 545 } 546 547 if ((adv_get_num_free_queues(adv, n_q_required) >= n_q_required) 548 || ((scsiq->q1.cntl & QC_URGENT) != 0)) 549 retval = adv_send_scsi_queue(adv, scsiq, n_q_required); 550 551 splx(s); 552 return (retval); 553} 554 555 556u_int8_t 557adv_copy_lram_doneq(adv, q_addr, scsiq, max_dma_count) 558 struct adv_softc *adv; 559 u_int16_t q_addr; 560 struct adv_q_done_info *scsiq; 561 u_int32_t max_dma_count; 562{ 563 u_int16_t val; 564 u_int8_t sg_queue_cnt; 565 566 adv_get_q_info(adv, q_addr + ADV_SCSIQ_DONE_INFO_BEG, 567 (u_int16_t *)scsiq, 568 (sizeof(scsiq->d2) + sizeof(scsiq->d3)) / 2); 569 570#if BYTE_ORDER == BIG_ENDIAN 571 adv_adj_endian_qdone_info(scsiq); 572#endif 573 574 val = adv_read_lram_16(adv, q_addr + ADV_SCSIQ_B_STATUS); 575 scsiq->q_status = val & 0xFF; 576 scsiq->q_no = (val >> 8) & 0XFF; 577 578 val = adv_read_lram_16(adv, q_addr + ADV_SCSIQ_B_CNTL); 579 scsiq->cntl = val & 0xFF; 580 sg_queue_cnt = (val >> 8) & 0xFF; 581 582 val = adv_read_lram_16(adv,q_addr + ADV_SCSIQ_B_SENSE_LEN); 583 scsiq->sense_len = val & 0xFF; 584 scsiq->user_def = (val >> 8) & 0xFF; 585 586 scsiq->remain_bytes = adv_read_lram_32(adv, 587 q_addr + ADV_SCSIQ_DW_REMAIN_XFER_CNT); 588 /* 589 * XXX Is this just a safeguard or will the counter really 590 * have bogus upper bits? 
591 */ 592 scsiq->remain_bytes &= max_dma_count; 593 594 return (sg_queue_cnt); 595} 596 597int 598adv_stop_execution(adv) 599 struct adv_softc *adv; 600{ 601 int count; 602 603 count = 0; 604 if (adv_read_lram_8(adv, ADV_STOP_CODE_B) == 0) { 605 adv_write_lram_8(adv, ADV_STOP_CODE_B, 606 ADV_STOP_REQ_RISC_STOP); 607 do { 608 if (adv_read_lram_8(adv, ADV_STOP_CODE_B) & 609 ADV_STOP_ACK_RISC_STOP) { 610 return (1); 611 } 612 DELAY(1000); 613 } while (count++ < 20); 614 } 615 return (0); 616} 617 618int 619adv_is_chip_halted(adv) 620 struct adv_softc *adv; 621{ 622 if ((ADV_INW(adv, ADV_CHIP_STATUS) & ADV_CSW_HALTED) != 0) { 623 if ((ADV_INB(adv, ADV_CHIP_CTRL) & ADV_CC_HALT) != 0) { 624 return (1); 625 } 626 } 627 return (0); 628} 629 630/* 631 * XXX The numeric constants and the loops in this routine 632 * need to be documented. 633 */ 634void 635adv_ack_interrupt(adv) 636 struct adv_softc *adv; 637{ 638 u_int8_t host_flag; 639 u_int8_t risc_flag; 640 int loop; 641 642 loop = 0; 643 do { 644 risc_flag = adv_read_lram_8(adv, ADVV_RISC_FLAG_B); 645 if (loop++ > 0x7FFF) { 646 break; 647 } 648 } while ((risc_flag & ADV_RISC_FLAG_GEN_INT) != 0); 649 650 host_flag = adv_read_lram_8(adv, ADVV_HOST_FLAG_B); 651 adv_write_lram_8(adv, ADVV_HOST_FLAG_B, 652 host_flag | ADV_HOST_FLAG_ACK_INT); 653 654 ADV_OUTW(adv, ADV_CHIP_STATUS, ADV_CIW_INT_ACK); 655 loop = 0; 656 while (ADV_INW(adv, ADV_CHIP_STATUS) & ADV_CSW_INT_PENDING) { 657 ADV_OUTW(adv, ADV_CHIP_STATUS, ADV_CIW_INT_ACK); 658 if (loop++ > 3) { 659 break; 660 } 661 } 662 663 adv_write_lram_8(adv, ADVV_HOST_FLAG_B, host_flag); 664} 665 666/* 667 * Handle all conditions that may halt the chip waiting 668 * for us to intervene. 
 */
void
adv_isr_chip_halted(adv)
	struct adv_softc *adv;
{
	u_int16_t int_halt_code;
	u_int8_t halt_qp;
	u_int16_t halt_q_addr;
	u_int8_t target_ix;
	u_int8_t q_cntl;
	u_int8_t tid_no;
	target_bit_vector target_id;
	target_bit_vector scsi_busy;
	u_int8_t asyn_sdtr;
	u_int8_t sdtr_data;

	/* Fetch the halt reason and the queue the microcode was working on. */
	int_halt_code = adv_read_lram_16(adv, ADVV_HALTCODE_W);
	halt_qp = adv_read_lram_8(adv, ADVV_CURCDB_B);
	halt_q_addr = ADV_QNO_TO_QADDR(halt_qp);
	target_ix = adv_read_lram_8(adv, halt_q_addr + ADV_SCSIQ_B_TARGET_IX);
	q_cntl = adv_read_lram_8(adv, halt_q_addr + ADV_SCSIQ_B_CNTL);
	tid_no = ADV_TIX_TO_TID(target_ix);
	target_id = ADV_TID_TO_TARGET_ID(tid_no);
	/* Targets with the async bug get offset 1 instead of true async. */
	if (adv->needs_async_bug_fix & target_id)
		asyn_sdtr = ASYN_SDTR_DATA_FIX;
	else
		asyn_sdtr = 0;
	if (int_halt_code == ADV_HALT_EXTMSG_IN) {
		/*
		 * A target sent us an extended message; the only one
		 * handled here is SDTR (sync negotiation).
		 */
		struct sdtr_xmsg sdtr_xmsg;
		int sdtr_accept;

		adv_read_lram_16_multi(adv, ADVV_MSGIN_BEG,
				       (u_int16_t *) &sdtr_xmsg,
				       sizeof(sdtr_xmsg) >> 1);
		if ((sdtr_xmsg.msg_type == MSG_EXTENDED) &&
		    (sdtr_xmsg.msg_len == MSG_EXT_SDTR_LEN)) {
			sdtr_accept = TRUE;
			if (sdtr_xmsg.msg_req == MSG_EXT_SDTR) {
				if (sdtr_xmsg.req_ack_offset > ADV_SYN_MAX_OFFSET) {
					/* Clamp the offered offset to our maximum. */
					sdtr_accept = FALSE;
					sdtr_xmsg.req_ack_offset = ADV_SYN_MAX_OFFSET;
				}
				sdtr_data = adv_get_card_sync_setting(sdtr_xmsg.xfer_period,
								      sdtr_xmsg.req_ack_offset);
				if (sdtr_xmsg.req_ack_offset == 0) {
					/* Target wants async transfers. */
					q_cntl &= ~QC_MSG_OUT;
					adv->initiate_sdtr &= ~target_id;
					adv->sdtr_done &= ~target_id;
					adv_set_chip_sdtr(adv, asyn_sdtr, tid_no);
				} else if (sdtr_data == 0) {
					/* No compatible sync rate; counter-offer. */
					q_cntl |= QC_MSG_OUT;
					adv->initiate_sdtr &= ~target_id;
					adv->sdtr_done &= ~target_id;
					adv_set_chip_sdtr(adv, asyn_sdtr, tid_no);
				} else {
					if (sdtr_accept && (q_cntl & QC_MSG_OUT)) {
						/* Target accepted our offer as-is. */
						q_cntl &= ~QC_MSG_OUT;
						adv->sdtr_done |= target_id;
						adv->initiate_sdtr |= target_id;
						adv->needs_async_bug_fix &= ~target_id;
						adv_set_chip_sdtr(adv, sdtr_data, tid_no);
					} else {
						/* Respond with our (possibly clamped) terms. */
						q_cntl |= QC_MSG_OUT;

						adv_msgout_sdtr(adv,
								sdtr_xmsg.xfer_period,
								sdtr_xmsg.req_ack_offset);
						adv->needs_async_bug_fix &= ~target_id;
						adv_set_chip_sdtr(adv, sdtr_data, tid_no);
						adv->sdtr_done |= target_id;
						adv->initiate_sdtr |= target_id;
					}
				}

				adv_write_lram_8(adv, halt_q_addr + ADV_SCSIQ_B_CNTL, q_cntl);
			}
		}
		/*
		 * XXX Hey, shouldn't we be rejecting any messages we don't understand?
		 * The old code also did not un-halt the processor if it recieved
		 * an extended message that it didn't understand. That didn't
		 * seem right, so I changed this routine to always un-halt the
		 * processor at the end.
		 */
	} else if (int_halt_code == ADV_HALT_CHK_CONDITION) {
		/*
		 * Target returned CHECK CONDITION: prepare an automatic
		 * REQUEST SENSE on the halted queue and unbusy the target.
		 */
		u_int8_t tag_code;
		u_int8_t q_status;

		q_cntl |= QC_REQ_SENSE;
		if (((adv->initiate_sdtr & target_id) != 0) &&
		    ((adv->sdtr_done & target_id) != 0)) {

			sdtr_data = adv_read_lram_8(adv, ADVV_SDTR_DATA_BEG + tid_no);
			/* XXX Macrotize the extraction of the index from sdtr_data ??? */
			adv_msgout_sdtr(adv, adv_sdtr_period_tbl[(sdtr_data >> 4) & 0x0F],
					sdtr_data & ADV_SYN_MAX_OFFSET);
			q_cntl |= QC_MSG_OUT;
		}
		adv_write_lram_8(adv, halt_q_addr + ADV_SCSIQ_B_CNTL, q_cntl);

		/* Don't tag request sense commands */
		tag_code = adv_read_lram_8(adv, halt_q_addr + ADV_SCSIQ_B_TAG_CODE);
		tag_code &= ~(MSG_SIMPLE_Q_TAG|MSG_HEAD_OF_Q_TAG|MSG_ORDERED_Q_TAG);
		adv_write_lram_8(adv, halt_q_addr + ADV_SCSIQ_B_TAG_CODE, tag_code);

		q_status = adv_read_lram_8(adv, halt_q_addr + ADV_SCSIQ_B_STATUS);
		q_status |= (QS_READY | QS_BUSY);
		adv_write_lram_8(adv, halt_q_addr + ADV_SCSIQ_B_STATUS, q_status);

		scsi_busy = adv_read_lram_8(adv, ADVV_SCSIBUSY_B);
		scsi_busy &= ~target_id;
		adv_write_lram_8(adv, ADVV_SCSIBUSY_B, scsi_busy);
	} else if (int_halt_code == ADV_HALT_SDTR_REJECTED) {
		/*
		 * The target rejected our SDTR message; fall back to
		 * async (or the async bug-fix setting) for this target.
		 */
		struct sdtr_xmsg out_msg;

		adv_read_lram_16_multi(adv, ADVV_MSGOUT_BEG,
				       (u_int16_t *) &out_msg,
				       sizeof(out_msg)/2);

		if ((out_msg.msg_type == MSG_EXTENDED) &&
		    (out_msg.msg_len == MSG_EXT_SDTR_LEN) &&
		    (out_msg.msg_req == MSG_EXT_SDTR)) {

			adv->initiate_sdtr &= ~target_id;
			adv->sdtr_done &= ~target_id;
			adv_set_chip_sdtr(adv, asyn_sdtr, tid_no);
		}
		q_cntl &= ~QC_MSG_OUT;
		adv_write_lram_8(adv, halt_q_addr + ADV_SCSIQ_B_CNTL, q_cntl);
	} else if (int_halt_code == ADV_HALT_SS_QUEUE_FULL) {
		u_int8_t cur_dvc_qng;
		u_int8_t scsi_status;

		/*
		 * XXX It would be nice if we could push the responsibility for handling
		 * this situation onto the generic SCSI layer as other drivers do.
		 * This would be done by completing the command with the status byte
		 * set to QUEUE_FULL, whereupon it will request that any transactions
		 * pending on the target that where scheduled after this one be aborted
		 * (so as to maintain queue ordering) and the number of requests the
		 * upper level will attempt to send this target will be reduced.
		 *
		 * With this current strategy, am I guaranteed that once I unbusy the
		 * target the queued up transactions will be sent in the order they
		 * were queued? If the ASC chip does a round-robin on all queued
		 * transactions looking for queues to run, the order is not guaranteed.
		 */
		scsi_status = adv_read_lram_8(adv, halt_q_addr + ADV_SCSIQ_SCSI_STATUS);
		cur_dvc_qng = adv_read_lram_8(adv, ADV_QADR_BEG + target_ix);
		printf("adv%d: Queue full - target %d, active transactions %d\n", adv->unit,
		       tid_no, cur_dvc_qng);
#if 0
		/* XXX FIX LATER */
		if ((cur_dvc_qng > 0) && (adv->cur_dvc_qng[tid_no] > 0)) {
			scsi_busy = adv_read_lram_8(adv, ADVV_SCSIBUSY_B);
			scsi_busy |= target_id;
			adv_write_lram_8(adv, ADVV_SCSIBUSY_B, scsi_busy);
			asc_dvc->queue_full_or_busy |= target_id;

			if (scsi_status == SS_QUEUE_FULL) {
				if (cur_dvc_qng > ASC_MIN_TAGGED_CMD) {
					cur_dvc_qng -= 1;
					asc_dvc->max_dvc_qng[tid_no] = cur_dvc_qng;

					adv_write_lram_8(adv, ADVV_MAX_DVC_QNG_BEG + tid_no,
							 cur_dvc_qng);
				}
			}
		}
#endif
	}
	/* Clear the halt code to let the RISC processor resume. */
	adv_write_lram_16(adv, ADVV_HALTCODE_W, 0);
}

/* Internal Routines */

/*
 * Burst-read `count` 16 bit words of LRAM starting at s_addr into buffer.
 */
static void
adv_read_lram_16_multi(adv, s_addr, buffer, count)
	struct adv_softc *adv;
	u_int16_t s_addr;
	u_int16_t *buffer;
	int count;
{
	ADV_OUTW(adv, ADV_LRAM_ADDR, s_addr);
	ADV_INSW(adv, ADV_LRAM_DATA, buffer, count);
}

/*
 * Burst-write `count` 16 bit words from buffer into LRAM at s_addr.
 */
static void
adv_write_lram_16_multi(adv, s_addr, buffer, count)
	struct adv_softc *adv;
	u_int16_t s_addr;
	u_int16_t *buffer;
	int count;
{
	ADV_OUTW(adv, ADV_LRAM_ADDR, s_addr);
	ADV_OUTSW(adv, ADV_LRAM_DATA, buffer, count);
}

/*
 * Fill `count` 16 bit words of LRAM starting at s_addr with set_value.
 */
static void
adv_mset_lram_16(adv, s_addr, set_value, count)
	struct adv_softc *adv;
	u_int16_t s_addr;
	u_int16_t set_value;
	int count;
{
	int i;

	ADV_OUTW(adv, ADV_LRAM_ADDR, s_addr);
	for (i = 0; i < count; i++)
		ADV_OUTW(adv, ADV_LRAM_DATA, set_value);
}

/*
 * Sum `count` 16 bit words of LRAM starting at s_addr (used for
 * microcode checksumming).
 */
static u_int32_t
adv_msum_lram_16(adv, s_addr, count)
	struct adv_softc *adv;
	u_int16_t s_addr;
	int count;
{
	u_int32_t sum;
	int i;

	sum = 0;
	for (i = 0; i < count; i++, s_addr += 2)
		sum += adv_read_lram_16(adv, s_addr);
	return (sum);
}

/*
 * Write one LRAM word and read it back.  Returns 0 on success,
 * 1 if the read-back did not match.
 */
static int
adv_write_and_verify_lram_16(adv, addr, value)
	struct adv_softc *adv;
	u_int16_t addr;
	u_int16_t value;
{
	int retval;

	retval = 0;
	ADV_OUTW(adv, ADV_LRAM_ADDR, addr);
	ADV_OUTW(adv, ADV_LRAM_DATA, value);
	ADV_OUTW(adv, ADV_LRAM_ADDR, addr);
	if (value != ADV_INW(adv, ADV_LRAM_DATA))
		retval = 1;
	return (retval);
}

/*
 * Read a 32 bit value from LRAM as two consecutive 16 bit accesses;
 * the word order read from the chip depends on host byte order.
 */
static u_int32_t
adv_read_lram_32(adv, addr)
	struct adv_softc *adv;
	u_int16_t addr;
{
	u_int16_t val_low, val_high;

	ADV_OUTW(adv, ADV_LRAM_ADDR, addr);

#if BYTE_ORDER == BIG_ENDIAN
	val_high = ADV_INW(adv, ADV_LRAM_DATA);
	val_low = ADV_INW(adv, ADV_LRAM_DATA);
#else
	val_low = ADV_INW(adv, ADV_LRAM_DATA);
	val_high = ADV_INW(adv, ADV_LRAM_DATA);
#endif

	return (((u_int32_t)val_high << 16) | (u_int32_t)val_low);
}

/*
 * Write a 32 bit value to LRAM as two consecutive 16 bit accesses,
 * mirroring the word order used by adv_read_lram_32.
 */
static void
adv_write_lram_32(adv, addr, value)
	struct adv_softc *adv;
	u_int16_t addr;
	u_int32_t value;
{
	ADV_OUTW(adv, ADV_LRAM_ADDR, addr);

#if BYTE_ORDER == BIG_ENDIAN
	ADV_OUTW(adv, ADV_LRAM_DATA, (u_int16_t)((value >> 16) & 0xFFFF));
	ADV_OUTW(adv, ADV_LRAM_DATA, (u_int16_t)(value & 0xFFFF));
#else
	ADV_OUTW(adv, ADV_LRAM_DATA, (u_int16_t)(value & 0xFFFF));
	ADV_OUTW(adv, ADV_LRAM_DATA, (u_int16_t)((value >> 16) & 0xFFFF));
#endif
}

/*
 * Burst-write `count` 32 bit values from buffer into LRAM at s_addr
 * (transferred as count * 2 16 bit words).
 */
static void
adv_write_lram_32_multi(adv, s_addr, buffer, count)
	struct adv_softc *adv;
	u_int16_t s_addr;
	u_int32_t *buffer;
	int count;
{
	ADV_OUTW(adv, ADV_LRAM_ADDR, s_addr);
	ADV_OUTSW(adv, ADV_LRAM_DATA, buffer, count * 2);
}

/*
 * Read one 16 bit word from the serial EEPROM at `addr`.
 * The DELAYs give the slow EEPROM time to respond to each command.
 */
static u_int16_t
adv_read_eeprom_16(adv, addr)
	struct adv_softc *adv;
	u_int8_t addr;
{
	u_int16_t read_wval;
	u_int8_t cmd_reg;

	adv_write_eeprom_cmd_reg(adv, ADV_EEPROM_CMD_WRITE_DISABLE);
	DELAY(1000);
	cmd_reg = addr | ADV_EEPROM_CMD_READ;
	adv_write_eeprom_cmd_reg(adv, cmd_reg);
	DELAY(1000);
	read_wval = ADV_INW(adv, ADV_EEPROM_DATA);
	DELAY(1000);
	return (read_wval);
}

/*
 * Write one 16 bit word to the EEPROM at `addr`, skipping the write
 * if the cell already holds the value (EEPROM cells have limited
 * write endurance).  Returns the value read back after the write.
 */
static u_int16_t
adv_write_eeprom_16(adv, addr, value)
	struct adv_softc *adv;
	u_int8_t addr;
	u_int16_t value;
{
	u_int16_t read_value;

	read_value = adv_read_eeprom_16(adv, addr);
	if (read_value != value) {
		adv_write_eeprom_cmd_reg(adv, ADV_EEPROM_CMD_WRITE_ENABLE);
		DELAY(1000);

		ADV_OUTW(adv, ADV_EEPROM_DATA, value);
		DELAY(1000);

		adv_write_eeprom_cmd_reg(adv, ADV_EEPROM_CMD_WRITE | addr);
		DELAY(20 * 1000);

		adv_write_eeprom_cmd_reg(adv, ADV_EEPROM_CMD_WRITE_DISABLE);
		DELAY(1000);
		read_value = adv_read_eeprom_16(adv, addr);
	}
	return (read_value);
}

/*
 * Write a command to the EEPROM command register and verify it took,
 * retrying up to ADV_EEPROM_MAX_RETRY times.  Returns 1 on success,
 * 0 on failure.
 */
static int
adv_write_eeprom_cmd_reg(adv, cmd_reg)
	struct adv_softc *adv;
	u_int8_t cmd_reg;
{
	u_int8_t read_back;
	int retry;

	retry = 0;
	while (1) {
		ADV_OUTB(adv, ADV_EEPROM_CMD, cmd_reg);
		DELAY(1000);
		read_back = ADV_INB(adv, ADV_EEPROM_CMD);
		if (read_back == cmd_reg) {
			return (1);
		}
		if (retry++ > ADV_EEPROM_MAX_RETRY) {
			return (0);
		}
	}
}

/*
 * One attempt at writing out the full EEPROM configuration: write all
 * words, append the computed checksum, then read everything back and
 * compare.  Returns the number of miscompares (0 == success).
 */
static int
adv_set_eeprom_config_once(adv, eeprom_config)
	struct adv_softc *adv;
	struct adv_eeprom_config *eeprom_config;
{
	int n_error;
	u_int16_t *wbuf;
	u_int16_t sum;
	u_int8_t s_addr;
	u_int8_t cfg_beg;
	u_int8_t cfg_end;

	wbuf = (u_int16_t *)eeprom_config;
	n_error = 0;
	sum = 0;
	for (s_addr = 0; s_addr < 2; s_addr++, wbuf++) {
		sum += *wbuf;
		if (*wbuf != adv_write_eeprom_16(adv, s_addr, *wbuf)) {
			n_error++;
		}
	}
	if (adv->type & ADV_VL) {
		cfg_beg = ADV_EEPROM_CFG_BEG_VL;
		cfg_end = ADV_EEPROM_MAX_ADDR_VL;
	} else {
		cfg_beg = ADV_EEPROM_CFG_BEG;
		cfg_end = ADV_EEPROM_MAX_ADDR;
	}

	for (s_addr = cfg_beg; s_addr <= (cfg_end - 1); s_addr++, wbuf++) {
		sum += *wbuf;
		if (*wbuf != adv_write_eeprom_16(adv, s_addr, *wbuf)) {
			n_error++;
		}
	}
	/* The checksum is stored in the final EEPROM word. */
	*wbuf = sum;
	if (sum != adv_write_eeprom_16(adv, s_addr, sum)) {
		n_error++;
	}
	/* Verification pass: re-read everything and compare. */
	wbuf = (u_int16_t *)eeprom_config;
	for (s_addr = 0; s_addr < 2; s_addr++, wbuf++) {
		if (*wbuf != adv_read_eeprom_16(adv, s_addr)) {
			n_error++;
		}
	}
	for (s_addr = cfg_beg; s_addr <= cfg_end; s_addr++, wbuf++) {
		if (*wbuf != adv_read_eeprom_16(adv, s_addr)) {
			n_error++;
		}
	}
	return (n_error);
}

/*
 * Download the microcode image into LRAM at s_addr, record its size and
 * code-section checksum in the microcode variables, and return the sum
 * over the whole downloaded image for the caller to validate.
 */
static u_int32_t
adv_load_microcode(adv, s_addr, mcode_buf, mcode_size)
	struct adv_softc *adv;
	u_int16_t s_addr;
	u_int16_t *mcode_buf;
	u_int16_t mcode_size;
{
	u_int32_t chksum;
	u_int16_t mcode_lram_size;
	u_int16_t mcode_chksum;

	mcode_lram_size = mcode_size >> 1;
	/* XXX Why zero the memory just before you write the whole thing?? */
	/* adv_mset_lram_16(adv, s_addr, 0, mcode_lram_size);*/
	adv_write_lram_16_multi(adv, s_addr, mcode_buf, mcode_lram_size);

	chksum = adv_msum_lram_16(adv, s_addr, mcode_lram_size);
	mcode_chksum = (u_int16_t)adv_msum_lram_16(adv, ADV_CODE_SEC_BEG,
						   ((mcode_size - s_addr - ADV_CODE_SEC_BEG) >> 1));
	adv_write_lram_16(adv, ADVV_MCODE_CHKSUM_W, mcode_chksum);
	adv_write_lram_16(adv, ADVV_MCODE_SIZE_W, mcode_size);
	return (chksum);
}

/*
 * Initialize the queue blocks in LRAM: zero the queue area, then build
 * the doubly linked free list of queue blocks 1..max_openings plus the
 * trailing self-linked blocks.
 */
static void
adv_init_lram(adv)
	struct adv_softc *adv;
{
	u_int8_t i;
	u_int16_t s_addr;

	adv_mset_lram_16(adv, ADV_QADR_BEG, 0,
			 (u_int16_t)((((int)adv->max_openings + 2 + 1) * 64) >> 1));

	i = ADV_MIN_ACTIVE_QNO;
	s_addr = ADV_QADR_BEG + ADV_QBLK_SIZE;

	/* First queue block links forward to 2 and backward to the tail. */
	adv_write_lram_8(adv, s_addr + ADV_SCSIQ_B_FWD, i + 1);
	adv_write_lram_8(adv, s_addr + ADV_SCSIQ_B_BWD, adv->max_openings);
	adv_write_lram_8(adv, s_addr + ADV_SCSIQ_B_QNO, i);
	i++;
	s_addr += ADV_QBLK_SIZE;
	for (; i < adv->max_openings; i++, s_addr += ADV_QBLK_SIZE) {
		adv_write_lram_8(adv, s_addr + ADV_SCSIQ_B_FWD, i + 1);
		adv_write_lram_8(adv, s_addr + ADV_SCSIQ_B_BWD, i - 1);
		adv_write_lram_8(adv, s_addr + ADV_SCSIQ_B_QNO, i);
	}

	/* Tail block terminates the forward chain. */
	adv_write_lram_8(adv, s_addr + ADV_SCSIQ_B_FWD, ADV_QLINK_END);
	adv_write_lram_8(adv, s_addr + ADV_SCSIQ_B_BWD, adv->max_openings - 1);
	adv_write_lram_8(adv, s_addr + ADV_SCSIQ_B_QNO, adv->max_openings);
	i++;
	s_addr += ADV_QBLK_SIZE;

	/* Remaining blocks are self-linked (not part of the free list). */
	for (; i <= adv->max_openings + 3; i++, s_addr += ADV_QBLK_SIZE) {
		adv_write_lram_8(adv, s_addr + ADV_SCSIQ_B_FWD, i);
		adv_write_lram_8(adv, s_addr + ADV_SCSIQ_B_BWD, i);
		adv_write_lram_8(adv, s_addr + ADV_SCSIQ_B_QNO, i);
	}
}

/*
 * Initialize the variables the microcode reads from LRAM (per-target
 * SDTR settings, disconnect enables, host ID, overrun buffer) and start
 * the on-board processor.  Returns 0 on success, 1 on failure.
 */
static int
adv_init_microcode_var(adv)
	struct adv_softc *adv;
{
	int i;

	for (i = 0; i <= ADV_MAX_TID; i++) {
		adv_write_lram_8(adv, ADVV_SDTR_DATA_BEG + i,
				 adv->sdtr_data[i]);
	}

	adv_init_qlink_var(adv);

	/* XXX Again, what about wide busses??? */
	adv_write_lram_8(adv, ADVV_DISC_ENABLE_B, adv->disc_enable);
	adv_write_lram_8(adv, ADVV_HOSTSCSI_ID_B, 0x01 << adv->scsi_id);

	/* What are the extra 8 bytes for?? */
	adv_write_lram_32(adv, ADVV_OVERRUN_PADDR_D, vtophys(&(adv->overrun_buf[0])) + 8);

	adv_write_lram_32(adv, ADVV_OVERRUN_BSIZE_D, ADV_OVERRUN_BSIZE - 8);

#if 0
	/* If we're going to print anything, RCS ids are more meaningful */
	mcode_date = adv_read_lram_16(adv, ADVV_MC_DATE_W);
	mcode_version = adv_read_lram_16(adv, ADVV_MC_VER_W);
#endif
	ADV_OUTW(adv, ADV_REG_PROG_COUNTER, ADV_MCODE_START_ADDR);
	if (ADV_INW(adv, ADV_REG_PROG_COUNTER) != ADV_MCODE_START_ADDR) {
		printf("adv%d: Unable to set program counter. Aborting.\n", adv->unit);
		return (1);
	}
	if (adv_start_chip(adv) != 1) {
		printf("adv%d: Unable to start on board processor. Aborting.\n",
		       adv->unit);
		return (1);
	}
	return (0);
}

/*
 * Initialize the microcode's queue-linkage bookkeeping variables and
 * clear the per-target queue address area.
 */
static void
adv_init_qlink_var(adv)
	struct adv_softc *adv;
{
	int i;
	u_int16_t lram_addr;

	adv_write_lram_8(adv, ADVV_NEXTRDY_B, 1);
	adv_write_lram_8(adv, ADVV_DONENEXT_B, adv->max_openings);

	adv_write_lram_16(adv, ADVV_FREE_Q_HEAD_W, 1);
	adv_write_lram_16(adv, ADVV_DONE_Q_TAIL_W, adv->max_openings);

	adv_write_lram_8(adv, ADVV_BUSY_QHEAD_B,
			 (u_int8_t)((int) adv->max_openings + 1));
	adv_write_lram_8(adv, ADVV_DISC1_QHEAD_B,
			 (u_int8_t)((int) adv->max_openings + 2));

	adv_write_lram_8(adv, ADVV_TOTAL_READY_Q_B, adv->max_openings);

	adv_write_lram_16(adv, ADVV_ASCDVC_ERR_CODE_W, 0);
	adv_write_lram_16(adv, ADVV_HALTCODE_W, 0);
	adv_write_lram_8(adv, ADVV_STOP_CODE_B, 0);
	adv_write_lram_8(adv, ADVV_SCSIBUSY_B, 0);
	adv_write_lram_8(adv, ADVV_WTM_FLAG_B, 0);

	adv_write_lram_8(adv, ADVV_CDBCNT_B, 0);

	lram_addr = ADV_QADR_BEG;
	for (i = 0; i < 32; i++, lram_addr += 2)
		adv_write_lram_16(adv, lram_addr, 0);
}

/*
 * Mask host interrupts from the adapter.
 */
static void
adv_disable_interrupt(adv)
	struct adv_softc *adv;
{
	u_int16_t cfg;

	cfg = ADV_INW(adv, ADV_CONFIG_LSW);
	ADV_OUTW(adv, ADV_CONFIG_LSW, cfg & ~ADV_CFG_LSW_HOST_INT_ON);
}

/*
 * Unmask host interrupts from the adapter.
 */
static void
adv_enable_interrupt(adv)
	struct adv_softc *adv;
{
	u_int16_t cfg;

	cfg = ADV_INW(adv, ADV_CONFIG_LSW);
	ADV_OUTW(adv, ADV_CONFIG_LSW, cfg | ADV_CFG_LSW_HOST_INT_ON);
}

/*
 * Pulse the IRQ-active bit in the chip status register (used while
 * reprogramming the IRQ selection).
 */
static void
adv_toggle_irq_act(adv)
	struct adv_softc *adv;
{
	ADV_OUTW(adv, ADV_CHIP_STATUS, ADV_CIW_IRQ_ACT);
	ADV_OUTW(adv, ADV_CHIP_STATUS, 0);
}

#if UNUSED
/*
 * Release a previously requested RISC processor stop by clearing the
 * stop code.
 */
static void
adv_start_execution(adv)
	struct adv_softc *adv;
{
	if (adv_read_lram_8(adv, ADV_STOP_CODE_B) != 0) {
		adv_write_lram_8(adv, ADV_STOP_CODE_B, 0);
	}
}
#endif
1253 1254static int 1255adv_start_chip(adv) 1256 struct adv_softc *adv; 1257{ 1258 ADV_OUTB(adv, ADV_CHIP_CTRL, 0); 1259 if ((ADV_INW(adv, ADV_CHIP_STATUS) & ADV_CSW_HALTED) != 0) 1260 return (0); 1261 return (1); 1262} 1263 1264static int 1265adv_stop_chip(adv) 1266 struct adv_softc *adv; 1267{ 1268 u_int8_t cc_val; 1269 1270 cc_val = ADV_INB(adv, ADV_CHIP_CTRL) 1271 & (~(ADV_CC_SINGLE_STEP | ADV_CC_TEST | ADV_CC_DIAG)); 1272 ADV_OUTB(adv, ADV_CHIP_CTRL, cc_val | ADV_CC_HALT); 1273 adv_set_chip_ih(adv, ADV_INS_HALT); 1274 adv_set_chip_ih(adv, ADV_INS_RFLAG_WTM); 1275 if ((ADV_INW(adv, ADV_CHIP_STATUS) & ADV_CSW_HALTED) == 0) { 1276 return (0); 1277 } 1278 return (1); 1279} 1280 1281static void 1282adv_set_chip_ih(adv, ins_code) 1283 struct adv_softc *adv; 1284 u_int16_t ins_code; 1285{ 1286 adv_set_bank(adv, 1); 1287 ADV_OUTW(adv, ADV_REG_IH, ins_code); 1288 adv_set_bank(adv, 0); 1289} 1290 1291static void 1292adv_set_bank(adv, bank) 1293 struct adv_softc *adv; 1294 u_int8_t bank; 1295{ 1296 u_int8_t control; 1297 1298 /* 1299 * Start out with the bank reset to 0 1300 */ 1301 control = ADV_INB(adv, ADV_CHIP_CTRL) 1302 & (~(ADV_CC_SINGLE_STEP | ADV_CC_TEST 1303 | ADV_CC_DIAG | ADV_CC_SCSI_RESET 1304 | ADV_CC_CHIP_RESET | ADV_CC_BANK_ONE)); 1305 if (bank == 1) { 1306 control |= ADV_CC_BANK_ONE; 1307 } else if (bank == 2) { 1308 control |= ADV_CC_DIAG | ADV_CC_BANK_ONE; 1309 } 1310 ADV_OUTB(adv, ADV_CHIP_CTRL, control); 1311} 1312 1313#if UNUSED 1314static u_int8_t 1315adv_get_chip_scsi_ctrl(adv) 1316 struct adv_softc *adv; 1317{ 1318 u_int8_t scsi_ctrl; 1319 1320 adv_set_bank(adv, 1); 1321 scsi_ctrl = ADV_INB(adv, ADV_REG_SC); 1322 adv_set_bank(adv, 0); 1323 return (scsi_ctrl); 1324} 1325#endif 1326 1327static int 1328adv_sgcount_to_qcount(sgcount) 1329 int sgcount; 1330{ 1331 int n_sg_list_qs; 1332 1333 n_sg_list_qs = ((sgcount - 1) / ADV_SG_LIST_PER_Q); 1334 if (((sgcount - 1) % ADV_SG_LIST_PER_Q) != 0) 1335 n_sg_list_qs++; 1336 return (n_sg_list_qs + 1); 1337} 
1338 1339/* 1340 * XXX Looks like more padding issues in this routine as well. 1341 * There has to be a way to turn this into an insw. 1342 */ 1343static void 1344adv_get_q_info(adv, s_addr, inbuf, words) 1345 struct adv_softc *adv; 1346 u_int16_t s_addr; 1347 u_int16_t *inbuf; 1348 int words; 1349{ 1350 int i; 1351 1352 ADV_OUTW(adv, ADV_LRAM_ADDR, s_addr); 1353 for (i = 0; i < words; i++, inbuf++) { 1354 if (i == 5) { 1355 continue; 1356 } 1357 *inbuf = ADV_INW(adv, ADV_LRAM_DATA); 1358 } 1359} 1360 1361static u_int 1362adv_get_num_free_queues(adv, n_qs) 1363 struct adv_softc *adv; 1364 u_int8_t n_qs; 1365{ 1366 u_int cur_used_qs; 1367 u_int cur_free_qs; 1368 1369 if (n_qs == 1) 1370 cur_used_qs = adv->cur_active + 1371 adv->openings_needed + 1372 ADV_MIN_FREE_Q; 1373 else 1374 cur_used_qs = adv->cur_active + 1375 ADV_MIN_FREE_Q; 1376 1377 if ((cur_used_qs + n_qs) <= adv->max_openings) { 1378 cur_free_qs = adv->max_openings - cur_used_qs; 1379 return (cur_free_qs); 1380 } 1381 if (n_qs > 1) 1382 if (n_qs > adv->openings_needed) 1383 adv->openings_needed = n_qs; 1384 return (0); 1385} 1386 1387static u_int8_t 1388adv_alloc_free_queues(adv, free_q_head, n_free_q) 1389 struct adv_softc *adv; 1390 u_int8_t free_q_head; 1391 u_int8_t n_free_q; 1392{ 1393 int i; 1394 1395 for (i = 0; i < n_free_q; i++) { 1396 free_q_head = adv_alloc_free_queue(adv, free_q_head); 1397 if (free_q_head == ADV_QLINK_END) 1398 break; 1399 } 1400 return (free_q_head); 1401} 1402 1403static u_int8_t 1404adv_alloc_free_queue(adv, free_q_head) 1405 struct adv_softc *adv; 1406 u_int8_t free_q_head; 1407{ 1408 u_int16_t q_addr; 1409 u_int8_t next_qp; 1410 u_int8_t q_status; 1411 1412 next_qp = ADV_QLINK_END; 1413 q_addr = ADV_QNO_TO_QADDR(free_q_head); 1414 q_status = adv_read_lram_8(adv, q_addr + ADV_SCSIQ_B_STATUS); 1415 1416 if ((q_status & QS_READY) == 0) 1417 next_qp = adv_read_lram_8(adv, q_addr + ADV_SCSIQ_B_FWD); 1418 1419 return (next_qp); 1420} 1421 1422static int 
1423adv_send_scsi_queue(adv, scsiq, n_q_required) 1424 struct adv_softc *adv; 1425 struct adv_scsi_q *scsiq; 1426 u_int8_t n_q_required; 1427{ 1428 u_int8_t free_q_head; 1429 u_int8_t next_qp; 1430 u_int8_t tid_no; 1431 u_int8_t target_ix; 1432 int retval; 1433 1434 retval = 1; 1435 target_ix = scsiq->q2.target_ix; 1436 tid_no = ADV_TIX_TO_TID(target_ix); 1437 free_q_head = adv_read_lram_16(adv, ADVV_FREE_Q_HEAD_W) & 0xFF; 1438 if ((next_qp = adv_alloc_free_queues(adv, free_q_head, n_q_required)) 1439 != ADV_QLINK_END) { 1440 if (n_q_required > 1) { 1441 /* 1442 * Only reset the shortage value when processing 1443 * a "normal" request and not error recovery or 1444 * other requests that dip into our reserved queues. 1445 * Generally speaking, a normal request will need more 1446 * than one queue. 1447 */ 1448 adv->openings_needed = 0; 1449 } 1450 scsiq->q1.q_no = free_q_head; 1451 1452 /* 1453 * Now that we know our Q number, point our sense 1454 * buffer pointer to an area below 16M if we are 1455 * an ISA adapter. 
1456 */ 1457 if (adv->sense_buffers != NULL) 1458 scsiq->q1.sense_addr = (u_int32_t)vtophys(&(adv->sense_buffers[free_q_head])); 1459 adv_put_ready_sg_list_queue(adv, scsiq, free_q_head); 1460 adv_write_lram_16(adv, ADVV_FREE_Q_HEAD_W, next_qp); 1461 adv->cur_active += n_q_required; 1462 retval = 0; 1463 } 1464 return (retval); 1465} 1466 1467 1468static void 1469adv_put_ready_sg_list_queue(adv, scsiq, q_no) 1470 struct adv_softc *adv; 1471 struct adv_scsi_q *scsiq; 1472 u_int8_t q_no; 1473{ 1474 u_int8_t sg_list_dwords; 1475 u_int8_t sg_index, i; 1476 u_int8_t sg_entry_cnt; 1477 u_int8_t next_qp; 1478 u_int16_t q_addr; 1479 struct adv_sg_head *sg_head; 1480 struct adv_sg_list_q scsi_sg_q; 1481 1482 sg_head = scsiq->sg_head; 1483 1484 if (sg_head) { 1485 sg_entry_cnt = sg_head->entry_cnt - 1; 1486#ifdef DIAGNOSTIC 1487 if (sg_entry_cnt == 0) 1488 panic("adv_put_ready_sg_list_queue: ScsiQ with a SG list but only one element"); 1489 if ((scsiq->q1.cntl & QC_SG_HEAD) == 0) 1490 panic("adv_put_ready_sg_list_queue: ScsiQ with a SG list but QC_SG_HEAD not set"); 1491#endif 1492 q_addr = ADV_QNO_TO_QADDR(q_no); 1493 sg_index = 1; 1494 scsiq->q1.sg_queue_cnt = sg_head->queue_cnt; 1495 scsi_sg_q.sg_head_qp = q_no; 1496 scsi_sg_q.cntl = QCSG_SG_XFER_LIST; 1497 for (i = 0; i < sg_head->queue_cnt; i++) { 1498 u_int8_t segs_this_q; 1499 1500 if (sg_entry_cnt > ADV_SG_LIST_PER_Q) 1501 segs_this_q = ADV_SG_LIST_PER_Q; 1502 else { 1503 /* This will be the last segment then */ 1504 segs_this_q = sg_entry_cnt; 1505 scsi_sg_q.cntl |= QCSG_SG_XFER_END; 1506 } 1507 scsi_sg_q.seq_no = i + 1; 1508 sg_list_dwords = segs_this_q * 2; 1509 if (i == 0) { 1510 scsi_sg_q.sg_list_cnt = segs_this_q; 1511 scsi_sg_q.sg_cur_list_cnt = segs_this_q; 1512 } else { 1513 scsi_sg_q.sg_list_cnt = segs_this_q - 1; 1514 scsi_sg_q.sg_cur_list_cnt = segs_this_q - 1; 1515 } 1516 next_qp = adv_read_lram_8(adv, q_addr + ADV_SCSIQ_B_FWD); 1517 scsi_sg_q.q_no = next_qp; 1518 q_addr = ADV_QNO_TO_QADDR(next_qp); 1519 
1520 adv_write_lram_16_multi(adv, q_addr + ADV_SCSIQ_SGHD_CPY_BEG, 1521 (u_int16_t *)&scsi_sg_q, 1522 sizeof(scsi_sg_q) >> 1); 1523 adv_write_lram_32_multi(adv, q_addr + ADV_SGQ_LIST_BEG, 1524 (u_int32_t *)&sg_head->sg_list[sg_index], 1525 sg_list_dwords); 1526 sg_entry_cnt -= segs_this_q; 1527 sg_index += ADV_SG_LIST_PER_Q; 1528 } 1529 } 1530 adv_put_ready_queue(adv, scsiq, q_no); 1531} 1532 1533static void 1534adv_put_ready_queue(adv, scsiq, q_no) 1535 struct adv_softc *adv; 1536 struct adv_scsi_q *scsiq; 1537 u_int8_t q_no; 1538{ 1539 u_int16_t q_addr; 1540 u_int8_t tid_no; 1541 u_int8_t sdtr_data; 1542 u_int8_t syn_period_ix; 1543 u_int8_t syn_offset; 1544 1545 if (((adv->initiate_sdtr & scsiq->q1.target_id) != 0) && 1546 ((adv->sdtr_done & scsiq->q1.target_id) == 0)) { 1547 1548 tid_no = ADV_TIX_TO_TID(scsiq->q2.target_ix); 1549 1550 sdtr_data = adv_read_lram_8(adv, ADVV_SDTR_DATA_BEG + tid_no); 1551 syn_period_ix = (sdtr_data >> 4) & (ADV_SYN_XFER_NO - 1); 1552 syn_offset = sdtr_data & ADV_SYN_MAX_OFFSET; 1553 adv_msgout_sdtr(adv, adv_sdtr_period_tbl[syn_period_ix], 1554 syn_offset); 1555 1556 scsiq->q1.cntl |= QC_MSG_OUT; 1557 } 1558 q_addr = ADV_QNO_TO_QADDR(q_no); 1559 1560 scsiq->q1.status = QS_FREE; 1561 1562 adv_write_lram_16_multi(adv, q_addr + ADV_SCSIQ_CDB_BEG, 1563 (u_int16_t *)scsiq->cdbptr, 1564 scsiq->q2.cdb_len >> 1); 1565 1566#if BYTE_ORDER == BIG_ENDIAN 1567 adv_adj_scsiq_endian(scsiq); 1568#endif 1569 1570 adv_put_scsiq(adv, q_addr + ADV_SCSIQ_CPY_BEG, 1571 (u_int16_t *) &scsiq->q1.cntl, 1572 ((sizeof(scsiq->q1) + sizeof(scsiq->q2)) / 2) - 1); 1573 1574#if CC_WRITE_IO_COUNT 1575 adv_write_lram_16(adv, q_addr + ADV_SCSIQ_W_REQ_COUNT, 1576 adv->req_count); 1577#endif 1578 1579#if CC_CLEAR_DMA_REMAIN 1580 1581 adv_write_lram_32(adv, q_addr + ADV_SCSIQ_DW_REMAIN_XFER_ADDR, 0); 1582 adv_write_lram_32(adv, q_addr + ADV_SCSIQ_DW_REMAIN_XFER_CNT, 0); 1583#endif 1584 1585 adv_write_lram_16(adv, q_addr + ADV_SCSIQ_B_STATUS, 1586 (scsiq->q1.q_no << 8) | 
QS_READY); 1587} 1588 1589static void 1590adv_put_scsiq(adv, s_addr, buffer, words) 1591 struct adv_softc *adv; 1592 u_int16_t s_addr; 1593 u_int16_t *buffer; 1594 int words; 1595{ 1596 int i; 1597 1598 /* 1599 * XXX This routine makes *gross* assumptions 1600 * about padding in the data structures. 1601 * Either the data structures should have explicit 1602 * padding members added, or they should have padding 1603 * turned off via compiler attributes depending on 1604 * which yields better overall performance. My hunch 1605 * would be that turning off padding would be the 1606 * faster approach as an outsw is much faster than 1607 * this crude loop and accessing un-aligned data 1608 * members isn't *that* expensive. The other choice 1609 * would be to modify the ASC script so that the 1610 * the adv_scsiq_1 structure can be re-arranged so 1611 * padding isn't required. 1612 */ 1613 ADV_OUTW(adv, ADV_LRAM_ADDR, s_addr); 1614 for (i = 0; i < words; i++, buffer++) { 1615 if (i == 2 || i == 10) { 1616 continue; 1617 } 1618 ADV_OUTW(adv, ADV_LRAM_DATA, *buffer); 1619 } 1620} 1621 1622static u_int8_t 1623adv_msgout_sdtr(adv, sdtr_period, sdtr_offset) 1624 struct adv_softc *adv; 1625 u_int8_t sdtr_period; 1626 u_int8_t sdtr_offset; 1627{ 1628 struct sdtr_xmsg sdtr_buf; 1629 1630 sdtr_buf.msg_type = MSG_EXTENDED; 1631 sdtr_buf.msg_len = MSG_EXT_SDTR_LEN; 1632 sdtr_buf.msg_req = MSG_EXT_SDTR; 1633 sdtr_buf.xfer_period = sdtr_period; 1634 sdtr_offset &= ADV_SYN_MAX_OFFSET; 1635 sdtr_buf.req_ack_offset = sdtr_offset; 1636 adv_write_lram_16_multi(adv, ADVV_MSGOUT_BEG, 1637 (u_int16_t *) &sdtr_buf, 1638 sizeof(sdtr_buf) / 2); 1639 1640 return (adv_get_card_sync_setting(sdtr_period, sdtr_offset)); 1641} 1642 1643static u_int8_t 1644adv_get_card_sync_setting(period, offset) 1645 u_int8_t period; 1646 u_int8_t offset; 1647{ 1648 u_int i; 1649 1650 if (period >= adv_sdtr_period_tbl[0]) { 1651 for (i = 0; i < sizeof(adv_sdtr_period_tbl); i++) { 1652 if (period <= 
adv_sdtr_period_tbl[i]) 1653 return ((adv_sdtr_period_tbl[i] << 4) | offset); 1654 } 1655 } 1656 return (0); 1657} 1658 1659static void 1660adv_set_chip_sdtr(adv, sdtr_data, tid_no) 1661 struct adv_softc *adv; 1662 u_int8_t sdtr_data; 1663 u_int8_t tid_no; 1664{ 1665 ADV_OUTB(adv, ADV_SYN_OFFSET, sdtr_data); 1666 adv_write_lram_8(adv, ADVV_SDTR_DONE_BEG + tid_no, sdtr_data); 1667} 1668