1/* $NetBSD: esiop.c,v 1.63 2024/02/08 19:44:08 andvar Exp $ */ 2 3/* 4 * Copyright (c) 2002 Manuel Bouyer. 5 * 6 * Redistribution and use in source and binary forms, with or without 7 * modification, are permitted provided that the following conditions 8 * are met: 9 * 1. Redistributions of source code must retain the above copyright 10 * notice, this list of conditions and the following disclaimer. 11 * 2. Redistributions in binary form must reproduce the above copyright 12 * notice, this list of conditions and the following disclaimer in the 13 * documentation and/or other materials provided with the distribution. 14 * 15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR 16 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES 17 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 18 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, 19 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT 20 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 21 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 22 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 23 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF 24 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
25 * 26 */ 27 28/* SYM53c7/8xx PCI-SCSI I/O Processors driver */ 29 30#include <sys/cdefs.h> 31__KERNEL_RCSID(0, "$NetBSD: esiop.c,v 1.63 2024/02/08 19:44:08 andvar Exp $"); 32 33#include <sys/param.h> 34#include <sys/systm.h> 35#include <sys/device.h> 36#include <sys/malloc.h> 37#include <sys/buf.h> 38#include <sys/kernel.h> 39 40#include <machine/endian.h> 41#include <sys/bus.h> 42 43#include <dev/microcode/siop/esiop.out> 44 45#include <dev/scsipi/scsi_all.h> 46#include <dev/scsipi/scsi_message.h> 47#include <dev/scsipi/scsipi_all.h> 48 49#include <dev/scsipi/scsiconf.h> 50 51#include <dev/ic/siopreg.h> 52#include <dev/ic/siopvar_common.h> 53#include <dev/ic/esiopvar.h> 54 55#include "opt_siop.h" 56 57/* 58#define SIOP_DEBUG 59#define SIOP_DEBUG_DR 60#define SIOP_DEBUG_INTR 61#define SIOP_DEBUG_SCHED 62#define SIOP_DUMP_SCRIPT 63*/ 64 65#define SIOP_STATS 66 67#ifndef SIOP_DEFAULT_TARGET 68#define SIOP_DEFAULT_TARGET 7 69#endif 70 71/* number of cmd descriptors per block */ 72#define SIOP_NCMDPB (PAGE_SIZE / sizeof(struct esiop_xfer)) 73 74void esiop_reset(struct esiop_softc *); 75void esiop_checkdone(struct esiop_softc *); 76void esiop_handle_reset(struct esiop_softc *); 77void esiop_scsicmd_end(struct esiop_cmd *, int); 78void esiop_unqueue(struct esiop_softc *, int, int); 79int esiop_handle_qtag_reject(struct esiop_cmd *); 80static void esiop_start(struct esiop_softc *, struct esiop_cmd *); 81void esiop_timeout(void *); 82void esiop_scsipi_request(struct scsipi_channel *, 83 scsipi_adapter_req_t, void *); 84void esiop_dump_script(struct esiop_softc *); 85void esiop_morecbd(struct esiop_softc *); 86void esiop_moretagtbl(struct esiop_softc *); 87void siop_add_reselsw(struct esiop_softc *, int); 88void esiop_target_register(struct esiop_softc *, uint32_t); 89 90void esiop_update_scntl3(struct esiop_softc *, struct siop_common_target *); 91 92#ifdef SIOP_STATS 93static int esiop_stat_intr = 0; 94static int esiop_stat_intr_shortxfer = 0; 95static int 
esiop_stat_intr_sdp = 0;
static int esiop_stat_intr_done = 0;
static int esiop_stat_intr_xferdisc = 0;
static int esiop_stat_intr_lunresel = 0;
static int esiop_stat_intr_qfull = 0;
void esiop_printstats(void);
#define INCSTAT(x) x++
#else
#define INCSTAT(x)
#endif

/*
 * Flush the host-memory copy of the SCRIPTS program to/from the device.
 * A no-op on chips with on-board script RAM (SF_CHIP_RAM), where the
 * script is not in DMA-able host memory.
 */
static inline void esiop_script_sync(struct esiop_softc *, int);
static inline void
esiop_script_sync(struct esiop_softc *sc, int ops)
{

	if ((sc->sc_c.features & SF_CHIP_RAM) == 0)
		bus_dmamap_sync(sc->sc_c.sc_dmat, sc->sc_c.sc_scriptdma, 0,
		    PAGE_SIZE, ops);
}

/*
 * Read one 32-bit word of the script image at word index 'offset',
 * either from on-chip RAM (bus_space) or from the host-memory copy
 * (stored little-endian, hence le32toh()).
 */
static inline uint32_t esiop_script_read(struct esiop_softc *, u_int);
static inline uint32_t
esiop_script_read(struct esiop_softc *sc, u_int offset)
{

	if (sc->sc_c.features & SF_CHIP_RAM) {
		return bus_space_read_4(sc->sc_c.sc_ramt, sc->sc_c.sc_ramh,
		    offset * 4);
	} else {
		return le32toh(sc->sc_c.sc_script[offset]);
	}
}

/*
 * Write one 32-bit word of the script image at word index 'offset';
 * counterpart of esiop_script_read().
 */
static inline void esiop_script_write(struct esiop_softc *, u_int,
    uint32_t);
static inline void
esiop_script_write(struct esiop_softc *sc, u_int offset, uint32_t val)
{

	if (sc->sc_c.features & SF_CHIP_RAM) {
		bus_space_write_4(sc->sc_c.sc_ramt, sc->sc_c.sc_ramh,
		    offset * 4, val);
	} else {
		sc->sc_c.sc_script[offset] = htole32(val);
	}
}

/*
 * Late attach: common siop attach, driver list setup, allocation of the
 * command-done ring (borrowed from a tag-table entry), bus + chip reset,
 * then attach of the scsipi channel.
 */
void
esiop_attach(struct esiop_softc *sc)
{
	struct esiop_dsatbl *tagtbl_donering;

	if (siop_common_attach(&sc->sc_c) != 0 )
		return;

	TAILQ_INIT(&sc->free_list);
	TAILQ_INIT(&sc->cmds);
	TAILQ_INIT(&sc->free_tagtbl);
	TAILQ_INIT(&sc->tag_tblblk);
	sc->sc_currschedslot = 0;
#ifdef SIOP_DEBUG
	aprint_debug_dev(sc->sc_c.sc_dev,
	    "script size = %d, PHY addr=0x%x, VIRT=%p\n",
	    (int)sizeof(esiop_script),
	    (uint32_t)sc->sc_c.sc_scriptaddr, sc->sc_c.sc_script);
#endif

	sc->sc_c.sc_adapt.adapt_max_periph = ESIOP_NTAG;
	sc->sc_c.sc_adapt.adapt_request = esiop_scsipi_request;

	/*
	 * get space for the CMD done slot. For this we use a tag table entry.
	 * It's the same size and allows us to not waste 3/4 of a page
	 */
#ifdef DIAGNOSTIC
	/* the borrowing trick only works if both rings have the same size */
	if (ESIOP_NTAG != A_ndone_slots) {
		aprint_error_dev(sc->sc_c.sc_dev,
		    "size of tag DSA table different from the done ring\n");
		return;
	}
#endif
	esiop_moretagtbl(sc);
	tagtbl_donering = TAILQ_FIRST(&sc->free_tagtbl);
	if (tagtbl_donering == NULL) {
		aprint_error_dev(sc->sc_c.sc_dev,
		    "no memory for command done ring\n");
		return;
	}
	TAILQ_REMOVE(&sc->free_tagtbl, tagtbl_donering, next);
	sc->sc_done_map = tagtbl_donering->tblblk->blkmap;
	sc->sc_done_offset = tagtbl_donering->tbl_offset;
	sc->sc_done_slot = &tagtbl_donering->tbl[0];

	/* Do a bus reset, so that devices fall back to narrow/async */
	siop_resetbus(&sc->sc_c);
	/*
	 * siop_reset() will reset the chip, thus clearing pending interrupts
	 */
	esiop_reset(sc);
#ifdef SIOP_DUMP_SCRIPT
	esiop_dump_script(sc);
#endif

	config_found(sc->sc_c.sc_dev, &sc->sc_c.sc_chan, scsiprint, CFARGS_NONE);
}

/*
 * Reset the chip and rebuild the SCRIPTS image and its data structures:
 * script code, msg-in word, semaphore word, scheduler ring and per-target
 * DSA table are laid out consecutively (tracked via sc_free_offset).
 * Absolute addresses are then patched into the script, the scheduler and
 * done rings are initialized, known targets re-registered and the script
 * started at the reselect entry point.
 */
void
esiop_reset(struct esiop_softc *sc)
{
	int i, j;
	uint32_t addr;
	uint32_t msgin_addr, sem_addr;

	siop_common_reset(&sc->sc_c);

	/*
	 * we copy the script at the beginning of RAM. Then there is 4 bytes
	 * for messages in, and 4 bytes for semaphore
	 */
	sc->sc_free_offset = __arraycount(esiop_script);
	msgin_addr =
	    sc->sc_free_offset * sizeof(uint32_t) + sc->sc_c.sc_scriptaddr;
	sc->sc_free_offset += 1;
	sc->sc_semoffset = sc->sc_free_offset;
	sem_addr =
	    sc->sc_semoffset * sizeof(uint32_t) + sc->sc_c.sc_scriptaddr;
	sc->sc_free_offset += 1;
	/* then we have the scheduler ring */
	sc->sc_shedoffset = sc->sc_free_offset;
	sc->sc_free_offset += A_ncmd_slots * CMD_SLOTSIZE;
	/* then the targets DSA table */
	sc->sc_target_table_offset = sc->sc_free_offset;
	sc->sc_free_offset += sc->sc_c.sc_chan.chan_ntargets;
	/* copy and patch the script */
	if (sc->sc_c.features & SF_CHIP_RAM) {
		bus_space_write_region_4(sc->sc_c.sc_ramt, sc->sc_c.sc_ramh, 0,
		    esiop_script,
		    __arraycount(esiop_script));
		/* E_*_Used[] list the script words referencing each symbol */
		for (j = 0; j < __arraycount(E_tlq_offset_Used); j++) {
			bus_space_write_4(sc->sc_c.sc_ramt, sc->sc_c.sc_ramh,
			    E_tlq_offset_Used[j] * 4,
			    sizeof(struct siop_common_xfer));
		}
		for (j = 0; j < __arraycount(E_saved_offset_offset_Used); j++) {
			bus_space_write_4(sc->sc_c.sc_ramt, sc->sc_c.sc_ramh,
			    E_saved_offset_offset_Used[j] * 4,
			    sizeof(struct siop_common_xfer) + 4);
		}
		for (j = 0; j < __arraycount(E_abs_msgin2_Used); j++) {
			bus_space_write_4(sc->sc_c.sc_ramt, sc->sc_c.sc_ramh,
			    E_abs_msgin2_Used[j] * 4, msgin_addr);
		}
		for (j = 0; j < __arraycount(E_abs_sem_Used); j++) {
			bus_space_write_4(sc->sc_c.sc_ramt, sc->sc_c.sc_ramh,
			    E_abs_sem_Used[j] * 4, sem_addr);
		}

		if (sc->sc_c.features & SF_CHIP_LED0) {
			bus_space_write_region_4(sc->sc_c.sc_ramt,
			    sc->sc_c.sc_ramh,
			    Ent_led_on1, esiop_led_on,
			    __arraycount(esiop_led_on));
			bus_space_write_region_4(sc->sc_c.sc_ramt,
			    sc->sc_c.sc_ramh,
			    Ent_led_on2, esiop_led_on,
			    __arraycount(esiop_led_on));
			bus_space_write_region_4(sc->sc_c.sc_ramt,
			    sc->sc_c.sc_ramh,
			    Ent_led_off, esiop_led_off,
			    __arraycount(esiop_led_off));
		}
	} else {
		/* same patching, but on the host-memory (little-endian) copy */
		for (j = 0; j < __arraycount(esiop_script); j++) {
			sc->sc_c.sc_script[j] = htole32(esiop_script[j]);
		}
		for (j = 0; j < __arraycount(E_tlq_offset_Used); j++) {
			sc->sc_c.sc_script[E_tlq_offset_Used[j]] =
			    htole32(sizeof(struct siop_common_xfer));
		}
		for (j = 0; j < __arraycount(E_saved_offset_offset_Used); j++) {
			sc->sc_c.sc_script[E_saved_offset_offset_Used[j]] =
			    htole32(sizeof(struct siop_common_xfer) + 4);
		}
		for (j = 0; j < __arraycount(E_abs_msgin2_Used); j++) {
			sc->sc_c.sc_script[E_abs_msgin2_Used[j]] =
			    htole32(msgin_addr);
		}
		for (j = 0; j < __arraycount(E_abs_sem_Used); j++) {
			sc->sc_c.sc_script[E_abs_sem_Used[j]] =
			    htole32(sem_addr);
		}

		if (sc->sc_c.features & SF_CHIP_LED0) {
			for (j = 0; j < __arraycount(esiop_led_on); j++)
				sc->sc_c.sc_script[
				    Ent_led_on1 / sizeof(esiop_led_on[0]) + j
				    ] = htole32(esiop_led_on[j]);
			for (j = 0; j < __arraycount(esiop_led_on); j++)
				sc->sc_c.sc_script[
				    Ent_led_on2 / sizeof(esiop_led_on[0]) + j
				    ] = htole32(esiop_led_on[j]);
			for (j = 0; j < __arraycount(esiop_led_off); j++)
				sc->sc_c.sc_script[
				    Ent_led_off / sizeof(esiop_led_off[0]) + j
				    ] = htole32(esiop_led_off[j]);
		}
	}
	/* get base of scheduler ring */
	addr = sc->sc_c.sc_scriptaddr + sc->sc_shedoffset * sizeof(uint32_t);
	/* init scheduler */
	for (i = 0; i < A_ncmd_slots; i++) {
		esiop_script_write(sc,
		    sc->sc_shedoffset + i * CMD_SLOTSIZE, A_f_cmd_free);
	}
	sc->sc_currschedslot = 0;
	/* SCRATCHE = current scheduler slot, SCRATCHD = its bus address */
	bus_space_write_1(sc->sc_c.sc_rt, sc->sc_c.sc_rh, SIOP_SCRATCHE, 0);
	bus_space_write_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh, SIOP_SCRATCHD, addr);
	/*
	 * 0x78000000 is a 'move data8 to reg'. data8 is the second
	 * octet, reg offset is the third.  The ring base address is
	 * patched one byte at a time into four such instructions.
	 */
	esiop_script_write(sc, Ent_cmdr0 / 4,
	    0x78640000 | ((addr & 0x000000ff) << 8));
	esiop_script_write(sc, Ent_cmdr1 / 4,
	    0x78650000 | ((addr & 0x0000ff00)));
	esiop_script_write(sc, Ent_cmdr2 / 4,
	    0x78660000 | ((addr & 0x00ff0000) >> 8));
	esiop_script_write(sc, Ent_cmdr3 / 4,
	    0x78670000 | ((addr & 0xff000000) >> 16));
	/* done ring */
	for (i = 0; i < A_ndone_slots; i++)
		sc->sc_done_slot[i] = 0;
	bus_dmamap_sync(sc->sc_c.sc_dmat, sc->sc_done_map,
	    sc->sc_done_offset, A_ndone_slots * sizeof(uint32_t),
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	addr = sc->sc_done_map->dm_segs[0].ds_addr + sc->sc_done_offset;
	sc->sc_currdoneslot = 0;
	/* SCRATCHE+2 = current done slot, SCRATCHF = its bus address */
	bus_space_write_1(sc->sc_c.sc_rt, sc->sc_c.sc_rh, SIOP_SCRATCHE + 2, 0);
	bus_space_write_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh, SIOP_SCRATCHF, addr);
	esiop_script_write(sc, Ent_doner0 / 4,
	    0x786c0000 | ((addr & 0x000000ff) << 8));
	esiop_script_write(sc, Ent_doner1 / 4,
	    0x786d0000 | ((addr & 0x0000ff00)));
	esiop_script_write(sc, Ent_doner2 / 4,
	    0x786e0000 | ((addr & 0x00ff0000) >> 8));
	esiop_script_write(sc, Ent_doner3 / 4,
	    0x786f0000 | ((addr & 0xff000000) >> 16));

	/* set flags */
	bus_space_write_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh, SIOP_SCRATCHC, 0);
	/* write pointer of base of target DSA table */
	addr = (sc->sc_target_table_offset * sizeof(uint32_t)) +
	    sc->sc_c.sc_scriptaddr;
	/* OR the address bytes into the existing load_targtable opcodes */
	esiop_script_write(sc, (Ent_load_targtable / 4) + 0,
	    esiop_script_read(sc, (Ent_load_targtable / 4) + 0) |
	    ((addr & 0x000000ff) << 8));
	esiop_script_write(sc, (Ent_load_targtable / 4) + 2,
	    esiop_script_read(sc, (Ent_load_targtable / 4) + 2) |
	    ((addr & 0x0000ff00)));
	esiop_script_write(sc, (Ent_load_targtable / 4) + 4,
	    esiop_script_read(sc, (Ent_load_targtable / 4) + 4) |
	    ((addr & 0x00ff0000) >> 8));
	esiop_script_write(sc, (Ent_load_targtable / 4) + 6,
	    esiop_script_read(sc, (Ent_load_targtable / 4) + 6) |
	    ((addr & 0xff000000) >> 16));
#ifdef SIOP_DEBUG
	printf("%s: target table offset %d free offset %d\n",
	    device_xname(sc->sc_c.sc_dev), sc->sc_target_table_offset,
	    sc->sc_free_offset);
#endif

	/* register existing targets */
	for (i = 0; i < sc->sc_c.sc_chan.chan_ntargets; i++) {
		if (sc->sc_c.targets[i])
			esiop_target_register(sc, i);
	}
	/* start script */
	if ((sc->sc_c.features & SF_CHIP_RAM) == 0) {
		bus_dmamap_sync(sc->sc_c.sc_dmat, sc->sc_c.sc_scriptdma, 0,
		    PAGE_SIZE, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	}
	bus_space_write_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh, SIOP_DSP,
	    sc->sc_c.sc_scriptaddr + Ent_reselect);
}

/* Kick the SCRIPTS processor at the given script entry point. */
#if 0
#define CALL_SCRIPT(ent) do {					\
	printf ("start script DSA 0x%lx DSP 0x%lx\n",		\
	    esiop_cmd->cmd_c.dsa,				\
	    sc->sc_c.sc_scriptaddr + ent);			\
	bus_space_write_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh,	\
	    SIOP_DSP, sc->sc_c.sc_scriptaddr + ent);		\
} while (/* CONSTCOND */0)
#else
#define CALL_SCRIPT(ent) do {					\
	bus_space_write_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh,	\
	    SIOP_DSP, sc->sc_c.sc_scriptaddr + ent);		\
} while (/* CONSTCOND */0)
#endif

/*
 * Interrupt handler.  Harvests completed commands from the done ring,
 * then decodes DMA (DSTAT) and SCSI (SIST) exceptions and script
 * interrupts (DSPS), restarting the script as appropriate.
 * Returns 1 if the interrupt was for us, 0 otherwise.
 */
int
esiop_intr(void *v)
{
	struct esiop_softc *sc = v;
	struct esiop_target *esiop_target;
	struct esiop_cmd *esiop_cmd;
	struct esiop_lun *esiop_lun;
	struct scsipi_xfer *xs;
	int istat, sist, sstat1, dstat = 0; /* XXX: gcc */
	uint32_t irqcode;
	int need_reset = 0;
	int offset, target, lun, tag;
	uint32_t tflags;
	uint32_t addr;
	int freetarget = 0;
	int slot;
	int retval = 0;

again:
	istat = bus_space_read_1(sc->sc_c.sc_rt, sc->sc_c.sc_rh, SIOP_ISTAT);
	if ((istat & (ISTAT_INTF | ISTAT_DIP | ISTAT_SIP)) == 0) {
		return retval;
	}
	retval = 1;
	INCSTAT(esiop_stat_intr);
	/* harvest the done ring on every interrupt */
	esiop_checkdone(sc);
	if (istat & ISTAT_INTF) {
		/* ack INTF (interrupt-on-the-fly) and re-poll ISTAT */
		bus_space_write_1(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
		    SIOP_ISTAT, ISTAT_INTF);
		goto again;
	}
	if ((istat &(ISTAT_DIP | ISTAT_SIP | ISTAT_ABRT)) ==
	    (ISTAT_DIP | ISTAT_ABRT)) {
		/* clear abort */
		bus_space_write_1(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
		    SIOP_ISTAT, 0);
	}

	/* get CMD from T/L/Q */
	tflags = bus_space_read_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
	    SIOP_SCRATCHC);
#ifdef SIOP_DEBUG_INTR
	printf("interrupt, istat=0x%x tflags=0x%x "
	    "DSA=0x%x DSP=0x%lx\n", istat, tflags,
	    bus_space_read_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh, SIOP_DSA),
	    (u_long)(bus_space_read_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
	    SIOP_DSP) -
	    sc->sc_c.sc_scriptaddr));
#endif
	/*
	 * decode target/lun/tag from SCRATCHC; -1 means "not valid".
	 * NOTE(review): the bound checks use '>' against chan_ntargets /
	 * chan_nluns — looks like '>=' may have been intended (a value equal
	 * to the count would index one past the targets[]/lun arrays);
	 * confirm against siopvar_common.h before changing.
	 */
	target = (tflags & A_f_c_target) ? ((tflags >> 8) & 0xff) : -1;
	if (target > sc->sc_c.sc_chan.chan_ntargets) target = -1;
	lun = (tflags & A_f_c_lun) ? ((tflags >> 16) & 0xff) : -1;
	if (lun > sc->sc_c.sc_chan.chan_nluns) lun = -1;
	tag = (tflags & A_f_c_tag) ? ((tflags >> 24) & 0xff) : -1;

	if (target >= 0 && lun >= 0) {
		esiop_target = (struct esiop_target *)sc->sc_c.targets[target];
		if (esiop_target == NULL) {
			printf("esiop_target (target %d) not valid\n", target);
			goto none;
		}
		esiop_lun = esiop_target->esiop_lun[lun];
		if (esiop_lun == NULL) {
			printf("esiop_lun (target %d lun %d) not valid\n",
			    target, lun);
			goto none;
		}
		esiop_cmd =
		    (tag >= 0) ? esiop_lun->tactive[tag] : esiop_lun->active;
		if (esiop_cmd == NULL) {
			printf("esiop_cmd (target %d lun %d tag %d)"
			    " not valid\n",
			    target, lun, tag);
			goto none;
		}
		xs = esiop_cmd->cmd_c.xs;
#ifdef DIAGNOSTIC
		if (esiop_cmd->cmd_c.status != CMDST_ACTIVE) {
			printf("esiop_cmd (target %d lun %d) "
			    "not active (%d)\n", target, lun,
			    esiop_cmd->cmd_c.status);
			goto none;
		}
#endif
		esiop_table_sync(esiop_cmd,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
	} else {
none:
		xs = NULL;
		esiop_target = NULL;
		esiop_lun = NULL;
		esiop_cmd = NULL;
	}
	if (istat & ISTAT_DIP) {
		dstat = bus_space_read_1(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
		    SIOP_DSTAT);
		if (dstat & DSTAT_ABRT) {
			/* was probably generated by a bus reset IOCTL */
			if ((dstat & DSTAT_DFE) == 0)
				siop_clearfifo(&sc->sc_c);
			goto reset;
		}
		if (dstat & DSTAT_SSI) {
			/* single-step mode: report and advance the script */
			printf("single step dsp 0x%08x dsa 0x08%x\n",
			    (int)(bus_space_read_4(sc->sc_c.sc_rt,
			    sc->sc_c.sc_rh, SIOP_DSP) -
			    sc->sc_c.sc_scriptaddr),
			    bus_space_read_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
			    SIOP_DSA));
			if ((dstat & ~(DSTAT_DFE | DSTAT_SSI)) == 0 &&
			    (istat & ISTAT_SIP) == 0) {
				bus_space_write_1(sc->sc_c.sc_rt,
				    sc->sc_c.sc_rh, SIOP_DCNTL,
				    bus_space_read_1(sc->sc_c.sc_rt,
				    sc->sc_c.sc_rh, SIOP_DCNTL) | DCNTL_STD);
			}
			return 1;
		}

		/* any DMA exception other than script-IRQ/fifo-empty/step */
		if (dstat & ~(DSTAT_SIR | DSTAT_DFE | DSTAT_SSI)) {
			printf("%s: DMA IRQ:", device_xname(sc->sc_c.sc_dev));
			if (dstat & DSTAT_IID)
				printf(" Illegal instruction");
			if (dstat & DSTAT_BF)
				printf(" bus fault");
			if (dstat & DSTAT_MDPE)
				printf(" parity");
			if (dstat & DSTAT_DFE)
				printf(" DMA fifo empty");
			else
				siop_clearfifo(&sc->sc_c);
			printf(", DSP=0x%x DSA=0x%x: ",
			    (int)(bus_space_read_4(sc->sc_c.sc_rt,
			    sc->sc_c.sc_rh,
			    SIOP_DSP) - sc->sc_c.sc_scriptaddr),
			    bus_space_read_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
			    SIOP_DSA));
			if (esiop_cmd)
				printf("T/L/Q=%d/%d/%d last msg_in=0x%x status=0x%x\n",
				    target, lun, tag,
				    esiop_cmd->cmd_tables->msg_in[0],
				    le32toh(esiop_cmd->cmd_tables->status));
			else
				printf(" current T/L/Q invalid\n");
			need_reset = 1;
		}
	}
	if (istat & ISTAT_SIP) {
		if (istat & ISTAT_DIP)
			delay(10);
		/*
		 * Can't read sist0 & sist1 independently, or we have to
		 * insert delay
		 */
		sist = bus_space_read_2(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
		    SIOP_SIST0);
		sstat1 = bus_space_read_1(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
		    SIOP_SSTAT1);
#ifdef SIOP_DEBUG_INTR
		printf("scsi interrupt, sist=0x%x sstat1=0x%x "
		    "DSA=0x%x DSP=0x%lx\n", sist, sstat1,
		    bus_space_read_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh, SIOP_DSA),
		    (u_long)(bus_space_read_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
		    SIOP_DSP) -
		    sc->sc_c.sc_scriptaddr));
#endif
		if (sist & SIST0_RST) {
			/* external SCSI bus reset detected */
			esiop_handle_reset(sc);
			/* no table to flush here */
			return 1;
		}
		if (sist & SIST0_SGE) {
			if (esiop_cmd)
				scsipi_printaddr(xs->xs_periph);
			else
				printf("%s:", device_xname(sc->sc_c.sc_dev));
			printf("scsi gross error\n");
			if (esiop_target)
				esiop_target->target_c.flags &= ~TARF_DT;
#ifdef SIOP_DEBUG
			printf("DSA=0x%x DSP=0x%lx\n",
			    bus_space_read_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
			    SIOP_DSA),
			    (u_long)(bus_space_read_4(sc->sc_c.sc_rt,
			    sc->sc_c.sc_rh, SIOP_DSP) -
			    sc->sc_c.sc_scriptaddr));
			printf("SDID 0x%x SCNTL3 0x%x SXFER 0x%x SCNTL4 0x%x\n",
			    bus_space_read_1(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
			    SIOP_SDID),
			    bus_space_read_1(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
			    SIOP_SCNTL3),
			    bus_space_read_1(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
			    SIOP_SXFER),
			    bus_space_read_1(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
			    SIOP_SCNTL4));

#endif
			goto reset;
		}
		if ((sist & SIST0_MA) && need_reset == 0) {
			/* phase mismatch: the target changed phase mid-xfer */
			if (esiop_cmd) {
				int scratchc0;
				dstat = bus_space_read_1(sc->sc_c.sc_rt,
				    sc->sc_c.sc_rh, SIOP_DSTAT);
				/*
				 * first restore DSA, in case we were in a S/G
				 * operation.
				 */
				bus_space_write_4(sc->sc_c.sc_rt,
				    sc->sc_c.sc_rh,
				    SIOP_DSA, esiop_cmd->cmd_c.dsa);
				scratchc0 = bus_space_read_1(sc->sc_c.sc_rt,
				    sc->sc_c.sc_rh, SIOP_SCRATCHC);
				switch (sstat1 & SSTAT1_PHASE_MASK) {
				case SSTAT1_PHASE_STATUS:
				/*
				 * previous phase may be aborted for any reason
				 * ( for example, the target has less data to
				 * transfer than requested). Compute resid and
				 * just go to status, the command should
				 * terminate.
				 */
					INCSTAT(esiop_stat_intr_shortxfer);
					if (scratchc0 & A_f_c_data)
						siop_ma(&esiop_cmd->cmd_c);
					else if ((dstat & DSTAT_DFE) == 0)
						siop_clearfifo(&sc->sc_c);
					CALL_SCRIPT(Ent_status);
					return 1;
				case SSTAT1_PHASE_MSGIN:
				/*
				 * target may be ready to disconnect
				 * Compute resid which would be used later
				 * if a save data pointer is needed.
				 */
					INCSTAT(esiop_stat_intr_xferdisc);
					if (scratchc0 & A_f_c_data)
						siop_ma(&esiop_cmd->cmd_c);
					else if ((dstat & DSTAT_DFE) == 0)
						siop_clearfifo(&sc->sc_c);
					bus_space_write_1(sc->sc_c.sc_rt,
					    sc->sc_c.sc_rh, SIOP_SCRATCHC,
					    scratchc0 & ~A_f_c_data);
					CALL_SCRIPT(Ent_msgin);
					return 1;
				}
				aprint_error_dev(sc->sc_c.sc_dev,
				    "unexpected phase mismatch %d\n",
				    sstat1 & SSTAT1_PHASE_MASK);
			} else {
				aprint_error_dev(sc->sc_c.sc_dev,
				    "phase mismatch without command\n");
			}
			need_reset = 1;
		}
		if (sist & SIST0_PAR) {
			/* parity error, reset */
			if (esiop_cmd)
				scsipi_printaddr(xs->xs_periph);
			else
				printf("%s:", device_xname(sc->sc_c.sc_dev));
			printf("parity error\n");
			if (esiop_target)
				esiop_target->target_c.flags &= ~TARF_DT;
			goto reset;
		}
		if ((sist & (SIST1_STO << 8)) && need_reset == 0) {
			/*
			 * selection time out, assume there's no device here
			 * We also have to update the ring pointer ourselves
			 */
			slot = bus_space_read_1(sc->sc_c.sc_rt,
			    sc->sc_c.sc_rh, SIOP_SCRATCHE);
			esiop_script_sync(sc,
			    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
#ifdef SIOP_DEBUG_SCHED
			printf("sel timeout target %d, slot %d\n",
			    target, slot);
#endif
			/*
			 * mark this slot as free, and advance to next slot
			 */
			esiop_script_write(sc,
			    sc->sc_shedoffset + slot * CMD_SLOTSIZE,
			    A_f_cmd_free);
			addr = bus_space_read_4(sc->sc_c.sc_rt,
			    sc->sc_c.sc_rh, SIOP_SCRATCHD);
			if (slot < (A_ncmd_slots - 1)) {
				bus_space_write_1(sc->sc_c.sc_rt,
				    sc->sc_c.sc_rh, SIOP_SCRATCHE, slot + 1);
				addr = addr + sizeof(struct esiop_slot);
			} else {
				/* wrap back to the start of the ring */
				bus_space_write_1(sc->sc_c.sc_rt,
				    sc->sc_c.sc_rh, SIOP_SCRATCHE, 0);
				addr = sc->sc_c.sc_scriptaddr +
				    sc->sc_shedoffset * sizeof(uint32_t);
			}
			bus_space_write_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
			    SIOP_SCRATCHD, addr);
			esiop_script_sync(sc,
			    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
			if (esiop_cmd) {
				esiop_cmd->cmd_c.status = CMDST_DONE;
				xs->error = XS_SELTIMEOUT;
				freetarget = 1;
				goto end;
			} else {
				printf("%s: selection timeout without "
				    "command, target %d (sdid 0x%x), "
				    "slot %d\n",
				    device_xname(sc->sc_c.sc_dev), target,
				    bus_space_read_1(sc->sc_c.sc_rt,
				    sc->sc_c.sc_rh, SIOP_SDID), slot);
				need_reset = 1;
			}
		}
		if (sist & SIST0_UDC) {
			/*
			 * unexpected disconnect. Usually the target signals
			 * a fatal condition this way. Attempt to get sense.
			 */
			if (esiop_cmd) {
				esiop_cmd->cmd_tables->status =
				    htole32(SCSI_CHECK);
				goto end;
			}
			aprint_error_dev(sc->sc_c.sc_dev,
			    "unexpected disconnect without command\n");
			goto reset;
		}
		if (sist & (SIST1_SBMC << 8)) {
			/* SCSI bus mode change */
			if (siop_modechange(&sc->sc_c) == 0 || need_reset == 1)
				goto reset;
			if ((istat & ISTAT_DIP) && (dstat & DSTAT_SIR)) {
				/*
				 * we have a script interrupt, it will
				 * restart the script.
				 */
				goto scintr;
			}
			/*
			 * else we have to restart it ourselves, at the
			 * interrupted instruction.
			 */
			bus_space_write_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
			    SIOP_DSP,
			    bus_space_read_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
			    SIOP_DSP) - 8);
			return 1;
		}
		/* Else it's an unhandled exception (for now). */
		aprint_error_dev(sc->sc_c.sc_dev,
		    "unhandled scsi interrupt, sist=0x%x sstat1=0x%x "
		    "DSA=0x%x DSP=0x%x\n", sist,
		    bus_space_read_1(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
		    SIOP_SSTAT1),
		    bus_space_read_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh, SIOP_DSA),
		    (int)(bus_space_read_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
		    SIOP_DSP) - sc->sc_c.sc_scriptaddr));
		if (esiop_cmd) {
			esiop_cmd->cmd_c.status = CMDST_DONE;
			xs->error = XS_SELTIMEOUT;
			goto end;
		}
		need_reset = 1;
	}
	if (need_reset) {
reset:
		/* fatal error, reset the bus */
		siop_resetbus(&sc->sc_c);
		/* no table to flush here */
		return 1;
	}

scintr:
	if ((istat & ISTAT_DIP) && (dstat & DSTAT_SIR)) { /* script interrupt */
		irqcode = bus_space_read_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
		    SIOP_DSPS);
#ifdef SIOP_DEBUG_INTR
		printf("script interrupt 0x%x\n", irqcode);
#endif
		/*
		 * no command, or an inactive command is only valid for a
		 * reselect interrupt
		 */
		if ((irqcode & 0x80) == 0) {
			if (esiop_cmd == NULL) {
				aprint_error_dev(sc->sc_c.sc_dev,
				    "script interrupt (0x%x) with invalid DSA !!!\n",
				    irqcode);
				goto reset;
			}
			if (esiop_cmd->cmd_c.status != CMDST_ACTIVE) {
				aprint_error_dev(sc->sc_c.sc_dev,
				    "command with invalid status "
				    "(IRQ code 0x%x current status %d) !\n",
				    irqcode, esiop_cmd->cmd_c.status);
				xs = NULL;
			}
		}
		switch(irqcode) {
		case A_int_err:
			printf("error, DSP=0x%x\n",
			    (int)(bus_space_read_4(sc->sc_c.sc_rt,
			    sc->sc_c.sc_rh, SIOP_DSP) -
			    sc->sc_c.sc_scriptaddr));
			if (xs) {
				xs->error = XS_SELTIMEOUT;
				goto end;
			} else {
				goto reset;
			}
		case A_int_msgin:
		{
			/* one-byte message received; SFBR holds the byte */
			int msgin = bus_space_read_1(sc->sc_c.sc_rt,
			    sc->sc_c.sc_rh, SIOP_SFBR);
			if (msgin == MSG_MESSAGE_REJECT) {
				int msg, extmsg;
				if (esiop_cmd->cmd_tables->msg_out[0] & 0x80) {
					/*
					 * message was part of a identify +
					 * something else. Identify shouldn't
					 * have been rejected.
					 */
					msg =
					    esiop_cmd->cmd_tables->msg_out[1];
					extmsg =
					    esiop_cmd->cmd_tables->msg_out[3];
				} else {
					msg =
					    esiop_cmd->cmd_tables->msg_out[0];
					extmsg =
					    esiop_cmd->cmd_tables->msg_out[2];
				}
				if (msg == MSG_MESSAGE_REJECT) {
					/* MSG_REJECT for a MSG_REJECT !*/
					if (xs)
						scsipi_printaddr(xs->xs_periph);
					else
						printf("%s: ", device_xname(
						    sc->sc_c.sc_dev));
					printf("our reject message was "
					    "rejected\n");
					goto reset;
				}
				if (msg == MSG_EXTENDED &&
				    extmsg == MSG_EXT_WDTR) {
					/* WDTR rejected, initiate sync */
					if ((esiop_target->target_c.flags &
					    TARF_SYNC) == 0) {
						esiop_target->target_c.status =
						    TARST_OK;
						siop_update_xfer_mode(&sc->sc_c,
						    target);
						/* no table to flush here */
						CALL_SCRIPT(Ent_msgin_ack);
						return 1;
					}
					esiop_target->target_c.status =
					    TARST_SYNC_NEG;
					siop_sdtr_msg(&esiop_cmd->cmd_c, 0,
					    sc->sc_c.st_minsync,
					    sc->sc_c.maxoff);
					esiop_table_sync(esiop_cmd,
					    BUS_DMASYNC_PREREAD |
					    BUS_DMASYNC_PREWRITE);
					CALL_SCRIPT(Ent_send_msgout);
					return 1;
				} else if (msg == MSG_EXTENDED &&
				    extmsg == MSG_EXT_SDTR) {
					/* sync rejected */
					esiop_target->target_c.offset = 0;
					esiop_target->target_c.period = 0;
					esiop_target->target_c.status =
					    TARST_OK;
					siop_update_xfer_mode(&sc->sc_c,
					    target);
					/* no table to flush here */
					CALL_SCRIPT(Ent_msgin_ack);
					return 1;
				} else if (msg == MSG_EXTENDED &&
				    extmsg == MSG_EXT_PPR) {
					/* PPR rejected */
					esiop_target->target_c.offset = 0;
					esiop_target->target_c.period = 0;
					esiop_target->target_c.status =
					    TARST_OK;
					siop_update_xfer_mode(&sc->sc_c,
					    target);
					/* no table to flush here */
					CALL_SCRIPT(Ent_msgin_ack);
					return 1;
				} else if (msg == MSG_SIMPLE_Q_TAG ||
				    msg == MSG_HEAD_OF_Q_TAG ||
				    msg == MSG_ORDERED_Q_TAG) {
					if (esiop_handle_qtag_reject(
					    esiop_cmd) == -1)
						goto reset;
					CALL_SCRIPT(Ent_msgin_ack);
					return 1;
				}
				if (xs)
					scsipi_printaddr(xs->xs_periph);
				else
					printf("%s: ",
					    device_xname(sc->sc_c.sc_dev));
				if (msg == MSG_EXTENDED) {
					printf("scsi message reject, extended "
					    "message sent was 0x%x\n", extmsg);
				} else {
					printf("scsi message reject, message "
					    "sent was 0x%x\n", msg);
				}
				/* no table to flush here */
				CALL_SCRIPT(Ent_msgin_ack);
				return 1;
			}
			if (msgin == MSG_IGN_WIDE_RESIDUE) {
			/* use the extmsgdata table to get the second byte */
				esiop_cmd->cmd_tables->t_extmsgdata.count =
				    htole32(1);
				esiop_table_sync(esiop_cmd,
				    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
				CALL_SCRIPT(Ent_get_extmsgdata);
				return 1;
			}
			/* anything else: answer with MESSAGE REJECT */
			if (xs)
				scsipi_printaddr(xs->xs_periph);
			else
				printf("%s: ", device_xname(sc->sc_c.sc_dev));
			printf("unhandled message 0x%x\n", msgin);
			esiop_cmd->cmd_tables->msg_out[0] = MSG_MESSAGE_REJECT;
			esiop_cmd->cmd_tables->t_msgout.count= htole32(1);
			esiop_table_sync(esiop_cmd,
			    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
			CALL_SCRIPT(Ent_send_msgout);
			return 1;
		}
		case A_int_extmsgin:
			/* got the extended-message header; fetch the payload */
#ifdef SIOP_DEBUG_INTR
			printf("extended message: msg 0x%x len %d\n",
			    esiop_cmd->cmd_tables->msg_in[2],
			    esiop_cmd->cmd_tables->msg_in[1]);
#endif
			if (esiop_cmd->cmd_tables->msg_in[1] >
			    sizeof(esiop_cmd->cmd_tables->msg_in) - 2)
				aprint_error_dev(sc->sc_c.sc_dev,
				    "extended message too big (%d)\n",
				    esiop_cmd->cmd_tables->msg_in[1]);
			esiop_cmd->cmd_tables->t_extmsgdata.count =
			    htole32(esiop_cmd->cmd_tables->msg_in[1] - 1);
			esiop_table_sync(esiop_cmd,
			    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
			CALL_SCRIPT(Ent_get_extmsgdata);
			return 1;
		case A_int_extmsgdata:
			/* full extended message now in msg_in[]; negotiate */
#ifdef SIOP_DEBUG_INTR
			{
				int i;
				printf("extended message: 0x%x, data:",
				    esiop_cmd->cmd_tables->msg_in[2]);
				for (i = 3; i < 2 + esiop_cmd->cmd_tables->msg_in[1];
				    i++)
					printf(" 0x%x",
					    esiop_cmd->cmd_tables->msg_in[i]);
				printf("\n");
			}
#endif
			if (esiop_cmd->cmd_tables->msg_in[0] ==
			    MSG_IGN_WIDE_RESIDUE) {
			/* we got the second byte of MSG_IGN_WIDE_RESIDUE */
				if (esiop_cmd->cmd_tables->msg_in[3] != 1)
					printf("MSG_IGN_WIDE_RESIDUE: "
					    "bad len %d\n",
					    esiop_cmd->cmd_tables->msg_in[3]);
				switch (siop_iwr(&esiop_cmd->cmd_c)) {
				case SIOP_NEG_MSGOUT:
					esiop_table_sync(esiop_cmd,
					    BUS_DMASYNC_PREREAD |
					    BUS_DMASYNC_PREWRITE);
					CALL_SCRIPT(Ent_send_msgout);
					return 1;
				case SIOP_NEG_ACK:
					CALL_SCRIPT(Ent_msgin_ack);
					return 1;
				default:
					panic("invalid retval from "
					    "siop_iwr()");
				}
				return 1;
			}
			if (esiop_cmd->cmd_tables->msg_in[2] == MSG_EXT_PPR) {
				switch (siop_ppr_neg(&esiop_cmd->cmd_c)) {
				case SIOP_NEG_MSGOUT:
					esiop_update_scntl3(sc,
					    esiop_cmd->cmd_c.siop_target);
					esiop_table_sync(esiop_cmd,
					    BUS_DMASYNC_PREREAD |
					    BUS_DMASYNC_PREWRITE);
					CALL_SCRIPT(Ent_send_msgout);
					return 1;
				case SIOP_NEG_ACK:
					esiop_update_scntl3(sc,
					    esiop_cmd->cmd_c.siop_target);
					CALL_SCRIPT(Ent_msgin_ack);
					return 1;
				default:
					panic("invalid retval from "
					    "siop_ppr_neg()");
				}
				return 1;
			}
			if (esiop_cmd->cmd_tables->msg_in[2] == MSG_EXT_WDTR) {
				switch (siop_wdtr_neg(&esiop_cmd->cmd_c)) {
				case SIOP_NEG_MSGOUT:
					esiop_update_scntl3(sc,
					    esiop_cmd->cmd_c.siop_target);
					esiop_table_sync(esiop_cmd,
					    BUS_DMASYNC_PREREAD |
					    BUS_DMASYNC_PREWRITE);
					CALL_SCRIPT(Ent_send_msgout);
					return 1;
				case SIOP_NEG_ACK:
					esiop_update_scntl3(sc,
					    esiop_cmd->cmd_c.siop_target);
					CALL_SCRIPT(Ent_msgin_ack);
					return 1;
				default:
					panic("invalid retval from "
					    "siop_wdtr_neg()");
				}
				return 1;
			}
			if (esiop_cmd->cmd_tables->msg_in[2] == MSG_EXT_SDTR) {
				switch (siop_sdtr_neg(&esiop_cmd->cmd_c)) {
				case SIOP_NEG_MSGOUT:
					esiop_update_scntl3(sc,
					    esiop_cmd->cmd_c.siop_target);
					esiop_table_sync(esiop_cmd,
					    BUS_DMASYNC_PREREAD |
					    BUS_DMASYNC_PREWRITE);
					CALL_SCRIPT(Ent_send_msgout);
					return 1;
				case SIOP_NEG_ACK:
					esiop_update_scntl3(sc,
					    esiop_cmd->cmd_c.siop_target);
					CALL_SCRIPT(Ent_msgin_ack);
					return 1;
				default:
					panic("invalid retval from "
					    "siop_sdtr_neg()");
				}
				return 1;
			}
			/* send a message reject */
			esiop_cmd->cmd_tables->msg_out[0] = MSG_MESSAGE_REJECT;
			esiop_cmd->cmd_tables->t_msgout.count = htole32(1);
			esiop_table_sync(esiop_cmd,
			    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
			CALL_SCRIPT(Ent_send_msgout);
			return 1;
		case A_int_disc:
			/* disconnect: save data pointer, clear saved offset */
			INCSTAT(esiop_stat_intr_sdp);
			offset = bus_space_read_1(sc->sc_c.sc_rt,
			    sc->sc_c.sc_rh, SIOP_SCRATCHA + 1);
#ifdef SIOP_DEBUG_DR
			printf("disconnect offset %d\n", offset);
#endif
			siop_sdp(&esiop_cmd->cmd_c, offset);
			/* we start again with no offset */
			ESIOP_XFER(esiop_cmd, saved_offset) =
			    htole32(SIOP_NOOFFSET);
			esiop_table_sync(esiop_cmd,
			    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
			CALL_SCRIPT(Ent_script_sched);
			return 1;
		case A_int_resfail:
			printf("reselect failed\n");
			CALL_SCRIPT(Ent_script_sched);
			return 1;
		case A_int_done:
			if (xs == NULL) {
				printf("%s: done without command\n",
				    device_xname(sc->sc_c.sc_dev));
				CALL_SCRIPT(Ent_script_sched);
				return 1;
			}
#ifdef SIOP_DEBUG_INTR
			printf("done, DSA=0x%lx target id 0x%x last msg "
			    "in=0x%x status=0x%x\n",
			    (u_long)esiop_cmd->cmd_c.dsa,
			    le32toh(esiop_cmd->cmd_tables->id),
			    esiop_cmd->cmd_tables->msg_in[0],
			    le32toh(esiop_cmd->cmd_tables->status));
#endif
			INCSTAT(esiop_stat_intr_done);
			esiop_cmd->cmd_c.status = CMDST_DONE;
			goto end;
		default:
			printf("unknown irqcode %x\n", irqcode);
			if (xs) {
				xs->error = XS_SELTIMEOUT;
				goto end;
			}
			goto reset;
		}
		return 1;
	}
	/*
	 * We just shouldn't get there, but on some KVM virtual hosts,
	 * we do - see PR 48277.
	 */
	printf("esiop_intr: I shouldn't be there !\n");
	return 1;

end:
	/*
	 * restart the script now if command completed properly
	 * Otherwise wait for siop_scsicmd_end(), we may need to cleanup the
	 * queue
	 */
	xs->status = le32toh(esiop_cmd->cmd_tables->status);
#ifdef SIOP_DEBUG_INTR
	printf("esiop_intr end: status %d\n", xs->status);
#endif
	if (tag >= 0)
		esiop_lun->tactive[tag] = NULL;
	else
		esiop_lun->active = NULL;
	offset = bus_space_read_1(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
	    SIOP_SCRATCHA + 1);
	/*
	 * if we got a disconnect between the last data phase
	 * and the status phase, offset will be 0. In this
	 * case, cmd_tables->saved_offset will have the proper value
	 * if it got updated by the controller
	 */
	if (offset == 0 &&
	    ESIOP_XFER(esiop_cmd, saved_offset) != htole32(SIOP_NOOFFSET))
		offset =
		    (le32toh(ESIOP_XFER(esiop_cmd, saved_offset)) >> 8) & 0xff;

	esiop_scsicmd_end(esiop_cmd, offset);
	if (freetarget && esiop_target->target_c.status == TARST_PROBING)
		esiop_del_dev(sc, target, lun);
	CALL_SCRIPT(Ent_script_sched);
	return 1;
}

/*
 * Complete a command: update residual, map the SCSI status byte to a
 * scsipi error code, then sync/unload the data DMA map.
 * (Function continues past the end of this chunk.)
 */
void
esiop_scsicmd_end(struct esiop_cmd *esiop_cmd, int offset)
{
	struct scsipi_xfer *xs = esiop_cmd->cmd_c.xs;
	struct esiop_softc *sc = (struct esiop_softc *)esiop_cmd->cmd_c.siop_sc;

	siop_update_resid(&esiop_cmd->cmd_c, offset);

	switch(xs->status) {
	case SCSI_OK:
		xs->error = XS_NOERROR;
		break;
	case SCSI_BUSY:
		xs->error = XS_BUSY;
		break;
	case SCSI_CHECK:
		xs->error = XS_BUSY;
		/* remove commands in the queue and scheduler */
		esiop_unqueue(sc, xs->xs_periph->periph_target,
		    xs->xs_periph->periph_lun);
		break;
	case SCSI_QUEUE_FULL:
		INCSTAT(esiop_stat_intr_qfull);
#ifdef SIOP_DEBUG
		printf("%s:%d:%d: queue full (tag %d)\n",
		    device_xname(sc->sc_c.sc_dev),
		    xs->xs_periph->periph_target,
		    xs->xs_periph->periph_lun, esiop_cmd->cmd_c.tag);
#endif
		xs->error = XS_BUSY;
		break;
	case SCSI_SIOP_NOCHECK:
		/*
		 * don't check status, xs->error is already valid
		 */
		break;
	case SCSI_SIOP_NOSTATUS:
		/*
		 * the status byte was not updated, cmd was
		 * aborted
		 */
		xs->error = XS_SELTIMEOUT;
		break;
	default:
		scsipi_printaddr(xs->xs_periph);
		printf("invalid status code %d\n", xs->status);
		xs->error = XS_DRIVER_STUFFUP;
	}
	if (xs->xs_control & (XS_CTL_DATA_IN | XS_CTL_DATA_OUT)) {
		bus_dmamap_sync(sc->sc_c.sc_dmat,
		    esiop_cmd->cmd_c.dmamap_data, 0,
esiop_cmd->cmd_c.dmamap_data->dm_mapsize, 1196 (xs->xs_control & XS_CTL_DATA_IN) ? 1197 BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE); 1198 bus_dmamap_unload(sc->sc_c.sc_dmat, 1199 esiop_cmd->cmd_c.dmamap_data); 1200 } 1201 bus_dmamap_unload(sc->sc_c.sc_dmat, esiop_cmd->cmd_c.dmamap_cmd); 1202 if ((xs->xs_control & XS_CTL_POLL) == 0) 1203 callout_stop(&xs->xs_callout); 1204 esiop_cmd->cmd_c.status = CMDST_FREE; 1205 TAILQ_INSERT_TAIL(&sc->free_list, esiop_cmd, next); 1206#if 0 1207 if (xs->resid != 0) 1208 printf("resid %d datalen %d\n", xs->resid, xs->datalen); 1209#endif 1210 scsipi_done (xs); 1211} 1212 1213void 1214esiop_checkdone(struct esiop_softc *sc) 1215{ 1216 int target, lun, tag; 1217 struct esiop_target *esiop_target; 1218 struct esiop_lun *esiop_lun; 1219 struct esiop_cmd *esiop_cmd; 1220 uint32_t slot; 1221 int needsync = 0; 1222 int status; 1223 uint32_t sem, offset; 1224 1225 esiop_script_sync(sc, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); 1226 sem = esiop_script_read(sc, sc->sc_semoffset); 1227 esiop_script_write(sc, sc->sc_semoffset, sem & ~A_sem_done); 1228 if ((sc->sc_flags & SCF_CHAN_NOSLOT) && (sem & A_sem_start)) { 1229 /* 1230 * at last one command have been started, 1231 * so we should have free slots now 1232 */ 1233 sc->sc_flags &= ~SCF_CHAN_NOSLOT; 1234 scsipi_channel_thaw(&sc->sc_c.sc_chan, 1); 1235 } 1236 esiop_script_sync(sc, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 1237 1238 if ((sem & A_sem_done) == 0) { 1239 /* no pending done command */ 1240 return; 1241 } 1242 1243 bus_dmamap_sync(sc->sc_c.sc_dmat, sc->sc_done_map, 1244 sc->sc_done_offset, A_ndone_slots * sizeof(uint32_t), 1245 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); 1246next: 1247 if (sc->sc_done_slot[sc->sc_currdoneslot] == 0) { 1248 if (needsync) 1249 bus_dmamap_sync(sc->sc_c.sc_dmat, sc->sc_done_map, 1250 sc->sc_done_offset, 1251 A_ndone_slots * sizeof(uint32_t), 1252 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 1253 return; 1254 } 1255 1256 needsync = 1; 1257 
1258 slot = htole32(sc->sc_done_slot[sc->sc_currdoneslot]); 1259 sc->sc_done_slot[sc->sc_currdoneslot] = 0; 1260 sc->sc_currdoneslot += 1; 1261 if (sc->sc_currdoneslot == A_ndone_slots) 1262 sc->sc_currdoneslot = 0; 1263 1264 target = (slot & A_f_c_target) ? (slot >> 8) & 0xff : -1; 1265 lun = (slot & A_f_c_lun) ? (slot >> 16) & 0xff : -1; 1266 tag = (slot & A_f_c_tag) ? (slot >> 24) & 0xff : -1; 1267 1268 esiop_target = (target >= 0) ? 1269 (struct esiop_target *)sc->sc_c.targets[target] : NULL; 1270 if (esiop_target == NULL) { 1271 printf("esiop_target (target %d) not valid\n", target); 1272 goto next; 1273 } 1274 esiop_lun = (lun >= 0) ? esiop_target->esiop_lun[lun] : NULL; 1275 if (esiop_lun == NULL) { 1276 printf("esiop_lun (target %d lun %d) not valid\n", 1277 target, lun); 1278 goto next; 1279 } 1280 esiop_cmd = (tag >= 0) ? esiop_lun->tactive[tag] : esiop_lun->active; 1281 if (esiop_cmd == NULL) { 1282 printf("esiop_cmd (target %d lun %d tag %d) not valid\n", 1283 target, lun, tag); 1284 goto next; 1285 } 1286 1287 esiop_table_sync(esiop_cmd, 1288 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); 1289 status = le32toh(esiop_cmd->cmd_tables->status); 1290#ifdef DIAGNOSTIC 1291 if (status != SCSI_OK) { 1292 printf("command for T/L/Q %d/%d/%d status %d\n", 1293 target, lun, tag, status); 1294 goto next; 1295 } 1296 1297#endif 1298 /* Ok, this command has been handled */ 1299 esiop_cmd->cmd_c.xs->status = status; 1300 if (tag >= 0) 1301 esiop_lun->tactive[tag] = NULL; 1302 else 1303 esiop_lun->active = NULL; 1304 /* 1305 * scratcha was eventually saved in saved_offset by script. 
1306 * fetch offset from it 1307 */ 1308 offset = 0; 1309 if (ESIOP_XFER(esiop_cmd, saved_offset) != htole32(SIOP_NOOFFSET)) 1310 offset = 1311 (le32toh(ESIOP_XFER(esiop_cmd, saved_offset)) >> 8) & 0xff; 1312 esiop_scsicmd_end(esiop_cmd, offset); 1313 goto next; 1314} 1315 1316void 1317esiop_unqueue(struct esiop_softc *sc, int target, int lun) 1318{ 1319 int slot, tag; 1320 uint32_t slotdsa; 1321 struct esiop_cmd *esiop_cmd; 1322 struct esiop_lun *esiop_lun = 1323 ((struct esiop_target *)sc->sc_c.targets[target])->esiop_lun[lun]; 1324 1325 /* first make sure to read valid data */ 1326 esiop_script_sync(sc, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); 1327 1328 for (tag = 0; tag < ESIOP_NTAG; tag++) { 1329 /* look for commands in the scheduler, not yet started */ 1330 if (esiop_lun->tactive[tag] == NULL) 1331 continue; 1332 esiop_cmd = esiop_lun->tactive[tag]; 1333 for (slot = 0; slot < A_ncmd_slots; slot++) { 1334 slotdsa = esiop_script_read(sc, 1335 sc->sc_shedoffset + slot * CMD_SLOTSIZE); 1336 /* if the slot has any flag, it won't match the DSA */ 1337 if (slotdsa == esiop_cmd->cmd_c.dsa) { /* found it */ 1338 /* Mark this slot as ignore */ 1339 esiop_script_write(sc, 1340 sc->sc_shedoffset + slot * CMD_SLOTSIZE, 1341 esiop_cmd->cmd_c.dsa | A_f_cmd_ignore); 1342 /* ask to requeue */ 1343 esiop_cmd->cmd_c.xs->error = XS_REQUEUE; 1344 esiop_cmd->cmd_c.xs->status = SCSI_SIOP_NOCHECK; 1345 esiop_lun->tactive[tag] = NULL; 1346 esiop_scsicmd_end(esiop_cmd, 0); 1347 break; 1348 } 1349 } 1350 } 1351 esiop_script_sync(sc, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 1352} 1353 1354/* 1355 * handle a rejected queue tag message: the command will run untagged, 1356 * has to adjust the reselect script. 
1357 */ 1358 1359 1360int 1361esiop_handle_qtag_reject(struct esiop_cmd *esiop_cmd) 1362{ 1363 struct esiop_softc *sc = (struct esiop_softc *)esiop_cmd->cmd_c.siop_sc; 1364 int target = esiop_cmd->cmd_c.xs->xs_periph->periph_target; 1365 int lun = esiop_cmd->cmd_c.xs->xs_periph->periph_lun; 1366 int tag = esiop_cmd->cmd_tables->msg_out[2]; 1367 struct esiop_target *esiop_target = 1368 (struct esiop_target*)sc->sc_c.targets[target]; 1369 struct esiop_lun *esiop_lun = esiop_target->esiop_lun[lun]; 1370 1371#ifdef SIOP_DEBUG 1372 printf("%s:%d:%d: tag message %d (%d) rejected (status %d)\n", 1373 device_xname(sc->sc_c.sc_dev), target, lun, tag, 1374 esiop_cmd->cmd_c.tag, esiop_cmd->cmd_c.status); 1375#endif 1376 1377 if (esiop_lun->active != NULL) { 1378 aprint_error_dev(sc->sc_c.sc_dev, 1379 "untagged command already running for target %d " 1380 "lun %d (status %d)\n", 1381 target, lun, esiop_lun->active->cmd_c.status); 1382 return -1; 1383 } 1384 /* clear tag slot */ 1385 esiop_lun->tactive[tag] = NULL; 1386 /* add command to non-tagged slot */ 1387 esiop_lun->active = esiop_cmd; 1388 esiop_cmd->cmd_c.flags &= ~CMDFL_TAG; 1389 esiop_cmd->cmd_c.tag = -1; 1390 /* update DSA table */ 1391 esiop_script_write(sc, esiop_target->lun_table_offset + 1392 lun * 2 + A_target_luntbl / sizeof(uint32_t), 1393 esiop_cmd->cmd_c.dsa); 1394 esiop_script_sync(sc, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 1395 return 0; 1396} 1397 1398/* 1399 * handle a bus reset: reset chip, unqueue all active commands, free all 1400 * target struct and report lossage to upper layer. 1401 * As the upper layer may requeue immediately we have to first store 1402 * all active commands in a temporary queue. 1403 */ 1404void 1405esiop_handle_reset(struct esiop_softc *sc) 1406{ 1407 struct esiop_cmd *esiop_cmd; 1408 struct esiop_lun *esiop_lun; 1409 int target, lun, tag; 1410 /* 1411 * scsi bus reset. reset the chip and restart 1412 * the queue. 
Need to clean up all active commands 1413 */ 1414 printf("%s: scsi bus reset\n", device_xname(sc->sc_c.sc_dev)); 1415 /* stop, reset and restart the chip */ 1416 esiop_reset(sc); 1417 1418 if (sc->sc_flags & SCF_CHAN_NOSLOT) { 1419 /* chip has been reset, all slots are free now */ 1420 sc->sc_flags &= ~SCF_CHAN_NOSLOT; 1421 scsipi_channel_thaw(&sc->sc_c.sc_chan, 1); 1422 } 1423 /* 1424 * Process all commands: first commands completes, then commands 1425 * being executed 1426 */ 1427 esiop_checkdone(sc); 1428 for (target = 0; target < sc->sc_c.sc_chan.chan_ntargets; target++) { 1429 struct esiop_target *esiop_target = 1430 (struct esiop_target *)sc->sc_c.targets[target]; 1431 if (esiop_target == NULL) 1432 continue; 1433 for (lun = 0; lun < 8; lun++) { 1434 esiop_lun = esiop_target->esiop_lun[lun]; 1435 if (esiop_lun == NULL) 1436 continue; 1437 for (tag = -1; tag < 1438 ((sc->sc_c.targets[target]->flags & TARF_TAG) ? 1439 ESIOP_NTAG : 0); 1440 tag++) { 1441 if (tag >= 0) 1442 esiop_cmd = esiop_lun->tactive[tag]; 1443 else 1444 esiop_cmd = esiop_lun->active; 1445 if (esiop_cmd == NULL) 1446 continue; 1447 scsipi_printaddr( 1448 esiop_cmd->cmd_c.xs->xs_periph); 1449 printf("command with tag id %d reset\n", tag); 1450 esiop_cmd->cmd_c.xs->error = 1451 (esiop_cmd->cmd_c.flags & CMDFL_TIMEOUT) ? 
1452 XS_TIMEOUT : XS_RESET; 1453 esiop_cmd->cmd_c.xs->status = SCSI_SIOP_NOCHECK; 1454 if (tag >= 0) 1455 esiop_lun->tactive[tag] = NULL; 1456 else 1457 esiop_lun->active = NULL; 1458 esiop_cmd->cmd_c.status = CMDST_DONE; 1459 esiop_scsicmd_end(esiop_cmd, 0); 1460 } 1461 } 1462 sc->sc_c.targets[target]->status = TARST_ASYNC; 1463 sc->sc_c.targets[target]->flags &= ~(TARF_ISWIDE | TARF_ISDT); 1464 sc->sc_c.targets[target]->period = 1465 sc->sc_c.targets[target]->offset = 0; 1466 siop_update_xfer_mode(&sc->sc_c, target); 1467 } 1468 1469 scsipi_async_event(&sc->sc_c.sc_chan, ASYNC_EVENT_RESET, NULL); 1470} 1471 1472void 1473esiop_scsipi_request(struct scsipi_channel *chan, scsipi_adapter_req_t req, 1474 void *arg) 1475{ 1476 struct scsipi_xfer *xs; 1477 struct scsipi_periph *periph; 1478 struct esiop_softc *sc = device_private(chan->chan_adapter->adapt_dev); 1479 struct esiop_cmd *esiop_cmd; 1480 struct esiop_target *esiop_target; 1481 int s, error, i; 1482 int target; 1483 int lun; 1484 1485 switch (req) { 1486 case ADAPTER_REQ_RUN_XFER: 1487 xs = arg; 1488 periph = xs->xs_periph; 1489 target = periph->periph_target; 1490 lun = periph->periph_lun; 1491 1492 s = splbio(); 1493 /* 1494 * first check if there are pending complete commands. 1495 * this can free us some resources (in the rings for example). 1496 * we have to lock it to avoid recursion. 
1497 */ 1498 if ((sc->sc_flags & SCF_CHAN_ADAPTREQ) == 0) { 1499 sc->sc_flags |= SCF_CHAN_ADAPTREQ; 1500 esiop_checkdone(sc); 1501 sc->sc_flags &= ~SCF_CHAN_ADAPTREQ; 1502 } 1503#ifdef SIOP_DEBUG_SCHED 1504 printf("starting cmd for %d:%d tag %d(%d)\n", target, lun, 1505 xs->xs_tag_type, xs->xs_tag_id); 1506#endif 1507 esiop_cmd = TAILQ_FIRST(&sc->free_list); 1508 if (esiop_cmd == NULL) { 1509 xs->error = XS_RESOURCE_SHORTAGE; 1510 scsipi_done(xs); 1511 splx(s); 1512 return; 1513 } 1514 TAILQ_REMOVE(&sc->free_list, esiop_cmd, next); 1515#ifdef DIAGNOSTIC 1516 if (esiop_cmd->cmd_c.status != CMDST_FREE) 1517 panic("siop_scsicmd: new cmd not free"); 1518#endif 1519 esiop_target = (struct esiop_target*)sc->sc_c.targets[target]; 1520 if (esiop_target == NULL) { 1521#ifdef SIOP_DEBUG 1522 printf("%s: alloc siop_target for target %d\n", 1523 device_xname(sc->sc_c.sc_dev), target); 1524#endif 1525 sc->sc_c.targets[target] = 1526 malloc(sizeof(struct esiop_target), 1527 M_DEVBUF, M_NOWAIT | M_ZERO); 1528 if (sc->sc_c.targets[target] == NULL) { 1529 aprint_error_dev(sc->sc_c.sc_dev, 1530 "can't malloc memory for " 1531 "target %d\n", 1532 target); 1533 xs->error = XS_RESOURCE_SHORTAGE; 1534 scsipi_done(xs); 1535 TAILQ_INSERT_TAIL(&sc->free_list, 1536 esiop_cmd, next); 1537 splx(s); 1538 return; 1539 } 1540 esiop_target = 1541 (struct esiop_target*)sc->sc_c.targets[target]; 1542 esiop_target->target_c.status = TARST_PROBING; 1543 esiop_target->target_c.flags = 0; 1544 esiop_target->target_c.id = 1545 sc->sc_c.clock_div << 24; /* scntl3 */ 1546 esiop_target->target_c.id |= target << 16; /* id */ 1547 /* esiop_target->target_c.id |= 0x0 << 8; scxfer is 0 */ 1548 1549 for (i=0; i < 8; i++) 1550 esiop_target->esiop_lun[i] = NULL; 1551 esiop_target_register(sc, target); 1552 } 1553 if (esiop_target->esiop_lun[lun] == NULL) { 1554 esiop_target->esiop_lun[lun] = 1555 malloc(sizeof(struct esiop_lun), M_DEVBUF, 1556 M_NOWAIT|M_ZERO); 1557 if (esiop_target->esiop_lun[lun] == NULL) { 
1558 aprint_error_dev(sc->sc_c.sc_dev, 1559 "can't alloc esiop_lun for " 1560 "target %d lun %d\n", 1561 target, lun); 1562 xs->error = XS_RESOURCE_SHORTAGE; 1563 scsipi_done(xs); 1564 TAILQ_INSERT_TAIL(&sc->free_list, 1565 esiop_cmd, next); 1566 splx(s); 1567 return; 1568 } 1569 } 1570 esiop_cmd->cmd_c.siop_target = sc->sc_c.targets[target]; 1571 esiop_cmd->cmd_c.xs = xs; 1572 esiop_cmd->cmd_c.flags = 0; 1573 esiop_cmd->cmd_c.status = CMDST_READY; 1574 1575 /* load the DMA maps */ 1576 error = bus_dmamap_load(sc->sc_c.sc_dmat, 1577 esiop_cmd->cmd_c.dmamap_cmd, 1578 xs->cmd, xs->cmdlen, NULL, BUS_DMA_NOWAIT); 1579 if (error) { 1580 aprint_error_dev(sc->sc_c.sc_dev, 1581 "unable to load cmd DMA map: %d\n", 1582 error); 1583 xs->error = (error == EAGAIN) ? 1584 XS_RESOURCE_SHORTAGE : XS_DRIVER_STUFFUP; 1585 scsipi_done(xs); 1586 esiop_cmd->cmd_c.status = CMDST_FREE; 1587 TAILQ_INSERT_TAIL(&sc->free_list, esiop_cmd, next); 1588 splx(s); 1589 return; 1590 } 1591 if (xs->xs_control & (XS_CTL_DATA_IN | XS_CTL_DATA_OUT)) { 1592 error = bus_dmamap_load(sc->sc_c.sc_dmat, 1593 esiop_cmd->cmd_c.dmamap_data, xs->data, xs->datalen, 1594 NULL, BUS_DMA_NOWAIT | BUS_DMA_STREAMING | 1595 ((xs->xs_control & XS_CTL_DATA_IN) ? 1596 BUS_DMA_READ : BUS_DMA_WRITE)); 1597 if (error) { 1598 aprint_error_dev(sc->sc_c.sc_dev, 1599 "unable to load data DMA map: %d\n", 1600 error); 1601 xs->error = (error == EAGAIN) ? 1602 XS_RESOURCE_SHORTAGE : XS_DRIVER_STUFFUP; 1603 scsipi_done(xs); 1604 bus_dmamap_unload(sc->sc_c.sc_dmat, 1605 esiop_cmd->cmd_c.dmamap_cmd); 1606 esiop_cmd->cmd_c.status = CMDST_FREE; 1607 TAILQ_INSERT_TAIL(&sc->free_list, 1608 esiop_cmd, next); 1609 splx(s); 1610 return; 1611 } 1612 bus_dmamap_sync(sc->sc_c.sc_dmat, 1613 esiop_cmd->cmd_c.dmamap_data, 0, 1614 esiop_cmd->cmd_c.dmamap_data->dm_mapsize, 1615 (xs->xs_control & XS_CTL_DATA_IN) ? 
1616 BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE); 1617 } 1618 bus_dmamap_sync(sc->sc_c.sc_dmat, esiop_cmd->cmd_c.dmamap_cmd, 1619 0, esiop_cmd->cmd_c.dmamap_cmd->dm_mapsize, 1620 BUS_DMASYNC_PREWRITE); 1621 1622 if (xs->xs_tag_type) 1623 esiop_cmd->cmd_c.tag = xs->xs_tag_id; 1624 else 1625 esiop_cmd->cmd_c.tag = -1; 1626 siop_setuptables(&esiop_cmd->cmd_c); 1627 ESIOP_XFER(esiop_cmd, saved_offset) = htole32(SIOP_NOOFFSET); 1628 ESIOP_XFER(esiop_cmd, tlq) = htole32(A_f_c_target | A_f_c_lun); 1629 ESIOP_XFER(esiop_cmd, tlq) |= 1630 htole32((target << 8) | (lun << 16)); 1631 if (esiop_cmd->cmd_c.flags & CMDFL_TAG) { 1632 ESIOP_XFER(esiop_cmd, tlq) |= htole32(A_f_c_tag); 1633 ESIOP_XFER(esiop_cmd, tlq) |= 1634 htole32(esiop_cmd->cmd_c.tag << 24); 1635 } 1636 1637 esiop_table_sync(esiop_cmd, 1638 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 1639 esiop_start(sc, esiop_cmd); 1640 if (xs->xs_control & XS_CTL_POLL) { 1641 /* poll for command completion */ 1642 while ((xs->xs_status & XS_STS_DONE) == 0) { 1643 delay(1000); 1644 esiop_intr(sc); 1645 } 1646 } 1647 splx(s); 1648 return; 1649 1650 case ADAPTER_REQ_GROW_RESOURCES: 1651#ifdef SIOP_DEBUG 1652 printf("%s grow resources (%d)\n", 1653 device_xname(sc->sc_c.sc_dev), 1654 sc->sc_c.sc_adapt.adapt_openings); 1655#endif 1656 esiop_morecbd(sc); 1657 return; 1658 1659 case ADAPTER_REQ_SET_XFER_MODE: 1660 { 1661 struct scsipi_xfer_mode *xm = arg; 1662 if (sc->sc_c.targets[xm->xm_target] == NULL) 1663 return; 1664 s = splbio(); 1665 if (xm->xm_mode & PERIPH_CAP_TQING) { 1666 sc->sc_c.targets[xm->xm_target]->flags |= TARF_TAG; 1667 /* allocate tag tables for this device */ 1668 for (lun = 0; 1669 lun < sc->sc_c.sc_chan.chan_nluns; lun++) { 1670 if (scsipi_lookup_periph(chan, 1671 xm->xm_target, lun) != NULL) 1672 esiop_add_dev(sc, xm->xm_target, lun); 1673 } 1674 } 1675 if ((xm->xm_mode & PERIPH_CAP_WIDE16) && 1676 (sc->sc_c.features & SF_BUS_WIDE)) 1677 sc->sc_c.targets[xm->xm_target]->flags |= TARF_WIDE; 1678 if (xm->xm_mode 
& PERIPH_CAP_SYNC) 1679 sc->sc_c.targets[xm->xm_target]->flags |= TARF_SYNC; 1680 if ((xm->xm_mode & PERIPH_CAP_DT) && 1681 (sc->sc_c.features & SF_CHIP_DT)) 1682 sc->sc_c.targets[xm->xm_target]->flags |= TARF_DT; 1683 if ((xm->xm_mode & 1684 (PERIPH_CAP_SYNC | PERIPH_CAP_WIDE16 | PERIPH_CAP_DT)) || 1685 sc->sc_c.targets[xm->xm_target]->status == TARST_PROBING) 1686 sc->sc_c.targets[xm->xm_target]->status = TARST_ASYNC; 1687 1688 splx(s); 1689 } 1690 } 1691} 1692 1693static void 1694esiop_start(struct esiop_softc *sc, struct esiop_cmd *esiop_cmd) 1695{ 1696 struct esiop_lun *esiop_lun; 1697 struct esiop_target *esiop_target; 1698 int timeout; 1699 int target, lun, slot; 1700 1701 /* 1702 * first make sure to read valid data 1703 */ 1704 esiop_script_sync(sc, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); 1705 1706 /* 1707 * We use a circular queue here. sc->sc_currschedslot points to a 1708 * free slot, unless we have filled the queue. Check this. 1709 */ 1710 slot = sc->sc_currschedslot; 1711 if ((esiop_script_read(sc, sc->sc_shedoffset + slot * CMD_SLOTSIZE) & 1712 A_f_cmd_free) == 0) { 1713 /* 1714 * no more free slot, no need to continue. freeze the queue 1715 * and requeue this command. 
1716 */ 1717 scsipi_channel_freeze(&sc->sc_c.sc_chan, 1); 1718 sc->sc_flags |= SCF_CHAN_NOSLOT; 1719 esiop_script_write(sc, sc->sc_semoffset, 1720 esiop_script_read(sc, sc->sc_semoffset) & ~A_sem_start); 1721 esiop_script_sync(sc, 1722 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 1723 esiop_cmd->cmd_c.xs->error = XS_REQUEUE; 1724 esiop_cmd->cmd_c.xs->status = SCSI_SIOP_NOCHECK; 1725 esiop_scsicmd_end(esiop_cmd, 0); 1726 return; 1727 } 1728 /* OK, we can use this slot */ 1729 1730 target = esiop_cmd->cmd_c.xs->xs_periph->periph_target; 1731 lun = esiop_cmd->cmd_c.xs->xs_periph->periph_lun; 1732 esiop_target = (struct esiop_target*)sc->sc_c.targets[target]; 1733 esiop_lun = esiop_target->esiop_lun[lun]; 1734 /* if non-tagged command active, panic: this shouldn't happen */ 1735 if (esiop_lun->active != NULL) { 1736 panic("esiop_start: tagged cmd while untagged running"); 1737 } 1738#ifdef DIAGNOSTIC 1739 /* sanity check the tag if needed */ 1740 if (esiop_cmd->cmd_c.flags & CMDFL_TAG) { 1741 if (esiop_cmd->cmd_c.tag >= ESIOP_NTAG || 1742 esiop_cmd->cmd_c.tag < 0) { 1743 scsipi_printaddr(esiop_cmd->cmd_c.xs->xs_periph); 1744 printf(": tag id %d\n", esiop_cmd->cmd_c.tag); 1745 panic("esiop_start: invalid tag id"); 1746 } 1747 if (esiop_lun->tactive[esiop_cmd->cmd_c.tag] != NULL) 1748 panic("esiop_start: tag not free"); 1749 } 1750#endif 1751#ifdef SIOP_DEBUG_SCHED 1752 printf("using slot %d for DSA 0x%lx\n", slot, 1753 (u_long)esiop_cmd->cmd_c.dsa); 1754#endif 1755 /* mark command as active */ 1756 if (esiop_cmd->cmd_c.status == CMDST_READY) 1757 esiop_cmd->cmd_c.status = CMDST_ACTIVE; 1758 else 1759 panic("esiop_start: bad status"); 1760 /* DSA table for reselect */ 1761 if (esiop_cmd->cmd_c.flags & CMDFL_TAG) { 1762 esiop_lun->tactive[esiop_cmd->cmd_c.tag] = esiop_cmd; 1763 /* DSA table for reselect */ 1764 esiop_lun->lun_tagtbl->tbl[esiop_cmd->cmd_c.tag] = 1765 htole32(esiop_cmd->cmd_c.dsa); 1766 bus_dmamap_sync(sc->sc_c.sc_dmat, 1767 
esiop_lun->lun_tagtbl->tblblk->blkmap, 1768 esiop_lun->lun_tagtbl->tbl_offset, 1769 sizeof(uint32_t) * ESIOP_NTAG, BUS_DMASYNC_PREWRITE); 1770 } else { 1771 esiop_lun->active = esiop_cmd; 1772 esiop_script_write(sc, 1773 esiop_target->lun_table_offset + 1774 lun * 2 + A_target_luntbl / sizeof(uint32_t), 1775 esiop_cmd->cmd_c.dsa); 1776 } 1777 /* scheduler slot: DSA */ 1778 esiop_script_write(sc, sc->sc_shedoffset + slot * CMD_SLOTSIZE, 1779 esiop_cmd->cmd_c.dsa); 1780 /* make sure SCRIPT processor will read valid data */ 1781 esiop_script_sync(sc, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 1782 /* handle timeout */ 1783 if ((esiop_cmd->cmd_c.xs->xs_control & XS_CTL_POLL) == 0) { 1784 /* start exire timer */ 1785 timeout = mstohz(esiop_cmd->cmd_c.xs->timeout); 1786 if (timeout == 0) 1787 timeout = 1; 1788 callout_reset( &esiop_cmd->cmd_c.xs->xs_callout, 1789 timeout, esiop_timeout, esiop_cmd); 1790 } 1791 /* Signal script it has some work to do */ 1792 bus_space_write_1(sc->sc_c.sc_rt, sc->sc_c.sc_rh, 1793 SIOP_ISTAT, ISTAT_SIGP); 1794 /* update the current slot, and wait for IRQ */ 1795 sc->sc_currschedslot++; 1796 if (sc->sc_currschedslot >= A_ncmd_slots) 1797 sc->sc_currschedslot = 0; 1798} 1799 1800void 1801esiop_timeout(void *v) 1802{ 1803 struct esiop_cmd *esiop_cmd = v; 1804 struct esiop_softc *sc = 1805 (struct esiop_softc *)esiop_cmd->cmd_c.siop_sc; 1806 int s; 1807#ifdef SIOP_DEBUG 1808 int slot, slotdsa; 1809#endif 1810 1811 s = splbio(); 1812 esiop_table_sync(esiop_cmd, 1813 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); 1814 scsipi_printaddr(esiop_cmd->cmd_c.xs->xs_periph); 1815#ifdef SIOP_DEBUG 1816 printf("command timeout (status %d)\n", 1817 le32toh(esiop_cmd->cmd_tables->status)); 1818 1819 esiop_script_sync(sc, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); 1820 for (slot = 0; slot < A_ncmd_slots; slot++) { 1821 slotdsa = esiop_script_read(sc, 1822 sc->sc_shedoffset + slot * CMD_SLOTSIZE); 1823 if ((slotdsa & 0x01) == 0) 1824 printf("slot %d not 
free (0x%x)\n", slot, slotdsa); 1825 } 1826 printf("istat 0x%x ", 1827 bus_space_read_1(sc->sc_c.sc_rt, sc->sc_c.sc_rh, SIOP_ISTAT)); 1828 printf("DSP 0x%lx DSA 0x%x\n", 1829 (u_long)(bus_space_read_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh, SIOP_DSP) 1830 - sc->sc_c.sc_scriptaddr), 1831 bus_space_read_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh, SIOP_DSA)); 1832 (void)bus_space_read_1(sc->sc_c.sc_rt, sc->sc_c.sc_rh, SIOP_CTEST2); 1833 printf("istat 0x%x\n", 1834 bus_space_read_1(sc->sc_c.sc_rt, sc->sc_c.sc_rh, SIOP_ISTAT)); 1835#else 1836 printf("command timeout, CDB: "); 1837 scsipi_print_cdb(esiop_cmd->cmd_c.xs->cmd); 1838 printf("\n"); 1839#endif 1840 /* reset the scsi bus */ 1841 siop_resetbus(&sc->sc_c); 1842 1843 /* deactivate callout */ 1844 callout_stop(&esiop_cmd->cmd_c.xs->xs_callout); 1845 /* 1846 * mark command has being timed out and just return; 1847 * the bus reset will generate an interrupt, 1848 * it will be handled in siop_intr() 1849 */ 1850 esiop_cmd->cmd_c.flags |= CMDFL_TIMEOUT; 1851 splx(s); 1852} 1853 1854void 1855esiop_dump_script(struct esiop_softc *sc) 1856{ 1857 int i; 1858 1859 for (i = 0; i < PAGE_SIZE / 4; i += 2) { 1860 printf("0x%04x: 0x%08x 0x%08x", i * 4, 1861 esiop_script_read(sc, i), 1862 esiop_script_read(sc, i + 1)); 1863 if ((esiop_script_read(sc, i) & 0xe0000000) == 0xc0000000) { 1864 i++; 1865 printf(" 0x%08x", esiop_script_read(sc, i + 1)); 1866 } 1867 printf("\n"); 1868 } 1869} 1870 1871void 1872esiop_morecbd(struct esiop_softc *sc) 1873{ 1874 int error, i, s; 1875 bus_dma_segment_t seg; 1876 int rseg; 1877 struct esiop_cbd *newcbd; 1878 struct esiop_xfer *xfer; 1879 bus_addr_t dsa; 1880 1881 /* allocate a new list head */ 1882 newcbd = malloc(sizeof(struct esiop_cbd), M_DEVBUF, M_NOWAIT|M_ZERO); 1883 if (newcbd == NULL) { 1884 aprint_error_dev(sc->sc_c.sc_dev, 1885 "can't allocate memory for command descriptors " 1886 "head\n"); 1887 return; 1888 } 1889 1890 /* allocate cmd list */ 1891 newcbd->cmds = malloc(sizeof(struct esiop_cmd) * 
SIOP_NCMDPB, 1892 M_DEVBUF, M_NOWAIT|M_ZERO); 1893 if (newcbd->cmds == NULL) { 1894 aprint_error_dev(sc->sc_c.sc_dev, 1895 "can't allocate memory for command descriptors\n"); 1896 goto bad3; 1897 } 1898 error = bus_dmamem_alloc(sc->sc_c.sc_dmat, PAGE_SIZE, PAGE_SIZE, 0, 1899 &seg, 1, &rseg, BUS_DMA_NOWAIT); 1900 if (error) { 1901 aprint_error_dev(sc->sc_c.sc_dev, 1902 "unable to allocate cbd DMA memory, error = %d\n", 1903 error); 1904 goto bad2; 1905 } 1906 error = bus_dmamem_map(sc->sc_c.sc_dmat, &seg, rseg, PAGE_SIZE, 1907 (void **)&newcbd->xfers, BUS_DMA_NOWAIT|BUS_DMA_COHERENT); 1908 if (error) { 1909 aprint_error_dev(sc->sc_c.sc_dev, 1910 "unable to map cbd DMA memory, error = %d\n", 1911 error); 1912 goto bad2; 1913 } 1914 error = bus_dmamap_create(sc->sc_c.sc_dmat, PAGE_SIZE, 1, PAGE_SIZE, 0, 1915 BUS_DMA_NOWAIT, &newcbd->xferdma); 1916 if (error) { 1917 aprint_error_dev(sc->sc_c.sc_dev, 1918 "unable to create cbd DMA map, error = %d\n", error); 1919 goto bad1; 1920 } 1921 error = bus_dmamap_load(sc->sc_c.sc_dmat, newcbd->xferdma, 1922 newcbd->xfers, PAGE_SIZE, NULL, BUS_DMA_NOWAIT); 1923 if (error) { 1924 aprint_error_dev(sc->sc_c.sc_dev, 1925 "unable to load cbd DMA map, error = %d\n", error); 1926 goto bad0; 1927 } 1928#ifdef SIOP_DEBUG 1929 aprint_debug_dev(sc->sc_c.sc_dev, "alloc newcdb at PHY addr 0x%lx\n", 1930 (unsigned long)newcbd->xferdma->dm_segs[0].ds_addr); 1931#endif 1932 for (i = 0; i < SIOP_NCMDPB; i++) { 1933 error = bus_dmamap_create(sc->sc_c.sc_dmat, MAXPHYS, SIOP_NSG, 1934 MAXPHYS, 0, BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, 1935 &newcbd->cmds[i].cmd_c.dmamap_data); 1936 if (error) { 1937 aprint_error_dev(sc->sc_c.sc_dev, 1938 "unable to create data DMA map for cbd: " 1939 "error %d\n", error); 1940 goto bad0; 1941 } 1942 error = bus_dmamap_create(sc->sc_c.sc_dmat, 1943 sizeof(struct scsipi_generic), 1, 1944 sizeof(struct scsipi_generic), 0, 1945 BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, 1946 &newcbd->cmds[i].cmd_c.dmamap_cmd); 1947 if (error) { 1948 
aprint_error_dev(sc->sc_c.sc_dev, 1949 "unable to create cmd DMA map for cbd %d\n", error); 1950 goto bad0; 1951 } 1952 newcbd->cmds[i].cmd_c.siop_sc = &sc->sc_c; 1953 newcbd->cmds[i].esiop_cbdp = newcbd; 1954 xfer = &newcbd->xfers[i]; 1955 newcbd->cmds[i].cmd_tables = (struct siop_common_xfer *)xfer; 1956 memset(newcbd->cmds[i].cmd_tables, 0, 1957 sizeof(struct esiop_xfer)); 1958 dsa = newcbd->xferdma->dm_segs[0].ds_addr + 1959 i * sizeof(struct esiop_xfer); 1960 newcbd->cmds[i].cmd_c.dsa = dsa; 1961 newcbd->cmds[i].cmd_c.status = CMDST_FREE; 1962 xfer->siop_tables.t_msgout.count= htole32(1); 1963 xfer->siop_tables.t_msgout.addr = htole32(dsa); 1964 xfer->siop_tables.t_msgin.count= htole32(1); 1965 xfer->siop_tables.t_msgin.addr = htole32(dsa + 1966 offsetof(struct siop_common_xfer, msg_in)); 1967 xfer->siop_tables.t_extmsgin.count= htole32(2); 1968 xfer->siop_tables.t_extmsgin.addr = htole32(dsa + 1969 offsetof(struct siop_common_xfer, msg_in) + 1); 1970 xfer->siop_tables.t_extmsgdata.addr = htole32(dsa + 1971 offsetof(struct siop_common_xfer, msg_in) + 3); 1972 xfer->siop_tables.t_status.count= htole32(1); 1973 xfer->siop_tables.t_status.addr = htole32(dsa + 1974 offsetof(struct siop_common_xfer, status)); 1975 1976 s = splbio(); 1977 TAILQ_INSERT_TAIL(&sc->free_list, &newcbd->cmds[i], next); 1978 splx(s); 1979#ifdef SIOP_DEBUG 1980 printf("tables[%d]: in=0x%x out=0x%x status=0x%x\n", i, 1981 le32toh(newcbd->cmds[i].cmd_tables->t_msgin.addr), 1982 le32toh(newcbd->cmds[i].cmd_tables->t_msgout.addr), 1983 le32toh(newcbd->cmds[i].cmd_tables->t_status.addr)); 1984#endif 1985 } 1986 s = splbio(); 1987 TAILQ_INSERT_TAIL(&sc->cmds, newcbd, next); 1988 sc->sc_c.sc_adapt.adapt_openings += SIOP_NCMDPB; 1989 splx(s); 1990 return; 1991bad0: 1992 bus_dmamap_unload(sc->sc_c.sc_dmat, newcbd->xferdma); 1993 bus_dmamap_destroy(sc->sc_c.sc_dmat, newcbd->xferdma); 1994bad1: 1995 bus_dmamem_free(sc->sc_c.sc_dmat, &seg, rseg); 1996bad2: 1997 free(newcbd->cmds, M_DEVBUF); 1998bad3: 
1999 free(newcbd, M_DEVBUF); 2000} 2001 2002void 2003esiop_moretagtbl(struct esiop_softc *sc) 2004{ 2005 int error, i, j, s; 2006 bus_dma_segment_t seg; 2007 int rseg; 2008 struct esiop_dsatblblk *newtblblk; 2009 struct esiop_dsatbl *newtbls; 2010 uint32_t *tbls; 2011 2012 /* allocate a new list head */ 2013 newtblblk = malloc(sizeof(struct esiop_dsatblblk), 2014 M_DEVBUF, M_NOWAIT|M_ZERO); 2015 if (newtblblk == NULL) { 2016 aprint_error_dev(sc->sc_c.sc_dev, 2017 "can't allocate memory for tag DSA table block\n"); 2018 return; 2019 } 2020 2021 /* allocate tbl list */ 2022 newtbls = malloc(sizeof(struct esiop_dsatbl) * ESIOP_NTPB, 2023 M_DEVBUF, M_NOWAIT|M_ZERO); 2024 if (newtbls == NULL) { 2025 aprint_error_dev(sc->sc_c.sc_dev, 2026 "can't allocate memory for command descriptors\n"); 2027 goto bad3; 2028 } 2029 error = bus_dmamem_alloc(sc->sc_c.sc_dmat, PAGE_SIZE, PAGE_SIZE, 0, 2030 &seg, 1, &rseg, BUS_DMA_NOWAIT); 2031 if (error) { 2032 aprint_error_dev(sc->sc_c.sc_dev, 2033 "unable to allocate tbl DMA memory, error = %d\n", error); 2034 goto bad2; 2035 } 2036 error = bus_dmamem_map(sc->sc_c.sc_dmat, &seg, rseg, PAGE_SIZE, 2037 (void *)&tbls, BUS_DMA_NOWAIT|BUS_DMA_COHERENT); 2038 if (error) { 2039 aprint_error_dev(sc->sc_c.sc_dev, 2040 "unable to map tbls DMA memory, error = %d\n", error); 2041 goto bad2; 2042 } 2043 error = bus_dmamap_create(sc->sc_c.sc_dmat, PAGE_SIZE, 1, PAGE_SIZE, 0, 2044 BUS_DMA_NOWAIT, &newtblblk->blkmap); 2045 if (error) { 2046 aprint_error_dev(sc->sc_c.sc_dev, 2047 "unable to create tbl DMA map, error = %d\n", error); 2048 goto bad1; 2049 } 2050 error = bus_dmamap_load(sc->sc_c.sc_dmat, newtblblk->blkmap, 2051 tbls, PAGE_SIZE, NULL, BUS_DMA_NOWAIT); 2052 if (error) { 2053 aprint_error_dev(sc->sc_c.sc_dev, 2054 "unable to load tbl DMA map, error = %d\n", error); 2055 goto bad0; 2056 } 2057#ifdef SIOP_DEBUG 2058 printf("%s: alloc new tag DSA table at PHY addr 0x%lx\n", 2059 device_xname(sc->sc_c.sc_dev), 2060 (unsigned 
long)newtblblk->blkmap->dm_segs[0].ds_addr); 2061#endif 2062 for (i = 0; i < ESIOP_NTPB; i++) { 2063 newtbls[i].tblblk = newtblblk; 2064 newtbls[i].tbl = &tbls[i * ESIOP_NTAG]; 2065 newtbls[i].tbl_offset = i * ESIOP_NTAG * sizeof(uint32_t); 2066 newtbls[i].tbl_dsa = newtblblk->blkmap->dm_segs[0].ds_addr + 2067 newtbls[i].tbl_offset; 2068 for (j = 0; j < ESIOP_NTAG; j++) 2069 newtbls[i].tbl[j] = j; 2070 s = splbio(); 2071 TAILQ_INSERT_TAIL(&sc->free_tagtbl, &newtbls[i], next); 2072 splx(s); 2073 } 2074 s = splbio(); 2075 TAILQ_INSERT_TAIL(&sc->tag_tblblk, newtblblk, next); 2076 splx(s); 2077 return; 2078bad0: 2079 bus_dmamap_unload(sc->sc_c.sc_dmat, newtblblk->blkmap); 2080 bus_dmamap_destroy(sc->sc_c.sc_dmat, newtblblk->blkmap); 2081bad1: 2082 bus_dmamem_free(sc->sc_c.sc_dmat, &seg, rseg); 2083bad2: 2084 free(newtbls, M_DEVBUF); 2085bad3: 2086 free(newtblblk, M_DEVBUF); 2087} 2088 2089void 2090esiop_update_scntl3(struct esiop_softc *sc, 2091 struct siop_common_target *_siop_target) 2092{ 2093 struct esiop_target *esiop_target = (struct esiop_target *)_siop_target; 2094 2095 esiop_script_write(sc, esiop_target->lun_table_offset, 2096 esiop_target->target_c.id); 2097 esiop_script_sync(sc, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 2098} 2099 2100void 2101esiop_add_dev(struct esiop_softc *sc, int target, int lun) 2102{ 2103 struct esiop_target *esiop_target = 2104 (struct esiop_target *)sc->sc_c.targets[target]; 2105 struct esiop_lun *esiop_lun = esiop_target->esiop_lun[lun]; 2106 2107 if (esiop_lun->lun_tagtbl != NULL) 2108 return; /* already allocated */ 2109 2110 /* we need a tag DSA table */ 2111 esiop_lun->lun_tagtbl= TAILQ_FIRST(&sc->free_tagtbl); 2112 if (esiop_lun->lun_tagtbl == NULL) { 2113 esiop_moretagtbl(sc); 2114 esiop_lun->lun_tagtbl= TAILQ_FIRST(&sc->free_tagtbl); 2115 if (esiop_lun->lun_tagtbl == NULL) { 2116 /* no resources, run untagged */ 2117 esiop_target->target_c.flags &= ~TARF_TAG; 2118 return; 2119 } 2120 } 2121 TAILQ_REMOVE(&sc->free_tagtbl, 
esiop_lun->lun_tagtbl, next); 2122 /* Update LUN DSA table */ 2123 esiop_script_write(sc, esiop_target->lun_table_offset + 2124 lun * 2 + A_target_luntbl_tag / sizeof(uint32_t), 2125 esiop_lun->lun_tagtbl->tbl_dsa); 2126 esiop_script_sync(sc, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 2127} 2128 2129void 2130esiop_del_dev(struct esiop_softc *sc, int target, int lun) 2131{ 2132 struct esiop_target *esiop_target; 2133 2134#ifdef SIOP_DEBUG 2135 printf("%s:%d:%d: free lun sw entry\n", 2136 device_xname(sc->sc_c.sc_dev), target, lun); 2137#endif 2138 if (sc->sc_c.targets[target] == NULL) 2139 return; 2140 esiop_target = (struct esiop_target *)sc->sc_c.targets[target]; 2141 free(esiop_target->esiop_lun[lun], M_DEVBUF); 2142 esiop_target->esiop_lun[lun] = NULL; 2143} 2144 2145void 2146esiop_target_register(struct esiop_softc *sc, uint32_t target) 2147{ 2148 struct esiop_target *esiop_target = 2149 (struct esiop_target *)sc->sc_c.targets[target]; 2150 struct esiop_lun *esiop_lun; 2151 int lun; 2152 2153 /* get a DSA table for this target */ 2154 esiop_target->lun_table_offset = sc->sc_free_offset; 2155 sc->sc_free_offset += sc->sc_c.sc_chan.chan_nluns * 2 + 2; 2156#ifdef SIOP_DEBUG 2157 printf("%s: lun table for target %d offset %d free offset %d\n", 2158 device_xname(sc->sc_c.sc_dev), target, 2159 esiop_target->lun_table_offset, 2160 sc->sc_free_offset); 2161#endif 2162 /* first 32 bytes are ID (for select) */ 2163 esiop_script_write(sc, esiop_target->lun_table_offset, 2164 esiop_target->target_c.id); 2165 /* Record this table in the target DSA table */ 2166 esiop_script_write(sc, 2167 sc->sc_target_table_offset + target, 2168 (esiop_target->lun_table_offset * sizeof(uint32_t)) + 2169 sc->sc_c.sc_scriptaddr); 2170 /* if we have a tag table, register it */ 2171 for (lun = 0; lun < sc->sc_c.sc_chan.chan_nluns; lun++) { 2172 esiop_lun = esiop_target->esiop_lun[lun]; 2173 if (esiop_lun == NULL) 2174 continue; 2175 if (esiop_lun->lun_tagtbl) 2176 esiop_script_write(sc, 
esiop_target->lun_table_offset + 2177 lun * 2 + A_target_luntbl_tag / sizeof(uint32_t), 2178 esiop_lun->lun_tagtbl->tbl_dsa); 2179 } 2180 esiop_script_sync(sc, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 2181} 2182 2183#ifdef SIOP_STATS 2184void 2185esiop_printstats(void) 2186{ 2187 2188 printf("esiop_stat_intr %d\n", esiop_stat_intr); 2189 printf("esiop_stat_intr_shortxfer %d\n", esiop_stat_intr_shortxfer); 2190 printf("esiop_stat_intr_xferdisc %d\n", esiop_stat_intr_xferdisc); 2191 printf("esiop_stat_intr_sdp %d\n", esiop_stat_intr_sdp); 2192 printf("esiop_stat_intr_done %d\n", esiop_stat_intr_done); 2193 printf("esiop_stat_intr_lunresel %d\n", esiop_stat_intr_lunresel); 2194 printf("esiop_stat_intr_qfull %d\n", esiop_stat_intr_qfull); 2195} 2196#endif 2197