icp.c revision 1.15
1/* $NetBSD: icp.c,v 1.15 2005/08/25 18:35:39 drochner Exp $ */ 2 3/*- 4 * Copyright (c) 2002, 2003 The NetBSD Foundation, Inc. 5 * All rights reserved. 6 * 7 * This code is derived from software contributed to The NetBSD Foundation 8 * by Andrew Doran, and by Jason R. Thorpe of Wasabi Systems, Inc. 9 * 10 * Redistribution and use in source and binary forms, with or without 11 * modification, are permitted provided that the following conditions 12 * are met: 13 * 1. Redistributions of source code must retain the above copyright 14 * notice, this list of conditions and the following disclaimer. 15 * 2. Redistributions in binary form must reproduce the above copyright 16 * notice, this list of conditions and the following disclaimer in the 17 * documentation and/or other materials provided with the distribution. 18 * 3. All advertising materials mentioning features or use of this software 19 * must display the following acknowledgement: 20 * This product includes software developed by the NetBSD 21 * Foundation, Inc. and its contributors. 22 * 4. Neither the name of The NetBSD Foundation nor the names of its 23 * contributors may be used to endorse or promote products derived 24 * from this software without specific prior written permission. 25 * 26 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS 27 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED 28 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 29 * PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS 30 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 31 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 32 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 33 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 34 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 35 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 36 * POSSIBILITY OF SUCH DAMAGE. 37 */ 38 39/* 40 * Copyright (c) 1999, 2000 Niklas Hallqvist. All rights reserved. 41 * 42 * Redistribution and use in source and binary forms, with or without 43 * modification, are permitted provided that the following conditions 44 * are met: 45 * 1. Redistributions of source code must retain the above copyright 46 * notice, this list of conditions and the following disclaimer. 47 * 2. Redistributions in binary form must reproduce the above copyright 48 * notice, this list of conditions and the following disclaimer in the 49 * documentation and/or other materials provided with the distribution. 50 * 3. All advertising materials mentioning features or use of this software 51 * must display the following acknowledgement: 52 * This product includes software developed by Niklas Hallqvist. 53 * 4. The name of the author may not be used to endorse or promote products 54 * derived from this software without specific prior written permission. 55 * 56 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR 57 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES 58 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * from OpenBSD: gdt_common.c,v 1.12 2001/07/04 06:43:18 niklas Exp
 */

/*
 * This driver would not have been written if it was not for the hardware
 * donations from both ICP-Vortex and Öko.neT.  I want to thank them for
 * their support.
 *
 * Re-worked for NetBSD by Andrew Doran.  Test hardware kindly supplied by
 * Intel.
 *
 * Support for the ICP-Vortex management tools added by
 * Jason R. Thorpe of Wasabi Systems, Inc., based on code
 * provided by Achim Leubner <achim.leubner@intel.com>.
 *
 * Additional support for dynamic rescan of cacheservice drives by
 * Jason R. Thorpe of Wasabi Systems, Inc.
83 */ 84 85#include <sys/cdefs.h> 86__KERNEL_RCSID(0, "$NetBSD: icp.c,v 1.15 2005/08/25 18:35:39 drochner Exp $"); 87 88#include <sys/param.h> 89#include <sys/systm.h> 90#include <sys/kernel.h> 91#include <sys/device.h> 92#include <sys/queue.h> 93#include <sys/proc.h> 94#include <sys/buf.h> 95#include <sys/endian.h> 96#include <sys/malloc.h> 97#include <sys/disk.h> 98 99#include <uvm/uvm_extern.h> 100 101#include <machine/bswap.h> 102#include <machine/bus.h> 103 104#include <dev/pci/pcireg.h> 105#include <dev/pci/pcivar.h> 106#include <dev/pci/pcidevs.h> 107 108#include <dev/ic/icpreg.h> 109#include <dev/ic/icpvar.h> 110 111#include <dev/scsipi/scsipi_all.h> 112#include <dev/scsipi/scsiconf.h> 113 114#include "locators.h" 115 116int icp_async_event(struct icp_softc *, int); 117void icp_ccb_submit(struct icp_softc *icp, struct icp_ccb *ic); 118void icp_chain(struct icp_softc *); 119int icp_print(void *, const char *); 120int icp_submatch(struct device *, struct cfdata *, 121 const locdesc_t *, void *); 122void icp_watchdog(void *); 123void icp_ucmd_intr(struct icp_ccb *); 124void icp_recompute_openings(struct icp_softc *); 125 126int icp_count; /* total # of controllers, for ioctl interface */ 127 128/* 129 * Statistics for the ioctl interface to query. 130 * 131 * XXX Global. They should probably be made per-controller 132 * XXX at some point. 133 */ 134gdt_statist_t icp_stats; 135 136int 137icp_init(struct icp_softc *icp, const char *intrstr) 138{ 139 struct icp_attach_args icpa; 140 struct icp_binfo binfo; 141 struct icp_ccb *ic; 142 u_int16_t cdev_cnt; 143 int i, j, state, feat, nsegs, rv; 144 int locs[ICPCF_NLOCS]; 145 146 state = 0; 147 148 if (intrstr != NULL) 149 aprint_normal("%s: interrupting at %s\n", icp->icp_dv.dv_xname, 150 intrstr); 151 152 SIMPLEQ_INIT(&icp->icp_ccb_queue); 153 SIMPLEQ_INIT(&icp->icp_ccb_freelist); 154 SIMPLEQ_INIT(&icp->icp_ucmd_queue); 155 callout_init(&icp->icp_wdog_callout); 156 157 /* 158 * Allocate a scratch area. 
159 */ 160 if (bus_dmamap_create(icp->icp_dmat, ICP_SCRATCH_SIZE, 1, 161 ICP_SCRATCH_SIZE, 0, BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, 162 &icp->icp_scr_dmamap) != 0) { 163 aprint_error("%s: cannot create scratch dmamap\n", 164 icp->icp_dv.dv_xname); 165 return (1); 166 } 167 state++; 168 169 if (bus_dmamem_alloc(icp->icp_dmat, ICP_SCRATCH_SIZE, PAGE_SIZE, 0, 170 icp->icp_scr_seg, 1, &nsegs, BUS_DMA_NOWAIT) != 0) { 171 aprint_error("%s: cannot alloc scratch dmamem\n", 172 icp->icp_dv.dv_xname); 173 goto bail_out; 174 } 175 state++; 176 177 if (bus_dmamem_map(icp->icp_dmat, icp->icp_scr_seg, nsegs, 178 ICP_SCRATCH_SIZE, &icp->icp_scr, 0)) { 179 aprint_error("%s: cannot map scratch dmamem\n", 180 icp->icp_dv.dv_xname); 181 goto bail_out; 182 } 183 state++; 184 185 if (bus_dmamap_load(icp->icp_dmat, icp->icp_scr_dmamap, icp->icp_scr, 186 ICP_SCRATCH_SIZE, NULL, BUS_DMA_NOWAIT)) { 187 aprint_error("%s: cannot load scratch dmamap\n", 188 icp->icp_dv.dv_xname); 189 goto bail_out; 190 } 191 state++; 192 193 /* 194 * Allocate and initialize the command control blocks. 195 */ 196 ic = malloc(sizeof(*ic) * ICP_NCCBS, M_DEVBUF, M_NOWAIT | M_ZERO); 197 if ((icp->icp_ccbs = ic) == NULL) { 198 aprint_error("%s: malloc() failed\n", icp->icp_dv.dv_xname); 199 goto bail_out; 200 } 201 state++; 202 203 for (i = 0; i < ICP_NCCBS; i++, ic++) { 204 /* 205 * The first two command indexes have special meanings, so 206 * we can't use them. 207 */ 208 ic->ic_ident = i + 2; 209 rv = bus_dmamap_create(icp->icp_dmat, ICP_MAX_XFER, 210 ICP_MAXSG, ICP_MAX_XFER, 0, 211 BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, 212 &ic->ic_xfer_map); 213 if (rv != 0) 214 break; 215 icp->icp_nccbs++; 216 icp_ccb_free(icp, ic); 217 } 218#ifdef DIAGNOSTIC 219 if (icp->icp_nccbs != ICP_NCCBS) 220 aprint_error("%s: %d/%d CCBs usable\n", icp->icp_dv.dv_xname, 221 icp->icp_nccbs, ICP_NCCBS); 222#endif 223 224 /* 225 * Initalize the controller. 
226 */ 227 if (!icp_cmd(icp, ICP_SCREENSERVICE, ICP_INIT, 0, 0, 0)) { 228 aprint_error("%s: screen service init error %d\n", 229 icp->icp_dv.dv_xname, icp->icp_status); 230 goto bail_out; 231 } 232 233 if (!icp_cmd(icp, ICP_CACHESERVICE, ICP_INIT, ICP_LINUX_OS, 0, 0)) { 234 aprint_error("%s: cache service init error %d\n", 235 icp->icp_dv.dv_xname, icp->icp_status); 236 goto bail_out; 237 } 238 239 icp_cmd(icp, ICP_CACHESERVICE, ICP_UNFREEZE_IO, 0, 0, 0); 240 241 if (!icp_cmd(icp, ICP_CACHESERVICE, ICP_MOUNT, 0xffff, 1, 0)) { 242 aprint_error("%s: cache service mount error %d\n", 243 icp->icp_dv.dv_xname, icp->icp_status); 244 goto bail_out; 245 } 246 247 if (!icp_cmd(icp, ICP_CACHESERVICE, ICP_INIT, ICP_LINUX_OS, 0, 0)) { 248 aprint_error("%s: cache service post-mount init error %d\n", 249 icp->icp_dv.dv_xname, icp->icp_status); 250 goto bail_out; 251 } 252 cdev_cnt = (u_int16_t)icp->icp_info; 253 icp->icp_fw_vers = icp->icp_service; 254 255 if (!icp_cmd(icp, ICP_SCSIRAWSERVICE, ICP_INIT, 0, 0, 0)) { 256 aprint_error("%s: raw service init error %d\n", 257 icp->icp_dv.dv_xname, icp->icp_status); 258 goto bail_out; 259 } 260 261 /* 262 * Set/get raw service features (scatter/gather). 263 */ 264 feat = 0; 265 if (icp_cmd(icp, ICP_SCSIRAWSERVICE, ICP_SET_FEAT, ICP_SCATTER_GATHER, 266 0, 0)) 267 if (icp_cmd(icp, ICP_SCSIRAWSERVICE, ICP_GET_FEAT, 0, 0, 0)) 268 feat = icp->icp_info; 269 270 if ((feat & ICP_SCATTER_GATHER) == 0) { 271#ifdef DIAGNOSTIC 272 aprint_normal( 273 "%s: scatter/gather not supported (raw service)\n", 274 icp->icp_dv.dv_xname); 275#endif 276 } else 277 icp->icp_features |= ICP_FEAT_RAWSERVICE; 278 279 /* 280 * Set/get cache service features (scatter/gather). 
281 */ 282 feat = 0; 283 if (icp_cmd(icp, ICP_CACHESERVICE, ICP_SET_FEAT, 0, 284 ICP_SCATTER_GATHER, 0)) 285 if (icp_cmd(icp, ICP_CACHESERVICE, ICP_GET_FEAT, 0, 0, 0)) 286 feat = icp->icp_info; 287 288 if ((feat & ICP_SCATTER_GATHER) == 0) { 289#ifdef DIAGNOSTIC 290 aprint_normal( 291 "%s: scatter/gather not supported (cache service)\n", 292 icp->icp_dv.dv_xname); 293#endif 294 } else 295 icp->icp_features |= ICP_FEAT_CACHESERVICE; 296 297 /* 298 * Pull some information from the board and dump. 299 */ 300 if (!icp_cmd(icp, ICP_CACHESERVICE, ICP_IOCTL, ICP_BOARD_INFO, 301 ICP_INVALID_CHANNEL, sizeof(struct icp_binfo))) { 302 aprint_error("%s: unable to retrive board info\n", 303 icp->icp_dv.dv_xname); 304 goto bail_out; 305 } 306 memcpy(&binfo, icp->icp_scr, sizeof(binfo)); 307 308 aprint_normal( 309 "%s: model <%s>, firmware <%s>, %d channel(s), %dMB memory\n", 310 icp->icp_dv.dv_xname, binfo.bi_type_string, binfo.bi_raid_string, 311 binfo.bi_chan_count, le32toh(binfo.bi_memsize) >> 20); 312 313 /* 314 * Determine the number of devices, and number of openings per 315 * device. 316 */ 317 if (icp->icp_features & ICP_FEAT_CACHESERVICE) { 318 for (j = 0; j < cdev_cnt && j < ICP_MAX_HDRIVES; j++) { 319 if (!icp_cmd(icp, ICP_CACHESERVICE, ICP_INFO, j, 0, 320 0)) 321 continue; 322 323 icp->icp_cdr[j].cd_size = icp->icp_info; 324 if (icp->icp_cdr[j].cd_size != 0) 325 icp->icp_ndevs++; 326 327 if (icp_cmd(icp, ICP_CACHESERVICE, ICP_DEVTYPE, j, 0, 328 0)) 329 icp->icp_cdr[j].cd_type = icp->icp_info; 330 } 331 } 332 333 if (icp->icp_features & ICP_FEAT_RAWSERVICE) { 334 icp->icp_nchan = binfo.bi_chan_count; 335 icp->icp_ndevs += icp->icp_nchan; 336 } 337 338 icp_recompute_openings(icp); 339 340 /* 341 * Attach SCSI channels. 
342 */ 343 if (icp->icp_features & ICP_FEAT_RAWSERVICE) { 344 struct icp_ioc_version *iv; 345 struct icp_rawioc *ri; 346 struct icp_getch *gc; 347 348 iv = (struct icp_ioc_version *)icp->icp_scr; 349 iv->iv_version = htole32(ICP_IOC_NEWEST); 350 iv->iv_listents = ICP_MAXBUS; 351 iv->iv_firstchan = 0; 352 iv->iv_lastchan = ICP_MAXBUS - 1; 353 iv->iv_listoffset = htole32(sizeof(*iv)); 354 355 if (icp_cmd(icp, ICP_CACHESERVICE, ICP_IOCTL, 356 ICP_IOCHAN_RAW_DESC, ICP_INVALID_CHANNEL, 357 sizeof(*iv) + ICP_MAXBUS * sizeof(*ri))) { 358 ri = (struct icp_rawioc *)(iv + 1); 359 for (j = 0; j < binfo.bi_chan_count; j++, ri++) 360 icp->icp_bus_id[j] = ri->ri_procid; 361 } else { 362 /* 363 * Fall back to the old method. 364 */ 365 gc = (struct icp_getch *)icp->icp_scr; 366 367 for (j = 0; j < binfo.bi_chan_count; j++) { 368 if (!icp_cmd(icp, ICP_CACHESERVICE, ICP_IOCTL, 369 ICP_SCSI_CHAN_CNT | ICP_L_CTRL_PATTERN, 370 ICP_IO_CHANNEL | ICP_INVALID_CHANNEL, 371 sizeof(*gc))) { 372 aprint_error( 373 "%s: unable to get chan info", 374 icp->icp_dv.dv_xname); 375 goto bail_out; 376 } 377 icp->icp_bus_id[j] = gc->gc_scsiid; 378 } 379 } 380 381 for (j = 0; j < binfo.bi_chan_count; j++) { 382 if (icp->icp_bus_id[j] > ICP_MAXID_FC) 383 icp->icp_bus_id[j] = ICP_MAXID_FC; 384 385 icpa.icpa_unit = j + ICPA_UNIT_SCSI; 386 387 locs[ICPCF_UNIT] = j + ICPA_UNIT_SCSI; 388 389 icp->icp_children[icpa.icpa_unit] = 390 config_found_sm_loc(&icp->icp_dv, "icp", locs, 391 &icpa, icp_print, icp_submatch); 392 } 393 } 394 395 /* 396 * Attach cache devices. 397 */ 398 if (icp->icp_features & ICP_FEAT_CACHESERVICE) { 399 for (j = 0; j < cdev_cnt && j < ICP_MAX_HDRIVES; j++) { 400 if (icp->icp_cdr[j].cd_size == 0) 401 continue; 402 403 icpa.icpa_unit = j; 404 405 locs[ICPCF_UNIT] = j; 406 407 icp->icp_children[icpa.icpa_unit] = 408 config_found_sm_loc(&icp->icp_dv, "icp", locs, 409 &icpa, icp_print, icp_submatch); 410 } 411 } 412 413 /* 414 * Start the watchdog. 
415 */ 416 icp_watchdog(icp); 417 418 /* 419 * Count the controller, and we're done! 420 */ 421 icp_count++; 422 423 return (0); 424 425 bail_out: 426 if (state > 4) 427 for (j = 0; j < i; j++) 428 bus_dmamap_destroy(icp->icp_dmat, 429 icp->icp_ccbs[j].ic_xfer_map); 430 if (state > 3) 431 free(icp->icp_ccbs, M_DEVBUF); 432 if (state > 2) 433 bus_dmamap_unload(icp->icp_dmat, icp->icp_scr_dmamap); 434 if (state > 1) 435 bus_dmamem_unmap(icp->icp_dmat, icp->icp_scr, 436 ICP_SCRATCH_SIZE); 437 if (state > 0) 438 bus_dmamem_free(icp->icp_dmat, icp->icp_scr_seg, nsegs); 439 bus_dmamap_destroy(icp->icp_dmat, icp->icp_scr_dmamap); 440 441 return (1); 442} 443 444void 445icp_register_servicecb(struct icp_softc *icp, int unit, 446 const struct icp_servicecb *cb) 447{ 448 449 icp->icp_servicecb[unit] = cb; 450} 451 452void 453icp_rescan(struct icp_softc *icp, int unit) 454{ 455 struct icp_attach_args icpa; 456 u_int newsize, newtype; 457 int locs[ICPCF_NLOCS]; 458 459 /* 460 * NOTE: It is very important that the queue be frozen and not 461 * commands running when this is called. The ioctl mutex must 462 * also be held. 463 */ 464 465 KASSERT(icp->icp_qfreeze != 0); 466 KASSERT(icp->icp_running == 0); 467 KASSERT(unit < ICP_MAX_HDRIVES); 468 469 if (!icp_cmd(icp, ICP_CACHESERVICE, ICP_INFO, unit, 0, 0)) { 470#ifdef ICP_DEBUG 471 printf("%s: rescan: unit %d ICP_INFO failed -> 0x%04x\n", 472 icp->icp_dv.dv_xname, unit, icp->icp_status); 473#endif 474 goto gone; 475 } 476 if ((newsize = icp->icp_info) == 0) { 477#ifdef ICP_DEBUG 478 printf("%s: rescan: unit %d has zero size\n", 479 icp->icp_dv.dv_xname, unit); 480#endif 481 gone: 482 /* 483 * Host drive is no longer present; detach if a child 484 * is currently there. 
485 */ 486 if (icp->icp_cdr[unit].cd_size != 0) 487 icp->icp_ndevs--; 488 icp->icp_cdr[unit].cd_size = 0; 489 if (icp->icp_children[unit] != NULL) { 490 (void) config_detach(icp->icp_children[unit], 491 DETACH_FORCE); 492 icp->icp_children[unit] = NULL; 493 } 494 return; 495 } 496 497 if (icp_cmd(icp, ICP_CACHESERVICE, ICP_DEVTYPE, unit, 0, 0)) 498 newtype = icp->icp_info; 499 else { 500#ifdef ICP_DEBUG 501 printf("%s: rescan: unit %d ICP_DEVTYPE failed\n", 502 icp->icp_dv.dv_xname, unit); 503#endif 504 newtype = 0; /* XXX? */ 505 } 506 507#ifdef ICP_DEBUG 508 printf("%s: rescan: unit %d old %u/%u, new %u/%u\n", 509 icp->icp_dv.dv_xname, unit, icp->icp_cdr[unit].cd_size, 510 icp->icp_cdr[unit].cd_type, newsize, newtype); 511#endif 512 513 /* 514 * If the type or size changed, detach any old child (if it exists) 515 * and attach a new one. 516 */ 517 if (icp->icp_children[unit] == NULL || 518 newsize != icp->icp_cdr[unit].cd_size || 519 newtype != icp->icp_cdr[unit].cd_type) { 520 if (icp->icp_cdr[unit].cd_size == 0) 521 icp->icp_ndevs++; 522 icp->icp_cdr[unit].cd_size = newsize; 523 icp->icp_cdr[unit].cd_type = newtype; 524 if (icp->icp_children[unit] != NULL) 525 (void) config_detach(icp->icp_children[unit], 526 DETACH_FORCE); 527 528 icpa.icpa_unit = unit; 529 530 locs[ICPCF_UNIT] = unit; 531 532 icp->icp_children[unit] = config_found_sm_loc(&icp->icp_dv, 533 "icp", locs, &icpa, icp_print, icp_submatch); 534 } 535 536 icp_recompute_openings(icp); 537} 538 539void 540icp_rescan_all(struct icp_softc *icp) 541{ 542 int unit; 543 u_int16_t cdev_cnt; 544 545 /* 546 * This is the old method of rescanning the host drives. We 547 * start by reinitializing the cache service. 548 */ 549 if (!icp_cmd(icp, ICP_CACHESERVICE, ICP_INIT, ICP_LINUX_OS, 0, 0)) { 550 printf("%s: unable to re-initialize cache service for rescan\n", 551 icp->icp_dv.dv_xname); 552 return; 553 } 554 cdev_cnt = (u_int16_t) icp->icp_info; 555 556 /* For each host drive, do the new-style rescan. 
*/ 557 for (unit = 0; unit < cdev_cnt && unit < ICP_MAX_HDRIVES; unit++) 558 icp_rescan(icp, unit); 559 560 /* Now detach anything in the slots after cdev_cnt. */ 561 for (; unit < ICP_MAX_HDRIVES; unit++) { 562 if (icp->icp_cdr[unit].cd_size != 0) { 563#ifdef ICP_DEBUG 564 printf("%s: rescan all: unit %d < new cdev_cnt (%d)\n", 565 icp->icp_dv.dv_xname, unit, cdev_cnt); 566#endif 567 icp->icp_ndevs--; 568 icp->icp_cdr[unit].cd_size = 0; 569 if (icp->icp_children[unit] != NULL) { 570 (void) config_detach(icp->icp_children[unit], 571 DETACH_FORCE); 572 icp->icp_children[unit] = NULL; 573 } 574 } 575 } 576 577 icp_recompute_openings(icp); 578} 579 580void 581icp_recompute_openings(struct icp_softc *icp) 582{ 583 int unit, openings; 584 585 if (icp->icp_ndevs != 0) 586 openings = 587 (icp->icp_nccbs - ICP_NCCB_RESERVE) / icp->icp_ndevs; 588 else 589 openings = 0; 590 if (openings == icp->icp_openings) 591 return; 592 icp->icp_openings = openings; 593 594#ifdef ICP_DEBUG 595 printf("%s: %d device%s, %d openings per device\n", 596 icp->icp_dv.dv_xname, icp->icp_ndevs, 597 icp->icp_ndevs == 1 ? 
"" : "s", icp->icp_openings); 598#endif 599 600 for (unit = 0; unit < ICP_MAX_HDRIVES + ICP_MAXBUS; unit++) { 601 if (icp->icp_children[unit] != NULL) 602 (*icp->icp_servicecb[unit]->iscb_openings)( 603 icp->icp_children[unit], icp->icp_openings); 604 } 605} 606 607void 608icp_watchdog(void *cookie) 609{ 610 struct icp_softc *icp; 611 int s; 612 613 icp = cookie; 614 615 s = splbio(); 616 icp_intr(icp); 617 if (ICP_HAS_WORK(icp)) 618 icp_ccb_enqueue(icp, NULL); 619 splx(s); 620 621 callout_reset(&icp->icp_wdog_callout, hz * ICP_WATCHDOG_FREQ, 622 icp_watchdog, icp); 623} 624 625int 626icp_print(void *aux, const char *pnp) 627{ 628 struct icp_attach_args *icpa; 629 const char *str; 630 631 icpa = (struct icp_attach_args *)aux; 632 633 if (pnp != NULL) { 634 if (icpa->icpa_unit < ICPA_UNIT_SCSI) 635 str = "block device"; 636 else 637 str = "SCSI channel"; 638 aprint_normal("%s at %s", str, pnp); 639 } 640 aprint_normal(" unit %d", icpa->icpa_unit); 641 642 return (UNCONF); 643} 644 645int 646icp_submatch(struct device *parent, struct cfdata *cf, 647 const locdesc_t *locs, void *aux) 648{ 649 650 if (cf->cf_loc[ICPCF_UNIT] != ICPCF_UNIT_DEFAULT && 651 cf->cf_loc[ICPCF_UNIT] != locs[ICPCF_UNIT]) 652 return (0); 653 654 return (config_match(parent, cf, aux)); 655} 656 657int 658icp_async_event(struct icp_softc *icp, int service) 659{ 660 661 if (service == ICP_SCREENSERVICE) { 662 if (icp->icp_status == ICP_S_MSG_REQUEST) { 663 /* XXX */ 664 } 665 } else { 666 if ((icp->icp_fw_vers & 0xff) >= 0x1a) { 667 icp->icp_evt.size = 0; 668 icp->icp_evt.eu.async.ionode = icp->icp_dv.dv_unit; 669 icp->icp_evt.eu.async.status = icp->icp_status; 670 /* 671 * Severity and event string are filled in by the 672 * hardware interface interrupt handler. 
673 */ 674 printf("%s: %s\n", icp->icp_dv.dv_xname, 675 icp->icp_evt.event_string); 676 } else { 677 icp->icp_evt.size = sizeof(icp->icp_evt.eu.async); 678 icp->icp_evt.eu.async.ionode = icp->icp_dv.dv_unit; 679 icp->icp_evt.eu.async.service = service; 680 icp->icp_evt.eu.async.status = icp->icp_status; 681 icp->icp_evt.eu.async.info = icp->icp_info; 682 /* XXXJRT FIX THIS */ 683 *(u_int32_t *) icp->icp_evt.eu.async.scsi_coord = 684 icp->icp_info2; 685 } 686 icp_store_event(icp, GDT_ES_ASYNC, service, &icp->icp_evt); 687 } 688 689 return (0); 690} 691 692int 693icp_intr(void *cookie) 694{ 695 struct icp_softc *icp; 696 struct icp_intr_ctx ctx; 697 struct icp_ccb *ic; 698 699 icp = cookie; 700 701 ctx.istatus = (*icp->icp_get_status)(icp); 702 if (!ctx.istatus) { 703 icp->icp_status = ICP_S_NO_STATUS; 704 return (0); 705 } 706 707 (*icp->icp_intr)(icp, &ctx); 708 709 icp->icp_status = ctx.cmd_status; 710 icp->icp_service = ctx.service; 711 icp->icp_info = ctx.info; 712 icp->icp_info2 = ctx.info2; 713 714 switch (ctx.istatus) { 715 case ICP_ASYNCINDEX: 716 icp_async_event(icp, ctx.service); 717 return (1); 718 719 case ICP_SPEZINDEX: 720 printf("%s: uninitialized or unknown service (%d/%d)\n", 721 icp->icp_dv.dv_xname, ctx.info, ctx.info2); 722 icp->icp_evt.size = sizeof(icp->icp_evt.eu.driver); 723 icp->icp_evt.eu.driver.ionode = icp->icp_dv.dv_unit; 724 icp_store_event(icp, GDT_ES_DRIVER, 4, &icp->icp_evt); 725 return (1); 726 } 727 728 if ((ctx.istatus - 2) > icp->icp_nccbs) 729 panic("icp_intr: bad command index returned"); 730 731 ic = &icp->icp_ccbs[ctx.istatus - 2]; 732 ic->ic_status = icp->icp_status; 733 734 if ((ic->ic_flags & IC_ALLOCED) == 0) { 735 /* XXX ICP's "iir" driver just sends an event here. */ 736 panic("icp_intr: inactive CCB identified"); 737 } 738 739 /* 740 * Try to protect ourselves from the running command count already 741 * being 0 (e.g. if a polled command times out). 
742 */ 743 KDASSERT(icp->icp_running != 0); 744 if (--icp->icp_running == 0 && 745 (icp->icp_flags & ICP_F_WAIT_FREEZE) != 0) { 746 icp->icp_flags &= ~ICP_F_WAIT_FREEZE; 747 wakeup(&icp->icp_qfreeze); 748 } 749 750 switch (icp->icp_status) { 751 case ICP_S_BSY: 752#ifdef ICP_DEBUG 753 printf("%s: ICP_S_BSY received\n", icp->icp_dv.dv_xname); 754#endif 755 if (__predict_false((ic->ic_flags & IC_UCMD) != 0)) 756 SIMPLEQ_INSERT_HEAD(&icp->icp_ucmd_queue, ic, ic_chain); 757 else 758 SIMPLEQ_INSERT_HEAD(&icp->icp_ccb_queue, ic, ic_chain); 759 break; 760 761 default: 762 ic->ic_flags |= IC_COMPLETE; 763 764 if ((ic->ic_flags & IC_WAITING) != 0) 765 wakeup(ic); 766 else if (ic->ic_intr != NULL) 767 (*ic->ic_intr)(ic); 768 769 if (ICP_HAS_WORK(icp)) 770 icp_ccb_enqueue(icp, NULL); 771 772 break; 773 } 774 775 return (1); 776} 777 778struct icp_ucmd_ctx { 779 gdt_ucmd_t *iu_ucmd; 780 u_int32_t iu_cnt; 781}; 782 783void 784icp_ucmd_intr(struct icp_ccb *ic) 785{ 786 struct icp_softc *icp = (void *) ic->ic_dv; 787 struct icp_ucmd_ctx *iu = ic->ic_context; 788 gdt_ucmd_t *ucmd = iu->iu_ucmd; 789 790 ucmd->status = icp->icp_status; 791 ucmd->info = icp->icp_info; 792 793 if (iu->iu_cnt != 0) { 794 bus_dmamap_sync(icp->icp_dmat, 795 icp->icp_scr_dmamap, 796 ICP_SCRATCH_UCMD, iu->iu_cnt, 797 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); 798 memcpy(ucmd->data, 799 icp->icp_scr + ICP_SCRATCH_UCMD, iu->iu_cnt); 800 } 801 802 icp->icp_ucmd_ccb = NULL; 803 804 ic->ic_flags |= IC_COMPLETE; 805 wakeup(ic); 806} 807 808/* 809 * NOTE: We assume that it is safe to sleep here! 
810 */ 811int 812icp_cmd(struct icp_softc *icp, u_int8_t service, u_int16_t opcode, 813 u_int32_t arg1, u_int32_t arg2, u_int32_t arg3) 814{ 815 struct icp_ioctlcmd *icmd; 816 struct icp_cachecmd *cc; 817 struct icp_rawcmd *rc; 818 int retries, rv; 819 struct icp_ccb *ic; 820 821 retries = ICP_RETRIES; 822 823 do { 824 ic = icp_ccb_alloc_wait(icp); 825 memset(&ic->ic_cmd, 0, sizeof(ic->ic_cmd)); 826 ic->ic_cmd.cmd_opcode = htole16(opcode); 827 828 switch (service) { 829 case ICP_CACHESERVICE: 830 if (opcode == ICP_IOCTL) { 831 icmd = &ic->ic_cmd.cmd_packet.ic; 832 icmd->ic_subfunc = htole16(arg1); 833 icmd->ic_channel = htole32(arg2); 834 icmd->ic_bufsize = htole32(arg3); 835 icmd->ic_addr = 836 htole32(icp->icp_scr_seg[0].ds_addr); 837 838 bus_dmamap_sync(icp->icp_dmat, 839 icp->icp_scr_dmamap, 0, arg3, 840 BUS_DMASYNC_PREWRITE | 841 BUS_DMASYNC_PREREAD); 842 } else { 843 cc = &ic->ic_cmd.cmd_packet.cc; 844 cc->cc_deviceno = htole16(arg1); 845 cc->cc_blockno = htole32(arg2); 846 } 847 break; 848 849 case ICP_SCSIRAWSERVICE: 850 rc = &ic->ic_cmd.cmd_packet.rc; 851 rc->rc_direction = htole32(arg1); 852 rc->rc_bus = arg2; 853 rc->rc_target = arg3; 854 rc->rc_lun = arg3 >> 8; 855 break; 856 } 857 858 ic->ic_service = service; 859 ic->ic_cmdlen = sizeof(ic->ic_cmd); 860 rv = icp_ccb_poll(icp, ic, 10000); 861 862 switch (service) { 863 case ICP_CACHESERVICE: 864 if (opcode == ICP_IOCTL) { 865 bus_dmamap_sync(icp->icp_dmat, 866 icp->icp_scr_dmamap, 0, arg3, 867 BUS_DMASYNC_POSTWRITE | 868 BUS_DMASYNC_POSTREAD); 869 } 870 break; 871 } 872 873 icp_ccb_free(icp, ic); 874 } while (rv != 0 && --retries > 0); 875 876 return (icp->icp_status == ICP_S_OK); 877} 878 879int 880icp_ucmd(struct icp_softc *icp, gdt_ucmd_t *ucmd) 881{ 882 struct icp_ccb *ic; 883 struct icp_ucmd_ctx iu; 884 u_int32_t cnt; 885 int error; 886 887 if (ucmd->service == ICP_CACHESERVICE) { 888 if (ucmd->command.cmd_opcode == ICP_IOCTL) { 889 cnt = ucmd->command.cmd_packet.ic.ic_bufsize; 890 if (cnt > 
GDT_SCRATCH_SZ) { 891 printf("%s: scratch buffer too small (%d/%d)\n", 892 icp->icp_dv.dv_xname, GDT_SCRATCH_SZ, cnt); 893 return (EINVAL); 894 } 895 } else { 896 cnt = ucmd->command.cmd_packet.cc.cc_blockcnt * 897 ICP_SECTOR_SIZE; 898 if (cnt > GDT_SCRATCH_SZ) { 899 printf("%s: scratch buffer too small (%d/%d)\n", 900 icp->icp_dv.dv_xname, GDT_SCRATCH_SZ, cnt); 901 return (EINVAL); 902 } 903 } 904 } else { 905 cnt = ucmd->command.cmd_packet.rc.rc_sdlen + 906 ucmd->command.cmd_packet.rc.rc_sense_len; 907 if (cnt > GDT_SCRATCH_SZ) { 908 printf("%s: scratch buffer too small (%d/%d)\n", 909 icp->icp_dv.dv_xname, GDT_SCRATCH_SZ, cnt); 910 return (EINVAL); 911 } 912 } 913 914 iu.iu_ucmd = ucmd; 915 iu.iu_cnt = cnt; 916 917 ic = icp_ccb_alloc_wait(icp); 918 memset(&ic->ic_cmd, 0, sizeof(ic->ic_cmd)); 919 ic->ic_cmd.cmd_opcode = htole16(ucmd->command.cmd_opcode); 920 921 if (ucmd->service == ICP_CACHESERVICE) { 922 if (ucmd->command.cmd_opcode == ICP_IOCTL) { 923 struct icp_ioctlcmd *icmd, *uicmd; 924 925 icmd = &ic->ic_cmd.cmd_packet.ic; 926 uicmd = &ucmd->command.cmd_packet.ic; 927 928 icmd->ic_subfunc = htole16(uicmd->ic_subfunc); 929 icmd->ic_channel = htole32(uicmd->ic_channel); 930 icmd->ic_bufsize = htole32(uicmd->ic_bufsize); 931 icmd->ic_addr = 932 htole32(icp->icp_scr_seg[0].ds_addr + 933 ICP_SCRATCH_UCMD); 934 } else { 935 struct icp_cachecmd *cc, *ucc; 936 937 cc = &ic->ic_cmd.cmd_packet.cc; 938 ucc = &ucmd->command.cmd_packet.cc; 939 940 cc->cc_deviceno = htole16(ucc->cc_deviceno); 941 cc->cc_blockno = htole32(ucc->cc_blockno); 942 cc->cc_blockcnt = htole32(ucc->cc_blockcnt); 943 cc->cc_addr = htole32(0xffffffffU); 944 cc->cc_nsgent = htole32(1); 945 cc->cc_sg[0].sg_addr = 946 htole32(icp->icp_scr_seg[0].ds_addr + 947 ICP_SCRATCH_UCMD); 948 cc->cc_sg[0].sg_len = htole32(cnt); 949 } 950 } else { 951 struct icp_rawcmd *rc, *urc; 952 953 rc = &ic->ic_cmd.cmd_packet.rc; 954 urc = &ucmd->command.cmd_packet.rc; 955 956 rc->rc_direction = htole32(urc->rc_direction); 
957 rc->rc_sdata = htole32(0xffffffffU); 958 rc->rc_sdlen = htole32(urc->rc_sdlen); 959 rc->rc_clen = htole32(urc->rc_clen); 960 memcpy(rc->rc_cdb, urc->rc_cdb, sizeof(rc->rc_cdb)); 961 rc->rc_target = urc->rc_target; 962 rc->rc_lun = urc->rc_lun; 963 rc->rc_bus = urc->rc_bus; 964 rc->rc_sense_len = htole32(urc->rc_sense_len); 965 rc->rc_sense_addr = 966 htole32(icp->icp_scr_seg[0].ds_addr + 967 ICP_SCRATCH_UCMD + urc->rc_sdlen); 968 rc->rc_nsgent = htole32(1); 969 rc->rc_sg[0].sg_addr = 970 htole32(icp->icp_scr_seg[0].ds_addr + ICP_SCRATCH_UCMD); 971 rc->rc_sg[0].sg_len = htole32(cnt - urc->rc_sense_len); 972 } 973 974 ic->ic_service = ucmd->service; 975 ic->ic_cmdlen = sizeof(ic->ic_cmd); 976 ic->ic_context = &iu; 977 978 /* 979 * XXX What units are ucmd->timeout in? Until we know, we 980 * XXX just pull a number out of thin air. 981 */ 982 if (__predict_false((error = icp_ccb_wait_user(icp, ic, 30000)) != 0)) 983 printf("%s: error %d waiting for ucmd to complete\n", 984 icp->icp_dv.dv_xname, error); 985 986 /* icp_ucmd_intr() has updated ucmd. 
*/ 987 icp_ccb_free(icp, ic); 988 989 return (error); 990} 991 992struct icp_ccb * 993icp_ccb_alloc(struct icp_softc *icp) 994{ 995 struct icp_ccb *ic; 996 int s; 997 998 s = splbio(); 999 if (__predict_false((ic = 1000 SIMPLEQ_FIRST(&icp->icp_ccb_freelist)) == NULL)) { 1001 splx(s); 1002 return (NULL); 1003 } 1004 SIMPLEQ_REMOVE_HEAD(&icp->icp_ccb_freelist, ic_chain); 1005 splx(s); 1006 1007 ic->ic_flags = IC_ALLOCED; 1008 return (ic); 1009} 1010 1011struct icp_ccb * 1012icp_ccb_alloc_wait(struct icp_softc *icp) 1013{ 1014 struct icp_ccb *ic; 1015 int s; 1016 1017 s = splbio(); 1018 while ((ic = SIMPLEQ_FIRST(&icp->icp_ccb_freelist)) == NULL) { 1019 icp->icp_flags |= ICP_F_WAIT_CCB; 1020 (void) tsleep(&icp->icp_ccb_freelist, PRIBIO, "icpccb", 0); 1021 } 1022 SIMPLEQ_REMOVE_HEAD(&icp->icp_ccb_freelist, ic_chain); 1023 splx(s); 1024 1025 ic->ic_flags = IC_ALLOCED; 1026 return (ic); 1027} 1028 1029void 1030icp_ccb_free(struct icp_softc *icp, struct icp_ccb *ic) 1031{ 1032 int s; 1033 1034 s = splbio(); 1035 ic->ic_flags = 0; 1036 ic->ic_intr = NULL; 1037 SIMPLEQ_INSERT_HEAD(&icp->icp_ccb_freelist, ic, ic_chain); 1038 if (__predict_false((icp->icp_flags & ICP_F_WAIT_CCB) != 0)) { 1039 icp->icp_flags &= ~ICP_F_WAIT_CCB; 1040 wakeup(&icp->icp_ccb_freelist); 1041 } 1042 splx(s); 1043} 1044 1045void 1046icp_ccb_enqueue(struct icp_softc *icp, struct icp_ccb *ic) 1047{ 1048 int s; 1049 1050 s = splbio(); 1051 1052 if (ic != NULL) { 1053 if (__predict_false((ic->ic_flags & IC_UCMD) != 0)) 1054 SIMPLEQ_INSERT_TAIL(&icp->icp_ucmd_queue, ic, ic_chain); 1055 else 1056 SIMPLEQ_INSERT_TAIL(&icp->icp_ccb_queue, ic, ic_chain); 1057 } 1058 1059 for (; icp->icp_qfreeze == 0;) { 1060 if (__predict_false((ic = 1061 SIMPLEQ_FIRST(&icp->icp_ucmd_queue)) != NULL)) { 1062 struct icp_ucmd_ctx *iu = ic->ic_context; 1063 gdt_ucmd_t *ucmd = iu->iu_ucmd; 1064 1065 /* 1066 * All user-generated commands share the same 1067 * scratch space, so if one is already running, 1068 * we have to stall the 
command queue. 1069 */ 1070 if (icp->icp_ucmd_ccb != NULL) 1071 break; 1072 if ((*icp->icp_test_busy)(icp)) 1073 break; 1074 icp->icp_ucmd_ccb = ic; 1075 1076 if (iu->iu_cnt != 0) { 1077 memcpy(icp->icp_scr + ICP_SCRATCH_UCMD, 1078 ucmd->data, iu->iu_cnt); 1079 bus_dmamap_sync(icp->icp_dmat, 1080 icp->icp_scr_dmamap, 1081 ICP_SCRATCH_UCMD, iu->iu_cnt, 1082 BUS_DMASYNC_PREREAD | 1083 BUS_DMASYNC_PREWRITE); 1084 } 1085 } else if (__predict_true((ic = 1086 SIMPLEQ_FIRST(&icp->icp_ccb_queue)) != NULL)) { 1087 if ((*icp->icp_test_busy)(icp)) 1088 break; 1089 } else { 1090 /* no command found */ 1091 break; 1092 } 1093 icp_ccb_submit(icp, ic); 1094 if (__predict_false((ic->ic_flags & IC_UCMD) != 0)) 1095 SIMPLEQ_REMOVE_HEAD(&icp->icp_ucmd_queue, ic_chain); 1096 else 1097 SIMPLEQ_REMOVE_HEAD(&icp->icp_ccb_queue, ic_chain); 1098 } 1099 1100 splx(s); 1101} 1102 1103int 1104icp_ccb_map(struct icp_softc *icp, struct icp_ccb *ic, void *data, int size, 1105 int dir) 1106{ 1107 struct icp_sg *sg; 1108 int nsegs, i, rv; 1109 bus_dmamap_t xfer; 1110 1111 xfer = ic->ic_xfer_map; 1112 1113 rv = bus_dmamap_load(icp->icp_dmat, xfer, data, size, NULL, 1114 BUS_DMA_NOWAIT | BUS_DMA_STREAMING | 1115 ((dir & IC_XFER_IN) ? 
BUS_DMA_READ : BUS_DMA_WRITE)); 1116 if (rv != 0) 1117 return (rv); 1118 1119 nsegs = xfer->dm_nsegs; 1120 ic->ic_xfer_size = size; 1121 ic->ic_nsgent = nsegs; 1122 ic->ic_flags |= dir; 1123 sg = ic->ic_sg; 1124 1125 if (sg != NULL) { 1126 for (i = 0; i < nsegs; i++, sg++) { 1127 sg->sg_addr = htole32(xfer->dm_segs[i].ds_addr); 1128 sg->sg_len = htole32(xfer->dm_segs[i].ds_len); 1129 } 1130 } else if (nsegs > 1) 1131 panic("icp_ccb_map: no SG list specified, but nsegs > 1"); 1132 1133 if ((dir & IC_XFER_OUT) != 0) 1134 i = BUS_DMASYNC_PREWRITE; 1135 else /* if ((dir & IC_XFER_IN) != 0) */ 1136 i = BUS_DMASYNC_PREREAD; 1137 1138 bus_dmamap_sync(icp->icp_dmat, xfer, 0, ic->ic_xfer_size, i); 1139 return (0); 1140} 1141 1142void 1143icp_ccb_unmap(struct icp_softc *icp, struct icp_ccb *ic) 1144{ 1145 int i; 1146 1147 if ((ic->ic_flags & IC_XFER_OUT) != 0) 1148 i = BUS_DMASYNC_POSTWRITE; 1149 else /* if ((ic->ic_flags & IC_XFER_IN) != 0) */ 1150 i = BUS_DMASYNC_POSTREAD; 1151 1152 bus_dmamap_sync(icp->icp_dmat, ic->ic_xfer_map, 0, ic->ic_xfer_size, i); 1153 bus_dmamap_unload(icp->icp_dmat, ic->ic_xfer_map); 1154} 1155 1156int 1157icp_ccb_poll(struct icp_softc *icp, struct icp_ccb *ic, int timo) 1158{ 1159 int s, rv; 1160 1161 s = splbio(); 1162 1163 for (timo = ICP_BUSY_WAIT_MS * 100; timo != 0; timo--) { 1164 if (!(*icp->icp_test_busy)(icp)) 1165 break; 1166 DELAY(10); 1167 } 1168 if (timo == 0) { 1169 printf("%s: submit: busy\n", icp->icp_dv.dv_xname); 1170 return (EAGAIN); 1171 } 1172 1173 icp_ccb_submit(icp, ic); 1174 1175 if (cold) { 1176 for (timo *= 10; timo != 0; timo--) { 1177 DELAY(100); 1178 icp_intr(icp); 1179 if ((ic->ic_flags & IC_COMPLETE) != 0) 1180 break; 1181 } 1182 } else { 1183 ic->ic_flags |= IC_WAITING; 1184 while ((ic->ic_flags & IC_COMPLETE) == 0) { 1185 if ((rv = tsleep(ic, PRIBIO, "icpwccb", 1186 mstohz(timo))) != 0) { 1187 timo = 0; 1188 break; 1189 } 1190 } 1191 } 1192 1193 if (timo != 0) { 1194 if (ic->ic_status != ICP_S_OK) { 1195#ifdef 
ICP_DEBUG 1196 printf("%s: request failed; status=0x%04x\n", 1197 icp->icp_dv.dv_xname, ic->ic_status); 1198#endif 1199 rv = EIO; 1200 } else 1201 rv = 0; 1202 } else { 1203 printf("%s: command timed out\n", icp->icp_dv.dv_xname); 1204 rv = EIO; 1205 } 1206 1207 while ((*icp->icp_test_busy)(icp) != 0) 1208 DELAY(10); 1209 1210 splx(s); 1211 1212 return (rv); 1213} 1214 1215int 1216icp_ccb_wait(struct icp_softc *icp, struct icp_ccb *ic, int timo) 1217{ 1218 int s, rv; 1219 1220 ic->ic_flags |= IC_WAITING; 1221 1222 s = splbio(); 1223 icp_ccb_enqueue(icp, ic); 1224 while ((ic->ic_flags & IC_COMPLETE) == 0) { 1225 if ((rv = tsleep(ic, PRIBIO, "icpwccb", mstohz(timo))) != 0) { 1226 splx(s); 1227 return (rv); 1228 } 1229 } 1230 splx(s); 1231 1232 if (ic->ic_status != ICP_S_OK) { 1233 printf("%s: command failed; status=%x\n", icp->icp_dv.dv_xname, 1234 ic->ic_status); 1235 return (EIO); 1236 } 1237 1238 return (0); 1239} 1240 1241int 1242icp_ccb_wait_user(struct icp_softc *icp, struct icp_ccb *ic, int timo) 1243{ 1244 int s, rv; 1245 1246 ic->ic_dv = &icp->icp_dv; 1247 ic->ic_intr = icp_ucmd_intr; 1248 ic->ic_flags |= IC_UCMD; 1249 1250 s = splbio(); 1251 icp_ccb_enqueue(icp, ic); 1252 while ((ic->ic_flags & IC_COMPLETE) == 0) { 1253 if ((rv = tsleep(ic, PRIBIO, "icpwuccb", mstohz(timo))) != 0) { 1254 splx(s); 1255 return (rv); 1256 } 1257 } 1258 splx(s); 1259 1260 return (0); 1261} 1262 1263void 1264icp_ccb_submit(struct icp_softc *icp, struct icp_ccb *ic) 1265{ 1266 1267 ic->ic_cmdlen = (ic->ic_cmdlen + 3) & ~3; 1268 1269 (*icp->icp_set_sema0)(icp); 1270 DELAY(10); 1271 1272 ic->ic_cmd.cmd_boardnode = htole32(ICP_LOCALBOARD); 1273 ic->ic_cmd.cmd_cmdindex = htole32(ic->ic_ident); 1274 1275 icp->icp_running++; 1276 1277 (*icp->icp_copy_cmd)(icp, ic); 1278 (*icp->icp_release_event)(icp, ic); 1279} 1280 1281int 1282icp_freeze(struct icp_softc *icp) 1283{ 1284 int s, error = 0; 1285 1286 s = splbio(); 1287 if (icp->icp_qfreeze++ == 0) { 1288 while (icp->icp_running != 0) { 
1289 icp->icp_flags |= ICP_F_WAIT_FREEZE; 1290 error = tsleep(&icp->icp_qfreeze, PRIBIO|PCATCH, 1291 "icpqfrz", 0); 1292 if (error != 0 && --icp->icp_qfreeze == 0 && 1293 ICP_HAS_WORK(icp)) { 1294 icp_ccb_enqueue(icp, NULL); 1295 break; 1296 } 1297 } 1298 } 1299 splx(s); 1300 1301 return (error); 1302} 1303 1304void 1305icp_unfreeze(struct icp_softc *icp) 1306{ 1307 int s; 1308 1309 s = splbio(); 1310 KDASSERT(icp->icp_qfreeze != 0); 1311 if (--icp->icp_qfreeze == 0 && ICP_HAS_WORK(icp)) 1312 icp_ccb_enqueue(icp, NULL); 1313 splx(s); 1314} 1315 1316/* XXX Global - should be per-controller? XXX */ 1317static gdt_evt_str icp_event_buffer[ICP_MAX_EVENTS]; 1318static int icp_event_oldidx; 1319static int icp_event_lastidx; 1320 1321gdt_evt_str * 1322icp_store_event(struct icp_softc *icp, u_int16_t source, u_int16_t idx, 1323 gdt_evt_data *evt) 1324{ 1325 gdt_evt_str *e; 1326 1327 /* no source == no event */ 1328 if (source == 0) 1329 return (NULL); 1330 1331 e = &icp_event_buffer[icp_event_lastidx]; 1332 if (e->event_source == source && e->event_idx == idx && 1333 ((evt->size != 0 && e->event_data.size != 0 && 1334 memcmp(&e->event_data.eu, &evt->eu, evt->size) == 0) || 1335 (evt->size == 0 && e->event_data.size == 0 && 1336 strcmp((char *) e->event_data.event_string, 1337 (char *) evt->event_string) == 0))) { 1338 e->last_stamp = time.tv_sec; 1339 e->same_count++; 1340 } else { 1341 if (icp_event_buffer[icp_event_lastidx].event_source != 0) { 1342 icp_event_lastidx++; 1343 if (icp_event_lastidx == ICP_MAX_EVENTS) 1344 icp_event_lastidx = 0; 1345 if (icp_event_lastidx == icp_event_oldidx) { 1346 icp_event_oldidx++; 1347 if (icp_event_oldidx == ICP_MAX_EVENTS) 1348 icp_event_oldidx = 0; 1349 } 1350 } 1351 e = &icp_event_buffer[icp_event_lastidx]; 1352 e->event_source = source; 1353 e->event_idx = idx; 1354 e->first_stamp = e->last_stamp = time.tv_sec; 1355 e->same_count = 1; 1356 e->event_data = *evt; 1357 e->application = 0; 1358 } 1359 return (e); 1360} 1361 1362int 
1363icp_read_event(struct icp_softc *icp, int handle, gdt_evt_str *estr) 1364{ 1365 gdt_evt_str *e; 1366 int eindex, s; 1367 1368 s = splbio(); 1369 1370 if (handle == -1) 1371 eindex = icp_event_oldidx; 1372 else 1373 eindex = handle; 1374 1375 estr->event_source = 0; 1376 1377 if (eindex < 0 || eindex >= ICP_MAX_EVENTS) { 1378 splx(s); 1379 return (eindex); 1380 } 1381 1382 e = &icp_event_buffer[eindex]; 1383 if (e->event_source != 0) { 1384 if (eindex != icp_event_lastidx) { 1385 eindex++; 1386 if (eindex == ICP_MAX_EVENTS) 1387 eindex = 0; 1388 } else 1389 eindex = -1; 1390 memcpy(estr, e, sizeof(gdt_evt_str)); 1391 } 1392 1393 splx(s); 1394 1395 return (eindex); 1396} 1397 1398void 1399icp_readapp_event(struct icp_softc *icp, u_int8_t application, 1400 gdt_evt_str *estr) 1401{ 1402 gdt_evt_str *e; 1403 int found = 0, eindex, s; 1404 1405 s = splbio(); 1406 1407 eindex = icp_event_oldidx; 1408 for (;;) { 1409 e = &icp_event_buffer[eindex]; 1410 if (e->event_source == 0) 1411 break; 1412 if ((e->application & application) == 0) { 1413 e->application |= application; 1414 found = 1; 1415 break; 1416 } 1417 if (eindex == icp_event_lastidx) 1418 break; 1419 eindex++; 1420 if (eindex == ICP_MAX_EVENTS) 1421 eindex = 0; 1422 } 1423 if (found) 1424 memcpy(estr, e, sizeof(gdt_evt_str)); 1425 else 1426 estr->event_source = 0; 1427 1428 splx(s); 1429} 1430 1431void 1432icp_clear_events(struct icp_softc *icp) 1433{ 1434 int s; 1435 1436 s = splbio(); 1437 icp_event_oldidx = icp_event_lastidx = 0; 1438 memset(icp_event_buffer, 0, sizeof(icp_event_buffer)); 1439 splx(s); 1440} 1441