hifn7751.c revision 117126
1/* $FreeBSD: head/sys/dev/hifn/hifn7751.c 117126 2003-07-01 15:52:06Z scottl $ */ 2/* $OpenBSD: hifn7751.c,v 1.120 2002/05/17 00:33:34 deraadt Exp $ */ 3 4/* 5 * Invertex AEON / Hifn 7751 driver 6 * Copyright (c) 1999 Invertex Inc. All rights reserved. 7 * Copyright (c) 1999 Theo de Raadt 8 * Copyright (c) 2000-2001 Network Security Technologies, Inc. 9 * http://www.netsec.net 10 * 11 * This driver is based on a previous driver by Invertex, for which they 12 * requested: Please send any comments, feedback, bug-fixes, or feature 13 * requests to software@invertex.com. 14 * 15 * Redistribution and use in source and binary forms, with or without 16 * modification, are permitted provided that the following conditions 17 * are met: 18 * 19 * 1. Redistributions of source code must retain the above copyright 20 * notice, this list of conditions and the following disclaimer. 21 * 2. Redistributions in binary form must reproduce the above copyright 22 * notice, this list of conditions and the following disclaimer in the 23 * documentation and/or other materials provided with the distribution. 24 * 3. The name of the author may not be used to endorse or promote products 25 * derived from this software without specific prior written permission. 26 * 27 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR 28 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES 29 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
30 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, 31 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT 32 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 33 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 34 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 35 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF 36 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 37 * 38 * Effort sponsored in part by the Defense Advanced Research Projects 39 * Agency (DARPA) and Air Force Research Laboratory, Air Force 40 * Materiel Command, USAF, under agreement number F30602-01-2-0537. 41 * 42 */ 43 44/* 45 * Driver for the Hifn 7751 encryption processor. 46 */ 47#include "opt_hifn.h" 48 49#include <sys/param.h> 50#include <sys/systm.h> 51#include <sys/proc.h> 52#include <sys/errno.h> 53#include <sys/malloc.h> 54#include <sys/kernel.h> 55#include <sys/mbuf.h> 56#include <sys/lock.h> 57#include <sys/mutex.h> 58#include <sys/sysctl.h> 59 60#include <vm/vm.h> 61#include <vm/pmap.h> 62 63#include <machine/clock.h> 64#include <machine/bus.h> 65#include <machine/resource.h> 66#include <sys/bus.h> 67#include <sys/rman.h> 68 69#include <opencrypto/cryptodev.h> 70#include <sys/random.h> 71 72#include <pci/pcivar.h> 73#include <pci/pcireg.h> 74 75#ifdef HIFN_RNDTEST 76#include <dev/rndtest/rndtest.h> 77#endif 78#include <dev/hifn/hifn7751reg.h> 79#include <dev/hifn/hifn7751var.h> 80 81/* 82 * Prototypes and count for the pci_device structure 83 */ 84static int hifn_probe(device_t); 85static int hifn_attach(device_t); 86static int hifn_detach(device_t); 87static int hifn_suspend(device_t); 88static int hifn_resume(device_t); 89static void hifn_shutdown(device_t); 90 91static device_method_t hifn_methods[] = { 92 /* Device interface */ 93 DEVMETHOD(device_probe, hifn_probe), 94 DEVMETHOD(device_attach, hifn_attach), 95 
	DEVMETHOD(device_detach,	hifn_detach),
	DEVMETHOD(device_suspend,	hifn_suspend),
	DEVMETHOD(device_resume,	hifn_resume),
	DEVMETHOD(device_shutdown,	hifn_shutdown),

	/* bus interface */
	DEVMETHOD(bus_print_child,	bus_generic_print_child),
	DEVMETHOD(bus_driver_added,	bus_generic_driver_added),

	{ 0, 0 }
};

/* newbus glue: driver description and per-device softc size */
static driver_t hifn_driver = {
	"hifn",
	hifn_methods,
	sizeof (struct hifn_softc)
};
static devclass_t hifn_devclass;

/* Attach to the PCI bus; depends on opencrypto (and rndtest when built) */
DRIVER_MODULE(hifn, pci, hifn_driver, hifn_devclass, 0, 0);
MODULE_DEPEND(hifn, crypto, 1, 1, 1);
#ifdef HIFN_RNDTEST
MODULE_DEPEND(hifn, rndtest, 1, 1, 1);
#endif

/* Forward declarations for the driver internals */
static	void hifn_reset_board(struct hifn_softc *, int);
static	void hifn_reset_puc(struct hifn_softc *);
static	void hifn_puc_wait(struct hifn_softc *);
static	int hifn_enable_crypto(struct hifn_softc *);
static	void hifn_set_retry(struct hifn_softc *sc);
static	void hifn_init_dma(struct hifn_softc *);
static	void hifn_init_pci_registers(struct hifn_softc *);
static	int hifn_sramsize(struct hifn_softc *);
static	int hifn_dramsize(struct hifn_softc *);
static	int hifn_ramtype(struct hifn_softc *);
static	void hifn_sessions(struct hifn_softc *);
static	void hifn_intr(void *);
static	u_int hifn_write_command(struct hifn_command *, u_int8_t *);
static	u_int32_t hifn_next_signature(u_int32_t a, u_int cnt);
static	int hifn_newsession(void *, u_int32_t *, struct cryptoini *);
static	int hifn_freesession(void *, u_int64_t);
static	int hifn_process(void *, struct cryptop *, int);
static	void hifn_callback(struct hifn_softc *, struct hifn_command *, u_int8_t *);
static	int hifn_crypto(struct hifn_softc *, struct hifn_command *, struct cryptop *, int);
static	int hifn_readramaddr(struct hifn_softc *, int, u_int8_t *);
static	int hifn_writeramaddr(struct hifn_softc *, int, u_int8_t *);
static	int hifn_dmamap_load_src(struct hifn_softc *, struct hifn_command *);
static	int hifn_dmamap_load_dst(struct hifn_softc *, struct hifn_command *);
static	int hifn_init_pubrng(struct hifn_softc *);
static	void hifn_rng(void *);
static	void hifn_tick(void *);
static	void hifn_abort(struct hifn_softc *);
static	void hifn_alloc_slot(struct hifn_softc *, int *, int *, int *, int *);

static	void hifn_write_reg_0(struct hifn_softc *, bus_size_t, u_int32_t);
static	void hifn_write_reg_1(struct hifn_softc *, bus_size_t, u_int32_t);

/*
 * Read a BAR0 register.  Note the side effect: sc_bar0_lastreg is
 * invalidated so that the next hifn_write_reg_0 does not assume the
 * chip's internal write-address still points at the last register
 * written (the write path elides redundant address setup).
 */
static __inline__ u_int32_t
READ_REG_0(struct hifn_softc *sc, bus_size_t reg)
{
	u_int32_t v = bus_space_read_4(sc->sc_st0, sc->sc_sh0, reg);
	sc->sc_bar0_lastreg = (bus_size_t) -1;
	return (v);
}
#define	WRITE_REG_0(sc, reg, val)	hifn_write_reg_0(sc, reg, val)

/*
 * Read a BAR1 register; same lastreg-invalidation contract as
 * READ_REG_0, but for the BAR1 mapping.
 */
static __inline__ u_int32_t
READ_REG_1(struct hifn_softc *sc, bus_size_t reg)
{
	u_int32_t v = bus_space_read_4(sc->sc_st1, sc->sc_sh1, reg);
	sc->sc_bar1_lastreg = (bus_size_t) -1;
	return (v);
}
#define	WRITE_REG_1(sc, reg, val)	hifn_write_reg_1(sc, reg, val)

SYSCTL_NODE(_hw, OID_AUTO, hifn, CTLFLAG_RD, 0, "Hifn driver parameters");

#ifdef HIFN_DEBUG
static	int hifn_debug = 0;
SYSCTL_INT(_hw_hifn, OID_AUTO, debug, CTLFLAG_RW, &hifn_debug,
	    0, "control debugging msgs");
#endif

/* Global (not per-device) statistics, exported via hw.hifn.stats */
static	struct hifn_stats hifnstats;
SYSCTL_STRUCT(_hw_hifn, OID_AUTO, stats, CTLFLAG_RD, &hifnstats,
	    hifn_stats, "driver statistics");
/* Max requests to queue before forcing an interrupt (tunable) */
static	int hifn_maxbatch = 1;
SYSCTL_INT(_hw_hifn, OID_AUTO, maxbatch, CTLFLAG_RW, &hifn_maxbatch,
	    0, "max ops to batch w/o interrupt");

/*
 * Probe for a supported device.  The PCI vendor and device
 * IDs are used to detect devices we know how to handle.
187 */ 188static int 189hifn_probe(device_t dev) 190{ 191 if (pci_get_vendor(dev) == PCI_VENDOR_INVERTEX && 192 pci_get_device(dev) == PCI_PRODUCT_INVERTEX_AEON) 193 return (0); 194 if (pci_get_vendor(dev) == PCI_VENDOR_HIFN && 195 (pci_get_device(dev) == PCI_PRODUCT_HIFN_7751 || 196 pci_get_device(dev) == PCI_PRODUCT_HIFN_7951 || 197 pci_get_device(dev) == PCI_PRODUCT_HIFN_7811)) 198 return (0); 199 if (pci_get_vendor(dev) == PCI_VENDOR_NETSEC && 200 pci_get_device(dev) == PCI_PRODUCT_NETSEC_7751) 201 return (0); 202 return (ENXIO); 203} 204 205static void 206hifn_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error) 207{ 208 bus_addr_t *paddr = (bus_addr_t*) arg; 209 *paddr = segs->ds_addr; 210} 211 212static const char* 213hifn_partname(struct hifn_softc *sc) 214{ 215 /* XXX sprintf numbers when not decoded */ 216 switch (pci_get_vendor(sc->sc_dev)) { 217 case PCI_VENDOR_HIFN: 218 switch (pci_get_device(sc->sc_dev)) { 219 case PCI_PRODUCT_HIFN_6500: return "Hifn 6500"; 220 case PCI_PRODUCT_HIFN_7751: return "Hifn 7751"; 221 case PCI_PRODUCT_HIFN_7811: return "Hifn 7811"; 222 case PCI_PRODUCT_HIFN_7951: return "Hifn 7951"; 223 } 224 return "Hifn unknown-part"; 225 case PCI_VENDOR_INVERTEX: 226 switch (pci_get_device(sc->sc_dev)) { 227 case PCI_PRODUCT_INVERTEX_AEON: return "Invertex AEON"; 228 } 229 return "Invertex unknown-part"; 230 case PCI_VENDOR_NETSEC: 231 switch (pci_get_device(sc->sc_dev)) { 232 case PCI_PRODUCT_NETSEC_7751: return "NetSec 7751"; 233 } 234 return "NetSec unknown-part"; 235 } 236 return "Unknown-vendor unknown-part"; 237} 238 239static void 240default_harvest(struct rndtest_state *rsp, void *buf, u_int count) 241{ 242 random_harvest(buf, count, count*NBBY, 0, RANDOM_PURE); 243} 244 245/* 246 * Attach an interface that successfully probed. 
 */
static int
hifn_attach(device_t dev)
{
	struct hifn_softc *sc = device_get_softc(dev);
	u_int32_t cmd;
	caddr_t kva;
	int rseg, rid;
	char rbase;
	u_int16_t ena, rev;

	KASSERT(sc != NULL, ("hifn_attach: null software carrier!"));
	bzero(sc, sizeof (*sc));
	sc->sc_dev = dev;

	mtx_init(&sc->sc_mtx, device_get_nameunit(dev), "hifn driver", MTX_DEF);

	/* XXX handle power management */

	/*
	 * The 7951 has a random number generator and
	 * public key support; note this.
	 */
	if (pci_get_vendor(dev) == PCI_VENDOR_HIFN &&
	    pci_get_device(dev) == PCI_PRODUCT_HIFN_7951)
		sc->sc_flags = HIFN_HAS_RNG | HIFN_HAS_PUBLIC;
	/*
	 * The 7811 has a random number generator and
	 * we also note it's identity 'cuz of some quirks.
	 */
	if (pci_get_vendor(dev) == PCI_VENDOR_HIFN &&
	    pci_get_device(dev) == PCI_PRODUCT_HIFN_7811)
		sc->sc_flags |= HIFN_IS_7811 | HIFN_HAS_RNG;

	/*
	 * Configure support for memory-mapped access to
	 * registers and for DMA operations.  Re-read COMMAND
	 * to verify the bits actually stuck.
	 */
#define	PCIM_ENA	(PCIM_CMD_MEMEN|PCIM_CMD_BUSMASTEREN)
	cmd = pci_read_config(dev, PCIR_COMMAND, 4);
	cmd |= PCIM_ENA;
	pci_write_config(dev, PCIR_COMMAND, cmd, 4);
	cmd = pci_read_config(dev, PCIR_COMMAND, 4);
	if ((cmd & PCIM_ENA) != PCIM_ENA) {
		device_printf(dev, "failed to enable %s\n",
			(cmd & PCIM_ENA) == 0 ?
				"memory mapping & bus mastering" :
			(cmd & PCIM_CMD_MEMEN) == 0 ?
				"memory mapping" : "bus mastering");
		goto fail_pci;
	}
#undef PCIM_ENA

	/*
	 * Setup PCI resources. Note that we record the bus
	 * tag and handle for each register mapping, this is
	 * used by the READ_REG_0, WRITE_REG_0, READ_REG_1,
	 * and WRITE_REG_1 macros throughout the driver.
	 */
	rid = HIFN_BAR0;
	sc->sc_bar0res = bus_alloc_resource(dev, SYS_RES_MEMORY, &rid,
					    0, ~0, 1, RF_ACTIVE);
	if (sc->sc_bar0res == NULL) {
		device_printf(dev, "cannot map bar%d register space\n", 0);
		goto fail_pci;
	}
	sc->sc_st0 = rman_get_bustag(sc->sc_bar0res);
	sc->sc_sh0 = rman_get_bushandle(sc->sc_bar0res);
	sc->sc_bar0_lastreg = (bus_size_t) -1;

	rid = HIFN_BAR1;
	sc->sc_bar1res = bus_alloc_resource(dev, SYS_RES_MEMORY, &rid,
					    0, ~0, 1, RF_ACTIVE);
	if (sc->sc_bar1res == NULL) {
		device_printf(dev, "cannot map bar%d register space\n", 1);
		goto fail_io0;
	}
	sc->sc_st1 = rman_get_bustag(sc->sc_bar1res);
	sc->sc_sh1 = rman_get_bushandle(sc->sc_bar1res);
	sc->sc_bar1_lastreg = (bus_size_t) -1;

	hifn_set_retry(sc);

	/*
	 * Setup the area where the Hifn DMA's descriptors
	 * and associated data structures.
	 */
	if (bus_dma_tag_create(NULL,			/* parent */
			       1, 0,			/* alignment,boundary */
			       BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
			       BUS_SPACE_MAXADDR,	/* highaddr */
			       NULL, NULL,		/* filter, filterarg */
			       HIFN_MAX_DMALEN,		/* maxsize */
			       MAX_SCATTER,		/* nsegments */
			       HIFN_MAX_SEGLEN,		/* maxsegsize */
			       BUS_DMA_ALLOCNOW,	/* flags */
			       NULL,			/* lockfunc */
			       NULL,			/* lockarg */
			       &sc->sc_dmat)) {
		device_printf(dev, "cannot allocate DMA tag\n");
		goto fail_io1;
	}
	if (bus_dmamap_create(sc->sc_dmat, BUS_DMA_NOWAIT, &sc->sc_dmamap)) {
		device_printf(dev, "cannot create dma map\n");
		bus_dma_tag_destroy(sc->sc_dmat);
		goto fail_io1;
	}
	if (bus_dmamem_alloc(sc->sc_dmat, (void**) &kva, BUS_DMA_NOWAIT, &sc->sc_dmamap)) {
		device_printf(dev, "cannot alloc dma buffer\n");
		bus_dmamap_destroy(sc->sc_dmat, sc->sc_dmamap);
		bus_dma_tag_destroy(sc->sc_dmat);
		goto fail_io1;
	}
	/* hifn_dmamap_cb stores the segment's bus address in sc_dma_physaddr */
	if (bus_dmamap_load(sc->sc_dmat, sc->sc_dmamap, kva,
	    sizeof (*sc->sc_dma),
	    hifn_dmamap_cb, &sc->sc_dma_physaddr,
	    BUS_DMA_NOWAIT)) {
		device_printf(dev, "cannot load dma map\n");
		bus_dmamem_free(sc->sc_dmat, kva, sc->sc_dmamap);
		bus_dmamap_destroy(sc->sc_dmat, sc->sc_dmamap);
		bus_dma_tag_destroy(sc->sc_dmat);
		goto fail_io1;
	}
	sc->sc_dma = (struct hifn_dma *)kva;
	bzero(sc->sc_dma, sizeof(*sc->sc_dma));

	KASSERT(sc->sc_st0 != NULL, ("hifn_attach: null bar0 tag!"));
	KASSERT(sc->sc_sh0 != NULL, ("hifn_attach: null bar0 handle!"));
	KASSERT(sc->sc_st1 != NULL, ("hifn_attach: null bar1 tag!"));
	KASSERT(sc->sc_sh1 != NULL, ("hifn_attach: null bar1 handle!"));

	/*
	 * Reset the board and do the ``secret handshake''
	 * to enable the crypto support.  Then complete the
	 * initialization procedure by setting up the interrupt
	 * and hooking in to the system crypto support so we'll
	 * get used for system services like the crypto device,
	 * IPsec, RNG device, etc.
	 */
	hifn_reset_board(sc, 0);

	if (hifn_enable_crypto(sc) != 0) {
		device_printf(dev, "crypto enabling failed\n");
		goto fail_mem;
	}
	hifn_reset_puc(sc);

	hifn_init_dma(sc);
	hifn_init_pci_registers(sc);

	/* Probe whether the part carries SRAM or DRAM, then size it */
	if (hifn_ramtype(sc))
		goto fail_mem;

	if (sc->sc_drammodel == 0)
		hifn_sramsize(sc);
	else
		hifn_dramsize(sc);

	/*
	 * Workaround for NetSec 7751 rev A: half ram size because two
	 * of the address lines were left floating
	 */
	if (pci_get_vendor(dev) == PCI_VENDOR_NETSEC &&
	    pci_get_device(dev) == PCI_PRODUCT_NETSEC_7751 &&
	    pci_get_revid(dev) == 0x61)	/*XXX???*/
		sc->sc_ramsize >>= 1;

	/*
	 * Arrange the interrupt line.
	 */
	rid = 0;
	sc->sc_irq = bus_alloc_resource(dev, SYS_RES_IRQ, &rid,
					0, ~0, 1, RF_SHAREABLE|RF_ACTIVE);
	if (sc->sc_irq == NULL) {
		device_printf(dev, "could not map interrupt\n");
		goto fail_mem;
	}
	/*
	 * NB: Network code assumes we are blocked with splimp()
	 *     so make sure the IRQ is marked appropriately.
	 */
	if (bus_setup_intr(dev, sc->sc_irq, INTR_TYPE_NET | INTR_MPSAFE,
			   hifn_intr, sc, &sc->sc_intrhand)) {
		device_printf(dev, "could not setup interrupt\n");
		goto fail_intr2;
	}

	hifn_sessions(sc);

	/*
	 * NB: Keep only the low 16 bits; this masks the chip id
	 *     from the 7951.
	 */
	rev = READ_REG_1(sc, HIFN_1_REVID) & 0xffff;

	/* Scale the RAM size for the banner: KB unless >= 1 MB */
	rseg = sc->sc_ramsize / 1024;
	rbase = 'K';
	if (sc->sc_ramsize >= (1024 * 1024)) {
		rbase = 'M';
		rseg /= 1024;
	}
	device_printf(sc->sc_dev, "%s, rev %u, %d%cB %cram, %u sessions\n",
		hifn_partname(sc), rev,
		rseg, rbase, sc->sc_drammodel ? 'd' : 's',
		sc->sc_maxses);

	sc->sc_cid = crypto_get_driverid(0);
	if (sc->sc_cid < 0) {
		device_printf(dev, "could not get crypto driver id\n");
		goto fail_intr;
	}

	/* CHIPID must be set before reading the chip-enable level */
	WRITE_REG_0(sc, HIFN_0_PUCNFG,
	    READ_REG_0(sc, HIFN_0_PUCNFG) | HIFN_PUCNFG_CHIPID);
	ena = READ_REG_0(sc, HIFN_0_PUSTAT) & HIFN_PUSTAT_CHIPENA;

	/*
	 * Register the algorithms unlocked at this enable level;
	 * level 2 adds the stronger ciphers on top of level 1.
	 */
	switch (ena) {
	case HIFN_PUSTAT_ENA_2:
		crypto_register(sc->sc_cid, CRYPTO_3DES_CBC, 0, 0,
		    hifn_newsession, hifn_freesession, hifn_process, sc);
		crypto_register(sc->sc_cid, CRYPTO_ARC4, 0, 0,
		    hifn_newsession, hifn_freesession, hifn_process, sc);
		/*FALLTHROUGH*/
	case HIFN_PUSTAT_ENA_1:
		crypto_register(sc->sc_cid, CRYPTO_MD5, 0, 0,
		    hifn_newsession, hifn_freesession, hifn_process, sc);
		crypto_register(sc->sc_cid, CRYPTO_SHA1, 0, 0,
		    hifn_newsession, hifn_freesession, hifn_process, sc);
		crypto_register(sc->sc_cid, CRYPTO_MD5_HMAC, 0, 0,
		    hifn_newsession, hifn_freesession, hifn_process, sc);
		crypto_register(sc->sc_cid, CRYPTO_SHA1_HMAC, 0, 0,
		    hifn_newsession, hifn_freesession, hifn_process, sc);
		crypto_register(sc->sc_cid, CRYPTO_DES_CBC, 0, 0,
		    hifn_newsession, hifn_freesession, hifn_process, sc);
		break;
	}

	bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	if (sc->sc_flags & (HIFN_HAS_PUBLIC | HIFN_HAS_RNG))
		hifn_init_pubrng(sc);

	/* NB: 1 means the callout runs w/o Giant locked */
	callout_init(&sc->sc_tickto, 1);
	callout_reset(&sc->sc_tickto, hz, hifn_tick, sc);

	return (0);

	/* Error unwind: each label releases what was acquired above it */
fail_intr:
	bus_teardown_intr(dev, sc->sc_irq, sc->sc_intrhand);
fail_intr2:
	/* XXX don't store rid */
	bus_release_resource(dev, SYS_RES_IRQ, 0, sc->sc_irq);
fail_mem:
	bus_dmamap_unload(sc->sc_dmat, sc->sc_dmamap);
	bus_dmamem_free(sc->sc_dmat, sc->sc_dma, sc->sc_dmamap);
	bus_dmamap_destroy(sc->sc_dmat, sc->sc_dmamap);
	bus_dma_tag_destroy(sc->sc_dmat);

	/* Turn off DMA polling */
	WRITE_REG_1(sc, HIFN_1_DMA_CNFG, HIFN_DMACNFG_MSTRESET |
	    HIFN_DMACNFG_DMARESET | HIFN_DMACNFG_MODE);
fail_io1:
	bus_release_resource(dev, SYS_RES_MEMORY, HIFN_BAR1, sc->sc_bar1res);
fail_io0:
	bus_release_resource(dev, SYS_RES_MEMORY, HIFN_BAR0, sc->sc_bar0res);
fail_pci:
	mtx_destroy(&sc->sc_mtx);
	return (ENXIO);
}

/*
 * Detach an interface that successfully probed.
 */
static int
hifn_detach(device_t dev)
{
	struct hifn_softc *sc = device_get_softc(dev);

	KASSERT(sc != NULL, ("hifn_detach: null software carrier!"));

	/* disable interrupts */
	WRITE_REG_1(sc, HIFN_1_DMA_IER, 0);

	/*XXX other resources */
	callout_stop(&sc->sc_tickto);
	callout_stop(&sc->sc_rngto);
#ifdef HIFN_RNDTEST
	if (sc->sc_rndtest)
		rndtest_detach(sc->sc_rndtest);
#endif

	/* Turn off DMA polling */
	WRITE_REG_1(sc, HIFN_1_DMA_CNFG, HIFN_DMACNFG_MSTRESET |
	    HIFN_DMACNFG_DMARESET | HIFN_DMACNFG_MODE);

	crypto_unregister_all(sc->sc_cid);

	bus_generic_detach(dev);	/*XXX should be no children, right? */

	bus_teardown_intr(dev, sc->sc_irq, sc->sc_intrhand);
	/* XXX don't store rid */
	bus_release_resource(dev, SYS_RES_IRQ, 0, sc->sc_irq);

	bus_dmamap_unload(sc->sc_dmat, sc->sc_dmamap);
	bus_dmamem_free(sc->sc_dmat, sc->sc_dma, sc->sc_dmamap);
	bus_dmamap_destroy(sc->sc_dmat, sc->sc_dmamap);
	bus_dma_tag_destroy(sc->sc_dmat);

	bus_release_resource(dev, SYS_RES_MEMORY, HIFN_BAR1, sc->sc_bar1res);
	bus_release_resource(dev, SYS_RES_MEMORY, HIFN_BAR0, sc->sc_bar0res);

	mtx_destroy(&sc->sc_mtx);

	return (0);
}

/*
 * Stop all chip I/O so that the kernel's probe routines don't
 * get confused by errant DMAs when rebooting.
 */
static void
hifn_shutdown(device_t dev)
{
#ifdef notyet
	hifn_stop(device_get_softc(dev));
#endif
}

/*
 * Device suspend routine.  Stop the interface and save some PCI
 * settings in case the BIOS doesn't restore them properly on
 * resume.
 */
static int
hifn_suspend(device_t dev)
{
	struct hifn_softc *sc = device_get_softc(dev);
#ifdef notyet
	int i;

	hifn_stop(sc);
	for (i = 0; i < 5; i++)
		sc->saved_maps[i] = pci_read_config(dev, PCIR_MAPS + i * 4, 4);
	sc->saved_biosaddr = pci_read_config(dev, PCIR_BIOS, 4);
	sc->saved_intline = pci_read_config(dev, PCIR_INTLINE, 1);
	sc->saved_cachelnsz = pci_read_config(dev, PCIR_CACHELNSZ, 1);
	sc->saved_lattimer = pci_read_config(dev, PCIR_LATTIMER, 1);
#endif
	sc->sc_suspended = 1;

	return (0);
}

/*
 * Device resume routine.  Restore some PCI settings in case the BIOS
 * doesn't, re-enable busmastering, and restart the interface if
 * appropriate.
 *
 * NOTE(review): the disabled code below still references ifp/rl_init,
 * leftovers from the driver this boilerplate was copied from; it is
 * compiled out by #ifdef notyet.
 */
static int
hifn_resume(device_t dev)
{
	struct hifn_softc *sc = device_get_softc(dev);
#ifdef notyet
	int i;

	/* better way to do this? */
	for (i = 0; i < 5; i++)
		pci_write_config(dev, PCIR_MAPS + i * 4, sc->saved_maps[i], 4);
	pci_write_config(dev, PCIR_BIOS, sc->saved_biosaddr, 4);
	pci_write_config(dev, PCIR_INTLINE, sc->saved_intline, 1);
	pci_write_config(dev, PCIR_CACHELNSZ, sc->saved_cachelnsz, 1);
	pci_write_config(dev, PCIR_LATTIMER, sc->saved_lattimer, 1);

	/* reenable busmastering */
	pci_enable_busmaster(dev);
	pci_enable_io(dev, HIFN_RES);

	/* reinitialize interface if necessary */
	if (ifp->if_flags & IFF_UP)
		rl_init(sc);
#endif
	sc->sc_suspended = 0;

	return (0);
}

/*
 * Bring up the public-key unit and the RNG (whichever the part has)
 * and start the periodic RNG harvest callout.  Returns 0 on success,
 * 1 if the public key engine failed to come out of reset.
 */
static int
hifn_init_pubrng(struct hifn_softc *sc)
{
	u_int32_t r;
	int i;

#ifdef HIFN_RNDTEST
	sc->sc_rndtest = rndtest_attach(sc->sc_dev);
	if (sc->sc_rndtest)
		sc->sc_harvest = rndtest_harvest;
	else
		sc->sc_harvest = default_harvest;
#else
	sc->sc_harvest = default_harvest;
#endif
	if ((sc->sc_flags & HIFN_IS_7811) == 0) {
		/* Reset 7951 public key/rng engine */
		WRITE_REG_1(sc, HIFN_1_PUB_RESET,
		    READ_REG_1(sc, HIFN_1_PUB_RESET) | HIFN_PUBRST_RESET);

		/* Poll up to ~100ms for the reset bit to self-clear */
		for (i = 0; i < 100; i++) {
			DELAY(1000);
			if ((READ_REG_1(sc, HIFN_1_PUB_RESET) &
			    HIFN_PUBRST_RESET) == 0)
				break;
		}

		if (i == 100) {
			device_printf(sc->sc_dev, "public key init failed\n");
			return (1);
		}
	}

	/* Enable the rng, if available */
	if (sc->sc_flags & HIFN_HAS_RNG) {
		if (sc->sc_flags & HIFN_IS_7811) {
			/* 7811: disable, reconfigure, then re-enable */
			r = READ_REG_1(sc, HIFN_1_7811_RNGENA);
			if (r & HIFN_7811_RNGENA_ENA) {
				r &= ~HIFN_7811_RNGENA_ENA;
				WRITE_REG_1(sc, HIFN_1_7811_RNGENA, r);
			}
			WRITE_REG_1(sc, HIFN_1_7811_RNGCFG,
			    HIFN_7811_RNGCFG_DEFL);
			r |= HIFN_7811_RNGENA_ENA;
			WRITE_REG_1(sc, HIFN_1_7811_RNGENA, r);
		} else
			WRITE_REG_1(sc, HIFN_1_RNG_CONFIG,
			    READ_REG_1(sc, HIFN_1_RNG_CONFIG) |
			    HIFN_RNGCFG_ENA);

		sc->sc_rngfirst = 1;
		/* Harvest ~100 times a second (or every tick if hz < 100) */
		if (hz >= 100)
			sc->sc_rnghz = hz / 100;
		else
			sc->sc_rnghz = 1;
		/* NB: 1 means the callout runs w/o Giant locked */
		callout_init(&sc->sc_rngto, 1);
		callout_reset(&sc->sc_rngto, sc->sc_rnghz, hifn_rng, sc);
	}

	/* Enable public key engine, if available */
	if (sc->sc_flags & HIFN_HAS_PUBLIC) {
		WRITE_REG_1(sc, HIFN_1_PUB_IEN, HIFN_PUBIEN_DONE);
		sc->sc_dmaier |= HIFN_DMAIER_PUBDONE;
		WRITE_REG_1(sc, HIFN_1_DMA_IER, sc->sc_dmaier);
	}

	return (0);
}

/*
 * Periodic callout: pull random words out of the chip and feed them
 * to the harvest hook, then reschedule.  On RNG underflow (7811) the
 * callout is intentionally NOT rescheduled, disabling the RNG.
 * NOTE(review): the RANDOM_BITS macro below is defined but unused.
 */
static void
hifn_rng(void *vsc)
{
#define	RANDOM_BITS(n)	(n)*sizeof (u_int32_t), (n)*sizeof (u_int32_t)*NBBY, 0
	struct hifn_softc *sc = vsc;
	u_int32_t sts, num[2];
	int i;

	if (sc->sc_flags & HIFN_IS_7811) {
		/* Drain up to 5 pairs per invocation */
		for (i = 0; i < 5; i++) {
			sts = READ_REG_1(sc, HIFN_1_7811_RNGSTS);
			if (sts & HIFN_7811_RNGSTS_UFL) {
				device_printf(sc->sc_dev,
				    "RNG underflow: disabling\n");
				return;
			}
			if ((sts & HIFN_7811_RNGSTS_RDY) == 0)
				break;

			/*
			 * There are at least two words in the RNG FIFO
			 * at this point.
			 */
			num[0] = READ_REG_1(sc, HIFN_1_7811_RNGDAT);
			num[1] = READ_REG_1(sc, HIFN_1_7811_RNGDAT);
			/* NB: discard first data read */
			if (sc->sc_rngfirst)
				sc->sc_rngfirst = 0;
			else
				(*sc->sc_harvest)(sc->sc_rndtest,
				    num, sizeof (num));
		}
	} else {
		num[0] = READ_REG_1(sc, HIFN_1_RNG_DATA);

		/* NB: discard first data read */
		if (sc->sc_rngfirst)
			sc->sc_rngfirst = 0;
		else
			(*sc->sc_harvest)(sc->sc_rndtest,
			    num, sizeof (num[0]));
	}

	callout_reset(&sc->sc_rngto, sc->sc_rnghz, hifn_rng, sc);
#undef RANDOM_BITS
}

/*
 * Spin (up to ~5ms) until the processing unit's RESET bit clears;
 * complain if it never does.
 */
static void
hifn_puc_wait(struct hifn_softc *sc)
{
	int i;

	for (i = 5000; i > 0; i--) {
		DELAY(1);
		if (!(READ_REG_0(sc, HIFN_0_PUCTRL) & HIFN_PUCTRL_RESET))
			break;
	}
	if (!i)
		device_printf(sc->sc_dev, "proc unit did not reset\n");
}

/*
 * Reset the processing unit.
 */
static void
hifn_reset_puc(struct hifn_softc *sc)
{
	/* Reset processing unit */
	WRITE_REG_0(sc, HIFN_0_PUCTRL, HIFN_PUCTRL_DMAENA);
	hifn_puc_wait(sc);
}

/*
 * Set the Retry and TRDY registers; note that we set them to
 * zero because the 7811 locks up when forced to retry (section
 * 3.6 of "Specification Update SU-0014-04".  Not clear if we
 * should do this for all Hifn parts, but it doesn't seem to hurt.
 */
static void
hifn_set_retry(struct hifn_softc *sc)
{
	/* NB: RETRY only responds to 8-bit reads/writes */
	pci_write_config(sc->sc_dev, HIFN_RETRY_TIMEOUT, 0, 1);
	/*
	 * NOTE(review): this is a 4-byte write; presumably it
	 * intentionally zeros TRDY and the bytes that follow it
	 * (including RETRY) in one go — confirm against the part's
	 * config-space layout.
	 */
	pci_write_config(sc->sc_dev, HIFN_TRDY_TIMEOUT, 0, 4);
}

/*
 * Resets the board.  Values in the regesters are left as is
 * from the reset (i.e. initial values are assigned elsewhere).
 */
static void
hifn_reset_board(struct hifn_softc *sc, int full)
{
	u_int32_t reg;

	/*
	 * Set polling in the DMA configuration register to zero.  0x7 avoids
	 * resetting the board and zeros out the other fields.
	 */
	WRITE_REG_1(sc, HIFN_1_DMA_CNFG, HIFN_DMACNFG_MSTRESET |
	    HIFN_DMACNFG_DMARESET | HIFN_DMACNFG_MODE);

	/*
	 * Now that polling has been disabled, we have to wait 1 ms
	 * before resetting the board.
	 */
	DELAY(1000);

	/* Reset the DMA unit */
	if (full) {
		WRITE_REG_1(sc, HIFN_1_DMA_CNFG, HIFN_DMACNFG_MODE);
		DELAY(1000);
	} else {
		WRITE_REG_1(sc, HIFN_1_DMA_CNFG,
		    HIFN_DMACNFG_MODE | HIFN_DMACNFG_MSTRESET);
		hifn_reset_puc(sc);
	}

	KASSERT(sc->sc_dma != NULL, ("hifn_reset_board: null DMA tag!"));
	bzero(sc->sc_dma, sizeof(*sc->sc_dma));

	/* Bring dma unit out of reset */
	WRITE_REG_1(sc, HIFN_1_DMA_CNFG, HIFN_DMACNFG_MSTRESET |
	    HIFN_DMACNFG_DMARESET | HIFN_DMACNFG_MODE);

	hifn_puc_wait(sc);
	hifn_set_retry(sc);

	if (sc->sc_flags & HIFN_IS_7811) {
		/* 7811: wait (up to ~1s) for context RAM initialization */
		for (reg = 0; reg < 1000; reg++) {
			if (READ_REG_1(sc, HIFN_1_7811_MIPSRST) &
			    HIFN_MIPSRST_CRAMINIT)
				break;
			DELAY(1000);
		}
		if (reg == 1000)
			printf(": cram init timeout\n");
	}
}

/*
 * Advance the unlock-handshake signature by cnt steps of the chip's
 * LFSR: each step folds the parity of (a & 0x80080125) into bit 0 of
 * the shifted value.
 */
static u_int32_t
hifn_next_signature(u_int32_t a, u_int cnt)
{
	int i;
	u_int32_t v;

	for (i = 0; i < cnt; i++) {

		/* get the parity */
		v = a & 0x80080125;
		v ^= v >> 16;
		v ^= v >> 8;
		v ^= v >> 4;
		v ^= v >> 2;
		v ^= v >> 1;

		a = (v & 1) ^ (a << 1);
	}

	return a;
}

/*
 * Per-board unlock keys, selected by PCI vendor/product.  All known
 * boards ship with the all-zeros key.
 */
struct pci2id {
	u_short		pci_vendor;
	u_short		pci_prod;
	char		card_id[13];
};
static struct pci2id pci2id[] = {
	{
		PCI_VENDOR_HIFN,
		PCI_PRODUCT_HIFN_7951,
		{ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
		  0x00, 0x00, 0x00, 0x00, 0x00 }
	}, {
		PCI_VENDOR_NETSEC,
		PCI_PRODUCT_NETSEC_7751,
		{ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
		  0x00, 0x00, 0x00, 0x00, 0x00 }
	}, {
		PCI_VENDOR_INVERTEX,
		PCI_PRODUCT_INVERTEX_AEON,
		{ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
		  0x00, 0x00, 0x00, 0x00, 0x00 }
	}, {
		PCI_VENDOR_HIFN,
		PCI_PRODUCT_HIFN_7811,
		{ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
		  0x00, 0x00, 0x00, 0x00, 0x00 }
	}, {
		/*
		 * Other vendors share this PCI ID as well, such as
		 * http://www.powercrypt.com, and obviously they also
		 * use the same key.
		 */
		PCI_VENDOR_HIFN,
		PCI_PRODUCT_HIFN_7751,
		{ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
		  0x00, 0x00, 0x00, 0x00, 0x00 }
	},
};

/*
 * Checks to see if crypto is already enabled.  If crypto isn't enable,
 * "hifn_enable_crypto" is called to enable it.  The check is important,
 * as enabling crypto twice will lock the board.
 */
static int
hifn_enable_crypto(struct hifn_softc *sc)
{
	u_int32_t dmacfg, ramcfg, encl, addr, i;
	char *offtbl = NULL;

	/* Find the unlock key for this vendor/product */
	for (i = 0; i < sizeof(pci2id)/sizeof(pci2id[0]); i++) {
		if (pci2id[i].pci_vendor == pci_get_vendor(sc->sc_dev) &&
		    pci2id[i].pci_prod == pci_get_device(sc->sc_dev)) {
			offtbl = pci2id[i].card_id;
			break;
		}
	}
	if (offtbl == NULL) {
		device_printf(sc->sc_dev, "Unknown card!\n");
		return (1);
	}

	/* Save registers we must restore after the handshake */
	ramcfg = READ_REG_0(sc, HIFN_0_PUCNFG);
	dmacfg = READ_REG_1(sc, HIFN_1_DMA_CNFG);

	/*
	 * The RAM config register's encrypt level bit needs to be set before
	 * every read performed on the encryption level register.
	 */
	WRITE_REG_0(sc, HIFN_0_PUCNFG, ramcfg | HIFN_PUCNFG_CHIPID);

	encl = READ_REG_0(sc, HIFN_0_PUSTAT) & HIFN_PUSTAT_CHIPENA;

	/*
	 * Make sure we don't re-unlock.  Two unlocks kills chip until the
	 * next reboot.
	 */
	if (encl == HIFN_PUSTAT_ENA_1 || encl == HIFN_PUSTAT_ENA_2) {
#ifdef HIFN_DEBUG
		if (hifn_debug)
			device_printf(sc->sc_dev,
			    "Strong crypto already enabled!\n");
#endif
		goto report;
	}

	if (encl != 0 && encl != HIFN_PUSTAT_ENA_0) {
#ifdef HIFN_DEBUG
		if (hifn_debug)
			device_printf(sc->sc_dev,
			      "Unknown encryption level 0x%x\n", encl);
#endif
		return 1;
	}

	/*
	 * Perform the unlock handshake: put the DMA engine in unlock
	 * mode, read the seed, then clock the 13-byte key through the
	 * signature register.
	 */
	WRITE_REG_1(sc, HIFN_1_DMA_CNFG, HIFN_DMACNFG_UNLOCK |
	    HIFN_DMACNFG_MSTRESET | HIFN_DMACNFG_DMARESET | HIFN_DMACNFG_MODE);
	DELAY(1000);
	addr = READ_REG_1(sc, HIFN_UNLOCK_SECRET1);
	DELAY(1000);
	WRITE_REG_1(sc, HIFN_UNLOCK_SECRET2, 0);
	DELAY(1000);

	for (i = 0; i <= 12; i++) {
		addr = hifn_next_signature(addr, offtbl[i] + 0x101);
		WRITE_REG_1(sc, HIFN_UNLOCK_SECRET2, addr);

		DELAY(1000);
	}

	/* Re-read the enable level to see whether the unlock took */
	WRITE_REG_0(sc, HIFN_0_PUCNFG, ramcfg | HIFN_PUCNFG_CHIPID);
	encl = READ_REG_0(sc, HIFN_0_PUSTAT) & HIFN_PUSTAT_CHIPENA;

#ifdef HIFN_DEBUG
	if (hifn_debug) {
		if (encl != HIFN_PUSTAT_ENA_1 && encl != HIFN_PUSTAT_ENA_2)
			device_printf(sc->sc_dev, "Engine is permanently "
				"locked until next system reset!\n");
		else
			device_printf(sc->sc_dev, "Engine enabled "
				"successfully!\n");
	}
#endif

report:
	/* Restore the saved configuration registers */
	WRITE_REG_0(sc, HIFN_0_PUCNFG, ramcfg);
	WRITE_REG_1(sc, HIFN_1_DMA_CNFG, dmacfg);

	switch (encl) {
	case HIFN_PUSTAT_ENA_1:
	case HIFN_PUSTAT_ENA_2:
		break;
	case HIFN_PUSTAT_ENA_0:
	default:
		device_printf(sc->sc_dev, "disabled");
		break;
	}

	return 0;
}

/*
 * Give initial values to the registers listed in the "Register Space"
 * section of the HIFN Software Development reference manual.
 */
static void
hifn_init_pci_registers(struct hifn_softc *sc)
{
	/* write fixed values needed by the Initialization registers */
	WRITE_REG_0(sc, HIFN_0_PUCTRL, HIFN_PUCTRL_DMAENA);
	WRITE_REG_0(sc, HIFN_0_FIFOCNFG, HIFN_FIFOCNFG_THRESHOLD);
	WRITE_REG_0(sc, HIFN_0_PUIER, HIFN_PUIER_DSTOVER);

	/*
	 * write all 4 ring address registers: the command, source,
	 * destination and result rings all live inside the single
	 * contiguous hifn_dma structure at sc_dma_physaddr.
	 */
	WRITE_REG_1(sc, HIFN_1_DMA_CRAR, sc->sc_dma_physaddr +
	    offsetof(struct hifn_dma, cmdr[0]));
	WRITE_REG_1(sc, HIFN_1_DMA_SRAR, sc->sc_dma_physaddr +
	    offsetof(struct hifn_dma, srcr[0]));
	WRITE_REG_1(sc, HIFN_1_DMA_DRAR, sc->sc_dma_physaddr +
	    offsetof(struct hifn_dma, dstr[0]));
	WRITE_REG_1(sc, HIFN_1_DMA_RRAR, sc->sc_dma_physaddr +
	    offsetof(struct hifn_dma, resr[0]));

	DELAY(2000);

	/*
	 * write status register: disable all four DMA engines and
	 * write back every completion/abort/overrun status bit.
	 * NOTE(review): assumes DMA_CSR status bits are write-one-to-clear
	 * per the Hifn datasheet -- confirm.
	 */
	WRITE_REG_1(sc, HIFN_1_DMA_CSR,
	    HIFN_DMACSR_D_CTRL_DIS | HIFN_DMACSR_R_CTRL_DIS |
	    HIFN_DMACSR_S_CTRL_DIS | HIFN_DMACSR_C_CTRL_DIS |
	    HIFN_DMACSR_D_ABORT | HIFN_DMACSR_D_DONE | HIFN_DMACSR_D_LAST |
	    HIFN_DMACSR_D_WAIT | HIFN_DMACSR_D_OVER |
	    HIFN_DMACSR_R_ABORT | HIFN_DMACSR_R_DONE | HIFN_DMACSR_R_LAST |
	    HIFN_DMACSR_R_WAIT | HIFN_DMACSR_R_OVER |
	    HIFN_DMACSR_S_ABORT | HIFN_DMACSR_S_DONE | HIFN_DMACSR_S_LAST |
	    HIFN_DMACSR_S_WAIT |
	    HIFN_DMACSR_C_ABORT | HIFN_DMACSR_C_DONE | HIFN_DMACSR_C_LAST |
	    HIFN_DMACSR_C_WAIT |
	    HIFN_DMACSR_ENGINE |
	    ((sc->sc_flags & HIFN_HAS_PUBLIC) ?
		HIFN_DMACSR_PUBDONE : 0) |
	    ((sc->sc_flags & HIFN_IS_7811) ?
		HIFN_DMACSR_ILLW | HIFN_DMACSR_ILLR : 0));

	/* all engines idle; interrupt on done/abort/overrun conditions */
	sc->sc_d_busy = sc->sc_r_busy = sc->sc_s_busy = sc->sc_c_busy = 0;
	sc->sc_dmaier |= HIFN_DMAIER_R_DONE | HIFN_DMAIER_C_ABORT |
	    HIFN_DMAIER_D_OVER | HIFN_DMAIER_R_OVER |
	    HIFN_DMAIER_S_ABORT | HIFN_DMAIER_D_ABORT | HIFN_DMAIER_R_ABORT |
	    ((sc->sc_flags & HIFN_IS_7811) ?
		HIFN_DMAIER_ILLW | HIFN_DMAIER_ILLR : 0);
	/* "command wait" is masked here; hifn_crypto enables it on demand */
	sc->sc_dmaier &= ~HIFN_DMAIER_C_WAIT;
	WRITE_REG_1(sc, HIFN_1_DMA_IER, sc->sc_dmaier);

	WRITE_REG_0(sc, HIFN_0_PUCNFG, HIFN_PUCNFG_COMPSING |
	    HIFN_PUCNFG_DRFR_128 | HIFN_PUCNFG_TCALLPHASES |
	    HIFN_PUCNFG_TCDRVTOTEM | HIFN_PUCNFG_BUS32 |
	    (sc->sc_drammodel ? HIFN_PUCNFG_DRAM : HIFN_PUCNFG_SRAM));

	WRITE_REG_0(sc, HIFN_0_PUISR, HIFN_PUISR_DSTOVER);
	WRITE_REG_1(sc, HIFN_1_DMA_CNFG, HIFN_DMACNFG_MSTRESET |
	    HIFN_DMACNFG_DMARESET | HIFN_DMACNFG_MODE | HIFN_DMACNFG_LAST |
	    ((HIFN_POLL_FREQUENCY << 16 ) & HIFN_DMACNFG_POLLFREQ) |
	    ((HIFN_POLL_SCALAR << 8) & HIFN_DMACNFG_POLLINVAL));
}

/*
 * The maximum number of sessions supported by the card
 * is dependent on the amount of context ram, which
 * encryption algorithms are enabled, and how compression
 * is configured.  This should be configured before this
 * routine is called.
 */
static void
hifn_sessions(struct hifn_softc *sc)
{
	u_int32_t pucnfg;
	int ctxsize;

	pucnfg = READ_REG_0(sc, HIFN_0_PUCNFG);

	if (pucnfg & HIFN_PUCNFG_COMPSING) {
		/* per-session context size depends on crypto config */
		if (pucnfg & HIFN_PUCNFG_ENCCNFG)
			ctxsize = 128;
		else
			ctxsize = 512;
		/*
		 * NOTE(review): the first 32KB of ram appears to be
		 * reserved (not available for session contexts) -- the
		 * subtraction below assumes that; confirm against the
		 * Hifn reference manual.
		 */
		sc->sc_maxses = 1 +
		    ((sc->sc_ramsize - 32768) / ctxsize);
	} else
		sc->sc_maxses = sc->sc_ramsize / 16384;

	/* hard cap regardless of ram size */
	if (sc->sc_maxses > 2048)
		sc->sc_maxses = 2048;
}

/*
 * Determine ram type (sram or dram).  Board should be just out of a reset
 * state when this is called.
1103 */ 1104static int 1105hifn_ramtype(struct hifn_softc *sc) 1106{ 1107 u_int8_t data[8], dataexpect[8]; 1108 int i; 1109 1110 for (i = 0; i < sizeof(data); i++) 1111 data[i] = dataexpect[i] = 0x55; 1112 if (hifn_writeramaddr(sc, 0, data)) 1113 return (-1); 1114 if (hifn_readramaddr(sc, 0, data)) 1115 return (-1); 1116 if (bcmp(data, dataexpect, sizeof(data)) != 0) { 1117 sc->sc_drammodel = 1; 1118 return (0); 1119 } 1120 1121 for (i = 0; i < sizeof(data); i++) 1122 data[i] = dataexpect[i] = 0xaa; 1123 if (hifn_writeramaddr(sc, 0, data)) 1124 return (-1); 1125 if (hifn_readramaddr(sc, 0, data)) 1126 return (-1); 1127 if (bcmp(data, dataexpect, sizeof(data)) != 0) { 1128 sc->sc_drammodel = 1; 1129 return (0); 1130 } 1131 1132 return (0); 1133} 1134 1135#define HIFN_SRAM_MAX (32 << 20) 1136#define HIFN_SRAM_STEP_SIZE 16384 1137#define HIFN_SRAM_GRANULARITY (HIFN_SRAM_MAX / HIFN_SRAM_STEP_SIZE) 1138 1139static int 1140hifn_sramsize(struct hifn_softc *sc) 1141{ 1142 u_int32_t a; 1143 u_int8_t data[8]; 1144 u_int8_t dataexpect[sizeof(data)]; 1145 int32_t i; 1146 1147 for (i = 0; i < sizeof(data); i++) 1148 data[i] = dataexpect[i] = i ^ 0x5a; 1149 1150 for (i = HIFN_SRAM_GRANULARITY - 1; i >= 0; i--) { 1151 a = i * HIFN_SRAM_STEP_SIZE; 1152 bcopy(&i, data, sizeof(i)); 1153 hifn_writeramaddr(sc, a, data); 1154 } 1155 1156 for (i = 0; i < HIFN_SRAM_GRANULARITY; i++) { 1157 a = i * HIFN_SRAM_STEP_SIZE; 1158 bcopy(&i, dataexpect, sizeof(i)); 1159 if (hifn_readramaddr(sc, a, data) < 0) 1160 return (0); 1161 if (bcmp(data, dataexpect, sizeof(data)) != 0) 1162 return (0); 1163 sc->sc_ramsize = a + HIFN_SRAM_STEP_SIZE; 1164 } 1165 1166 return (0); 1167} 1168 1169/* 1170 * XXX For dram boards, one should really try all of the 1171 * HIFN_PUCNFG_DSZ_*'s. This just assumes that PUCNFG 1172 * is already set up correctly. 
1173 */ 1174static int 1175hifn_dramsize(struct hifn_softc *sc) 1176{ 1177 u_int32_t cnfg; 1178 1179 cnfg = READ_REG_0(sc, HIFN_0_PUCNFG) & 1180 HIFN_PUCNFG_DRAMMASK; 1181 sc->sc_ramsize = 1 << ((cnfg >> 13) + 18); 1182 return (0); 1183} 1184 1185static void 1186hifn_alloc_slot(struct hifn_softc *sc, int *cmdp, int *srcp, int *dstp, int *resp) 1187{ 1188 struct hifn_dma *dma = sc->sc_dma; 1189 1190 if (dma->cmdi == HIFN_D_CMD_RSIZE) { 1191 dma->cmdi = 0; 1192 dma->cmdr[HIFN_D_CMD_RSIZE].l = htole32(HIFN_D_VALID | 1193 HIFN_D_JUMP | HIFN_D_MASKDONEIRQ); 1194 HIFN_CMDR_SYNC(sc, HIFN_D_CMD_RSIZE, 1195 BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD); 1196 } 1197 *cmdp = dma->cmdi++; 1198 dma->cmdk = dma->cmdi; 1199 1200 if (dma->srci == HIFN_D_SRC_RSIZE) { 1201 dma->srci = 0; 1202 dma->srcr[HIFN_D_SRC_RSIZE].l = htole32(HIFN_D_VALID | 1203 HIFN_D_JUMP | HIFN_D_MASKDONEIRQ); 1204 HIFN_SRCR_SYNC(sc, HIFN_D_SRC_RSIZE, 1205 BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD); 1206 } 1207 *srcp = dma->srci++; 1208 dma->srck = dma->srci; 1209 1210 if (dma->dsti == HIFN_D_DST_RSIZE) { 1211 dma->dsti = 0; 1212 dma->dstr[HIFN_D_DST_RSIZE].l = htole32(HIFN_D_VALID | 1213 HIFN_D_JUMP | HIFN_D_MASKDONEIRQ); 1214 HIFN_DSTR_SYNC(sc, HIFN_D_DST_RSIZE, 1215 BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD); 1216 } 1217 *dstp = dma->dsti++; 1218 dma->dstk = dma->dsti; 1219 1220 if (dma->resi == HIFN_D_RES_RSIZE) { 1221 dma->resi = 0; 1222 dma->resr[HIFN_D_RES_RSIZE].l = htole32(HIFN_D_VALID | 1223 HIFN_D_JUMP | HIFN_D_MASKDONEIRQ); 1224 HIFN_RESR_SYNC(sc, HIFN_D_RES_RSIZE, 1225 BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD); 1226 } 1227 *resp = dma->resi++; 1228 dma->resk = dma->resi; 1229} 1230 1231static int 1232hifn_writeramaddr(struct hifn_softc *sc, int addr, u_int8_t *data) 1233{ 1234 struct hifn_dma *dma = sc->sc_dma; 1235 hifn_base_command_t wc; 1236 const u_int32_t masks = HIFN_D_VALID | HIFN_D_LAST | HIFN_D_MASKDONEIRQ; 1237 int r, cmdi, resi, srci, dsti; 1238 1239 wc.masks = htole16(3 << 13); 
1240 wc.session_num = htole16(addr >> 14); 1241 wc.total_source_count = htole16(8); 1242 wc.total_dest_count = htole16(addr & 0x3fff); 1243 1244 hifn_alloc_slot(sc, &cmdi, &srci, &dsti, &resi); 1245 1246 WRITE_REG_1(sc, HIFN_1_DMA_CSR, 1247 HIFN_DMACSR_C_CTRL_ENA | HIFN_DMACSR_S_CTRL_ENA | 1248 HIFN_DMACSR_D_CTRL_ENA | HIFN_DMACSR_R_CTRL_ENA); 1249 1250 /* build write command */ 1251 bzero(dma->command_bufs[cmdi], HIFN_MAX_COMMAND); 1252 *(hifn_base_command_t *)dma->command_bufs[cmdi] = wc; 1253 bcopy(data, &dma->test_src, sizeof(dma->test_src)); 1254 1255 dma->srcr[srci].p = htole32(sc->sc_dma_physaddr 1256 + offsetof(struct hifn_dma, test_src)); 1257 dma->dstr[dsti].p = htole32(sc->sc_dma_physaddr 1258 + offsetof(struct hifn_dma, test_dst)); 1259 1260 dma->cmdr[cmdi].l = htole32(16 | masks); 1261 dma->srcr[srci].l = htole32(8 | masks); 1262 dma->dstr[dsti].l = htole32(4 | masks); 1263 dma->resr[resi].l = htole32(4 | masks); 1264 1265 bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap, 1266 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 1267 1268 for (r = 10000; r >= 0; r--) { 1269 DELAY(10); 1270 bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap, 1271 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); 1272 if ((dma->resr[resi].l & htole32(HIFN_D_VALID)) == 0) 1273 break; 1274 bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap, 1275 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 1276 } 1277 if (r == 0) { 1278 device_printf(sc->sc_dev, "writeramaddr -- " 1279 "result[%d](addr %d) still valid\n", resi, addr); 1280 r = -1; 1281 return (-1); 1282 } else 1283 r = 0; 1284 1285 WRITE_REG_1(sc, HIFN_1_DMA_CSR, 1286 HIFN_DMACSR_C_CTRL_DIS | HIFN_DMACSR_S_CTRL_DIS | 1287 HIFN_DMACSR_D_CTRL_DIS | HIFN_DMACSR_R_CTRL_DIS); 1288 1289 return (r); 1290} 1291 1292static int 1293hifn_readramaddr(struct hifn_softc *sc, int addr, u_int8_t *data) 1294{ 1295 struct hifn_dma *dma = sc->sc_dma; 1296 hifn_base_command_t rc; 1297 const u_int32_t masks = HIFN_D_VALID | HIFN_D_LAST | HIFN_D_MASKDONEIRQ; 1298 int r, cmdi, 
srci, dsti, resi; 1299 1300 rc.masks = htole16(2 << 13); 1301 rc.session_num = htole16(addr >> 14); 1302 rc.total_source_count = htole16(addr & 0x3fff); 1303 rc.total_dest_count = htole16(8); 1304 1305 hifn_alloc_slot(sc, &cmdi, &srci, &dsti, &resi); 1306 1307 WRITE_REG_1(sc, HIFN_1_DMA_CSR, 1308 HIFN_DMACSR_C_CTRL_ENA | HIFN_DMACSR_S_CTRL_ENA | 1309 HIFN_DMACSR_D_CTRL_ENA | HIFN_DMACSR_R_CTRL_ENA); 1310 1311 bzero(dma->command_bufs[cmdi], HIFN_MAX_COMMAND); 1312 *(hifn_base_command_t *)dma->command_bufs[cmdi] = rc; 1313 1314 dma->srcr[srci].p = htole32(sc->sc_dma_physaddr + 1315 offsetof(struct hifn_dma, test_src)); 1316 dma->test_src = 0; 1317 dma->dstr[dsti].p = htole32(sc->sc_dma_physaddr + 1318 offsetof(struct hifn_dma, test_dst)); 1319 dma->test_dst = 0; 1320 dma->cmdr[cmdi].l = htole32(8 | masks); 1321 dma->srcr[srci].l = htole32(8 | masks); 1322 dma->dstr[dsti].l = htole32(8 | masks); 1323 dma->resr[resi].l = htole32(HIFN_MAX_RESULT | masks); 1324 1325 bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap, 1326 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 1327 1328 for (r = 10000; r >= 0; r--) { 1329 DELAY(10); 1330 bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap, 1331 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); 1332 if ((dma->resr[resi].l & htole32(HIFN_D_VALID)) == 0) 1333 break; 1334 bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap, 1335 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 1336 } 1337 if (r == 0) { 1338 device_printf(sc->sc_dev, "readramaddr -- " 1339 "result[%d](addr %d) still valid\n", resi, addr); 1340 r = -1; 1341 } else { 1342 r = 0; 1343 bcopy(&dma->test_dst, data, sizeof(dma->test_dst)); 1344 } 1345 1346 WRITE_REG_1(sc, HIFN_1_DMA_CSR, 1347 HIFN_DMACSR_C_CTRL_DIS | HIFN_DMACSR_S_CTRL_DIS | 1348 HIFN_DMACSR_D_CTRL_DIS | HIFN_DMACSR_R_CTRL_DIS); 1349 1350 return (r); 1351} 1352 1353/* 1354 * Initialize the descriptor rings. 
1355 */ 1356static void 1357hifn_init_dma(struct hifn_softc *sc) 1358{ 1359 struct hifn_dma *dma = sc->sc_dma; 1360 int i; 1361 1362 hifn_set_retry(sc); 1363 1364 /* initialize static pointer values */ 1365 for (i = 0; i < HIFN_D_CMD_RSIZE; i++) 1366 dma->cmdr[i].p = htole32(sc->sc_dma_physaddr + 1367 offsetof(struct hifn_dma, command_bufs[i][0])); 1368 for (i = 0; i < HIFN_D_RES_RSIZE; i++) 1369 dma->resr[i].p = htole32(sc->sc_dma_physaddr + 1370 offsetof(struct hifn_dma, result_bufs[i][0])); 1371 1372 dma->cmdr[HIFN_D_CMD_RSIZE].p = 1373 htole32(sc->sc_dma_physaddr + offsetof(struct hifn_dma, cmdr[0])); 1374 dma->srcr[HIFN_D_SRC_RSIZE].p = 1375 htole32(sc->sc_dma_physaddr + offsetof(struct hifn_dma, srcr[0])); 1376 dma->dstr[HIFN_D_DST_RSIZE].p = 1377 htole32(sc->sc_dma_physaddr + offsetof(struct hifn_dma, dstr[0])); 1378 dma->resr[HIFN_D_RES_RSIZE].p = 1379 htole32(sc->sc_dma_physaddr + offsetof(struct hifn_dma, resr[0])); 1380 1381 dma->cmdu = dma->srcu = dma->dstu = dma->resu = 0; 1382 dma->cmdi = dma->srci = dma->dsti = dma->resi = 0; 1383 dma->cmdk = dma->srck = dma->dstk = dma->resk = 0; 1384} 1385 1386/* 1387 * Writes out the raw command buffer space. Returns the 1388 * command buffer size. 
 */
static u_int
hifn_write_command(struct hifn_command *cmd, u_int8_t *buf)
{
	u_int8_t *buf_pos;
	hifn_base_command_t *base_cmd;
	hifn_mac_command_t *mac_cmd;
	hifn_crypt_command_t *cry_cmd;
	int using_mac, using_crypt, len;
	u_int32_t dlen, slen;

	/*
	 * Layout order: base command, then (optional) mac command,
	 * (optional) crypt command, new mac key, new crypt key, new IV.
	 */
	buf_pos = buf;
	using_mac = cmd->base_masks & HIFN_BASE_CMD_MAC;
	using_crypt = cmd->base_masks & HIFN_BASE_CMD_CRYPT;

	base_cmd = (hifn_base_command_t *)buf_pos;
	base_cmd->masks = htole16(cmd->base_masks);
	slen = cmd->src_mapsize;
	/* slop bytes are carried in a separate 32-bit staging word */
	if (cmd->sloplen)
		dlen = cmd->dst_mapsize - cmd->sloplen + sizeof(u_int32_t);
	else
		dlen = cmd->dst_mapsize;
	/* lengths are split: low 16 bits here, high bits in session_num */
	base_cmd->total_source_count = htole16(slen & HIFN_BASE_CMD_LENMASK_LO);
	base_cmd->total_dest_count = htole16(dlen & HIFN_BASE_CMD_LENMASK_LO);
	dlen >>= 16;
	slen >>= 16;
	base_cmd->session_num = htole16(cmd->session_num |
	    ((slen << HIFN_BASE_CMD_SRCLEN_S) & HIFN_BASE_CMD_SRCLEN_M) |
	    ((dlen << HIFN_BASE_CMD_DSTLEN_S) & HIFN_BASE_CMD_DSTLEN_M));
	buf_pos += sizeof(hifn_base_command_t);

	if (using_mac) {
		mac_cmd = (hifn_mac_command_t *)buf_pos;
		dlen = cmd->maccrd->crd_len;
		mac_cmd->source_count = htole16(dlen & 0xffff);
		dlen >>= 16;
		mac_cmd->masks = htole16(cmd->mac_masks |
		    ((dlen << HIFN_MAC_CMD_SRCLEN_S) & HIFN_MAC_CMD_SRCLEN_M));
		mac_cmd->header_skip = htole16(cmd->maccrd->crd_skip);
		mac_cmd->reserved = 0;
		buf_pos += sizeof(hifn_mac_command_t);
	}

	if (using_crypt) {
		cry_cmd = (hifn_crypt_command_t *)buf_pos;
		dlen = cmd->enccrd->crd_len;
		cry_cmd->source_count = htole16(dlen & 0xffff);
		dlen >>= 16;
		cry_cmd->masks = htole16(cmd->cry_masks |
		    ((dlen << HIFN_CRYPT_CMD_SRCLEN_S) & HIFN_CRYPT_CMD_SRCLEN_M));
		cry_cmd->header_skip = htole16(cmd->enccrd->crd_skip);
		cry_cmd->reserved = 0;
		buf_pos += sizeof(hifn_crypt_command_t);
	}

	if (using_mac && cmd->mac_masks & HIFN_MAC_CMD_NEW_KEY) {
		bcopy(cmd->mac, buf_pos, HIFN_MAC_KEY_LENGTH);
		buf_pos += HIFN_MAC_KEY_LENGTH;
	}

	if (using_crypt && cmd->cry_masks & HIFN_CRYPT_CMD_NEW_KEY) {
		switch (cmd->cry_masks & HIFN_CRYPT_CMD_ALG_MASK) {
		case HIFN_CRYPT_CMD_ALG_3DES:
			bcopy(cmd->ck, buf_pos, HIFN_3DES_KEY_LENGTH);
			buf_pos += HIFN_3DES_KEY_LENGTH;
			break;
		case HIFN_CRYPT_CMD_ALG_DES:
			/*
			 * NOTE(review): copies HIFN_DES_KEY_LENGTH bytes
			 * but advances by cmd->cklen -- presumably these
			 * are equal for DES; confirm.
			 */
			bcopy(cmd->ck, buf_pos, HIFN_DES_KEY_LENGTH);
			buf_pos += cmd->cklen;
			break;
		case HIFN_CRYPT_CMD_ALG_RC4:
			/* repeat the key material to fill 256 bytes */
			len = 256;
			do {
				int clen;

				clen = MIN(cmd->cklen, len);
				bcopy(cmd->ck, buf_pos, clen);
				len -= clen;
				buf_pos += clen;
			} while (len > 0);
			bzero(buf_pos, 4);
			buf_pos += 4;
			break;
		}
	}

	if (using_crypt && cmd->cry_masks & HIFN_CRYPT_CMD_NEW_IV) {
		bcopy(cmd->iv, buf_pos, HIFN_IV_LENGTH);
		buf_pos += HIFN_IV_LENGTH;
	}

	/* neither mac nor crypt: pad the command with 8 zero bytes */
	if ((cmd->base_masks & (HIFN_BASE_CMD_MAC|HIFN_BASE_CMD_CRYPT)) == 0) {
		bzero(buf_pos, 8);
		buf_pos += 8;
	}

	return (buf_pos - buf);
}

/*
 * Return 1 if every DMA segment is 32-bit aligned: each segment
 * address is word-aligned and every segment length except the last
 * is a multiple of 4.  Return 0 otherwise.
 */
static int
hifn_dmamap_aligned(struct hifn_operand *op)
{
	int i;

	for (i = 0; i < op->nsegs; i++) {
		if (op->segs[i].ds_addr & 3)
			return (0);
		if ((i != (op->nsegs - 1)) && (op->segs[i].ds_len & 3))
			return (0);
	}
	return (1);
}

/*
 * Load the destination operand's segments into the destination ring.
 * If the total length is not a multiple of 4 (cmd->sloplen != 0), the
 * final slop bytes are redirected into the per-command staging word
 * dma->slop[cmd->slopidx].  Returns the updated ring index.
 */
static int
hifn_dmamap_load_dst(struct hifn_softc *sc, struct hifn_command *cmd)
{
	struct hifn_dma *dma = sc->sc_dma;
	struct hifn_operand *dst = &cmd->dst;
	u_int32_t p, l;
	int idx, used = 0, i;

	idx = dma->dsti;
	/* all but the final segment go in unmodified */
	for (i = 0; i < dst->nsegs - 1; i++) {
		dma->dstr[idx].p = htole32(dst->segs[i].ds_addr);
		dma->dstr[idx].l = htole32(HIFN_D_VALID |
		    HIFN_D_MASKDONEIRQ | dst->segs[i].ds_len);
		HIFN_DSTR_SYNC(sc, idx,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
		used++;

		if (++idx == HIFN_D_DST_RSIZE) {
			/* wrap via the jump descriptor at the ring end */
			dma->dstr[idx].l = htole32(HIFN_D_VALID |
			    HIFN_D_JUMP | HIFN_D_MASKDONEIRQ);
			HIFN_DSTR_SYNC(sc, idx,
			    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
			idx = 0;
		}
	}

	if (cmd->sloplen == 0) {
		/* aligned tail: last segment is also the last descriptor */
		p = dst->segs[i].ds_addr;
		l = HIFN_D_VALID | HIFN_D_MASKDONEIRQ | HIFN_D_LAST |
		    dst->segs[i].ds_len;
	} else {
		/* unaligned tail: final word lands in the slop buffer */
		p = sc->sc_dma_physaddr +
		    offsetof(struct hifn_dma, slop[cmd->slopidx]);
		l = HIFN_D_VALID | HIFN_D_MASKDONEIRQ | HIFN_D_LAST |
		    sizeof(u_int32_t);

		/* the aligned part of the final segment, if any */
		if ((dst->segs[i].ds_len - cmd->sloplen) != 0) {
			dma->dstr[idx].p = htole32(dst->segs[i].ds_addr);
			dma->dstr[idx].l = htole32(HIFN_D_VALID |
			    HIFN_D_MASKDONEIRQ |
			    (dst->segs[i].ds_len - cmd->sloplen));
			HIFN_DSTR_SYNC(sc, idx,
			    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
			used++;

			if (++idx == HIFN_D_DST_RSIZE) {
				dma->dstr[idx].l = htole32(HIFN_D_VALID |
				    HIFN_D_JUMP | HIFN_D_MASKDONEIRQ);
				HIFN_DSTR_SYNC(sc, idx,
				    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
				idx = 0;
			}
		}
	}
	dma->dstr[idx].p = htole32(p);
	dma->dstr[idx].l = htole32(l);
	HIFN_DSTR_SYNC(sc, idx, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	used++;

	if (++idx == HIFN_D_DST_RSIZE) {
		dma->dstr[idx].l = htole32(HIFN_D_VALID | HIFN_D_JUMP |
		    HIFN_D_MASKDONEIRQ);
		HIFN_DSTR_SYNC(sc, idx,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
		idx = 0;
	}

	dma->dsti = idx;
	dma->dstu += used;
	return (idx);
}

/*
 * Load the source operand's segments into the source ring; the final
 * segment is tagged HIFN_D_LAST.  Returns the updated ring index.
 */
static int
hifn_dmamap_load_src(struct hifn_softc *sc, struct hifn_command *cmd)
{
	struct hifn_dma *dma = sc->sc_dma;
	struct hifn_operand *src = &cmd->src;
	int idx, i;
	u_int32_t last = 0;

	idx = dma->srci;
	for (i = 0; i < src->nsegs; i++) {
		if (i == src->nsegs - 1)
			last = HIFN_D_LAST;

		dma->srcr[idx].p = htole32(src->segs[i].ds_addr);
		dma->srcr[idx].l = htole32(src->segs[i].ds_len |
		    HIFN_D_VALID | HIFN_D_MASKDONEIRQ | last);
		HIFN_SRCR_SYNC(sc, idx,
		    BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);

		if (++idx == HIFN_D_SRC_RSIZE) {
			/* wrap via the jump descriptor at the ring end */
			dma->srcr[idx].l = htole32(HIFN_D_VALID |
			    HIFN_D_JUMP | HIFN_D_MASKDONEIRQ);
			HIFN_SRCR_SYNC(sc, HIFN_D_SRC_RSIZE,
			    BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
			idx = 0;
		}
	}
	dma->srci = idx;
	dma->srcu += src->nsegs;
	return (idx);
}

/*
 * bus_dma callback: record the segment list and total mapped size of
 * an operand into the hifn_operand passed as 'arg'.
 */
static void
hifn_op_cb(void* arg, bus_dma_segment_t *seg, int nsegs, bus_size_t mapsize, int error)
{
	struct hifn_operand *op = arg;

	KASSERT(nsegs <= MAX_SCATTER,
		("hifn_op_cb: too many DMA segments (%u > %u) "
		 "returned when mapping operand", nsegs, MAX_SCATTER));
	op->mapsize = mapsize;
	op->nsegs = nsegs;
	bcopy(seg, op->segs, nsegs * sizeof (seg[0]));
}

/*
 * Queue one crypto operation to the hardware: map the source (and, if
 * the source is unaligned, copy it into freshly allocated mbufs for the
 * destination), then fill the command, source, result and destination
 * rings and kick the relevant DMA engines.  Returns 0 on success,
 * ERESTART when the rings are full (op should be requeued), or an
 * errno on hard failure.
 */
static int
hifn_crypto(
	struct hifn_softc *sc,
	struct hifn_command *cmd,
	struct cryptop *crp,
	int hint)
{
	struct hifn_dma *dma = sc->sc_dma;
	u_int32_t cmdlen;
	int cmdi, resi, err = 0;

	/*
	 * need 1 cmd, and 1 res
	 *
	 * NB: check this first since it's easy.
	 */
	HIFN_LOCK(sc);
	if ((dma->cmdu + 1) > HIFN_D_CMD_RSIZE ||
	    (dma->resu + 1) > HIFN_D_RES_RSIZE) {
#ifdef HIFN_DEBUG
		if (hifn_debug) {
			device_printf(sc->sc_dev,
				"cmd/result exhaustion, cmdu %u resu %u\n",
				dma->cmdu, dma->resu);
		}
#endif
		hifnstats.hst_nomem_cr++;
		HIFN_UNLOCK(sc);
		return (ERESTART);
	}

	if (bus_dmamap_create(sc->sc_dmat, BUS_DMA_NOWAIT, &cmd->src_map)) {
		hifnstats.hst_nomem_map++;
		HIFN_UNLOCK(sc);
		return (ENOMEM);
	}

	/* map the source buffer (mbuf chain or uio) for DMA */
	if (crp->crp_flags & CRYPTO_F_IMBUF) {
		if (bus_dmamap_load_mbuf(sc->sc_dmat, cmd->src_map,
		    cmd->src_m, hifn_op_cb, &cmd->src, BUS_DMA_NOWAIT)) {
			hifnstats.hst_nomem_load++;
			err = ENOMEM;
			goto err_srcmap1;
		}
	} else if (crp->crp_flags & CRYPTO_F_IOV) {
		if (bus_dmamap_load_uio(sc->sc_dmat, cmd->src_map,
		    cmd->src_io, hifn_op_cb, &cmd->src, BUS_DMA_NOWAIT)) {
			hifnstats.hst_nomem_load++;
			err = ENOMEM;
			goto err_srcmap1;
		}
	} else {
		err = EINVAL;
		goto err_srcmap1;
	}

	if (hifn_dmamap_aligned(&cmd->src)) {
		/* aligned: operate in place, dst shares the src mapping */
		cmd->sloplen = cmd->src_mapsize & 3;
		cmd->dst = cmd->src;
	} else {
		if (crp->crp_flags & CRYPTO_F_IOV) {
			/* no realignment path for uio requests */
			err = EINVAL;
			goto err_srcmap;
		} else if (crp->crp_flags & CRYPTO_F_IMBUF) {
			int totlen, len;
			struct mbuf *m, *m0, *mlast;

			KASSERT(cmd->dst_m == cmd->src_m,
				("hifn_crypto: dst_m initialized improperly"));
			hifnstats.hst_unaligned++;
			/*
			 * Source is not aligned on a longword boundary.
			 * Copy the data to insure alignment.  If we fail
			 * to allocate mbufs or clusters while doing this
			 * we return ERESTART so the operation is requeued
			 * at the crypto later, but only if there are
			 * ops already posted to the hardware; otherwise we
			 * have no guarantee that we'll be re-entered.
			 */
			totlen = cmd->src_mapsize;
			if (cmd->src_m->m_flags & M_PKTHDR) {
				len = MHLEN;
				MGETHDR(m0, M_DONTWAIT, MT_DATA);
				if (m0 && !m_dup_pkthdr(m0, cmd->src_m, M_DONTWAIT)) {
					m_free(m0);
					m0 = NULL;
				}
			} else {
				len = MLEN;
				MGET(m0, M_DONTWAIT, MT_DATA);
			}
			if (m0 == NULL) {
				hifnstats.hst_nomem_mbuf++;
				err = dma->cmdu ? ERESTART : ENOMEM;
				goto err_srcmap;
			}
			if (totlen >= MINCLSIZE) {
				MCLGET(m0, M_DONTWAIT);
				if ((m0->m_flags & M_EXT) == 0) {
					hifnstats.hst_nomem_mcl++;
					err = dma->cmdu ? ERESTART : ENOMEM;
					m_freem(m0);
					goto err_srcmap;
				}
				len = MCLBYTES;
			}
			totlen -= len;
			m0->m_pkthdr.len = m0->m_len = len;
			mlast = m0;

			/* grow the chain until it can hold totlen bytes */
			while (totlen > 0) {
				MGET(m, M_DONTWAIT, MT_DATA);
				if (m == NULL) {
					hifnstats.hst_nomem_mbuf++;
					err = dma->cmdu ? ERESTART : ENOMEM;
					m_freem(m0);
					goto err_srcmap;
				}
				len = MLEN;
				if (totlen >= MINCLSIZE) {
					MCLGET(m, M_DONTWAIT);
					if ((m->m_flags & M_EXT) == 0) {
						hifnstats.hst_nomem_mcl++;
						err = dma->cmdu ? ERESTART : ENOMEM;
						/* link m so m_freem(m0) reclaims it too */
						mlast->m_next = m;
						m_freem(m0);
						goto err_srcmap;
					}
					len = MCLBYTES;
				}

				m->m_len = len;
				m0->m_pkthdr.len += len;
				totlen -= len;

				mlast->m_next = m;
				mlast = m;
			}
			cmd->dst_m = m0;
		}
	}

	if (cmd->dst_map == NULL) {
		if (bus_dmamap_create(sc->sc_dmat, BUS_DMA_NOWAIT, &cmd->dst_map)) {
			hifnstats.hst_nomem_map++;
			err = ENOMEM;
			goto err_srcmap;
		}
		if (crp->crp_flags & CRYPTO_F_IMBUF) {
			if (bus_dmamap_load_mbuf(sc->sc_dmat, cmd->dst_map,
			    cmd->dst_m, hifn_op_cb, &cmd->dst, BUS_DMA_NOWAIT)) {
				hifnstats.hst_nomem_map++;
				err = ENOMEM;
				goto err_dstmap1;
			}
		} else if (crp->crp_flags & CRYPTO_F_IOV) {
			if (bus_dmamap_load_uio(sc->sc_dmat, cmd->dst_map,
			    cmd->dst_io, hifn_op_cb, &cmd->dst, BUS_DMA_NOWAIT)) {
				hifnstats.hst_nomem_load++;
				err = ENOMEM;
				goto err_dstmap1;
			}
		}
	}

#ifdef HIFN_DEBUG
	if (hifn_debug) {
		device_printf(sc->sc_dev,
		    "Entering cmd: stat %8x ien %8x u %d/%d/%d/%d n %d/%d\n",
		    READ_REG_1(sc, HIFN_1_DMA_CSR),
		    READ_REG_1(sc, HIFN_1_DMA_IER),
		    dma->cmdu, dma->srcu, dma->dstu, dma->resu,
		    cmd->src_nsegs, cmd->dst_nsegs);
	}
#endif

	if (cmd->src_map == cmd->dst_map) {
		bus_dmamap_sync(sc->sc_dmat, cmd->src_map,
		    BUS_DMASYNC_PREWRITE|BUS_DMASYNC_PREREAD);
	} else {
		bus_dmamap_sync(sc->sc_dmat, cmd->src_map,
		    BUS_DMASYNC_PREWRITE);
		bus_dmamap_sync(sc->sc_dmat, cmd->dst_map,
		    BUS_DMASYNC_PREREAD);
	}

	/*
	 * need N src, and N dst
	 */
	if ((dma->srcu + cmd->src_nsegs) > HIFN_D_SRC_RSIZE ||
	    (dma->dstu + cmd->dst_nsegs + 1) > HIFN_D_DST_RSIZE) {
#ifdef HIFN_DEBUG
		if (hifn_debug) {
			device_printf(sc->sc_dev,
				"src/dst exhaustion, srcu %u+%u dstu %u+%u\n",
				dma->srcu, cmd->src_nsegs,
				dma->dstu, cmd->dst_nsegs);
		}
#endif
		hifnstats.hst_nomem_sd++;
		err = ERESTART;
		goto err_dstmap;
	}

	/* claim a command slot, wrapping via the jump descriptor */
	if (dma->cmdi == HIFN_D_CMD_RSIZE) {
		dma->cmdi = 0;
		dma->cmdr[HIFN_D_CMD_RSIZE].l = htole32(HIFN_D_VALID |
		    HIFN_D_JUMP | HIFN_D_MASKDONEIRQ);
		HIFN_CMDR_SYNC(sc, HIFN_D_CMD_RSIZE,
		    BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
	}
	cmdi = dma->cmdi++;
	cmdlen = hifn_write_command(cmd, dma->command_bufs[cmdi]);
	HIFN_CMD_SYNC(sc, cmdi, BUS_DMASYNC_PREWRITE);

	/* .p for command/result already set */
	dma->cmdr[cmdi].l = htole32(cmdlen | HIFN_D_VALID | HIFN_D_LAST |
	    HIFN_D_MASKDONEIRQ);
	HIFN_CMDR_SYNC(sc, cmdi,
	    BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
	dma->cmdu++;
	if (sc->sc_c_busy == 0) {
		WRITE_REG_1(sc, HIFN_1_DMA_CSR, HIFN_DMACSR_C_CTRL_ENA);
		sc->sc_c_busy = 1;
	}

	/*
	 * We don't worry about missing an interrupt (which a "command wait"
	 * interrupt salvages us from), unless there is more than one command
	 * in the queue.
	 */
	if (dma->cmdu > 1) {
		sc->sc_dmaier |= HIFN_DMAIER_C_WAIT;
		WRITE_REG_1(sc, HIFN_1_DMA_IER, sc->sc_dmaier);
	}

	hifnstats.hst_ipackets++;
	hifnstats.hst_ibytes += cmd->src_mapsize;

	hifn_dmamap_load_src(sc, cmd);
	if (sc->sc_s_busy == 0) {
		WRITE_REG_1(sc, HIFN_1_DMA_CSR, HIFN_DMACSR_S_CTRL_ENA);
		sc->sc_s_busy = 1;
	}

	/*
	 * Unlike other descriptors, we don't mask done interrupt from
	 * result descriptor.
	 */
#ifdef HIFN_DEBUG
	if (hifn_debug)
		printf("load res\n");
#endif
	if (dma->resi == HIFN_D_RES_RSIZE) {
		dma->resi = 0;
		dma->resr[HIFN_D_RES_RSIZE].l = htole32(HIFN_D_VALID |
		    HIFN_D_JUMP | HIFN_D_MASKDONEIRQ);
		HIFN_RESR_SYNC(sc, HIFN_D_RES_RSIZE,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	}
	resi = dma->resi++;
	KASSERT(dma->hifn_commands[resi] == NULL,
		("hifn_crypto: command slot %u busy", resi));
	dma->hifn_commands[resi] = cmd;
	HIFN_RES_SYNC(sc, resi, BUS_DMASYNC_PREREAD);
	/*
	 * When the caller hints more work is coming, batch completions:
	 * mask the done interrupt on this result, up to hifn_maxbatch
	 * ops in a row.
	 */
	if ((hint & CRYPTO_HINT_MORE) && sc->sc_curbatch < hifn_maxbatch) {
		dma->resr[resi].l = htole32(HIFN_MAX_RESULT |
		    HIFN_D_VALID | HIFN_D_LAST | HIFN_D_MASKDONEIRQ);
		sc->sc_curbatch++;
		if (sc->sc_curbatch > hifnstats.hst_maxbatch)
			hifnstats.hst_maxbatch = sc->sc_curbatch;
		hifnstats.hst_totbatch++;
	} else {
		dma->resr[resi].l = htole32(HIFN_MAX_RESULT |
		    HIFN_D_VALID | HIFN_D_LAST);
		sc->sc_curbatch = 0;
	}
	HIFN_RESR_SYNC(sc, resi,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	dma->resu++;
	if (sc->sc_r_busy == 0) {
		WRITE_REG_1(sc, HIFN_1_DMA_CSR, HIFN_DMACSR_R_CTRL_ENA);
		sc->sc_r_busy = 1;
	}

	if (cmd->sloplen)
		cmd->slopidx = resi;

	hifn_dmamap_load_dst(sc, cmd);

	if (sc->sc_d_busy == 0) {
		WRITE_REG_1(sc, HIFN_1_DMA_CSR, HIFN_DMACSR_D_CTRL_ENA);
		sc->sc_d_busy = 1;
	}

#ifdef HIFN_DEBUG
	if (hifn_debug) {
		device_printf(sc->sc_dev, "command: stat %8x ier %8x\n",
		    READ_REG_1(sc, HIFN_1_DMA_CSR),
		    READ_REG_1(sc, HIFN_1_DMA_IER));
	}
#endif

	/* arm the hifn_tick watchdog: engines stay on ~5 ticks */
	sc->sc_active = 5;
	HIFN_UNLOCK(sc);
	KASSERT(err == 0, ("hifn_crypto: success with error %u", err));
	return (err);		/* success */

err_dstmap:
	if (cmd->src_map != cmd->dst_map)
		bus_dmamap_unload(sc->sc_dmat, cmd->dst_map);
err_dstmap1:
	if (cmd->src_map != cmd->dst_map)
		bus_dmamap_destroy(sc->sc_dmat, cmd->dst_map);
err_srcmap:
	if (crp->crp_flags & CRYPTO_F_IMBUF) {
		/* free the realignment chain if we allocated one */
		if (cmd->src_m != cmd->dst_m)
			m_freem(cmd->dst_m);
	}
	bus_dmamap_unload(sc->sc_dmat, cmd->src_map);
err_srcmap1:
	bus_dmamap_destroy(sc->sc_dmat, cmd->src_map);
	HIFN_UNLOCK(sc);
	return (err);
}

/*
 * Once-a-second watchdog: when no command has been queued for a full
 * countdown (sc_active reaches 0), disable any DMA engines whose rings
 * have drained.  Re-arms itself via callout_reset().
 */
static void
hifn_tick(void* vsc)
{
	struct hifn_softc *sc = vsc;

	HIFN_LOCK(sc);
	if (sc->sc_active == 0) {
		struct hifn_dma *dma = sc->sc_dma;
		u_int32_t r = 0;

		if (dma->cmdu == 0 && sc->sc_c_busy) {
			sc->sc_c_busy = 0;
			r |= HIFN_DMACSR_C_CTRL_DIS;
		}
		if (dma->srcu == 0 && sc->sc_s_busy) {
			sc->sc_s_busy = 0;
			r |= HIFN_DMACSR_S_CTRL_DIS;
		}
		if (dma->dstu == 0 && sc->sc_d_busy) {
			sc->sc_d_busy = 0;
			r |= HIFN_DMACSR_D_CTRL_DIS;
		}
		if (dma->resu == 0 && sc->sc_r_busy) {
			sc->sc_r_busy = 0;
			r |= HIFN_DMACSR_R_CTRL_DIS;
		}
		if (r)
			WRITE_REG_1(sc, HIFN_1_DMA_CSR, r);
	} else
		sc->sc_active--;
	HIFN_UNLOCK(sc);
	callout_reset(&sc->sc_tickto, hz, hifn_tick, sc);
}

/*
 * Interrupt handler: acknowledge the DMA status bits we own, handle
 * overrun/abort conditions, then sweep the result, source and command
 * rings for completed descriptors (result completions dispatch
 * hifn_callback).  Finally unblock the crypto layer if a previous
 * ERESTART left it waiting for ring space.
 */
static void
hifn_intr(void *arg)
{
	struct hifn_softc *sc = arg;
	struct hifn_dma *dma;
	u_int32_t dmacsr, restart;
	int i, u;

	dmacsr = READ_REG_1(sc, HIFN_1_DMA_CSR);

	/* Nothing in the DMA unit interrupted */
	if ((dmacsr & sc->sc_dmaier) == 0)
		return;

	HIFN_LOCK(sc);

	dma = sc->sc_dma;

#ifdef HIFN_DEBUG
	if (hifn_debug) {
		device_printf(sc->sc_dev,
		    "irq: stat %08x ien %08x damier %08x i %d/%d/%d/%d k %d/%d/%d/%d u %d/%d/%d/%d\n",
		    dmacsr, READ_REG_1(sc, HIFN_1_DMA_IER), sc->sc_dmaier,
		    dma->cmdi, dma->srci, dma->dsti, dma->resi,
		    dma->cmdk, dma->srck, dma->dstk, dma->resk,
		    dma->cmdu, dma->srcu, dma->dstu, dma->resu);
	}
#endif

	/* acknowledge only the bits we are interested in */
	WRITE_REG_1(sc, HIFN_1_DMA_CSR, dmacsr & sc->sc_dmaier);

	if ((sc->sc_flags & HIFN_HAS_PUBLIC) &&
	    (dmacsr & HIFN_DMACSR_PUBDONE))
		WRITE_REG_1(sc, HIFN_1_PUB_STATUS,
		    READ_REG_1(sc, HIFN_1_PUB_STATUS) | HIFN_PUBSTS_DONE);

	restart = dmacsr & (HIFN_DMACSR_D_OVER | HIFN_DMACSR_R_OVER);
	if (restart)
		device_printf(sc->sc_dev, "overrun %x\n", dmacsr);

	if (sc->sc_flags & HIFN_IS_7811) {
		if (dmacsr & HIFN_DMACSR_ILLR)
			device_printf(sc->sc_dev, "illegal read\n");
		if (dmacsr & HIFN_DMACSR_ILLW)
			device_printf(sc->sc_dev, "illegal write\n");
	}

	/* any engine abort forces a full reset of the device */
	restart = dmacsr & (HIFN_DMACSR_C_ABORT | HIFN_DMACSR_S_ABORT |
	    HIFN_DMACSR_D_ABORT | HIFN_DMACSR_R_ABORT);
	if (restart) {
		device_printf(sc->sc_dev, "abort, resetting.\n");
		hifnstats.hst_abort++;
		hifn_abort(sc);
		HIFN_UNLOCK(sc);
		return;
	}

	if ((dmacsr & HIFN_DMACSR_C_WAIT) && (dma->cmdu == 0)) {
		/*
		 * If no slots to process and we receive a "waiting on
		 * command" interrupt, we disable the "waiting on command"
		 * (by clearing it).
		 */
		sc->sc_dmaier &= ~HIFN_DMAIER_C_WAIT;
		WRITE_REG_1(sc, HIFN_1_DMA_IER, sc->sc_dmaier);
	}

	/* clear the rings */
	i = dma->resk; u = dma->resu;
	while (u != 0) {
		HIFN_RESR_SYNC(sc, i,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
		if (dma->resr[i].l & htole32(HIFN_D_VALID)) {
			/* still owned by the chip; stop here */
			HIFN_RESR_SYNC(sc, i,
			    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
			break;
		}

		/* index RSIZE is the jump descriptor, not a real slot */
		if (i != HIFN_D_RES_RSIZE) {
			struct hifn_command *cmd;
			u_int8_t *macbuf = NULL;

			HIFN_RES_SYNC(sc, i, BUS_DMASYNC_POSTREAD);
			cmd = dma->hifn_commands[i];
			KASSERT(cmd != NULL,
				("hifn_intr: null command slot %u", i));
			dma->hifn_commands[i] = NULL;

			if (cmd->base_masks & HIFN_BASE_CMD_MAC) {
				/*
				 * MAC digest sits at offset 12 of the
				 * result buffer -- NOTE(review): per the
				 * Hifn result layout; confirm against
				 * hifn7751reg.h.
				 */
				macbuf = dma->result_bufs[i];
				macbuf += 12;
			}

			hifn_callback(sc, cmd, macbuf);
			hifnstats.hst_opackets++;
			u--;
		}

		if (++i == (HIFN_D_RES_RSIZE + 1))
			i = 0;
	}
	dma->resk = i; dma->resu = u;

	i = dma->srck; u = dma->srcu;
	while (u != 0) {
		if (i == HIFN_D_SRC_RSIZE)
			i = 0;
		HIFN_SRCR_SYNC(sc, i,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
		if (dma->srcr[i].l & htole32(HIFN_D_VALID)) {
			HIFN_SRCR_SYNC(sc, i,
			    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
			break;
		}
		i++, u--;
	}
	dma->srck = i; dma->srcu = u;

	i = dma->cmdk; u = dma->cmdu;
	while (u != 0) {
		HIFN_CMDR_SYNC(sc, i,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
		if (dma->cmdr[i].l & htole32(HIFN_D_VALID)) {
			HIFN_CMDR_SYNC(sc, i,
			    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
			break;
		}
		if (i != HIFN_D_CMD_RSIZE) {
			u--;
			HIFN_CMD_SYNC(sc, i, BUS_DMASYNC_POSTWRITE);
		}
		if (++i == (HIFN_D_CMD_RSIZE + 1))
			i = 0;
	}
	dma->cmdk = i; dma->cmdu = u;

	HIFN_UNLOCK(sc);

	if (sc->sc_needwakeup) {		/* XXX check high watermark */
		int wakeup = sc->sc_needwakeup & (CRYPTO_SYMQ|CRYPTO_ASYMQ);
#ifdef HIFN_DEBUG
		if (hifn_debug)
			device_printf(sc->sc_dev,
			    "wakeup crypto (%x) u %d/%d/%d/%d\n",
			    sc->sc_needwakeup,
			    dma->cmdu, dma->srcu, dma->dstu, dma->resu);
#endif
		sc->sc_needwakeup &= ~wakeup;
		crypto_unblock(sc->sc_cid, wakeup);
	}
}

/*
 * Allocate a new 'session' and return an encoded session id.  'sidp'
 * contains our registration id, and should contain an encoded session
 * id on successful allocation.
 */
static int
hifn_newsession(void *arg, u_int32_t *sidp, struct cryptoini *cri)
{
	struct cryptoini *c;
	struct hifn_softc *sc = arg;
	int i, mac = 0, cry = 0;

	KASSERT(sc != NULL, ("hifn_newsession: null softc"));
	if (sidp == NULL || cri == NULL || sc == NULL)
		return (EINVAL);

	/* find a free session slot */
	for (i = 0; i < sc->sc_maxses; i++)
		if (sc->sc_sessions[i].hs_state == HS_STATE_FREE)
			break;
	if (i == sc->sc_maxses)
		return (ENOMEM);

	/* at most one mac and one cipher algorithm per session */
	for (c = cri; c != NULL; c = c->cri_next) {
		switch (c->cri_alg) {
		case CRYPTO_MD5:
		case CRYPTO_SHA1:
		case CRYPTO_MD5_HMAC:
		case CRYPTO_SHA1_HMAC:
			if (mac)
				return (EINVAL);
			mac = 1;
			break;
		case CRYPTO_DES_CBC:
		case CRYPTO_3DES_CBC:
			/* XXX this may read fewer, does it matter? */
			read_random(sc->sc_sessions[i].hs_iv, HIFN_IV_LENGTH);
			/*FALLTHROUGH*/
		case CRYPTO_ARC4:
			if (cry)
				return (EINVAL);
			cry = 1;
			break;
		default:
			return (EINVAL);
		}
	}
	if (mac == 0 && cry == 0)
		return (EINVAL);

	*sidp = HIFN_SID(device_get_unit(sc->sc_dev), i);
	sc->sc_sessions[i].hs_state = HS_STATE_USED;

	return (0);
}

/*
 * Deallocate a session.
 * XXX this routine should run a zero'd mac/encrypt key into context ram.
 * XXX to blow away any keys already stored there.
2190 */ 2191static int 2192hifn_freesession(void *arg, u_int64_t tid) 2193{ 2194 struct hifn_softc *sc = arg; 2195 int session; 2196 u_int32_t sid = CRYPTO_SESID2LID(tid); 2197 2198 KASSERT(sc != NULL, ("hifn_freesession: null softc")); 2199 if (sc == NULL) 2200 return (EINVAL); 2201 2202 session = HIFN_SESSION(sid); 2203 if (session >= sc->sc_maxses) 2204 return (EINVAL); 2205 2206 bzero(&sc->sc_sessions[session], sizeof(sc->sc_sessions[session])); 2207 return (0); 2208} 2209 2210static int 2211hifn_process(void *arg, struct cryptop *crp, int hint) 2212{ 2213 struct hifn_softc *sc = arg; 2214 struct hifn_command *cmd = NULL; 2215 int session, err; 2216 struct cryptodesc *crd1, *crd2, *maccrd, *enccrd; 2217 2218 if (crp == NULL || crp->crp_callback == NULL) { 2219 hifnstats.hst_invalid++; 2220 return (EINVAL); 2221 } 2222 session = HIFN_SESSION(crp->crp_sid); 2223 2224 if (sc == NULL || session >= sc->sc_maxses) { 2225 err = EINVAL; 2226 goto errout; 2227 } 2228 2229 cmd = malloc(sizeof(struct hifn_command), M_DEVBUF, M_NOWAIT | M_ZERO); 2230 if (cmd == NULL) { 2231 hifnstats.hst_nomem++; 2232 err = ENOMEM; 2233 goto errout; 2234 } 2235 2236 if (crp->crp_flags & CRYPTO_F_IMBUF) { 2237 cmd->src_m = (struct mbuf *)crp->crp_buf; 2238 cmd->dst_m = (struct mbuf *)crp->crp_buf; 2239 } else if (crp->crp_flags & CRYPTO_F_IOV) { 2240 cmd->src_io = (struct uio *)crp->crp_buf; 2241 cmd->dst_io = (struct uio *)crp->crp_buf; 2242 } else { 2243 err = EINVAL; 2244 goto errout; /* XXX we don't handle contiguous buffers! 
*/ 2245 } 2246 2247 crd1 = crp->crp_desc; 2248 if (crd1 == NULL) { 2249 err = EINVAL; 2250 goto errout; 2251 } 2252 crd2 = crd1->crd_next; 2253 2254 if (crd2 == NULL) { 2255 if (crd1->crd_alg == CRYPTO_MD5_HMAC || 2256 crd1->crd_alg == CRYPTO_SHA1_HMAC || 2257 crd1->crd_alg == CRYPTO_SHA1 || 2258 crd1->crd_alg == CRYPTO_MD5) { 2259 maccrd = crd1; 2260 enccrd = NULL; 2261 } else if (crd1->crd_alg == CRYPTO_DES_CBC || 2262 crd1->crd_alg == CRYPTO_3DES_CBC || 2263 crd1->crd_alg == CRYPTO_ARC4) { 2264 if ((crd1->crd_flags & CRD_F_ENCRYPT) == 0) 2265 cmd->base_masks |= HIFN_BASE_CMD_DECODE; 2266 maccrd = NULL; 2267 enccrd = crd1; 2268 } else { 2269 err = EINVAL; 2270 goto errout; 2271 } 2272 } else { 2273 if ((crd1->crd_alg == CRYPTO_MD5_HMAC || 2274 crd1->crd_alg == CRYPTO_SHA1_HMAC || 2275 crd1->crd_alg == CRYPTO_MD5 || 2276 crd1->crd_alg == CRYPTO_SHA1) && 2277 (crd2->crd_alg == CRYPTO_DES_CBC || 2278 crd2->crd_alg == CRYPTO_3DES_CBC || 2279 crd2->crd_alg == CRYPTO_ARC4) && 2280 ((crd2->crd_flags & CRD_F_ENCRYPT) == 0)) { 2281 cmd->base_masks = HIFN_BASE_CMD_DECODE; 2282 maccrd = crd1; 2283 enccrd = crd2; 2284 } else if ((crd1->crd_alg == CRYPTO_DES_CBC || 2285 crd1->crd_alg == CRYPTO_ARC4 || 2286 crd1->crd_alg == CRYPTO_3DES_CBC) && 2287 (crd2->crd_alg == CRYPTO_MD5_HMAC || 2288 crd2->crd_alg == CRYPTO_SHA1_HMAC || 2289 crd2->crd_alg == CRYPTO_MD5 || 2290 crd2->crd_alg == CRYPTO_SHA1) && 2291 (crd1->crd_flags & CRD_F_ENCRYPT)) { 2292 enccrd = crd1; 2293 maccrd = crd2; 2294 } else { 2295 /* 2296 * We cannot order the 7751 as requested 2297 */ 2298 err = EINVAL; 2299 goto errout; 2300 } 2301 } 2302 2303 if (enccrd) { 2304 cmd->enccrd = enccrd; 2305 cmd->base_masks |= HIFN_BASE_CMD_CRYPT; 2306 switch (enccrd->crd_alg) { 2307 case CRYPTO_ARC4: 2308 cmd->cry_masks |= HIFN_CRYPT_CMD_ALG_RC4; 2309 if ((enccrd->crd_flags & CRD_F_ENCRYPT) 2310 != sc->sc_sessions[session].hs_prev_op) 2311 sc->sc_sessions[session].hs_state = 2312 HS_STATE_USED; 2313 break; 2314 case 
CRYPTO_DES_CBC: 2315 cmd->cry_masks |= HIFN_CRYPT_CMD_ALG_DES | 2316 HIFN_CRYPT_CMD_MODE_CBC | 2317 HIFN_CRYPT_CMD_NEW_IV; 2318 break; 2319 case CRYPTO_3DES_CBC: 2320 cmd->cry_masks |= HIFN_CRYPT_CMD_ALG_3DES | 2321 HIFN_CRYPT_CMD_MODE_CBC | 2322 HIFN_CRYPT_CMD_NEW_IV; 2323 break; 2324 default: 2325 err = EINVAL; 2326 goto errout; 2327 } 2328 if (enccrd->crd_alg != CRYPTO_ARC4) { 2329 if (enccrd->crd_flags & CRD_F_ENCRYPT) { 2330 if (enccrd->crd_flags & CRD_F_IV_EXPLICIT) 2331 bcopy(enccrd->crd_iv, cmd->iv, 2332 HIFN_IV_LENGTH); 2333 else 2334 bcopy(sc->sc_sessions[session].hs_iv, 2335 cmd->iv, HIFN_IV_LENGTH); 2336 2337 if ((enccrd->crd_flags & CRD_F_IV_PRESENT) 2338 == 0) { 2339 if (crp->crp_flags & CRYPTO_F_IMBUF) 2340 m_copyback(cmd->src_m, 2341 enccrd->crd_inject, 2342 HIFN_IV_LENGTH, cmd->iv); 2343 else if (crp->crp_flags & CRYPTO_F_IOV) 2344 cuio_copyback(cmd->src_io, 2345 enccrd->crd_inject, 2346 HIFN_IV_LENGTH, cmd->iv); 2347 } 2348 } else { 2349 if (enccrd->crd_flags & CRD_F_IV_EXPLICIT) 2350 bcopy(enccrd->crd_iv, cmd->iv, 2351 HIFN_IV_LENGTH); 2352 else if (crp->crp_flags & CRYPTO_F_IMBUF) 2353 m_copydata(cmd->src_m, 2354 enccrd->crd_inject, 2355 HIFN_IV_LENGTH, cmd->iv); 2356 else if (crp->crp_flags & CRYPTO_F_IOV) 2357 cuio_copydata(cmd->src_io, 2358 enccrd->crd_inject, 2359 HIFN_IV_LENGTH, cmd->iv); 2360 } 2361 } 2362 2363 cmd->ck = enccrd->crd_key; 2364 cmd->cklen = enccrd->crd_klen >> 3; 2365 2366 if (sc->sc_sessions[session].hs_state == HS_STATE_USED) 2367 cmd->cry_masks |= HIFN_CRYPT_CMD_NEW_KEY; 2368 } 2369 2370 if (maccrd) { 2371 cmd->maccrd = maccrd; 2372 cmd->base_masks |= HIFN_BASE_CMD_MAC; 2373 2374 switch (maccrd->crd_alg) { 2375 case CRYPTO_MD5: 2376 cmd->mac_masks |= HIFN_MAC_CMD_ALG_MD5 | 2377 HIFN_MAC_CMD_RESULT | HIFN_MAC_CMD_MODE_HASH | 2378 HIFN_MAC_CMD_POS_IPSEC; 2379 break; 2380 case CRYPTO_MD5_HMAC: 2381 cmd->mac_masks |= HIFN_MAC_CMD_ALG_MD5 | 2382 HIFN_MAC_CMD_RESULT | HIFN_MAC_CMD_MODE_HMAC | 2383 HIFN_MAC_CMD_POS_IPSEC | 
HIFN_MAC_CMD_TRUNC; 2384 break; 2385 case CRYPTO_SHA1: 2386 cmd->mac_masks |= HIFN_MAC_CMD_ALG_SHA1 | 2387 HIFN_MAC_CMD_RESULT | HIFN_MAC_CMD_MODE_HASH | 2388 HIFN_MAC_CMD_POS_IPSEC; 2389 break; 2390 case CRYPTO_SHA1_HMAC: 2391 cmd->mac_masks |= HIFN_MAC_CMD_ALG_SHA1 | 2392 HIFN_MAC_CMD_RESULT | HIFN_MAC_CMD_MODE_HMAC | 2393 HIFN_MAC_CMD_POS_IPSEC | HIFN_MAC_CMD_TRUNC; 2394 break; 2395 } 2396 2397 if ((maccrd->crd_alg == CRYPTO_SHA1_HMAC || 2398 maccrd->crd_alg == CRYPTO_MD5_HMAC) && 2399 sc->sc_sessions[session].hs_state == HS_STATE_USED) { 2400 cmd->mac_masks |= HIFN_MAC_CMD_NEW_KEY; 2401 bcopy(maccrd->crd_key, cmd->mac, maccrd->crd_klen >> 3); 2402 bzero(cmd->mac + (maccrd->crd_klen >> 3), 2403 HIFN_MAC_KEY_LENGTH - (maccrd->crd_klen >> 3)); 2404 } 2405 } 2406 2407 cmd->crp = crp; 2408 cmd->session_num = session; 2409 cmd->softc = sc; 2410 2411 err = hifn_crypto(sc, cmd, crp, hint); 2412 if (!err) { 2413 if (enccrd) 2414 sc->sc_sessions[session].hs_prev_op = 2415 enccrd->crd_flags & CRD_F_ENCRYPT; 2416 if (sc->sc_sessions[session].hs_state == HS_STATE_USED) 2417 sc->sc_sessions[session].hs_state = HS_STATE_KEY; 2418 return 0; 2419 } else if (err == ERESTART) { 2420 /* 2421 * There weren't enough resources to dispatch the request 2422 * to the part. Notify the caller so they'll requeue this 2423 * request and resubmit it again soon. 
2424 */ 2425#ifdef HIFN_DEBUG 2426 if (hifn_debug) 2427 device_printf(sc->sc_dev, "requeue request\n"); 2428#endif 2429 free(cmd, M_DEVBUF); 2430 sc->sc_needwakeup |= CRYPTO_SYMQ; 2431 return (err); 2432 } 2433 2434errout: 2435 if (cmd != NULL) 2436 free(cmd, M_DEVBUF); 2437 if (err == EINVAL) 2438 hifnstats.hst_invalid++; 2439 else 2440 hifnstats.hst_nomem++; 2441 crp->crp_etype = err; 2442 crypto_done(crp); 2443 return (err); 2444} 2445 2446static void 2447hifn_abort(struct hifn_softc *sc) 2448{ 2449 struct hifn_dma *dma = sc->sc_dma; 2450 struct hifn_command *cmd; 2451 struct cryptop *crp; 2452 int i, u; 2453 2454 i = dma->resk; u = dma->resu; 2455 while (u != 0) { 2456 cmd = dma->hifn_commands[i]; 2457 KASSERT(cmd != NULL, ("hifn_abort: null command slot %u", i)); 2458 dma->hifn_commands[i] = NULL; 2459 crp = cmd->crp; 2460 2461 if ((dma->resr[i].l & htole32(HIFN_D_VALID)) == 0) { 2462 /* Salvage what we can. */ 2463 u_int8_t *macbuf; 2464 2465 if (cmd->base_masks & HIFN_BASE_CMD_MAC) { 2466 macbuf = dma->result_bufs[i]; 2467 macbuf += 12; 2468 } else 2469 macbuf = NULL; 2470 hifnstats.hst_opackets++; 2471 hifn_callback(sc, cmd, macbuf); 2472 } else { 2473 if (cmd->src_map == cmd->dst_map) { 2474 bus_dmamap_sync(sc->sc_dmat, cmd->src_map, 2475 BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE); 2476 } else { 2477 bus_dmamap_sync(sc->sc_dmat, cmd->src_map, 2478 BUS_DMASYNC_POSTWRITE); 2479 bus_dmamap_sync(sc->sc_dmat, cmd->dst_map, 2480 BUS_DMASYNC_POSTREAD); 2481 } 2482 2483 if (cmd->src_m != cmd->dst_m) { 2484 m_freem(cmd->src_m); 2485 crp->crp_buf = (caddr_t)cmd->dst_m; 2486 } 2487 2488 /* non-shared buffers cannot be restarted */ 2489 if (cmd->src_map != cmd->dst_map) { 2490 /* 2491 * XXX should be EAGAIN, delayed until 2492 * after the reset. 
2493 */ 2494 crp->crp_etype = ENOMEM; 2495 bus_dmamap_unload(sc->sc_dmat, cmd->dst_map); 2496 bus_dmamap_destroy(sc->sc_dmat, cmd->dst_map); 2497 } else 2498 crp->crp_etype = ENOMEM; 2499 2500 bus_dmamap_unload(sc->sc_dmat, cmd->src_map); 2501 bus_dmamap_destroy(sc->sc_dmat, cmd->src_map); 2502 2503 free(cmd, M_DEVBUF); 2504 if (crp->crp_etype != EAGAIN) 2505 crypto_done(crp); 2506 } 2507 2508 if (++i == HIFN_D_RES_RSIZE) 2509 i = 0; 2510 u--; 2511 } 2512 dma->resk = i; dma->resu = u; 2513 2514 /* Force upload of key next time */ 2515 for (i = 0; i < sc->sc_maxses; i++) 2516 if (sc->sc_sessions[i].hs_state == HS_STATE_KEY) 2517 sc->sc_sessions[i].hs_state = HS_STATE_USED; 2518 2519 hifn_reset_board(sc, 1); 2520 hifn_init_dma(sc); 2521 hifn_init_pci_registers(sc); 2522} 2523 2524static void 2525hifn_callback(struct hifn_softc *sc, struct hifn_command *cmd, u_int8_t *macbuf) 2526{ 2527 struct hifn_dma *dma = sc->sc_dma; 2528 struct cryptop *crp = cmd->crp; 2529 struct cryptodesc *crd; 2530 struct mbuf *m; 2531 int totlen, i, u; 2532 2533 if (cmd->src_map == cmd->dst_map) { 2534 bus_dmamap_sync(sc->sc_dmat, cmd->src_map, 2535 BUS_DMASYNC_POSTWRITE | BUS_DMASYNC_POSTREAD); 2536 } else { 2537 bus_dmamap_sync(sc->sc_dmat, cmd->src_map, 2538 BUS_DMASYNC_POSTWRITE); 2539 bus_dmamap_sync(sc->sc_dmat, cmd->dst_map, 2540 BUS_DMASYNC_POSTREAD); 2541 } 2542 2543 if (crp->crp_flags & CRYPTO_F_IMBUF) { 2544 if (cmd->src_m != cmd->dst_m) { 2545 crp->crp_buf = (caddr_t)cmd->dst_m; 2546 totlen = cmd->src_mapsize; 2547 for (m = cmd->dst_m; m != NULL; m = m->m_next) { 2548 if (totlen < m->m_len) { 2549 m->m_len = totlen; 2550 totlen = 0; 2551 } else 2552 totlen -= m->m_len; 2553 } 2554 cmd->dst_m->m_pkthdr.len = cmd->src_m->m_pkthdr.len; 2555 m_freem(cmd->src_m); 2556 } 2557 } 2558 2559 if (cmd->sloplen != 0) { 2560 if (crp->crp_flags & CRYPTO_F_IMBUF) 2561 m_copyback((struct mbuf *)crp->crp_buf, 2562 cmd->src_mapsize - cmd->sloplen, 2563 cmd->sloplen, 
(caddr_t)&dma->slop[cmd->slopidx]); 2564 else if (crp->crp_flags & CRYPTO_F_IOV) 2565 cuio_copyback((struct uio *)crp->crp_buf, 2566 cmd->src_mapsize - cmd->sloplen, 2567 cmd->sloplen, (caddr_t)&dma->slop[cmd->slopidx]); 2568 } 2569 2570 i = dma->dstk; u = dma->dstu; 2571 while (u != 0) { 2572 if (i == HIFN_D_DST_RSIZE) 2573 i = 0; 2574 bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap, 2575 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); 2576 if (dma->dstr[i].l & htole32(HIFN_D_VALID)) { 2577 bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap, 2578 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 2579 break; 2580 } 2581 i++, u--; 2582 } 2583 dma->dstk = i; dma->dstu = u; 2584 2585 hifnstats.hst_obytes += cmd->dst_mapsize; 2586 2587 if ((cmd->base_masks & (HIFN_BASE_CMD_CRYPT | HIFN_BASE_CMD_DECODE)) == 2588 HIFN_BASE_CMD_CRYPT) { 2589 for (crd = crp->crp_desc; crd; crd = crd->crd_next) { 2590 if (crd->crd_alg != CRYPTO_DES_CBC && 2591 crd->crd_alg != CRYPTO_3DES_CBC) 2592 continue; 2593 if (crp->crp_flags & CRYPTO_F_IMBUF) 2594 m_copydata((struct mbuf *)crp->crp_buf, 2595 crd->crd_skip + crd->crd_len - HIFN_IV_LENGTH, 2596 HIFN_IV_LENGTH, 2597 cmd->softc->sc_sessions[cmd->session_num].hs_iv); 2598 else if (crp->crp_flags & CRYPTO_F_IOV) { 2599 cuio_copydata((struct uio *)crp->crp_buf, 2600 crd->crd_skip + crd->crd_len - HIFN_IV_LENGTH, 2601 HIFN_IV_LENGTH, 2602 cmd->softc->sc_sessions[cmd->session_num].hs_iv); 2603 } 2604 break; 2605 } 2606 } 2607 2608 if (macbuf != NULL) { 2609 for (crd = crp->crp_desc; crd; crd = crd->crd_next) { 2610 int len; 2611 2612 if (crd->crd_alg == CRYPTO_MD5) 2613 len = 16; 2614 else if (crd->crd_alg == CRYPTO_SHA1) 2615 len = 20; 2616 else if (crd->crd_alg == CRYPTO_MD5_HMAC || 2617 crd->crd_alg == CRYPTO_SHA1_HMAC) 2618 len = 12; 2619 else 2620 continue; 2621 2622 if (crp->crp_flags & CRYPTO_F_IMBUF) 2623 m_copyback((struct mbuf *)crp->crp_buf, 2624 crd->crd_inject, len, macbuf); 2625 else if ((crp->crp_flags & CRYPTO_F_IOV) && crp->crp_mac) 2626 
bcopy((caddr_t)macbuf, crp->crp_mac, len); 2627 break; 2628 } 2629 } 2630 2631 if (cmd->src_map != cmd->dst_map) { 2632 bus_dmamap_unload(sc->sc_dmat, cmd->dst_map); 2633 bus_dmamap_destroy(sc->sc_dmat, cmd->dst_map); 2634 } 2635 bus_dmamap_unload(sc->sc_dmat, cmd->src_map); 2636 bus_dmamap_destroy(sc->sc_dmat, cmd->src_map); 2637 free(cmd, M_DEVBUF); 2638 crypto_done(crp); 2639} 2640 2641/* 2642 * 7811 PB3 rev/2 parts lock-up on burst writes to Group 0 2643 * and Group 1 registers; avoid conditions that could create 2644 * burst writes by doing a read in between the writes. 2645 * 2646 * NB: The read we interpose is always to the same register; 2647 * we do this because reading from an arbitrary (e.g. last) 2648 * register may not always work. 2649 */ 2650static void 2651hifn_write_reg_0(struct hifn_softc *sc, bus_size_t reg, u_int32_t val) 2652{ 2653 if (sc->sc_flags & HIFN_IS_7811) { 2654 if (sc->sc_bar0_lastreg == reg - 4) 2655 bus_space_read_4(sc->sc_st0, sc->sc_sh0, HIFN_0_PUCNFG); 2656 sc->sc_bar0_lastreg = reg; 2657 } 2658 bus_space_write_4(sc->sc_st0, sc->sc_sh0, reg, val); 2659} 2660 2661static void 2662hifn_write_reg_1(struct hifn_softc *sc, bus_size_t reg, u_int32_t val) 2663{ 2664 if (sc->sc_flags & HIFN_IS_7811) { 2665 if (sc->sc_bar1_lastreg == reg - 4) 2666 bus_space_read_4(sc->sc_st1, sc->sc_sh1, HIFN_1_REVID); 2667 sc->sc_bar1_lastreg = reg; 2668 } 2669 bus_space_write_4(sc->sc_st1, sc->sc_sh1, reg, val); 2670} 2671