safe.c revision 158705
/*-
 * Copyright (c) 2003 Sam Leffler, Errno Consulting
 * Copyright (c) 2003 Global Technology Associates, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/dev/safe/safe.c 158705 2006-05-17 18:34:26Z pjd $");

/*
 * SafeNet SafeXcel-1141 hardware crypto accelerator
 */
#include "opt_safe.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/errno.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/mbuf.h>
#include <sys/module.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/sysctl.h>
#include <sys/endian.h>

#include <vm/vm.h>
#include <vm/pmap.h>

#include <machine/bus.h>
#include <machine/resource.h>
#include <sys/bus.h>
#include <sys/rman.h>

#include <crypto/sha1.h>
#include <opencrypto/cryptodev.h>
#include <opencrypto/cryptosoft.h>
#include <sys/md5.h>
#include <sys/random.h>

#include <dev/pci/pcivar.h>
#include <dev/pci/pcireg.h>

#ifdef SAFE_RNDTEST
#include <dev/rndtest/rndtest.h>
#endif
#include <dev/safe/safereg.h>
#include <dev/safe/safevar.h>

#ifndef bswap32
#define	bswap32	NTOHL
#endif

/*
 * Prototypes and count for the pci_device structure
 */
static	int safe_probe(device_t);
static	int safe_attach(device_t);
static	int safe_detach(device_t);
static	int safe_suspend(device_t);
static	int safe_resume(device_t);
static	void safe_shutdown(device_t);

static device_method_t safe_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		safe_probe),
	DEVMETHOD(device_attach,	safe_attach),
	DEVMETHOD(device_detach,	safe_detach),
	DEVMETHOD(device_suspend,	safe_suspend),
	DEVMETHOD(device_resume,	safe_resume),
	DEVMETHOD(device_shutdown,	safe_shutdown),

	/* bus interface */
	DEVMETHOD(bus_print_child,	bus_generic_print_child),
	DEVMETHOD(bus_driver_added,	bus_generic_driver_added),

	{ 0, 0 }
};
static driver_t safe_driver = {
	"safe",
	safe_methods,
	sizeof (struct safe_softc)
};
static devclass_t safe_devclass;

DRIVER_MODULE(safe, pci, safe_driver, safe_devclass, 0, 0);
MODULE_DEPEND(safe, crypto, 1, 1, 1);
#ifdef SAFE_RNDTEST
MODULE_DEPEND(safe, rndtest, 1, 1, 1);
#endif
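/*
 * NB: with the glue above the driver can either be compiled into the
 * kernel (a "device safe" line in the config file) or loaded as the
 * safe.ko module; the MODULE_DEPEND entries make the opencrypto
 * framework (and rndtest, when SAFE_RNDTEST is configured) available
 * before attach runs.
 */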

static	void safe_intr(void *);
static	int safe_newsession(void *, u_int32_t *, struct cryptoini *);
static	int safe_freesession(void *, u_int64_t);
static	int safe_process(void *, struct cryptop *, int);
static	void safe_callback(struct safe_softc *, struct safe_ringentry *);
static	void safe_feed(struct safe_softc *, struct safe_ringentry *);
static	void safe_mcopy(struct mbuf *, struct mbuf *, u_int);
#ifndef SAFE_NO_RNG
static	void safe_rng_init(struct safe_softc *);
static	void safe_rng(void *);
#endif /* SAFE_NO_RNG */
static	int safe_dma_malloc(struct safe_softc *, bus_size_t,
		struct safe_dma_alloc *, int);
#define	safe_dma_sync(_dma, _flags) \
	bus_dmamap_sync((_dma)->dma_tag, (_dma)->dma_map, (_flags))
static	void safe_dma_free(struct safe_softc *, struct safe_dma_alloc *);
static	int safe_dmamap_aligned(const struct safe_operand *);
static	int safe_dmamap_uniform(const struct safe_operand *);

static	void safe_reset_board(struct safe_softc *);
static	void safe_init_board(struct safe_softc *);
static	void safe_init_pciregs(device_t dev);
static	void safe_cleanchip(struct safe_softc *);
static	void safe_totalreset(struct safe_softc *);

static	int safe_free_entry(struct safe_softc *, struct safe_ringentry *);

SYSCTL_NODE(_hw, OID_AUTO, safe, CTLFLAG_RD, 0, "SafeNet driver parameters");

#ifdef SAFE_DEBUG
static	void safe_dump_dmastatus(struct safe_softc *, const char *);
static	void safe_dump_ringstate(struct safe_softc *, const char *);
static	void safe_dump_intrstate(struct safe_softc *, const char *);
static	void safe_dump_request(struct safe_softc *, const char *,
		struct safe_ringentry *);

static	struct safe_softc *safec;	/* for use by hw.safe.dump */

static	int safe_debug = 0;
SYSCTL_INT(_hw_safe, OID_AUTO, debug, CTLFLAG_RW, &safe_debug,
	    0, "control debugging msgs");
#define	DPRINTF(_x)	if (safe_debug) printf _x
#else
#define	DPRINTF(_x)
#endif

#define	READ_REG(sc,r) \
	bus_space_read_4((sc)->sc_st, (sc)->sc_sh, (r))

#define WRITE_REG(sc,reg,val) \
	bus_space_write_4((sc)->sc_st, (sc)->sc_sh, reg, val)

struct safe_stats safestats;
SYSCTL_STRUCT(_hw_safe, OID_AUTO, stats, CTLFLAG_RD, &safestats,
	    safe_stats, "driver statistics");
#ifndef SAFE_NO_RNG
static	int safe_rnginterval = 1;	/* poll once a second */
SYSCTL_INT(_hw_safe, OID_AUTO, rnginterval, CTLFLAG_RW, &safe_rnginterval,
	    0, "RNG polling interval (secs)");
static	int safe_rngbufsize = 16;	/* 64 bytes each poll */
SYSCTL_INT(_hw_safe, OID_AUTO, rngbufsize, CTLFLAG_RW, &safe_rngbufsize,
	    0, "RNG polling buffer size (32-bit words)");
static	int safe_rngmaxalarm = 8;	/* max alarms before reset */
SYSCTL_INT(_hw_safe, OID_AUTO, rngmaxalarm, CTLFLAG_RW, &safe_rngmaxalarm,
	    0, "RNG max alarms before reset");
#endif /* SAFE_NO_RNG */

static int
safe_probe(device_t dev)
{
	if (pci_get_vendor(dev) == PCI_VENDOR_SAFENET &&
	    pci_get_device(dev) == PCI_PRODUCT_SAFEXCEL)
		return (BUS_PROBE_DEFAULT);
	return (ENXIO);
}

"SafeNet unknown-part"; 200 } 201 return "Unknown-vendor unknown-part"; 202} 203 204#ifndef SAFE_NO_RNG 205static void 206default_harvest(struct rndtest_state *rsp, void *buf, u_int count) 207{ 208 random_harvest(buf, count, count*NBBY, 0, RANDOM_PURE); 209} 210#endif /* SAFE_NO_RNG */ 211 212static int 213safe_attach(device_t dev) 214{ 215 struct safe_softc *sc = device_get_softc(dev); 216 u_int32_t raddr; 217 u_int32_t cmd, i, devinfo; 218 int rid; 219 220 bzero(sc, sizeof (*sc)); 221 sc->sc_dev = dev; 222 223 /* XXX handle power management */ 224 225 cmd = pci_read_config(dev, PCIR_COMMAND, 4); 226 cmd |= PCIM_CMD_MEMEN | PCIM_CMD_BUSMASTEREN; 227 pci_write_config(dev, PCIR_COMMAND, cmd, 4); 228 cmd = pci_read_config(dev, PCIR_COMMAND, 4); 229 230 if (!(cmd & PCIM_CMD_MEMEN)) { 231 device_printf(dev, "failed to enable memory mapping\n"); 232 goto bad; 233 } 234 235 if (!(cmd & PCIM_CMD_BUSMASTEREN)) { 236 device_printf(dev, "failed to enable bus mastering\n"); 237 goto bad; 238 } 239 240 /* 241 * Setup memory-mapping of PCI registers. 242 */ 243 rid = BS_BAR; 244 sc->sc_sr = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid, 245 RF_ACTIVE); 246 if (sc->sc_sr == NULL) { 247 device_printf(dev, "cannot map register space\n"); 248 goto bad; 249 } 250 sc->sc_st = rman_get_bustag(sc->sc_sr); 251 sc->sc_sh = rman_get_bushandle(sc->sc_sr); 252 253 /* 254 * Arrange interrupt line. 255 */ 256 rid = 0; 257 sc->sc_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, 258 RF_SHAREABLE|RF_ACTIVE); 259 if (sc->sc_irq == NULL) { 260 device_printf(dev, "could not map interrupt\n"); 261 goto bad1; 262 } 263 /* 264 * NB: Network code assumes we are blocked with splimp() 265 * so make sure the IRQ is mapped appropriately. 266 */ 267 if (bus_setup_intr(dev, sc->sc_irq, INTR_TYPE_NET | INTR_MPSAFE, 268 safe_intr, sc, &sc->sc_ih)) { 269 device_printf(dev, "could not establish interrupt\n"); 270 goto bad2; 271 } 272 273 sc->sc_cid = crypto_get_driverid(0); 274 if (sc->sc_cid < 0) { 275 device_printf(dev, "could not get crypto driver id\n"); 276 goto bad3; 277 } 278 279 sc->sc_chiprev = READ_REG(sc, SAFE_DEVINFO) & 280 (SAFE_DEVINFO_REV_MAJ | SAFE_DEVINFO_REV_MIN); 281 282 /* 283 * Setup DMA descriptor area. 284 */ 285 if (bus_dma_tag_create(NULL, /* parent */ 286 1, /* alignment */ 287 SAFE_DMA_BOUNDARY, /* boundary */ 288 BUS_SPACE_MAXADDR_32BIT, /* lowaddr */ 289 BUS_SPACE_MAXADDR, /* highaddr */ 290 NULL, NULL, /* filter, filterarg */ 291 SAFE_MAX_DMA, /* maxsize */ 292 SAFE_MAX_PART, /* nsegments */ 293 SAFE_MAX_SSIZE, /* maxsegsize */ 294 BUS_DMA_ALLOCNOW, /* flags */ 295 NULL, NULL, /* locking */ 296 &sc->sc_srcdmat)) { 297 device_printf(dev, "cannot allocate DMA tag\n"); 298 goto bad4; 299 } 300 if (bus_dma_tag_create(NULL, /* parent */ 301 sizeof(u_int32_t), /* alignment */ 302 SAFE_MAX_DSIZE, /* boundary */ 303 BUS_SPACE_MAXADDR_32BIT, /* lowaddr */ 304 BUS_SPACE_MAXADDR, /* highaddr */ 305 NULL, NULL, /* filter, filterarg */ 306 SAFE_MAX_DMA, /* maxsize */ 307 SAFE_MAX_PART, /* nsegments */ 308 SAFE_MAX_DSIZE, /* maxsegsize */ 309 BUS_DMA_ALLOCNOW, /* flags */ 310 NULL, NULL, /* locking */ 311 &sc->sc_dstdmat)) { 312 device_printf(dev, "cannot allocate DMA tag\n"); 313 goto bad4; 314 } 315 316 /* 317 * Allocate packet engine descriptors. 
	/*
	 * Allocate packet engine descriptors.
	 */
	if (safe_dma_malloc(sc,
	    SAFE_MAX_NQUEUE * sizeof (struct safe_ringentry),
	    &sc->sc_ringalloc, 0)) {
		device_printf(dev, "cannot allocate PE descriptor ring\n");
		bus_dma_tag_destroy(sc->sc_srcdmat);
		goto bad4;
	}
	/*
	 * Hookup the static portion of all our data structures.
	 */
	sc->sc_ring = (struct safe_ringentry *) sc->sc_ringalloc.dma_vaddr;
	sc->sc_ringtop = sc->sc_ring + SAFE_MAX_NQUEUE;
	sc->sc_front = sc->sc_ring;
	sc->sc_back = sc->sc_ring;
	raddr = sc->sc_ringalloc.dma_paddr;
	bzero(sc->sc_ring, SAFE_MAX_NQUEUE * sizeof(struct safe_ringentry));
	for (i = 0; i < SAFE_MAX_NQUEUE; i++) {
		struct safe_ringentry *re = &sc->sc_ring[i];

		re->re_desc.d_sa = raddr +
			offsetof(struct safe_ringentry, re_sa);
		re->re_sa.sa_staterec = raddr +
			offsetof(struct safe_ringentry, re_sastate);

		raddr += sizeof (struct safe_ringentry);
	}
	mtx_init(&sc->sc_ringmtx, device_get_nameunit(dev),
		"packet engine ring", MTX_DEF);

	/*
	 * Allocate scatter and gather particle descriptors.
	 */
	if (safe_dma_malloc(sc, SAFE_TOTAL_SPART * sizeof (struct safe_pdesc),
	    &sc->sc_spalloc, 0)) {
		device_printf(dev, "cannot allocate source particle "
			"descriptor ring\n");
		mtx_destroy(&sc->sc_ringmtx);
		safe_dma_free(sc, &sc->sc_ringalloc);
		bus_dma_tag_destroy(sc->sc_srcdmat);
		goto bad4;
	}
	sc->sc_spring = (struct safe_pdesc *) sc->sc_spalloc.dma_vaddr;
	sc->sc_springtop = sc->sc_spring + SAFE_TOTAL_SPART;
	sc->sc_spfree = sc->sc_spring;
	bzero(sc->sc_spring, SAFE_TOTAL_SPART * sizeof(struct safe_pdesc));

	if (safe_dma_malloc(sc, SAFE_TOTAL_DPART * sizeof (struct safe_pdesc),
	    &sc->sc_dpalloc, 0)) {
		device_printf(dev, "cannot allocate destination particle "
			"descriptor ring\n");
		mtx_destroy(&sc->sc_ringmtx);
		safe_dma_free(sc, &sc->sc_spalloc);
		safe_dma_free(sc, &sc->sc_ringalloc);
		bus_dma_tag_destroy(sc->sc_dstdmat);
		goto bad4;
	}
	sc->sc_dpring = (struct safe_pdesc *) sc->sc_dpalloc.dma_vaddr;
	sc->sc_dpringtop = sc->sc_dpring + SAFE_TOTAL_DPART;
	sc->sc_dpfree = sc->sc_dpring;
	bzero(sc->sc_dpring, SAFE_TOTAL_DPART * sizeof(struct safe_pdesc));

	device_printf(sc->sc_dev, "%s", safe_partname(sc));

	devinfo = READ_REG(sc, SAFE_DEVINFO);
	if (devinfo & SAFE_DEVINFO_RNG) {
		sc->sc_flags |= SAFE_FLAGS_RNG;
		printf(" rng");
	}
	if (devinfo & SAFE_DEVINFO_PKEY) {
#if 0
		printf(" key");
		sc->sc_flags |= SAFE_FLAGS_KEY;
		crypto_kregister(sc->sc_cid, CRK_MOD_EXP, 0,
			safe_kprocess, sc);
		crypto_kregister(sc->sc_cid, CRK_MOD_EXP_CRT, 0,
			safe_kprocess, sc);
#endif
	}
	if (devinfo & SAFE_DEVINFO_DES) {
		printf(" des/3des");
		crypto_register(sc->sc_cid, CRYPTO_3DES_CBC, 0, 0,
			safe_newsession, safe_freesession, safe_process, sc);
		crypto_register(sc->sc_cid, CRYPTO_DES_CBC, 0, 0,
			safe_newsession, safe_freesession, safe_process, sc);
	}
	if (devinfo & SAFE_DEVINFO_AES) {
		printf(" aes");
		crypto_register(sc->sc_cid, CRYPTO_AES_CBC, 0, 0,
			safe_newsession, safe_freesession, safe_process, sc);
	}
	if (devinfo & SAFE_DEVINFO_MD5) {
		printf(" md5");
		crypto_register(sc->sc_cid, CRYPTO_MD5_HMAC, 0, 0,
			safe_newsession, safe_freesession, safe_process, sc);
	}
	if (devinfo & SAFE_DEVINFO_SHA1) {
		printf(" sha1");
		crypto_register(sc->sc_cid, CRYPTO_SHA1_HMAC, 0, 0,
			safe_newsession, safe_freesession, safe_process, sc);
	}
	printf(" null");
	crypto_register(sc->sc_cid, CRYPTO_NULL_CBC, 0, 0,
		safe_newsession, safe_freesession, safe_process, sc);
	crypto_register(sc->sc_cid, CRYPTO_NULL_HMAC, 0, 0,
		safe_newsession, safe_freesession, safe_process, sc);
	/* XXX other supported algorithms */
	printf("\n");

	safe_reset_board(sc);		/* reset h/w */
	safe_init_pciregs(dev);		/* init pci settings */
	safe_init_board(sc);		/* init h/w */

#ifndef SAFE_NO_RNG
	if (sc->sc_flags & SAFE_FLAGS_RNG) {
#ifdef SAFE_RNDTEST
		sc->sc_rndtest = rndtest_attach(dev);
		if (sc->sc_rndtest)
			sc->sc_harvest = rndtest_harvest;
		else
			sc->sc_harvest = default_harvest;
#else
		sc->sc_harvest = default_harvest;
#endif
		safe_rng_init(sc);

		callout_init(&sc->sc_rngto, CALLOUT_MPSAFE);
		callout_reset(&sc->sc_rngto, hz*safe_rnginterval, safe_rng, sc);
	}
#endif /* SAFE_NO_RNG */
#ifdef SAFE_DEBUG
	safec = sc;			/* for use by hw.safe.dump */
#endif
	return (0);
bad4:
	crypto_unregister_all(sc->sc_cid);
bad3:
	bus_teardown_intr(dev, sc->sc_irq, sc->sc_ih);
bad2:
	bus_release_resource(dev, SYS_RES_IRQ, 0, sc->sc_irq);
bad1:
	bus_release_resource(dev, SYS_RES_MEMORY, BS_BAR, sc->sc_sr);
bad:
	return (ENXIO);
}

/*
 * Detach a device that successfully probed.
 */
static int
safe_detach(device_t dev)
{
	struct safe_softc *sc = device_get_softc(dev);

	/* XXX wait/abort active ops */

	WRITE_REG(sc, SAFE_HI_MASK, 0);		/* disable interrupts */

	callout_stop(&sc->sc_rngto);

	crypto_unregister_all(sc->sc_cid);

#ifdef SAFE_RNDTEST
	if (sc->sc_rndtest)
		rndtest_detach(sc->sc_rndtest);
#endif

	safe_cleanchip(sc);
	safe_dma_free(sc, &sc->sc_dpalloc);
	safe_dma_free(sc, &sc->sc_spalloc);
	mtx_destroy(&sc->sc_ringmtx);
	safe_dma_free(sc, &sc->sc_ringalloc);

	bus_generic_detach(dev);
	bus_teardown_intr(dev, sc->sc_irq, sc->sc_ih);
	bus_release_resource(dev, SYS_RES_IRQ, 0, sc->sc_irq);

	bus_dma_tag_destroy(sc->sc_srcdmat);
	bus_dma_tag_destroy(sc->sc_dstdmat);
	bus_release_resource(dev, SYS_RES_MEMORY, BS_BAR, sc->sc_sr);

	return (0);
}

/*
 * Stop all chip i/o so that the kernel's probe routines don't
 * get confused by errant DMAs when rebooting.
 */
static void
safe_shutdown(device_t dev)
{
#ifdef notyet
	safe_stop(device_get_softc(dev));
#endif
}

/*
 * Device suspend routine.
 */
static int
safe_suspend(device_t dev)
{
	struct safe_softc *sc = device_get_softc(dev);

#ifdef notyet
	/* XXX stop the device and save PCI settings */
#endif
	sc->sc_suspended = 1;

	return (0);
}

static int
safe_resume(device_t dev)
{
	struct safe_softc *sc = device_get_softc(dev);

#ifdef notyet
	/* XXX restore PCI settings and start the device */
#endif
	sc->sc_suspended = 0;
	return (0);
}

/*
 * SafeXcel Interrupt routine
 */
static void
safe_intr(void *arg)
{
	struct safe_softc *sc = arg;
	volatile u_int32_t stat;

	stat = READ_REG(sc, SAFE_HM_STAT);
	if (stat == 0)			/* shared irq, not for us */
		return;

	WRITE_REG(sc, SAFE_HI_CLR, stat);	/* IACK */

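	/*
	 * NB: SAFE_HM_STAT is the masked interrupt status, so on a shared
	 * line a foreign interrupt reads as zero above.  Acking the
	 * pending bits before scanning the ring means a completion that
	 * lands mid-scan should raise a fresh (level-triggered) interrupt
	 * rather than being lost.
	 */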
	if ((stat & SAFE_INT_PE_DDONE)) {
		/*
		 * Descriptor(s) done; scan the ring and
		 * process completed operations.
		 */
		mtx_lock(&sc->sc_ringmtx);
		while (sc->sc_back != sc->sc_front) {
			struct safe_ringentry *re = sc->sc_back;
#ifdef SAFE_DEBUG
			if (safe_debug) {
				safe_dump_ringstate(sc, __func__);
				safe_dump_request(sc, __func__, re);
			}
#endif
			/*
			 * safe_process marks ring entries that were allocated
			 * but not used with a csr of zero.  This ensures the
			 * ring front pointer never needs to be set backwards
			 * in the event that an entry is allocated but not used
			 * because of a setup error.
			 */
			if (re->re_desc.d_csr != 0) {
				if (!SAFE_PE_CSR_IS_DONE(re->re_desc.d_csr))
					break;
				if (!SAFE_PE_LEN_IS_DONE(re->re_desc.d_len))
					break;
				sc->sc_nqchip--;
				safe_callback(sc, re);
			}
			if (++(sc->sc_back) == sc->sc_ringtop)
				sc->sc_back = sc->sc_ring;
		}
		mtx_unlock(&sc->sc_ringmtx);
	}

	/*
	 * Check to see if we got any DMA Error
	 */
	if (stat & SAFE_INT_PE_ERROR) {
		DPRINTF(("dmaerr dmastat %08x\n",
			READ_REG(sc, SAFE_PE_DMASTAT)));
		safestats.st_dmaerr++;
		safe_totalreset(sc);
#if 0
		safe_feed(sc);
#endif
	}

	if (sc->sc_needwakeup) {		/* XXX check high watermark */
		int wakeup = sc->sc_needwakeup & (CRYPTO_SYMQ|CRYPTO_ASYMQ);
		DPRINTF(("%s: wakeup crypto %x\n", __func__,
			sc->sc_needwakeup));
		sc->sc_needwakeup &= ~wakeup;
		crypto_unblock(sc->sc_cid, wakeup);
	}
}

/*
 * safe_feed() - post a request to chip
 */
static void
safe_feed(struct safe_softc *sc, struct safe_ringentry *re)
{
	bus_dmamap_sync(sc->sc_srcdmat, re->re_src_map, BUS_DMASYNC_PREWRITE);
	if (re->re_dst_map != NULL)
		bus_dmamap_sync(sc->sc_dstdmat, re->re_dst_map,
			BUS_DMASYNC_PREREAD);
	/* XXX have no smaller granularity */
	safe_dma_sync(&sc->sc_ringalloc,
		BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	safe_dma_sync(&sc->sc_spalloc, BUS_DMASYNC_PREWRITE);
	safe_dma_sync(&sc->sc_dpalloc, BUS_DMASYNC_PREWRITE);

#ifdef SAFE_DEBUG
	if (safe_debug) {
		safe_dump_ringstate(sc, __func__);
		safe_dump_request(sc, __func__, re);
	}
#endif
	sc->sc_nqchip++;
	if (sc->sc_nqchip > safestats.st_maxqchip)
		safestats.st_maxqchip = sc->sc_nqchip;
	/* poke h/w to check descriptor ring, any value can be written */
	WRITE_REG(sc, SAFE_HI_RD_DESCR, 0);
}

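/*
 * Ring discipline sketch: safe_process allocates entries at sc_front
 * and safe_intr reaps completions at sc_back, with both pointers
 * wrapping at sc_ringtop.  The ring is full when the pointers meet
 * while sc_nqchip is non-zero and empty when they meet with sc_nqchip
 * zero; safe_process uses exactly this test before claiming an entry.
 */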
/*
 * Allocate a new 'session' and return an encoded session id.  'sidp'
 * contains our registration id, and should contain an encoded session
 * id on successful allocation.
 */
static int
safe_newsession(void *arg, u_int32_t *sidp, struct cryptoini *cri)
{
#define	N(a)	(sizeof(a) / sizeof (a[0]))
	struct cryptoini *c, *encini = NULL, *macini = NULL;
	struct safe_softc *sc = arg;
	struct safe_session *ses = NULL;
	MD5_CTX md5ctx;
	SHA1_CTX sha1ctx;
	int i, sesn;

	if (sidp == NULL || cri == NULL || sc == NULL)
		return (EINVAL);

	for (c = cri; c != NULL; c = c->cri_next) {
		if (c->cri_alg == CRYPTO_MD5_HMAC ||
		    c->cri_alg == CRYPTO_SHA1_HMAC ||
		    c->cri_alg == CRYPTO_NULL_HMAC) {
			if (macini)
				return (EINVAL);
			macini = c;
		} else if (c->cri_alg == CRYPTO_DES_CBC ||
		    c->cri_alg == CRYPTO_3DES_CBC ||
		    c->cri_alg == CRYPTO_AES_CBC ||
		    c->cri_alg == CRYPTO_NULL_CBC) {
			if (encini)
				return (EINVAL);
			encini = c;
		} else
			return (EINVAL);
	}
	if (encini == NULL && macini == NULL)
		return (EINVAL);
	if (encini) {			/* validate key length */
		switch (encini->cri_alg) {
		case CRYPTO_DES_CBC:
			if (encini->cri_klen != 64)
				return (EINVAL);
			break;
		case CRYPTO_3DES_CBC:
			if (encini->cri_klen != 192)
				return (EINVAL);
			break;
		case CRYPTO_AES_CBC:
			if (encini->cri_klen != 128 &&
			    encini->cri_klen != 192 &&
			    encini->cri_klen != 256)
				return (EINVAL);
			break;
		}
	}

	if (sc->sc_sessions == NULL) {
		ses = sc->sc_sessions = (struct safe_session *)malloc(
			sizeof(struct safe_session), M_DEVBUF, M_NOWAIT);
		if (ses == NULL)
			return (ENOMEM);
		sesn = 0;
		sc->sc_nsessions = 1;
	} else {
		for (sesn = 0; sesn < sc->sc_nsessions; sesn++) {
			if (sc->sc_sessions[sesn].ses_used == 0) {
				ses = &sc->sc_sessions[sesn];
				break;
			}
		}

		if (ses == NULL) {
			sesn = sc->sc_nsessions;
			ses = (struct safe_session *)malloc((sesn + 1) *
				sizeof(struct safe_session), M_DEVBUF, M_NOWAIT);
			if (ses == NULL)
				return (ENOMEM);
			bcopy(sc->sc_sessions, ses, sesn *
				sizeof(struct safe_session));
			bzero(sc->sc_sessions, sesn *
				sizeof(struct safe_session));
			free(sc->sc_sessions, M_DEVBUF);
			sc->sc_sessions = ses;
			ses = &sc->sc_sessions[sesn];
			sc->sc_nsessions++;
		}
	}

	bzero(ses, sizeof(struct safe_session));
	ses->ses_used = 1;

	if (encini) {
		/* get an IV */
		/* XXX may read fewer than requested */
		read_random(ses->ses_iv, sizeof(ses->ses_iv));

		ses->ses_klen = encini->cri_klen;
		bcopy(encini->cri_key, ses->ses_key, ses->ses_klen / 8);

		/* PE is little-endian, ensure proper byte order */
		for (i = 0; i < N(ses->ses_key); i++)
			ses->ses_key[i] = htole32(ses->ses_key[i]);
	}

	if (macini) {
		ses->ses_mlen = macini->cri_mlen;
		if (ses->ses_mlen == 0) {
			if (macini->cri_alg == CRYPTO_MD5_HMAC)
				ses->ses_mlen = MD5_DIGEST_LENGTH;
			else
				ses->ses_mlen = SHA1_RESULTLEN;
		}

		for (i = 0; i < macini->cri_klen / 8; i++)
			macini->cri_key[i] ^= HMAC_IPAD_VAL;

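		/*
		 * HMAC precomputation: hashing (key ^ ipad) padded out to a
		 * block yields the inner digest state.  Below, XOR-ing the
		 * key with (HMAC_IPAD_VAL ^ HMAC_OPAD_VAL) turns it into
		 * (key ^ opad) in place for the outer state, and the final
		 * XOR with HMAC_OPAD_VAL restores the caller's key.
		 */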
		if (macini->cri_alg == CRYPTO_MD5_HMAC) {
			MD5Init(&md5ctx);
			MD5Update(&md5ctx, macini->cri_key,
			    macini->cri_klen / 8);
			MD5Update(&md5ctx, hmac_ipad_buffer,
			    HMAC_BLOCK_LEN - (macini->cri_klen / 8));
			bcopy(md5ctx.state, ses->ses_hminner,
			    sizeof(md5ctx.state));
		} else {
			SHA1Init(&sha1ctx);
			SHA1Update(&sha1ctx, macini->cri_key,
			    macini->cri_klen / 8);
			SHA1Update(&sha1ctx, hmac_ipad_buffer,
			    HMAC_BLOCK_LEN - (macini->cri_klen / 8));
			bcopy(sha1ctx.h.b32, ses->ses_hminner,
			    sizeof(sha1ctx.h.b32));
		}

		for (i = 0; i < macini->cri_klen / 8; i++)
			macini->cri_key[i] ^= (HMAC_IPAD_VAL ^ HMAC_OPAD_VAL);

		if (macini->cri_alg == CRYPTO_MD5_HMAC) {
			MD5Init(&md5ctx);
			MD5Update(&md5ctx, macini->cri_key,
			    macini->cri_klen / 8);
			MD5Update(&md5ctx, hmac_opad_buffer,
			    HMAC_BLOCK_LEN - (macini->cri_klen / 8));
			bcopy(md5ctx.state, ses->ses_hmouter,
			    sizeof(md5ctx.state));
		} else {
			SHA1Init(&sha1ctx);
			SHA1Update(&sha1ctx, macini->cri_key,
			    macini->cri_klen / 8);
			SHA1Update(&sha1ctx, hmac_opad_buffer,
			    HMAC_BLOCK_LEN - (macini->cri_klen / 8));
			bcopy(sha1ctx.h.b32, ses->ses_hmouter,
			    sizeof(sha1ctx.h.b32));
		}

		for (i = 0; i < macini->cri_klen / 8; i++)
			macini->cri_key[i] ^= HMAC_OPAD_VAL;

		/* PE is little-endian, ensure proper byte order */
		for (i = 0; i < N(ses->ses_hminner); i++) {
			ses->ses_hminner[i] = htole32(ses->ses_hminner[i]);
			ses->ses_hmouter[i] = htole32(ses->ses_hmouter[i]);
		}
	}

	*sidp = SAFE_SID(device_get_unit(sc->sc_dev), sesn);
	return (0);
#undef N
}

/*
 * Deallocate a session.
 */
static int
safe_freesession(void *arg, u_int64_t tid)
{
	struct safe_softc *sc = arg;
	int session, ret;
	u_int32_t sid = ((u_int32_t) tid) & 0xffffffff;

	if (sc == NULL)
		return (EINVAL);

	session = SAFE_SESSION(sid);
	if (session < sc->sc_nsessions) {
		bzero(&sc->sc_sessions[session], sizeof(sc->sc_sessions[session]));
		ret = 0;
	} else
		ret = EINVAL;
	return (ret);
}

static void
safe_op_cb(void *arg, bus_dma_segment_t *seg, int nsegs, bus_size_t mapsize, int error)
{
	struct safe_operand *op = arg;

	DPRINTF(("%s: mapsize %u nsegs %d error %d\n", __func__,
		(u_int) mapsize, nsegs, error));
	if (error != 0)
		return;
	op->mapsize = mapsize;
	op->nsegs = nsegs;
	bcopy(seg, op->segs, nsegs * sizeof (seg[0]));
}

static int
safe_process(void *arg, struct cryptop *crp, int hint)
{
	int err = 0, i, nicealign, uniform;
	struct safe_softc *sc = arg;
	struct cryptodesc *crd1, *crd2, *maccrd, *enccrd;
	int bypass, oplen, ivsize;
	caddr_t iv;
	int16_t coffset;
	struct safe_session *ses;
	struct safe_ringentry *re;
	struct safe_sarec *sa;
	struct safe_pdesc *pd;
	u_int32_t cmd0, cmd1, staterec;

	if (crp == NULL || crp->crp_callback == NULL || sc == NULL) {
		safestats.st_invalid++;
		return (EINVAL);
	}
	if (SAFE_SESSION(crp->crp_sid) >= sc->sc_nsessions) {
		safestats.st_badsession++;
		return (EINVAL);
	}

	mtx_lock(&sc->sc_ringmtx);
	if (sc->sc_front == sc->sc_back && sc->sc_nqchip != 0) {
		safestats.st_ringfull++;
		sc->sc_needwakeup |= CRYPTO_SYMQ;
		mtx_unlock(&sc->sc_ringmtx);
		return (ERESTART);
	}
	re = sc->sc_front;

	staterec = re->re_sa.sa_staterec;	/* save */
	/* NB: zero everything but the PE descriptor */
	bzero(&re->re_sa, sizeof(struct safe_ringentry) - sizeof(re->re_desc));
	re->re_sa.sa_staterec = staterec;	/* restore */

	re->re_crp = crp;
	re->re_sesn = SAFE_SESSION(crp->crp_sid);

	if (crp->crp_flags & CRYPTO_F_IMBUF) {
		re->re_src_m = (struct mbuf *)crp->crp_buf;
		re->re_dst_m = (struct mbuf *)crp->crp_buf;
	} else if (crp->crp_flags & CRYPTO_F_IOV) {
		re->re_src_io = (struct uio *)crp->crp_buf;
		re->re_dst_io = (struct uio *)crp->crp_buf;
	} else {
		safestats.st_badflags++;
		err = EINVAL;
		goto errout;	/* XXX we don't handle contiguous blocks! */
	}

	sa = &re->re_sa;
	ses = &sc->sc_sessions[re->re_sesn];

	crd1 = crp->crp_desc;
	if (crd1 == NULL) {
		safestats.st_nodesc++;
		err = EINVAL;
		goto errout;
	}
	crd2 = crd1->crd_next;

	if ((crd1->crd_flags & CRD_F_KEY_EXPLICIT) ||
	    (crd2 != NULL && (crd2->crd_flags & CRD_F_KEY_EXPLICIT))) {
		safestats.st_badflags++;
		err = EINVAL;
		goto errout;
	}

	cmd0 = SAFE_SA_CMD0_BASIC;		/* basic group operation */
	cmd1 = 0;
	if (crd2 == NULL) {
		if (crd1->crd_alg == CRYPTO_MD5_HMAC ||
		    crd1->crd_alg == CRYPTO_SHA1_HMAC ||
		    crd1->crd_alg == CRYPTO_NULL_HMAC) {
			maccrd = crd1;
			enccrd = NULL;
			cmd0 |= SAFE_SA_CMD0_OP_HASH;
		} else if (crd1->crd_alg == CRYPTO_DES_CBC ||
		    crd1->crd_alg == CRYPTO_3DES_CBC ||
		    crd1->crd_alg == CRYPTO_AES_CBC ||
		    crd1->crd_alg == CRYPTO_NULL_CBC) {
			maccrd = NULL;
			enccrd = crd1;
			cmd0 |= SAFE_SA_CMD0_OP_CRYPT;
		} else {
			safestats.st_badalg++;
			err = EINVAL;
			goto errout;
		}
	} else {
		if ((crd1->crd_alg == CRYPTO_MD5_HMAC ||
		    crd1->crd_alg == CRYPTO_SHA1_HMAC ||
		    crd1->crd_alg == CRYPTO_NULL_HMAC) &&
		    (crd2->crd_alg == CRYPTO_DES_CBC ||
			crd2->crd_alg == CRYPTO_3DES_CBC ||
			crd2->crd_alg == CRYPTO_AES_CBC ||
			crd2->crd_alg == CRYPTO_NULL_CBC) &&
		    ((crd2->crd_flags & CRD_F_ENCRYPT) == 0)) {
			maccrd = crd1;
			enccrd = crd2;
		} else if ((crd1->crd_alg == CRYPTO_DES_CBC ||
		    crd1->crd_alg == CRYPTO_3DES_CBC ||
		    crd1->crd_alg == CRYPTO_AES_CBC ||
		    crd1->crd_alg == CRYPTO_NULL_CBC) &&
		    (crd2->crd_alg == CRYPTO_MD5_HMAC ||
			crd2->crd_alg == CRYPTO_SHA1_HMAC ||
			crd2->crd_alg == CRYPTO_NULL_HMAC) &&
		    (crd1->crd_flags & CRD_F_ENCRYPT)) {
			enccrd = crd1;
			maccrd = crd2;
		} else {
			safestats.st_badalg++;
			err = EINVAL;
			goto errout;
		}
		cmd0 |= SAFE_SA_CMD0_OP_BOTH;
	}

	if (enccrd) {
		if (enccrd->crd_alg == CRYPTO_DES_CBC) {
			cmd0 |= SAFE_SA_CMD0_DES;
			cmd1 |= SAFE_SA_CMD1_CBC;
			ivsize = 2*sizeof(u_int32_t);
		} else if (enccrd->crd_alg == CRYPTO_3DES_CBC) {
			cmd0 |= SAFE_SA_CMD0_3DES;
			cmd1 |= SAFE_SA_CMD1_CBC;
			ivsize = 2*sizeof(u_int32_t);
		} else if (enccrd->crd_alg == CRYPTO_AES_CBC) {
			cmd0 |= SAFE_SA_CMD0_AES;
			cmd1 |= SAFE_SA_CMD1_CBC;
			if (ses->ses_klen == 128)
				cmd1 |= SAFE_SA_CMD1_AES128;
			else if (ses->ses_klen == 192)
				cmd1 |= SAFE_SA_CMD1_AES192;
			else
				cmd1 |= SAFE_SA_CMD1_AES256;
			ivsize = 4*sizeof(u_int32_t);
		} else {
			cmd0 |= SAFE_SA_CMD0_CRYPT_NULL;
			ivsize = 0;
		}

		/*
		 * Setup encrypt/decrypt state.  When using basic ops
		 * we can't use an inline IV because hash/crypt offset
		 * must be from the end of the IV to the start of the
		 * crypt data and this leaves out the preceding header
		 * from the hash calculation.  Instead we place the IV
		 * in the state record and set the hash/crypt offset to
		 * copy both the header+IV.
		 */
		if (enccrd->crd_flags & CRD_F_ENCRYPT) {
			cmd0 |= SAFE_SA_CMD0_OUTBOUND;

			if (enccrd->crd_flags & CRD_F_IV_EXPLICIT)
				iv = enccrd->crd_iv;
			else
				iv = (caddr_t) ses->ses_iv;
			if ((enccrd->crd_flags & CRD_F_IV_PRESENT) == 0) {
				if (crp->crp_flags & CRYPTO_F_IMBUF)
					m_copyback(re->re_src_m,
						enccrd->crd_inject, ivsize, iv);
				else if (crp->crp_flags & CRYPTO_F_IOV)
					cuio_copyback(re->re_src_io,
						enccrd->crd_inject, ivsize, iv);
			}
			bcopy(iv, re->re_sastate.sa_saved_iv, ivsize);
			cmd0 |= SAFE_SA_CMD0_IVLD_STATE | SAFE_SA_CMD0_SAVEIV;
			re->re_flags |= SAFE_QFLAGS_COPYOUTIV;
		} else {
			cmd0 |= SAFE_SA_CMD0_INBOUND;

			if (enccrd->crd_flags & CRD_F_IV_EXPLICIT)
				bcopy(enccrd->crd_iv,
					re->re_sastate.sa_saved_iv, ivsize);
			else if (crp->crp_flags & CRYPTO_F_IMBUF)
				m_copydata(re->re_src_m, enccrd->crd_inject,
					ivsize,
					(caddr_t)re->re_sastate.sa_saved_iv);
			else if (crp->crp_flags & CRYPTO_F_IOV)
				cuio_copydata(re->re_src_io, enccrd->crd_inject,
					ivsize,
					(caddr_t)re->re_sastate.sa_saved_iv);
			cmd0 |= SAFE_SA_CMD0_IVLD_STATE;
		}
		/*
		 * For basic encryption use the zero pad algorithm.
		 * This pads results to an 8-byte boundary and
		 * suppresses padding verification for inbound (i.e.
		 * decrypt) operations.
		 *
		 * NB: Not sure if the 8-byte pad boundary is a problem.
		 */
		cmd0 |= SAFE_SA_CMD0_PAD_ZERO;

		/* XXX assert key bufs have the same size */
		bcopy(ses->ses_key, sa->sa_key, sizeof(sa->sa_key));
	}

	if (maccrd) {
		if (maccrd->crd_alg == CRYPTO_MD5_HMAC) {
			cmd0 |= SAFE_SA_CMD0_MD5;
			cmd1 |= SAFE_SA_CMD1_HMAC;	/* NB: enable HMAC */
		} else if (maccrd->crd_alg == CRYPTO_SHA1_HMAC) {
			cmd0 |= SAFE_SA_CMD0_SHA1;
			cmd1 |= SAFE_SA_CMD1_HMAC;	/* NB: enable HMAC */
		} else {
			cmd0 |= SAFE_SA_CMD0_HASH_NULL;
		}
		/*
		 * Digest data is loaded from the SA and the hash
		 * result is saved to the state block where we
		 * retrieve it for return to the caller.
		 */
		/* XXX assert digest bufs have the same size */
		bcopy(ses->ses_hminner, sa->sa_indigest,
			sizeof(sa->sa_indigest));
		bcopy(ses->ses_hmouter, sa->sa_outdigest,
			sizeof(sa->sa_outdigest));

		cmd0 |= SAFE_SA_CMD0_HSLD_SA | SAFE_SA_CMD0_SAVEHASH;
		re->re_flags |= SAFE_QFLAGS_COPYOUTICV;
	}

	if (enccrd && maccrd) {
		/*
		 * The offset from hash data to the start of
		 * crypt data is the difference in the skips.
		 */
		bypass = maccrd->crd_skip;
		coffset = enccrd->crd_skip - maccrd->crd_skip;
		if (coffset < 0) {
			DPRINTF(("%s: hash does not precede crypt; "
				"mac skip %u enc skip %u\n",
				__func__, maccrd->crd_skip, enccrd->crd_skip));
			safestats.st_skipmismatch++;
			err = EINVAL;
			goto errout;
		}
		oplen = enccrd->crd_skip + enccrd->crd_len;
		if (maccrd->crd_skip + maccrd->crd_len != oplen) {
			DPRINTF(("%s: hash amount %u != crypt amount %u\n",
				__func__, maccrd->crd_skip + maccrd->crd_len,
				oplen));
			safestats.st_lenmismatch++;
			err = EINVAL;
			goto errout;
		}
#ifdef SAFE_DEBUG
		if (safe_debug) {
			printf("mac: skip %d, len %d, inject %d\n",
			    maccrd->crd_skip, maccrd->crd_len,
			    maccrd->crd_inject);
			printf("enc: skip %d, len %d, inject %d\n",
			    enccrd->crd_skip, enccrd->crd_len,
			    enccrd->crd_inject);
			printf("bypass %d coffset %d oplen %d\n",
				bypass, coffset, oplen);
		}
#endif
		if (coffset & 3) {	/* offset must be 32-bit aligned */
			DPRINTF(("%s: coffset %u misaligned\n",
				__func__, coffset));
			safestats.st_coffmisaligned++;
			err = EINVAL;
			goto errout;
		}
		coffset >>= 2;
		if (coffset > 255) {	/* offset must be <256 dwords */
			DPRINTF(("%s: coffset %u too big\n",
				__func__, coffset));
			safestats.st_cofftoobig++;
			err = EINVAL;
			goto errout;
		}
		/*
		 * Tell the hardware to copy the header to the output.
		 * The header is defined as the data from the end of
		 * the bypass to the start of data to be encrypted.
		 * Typically this is the inline IV.  Note that you need
		 * to do this even if src+dst are the same; it appears
		 * that w/o this bit the crypted data is written
		 * immediately after the bypass data.
		 */
		cmd1 |= SAFE_SA_CMD1_HDRCOPY;
		/*
		 * Disable IP header mutable bit handling.  This is
		 * needed to get correct HMAC calculations.
		 */
		cmd1 |= SAFE_SA_CMD1_MUTABLE;
	} else {
		if (enccrd) {
			bypass = enccrd->crd_skip;
			oplen = bypass + enccrd->crd_len;
		} else {
			bypass = maccrd->crd_skip;
			oplen = bypass + maccrd->crd_len;
		}
		coffset = 0;
	}
	/* XXX verify multiple of 4 when using s/g */
	if (bypass > 96) {		/* bypass offset must be <= 96 bytes */
		DPRINTF(("%s: bypass %u too big\n", __func__, bypass));
		safestats.st_bypasstoobig++;
		err = EINVAL;
		goto errout;
	}

	if (bus_dmamap_create(sc->sc_srcdmat, BUS_DMA_NOWAIT, &re->re_src_map)) {
		safestats.st_nomap++;
		err = ENOMEM;
		goto errout;
	}
	if (crp->crp_flags & CRYPTO_F_IMBUF) {
		if (bus_dmamap_load_mbuf(sc->sc_srcdmat, re->re_src_map,
		    re->re_src_m, safe_op_cb,
		    &re->re_src, BUS_DMA_NOWAIT) != 0) {
			bus_dmamap_destroy(sc->sc_srcdmat, re->re_src_map);
			re->re_src_map = NULL;
			safestats.st_noload++;
			err = ENOMEM;
			goto errout;
		}
	} else if (crp->crp_flags & CRYPTO_F_IOV) {
		if (bus_dmamap_load_uio(sc->sc_srcdmat, re->re_src_map,
		    re->re_src_io, safe_op_cb,
		    &re->re_src, BUS_DMA_NOWAIT) != 0) {
			bus_dmamap_destroy(sc->sc_srcdmat, re->re_src_map);
			re->re_src_map = NULL;
			safestats.st_noload++;
			err = ENOMEM;
			goto errout;
		}
	}
	nicealign = safe_dmamap_aligned(&re->re_src);
	uniform = safe_dmamap_uniform(&re->re_src);

	DPRINTF(("src nicealign %u uniform %u nsegs %u\n",
		nicealign, uniform, re->re_src.nsegs));
	if (re->re_src.nsegs > 1) {
		re->re_desc.d_src = sc->sc_spalloc.dma_paddr +
			((caddr_t) sc->sc_spfree - (caddr_t) sc->sc_spring);
		for (i = 0; i < re->re_src_nsegs; i++) {
			/* NB: no need to check if there's space */
			pd = sc->sc_spfree;
			if (++(sc->sc_spfree) == sc->sc_springtop)
				sc->sc_spfree = sc->sc_spring;

			KASSERT((pd->pd_flags&3) == 0 ||
				(pd->pd_flags&3) == SAFE_PD_DONE,
				("bogus source particle descriptor; flags %x",
				pd->pd_flags));
			pd->pd_addr = re->re_src_segs[i].ds_addr;
			pd->pd_size = re->re_src_segs[i].ds_len;
			pd->pd_flags = SAFE_PD_READY;
		}
		cmd0 |= SAFE_SA_CMD0_IGATHER;
	} else {
		/*
		 * No need for gather, reference the operand directly.
		 */
		re->re_desc.d_src = re->re_src_segs[0].ds_addr;
	}

	if (enccrd == NULL && maccrd != NULL) {
		/*
		 * Hash op; no destination needed.
		 */
	} else {
		if (crp->crp_flags & CRYPTO_F_IOV) {
			if (!nicealign) {
				safestats.st_iovmisaligned++;
				err = EINVAL;
				goto errout;
			}
			if (uniform != 1) {
				/*
				 * Source is not suitable for direct use as
				 * the destination.  Create a new scatter/gather
				 * list based on the destination requirements
				 * and check if that's ok.
				 */
				if (bus_dmamap_create(sc->sc_dstdmat,
				    BUS_DMA_NOWAIT, &re->re_dst_map)) {
					safestats.st_nomap++;
					err = ENOMEM;
					goto errout;
				}
				if (bus_dmamap_load_uio(sc->sc_dstdmat,
				    re->re_dst_map, re->re_dst_io,
				    safe_op_cb, &re->re_dst,
				    BUS_DMA_NOWAIT) != 0) {
					bus_dmamap_destroy(sc->sc_dstdmat,
						re->re_dst_map);
					re->re_dst_map = NULL;
					safestats.st_noload++;
					err = ENOMEM;
					goto errout;
				}
				uniform = safe_dmamap_uniform(&re->re_dst);
				if (!uniform) {
					/*
					 * There's no way to handle the DMA
					 * requirements with this uio.  We
					 * could create a separate DMA area for
					 * the result and then copy it back,
					 * but for now we just bail and return
					 * an error.  Note that uio requests
					 * > SAFE_MAX_DSIZE are handled because
					 * the DMA map and segment list for the
					 * destination will result in a
					 * destination particle list that does
					 * the necessary scatter DMA.
					 */
					safestats.st_iovnotuniform++;
					err = EINVAL;
					goto errout;
				}
			} else
				re->re_dst = re->re_src;
		} else if (crp->crp_flags & CRYPTO_F_IMBUF) {
			if (nicealign && uniform == 1) {
				/*
				 * Source layout is suitable for direct
				 * sharing of the DMA map and segment list.
				 */
				re->re_dst = re->re_src;
			} else if (nicealign && uniform == 2) {
				/*
				 * The source is properly aligned but requires a
				 * different particle list to handle DMA of the
				 * result.  Create a new map and do the load to
				 * create the segment list.  The particle
				 * descriptor setup code below will handle the
				 * rest.
				 */
				if (bus_dmamap_create(sc->sc_dstdmat,
				    BUS_DMA_NOWAIT, &re->re_dst_map)) {
					safestats.st_nomap++;
					err = ENOMEM;
					goto errout;
				}
				if (bus_dmamap_load_mbuf(sc->sc_dstdmat,
				    re->re_dst_map, re->re_dst_m,
				    safe_op_cb, &re->re_dst,
				    BUS_DMA_NOWAIT) != 0) {
					bus_dmamap_destroy(sc->sc_dstdmat,
						re->re_dst_map);
					re->re_dst_map = NULL;
					safestats.st_noload++;
					err = ENOMEM;
					goto errout;
				}
			} else {		/* !(aligned and/or uniform) */
				int totlen, len;
				struct mbuf *m, *top, **mp;

				/*
				 * DMA constraints require that we allocate a
				 * new mbuf chain for the destination.  We
				 * allocate an entire new set of mbufs of
				 * optimal/required size and then tell the
				 * hardware to copy any bits that are not
				 * created as a byproduct of the operation.
				 */
				if (!nicealign)
					safestats.st_unaligned++;
				if (!uniform)
					safestats.st_notuniform++;
				totlen = re->re_src_mapsize;
				if (re->re_src_m->m_flags & M_PKTHDR) {
					len = MHLEN;
					MGETHDR(m, M_DONTWAIT, MT_DATA);
					if (m && !m_dup_pkthdr(m, re->re_src_m,
					    M_DONTWAIT)) {
						m_free(m);
						m = NULL;
					}
				} else {
					len = MLEN;
					MGET(m, M_DONTWAIT, MT_DATA);
				}
				if (m == NULL) {
					safestats.st_nombuf++;
					err = sc->sc_nqchip ? ERESTART : ENOMEM;
					goto errout;
				}
				if (totlen >= MINCLSIZE) {
					MCLGET(m, M_DONTWAIT);
					if ((m->m_flags & M_EXT) == 0) {
						m_free(m);
						safestats.st_nomcl++;
						err = sc->sc_nqchip ?
							ERESTART : ENOMEM;
						goto errout;
					}
					len = MCLBYTES;
				}
				m->m_len = len;
				top = NULL;
				mp = &top;

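				/*
				 * Grow the chain: each pass of the loop below
				 * appends one mbuf (a cluster while at least
				 * MINCLSIZE bytes remain) until totlen bytes
				 * of destination space exist.  The first mbuf,
				 * set up above, carries any packet header.
				 */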
				while (totlen > 0) {
					if (top) {
						MGET(m, M_DONTWAIT, MT_DATA);
						if (m == NULL) {
							m_freem(top);
							safestats.st_nombuf++;
							err = sc->sc_nqchip ?
							    ERESTART : ENOMEM;
							goto errout;
						}
						len = MLEN;
					}
					if (top && totlen >= MINCLSIZE) {
						MCLGET(m, M_DONTWAIT);
						if ((m->m_flags & M_EXT) == 0) {
							*mp = m;
							m_freem(top);
							safestats.st_nomcl++;
							err = sc->sc_nqchip ?
							    ERESTART : ENOMEM;
							goto errout;
						}
						len = MCLBYTES;
					}
					m->m_len = len = min(totlen, len);
					totlen -= len;
					*mp = m;
					mp = &m->m_next;
				}
				re->re_dst_m = top;
				if (bus_dmamap_create(sc->sc_dstdmat,
				    BUS_DMA_NOWAIT, &re->re_dst_map) != 0) {
					safestats.st_nomap++;
					err = ENOMEM;
					goto errout;
				}
				if (bus_dmamap_load_mbuf(sc->sc_dstdmat,
				    re->re_dst_map, re->re_dst_m,
				    safe_op_cb, &re->re_dst,
				    BUS_DMA_NOWAIT) != 0) {
					bus_dmamap_destroy(sc->sc_dstdmat,
						re->re_dst_map);
					re->re_dst_map = NULL;
					safestats.st_noload++;
					err = ENOMEM;
					goto errout;
				}
				if (re->re_src.mapsize > oplen) {
					/*
					 * There's data following what the
					 * hardware will copy for us.  If this
					 * isn't just the ICV (that's going to
					 * be written on completion), copy it
					 * to the new mbufs
					 */
					if (!(maccrd &&
					    (re->re_src.mapsize-oplen) == 12 &&
					    maccrd->crd_inject == oplen))
						safe_mcopy(re->re_src_m,
							   re->re_dst_m,
							   oplen);
					else
						safestats.st_noicvcopy++;
				}
			}
		} else {
			safestats.st_badflags++;
			err = EINVAL;
			goto errout;
		}

		if (re->re_dst.nsegs > 1) {
			re->re_desc.d_dst = sc->sc_dpalloc.dma_paddr +
			    ((caddr_t) sc->sc_dpfree - (caddr_t) sc->sc_dpring);
			for (i = 0; i < re->re_dst_nsegs; i++) {
				pd = sc->sc_dpfree;
				KASSERT((pd->pd_flags&3) == 0 ||
					(pd->pd_flags&3) == SAFE_PD_DONE,
					("bogus dest particle descriptor; flags %x",
						pd->pd_flags));
				if (++(sc->sc_dpfree) == sc->sc_dpringtop)
					sc->sc_dpfree = sc->sc_dpring;
				pd->pd_addr = re->re_dst_segs[i].ds_addr;
				pd->pd_flags = SAFE_PD_READY;
			}
			cmd0 |= SAFE_SA_CMD0_OSCATTER;
		} else {
			/*
			 * No need for scatter, reference the operand directly.
			 */
			re->re_desc.d_dst = re->re_dst_segs[0].ds_addr;
		}
	}

	/*
	 * All done with setup; fill in the SA command words
	 * and the packet engine descriptor.  The operation
	 * is now ready for submission to the hardware.
	 */
	sa->sa_cmd0 = cmd0 | SAFE_SA_CMD0_IPCI | SAFE_SA_CMD0_OPCI;
	sa->sa_cmd1 = cmd1
		    | (coffset << SAFE_SA_CMD1_OFFSET_S)
		    | SAFE_SA_CMD1_SAREV1	/* Rev 1 SA data structure */
		    | SAFE_SA_CMD1_SRPCI
		    ;
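	/*
	 * NB: as the flag names suggest, IPCI/OPCI mark the input and
	 * output packet data, and SRPCI the state record, as living in
	 * host (PCI) memory rather than the chip's internal RAM;
	 * SAFE_PE_CSR_SAPCI below does likewise for the SA itself.
	 */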
	/*
	 * NB: the order of writes is important here.  In case the
	 * chip is scanning the ring because of an outstanding request
	 * it might nab this one too.  In that case we need to make
	 * sure the setup is complete before we write the length
	 * field of the descriptor as it signals the descriptor is
	 * ready for processing.
	 */
	re->re_desc.d_csr = SAFE_PE_CSR_READY | SAFE_PE_CSR_SAPCI;
	if (maccrd)
		re->re_desc.d_csr |= SAFE_PE_CSR_LOADSA | SAFE_PE_CSR_HASHFINAL;
	re->re_desc.d_len = oplen
			  | SAFE_PE_LEN_READY
			  | (bypass << SAFE_PE_LEN_BYPASS_S)
			  ;

	safestats.st_ipackets++;
	safestats.st_ibytes += oplen;

	if (++(sc->sc_front) == sc->sc_ringtop)
		sc->sc_front = sc->sc_ring;

	/* XXX honor batching */
	safe_feed(sc, re);
	mtx_unlock(&sc->sc_ringmtx);
	return (0);

errout:
	if ((re->re_dst_m != NULL) && (re->re_src_m != re->re_dst_m))
		m_freem(re->re_dst_m);

	if (re->re_dst_map != NULL && re->re_dst_map != re->re_src_map) {
		bus_dmamap_unload(sc->sc_dstdmat, re->re_dst_map);
		bus_dmamap_destroy(sc->sc_dstdmat, re->re_dst_map);
	}
	if (re->re_src_map != NULL) {
		bus_dmamap_unload(sc->sc_srcdmat, re->re_src_map);
		bus_dmamap_destroy(sc->sc_srcdmat, re->re_src_map);
	}
	mtx_unlock(&sc->sc_ringmtx);
	if (err != ERESTART) {
		crp->crp_etype = err;
		crypto_done(crp);
	} else {
		sc->sc_needwakeup |= CRYPTO_SYMQ;
	}
	return (err);
}

static void
safe_callback(struct safe_softc *sc, struct safe_ringentry *re)
{
	struct cryptop *crp = (struct cryptop *)re->re_crp;
	struct cryptodesc *crd;

	safestats.st_opackets++;
	safestats.st_obytes += re->re_dst.mapsize;

	safe_dma_sync(&sc->sc_ringalloc,
		BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
	if (re->re_desc.d_csr & SAFE_PE_CSR_STATUS) {
		device_printf(sc->sc_dev, "csr 0x%x cmd0 0x%x cmd1 0x%x\n",
			re->re_desc.d_csr,
			re->re_sa.sa_cmd0, re->re_sa.sa_cmd1);
		safestats.st_peoperr++;
		crp->crp_etype = EIO;		/* something more meaningful? */
	}
	if (re->re_dst_map != NULL && re->re_dst_map != re->re_src_map) {
		bus_dmamap_sync(sc->sc_dstdmat, re->re_dst_map,
		    BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->sc_dstdmat, re->re_dst_map);
		bus_dmamap_destroy(sc->sc_dstdmat, re->re_dst_map);
	}
	bus_dmamap_sync(sc->sc_srcdmat, re->re_src_map, BUS_DMASYNC_POSTWRITE);
	bus_dmamap_unload(sc->sc_srcdmat, re->re_src_map);
	bus_dmamap_destroy(sc->sc_srcdmat, re->re_src_map);

	/*
	 * If result was written to a different mbuf chain, swap
	 * it in as the return value and reclaim the original.
	 */
	if ((crp->crp_flags & CRYPTO_F_IMBUF) && re->re_src_m != re->re_dst_m) {
		m_freem(re->re_src_m);
		crp->crp_buf = (caddr_t)re->re_dst_m;
	}

	if (re->re_flags & SAFE_QFLAGS_COPYOUTIV) {
		/* copy out IV for future use */
		for (crd = crp->crp_desc; crd; crd = crd->crd_next) {
			int ivsize;

			if (crd->crd_alg == CRYPTO_DES_CBC ||
			    crd->crd_alg == CRYPTO_3DES_CBC) {
				ivsize = 2*sizeof(u_int32_t);
			} else if (crd->crd_alg == CRYPTO_AES_CBC) {
				ivsize = 4*sizeof(u_int32_t);
			} else
				continue;
			if (crp->crp_flags & CRYPTO_F_IMBUF) {
				m_copydata((struct mbuf *)crp->crp_buf,
					crd->crd_skip + crd->crd_len - ivsize,
					ivsize,
					(caddr_t) sc->sc_sessions[re->re_sesn].ses_iv);
			} else if (crp->crp_flags & CRYPTO_F_IOV) {
				cuio_copydata((struct uio *)crp->crp_buf,
					crd->crd_skip + crd->crd_len - ivsize,
					ivsize,
					(caddr_t)sc->sc_sessions[re->re_sesn].ses_iv);
			}
			break;
		}
	}

	if (re->re_flags & SAFE_QFLAGS_COPYOUTICV) {
		/* copy out ICV result */
		for (crd = crp->crp_desc; crd; crd = crd->crd_next) {
			if (!(crd->crd_alg == CRYPTO_MD5_HMAC ||
			    crd->crd_alg == CRYPTO_SHA1_HMAC ||
			    crd->crd_alg == CRYPTO_NULL_HMAC))
				continue;
			if (crd->crd_alg == CRYPTO_SHA1_HMAC) {
				/*
				 * SHA-1 ICV's are byte-swapped; fix 'em up
				 * before copying them to their destination.
				 */
				re->re_sastate.sa_saved_indigest[0] =
				    bswap32(re->re_sastate.sa_saved_indigest[0]);
				re->re_sastate.sa_saved_indigest[1] =
				    bswap32(re->re_sastate.sa_saved_indigest[1]);
				re->re_sastate.sa_saved_indigest[2] =
				    bswap32(re->re_sastate.sa_saved_indigest[2]);
			}
			if (crp->crp_flags & CRYPTO_F_IMBUF) {
				m_copyback((struct mbuf *)crp->crp_buf,
					crd->crd_inject,
					sc->sc_sessions[re->re_sesn].ses_mlen,
					(caddr_t)re->re_sastate.sa_saved_indigest);
			} else if (crp->crp_flags & CRYPTO_F_IOV && crp->crp_mac) {
				bcopy((caddr_t)re->re_sastate.sa_saved_indigest,
					crp->crp_mac,
					sc->sc_sessions[re->re_sesn].ses_mlen);
			}
			break;
		}
	}
	crypto_done(crp);
}

/*
 * Copy all data past offset from srcm to dstm.
 */
static void
safe_mcopy(struct mbuf *srcm, struct mbuf *dstm, u_int offset)
{
	u_int j, dlen, slen;
	caddr_t dptr, sptr;

	/*
	 * Advance src and dst to offset.
	 */
	j = offset;
	while (j >= 0) {
		if (srcm->m_len > j)
			break;
		j -= srcm->m_len;
		srcm = srcm->m_next;
		if (srcm == NULL)
			return;
	}
	sptr = mtod(srcm, caddr_t) + j;
	slen = srcm->m_len - j;

	j = offset;
	while (j >= 0) {
		if (dstm->m_len > j)
			break;
		j -= dstm->m_len;
		dstm = dstm->m_next;
		if (dstm == NULL)
			return;
	}
	dptr = mtod(dstm, caddr_t) + j;
	dlen = dstm->m_len - j;

	/*
	 * Copy everything that remains.
	 */
	for (;;) {
		j = min(slen, dlen);
		bcopy(sptr, dptr, j);
		if (slen == j) {
			srcm = srcm->m_next;
			if (srcm == NULL)
				return;
			sptr = srcm->m_data;
			slen = srcm->m_len;
		} else
			sptr += j, slen -= j;
		if (dlen == j) {
			dstm = dstm->m_next;
			if (dstm == NULL)
				return;
			dptr = dstm->m_data;
			dlen = dstm->m_len;
		} else
			dptr += j, dlen -= j;
	}
}

#ifndef SAFE_NO_RNG
#define	SAFE_RNG_MAXWAIT	1000

static void
safe_rng_init(struct safe_softc *sc)
{
	u_int32_t w, v;
	int i;

	WRITE_REG(sc, SAFE_RNG_CTRL, 0);
	/* use default value according to the manual */
	WRITE_REG(sc, SAFE_RNG_CNFG, 0x834);	/* magic from SafeNet */
	WRITE_REG(sc, SAFE_RNG_ALM_CNT, 0);

	/*
	 * There is a bug in rev 1.0 of the 1140 that when the RNG
	 * is brought out of reset the ready status flag does not
	 * work until the RNG has finished its internal initialization.
	 *
	 * So in order to determine the device is through its
	 * initialization we must read the data register, using the
	 * status reg in the read in case it is initialized.  Then read
	 * the data register until it changes from the first read.
	 * Once it changes read the data register until it changes
	 * again.  At this time the RNG is considered initialized.
	 * This could take between 750ms - 1000ms in time.
	 */
	i = 0;
	w = READ_REG(sc, SAFE_RNG_OUT);
	do {
		v = READ_REG(sc, SAFE_RNG_OUT);
		if (v != w) {
			w = v;
			break;
		}
		DELAY(10);
	} while (++i < SAFE_RNG_MAXWAIT);

	/* Wait until data changes again */
	i = 0;
	do {
		v = READ_REG(sc, SAFE_RNG_OUT);
		if (v != w)
			break;
		DELAY(10);
	} while (++i < SAFE_RNG_MAXWAIT);
}

static __inline void
safe_rng_disable_short_cycle(struct safe_softc *sc)
{
	WRITE_REG(sc, SAFE_RNG_CTRL,
		READ_REG(sc, SAFE_RNG_CTRL) &~ SAFE_RNG_CTRL_SHORTEN);
}

static __inline void
safe_rng_enable_short_cycle(struct safe_softc *sc)
{
	WRITE_REG(sc, SAFE_RNG_CTRL,
		READ_REG(sc, SAFE_RNG_CTRL) | SAFE_RNG_CTRL_SHORTEN);
}

static __inline u_int32_t
safe_rng_read(struct safe_softc *sc)
{
	int i;

	i = 0;
	while (READ_REG(sc, SAFE_RNG_STAT) != 0 && ++i < SAFE_RNG_MAXWAIT)
		;
	return READ_REG(sc, SAFE_RNG_OUT);
}

static void
safe_rng(void *arg)
{
	struct safe_softc *sc = arg;
	u_int32_t buf[SAFE_RNG_MAXBUFSIZ];	/* NB: maybe move to softc */
	u_int maxwords;
	int i;

	safestats.st_rng++;
	/*
	 * Fetch the next block of data.
	 */
	maxwords = safe_rngbufsize;
	if (maxwords > SAFE_RNG_MAXBUFSIZ)
		maxwords = SAFE_RNG_MAXBUFSIZ;
retry:
	for (i = 0; i < maxwords; i++)
		buf[i] = safe_rng_read(sc);
	/*
	 * Check the comparator alarm count and reset the h/w if
	 * it exceeds our threshold.  This guards against the
	 * hardware oscillators resonating with external signals.
	 */
	if (READ_REG(sc, SAFE_RNG_ALM_CNT) > safe_rngmaxalarm) {
		u_int32_t freq_inc, w;

		DPRINTF(("%s: alarm count %u exceeds threshold %u\n", __func__,
			READ_REG(sc, SAFE_RNG_ALM_CNT), safe_rngmaxalarm));
		safestats.st_rngalarm++;
		safe_rng_enable_short_cycle(sc);
		freq_inc = 18;
		for (i = 0; i < 64; i++) {
			w = READ_REG(sc, SAFE_RNG_CNFG);
			freq_inc = ((w + freq_inc) & 0x3fL);
			w = ((w & ~0x3fL) | freq_inc);
			WRITE_REG(sc, SAFE_RNG_CNFG, w);

			WRITE_REG(sc, SAFE_RNG_ALM_CNT, 0);

			(void) safe_rng_read(sc);
			DELAY(25);

			if (READ_REG(sc, SAFE_RNG_ALM_CNT) == 0) {
				safe_rng_disable_short_cycle(sc);
				goto retry;
			}
			freq_inc = 1;
		}
		safe_rng_disable_short_cycle(sc);
	} else
		WRITE_REG(sc, SAFE_RNG_ALM_CNT, 0);

	(*sc->sc_harvest)(sc->sc_rndtest, buf, maxwords*sizeof (u_int32_t));
	callout_reset(&sc->sc_rngto,
		hz * (safe_rnginterval ? safe_rnginterval : 1), safe_rng, sc);
}
#endif /* SAFE_NO_RNG */

static void
safe_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
	bus_addr_t *paddr = (bus_addr_t*) arg;
	*paddr = segs->ds_addr;
}

static int
safe_dma_malloc(
	struct safe_softc *sc,
	bus_size_t size,
	struct safe_dma_alloc *dma,
	int mapflags
)
{
	int r;

	r = bus_dma_tag_create(NULL,			/* parent */
			       sizeof(u_int32_t), 0,	/* alignment, bounds */
			       BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
			       BUS_SPACE_MAXADDR,	/* highaddr */
			       NULL, NULL,		/* filter, filterarg */
			       size,			/* maxsize */
			       1,			/* nsegments */
			       size,			/* maxsegsize */
			       BUS_DMA_ALLOCNOW,	/* flags */
			       NULL, NULL,		/* locking */
			       &dma->dma_tag);
	if (r != 0) {
		device_printf(sc->sc_dev, "safe_dma_malloc: "
			"bus_dma_tag_create failed; error %u\n", r);
		goto fail_0;
	}

	r = bus_dmamap_create(dma->dma_tag, BUS_DMA_NOWAIT, &dma->dma_map);
	if (r != 0) {
		device_printf(sc->sc_dev, "safe_dma_malloc: "
			"bus_dmamap_create failed; error %u\n", r);
		goto fail_1;
	}

	r = bus_dmamem_alloc(dma->dma_tag, (void**) &dma->dma_vaddr,
			     BUS_DMA_NOWAIT, &dma->dma_map);
	if (r != 0) {
		device_printf(sc->sc_dev, "safe_dma_malloc: "
			"bus_dmamem_alloc failed; size %zu, error %u\n",
			size, r);
		goto fail_2;
	}

	r = bus_dmamap_load(dma->dma_tag, dma->dma_map, dma->dma_vaddr,
			    size,
			    safe_dmamap_cb,
			    &dma->dma_paddr,
			    mapflags | BUS_DMA_NOWAIT);
	if (r != 0) {
		device_printf(sc->sc_dev, "safe_dma_malloc: "
			"bus_dmamap_load failed; error %u\n", r);
		goto fail_3;
	}

	dma->dma_size = size;
	return (0);

fail_3:
	bus_dmamap_unload(dma->dma_tag, dma->dma_map);
fail_2:
	bus_dmamem_free(dma->dma_tag, dma->dma_vaddr, dma->dma_map);
fail_1:
	bus_dmamap_destroy(dma->dma_tag, dma->dma_map);
	bus_dma_tag_destroy(dma->dma_tag);
fail_0:
	dma->dma_map = NULL;
	dma->dma_tag = NULL;
	return (r);
}

static void
safe_dma_free(struct safe_softc *sc, struct safe_dma_alloc *dma)
{
	bus_dmamap_unload(dma->dma_tag, dma->dma_map);
	bus_dmamem_free(dma->dma_tag, dma->dma_vaddr, dma->dma_map);
	bus_dmamap_destroy(dma->dma_tag, dma->dma_map);
	bus_dma_tag_destroy(dma->dma_tag);
}

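/*
 * Typical use of the two helpers above (this is how safe_attach
 * allocates its rings):
 *
 *	struct safe_dma_alloc dma;
 *
 *	if (safe_dma_malloc(sc, size, &dma, 0) == 0) {
 *		... touch dma.dma_vaddr from the CPU,
 *		    hand dma.dma_paddr to the chip ...
 *		safe_dma_free(sc, &dma);
 *	}
 *
 * Because the tag is created with nsegments == 1 the memory is one
 * contiguous segment and dma_paddr covers the whole buffer.
 */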
/*
 * Resets the board.  Values in the registers are left as is
 * from the reset (i.e. initial values are assigned elsewhere).
 */
static void
safe_reset_board(struct safe_softc *sc)
{
	u_int32_t v;
	/*
	 * Reset the device.  The manual says no delay
	 * is needed between marking and clearing reset.
	 */
	v = READ_REG(sc, SAFE_PE_DMACFG) &~
		(SAFE_PE_DMACFG_PERESET | SAFE_PE_DMACFG_PDRRESET |
		 SAFE_PE_DMACFG_SGRESET);
	WRITE_REG(sc, SAFE_PE_DMACFG, v
				    | SAFE_PE_DMACFG_PERESET
				    | SAFE_PE_DMACFG_PDRRESET
				    | SAFE_PE_DMACFG_SGRESET);
	WRITE_REG(sc, SAFE_PE_DMACFG, v);
}

/*
 * Initialize registers we need to touch only once.
 */
static void
safe_init_board(struct safe_softc *sc)
{
	u_int32_t v, dwords;

	v = READ_REG(sc, SAFE_PE_DMACFG);
	v &=~ SAFE_PE_DMACFG_PEMODE;
	v |= SAFE_PE_DMACFG_FSENA		/* failsafe enable */
	  |  SAFE_PE_DMACFG_GPRPCI		/* gather ring on PCI */
	  |  SAFE_PE_DMACFG_SPRPCI		/* scatter ring on PCI */
	  |  SAFE_PE_DMACFG_ESDESC		/* endian-swap descriptors */
	  |  SAFE_PE_DMACFG_ESSA		/* endian-swap SA's */
	  |  SAFE_PE_DMACFG_ESPDESC		/* endian-swap part. desc's */
	  ;
	WRITE_REG(sc, SAFE_PE_DMACFG, v);
#if 0
	/* XXX select byte swap based on host byte order */
	WRITE_REG(sc, SAFE_ENDIAN, 0x1b);
#endif
	if (sc->sc_chiprev == SAFE_REV(1,0)) {
		/*
		 * Avoid large PCI DMA transfers.  Rev 1.0 has a bug where
		 * "target mode transfers" done while the chip is DMA'ing
		 * >1020 bytes cause the hardware to lockup.  To avoid this
		 * we reduce the max PCI transfer size and use small source
		 * particle descriptors (<= 256 bytes).
		 */
		WRITE_REG(sc, SAFE_DMA_CFG, 256);
		device_printf(sc->sc_dev,
			"Reduce max DMA size to %u words for rev %u.%u WAR\n",
			(READ_REG(sc, SAFE_DMA_CFG)>>2) & 0xff,
			SAFE_REV_MAJ(sc->sc_chiprev),
			SAFE_REV_MIN(sc->sc_chiprev));
	}

	/* NB: operands+results are overlaid */
	WRITE_REG(sc, SAFE_PE_PDRBASE, sc->sc_ringalloc.dma_paddr);
	WRITE_REG(sc, SAFE_PE_RDRBASE, sc->sc_ringalloc.dma_paddr);
	/*
	 * Configure ring entry size and number of items in the ring.
	 */
	KASSERT((sizeof(struct safe_ringentry) % sizeof(u_int32_t)) == 0,
		("PE ring entry not 32-bit aligned!"));
	dwords = sizeof(struct safe_ringentry) / sizeof(u_int32_t);
	WRITE_REG(sc, SAFE_PE_RINGCFG,
		(dwords << SAFE_PE_RINGCFG_OFFSET_S) | SAFE_MAX_NQUEUE);
	WRITE_REG(sc, SAFE_PE_RINGPOLL, 0);	/* disable polling */

	WRITE_REG(sc, SAFE_PE_GRNGBASE, sc->sc_spalloc.dma_paddr);
	WRITE_REG(sc, SAFE_PE_SRNGBASE, sc->sc_dpalloc.dma_paddr);
	WRITE_REG(sc, SAFE_PE_PARTSIZE,
		(SAFE_TOTAL_DPART<<16) | SAFE_TOTAL_SPART);
	/*
	 * NB: destination particles are fixed size.  We use
	 *     an mbuf cluster and require all results go to
	 *     clusters or smaller.
	 */
	WRITE_REG(sc, SAFE_PE_PARTCFG, SAFE_MAX_DSIZE);

	/* it's now safe to enable PE mode, do it */
	WRITE_REG(sc, SAFE_PE_DMACFG, v | SAFE_PE_DMACFG_PEMODE);

	/*
	 * Configure hardware to use level-triggered interrupts and
	 * to interrupt after each descriptor is processed.
	 */
	WRITE_REG(sc, SAFE_HI_CFG, SAFE_HI_CFG_LEVEL);
	WRITE_REG(sc, SAFE_HI_DESC_CNT, 1);
	WRITE_REG(sc, SAFE_HI_MASK, SAFE_INT_PE_DDONE | SAFE_INT_PE_ERROR);
}

/*
 * Init PCI registers
 */
static void
safe_init_pciregs(device_t dev)
{
}

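/*
 * NB: intentionally empty for now; the PCI command-register bits the
 * device needs (memory space and bus mastering) are turned on directly
 * in safe_attach.
 */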
/*
 * Clean up after a chip crash.
 * It is assumed that the caller is in splimp().
 */
static void
safe_cleanchip(struct safe_softc *sc)
{

	if (sc->sc_nqchip != 0) {
		struct safe_ringentry *re = sc->sc_back;

		while (re != sc->sc_front) {
			if (re->re_desc.d_csr != 0)
				safe_free_entry(sc, re);
			if (++re == sc->sc_ringtop)
				re = sc->sc_ring;
		}
		sc->sc_back = re;
		sc->sc_nqchip = 0;
	}
}

/*
 * free a safe_q
 * It is assumed that the caller is within splimp().
 */
static int
safe_free_entry(struct safe_softc *sc, struct safe_ringentry *re)
{
	struct cryptop *crp;

	/*
	 * Free header MCR
	 */
	if ((re->re_dst_m != NULL) && (re->re_src_m != re->re_dst_m))
		m_freem(re->re_dst_m);

	crp = (struct cryptop *)re->re_crp;

	re->re_desc.d_csr = 0;

	crp->crp_etype = EFAULT;
	crypto_done(crp);
	return(0);
}

/*
 * Routine to reset the chip and clean up.
 * It is assumed that the caller is in splimp().
 */
static void
safe_totalreset(struct safe_softc *sc)
{
	safe_reset_board(sc);
	safe_init_board(sc);
	safe_cleanchip(sc);
}

/*
 * Is the operand suitably aligned for direct DMA?  Each
 * segment must be aligned on a 32-bit boundary and all
 * but the last segment must be a multiple of 4 bytes.
 */
static int
safe_dmamap_aligned(const struct safe_operand *op)
{
	int i;

	for (i = 0; i < op->nsegs; i++) {
		if (op->segs[i].ds_addr & 3)
			return (0);
		if (i != (op->nsegs - 1) && (op->segs[i].ds_len & 3))
			return (0);
	}
	return (1);
}
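/*
 * Worked example (illustrative, not from the original source): a
 * two-segment operand { addr 0x1000, len 64 } + { addr 0x2040, len 10 }
 * passes the test above -- both addresses are 4-byte aligned and only
 * the final segment has a length that is not a multiple of 4.  A
 * segment starting at 0x1002, or an interior segment of 65 bytes,
 * would make safe_dmamap_aligned() return 0 and force the caller to
 * bounce the data through an aligned copy instead.
 */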
/*
 * Is the operand suitable for direct DMA as the destination
 * of an operation.  The hardware requires that each ``particle''
 * but the last in an operation result have the same size.  We
 * fix that size at SAFE_MAX_DSIZE bytes.  This routine returns
 * 0 if some segment is not a multiple of this size, 1 if all
 * segments are exactly this size, or 2 if segments are at worst
 * a multiple of this size.
 */
static int
safe_dmamap_uniform(const struct safe_operand *op)
{
	int result = 1;

	if (op->nsegs > 0) {
		int i;

		for (i = 0; i < op->nsegs-1; i++) {
			if (op->segs[i].ds_len % SAFE_MAX_DSIZE)
				return (0);
			if (op->segs[i].ds_len != SAFE_MAX_DSIZE)
				result = 2;
		}
	}
	return (result);
}
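/*
 * Worked example (illustrative, not from the original source): with
 * destination particles fixed at SAFE_MAX_DSIZE bytes, a result map of
 * { SAFE_MAX_DSIZE, SAFE_MAX_DSIZE, 300 } returns 1 (every segment but
 * the last is exactly particle sized), { 2*SAFE_MAX_DSIZE, 300 } returns
 * 2 (interior segments are only multiples of the particle size), and
 * { SAFE_MAX_DSIZE+4, 300 } returns 0, in which case the caller is
 * expected to copy the result into uniformly sized buffers.
 */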
#ifdef SAFE_DEBUG
static void
safe_dump_dmastatus(struct safe_softc *sc, const char *tag)
{
	printf("%s: ENDIAN 0x%x SRC 0x%x DST 0x%x STAT 0x%x\n"
		, tag
		, READ_REG(sc, SAFE_DMA_ENDIAN)
		, READ_REG(sc, SAFE_DMA_SRCADDR)
		, READ_REG(sc, SAFE_DMA_DSTADDR)
		, READ_REG(sc, SAFE_DMA_STAT)
	);
}

static void
safe_dump_intrstate(struct safe_softc *sc, const char *tag)
{
	printf("%s: HI_CFG 0x%x HI_MASK 0x%x HI_DESC_CNT 0x%x HU_STAT 0x%x HM_STAT 0x%x\n"
		, tag
		, READ_REG(sc, SAFE_HI_CFG)
		, READ_REG(sc, SAFE_HI_MASK)
		, READ_REG(sc, SAFE_HI_DESC_CNT)
		, READ_REG(sc, SAFE_HU_STAT)
		, READ_REG(sc, SAFE_HM_STAT)
	);
}

static void
safe_dump_ringstate(struct safe_softc *sc, const char *tag)
{
	u_int32_t estat = READ_REG(sc, SAFE_PE_ERNGSTAT);

	/* NB: assume caller has lock on ring */
	printf("%s: ERNGSTAT %x (next %u) back %lu front %lu\n",
		tag,
		estat, (estat >> SAFE_PE_ERNGSTAT_NEXT_S),
		(unsigned long)(sc->sc_back - sc->sc_ring),
		(unsigned long)(sc->sc_front - sc->sc_ring));
}

static void
safe_dump_request(struct safe_softc *sc, const char* tag, struct safe_ringentry *re)
{
	int ix, nsegs;

	ix = re - sc->sc_ring;
	printf("%s: %p (%u): csr %x src %x dst %x sa %x len %x\n"
		, tag
		, re, ix
		, re->re_desc.d_csr
		, re->re_desc.d_src
		, re->re_desc.d_dst
		, re->re_desc.d_sa
		, re->re_desc.d_len
	);
	if (re->re_src.nsegs > 1) {
		ix = (re->re_desc.d_src - sc->sc_spalloc.dma_paddr) /
			sizeof(struct safe_pdesc);
		for (nsegs = re->re_src.nsegs; nsegs; nsegs--) {
			printf(" spd[%u] %p: %p size %u flags %x"
				, ix, &sc->sc_spring[ix]
				, (caddr_t)(uintptr_t) sc->sc_spring[ix].pd_addr
				, sc->sc_spring[ix].pd_size
				, sc->sc_spring[ix].pd_flags
			);
			if (sc->sc_spring[ix].pd_size == 0)
				printf(" (zero!)");
			printf("\n");
			if (++ix == SAFE_TOTAL_SPART)
				ix = 0;
		}
	}
	if (re->re_dst.nsegs > 1) {
		ix = (re->re_desc.d_dst - sc->sc_dpalloc.dma_paddr) /
			sizeof(struct safe_pdesc);
		for (nsegs = re->re_dst.nsegs; nsegs; nsegs--) {
			printf(" dpd[%u] %p: %p flags %x\n"
				, ix, &sc->sc_dpring[ix]
				, (caddr_t)(uintptr_t) sc->sc_dpring[ix].pd_addr
				, sc->sc_dpring[ix].pd_flags
			);
			if (++ix == SAFE_TOTAL_DPART)
				ix = 0;
		}
	}
	printf("sa: cmd0 %08x cmd1 %08x staterec %x\n",
		re->re_sa.sa_cmd0, re->re_sa.sa_cmd1, re->re_sa.sa_staterec);
	printf("sa: key %x %x %x %x %x %x %x %x\n"
		, re->re_sa.sa_key[0]
		, re->re_sa.sa_key[1]
		, re->re_sa.sa_key[2]
		, re->re_sa.sa_key[3]
		, re->re_sa.sa_key[4]
		, re->re_sa.sa_key[5]
		, re->re_sa.sa_key[6]
		, re->re_sa.sa_key[7]
	);
	printf("sa: indigest %x %x %x %x %x\n"
		, re->re_sa.sa_indigest[0]
		, re->re_sa.sa_indigest[1]
		, re->re_sa.sa_indigest[2]
		, re->re_sa.sa_indigest[3]
		, re->re_sa.sa_indigest[4]
	);
	printf("sa: outdigest %x %x %x %x %x\n"
		, re->re_sa.sa_outdigest[0]
		, re->re_sa.sa_outdigest[1]
		, re->re_sa.sa_outdigest[2]
		, re->re_sa.sa_outdigest[3]
		, re->re_sa.sa_outdigest[4]
	);
	printf("sr: iv %x %x %x %x\n"
		, re->re_sastate.sa_saved_iv[0]
		, re->re_sastate.sa_saved_iv[1]
		, re->re_sastate.sa_saved_iv[2]
		, re->re_sastate.sa_saved_iv[3]
	);
	printf("sr: hashbc %u indigest %x %x %x %x %x\n"
		, re->re_sastate.sa_saved_hashbc
		, re->re_sastate.sa_saved_indigest[0]
		, re->re_sastate.sa_saved_indigest[1]
		, re->re_sastate.sa_saved_indigest[2]
		, re->re_sastate.sa_saved_indigest[3]
		, re->re_sastate.sa_saved_indigest[4]
	);
}

static void
safe_dump_ring(struct safe_softc *sc, const char *tag)
{
	mtx_lock(&sc->sc_ringmtx);
	printf("\nSafeNet Ring State:\n");
	safe_dump_intrstate(sc, tag);
	safe_dump_dmastatus(sc, tag);
	safe_dump_ringstate(sc, tag);
	if (sc->sc_nqchip) {
		struct safe_ringentry *re = sc->sc_back;
		do {
			safe_dump_request(sc, tag, re);
			if (++re == sc->sc_ringtop)
				re = sc->sc_ring;
		} while (re != sc->sc_front);
	}
	mtx_unlock(&sc->sc_ringmtx);
}

static int
sysctl_hw_safe_dump(SYSCTL_HANDLER_ARGS)
{
	char dmode[64];
	int error;

	strncpy(dmode, "", sizeof(dmode) - 1);
	dmode[sizeof(dmode) - 1] = '\0';
	error = sysctl_handle_string(oidp, &dmode[0], sizeof(dmode), req);

	if (error == 0 && req->newptr != NULL) {
		struct safe_softc *sc = safec;

		if (!sc)
			return EINVAL;
		if (strncmp(dmode, "dma", 3) == 0)
			safe_dump_dmastatus(sc, "safe0");
		else if (strncmp(dmode, "int", 3) == 0)
			safe_dump_intrstate(sc, "safe0");
		else if (strncmp(dmode, "ring", 4) == 0)
			safe_dump_ring(sc, "safe0");
		else
			return EINVAL;
	}
	return error;
}
SYSCTL_PROC(_hw_safe, OID_AUTO, dump, CTLTYPE_STRING | CTLFLAG_RW,
	0, 0, sysctl_hw_safe_dump, "A", "Dump driver state");
#endif /* SAFE_DEBUG */
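/*
 * Usage note (illustrative, not part of the original file): when the
 * driver is built with SAFE_DEBUG, the state dumps above can be
 * triggered from userland through the string sysctl registered here,
 * e.g.:
 *
 *	sysctl hw.safe.dump=dma		# safe_dump_dmastatus()
 *	sysctl hw.safe.dump=int		# safe_dump_intrstate()
 *	sysctl hw.safe.dump=ring	# safe_dump_ring()
 *
 * Any other value is rejected with EINVAL, as is a write before the
 * first device has attached (safec is NULL).
 */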