/* ubsec.c — FreeBSD revision 111646 */
1/* $FreeBSD: head/sys/dev/ubsec/ubsec.c 111646 2003-02-27 21:10:20Z sam $ */ 2/* $OpenBSD: ubsec.c,v 1.115 2002/09/24 18:33:26 jason Exp $ */ 3 4/* 5 * Copyright (c) 2000 Jason L. Wright (jason@thought.net) 6 * Copyright (c) 2000 Theo de Raadt (deraadt@openbsd.org) 7 * Copyright (c) 2001 Patrik Lindergren (patrik@ipunplugged.com) 8 * 9 * All rights reserved. 10 * 11 * Redistribution and use in source and binary forms, with or without 12 * modification, are permitted provided that the following conditions 13 * are met: 14 * 1. Redistributions of source code must retain the above copyright 15 * notice, this list of conditions and the following disclaimer. 16 * 2. Redistributions in binary form must reproduce the above copyright 17 * notice, this list of conditions and the following disclaimer in the 18 * documentation and/or other materials provided with the distribution. 19 * 3. All advertising materials mentioning features or use of this software 20 * must display the following acknowledgement: 21 * This product includes software developed by Jason L. Wright 22 * 4. The name of the author may not be used to endorse or promote products 23 * derived from this software without specific prior written permission. 24 * 25 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR 26 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED 27 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE 28 * DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, 29 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES 30 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR 31 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 32 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, 33 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN 34 * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 35 * POSSIBILITY OF SUCH DAMAGE. 
36 * 37 * Effort sponsored in part by the Defense Advanced Research Projects 38 * Agency (DARPA) and Air Force Research Laboratory, Air Force 39 * Materiel Command, USAF, under agreement number F30602-01-2-0537. 40 * 41 */ 42 43#define UBSEC_DEBUG 44 45/* 46 * uBsec 5[56]01, 58xx hardware crypto accelerator 47 */ 48 49#include <sys/param.h> 50#include <sys/systm.h> 51#include <sys/proc.h> 52#include <sys/errno.h> 53#include <sys/malloc.h> 54#include <sys/kernel.h> 55#include <sys/mbuf.h> 56#include <sys/lock.h> 57#include <sys/mutex.h> 58#include <sys/stdint.h> 59#include <sys/sysctl.h> 60#include <sys/endian.h> 61 62#include <vm/vm.h> 63#include <vm/pmap.h> 64 65#include <machine/clock.h> 66#include <machine/bus.h> 67#include <machine/resource.h> 68#include <sys/bus.h> 69#include <sys/rman.h> 70 71#include <crypto/sha1.h> 72#include <opencrypto/cryptodev.h> 73#include <opencrypto/cryptosoft.h> 74#include <sys/md5.h> 75#include <sys/random.h> 76 77#include <pci/pcivar.h> 78#include <pci/pcireg.h> 79 80/* grr, #defines for gratuitous incompatibility in queue.h */ 81#define SIMPLEQ_HEAD STAILQ_HEAD 82#define SIMPLEQ_ENTRY STAILQ_ENTRY 83#define SIMPLEQ_INIT STAILQ_INIT 84#define SIMPLEQ_INSERT_TAIL STAILQ_INSERT_TAIL 85#define SIMPLEQ_EMPTY STAILQ_EMPTY 86#define SIMPLEQ_FIRST STAILQ_FIRST 87#define SIMPLEQ_REMOVE_HEAD STAILQ_REMOVE_HEAD_UNTIL 88#define SIMPLEQ_FOREACH STAILQ_FOREACH 89/* ditto for endian.h */ 90#define letoh16(x) le16toh(x) 91#define letoh32(x) le32toh(x) 92 93#include <dev/ubsec/ubsecreg.h> 94#include <dev/ubsec/ubsecvar.h> 95 96/* 97 * Prototypes and count for the pci_device structure 98 */ 99static int ubsec_probe(device_t); 100static int ubsec_attach(device_t); 101static int ubsec_detach(device_t); 102static int ubsec_suspend(device_t); 103static int ubsec_resume(device_t); 104static void ubsec_shutdown(device_t); 105 106static device_method_t ubsec_methods[] = { 107 /* Device interface */ 108 DEVMETHOD(device_probe, ubsec_probe), 109 
DEVMETHOD(device_attach, ubsec_attach), 110 DEVMETHOD(device_detach, ubsec_detach), 111 DEVMETHOD(device_suspend, ubsec_suspend), 112 DEVMETHOD(device_resume, ubsec_resume), 113 DEVMETHOD(device_shutdown, ubsec_shutdown), 114 115 /* bus interface */ 116 DEVMETHOD(bus_print_child, bus_generic_print_child), 117 DEVMETHOD(bus_driver_added, bus_generic_driver_added), 118 119 { 0, 0 } 120}; 121static driver_t ubsec_driver = { 122 "ubsec", 123 ubsec_methods, 124 sizeof (struct ubsec_softc) 125}; 126static devclass_t ubsec_devclass; 127 128DRIVER_MODULE(ubsec, pci, ubsec_driver, ubsec_devclass, 0, 0); 129MODULE_DEPEND(ubsec, crypto, 1, 1, 1); 130 131static void ubsec_intr(void *); 132static int ubsec_newsession(void *, u_int32_t *, struct cryptoini *); 133static int ubsec_freesession(void *, u_int64_t); 134static int ubsec_process(void *, struct cryptop *, int); 135static void ubsec_callback(struct ubsec_softc *, struct ubsec_q *); 136static void ubsec_feed(struct ubsec_softc *); 137static void ubsec_mcopy(struct mbuf *, struct mbuf *, int, int); 138static void ubsec_callback2(struct ubsec_softc *, struct ubsec_q2 *); 139static int ubsec_feed2(struct ubsec_softc *); 140static void ubsec_rng(void *); 141static int ubsec_dma_malloc(struct ubsec_softc *, bus_size_t, 142 struct ubsec_dma_alloc *, int); 143#define ubsec_dma_sync(_dma, _flags) \ 144 bus_dmamap_sync((_dma)->dma_tag, (_dma)->dma_map, (_flags)) 145static void ubsec_dma_free(struct ubsec_softc *, struct ubsec_dma_alloc *); 146static int ubsec_dmamap_aligned(struct ubsec_operand *op); 147 148static void ubsec_reset_board(struct ubsec_softc *sc); 149static void ubsec_init_board(struct ubsec_softc *sc); 150static void ubsec_init_pciregs(device_t dev); 151static void ubsec_totalreset(struct ubsec_softc *sc); 152 153static int ubsec_free_q(struct ubsec_softc *sc, struct ubsec_q *q); 154 155static int ubsec_kprocess(void*, struct cryptkop *, int); 156static int ubsec_kprocess_modexp_hw(struct ubsec_softc *, struct 
cryptkop *, int); 157static int ubsec_kprocess_modexp_sw(struct ubsec_softc *, struct cryptkop *, int); 158static int ubsec_kprocess_rsapriv(struct ubsec_softc *, struct cryptkop *, int); 159static void ubsec_kfree(struct ubsec_softc *, struct ubsec_q2 *); 160static int ubsec_ksigbits(struct crparam *); 161static void ubsec_kshift_r(u_int, u_int8_t *, u_int, u_int8_t *, u_int); 162static void ubsec_kshift_l(u_int, u_int8_t *, u_int, u_int8_t *, u_int); 163 164SYSCTL_NODE(_hw, OID_AUTO, ubsec, CTLFLAG_RD, 0, "Broadcom driver parameters"); 165 166#ifdef UBSEC_DEBUG 167static void ubsec_dump_pb(volatile struct ubsec_pktbuf *); 168static void ubsec_dump_mcr(struct ubsec_mcr *); 169static void ubsec_dump_ctx2(struct ubsec_ctx_keyop *); 170 171static int ubsec_debug = 0; 172SYSCTL_INT(_hw_ubsec, OID_AUTO, debug, CTLFLAG_RW, &ubsec_debug, 173 0, "control debugging msgs"); 174#endif 175 176#define READ_REG(sc,r) \ 177 bus_space_read_4((sc)->sc_st, (sc)->sc_sh, (r)) 178 179#define WRITE_REG(sc,reg,val) \ 180 bus_space_write_4((sc)->sc_st, (sc)->sc_sh, reg, val) 181 182#define SWAP32(x) (x) = htole32(ntohl((x))) 183#define HTOLE32(x) (x) = htole32(x) 184 185struct ubsec_stats ubsecstats; 186SYSCTL_STRUCT(_hw_ubsec, OID_AUTO, stats, CTLFLAG_RD, &ubsecstats, 187 ubsec_stats, "driver statistics"); 188 189static int 190ubsec_probe(device_t dev) 191{ 192 if (pci_get_vendor(dev) == PCI_VENDOR_BLUESTEEL && 193 (pci_get_device(dev) == PCI_PRODUCT_BLUESTEEL_5501 || 194 pci_get_device(dev) == PCI_PRODUCT_BLUESTEEL_5601)) 195 return (0); 196 if (pci_get_vendor(dev) == PCI_VENDOR_BROADCOM && 197 (pci_get_device(dev) == PCI_PRODUCT_BROADCOM_5801 || 198 pci_get_device(dev) == PCI_PRODUCT_BROADCOM_5802 || 199 pci_get_device(dev) == PCI_PRODUCT_BROADCOM_5805 || 200 pci_get_device(dev) == PCI_PRODUCT_BROADCOM_5820 || 201 pci_get_device(dev) == PCI_PRODUCT_BROADCOM_5821 || 202 pci_get_device(dev) == PCI_PRODUCT_BROADCOM_5822 || 203 pci_get_device(dev) == PCI_PRODUCT_BROADCOM_5823 204 )) 205 
return (0); 206 return (ENXIO); 207} 208 209static const char* 210ubsec_partname(struct ubsec_softc *sc) 211{ 212 /* XXX sprintf numbers when not decoded */ 213 switch (pci_get_vendor(sc->sc_dev)) { 214 case PCI_VENDOR_BROADCOM: 215 switch (pci_get_device(sc->sc_dev)) { 216 case PCI_PRODUCT_BROADCOM_5801: return "Broadcom 5801"; 217 case PCI_PRODUCT_BROADCOM_5802: return "Broadcom 5802"; 218 case PCI_PRODUCT_BROADCOM_5805: return "Broadcom 5805"; 219 case PCI_PRODUCT_BROADCOM_5820: return "Broadcom 5820"; 220 case PCI_PRODUCT_BROADCOM_5821: return "Broadcom 5821"; 221 case PCI_PRODUCT_BROADCOM_5822: return "Broadcom 5822"; 222 case PCI_PRODUCT_BROADCOM_5823: return "Broadcom 5823"; 223 } 224 return "Broadcom unknown-part"; 225 case PCI_VENDOR_BLUESTEEL: 226 switch (pci_get_device(sc->sc_dev)) { 227 case PCI_PRODUCT_BLUESTEEL_5601: return "Bluesteel 5601"; 228 } 229 return "Bluesteel unknown-part"; 230 } 231 return "Unknown-vendor unknown-part"; 232} 233 234static int 235ubsec_attach(device_t dev) 236{ 237 struct ubsec_softc *sc = device_get_softc(dev); 238 struct ubsec_dma *dmap; 239 u_int32_t cmd, i; 240 int rid; 241 242 KASSERT(sc != NULL, ("ubsec_attach: null software carrier!")); 243 bzero(sc, sizeof (*sc)); 244 sc->sc_dev = dev; 245 246 mtx_init(&sc->sc_mtx, device_get_nameunit(dev), "crypto driver", MTX_DEF); 247 248 SIMPLEQ_INIT(&sc->sc_queue); 249 SIMPLEQ_INIT(&sc->sc_qchip); 250 SIMPLEQ_INIT(&sc->sc_queue2); 251 SIMPLEQ_INIT(&sc->sc_qchip2); 252 SIMPLEQ_INIT(&sc->sc_q2free); 253 254 /* XXX handle power management */ 255 256 sc->sc_statmask = BS_STAT_MCR1_DONE | BS_STAT_DMAERR; 257 258 if (pci_get_vendor(dev) == PCI_VENDOR_BLUESTEEL && 259 pci_get_device(dev) == PCI_PRODUCT_BLUESTEEL_5601) 260 sc->sc_flags |= UBS_FLAGS_KEY | UBS_FLAGS_RNG; 261 262 if (pci_get_vendor(dev) == PCI_VENDOR_BROADCOM && 263 (pci_get_device(dev) == PCI_PRODUCT_BROADCOM_5802 || 264 pci_get_device(dev) == PCI_PRODUCT_BROADCOM_5805)) 265 sc->sc_flags |= UBS_FLAGS_KEY | UBS_FLAGS_RNG; 
266 267 if (pci_get_vendor(dev) == PCI_VENDOR_BROADCOM && 268 pci_get_device(dev) == PCI_PRODUCT_BROADCOM_5820) 269 sc->sc_flags |= UBS_FLAGS_KEY | UBS_FLAGS_RNG | 270 UBS_FLAGS_LONGCTX | UBS_FLAGS_HWNORM | UBS_FLAGS_BIGKEY; 271 272 if (pci_get_vendor(dev) == PCI_VENDOR_BROADCOM && 273 (pci_get_device(dev) == PCI_PRODUCT_BROADCOM_5821 || 274 pci_get_device(dev) == PCI_PRODUCT_BROADCOM_5822 || 275 pci_get_device(dev) == PCI_PRODUCT_BROADCOM_5823 )) { 276 /* NB: the 5821/5822 defines some additional status bits */ 277 sc->sc_statmask |= BS_STAT_MCR1_ALLEMPTY | 278 BS_STAT_MCR2_ALLEMPTY; 279 sc->sc_flags |= UBS_FLAGS_KEY | UBS_FLAGS_RNG | 280 UBS_FLAGS_LONGCTX | UBS_FLAGS_HWNORM | UBS_FLAGS_BIGKEY; 281 } 282 283 cmd = pci_read_config(dev, PCIR_COMMAND, 4); 284 cmd |= PCIM_CMD_MEMEN | PCIM_CMD_BUSMASTEREN; 285 pci_write_config(dev, PCIR_COMMAND, cmd, 4); 286 cmd = pci_read_config(dev, PCIR_COMMAND, 4); 287 288 if (!(cmd & PCIM_CMD_MEMEN)) { 289 device_printf(dev, "failed to enable memory mapping\n"); 290 goto bad; 291 } 292 293 if (!(cmd & PCIM_CMD_BUSMASTEREN)) { 294 device_printf(dev, "failed to enable bus mastering\n"); 295 goto bad; 296 } 297 298 /* 299 * Setup memory-mapping of PCI registers. 300 */ 301 rid = BS_BAR; 302 sc->sc_sr = bus_alloc_resource(dev, SYS_RES_MEMORY, &rid, 303 0, ~0, 1, RF_ACTIVE); 304 if (sc->sc_sr == NULL) { 305 device_printf(dev, "cannot map register space\n"); 306 goto bad; 307 } 308 sc->sc_st = rman_get_bustag(sc->sc_sr); 309 sc->sc_sh = rman_get_bushandle(sc->sc_sr); 310 311 /* 312 * Arrange interrupt line. 313 */ 314 rid = 0; 315 sc->sc_irq = bus_alloc_resource(dev, SYS_RES_IRQ, &rid, 316 0, ~0, 1, RF_SHAREABLE|RF_ACTIVE); 317 if (sc->sc_irq == NULL) { 318 device_printf(dev, "could not map interrupt\n"); 319 goto bad1; 320 } 321 /* 322 * NB: Network code assumes we are blocked with splimp() 323 * so make sure the IRQ is mapped appropriately. 
324 */ 325 if (bus_setup_intr(dev, sc->sc_irq, INTR_TYPE_NET, 326 ubsec_intr, sc, &sc->sc_ih)) { 327 device_printf(dev, "could not establish interrupt\n"); 328 goto bad2; 329 } 330 331 sc->sc_cid = crypto_get_driverid(0); 332 if (sc->sc_cid < 0) { 333 device_printf(dev, "could not get crypto driver id\n"); 334 goto bad3; 335 } 336 337 /* 338 * Setup DMA descriptor area. 339 */ 340 if (bus_dma_tag_create(NULL, /* parent */ 341 1, 0, /* alignment, bounds */ 342 BUS_SPACE_MAXADDR_32BIT, /* lowaddr */ 343 BUS_SPACE_MAXADDR, /* highaddr */ 344 NULL, NULL, /* filter, filterarg */ 345 0x3ffff, /* maxsize */ 346 UBS_MAX_SCATTER, /* nsegments */ 347 0xffff, /* maxsegsize */ 348 BUS_DMA_ALLOCNOW, /* flags */ 349 &sc->sc_dmat)) { 350 device_printf(dev, "cannot allocate DMA tag\n"); 351 goto bad4; 352 } 353 SIMPLEQ_INIT(&sc->sc_freequeue); 354 dmap = sc->sc_dmaa; 355 for (i = 0; i < UBS_MAX_NQUEUE; i++, dmap++) { 356 struct ubsec_q *q; 357 358 q = (struct ubsec_q *)malloc(sizeof(struct ubsec_q), 359 M_DEVBUF, M_NOWAIT); 360 if (q == NULL) { 361 device_printf(dev, "cannot allocate queue buffers\n"); 362 break; 363 } 364 365 if (ubsec_dma_malloc(sc, sizeof(struct ubsec_dmachunk), 366 &dmap->d_alloc, 0)) { 367 device_printf(dev, "cannot allocate dma buffers\n"); 368 free(q, M_DEVBUF); 369 break; 370 } 371 dmap->d_dma = (struct ubsec_dmachunk *)dmap->d_alloc.dma_vaddr; 372 373 q->q_dma = dmap; 374 sc->sc_queuea[i] = q; 375 376 SIMPLEQ_INSERT_TAIL(&sc->sc_freequeue, q, q_next); 377 } 378 379 device_printf(sc->sc_dev, "%s\n", ubsec_partname(sc)); 380 381 crypto_register(sc->sc_cid, CRYPTO_3DES_CBC, 0, 0, 382 ubsec_newsession, ubsec_freesession, ubsec_process, sc); 383 crypto_register(sc->sc_cid, CRYPTO_DES_CBC, 0, 0, 384 ubsec_newsession, ubsec_freesession, ubsec_process, sc); 385 crypto_register(sc->sc_cid, CRYPTO_MD5_HMAC, 0, 0, 386 ubsec_newsession, ubsec_freesession, ubsec_process, sc); 387 crypto_register(sc->sc_cid, CRYPTO_SHA1_HMAC, 0, 0, 388 ubsec_newsession, 
ubsec_freesession, ubsec_process, sc); 389 390 /* 391 * Reset Broadcom chip 392 */ 393 ubsec_reset_board(sc); 394 395 /* 396 * Init Broadcom specific PCI settings 397 */ 398 ubsec_init_pciregs(dev); 399 400 /* 401 * Init Broadcom chip 402 */ 403 ubsec_init_board(sc); 404 405#ifndef UBSEC_NO_RNG 406 if (sc->sc_flags & UBS_FLAGS_RNG) { 407 sc->sc_statmask |= BS_STAT_MCR2_DONE; 408 409 if (ubsec_dma_malloc(sc, sizeof(struct ubsec_mcr), 410 &sc->sc_rng.rng_q.q_mcr, 0)) 411 goto skip_rng; 412 413 if (ubsec_dma_malloc(sc, sizeof(struct ubsec_ctx_rngbypass), 414 &sc->sc_rng.rng_q.q_ctx, 0)) { 415 ubsec_dma_free(sc, &sc->sc_rng.rng_q.q_mcr); 416 goto skip_rng; 417 } 418 419 if (ubsec_dma_malloc(sc, sizeof(u_int32_t) * 420 UBSEC_RNG_BUFSIZ, &sc->sc_rng.rng_buf, 0)) { 421 ubsec_dma_free(sc, &sc->sc_rng.rng_q.q_ctx); 422 ubsec_dma_free(sc, &sc->sc_rng.rng_q.q_mcr); 423 goto skip_rng; 424 } 425 426 if (hz >= 100) 427 sc->sc_rnghz = hz / 100; 428 else 429 sc->sc_rnghz = 1; 430 /* NB: 1 means the callout runs w/o Giant locked */ 431 callout_init(&sc->sc_rngto, 1); 432 callout_reset(&sc->sc_rngto, sc->sc_rnghz, ubsec_rng, sc); 433skip_rng: 434 ; 435 } 436#endif /* UBSEC_NO_RNG */ 437 438 if (sc->sc_flags & UBS_FLAGS_KEY) { 439 sc->sc_statmask |= BS_STAT_MCR2_DONE; 440 441 crypto_kregister(sc->sc_cid, CRK_MOD_EXP, 0, 442 ubsec_kprocess, sc); 443#if 0 444 crypto_kregister(sc->sc_cid, CRK_MOD_EXP_CRT, 0, 445 ubsec_kprocess, sc); 446#endif 447 } 448 return (0); 449bad4: 450 crypto_unregister_all(sc->sc_cid); 451bad3: 452 bus_teardown_intr(dev, sc->sc_irq, sc->sc_ih); 453bad2: 454 bus_release_resource(dev, SYS_RES_IRQ, 0, sc->sc_irq); 455bad1: 456 bus_release_resource(dev, SYS_RES_MEMORY, BS_BAR, sc->sc_sr); 457bad: 458 mtx_destroy(&sc->sc_mtx); 459 return (ENXIO); 460} 461 462/* 463 * Detach a device that successfully probed. 
464 */ 465static int 466ubsec_detach(device_t dev) 467{ 468 struct ubsec_softc *sc = device_get_softc(dev); 469 470 KASSERT(sc != NULL, ("ubsec_detach: null software carrier")); 471 472 /* XXX wait/abort active ops */ 473 474 UBSEC_LOCK(sc); 475 476 callout_stop(&sc->sc_rngto); 477 478 crypto_unregister_all(sc->sc_cid); 479 480 while (!SIMPLEQ_EMPTY(&sc->sc_freequeue)) { 481 struct ubsec_q *q; 482 483 q = SIMPLEQ_FIRST(&sc->sc_freequeue); 484 SIMPLEQ_REMOVE_HEAD(&sc->sc_freequeue, q, q_next); 485 ubsec_dma_free(sc, &q->q_dma->d_alloc); 486 free(q, M_DEVBUF); 487 } 488#ifndef UBSEC_NO_RNG 489 if (sc->sc_flags & UBS_FLAGS_RNG) { 490 ubsec_dma_free(sc, &sc->sc_rng.rng_q.q_mcr); 491 ubsec_dma_free(sc, &sc->sc_rng.rng_q.q_ctx); 492 ubsec_dma_free(sc, &sc->sc_rng.rng_buf); 493 } 494#endif /* UBSEC_NO_RNG */ 495 496 bus_generic_detach(dev); 497 bus_teardown_intr(dev, sc->sc_irq, sc->sc_ih); 498 bus_release_resource(dev, SYS_RES_IRQ, 0, sc->sc_irq); 499 500 bus_dma_tag_destroy(sc->sc_dmat); 501 bus_release_resource(dev, SYS_RES_MEMORY, BS_BAR, sc->sc_sr); 502 503 UBSEC_UNLOCK(sc); 504 505 mtx_destroy(&sc->sc_mtx); 506 507 return (0); 508} 509 510/* 511 * Stop all chip i/o so that the kernel's probe routines don't 512 * get confused by errant DMAs when rebooting. 513 */ 514static void 515ubsec_shutdown(device_t dev) 516{ 517#ifdef notyet 518 ubsec_stop(device_get_softc(dev)); 519#endif 520} 521 522/* 523 * Device suspend routine. 
524 */ 525static int 526ubsec_suspend(device_t dev) 527{ 528 struct ubsec_softc *sc = device_get_softc(dev); 529 530 KASSERT(sc != NULL, ("ubsec_suspend: null software carrier")); 531#ifdef notyet 532 /* XXX stop the device and save PCI settings */ 533#endif 534 sc->sc_suspended = 1; 535 536 return (0); 537} 538 539static int 540ubsec_resume(device_t dev) 541{ 542 struct ubsec_softc *sc = device_get_softc(dev); 543 544 KASSERT(sc != NULL, ("ubsec_resume: null software carrier")); 545#ifdef notyet 546 /* XXX retore PCI settings and start the device */ 547#endif 548 sc->sc_suspended = 0; 549 return (0); 550} 551 552/* 553 * UBSEC Interrupt routine 554 */ 555static void 556ubsec_intr(void *arg) 557{ 558 struct ubsec_softc *sc = arg; 559 volatile u_int32_t stat; 560 struct ubsec_q *q; 561 struct ubsec_dma *dmap; 562 int npkts = 0, i; 563 564 UBSEC_LOCK(sc); 565 566 stat = READ_REG(sc, BS_STAT); 567 stat &= sc->sc_statmask; 568 if (stat == 0) { 569 UBSEC_UNLOCK(sc); 570 return; 571 } 572 573 WRITE_REG(sc, BS_STAT, stat); /* IACK */ 574 575 /* 576 * Check to see if we have any packets waiting for us 577 */ 578 if ((stat & BS_STAT_MCR1_DONE)) { 579 while (!SIMPLEQ_EMPTY(&sc->sc_qchip)) { 580 q = SIMPLEQ_FIRST(&sc->sc_qchip); 581 dmap = q->q_dma; 582 583 if ((dmap->d_dma->d_mcr.mcr_flags & htole16(UBS_MCR_DONE)) == 0) 584 break; 585 586 SIMPLEQ_REMOVE_HEAD(&sc->sc_qchip, q, q_next); 587 588 npkts = q->q_nstacked_mcrs; 589 sc->sc_nqchip -= 1+npkts; 590 /* 591 * search for further sc_qchip ubsec_q's that share 592 * the same MCR, and complete them too, they must be 593 * at the top. 594 */ 595 for (i = 0; i < npkts; i++) { 596 if(q->q_stacked_mcr[i]) { 597 ubsec_callback(sc, q->q_stacked_mcr[i]); 598 ubsecstats.hst_opackets++; 599 } else { 600 break; 601 } 602 } 603 ubsec_callback(sc, q); 604 ubsecstats.hst_opackets++; 605 } 606 607 /* 608 * Don't send any more packet to chip if there has been 609 * a DMAERR. 
610 */ 611 if (!(stat & BS_STAT_DMAERR)) 612 ubsec_feed(sc); 613 } 614 615 /* 616 * Check to see if we have any key setups/rng's waiting for us 617 */ 618 if ((sc->sc_flags & (UBS_FLAGS_KEY|UBS_FLAGS_RNG)) && 619 (stat & BS_STAT_MCR2_DONE)) { 620 struct ubsec_q2 *q2; 621 struct ubsec_mcr *mcr; 622 623 while (!SIMPLEQ_EMPTY(&sc->sc_qchip2)) { 624 q2 = SIMPLEQ_FIRST(&sc->sc_qchip2); 625 626 ubsec_dma_sync(&q2->q_mcr, 627 BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE); 628 629 mcr = (struct ubsec_mcr *)q2->q_mcr.dma_vaddr; 630 if ((mcr->mcr_flags & htole16(UBS_MCR_DONE)) == 0) { 631 ubsec_dma_sync(&q2->q_mcr, 632 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); 633 break; 634 } 635 SIMPLEQ_REMOVE_HEAD(&sc->sc_qchip2, q2, q_next); 636 ubsec_callback2(sc, q2); 637 /* 638 * Don't send any more packet to chip if there has been 639 * a DMAERR. 640 */ 641 if (!(stat & BS_STAT_DMAERR)) 642 ubsec_feed2(sc); 643 } 644 } 645 646 /* 647 * Check to see if we got any DMA Error 648 */ 649 if (stat & BS_STAT_DMAERR) { 650#ifdef UBSEC_DEBUG 651 if (ubsec_debug) { 652 volatile u_int32_t a = READ_REG(sc, BS_ERR); 653 654 printf("dmaerr %s@%08x\n", 655 (a & BS_ERR_READ) ? "read" : "write", 656 a & BS_ERR_ADDR); 657 } 658#endif /* UBSEC_DEBUG */ 659 ubsecstats.hst_dmaerr++; 660 ubsec_totalreset(sc); 661 ubsec_feed(sc); 662 } 663 664 if (sc->sc_needwakeup) { /* XXX check high watermark */ 665 int wakeup = sc->sc_needwakeup & (CRYPTO_SYMQ|CRYPTO_ASYMQ); 666#ifdef UBSEC_DEBUG 667 if (ubsec_debug) 668 device_printf(sc->sc_dev, "wakeup crypto (%x)\n", 669 sc->sc_needwakeup); 670#endif /* UBSEC_DEBUG */ 671 sc->sc_needwakeup &= ~wakeup; 672 crypto_unblock(sc->sc_cid, wakeup); 673 } 674 675 UBSEC_UNLOCK(sc); 676} 677 678/* 679 * ubsec_feed() - aggregate and post requests to chip 680 */ 681static void 682ubsec_feed(struct ubsec_softc *sc) 683{ 684 struct ubsec_q *q, *q2; 685 int npkts, i; 686 void *v; 687 u_int32_t stat; 688 689 /* 690 * Decide how many ops to combine in a single MCR. 
We cannot 691 * aggregate more than UBS_MAX_AGGR because this is the number 692 * of slots defined in the data structure. Note that 693 * aggregation only happens if ops are marked batch'able. 694 * Aggregating ops reduces the number of interrupts to the host 695 * but also (potentially) increases the latency for processing 696 * completed ops as we only get an interrupt when all aggregated 697 * ops have completed. 698 */ 699 if (sc->sc_nqueue == 0) 700 return; 701 if (sc->sc_nqueue > 1) { 702 npkts = 0; 703 SIMPLEQ_FOREACH(q, &sc->sc_queue, q_next) { 704 npkts++; 705 if ((q->q_crp->crp_flags & CRYPTO_F_BATCH) == 0) 706 break; 707 } 708 } else 709 npkts = 1; 710 /* 711 * Check device status before going any further. 712 */ 713 if ((stat = READ_REG(sc, BS_STAT)) & (BS_STAT_MCR1_FULL | BS_STAT_DMAERR)) { 714 if (stat & BS_STAT_DMAERR) { 715 ubsec_totalreset(sc); 716 ubsecstats.hst_dmaerr++; 717 } else 718 ubsecstats.hst_mcr1full++; 719 return; 720 } 721 if (sc->sc_nqueue > ubsecstats.hst_maxqueue) 722 ubsecstats.hst_maxqueue = sc->sc_nqueue; 723 if (npkts > UBS_MAX_AGGR) 724 npkts = UBS_MAX_AGGR; 725 if (npkts < 2) /* special case 1 op */ 726 goto feed1; 727 728 ubsecstats.hst_totbatch += npkts-1; 729#ifdef UBSEC_DEBUG 730 if (ubsec_debug) 731 printf("merging %d records\n", npkts); 732#endif /* UBSEC_DEBUG */ 733 734 q = SIMPLEQ_FIRST(&sc->sc_queue); 735 SIMPLEQ_REMOVE_HEAD(&sc->sc_queue, q, q_next); 736 --sc->sc_nqueue; 737 738 bus_dmamap_sync(sc->sc_dmat, q->q_src_map, BUS_DMASYNC_PREWRITE); 739 if (q->q_dst_map != NULL) 740 bus_dmamap_sync(sc->sc_dmat, q->q_dst_map, BUS_DMASYNC_PREREAD); 741 742 q->q_nstacked_mcrs = npkts - 1; /* Number of packets stacked */ 743 744 for (i = 0; i < q->q_nstacked_mcrs; i++) { 745 q2 = SIMPLEQ_FIRST(&sc->sc_queue); 746 bus_dmamap_sync(sc->sc_dmat, q2->q_src_map, 747 BUS_DMASYNC_PREWRITE); 748 if (q2->q_dst_map != NULL) 749 bus_dmamap_sync(sc->sc_dmat, q2->q_dst_map, 750 BUS_DMASYNC_PREREAD); 751 SIMPLEQ_REMOVE_HEAD(&sc->sc_queue, 
q2, q_next); 752 --sc->sc_nqueue; 753 754 v = (void*)(((char *)&q2->q_dma->d_dma->d_mcr) + sizeof(struct ubsec_mcr) - 755 sizeof(struct ubsec_mcr_add)); 756 bcopy(v, &q->q_dma->d_dma->d_mcradd[i], sizeof(struct ubsec_mcr_add)); 757 q->q_stacked_mcr[i] = q2; 758 } 759 q->q_dma->d_dma->d_mcr.mcr_pkts = htole16(npkts); 760 SIMPLEQ_INSERT_TAIL(&sc->sc_qchip, q, q_next); 761 sc->sc_nqchip += npkts; 762 if (sc->sc_nqchip > ubsecstats.hst_maxqchip) 763 ubsecstats.hst_maxqchip = sc->sc_nqchip; 764 ubsec_dma_sync(&q->q_dma->d_alloc, 765 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 766 WRITE_REG(sc, BS_MCR1, q->q_dma->d_alloc.dma_paddr + 767 offsetof(struct ubsec_dmachunk, d_mcr)); 768 return; 769feed1: 770 q = SIMPLEQ_FIRST(&sc->sc_queue); 771 772 bus_dmamap_sync(sc->sc_dmat, q->q_src_map, BUS_DMASYNC_PREWRITE); 773 if (q->q_dst_map != NULL) 774 bus_dmamap_sync(sc->sc_dmat, q->q_dst_map, BUS_DMASYNC_PREREAD); 775 ubsec_dma_sync(&q->q_dma->d_alloc, 776 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 777 778 WRITE_REG(sc, BS_MCR1, q->q_dma->d_alloc.dma_paddr + 779 offsetof(struct ubsec_dmachunk, d_mcr)); 780#ifdef UBSEC_DEBUG 781 if (ubsec_debug) 782 printf("feed1: q->chip %p %08x stat %08x\n", 783 q, (u_int32_t)vtophys(&q->q_dma->d_dma->d_mcr), 784 stat); 785#endif /* UBSEC_DEBUG */ 786 SIMPLEQ_REMOVE_HEAD(&sc->sc_queue, q, q_next); 787 --sc->sc_nqueue; 788 SIMPLEQ_INSERT_TAIL(&sc->sc_qchip, q, q_next); 789 sc->sc_nqchip++; 790 if (sc->sc_nqchip > ubsecstats.hst_maxqchip) 791 ubsecstats.hst_maxqchip = sc->sc_nqchip; 792 return; 793} 794 795/* 796 * Allocate a new 'session' and return an encoded session id. 'sidp' 797 * contains our registration id, and should contain an encoded session 798 * id on successful allocation. 
799 */ 800static int 801ubsec_newsession(void *arg, u_int32_t *sidp, struct cryptoini *cri) 802{ 803 struct cryptoini *c, *encini = NULL, *macini = NULL; 804 struct ubsec_softc *sc = arg; 805 struct ubsec_session *ses = NULL; 806 MD5_CTX md5ctx; 807 SHA1_CTX sha1ctx; 808 int i, sesn; 809 810 KASSERT(sc != NULL, ("ubsec_newsession: null softc")); 811 if (sidp == NULL || cri == NULL || sc == NULL) 812 return (EINVAL); 813 814 for (c = cri; c != NULL; c = c->cri_next) { 815 if (c->cri_alg == CRYPTO_MD5_HMAC || 816 c->cri_alg == CRYPTO_SHA1_HMAC) { 817 if (macini) 818 return (EINVAL); 819 macini = c; 820 } else if (c->cri_alg == CRYPTO_DES_CBC || 821 c->cri_alg == CRYPTO_3DES_CBC) { 822 if (encini) 823 return (EINVAL); 824 encini = c; 825 } else 826 return (EINVAL); 827 } 828 if (encini == NULL && macini == NULL) 829 return (EINVAL); 830 831 if (sc->sc_sessions == NULL) { 832 ses = sc->sc_sessions = (struct ubsec_session *)malloc( 833 sizeof(struct ubsec_session), M_DEVBUF, M_NOWAIT); 834 if (ses == NULL) 835 return (ENOMEM); 836 sesn = 0; 837 sc->sc_nsessions = 1; 838 } else { 839 for (sesn = 0; sesn < sc->sc_nsessions; sesn++) { 840 if (sc->sc_sessions[sesn].ses_used == 0) { 841 ses = &sc->sc_sessions[sesn]; 842 break; 843 } 844 } 845 846 if (ses == NULL) { 847 sesn = sc->sc_nsessions; 848 ses = (struct ubsec_session *)malloc((sesn + 1) * 849 sizeof(struct ubsec_session), M_DEVBUF, M_NOWAIT); 850 if (ses == NULL) 851 return (ENOMEM); 852 bcopy(sc->sc_sessions, ses, sesn * 853 sizeof(struct ubsec_session)); 854 bzero(sc->sc_sessions, sesn * 855 sizeof(struct ubsec_session)); 856 free(sc->sc_sessions, M_DEVBUF); 857 sc->sc_sessions = ses; 858 ses = &sc->sc_sessions[sesn]; 859 sc->sc_nsessions++; 860 } 861 } 862 863 bzero(ses, sizeof(struct ubsec_session)); 864 ses->ses_used = 1; 865 if (encini) { 866 /* get an IV, network byte order */ 867 /* XXX may read fewer than requested */ 868 read_random(ses->ses_iv, sizeof(ses->ses_iv)); 869 870 /* Go ahead and compute key in 
ubsec's byte order */ 871 if (encini->cri_alg == CRYPTO_DES_CBC) { 872 bcopy(encini->cri_key, &ses->ses_deskey[0], 8); 873 bcopy(encini->cri_key, &ses->ses_deskey[2], 8); 874 bcopy(encini->cri_key, &ses->ses_deskey[4], 8); 875 } else 876 bcopy(encini->cri_key, ses->ses_deskey, 24); 877 878 SWAP32(ses->ses_deskey[0]); 879 SWAP32(ses->ses_deskey[1]); 880 SWAP32(ses->ses_deskey[2]); 881 SWAP32(ses->ses_deskey[3]); 882 SWAP32(ses->ses_deskey[4]); 883 SWAP32(ses->ses_deskey[5]); 884 } 885 886 if (macini) { 887 for (i = 0; i < macini->cri_klen / 8; i++) 888 macini->cri_key[i] ^= HMAC_IPAD_VAL; 889 890 if (macini->cri_alg == CRYPTO_MD5_HMAC) { 891 MD5Init(&md5ctx); 892 MD5Update(&md5ctx, macini->cri_key, 893 macini->cri_klen / 8); 894 MD5Update(&md5ctx, hmac_ipad_buffer, 895 HMAC_BLOCK_LEN - (macini->cri_klen / 8)); 896 bcopy(md5ctx.state, ses->ses_hminner, 897 sizeof(md5ctx.state)); 898 } else { 899 SHA1Init(&sha1ctx); 900 SHA1Update(&sha1ctx, macini->cri_key, 901 macini->cri_klen / 8); 902 SHA1Update(&sha1ctx, hmac_ipad_buffer, 903 HMAC_BLOCK_LEN - (macini->cri_klen / 8)); 904 bcopy(sha1ctx.h.b32, ses->ses_hminner, 905 sizeof(sha1ctx.h.b32)); 906 } 907 908 for (i = 0; i < macini->cri_klen / 8; i++) 909 macini->cri_key[i] ^= (HMAC_IPAD_VAL ^ HMAC_OPAD_VAL); 910 911 if (macini->cri_alg == CRYPTO_MD5_HMAC) { 912 MD5Init(&md5ctx); 913 MD5Update(&md5ctx, macini->cri_key, 914 macini->cri_klen / 8); 915 MD5Update(&md5ctx, hmac_opad_buffer, 916 HMAC_BLOCK_LEN - (macini->cri_klen / 8)); 917 bcopy(md5ctx.state, ses->ses_hmouter, 918 sizeof(md5ctx.state)); 919 } else { 920 SHA1Init(&sha1ctx); 921 SHA1Update(&sha1ctx, macini->cri_key, 922 macini->cri_klen / 8); 923 SHA1Update(&sha1ctx, hmac_opad_buffer, 924 HMAC_BLOCK_LEN - (macini->cri_klen / 8)); 925 bcopy(sha1ctx.h.b32, ses->ses_hmouter, 926 sizeof(sha1ctx.h.b32)); 927 } 928 929 for (i = 0; i < macini->cri_klen / 8; i++) 930 macini->cri_key[i] ^= HMAC_OPAD_VAL; 931 } 932 933 *sidp = UBSEC_SID(device_get_unit(sc->sc_dev), sesn); 
934 return (0); 935} 936 937/* 938 * Deallocate a session. 939 */ 940static int 941ubsec_freesession(void *arg, u_int64_t tid) 942{ 943 struct ubsec_softc *sc = arg; 944 int session; 945 u_int32_t sid = ((u_int32_t) tid) & 0xffffffff; 946 947 KASSERT(sc != NULL, ("ubsec_freesession: null softc")); 948 if (sc == NULL) 949 return (EINVAL); 950 951 session = UBSEC_SESSION(sid); 952 if (session >= sc->sc_nsessions) 953 return (EINVAL); 954 955 bzero(&sc->sc_sessions[session], sizeof(sc->sc_sessions[session])); 956 return (0); 957} 958 959static void 960ubsec_op_cb(void *arg, bus_dma_segment_t *seg, int nsegs, bus_size_t mapsize, int error) 961{ 962 struct ubsec_operand *op = arg; 963 964 KASSERT(nsegs <= UBS_MAX_SCATTER, 965 ("Too many DMA segments returned when mapping operand")); 966#ifdef UBSEC_DEBUG 967 if (ubsec_debug) 968 printf("ubsec_op_cb: mapsize %u nsegs %d\n", 969 (u_int) mapsize, nsegs); 970#endif 971 op->mapsize = mapsize; 972 op->nsegs = nsegs; 973 bcopy(seg, op->segs, nsegs * sizeof (seg[0])); 974} 975 976static int 977ubsec_process(void *arg, struct cryptop *crp, int hint) 978{ 979 struct ubsec_q *q = NULL; 980 int err = 0, i, j, nicealign; 981 struct ubsec_softc *sc = arg; 982 struct cryptodesc *crd1, *crd2, *maccrd, *enccrd; 983 int encoffset = 0, macoffset = 0, cpskip, cpoffset; 984 int sskip, dskip, stheend, dtheend; 985 int16_t coffset; 986 struct ubsec_session *ses; 987 struct ubsec_pktctx ctx; 988 struct ubsec_dma *dmap = NULL; 989 990 if (crp == NULL || crp->crp_callback == NULL || sc == NULL) { 991 ubsecstats.hst_invalid++; 992 return (EINVAL); 993 } 994 if (UBSEC_SESSION(crp->crp_sid) >= sc->sc_nsessions) { 995 ubsecstats.hst_badsession++; 996 return (EINVAL); 997 } 998 999 UBSEC_LOCK(sc); 1000 1001 if (SIMPLEQ_EMPTY(&sc->sc_freequeue)) { 1002 ubsecstats.hst_queuefull++; 1003 sc->sc_needwakeup |= CRYPTO_SYMQ; 1004 UBSEC_UNLOCK(sc); 1005 return (ERESTART); 1006 } 1007 q = SIMPLEQ_FIRST(&sc->sc_freequeue); 1008 
SIMPLEQ_REMOVE_HEAD(&sc->sc_freequeue, q, q_next); 1009 UBSEC_UNLOCK(sc); 1010 1011 dmap = q->q_dma; /* Save dma pointer */ 1012 bzero(q, sizeof(struct ubsec_q)); 1013 bzero(&ctx, sizeof(ctx)); 1014 1015 q->q_sesn = UBSEC_SESSION(crp->crp_sid); 1016 q->q_dma = dmap; 1017 ses = &sc->sc_sessions[q->q_sesn]; 1018 1019 if (crp->crp_flags & CRYPTO_F_IMBUF) { 1020 q->q_src_m = (struct mbuf *)crp->crp_buf; 1021 q->q_dst_m = (struct mbuf *)crp->crp_buf; 1022 } else if (crp->crp_flags & CRYPTO_F_IOV) { 1023 q->q_src_io = (struct uio *)crp->crp_buf; 1024 q->q_dst_io = (struct uio *)crp->crp_buf; 1025 } else { 1026 ubsecstats.hst_badflags++; 1027 err = EINVAL; 1028 goto errout; /* XXX we don't handle contiguous blocks! */ 1029 } 1030 1031 bzero(&dmap->d_dma->d_mcr, sizeof(struct ubsec_mcr)); 1032 1033 dmap->d_dma->d_mcr.mcr_pkts = htole16(1); 1034 dmap->d_dma->d_mcr.mcr_flags = 0; 1035 q->q_crp = crp; 1036 1037 crd1 = crp->crp_desc; 1038 if (crd1 == NULL) { 1039 ubsecstats.hst_nodesc++; 1040 err = EINVAL; 1041 goto errout; 1042 } 1043 crd2 = crd1->crd_next; 1044 1045 if (crd2 == NULL) { 1046 if (crd1->crd_alg == CRYPTO_MD5_HMAC || 1047 crd1->crd_alg == CRYPTO_SHA1_HMAC) { 1048 maccrd = crd1; 1049 enccrd = NULL; 1050 } else if (crd1->crd_alg == CRYPTO_DES_CBC || 1051 crd1->crd_alg == CRYPTO_3DES_CBC) { 1052 maccrd = NULL; 1053 enccrd = crd1; 1054 } else { 1055 ubsecstats.hst_badalg++; 1056 err = EINVAL; 1057 goto errout; 1058 } 1059 } else { 1060 if ((crd1->crd_alg == CRYPTO_MD5_HMAC || 1061 crd1->crd_alg == CRYPTO_SHA1_HMAC) && 1062 (crd2->crd_alg == CRYPTO_DES_CBC || 1063 crd2->crd_alg == CRYPTO_3DES_CBC) && 1064 ((crd2->crd_flags & CRD_F_ENCRYPT) == 0)) { 1065 maccrd = crd1; 1066 enccrd = crd2; 1067 } else if ((crd1->crd_alg == CRYPTO_DES_CBC || 1068 crd1->crd_alg == CRYPTO_3DES_CBC) && 1069 (crd2->crd_alg == CRYPTO_MD5_HMAC || 1070 crd2->crd_alg == CRYPTO_SHA1_HMAC) && 1071 (crd1->crd_flags & CRD_F_ENCRYPT)) { 1072 enccrd = crd1; 1073 maccrd = crd2; 1074 } else { 1075 /* 
1076 * We cannot order the ubsec as requested 1077 */ 1078 ubsecstats.hst_badalg++; 1079 err = EINVAL; 1080 goto errout; 1081 } 1082 } 1083 1084 if (enccrd) { 1085 encoffset = enccrd->crd_skip; 1086 ctx.pc_flags |= htole16(UBS_PKTCTX_ENC_3DES); 1087 1088 if (enccrd->crd_flags & CRD_F_ENCRYPT) { 1089 q->q_flags |= UBSEC_QFLAGS_COPYOUTIV; 1090 1091 if (enccrd->crd_flags & CRD_F_IV_EXPLICIT) 1092 bcopy(enccrd->crd_iv, ctx.pc_iv, 8); 1093 else { 1094 ctx.pc_iv[0] = ses->ses_iv[0]; 1095 ctx.pc_iv[1] = ses->ses_iv[1]; 1096 } 1097 1098 if ((enccrd->crd_flags & CRD_F_IV_PRESENT) == 0) { 1099 if (crp->crp_flags & CRYPTO_F_IMBUF) 1100 m_copyback(q->q_src_m, 1101 enccrd->crd_inject, 1102 8, (caddr_t)ctx.pc_iv); 1103 else if (crp->crp_flags & CRYPTO_F_IOV) 1104 cuio_copyback(q->q_src_io, 1105 enccrd->crd_inject, 1106 8, (caddr_t)ctx.pc_iv); 1107 } 1108 } else { 1109 ctx.pc_flags |= htole16(UBS_PKTCTX_INBOUND); 1110 1111 if (enccrd->crd_flags & CRD_F_IV_EXPLICIT) 1112 bcopy(enccrd->crd_iv, ctx.pc_iv, 8); 1113 else if (crp->crp_flags & CRYPTO_F_IMBUF) 1114 m_copydata(q->q_src_m, enccrd->crd_inject, 1115 8, (caddr_t)ctx.pc_iv); 1116 else if (crp->crp_flags & CRYPTO_F_IOV) 1117 cuio_copydata(q->q_src_io, 1118 enccrd->crd_inject, 8, 1119 (caddr_t)ctx.pc_iv); 1120 } 1121 1122 ctx.pc_deskey[0] = ses->ses_deskey[0]; 1123 ctx.pc_deskey[1] = ses->ses_deskey[1]; 1124 ctx.pc_deskey[2] = ses->ses_deskey[2]; 1125 ctx.pc_deskey[3] = ses->ses_deskey[3]; 1126 ctx.pc_deskey[4] = ses->ses_deskey[4]; 1127 ctx.pc_deskey[5] = ses->ses_deskey[5]; 1128 SWAP32(ctx.pc_iv[0]); 1129 SWAP32(ctx.pc_iv[1]); 1130 } 1131 1132 if (maccrd) { 1133 macoffset = maccrd->crd_skip; 1134 1135 if (maccrd->crd_alg == CRYPTO_MD5_HMAC) 1136 ctx.pc_flags |= htole16(UBS_PKTCTX_AUTH_MD5); 1137 else 1138 ctx.pc_flags |= htole16(UBS_PKTCTX_AUTH_SHA1); 1139 1140 for (i = 0; i < 5; i++) { 1141 ctx.pc_hminner[i] = ses->ses_hminner[i]; 1142 ctx.pc_hmouter[i] = ses->ses_hmouter[i]; 1143 1144 HTOLE32(ctx.pc_hminner[i]); 1145 
HTOLE32(ctx.pc_hmouter[i]); 1146 } 1147 } 1148 1149 if (enccrd && maccrd) { 1150 /* 1151 * ubsec cannot handle packets where the end of encryption 1152 * and authentication are not the same, or where the 1153 * encrypted part begins before the authenticated part. 1154 */ 1155 if ((encoffset + enccrd->crd_len) != 1156 (macoffset + maccrd->crd_len)) { 1157 ubsecstats.hst_lenmismatch++; 1158 err = EINVAL; 1159 goto errout; 1160 } 1161 if (enccrd->crd_skip < maccrd->crd_skip) { 1162 ubsecstats.hst_skipmismatch++; 1163 err = EINVAL; 1164 goto errout; 1165 } 1166 sskip = maccrd->crd_skip; 1167 cpskip = dskip = enccrd->crd_skip; 1168 stheend = maccrd->crd_len; 1169 dtheend = enccrd->crd_len; 1170 coffset = enccrd->crd_skip - maccrd->crd_skip; 1171 cpoffset = cpskip + dtheend; 1172#ifdef UBSEC_DEBUG 1173 if (ubsec_debug) { 1174 printf("mac: skip %d, len %d, inject %d\n", 1175 maccrd->crd_skip, maccrd->crd_len, maccrd->crd_inject); 1176 printf("enc: skip %d, len %d, inject %d\n", 1177 enccrd->crd_skip, enccrd->crd_len, enccrd->crd_inject); 1178 printf("src: skip %d, len %d\n", sskip, stheend); 1179 printf("dst: skip %d, len %d\n", dskip, dtheend); 1180 printf("ubs: coffset %d, pktlen %d, cpskip %d, cpoffset %d\n", 1181 coffset, stheend, cpskip, cpoffset); 1182 } 1183#endif 1184 } else { 1185 cpskip = dskip = sskip = macoffset + encoffset; 1186 dtheend = stheend = (enccrd)?enccrd->crd_len:maccrd->crd_len; 1187 cpoffset = cpskip + dtheend; 1188 coffset = 0; 1189 } 1190 ctx.pc_offset = htole16(coffset >> 2); 1191 1192 if (bus_dmamap_create(sc->sc_dmat, BUS_DMA_NOWAIT, &q->q_src_map)) { 1193 ubsecstats.hst_nomap++; 1194 err = ENOMEM; 1195 goto errout; 1196 } 1197 if (crp->crp_flags & CRYPTO_F_IMBUF) { 1198 if (bus_dmamap_load_mbuf(sc->sc_dmat, q->q_src_map, 1199 q->q_src_m, ubsec_op_cb, &q->q_src, BUS_DMA_NOWAIT) != 0) { 1200 bus_dmamap_destroy(sc->sc_dmat, q->q_src_map); 1201 q->q_src_map = NULL; 1202 ubsecstats.hst_noload++; 1203 err = ENOMEM; 1204 goto errout; 1205 } 1206 } 
else if (crp->crp_flags & CRYPTO_F_IOV) { 1207 if (bus_dmamap_load_uio(sc->sc_dmat, q->q_src_map, 1208 q->q_src_io, ubsec_op_cb, &q->q_src, BUS_DMA_NOWAIT) != 0) { 1209 bus_dmamap_destroy(sc->sc_dmat, q->q_src_map); 1210 q->q_src_map = NULL; 1211 ubsecstats.hst_noload++; 1212 err = ENOMEM; 1213 goto errout; 1214 } 1215 } 1216 nicealign = ubsec_dmamap_aligned(&q->q_src); 1217 1218 dmap->d_dma->d_mcr.mcr_pktlen = htole16(stheend); 1219 1220#ifdef UBSEC_DEBUG 1221 if (ubsec_debug) 1222 printf("src skip: %d nicealign: %u\n", sskip, nicealign); 1223#endif 1224 for (i = j = 0; i < q->q_src_nsegs; i++) { 1225 struct ubsec_pktbuf *pb; 1226 bus_size_t packl = q->q_src_segs[i].ds_len; 1227 bus_addr_t packp = q->q_src_segs[i].ds_addr; 1228 1229 if (sskip >= packl) { 1230 sskip -= packl; 1231 continue; 1232 } 1233 1234 packl -= sskip; 1235 packp += sskip; 1236 sskip = 0; 1237 1238 if (packl > 0xfffc) { 1239 err = EIO; 1240 goto errout; 1241 } 1242 1243 if (j == 0) 1244 pb = &dmap->d_dma->d_mcr.mcr_ipktbuf; 1245 else 1246 pb = &dmap->d_dma->d_sbuf[j - 1]; 1247 1248 pb->pb_addr = htole32(packp); 1249 1250 if (stheend) { 1251 if (packl > stheend) { 1252 pb->pb_len = htole32(stheend); 1253 stheend = 0; 1254 } else { 1255 pb->pb_len = htole32(packl); 1256 stheend -= packl; 1257 } 1258 } else 1259 pb->pb_len = htole32(packl); 1260 1261 if ((i + 1) == q->q_src_nsegs) 1262 pb->pb_next = 0; 1263 else 1264 pb->pb_next = htole32(dmap->d_alloc.dma_paddr + 1265 offsetof(struct ubsec_dmachunk, d_sbuf[j])); 1266 j++; 1267 } 1268 1269 if (enccrd == NULL && maccrd != NULL) { 1270 dmap->d_dma->d_mcr.mcr_opktbuf.pb_addr = 0; 1271 dmap->d_dma->d_mcr.mcr_opktbuf.pb_len = 0; 1272 dmap->d_dma->d_mcr.mcr_opktbuf.pb_next = htole32(dmap->d_alloc.dma_paddr + 1273 offsetof(struct ubsec_dmachunk, d_macbuf[0])); 1274#ifdef UBSEC_DEBUG 1275 if (ubsec_debug) 1276 printf("opkt: %x %x %x\n", 1277 dmap->d_dma->d_mcr.mcr_opktbuf.pb_addr, 1278 dmap->d_dma->d_mcr.mcr_opktbuf.pb_len, 1279 
dmap->d_dma->d_mcr.mcr_opktbuf.pb_next); 1280#endif 1281 } else { 1282 if (crp->crp_flags & CRYPTO_F_IOV) { 1283 if (!nicealign) { 1284 ubsecstats.hst_iovmisaligned++; 1285 err = EINVAL; 1286 goto errout; 1287 } 1288 if (bus_dmamap_create(sc->sc_dmat, BUS_DMA_NOWAIT, 1289 &q->q_dst_map)) { 1290 ubsecstats.hst_nomap++; 1291 err = ENOMEM; 1292 goto errout; 1293 } 1294 if (bus_dmamap_load_uio(sc->sc_dmat, q->q_dst_map, 1295 q->q_dst_io, ubsec_op_cb, &q->q_dst, BUS_DMA_NOWAIT) != 0) { 1296 bus_dmamap_destroy(sc->sc_dmat, q->q_dst_map); 1297 q->q_dst_map = NULL; 1298 ubsecstats.hst_noload++; 1299 err = ENOMEM; 1300 goto errout; 1301 } 1302 } else if (crp->crp_flags & CRYPTO_F_IMBUF) { 1303 if (nicealign) { 1304 q->q_dst = q->q_src; 1305 } else { 1306 int totlen, len; 1307 struct mbuf *m, *top, **mp; 1308 1309 ubsecstats.hst_unaligned++; 1310 totlen = q->q_src_mapsize; 1311 if (q->q_src_m->m_flags & M_PKTHDR) { 1312 len = MHLEN; 1313 MGETHDR(m, M_DONTWAIT, MT_DATA); 1314 if (m && !m_dup_pkthdr(m, q->q_src_m, M_DONTWAIT)) { 1315 m_free(m); 1316 m = NULL; 1317 } 1318 } else { 1319 len = MLEN; 1320 MGET(m, M_DONTWAIT, MT_DATA); 1321 } 1322 if (m == NULL) { 1323 ubsecstats.hst_nombuf++; 1324 err = sc->sc_nqueue ? ERESTART : ENOMEM; 1325 goto errout; 1326 } 1327 if (totlen >= MINCLSIZE) { 1328 MCLGET(m, M_DONTWAIT); 1329 if ((m->m_flags & M_EXT) == 0) { 1330 m_free(m); 1331 ubsecstats.hst_nomcl++; 1332 err = sc->sc_nqueue ? ERESTART : ENOMEM; 1333 goto errout; 1334 } 1335 len = MCLBYTES; 1336 } 1337 m->m_len = len; 1338 top = NULL; 1339 mp = ⊤ 1340 1341 while (totlen > 0) { 1342 if (top) { 1343 MGET(m, M_DONTWAIT, MT_DATA); 1344 if (m == NULL) { 1345 m_freem(top); 1346 ubsecstats.hst_nombuf++; 1347 err = sc->sc_nqueue ? 
ERESTART : ENOMEM; 1348 goto errout; 1349 } 1350 len = MLEN; 1351 } 1352 if (top && totlen >= MINCLSIZE) { 1353 MCLGET(m, M_DONTWAIT); 1354 if ((m->m_flags & M_EXT) == 0) { 1355 *mp = m; 1356 m_freem(top); 1357 ubsecstats.hst_nomcl++; 1358 err = sc->sc_nqueue ? ERESTART : ENOMEM; 1359 goto errout; 1360 } 1361 len = MCLBYTES; 1362 } 1363 m->m_len = len = min(totlen, len); 1364 totlen -= len; 1365 *mp = m; 1366 mp = &m->m_next; 1367 } 1368 q->q_dst_m = top; 1369 ubsec_mcopy(q->q_src_m, q->q_dst_m, 1370 cpskip, cpoffset); 1371 if (bus_dmamap_create(sc->sc_dmat, 1372 BUS_DMA_NOWAIT, &q->q_dst_map) != 0) { 1373 ubsecstats.hst_nomap++; 1374 err = ENOMEM; 1375 goto errout; 1376 } 1377 if (bus_dmamap_load_mbuf(sc->sc_dmat, 1378 q->q_dst_map, q->q_dst_m, 1379 ubsec_op_cb, &q->q_dst, 1380 BUS_DMA_NOWAIT) != 0) { 1381 bus_dmamap_destroy(sc->sc_dmat, 1382 q->q_dst_map); 1383 q->q_dst_map = NULL; 1384 ubsecstats.hst_noload++; 1385 err = ENOMEM; 1386 goto errout; 1387 } 1388 } 1389 } else { 1390 ubsecstats.hst_badflags++; 1391 err = EINVAL; 1392 goto errout; 1393 } 1394 1395#ifdef UBSEC_DEBUG 1396 if (ubsec_debug) 1397 printf("dst skip: %d\n", dskip); 1398#endif 1399 for (i = j = 0; i < q->q_dst_nsegs; i++) { 1400 struct ubsec_pktbuf *pb; 1401 bus_size_t packl = q->q_dst_segs[i].ds_len; 1402 bus_addr_t packp = q->q_dst_segs[i].ds_addr; 1403 1404 if (dskip >= packl) { 1405 dskip -= packl; 1406 continue; 1407 } 1408 1409 packl -= dskip; 1410 packp += dskip; 1411 dskip = 0; 1412 1413 if (packl > 0xfffc) { 1414 err = EIO; 1415 goto errout; 1416 } 1417 1418 if (j == 0) 1419 pb = &dmap->d_dma->d_mcr.mcr_opktbuf; 1420 else 1421 pb = &dmap->d_dma->d_dbuf[j - 1]; 1422 1423 pb->pb_addr = htole32(packp); 1424 1425 if (dtheend) { 1426 if (packl > dtheend) { 1427 pb->pb_len = htole32(dtheend); 1428 dtheend = 0; 1429 } else { 1430 pb->pb_len = htole32(packl); 1431 dtheend -= packl; 1432 } 1433 } else 1434 pb->pb_len = htole32(packl); 1435 1436 if ((i + 1) == q->q_dst_nsegs) { 1437 if (maccrd) 
1438 pb->pb_next = htole32(dmap->d_alloc.dma_paddr + 1439 offsetof(struct ubsec_dmachunk, d_macbuf[0])); 1440 else 1441 pb->pb_next = 0; 1442 } else 1443 pb->pb_next = htole32(dmap->d_alloc.dma_paddr + 1444 offsetof(struct ubsec_dmachunk, d_dbuf[j])); 1445 j++; 1446 } 1447 } 1448 1449 dmap->d_dma->d_mcr.mcr_cmdctxp = htole32(dmap->d_alloc.dma_paddr + 1450 offsetof(struct ubsec_dmachunk, d_ctx)); 1451 1452 if (sc->sc_flags & UBS_FLAGS_LONGCTX) { 1453 struct ubsec_pktctx_long *ctxl; 1454 1455 ctxl = (struct ubsec_pktctx_long *)(dmap->d_alloc.dma_vaddr + 1456 offsetof(struct ubsec_dmachunk, d_ctx)); 1457 1458 /* transform small context into long context */ 1459 ctxl->pc_len = htole16(sizeof(struct ubsec_pktctx_long)); 1460 ctxl->pc_type = htole16(UBS_PKTCTX_TYPE_IPSEC); 1461 ctxl->pc_flags = ctx.pc_flags; 1462 ctxl->pc_offset = ctx.pc_offset; 1463 for (i = 0; i < 6; i++) 1464 ctxl->pc_deskey[i] = ctx.pc_deskey[i]; 1465 for (i = 0; i < 5; i++) 1466 ctxl->pc_hminner[i] = ctx.pc_hminner[i]; 1467 for (i = 0; i < 5; i++) 1468 ctxl->pc_hmouter[i] = ctx.pc_hmouter[i]; 1469 ctxl->pc_iv[0] = ctx.pc_iv[0]; 1470 ctxl->pc_iv[1] = ctx.pc_iv[1]; 1471 } else 1472 bcopy(&ctx, dmap->d_alloc.dma_vaddr + 1473 offsetof(struct ubsec_dmachunk, d_ctx), 1474 sizeof(struct ubsec_pktctx)); 1475 1476 UBSEC_LOCK(sc); 1477 SIMPLEQ_INSERT_TAIL(&sc->sc_queue, q, q_next); 1478 sc->sc_nqueue++; 1479 ubsecstats.hst_ipackets++; 1480 ubsecstats.hst_ibytes += dmap->d_alloc.dma_size; 1481 if ((hint & CRYPTO_HINT_MORE) == 0 || sc->sc_nqueue >= UBS_MAX_AGGR) 1482 ubsec_feed(sc); 1483 UBSEC_UNLOCK(sc); 1484 return (0); 1485 1486errout: 1487 if (q != NULL) { 1488 if ((q->q_dst_m != NULL) && (q->q_src_m != q->q_dst_m)) 1489 m_freem(q->q_dst_m); 1490 1491 if (q->q_dst_map != NULL && q->q_dst_map != q->q_src_map) { 1492 bus_dmamap_unload(sc->sc_dmat, q->q_dst_map); 1493 bus_dmamap_destroy(sc->sc_dmat, q->q_dst_map); 1494 } 1495 if (q->q_src_map != NULL) { 1496 bus_dmamap_unload(sc->sc_dmat, q->q_src_map); 1497 
bus_dmamap_destroy(sc->sc_dmat, q->q_src_map); 1498 } 1499 1500 UBSEC_LOCK(sc); 1501 SIMPLEQ_INSERT_TAIL(&sc->sc_freequeue, q, q_next); 1502 UBSEC_UNLOCK(sc); 1503 } 1504 if (err != ERESTART) { 1505 crp->crp_etype = err; 1506 crypto_done(crp); 1507 } else { 1508 sc->sc_needwakeup |= CRYPTO_SYMQ; 1509 } 1510 return (err); 1511} 1512 1513static void 1514ubsec_callback(struct ubsec_softc *sc, struct ubsec_q *q) 1515{ 1516 struct cryptop *crp = (struct cryptop *)q->q_crp; 1517 struct cryptodesc *crd; 1518 struct ubsec_dma *dmap = q->q_dma; 1519 1520 ubsec_dma_sync(&dmap->d_alloc, 1521 BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE); 1522 if (q->q_dst_map != NULL && q->q_dst_map != q->q_src_map) { 1523 bus_dmamap_sync(sc->sc_dmat, q->q_dst_map, 1524 BUS_DMASYNC_POSTREAD); 1525 bus_dmamap_unload(sc->sc_dmat, q->q_dst_map); 1526 bus_dmamap_destroy(sc->sc_dmat, q->q_dst_map); 1527 } 1528 bus_dmamap_sync(sc->sc_dmat, q->q_src_map, BUS_DMASYNC_POSTWRITE); 1529 bus_dmamap_unload(sc->sc_dmat, q->q_src_map); 1530 bus_dmamap_destroy(sc->sc_dmat, q->q_src_map); 1531 1532 if ((crp->crp_flags & CRYPTO_F_IMBUF) && (q->q_src_m != q->q_dst_m)) { 1533 m_freem(q->q_src_m); 1534 crp->crp_buf = (caddr_t)q->q_dst_m; 1535 } 1536 ubsecstats.hst_obytes += ((struct mbuf *)crp->crp_buf)->m_len; 1537 1538 /* copy out IV for future use */ 1539 if (q->q_flags & UBSEC_QFLAGS_COPYOUTIV) { 1540 for (crd = crp->crp_desc; crd; crd = crd->crd_next) { 1541 if (crd->crd_alg != CRYPTO_DES_CBC && 1542 crd->crd_alg != CRYPTO_3DES_CBC) 1543 continue; 1544 if (crp->crp_flags & CRYPTO_F_IMBUF) 1545 m_copydata((struct mbuf *)crp->crp_buf, 1546 crd->crd_skip + crd->crd_len - 8, 8, 1547 (caddr_t)sc->sc_sessions[q->q_sesn].ses_iv); 1548 else if (crp->crp_flags & CRYPTO_F_IOV) { 1549 cuio_copydata((struct uio *)crp->crp_buf, 1550 crd->crd_skip + crd->crd_len - 8, 8, 1551 (caddr_t)sc->sc_sessions[q->q_sesn].ses_iv); 1552 } 1553 break; 1554 } 1555 } 1556 1557 for (crd = crp->crp_desc; crd; crd = crd->crd_next) { 1558 if 
		    (crd->crd_alg != CRYPTO_MD5_HMAC &&
		    crd->crd_alg != CRYPTO_SHA1_HMAC)
			continue;
		/*
		 * Copy the 12-byte (96-bit truncated) HMAC the chip left
		 * in d_macbuf back to the caller: into the mbuf at the
		 * inject offset, or into crp_mac for iovec requests
		 * (only when the caller supplied crp_mac).
		 */
		if (crp->crp_flags & CRYPTO_F_IMBUF)
			m_copyback((struct mbuf *)crp->crp_buf,
			    crd->crd_inject, 12,
			    (caddr_t)dmap->d_dma->d_macbuf);
		else if (crp->crp_flags & CRYPTO_F_IOV && crp->crp_mac)
			bcopy((caddr_t)dmap->d_dma->d_macbuf,
			    crp->crp_mac, 12);
		/* At most one MAC descriptor per request. */
		break;
	}
	/* Return the request to the free list and notify the framework. */
	SIMPLEQ_INSERT_TAIL(&sc->sc_freequeue, q, q_next);
	crypto_done(crp);
}

/*
 * Copy bytes from the mbuf chain srcm to the chain dstm, except for
 * bytes whose running index j satisfies hoffset <= j < toffset.
 * Used by ubsec_process() to duplicate the portions of a packet that
 * the chip will not rewrite when the source chain is misaligned and a
 * separate destination chain had to be allocated.
 *
 * NOTE(review): while j is inside [hoffset, toffset) neither sptr nor
 * dptr advances, yet slen/dlen are still decremented; so the skipped
 * span consumes length but not data, and copying resumes from the
 * pointers where they stalled.  This matches the historic OpenBSD
 * code -- confirm against ubsec_process()'s cpskip/cpoffset usage
 * before changing anything here.
 */
static void
ubsec_mcopy(struct mbuf *srcm, struct mbuf *dstm, int hoffset, int toffset)
{
	int i, j, dlen, slen;
	caddr_t dptr, sptr;

	j = 0;
	sptr = srcm->m_data;
	slen = srcm->m_len;
	dptr = dstm->m_data;
	dlen = dstm->m_len;

	while (1) {
		/*
		 * The min() bound is re-evaluated every iteration and
		 * slen/dlen shrink inside the loop, so this inner loop
		 * runs until either the current source or destination
		 * mbuf is exhausted.
		 */
		for (i = 0; i < min(slen, dlen); i++) {
			if (j < hoffset || j >= toffset)
				*dptr++ = *sptr++;
			slen--;
			dlen--;
			j++;
		}
		if (slen == 0) {
			/* Advance to the next source mbuf, if any. */
			srcm = srcm->m_next;
			if (srcm == NULL)
				return;
			sptr = srcm->m_data;
			slen = srcm->m_len;
		}
		if (dlen == 0) {
			/* Advance to the next destination mbuf, if any. */
			dstm = dstm->m_next;
			if (dstm == NULL)
				return;
			dptr = dstm->m_data;
			dlen = dstm->m_len;
		}
	}
}

/*
 * feed the key generator, must be called at splimp() or higher.
 */
static int
ubsec_feed2(struct ubsec_softc *sc)
{
	struct ubsec_q2 *q;

	/*
	 * Push queued MCR2 (key/RNG) operations to the chip until the
	 * queue drains or the chip reports its MCR2 slot full.
	 */
	while (!SIMPLEQ_EMPTY(&sc->sc_queue2)) {
		if (READ_REG(sc, BS_STAT) & BS_STAT_MCR2_FULL)
			break;
		q = SIMPLEQ_FIRST(&sc->sc_queue2);

		/* Make the command and context visible to the device. */
		ubsec_dma_sync(&q->q_mcr,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
		ubsec_dma_sync(&q->q_ctx, BUS_DMASYNC_PREWRITE);

		/* Hand the MCR's bus address to the chip, then move the
		 * entry from the pending queue to the on-chip queue. */
		WRITE_REG(sc, BS_MCR2, q->q_mcr.dma_paddr);
		SIMPLEQ_REMOVE_HEAD(&sc->sc_queue2, q, q_next);
		--sc->sc_nqueue2;
		SIMPLEQ_INSERT_TAIL(&sc->sc_qchip2, q, q_next);
	}
	return (0);
}

/*
 * Callback for handling random numbers
 * (and, despite the name, all completed MCR2 key operations).
 */
static void
ubsec_callback2(struct ubsec_softc *sc, struct ubsec_q2 *q)
{
	struct cryptkop *krp;
	struct ubsec_ctx_keyop *ctx;

	ctx = (struct ubsec_ctx_keyop *)q->q_ctx.dma_vaddr;
	ubsec_dma_sync(&q->q_ctx, BUS_DMASYNC_POSTWRITE);

	switch (q->q_type) {
#ifndef UBSEC_NO_RNG
	case UBS_CTXOP_RNGBYPASS: {
		struct ubsec_q2_rng *rng = (struct ubsec_q2_rng *)q;

		/* Harvest the freshly generated random words. */
		ubsec_dma_sync(&rng->rng_buf, BUS_DMASYNC_POSTREAD);
		random_harvest(rng->rng_buf.dma_vaddr,
		    UBSEC_RNG_BUFSIZ*sizeof (u_int32_t),
		    UBSEC_RNG_BUFSIZ*sizeof (u_int32_t)*NBBY, 0,
		    RANDOM_PURE);
		rng->rng_used = 0;
		/* Schedule the next RNG refill. */
		callout_reset(&sc->sc_rngto, sc->sc_rnghz, ubsec_rng, sc);
		break;
	}
#endif
	case UBS_CTXOP_MODEXP: {
		struct ubsec_q2_modexp *me = (struct ubsec_q2_modexp *)q;
		u_int rlen, clen;

		krp = me->me_krp;
		/* Bytes in the true result vs. room in the caller's buffer. */
		rlen = (me->me_modbits + 7) / 8;
		clen = (krp->krp_param[krp->krp_iparams].crp_nbits + 7) / 8;

		ubsec_dma_sync(&me->me_M, BUS_DMASYNC_POSTWRITE);
		ubsec_dma_sync(&me->me_E, BUS_DMASYNC_POSTWRITE);
		ubsec_dma_sync(&me->me_C, BUS_DMASYNC_POSTREAD);
		ubsec_dma_sync(&me->me_epb, BUS_DMASYNC_POSTWRITE);

		if (clen < rlen)
			krp->krp_status = E2BIG;
		else {
			if (sc->sc_flags & UBS_FLAGS_HWNORM) {
				/*
				 * Hardware normalization: result is
				 * already in natural position, just
				 * clear and copy the caller's buffer.
				 */
				bzero(krp->krp_param[krp->krp_iparams].crp_p,
				    (krp->krp_param[krp->krp_iparams].crp_nbits
					+ 7) / 8);
				bcopy(me->me_C.dma_vaddr,
				    krp->krp_param[krp->krp_iparams].crp_p,
				    (me->me_modbits + 7) / 8);
			} else
				/* Undo the software normalization shift. */
				ubsec_kshift_l(me->me_shiftbits,
				    me->me_C.dma_vaddr, me->me_normbits,
				    krp->krp_param[krp->krp_iparams].crp_p,
				    krp->krp_param[krp->krp_iparams].crp_nbits);
		}

		crypto_kdone(krp);

		/* bzero all potentially sensitive data */
		bzero(me->me_E.dma_vaddr, me->me_E.dma_size);
		bzero(me->me_M.dma_vaddr, me->me_M.dma_size);
		bzero(me->me_C.dma_vaddr, me->me_C.dma_size);
		bzero(me->me_q.q_ctx.dma_vaddr, me->me_q.q_ctx.dma_size);

		/* Can't free here, so put us on the free list. */
		SIMPLEQ_INSERT_TAIL(&sc->sc_q2free, &me->me_q, q_next);
		break;
	}
	case UBS_CTXOP_RSAPRIV: {
		struct ubsec_q2_rsapriv *rp = (struct ubsec_q2_rsapriv *)q;
		u_int len;

		krp = rp->rpr_krp;
		ubsec_dma_sync(&rp->rpr_msgin, BUS_DMASYNC_POSTWRITE);
		ubsec_dma_sync(&rp->rpr_msgout, BUS_DMASYNC_POSTREAD);

		/* Copy the CRT result back to the caller's buffer. */
		len = (krp->krp_param[UBS_RSAPRIV_PAR_MSGOUT].crp_nbits + 7) / 8;
		bcopy(rp->rpr_msgout.dma_vaddr,
		    krp->krp_param[UBS_RSAPRIV_PAR_MSGOUT].crp_p, len);

		crypto_kdone(krp);

		/* bzero all potentially sensitive data */
		bzero(rp->rpr_msgin.dma_vaddr, rp->rpr_msgin.dma_size);
		bzero(rp->rpr_msgout.dma_vaddr, rp->rpr_msgout.dma_size);
		bzero(rp->rpr_q.q_ctx.dma_vaddr, rp->rpr_q.q_ctx.dma_size);

		/* Can't free here, so put us on the free list. */
		SIMPLEQ_INSERT_TAIL(&sc->sc_q2free, &rp->rpr_q, q_next);
		break;
	}
	default:
		device_printf(sc->sc_dev, "unknown ctx op: %x\n",
		    letoh16(ctx->ctx_op));
		break;
	}
}

#ifndef UBSEC_NO_RNG
/*
 * Queue an RNG-bypass operation to refill the random number buffer.
 * Runs from a callout; ubsec_callback2() reschedules it when the
 * result comes back.
 */
static void
ubsec_rng(void *vsc)
{
	struct ubsec_softc *sc = vsc;
	struct ubsec_q2_rng *rng = &sc->sc_rng;
	struct ubsec_mcr *mcr;
	struct ubsec_ctx_rngbypass *ctx;

	UBSEC_LOCK(sc);
	/* Only one RNG request may be outstanding at a time. */
	if (rng->rng_used) {
		UBSEC_UNLOCK(sc);
		return;
	}
	sc->sc_nqueue2++;
	if (sc->sc_nqueue2 >= UBS_MAX_NQUEUE)
		goto out;

	mcr = (struct ubsec_mcr *)rng->rng_q.q_mcr.dma_vaddr;
	ctx = (struct ubsec_ctx_rngbypass *)rng->rng_q.q_ctx.dma_vaddr;

	/* One packet, no input buffer; output is the RNG buffer. */
	mcr->mcr_pkts = htole16(1);
	mcr->mcr_flags = 0;
	mcr->mcr_cmdctxp = htole32(rng->rng_q.q_ctx.dma_paddr);
	mcr->mcr_ipktbuf.pb_addr = mcr->mcr_ipktbuf.pb_next = 0;
	mcr->mcr_ipktbuf.pb_len = 0;
	mcr->mcr_reserved = mcr->mcr_pktlen = 0;
	mcr->mcr_opktbuf.pb_addr = htole32(rng->rng_buf.dma_paddr);
	mcr->mcr_opktbuf.pb_len = htole32(((sizeof(u_int32_t) * UBSEC_RNG_BUFSIZ)) &
	    UBS_PKTBUF_LEN);
	mcr->mcr_opktbuf.pb_next = 0;

	ctx->rbp_len = htole16(sizeof(struct ubsec_ctx_rngbypass));
	ctx->rbp_op = htole16(UBS_CTXOP_RNGBYPASS);
	rng->rng_q.q_type = UBS_CTXOP_RNGBYPASS;

	ubsec_dma_sync(&rng->rng_buf, BUS_DMASYNC_PREREAD);

	SIMPLEQ_INSERT_TAIL(&sc->sc_queue2, &rng->rng_q, q_next);
	rng->rng_used = 1;
	ubsec_feed2(sc);
	ubsecstats.hst_rng++;
	UBSEC_UNLOCK(sc);

	return;

out:
	/*
	 * Something weird happened, generate our own call back.
1783 */ 1784 sc->sc_nqueue2--; 1785 UBSEC_UNLOCK(sc); 1786 callout_reset(&sc->sc_rngto, sc->sc_rnghz, ubsec_rng, sc); 1787} 1788#endif /* UBSEC_NO_RNG */ 1789 1790static void 1791ubsec_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error) 1792{ 1793 bus_addr_t *paddr = (bus_addr_t*) arg; 1794 *paddr = segs->ds_addr; 1795} 1796 1797static int 1798ubsec_dma_malloc( 1799 struct ubsec_softc *sc, 1800 bus_size_t size, 1801 struct ubsec_dma_alloc *dma, 1802 int mapflags 1803) 1804{ 1805 int r; 1806 1807 /* XXX could specify sc_dmat as parent but that just adds overhead */ 1808 r = bus_dma_tag_create(NULL, /* parent */ 1809 1, 0, /* alignment, bounds */ 1810 BUS_SPACE_MAXADDR_32BIT, /* lowaddr */ 1811 BUS_SPACE_MAXADDR, /* highaddr */ 1812 NULL, NULL, /* filter, filterarg */ 1813 size, /* maxsize */ 1814 1, /* nsegments */ 1815 size, /* maxsegsize */ 1816 BUS_DMA_ALLOCNOW, /* flags */ 1817 &dma->dma_tag); 1818 if (r != 0) { 1819 device_printf(sc->sc_dev, "ubsec_dma_malloc: " 1820 "bus_dma_tag_create failed; error %u\n", r); 1821 goto fail_0; 1822 } 1823 1824 r = bus_dmamap_create(dma->dma_tag, BUS_DMA_NOWAIT, &dma->dma_map); 1825 if (r != 0) { 1826 device_printf(sc->sc_dev, "ubsec_dma_malloc: " 1827 "bus_dmamap_create failed; error %u\n", r); 1828 goto fail_1; 1829 } 1830 1831 r = bus_dmamem_alloc(dma->dma_tag, (void**) &dma->dma_vaddr, 1832 BUS_DMA_NOWAIT, &dma->dma_map); 1833 if (r != 0) { 1834 device_printf(sc->sc_dev, "ubsec_dma_malloc: " 1835 "bus_dmammem_alloc failed; size %zu, error %u\n", 1836 size, r); 1837 goto fail_2; 1838 } 1839 1840 r = bus_dmamap_load(dma->dma_tag, dma->dma_map, dma->dma_vaddr, 1841 size, 1842 ubsec_dmamap_cb, 1843 &dma->dma_paddr, 1844 mapflags | BUS_DMA_NOWAIT); 1845 if (r != 0) { 1846 device_printf(sc->sc_dev, "ubsec_dma_malloc: " 1847 "bus_dmamap_load failed; error %u\n", r); 1848 goto fail_3; 1849 } 1850 1851 dma->dma_size = size; 1852 return (0); 1853 1854fail_3: 1855 bus_dmamap_unload(dma->dma_tag, dma->dma_map); 
fail_2:
	bus_dmamem_free(dma->dma_tag, dma->dma_vaddr, dma->dma_map);
fail_1:
	bus_dmamap_destroy(dma->dma_tag, dma->dma_map);
	bus_dma_tag_destroy(dma->dma_tag);
fail_0:
	dma->dma_map = NULL;
	dma->dma_tag = NULL;
	return (r);
}

/*
 * Release an allocation made by ubsec_dma_malloc(): unload the map,
 * free the memory, then destroy the map and the tag.
 */
static void
ubsec_dma_free(struct ubsec_softc *sc, struct ubsec_dma_alloc *dma)
{
	bus_dmamap_unload(dma->dma_tag, dma->dma_map);
	bus_dmamem_free(dma->dma_tag, dma->dma_vaddr, dma->dma_map);
	bus_dmamap_destroy(dma->dma_tag, dma->dma_map);
	bus_dma_tag_destroy(dma->dma_tag);
}

/*
 * Resets the board.  Values in the registers are left as is
 * from the reset (i.e. initial values are assigned elsewhere).
 */
static void
ubsec_reset_board(struct ubsec_softc *sc)
{
	volatile u_int32_t ctrl;

	ctrl = READ_REG(sc, BS_CTRL);
	ctrl |= BS_CTRL_RESET;
	WRITE_REG(sc, BS_CTRL, ctrl);

	/*
	 * Wait approx. 30 PCI clocks = 900 ns = 0.9 us
	 */
	DELAY(10);
}

/*
 * Init Broadcom registers
 */
static void
ubsec_init_board(struct ubsec_softc *sc)
{
	u_int32_t ctrl;

	ctrl = READ_REG(sc, BS_CTRL);
	/* Run the chip little-endian, enable MCR1 completion interrupts. */
	ctrl &= ~(BS_CTRL_BE32 | BS_CTRL_BE64);
	ctrl |= BS_CTRL_LITTLE_ENDIAN | BS_CTRL_MCR1INT;

	/* MCR2 interrupts are only needed on key/RNG capable parts. */
	if (sc->sc_flags & (UBS_FLAGS_KEY|UBS_FLAGS_RNG))
		ctrl |= BS_CTRL_MCR2INT;
	else
		ctrl &= ~BS_CTRL_MCR2INT;

	/* Let hardware-normalization-capable parts normalize operands. */
	if (sc->sc_flags & UBS_FLAGS_HWNORM)
		ctrl &= ~BS_CTRL_SWNORM;

	WRITE_REG(sc, BS_CTRL, ctrl);
}

/*
 * Init Broadcom PCI registers
 */
static void
ubsec_init_pciregs(device_t dev)
{
#if 0
	u_int32_t misc;

	misc = pci_conf_read(pc, pa->pa_tag, BS_RTY_TOUT);
	misc = (misc & ~(UBS_PCI_RTY_MASK << UBS_PCI_RTY_SHIFT))
	    | ((UBS_DEF_RTY & 0xff) << UBS_PCI_RTY_SHIFT);
	misc = (misc & ~(UBS_PCI_TOUT_MASK << UBS_PCI_TOUT_SHIFT))
	    | ((UBS_DEF_TOUT & 0xff) <<
	    UBS_PCI_TOUT_SHIFT);
	pci_conf_write(pc, pa->pa_tag, BS_RTY_TOUT, misc);
#endif

	/*
	 * This will set the cache line size to 1, this will
	 * force the BCM58xx chip just to do burst read/writes.
	 * Cache line read/writes are too slow
	 */
	pci_write_config(dev, PCIR_CACHELNSZ, UBS_DEF_CACHELINE, 1);
}

/*
 * Clean up after a chip crash.
 * It is assumed that the caller is in splimp()
 */
static void
ubsec_cleanchip(struct ubsec_softc *sc)
{
	struct ubsec_q *q;

	/* Fail every request the chip still had in flight. */
	while (!SIMPLEQ_EMPTY(&sc->sc_qchip)) {
		q = SIMPLEQ_FIRST(&sc->sc_qchip);
		SIMPLEQ_REMOVE_HEAD(&sc->sc_qchip, q, q_next);
		ubsec_free_q(sc, q);
	}
	sc->sc_nqchip = 0;
}

/*
 * free a ubsec_q: fail the request (and any requests stacked onto it)
 * with EFAULT and return the queue entries to the free list.
 * It is assumed that the caller is within splimp().
 */
static int
ubsec_free_q(struct ubsec_softc *sc, struct ubsec_q *q)
{
	struct ubsec_q *q2;
	struct cryptop *crp;
	int npkts;
	int i;

	npkts = q->q_nstacked_mcrs;

	/*
	 * Fail all requests aggregated onto this MCR.  The stacked
	 * array is packed, so stop at the first empty slot.
	 */
	for (i = 0; i < npkts; i++) {
		if(q->q_stacked_mcr[i]) {
			q2 = q->q_stacked_mcr[i];

			/* Free a separately allocated destination chain. */
			if ((q2->q_dst_m != NULL) && (q2->q_src_m != q2->q_dst_m))
				m_freem(q2->q_dst_m);

			crp = (struct cryptop *)q2->q_crp;

			SIMPLEQ_INSERT_TAIL(&sc->sc_freequeue, q2, q_next);

			/* Report the failure to the crypto framework. */
			crp->crp_etype = EFAULT;
			crypto_done(crp);
		} else {
			break;
		}
	}

	/*
	 * Free header MCR
	 */
	if ((q->q_dst_m != NULL) && (q->q_src_m != q->q_dst_m))
		m_freem(q->q_dst_m);

	crp = (struct cryptop *)q->q_crp;

	SIMPLEQ_INSERT_TAIL(&sc->sc_freequeue, q, q_next);

	crp->crp_etype = EFAULT;
	crypto_done(crp);
	return(0);
}

/*
 * Routine to reset the chip and clean up.
 * It is assumed that the caller is in splimp()
 */
static void
ubsec_totalreset(struct ubsec_softc *sc)
{
	ubsec_reset_board(sc);
	ubsec_init_board(sc);
	ubsec_cleanchip(sc);
}

/*
 * Return non-zero if the operand's DMA segment list is acceptable to
 * the chip: every segment 4-byte aligned, and every segment except
 * the last a multiple of 4 bytes long.
 */
static int
ubsec_dmamap_aligned(struct ubsec_operand *op)
{
	int i;

	for (i = 0; i < op->nsegs; i++) {
		if (op->segs[i].ds_addr & 3)
			return (0);
		if ((i != (op->nsegs - 1)) &&
		    (op->segs[i].ds_len & 3))
			return (0);
	}
	return (1);
}

/*
 * Release the DMA resources of a completed key operation; what gets
 * freed depends on the operation type.
 */
static void
ubsec_kfree(struct ubsec_softc *sc, struct ubsec_q2 *q)
{
	switch (q->q_type) {
	case UBS_CTXOP_MODEXP: {
		struct ubsec_q2_modexp *me = (struct ubsec_q2_modexp *)q;

		ubsec_dma_free(sc, &me->me_q.q_mcr);
		ubsec_dma_free(sc, &me->me_q.q_ctx);
		ubsec_dma_free(sc, &me->me_M);
		ubsec_dma_free(sc, &me->me_E);
		ubsec_dma_free(sc, &me->me_C);
		ubsec_dma_free(sc, &me->me_epb);
		free(me, M_DEVBUF);
		break;
	}
	case UBS_CTXOP_RSAPRIV: {
		struct ubsec_q2_rsapriv *rp = (struct ubsec_q2_rsapriv *)q;

		ubsec_dma_free(sc, &rp->rpr_q.q_mcr);
		ubsec_dma_free(sc, &rp->rpr_q.q_ctx);
		ubsec_dma_free(sc, &rp->rpr_msgin);
		ubsec_dma_free(sc, &rp->rpr_msgout);
		free(rp, M_DEVBUF);
		break;
	}
	default:
		device_printf(sc->sc_dev, "invalid kfree 0x%x\n", q->q_type);
		break;
	}
}

/*
 * Dispatch an asymmetric (key) operation.  Queue-2 entries completed
 * at interrupt time cannot be freed there, so they are reaped here
 * first.
 */
static int
ubsec_kprocess(void *arg, struct cryptkop *krp, int hint)
{
	struct ubsec_softc *sc = arg;
	int r;

	if (krp == NULL || krp->krp_callback == NULL)
		return (EINVAL);

	/* Garbage-collect previously completed operations. */
	while (!SIMPLEQ_EMPTY(&sc->sc_q2free)) {
		struct ubsec_q2 *q;

		q = SIMPLEQ_FIRST(&sc->sc_q2free);
		SIMPLEQ_REMOVE_HEAD(&sc->sc_q2free, q, q_next);
		ubsec_kfree(sc, q);
	}

	switch (krp->krp_op) {
	case CRK_MOD_EXP:
		if (sc->sc_flags & UBS_FLAGS_HWNORM)
			r = ubsec_kprocess_modexp_hw(sc, krp, hint);
		else
			r
= ubsec_kprocess_modexp_sw(sc, krp, hint); 2089 break; 2090 case CRK_MOD_EXP_CRT: 2091 return (ubsec_kprocess_rsapriv(sc, krp, hint)); 2092 default: 2093 device_printf(sc->sc_dev, "kprocess: invalid op 0x%x\n", 2094 krp->krp_op); 2095 krp->krp_status = EOPNOTSUPP; 2096 crypto_kdone(krp); 2097 return (0); 2098 } 2099 return (0); /* silence compiler */ 2100} 2101 2102/* 2103 * Start computation of cr[C] = (cr[M] ^ cr[E]) mod cr[N] (sw normalization) 2104 */ 2105static int 2106ubsec_kprocess_modexp_sw(struct ubsec_softc *sc, struct cryptkop *krp, int hint) 2107{ 2108 struct ubsec_q2_modexp *me; 2109 struct ubsec_mcr *mcr; 2110 struct ubsec_ctx_modexp *ctx; 2111 struct ubsec_pktbuf *epb; 2112 int err = 0; 2113 u_int nbits, normbits, mbits, shiftbits, ebits; 2114 2115 me = (struct ubsec_q2_modexp *)malloc(sizeof *me, M_DEVBUF, M_NOWAIT); 2116 if (me == NULL) { 2117 err = ENOMEM; 2118 goto errout; 2119 } 2120 bzero(me, sizeof *me); 2121 me->me_krp = krp; 2122 me->me_q.q_type = UBS_CTXOP_MODEXP; 2123 2124 nbits = ubsec_ksigbits(&krp->krp_param[UBS_MODEXP_PAR_N]); 2125 if (nbits <= 512) 2126 normbits = 512; 2127 else if (nbits <= 768) 2128 normbits = 768; 2129 else if (nbits <= 1024) 2130 normbits = 1024; 2131 else if (sc->sc_flags & UBS_FLAGS_BIGKEY && nbits <= 1536) 2132 normbits = 1536; 2133 else if (sc->sc_flags & UBS_FLAGS_BIGKEY && nbits <= 2048) 2134 normbits = 2048; 2135 else { 2136 err = E2BIG; 2137 goto errout; 2138 } 2139 2140 shiftbits = normbits - nbits; 2141 2142 me->me_modbits = nbits; 2143 me->me_shiftbits = shiftbits; 2144 me->me_normbits = normbits; 2145 2146 /* Sanity check: result bits must be >= true modulus bits. 
*/ 2147 if (krp->krp_param[krp->krp_iparams].crp_nbits < nbits) { 2148 err = ERANGE; 2149 goto errout; 2150 } 2151 2152 if (ubsec_dma_malloc(sc, sizeof(struct ubsec_mcr), 2153 &me->me_q.q_mcr, 0)) { 2154 err = ENOMEM; 2155 goto errout; 2156 } 2157 mcr = (struct ubsec_mcr *)me->me_q.q_mcr.dma_vaddr; 2158 2159 if (ubsec_dma_malloc(sc, sizeof(struct ubsec_ctx_modexp), 2160 &me->me_q.q_ctx, 0)) { 2161 err = ENOMEM; 2162 goto errout; 2163 } 2164 2165 mbits = ubsec_ksigbits(&krp->krp_param[UBS_MODEXP_PAR_M]); 2166 if (mbits > nbits) { 2167 err = E2BIG; 2168 goto errout; 2169 } 2170 if (ubsec_dma_malloc(sc, normbits / 8, &me->me_M, 0)) { 2171 err = ENOMEM; 2172 goto errout; 2173 } 2174 ubsec_kshift_r(shiftbits, 2175 krp->krp_param[UBS_MODEXP_PAR_M].crp_p, mbits, 2176 me->me_M.dma_vaddr, normbits); 2177 2178 if (ubsec_dma_malloc(sc, normbits / 8, &me->me_C, 0)) { 2179 err = ENOMEM; 2180 goto errout; 2181 } 2182 bzero(me->me_C.dma_vaddr, me->me_C.dma_size); 2183 2184 ebits = ubsec_ksigbits(&krp->krp_param[UBS_MODEXP_PAR_E]); 2185 if (ebits > nbits) { 2186 err = E2BIG; 2187 goto errout; 2188 } 2189 if (ubsec_dma_malloc(sc, normbits / 8, &me->me_E, 0)) { 2190 err = ENOMEM; 2191 goto errout; 2192 } 2193 ubsec_kshift_r(shiftbits, 2194 krp->krp_param[UBS_MODEXP_PAR_E].crp_p, ebits, 2195 me->me_E.dma_vaddr, normbits); 2196 2197 if (ubsec_dma_malloc(sc, sizeof(struct ubsec_pktbuf), 2198 &me->me_epb, 0)) { 2199 err = ENOMEM; 2200 goto errout; 2201 } 2202 epb = (struct ubsec_pktbuf *)me->me_epb.dma_vaddr; 2203 epb->pb_addr = htole32(me->me_E.dma_paddr); 2204 epb->pb_next = 0; 2205 epb->pb_len = htole32(normbits / 8); 2206 2207#ifdef UBSEC_DEBUG 2208 if (ubsec_debug) { 2209 printf("Epb "); 2210 ubsec_dump_pb(epb); 2211 } 2212#endif 2213 2214 mcr->mcr_pkts = htole16(1); 2215 mcr->mcr_flags = 0; 2216 mcr->mcr_cmdctxp = htole32(me->me_q.q_ctx.dma_paddr); 2217 mcr->mcr_reserved = 0; 2218 mcr->mcr_pktlen = 0; 2219 2220 mcr->mcr_ipktbuf.pb_addr = htole32(me->me_M.dma_paddr); 2221 
mcr->mcr_ipktbuf.pb_len = htole32(normbits / 8); 2222 mcr->mcr_ipktbuf.pb_next = htole32(me->me_epb.dma_paddr); 2223 2224 mcr->mcr_opktbuf.pb_addr = htole32(me->me_C.dma_paddr); 2225 mcr->mcr_opktbuf.pb_next = 0; 2226 mcr->mcr_opktbuf.pb_len = htole32(normbits / 8); 2227 2228#ifdef DIAGNOSTIC 2229 /* Misaligned output buffer will hang the chip. */ 2230 if ((letoh32(mcr->mcr_opktbuf.pb_addr) & 3) != 0) 2231 panic("%s: modexp invalid addr 0x%x\n", 2232 device_get_nameunit(sc->sc_dev), 2233 letoh32(mcr->mcr_opktbuf.pb_addr)); 2234 if ((letoh32(mcr->mcr_opktbuf.pb_len) & 3) != 0) 2235 panic("%s: modexp invalid len 0x%x\n", 2236 device_get_nameunit(sc->sc_dev), 2237 letoh32(mcr->mcr_opktbuf.pb_len)); 2238#endif 2239 2240 ctx = (struct ubsec_ctx_modexp *)me->me_q.q_ctx.dma_vaddr; 2241 bzero(ctx, sizeof(*ctx)); 2242 ubsec_kshift_r(shiftbits, 2243 krp->krp_param[UBS_MODEXP_PAR_N].crp_p, nbits, 2244 ctx->me_N, normbits); 2245 ctx->me_len = htole16((normbits / 8) + (4 * sizeof(u_int16_t))); 2246 ctx->me_op = htole16(UBS_CTXOP_MODEXP); 2247 ctx->me_E_len = htole16(nbits); 2248 ctx->me_N_len = htole16(nbits); 2249 2250#ifdef UBSEC_DEBUG 2251 if (ubsec_debug) { 2252 ubsec_dump_mcr(mcr); 2253 ubsec_dump_ctx2((struct ubsec_ctx_keyop *)ctx); 2254 } 2255#endif 2256 2257 /* 2258 * ubsec_feed2 will sync mcr and ctx, we just need to sync 2259 * everything else. 2260 */ 2261 ubsec_dma_sync(&me->me_M, BUS_DMASYNC_PREWRITE); 2262 ubsec_dma_sync(&me->me_E, BUS_DMASYNC_PREWRITE); 2263 ubsec_dma_sync(&me->me_C, BUS_DMASYNC_PREREAD); 2264 ubsec_dma_sync(&me->me_epb, BUS_DMASYNC_PREWRITE); 2265 2266 /* Enqueue and we're done... 
*/ 2267 UBSEC_LOCK(sc); 2268 SIMPLEQ_INSERT_TAIL(&sc->sc_queue2, &me->me_q, q_next); 2269 ubsec_feed2(sc); 2270 ubsecstats.hst_modexp++; 2271 UBSEC_UNLOCK(sc); 2272 2273 return (0); 2274 2275errout: 2276 if (me != NULL) { 2277 if (me->me_q.q_mcr.dma_map != NULL) 2278 ubsec_dma_free(sc, &me->me_q.q_mcr); 2279 if (me->me_q.q_ctx.dma_map != NULL) { 2280 bzero(me->me_q.q_ctx.dma_vaddr, me->me_q.q_ctx.dma_size); 2281 ubsec_dma_free(sc, &me->me_q.q_ctx); 2282 } 2283 if (me->me_M.dma_map != NULL) { 2284 bzero(me->me_M.dma_vaddr, me->me_M.dma_size); 2285 ubsec_dma_free(sc, &me->me_M); 2286 } 2287 if (me->me_E.dma_map != NULL) { 2288 bzero(me->me_E.dma_vaddr, me->me_E.dma_size); 2289 ubsec_dma_free(sc, &me->me_E); 2290 } 2291 if (me->me_C.dma_map != NULL) { 2292 bzero(me->me_C.dma_vaddr, me->me_C.dma_size); 2293 ubsec_dma_free(sc, &me->me_C); 2294 } 2295 if (me->me_epb.dma_map != NULL) 2296 ubsec_dma_free(sc, &me->me_epb); 2297 free(me, M_DEVBUF); 2298 } 2299 krp->krp_status = err; 2300 crypto_kdone(krp); 2301 return (0); 2302} 2303 2304/* 2305 * Start computation of cr[C] = (cr[M] ^ cr[E]) mod cr[N] (hw normalization) 2306 */ 2307static int 2308ubsec_kprocess_modexp_hw(struct ubsec_softc *sc, struct cryptkop *krp, int hint) 2309{ 2310 struct ubsec_q2_modexp *me; 2311 struct ubsec_mcr *mcr; 2312 struct ubsec_ctx_modexp *ctx; 2313 struct ubsec_pktbuf *epb; 2314 int err = 0; 2315 u_int nbits, normbits, mbits, shiftbits, ebits; 2316 2317 me = (struct ubsec_q2_modexp *)malloc(sizeof *me, M_DEVBUF, M_NOWAIT); 2318 if (me == NULL) { 2319 err = ENOMEM; 2320 goto errout; 2321 } 2322 bzero(me, sizeof *me); 2323 me->me_krp = krp; 2324 me->me_q.q_type = UBS_CTXOP_MODEXP; 2325 2326 nbits = ubsec_ksigbits(&krp->krp_param[UBS_MODEXP_PAR_N]); 2327 if (nbits <= 512) 2328 normbits = 512; 2329 else if (nbits <= 768) 2330 normbits = 768; 2331 else if (nbits <= 1024) 2332 normbits = 1024; 2333 else if (sc->sc_flags & UBS_FLAGS_BIGKEY && nbits <= 1536) 2334 normbits = 1536; 2335 else if 
(sc->sc_flags & UBS_FLAGS_BIGKEY && nbits <= 2048) 2336 normbits = 2048; 2337 else { 2338 err = E2BIG; 2339 goto errout; 2340 } 2341 2342 shiftbits = normbits - nbits; 2343 2344 /* XXX ??? */ 2345 me->me_modbits = nbits; 2346 me->me_shiftbits = shiftbits; 2347 me->me_normbits = normbits; 2348 2349 /* Sanity check: result bits must be >= true modulus bits. */ 2350 if (krp->krp_param[krp->krp_iparams].crp_nbits < nbits) { 2351 err = ERANGE; 2352 goto errout; 2353 } 2354 2355 if (ubsec_dma_malloc(sc, sizeof(struct ubsec_mcr), 2356 &me->me_q.q_mcr, 0)) { 2357 err = ENOMEM; 2358 goto errout; 2359 } 2360 mcr = (struct ubsec_mcr *)me->me_q.q_mcr.dma_vaddr; 2361 2362 if (ubsec_dma_malloc(sc, sizeof(struct ubsec_ctx_modexp), 2363 &me->me_q.q_ctx, 0)) { 2364 err = ENOMEM; 2365 goto errout; 2366 } 2367 2368 mbits = ubsec_ksigbits(&krp->krp_param[UBS_MODEXP_PAR_M]); 2369 if (mbits > nbits) { 2370 err = E2BIG; 2371 goto errout; 2372 } 2373 if (ubsec_dma_malloc(sc, normbits / 8, &me->me_M, 0)) { 2374 err = ENOMEM; 2375 goto errout; 2376 } 2377 bzero(me->me_M.dma_vaddr, normbits / 8); 2378 bcopy(krp->krp_param[UBS_MODEXP_PAR_M].crp_p, 2379 me->me_M.dma_vaddr, (mbits + 7) / 8); 2380 2381 if (ubsec_dma_malloc(sc, normbits / 8, &me->me_C, 0)) { 2382 err = ENOMEM; 2383 goto errout; 2384 } 2385 bzero(me->me_C.dma_vaddr, me->me_C.dma_size); 2386 2387 ebits = ubsec_ksigbits(&krp->krp_param[UBS_MODEXP_PAR_E]); 2388 if (ebits > nbits) { 2389 err = E2BIG; 2390 goto errout; 2391 } 2392 if (ubsec_dma_malloc(sc, normbits / 8, &me->me_E, 0)) { 2393 err = ENOMEM; 2394 goto errout; 2395 } 2396 bzero(me->me_E.dma_vaddr, normbits / 8); 2397 bcopy(krp->krp_param[UBS_MODEXP_PAR_E].crp_p, 2398 me->me_E.dma_vaddr, (ebits + 7) / 8); 2399 2400 if (ubsec_dma_malloc(sc, sizeof(struct ubsec_pktbuf), 2401 &me->me_epb, 0)) { 2402 err = ENOMEM; 2403 goto errout; 2404 } 2405 epb = (struct ubsec_pktbuf *)me->me_epb.dma_vaddr; 2406 epb->pb_addr = htole32(me->me_E.dma_paddr); 2407 epb->pb_next = 0; 2408 
epb->pb_len = htole32((ebits + 7) / 8); 2409 2410#ifdef UBSEC_DEBUG 2411 if (ubsec_debug) { 2412 printf("Epb "); 2413 ubsec_dump_pb(epb); 2414 } 2415#endif 2416 2417 mcr->mcr_pkts = htole16(1); 2418 mcr->mcr_flags = 0; 2419 mcr->mcr_cmdctxp = htole32(me->me_q.q_ctx.dma_paddr); 2420 mcr->mcr_reserved = 0; 2421 mcr->mcr_pktlen = 0; 2422 2423 mcr->mcr_ipktbuf.pb_addr = htole32(me->me_M.dma_paddr); 2424 mcr->mcr_ipktbuf.pb_len = htole32(normbits / 8); 2425 mcr->mcr_ipktbuf.pb_next = htole32(me->me_epb.dma_paddr); 2426 2427 mcr->mcr_opktbuf.pb_addr = htole32(me->me_C.dma_paddr); 2428 mcr->mcr_opktbuf.pb_next = 0; 2429 mcr->mcr_opktbuf.pb_len = htole32(normbits / 8); 2430 2431#ifdef DIAGNOSTIC 2432 /* Misaligned output buffer will hang the chip. */ 2433 if ((letoh32(mcr->mcr_opktbuf.pb_addr) & 3) != 0) 2434 panic("%s: modexp invalid addr 0x%x\n", 2435 device_get_nameunit(sc->sc_dev), 2436 letoh32(mcr->mcr_opktbuf.pb_addr)); 2437 if ((letoh32(mcr->mcr_opktbuf.pb_len) & 3) != 0) 2438 panic("%s: modexp invalid len 0x%x\n", 2439 device_get_nameunit(sc->sc_dev), 2440 letoh32(mcr->mcr_opktbuf.pb_len)); 2441#endif 2442 2443 ctx = (struct ubsec_ctx_modexp *)me->me_q.q_ctx.dma_vaddr; 2444 bzero(ctx, sizeof(*ctx)); 2445 bcopy(krp->krp_param[UBS_MODEXP_PAR_N].crp_p, ctx->me_N, 2446 (nbits + 7) / 8); 2447 ctx->me_len = htole16((normbits / 8) + (4 * sizeof(u_int16_t))); 2448 ctx->me_op = htole16(UBS_CTXOP_MODEXP); 2449 ctx->me_E_len = htole16(ebits); 2450 ctx->me_N_len = htole16(nbits); 2451 2452#ifdef UBSEC_DEBUG 2453 if (ubsec_debug) { 2454 ubsec_dump_mcr(mcr); 2455 ubsec_dump_ctx2((struct ubsec_ctx_keyop *)ctx); 2456 } 2457#endif 2458 2459 /* 2460 * ubsec_feed2 will sync mcr and ctx, we just need to sync 2461 * everything else. 
2462 */ 2463 ubsec_dma_sync(&me->me_M, BUS_DMASYNC_PREWRITE); 2464 ubsec_dma_sync(&me->me_E, BUS_DMASYNC_PREWRITE); 2465 ubsec_dma_sync(&me->me_C, BUS_DMASYNC_PREREAD); 2466 ubsec_dma_sync(&me->me_epb, BUS_DMASYNC_PREWRITE); 2467 2468 /* Enqueue and we're done... */ 2469 UBSEC_LOCK(sc); 2470 SIMPLEQ_INSERT_TAIL(&sc->sc_queue2, &me->me_q, q_next); 2471 ubsec_feed2(sc); 2472 UBSEC_UNLOCK(sc); 2473 2474 return (0); 2475 2476errout: 2477 if (me != NULL) { 2478 if (me->me_q.q_mcr.dma_map != NULL) 2479 ubsec_dma_free(sc, &me->me_q.q_mcr); 2480 if (me->me_q.q_ctx.dma_map != NULL) { 2481 bzero(me->me_q.q_ctx.dma_vaddr, me->me_q.q_ctx.dma_size); 2482 ubsec_dma_free(sc, &me->me_q.q_ctx); 2483 } 2484 if (me->me_M.dma_map != NULL) { 2485 bzero(me->me_M.dma_vaddr, me->me_M.dma_size); 2486 ubsec_dma_free(sc, &me->me_M); 2487 } 2488 if (me->me_E.dma_map != NULL) { 2489 bzero(me->me_E.dma_vaddr, me->me_E.dma_size); 2490 ubsec_dma_free(sc, &me->me_E); 2491 } 2492 if (me->me_C.dma_map != NULL) { 2493 bzero(me->me_C.dma_vaddr, me->me_C.dma_size); 2494 ubsec_dma_free(sc, &me->me_C); 2495 } 2496 if (me->me_epb.dma_map != NULL) 2497 ubsec_dma_free(sc, &me->me_epb); 2498 free(me, M_DEVBUF); 2499 } 2500 krp->krp_status = err; 2501 crypto_kdone(krp); 2502 return (0); 2503} 2504 2505static int 2506ubsec_kprocess_rsapriv(struct ubsec_softc *sc, struct cryptkop *krp, int hint) 2507{ 2508 struct ubsec_q2_rsapriv *rp = NULL; 2509 struct ubsec_mcr *mcr; 2510 struct ubsec_ctx_rsapriv *ctx; 2511 int err = 0; 2512 u_int padlen, msglen; 2513 2514 msglen = ubsec_ksigbits(&krp->krp_param[UBS_RSAPRIV_PAR_P]); 2515 padlen = ubsec_ksigbits(&krp->krp_param[UBS_RSAPRIV_PAR_Q]); 2516 if (msglen > padlen) 2517 padlen = msglen; 2518 2519 if (padlen <= 256) 2520 padlen = 256; 2521 else if (padlen <= 384) 2522 padlen = 384; 2523 else if (padlen <= 512) 2524 padlen = 512; 2525 else if (sc->sc_flags & UBS_FLAGS_BIGKEY && padlen <= 768) 2526 padlen = 768; 2527 else if (sc->sc_flags & UBS_FLAGS_BIGKEY && padlen <= 
1024) 2528 padlen = 1024; 2529 else { 2530 err = E2BIG; 2531 goto errout; 2532 } 2533 2534 if (ubsec_ksigbits(&krp->krp_param[UBS_RSAPRIV_PAR_DP]) > padlen) { 2535 err = E2BIG; 2536 goto errout; 2537 } 2538 2539 if (ubsec_ksigbits(&krp->krp_param[UBS_RSAPRIV_PAR_DQ]) > padlen) { 2540 err = E2BIG; 2541 goto errout; 2542 } 2543 2544 if (ubsec_ksigbits(&krp->krp_param[UBS_RSAPRIV_PAR_PINV]) > padlen) { 2545 err = E2BIG; 2546 goto errout; 2547 } 2548 2549 rp = (struct ubsec_q2_rsapriv *)malloc(sizeof *rp, M_DEVBUF, M_NOWAIT); 2550 if (rp == NULL) 2551 return (ENOMEM); 2552 bzero(rp, sizeof *rp); 2553 rp->rpr_krp = krp; 2554 rp->rpr_q.q_type = UBS_CTXOP_RSAPRIV; 2555 2556 if (ubsec_dma_malloc(sc, sizeof(struct ubsec_mcr), 2557 &rp->rpr_q.q_mcr, 0)) { 2558 err = ENOMEM; 2559 goto errout; 2560 } 2561 mcr = (struct ubsec_mcr *)rp->rpr_q.q_mcr.dma_vaddr; 2562 2563 if (ubsec_dma_malloc(sc, sizeof(struct ubsec_ctx_rsapriv), 2564 &rp->rpr_q.q_ctx, 0)) { 2565 err = ENOMEM; 2566 goto errout; 2567 } 2568 ctx = (struct ubsec_ctx_rsapriv *)rp->rpr_q.q_ctx.dma_vaddr; 2569 bzero(ctx, sizeof *ctx); 2570 2571 /* Copy in p */ 2572 bcopy(krp->krp_param[UBS_RSAPRIV_PAR_P].crp_p, 2573 &ctx->rpr_buf[0 * (padlen / 8)], 2574 (krp->krp_param[UBS_RSAPRIV_PAR_P].crp_nbits + 7) / 8); 2575 2576 /* Copy in q */ 2577 bcopy(krp->krp_param[UBS_RSAPRIV_PAR_Q].crp_p, 2578 &ctx->rpr_buf[1 * (padlen / 8)], 2579 (krp->krp_param[UBS_RSAPRIV_PAR_Q].crp_nbits + 7) / 8); 2580 2581 /* Copy in dp */ 2582 bcopy(krp->krp_param[UBS_RSAPRIV_PAR_DP].crp_p, 2583 &ctx->rpr_buf[2 * (padlen / 8)], 2584 (krp->krp_param[UBS_RSAPRIV_PAR_DP].crp_nbits + 7) / 8); 2585 2586 /* Copy in dq */ 2587 bcopy(krp->krp_param[UBS_RSAPRIV_PAR_DQ].crp_p, 2588 &ctx->rpr_buf[3 * (padlen / 8)], 2589 (krp->krp_param[UBS_RSAPRIV_PAR_DQ].crp_nbits + 7) / 8); 2590 2591 /* Copy in pinv */ 2592 bcopy(krp->krp_param[UBS_RSAPRIV_PAR_PINV].crp_p, 2593 &ctx->rpr_buf[4 * (padlen / 8)], 2594 (krp->krp_param[UBS_RSAPRIV_PAR_PINV].crp_nbits + 7) / 8); 
2595 2596 msglen = padlen * 2; 2597 2598 /* Copy in input message (aligned buffer/length). */ 2599 if (ubsec_ksigbits(&krp->krp_param[UBS_RSAPRIV_PAR_MSGIN]) > msglen) { 2600 /* Is this likely? */ 2601 err = E2BIG; 2602 goto errout; 2603 } 2604 if (ubsec_dma_malloc(sc, (msglen + 7) / 8, &rp->rpr_msgin, 0)) { 2605 err = ENOMEM; 2606 goto errout; 2607 } 2608 bzero(rp->rpr_msgin.dma_vaddr, (msglen + 7) / 8); 2609 bcopy(krp->krp_param[UBS_RSAPRIV_PAR_MSGIN].crp_p, 2610 rp->rpr_msgin.dma_vaddr, 2611 (krp->krp_param[UBS_RSAPRIV_PAR_MSGIN].crp_nbits + 7) / 8); 2612 2613 /* Prepare space for output message (aligned buffer/length). */ 2614 if (ubsec_ksigbits(&krp->krp_param[UBS_RSAPRIV_PAR_MSGOUT]) < msglen) { 2615 /* Is this likely? */ 2616 err = E2BIG; 2617 goto errout; 2618 } 2619 if (ubsec_dma_malloc(sc, (msglen + 7) / 8, &rp->rpr_msgout, 0)) { 2620 err = ENOMEM; 2621 goto errout; 2622 } 2623 bzero(rp->rpr_msgout.dma_vaddr, (msglen + 7) / 8); 2624 2625 mcr->mcr_pkts = htole16(1); 2626 mcr->mcr_flags = 0; 2627 mcr->mcr_cmdctxp = htole32(rp->rpr_q.q_ctx.dma_paddr); 2628 mcr->mcr_ipktbuf.pb_addr = htole32(rp->rpr_msgin.dma_paddr); 2629 mcr->mcr_ipktbuf.pb_next = 0; 2630 mcr->mcr_ipktbuf.pb_len = htole32(rp->rpr_msgin.dma_size); 2631 mcr->mcr_reserved = 0; 2632 mcr->mcr_pktlen = htole16(msglen); 2633 mcr->mcr_opktbuf.pb_addr = htole32(rp->rpr_msgout.dma_paddr); 2634 mcr->mcr_opktbuf.pb_next = 0; 2635 mcr->mcr_opktbuf.pb_len = htole32(rp->rpr_msgout.dma_size); 2636 2637#ifdef DIAGNOSTIC 2638 if (rp->rpr_msgin.dma_paddr & 3 || rp->rpr_msgin.dma_size & 3) { 2639 panic("%s: rsapriv: invalid msgin %x(0x%jx)", 2640 device_get_nameunit(sc->sc_dev), 2641 rp->rpr_msgin.dma_paddr, (uintmax_t)rp->rpr_msgin.dma_size); 2642 } 2643 if (rp->rpr_msgout.dma_paddr & 3 || rp->rpr_msgout.dma_size & 3) { 2644 panic("%s: rsapriv: invalid msgout %x(0x%jx)", 2645 device_get_nameunit(sc->sc_dev), 2646 rp->rpr_msgout.dma_paddr, (uintmax_t)rp->rpr_msgout.dma_size); 2647 } 2648#endif 2649 2650 
ctx->rpr_len = (sizeof(u_int16_t) * 4) + (5 * (padlen / 8)); 2651 ctx->rpr_op = htole16(UBS_CTXOP_RSAPRIV); 2652 ctx->rpr_q_len = htole16(padlen); 2653 ctx->rpr_p_len = htole16(padlen); 2654 2655 /* 2656 * ubsec_feed2 will sync mcr and ctx, we just need to sync 2657 * everything else. 2658 */ 2659 ubsec_dma_sync(&rp->rpr_msgin, BUS_DMASYNC_PREWRITE); 2660 ubsec_dma_sync(&rp->rpr_msgout, BUS_DMASYNC_PREREAD); 2661 2662 /* Enqueue and we're done... */ 2663 UBSEC_LOCK(sc); 2664 SIMPLEQ_INSERT_TAIL(&sc->sc_queue2, &rp->rpr_q, q_next); 2665 ubsec_feed2(sc); 2666 ubsecstats.hst_modexpcrt++; 2667 UBSEC_UNLOCK(sc); 2668 return (0); 2669 2670errout: 2671 if (rp != NULL) { 2672 if (rp->rpr_q.q_mcr.dma_map != NULL) 2673 ubsec_dma_free(sc, &rp->rpr_q.q_mcr); 2674 if (rp->rpr_msgin.dma_map != NULL) { 2675 bzero(rp->rpr_msgin.dma_vaddr, rp->rpr_msgin.dma_size); 2676 ubsec_dma_free(sc, &rp->rpr_msgin); 2677 } 2678 if (rp->rpr_msgout.dma_map != NULL) { 2679 bzero(rp->rpr_msgout.dma_vaddr, rp->rpr_msgout.dma_size); 2680 ubsec_dma_free(sc, &rp->rpr_msgout); 2681 } 2682 free(rp, M_DEVBUF); 2683 } 2684 krp->krp_status = err; 2685 crypto_kdone(krp); 2686 return (0); 2687} 2688 2689#ifdef UBSEC_DEBUG 2690static void 2691ubsec_dump_pb(volatile struct ubsec_pktbuf *pb) 2692{ 2693 printf("addr 0x%x (0x%x) next 0x%x\n", 2694 pb->pb_addr, pb->pb_len, pb->pb_next); 2695} 2696 2697static void 2698ubsec_dump_ctx2(struct ubsec_ctx_keyop *c) 2699{ 2700 printf("CTX (0x%x):\n", c->ctx_len); 2701 switch (letoh16(c->ctx_op)) { 2702 case UBS_CTXOP_RNGBYPASS: 2703 case UBS_CTXOP_RNGSHA1: 2704 break; 2705 case UBS_CTXOP_MODEXP: 2706 { 2707 struct ubsec_ctx_modexp *cx = (void *)c; 2708 int i, len; 2709 2710 printf(" Elen %u, Nlen %u\n", 2711 letoh16(cx->me_E_len), letoh16(cx->me_N_len)); 2712 len = (cx->me_N_len + 7)/8; 2713 for (i = 0; i < len; i++) 2714 printf("%s%02x", (i == 0) ? 
" N: " : ":", cx->me_N[i]); 2715 printf("\n"); 2716 break; 2717 } 2718 default: 2719 printf("unknown context: %x\n", c->ctx_op); 2720 } 2721 printf("END CTX\n"); 2722} 2723 2724static void 2725ubsec_dump_mcr(struct ubsec_mcr *mcr) 2726{ 2727 volatile struct ubsec_mcr_add *ma; 2728 int i; 2729 2730 printf("MCR:\n"); 2731 printf(" pkts: %u, flags 0x%x\n", 2732 letoh16(mcr->mcr_pkts), letoh16(mcr->mcr_flags)); 2733 ma = (volatile struct ubsec_mcr_add *)&mcr->mcr_cmdctxp; 2734 for (i = 0; i < letoh16(mcr->mcr_pkts); i++) { 2735 printf(" %d: ctx 0x%x len 0x%x rsvd 0x%x\n", i, 2736 letoh32(ma->mcr_cmdctxp), letoh16(ma->mcr_pktlen), 2737 letoh16(ma->mcr_reserved)); 2738 printf(" %d: ipkt ", i); 2739 ubsec_dump_pb(&ma->mcr_ipktbuf); 2740 printf(" %d: opkt ", i); 2741 ubsec_dump_pb(&ma->mcr_opktbuf); 2742 ma++; 2743 } 2744 printf("END MCR\n"); 2745} 2746#endif /* UBSEC_DEBUG */ 2747 2748/* 2749 * Return the number of significant bits of a big number. 2750 */ 2751static int 2752ubsec_ksigbits(struct crparam *cr) 2753{ 2754 u_int plen = (cr->crp_nbits + 7) / 8; 2755 int i, sig = plen * 8; 2756 u_int8_t c, *p = cr->crp_p; 2757 2758 for (i = plen - 1; i >= 0; i--) { 2759 c = p[i]; 2760 if (c != 0) { 2761 while ((c & 0x80) == 0) { 2762 sig--; 2763 c <<= 1; 2764 } 2765 break; 2766 } 2767 sig -= 8; 2768 } 2769 return (sig); 2770} 2771 2772static void 2773ubsec_kshift_r( 2774 u_int shiftbits, 2775 u_int8_t *src, u_int srcbits, 2776 u_int8_t *dst, u_int dstbits) 2777{ 2778 u_int slen, dlen; 2779 int i, si, di, n; 2780 2781 slen = (srcbits + 7) / 8; 2782 dlen = (dstbits + 7) / 8; 2783 2784 for (i = 0; i < slen; i++) 2785 dst[i] = src[i]; 2786 for (i = 0; i < dlen - slen; i++) 2787 dst[slen + i] = 0; 2788 2789 n = shiftbits / 8; 2790 if (n != 0) { 2791 si = dlen - n - 1; 2792 di = dlen - 1; 2793 while (si >= 0) 2794 dst[di--] = dst[si--]; 2795 while (di >= 0) 2796 dst[di--] = 0; 2797 } 2798 2799 n = shiftbits % 8; 2800 if (n != 0) { 2801 for (i = dlen - 1; i > 0; i--) 2802 dst[i] = 
(dst[i] << n) | 2803 (dst[i - 1] >> (8 - n)); 2804 dst[0] = dst[0] << n; 2805 } 2806} 2807 2808static void 2809ubsec_kshift_l( 2810 u_int shiftbits, 2811 u_int8_t *src, u_int srcbits, 2812 u_int8_t *dst, u_int dstbits) 2813{ 2814 int slen, dlen, i, n; 2815 2816 slen = (srcbits + 7) / 8; 2817 dlen = (dstbits + 7) / 8; 2818 2819 n = shiftbits / 8; 2820 for (i = 0; i < slen; i++) 2821 dst[i] = src[i + n]; 2822 for (i = 0; i < dlen - slen; i++) 2823 dst[slen + i] = 0; 2824 2825 n = shiftbits % 8; 2826 if (n != 0) { 2827 for (i = 0; i < (dlen - 1); i++) 2828 dst[i] = (dst[i] >> n) | (dst[i + 1] << (8 - n)); 2829 dst[dlen - 1] = dst[dlen - 1] >> n; 2830 } 2831} 2832