ubsec.c revision 194023
1/*	$OpenBSD: ubsec.c,v 1.115 2002/09/24 18:33:26 jason Exp $	*/
2
3/*-
4 * Copyright (c) 2000 Jason L. Wright (jason@thought.net)
5 * Copyright (c) 2000 Theo de Raadt (deraadt@openbsd.org)
6 * Copyright (c) 2001 Patrik Lindergren (patrik@ipunplugged.com)
7 *
8 * All rights reserved.
9 *
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions
12 * are met:
13 * 1. Redistributions of source code must retain the above copyright
14 *    notice, this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright
16 *    notice, this list of conditions and the following disclaimer in the
17 *    documentation and/or other materials provided with the distribution.
18 * 3. All advertising materials mentioning features or use of this software
19 *    must display the following acknowledgement:
20 *	This product includes software developed by Jason L. Wright
21 * 4. The name of the author may not be used to endorse or promote products
22 *    derived from this software without specific prior written permission.
23 *
24 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
25 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
26 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
27 * DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
28 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
29 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
30 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
31 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
32 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
33 * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
34 * POSSIBILITY OF SUCH DAMAGE.
35 *
36 * Effort sponsored in part by the Defense Advanced Research Projects
37 * Agency (DARPA) and Air Force Research Laboratory, Air Force
38 * Materiel Command, USAF, under agreement number F30602-01-2-0537.
39 */
40
41#include <sys/cdefs.h>
42__FBSDID("$FreeBSD: head/sys/dev/ubsec/ubsec.c 194023 2009-06-11 17:14:28Z avg $");
43
44/*
45 * uBsec 5[56]01, 58xx hardware crypto accelerator
46 */
47
48#include "opt_ubsec.h"
49
50#include <sys/param.h>
51#include <sys/systm.h>
52#include <sys/proc.h>
53#include <sys/errno.h>
54#include <sys/malloc.h>
55#include <sys/kernel.h>
56#include <sys/module.h>
57#include <sys/mbuf.h>
58#include <sys/lock.h>
59#include <sys/mutex.h>
60#include <sys/sysctl.h>
61#include <sys/endian.h>
62
63#include <vm/vm.h>
64#include <vm/pmap.h>
65
66#include <machine/bus.h>
67#include <machine/resource.h>
68#include <sys/bus.h>
69#include <sys/rman.h>
70
71#include <crypto/sha1.h>
72#include <opencrypto/cryptodev.h>
73#include <opencrypto/cryptosoft.h>
74#include <sys/md5.h>
75#include <sys/random.h>
76#include <sys/kobj.h>
77
78#include "cryptodev_if.h"
79
80#include <dev/pci/pcivar.h>
81#include <dev/pci/pcireg.h>
82
83/* grr, #defines for gratuitous incompatibility in queue.h */
84#define	SIMPLEQ_HEAD		STAILQ_HEAD
85#define	SIMPLEQ_ENTRY		STAILQ_ENTRY
86#define	SIMPLEQ_INIT		STAILQ_INIT
87#define	SIMPLEQ_INSERT_TAIL	STAILQ_INSERT_TAIL
88#define	SIMPLEQ_EMPTY		STAILQ_EMPTY
89#define	SIMPLEQ_FIRST		STAILQ_FIRST
90#define	SIMPLEQ_REMOVE_HEAD	STAILQ_REMOVE_HEAD
91#define	SIMPLEQ_FOREACH		STAILQ_FOREACH
92/* ditto for endian.h */
93#define	letoh16(x)		le16toh(x)
94#define	letoh32(x)		le32toh(x)
95
96#ifdef UBSEC_RNDTEST
97#include <dev/rndtest/rndtest.h>
98#endif
99#include <dev/ubsec/ubsecreg.h>
100#include <dev/ubsec/ubsecvar.h>
101
102/*
103 * Prototypes and count for the pci_device structure
104 */
105static	int ubsec_probe(device_t);
106static	int ubsec_attach(device_t);
107static	int ubsec_detach(device_t);
108static	int ubsec_suspend(device_t);
109static	int ubsec_resume(device_t);
110static	int ubsec_shutdown(device_t);
111
112static	int ubsec_newsession(device_t, u_int32_t *, struct cryptoini *);
113static	int ubsec_freesession(device_t, u_int64_t);
114static	int ubsec_process(device_t, struct cryptop *, int);
115static	int ubsec_kprocess(device_t, struct cryptkop *, int);
116
117static device_method_t ubsec_methods[] = {
118	/* Device interface */
119	DEVMETHOD(device_probe,		ubsec_probe),
120	DEVMETHOD(device_attach,	ubsec_attach),
121	DEVMETHOD(device_detach,	ubsec_detach),
122	DEVMETHOD(device_suspend,	ubsec_suspend),
123	DEVMETHOD(device_resume,	ubsec_resume),
124	DEVMETHOD(device_shutdown,	ubsec_shutdown),
125
126	/* bus interface */
127	DEVMETHOD(bus_print_child,	bus_generic_print_child),
128	DEVMETHOD(bus_driver_added,	bus_generic_driver_added),
129
130	/* crypto device methods */
131	DEVMETHOD(cryptodev_newsession,	ubsec_newsession),
132	DEVMETHOD(cryptodev_freesession,ubsec_freesession),
133	DEVMETHOD(cryptodev_process,	ubsec_process),
134	DEVMETHOD(cryptodev_kprocess,	ubsec_kprocess),
135
136	{ 0, 0 }
137};
138static driver_t ubsec_driver = {
139	"ubsec",
140	ubsec_methods,
141	sizeof (struct ubsec_softc)
142};
143static devclass_t ubsec_devclass;
144
145DRIVER_MODULE(ubsec, pci, ubsec_driver, ubsec_devclass, 0, 0);
146MODULE_DEPEND(ubsec, crypto, 1, 1, 1);
147#ifdef UBSEC_RNDTEST
148MODULE_DEPEND(ubsec, rndtest, 1, 1, 1);
149#endif
150
151static	void ubsec_intr(void *);
152static	void ubsec_callback(struct ubsec_softc *, struct ubsec_q *);
153static	void ubsec_feed(struct ubsec_softc *);
154static	void ubsec_mcopy(struct mbuf *, struct mbuf *, int, int);
155static	void ubsec_callback2(struct ubsec_softc *, struct ubsec_q2 *);
156static	int ubsec_feed2(struct ubsec_softc *);
157static	void ubsec_rng(void *);
158static	int ubsec_dma_malloc(struct ubsec_softc *, bus_size_t,
159			     struct ubsec_dma_alloc *, int);
160#define	ubsec_dma_sync(_dma, _flags) \
161	bus_dmamap_sync((_dma)->dma_tag, (_dma)->dma_map, (_flags))
162static	void ubsec_dma_free(struct ubsec_softc *, struct ubsec_dma_alloc *);
163static	int ubsec_dmamap_aligned(struct ubsec_operand *op);
164
165static	void ubsec_reset_board(struct ubsec_softc *sc);
166static	void ubsec_init_board(struct ubsec_softc *sc);
167static	void ubsec_init_pciregs(device_t dev);
168static	void ubsec_totalreset(struct ubsec_softc *sc);
169
170static	int ubsec_free_q(struct ubsec_softc *sc, struct ubsec_q *q);
171
172static	int ubsec_kprocess_modexp_hw(struct ubsec_softc *, struct cryptkop *, int);
173static	int ubsec_kprocess_modexp_sw(struct ubsec_softc *, struct cryptkop *, int);
174static	int ubsec_kprocess_rsapriv(struct ubsec_softc *, struct cryptkop *, int);
175static	void ubsec_kfree(struct ubsec_softc *, struct ubsec_q2 *);
176static	int ubsec_ksigbits(struct crparam *);
177static	void ubsec_kshift_r(u_int, u_int8_t *, u_int, u_int8_t *, u_int);
178static	void ubsec_kshift_l(u_int, u_int8_t *, u_int, u_int8_t *, u_int);
179
180SYSCTL_NODE(_hw, OID_AUTO, ubsec, CTLFLAG_RD, 0, "Broadcom driver parameters");
181
182#ifdef UBSEC_DEBUG
183static	void ubsec_dump_pb(volatile struct ubsec_pktbuf *);
184static	void ubsec_dump_mcr(struct ubsec_mcr *);
185static	void ubsec_dump_ctx2(struct ubsec_ctx_keyop *);
186
187static	int ubsec_debug = 0;
188SYSCTL_INT(_hw_ubsec, OID_AUTO, debug, CTLFLAG_RW, &ubsec_debug,
189	    0, "control debugging msgs");
190#endif
191
192#define	READ_REG(sc,r) \
193	bus_space_read_4((sc)->sc_st, (sc)->sc_sh, (r))
194
195#define WRITE_REG(sc,reg,val) \
196	bus_space_write_4((sc)->sc_st, (sc)->sc_sh, reg, val)
197
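/*
 * The chip wants multi-word quantities in little-endian order.
 * SWAP32() converts a 32-bit word from network (big-endian) byte order
 * to little-endian in place; HTOLE32() does the same from host order.
 */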
198#define	SWAP32(x) (x) = htole32(ntohl((x)))
199#define	HTOLE32(x) (x) = htole32(x)
200
201struct ubsec_stats ubsecstats;
202SYSCTL_STRUCT(_hw_ubsec, OID_AUTO, stats, CTLFLAG_RD, &ubsecstats,
203	    ubsec_stats, "driver statistics");
204
205static int
206ubsec_probe(device_t dev)
207{
208	if (pci_get_vendor(dev) == PCI_VENDOR_SUN &&
209	    (pci_get_device(dev) == PCI_PRODUCT_SUN_5821 ||
210	     pci_get_device(dev) == PCI_PRODUCT_SUN_SCA1K))
211		return (BUS_PROBE_DEFAULT);
212	if (pci_get_vendor(dev) == PCI_VENDOR_BLUESTEEL &&
213	    (pci_get_device(dev) == PCI_PRODUCT_BLUESTEEL_5501 ||
214	     pci_get_device(dev) == PCI_PRODUCT_BLUESTEEL_5601))
215		return (BUS_PROBE_DEFAULT);
216	if (pci_get_vendor(dev) == PCI_VENDOR_BROADCOM &&
217	    (pci_get_device(dev) == PCI_PRODUCT_BROADCOM_5801 ||
218	     pci_get_device(dev) == PCI_PRODUCT_BROADCOM_5802 ||
219	     pci_get_device(dev) == PCI_PRODUCT_BROADCOM_5805 ||
220	     pci_get_device(dev) == PCI_PRODUCT_BROADCOM_5820 ||
221	     pci_get_device(dev) == PCI_PRODUCT_BROADCOM_5821 ||
222	     pci_get_device(dev) == PCI_PRODUCT_BROADCOM_5822 ||
223	     pci_get_device(dev) == PCI_PRODUCT_BROADCOM_5823 ||
224	     pci_get_device(dev) == PCI_PRODUCT_BROADCOM_5825
225	     ))
226		return (BUS_PROBE_DEFAULT);
227	return (ENXIO);
228}
229
230static const char*
231ubsec_partname(struct ubsec_softc *sc)
232{
233	/* XXX sprintf numbers when not decoded */
234	switch (pci_get_vendor(sc->sc_dev)) {
235	case PCI_VENDOR_BROADCOM:
236		switch (pci_get_device(sc->sc_dev)) {
237		case PCI_PRODUCT_BROADCOM_5801:	return "Broadcom 5801";
238		case PCI_PRODUCT_BROADCOM_5802:	return "Broadcom 5802";
239		case PCI_PRODUCT_BROADCOM_5805:	return "Broadcom 5805";
240		case PCI_PRODUCT_BROADCOM_5820:	return "Broadcom 5820";
241		case PCI_PRODUCT_BROADCOM_5821:	return "Broadcom 5821";
242		case PCI_PRODUCT_BROADCOM_5822:	return "Broadcom 5822";
243		case PCI_PRODUCT_BROADCOM_5823:	return "Broadcom 5823";
244		case PCI_PRODUCT_BROADCOM_5825:	return "Broadcom 5825";
245		}
246		return "Broadcom unknown-part";
247	case PCI_VENDOR_BLUESTEEL:
248		switch (pci_get_device(sc->sc_dev)) {
249		case PCI_PRODUCT_BLUESTEEL_5601: return "Bluesteel 5601";
250		}
251		return "Bluesteel unknown-part";
252	case PCI_VENDOR_SUN:
253		switch (pci_get_device(sc->sc_dev)) {
254		case PCI_PRODUCT_SUN_5821: return "Sun Crypto 5821";
255		case PCI_PRODUCT_SUN_SCA1K: return "Sun Crypto 1K";
256		}
257		return "Sun unknown-part";
258	}
259	return "Unknown-vendor unknown-part";
260}
261
262static void
263default_harvest(struct rndtest_state *rsp, void *buf, u_int count)
264{
265	random_harvest(buf, count, count*NBBY, 0, RANDOM_PURE);
266}
267
268static int
269ubsec_attach(device_t dev)
270{
271	struct ubsec_softc *sc = device_get_softc(dev);
272	struct ubsec_dma *dmap;
273	u_int32_t cmd, i;
274	int rid;
275
276	bzero(sc, sizeof (*sc));
277	sc->sc_dev = dev;
278
279	SIMPLEQ_INIT(&sc->sc_queue);
280	SIMPLEQ_INIT(&sc->sc_qchip);
281	SIMPLEQ_INIT(&sc->sc_queue2);
282	SIMPLEQ_INIT(&sc->sc_qchip2);
283	SIMPLEQ_INIT(&sc->sc_q2free);
284
285	/* XXX handle power management */
286
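	/*
	 * Interrupt status bits we care about; BS_STAT_MCR2_DONE is added
	 * below for parts that support key and RNG operations.
	 */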
287	sc->sc_statmask = BS_STAT_MCR1_DONE | BS_STAT_DMAERR;
288
289	if (pci_get_vendor(dev) == PCI_VENDOR_BLUESTEEL &&
290	    pci_get_device(dev) == PCI_PRODUCT_BLUESTEEL_5601)
291		sc->sc_flags |= UBS_FLAGS_KEY | UBS_FLAGS_RNG;
292
293	if (pci_get_vendor(dev) == PCI_VENDOR_BROADCOM &&
294	    (pci_get_device(dev) == PCI_PRODUCT_BROADCOM_5802 ||
295	     pci_get_device(dev) == PCI_PRODUCT_BROADCOM_5805))
296		sc->sc_flags |= UBS_FLAGS_KEY | UBS_FLAGS_RNG;
297
298	if (pci_get_vendor(dev) == PCI_VENDOR_BROADCOM &&
299	    pci_get_device(dev) == PCI_PRODUCT_BROADCOM_5820)
300		sc->sc_flags |= UBS_FLAGS_KEY | UBS_FLAGS_RNG |
301		    UBS_FLAGS_LONGCTX | UBS_FLAGS_HWNORM | UBS_FLAGS_BIGKEY;
302
303	if ((pci_get_vendor(dev) == PCI_VENDOR_BROADCOM &&
304	     (pci_get_device(dev) == PCI_PRODUCT_BROADCOM_5821 ||
305	      pci_get_device(dev) == PCI_PRODUCT_BROADCOM_5822 ||
306	      pci_get_device(dev) == PCI_PRODUCT_BROADCOM_5823 ||
307	      pci_get_device(dev) == PCI_PRODUCT_BROADCOM_5825)) ||
308	    (pci_get_vendor(dev) == PCI_VENDOR_SUN &&
309	     (pci_get_device(dev) == PCI_PRODUCT_SUN_SCA1K ||
310	      pci_get_device(dev) == PCI_PRODUCT_SUN_5821))) {
311		/* NB: the 5821/5822 defines some additional status bits */
312		sc->sc_statmask |= BS_STAT_MCR1_ALLEMPTY |
313		    BS_STAT_MCR2_ALLEMPTY;
314		sc->sc_flags |= UBS_FLAGS_KEY | UBS_FLAGS_RNG |
315		    UBS_FLAGS_LONGCTX | UBS_FLAGS_HWNORM | UBS_FLAGS_BIGKEY;
316	}
317
318	cmd = pci_read_config(dev, PCIR_COMMAND, 4);
319	cmd |= PCIM_CMD_MEMEN | PCIM_CMD_BUSMASTEREN;
320	pci_write_config(dev, PCIR_COMMAND, cmd, 4);
321	cmd = pci_read_config(dev, PCIR_COMMAND, 4);
322
323	if (!(cmd & PCIM_CMD_MEMEN)) {
324		device_printf(dev, "failed to enable memory mapping\n");
325		goto bad;
326	}
327
328	if (!(cmd & PCIM_CMD_BUSMASTEREN)) {
329		device_printf(dev, "failed to enable bus mastering\n");
330		goto bad;
331	}
332
333	/*
334	 * Setup memory-mapping of PCI registers.
335	 */
336	rid = BS_BAR;
337	sc->sc_sr = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
338					   RF_ACTIVE);
339	if (sc->sc_sr == NULL) {
340		device_printf(dev, "cannot map register space\n");
341		goto bad;
342	}
343	sc->sc_st = rman_get_bustag(sc->sc_sr);
344	sc->sc_sh = rman_get_bushandle(sc->sc_sr);
345
346	/*
347	 * Arrange interrupt line.
348	 */
349	rid = 0;
350	sc->sc_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
351					    RF_SHAREABLE|RF_ACTIVE);
352	if (sc->sc_irq == NULL) {
353		device_printf(dev, "could not map interrupt\n");
354		goto bad1;
355	}
356	/*
357	 * NB: Network code assumes we are blocked with splimp()
358	 *     so make sure the IRQ is mapped appropriately.
359	 */
360	if (bus_setup_intr(dev, sc->sc_irq, INTR_TYPE_NET | INTR_MPSAFE,
361			   NULL, ubsec_intr, sc, &sc->sc_ih)) {
362		device_printf(dev, "could not establish interrupt\n");
363		goto bad2;
364	}
365
366	sc->sc_cid = crypto_get_driverid(dev, CRYPTOCAP_F_HARDWARE);
367	if (sc->sc_cid < 0) {
368		device_printf(dev, "could not get crypto driver id\n");
369		goto bad3;
370	}
371
372	/*
373	 * Setup DMA descriptor area.
374	 */
375	if (bus_dma_tag_create(NULL,			/* parent */
376			       1, 0,			/* alignment, bounds */
377			       BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
378			       BUS_SPACE_MAXADDR,	/* highaddr */
379			       NULL, NULL,		/* filter, filterarg */
380			       0x3ffff,			/* maxsize */
381			       UBS_MAX_SCATTER,		/* nsegments */
382			       0xffff,			/* maxsegsize */
383			       BUS_DMA_ALLOCNOW,	/* flags */
384			       NULL, NULL,		/* lockfunc, lockarg */
385			       &sc->sc_dmat)) {
386		device_printf(dev, "cannot allocate DMA tag\n");
387		goto bad4;
388	}
389	SIMPLEQ_INIT(&sc->sc_freequeue);
390	dmap = sc->sc_dmaa;
391	for (i = 0; i < UBS_MAX_NQUEUE; i++, dmap++) {
392		struct ubsec_q *q;
393
394		q = (struct ubsec_q *)malloc(sizeof(struct ubsec_q),
395		    M_DEVBUF, M_NOWAIT);
396		if (q == NULL) {
397			device_printf(dev, "cannot allocate queue buffers\n");
398			break;
399		}
400
401		if (ubsec_dma_malloc(sc, sizeof(struct ubsec_dmachunk),
402		    &dmap->d_alloc, 0)) {
403			device_printf(dev, "cannot allocate dma buffers\n");
404			free(q, M_DEVBUF);
405			break;
406		}
407		dmap->d_dma = (struct ubsec_dmachunk *)dmap->d_alloc.dma_vaddr;
408
409		q->q_dma = dmap;
410		sc->sc_queuea[i] = q;
411
412		SIMPLEQ_INSERT_TAIL(&sc->sc_freequeue, q, q_next);
413	}
414	mtx_init(&sc->sc_mcr1lock, device_get_nameunit(dev),
415		"mcr1 operations", MTX_DEF);
416	mtx_init(&sc->sc_freeqlock, device_get_nameunit(dev),
417		"mcr1 free q", MTX_DEF);
418
419	device_printf(sc->sc_dev, "%s\n", ubsec_partname(sc));
420
421	crypto_register(sc->sc_cid, CRYPTO_3DES_CBC, 0, 0);
422	crypto_register(sc->sc_cid, CRYPTO_DES_CBC, 0, 0);
423	crypto_register(sc->sc_cid, CRYPTO_MD5_HMAC, 0, 0);
424	crypto_register(sc->sc_cid, CRYPTO_SHA1_HMAC, 0, 0);
425
426	/*
427	 * Reset Broadcom chip
428	 */
429	ubsec_reset_board(sc);
430
431	/*
432	 * Init Broadcom specific PCI settings
433	 */
434	ubsec_init_pciregs(dev);
435
436	/*
437	 * Init Broadcom chip
438	 */
439	ubsec_init_board(sc);
440
441#ifndef UBSEC_NO_RNG
442	if (sc->sc_flags & UBS_FLAGS_RNG) {
443		sc->sc_statmask |= BS_STAT_MCR2_DONE;
444#ifdef UBSEC_RNDTEST
445		sc->sc_rndtest = rndtest_attach(dev);
446		if (sc->sc_rndtest)
447			sc->sc_harvest = rndtest_harvest;
448		else
449			sc->sc_harvest = default_harvest;
450#else
451		sc->sc_harvest = default_harvest;
452#endif
453
454		if (ubsec_dma_malloc(sc, sizeof(struct ubsec_mcr),
455		    &sc->sc_rng.rng_q.q_mcr, 0))
456			goto skip_rng;
457
458		if (ubsec_dma_malloc(sc, sizeof(struct ubsec_ctx_rngbypass),
459		    &sc->sc_rng.rng_q.q_ctx, 0)) {
460			ubsec_dma_free(sc, &sc->sc_rng.rng_q.q_mcr);
461			goto skip_rng;
462		}
463
464		if (ubsec_dma_malloc(sc, sizeof(u_int32_t) *
465		    UBSEC_RNG_BUFSIZ, &sc->sc_rng.rng_buf, 0)) {
466			ubsec_dma_free(sc, &sc->sc_rng.rng_q.q_ctx);
467			ubsec_dma_free(sc, &sc->sc_rng.rng_q.q_mcr);
468			goto skip_rng;
469		}
470
471		if (hz >= 100)
472			sc->sc_rnghz = hz / 100;
473		else
474			sc->sc_rnghz = 1;
475		callout_init(&sc->sc_rngto, CALLOUT_MPSAFE);
476		callout_reset(&sc->sc_rngto, sc->sc_rnghz, ubsec_rng, sc);
477skip_rng:
478	;
479	}
480#endif /* UBSEC_NO_RNG */
481	mtx_init(&sc->sc_mcr2lock, device_get_nameunit(dev),
482		"mcr2 operations", MTX_DEF);
483
484	if (sc->sc_flags & UBS_FLAGS_KEY) {
485		sc->sc_statmask |= BS_STAT_MCR2_DONE;
486
487		crypto_kregister(sc->sc_cid, CRK_MOD_EXP, 0);
488#if 0
489		crypto_kregister(sc->sc_cid, CRK_MOD_EXP_CRT, 0);
490#endif
491	}
492	return (0);
493bad4:
494	crypto_unregister_all(sc->sc_cid);
495bad3:
496	bus_teardown_intr(dev, sc->sc_irq, sc->sc_ih);
497bad2:
498	bus_release_resource(dev, SYS_RES_IRQ, 0, sc->sc_irq);
499bad1:
500	bus_release_resource(dev, SYS_RES_MEMORY, BS_BAR, sc->sc_sr);
501bad:
502	return (ENXIO);
503}
504
505/*
506 * Detach a device that successfully probed.
507 */
508static int
509ubsec_detach(device_t dev)
510{
511	struct ubsec_softc *sc = device_get_softc(dev);
512
513	/* XXX wait/abort active ops */
514
515	/* disable interrupts */
516	WRITE_REG(sc, BS_CTRL, READ_REG(sc, BS_CTRL) &~
517		(BS_CTRL_MCR2INT | BS_CTRL_MCR1INT | BS_CTRL_DMAERR));
518
519	callout_stop(&sc->sc_rngto);
520
521	crypto_unregister_all(sc->sc_cid);
522
523#ifdef UBSEC_RNDTEST
524	if (sc->sc_rndtest)
525		rndtest_detach(sc->sc_rndtest);
526#endif
527
528	while (!SIMPLEQ_EMPTY(&sc->sc_freequeue)) {
529		struct ubsec_q *q;
530
531		q = SIMPLEQ_FIRST(&sc->sc_freequeue);
532		SIMPLEQ_REMOVE_HEAD(&sc->sc_freequeue, q_next);
533		ubsec_dma_free(sc, &q->q_dma->d_alloc);
534		free(q, M_DEVBUF);
535	}
536	mtx_destroy(&sc->sc_mcr1lock);
537	mtx_destroy(&sc->sc_freeqlock);
538#ifndef UBSEC_NO_RNG
539	if (sc->sc_flags & UBS_FLAGS_RNG) {
540		ubsec_dma_free(sc, &sc->sc_rng.rng_q.q_mcr);
541		ubsec_dma_free(sc, &sc->sc_rng.rng_q.q_ctx);
542		ubsec_dma_free(sc, &sc->sc_rng.rng_buf);
543	}
544#endif /* UBSEC_NO_RNG */
545	mtx_destroy(&sc->sc_mcr2lock);
546
547	bus_generic_detach(dev);
548	bus_teardown_intr(dev, sc->sc_irq, sc->sc_ih);
549	bus_release_resource(dev, SYS_RES_IRQ, 0, sc->sc_irq);
550
551	bus_dma_tag_destroy(sc->sc_dmat);
552	bus_release_resource(dev, SYS_RES_MEMORY, BS_BAR, sc->sc_sr);
553
554	return (0);
555}
556
557/*
558 * Stop all chip i/o so that the kernel's probe routines don't
559 * get confused by errant DMAs when rebooting.
560 */
561static int
562ubsec_shutdown(device_t dev)
563{
564#ifdef notyet
565	ubsec_stop(device_get_softc(dev));
566#endif
567	return (0);
568}
569
570/*
571 * Device suspend routine.
572 */
573static int
574ubsec_suspend(device_t dev)
575{
576	struct ubsec_softc *sc = device_get_softc(dev);
577
578#ifdef notyet
579	/* XXX stop the device and save PCI settings */
580#endif
581	sc->sc_suspended = 1;
582
583	return (0);
584}
585
586static int
587ubsec_resume(device_t dev)
588{
589	struct ubsec_softc *sc = device_get_softc(dev);
590
591#ifdef notyet
592	/* XXX restore PCI settings and start the device */
593#endif
594	sc->sc_suspended = 0;
595	return (0);
596}
597
598/*
599 * UBSEC Interrupt routine
600 */
601static void
602ubsec_intr(void *arg)
603{
604	struct ubsec_softc *sc = arg;
605	volatile u_int32_t stat;
606	struct ubsec_q *q;
607	struct ubsec_dma *dmap;
608	int npkts = 0, i;
609
610	stat = READ_REG(sc, BS_STAT);
611	stat &= sc->sc_statmask;
612	if (stat == 0)
613		return;
614
615	WRITE_REG(sc, BS_STAT, stat);		/* IACK */
616
617	/*
618	 * Check to see if we have any packets waiting for us
619	 */
620	if ((stat & BS_STAT_MCR1_DONE)) {
621		mtx_lock(&sc->sc_mcr1lock);
622		while (!SIMPLEQ_EMPTY(&sc->sc_qchip)) {
623			q = SIMPLEQ_FIRST(&sc->sc_qchip);
624			dmap = q->q_dma;
625
626			if ((dmap->d_dma->d_mcr.mcr_flags & htole16(UBS_MCR_DONE)) == 0)
627				break;
628
629			SIMPLEQ_REMOVE_HEAD(&sc->sc_qchip, q_next);
630
631			npkts = q->q_nstacked_mcrs;
632			sc->sc_nqchip -= 1+npkts;
633			/*
634			 * Search for further sc_qchip ubsec_q's that share
635			 * the same MCR and complete them too; they must be
636			 * at the top.
637			 */
638			for (i = 0; i < npkts; i++) {
639				if(q->q_stacked_mcr[i]) {
640					ubsec_callback(sc, q->q_stacked_mcr[i]);
641				} else {
642					break;
643				}
644			}
645			ubsec_callback(sc, q);
646		}
647		/*
648		 * Don't send any more packets to the chip if there has
649		 * been a DMA error.
650		 */
651		if (!(stat & BS_STAT_DMAERR))
652			ubsec_feed(sc);
653		mtx_unlock(&sc->sc_mcr1lock);
654	}
655
656	/*
657	 * Check to see if we have any key setups/rng's waiting for us
658	 */
659	if ((sc->sc_flags & (UBS_FLAGS_KEY|UBS_FLAGS_RNG)) &&
660	    (stat & BS_STAT_MCR2_DONE)) {
661		struct ubsec_q2 *q2;
662		struct ubsec_mcr *mcr;
663
664		mtx_lock(&sc->sc_mcr2lock);
665		while (!SIMPLEQ_EMPTY(&sc->sc_qchip2)) {
666			q2 = SIMPLEQ_FIRST(&sc->sc_qchip2);
667
668			ubsec_dma_sync(&q2->q_mcr,
669			    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
670
671			mcr = (struct ubsec_mcr *)q2->q_mcr.dma_vaddr;
672			if ((mcr->mcr_flags & htole16(UBS_MCR_DONE)) == 0) {
673				ubsec_dma_sync(&q2->q_mcr,
674				    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
675				break;
676			}
677			SIMPLEQ_REMOVE_HEAD(&sc->sc_qchip2, q_next);
678			ubsec_callback2(sc, q2);
679			/*
680			 * Don't send any more packets to the chip if there has
681			 * been a DMA error.
682			 */
683			if (!(stat & BS_STAT_DMAERR))
684				ubsec_feed2(sc);
685		}
686		mtx_unlock(&sc->sc_mcr2lock);
687	}
688
689	/*
690	 * Check to see if we got a DMA error
691	 */
692	if (stat & BS_STAT_DMAERR) {
693#ifdef UBSEC_DEBUG
694		if (ubsec_debug) {
695			volatile u_int32_t a = READ_REG(sc, BS_ERR);
696
697			printf("dmaerr %s@%08x\n",
698			    (a & BS_ERR_READ) ? "read" : "write",
699			    a & BS_ERR_ADDR);
700		}
701#endif /* UBSEC_DEBUG */
702		ubsecstats.hst_dmaerr++;
703		mtx_lock(&sc->sc_mcr1lock);
704		ubsec_totalreset(sc);
705		ubsec_feed(sc);
706		mtx_unlock(&sc->sc_mcr1lock);
707	}
708
709	if (sc->sc_needwakeup) {		/* XXX check high watermark */
710		int wakeup;
711
712		mtx_lock(&sc->sc_freeqlock);
713		wakeup = sc->sc_needwakeup & (CRYPTO_SYMQ|CRYPTO_ASYMQ);
714#ifdef UBSEC_DEBUG
715		if (ubsec_debug)
716			device_printf(sc->sc_dev, "wakeup crypto (%x)\n",
717				sc->sc_needwakeup);
718#endif /* UBSEC_DEBUG */
719		sc->sc_needwakeup &= ~wakeup;
720		mtx_unlock(&sc->sc_freeqlock);
721		crypto_unblock(sc->sc_cid, wakeup);
722	}
723}
724
725/*
726 * ubsec_feed() - aggregate and post requests to chip
727 */
728static void
729ubsec_feed(struct ubsec_softc *sc)
730{
731	struct ubsec_q *q, *q2;
732	int npkts, i;
733	void *v;
734	u_int32_t stat;
735
736	/*
737	 * Decide how many ops to combine in a single MCR.  We cannot
738	 * aggregate more than UBS_MAX_AGGR because this is the number
739	 * of slots defined in the data structure.  Note that
740	 * aggregation only happens if ops are marked batch'able.
741	 * Aggregating ops reduces the number of interrupts to the host
742	 * but also (potentially) increases the latency for processing
743	 * completed ops as we only get an interrupt when all aggregated
744	 * ops have completed.
745	 * ops have completed.
	 */
746	if (sc->sc_nqueue == 0)
747		return;
748	if (sc->sc_nqueue > 1) {
749		npkts = 0;
750		SIMPLEQ_FOREACH(q, &sc->sc_queue, q_next) {
751			npkts++;
752			if ((q->q_crp->crp_flags & CRYPTO_F_BATCH) == 0)
753				break;
754		}
755	} else
756		npkts = 1;
757	/*
758	 * Check device status before going any further.
759	 */
760	if ((stat = READ_REG(sc, BS_STAT)) & (BS_STAT_MCR1_FULL | BS_STAT_DMAERR)) {
761		if (stat & BS_STAT_DMAERR) {
762			ubsec_totalreset(sc);
763			ubsecstats.hst_dmaerr++;
764		} else
765			ubsecstats.hst_mcr1full++;
766		return;
767	}
768	if (sc->sc_nqueue > ubsecstats.hst_maxqueue)
769		ubsecstats.hst_maxqueue = sc->sc_nqueue;
770	if (npkts > UBS_MAX_AGGR)
771		npkts = UBS_MAX_AGGR;
772	if (npkts < 2)				/* special case 1 op */
773		goto feed1;
774
775	ubsecstats.hst_totbatch += npkts-1;
776#ifdef UBSEC_DEBUG
777	if (ubsec_debug)
778		printf("merging %d records\n", npkts);
779#endif /* UBSEC_DEBUG */
780
781	q = SIMPLEQ_FIRST(&sc->sc_queue);
782	SIMPLEQ_REMOVE_HEAD(&sc->sc_queue, q_next);
783	--sc->sc_nqueue;
784
785	bus_dmamap_sync(sc->sc_dmat, q->q_src_map, BUS_DMASYNC_PREWRITE);
786	if (q->q_dst_map != NULL)
787		bus_dmamap_sync(sc->sc_dmat, q->q_dst_map, BUS_DMASYNC_PREREAD);
788
789	q->q_nstacked_mcrs = npkts - 1;		/* Number of packets stacked */
790
791	for (i = 0; i < q->q_nstacked_mcrs; i++) {
792		q2 = SIMPLEQ_FIRST(&sc->sc_queue);
793		bus_dmamap_sync(sc->sc_dmat, q2->q_src_map,
794		    BUS_DMASYNC_PREWRITE);
795		if (q2->q_dst_map != NULL)
796			bus_dmamap_sync(sc->sc_dmat, q2->q_dst_map,
797			    BUS_DMASYNC_PREREAD);
798		SIMPLEQ_REMOVE_HEAD(&sc->sc_queue, q_next);
799		--sc->sc_nqueue;
800
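		/*
		 * Append q2's per-packet descriptor (the trailing
		 * ubsec_mcr_add portion of its MCR) into the i-th add-on
		 * slot of the leading MCR, so the chip sees one MCR
		 * describing all npkts requests.
		 */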
801		v = (void*)(((char *)&q2->q_dma->d_dma->d_mcr) + sizeof(struct ubsec_mcr) -
802		    sizeof(struct ubsec_mcr_add));
803		bcopy(v, &q->q_dma->d_dma->d_mcradd[i], sizeof(struct ubsec_mcr_add));
804		q->q_stacked_mcr[i] = q2;
805	}
806	q->q_dma->d_dma->d_mcr.mcr_pkts = htole16(npkts);
807	SIMPLEQ_INSERT_TAIL(&sc->sc_qchip, q, q_next);
808	sc->sc_nqchip += npkts;
809	if (sc->sc_nqchip > ubsecstats.hst_maxqchip)
810		ubsecstats.hst_maxqchip = sc->sc_nqchip;
811	ubsec_dma_sync(&q->q_dma->d_alloc,
812	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
813	WRITE_REG(sc, BS_MCR1, q->q_dma->d_alloc.dma_paddr +
814	    offsetof(struct ubsec_dmachunk, d_mcr));
815	return;
816feed1:
817	q = SIMPLEQ_FIRST(&sc->sc_queue);
818
819	bus_dmamap_sync(sc->sc_dmat, q->q_src_map, BUS_DMASYNC_PREWRITE);
820	if (q->q_dst_map != NULL)
821		bus_dmamap_sync(sc->sc_dmat, q->q_dst_map, BUS_DMASYNC_PREREAD);
822	ubsec_dma_sync(&q->q_dma->d_alloc,
823	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
824
825	WRITE_REG(sc, BS_MCR1, q->q_dma->d_alloc.dma_paddr +
826	    offsetof(struct ubsec_dmachunk, d_mcr));
827#ifdef UBSEC_DEBUG
828	if (ubsec_debug)
829		printf("feed1: q->chip %p %08x stat %08x\n",
830		      q, (u_int32_t)vtophys(&q->q_dma->d_dma->d_mcr),
831		      stat);
832#endif /* UBSEC_DEBUG */
833	SIMPLEQ_REMOVE_HEAD(&sc->sc_queue, q_next);
834	--sc->sc_nqueue;
835	SIMPLEQ_INSERT_TAIL(&sc->sc_qchip, q, q_next);
836	sc->sc_nqchip++;
837	if (sc->sc_nqchip > ubsecstats.hst_maxqchip)
838		ubsecstats.hst_maxqchip = sc->sc_nqchip;
839	return;
840}
841
842static void
843ubsec_setup_enckey(struct ubsec_session *ses, int algo, caddr_t key)
844{
845
846	/* Go ahead and compute key in ubsec's byte order */
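	/*
	 * For single DES the 8-byte key is replicated into all three
	 * 3DES key slots (K1 == K2 == K3), which makes the 3DES engine
	 * behave as plain DES.
	 */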
847	if (algo == CRYPTO_DES_CBC) {
848		bcopy(key, &ses->ses_deskey[0], 8);
849		bcopy(key, &ses->ses_deskey[2], 8);
850		bcopy(key, &ses->ses_deskey[4], 8);
851	} else
852		bcopy(key, ses->ses_deskey, 24);
853
854	SWAP32(ses->ses_deskey[0]);
855	SWAP32(ses->ses_deskey[1]);
856	SWAP32(ses->ses_deskey[2]);
857	SWAP32(ses->ses_deskey[3]);
858	SWAP32(ses->ses_deskey[4]);
859	SWAP32(ses->ses_deskey[5]);
860}
861
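/*
 * Precompute the HMAC inner and outer hash states: hash one block of
 * (key ^ ipad) and one block of (key ^ opad) and save the intermediate
 * chaining values in the session; the chip finishes the HMAC from these
 * precomputed states.  The key is XORed back to its original value
 * before returning.
 */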
862static void
863ubsec_setup_mackey(struct ubsec_session *ses, int algo, caddr_t key, int klen)
864{
865	MD5_CTX md5ctx;
866	SHA1_CTX sha1ctx;
867	int i;
868
869	for (i = 0; i < klen; i++)
870		key[i] ^= HMAC_IPAD_VAL;
871
872	if (algo == CRYPTO_MD5_HMAC) {
873		MD5Init(&md5ctx);
874		MD5Update(&md5ctx, key, klen);
875		MD5Update(&md5ctx, hmac_ipad_buffer, MD5_HMAC_BLOCK_LEN - klen);
876		bcopy(md5ctx.state, ses->ses_hminner, sizeof(md5ctx.state));
877	} else {
878		SHA1Init(&sha1ctx);
879		SHA1Update(&sha1ctx, key, klen);
880		SHA1Update(&sha1ctx, hmac_ipad_buffer,
881		    SHA1_HMAC_BLOCK_LEN - klen);
882		bcopy(sha1ctx.h.b32, ses->ses_hminner, sizeof(sha1ctx.h.b32));
883	}
884
885	for (i = 0; i < klen; i++)
886		key[i] ^= (HMAC_IPAD_VAL ^ HMAC_OPAD_VAL);
887
888	if (algo == CRYPTO_MD5_HMAC) {
889		MD5Init(&md5ctx);
890		MD5Update(&md5ctx, key, klen);
891		MD5Update(&md5ctx, hmac_opad_buffer, MD5_HMAC_BLOCK_LEN - klen);
892		bcopy(md5ctx.state, ses->ses_hmouter, sizeof(md5ctx.state));
893	} else {
894		SHA1Init(&sha1ctx);
895		SHA1Update(&sha1ctx, key, klen);
896		SHA1Update(&sha1ctx, hmac_opad_buffer,
897		    SHA1_HMAC_BLOCK_LEN - klen);
898		bcopy(sha1ctx.h.b32, ses->ses_hmouter, sizeof(sha1ctx.h.b32));
899	}
900
901	for (i = 0; i < klen; i++)
902		key[i] ^= HMAC_OPAD_VAL;
903}
904
905/*
906 * Allocate a new 'session' and return an encoded session id.  'sidp'
907 * contains our registration id on entry and receives the encoded
908 * session id on successful allocation.
909 */
910static int
911ubsec_newsession(device_t dev, u_int32_t *sidp, struct cryptoini *cri)
912{
913	struct ubsec_softc *sc = device_get_softc(dev);
914	struct cryptoini *c, *encini = NULL, *macini = NULL;
915	struct ubsec_session *ses = NULL;
916	int sesn;
917
918	if (sidp == NULL || cri == NULL || sc == NULL)
919		return (EINVAL);
920
921	for (c = cri; c != NULL; c = c->cri_next) {
922		if (c->cri_alg == CRYPTO_MD5_HMAC ||
923		    c->cri_alg == CRYPTO_SHA1_HMAC) {
924			if (macini)
925				return (EINVAL);
926			macini = c;
927		} else if (c->cri_alg == CRYPTO_DES_CBC ||
928		    c->cri_alg == CRYPTO_3DES_CBC) {
929			if (encini)
930				return (EINVAL);
931			encini = c;
932		} else
933			return (EINVAL);
934	}
935	if (encini == NULL && macini == NULL)
936		return (EINVAL);
937
938	if (sc->sc_sessions == NULL) {
939		ses = sc->sc_sessions = (struct ubsec_session *)malloc(
940		    sizeof(struct ubsec_session), M_DEVBUF, M_NOWAIT);
941		if (ses == NULL)
942			return (ENOMEM);
943		sesn = 0;
944		sc->sc_nsessions = 1;
945	} else {
946		for (sesn = 0; sesn < sc->sc_nsessions; sesn++) {
947			if (sc->sc_sessions[sesn].ses_used == 0) {
948				ses = &sc->sc_sessions[sesn];
949				break;
950			}
951		}
952
953		if (ses == NULL) {
954			sesn = sc->sc_nsessions;
955			ses = (struct ubsec_session *)malloc((sesn + 1) *
956			    sizeof(struct ubsec_session), M_DEVBUF, M_NOWAIT);
957			if (ses == NULL)
958				return (ENOMEM);
959			bcopy(sc->sc_sessions, ses, sesn *
960			    sizeof(struct ubsec_session));
961			bzero(sc->sc_sessions, sesn *
962			    sizeof(struct ubsec_session));
963			free(sc->sc_sessions, M_DEVBUF);
964			sc->sc_sessions = ses;
965			ses = &sc->sc_sessions[sesn];
966			sc->sc_nsessions++;
967		}
968	}
969	bzero(ses, sizeof(struct ubsec_session));
970	ses->ses_used = 1;
971
972	if (encini) {
973		/* get an IV, network byte order */
974		/* XXX may read fewer than requested */
975		read_random(ses->ses_iv, sizeof(ses->ses_iv));
976
977		if (encini->cri_key != NULL) {
978			ubsec_setup_enckey(ses, encini->cri_alg,
979			    encini->cri_key);
980		}
981	}
982
983	if (macini) {
984		ses->ses_mlen = macini->cri_mlen;
985		if (ses->ses_mlen == 0) {
986			if (macini->cri_alg == CRYPTO_MD5_HMAC)
987				ses->ses_mlen = MD5_HASH_LEN;
988			else
989				ses->ses_mlen = SHA1_HASH_LEN;
990		}
991
992		if (macini->cri_key != NULL) {
993			ubsec_setup_mackey(ses, macini->cri_alg,
994			    macini->cri_key, macini->cri_klen / 8);
995		}
996	}
997
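	/* Encode the device unit and session index into the session id. */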
998	*sidp = UBSEC_SID(device_get_unit(sc->sc_dev), sesn);
999	return (0);
1000}
1001
1002/*
1003 * Deallocate a session.
1004 */
1005static int
1006ubsec_freesession(device_t dev, u_int64_t tid)
1007{
1008	struct ubsec_softc *sc = device_get_softc(dev);
1009	int session, ret;
1010	u_int32_t sid = CRYPTO_SESID2LID(tid);
1011
1012	if (sc == NULL)
1013		return (EINVAL);
1014
1015	session = UBSEC_SESSION(sid);
1016	if (session < sc->sc_nsessions) {
1017		bzero(&sc->sc_sessions[session],
1018			sizeof(sc->sc_sessions[session]));
1019		ret = 0;
1020	} else
1021		ret = EINVAL;
1022
1023	return (ret);
1024}
1025
1026static void
1027ubsec_op_cb(void *arg, bus_dma_segment_t *seg, int nsegs, bus_size_t mapsize, int error)
1028{
1029	struct ubsec_operand *op = arg;
1030
1031	KASSERT(nsegs <= UBS_MAX_SCATTER,
1032		("Too many DMA segments returned when mapping operand"));
1033#ifdef UBSEC_DEBUG
1034	if (ubsec_debug)
1035		printf("ubsec_op_cb: mapsize %u nsegs %d error %d\n",
1036			(u_int) mapsize, nsegs, error);
1037#endif
1038	if (error != 0)
1039		return;
1040	op->mapsize = mapsize;
1041	op->nsegs = nsegs;
1042	bcopy(seg, op->segs, nsegs * sizeof (seg[0]));
1043}
1044
1045static int
1046ubsec_process(device_t dev, struct cryptop *crp, int hint)
1047{
1048	struct ubsec_softc *sc = device_get_softc(dev);
1049	struct ubsec_q *q = NULL;
1050	int err = 0, i, j, nicealign;
1051	struct cryptodesc *crd1, *crd2, *maccrd, *enccrd;
1052	int encoffset = 0, macoffset = 0, cpskip, cpoffset;
1053	int sskip, dskip, stheend, dtheend;
1054	int16_t coffset;
1055	struct ubsec_session *ses;
1056	struct ubsec_pktctx ctx;
1057	struct ubsec_dma *dmap = NULL;
1058
1059	if (crp == NULL || crp->crp_callback == NULL || sc == NULL) {
1060		ubsecstats.hst_invalid++;
1061		return (EINVAL);
1062	}
1063	if (UBSEC_SESSION(crp->crp_sid) >= sc->sc_nsessions) {
1064		ubsecstats.hst_badsession++;
1065		return (EINVAL);
1066	}
1067
1068	mtx_lock(&sc->sc_freeqlock);
1069	if (SIMPLEQ_EMPTY(&sc->sc_freequeue)) {
1070		ubsecstats.hst_queuefull++;
1071		sc->sc_needwakeup |= CRYPTO_SYMQ;
1072		mtx_unlock(&sc->sc_freeqlock);
1073		return (ERESTART);
1074	}
1075	q = SIMPLEQ_FIRST(&sc->sc_freequeue);
1076	SIMPLEQ_REMOVE_HEAD(&sc->sc_freequeue, q_next);
1077	mtx_unlock(&sc->sc_freeqlock);
1078
1079	dmap = q->q_dma; /* Save dma pointer */
1080	bzero(q, sizeof(struct ubsec_q));
1081	bzero(&ctx, sizeof(ctx));
1082
1083	q->q_sesn = UBSEC_SESSION(crp->crp_sid);
1084	q->q_dma = dmap;
1085	ses = &sc->sc_sessions[q->q_sesn];
1086
1087	if (crp->crp_flags & CRYPTO_F_IMBUF) {
1088		q->q_src_m = (struct mbuf *)crp->crp_buf;
1089		q->q_dst_m = (struct mbuf *)crp->crp_buf;
1090	} else if (crp->crp_flags & CRYPTO_F_IOV) {
1091		q->q_src_io = (struct uio *)crp->crp_buf;
1092		q->q_dst_io = (struct uio *)crp->crp_buf;
1093	} else {
1094		ubsecstats.hst_badflags++;
1095		err = EINVAL;
1096		goto errout;	/* XXX we don't handle contiguous blocks! */
1097	}
1098
1099	bzero(&dmap->d_dma->d_mcr, sizeof(struct ubsec_mcr));
1100
1101	dmap->d_dma->d_mcr.mcr_pkts = htole16(1);
1102	dmap->d_dma->d_mcr.mcr_flags = 0;
1103	q->q_crp = crp;
1104
1105	crd1 = crp->crp_desc;
1106	if (crd1 == NULL) {
1107		ubsecstats.hst_nodesc++;
1108		err = EINVAL;
1109		goto errout;
1110	}
1111	crd2 = crd1->crd_next;
1112
1113	if (crd2 == NULL) {
1114		if (crd1->crd_alg == CRYPTO_MD5_HMAC ||
1115		    crd1->crd_alg == CRYPTO_SHA1_HMAC) {
1116			maccrd = crd1;
1117			enccrd = NULL;
1118		} else if (crd1->crd_alg == CRYPTO_DES_CBC ||
1119		    crd1->crd_alg == CRYPTO_3DES_CBC) {
1120			maccrd = NULL;
1121			enccrd = crd1;
1122		} else {
1123			ubsecstats.hst_badalg++;
1124			err = EINVAL;
1125			goto errout;
1126		}
1127	} else {
1128		if ((crd1->crd_alg == CRYPTO_MD5_HMAC ||
1129		    crd1->crd_alg == CRYPTO_SHA1_HMAC) &&
1130		    (crd2->crd_alg == CRYPTO_DES_CBC ||
1131			crd2->crd_alg == CRYPTO_3DES_CBC) &&
1132		    ((crd2->crd_flags & CRD_F_ENCRYPT) == 0)) {
1133			maccrd = crd1;
1134			enccrd = crd2;
1135		} else if ((crd1->crd_alg == CRYPTO_DES_CBC ||
1136		    crd1->crd_alg == CRYPTO_3DES_CBC) &&
1137		    (crd2->crd_alg == CRYPTO_MD5_HMAC ||
1138			crd2->crd_alg == CRYPTO_SHA1_HMAC) &&
1139		    (crd1->crd_flags & CRD_F_ENCRYPT)) {
1140			enccrd = crd1;
1141			maccrd = crd2;
1142		} else {
1143			/*
1144			 * We cannot handle the requested ordering of operations
1145			 */
1146			ubsecstats.hst_badalg++;
1147			err = EINVAL;
1148			goto errout;
1149		}
1150	}
1151
1152	if (enccrd) {
1153		if (enccrd->crd_flags & CRD_F_KEY_EXPLICIT) {
1154			ubsec_setup_enckey(ses, enccrd->crd_alg,
1155			    enccrd->crd_key);
1156		}
1157
1158		encoffset = enccrd->crd_skip;
1159		ctx.pc_flags |= htole16(UBS_PKTCTX_ENC_3DES);
1160
1161		if (enccrd->crd_flags & CRD_F_ENCRYPT) {
1162			q->q_flags |= UBSEC_QFLAGS_COPYOUTIV;
1163
1164			if (enccrd->crd_flags & CRD_F_IV_EXPLICIT)
1165				bcopy(enccrd->crd_iv, ctx.pc_iv, 8);
1166			else {
1167				ctx.pc_iv[0] = ses->ses_iv[0];
1168				ctx.pc_iv[1] = ses->ses_iv[1];
1169			}
1170
1171			if ((enccrd->crd_flags & CRD_F_IV_PRESENT) == 0) {
1172				crypto_copyback(crp->crp_flags, crp->crp_buf,
1173				    enccrd->crd_inject, 8, (caddr_t)ctx.pc_iv);
1174			}
1175		} else {
1176			ctx.pc_flags |= htole16(UBS_PKTCTX_INBOUND);
1177
1178			if (enccrd->crd_flags & CRD_F_IV_EXPLICIT)
1179				bcopy(enccrd->crd_iv, ctx.pc_iv, 8);
1180			else {
1181				crypto_copydata(crp->crp_flags, crp->crp_buf,
1182				    enccrd->crd_inject, 8, (caddr_t)ctx.pc_iv);
1183			}
1184		}
1185
1186		ctx.pc_deskey[0] = ses->ses_deskey[0];
1187		ctx.pc_deskey[1] = ses->ses_deskey[1];
1188		ctx.pc_deskey[2] = ses->ses_deskey[2];
1189		ctx.pc_deskey[3] = ses->ses_deskey[3];
1190		ctx.pc_deskey[4] = ses->ses_deskey[4];
1191		ctx.pc_deskey[5] = ses->ses_deskey[5];
1192		SWAP32(ctx.pc_iv[0]);
1193		SWAP32(ctx.pc_iv[1]);
1194	}
1195
1196	if (maccrd) {
1197		if (maccrd->crd_flags & CRD_F_KEY_EXPLICIT) {
1198			ubsec_setup_mackey(ses, maccrd->crd_alg,
1199			    maccrd->crd_key, maccrd->crd_klen / 8);
1200		}
1201
1202		macoffset = maccrd->crd_skip;
1203
1204		if (maccrd->crd_alg == CRYPTO_MD5_HMAC)
1205			ctx.pc_flags |= htole16(UBS_PKTCTX_AUTH_MD5);
1206		else
1207			ctx.pc_flags |= htole16(UBS_PKTCTX_AUTH_SHA1);
1208
1209		for (i = 0; i < 5; i++) {
1210			ctx.pc_hminner[i] = ses->ses_hminner[i];
1211			ctx.pc_hmouter[i] = ses->ses_hmouter[i];
1212
1213			HTOLE32(ctx.pc_hminner[i]);
1214			HTOLE32(ctx.pc_hmouter[i]);
1215		}
1216	}
1217
1218	if (enccrd && maccrd) {
1219		/*
1220		 * ubsec cannot handle packets where the end of encryption
1221		 * and authentication are not the same, or where the
1222		 * encrypted part begins before the authenticated part.
1223		 */
1224		if ((encoffset + enccrd->crd_len) !=
1225		    (macoffset + maccrd->crd_len)) {
1226			ubsecstats.hst_lenmismatch++;
1227			err = EINVAL;
1228			goto errout;
1229		}
1230		if (enccrd->crd_skip < maccrd->crd_skip) {
1231			ubsecstats.hst_skipmismatch++;
1232			err = EINVAL;
1233			goto errout;
1234		}
1235		sskip = maccrd->crd_skip;
1236		cpskip = dskip = enccrd->crd_skip;
1237		stheend = maccrd->crd_len;
1238		dtheend = enccrd->crd_len;
1239		coffset = enccrd->crd_skip - maccrd->crd_skip;
1240		cpoffset = cpskip + dtheend;
1241#ifdef UBSEC_DEBUG
1242		if (ubsec_debug) {
1243			printf("mac: skip %d, len %d, inject %d\n",
1244			    maccrd->crd_skip, maccrd->crd_len, maccrd->crd_inject);
1245			printf("enc: skip %d, len %d, inject %d\n",
1246			    enccrd->crd_skip, enccrd->crd_len, enccrd->crd_inject);
1247			printf("src: skip %d, len %d\n", sskip, stheend);
1248			printf("dst: skip %d, len %d\n", dskip, dtheend);
1249			printf("ubs: coffset %d, pktlen %d, cpskip %d, cpoffset %d\n",
1250			    coffset, stheend, cpskip, cpoffset);
1251		}
1252#endif
1253	} else {
1254		cpskip = dskip = sskip = macoffset + encoffset;
1255		dtheend = stheend = (enccrd)?enccrd->crd_len:maccrd->crd_len;
1256		cpoffset = cpskip + dtheend;
1257		coffset = 0;
1258	}
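	/* pc_offset is the distance from auth start to cipher start, in 32-bit words. */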
1259	ctx.pc_offset = htole16(coffset >> 2);
1260
1261	if (bus_dmamap_create(sc->sc_dmat, BUS_DMA_NOWAIT, &q->q_src_map)) {
1262		ubsecstats.hst_nomap++;
1263		err = ENOMEM;
1264		goto errout;
1265	}
1266	if (crp->crp_flags & CRYPTO_F_IMBUF) {
1267		if (bus_dmamap_load_mbuf(sc->sc_dmat, q->q_src_map,
1268		    q->q_src_m, ubsec_op_cb, &q->q_src, BUS_DMA_NOWAIT) != 0) {
1269			bus_dmamap_destroy(sc->sc_dmat, q->q_src_map);
1270			q->q_src_map = NULL;
1271			ubsecstats.hst_noload++;
1272			err = ENOMEM;
1273			goto errout;
1274		}
1275	} else if (crp->crp_flags & CRYPTO_F_IOV) {
1276		if (bus_dmamap_load_uio(sc->sc_dmat, q->q_src_map,
1277		    q->q_src_io, ubsec_op_cb, &q->q_src, BUS_DMA_NOWAIT) != 0) {
1278			bus_dmamap_destroy(sc->sc_dmat, q->q_src_map);
1279			q->q_src_map = NULL;
1280			ubsecstats.hst_noload++;
1281			err = ENOMEM;
1282			goto errout;
1283		}
1284	}
1285	nicealign = ubsec_dmamap_aligned(&q->q_src);
1286
1287	dmap->d_dma->d_mcr.mcr_pktlen = htole16(stheend);
1288
1289#ifdef UBSEC_DEBUG
1290	if (ubsec_debug)
1291		printf("src skip: %d nicealign: %u\n", sskip, nicealign);
1292#endif
1293	for (i = j = 0; i < q->q_src_nsegs; i++) {
1294		struct ubsec_pktbuf *pb;
1295		bus_size_t packl = q->q_src_segs[i].ds_len;
1296		bus_addr_t packp = q->q_src_segs[i].ds_addr;
1297
1298		if (sskip >= packl) {
1299			sskip -= packl;
1300			continue;
1301		}
1302
1303		packl -= sskip;
1304		packp += sskip;
1305		sskip = 0;
1306
1307		if (packl > 0xfffc) {
1308			err = EIO;
1309			goto errout;
1310		}
1311
1312		if (j == 0)
1313			pb = &dmap->d_dma->d_mcr.mcr_ipktbuf;
1314		else
1315			pb = &dmap->d_dma->d_sbuf[j - 1];
1316
1317		pb->pb_addr = htole32(packp);
1318
1319		if (stheend) {
1320			if (packl > stheend) {
1321				pb->pb_len = htole32(stheend);
1322				stheend = 0;
1323			} else {
1324				pb->pb_len = htole32(packl);
1325				stheend -= packl;
1326			}
1327		} else
1328			pb->pb_len = htole32(packl);
1329
1330		if ((i + 1) == q->q_src_nsegs)
1331			pb->pb_next = 0;
1332		else
1333			pb->pb_next = htole32(dmap->d_alloc.dma_paddr +
1334			    offsetof(struct ubsec_dmachunk, d_sbuf[j]));
1335		j++;
1336	}
1337
1338	if (enccrd == NULL && maccrd != NULL) {
1339		dmap->d_dma->d_mcr.mcr_opktbuf.pb_addr = 0;
1340		dmap->d_dma->d_mcr.mcr_opktbuf.pb_len = 0;
1341		dmap->d_dma->d_mcr.mcr_opktbuf.pb_next = htole32(dmap->d_alloc.dma_paddr +
1342		    offsetof(struct ubsec_dmachunk, d_macbuf[0]));
1343#ifdef UBSEC_DEBUG
1344		if (ubsec_debug)
1345			printf("opkt: %x %x %x\n",
1346			    dmap->d_dma->d_mcr.mcr_opktbuf.pb_addr,
1347			    dmap->d_dma->d_mcr.mcr_opktbuf.pb_len,
1348			    dmap->d_dma->d_mcr.mcr_opktbuf.pb_next);
1349#endif
1350	} else {
1351		if (crp->crp_flags & CRYPTO_F_IOV) {
1352			if (!nicealign) {
1353				ubsecstats.hst_iovmisaligned++;
1354				err = EINVAL;
1355				goto errout;
1356			}
1357			if (bus_dmamap_create(sc->sc_dmat, BUS_DMA_NOWAIT,
1358			     &q->q_dst_map)) {
1359				ubsecstats.hst_nomap++;
1360				err = ENOMEM;
1361				goto errout;
1362			}
1363			if (bus_dmamap_load_uio(sc->sc_dmat, q->q_dst_map,
1364			    q->q_dst_io, ubsec_op_cb, &q->q_dst, BUS_DMA_NOWAIT) != 0) {
1365				bus_dmamap_destroy(sc->sc_dmat, q->q_dst_map);
1366				q->q_dst_map = NULL;
1367				ubsecstats.hst_noload++;
1368				err = ENOMEM;
1369				goto errout;
1370			}
1371		} else if (crp->crp_flags & CRYPTO_F_IMBUF) {
1372			if (nicealign) {
1373				q->q_dst = q->q_src;
1374			} else {
1375				int totlen, len;
1376				struct mbuf *m, *top, **mp;
1377
1378				ubsecstats.hst_unaligned++;
1379				totlen = q->q_src_mapsize;
1380				if (totlen >= MINCLSIZE) {
1381					m = m_getcl(M_DONTWAIT, MT_DATA,
1382					    q->q_src_m->m_flags & M_PKTHDR);
1383					len = MCLBYTES;
1384				} else if (q->q_src_m->m_flags & M_PKTHDR) {
1385					m = m_gethdr(M_DONTWAIT, MT_DATA);
1386					len = MHLEN;
1387				} else {
1388					m = m_get(M_DONTWAIT, MT_DATA);
1389					len = MLEN;
1390				}
1391				if (m && q->q_src_m->m_flags & M_PKTHDR &&
1392				    !m_dup_pkthdr(m, q->q_src_m, M_DONTWAIT)) {
1393					m_free(m);
1394					m = NULL;
1395				}
1396				if (m == NULL) {
1397					ubsecstats.hst_nombuf++;
1398					err = sc->sc_nqueue ? ERESTART : ENOMEM;
1399					goto errout;
1400				}
1401				m->m_len = len = min(totlen, len);
1402				totlen -= len;
1403				top = m;
1404				mp = &top;
1405
1406				while (totlen > 0) {
1407					if (totlen >= MINCLSIZE) {
1408						m = m_getcl(M_DONTWAIT,
1409						    MT_DATA, 0);
1410						len = MCLBYTES;
1411					} else {
1412						m = m_get(M_DONTWAIT, MT_DATA);
1413						len = MLEN;
1414					}
1415					if (m == NULL) {
1416						m_freem(top);
1417						ubsecstats.hst_nombuf++;
1418						err = sc->sc_nqueue ? ERESTART : ENOMEM;
1419						goto errout;
1420					}
1421					m->m_len = len = min(totlen, len);
1422					totlen -= len;
1423					*mp = m;
1424					mp = &m->m_next;
1425				}
1426				q->q_dst_m = top;
1427				ubsec_mcopy(q->q_src_m, q->q_dst_m,
1428				    cpskip, cpoffset);
1429				if (bus_dmamap_create(sc->sc_dmat,
1430				    BUS_DMA_NOWAIT, &q->q_dst_map) != 0) {
1431					ubsecstats.hst_nomap++;
1432					err = ENOMEM;
1433					goto errout;
1434				}
1435				if (bus_dmamap_load_mbuf(sc->sc_dmat,
1436				    q->q_dst_map, q->q_dst_m,
1437				    ubsec_op_cb, &q->q_dst,
1438				    BUS_DMA_NOWAIT) != 0) {
1439					bus_dmamap_destroy(sc->sc_dmat,
1440					q->q_dst_map);
1441					q->q_dst_map = NULL;
1442					ubsecstats.hst_noload++;
1443					err = ENOMEM;
1444					goto errout;
1445				}
1446			}
1447		} else {
1448			ubsecstats.hst_badflags++;
1449			err = EINVAL;
1450			goto errout;
1451		}
1452
1453#ifdef UBSEC_DEBUG
1454		if (ubsec_debug)
1455			printf("dst skip: %d\n", dskip);
1456#endif
1457		for (i = j = 0; i < q->q_dst_nsegs; i++) {
1458			struct ubsec_pktbuf *pb;
1459			bus_size_t packl = q->q_dst_segs[i].ds_len;
1460			bus_addr_t packp = q->q_dst_segs[i].ds_addr;
1461
1462			if (dskip >= packl) {
1463				dskip -= packl;
1464				continue;
1465			}
1466
1467			packl -= dskip;
1468			packp += dskip;
1469			dskip = 0;
1470
1471			if (packl > 0xfffc) {
1472				err = EIO;
1473				goto errout;
1474			}
1475
1476			if (j == 0)
1477				pb = &dmap->d_dma->d_mcr.mcr_opktbuf;
1478			else
1479				pb = &dmap->d_dma->d_dbuf[j - 1];
1480
1481			pb->pb_addr = htole32(packp);
1482
1483			if (dtheend) {
1484				if (packl > dtheend) {
1485					pb->pb_len = htole32(dtheend);
1486					dtheend = 0;
1487				} else {
1488					pb->pb_len = htole32(packl);
1489					dtheend -= packl;
1490				}
1491			} else
1492				pb->pb_len = htole32(packl);
1493
1494			if ((i + 1) == q->q_dst_nsegs) {
1495				if (maccrd)
1496					pb->pb_next = htole32(dmap->d_alloc.dma_paddr +
1497					    offsetof(struct ubsec_dmachunk, d_macbuf[0]));
1498				else
1499					pb->pb_next = 0;
1500			} else
1501				pb->pb_next = htole32(dmap->d_alloc.dma_paddr +
1502				    offsetof(struct ubsec_dmachunk, d_dbuf[j]));
1503			j++;
1504		}
1505	}
1506
1507	dmap->d_dma->d_mcr.mcr_cmdctxp = htole32(dmap->d_alloc.dma_paddr +
1508	    offsetof(struct ubsec_dmachunk, d_ctx));
1509
1510	if (sc->sc_flags & UBS_FLAGS_LONGCTX) {
1511		struct ubsec_pktctx_long *ctxl;
1512
1513		ctxl = (struct ubsec_pktctx_long *)(dmap->d_alloc.dma_vaddr +
1514		    offsetof(struct ubsec_dmachunk, d_ctx));
1515
1516		/* transform small context into long context */
1517		ctxl->pc_len = htole16(sizeof(struct ubsec_pktctx_long));
1518		ctxl->pc_type = htole16(UBS_PKTCTX_TYPE_IPSEC);
1519		ctxl->pc_flags = ctx.pc_flags;
1520		ctxl->pc_offset = ctx.pc_offset;
1521		for (i = 0; i < 6; i++)
1522			ctxl->pc_deskey[i] = ctx.pc_deskey[i];
1523		for (i = 0; i < 5; i++)
1524			ctxl->pc_hminner[i] = ctx.pc_hminner[i];
1525		for (i = 0; i < 5; i++)
1526			ctxl->pc_hmouter[i] = ctx.pc_hmouter[i];
1527		ctxl->pc_iv[0] = ctx.pc_iv[0];
1528		ctxl->pc_iv[1] = ctx.pc_iv[1];
1529	} else
1530		bcopy(&ctx, dmap->d_alloc.dma_vaddr +
1531		    offsetof(struct ubsec_dmachunk, d_ctx),
1532		    sizeof(struct ubsec_pktctx));
1533
1534	mtx_lock(&sc->sc_mcr1lock);
1535	SIMPLEQ_INSERT_TAIL(&sc->sc_queue, q, q_next);
1536	sc->sc_nqueue++;
1537	ubsecstats.hst_ipackets++;
1538	ubsecstats.hst_ibytes += dmap->d_alloc.dma_size;
1539	if ((hint & CRYPTO_HINT_MORE) == 0 || sc->sc_nqueue >= UBS_MAX_AGGR)
1540		ubsec_feed(sc);
1541	mtx_unlock(&sc->sc_mcr1lock);
1542	return (0);
1543
1544errout:
1545	if (q != NULL) {
1546		if ((q->q_dst_m != NULL) && (q->q_src_m != q->q_dst_m))
1547			m_freem(q->q_dst_m);
1548
1549		if (q->q_dst_map != NULL && q->q_dst_map != q->q_src_map) {
1550			bus_dmamap_unload(sc->sc_dmat, q->q_dst_map);
1551			bus_dmamap_destroy(sc->sc_dmat, q->q_dst_map);
1552		}
1553		if (q->q_src_map != NULL) {
1554			bus_dmamap_unload(sc->sc_dmat, q->q_src_map);
1555			bus_dmamap_destroy(sc->sc_dmat, q->q_src_map);
1556		}
1557	}
1558	if (q != NULL || err == ERESTART) {
1559		mtx_lock(&sc->sc_freeqlock);
1560		if (q != NULL)
1561			SIMPLEQ_INSERT_TAIL(&sc->sc_freequeue, q, q_next);
1562		if (err == ERESTART)
1563			sc->sc_needwakeup |= CRYPTO_SYMQ;
1564		mtx_unlock(&sc->sc_freeqlock);
1565	}
1566	if (err != ERESTART) {
1567		crp->crp_etype = err;
1568		crypto_done(crp);
1569	}
1570	return (err);
1571}
1572
1573static void
1574ubsec_callback(struct ubsec_softc *sc, struct ubsec_q *q)
1575{
1576	struct cryptop *crp = (struct cryptop *)q->q_crp;
1577	struct cryptodesc *crd;
1578	struct ubsec_dma *dmap = q->q_dma;
1579
1580	ubsecstats.hst_opackets++;
1581	ubsecstats.hst_obytes += dmap->d_alloc.dma_size;
1582
1583	ubsec_dma_sync(&dmap->d_alloc,
1584	    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
1585	if (q->q_dst_map != NULL && q->q_dst_map != q->q_src_map) {
1586		bus_dmamap_sync(sc->sc_dmat, q->q_dst_map,
1587		    BUS_DMASYNC_POSTREAD);
1588		bus_dmamap_unload(sc->sc_dmat, q->q_dst_map);
1589		bus_dmamap_destroy(sc->sc_dmat, q->q_dst_map);
1590	}
1591	bus_dmamap_sync(sc->sc_dmat, q->q_src_map, BUS_DMASYNC_POSTWRITE);
1592	bus_dmamap_unload(sc->sc_dmat, q->q_src_map);
1593	bus_dmamap_destroy(sc->sc_dmat, q->q_src_map);
1594
1595	if ((crp->crp_flags & CRYPTO_F_IMBUF) && (q->q_src_m != q->q_dst_m)) {
1596		m_freem(q->q_src_m);
1597		crp->crp_buf = (caddr_t)q->q_dst_m;
1598	}
1599
1600	/* copy out IV for future use */
1601	if (q->q_flags & UBSEC_QFLAGS_COPYOUTIV) {
1602		for (crd = crp->crp_desc; crd; crd = crd->crd_next) {
1603			if (crd->crd_alg != CRYPTO_DES_CBC &&
1604			    crd->crd_alg != CRYPTO_3DES_CBC)
1605				continue;
1606			crypto_copydata(crp->crp_flags, crp->crp_buf,
1607			    crd->crd_skip + crd->crd_len - 8, 8,
1608			    (caddr_t)sc->sc_sessions[q->q_sesn].ses_iv);
1609			break;
1610		}
1611	}
1612
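	/* Copy the computed HMAC from the DMA area back into the request. */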
1613	for (crd = crp->crp_desc; crd; crd = crd->crd_next) {
1614		if (crd->crd_alg != CRYPTO_MD5_HMAC &&
1615		    crd->crd_alg != CRYPTO_SHA1_HMAC)
1616			continue;
1617		crypto_copyback(crp->crp_flags, crp->crp_buf, crd->crd_inject,
1618		    sc->sc_sessions[q->q_sesn].ses_mlen,
1619		    (caddr_t)dmap->d_dma->d_macbuf);
1620		break;
1621	}
1622	mtx_lock(&sc->sc_freeqlock);
1623	SIMPLEQ_INSERT_TAIL(&sc->sc_freequeue, q, q_next);
1624	mtx_unlock(&sc->sc_freeqlock);
1625	crypto_done(crp);
1626}
1627
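/*
 * Copy data from the source mbuf chain to the destination chain,
 * skipping the byte range [hoffset, toffset); that range is the
 * payload the chip writes into the destination itself.
 */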
1628static void
1629ubsec_mcopy(struct mbuf *srcm, struct mbuf *dstm, int hoffset, int toffset)
1630{
1631	int i, j, dlen, slen;
1632	caddr_t dptr, sptr;
1633
1634	j = 0;
1635	sptr = srcm->m_data;
1636	slen = srcm->m_len;
1637	dptr = dstm->m_data;
1638	dlen = dstm->m_len;
1639
1640	while (1) {
1641		for (i = 0; i < min(slen, dlen); i++) {
1642			if (j < hoffset || j >= toffset)
1643				*dptr++ = *sptr++;
1644			slen--;
1645			dlen--;
1646			j++;
1647		}
1648		if (slen == 0) {
1649			srcm = srcm->m_next;
1650			if (srcm == NULL)
1651				return;
1652			sptr = srcm->m_data;
1653			slen = srcm->m_len;
1654		}
1655		if (dlen == 0) {
1656			dstm = dstm->m_next;
1657			if (dstm == NULL)
1658				return;
1659			dptr = dstm->m_data;
1660			dlen = dstm->m_len;
1661		}
1662	}
1663}
1664
1665/*
1666 * Feed the key generator; called with sc_mcr2lock held.
1667 */
1668static int
1669ubsec_feed2(struct ubsec_softc *sc)
1670{
1671	struct ubsec_q2 *q;
1672
1673	while (!SIMPLEQ_EMPTY(&sc->sc_queue2)) {
1674		if (READ_REG(sc, BS_STAT) & BS_STAT_MCR2_FULL)
1675			break;
1676		q = SIMPLEQ_FIRST(&sc->sc_queue2);
1677
1678		ubsec_dma_sync(&q->q_mcr,
1679		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1680		ubsec_dma_sync(&q->q_ctx, BUS_DMASYNC_PREWRITE);
1681
1682		WRITE_REG(sc, BS_MCR2, q->q_mcr.dma_paddr);
1683		SIMPLEQ_REMOVE_HEAD(&sc->sc_queue2, q_next);
1684		--sc->sc_nqueue2;
1685		SIMPLEQ_INSERT_TAIL(&sc->sc_qchip2, q, q_next);
1686	}
1687	return (0);
1688}
1689
1690/*
1691 * Callback for completed MCR2 operations (key ops and RNG)
1692 */
1693static void
1694ubsec_callback2(struct ubsec_softc *sc, struct ubsec_q2 *q)
1695{
1696	struct cryptkop *krp;
1697	struct ubsec_ctx_keyop *ctx;
1698
1699	ctx = (struct ubsec_ctx_keyop *)q->q_ctx.dma_vaddr;
1700	ubsec_dma_sync(&q->q_ctx, BUS_DMASYNC_POSTWRITE);
1701
1702	switch (q->q_type) {
1703#ifndef UBSEC_NO_RNG
1704	case UBS_CTXOP_RNGBYPASS: {
1705		struct ubsec_q2_rng *rng = (struct ubsec_q2_rng *)q;
1706
1707		ubsec_dma_sync(&rng->rng_buf, BUS_DMASYNC_POSTREAD);
1708		(*sc->sc_harvest)(sc->sc_rndtest,
1709			rng->rng_buf.dma_vaddr,
1710			UBSEC_RNG_BUFSIZ*sizeof (u_int32_t));
1711		rng->rng_used = 0;
1712		callout_reset(&sc->sc_rngto, sc->sc_rnghz, ubsec_rng, sc);
1713		break;
1714	}
1715#endif
1716	case UBS_CTXOP_MODEXP: {
1717		struct ubsec_q2_modexp *me = (struct ubsec_q2_modexp *)q;
1718		u_int rlen, clen;
1719
1720		krp = me->me_krp;
1721		rlen = (me->me_modbits + 7) / 8;
1722		clen = (krp->krp_param[krp->krp_iparams].crp_nbits + 7) / 8;
1723
1724		ubsec_dma_sync(&me->me_M, BUS_DMASYNC_POSTWRITE);
1725		ubsec_dma_sync(&me->me_E, BUS_DMASYNC_POSTWRITE);
1726		ubsec_dma_sync(&me->me_C, BUS_DMASYNC_POSTREAD);
1727		ubsec_dma_sync(&me->me_epb, BUS_DMASYNC_POSTWRITE);
1728
1729		if (clen < rlen)
1730			krp->krp_status = E2BIG;
1731		else {
1732			if (sc->sc_flags & UBS_FLAGS_HWNORM) {
1733				bzero(krp->krp_param[krp->krp_iparams].crp_p,
1734				    (krp->krp_param[krp->krp_iparams].crp_nbits
1735					+ 7) / 8);
1736				bcopy(me->me_C.dma_vaddr,
1737				    krp->krp_param[krp->krp_iparams].crp_p,
1738				    (me->me_modbits + 7) / 8);
1739			} else
1740				ubsec_kshift_l(me->me_shiftbits,
1741				    me->me_C.dma_vaddr, me->me_normbits,
1742				    krp->krp_param[krp->krp_iparams].crp_p,
1743				    krp->krp_param[krp->krp_iparams].crp_nbits);
1744		}
1745
1746		crypto_kdone(krp);
1747
1748		/* bzero all potentially sensitive data */
1749		bzero(me->me_E.dma_vaddr, me->me_E.dma_size);
1750		bzero(me->me_M.dma_vaddr, me->me_M.dma_size);
1751		bzero(me->me_C.dma_vaddr, me->me_C.dma_size);
1752		bzero(me->me_q.q_ctx.dma_vaddr, me->me_q.q_ctx.dma_size);
1753
1754		/* Can't free here, so put us on the free list. */
1755		SIMPLEQ_INSERT_TAIL(&sc->sc_q2free, &me->me_q, q_next);
1756		break;
1757	}
1758	case UBS_CTXOP_RSAPRIV: {
1759		struct ubsec_q2_rsapriv *rp = (struct ubsec_q2_rsapriv *)q;
1760		u_int len;
1761
1762		krp = rp->rpr_krp;
1763		ubsec_dma_sync(&rp->rpr_msgin, BUS_DMASYNC_POSTWRITE);
1764		ubsec_dma_sync(&rp->rpr_msgout, BUS_DMASYNC_POSTREAD);
1765
1766		len = (krp->krp_param[UBS_RSAPRIV_PAR_MSGOUT].crp_nbits + 7) / 8;
1767		bcopy(rp->rpr_msgout.dma_vaddr,
1768		    krp->krp_param[UBS_RSAPRIV_PAR_MSGOUT].crp_p, len);
1769
1770		crypto_kdone(krp);
1771
1772		bzero(rp->rpr_msgin.dma_vaddr, rp->rpr_msgin.dma_size);
1773		bzero(rp->rpr_msgout.dma_vaddr, rp->rpr_msgout.dma_size);
1774		bzero(rp->rpr_q.q_ctx.dma_vaddr, rp->rpr_q.q_ctx.dma_size);
1775
1776		/* Can't free here, so put us on the free list. */
1777		SIMPLEQ_INSERT_TAIL(&sc->sc_q2free, &rp->rpr_q, q_next);
1778		break;
1779	}
1780	default:
1781		device_printf(sc->sc_dev, "unknown ctx op: %x\n",
1782		    letoh16(ctx->ctx_op));
1783		break;
1784	}
1785}
1786
1787#ifndef UBSEC_NO_RNG
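/*
 * Periodic RNG harvest: queue an RNG-bypass operation on MCR2.  The
 * chip DMAs UBSEC_RNG_BUFSIZ 32-bit words of random data into rng_buf;
 * ubsec_callback2() feeds them to the harvest routine and re-arms the
 * callout.
 */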
1788static void
1789ubsec_rng(void *vsc)
1790{
1791	struct ubsec_softc *sc = vsc;
1792	struct ubsec_q2_rng *rng = &sc->sc_rng;
1793	struct ubsec_mcr *mcr;
1794	struct ubsec_ctx_rngbypass *ctx;
1795
1796	mtx_lock(&sc->sc_mcr2lock);
1797	if (rng->rng_used) {
1798		mtx_unlock(&sc->sc_mcr2lock);
1799		return;
1800	}
1801	sc->sc_nqueue2++;
1802	if (sc->sc_nqueue2 >= UBS_MAX_NQUEUE)
1803		goto out;
1804
1805	mcr = (struct ubsec_mcr *)rng->rng_q.q_mcr.dma_vaddr;
1806	ctx = (struct ubsec_ctx_rngbypass *)rng->rng_q.q_ctx.dma_vaddr;
1807
1808	mcr->mcr_pkts = htole16(1);
1809	mcr->mcr_flags = 0;
1810	mcr->mcr_cmdctxp = htole32(rng->rng_q.q_ctx.dma_paddr);
1811	mcr->mcr_ipktbuf.pb_addr = mcr->mcr_ipktbuf.pb_next = 0;
1812	mcr->mcr_ipktbuf.pb_len = 0;
1813	mcr->mcr_reserved = mcr->mcr_pktlen = 0;
1814	mcr->mcr_opktbuf.pb_addr = htole32(rng->rng_buf.dma_paddr);
1815	mcr->mcr_opktbuf.pb_len = htole32(((sizeof(u_int32_t) * UBSEC_RNG_BUFSIZ)) &
1816	    UBS_PKTBUF_LEN);
1817	mcr->mcr_opktbuf.pb_next = 0;
1818
1819	ctx->rbp_len = htole16(sizeof(struct ubsec_ctx_rngbypass));
1820	ctx->rbp_op = htole16(UBS_CTXOP_RNGBYPASS);
1821	rng->rng_q.q_type = UBS_CTXOP_RNGBYPASS;
1822
1823	ubsec_dma_sync(&rng->rng_buf, BUS_DMASYNC_PREREAD);
1824
1825	SIMPLEQ_INSERT_TAIL(&sc->sc_queue2, &rng->rng_q, q_next);
1826	rng->rng_used = 1;
1827	ubsec_feed2(sc);
1828	ubsecstats.hst_rng++;
1829	mtx_unlock(&sc->sc_mcr2lock);
1830
1831	return;
1832
1833out:
1834	/*
1835	 * Something weird happened; generate our own callback.
1836	 */
1837	sc->sc_nqueue2--;
1838	mtx_unlock(&sc->sc_mcr2lock);
1839	callout_reset(&sc->sc_rngto, sc->sc_rnghz, ubsec_rng, sc);
1840}
1841#endif /* UBSEC_NO_RNG */
1842
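/*
 * bus_dmamap_load() callback: stash the bus address of the (single)
 * segment in the caller-supplied bus_addr_t.
 */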
1843static void
1844ubsec_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
1845{
1846	bus_addr_t *paddr = (bus_addr_t*) arg;
1847	*paddr = segs->ds_addr;
1848}
1849
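/*
 * Allocate a chunk of DMA-able memory: create a dedicated tag and map,
 * allocate and load the memory, and record the virtual address, bus
 * address and size in *dma.  On failure the steps already taken are
 * unwound and the bus_dma error is returned.
 */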
1850static int
1851ubsec_dma_malloc(
1852	struct ubsec_softc *sc,
1853	bus_size_t size,
1854	struct ubsec_dma_alloc *dma,
1855	int mapflags
1856)
1857{
1858	int r;
1859
1860	/* XXX could specify sc_dmat as parent but that just adds overhead */
1861	r = bus_dma_tag_create(NULL,			/* parent */
1862			       1, 0,			/* alignment, bounds */
1863			       BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
1864			       BUS_SPACE_MAXADDR,	/* highaddr */
1865			       NULL, NULL,		/* filter, filterarg */
1866			       size,			/* maxsize */
1867			       1,			/* nsegments */
1868			       size,			/* maxsegsize */
1869			       BUS_DMA_ALLOCNOW,	/* flags */
1870			       NULL, NULL,		/* lockfunc, lockarg */
1871			       &dma->dma_tag);
1872	if (r != 0) {
1873		device_printf(sc->sc_dev, "ubsec_dma_malloc: "
1874			"bus_dma_tag_create failed; error %u\n", r);
1875		goto fail_0;
1876	}
1877
1878	r = bus_dmamap_create(dma->dma_tag, BUS_DMA_NOWAIT, &dma->dma_map);
1879	if (r != 0) {
1880		device_printf(sc->sc_dev, "ubsec_dma_malloc: "
1881			"bus_dmamap_create failed; error %u\n", r);
1882		goto fail_1;
1883	}
1884
1885	r = bus_dmamem_alloc(dma->dma_tag, (void**) &dma->dma_vaddr,
1886			     BUS_DMA_NOWAIT, &dma->dma_map);
1887	if (r != 0) {
1888		device_printf(sc->sc_dev, "ubsec_dma_malloc: "
1889			"bus_dmamem_alloc failed; size %zu, error %u\n",
1890			size, r);
1891		goto fail_1;		/* nothing to bus_dmamem_free() yet */
1892	}
1893
1894	r = bus_dmamap_load(dma->dma_tag, dma->dma_map, dma->dma_vaddr,
1895		            size,
1896			    ubsec_dmamap_cb,
1897			    &dma->dma_paddr,
1898			    mapflags | BUS_DMA_NOWAIT);
1899	if (r != 0) {
1900		device_printf(sc->sc_dev, "ubsec_dma_malloc: "
1901			"bus_dmamap_load failed; error %u\n", r);
1902		goto fail_3;
1903	}
1904
1905	dma->dma_size = size;
1906	return (0);
1907
1908fail_3:
1909	bus_dmamap_unload(dma->dma_tag, dma->dma_map);
1910fail_2:
1911	bus_dmamem_free(dma->dma_tag, dma->dma_vaddr, dma->dma_map);
1912fail_1:
1913	bus_dmamap_destroy(dma->dma_tag, dma->dma_map);
1914	bus_dma_tag_destroy(dma->dma_tag);
1915fail_0:
1916	dma->dma_map = NULL;
1917	dma->dma_tag = NULL;
1918	return (r);
1919}
1920
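/*
 * Release memory obtained from ubsec_dma_malloc(): unload and free the
 * memory, then destroy the map and the tag.
 */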
1921static void
1922ubsec_dma_free(struct ubsec_softc *sc, struct ubsec_dma_alloc *dma)
1923{
1924	bus_dmamap_unload(dma->dma_tag, dma->dma_map);
1925	bus_dmamem_free(dma->dma_tag, dma->dma_vaddr, dma->dma_map);
1926	bus_dmamap_destroy(dma->dma_tag, dma->dma_map);
1927	bus_dma_tag_destroy(dma->dma_tag);
1928}
1929
1930/*
1931 * Resets the board.  Values in the registers are left as is
1932 * from the reset (i.e. initial values are assigned elsewhere).
1933 */
1934static void
1935ubsec_reset_board(struct ubsec_softc *sc)
1936{
1937	volatile u_int32_t ctrl;
1938
1939	ctrl = READ_REG(sc, BS_CTRL);
1940	ctrl |= BS_CTRL_RESET;
1941	WRITE_REG(sc, BS_CTRL, ctrl);
1942
1943	/*
1944	 * Wait approx. 30 PCI clocks = 900 ns = 0.9 us.
1945	 */
1946	DELAY(10);
1947}
1948
1949/*
1950 * Init Broadcom registers
1951 */
1952static void
1953ubsec_init_board(struct ubsec_softc *sc)
1954{
1955	u_int32_t ctrl;
1956
1957	ctrl = READ_REG(sc, BS_CTRL);
1958	ctrl &= ~(BS_CTRL_BE32 | BS_CTRL_BE64);
1959	ctrl |= BS_CTRL_LITTLE_ENDIAN | BS_CTRL_MCR1INT;
1960
1961	if (sc->sc_flags & (UBS_FLAGS_KEY|UBS_FLAGS_RNG))
1962		ctrl |= BS_CTRL_MCR2INT;
1963	else
1964		ctrl &= ~BS_CTRL_MCR2INT;
1965
1966	if (sc->sc_flags & UBS_FLAGS_HWNORM)
1967		ctrl &= ~BS_CTRL_SWNORM;
1968
1969	WRITE_REG(sc, BS_CTRL, ctrl);
1970}
1971
1972/*
1973 * Init Broadcom PCI registers
1974 */
1975static void
1976ubsec_init_pciregs(device_t dev)
1977{
1978#if 0
1979	u_int32_t misc;
1980
1981	misc = pci_conf_read(pc, pa->pa_tag, BS_RTY_TOUT);
1982	misc = (misc & ~(UBS_PCI_RTY_MASK << UBS_PCI_RTY_SHIFT))
1983	    | ((UBS_DEF_RTY & 0xff) << UBS_PCI_RTY_SHIFT);
1984	misc = (misc & ~(UBS_PCI_TOUT_MASK << UBS_PCI_TOUT_SHIFT))
1985	    | ((UBS_DEF_TOUT & 0xff) << UBS_PCI_TOUT_SHIFT);
1986	pci_conf_write(pc, pa->pa_tag, BS_RTY_TOUT, misc);
1987#endif
1988
1989	/*
1990	 * This will set the cache line size to 1, which will
1991	 * force the BCM58xx chip to do only burst read/writes.
1992	 * Cache line read/writes are too slow.
1993	 */
1994	pci_write_config(dev, PCIR_CACHELNSZ, UBS_DEF_CACHELINE, 1);
1995}
1996
1997/*
1998 * Clean up after a chip crash.
1999 * It is assumed that the caller is in splimp()
2000 */
2001static void
2002ubsec_cleanchip(struct ubsec_softc *sc)
2003{
2004	struct ubsec_q *q;
2005
2006	while (!SIMPLEQ_EMPTY(&sc->sc_qchip)) {
2007		q = SIMPLEQ_FIRST(&sc->sc_qchip);
2008		SIMPLEQ_REMOVE_HEAD(&sc->sc_qchip, q_next);
2009		ubsec_free_q(sc, q);
2010	}
2011	sc->sc_nqchip = 0;
2012}
2013
2014/*
2015 * free a ubsec_q
2016 * It is assumed that the caller is within splimp().
2017 */
2018static int
2019ubsec_free_q(struct ubsec_softc *sc, struct ubsec_q *q)
2020{
2021	struct ubsec_q *q2;
2022	struct cryptop *crp;
2023	int npkts;
2024	int i;
2025
2026	npkts = q->q_nstacked_mcrs;
2027
2028	for (i = 0; i < npkts; i++) {
2029		if (q->q_stacked_mcr[i]) {
2030			q2 = q->q_stacked_mcr[i];
2031
2032			if ((q2->q_dst_m != NULL) && (q2->q_src_m != q2->q_dst_m))
2033				m_freem(q2->q_dst_m);
2034
2035			crp = (struct cryptop *)q2->q_crp;
2036
2037			SIMPLEQ_INSERT_TAIL(&sc->sc_freequeue, q2, q_next);
2038
2039			crp->crp_etype = EFAULT;
2040			crypto_done(crp);
2041		} else {
2042			break;
2043		}
2044	}
2045
2046	/*
2047	 * Free header MCR
2048	 */
2049	if ((q->q_dst_m != NULL) && (q->q_src_m != q->q_dst_m))
2050		m_freem(q->q_dst_m);
2051
2052	crp = (struct cryptop *)q->q_crp;
2053
2054	SIMPLEQ_INSERT_TAIL(&sc->sc_freequeue, q, q_next);
2055
2056	crp->crp_etype = EFAULT;
2057	crypto_done(crp);
2058	return(0);
2059}
2060
2061/*
2062 * Routine to reset the chip and clean up.
2063 * It is assumed that the caller is in splimp()
2064 */
2065static void
2066ubsec_totalreset(struct ubsec_softc *sc)
2067{
2068	ubsec_reset_board(sc);
2069	ubsec_init_board(sc);
2070	ubsec_cleanchip(sc);
2071}
2072
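/*
 * Return non-zero if an operand's DMA segments satisfy the chip's
 * alignment rules: every segment must start on a 32-bit boundary and
 * all but the last must also be a multiple of 4 bytes long.
 */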
2073static int
2074ubsec_dmamap_aligned(struct ubsec_operand *op)
2075{
2076	int i;
2077
2078	for (i = 0; i < op->nsegs; i++) {
2079		if (op->segs[i].ds_addr & 3)
2080			return (0);
2081		if ((i != (op->nsegs - 1)) &&
2082		    (op->segs[i].ds_len & 3))
2083			return (0);
2084	}
2085	return (1);
2086}
2087
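/*
 * Release all DMA resources held by a completed key operation
 * (modular exponentiation or RSA private key) and free the request
 * structure itself.
 */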
2088static void
2089ubsec_kfree(struct ubsec_softc *sc, struct ubsec_q2 *q)
2090{
2091	switch (q->q_type) {
2092	case UBS_CTXOP_MODEXP: {
2093		struct ubsec_q2_modexp *me = (struct ubsec_q2_modexp *)q;
2094
2095		ubsec_dma_free(sc, &me->me_q.q_mcr);
2096		ubsec_dma_free(sc, &me->me_q.q_ctx);
2097		ubsec_dma_free(sc, &me->me_M);
2098		ubsec_dma_free(sc, &me->me_E);
2099		ubsec_dma_free(sc, &me->me_C);
2100		ubsec_dma_free(sc, &me->me_epb);
2101		free(me, M_DEVBUF);
2102		break;
2103	}
2104	case UBS_CTXOP_RSAPRIV: {
2105		struct ubsec_q2_rsapriv *rp = (struct ubsec_q2_rsapriv *)q;
2106
2107		ubsec_dma_free(sc, &rp->rpr_q.q_mcr);
2108		ubsec_dma_free(sc, &rp->rpr_q.q_ctx);
2109		ubsec_dma_free(sc, &rp->rpr_msgin);
2110		ubsec_dma_free(sc, &rp->rpr_msgout);
2111		free(rp, M_DEVBUF);
2112		break;
2113	}
2114	default:
2115		device_printf(sc->sc_dev, "invalid kfree 0x%x\n", q->q_type);
2116		break;
2117	}
2118}
2119
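/*
 * Asymmetric-key entry point: reclaim any completed key-op structures
 * parked on sc_q2free, then dispatch the request to the modexp path
 * (hardware- or software-normalized) or the RSA CRT path.  Unsupported
 * operations are completed immediately with EOPNOTSUPP.
 */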
2120static int
2121ubsec_kprocess(device_t dev, struct cryptkop *krp, int hint)
2122{
2123	struct ubsec_softc *sc = device_get_softc(dev);
2124	int r;
2125
2126	if (krp == NULL || krp->krp_callback == NULL)
2127		return (EINVAL);
2128
2129	while (!SIMPLEQ_EMPTY(&sc->sc_q2free)) {
2130		struct ubsec_q2 *q;
2131
2132		q = SIMPLEQ_FIRST(&sc->sc_q2free);
2133		SIMPLEQ_REMOVE_HEAD(&sc->sc_q2free, q_next);
2134		ubsec_kfree(sc, q);
2135	}
2136
2137	switch (krp->krp_op) {
2138	case CRK_MOD_EXP:
2139		if (sc->sc_flags & UBS_FLAGS_HWNORM)
2140			r = ubsec_kprocess_modexp_hw(sc, krp, hint);
2141		else
2142			r = ubsec_kprocess_modexp_sw(sc, krp, hint);
2143		break;
2144	case CRK_MOD_EXP_CRT:
2145		return (ubsec_kprocess_rsapriv(sc, krp, hint));
2146	default:
2147		device_printf(sc->sc_dev, "kprocess: invalid op 0x%x\n",
2148		    krp->krp_op);
2149		krp->krp_status = EOPNOTSUPP;
2150		crypto_kdone(krp);
2151		return (0);
2152	}
2153	return (0);			/* silence compiler */
2154}
2155
2156/*
2157 * Start computation of cr[C] = (cr[M] ^ cr[E]) mod cr[N] (sw normalization)
2158 */
2159static int
2160ubsec_kprocess_modexp_sw(struct ubsec_softc *sc, struct cryptkop *krp, int hint)
2161{
2162	struct ubsec_q2_modexp *me;
2163	struct ubsec_mcr *mcr;
2164	struct ubsec_ctx_modexp *ctx;
2165	struct ubsec_pktbuf *epb;
2166	int err = 0;
2167	u_int nbits, normbits, mbits, shiftbits, ebits;
2168
2169	me = (struct ubsec_q2_modexp *)malloc(sizeof *me, M_DEVBUF, M_NOWAIT);
2170	if (me == NULL) {
2171		err = ENOMEM;
2172		goto errout;
2173	}
2174	bzero(me, sizeof *me);
2175	me->me_krp = krp;
2176	me->me_q.q_type = UBS_CTXOP_MODEXP;
2177
2178	nbits = ubsec_ksigbits(&krp->krp_param[UBS_MODEXP_PAR_N]);
2179	if (nbits <= 512)
2180		normbits = 512;
2181	else if (nbits <= 768)
2182		normbits = 768;
2183	else if (nbits <= 1024)
2184		normbits = 1024;
2185	else if (sc->sc_flags & UBS_FLAGS_BIGKEY && nbits <= 1536)
2186		normbits = 1536;
2187	else if (sc->sc_flags & UBS_FLAGS_BIGKEY && nbits <= 2048)
2188		normbits = 2048;
2189	else {
2190		err = E2BIG;
2191		goto errout;
2192	}
2193
2194	shiftbits = normbits - nbits;
2195
2196	me->me_modbits = nbits;
2197	me->me_shiftbits = shiftbits;
2198	me->me_normbits = normbits;
2199
2200	/* Sanity check: result bits must be >= true modulus bits. */
2201	if (krp->krp_param[krp->krp_iparams].crp_nbits < nbits) {
2202		err = ERANGE;
2203		goto errout;
2204	}
2205
2206	if (ubsec_dma_malloc(sc, sizeof(struct ubsec_mcr),
2207	    &me->me_q.q_mcr, 0)) {
2208		err = ENOMEM;
2209		goto errout;
2210	}
2211	mcr = (struct ubsec_mcr *)me->me_q.q_mcr.dma_vaddr;
2212
2213	if (ubsec_dma_malloc(sc, sizeof(struct ubsec_ctx_modexp),
2214	    &me->me_q.q_ctx, 0)) {
2215		err = ENOMEM;
2216		goto errout;
2217	}
2218
2219	mbits = ubsec_ksigbits(&krp->krp_param[UBS_MODEXP_PAR_M]);
2220	if (mbits > nbits) {
2221		err = E2BIG;
2222		goto errout;
2223	}
2224	if (ubsec_dma_malloc(sc, normbits / 8, &me->me_M, 0)) {
2225		err = ENOMEM;
2226		goto errout;
2227	}
2228	ubsec_kshift_r(shiftbits,
2229	    krp->krp_param[UBS_MODEXP_PAR_M].crp_p, mbits,
2230	    me->me_M.dma_vaddr, normbits);
2231
2232	if (ubsec_dma_malloc(sc, normbits / 8, &me->me_C, 0)) {
2233		err = ENOMEM;
2234		goto errout;
2235	}
2236	bzero(me->me_C.dma_vaddr, me->me_C.dma_size);
2237
2238	ebits = ubsec_ksigbits(&krp->krp_param[UBS_MODEXP_PAR_E]);
2239	if (ebits > nbits) {
2240		err = E2BIG;
2241		goto errout;
2242	}
2243	if (ubsec_dma_malloc(sc, normbits / 8, &me->me_E, 0)) {
2244		err = ENOMEM;
2245		goto errout;
2246	}
2247	ubsec_kshift_r(shiftbits,
2248	    krp->krp_param[UBS_MODEXP_PAR_E].crp_p, ebits,
2249	    me->me_E.dma_vaddr, normbits);
2250
2251	if (ubsec_dma_malloc(sc, sizeof(struct ubsec_pktbuf),
2252	    &me->me_epb, 0)) {
2253		err = ENOMEM;
2254		goto errout;
2255	}
2256	epb = (struct ubsec_pktbuf *)me->me_epb.dma_vaddr;
2257	epb->pb_addr = htole32(me->me_E.dma_paddr);
2258	epb->pb_next = 0;
2259	epb->pb_len = htole32(normbits / 8);
2260
2261#ifdef UBSEC_DEBUG
2262	if (ubsec_debug) {
2263		printf("Epb ");
2264		ubsec_dump_pb(epb);
2265	}
2266#endif
2267
2268	mcr->mcr_pkts = htole16(1);
2269	mcr->mcr_flags = 0;
2270	mcr->mcr_cmdctxp = htole32(me->me_q.q_ctx.dma_paddr);
2271	mcr->mcr_reserved = 0;
2272	mcr->mcr_pktlen = 0;
2273
2274	mcr->mcr_ipktbuf.pb_addr = htole32(me->me_M.dma_paddr);
2275	mcr->mcr_ipktbuf.pb_len = htole32(normbits / 8);
2276	mcr->mcr_ipktbuf.pb_next = htole32(me->me_epb.dma_paddr);
2277
2278	mcr->mcr_opktbuf.pb_addr = htole32(me->me_C.dma_paddr);
2279	mcr->mcr_opktbuf.pb_next = 0;
2280	mcr->mcr_opktbuf.pb_len = htole32(normbits / 8);
2281
2282#ifdef DIAGNOSTIC
2283	/* Misaligned output buffer will hang the chip. */
2284	if ((letoh32(mcr->mcr_opktbuf.pb_addr) & 3) != 0)
2285		panic("%s: modexp invalid addr 0x%x\n",
2286		    device_get_nameunit(sc->sc_dev),
2287		    letoh32(mcr->mcr_opktbuf.pb_addr));
2288	if ((letoh32(mcr->mcr_opktbuf.pb_len) & 3) != 0)
2289		panic("%s: modexp invalid len 0x%x\n",
2290		    device_get_nameunit(sc->sc_dev),
2291		    letoh32(mcr->mcr_opktbuf.pb_len));
2292#endif
2293
2294	ctx = (struct ubsec_ctx_modexp *)me->me_q.q_ctx.dma_vaddr;
2295	bzero(ctx, sizeof(*ctx));
2296	ubsec_kshift_r(shiftbits,
2297	    krp->krp_param[UBS_MODEXP_PAR_N].crp_p, nbits,
2298	    ctx->me_N, normbits);
2299	ctx->me_len = htole16((normbits / 8) + (4 * sizeof(u_int16_t)));
2300	ctx->me_op = htole16(UBS_CTXOP_MODEXP);
2301	ctx->me_E_len = htole16(nbits);
2302	ctx->me_N_len = htole16(nbits);
2303
2304#ifdef UBSEC_DEBUG
2305	if (ubsec_debug) {
2306		ubsec_dump_mcr(mcr);
2307		ubsec_dump_ctx2((struct ubsec_ctx_keyop *)ctx);
2308	}
2309#endif
2310
2311	/*
2312	 * ubsec_feed2 will sync mcr and ctx; we just need to sync
2313	 * everything else.
2314	 */
2315	ubsec_dma_sync(&me->me_M, BUS_DMASYNC_PREWRITE);
2316	ubsec_dma_sync(&me->me_E, BUS_DMASYNC_PREWRITE);
2317	ubsec_dma_sync(&me->me_C, BUS_DMASYNC_PREREAD);
2318	ubsec_dma_sync(&me->me_epb, BUS_DMASYNC_PREWRITE);
2319
2320	/* Enqueue and we're done... */
2321	mtx_lock(&sc->sc_mcr2lock);
2322	SIMPLEQ_INSERT_TAIL(&sc->sc_queue2, &me->me_q, q_next);
2323	ubsec_feed2(sc);
2324	ubsecstats.hst_modexp++;
2325	mtx_unlock(&sc->sc_mcr2lock);
2326
2327	return (0);
2328
2329errout:
2330	if (me != NULL) {
2331		if (me->me_q.q_mcr.dma_map != NULL)
2332			ubsec_dma_free(sc, &me->me_q.q_mcr);
2333		if (me->me_q.q_ctx.dma_map != NULL) {
2334			bzero(me->me_q.q_ctx.dma_vaddr, me->me_q.q_ctx.dma_size);
2335			ubsec_dma_free(sc, &me->me_q.q_ctx);
2336		}
2337		if (me->me_M.dma_map != NULL) {
2338			bzero(me->me_M.dma_vaddr, me->me_M.dma_size);
2339			ubsec_dma_free(sc, &me->me_M);
2340		}
2341		if (me->me_E.dma_map != NULL) {
2342			bzero(me->me_E.dma_vaddr, me->me_E.dma_size);
2343			ubsec_dma_free(sc, &me->me_E);
2344		}
2345		if (me->me_C.dma_map != NULL) {
2346			bzero(me->me_C.dma_vaddr, me->me_C.dma_size);
2347			ubsec_dma_free(sc, &me->me_C);
2348		}
2349		if (me->me_epb.dma_map != NULL)
2350			ubsec_dma_free(sc, &me->me_epb);
2351		free(me, M_DEVBUF);
2352	}
2353	krp->krp_status = err;
2354	crypto_kdone(krp);
2355	return (0);
2356}
2357
2358/*
2359 * Start computation of cr[C] = (cr[M] ^ cr[E]) mod cr[N] (hw normalization)
2360 */
2361static int
2362ubsec_kprocess_modexp_hw(struct ubsec_softc *sc, struct cryptkop *krp, int hint)
2363{
2364	struct ubsec_q2_modexp *me;
2365	struct ubsec_mcr *mcr;
2366	struct ubsec_ctx_modexp *ctx;
2367	struct ubsec_pktbuf *epb;
2368	int err = 0;
2369	u_int nbits, normbits, mbits, shiftbits, ebits;
2370
2371	me = (struct ubsec_q2_modexp *)malloc(sizeof *me, M_DEVBUF, M_NOWAIT);
2372	if (me == NULL) {
2373		err = ENOMEM;
2374		goto errout;
2375	}
2376	bzero(me, sizeof *me);
2377	me->me_krp = krp;
2378	me->me_q.q_type = UBS_CTXOP_MODEXP;
2379
2380	nbits = ubsec_ksigbits(&krp->krp_param[UBS_MODEXP_PAR_N]);
2381	if (nbits <= 512)
2382		normbits = 512;
2383	else if (nbits <= 768)
2384		normbits = 768;
2385	else if (nbits <= 1024)
2386		normbits = 1024;
2387	else if (sc->sc_flags & UBS_FLAGS_BIGKEY && nbits <= 1536)
2388		normbits = 1536;
2389	else if (sc->sc_flags & UBS_FLAGS_BIGKEY && nbits <= 2048)
2390		normbits = 2048;
2391	else {
2392		err = E2BIG;
2393		goto errout;
2394	}
2395
2396	shiftbits = normbits - nbits;
2397
2398	/* XXX ??? */
2399	me->me_modbits = nbits;
2400	me->me_shiftbits = shiftbits;
2401	me->me_normbits = normbits;
2402
2403	/* Sanity check: result bits must be >= true modulus bits. */
2404	if (krp->krp_param[krp->krp_iparams].crp_nbits < nbits) {
2405		err = ERANGE;
2406		goto errout;
2407	}
2408
2409	if (ubsec_dma_malloc(sc, sizeof(struct ubsec_mcr),
2410	    &me->me_q.q_mcr, 0)) {
2411		err = ENOMEM;
2412		goto errout;
2413	}
2414	mcr = (struct ubsec_mcr *)me->me_q.q_mcr.dma_vaddr;
2415
2416	if (ubsec_dma_malloc(sc, sizeof(struct ubsec_ctx_modexp),
2417	    &me->me_q.q_ctx, 0)) {
2418		err = ENOMEM;
2419		goto errout;
2420	}
2421
2422	mbits = ubsec_ksigbits(&krp->krp_param[UBS_MODEXP_PAR_M]);
2423	if (mbits > nbits) {
2424		err = E2BIG;
2425		goto errout;
2426	}
2427	if (ubsec_dma_malloc(sc, normbits / 8, &me->me_M, 0)) {
2428		err = ENOMEM;
2429		goto errout;
2430	}
2431	bzero(me->me_M.dma_vaddr, normbits / 8);
2432	bcopy(krp->krp_param[UBS_MODEXP_PAR_M].crp_p,
2433	    me->me_M.dma_vaddr, (mbits + 7) / 8);
2434
2435	if (ubsec_dma_malloc(sc, normbits / 8, &me->me_C, 0)) {
2436		err = ENOMEM;
2437		goto errout;
2438	}
2439	bzero(me->me_C.dma_vaddr, me->me_C.dma_size);
2440
2441	ebits = ubsec_ksigbits(&krp->krp_param[UBS_MODEXP_PAR_E]);
2442	if (ebits > nbits) {
2443		err = E2BIG;
2444		goto errout;
2445	}
2446	if (ubsec_dma_malloc(sc, normbits / 8, &me->me_E, 0)) {
2447		err = ENOMEM;
2448		goto errout;
2449	}
2450	bzero(me->me_E.dma_vaddr, normbits / 8);
2451	bcopy(krp->krp_param[UBS_MODEXP_PAR_E].crp_p,
2452	    me->me_E.dma_vaddr, (ebits + 7) / 8);
2453
2454	if (ubsec_dma_malloc(sc, sizeof(struct ubsec_pktbuf),
2455	    &me->me_epb, 0)) {
2456		err = ENOMEM;
2457		goto errout;
2458	}
2459	epb = (struct ubsec_pktbuf *)me->me_epb.dma_vaddr;
2460	epb->pb_addr = htole32(me->me_E.dma_paddr);
2461	epb->pb_next = 0;
2462	epb->pb_len = htole32((ebits + 7) / 8);
2463
2464#ifdef UBSEC_DEBUG
2465	if (ubsec_debug) {
2466		printf("Epb ");
2467		ubsec_dump_pb(epb);
2468	}
2469#endif
2470
2471	mcr->mcr_pkts = htole16(1);
2472	mcr->mcr_flags = 0;
2473	mcr->mcr_cmdctxp = htole32(me->me_q.q_ctx.dma_paddr);
2474	mcr->mcr_reserved = 0;
2475	mcr->mcr_pktlen = 0;
2476
2477	mcr->mcr_ipktbuf.pb_addr = htole32(me->me_M.dma_paddr);
2478	mcr->mcr_ipktbuf.pb_len = htole32(normbits / 8);
2479	mcr->mcr_ipktbuf.pb_next = htole32(me->me_epb.dma_paddr);
2480
2481	mcr->mcr_opktbuf.pb_addr = htole32(me->me_C.dma_paddr);
2482	mcr->mcr_opktbuf.pb_next = 0;
2483	mcr->mcr_opktbuf.pb_len = htole32(normbits / 8);
2484
2485#ifdef DIAGNOSTIC
2486	/* Misaligned output buffer will hang the chip. */
2487	if ((letoh32(mcr->mcr_opktbuf.pb_addr) & 3) != 0)
2488		panic("%s: modexp invalid addr 0x%x\n",
2489		    device_get_nameunit(sc->sc_dev),
2490		    letoh32(mcr->mcr_opktbuf.pb_addr));
2491	if ((letoh32(mcr->mcr_opktbuf.pb_len) & 3) != 0)
2492		panic("%s: modexp invalid len 0x%x\n",
2493		    device_get_nameunit(sc->sc_dev),
2494		    letoh32(mcr->mcr_opktbuf.pb_len));
2495#endif
2496
2497	ctx = (struct ubsec_ctx_modexp *)me->me_q.q_ctx.dma_vaddr;
2498	bzero(ctx, sizeof(*ctx));
2499	bcopy(krp->krp_param[UBS_MODEXP_PAR_N].crp_p, ctx->me_N,
2500	    (nbits + 7) / 8);
2501	ctx->me_len = htole16((normbits / 8) + (4 * sizeof(u_int16_t)));
2502	ctx->me_op = htole16(UBS_CTXOP_MODEXP);
2503	ctx->me_E_len = htole16(ebits);
2504	ctx->me_N_len = htole16(nbits);
2505
2506#ifdef UBSEC_DEBUG
2507	if (ubsec_debug) {
2508		ubsec_dump_mcr(mcr);
2509		ubsec_dump_ctx2((struct ubsec_ctx_keyop *)ctx);
2510	}
2511#endif
2512
2513	/*
2514	 * ubsec_feed2 will sync mcr and ctx; we just need to sync
2515	 * everything else.
2516	 */
2517	ubsec_dma_sync(&me->me_M, BUS_DMASYNC_PREWRITE);
2518	ubsec_dma_sync(&me->me_E, BUS_DMASYNC_PREWRITE);
2519	ubsec_dma_sync(&me->me_C, BUS_DMASYNC_PREREAD);
2520	ubsec_dma_sync(&me->me_epb, BUS_DMASYNC_PREWRITE);
2521
2522	/* Enqueue and we're done... */
2523	mtx_lock(&sc->sc_mcr2lock);
2524	SIMPLEQ_INSERT_TAIL(&sc->sc_queue2, &me->me_q, q_next);
2525	ubsec_feed2(sc);
2526	mtx_unlock(&sc->sc_mcr2lock);
2527
2528	return (0);
2529
2530errout:
2531	if (me != NULL) {
2532		if (me->me_q.q_mcr.dma_map != NULL)
2533			ubsec_dma_free(sc, &me->me_q.q_mcr);
2534		if (me->me_q.q_ctx.dma_map != NULL) {
2535			bzero(me->me_q.q_ctx.dma_vaddr, me->me_q.q_ctx.dma_size);
2536			ubsec_dma_free(sc, &me->me_q.q_ctx);
2537		}
2538		if (me->me_M.dma_map != NULL) {
2539			bzero(me->me_M.dma_vaddr, me->me_M.dma_size);
2540			ubsec_dma_free(sc, &me->me_M);
2541		}
2542		if (me->me_E.dma_map != NULL) {
2543			bzero(me->me_E.dma_vaddr, me->me_E.dma_size);
2544			ubsec_dma_free(sc, &me->me_E);
2545		}
2546		if (me->me_C.dma_map != NULL) {
2547			bzero(me->me_C.dma_vaddr, me->me_C.dma_size);
2548			ubsec_dma_free(sc, &me->me_C);
2549		}
2550		if (me->me_epb.dma_map != NULL)
2551			ubsec_dma_free(sc, &me->me_epb);
2552		free(me, M_DEVBUF);
2553	}
2554	krp->krp_status = err;
2555	crypto_kdone(krp);
2556	return (0);
2557}
2558
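/*
 * Start computation of an RSA private-key (CRT) operation: p, q, dp,
 * dq and pinv are packed into the context at padlen/8-byte offsets,
 * the input message is staged in a DMA buffer of 2*padlen bits, and
 * the resulting MCR is queued on the second MCR ring.
 */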
2559static int
2560ubsec_kprocess_rsapriv(struct ubsec_softc *sc, struct cryptkop *krp, int hint)
2561{
2562	struct ubsec_q2_rsapriv *rp = NULL;
2563	struct ubsec_mcr *mcr;
2564	struct ubsec_ctx_rsapriv *ctx;
2565	int err = 0;
2566	u_int padlen, msglen;
2567
2568	msglen = ubsec_ksigbits(&krp->krp_param[UBS_RSAPRIV_PAR_P]);
2569	padlen = ubsec_ksigbits(&krp->krp_param[UBS_RSAPRIV_PAR_Q]);
2570	if (msglen > padlen)
2571		padlen = msglen;
2572
2573	if (padlen <= 256)
2574		padlen = 256;
2575	else if (padlen <= 384)
2576		padlen = 384;
2577	else if (padlen <= 512)
2578		padlen = 512;
2579	else if (sc->sc_flags & UBS_FLAGS_BIGKEY && padlen <= 768)
2580		padlen = 768;
2581	else if (sc->sc_flags & UBS_FLAGS_BIGKEY && padlen <= 1024)
2582		padlen = 1024;
2583	else {
2584		err = E2BIG;
2585		goto errout;
2586	}
2587
2588	if (ubsec_ksigbits(&krp->krp_param[UBS_RSAPRIV_PAR_DP]) > padlen) {
2589		err = E2BIG;
2590		goto errout;
2591	}
2592
2593	if (ubsec_ksigbits(&krp->krp_param[UBS_RSAPRIV_PAR_DQ]) > padlen) {
2594		err = E2BIG;
2595		goto errout;
2596	}
2597
2598	if (ubsec_ksigbits(&krp->krp_param[UBS_RSAPRIV_PAR_PINV]) > padlen) {
2599		err = E2BIG;
2600		goto errout;
2601	}
2602
2603	rp = (struct ubsec_q2_rsapriv *)malloc(sizeof *rp, M_DEVBUF, M_NOWAIT);
2604	if (rp == NULL)
2605		return (ENOMEM);
2606	bzero(rp, sizeof *rp);
2607	rp->rpr_krp = krp;
2608	rp->rpr_q.q_type = UBS_CTXOP_RSAPRIV;
2609
2610	if (ubsec_dma_malloc(sc, sizeof(struct ubsec_mcr),
2611	    &rp->rpr_q.q_mcr, 0)) {
2612		err = ENOMEM;
2613		goto errout;
2614	}
2615	mcr = (struct ubsec_mcr *)rp->rpr_q.q_mcr.dma_vaddr;
2616
2617	if (ubsec_dma_malloc(sc, sizeof(struct ubsec_ctx_rsapriv),
2618	    &rp->rpr_q.q_ctx, 0)) {
2619		err = ENOMEM;
2620		goto errout;
2621	}
2622	ctx = (struct ubsec_ctx_rsapriv *)rp->rpr_q.q_ctx.dma_vaddr;
2623	bzero(ctx, sizeof *ctx);
2624
2625	/* Copy in p */
2626	bcopy(krp->krp_param[UBS_RSAPRIV_PAR_P].crp_p,
2627	    &ctx->rpr_buf[0 * (padlen / 8)],
2628	    (krp->krp_param[UBS_RSAPRIV_PAR_P].crp_nbits + 7) / 8);
2629
2630	/* Copy in q */
2631	bcopy(krp->krp_param[UBS_RSAPRIV_PAR_Q].crp_p,
2632	    &ctx->rpr_buf[1 * (padlen / 8)],
2633	    (krp->krp_param[UBS_RSAPRIV_PAR_Q].crp_nbits + 7) / 8);
2634
2635	/* Copy in dp */
2636	bcopy(krp->krp_param[UBS_RSAPRIV_PAR_DP].crp_p,
2637	    &ctx->rpr_buf[2 * (padlen / 8)],
2638	    (krp->krp_param[UBS_RSAPRIV_PAR_DP].crp_nbits + 7) / 8);
2639
2640	/* Copy in dq */
2641	bcopy(krp->krp_param[UBS_RSAPRIV_PAR_DQ].crp_p,
2642	    &ctx->rpr_buf[3 * (padlen / 8)],
2643	    (krp->krp_param[UBS_RSAPRIV_PAR_DQ].crp_nbits + 7) / 8);
2644
2645	/* Copy in pinv */
2646	bcopy(krp->krp_param[UBS_RSAPRIV_PAR_PINV].crp_p,
2647	    &ctx->rpr_buf[4 * (padlen / 8)],
2648	    (krp->krp_param[UBS_RSAPRIV_PAR_PINV].crp_nbits + 7) / 8);
2649
2650	msglen = padlen * 2;
2651
2652	/* Copy in input message (aligned buffer/length). */
2653	if (ubsec_ksigbits(&krp->krp_param[UBS_RSAPRIV_PAR_MSGIN]) > msglen) {
2654		/* Is this likely? */
2655		err = E2BIG;
2656		goto errout;
2657	}
2658	if (ubsec_dma_malloc(sc, (msglen + 7) / 8, &rp->rpr_msgin, 0)) {
2659		err = ENOMEM;
2660		goto errout;
2661	}
2662	bzero(rp->rpr_msgin.dma_vaddr, (msglen + 7) / 8);
2663	bcopy(krp->krp_param[UBS_RSAPRIV_PAR_MSGIN].crp_p,
2664	    rp->rpr_msgin.dma_vaddr,
2665	    (krp->krp_param[UBS_RSAPRIV_PAR_MSGIN].crp_nbits + 7) / 8);
2666
2667	/* Prepare space for output message (aligned buffer/length). */
2668	if (ubsec_ksigbits(&krp->krp_param[UBS_RSAPRIV_PAR_MSGOUT]) < msglen) {
2669		/* Is this likely? */
2670		err = E2BIG;
2671		goto errout;
2672	}
2673	if (ubsec_dma_malloc(sc, (msglen + 7) / 8, &rp->rpr_msgout, 0)) {
2674		err = ENOMEM;
2675		goto errout;
2676	}
2677	bzero(rp->rpr_msgout.dma_vaddr, (msglen + 7) / 8);
2678
2679	mcr->mcr_pkts = htole16(1);
2680	mcr->mcr_flags = 0;
2681	mcr->mcr_cmdctxp = htole32(rp->rpr_q.q_ctx.dma_paddr);
2682	mcr->mcr_ipktbuf.pb_addr = htole32(rp->rpr_msgin.dma_paddr);
2683	mcr->mcr_ipktbuf.pb_next = 0;
2684	mcr->mcr_ipktbuf.pb_len = htole32(rp->rpr_msgin.dma_size);
2685	mcr->mcr_reserved = 0;
2686	mcr->mcr_pktlen = htole16(msglen);
2687	mcr->mcr_opktbuf.pb_addr = htole32(rp->rpr_msgout.dma_paddr);
2688	mcr->mcr_opktbuf.pb_next = 0;
2689	mcr->mcr_opktbuf.pb_len = htole32(rp->rpr_msgout.dma_size);
2690
2691#ifdef DIAGNOSTIC
2692	if (rp->rpr_msgin.dma_paddr & 3 || rp->rpr_msgin.dma_size & 3) {
2693		panic("%s: rsapriv: invalid msgin %x(0x%jx)",
2694		    device_get_nameunit(sc->sc_dev),
2695		    rp->rpr_msgin.dma_paddr, (uintmax_t)rp->rpr_msgin.dma_size);
2696	}
2697	if (rp->rpr_msgout.dma_paddr & 3 || rp->rpr_msgout.dma_size & 3) {
2698		panic("%s: rsapriv: invalid msgout %x(0x%jx)",
2699		    device_get_nameunit(sc->sc_dev),
2700		    rp->rpr_msgout.dma_paddr, (uintmax_t)rp->rpr_msgout.dma_size);
2701	}
2702#endif
2703
2704	ctx->rpr_len = htole16((sizeof(u_int16_t) * 4) + (5 * (padlen / 8)));
2705	ctx->rpr_op = htole16(UBS_CTXOP_RSAPRIV);
2706	ctx->rpr_q_len = htole16(padlen);
2707	ctx->rpr_p_len = htole16(padlen);
2708
2709	/*
2710	 * ubsec_feed2 will sync mcr and ctx; we just need to sync
2711	 * everything else.
2712	 */
2713	ubsec_dma_sync(&rp->rpr_msgin, BUS_DMASYNC_PREWRITE);
2714	ubsec_dma_sync(&rp->rpr_msgout, BUS_DMASYNC_PREREAD);
2715
2716	/* Enqueue and we're done... */
2717	mtx_lock(&sc->sc_mcr2lock);
2718	SIMPLEQ_INSERT_TAIL(&sc->sc_queue2, &rp->rpr_q, q_next);
2719	ubsec_feed2(sc);
2720	ubsecstats.hst_modexpcrt++;
2721	mtx_unlock(&sc->sc_mcr2lock);
2722	return (0);
2723
2724errout:
2725	if (rp != NULL) {
2726		if (rp->rpr_q.q_mcr.dma_map != NULL)
2727			ubsec_dma_free(sc, &rp->rpr_q.q_mcr);
2728		if (rp->rpr_msgin.dma_map != NULL) {
2729			bzero(rp->rpr_msgin.dma_vaddr, rp->rpr_msgin.dma_size);
2730			ubsec_dma_free(sc, &rp->rpr_msgin);
2731		}
2732		if (rp->rpr_msgout.dma_map != NULL) {
2733			bzero(rp->rpr_msgout.dma_vaddr, rp->rpr_msgout.dma_size);
2734			ubsec_dma_free(sc, &rp->rpr_msgout);
2735		}
2736		free(rp, M_DEVBUF);
2737	}
2738	krp->krp_status = err;
2739	crypto_kdone(krp);
2740	return (0);
2741}
2742
2743#ifdef UBSEC_DEBUG
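/*
 * Debug helpers: dump a packet buffer, a key-op context and a complete
 * MCR (with its per-packet buffers) in human-readable form.
 */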
2744static void
2745ubsec_dump_pb(volatile struct ubsec_pktbuf *pb)
2746{
2747	printf("addr 0x%x (len 0x%x) next 0x%x\n",
2748	    pb->pb_addr, pb->pb_len, pb->pb_next);
2749}
2750
2751static void
2752ubsec_dump_ctx2(struct ubsec_ctx_keyop *c)
2753{
2754	printf("CTX (0x%x):\n", letoh16(c->ctx_len));
2755	switch (letoh16(c->ctx_op)) {
2756	case UBS_CTXOP_RNGBYPASS:
2757	case UBS_CTXOP_RNGSHA1:
2758		break;
2759	case UBS_CTXOP_MODEXP:
2760	{
2761		struct ubsec_ctx_modexp *cx = (void *)c;
2762		int i, len;
2763
2764		printf(" Elen %u, Nlen %u\n",
2765		    letoh16(cx->me_E_len), letoh16(cx->me_N_len));
2766		len = (letoh16(cx->me_N_len) + 7) / 8;
2767		for (i = 0; i < len; i++)
2768			printf("%s%02x", (i == 0) ? " N: " : ":", cx->me_N[i]);
2769		printf("\n");
2770		break;
2771	}
2772	default:
2773		printf("unknown context: %x\n", letoh16(c->ctx_op));
2774	}
2775	printf("END CTX\n");
2776}
2777
2778static void
2779ubsec_dump_mcr(struct ubsec_mcr *mcr)
2780{
2781	volatile struct ubsec_mcr_add *ma;
2782	int i;
2783
2784	printf("MCR:\n");
2785	printf(" pkts: %u, flags 0x%x\n",
2786	    letoh16(mcr->mcr_pkts), letoh16(mcr->mcr_flags));
2787	ma = (volatile struct ubsec_mcr_add *)&mcr->mcr_cmdctxp;
2788	for (i = 0; i < letoh16(mcr->mcr_pkts); i++) {
2789		printf(" %d: ctx 0x%x len 0x%x rsvd 0x%x\n", i,
2790		    letoh32(ma->mcr_cmdctxp), letoh16(ma->mcr_pktlen),
2791		    letoh16(ma->mcr_reserved));
2792		printf(" %d: ipkt ", i);
2793		ubsec_dump_pb(&ma->mcr_ipktbuf);
2794		printf(" %d: opkt ", i);
2795		ubsec_dump_pb(&ma->mcr_opktbuf);
2796		ma++;
2797	}
2798	printf("END MCR\n");
2799}
2800#endif /* UBSEC_DEBUG */
2801
2802/*
2803 * Return the number of significant bits of a big number.
2804 */
2805static int
2806ubsec_ksigbits(struct crparam *cr)
2807{
2808	u_int plen = (cr->crp_nbits + 7) / 8;
2809	int i, sig = plen * 8;
2810	u_int8_t c, *p = cr->crp_p;
2811
2812	for (i = plen - 1; i >= 0; i--) {
2813		c = p[i];
2814		if (c != 0) {
2815			while ((c & 0x80) == 0) {
2816				sig--;
2817				c <<= 1;
2818			}
2819			break;
2820		}
2821		sig -= 8;
2822	}
2823	return (sig);
2824}
2825
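/*
 * Shift a multi-precision operand up by shiftbits while copying it into
 * a dstbits-wide buffer; values are stored least-significant byte
 * first.  Callers use this to normalize M, E and N so that the most
 * significant bit of the modulus lands at the top of the normalized
 * width (e.g. a 1000-bit modulus normalized to 1024 bits is shifted up
 * by 24 bits).
 */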
2826static void
2827ubsec_kshift_r(
2828	u_int shiftbits,
2829	u_int8_t *src, u_int srcbits,
2830	u_int8_t *dst, u_int dstbits)
2831{
2832	u_int slen, dlen;
2833	int i, si, di, n;
2834
2835	slen = (srcbits + 7) / 8;
2836	dlen = (dstbits + 7) / 8;
2837
2838	for (i = 0; i < slen; i++)
2839		dst[i] = src[i];
2840	for (i = 0; i < dlen - slen; i++)
2841		dst[slen + i] = 0;
2842
2843	n = shiftbits / 8;
2844	if (n != 0) {
2845		si = dlen - n - 1;
2846		di = dlen - 1;
2847		while (si >= 0)
2848			dst[di--] = dst[si--];
2849		while (di >= 0)
2850			dst[di--] = 0;
2851	}
2852
2853	n = shiftbits % 8;
2854	if (n != 0) {
2855		for (i = dlen - 1; i > 0; i--)
2856			dst[i] = (dst[i] << n) |
2857			    (dst[i - 1] >> (8 - n));
2858		dst[0] = dst[0] << n;
2859	}
2860}
2861
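/*
 * Inverse of ubsec_kshift_r(): shift a normalized result down by
 * shiftbits (toward the least-significant end) while copying it from
 * src to dst.  Used on the software-normalization path to de-normalize
 * the modexp result before it is handed back to the caller.
 */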
2862static void
2863ubsec_kshift_l(
2864	u_int shiftbits,
2865	u_int8_t *src, u_int srcbits,
2866	u_int8_t *dst, u_int dstbits)
2867{
2868	int slen, dlen, i, n;
2869
2870	slen = (srcbits + 7) / 8;
2871	dlen = (dstbits + 7) / 8;
2872
2873	n = shiftbits / 8;
2874	for (i = 0; i < slen; i++)
2875		dst[i] = src[i + n];
2876	for (i = 0; i < dlen - slen; i++)
2877		dst[slen + i] = 0;
2878
2879	n = shiftbits % 8;
2880	if (n != 0) {
2881		for (i = 0; i < (dlen - 1); i++)
2882			dst[i] = (dst[i] >> n) | (dst[i + 1] << (8 - n));
2883		dst[dlen - 1] = dst[dlen - 1] >> n;
2884	}
2885}
2886