/* hifn7751.c revision 216519 */
1/*	$OpenBSD: hifn7751.c,v 1.120 2002/05/17 00:33:34 deraadt Exp $	*/
2
3/*-
4 * Invertex AEON / Hifn 7751 driver
5 * Copyright (c) 1999 Invertex Inc. All rights reserved.
6 * Copyright (c) 1999 Theo de Raadt
7 * Copyright (c) 2000-2001 Network Security Technologies, Inc.
8 *			http://www.netsec.net
9 * Copyright (c) 2003 Hifn Inc.
10 *
11 * This driver is based on a previous driver by Invertex, for which they
12 * requested:  Please send any comments, feedback, bug-fixes, or feature
13 * requests to software@invertex.com.
14 *
15 * Redistribution and use in source and binary forms, with or without
16 * modification, are permitted provided that the following conditions
17 * are met:
18 *
19 * 1. Redistributions of source code must retain the above copyright
20 *   notice, this list of conditions and the following disclaimer.
21 * 2. Redistributions in binary form must reproduce the above copyright
22 *   notice, this list of conditions and the following disclaimer in the
23 *   documentation and/or other materials provided with the distribution.
24 * 3. The name of the author may not be used to endorse or promote products
25 *   derived from this software without specific prior written permission.
26 *
27 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
28 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
29 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
30 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
31 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
32 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
33 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
34 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
35 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
36 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
37 *
38 * Effort sponsored in part by the Defense Advanced Research Projects
39 * Agency (DARPA) and Air Force Research Laboratory, Air Force
40 * Materiel Command, USAF, under agreement number F30602-01-2-0537.
41 */
42
43#include <sys/cdefs.h>
44__FBSDID("$FreeBSD: head/sys/dev/hifn/hifn7751.c 216519 2010-12-18 14:24:24Z tijl $");
45
46/*
47 * Driver for various Hifn encryption processors.
48 */
49#include "opt_hifn.h"
50
51#include <sys/param.h>
52#include <sys/systm.h>
53#include <sys/proc.h>
54#include <sys/errno.h>
55#include <sys/malloc.h>
56#include <sys/kernel.h>
57#include <sys/module.h>
58#include <sys/mbuf.h>
59#include <sys/lock.h>
60#include <sys/mutex.h>
61#include <sys/sysctl.h>
62
63#include <vm/vm.h>
64#include <vm/pmap.h>
65
66#include <machine/bus.h>
67#include <machine/resource.h>
68#include <sys/bus.h>
69#include <sys/rman.h>
70
71#include <opencrypto/cryptodev.h>
72#include <sys/random.h>
73#include <sys/kobj.h>
74
75#include "cryptodev_if.h"
76
77#include <dev/pci/pcivar.h>
78#include <dev/pci/pcireg.h>
79
80#ifdef HIFN_RNDTEST
81#include <dev/rndtest/rndtest.h>
82#endif
83#include <dev/hifn/hifn7751reg.h>
84#include <dev/hifn/hifn7751var.h>
85
86#ifdef HIFN_VULCANDEV
87#include <sys/conf.h>
88#include <sys/uio.h>
89
90static struct cdevsw vulcanpk_cdevsw; /* forward declaration */
91#endif
92
93/*
94 * Prototypes and count for the pci_device structure
95 */
96static	int hifn_probe(device_t);
97static	int hifn_attach(device_t);
98static	int hifn_detach(device_t);
99static	int hifn_suspend(device_t);
100static	int hifn_resume(device_t);
101static	int hifn_shutdown(device_t);
102
103static	int hifn_newsession(device_t, u_int32_t *, struct cryptoini *);
104static	int hifn_freesession(device_t, u_int64_t);
105static	int hifn_process(device_t, struct cryptop *, int);
106
/*
 * Newbus method dispatch table: device life-cycle hooks, generic bus
 * pass-throughs, and the opencrypto entry points serviced by this driver.
 */
static device_method_t hifn_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		hifn_probe),
	DEVMETHOD(device_attach,	hifn_attach),
	DEVMETHOD(device_detach,	hifn_detach),
	DEVMETHOD(device_suspend,	hifn_suspend),
	DEVMETHOD(device_resume,	hifn_resume),
	DEVMETHOD(device_shutdown,	hifn_shutdown),

	/* bus interface */
	DEVMETHOD(bus_print_child,	bus_generic_print_child),
	DEVMETHOD(bus_driver_added,	bus_generic_driver_added),

	/* crypto device methods */
	DEVMETHOD(cryptodev_newsession,	hifn_newsession),
	DEVMETHOD(cryptodev_freesession,hifn_freesession),
	DEVMETHOD(cryptodev_process,	hifn_process),

	{ 0, 0 }			/* table terminator */
};
/* Driver description: name, method table, and per-instance softc size. */
static driver_t hifn_driver = {
	"hifn",
	hifn_methods,
	sizeof (struct hifn_softc)
};
132static devclass_t hifn_devclass;
133
134DRIVER_MODULE(hifn, pci, hifn_driver, hifn_devclass, 0, 0);
135MODULE_DEPEND(hifn, crypto, 1, 1, 1);
136#ifdef HIFN_RNDTEST
137MODULE_DEPEND(hifn, rndtest, 1, 1, 1);
138#endif
139
140static	void hifn_reset_board(struct hifn_softc *, int);
141static	void hifn_reset_puc(struct hifn_softc *);
142static	void hifn_puc_wait(struct hifn_softc *);
143static	int hifn_enable_crypto(struct hifn_softc *);
144static	void hifn_set_retry(struct hifn_softc *sc);
145static	void hifn_init_dma(struct hifn_softc *);
146static	void hifn_init_pci_registers(struct hifn_softc *);
147static	int hifn_sramsize(struct hifn_softc *);
148static	int hifn_dramsize(struct hifn_softc *);
149static	int hifn_ramtype(struct hifn_softc *);
150static	void hifn_sessions(struct hifn_softc *);
151static	void hifn_intr(void *);
152static	u_int hifn_write_command(struct hifn_command *, u_int8_t *);
153static	u_int32_t hifn_next_signature(u_int32_t a, u_int cnt);
154static	void hifn_callback(struct hifn_softc *, struct hifn_command *, u_int8_t *);
155static	int hifn_crypto(struct hifn_softc *, struct hifn_command *, struct cryptop *, int);
156static	int hifn_readramaddr(struct hifn_softc *, int, u_int8_t *);
157static	int hifn_writeramaddr(struct hifn_softc *, int, u_int8_t *);
158static	int hifn_dmamap_load_src(struct hifn_softc *, struct hifn_command *);
159static	int hifn_dmamap_load_dst(struct hifn_softc *, struct hifn_command *);
160static	int hifn_init_pubrng(struct hifn_softc *);
161static	void hifn_rng(void *);
162static	void hifn_tick(void *);
163static	void hifn_abort(struct hifn_softc *);
164static	void hifn_alloc_slot(struct hifn_softc *, int *, int *, int *, int *);
165
166static	void hifn_write_reg_0(struct hifn_softc *, bus_size_t, u_int32_t);
167static	void hifn_write_reg_1(struct hifn_softc *, bus_size_t, u_int32_t);
168
169static __inline u_int32_t
170READ_REG_0(struct hifn_softc *sc, bus_size_t reg)
171{
172    u_int32_t v = bus_space_read_4(sc->sc_st0, sc->sc_sh0, reg);
173    sc->sc_bar0_lastreg = (bus_size_t) -1;
174    return (v);
175}
176#define	WRITE_REG_0(sc, reg, val)	hifn_write_reg_0(sc, reg, val)
177
178static __inline u_int32_t
179READ_REG_1(struct hifn_softc *sc, bus_size_t reg)
180{
181    u_int32_t v = bus_space_read_4(sc->sc_st1, sc->sc_sh1, reg);
182    sc->sc_bar1_lastreg = (bus_size_t) -1;
183    return (v);
184}
185#define	WRITE_REG_1(sc, reg, val)	hifn_write_reg_1(sc, reg, val)
186
187SYSCTL_NODE(_hw, OID_AUTO, hifn, CTLFLAG_RD, 0, "Hifn driver parameters");
188
189#ifdef HIFN_DEBUG
190static	int hifn_debug = 0;
191SYSCTL_INT(_hw_hifn, OID_AUTO, debug, CTLFLAG_RW, &hifn_debug,
192	    0, "control debugging msgs");
193#endif
194
195static	struct hifn_stats hifnstats;
196SYSCTL_STRUCT(_hw_hifn, OID_AUTO, stats, CTLFLAG_RD, &hifnstats,
197	    hifn_stats, "driver statistics");
198static	int hifn_maxbatch = 1;
199SYSCTL_INT(_hw_hifn, OID_AUTO, maxbatch, CTLFLAG_RW, &hifn_maxbatch,
200	    0, "max ops to batch w/o interrupt");
201
202/*
203 * Probe for a supported device.  The PCI vendor and device
204 * IDs are used to detect devices we know how to handle.
205 */
206static int
207hifn_probe(device_t dev)
208{
209	if (pci_get_vendor(dev) == PCI_VENDOR_INVERTEX &&
210	    pci_get_device(dev) == PCI_PRODUCT_INVERTEX_AEON)
211		return (BUS_PROBE_DEFAULT);
212	if (pci_get_vendor(dev) == PCI_VENDOR_HIFN &&
213	    (pci_get_device(dev) == PCI_PRODUCT_HIFN_7751 ||
214	     pci_get_device(dev) == PCI_PRODUCT_HIFN_7951 ||
215	     pci_get_device(dev) == PCI_PRODUCT_HIFN_7955 ||
216	     pci_get_device(dev) == PCI_PRODUCT_HIFN_7956 ||
217	     pci_get_device(dev) == PCI_PRODUCT_HIFN_7811))
218		return (BUS_PROBE_DEFAULT);
219	if (pci_get_vendor(dev) == PCI_VENDOR_NETSEC &&
220	    pci_get_device(dev) == PCI_PRODUCT_NETSEC_7751)
221		return (BUS_PROBE_DEFAULT);
222	return (ENXIO);
223}
224
225static void
226hifn_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
227{
228	bus_addr_t *paddr = (bus_addr_t*) arg;
229	*paddr = segs->ds_addr;
230}
231
232static const char*
233hifn_partname(struct hifn_softc *sc)
234{
235	/* XXX sprintf numbers when not decoded */
236	switch (pci_get_vendor(sc->sc_dev)) {
237	case PCI_VENDOR_HIFN:
238		switch (pci_get_device(sc->sc_dev)) {
239		case PCI_PRODUCT_HIFN_6500:	return "Hifn 6500";
240		case PCI_PRODUCT_HIFN_7751:	return "Hifn 7751";
241		case PCI_PRODUCT_HIFN_7811:	return "Hifn 7811";
242		case PCI_PRODUCT_HIFN_7951:	return "Hifn 7951";
243		case PCI_PRODUCT_HIFN_7955:	return "Hifn 7955";
244		case PCI_PRODUCT_HIFN_7956:	return "Hifn 7956";
245		}
246		return "Hifn unknown-part";
247	case PCI_VENDOR_INVERTEX:
248		switch (pci_get_device(sc->sc_dev)) {
249		case PCI_PRODUCT_INVERTEX_AEON:	return "Invertex AEON";
250		}
251		return "Invertex unknown-part";
252	case PCI_VENDOR_NETSEC:
253		switch (pci_get_device(sc->sc_dev)) {
254		case PCI_PRODUCT_NETSEC_7751:	return "NetSec 7751";
255		}
256		return "NetSec unknown-part";
257	}
258	return "Unknown-vendor unknown-part";
259}
260
/*
 * Default entropy sink: feed RNG output straight to random_harvest()
 * as a pure source.  Used when rndtest(4) is not attached (rsp unused).
 */
static void
default_harvest(struct rndtest_state *rsp, void *buf, u_int count)
{
	random_harvest(buf, count, count*NBBY, 0, RANDOM_PURE);
}
266
267static u_int
268checkmaxmin(device_t dev, const char *what, u_int v, u_int min, u_int max)
269{
270	if (v > max) {
271		device_printf(dev, "Warning, %s %u out of range, "
272			"using max %u\n", what, v, max);
273		v = max;
274	} else if (v < min) {
275		device_printf(dev, "Warning, %s %u out of range, "
276			"using min %u\n", what, v, min);
277		v = min;
278	}
279	return v;
280}
281
282/*
283 * Select PLL configuration for 795x parts.  This is complicated in
284 * that we cannot determine the optimal parameters without user input.
285 * The reference clock is derived from an external clock through a
286 * multiplier.  The external clock is either the host bus (i.e. PCI)
287 * or an external clock generator.  When using the PCI bus we assume
288 * the clock is either 33 or 66 MHz; for an external source we cannot
289 * tell the speed.
290 *
291 * PLL configuration is done with a string: "pci" for PCI bus, or "ext"
292 * for an external source, followed by the frequency.  We calculate
293 * the appropriate multiplier and PLL register contents accordingly.
294 * When no configuration is given we default to "pci66" since that
295 * always will allow the card to work.  If a card is using the PCI
296 * bus clock and in a 33MHz slot then it will be operating at half
297 * speed until the correct information is provided.
298 *
299 * We use a default setting of "ext66" because according to Mike Ham
300 * of HiFn, almost every board in existence has an external crystal
301 * populated at 66Mhz. Using PCI can be a problem on modern motherboards,
302 * because PCI33 can have clocks from 0 to 33Mhz, and some have
303 * non-PCI-compliant spread-spectrum clocks, which can confuse the pll.
304 */
static void
hifn_getpllconfig(device_t dev, u_int *pll)
{
	const char *pllspec;
	u_int freq, mul, fl, fh;
	u_int32_t pllconfig;
	char *nxt;

	/* Per-device hint, e.g. hint.hifn.0.pllconfig="pci33" or "ext66". */
	if (resource_string_value("hifn", device_get_unit(dev),
	    "pllconfig", &pllspec))
		pllspec = "ext66";	/* default; see block comment above */
	fl = 33, fh = 66;		/* accepted frequency window (MHz) */
	pllconfig = 0;
	if (strncmp(pllspec, "ext", 3) == 0) {
		pllspec += 3;
		pllconfig |= HIFN_PLL_REF_SEL;	/* reference = external clock */
		switch (pci_get_device(dev)) {
		case PCI_PRODUCT_HIFN_7955:
		case PCI_PRODUCT_HIFN_7956:
			fl = 20, fh = 100;	/* wider window on 7955/7956 */
			break;
#ifdef notyet
		case PCI_PRODUCT_HIFN_7954:
			fl = 20, fh = 66;
			break;
#endif
		}
	} else if (strncmp(pllspec, "pci", 3) == 0)
		pllspec += 3;
	freq = strtoul(pllspec, &nxt, 10);
	if (nxt == pllspec)
		freq = 66;		/* no number supplied: assume 66 MHz */
	else
		freq = checkmaxmin(dev, "frequency", freq, fl, fh);
	/*
	 * Calculate multiplier.  We target a Fck of 266 MHz,
	 * allowing only even values, possibly rounded down.
	 * Multipliers > 8 must set the charge pump current.
	 */
	mul = checkmaxmin(dev, "PLL divisor", (266 / freq) &~ 1, 2, 12);
	pllconfig |= (mul / 2 - 1) << HIFN_PLL_ND_SHIFT;
	if (mul > 8)
		pllconfig |= HIFN_PLL_IS;	/* boost charge pump current */
	*pll = pllconfig;
}
350
351/*
352 * Attach an interface that successfully probed.
353 */
static int
hifn_attach(device_t dev)
{
	struct hifn_softc *sc = device_get_softc(dev);
	caddr_t kva;		/* KVA of the shared DMA descriptor area */
	int rseg, rid;
	char rbase;		/* 'K' or 'M' for the ram-size banner */
	u_int16_t ena, rev;

	sc->sc_dev = dev;

	mtx_init(&sc->sc_mtx, device_get_nameunit(dev), "hifn driver", MTX_DEF);

	/* XXX handle power management */

	/*
	 * The 7951 and 795x have a random number generator and
	 * public key support; note this.
	 */
	if (pci_get_vendor(dev) == PCI_VENDOR_HIFN &&
	    (pci_get_device(dev) == PCI_PRODUCT_HIFN_7951 ||
	     pci_get_device(dev) == PCI_PRODUCT_HIFN_7955 ||
	     pci_get_device(dev) == PCI_PRODUCT_HIFN_7956))
		sc->sc_flags = HIFN_HAS_RNG | HIFN_HAS_PUBLIC;
	/*
	 * The 7811 has a random number generator and
	 * we also note it's identity 'cuz of some quirks.
	 */
	if (pci_get_vendor(dev) == PCI_VENDOR_HIFN &&
	    pci_get_device(dev) == PCI_PRODUCT_HIFN_7811)
		sc->sc_flags |= HIFN_IS_7811 | HIFN_HAS_RNG;

	/*
	 * The 795x parts support AES.
	 */
	if (pci_get_vendor(dev) == PCI_VENDOR_HIFN &&
	    (pci_get_device(dev) == PCI_PRODUCT_HIFN_7955 ||
	     pci_get_device(dev) == PCI_PRODUCT_HIFN_7956)) {
		sc->sc_flags |= HIFN_IS_7956 | HIFN_HAS_AES;
		/*
		 * Select PLL configuration.  This depends on the
		 * bus and board design and must be manually configured
		 * if the default setting is unacceptable.
		 */
		hifn_getpllconfig(dev, &sc->sc_pllconfig);
	}

	/*
	 * Setup PCI resources. Note that we record the bus
	 * tag and handle for each register mapping, this is
	 * used by the READ_REG_0, WRITE_REG_0, READ_REG_1,
	 * and WRITE_REG_1 macros throughout the driver.
	 */
	pci_enable_busmaster(dev);

	rid = HIFN_BAR0;
	sc->sc_bar0res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
						RF_ACTIVE);
	if (sc->sc_bar0res == NULL) {
		device_printf(dev, "cannot map bar%d register space\n", 0);
		goto fail_pci;
	}
	sc->sc_st0 = rman_get_bustag(sc->sc_bar0res);
	sc->sc_sh0 = rman_get_bushandle(sc->sc_bar0res);
	sc->sc_bar0_lastreg = (bus_size_t) -1;

	rid = HIFN_BAR1;
	sc->sc_bar1res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
						RF_ACTIVE);
	if (sc->sc_bar1res == NULL) {
		device_printf(dev, "cannot map bar%d register space\n", 1);
		goto fail_io0;
	}
	sc->sc_st1 = rman_get_bustag(sc->sc_bar1res);
	sc->sc_sh1 = rman_get_bushandle(sc->sc_bar1res);
	sc->sc_bar1_lastreg = (bus_size_t) -1;

	hifn_set_retry(sc);

	/*
	 * Setup the area where the Hifn DMA's descriptors
	 * and associated data structures.
	 */
	if (bus_dma_tag_create(NULL,			/* parent */
			       1, 0,			/* alignment,boundary */
			       BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
			       BUS_SPACE_MAXADDR,	/* highaddr */
			       NULL, NULL,		/* filter, filterarg */
			       HIFN_MAX_DMALEN,		/* maxsize */
			       MAX_SCATTER,		/* nsegments */
			       HIFN_MAX_SEGLEN,		/* maxsegsize */
			       BUS_DMA_ALLOCNOW,	/* flags */
			       NULL,			/* lockfunc */
			       NULL,			/* lockarg */
			       &sc->sc_dmat)) {
		device_printf(dev, "cannot allocate DMA tag\n");
		goto fail_io1;
	}
	if (bus_dmamap_create(sc->sc_dmat, BUS_DMA_NOWAIT, &sc->sc_dmamap)) {
		device_printf(dev, "cannot create dma map\n");
		bus_dma_tag_destroy(sc->sc_dmat);
		goto fail_io1;
	}
	if (bus_dmamem_alloc(sc->sc_dmat, (void**) &kva, BUS_DMA_NOWAIT, &sc->sc_dmamap)) {
		device_printf(dev, "cannot alloc dma buffer\n");
		bus_dmamap_destroy(sc->sc_dmat, sc->sc_dmamap);
		bus_dma_tag_destroy(sc->sc_dmat);
		goto fail_io1;
	}
	if (bus_dmamap_load(sc->sc_dmat, sc->sc_dmamap, kva,
			     sizeof (*sc->sc_dma),
			     hifn_dmamap_cb, &sc->sc_dma_physaddr,
			     BUS_DMA_NOWAIT)) {
		device_printf(dev, "cannot load dma map\n");
		bus_dmamem_free(sc->sc_dmat, kva, sc->sc_dmamap);
		bus_dmamap_destroy(sc->sc_dmat, sc->sc_dmamap);
		bus_dma_tag_destroy(sc->sc_dmat);
		goto fail_io1;
	}
	sc->sc_dma = (struct hifn_dma *)kva;
	bzero(sc->sc_dma, sizeof(*sc->sc_dma));

	KASSERT(sc->sc_st0 != 0, ("hifn_attach: null bar0 tag!"));
	KASSERT(sc->sc_sh0 != 0, ("hifn_attach: null bar0 handle!"));
	KASSERT(sc->sc_st1 != 0, ("hifn_attach: null bar1 tag!"));
	KASSERT(sc->sc_sh1 != 0, ("hifn_attach: null bar1 handle!"));

	/*
	 * Reset the board and do the ``secret handshake''
	 * to enable the crypto support.  Then complete the
	 * initialization procedure by setting up the interrupt
	 * and hooking in to the system crypto support so we'll
	 * get used for system services like the crypto device,
	 * IPsec, RNG device, etc.
	 */
	hifn_reset_board(sc, 0);

	if (hifn_enable_crypto(sc) != 0) {
		device_printf(dev, "crypto enabling failed\n");
		goto fail_mem;
	}
	hifn_reset_puc(sc);

	hifn_init_dma(sc);
	hifn_init_pci_registers(sc);

	/* XXX can't dynamically determine ram type for 795x; force dram */
	if (sc->sc_flags & HIFN_IS_7956)
		sc->sc_drammodel = 1;
	else if (hifn_ramtype(sc))
		goto fail_mem;

	/* Size the on-board memory according to the detected type. */
	if (sc->sc_drammodel == 0)
		hifn_sramsize(sc);
	else
		hifn_dramsize(sc);

	/*
	 * Workaround for NetSec 7751 rev A: half ram size because two
	 * of the address lines were left floating
	 */
	if (pci_get_vendor(dev) == PCI_VENDOR_NETSEC &&
	    pci_get_device(dev) == PCI_PRODUCT_NETSEC_7751 &&
	    pci_get_revid(dev) == 0x61)	/*XXX???*/
		sc->sc_ramsize >>= 1;

	/*
	 * Arrange the interrupt line.
	 */
	rid = 0;
	sc->sc_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
					    RF_SHAREABLE|RF_ACTIVE);
	if (sc->sc_irq == NULL) {
		device_printf(dev, "could not map interrupt\n");
		goto fail_mem;
	}
	/*
	 * NB: Network code assumes we are blocked with splimp()
	 *     so make sure the IRQ is marked appropriately.
	 */
	if (bus_setup_intr(dev, sc->sc_irq, INTR_TYPE_NET | INTR_MPSAFE,
			   NULL, hifn_intr, sc, &sc->sc_intrhand)) {
		device_printf(dev, "could not setup interrupt\n");
		goto fail_intr2;
	}

	hifn_sessions(sc);

	/*
	 * NB: Keep only the low 16 bits; this masks the chip id
	 *     from the 7951.
	 */
	rev = READ_REG_1(sc, HIFN_1_REVID) & 0xffff;

	/* Pretty-print the ram size in KB or MB for the attach banner. */
	rseg = sc->sc_ramsize / 1024;
	rbase = 'K';
	if (sc->sc_ramsize >= (1024 * 1024)) {
		rbase = 'M';
		rseg /= 1024;
	}
	device_printf(sc->sc_dev, "%s, rev %u, %d%cB %cram",
		hifn_partname(sc), rev,
		rseg, rbase, sc->sc_drammodel ? 'd' : 's');
	if (sc->sc_flags & HIFN_IS_7956)
		printf(", pll=0x%x<%s clk, %ux mult>",
			sc->sc_pllconfig,
			sc->sc_pllconfig & HIFN_PLL_REF_SEL ? "ext" : "pci",
			2 + 2*((sc->sc_pllconfig & HIFN_PLL_ND) >> 11));
	printf("\n");

	sc->sc_cid = crypto_get_driverid(dev, CRYPTOCAP_F_HARDWARE);
	if (sc->sc_cid < 0) {
		device_printf(dev, "could not get crypto driver id\n");
		goto fail_intr;
	}

	/* Read the chip-enable level to decide which algorithms to offer. */
	WRITE_REG_0(sc, HIFN_0_PUCNFG,
	    READ_REG_0(sc, HIFN_0_PUCNFG) | HIFN_PUCNFG_CHIPID);
	ena = READ_REG_0(sc, HIFN_0_PUSTAT) & HIFN_PUSTAT_CHIPENA;

	switch (ena) {
	case HIFN_PUSTAT_ENA_2:
		crypto_register(sc->sc_cid, CRYPTO_3DES_CBC, 0, 0);
		crypto_register(sc->sc_cid, CRYPTO_ARC4, 0, 0);
		if (sc->sc_flags & HIFN_HAS_AES)
			crypto_register(sc->sc_cid, CRYPTO_AES_CBC, 0, 0);
		/*FALLTHROUGH*/
	case HIFN_PUSTAT_ENA_1:
		crypto_register(sc->sc_cid, CRYPTO_MD5, 0, 0);
		crypto_register(sc->sc_cid, CRYPTO_SHA1, 0, 0);
		crypto_register(sc->sc_cid, CRYPTO_MD5_HMAC, 0, 0);
		crypto_register(sc->sc_cid, CRYPTO_SHA1_HMAC, 0, 0);
		crypto_register(sc->sc_cid, CRYPTO_DES_CBC, 0, 0);
		break;
	}

	bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	if (sc->sc_flags & (HIFN_HAS_PUBLIC | HIFN_HAS_RNG))
		hifn_init_pubrng(sc);

	callout_init(&sc->sc_tickto, CALLOUT_MPSAFE);
	callout_reset(&sc->sc_tickto, hz, hifn_tick, sc);

	return (0);

	/* Error unwind: release resources in reverse order of acquisition. */
fail_intr:
	bus_teardown_intr(dev, sc->sc_irq, sc->sc_intrhand);
fail_intr2:
	/* XXX don't store rid */
	bus_release_resource(dev, SYS_RES_IRQ, 0, sc->sc_irq);
fail_mem:
	bus_dmamap_unload(sc->sc_dmat, sc->sc_dmamap);
	bus_dmamem_free(sc->sc_dmat, sc->sc_dma, sc->sc_dmamap);
	bus_dmamap_destroy(sc->sc_dmat, sc->sc_dmamap);
	bus_dma_tag_destroy(sc->sc_dmat);

	/* Turn off DMA polling */
	WRITE_REG_1(sc, HIFN_1_DMA_CNFG, HIFN_DMACNFG_MSTRESET |
	    HIFN_DMACNFG_DMARESET | HIFN_DMACNFG_MODE);
fail_io1:
	bus_release_resource(dev, SYS_RES_MEMORY, HIFN_BAR1, sc->sc_bar1res);
fail_io0:
	bus_release_resource(dev, SYS_RES_MEMORY, HIFN_BAR0, sc->sc_bar0res);
fail_pci:
	mtx_destroy(&sc->sc_mtx);
	return (ENXIO);
}
623
624/*
625 * Detach an interface that successfully probed.
626 */
static int
hifn_detach(device_t dev)
{
	struct hifn_softc *sc = device_get_softc(dev);

	KASSERT(sc != NULL, ("hifn_detach: null software carrier!"));

	/* disable interrupts */
	WRITE_REG_1(sc, HIFN_1_DMA_IER, 0);

	/*XXX other resources */
	callout_stop(&sc->sc_tickto);
	callout_stop(&sc->sc_rngto);
#ifdef HIFN_RNDTEST
	if (sc->sc_rndtest)
		rndtest_detach(sc->sc_rndtest);
#endif

	/* Turn off DMA polling */
	WRITE_REG_1(sc, HIFN_1_DMA_CNFG, HIFN_DMACNFG_MSTRESET |
	    HIFN_DMACNFG_DMARESET | HIFN_DMACNFG_MODE);

	/* Unhook from opencrypto before tearing down resources. */
	crypto_unregister_all(sc->sc_cid);

	bus_generic_detach(dev);	/*XXX should be no children, right? */

	bus_teardown_intr(dev, sc->sc_irq, sc->sc_intrhand);
	/* XXX don't store rid */
	bus_release_resource(dev, SYS_RES_IRQ, 0, sc->sc_irq);

	/* Release the DMA descriptor area set up in hifn_attach(). */
	bus_dmamap_unload(sc->sc_dmat, sc->sc_dmamap);
	bus_dmamem_free(sc->sc_dmat, sc->sc_dma, sc->sc_dmamap);
	bus_dmamap_destroy(sc->sc_dmat, sc->sc_dmamap);
	bus_dma_tag_destroy(sc->sc_dmat);

	bus_release_resource(dev, SYS_RES_MEMORY, HIFN_BAR1, sc->sc_bar1res);
	bus_release_resource(dev, SYS_RES_MEMORY, HIFN_BAR0, sc->sc_bar0res);

	mtx_destroy(&sc->sc_mtx);

	return (0);
}
669
670/*
671 * Stop all chip I/O so that the kernel's probe routines don't
672 * get confused by errant DMAs when rebooting.
673 */
static int
hifn_shutdown(device_t dev)
{
	/* Chip stop is not implemented yet; shutdown is currently a no-op. */
#ifdef notyet
	hifn_stop(device_get_softc(dev));
#endif
	return (0);
}
682
683/*
684 * Device suspend routine.  Stop the interface and save some PCI
685 * settings in case the BIOS doesn't restore them properly on
686 * resume.
687 */
static int
hifn_suspend(device_t dev)
{
	struct hifn_softc *sc = device_get_softc(dev);
#ifdef notyet
	hifn_stop(sc);
#endif
	/* Record suspended state; actual chip stop is not implemented yet. */
	sc->sc_suspended = 1;

	return (0);
}
699
700/*
701 * Device resume routine.  Restore some PCI settings in case the BIOS
702 * doesn't, re-enable busmastering, and restart the interface if
703 * appropriate.
704 */
static int
hifn_resume(device_t dev)
{
	struct hifn_softc *sc = device_get_softc(dev);
#ifdef notyet
        /* reinitialize interface if necessary */
        if (ifp->if_flags & IFF_UP)
                rl_init(sc);
#endif
	/* Clear suspended state; chip reinitialization is not implemented. */
	sc->sc_suspended = 0;

	return (0);
}
718
/*
 * Bring up the public-key engine and/or RNG on parts that have them.
 * Selects the entropy harvesting callback, resets the PK/RNG engine on
 * non-7811 parts, enables the RNG and starts its polling callout, and
 * enables public-key completion interrupts.  Returns 0 on success,
 * 1 if the public key engine failed to come out of reset.
 */
static int
hifn_init_pubrng(struct hifn_softc *sc)
{
	u_int32_t r;
	int i;

#ifdef HIFN_RNDTEST
	sc->sc_rndtest = rndtest_attach(sc->sc_dev);
	if (sc->sc_rndtest)
		sc->sc_harvest = rndtest_harvest;
	else
		sc->sc_harvest = default_harvest;
#else
	sc->sc_harvest = default_harvest;
#endif
	if ((sc->sc_flags & HIFN_IS_7811) == 0) {
		/* Reset 7951 public key/rng engine */
		WRITE_REG_1(sc, HIFN_1_PUB_RESET,
		    READ_REG_1(sc, HIFN_1_PUB_RESET) | HIFN_PUBRST_RESET);

		/* Poll up to ~100ms for the reset bit to self-clear. */
		for (i = 0; i < 100; i++) {
			DELAY(1000);
			if ((READ_REG_1(sc, HIFN_1_PUB_RESET) &
			    HIFN_PUBRST_RESET) == 0)
				break;
		}

		if (i == 100) {
			device_printf(sc->sc_dev, "public key init failed\n");
			return (1);
		}
	}

	/* Enable the rng, if available */
	if (sc->sc_flags & HIFN_HAS_RNG) {
		if (sc->sc_flags & HIFN_IS_7811) {
			/* 7811: disable, reconfigure, then re-enable RNG. */
			r = READ_REG_1(sc, HIFN_1_7811_RNGENA);
			if (r & HIFN_7811_RNGENA_ENA) {
				r &= ~HIFN_7811_RNGENA_ENA;
				WRITE_REG_1(sc, HIFN_1_7811_RNGENA, r);
			}
			WRITE_REG_1(sc, HIFN_1_7811_RNGCFG,
			    HIFN_7811_RNGCFG_DEFL);
			r |= HIFN_7811_RNGENA_ENA;
			WRITE_REG_1(sc, HIFN_1_7811_RNGENA, r);
		} else
			WRITE_REG_1(sc, HIFN_1_RNG_CONFIG,
			    READ_REG_1(sc, HIFN_1_RNG_CONFIG) |
			    HIFN_RNGCFG_ENA);

		/* First read is discarded; poll roughly 100 times/second. */
		sc->sc_rngfirst = 1;
		if (hz >= 100)
			sc->sc_rnghz = hz / 100;
		else
			sc->sc_rnghz = 1;
		callout_init(&sc->sc_rngto, CALLOUT_MPSAFE);
		callout_reset(&sc->sc_rngto, sc->sc_rnghz, hifn_rng, sc);
	}

	/* Enable public key engine, if available */
	if (sc->sc_flags & HIFN_HAS_PUBLIC) {
		WRITE_REG_1(sc, HIFN_1_PUB_IEN, HIFN_PUBIEN_DONE);
		sc->sc_dmaier |= HIFN_DMAIER_PUBDONE;
		WRITE_REG_1(sc, HIFN_1_DMA_IER, sc->sc_dmaier);
#ifdef HIFN_VULCANDEV
		sc->sc_pkdev = make_dev(&vulcanpk_cdevsw, 0,
					UID_ROOT, GID_WHEEL, 0666,
					"vulcanpk");
		sc->sc_pkdev->si_drv1 = sc;
#endif
	}

	return (0);
}
793
794static void
795hifn_rng(void *vsc)
796{
797#define	RANDOM_BITS(n)	(n)*sizeof (u_int32_t), (n)*sizeof (u_int32_t)*NBBY, 0
798	struct hifn_softc *sc = vsc;
799	u_int32_t sts, num[2];
800	int i;
801
802	if (sc->sc_flags & HIFN_IS_7811) {
803		/* ONLY VALID ON 7811!!!! */
804		for (i = 0; i < 5; i++) {
805			sts = READ_REG_1(sc, HIFN_1_7811_RNGSTS);
806			if (sts & HIFN_7811_RNGSTS_UFL) {
807				device_printf(sc->sc_dev,
808					      "RNG underflow: disabling\n");
809				return;
810			}
811			if ((sts & HIFN_7811_RNGSTS_RDY) == 0)
812				break;
813
814			/*
815			 * There are at least two words in the RNG FIFO
816			 * at this point.
817			 */
818			num[0] = READ_REG_1(sc, HIFN_1_7811_RNGDAT);
819			num[1] = READ_REG_1(sc, HIFN_1_7811_RNGDAT);
820			/* NB: discard first data read */
821			if (sc->sc_rngfirst)
822				sc->sc_rngfirst = 0;
823			else
824				(*sc->sc_harvest)(sc->sc_rndtest,
825					num, sizeof (num));
826		}
827	} else {
828		num[0] = READ_REG_1(sc, HIFN_1_RNG_DATA);
829
830		/* NB: discard first data read */
831		if (sc->sc_rngfirst)
832			sc->sc_rngfirst = 0;
833		else
834			(*sc->sc_harvest)(sc->sc_rndtest,
835				num, sizeof (num[0]));
836	}
837
838	callout_reset(&sc->sc_rngto, sc->sc_rnghz, hifn_rng, sc);
839#undef RANDOM_BITS
840}
841
842static void
843hifn_puc_wait(struct hifn_softc *sc)
844{
845	int i;
846	int reg = HIFN_0_PUCTRL;
847
848	if (sc->sc_flags & HIFN_IS_7956) {
849		reg = HIFN_0_PUCTRL2;
850	}
851
852	for (i = 5000; i > 0; i--) {
853		DELAY(1);
854		if (!(READ_REG_0(sc, reg) & HIFN_PUCTRL_RESET))
855			break;
856	}
857	if (!i)
858		device_printf(sc->sc_dev, "proc unit did not reset\n");
859}
860
861/*
862 * Reset the processing unit.
863 */
864static void
865hifn_reset_puc(struct hifn_softc *sc)
866{
867	/* Reset processing unit */
868	int reg = HIFN_0_PUCTRL;
869
870	if (sc->sc_flags & HIFN_IS_7956) {
871		reg = HIFN_0_PUCTRL2;
872	}
873	WRITE_REG_0(sc, reg, HIFN_PUCTRL_DMAENA);
874
875	hifn_puc_wait(sc);
876}
877
878/*
879 * Set the Retry and TRDY registers; note that we set them to
880 * zero because the 7811 locks up when forced to retry (section
881 * 3.6 of "Specification Update SU-0014-04".  Not clear if we
882 * should do this for all Hifn parts, but it doesn't seem to hurt.
883 */
static void
hifn_set_retry(struct hifn_softc *sc)
{
	/* NB: RETRY only responds to 8-bit reads/writes */
	/* Zero both timeouts; see the 7811 errata note in the block above. */
	pci_write_config(sc->sc_dev, HIFN_RETRY_TIMEOUT, 0, 1);
	pci_write_config(sc->sc_dev, HIFN_TRDY_TIMEOUT, 0, 1);
}
891
892/*
893 * Resets the board.  Values in the regesters are left as is
894 * from the reset (i.e. initial values are assigned elsewhere).
895 */
static void
hifn_reset_board(struct hifn_softc *sc, int full)
{
	u_int32_t reg;

	/*
	 * Set polling in the DMA configuration register to zero.  0x7 avoids
	 * resetting the board and zeros out the other fields.
	 */
	WRITE_REG_1(sc, HIFN_1_DMA_CNFG, HIFN_DMACNFG_MSTRESET |
	    HIFN_DMACNFG_DMARESET | HIFN_DMACNFG_MODE);

	/*
	 * Now that polling has been disabled, we have to wait 1 ms
	 * before resetting the board.
	 */
	DELAY(1000);

	/* Reset the DMA unit */
	if (full) {
		/* Full reset: drop MSTRESET/DMARESET as well. */
		WRITE_REG_1(sc, HIFN_1_DMA_CNFG, HIFN_DMACNFG_MODE);
		DELAY(1000);
	} else {
		/* Light reset: keep master reset asserted, reset the PU. */
		WRITE_REG_1(sc, HIFN_1_DMA_CNFG,
		    HIFN_DMACNFG_MODE | HIFN_DMACNFG_MSTRESET);
		hifn_reset_puc(sc);
	}

	KASSERT(sc->sc_dma != NULL, ("hifn_reset_board: null DMA tag!"));
	/* Descriptor rings are invalid after reset; clear them. */
	bzero(sc->sc_dma, sizeof(*sc->sc_dma));

	/* Bring dma unit out of reset */
	WRITE_REG_1(sc, HIFN_1_DMA_CNFG, HIFN_DMACNFG_MSTRESET |
	    HIFN_DMACNFG_DMARESET | HIFN_DMACNFG_MODE);

	hifn_puc_wait(sc);
	hifn_set_retry(sc);

	if (sc->sc_flags & HIFN_IS_7811) {
		/* 7811: wait up to ~1s for context RAM initialization. */
		for (reg = 0; reg < 1000; reg++) {
			if (READ_REG_1(sc, HIFN_1_7811_MIPSRST) &
			    HIFN_MIPSRST_CRAMINIT)
				break;
			DELAY(1000);
		}
		if (reg == 1000)
			printf(": cram init timeout\n");
	} else {
	  /* set up DMA configuration register #2 */
	  /* turn off all PK and BAR0 swaps */
	  WRITE_REG_1(sc, HIFN_1_DMA_CNFG2,
		      (3 << HIFN_DMACNFG2_INIT_WRITE_BURST_SHIFT)|
		      (3 << HIFN_DMACNFG2_INIT_READ_BURST_SHIFT)|
		      (2 << HIFN_DMACNFG2_TGT_WRITE_BURST_SHIFT)|
		      (2 << HIFN_DMACNFG2_TGT_READ_BURST_SHIFT));
	}

}
954
/*
 * Advance the unlock-handshake signature LFSR by cnt steps.
 *
 * Each step computes the parity of the bits of 'a' selected by the tap
 * mask 0x80080125 and shifts it into the low bit while shifting 'a'
 * left by one.  Returns the register value after cnt steps.
 *
 * Fix: the loop index was a signed int compared against the unsigned
 * cnt; it is now u_int so the comparison involves no implicit
 * signed/unsigned conversion.
 */
static u_int32_t
hifn_next_signature(u_int32_t a, u_int cnt)
{
	u_int i;
	u_int32_t v;

	for (i = 0; i < cnt; i++) {

		/* parity of the tapped bits, folded down into bit 0 */
		v = a & 0x80080125;
		v ^= v >> 16;
		v ^= v >> 8;
		v ^= v >> 4;
		v ^= v >> 2;
		v ^= v >> 1;

		/* shift the parity bit into the register */
		a = (v & 1) ^ (a << 1);
	}

	return (a);
}
976
/*
 * Table mapping PCI vendor/product pairs to the per-card unlock key
 * (card_id) consumed by hifn_enable_crypto().  All known cards ship
 * with an all-zero key.
 */
struct pci2id {
	u_short		pci_vendor;	/* PCI vendor ID */
	u_short		pci_prod;	/* PCI product (device) ID */
	char		card_id[13];	/* 13-byte crypto unlock key */
};
static struct pci2id pci2id[] = {
	{
		PCI_VENDOR_HIFN,
		PCI_PRODUCT_HIFN_7951,
		{ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
		  0x00, 0x00, 0x00, 0x00, 0x00 }
	}, {
		PCI_VENDOR_HIFN,
		PCI_PRODUCT_HIFN_7955,
		{ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
		  0x00, 0x00, 0x00, 0x00, 0x00 }
	}, {
		PCI_VENDOR_HIFN,
		PCI_PRODUCT_HIFN_7956,
		{ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
		  0x00, 0x00, 0x00, 0x00, 0x00 }
	}, {
		PCI_VENDOR_NETSEC,
		PCI_PRODUCT_NETSEC_7751,
		{ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
		  0x00, 0x00, 0x00, 0x00, 0x00 }
	}, {
		PCI_VENDOR_INVERTEX,
		PCI_PRODUCT_INVERTEX_AEON,
		{ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
		  0x00, 0x00, 0x00, 0x00, 0x00 }
	}, {
		PCI_VENDOR_HIFN,
		PCI_PRODUCT_HIFN_7811,
		{ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
		  0x00, 0x00, 0x00, 0x00, 0x00 }
	}, {
		/*
		 * Other vendors share this PCI ID as well, such as
		 * http://www.powercrypt.com, and obviously they also
		 * use the same key.
		 */
		PCI_VENDOR_HIFN,
		PCI_PRODUCT_HIFN_7751,
		{ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
		  0x00, 0x00, 0x00, 0x00, 0x00 }
	},
};
1025
1026/*
1027 * Checks to see if crypto is already enabled.  If crypto isn't enable,
1028 * "hifn_enable_crypto" is called to enable it.  The check is important,
1029 * as enabling crypto twice will lock the board.
1030 */
1031static int
1032hifn_enable_crypto(struct hifn_softc *sc)
1033{
1034	u_int32_t dmacfg, ramcfg, encl, addr, i;
1035	char *offtbl = NULL;
1036
1037	for (i = 0; i < sizeof(pci2id)/sizeof(pci2id[0]); i++) {
1038		if (pci2id[i].pci_vendor == pci_get_vendor(sc->sc_dev) &&
1039		    pci2id[i].pci_prod == pci_get_device(sc->sc_dev)) {
1040			offtbl = pci2id[i].card_id;
1041			break;
1042		}
1043	}
1044	if (offtbl == NULL) {
1045		device_printf(sc->sc_dev, "Unknown card!\n");
1046		return (1);
1047	}
1048
1049	ramcfg = READ_REG_0(sc, HIFN_0_PUCNFG);
1050	dmacfg = READ_REG_1(sc, HIFN_1_DMA_CNFG);
1051
1052	/*
1053	 * The RAM config register's encrypt level bit needs to be set before
1054	 * every read performed on the encryption level register.
1055	 */
1056	WRITE_REG_0(sc, HIFN_0_PUCNFG, ramcfg | HIFN_PUCNFG_CHIPID);
1057
1058	encl = READ_REG_0(sc, HIFN_0_PUSTAT) & HIFN_PUSTAT_CHIPENA;
1059
1060	/*
1061	 * Make sure we don't re-unlock.  Two unlocks kills chip until the
1062	 * next reboot.
1063	 */
1064	if (encl == HIFN_PUSTAT_ENA_1 || encl == HIFN_PUSTAT_ENA_2) {
1065#ifdef HIFN_DEBUG
1066		if (hifn_debug)
1067			device_printf(sc->sc_dev,
1068			    "Strong crypto already enabled!\n");
1069#endif
1070		goto report;
1071	}
1072
1073	if (encl != 0 && encl != HIFN_PUSTAT_ENA_0) {
1074#ifdef HIFN_DEBUG
1075		if (hifn_debug)
1076			device_printf(sc->sc_dev,
1077			      "Unknown encryption level 0x%x\n", encl);
1078#endif
1079		return 1;
1080	}
1081
1082	WRITE_REG_1(sc, HIFN_1_DMA_CNFG, HIFN_DMACNFG_UNLOCK |
1083	    HIFN_DMACNFG_MSTRESET | HIFN_DMACNFG_DMARESET | HIFN_DMACNFG_MODE);
1084	DELAY(1000);
1085	addr = READ_REG_1(sc, HIFN_UNLOCK_SECRET1);
1086	DELAY(1000);
1087	WRITE_REG_1(sc, HIFN_UNLOCK_SECRET2, 0);
1088	DELAY(1000);
1089
1090	for (i = 0; i <= 12; i++) {
1091		addr = hifn_next_signature(addr, offtbl[i] + 0x101);
1092		WRITE_REG_1(sc, HIFN_UNLOCK_SECRET2, addr);
1093
1094		DELAY(1000);
1095	}
1096
1097	WRITE_REG_0(sc, HIFN_0_PUCNFG, ramcfg | HIFN_PUCNFG_CHIPID);
1098	encl = READ_REG_0(sc, HIFN_0_PUSTAT) & HIFN_PUSTAT_CHIPENA;
1099
1100#ifdef HIFN_DEBUG
1101	if (hifn_debug) {
1102		if (encl != HIFN_PUSTAT_ENA_1 && encl != HIFN_PUSTAT_ENA_2)
1103			device_printf(sc->sc_dev, "Engine is permanently "
1104				"locked until next system reset!\n");
1105		else
1106			device_printf(sc->sc_dev, "Engine enabled "
1107				"successfully!\n");
1108	}
1109#endif
1110
1111report:
1112	WRITE_REG_0(sc, HIFN_0_PUCNFG, ramcfg);
1113	WRITE_REG_1(sc, HIFN_1_DMA_CNFG, dmacfg);
1114
1115	switch (encl) {
1116	case HIFN_PUSTAT_ENA_1:
1117	case HIFN_PUSTAT_ENA_2:
1118		break;
1119	case HIFN_PUSTAT_ENA_0:
1120	default:
1121		device_printf(sc->sc_dev, "disabled");
1122		break;
1123	}
1124
1125	return 0;
1126}
1127
1128/*
1129 * Give initial values to the registers listed in the "Register Space"
1130 * section of the HIFN Software Development reference manual.
1131 */
static void
hifn_init_pci_registers(struct hifn_softc *sc)
{
	/* write fixed values needed by the Initialization registers */
	WRITE_REG_0(sc, HIFN_0_PUCTRL, HIFN_PUCTRL_DMAENA);
	WRITE_REG_0(sc, HIFN_0_FIFOCNFG, HIFN_FIFOCNFG_THRESHOLD);
	WRITE_REG_0(sc, HIFN_0_PUIER, HIFN_PUIER_DSTOVER);

	/* write all 4 ring address registers */
	WRITE_REG_1(sc, HIFN_1_DMA_CRAR, sc->sc_dma_physaddr +
	    offsetof(struct hifn_dma, cmdr[0]));
	WRITE_REG_1(sc, HIFN_1_DMA_SRAR, sc->sc_dma_physaddr +
	    offsetof(struct hifn_dma, srcr[0]));
	WRITE_REG_1(sc, HIFN_1_DMA_DRAR, sc->sc_dma_physaddr +
	    offsetof(struct hifn_dma, dstr[0]));
	WRITE_REG_1(sc, HIFN_1_DMA_RRAR, sc->sc_dma_physaddr +
	    offsetof(struct hifn_dma, resr[0]));

	DELAY(2000);

	/*
	 * Write status register: disable all four DMA engines and clear
	 * every pending status bit (abort/done/last/wait/over per ring),
	 * plus the 7811-only illegal read/write bits where applicable.
	 */
	WRITE_REG_1(sc, HIFN_1_DMA_CSR,
	    HIFN_DMACSR_D_CTRL_DIS | HIFN_DMACSR_R_CTRL_DIS |
	    HIFN_DMACSR_S_CTRL_DIS | HIFN_DMACSR_C_CTRL_DIS |
	    HIFN_DMACSR_D_ABORT | HIFN_DMACSR_D_DONE | HIFN_DMACSR_D_LAST |
	    HIFN_DMACSR_D_WAIT | HIFN_DMACSR_D_OVER |
	    HIFN_DMACSR_R_ABORT | HIFN_DMACSR_R_DONE | HIFN_DMACSR_R_LAST |
	    HIFN_DMACSR_R_WAIT | HIFN_DMACSR_R_OVER |
	    HIFN_DMACSR_S_ABORT | HIFN_DMACSR_S_DONE | HIFN_DMACSR_S_LAST |
	    HIFN_DMACSR_S_WAIT |
	    HIFN_DMACSR_C_ABORT | HIFN_DMACSR_C_DONE | HIFN_DMACSR_C_LAST |
	    HIFN_DMACSR_C_WAIT |
	    HIFN_DMACSR_ENGINE |
	    ((sc->sc_flags & HIFN_HAS_PUBLIC) ?
		HIFN_DMACSR_PUBDONE : 0) |
	    ((sc->sc_flags & HIFN_IS_7811) ?
		HIFN_DMACSR_ILLW | HIFN_DMACSR_ILLR : 0));

	/* No engine is running yet; select the interrupts we care about. */
	sc->sc_d_busy = sc->sc_r_busy = sc->sc_s_busy = sc->sc_c_busy = 0;
	sc->sc_dmaier |= HIFN_DMAIER_R_DONE | HIFN_DMAIER_C_ABORT |
	    HIFN_DMAIER_D_OVER | HIFN_DMAIER_R_OVER |
	    HIFN_DMAIER_S_ABORT | HIFN_DMAIER_D_ABORT | HIFN_DMAIER_R_ABORT |
	    ((sc->sc_flags & HIFN_IS_7811) ?
		HIFN_DMAIER_ILLW | HIFN_DMAIER_ILLR : 0);
	/* command-wait interrupt is only enabled on demand (see hifn_crypto) */
	sc->sc_dmaier &= ~HIFN_DMAIER_C_WAIT;
	WRITE_REG_1(sc, HIFN_1_DMA_IER, sc->sc_dmaier);


	if (sc->sc_flags & HIFN_IS_7956) {
		u_int32_t pll;

		WRITE_REG_0(sc, HIFN_0_PUCNFG, HIFN_PUCNFG_COMPSING |
		    HIFN_PUCNFG_TCALLPHASES |
		    HIFN_PUCNFG_TCDRVTOTEM | HIFN_PUCNFG_BUS32);

		/* turn off the clocks and insure bypass is set */
		pll = READ_REG_1(sc, HIFN_1_PLL);
		pll = (pll &~ (HIFN_PLL_PK_CLK_SEL | HIFN_PLL_PE_CLK_SEL))
		  | HIFN_PLL_BP | HIFN_PLL_MBSET;
		WRITE_REG_1(sc, HIFN_1_PLL, pll);
		DELAY(10*1000);		/* 10ms */

		/* change configuration */
		pll = (pll &~ HIFN_PLL_CONFIG) | sc->sc_pllconfig;
		WRITE_REG_1(sc, HIFN_1_PLL, pll);
		DELAY(10*1000);		/* 10ms */

		/* disable bypass */
		pll &= ~HIFN_PLL_BP;
		WRITE_REG_1(sc, HIFN_1_PLL, pll);
		/* enable clocks with new configuration */
		pll |= HIFN_PLL_PK_CLK_SEL | HIFN_PLL_PE_CLK_SEL;
		WRITE_REG_1(sc, HIFN_1_PLL, pll);
	} else {
		/* older parts: select SRAM or DRAM model probed earlier */
		WRITE_REG_0(sc, HIFN_0_PUCNFG, HIFN_PUCNFG_COMPSING |
		    HIFN_PUCNFG_DRFR_128 | HIFN_PUCNFG_TCALLPHASES |
		    HIFN_PUCNFG_TCDRVTOTEM | HIFN_PUCNFG_BUS32 |
		    (sc->sc_drammodel ? HIFN_PUCNFG_DRAM : HIFN_PUCNFG_SRAM));
	}

	/* clear destination-overrun status, then configure DMA polling */
	WRITE_REG_0(sc, HIFN_0_PUISR, HIFN_PUISR_DSTOVER);
	WRITE_REG_1(sc, HIFN_1_DMA_CNFG, HIFN_DMACNFG_MSTRESET |
	    HIFN_DMACNFG_DMARESET | HIFN_DMACNFG_MODE | HIFN_DMACNFG_LAST |
	    ((HIFN_POLL_FREQUENCY << 16 ) & HIFN_DMACNFG_POLLFREQ) |
	    ((HIFN_POLL_SCALAR << 8) & HIFN_DMACNFG_POLLINVAL));
}
1218
1219/*
1220 * The maximum number of sessions supported by the card
1221 * is dependent on the amount of context ram, which
1222 * encryption algorithms are enabled, and how compression
1223 * is configured.  This should be configured before this
1224 * routine is called.
1225 */
1226static void
1227hifn_sessions(struct hifn_softc *sc)
1228{
1229	u_int32_t pucnfg;
1230	int ctxsize;
1231
1232	pucnfg = READ_REG_0(sc, HIFN_0_PUCNFG);
1233
1234	if (pucnfg & HIFN_PUCNFG_COMPSING) {
1235		if (pucnfg & HIFN_PUCNFG_ENCCNFG)
1236			ctxsize = 128;
1237		else
1238			ctxsize = 512;
1239		/*
1240		 * 7955/7956 has internal context memory of 32K
1241		 */
1242		if (sc->sc_flags & HIFN_IS_7956)
1243			sc->sc_maxses = 32768 / ctxsize;
1244		else
1245			sc->sc_maxses = 1 +
1246			    ((sc->sc_ramsize - 32768) / ctxsize);
1247	} else
1248		sc->sc_maxses = sc->sc_ramsize / 16384;
1249
1250	if (sc->sc_maxses > 2048)
1251		sc->sc_maxses = 2048;
1252}
1253
1254/*
1255 * Determine ram type (sram or dram).  Board should be just out of a reset
1256 * state when this is called.
1257 */
1258static int
1259hifn_ramtype(struct hifn_softc *sc)
1260{
1261	u_int8_t data[8], dataexpect[8];
1262	int i;
1263
1264	for (i = 0; i < sizeof(data); i++)
1265		data[i] = dataexpect[i] = 0x55;
1266	if (hifn_writeramaddr(sc, 0, data))
1267		return (-1);
1268	if (hifn_readramaddr(sc, 0, data))
1269		return (-1);
1270	if (bcmp(data, dataexpect, sizeof(data)) != 0) {
1271		sc->sc_drammodel = 1;
1272		return (0);
1273	}
1274
1275	for (i = 0; i < sizeof(data); i++)
1276		data[i] = dataexpect[i] = 0xaa;
1277	if (hifn_writeramaddr(sc, 0, data))
1278		return (-1);
1279	if (hifn_readramaddr(sc, 0, data))
1280		return (-1);
1281	if (bcmp(data, dataexpect, sizeof(data)) != 0) {
1282		sc->sc_drammodel = 1;
1283		return (0);
1284	}
1285
1286	return (0);
1287}
1288
1289#define	HIFN_SRAM_MAX		(32 << 20)
1290#define	HIFN_SRAM_STEP_SIZE	16384
1291#define	HIFN_SRAM_GRANULARITY	(HIFN_SRAM_MAX / HIFN_SRAM_STEP_SIZE)
1292
static int
hifn_sramsize(struct hifn_softc *sc)
{
	u_int32_t a;
	u_int8_t data[8];
	u_int8_t dataexpect[sizeof(data)];
	int32_t i;

	/* filler for the bytes not overwritten by the marker bcopy below */
	for (i = 0; i < sizeof(data); i++)
		data[i] = dataexpect[i] = i ^ 0x5a;

	/*
	 * Write a unique marker (the step index, native byte order) at the
	 * start of each 16KB step, from the top of the probe range down.
	 * On a smaller part, aliased high addresses get overwritten by the
	 * later (lower) markers, so the verify pass below stops at the
	 * true size.
	 */
	for (i = HIFN_SRAM_GRANULARITY - 1; i >= 0; i--) {
		a = i * HIFN_SRAM_STEP_SIZE;
		bcopy(&i, data, sizeof(i));
		hifn_writeramaddr(sc, a, data);
	}

	/*
	 * Read the markers back from the bottom up; sc_ramsize ends up
	 * just past the highest step that verified intact.
	 */
	for (i = 0; i < HIFN_SRAM_GRANULARITY; i++) {
		a = i * HIFN_SRAM_STEP_SIZE;
		bcopy(&i, dataexpect, sizeof(i));
		if (hifn_readramaddr(sc, a, data) < 0)
			return (0);
		if (bcmp(data, dataexpect, sizeof(data)) != 0)
			return (0);
		sc->sc_ramsize = a + HIFN_SRAM_STEP_SIZE;
	}

	return (0);
}
1322
1323/*
1324 * XXX For dram boards, one should really try all of the
1325 * HIFN_PUCNFG_DSZ_*'s.  This just assumes that PUCNFG
1326 * is already set up correctly.
1327 */
1328static int
1329hifn_dramsize(struct hifn_softc *sc)
1330{
1331	u_int32_t cnfg;
1332
1333	if (sc->sc_flags & HIFN_IS_7956) {
1334		/*
1335		 * 7955/7956 have a fixed internal ram of only 32K.
1336		 */
1337		sc->sc_ramsize = 32768;
1338	} else {
1339		cnfg = READ_REG_0(sc, HIFN_0_PUCNFG) &
1340		    HIFN_PUCNFG_DRAMMASK;
1341		sc->sc_ramsize = 1 << ((cnfg >> 13) + 18);
1342	}
1343	return (0);
1344}
1345
/*
 * Reserve the next descriptor slot on each of the four rings (command,
 * source, destination, result), returning the indices through the out
 * parameters.  When a producer index reaches the end of its ring, the
 * extra descriptor at the ring end is rewritten as a JUMP back to entry
 * 0 and the index wraps.  The "k" index is kept in step with the
 * producer index for each ring.
 */
static void
hifn_alloc_slot(struct hifn_softc *sc, int *cmdp, int *srcp, int *dstp, int *resp)
{
	struct hifn_dma *dma = sc->sc_dma;

	/* command ring */
	if (sc->sc_cmdi == HIFN_D_CMD_RSIZE) {
		sc->sc_cmdi = 0;
		dma->cmdr[HIFN_D_CMD_RSIZE].l = htole32(HIFN_D_VALID |
		    HIFN_D_JUMP | HIFN_D_MASKDONEIRQ);
		HIFN_CMDR_SYNC(sc, HIFN_D_CMD_RSIZE,
		    BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
	}
	*cmdp = sc->sc_cmdi++;
	sc->sc_cmdk = sc->sc_cmdi;

	/* source ring */
	if (sc->sc_srci == HIFN_D_SRC_RSIZE) {
		sc->sc_srci = 0;
		dma->srcr[HIFN_D_SRC_RSIZE].l = htole32(HIFN_D_VALID |
		    HIFN_D_JUMP | HIFN_D_MASKDONEIRQ);
		HIFN_SRCR_SYNC(sc, HIFN_D_SRC_RSIZE,
		    BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
	}
	*srcp = sc->sc_srci++;
	sc->sc_srck = sc->sc_srci;

	/* destination ring */
	if (sc->sc_dsti == HIFN_D_DST_RSIZE) {
		sc->sc_dsti = 0;
		dma->dstr[HIFN_D_DST_RSIZE].l = htole32(HIFN_D_VALID |
		    HIFN_D_JUMP | HIFN_D_MASKDONEIRQ);
		HIFN_DSTR_SYNC(sc, HIFN_D_DST_RSIZE,
		    BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
	}
	*dstp = sc->sc_dsti++;
	sc->sc_dstk = sc->sc_dsti;

	/* result ring */
	if (sc->sc_resi == HIFN_D_RES_RSIZE) {
		sc->sc_resi = 0;
		dma->resr[HIFN_D_RES_RSIZE].l = htole32(HIFN_D_VALID |
		    HIFN_D_JUMP | HIFN_D_MASKDONEIRQ);
		HIFN_RESR_SYNC(sc, HIFN_D_RES_RSIZE,
		    BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
	}
	*resp = sc->sc_resi++;
	sc->sc_resk = sc->sc_resi;
}
1391
1392static int
1393hifn_writeramaddr(struct hifn_softc *sc, int addr, u_int8_t *data)
1394{
1395	struct hifn_dma *dma = sc->sc_dma;
1396	hifn_base_command_t wc;
1397	const u_int32_t masks = HIFN_D_VALID | HIFN_D_LAST | HIFN_D_MASKDONEIRQ;
1398	int r, cmdi, resi, srci, dsti;
1399
1400	wc.masks = htole16(3 << 13);
1401	wc.session_num = htole16(addr >> 14);
1402	wc.total_source_count = htole16(8);
1403	wc.total_dest_count = htole16(addr & 0x3fff);
1404
1405	hifn_alloc_slot(sc, &cmdi, &srci, &dsti, &resi);
1406
1407	WRITE_REG_1(sc, HIFN_1_DMA_CSR,
1408	    HIFN_DMACSR_C_CTRL_ENA | HIFN_DMACSR_S_CTRL_ENA |
1409	    HIFN_DMACSR_D_CTRL_ENA | HIFN_DMACSR_R_CTRL_ENA);
1410
1411	/* build write command */
1412	bzero(dma->command_bufs[cmdi], HIFN_MAX_COMMAND);
1413	*(hifn_base_command_t *)dma->command_bufs[cmdi] = wc;
1414	bcopy(data, &dma->test_src, sizeof(dma->test_src));
1415
1416	dma->srcr[srci].p = htole32(sc->sc_dma_physaddr
1417	    + offsetof(struct hifn_dma, test_src));
1418	dma->dstr[dsti].p = htole32(sc->sc_dma_physaddr
1419	    + offsetof(struct hifn_dma, test_dst));
1420
1421	dma->cmdr[cmdi].l = htole32(16 | masks);
1422	dma->srcr[srci].l = htole32(8 | masks);
1423	dma->dstr[dsti].l = htole32(4 | masks);
1424	dma->resr[resi].l = htole32(4 | masks);
1425
1426	bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap,
1427	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1428
1429	for (r = 10000; r >= 0; r--) {
1430		DELAY(10);
1431		bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap,
1432		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1433		if ((dma->resr[resi].l & htole32(HIFN_D_VALID)) == 0)
1434			break;
1435		bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap,
1436		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1437	}
1438	if (r == 0) {
1439		device_printf(sc->sc_dev, "writeramaddr -- "
1440		    "result[%d](addr %d) still valid\n", resi, addr);
1441		r = -1;
1442		return (-1);
1443	} else
1444		r = 0;
1445
1446	WRITE_REG_1(sc, HIFN_1_DMA_CSR,
1447	    HIFN_DMACSR_C_CTRL_DIS | HIFN_DMACSR_S_CTRL_DIS |
1448	    HIFN_DMACSR_D_CTRL_DIS | HIFN_DMACSR_R_CTRL_DIS);
1449
1450	return (r);
1451}
1452
1453static int
1454hifn_readramaddr(struct hifn_softc *sc, int addr, u_int8_t *data)
1455{
1456	struct hifn_dma *dma = sc->sc_dma;
1457	hifn_base_command_t rc;
1458	const u_int32_t masks = HIFN_D_VALID | HIFN_D_LAST | HIFN_D_MASKDONEIRQ;
1459	int r, cmdi, srci, dsti, resi;
1460
1461	rc.masks = htole16(2 << 13);
1462	rc.session_num = htole16(addr >> 14);
1463	rc.total_source_count = htole16(addr & 0x3fff);
1464	rc.total_dest_count = htole16(8);
1465
1466	hifn_alloc_slot(sc, &cmdi, &srci, &dsti, &resi);
1467
1468	WRITE_REG_1(sc, HIFN_1_DMA_CSR,
1469	    HIFN_DMACSR_C_CTRL_ENA | HIFN_DMACSR_S_CTRL_ENA |
1470	    HIFN_DMACSR_D_CTRL_ENA | HIFN_DMACSR_R_CTRL_ENA);
1471
1472	bzero(dma->command_bufs[cmdi], HIFN_MAX_COMMAND);
1473	*(hifn_base_command_t *)dma->command_bufs[cmdi] = rc;
1474
1475	dma->srcr[srci].p = htole32(sc->sc_dma_physaddr +
1476	    offsetof(struct hifn_dma, test_src));
1477	dma->test_src = 0;
1478	dma->dstr[dsti].p =  htole32(sc->sc_dma_physaddr +
1479	    offsetof(struct hifn_dma, test_dst));
1480	dma->test_dst = 0;
1481	dma->cmdr[cmdi].l = htole32(8 | masks);
1482	dma->srcr[srci].l = htole32(8 | masks);
1483	dma->dstr[dsti].l = htole32(8 | masks);
1484	dma->resr[resi].l = htole32(HIFN_MAX_RESULT | masks);
1485
1486	bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap,
1487	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1488
1489	for (r = 10000; r >= 0; r--) {
1490		DELAY(10);
1491		bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap,
1492		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1493		if ((dma->resr[resi].l & htole32(HIFN_D_VALID)) == 0)
1494			break;
1495		bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap,
1496		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1497	}
1498	if (r == 0) {
1499		device_printf(sc->sc_dev, "readramaddr -- "
1500		    "result[%d](addr %d) still valid\n", resi, addr);
1501		r = -1;
1502	} else {
1503		r = 0;
1504		bcopy(&dma->test_dst, data, sizeof(dma->test_dst));
1505	}
1506
1507	WRITE_REG_1(sc, HIFN_1_DMA_CSR,
1508	    HIFN_DMACSR_C_CTRL_DIS | HIFN_DMACSR_S_CTRL_DIS |
1509	    HIFN_DMACSR_D_CTRL_DIS | HIFN_DMACSR_R_CTRL_DIS);
1510
1511	return (r);
1512}
1513
1514/*
1515 * Initialize the descriptor rings.
1516 */
static void
hifn_init_dma(struct hifn_softc *sc)
{
	struct hifn_dma *dma = sc->sc_dma;
	int i;

	hifn_set_retry(sc);

	/* initialize static pointer values */
	for (i = 0; i < HIFN_D_CMD_RSIZE; i++)
		dma->cmdr[i].p = htole32(sc->sc_dma_physaddr +
		    offsetof(struct hifn_dma, command_bufs[i][0]));
	for (i = 0; i < HIFN_D_RES_RSIZE; i++)
		dma->resr[i].p = htole32(sc->sc_dma_physaddr +
		    offsetof(struct hifn_dma, result_bufs[i][0]));

	/* the extra descriptor at the end of each ring points back to 0 */
	dma->cmdr[HIFN_D_CMD_RSIZE].p =
	    htole32(sc->sc_dma_physaddr + offsetof(struct hifn_dma, cmdr[0]));
	dma->srcr[HIFN_D_SRC_RSIZE].p =
	    htole32(sc->sc_dma_physaddr + offsetof(struct hifn_dma, srcr[0]));
	dma->dstr[HIFN_D_DST_RSIZE].p =
	    htole32(sc->sc_dma_physaddr + offsetof(struct hifn_dma, dstr[0]));
	dma->resr[HIFN_D_RES_RSIZE].p =
	    htole32(sc->sc_dma_physaddr + offsetof(struct hifn_dma, resr[0]));

	/* reset per-ring indices: u = in use, i = producer, k = cleanup */
	sc->sc_cmdu = sc->sc_srcu = sc->sc_dstu = sc->sc_resu = 0;
	sc->sc_cmdi = sc->sc_srci = sc->sc_dsti = sc->sc_resi = 0;
	sc->sc_cmdk = sc->sc_srck = sc->sc_dstk = sc->sc_resk = 0;
}
1546
1547/*
1548 * Writes out the raw command buffer space.  Returns the
1549 * command buffer size.
1550 */
static u_int
hifn_write_command(struct hifn_command *cmd, u_int8_t *buf)
{
	u_int8_t *buf_pos;
	hifn_base_command_t *base_cmd;
	hifn_mac_command_t *mac_cmd;
	hifn_crypt_command_t *cry_cmd;
	int using_mac, using_crypt, len, ivlen;
	u_int32_t dlen, slen;

	buf_pos = buf;
	using_mac = cmd->base_masks & HIFN_BASE_CMD_MAC;
	using_crypt = cmd->base_masks & HIFN_BASE_CMD_CRYPT;

	/*
	 * Base command: source/dest lengths are split, with the low 16
	 * bits in the count fields and the high bits folded into the
	 * session_num field.
	 */
	base_cmd = (hifn_base_command_t *)buf_pos;
	base_cmd->masks = htole16(cmd->base_masks);
	slen = cmd->src_mapsize;
	/* with slop, the last partial longword is rounded up to 4 bytes */
	if (cmd->sloplen)
		dlen = cmd->dst_mapsize - cmd->sloplen + sizeof(u_int32_t);
	else
		dlen = cmd->dst_mapsize;
	base_cmd->total_source_count = htole16(slen & HIFN_BASE_CMD_LENMASK_LO);
	base_cmd->total_dest_count = htole16(dlen & HIFN_BASE_CMD_LENMASK_LO);
	dlen >>= 16;
	slen >>= 16;
	base_cmd->session_num = htole16(
	    ((slen << HIFN_BASE_CMD_SRCLEN_S) & HIFN_BASE_CMD_SRCLEN_M) |
	    ((dlen << HIFN_BASE_CMD_DSTLEN_S) & HIFN_BASE_CMD_DSTLEN_M));
	buf_pos += sizeof(hifn_base_command_t);

	/* optional MAC sub-command, same low/high length split */
	if (using_mac) {
		mac_cmd = (hifn_mac_command_t *)buf_pos;
		dlen = cmd->maccrd->crd_len;
		mac_cmd->source_count = htole16(dlen & 0xffff);
		dlen >>= 16;
		mac_cmd->masks = htole16(cmd->mac_masks |
		    ((dlen << HIFN_MAC_CMD_SRCLEN_S) & HIFN_MAC_CMD_SRCLEN_M));
		mac_cmd->header_skip = htole16(cmd->maccrd->crd_skip);
		mac_cmd->reserved = 0;
		buf_pos += sizeof(hifn_mac_command_t);
	}

	/* optional crypt sub-command */
	if (using_crypt) {
		cry_cmd = (hifn_crypt_command_t *)buf_pos;
		dlen = cmd->enccrd->crd_len;
		cry_cmd->source_count = htole16(dlen & 0xffff);
		dlen >>= 16;
		cry_cmd->masks = htole16(cmd->cry_masks |
		    ((dlen << HIFN_CRYPT_CMD_SRCLEN_S) & HIFN_CRYPT_CMD_SRCLEN_M));
		cry_cmd->header_skip = htole16(cmd->enccrd->crd_skip);
		cry_cmd->reserved = 0;
		buf_pos += sizeof(hifn_crypt_command_t);
	}

	/* new MAC key material, if any, follows the sub-commands */
	if (using_mac && cmd->mac_masks & HIFN_MAC_CMD_NEW_KEY) {
		bcopy(cmd->mac, buf_pos, HIFN_MAC_KEY_LENGTH);
		buf_pos += HIFN_MAC_KEY_LENGTH;
	}

	/* new cipher key material; layout depends on the algorithm */
	if (using_crypt && cmd->cry_masks & HIFN_CRYPT_CMD_NEW_KEY) {
		switch (cmd->cry_masks & HIFN_CRYPT_CMD_ALG_MASK) {
		case HIFN_CRYPT_CMD_ALG_3DES:
			bcopy(cmd->ck, buf_pos, HIFN_3DES_KEY_LENGTH);
			buf_pos += HIFN_3DES_KEY_LENGTH;
			break;
		case HIFN_CRYPT_CMD_ALG_DES:
			bcopy(cmd->ck, buf_pos, HIFN_DES_KEY_LENGTH);
			buf_pos += HIFN_DES_KEY_LENGTH;
			break;
		case HIFN_CRYPT_CMD_ALG_RC4:
			/* repeat the key to fill 256 bytes, then 4 zero bytes */
			len = 256;
			do {
				int clen;

				clen = MIN(cmd->cklen, len);
				bcopy(cmd->ck, buf_pos, clen);
				len -= clen;
				buf_pos += clen;
			} while (len > 0);
			bzero(buf_pos, 4);
			buf_pos += 4;
			break;
		case HIFN_CRYPT_CMD_ALG_AES:
			/*
			 * AES keys are variable 128, 192 and
			 * 256 bits (16, 24 and 32 bytes).
			 */
			bcopy(cmd->ck, buf_pos, cmd->cklen);
			buf_pos += cmd->cklen;
			break;
		}
	}

	/* new IV, if any; AES uses a longer IV than DES/3DES */
	if (using_crypt && cmd->cry_masks & HIFN_CRYPT_CMD_NEW_IV) {
		switch (cmd->cry_masks & HIFN_CRYPT_CMD_ALG_MASK) {
		case HIFN_CRYPT_CMD_ALG_AES:
			ivlen = HIFN_AES_IV_LENGTH;
			break;
		default:
			ivlen = HIFN_IV_LENGTH;
			break;
		}
		bcopy(cmd->iv, buf_pos, ivlen);
		buf_pos += ivlen;
	}

	/* with neither MAC nor crypt, pad the command with 8 zero bytes */
	if ((cmd->base_masks & (HIFN_BASE_CMD_MAC|HIFN_BASE_CMD_CRYPT)) == 0) {
		bzero(buf_pos, 8);
		buf_pos += 8;
	}

	return (buf_pos - buf);
}
1664
1665static int
1666hifn_dmamap_aligned(struct hifn_operand *op)
1667{
1668	int i;
1669
1670	for (i = 0; i < op->nsegs; i++) {
1671		if (op->segs[i].ds_addr & 3)
1672			return (0);
1673		if ((i != (op->nsegs - 1)) && (op->segs[i].ds_len & 3))
1674			return (0);
1675	}
1676	return (1);
1677}
1678
1679static __inline int
1680hifn_dmamap_dstwrap(struct hifn_softc *sc, int idx)
1681{
1682	struct hifn_dma *dma = sc->sc_dma;
1683
1684	if (++idx == HIFN_D_DST_RSIZE) {
1685		dma->dstr[idx].l = htole32(HIFN_D_VALID | HIFN_D_JUMP |
1686		    HIFN_D_MASKDONEIRQ);
1687		HIFN_DSTR_SYNC(sc, idx,
1688		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1689		idx = 0;
1690	}
1691	return (idx);
1692}
1693
/*
 * Fill destination-ring descriptors for the command.  If the mapped
 * length is not a multiple of 4 (cmd->sloplen != 0), the final partial
 * longword is redirected into the per-command dma->slop[] scratch area
 * so the hardware always writes whole longwords; the caller copies the
 * slop bytes back later.  Returns the new producer index.
 */
static int
hifn_dmamap_load_dst(struct hifn_softc *sc, struct hifn_command *cmd)
{
	struct hifn_dma *dma = sc->sc_dma;
	struct hifn_operand *dst = &cmd->dst;
	u_int32_t p, l;
	int idx, used = 0, i;

	/* all segments except the last: plain descriptors, no LAST bit */
	idx = sc->sc_dsti;
	for (i = 0; i < dst->nsegs - 1; i++) {
		dma->dstr[idx].p = htole32(dst->segs[i].ds_addr);
		dma->dstr[idx].l = htole32(HIFN_D_VALID |
		    HIFN_D_MASKDONEIRQ | dst->segs[i].ds_len);
		HIFN_DSTR_SYNC(sc, idx,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
		used++;

		idx = hifn_dmamap_dstwrap(sc, idx);
	}

	if (cmd->sloplen == 0) {
		/* aligned: the last segment itself carries the LAST bit */
		p = dst->segs[i].ds_addr;
		l = HIFN_D_VALID | HIFN_D_MASKDONEIRQ | HIFN_D_LAST |
		    dst->segs[i].ds_len;
	} else {
		/* unaligned tail: final longword goes to the slop area */
		p = sc->sc_dma_physaddr +
		    offsetof(struct hifn_dma, slop[cmd->slopidx]);
		l = HIFN_D_VALID | HIFN_D_MASKDONEIRQ | HIFN_D_LAST |
		    sizeof(u_int32_t);

		/* aligned prefix of the last segment, if non-empty */
		if ((dst->segs[i].ds_len - cmd->sloplen) != 0) {
			dma->dstr[idx].p = htole32(dst->segs[i].ds_addr);
			dma->dstr[idx].l = htole32(HIFN_D_VALID |
			    HIFN_D_MASKDONEIRQ |
			    (dst->segs[i].ds_len - cmd->sloplen));
			HIFN_DSTR_SYNC(sc, idx,
			    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
			used++;

			idx = hifn_dmamap_dstwrap(sc, idx);
		}
	}
	/* final descriptor (real segment or slop longword) */
	dma->dstr[idx].p = htole32(p);
	dma->dstr[idx].l = htole32(l);
	HIFN_DSTR_SYNC(sc, idx, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	used++;

	idx = hifn_dmamap_dstwrap(sc, idx);

	sc->sc_dsti = idx;
	sc->sc_dstu += used;
	return (idx);
}
1747
1748static __inline int
1749hifn_dmamap_srcwrap(struct hifn_softc *sc, int idx)
1750{
1751	struct hifn_dma *dma = sc->sc_dma;
1752
1753	if (++idx == HIFN_D_SRC_RSIZE) {
1754		dma->srcr[idx].l = htole32(HIFN_D_VALID |
1755		    HIFN_D_JUMP | HIFN_D_MASKDONEIRQ);
1756		HIFN_SRCR_SYNC(sc, HIFN_D_SRC_RSIZE,
1757		    BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
1758		idx = 0;
1759	}
1760	return (idx);
1761}
1762
1763static int
1764hifn_dmamap_load_src(struct hifn_softc *sc, struct hifn_command *cmd)
1765{
1766	struct hifn_dma *dma = sc->sc_dma;
1767	struct hifn_operand *src = &cmd->src;
1768	int idx, i;
1769	u_int32_t last = 0;
1770
1771	idx = sc->sc_srci;
1772	for (i = 0; i < src->nsegs; i++) {
1773		if (i == src->nsegs - 1)
1774			last = HIFN_D_LAST;
1775
1776		dma->srcr[idx].p = htole32(src->segs[i].ds_addr);
1777		dma->srcr[idx].l = htole32(src->segs[i].ds_len |
1778		    HIFN_D_VALID | HIFN_D_MASKDONEIRQ | last);
1779		HIFN_SRCR_SYNC(sc, idx,
1780		    BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
1781
1782		idx = hifn_dmamap_srcwrap(sc, idx);
1783	}
1784	sc->sc_srci = idx;
1785	sc->sc_srcu += src->nsegs;
1786	return (idx);
1787}
1788
/*
 * busdma load callback: record the segment list produced by
 * bus_dmamap_load_*() into the hifn_operand passed as arg.  The
 * operand's segs[] array holds at most MAX_SCATTER entries, hence
 * the assertion.
 */
static void
hifn_op_cb(void* arg, bus_dma_segment_t *seg, int nsegs, bus_size_t mapsize, int error)
{
	struct hifn_operand *op = arg;

	KASSERT(nsegs <= MAX_SCATTER,
		("hifn_op_cb: too many DMA segments (%u > %u) "
		 "returned when mapping operand", nsegs, MAX_SCATTER));
	op->mapsize = mapsize;
	op->nsegs = nsegs;
	bcopy(seg, op->segs, nsegs * sizeof (seg[0]));
}
1801
1802static int
1803hifn_crypto(
1804	struct hifn_softc *sc,
1805	struct hifn_command *cmd,
1806	struct cryptop *crp,
1807	int hint)
1808{
1809	struct	hifn_dma *dma = sc->sc_dma;
1810	u_int32_t cmdlen, csr;
1811	int cmdi, resi, err = 0;
1812
1813	/*
1814	 * need 1 cmd, and 1 res
1815	 *
1816	 * NB: check this first since it's easy.
1817	 */
1818	HIFN_LOCK(sc);
1819	if ((sc->sc_cmdu + 1) > HIFN_D_CMD_RSIZE ||
1820	    (sc->sc_resu + 1) > HIFN_D_RES_RSIZE) {
1821#ifdef HIFN_DEBUG
1822		if (hifn_debug) {
1823			device_printf(sc->sc_dev,
1824				"cmd/result exhaustion, cmdu %u resu %u\n",
1825				sc->sc_cmdu, sc->sc_resu);
1826		}
1827#endif
1828		hifnstats.hst_nomem_cr++;
1829		HIFN_UNLOCK(sc);
1830		return (ERESTART);
1831	}
1832
1833	if (bus_dmamap_create(sc->sc_dmat, BUS_DMA_NOWAIT, &cmd->src_map)) {
1834		hifnstats.hst_nomem_map++;
1835		HIFN_UNLOCK(sc);
1836		return (ENOMEM);
1837	}
1838
1839	if (crp->crp_flags & CRYPTO_F_IMBUF) {
1840		if (bus_dmamap_load_mbuf(sc->sc_dmat, cmd->src_map,
1841		    cmd->src_m, hifn_op_cb, &cmd->src, BUS_DMA_NOWAIT)) {
1842			hifnstats.hst_nomem_load++;
1843			err = ENOMEM;
1844			goto err_srcmap1;
1845		}
1846	} else if (crp->crp_flags & CRYPTO_F_IOV) {
1847		if (bus_dmamap_load_uio(sc->sc_dmat, cmd->src_map,
1848		    cmd->src_io, hifn_op_cb, &cmd->src, BUS_DMA_NOWAIT)) {
1849			hifnstats.hst_nomem_load++;
1850			err = ENOMEM;
1851			goto err_srcmap1;
1852		}
1853	} else {
1854		err = EINVAL;
1855		goto err_srcmap1;
1856	}
1857
1858	if (hifn_dmamap_aligned(&cmd->src)) {
1859		cmd->sloplen = cmd->src_mapsize & 3;
1860		cmd->dst = cmd->src;
1861	} else {
1862		if (crp->crp_flags & CRYPTO_F_IOV) {
1863			err = EINVAL;
1864			goto err_srcmap;
1865		} else if (crp->crp_flags & CRYPTO_F_IMBUF) {
1866			int totlen, len;
1867			struct mbuf *m, *m0, *mlast;
1868
1869			KASSERT(cmd->dst_m == cmd->src_m,
1870				("hifn_crypto: dst_m initialized improperly"));
1871			hifnstats.hst_unaligned++;
1872			/*
1873			 * Source is not aligned on a longword boundary.
1874			 * Copy the data to insure alignment.  If we fail
1875			 * to allocate mbufs or clusters while doing this
1876			 * we return ERESTART so the operation is requeued
1877			 * at the crypto later, but only if there are
1878			 * ops already posted to the hardware; otherwise we
1879			 * have no guarantee that we'll be re-entered.
1880			 */
1881			totlen = cmd->src_mapsize;
1882			if (cmd->src_m->m_flags & M_PKTHDR) {
1883				len = MHLEN;
1884				MGETHDR(m0, M_DONTWAIT, MT_DATA);
1885				if (m0 && !m_dup_pkthdr(m0, cmd->src_m, M_DONTWAIT)) {
1886					m_free(m0);
1887					m0 = NULL;
1888				}
1889			} else {
1890				len = MLEN;
1891				MGET(m0, M_DONTWAIT, MT_DATA);
1892			}
1893			if (m0 == NULL) {
1894				hifnstats.hst_nomem_mbuf++;
1895				err = sc->sc_cmdu ? ERESTART : ENOMEM;
1896				goto err_srcmap;
1897			}
1898			if (totlen >= MINCLSIZE) {
1899				MCLGET(m0, M_DONTWAIT);
1900				if ((m0->m_flags & M_EXT) == 0) {
1901					hifnstats.hst_nomem_mcl++;
1902					err = sc->sc_cmdu ? ERESTART : ENOMEM;
1903					m_freem(m0);
1904					goto err_srcmap;
1905				}
1906				len = MCLBYTES;
1907			}
1908			totlen -= len;
1909			m0->m_pkthdr.len = m0->m_len = len;
1910			mlast = m0;
1911
1912			while (totlen > 0) {
1913				MGET(m, M_DONTWAIT, MT_DATA);
1914				if (m == NULL) {
1915					hifnstats.hst_nomem_mbuf++;
1916					err = sc->sc_cmdu ? ERESTART : ENOMEM;
1917					m_freem(m0);
1918					goto err_srcmap;
1919				}
1920				len = MLEN;
1921				if (totlen >= MINCLSIZE) {
1922					MCLGET(m, M_DONTWAIT);
1923					if ((m->m_flags & M_EXT) == 0) {
1924						hifnstats.hst_nomem_mcl++;
1925						err = sc->sc_cmdu ? ERESTART : ENOMEM;
1926						mlast->m_next = m;
1927						m_freem(m0);
1928						goto err_srcmap;
1929					}
1930					len = MCLBYTES;
1931				}
1932
1933				m->m_len = len;
1934				m0->m_pkthdr.len += len;
1935				totlen -= len;
1936
1937				mlast->m_next = m;
1938				mlast = m;
1939			}
1940			cmd->dst_m = m0;
1941		}
1942	}
1943
1944	if (cmd->dst_map == NULL) {
1945		if (bus_dmamap_create(sc->sc_dmat, BUS_DMA_NOWAIT, &cmd->dst_map)) {
1946			hifnstats.hst_nomem_map++;
1947			err = ENOMEM;
1948			goto err_srcmap;
1949		}
1950		if (crp->crp_flags & CRYPTO_F_IMBUF) {
1951			if (bus_dmamap_load_mbuf(sc->sc_dmat, cmd->dst_map,
1952			    cmd->dst_m, hifn_op_cb, &cmd->dst, BUS_DMA_NOWAIT)) {
1953				hifnstats.hst_nomem_map++;
1954				err = ENOMEM;
1955				goto err_dstmap1;
1956			}
1957		} else if (crp->crp_flags & CRYPTO_F_IOV) {
1958			if (bus_dmamap_load_uio(sc->sc_dmat, cmd->dst_map,
1959			    cmd->dst_io, hifn_op_cb, &cmd->dst, BUS_DMA_NOWAIT)) {
1960				hifnstats.hst_nomem_load++;
1961				err = ENOMEM;
1962				goto err_dstmap1;
1963			}
1964		}
1965	}
1966
1967#ifdef HIFN_DEBUG
1968	if (hifn_debug) {
1969		device_printf(sc->sc_dev,
1970		    "Entering cmd: stat %8x ien %8x u %d/%d/%d/%d n %d/%d\n",
1971		    READ_REG_1(sc, HIFN_1_DMA_CSR),
1972		    READ_REG_1(sc, HIFN_1_DMA_IER),
1973		    sc->sc_cmdu, sc->sc_srcu, sc->sc_dstu, sc->sc_resu,
1974		    cmd->src_nsegs, cmd->dst_nsegs);
1975	}
1976#endif
1977
1978	if (cmd->src_map == cmd->dst_map) {
1979		bus_dmamap_sync(sc->sc_dmat, cmd->src_map,
1980		    BUS_DMASYNC_PREWRITE|BUS_DMASYNC_PREREAD);
1981	} else {
1982		bus_dmamap_sync(sc->sc_dmat, cmd->src_map,
1983		    BUS_DMASYNC_PREWRITE);
1984		bus_dmamap_sync(sc->sc_dmat, cmd->dst_map,
1985		    BUS_DMASYNC_PREREAD);
1986	}
1987
1988	/*
1989	 * need N src, and N dst
1990	 */
1991	if ((sc->sc_srcu + cmd->src_nsegs) > HIFN_D_SRC_RSIZE ||
1992	    (sc->sc_dstu + cmd->dst_nsegs + 1) > HIFN_D_DST_RSIZE) {
1993#ifdef HIFN_DEBUG
1994		if (hifn_debug) {
1995			device_printf(sc->sc_dev,
1996				"src/dst exhaustion, srcu %u+%u dstu %u+%u\n",
1997				sc->sc_srcu, cmd->src_nsegs,
1998				sc->sc_dstu, cmd->dst_nsegs);
1999		}
2000#endif
2001		hifnstats.hst_nomem_sd++;
2002		err = ERESTART;
2003		goto err_dstmap;
2004	}
2005
2006	if (sc->sc_cmdi == HIFN_D_CMD_RSIZE) {
2007		sc->sc_cmdi = 0;
2008		dma->cmdr[HIFN_D_CMD_RSIZE].l = htole32(HIFN_D_VALID |
2009		    HIFN_D_JUMP | HIFN_D_MASKDONEIRQ);
2010		HIFN_CMDR_SYNC(sc, HIFN_D_CMD_RSIZE,
2011		    BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
2012	}
2013	cmdi = sc->sc_cmdi++;
2014	cmdlen = hifn_write_command(cmd, dma->command_bufs[cmdi]);
2015	HIFN_CMD_SYNC(sc, cmdi, BUS_DMASYNC_PREWRITE);
2016
2017	/* .p for command/result already set */
2018	dma->cmdr[cmdi].l = htole32(cmdlen | HIFN_D_VALID | HIFN_D_LAST |
2019	    HIFN_D_MASKDONEIRQ);
2020	HIFN_CMDR_SYNC(sc, cmdi,
2021	    BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
2022	sc->sc_cmdu++;
2023
2024	/*
2025	 * We don't worry about missing an interrupt (which a "command wait"
2026	 * interrupt salvages us from), unless there is more than one command
2027	 * in the queue.
2028	 */
2029	if (sc->sc_cmdu > 1) {
2030		sc->sc_dmaier |= HIFN_DMAIER_C_WAIT;
2031		WRITE_REG_1(sc, HIFN_1_DMA_IER, sc->sc_dmaier);
2032	}
2033
2034	hifnstats.hst_ipackets++;
2035	hifnstats.hst_ibytes += cmd->src_mapsize;
2036
2037	hifn_dmamap_load_src(sc, cmd);
2038
2039	/*
2040	 * Unlike other descriptors, we don't mask done interrupt from
2041	 * result descriptor.
2042	 */
2043#ifdef HIFN_DEBUG
2044	if (hifn_debug)
2045		printf("load res\n");
2046#endif
2047	if (sc->sc_resi == HIFN_D_RES_RSIZE) {
2048		sc->sc_resi = 0;
2049		dma->resr[HIFN_D_RES_RSIZE].l = htole32(HIFN_D_VALID |
2050		    HIFN_D_JUMP | HIFN_D_MASKDONEIRQ);
2051		HIFN_RESR_SYNC(sc, HIFN_D_RES_RSIZE,
2052		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2053	}
2054	resi = sc->sc_resi++;
2055	KASSERT(sc->sc_hifn_commands[resi] == NULL,
2056		("hifn_crypto: command slot %u busy", resi));
2057	sc->sc_hifn_commands[resi] = cmd;
2058	HIFN_RES_SYNC(sc, resi, BUS_DMASYNC_PREREAD);
2059	if ((hint & CRYPTO_HINT_MORE) && sc->sc_curbatch < hifn_maxbatch) {
2060		dma->resr[resi].l = htole32(HIFN_MAX_RESULT |
2061		    HIFN_D_VALID | HIFN_D_LAST | HIFN_D_MASKDONEIRQ);
2062		sc->sc_curbatch++;
2063		if (sc->sc_curbatch > hifnstats.hst_maxbatch)
2064			hifnstats.hst_maxbatch = sc->sc_curbatch;
2065		hifnstats.hst_totbatch++;
2066	} else {
2067		dma->resr[resi].l = htole32(HIFN_MAX_RESULT |
2068		    HIFN_D_VALID | HIFN_D_LAST);
2069		sc->sc_curbatch = 0;
2070	}
2071	HIFN_RESR_SYNC(sc, resi,
2072	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2073	sc->sc_resu++;
2074
2075	if (cmd->sloplen)
2076		cmd->slopidx = resi;
2077
2078	hifn_dmamap_load_dst(sc, cmd);
2079
2080	csr = 0;
2081	if (sc->sc_c_busy == 0) {
2082		csr |= HIFN_DMACSR_C_CTRL_ENA;
2083		sc->sc_c_busy = 1;
2084	}
2085	if (sc->sc_s_busy == 0) {
2086		csr |= HIFN_DMACSR_S_CTRL_ENA;
2087		sc->sc_s_busy = 1;
2088	}
2089	if (sc->sc_r_busy == 0) {
2090		csr |= HIFN_DMACSR_R_CTRL_ENA;
2091		sc->sc_r_busy = 1;
2092	}
2093	if (sc->sc_d_busy == 0) {
2094		csr |= HIFN_DMACSR_D_CTRL_ENA;
2095		sc->sc_d_busy = 1;
2096	}
2097	if (csr)
2098		WRITE_REG_1(sc, HIFN_1_DMA_CSR, csr);
2099
2100#ifdef HIFN_DEBUG
2101	if (hifn_debug) {
2102		device_printf(sc->sc_dev, "command: stat %8x ier %8x\n",
2103		    READ_REG_1(sc, HIFN_1_DMA_CSR),
2104		    READ_REG_1(sc, HIFN_1_DMA_IER));
2105	}
2106#endif
2107
2108	sc->sc_active = 5;
2109	HIFN_UNLOCK(sc);
2110	KASSERT(err == 0, ("hifn_crypto: success with error %u", err));
2111	return (err);		/* success */
2112
2113err_dstmap:
2114	if (cmd->src_map != cmd->dst_map)
2115		bus_dmamap_unload(sc->sc_dmat, cmd->dst_map);
2116err_dstmap1:
2117	if (cmd->src_map != cmd->dst_map)
2118		bus_dmamap_destroy(sc->sc_dmat, cmd->dst_map);
2119err_srcmap:
2120	if (crp->crp_flags & CRYPTO_F_IMBUF) {
2121		if (cmd->src_m != cmd->dst_m)
2122			m_freem(cmd->dst_m);
2123	}
2124	bus_dmamap_unload(sc->sc_dmat, cmd->src_map);
2125err_srcmap1:
2126	bus_dmamap_destroy(sc->sc_dmat, cmd->src_map);
2127	HIFN_UNLOCK(sc);
2128	return (err);
2129}
2130
2131static void
2132hifn_tick(void* vsc)
2133{
2134	struct hifn_softc *sc = vsc;
2135
2136	HIFN_LOCK(sc);
2137	if (sc->sc_active == 0) {
2138		u_int32_t r = 0;
2139
2140		if (sc->sc_cmdu == 0 && sc->sc_c_busy) {
2141			sc->sc_c_busy = 0;
2142			r |= HIFN_DMACSR_C_CTRL_DIS;
2143		}
2144		if (sc->sc_srcu == 0 && sc->sc_s_busy) {
2145			sc->sc_s_busy = 0;
2146			r |= HIFN_DMACSR_S_CTRL_DIS;
2147		}
2148		if (sc->sc_dstu == 0 && sc->sc_d_busy) {
2149			sc->sc_d_busy = 0;
2150			r |= HIFN_DMACSR_D_CTRL_DIS;
2151		}
2152		if (sc->sc_resu == 0 && sc->sc_r_busy) {
2153			sc->sc_r_busy = 0;
2154			r |= HIFN_DMACSR_R_CTRL_DIS;
2155		}
2156		if (r)
2157			WRITE_REG_1(sc, HIFN_1_DMA_CSR, r);
2158	} else
2159		sc->sc_active--;
2160	HIFN_UNLOCK(sc);
2161	callout_reset(&sc->sc_tickto, hz, hifn_tick, sc);
2162}
2163
/*
 * Interrupt handler.  Acknowledges the DMA status bits we care about,
 * reports overruns / illegal accesses, resets the board on a DMA abort,
 * and then reaps completed descriptors from the result, source, and
 * command rings (invoking hifn_callback() for each finished command).
 * Finally unblocks the crypto framework if a prior submission returned
 * ERESTART.
 */
static void
hifn_intr(void *arg)
{
	struct hifn_softc *sc = arg;
	struct hifn_dma *dma;
	u_int32_t dmacsr, restart;
	int i, u;	/* ring index / in-use count while reaping */

	dmacsr = READ_REG_1(sc, HIFN_1_DMA_CSR);

	/* Nothing in the DMA unit interrupted */
	if ((dmacsr & sc->sc_dmaier) == 0)
		return;

	HIFN_LOCK(sc);

	dma = sc->sc_dma;

#ifdef HIFN_DEBUG
	if (hifn_debug) {
		device_printf(sc->sc_dev,
		    "irq: stat %08x ien %08x damier %08x i %d/%d/%d/%d k %d/%d/%d/%d u %d/%d/%d/%d\n",
		    dmacsr, READ_REG_1(sc, HIFN_1_DMA_IER), sc->sc_dmaier,
		    sc->sc_cmdi, sc->sc_srci, sc->sc_dsti, sc->sc_resi,
		    sc->sc_cmdk, sc->sc_srck, sc->sc_dstk, sc->sc_resk,
		    sc->sc_cmdu, sc->sc_srcu, sc->sc_dstu, sc->sc_resu);
	}
#endif

	/* Acknowledge only the status bits we were watching. */
	WRITE_REG_1(sc, HIFN_1_DMA_CSR, dmacsr & sc->sc_dmaier);

	if ((sc->sc_flags & HIFN_HAS_PUBLIC) &&
	    (dmacsr & HIFN_DMACSR_PUBDONE))
		WRITE_REG_1(sc, HIFN_1_PUB_STATUS,
		    READ_REG_1(sc, HIFN_1_PUB_STATUS) | HIFN_PUBSTS_DONE);

	restart = dmacsr & (HIFN_DMACSR_D_OVER | HIFN_DMACSR_R_OVER);
	if (restart)
		device_printf(sc->sc_dev, "overrun %x\n", dmacsr);

	if (sc->sc_flags & HIFN_IS_7811) {
		if (dmacsr & HIFN_DMACSR_ILLR)
			device_printf(sc->sc_dev, "illegal read\n");
		if (dmacsr & HIFN_DMACSR_ILLW)
			device_printf(sc->sc_dev, "illegal write\n");
	}

	/* A DMA abort is unrecoverable: fail everything and reset. */
	restart = dmacsr & (HIFN_DMACSR_C_ABORT | HIFN_DMACSR_S_ABORT |
	    HIFN_DMACSR_D_ABORT | HIFN_DMACSR_R_ABORT);
	if (restart) {
		device_printf(sc->sc_dev, "abort, resetting.\n");
		hifnstats.hst_abort++;
		hifn_abort(sc);
		HIFN_UNLOCK(sc);
		return;
	}

	if ((dmacsr & HIFN_DMACSR_C_WAIT) && (sc->sc_cmdu == 0)) {
		/*
		 * If no slots to process and we receive a "waiting on
		 * command" interrupt, we disable the "waiting on command"
		 * (by clearing it).
		 */
		sc->sc_dmaier &= ~HIFN_DMAIER_C_WAIT;
		WRITE_REG_1(sc, HIFN_1_DMA_IER, sc->sc_dmaier);
	}

	/* clear the rings */
	/*
	 * Result ring: walk from the reap index until a still-VALID
	 * (device-owned) descriptor is found.  Index RSIZE is the jump
	 * descriptor (see the ring setup in hifn_crypto), hence the
	 * "i != HIFN_D_RES_RSIZE" guard and the wrap at RSIZE + 1.
	 */
	i = sc->sc_resk; u = sc->sc_resu;
	while (u != 0) {
		HIFN_RESR_SYNC(sc, i,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
		if (dma->resr[i].l & htole32(HIFN_D_VALID)) {
			HIFN_RESR_SYNC(sc, i,
			    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
			break;
		}

		if (i != HIFN_D_RES_RSIZE) {
			struct hifn_command *cmd;
			u_int8_t *macbuf = NULL;

			HIFN_RES_SYNC(sc, i, BUS_DMASYNC_POSTREAD);
			cmd = sc->sc_hifn_commands[i];
			KASSERT(cmd != NULL,
				("hifn_intr: null command slot %u", i));
			sc->sc_hifn_commands[i] = NULL;

			/* MAC result starts 12 bytes into the buffer. */
			if (cmd->base_masks & HIFN_BASE_CMD_MAC) {
				macbuf = dma->result_bufs[i];
				macbuf += 12;
			}

			hifn_callback(sc, cmd, macbuf);
			hifnstats.hst_opackets++;
			u--;
		}

		if (++i == (HIFN_D_RES_RSIZE + 1))
			i = 0;
	}
	sc->sc_resk = i; sc->sc_resu = u;

	/* Source ring: just retire consumed descriptors. */
	i = sc->sc_srck; u = sc->sc_srcu;
	while (u != 0) {
		if (i == HIFN_D_SRC_RSIZE)
			i = 0;
		HIFN_SRCR_SYNC(sc, i,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
		if (dma->srcr[i].l & htole32(HIFN_D_VALID)) {
			HIFN_SRCR_SYNC(sc, i,
			    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
			break;
		}
		i++, u--;
	}
	sc->sc_srck = i; sc->sc_srcu = u;

	/* Command ring: retire consumed descriptors, skipping the jump. */
	i = sc->sc_cmdk; u = sc->sc_cmdu;
	while (u != 0) {
		HIFN_CMDR_SYNC(sc, i,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
		if (dma->cmdr[i].l & htole32(HIFN_D_VALID)) {
			HIFN_CMDR_SYNC(sc, i,
			    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
			break;
		}
		if (i != HIFN_D_CMD_RSIZE) {
			u--;
			HIFN_CMD_SYNC(sc, i, BUS_DMASYNC_POSTWRITE);
		}
		if (++i == (HIFN_D_CMD_RSIZE + 1))
			i = 0;
	}
	sc->sc_cmdk = i; sc->sc_cmdu = u;

	HIFN_UNLOCK(sc);

	if (sc->sc_needwakeup) {		/* XXX check high watermark */
		int wakeup = sc->sc_needwakeup & (CRYPTO_SYMQ|CRYPTO_ASYMQ);
#ifdef HIFN_DEBUG
		if (hifn_debug)
			device_printf(sc->sc_dev,
				"wakeup crypto (%x) u %d/%d/%d/%d\n",
				sc->sc_needwakeup,
				sc->sc_cmdu, sc->sc_srcu, sc->sc_dstu, sc->sc_resu);
#endif
		sc->sc_needwakeup &= ~wakeup;
		crypto_unblock(sc->sc_cid, wakeup);
	}
}
2315
2316/*
2317 * Allocate a new 'session' and return an encoded session id.  'sidp'
2318 * contains our registration id, and should contain an encoded session
2319 * id on successful allocation.
2320 */
2321static int
2322hifn_newsession(device_t dev, u_int32_t *sidp, struct cryptoini *cri)
2323{
2324	struct hifn_softc *sc = device_get_softc(dev);
2325	struct cryptoini *c;
2326	int mac = 0, cry = 0, sesn;
2327	struct hifn_session *ses = NULL;
2328
2329	KASSERT(sc != NULL, ("hifn_newsession: null softc"));
2330	if (sidp == NULL || cri == NULL || sc == NULL)
2331		return (EINVAL);
2332
2333	HIFN_LOCK(sc);
2334	if (sc->sc_sessions == NULL) {
2335		ses = sc->sc_sessions = (struct hifn_session *)malloc(
2336		    sizeof(*ses), M_DEVBUF, M_NOWAIT);
2337		if (ses == NULL) {
2338			HIFN_UNLOCK(sc);
2339			return (ENOMEM);
2340		}
2341		sesn = 0;
2342		sc->sc_nsessions = 1;
2343	} else {
2344		for (sesn = 0; sesn < sc->sc_nsessions; sesn++) {
2345			if (!sc->sc_sessions[sesn].hs_used) {
2346				ses = &sc->sc_sessions[sesn];
2347				break;
2348			}
2349		}
2350
2351		if (ses == NULL) {
2352			sesn = sc->sc_nsessions;
2353			ses = (struct hifn_session *)malloc((sesn + 1) *
2354			    sizeof(*ses), M_DEVBUF, M_NOWAIT);
2355			if (ses == NULL) {
2356				HIFN_UNLOCK(sc);
2357				return (ENOMEM);
2358			}
2359			bcopy(sc->sc_sessions, ses, sesn * sizeof(*ses));
2360			bzero(sc->sc_sessions, sesn * sizeof(*ses));
2361			free(sc->sc_sessions, M_DEVBUF);
2362			sc->sc_sessions = ses;
2363			ses = &sc->sc_sessions[sesn];
2364			sc->sc_nsessions++;
2365		}
2366	}
2367	HIFN_UNLOCK(sc);
2368
2369	bzero(ses, sizeof(*ses));
2370	ses->hs_used = 1;
2371
2372	for (c = cri; c != NULL; c = c->cri_next) {
2373		switch (c->cri_alg) {
2374		case CRYPTO_MD5:
2375		case CRYPTO_SHA1:
2376		case CRYPTO_MD5_HMAC:
2377		case CRYPTO_SHA1_HMAC:
2378			if (mac)
2379				return (EINVAL);
2380			mac = 1;
2381			ses->hs_mlen = c->cri_mlen;
2382			if (ses->hs_mlen == 0) {
2383				switch (c->cri_alg) {
2384				case CRYPTO_MD5:
2385				case CRYPTO_MD5_HMAC:
2386					ses->hs_mlen = 16;
2387					break;
2388				case CRYPTO_SHA1:
2389				case CRYPTO_SHA1_HMAC:
2390					ses->hs_mlen = 20;
2391					break;
2392				}
2393			}
2394			break;
2395		case CRYPTO_DES_CBC:
2396		case CRYPTO_3DES_CBC:
2397		case CRYPTO_AES_CBC:
2398			/* XXX this may read fewer, does it matter? */
2399			read_random(ses->hs_iv,
2400				c->cri_alg == CRYPTO_AES_CBC ?
2401					HIFN_AES_IV_LENGTH : HIFN_IV_LENGTH);
2402			/*FALLTHROUGH*/
2403		case CRYPTO_ARC4:
2404			if (cry)
2405				return (EINVAL);
2406			cry = 1;
2407			break;
2408		default:
2409			return (EINVAL);
2410		}
2411	}
2412	if (mac == 0 && cry == 0)
2413		return (EINVAL);
2414
2415	*sidp = HIFN_SID(device_get_unit(sc->sc_dev), sesn);
2416
2417	return (0);
2418}
2419
2420/*
2421 * Deallocate a session.
2422 * XXX this routine should run a zero'd mac/encrypt key into context ram.
2423 * XXX to blow away any keys already stored there.
2424 */
2425static int
2426hifn_freesession(device_t dev, u_int64_t tid)
2427{
2428	struct hifn_softc *sc = device_get_softc(dev);
2429	int session, error;
2430	u_int32_t sid = CRYPTO_SESID2LID(tid);
2431
2432	KASSERT(sc != NULL, ("hifn_freesession: null softc"));
2433	if (sc == NULL)
2434		return (EINVAL);
2435
2436	HIFN_LOCK(sc);
2437	session = HIFN_SESSION(sid);
2438	if (session < sc->sc_nsessions) {
2439		bzero(&sc->sc_sessions[session], sizeof(struct hifn_session));
2440		error = 0;
2441	} else
2442		error = EINVAL;
2443	HIFN_UNLOCK(sc);
2444
2445	return (error);
2446}
2447
/*
 * opencrypto dispatch entry point: validate the request, build a
 * hifn_command describing the cipher/MAC work, and hand it to
 * hifn_crypto() for DMA submission.  On ERESTART the request is left
 * for the framework to requeue; on any other error the request is
 * completed immediately with crp_etype set.
 */
static int
hifn_process(device_t dev, struct cryptop *crp, int hint)
{
	struct hifn_softc *sc = device_get_softc(dev);
	struct hifn_command *cmd = NULL;
	int session, err, ivlen;
	struct cryptodesc *crd1, *crd2, *maccrd, *enccrd;

	if (crp == NULL || crp->crp_callback == NULL) {
		hifnstats.hst_invalid++;
		return (EINVAL);
	}
	session = HIFN_SESSION(crp->crp_sid);

	if (sc == NULL || session >= sc->sc_nsessions) {
		err = EINVAL;
		goto errout;
	}

	cmd = malloc(sizeof(struct hifn_command), M_DEVBUF, M_NOWAIT | M_ZERO);
	if (cmd == NULL) {
		hifnstats.hst_nomem++;
		err = ENOMEM;
		goto errout;
	}

	/* Only mbuf chains and uio buffers are supported. */
	if (crp->crp_flags & CRYPTO_F_IMBUF) {
		cmd->src_m = (struct mbuf *)crp->crp_buf;
		cmd->dst_m = (struct mbuf *)crp->crp_buf;
	} else if (crp->crp_flags & CRYPTO_F_IOV) {
		cmd->src_io = (struct uio *)crp->crp_buf;
		cmd->dst_io = (struct uio *)crp->crp_buf;
	} else {
		err = EINVAL;
		goto errout;	/* XXX we don't handle contiguous buffers! */
	}

	/*
	 * Classify the one or two descriptors into (maccrd, enccrd).
	 * The hardware only supports MAC-then-decrypt and
	 * encrypt-then-MAC orderings for two-descriptor requests.
	 */
	crd1 = crp->crp_desc;
	if (crd1 == NULL) {
		err = EINVAL;
		goto errout;
	}
	crd2 = crd1->crd_next;

	if (crd2 == NULL) {
		if (crd1->crd_alg == CRYPTO_MD5_HMAC ||
		    crd1->crd_alg == CRYPTO_SHA1_HMAC ||
		    crd1->crd_alg == CRYPTO_SHA1 ||
		    crd1->crd_alg == CRYPTO_MD5) {
			maccrd = crd1;
			enccrd = NULL;
		} else if (crd1->crd_alg == CRYPTO_DES_CBC ||
		    crd1->crd_alg == CRYPTO_3DES_CBC ||
		    crd1->crd_alg == CRYPTO_AES_CBC ||
		    crd1->crd_alg == CRYPTO_ARC4) {
			if ((crd1->crd_flags & CRD_F_ENCRYPT) == 0)
				cmd->base_masks |= HIFN_BASE_CMD_DECODE;
			maccrd = NULL;
			enccrd = crd1;
		} else {
			err = EINVAL;
			goto errout;
		}
	} else {
		if ((crd1->crd_alg == CRYPTO_MD5_HMAC ||
                     crd1->crd_alg == CRYPTO_SHA1_HMAC ||
                     crd1->crd_alg == CRYPTO_MD5 ||
                     crd1->crd_alg == CRYPTO_SHA1) &&
		    (crd2->crd_alg == CRYPTO_DES_CBC ||
		     crd2->crd_alg == CRYPTO_3DES_CBC ||
		     crd2->crd_alg == CRYPTO_AES_CBC ||
		     crd2->crd_alg == CRYPTO_ARC4) &&
		    ((crd2->crd_flags & CRD_F_ENCRYPT) == 0)) {
			cmd->base_masks = HIFN_BASE_CMD_DECODE;
			maccrd = crd1;
			enccrd = crd2;
		} else if ((crd1->crd_alg == CRYPTO_DES_CBC ||
		     crd1->crd_alg == CRYPTO_ARC4 ||
		     crd1->crd_alg == CRYPTO_3DES_CBC ||
		     crd1->crd_alg == CRYPTO_AES_CBC) &&
		    (crd2->crd_alg == CRYPTO_MD5_HMAC ||
                     crd2->crd_alg == CRYPTO_SHA1_HMAC ||
                     crd2->crd_alg == CRYPTO_MD5 ||
                     crd2->crd_alg == CRYPTO_SHA1) &&
		    (crd1->crd_flags & CRD_F_ENCRYPT)) {
			enccrd = crd1;
			maccrd = crd2;
		} else {
			/*
			 * We cannot order the 7751 as requested
			 */
			err = EINVAL;
			goto errout;
		}
	}

	/* Fill in the cipher portion of the command. */
	if (enccrd) {
		cmd->enccrd = enccrd;
		cmd->base_masks |= HIFN_BASE_CMD_CRYPT;
		switch (enccrd->crd_alg) {
		case CRYPTO_ARC4:
			cmd->cry_masks |= HIFN_CRYPT_CMD_ALG_RC4;
			break;
		case CRYPTO_DES_CBC:
			cmd->cry_masks |= HIFN_CRYPT_CMD_ALG_DES |
			    HIFN_CRYPT_CMD_MODE_CBC |
			    HIFN_CRYPT_CMD_NEW_IV;
			break;
		case CRYPTO_3DES_CBC:
			cmd->cry_masks |= HIFN_CRYPT_CMD_ALG_3DES |
			    HIFN_CRYPT_CMD_MODE_CBC |
			    HIFN_CRYPT_CMD_NEW_IV;
			break;
		case CRYPTO_AES_CBC:
			cmd->cry_masks |= HIFN_CRYPT_CMD_ALG_AES |
			    HIFN_CRYPT_CMD_MODE_CBC |
			    HIFN_CRYPT_CMD_NEW_IV;
			break;
		default:
			err = EINVAL;
			goto errout;
		}
		/*
		 * IV selection: for encryption use the explicit IV or the
		 * session IV (and write it into the buffer unless the
		 * caller says it is already present); for decryption use
		 * the explicit IV or read it from the buffer.
		 */
		if (enccrd->crd_alg != CRYPTO_ARC4) {
			ivlen = ((enccrd->crd_alg == CRYPTO_AES_CBC) ?
				HIFN_AES_IV_LENGTH : HIFN_IV_LENGTH);
			if (enccrd->crd_flags & CRD_F_ENCRYPT) {
				if (enccrd->crd_flags & CRD_F_IV_EXPLICIT)
					bcopy(enccrd->crd_iv, cmd->iv, ivlen);
				else
					bcopy(sc->sc_sessions[session].hs_iv,
					    cmd->iv, ivlen);

				if ((enccrd->crd_flags & CRD_F_IV_PRESENT)
				    == 0) {
					crypto_copyback(crp->crp_flags,
					    crp->crp_buf, enccrd->crd_inject,
					    ivlen, cmd->iv);
				}
			} else {
				if (enccrd->crd_flags & CRD_F_IV_EXPLICIT)
					bcopy(enccrd->crd_iv, cmd->iv, ivlen);
				else {
					crypto_copydata(crp->crp_flags,
					    crp->crp_buf, enccrd->crd_inject,
					    ivlen, cmd->iv);
				}
			}
		}

		/*
		 * NOTE(review): NEW_KEY is also set unconditionally two
		 * lines below, so this CRD_F_KEY_EXPLICIT test is
		 * redundant as written.
		 */
		if (enccrd->crd_flags & CRD_F_KEY_EXPLICIT)
			cmd->cry_masks |= HIFN_CRYPT_CMD_NEW_KEY;
		cmd->ck = enccrd->crd_key;
		cmd->cklen = enccrd->crd_klen >> 3;
		cmd->cry_masks |= HIFN_CRYPT_CMD_NEW_KEY;

		/*
		 * Need to specify the size for the AES key in the masks.
		 */
		if ((cmd->cry_masks & HIFN_CRYPT_CMD_ALG_MASK) ==
		    HIFN_CRYPT_CMD_ALG_AES) {
			switch (cmd->cklen) {
			case 16:
				cmd->cry_masks |= HIFN_CRYPT_CMD_KSZ_128;
				break;
			case 24:
				cmd->cry_masks |= HIFN_CRYPT_CMD_KSZ_192;
				break;
			case 32:
				cmd->cry_masks |= HIFN_CRYPT_CMD_KSZ_256;
				break;
			default:
				err = EINVAL;
				goto errout;
			}
		}
	}

	/* Fill in the MAC portion of the command. */
	if (maccrd) {
		cmd->maccrd = maccrd;
		cmd->base_masks |= HIFN_BASE_CMD_MAC;

		switch (maccrd->crd_alg) {
		case CRYPTO_MD5:
			cmd->mac_masks |= HIFN_MAC_CMD_ALG_MD5 |
			    HIFN_MAC_CMD_RESULT | HIFN_MAC_CMD_MODE_HASH |
			    HIFN_MAC_CMD_POS_IPSEC;
                       break;
		case CRYPTO_MD5_HMAC:
			cmd->mac_masks |= HIFN_MAC_CMD_ALG_MD5 |
			    HIFN_MAC_CMD_RESULT | HIFN_MAC_CMD_MODE_HMAC |
			    HIFN_MAC_CMD_POS_IPSEC | HIFN_MAC_CMD_TRUNC;
			break;
		case CRYPTO_SHA1:
			cmd->mac_masks |= HIFN_MAC_CMD_ALG_SHA1 |
			    HIFN_MAC_CMD_RESULT | HIFN_MAC_CMD_MODE_HASH |
			    HIFN_MAC_CMD_POS_IPSEC;
			break;
		case CRYPTO_SHA1_HMAC:
			cmd->mac_masks |= HIFN_MAC_CMD_ALG_SHA1 |
			    HIFN_MAC_CMD_RESULT | HIFN_MAC_CMD_MODE_HMAC |
			    HIFN_MAC_CMD_POS_IPSEC | HIFN_MAC_CMD_TRUNC;
			break;
		}

		/* HMAC: copy the key in, zero-padded to the full length. */
		if (maccrd->crd_alg == CRYPTO_SHA1_HMAC ||
		     maccrd->crd_alg == CRYPTO_MD5_HMAC) {
			cmd->mac_masks |= HIFN_MAC_CMD_NEW_KEY;
			bcopy(maccrd->crd_key, cmd->mac, maccrd->crd_klen >> 3);
			bzero(cmd->mac + (maccrd->crd_klen >> 3),
			    HIFN_MAC_KEY_LENGTH - (maccrd->crd_klen >> 3));
		}
	}

	cmd->crp = crp;
	cmd->session_num = session;
	cmd->softc = sc;

	err = hifn_crypto(sc, cmd, crp, hint);
	if (!err) {
		return 0;
	} else if (err == ERESTART) {
		/*
		 * There weren't enough resources to dispatch the request
		 * to the part.  Notify the caller so they'll requeue this
		 * request and resubmit it again soon.
		 */
#ifdef HIFN_DEBUG
		if (hifn_debug)
			device_printf(sc->sc_dev, "requeue request\n");
#endif
		free(cmd, M_DEVBUF);
		sc->sc_needwakeup |= CRYPTO_SYMQ;
		return (err);
	}

errout:
	if (cmd != NULL)
		free(cmd, M_DEVBUF);
	if (err == EINVAL)
		hifnstats.hst_invalid++;
	else
		hifnstats.hst_nomem++;
	crp->crp_etype = err;
	crypto_done(crp);
	return (err);
}
2694
/*
 * Called (with the driver lock held, from hifn_intr) after a DMA abort:
 * walk every command outstanding on the result ring, completing those
 * the device already finished and failing the rest, then reset and
 * reinitialize the board.
 */
static void
hifn_abort(struct hifn_softc *sc)
{
	struct hifn_dma *dma = sc->sc_dma;
	struct hifn_command *cmd;
	struct cryptop *crp;
	int i, u;

	i = sc->sc_resk; u = sc->sc_resu;
	while (u != 0) {
		cmd = sc->sc_hifn_commands[i];
		KASSERT(cmd != NULL, ("hifn_abort: null command slot %u", i));
		sc->sc_hifn_commands[i] = NULL;
		crp = cmd->crp;

		if ((dma->resr[i].l & htole32(HIFN_D_VALID)) == 0) {
			/* Salvage what we can. */
			u_int8_t *macbuf;

			/* MAC result starts 12 bytes into the buffer. */
			if (cmd->base_masks & HIFN_BASE_CMD_MAC) {
				macbuf = dma->result_bufs[i];
				macbuf += 12;
			} else
				macbuf = NULL;
			hifnstats.hst_opackets++;
			hifn_callback(sc, cmd, macbuf);
		} else {
			/* Device still owned the descriptor: fail it. */
			if (cmd->src_map == cmd->dst_map) {
				bus_dmamap_sync(sc->sc_dmat, cmd->src_map,
				    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
			} else {
				bus_dmamap_sync(sc->sc_dmat, cmd->src_map,
				    BUS_DMASYNC_POSTWRITE);
				bus_dmamap_sync(sc->sc_dmat, cmd->dst_map,
				    BUS_DMASYNC_POSTREAD);
			}

			if (cmd->src_m != cmd->dst_m) {
				m_freem(cmd->src_m);
				crp->crp_buf = (caddr_t)cmd->dst_m;
			}

			/* non-shared buffers cannot be restarted */
			if (cmd->src_map != cmd->dst_map) {
				/*
				 * XXX should be EAGAIN, delayed until
				 * after the reset.
				 */
				crp->crp_etype = ENOMEM;
				bus_dmamap_unload(sc->sc_dmat, cmd->dst_map);
				bus_dmamap_destroy(sc->sc_dmat, cmd->dst_map);
			} else
				crp->crp_etype = ENOMEM;

			bus_dmamap_unload(sc->sc_dmat, cmd->src_map);
			bus_dmamap_destroy(sc->sc_dmat, cmd->src_map);

			free(cmd, M_DEVBUF);
			if (crp->crp_etype != EAGAIN)
				crypto_done(crp);
		}

		if (++i == HIFN_D_RES_RSIZE)
			i = 0;
		u--;
	}
	sc->sc_resk = i; sc->sc_resu = u;

	/* Bring the hardware back to a clean state. */
	hifn_reset_board(sc, 1);
	hifn_init_dma(sc);
	hifn_init_pci_registers(sc);
}
2767
/*
 * Complete one finished command: sync and tear down its DMA maps, fix
 * up mbuf chains when the output landed in a freshly allocated chain,
 * copy back any slop bytes and the MAC result, save the next CBC IV
 * for the session, and hand the request back to the crypto framework.
 */
static void
hifn_callback(struct hifn_softc *sc, struct hifn_command *cmd, u_int8_t *macbuf)
{
	struct hifn_dma *dma = sc->sc_dma;
	struct cryptop *crp = cmd->crp;
	struct cryptodesc *crd;
	struct mbuf *m;
	int totlen, i, u, ivlen;

	if (cmd->src_map == cmd->dst_map) {
		bus_dmamap_sync(sc->sc_dmat, cmd->src_map,
		    BUS_DMASYNC_POSTWRITE | BUS_DMASYNC_POSTREAD);
	} else {
		bus_dmamap_sync(sc->sc_dmat, cmd->src_map,
		    BUS_DMASYNC_POSTWRITE);
		bus_dmamap_sync(sc->sc_dmat, cmd->dst_map,
		    BUS_DMASYNC_POSTREAD);
	}

	if (crp->crp_flags & CRYPTO_F_IMBUF) {
		if (cmd->src_m != cmd->dst_m) {
			/*
			 * Output went to a separate mbuf chain: trim it to
			 * the source length, take over the packet header,
			 * and release the original chain.
			 */
			crp->crp_buf = (caddr_t)cmd->dst_m;
			totlen = cmd->src_mapsize;
			for (m = cmd->dst_m; m != NULL; m = m->m_next) {
				if (totlen < m->m_len) {
					m->m_len = totlen;
					totlen = 0;
				} else
					totlen -= m->m_len;
			}
			cmd->dst_m->m_pkthdr.len = cmd->src_m->m_pkthdr.len;
			m_freem(cmd->src_m);
		}
	}

	/* Copy back the tail bytes staged in the slop area. */
	if (cmd->sloplen != 0) {
		crypto_copyback(crp->crp_flags, crp->crp_buf,
		    cmd->src_mapsize - cmd->sloplen, cmd->sloplen,
		    (caddr_t)&dma->slop[cmd->slopidx]);
	}

	/* Retire consumed destination-ring descriptors. */
	i = sc->sc_dstk; u = sc->sc_dstu;
	while (u != 0) {
		if (i == HIFN_D_DST_RSIZE)
			i = 0;
		bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
		if (dma->dstr[i].l & htole32(HIFN_D_VALID)) {
			bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap,
			    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
			break;
		}
		i++, u--;
	}
	sc->sc_dstk = i; sc->sc_dstu = u;

	hifnstats.hst_obytes += cmd->dst_mapsize;

	/*
	 * After a CBC encryption, save the last ciphertext block as the
	 * session's next IV.
	 */
	if ((cmd->base_masks & (HIFN_BASE_CMD_CRYPT | HIFN_BASE_CMD_DECODE)) ==
	    HIFN_BASE_CMD_CRYPT) {
		for (crd = crp->crp_desc; crd; crd = crd->crd_next) {
			if (crd->crd_alg != CRYPTO_DES_CBC &&
			    crd->crd_alg != CRYPTO_3DES_CBC &&
			    crd->crd_alg != CRYPTO_AES_CBC)
				continue;
			ivlen = ((crd->crd_alg == CRYPTO_AES_CBC) ?
				HIFN_AES_IV_LENGTH : HIFN_IV_LENGTH);
			crypto_copydata(crp->crp_flags, crp->crp_buf,
			    crd->crd_skip + crd->crd_len - ivlen, ivlen,
			    cmd->softc->sc_sessions[cmd->session_num].hs_iv);
			break;
		}
	}

	/* Copy the MAC result into the request at the inject offset. */
	if (macbuf != NULL) {
		for (crd = crp->crp_desc; crd; crd = crd->crd_next) {
                        int len;

			if (crd->crd_alg != CRYPTO_MD5 &&
			    crd->crd_alg != CRYPTO_SHA1 &&
			    crd->crd_alg != CRYPTO_MD5_HMAC &&
			    crd->crd_alg != CRYPTO_SHA1_HMAC) {
				continue;
			}
			len = cmd->softc->sc_sessions[cmd->session_num].hs_mlen;
			crypto_copyback(crp->crp_flags, crp->crp_buf,
			    crd->crd_inject, len, macbuf);
			break;
		}
	}

	if (cmd->src_map != cmd->dst_map) {
		bus_dmamap_unload(sc->sc_dmat, cmd->dst_map);
		bus_dmamap_destroy(sc->sc_dmat, cmd->dst_map);
	}
	bus_dmamap_unload(sc->sc_dmat, cmd->src_map);
	bus_dmamap_destroy(sc->sc_dmat, cmd->src_map);
	free(cmd, M_DEVBUF);
	crypto_done(crp);
}
2868
2869/*
2870 * 7811 PB3 rev/2 parts lock-up on burst writes to Group 0
2871 * and Group 1 registers; avoid conditions that could create
2872 * burst writes by doing a read in between the writes.
2873 *
2874 * NB: The read we interpose is always to the same register;
2875 *     we do this because reading from an arbitrary (e.g. last)
2876 *     register may not always work.
2877 */
2878static void
2879hifn_write_reg_0(struct hifn_softc *sc, bus_size_t reg, u_int32_t val)
2880{
2881	if (sc->sc_flags & HIFN_IS_7811) {
2882		if (sc->sc_bar0_lastreg == reg - 4)
2883			bus_space_read_4(sc->sc_st0, sc->sc_sh0, HIFN_0_PUCNFG);
2884		sc->sc_bar0_lastreg = reg;
2885	}
2886	bus_space_write_4(sc->sc_st0, sc->sc_sh0, reg, val);
2887}
2888
2889static void
2890hifn_write_reg_1(struct hifn_softc *sc, bus_size_t reg, u_int32_t val)
2891{
2892	if (sc->sc_flags & HIFN_IS_7811) {
2893		if (sc->sc_bar1_lastreg == reg - 4)
2894			bus_space_read_4(sc->sc_st1, sc->sc_sh1, HIFN_1_REVID);
2895		sc->sc_bar1_lastreg = reg;
2896	}
2897	bus_space_write_4(sc->sc_st1, sc->sc_sh1, reg, val);
2898}
2899
2900#ifdef HIFN_VULCANDEV
2901/*
2902 * this code provides support for mapping the PK engine's register
2903 * into a userspace program.
2904 *
2905 */
2906static int
2907vulcanpk_mmap(struct cdev *dev, vm_ooffset_t offset,
2908	      vm_paddr_t *paddr, int nprot, vm_memattr_t *memattr)
2909{
2910	struct hifn_softc *sc;
2911	vm_paddr_t pd;
2912	void *b;
2913
2914	sc = dev->si_drv1;
2915
2916	pd = rman_get_start(sc->sc_bar1res);
2917	b = rman_get_virtual(sc->sc_bar1res);
2918
2919#if 0
2920	printf("vpk mmap: %p(%016llx) offset=%lld\n", b,
2921	    (unsigned long long)pd, offset);
2922	hexdump(b, HIFN_1_PUB_MEMEND, "vpk", 0);
2923#endif
2924
2925	if (offset == 0) {
2926		*paddr = pd;
2927		return (0);
2928	}
2929	return (-1);
2930}
2931
/*
 * Character-device switch for the PK-engine mapping device; only
 * mmap is provided (see vulcanpk_mmap above).
 */
static struct cdevsw vulcanpk_cdevsw = {
	.d_version =	D_VERSION,
	.d_mmap =	vulcanpk_mmap,
	.d_name =	"vulcanpk",
};
2937#endif /* HIFN_VULCANDEV */
2938