hifn7751.c revision 167755
1/*	$OpenBSD: hifn7751.c,v 1.120 2002/05/17 00:33:34 deraadt Exp $	*/
2
3/*-
4 * Invertex AEON / Hifn 7751 driver
5 * Copyright (c) 1999 Invertex Inc. All rights reserved.
6 * Copyright (c) 1999 Theo de Raadt
7 * Copyright (c) 2000-2001 Network Security Technologies, Inc.
8 *			http://www.netsec.net
9 * Copyright (c) 2003 Hifn Inc.
10 *
11 * This driver is based on a previous driver by Invertex, for which they
12 * requested:  Please send any comments, feedback, bug-fixes, or feature
13 * requests to software@invertex.com.
14 *
15 * Redistribution and use in source and binary forms, with or without
16 * modification, are permitted provided that the following conditions
17 * are met:
18 *
19 * 1. Redistributions of source code must retain the above copyright
20 *   notice, this list of conditions and the following disclaimer.
21 * 2. Redistributions in binary form must reproduce the above copyright
22 *   notice, this list of conditions and the following disclaimer in the
23 *   documentation and/or other materials provided with the distribution.
24 * 3. The name of the author may not be used to endorse or promote products
25 *   derived from this software without specific prior written permission.
26 *
27 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
28 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
29 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
30 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
31 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
32 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
33 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
34 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
35 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
36 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
37 *
38 * Effort sponsored in part by the Defense Advanced Research Projects
39 * Agency (DARPA) and Air Force Research Laboratory, Air Force
40 * Materiel Command, USAF, under agreement number F30602-01-2-0537.
41 */
42
43#include <sys/cdefs.h>
44__FBSDID("$FreeBSD: head/sys/dev/hifn/hifn7751.c 167755 2007-03-21 03:42:51Z sam $");
45
46/*
47 * Driver for various Hifn encryption processors.
48 */
49#include "opt_hifn.h"
50
51#include <sys/param.h>
52#include <sys/systm.h>
53#include <sys/proc.h>
54#include <sys/errno.h>
55#include <sys/malloc.h>
56#include <sys/kernel.h>
57#include <sys/module.h>
58#include <sys/mbuf.h>
59#include <sys/lock.h>
60#include <sys/mutex.h>
61#include <sys/sysctl.h>
62
63#include <vm/vm.h>
64#include <vm/pmap.h>
65
66#include <machine/bus.h>
67#include <machine/resource.h>
68#include <sys/bus.h>
69#include <sys/rman.h>
70
71#include <opencrypto/cryptodev.h>
72#include <sys/random.h>
73#include <sys/kobj.h>
74
75#include "cryptodev_if.h"
76
77#include <dev/pci/pcivar.h>
78#include <dev/pci/pcireg.h>
79
80#ifdef HIFN_RNDTEST
81#include <dev/rndtest/rndtest.h>
82#endif
83#include <dev/hifn/hifn7751reg.h>
84#include <dev/hifn/hifn7751var.h>
85
86#ifdef HIFN_VULCANDEV
87#include <sys/conf.h>
88#include <sys/uio.h>
89
90static struct cdevsw vulcanpk_cdevsw; /* forward declaration */
91#endif
92
93/*
94 * Prototypes and count for the pci_device structure
95 */
96static	int hifn_probe(device_t);
97static	int hifn_attach(device_t);
98static	int hifn_detach(device_t);
99static	int hifn_suspend(device_t);
100static	int hifn_resume(device_t);
101static	void hifn_shutdown(device_t);
102
103static	int hifn_newsession(device_t, u_int32_t *, struct cryptoini *);
104static	int hifn_freesession(device_t, u_int64_t);
105static	int hifn_process(device_t, struct cryptop *, int);
106
/*
 * newbus glue: device, bus, and crypto-framework method dispatch
 * tables, plus the module declaration and its dependencies.
 */
static device_method_t hifn_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		hifn_probe),
	DEVMETHOD(device_attach,	hifn_attach),
	DEVMETHOD(device_detach,	hifn_detach),
	DEVMETHOD(device_suspend,	hifn_suspend),
	DEVMETHOD(device_resume,	hifn_resume),
	DEVMETHOD(device_shutdown,	hifn_shutdown),

	/* bus interface */
	DEVMETHOD(bus_print_child,	bus_generic_print_child),
	DEVMETHOD(bus_driver_added,	bus_generic_driver_added),

	/* crypto device methods */
	DEVMETHOD(cryptodev_newsession,	hifn_newsession),
	DEVMETHOD(cryptodev_freesession,hifn_freesession),
	DEVMETHOD(cryptodev_process,	hifn_process),

	{ 0, 0 }
};
static driver_t hifn_driver = {
	"hifn",
	hifn_methods,
	sizeof (struct hifn_softc)
};
static devclass_t hifn_devclass;

DRIVER_MODULE(hifn, pci, hifn_driver, hifn_devclass, 0, 0);
MODULE_DEPEND(hifn, crypto, 1, 1, 1);
#ifdef HIFN_RNDTEST
MODULE_DEPEND(hifn, rndtest, 1, 1, 1);
#endif
139
140static	void hifn_reset_board(struct hifn_softc *, int);
141static	void hifn_reset_puc(struct hifn_softc *);
142static	void hifn_puc_wait(struct hifn_softc *);
143static	int hifn_enable_crypto(struct hifn_softc *);
144static	void hifn_set_retry(struct hifn_softc *sc);
145static	void hifn_init_dma(struct hifn_softc *);
146static	void hifn_init_pci_registers(struct hifn_softc *);
147static	int hifn_sramsize(struct hifn_softc *);
148static	int hifn_dramsize(struct hifn_softc *);
149static	int hifn_ramtype(struct hifn_softc *);
150static	void hifn_sessions(struct hifn_softc *);
151static	void hifn_intr(void *);
152static	u_int hifn_write_command(struct hifn_command *, u_int8_t *);
153static	u_int32_t hifn_next_signature(u_int32_t a, u_int cnt);
154static	void hifn_callback(struct hifn_softc *, struct hifn_command *, u_int8_t *);
155static	int hifn_crypto(struct hifn_softc *, struct hifn_command *, struct cryptop *, int);
156static	int hifn_readramaddr(struct hifn_softc *, int, u_int8_t *);
157static	int hifn_writeramaddr(struct hifn_softc *, int, u_int8_t *);
158static	int hifn_dmamap_load_src(struct hifn_softc *, struct hifn_command *);
159static	int hifn_dmamap_load_dst(struct hifn_softc *, struct hifn_command *);
160static	int hifn_init_pubrng(struct hifn_softc *);
161static	void hifn_rng(void *);
162static	void hifn_tick(void *);
163static	void hifn_abort(struct hifn_softc *);
164static	void hifn_alloc_slot(struct hifn_softc *, int *, int *, int *, int *);
165
166static	void hifn_write_reg_0(struct hifn_softc *, bus_size_t, u_int32_t);
167static	void hifn_write_reg_1(struct hifn_softc *, bus_size_t, u_int32_t);
168
169static __inline u_int32_t
170READ_REG_0(struct hifn_softc *sc, bus_size_t reg)
171{
172    u_int32_t v = bus_space_read_4(sc->sc_st0, sc->sc_sh0, reg);
173    sc->sc_bar0_lastreg = (bus_size_t) -1;
174    return (v);
175}
176#define	WRITE_REG_0(sc, reg, val)	hifn_write_reg_0(sc, reg, val)
177
178static __inline u_int32_t
179READ_REG_1(struct hifn_softc *sc, bus_size_t reg)
180{
181    u_int32_t v = bus_space_read_4(sc->sc_st1, sc->sc_sh1, reg);
182    sc->sc_bar1_lastreg = (bus_size_t) -1;
183    return (v);
184}
185#define	WRITE_REG_1(sc, reg, val)	hifn_write_reg_1(sc, reg, val)
186
187SYSCTL_NODE(_hw, OID_AUTO, hifn, CTLFLAG_RD, 0, "Hifn driver parameters");
188
189#ifdef HIFN_DEBUG
190static	int hifn_debug = 0;
191SYSCTL_INT(_hw_hifn, OID_AUTO, debug, CTLFLAG_RW, &hifn_debug,
192	    0, "control debugging msgs");
193#endif
194
195static	struct hifn_stats hifnstats;
196SYSCTL_STRUCT(_hw_hifn, OID_AUTO, stats, CTLFLAG_RD, &hifnstats,
197	    hifn_stats, "driver statistics");
198static	int hifn_maxbatch = 1;
199SYSCTL_INT(_hw_hifn, OID_AUTO, maxbatch, CTLFLAG_RW, &hifn_maxbatch,
200	    0, "max ops to batch w/o interrupt");
201
202/*
203 * Probe for a supported device.  The PCI vendor and device
204 * IDs are used to detect devices we know how to handle.
205 */
206static int
207hifn_probe(device_t dev)
208{
209	if (pci_get_vendor(dev) == PCI_VENDOR_INVERTEX &&
210	    pci_get_device(dev) == PCI_PRODUCT_INVERTEX_AEON)
211		return (BUS_PROBE_DEFAULT);
212	if (pci_get_vendor(dev) == PCI_VENDOR_HIFN &&
213	    (pci_get_device(dev) == PCI_PRODUCT_HIFN_7751 ||
214	     pci_get_device(dev) == PCI_PRODUCT_HIFN_7951 ||
215	     pci_get_device(dev) == PCI_PRODUCT_HIFN_7955 ||
216	     pci_get_device(dev) == PCI_PRODUCT_HIFN_7956 ||
217	     pci_get_device(dev) == PCI_PRODUCT_HIFN_7811))
218		return (BUS_PROBE_DEFAULT);
219	if (pci_get_vendor(dev) == PCI_VENDOR_NETSEC &&
220	    pci_get_device(dev) == PCI_PRODUCT_NETSEC_7751)
221		return (BUS_PROBE_DEFAULT);
222	return (ENXIO);
223}
224
225static void
226hifn_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
227{
228	bus_addr_t *paddr = (bus_addr_t*) arg;
229	*paddr = segs->ds_addr;
230}
231
232static const char*
233hifn_partname(struct hifn_softc *sc)
234{
235	/* XXX sprintf numbers when not decoded */
236	switch (pci_get_vendor(sc->sc_dev)) {
237	case PCI_VENDOR_HIFN:
238		switch (pci_get_device(sc->sc_dev)) {
239		case PCI_PRODUCT_HIFN_6500:	return "Hifn 6500";
240		case PCI_PRODUCT_HIFN_7751:	return "Hifn 7751";
241		case PCI_PRODUCT_HIFN_7811:	return "Hifn 7811";
242		case PCI_PRODUCT_HIFN_7951:	return "Hifn 7951";
243		case PCI_PRODUCT_HIFN_7955:	return "Hifn 7955";
244		case PCI_PRODUCT_HIFN_7956:	return "Hifn 7956";
245		}
246		return "Hifn unknown-part";
247	case PCI_VENDOR_INVERTEX:
248		switch (pci_get_device(sc->sc_dev)) {
249		case PCI_PRODUCT_INVERTEX_AEON:	return "Invertex AEON";
250		}
251		return "Invertex unknown-part";
252	case PCI_VENDOR_NETSEC:
253		switch (pci_get_device(sc->sc_dev)) {
254		case PCI_PRODUCT_NETSEC_7751:	return "NetSec 7751";
255		}
256		return "NetSec unknown-part";
257	}
258	return "Unknown-vendor unknown-part";
259}
260
/*
 * Fallback entropy harvester used when rndtest is not compiled in
 * (or failed to attach): feed the RNG words straight into the
 * kernel entropy pool.  The rndtest_state argument is unused here;
 * it exists only to match the rndtest_harvest() signature.
 */
static void
default_harvest(struct rndtest_state *rsp, void *buf, u_int count)
{
	random_harvest(buf, count, count*NBBY, 0, RANDOM_PURE);
}
266
267static u_int
268checkmaxmin(device_t dev, const char *what, u_int v, u_int min, u_int max)
269{
270	if (v > max) {
271		device_printf(dev, "Warning, %s %u out of range, "
272			"using max %u\n", what, v, max);
273		v = max;
274	} else if (v < min) {
275		device_printf(dev, "Warning, %s %u out of range, "
276			"using min %u\n", what, v, min);
277		v = min;
278	}
279	return v;
280}
281
282/*
283 * Select PLL configuration for 795x parts.  This is complicated in
284 * that we cannot determine the optimal parameters without user input.
285 * The reference clock is derived from an external clock through a
286 * multiplier.  The external clock is either the host bus (i.e. PCI)
287 * or an external clock generator.  When using the PCI bus we assume
288 * the clock is either 33 or 66 MHz; for an external source we cannot
289 * tell the speed.
290 *
291 * PLL configuration is done with a string: "pci" for PCI bus, or "ext"
292 * for an external source, followed by the frequency.  We calculate
293 * the appropriate multiplier and PLL register contents accordingly.
294 * When no configuration is given we default to "pci66" since that
295 * always will allow the card to work.  If a card is using the PCI
296 * bus clock and in a 33MHz slot then it will be operating at half
297 * speed until the correct information is provided.
298 *
299 * We use a default setting of "ext66" because according to Mike Ham
300 * of HiFn, almost every board in existence has an external crystal
301 * populated at 66Mhz. Using PCI can be a problem on modern motherboards,
302 * because PCI33 can have clocks from 0 to 33Mhz, and some have
303 * non-PCI-compliant spread-spectrum clocks, which can confuse the pll.
304 */
/*
 * Parse the hint.hifn.N.pllconfig string and compute the PLL
 * register contents for 795x parts (see the block comment above
 * for the configuration language).  Result is stored in *pll.
 */
static void
hifn_getpllconfig(device_t dev, u_int *pll)
{
	const char *pllspec;
	u_int freq, mul, fl, fh;
	u_int32_t pllconfig;
	char *nxt;

	/* Fetch the per-unit hint; default to "ext66" (see above). */
	if (resource_string_value("hifn", device_get_unit(dev),
	    "pllconfig", &pllspec))
		pllspec = "ext66";
	fl = 33, fh = 66;	/* legal reference frequency range (MHz) */
	pllconfig = 0;
	if (strncmp(pllspec, "ext", 3) == 0) {
		pllspec += 3;
		pllconfig |= HIFN_PLL_REF_SEL;
		switch (pci_get_device(dev)) {
		case PCI_PRODUCT_HIFN_7955:
		case PCI_PRODUCT_HIFN_7956:
			/* 7955/7956 accept a wider external clock range */
			fl = 20, fh = 100;
			break;
#ifdef notyet
		case PCI_PRODUCT_HIFN_7954:
			fl = 20, fh = 66;
			break;
#endif
		}
	} else if (strncmp(pllspec, "pci", 3) == 0)
		pllspec += 3;
	/* Anything after the "ext"/"pci" prefix is the frequency in MHz. */
	freq = strtoul(pllspec, &nxt, 10);
	if (nxt == pllspec)
		freq = 66;	/* no number supplied; assume 66 MHz */
	else
		freq = checkmaxmin(dev, "frequency", freq, fl, fh);
	/*
	 * Calculate multiplier.  We target a Fck of 266 MHz,
	 * allowing only even values, possibly rounded down.
	 * Multipliers > 8 must set the charge pump current.
	 */
	mul = checkmaxmin(dev, "PLL divisor", (266 / freq) &~ 1, 2, 12);
	pllconfig |= (mul / 2 - 1) << HIFN_PLL_ND_SHIFT;
	if (mul > 8)
		pllconfig |= HIFN_PLL_IS;
	*pll = pllconfig;
}
350
/*
 * Attach an interface that successfully probed.
 *
 * Resources are acquired in this order (the fail_* labels at the
 * bottom unwind them in reverse): PCI command register setup,
 * BAR0/BAR1 register mappings, DMA tag/map/memory for the shared
 * descriptor area, board reset + crypto enable, RAM sizing,
 * interrupt hookup, and finally registration with the crypto
 * framework.  Returns 0 on success, ENXIO on any failure.
 */
static int
hifn_attach(device_t dev)
{
	struct hifn_softc *sc = device_get_softc(dev);
	u_int32_t cmd;
	caddr_t kva;
	int rseg, rid;
	char rbase;
	u_int16_t ena, rev;

	KASSERT(sc != NULL, ("hifn_attach: null software carrier!"));
	bzero(sc, sizeof (*sc));
	sc->sc_dev = dev;

	mtx_init(&sc->sc_mtx, device_get_nameunit(dev), "hifn driver", MTX_DEF);

	/* XXX handle power management */

	/*
	 * The 7951 and 795x have a random number generator and
	 * public key support; note this.
	 */
	if (pci_get_vendor(dev) == PCI_VENDOR_HIFN &&
	    (pci_get_device(dev) == PCI_PRODUCT_HIFN_7951 ||
	     pci_get_device(dev) == PCI_PRODUCT_HIFN_7955 ||
	     pci_get_device(dev) == PCI_PRODUCT_HIFN_7956))
		sc->sc_flags = HIFN_HAS_RNG | HIFN_HAS_PUBLIC;
	/*
	 * The 7811 has a random number generator and
	 * we also note it's identity 'cuz of some quirks.
	 */
	if (pci_get_vendor(dev) == PCI_VENDOR_HIFN &&
	    pci_get_device(dev) == PCI_PRODUCT_HIFN_7811)
		sc->sc_flags |= HIFN_IS_7811 | HIFN_HAS_RNG;

	/*
	 * The 795x parts support AES.
	 */
	if (pci_get_vendor(dev) == PCI_VENDOR_HIFN &&
	    (pci_get_device(dev) == PCI_PRODUCT_HIFN_7955 ||
	     pci_get_device(dev) == PCI_PRODUCT_HIFN_7956)) {
		sc->sc_flags |= HIFN_IS_7956 | HIFN_HAS_AES;
		/*
		 * Select PLL configuration.  This depends on the
		 * bus and board design and must be manually configured
		 * if the default setting is unacceptable.
		 */
		hifn_getpllconfig(dev, &sc->sc_pllconfig);
	}

	/*
	 * Configure support for memory-mapped access to
	 * registers and for DMA operations.
	 */
#define	PCIM_ENA	(PCIM_CMD_MEMEN|PCIM_CMD_BUSMASTEREN)
	cmd = pci_read_config(dev, PCIR_COMMAND, 4);
	cmd |= PCIM_ENA;
	pci_write_config(dev, PCIR_COMMAND, cmd, 4);
	/* Re-read to verify the enable bits actually stuck. */
	cmd = pci_read_config(dev, PCIR_COMMAND, 4);
	if ((cmd & PCIM_ENA) != PCIM_ENA) {
		device_printf(dev, "failed to enable %s\n",
			(cmd & PCIM_ENA) == 0 ?
				"memory mapping & bus mastering" :
			(cmd & PCIM_CMD_MEMEN) == 0 ?
				"memory mapping" : "bus mastering");
		goto fail_pci;
	}
#undef PCIM_ENA

	/*
	 * Setup PCI resources. Note that we record the bus
	 * tag and handle for each register mapping, this is
	 * used by the READ_REG_0, WRITE_REG_0, READ_REG_1,
	 * and WRITE_REG_1 macros throughout the driver.
	 */
	rid = HIFN_BAR0;
	sc->sc_bar0res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
			 			RF_ACTIVE);
	if (sc->sc_bar0res == NULL) {
		device_printf(dev, "cannot map bar%d register space\n", 0);
		goto fail_pci;
	}
	sc->sc_st0 = rman_get_bustag(sc->sc_bar0res);
	sc->sc_sh0 = rman_get_bushandle(sc->sc_bar0res);
	sc->sc_bar0_lastreg = (bus_size_t) -1;

	rid = HIFN_BAR1;
	sc->sc_bar1res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
						RF_ACTIVE);
	if (sc->sc_bar1res == NULL) {
		device_printf(dev, "cannot map bar%d register space\n", 1);
		goto fail_io0;
	}
	sc->sc_st1 = rman_get_bustag(sc->sc_bar1res);
	sc->sc_sh1 = rman_get_bushandle(sc->sc_bar1res);
	sc->sc_bar1_lastreg = (bus_size_t) -1;

	/* Zero RETRY/TRDY timeouts early; see hifn_set_retry(). */
	hifn_set_retry(sc);

	/*
	 * Setup the area where the Hifn DMA's descriptors
	 * and associated data structures.
	 */
	if (bus_dma_tag_create(NULL,			/* parent */
			       1, 0,			/* alignment,boundary */
			       BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
			       BUS_SPACE_MAXADDR,	/* highaddr */
			       NULL, NULL,		/* filter, filterarg */
			       HIFN_MAX_DMALEN,		/* maxsize */
			       MAX_SCATTER,		/* nsegments */
			       HIFN_MAX_SEGLEN,		/* maxsegsize */
			       BUS_DMA_ALLOCNOW,	/* flags */
			       NULL,			/* lockfunc */
			       NULL,			/* lockarg */
			       &sc->sc_dmat)) {
		device_printf(dev, "cannot allocate DMA tag\n");
		goto fail_io1;
	}
	if (bus_dmamap_create(sc->sc_dmat, BUS_DMA_NOWAIT, &sc->sc_dmamap)) {
		device_printf(dev, "cannot create dma map\n");
		bus_dma_tag_destroy(sc->sc_dmat);
		goto fail_io1;
	}
	if (bus_dmamem_alloc(sc->sc_dmat, (void**) &kva, BUS_DMA_NOWAIT, &sc->sc_dmamap)) {
		device_printf(dev, "cannot alloc dma buffer\n");
		bus_dmamap_destroy(sc->sc_dmat, sc->sc_dmamap);
		bus_dma_tag_destroy(sc->sc_dmat);
		goto fail_io1;
	}
	if (bus_dmamap_load(sc->sc_dmat, sc->sc_dmamap, kva,
			     sizeof (*sc->sc_dma),
			     hifn_dmamap_cb, &sc->sc_dma_physaddr,
			     BUS_DMA_NOWAIT)) {
		device_printf(dev, "cannot load dma map\n");
		bus_dmamem_free(sc->sc_dmat, kva, sc->sc_dmamap);
		bus_dmamap_destroy(sc->sc_dmat, sc->sc_dmamap);
		bus_dma_tag_destroy(sc->sc_dmat);
		goto fail_io1;
	}
	sc->sc_dma = (struct hifn_dma *)kva;
	bzero(sc->sc_dma, sizeof(*sc->sc_dma));

	KASSERT(sc->sc_st0 != 0, ("hifn_attach: null bar0 tag!"));
	KASSERT(sc->sc_sh0 != 0, ("hifn_attach: null bar0 handle!"));
	KASSERT(sc->sc_st1 != 0, ("hifn_attach: null bar1 tag!"));
	KASSERT(sc->sc_sh1 != 0, ("hifn_attach: null bar1 handle!"));

	/*
	 * Reset the board and do the ``secret handshake''
	 * to enable the crypto support.  Then complete the
	 * initialization procedure by setting up the interrupt
	 * and hooking in to the system crypto support so we'll
	 * get used for system services like the crypto device,
	 * IPsec, RNG device, etc.
	 */
	hifn_reset_board(sc, 0);

	if (hifn_enable_crypto(sc) != 0) {
		device_printf(dev, "crypto enabling failed\n");
		goto fail_mem;
	}
	hifn_reset_puc(sc);

	hifn_init_dma(sc);
	hifn_init_pci_registers(sc);

	/* XXX can't dynamically determine ram type for 795x; force dram */
	if (sc->sc_flags & HIFN_IS_7956)
		sc->sc_drammodel = 1;
	else if (hifn_ramtype(sc))
		goto fail_mem;

	if (sc->sc_drammodel == 0)
		hifn_sramsize(sc);
	else
		hifn_dramsize(sc);

	/*
	 * Workaround for NetSec 7751 rev A: half ram size because two
	 * of the address lines were left floating
	 */
	if (pci_get_vendor(dev) == PCI_VENDOR_NETSEC &&
	    pci_get_device(dev) == PCI_PRODUCT_NETSEC_7751 &&
	    pci_get_revid(dev) == 0x61)	/*XXX???*/
		sc->sc_ramsize >>= 1;

	/*
	 * Arrange the interrupt line.
	 */
	rid = 0;
	sc->sc_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
					    RF_SHAREABLE|RF_ACTIVE);
	if (sc->sc_irq == NULL) {
		device_printf(dev, "could not map interrupt\n");
		goto fail_mem;
	}
	/*
	 * NB: Network code assumes we are blocked with splimp()
	 *     so make sure the IRQ is marked appropriately.
	 */
	if (bus_setup_intr(dev, sc->sc_irq, INTR_TYPE_NET | INTR_MPSAFE,
			   NULL, hifn_intr, sc, &sc->sc_intrhand)) {
		device_printf(dev, "could not setup interrupt\n");
		goto fail_intr2;
	}

	hifn_sessions(sc);

	/*
	 * NB: Keep only the low 16 bits; this masks the chip id
	 *     from the 7951.
	 */
	rev = READ_REG_1(sc, HIFN_1_REVID) & 0xffff;

	/* Pretty-print the RAM size in K or M units for the banner. */
	rseg = sc->sc_ramsize / 1024;
	rbase = 'K';
	if (sc->sc_ramsize >= (1024 * 1024)) {
		rbase = 'M';
		rseg /= 1024;
	}
	device_printf(sc->sc_dev, "%s, rev %u, %d%cB %cram",
		hifn_partname(sc), rev,
		rseg, rbase, sc->sc_drammodel ? 'd' : 's');
	if (sc->sc_flags & HIFN_IS_7956)
		printf(", pll=0x%x<%s clk, %ux mult>",
			sc->sc_pllconfig,
			sc->sc_pllconfig & HIFN_PLL_REF_SEL ? "ext" : "pci",
			2 + 2*((sc->sc_pllconfig & HIFN_PLL_ND) >> 11));
	printf("\n");

	sc->sc_cid = crypto_get_driverid(dev, CRYPTOCAP_F_HARDWARE);
	if (sc->sc_cid < 0) {
		device_printf(dev, "could not get crypto driver id\n");
		goto fail_intr;
	}

	/* Read the chip-enable level to learn which algorithms we may use. */
	WRITE_REG_0(sc, HIFN_0_PUCNFG,
	    READ_REG_0(sc, HIFN_0_PUCNFG) | HIFN_PUCNFG_CHIPID);
	ena = READ_REG_0(sc, HIFN_0_PUSTAT) & HIFN_PUSTAT_CHIPENA;

	switch (ena) {
	case HIFN_PUSTAT_ENA_2:
		crypto_register(sc->sc_cid, CRYPTO_3DES_CBC, 0, 0);
		crypto_register(sc->sc_cid, CRYPTO_ARC4, 0, 0);
		if (sc->sc_flags & HIFN_HAS_AES)
			crypto_register(sc->sc_cid, CRYPTO_AES_CBC, 0, 0);
		/*FALLTHROUGH*/
	case HIFN_PUSTAT_ENA_1:
		crypto_register(sc->sc_cid, CRYPTO_MD5, 0, 0);
		crypto_register(sc->sc_cid, CRYPTO_SHA1, 0, 0);
		crypto_register(sc->sc_cid, CRYPTO_MD5_HMAC, 0, 0);
		crypto_register(sc->sc_cid, CRYPTO_SHA1_HMAC, 0, 0);
		crypto_register(sc->sc_cid, CRYPTO_DES_CBC, 0, 0);
		break;
	}

	bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	if (sc->sc_flags & (HIFN_HAS_PUBLIC | HIFN_HAS_RNG))
		hifn_init_pubrng(sc);

	/* Start the once-a-second housekeeping timer (hifn_tick). */
	callout_init(&sc->sc_tickto, CALLOUT_MPSAFE);
	callout_reset(&sc->sc_tickto, hz, hifn_tick, sc);

	return (0);

fail_intr:
	bus_teardown_intr(dev, sc->sc_irq, sc->sc_intrhand);
fail_intr2:
	/* XXX don't store rid */
	bus_release_resource(dev, SYS_RES_IRQ, 0, sc->sc_irq);
fail_mem:
	bus_dmamap_unload(sc->sc_dmat, sc->sc_dmamap);
	bus_dmamem_free(sc->sc_dmat, sc->sc_dma, sc->sc_dmamap);
	bus_dmamap_destroy(sc->sc_dmat, sc->sc_dmamap);
	bus_dma_tag_destroy(sc->sc_dmat);

	/* Turn off DMA polling */
	WRITE_REG_1(sc, HIFN_1_DMA_CNFG, HIFN_DMACNFG_MSTRESET |
	    HIFN_DMACNFG_DMARESET | HIFN_DMACNFG_MODE);
fail_io1:
	bus_release_resource(dev, SYS_RES_MEMORY, HIFN_BAR1, sc->sc_bar1res);
fail_io0:
	bus_release_resource(dev, SYS_RES_MEMORY, HIFN_BAR0, sc->sc_bar0res);
fail_pci:
	mtx_destroy(&sc->sc_mtx);
	return (ENXIO);
}
643
/*
 * Detach an interface that successfully probed.
 * Tears down in roughly the reverse order of hifn_attach():
 * interrupts off, timers stopped, chip quiesced, crypto framework
 * deregistered, then bus/DMA resources released.
 */
static int
hifn_detach(device_t dev)
{
	struct hifn_softc *sc = device_get_softc(dev);

	KASSERT(sc != NULL, ("hifn_detach: null software carrier!"));

	/* disable interrupts */
	WRITE_REG_1(sc, HIFN_1_DMA_IER, 0);

	/*XXX other resources */
	callout_stop(&sc->sc_tickto);
	callout_stop(&sc->sc_rngto);
#ifdef HIFN_RNDTEST
	if (sc->sc_rndtest)
		rndtest_detach(sc->sc_rndtest);
#endif

	/* Turn off DMA polling */
	WRITE_REG_1(sc, HIFN_1_DMA_CNFG, HIFN_DMACNFG_MSTRESET |
	    HIFN_DMACNFG_DMARESET | HIFN_DMACNFG_MODE);

	crypto_unregister_all(sc->sc_cid);

	bus_generic_detach(dev);	/*XXX should be no children, right? */

	bus_teardown_intr(dev, sc->sc_irq, sc->sc_intrhand);
	/* XXX don't store rid */
	bus_release_resource(dev, SYS_RES_IRQ, 0, sc->sc_irq);

	bus_dmamap_unload(sc->sc_dmat, sc->sc_dmamap);
	bus_dmamem_free(sc->sc_dmat, sc->sc_dma, sc->sc_dmamap);
	bus_dmamap_destroy(sc->sc_dmat, sc->sc_dmamap);
	bus_dma_tag_destroy(sc->sc_dmat);

	bus_release_resource(dev, SYS_RES_MEMORY, HIFN_BAR1, sc->sc_bar1res);
	bus_release_resource(dev, SYS_RES_MEMORY, HIFN_BAR0, sc->sc_bar0res);

	mtx_destroy(&sc->sc_mtx);

	return (0);
}
689
/*
 * Stop all chip I/O so that the kernel's probe routines don't
 * get confused by errant DMAs when rebooting.
 * Currently a no-op: the hifn_stop() call is compiled out under
 * "notyet".
 */
static void
hifn_shutdown(device_t dev)
{
#ifdef notyet
	hifn_stop(device_get_softc(dev));
#endif
}
701
702/*
703 * Device suspend routine.  Stop the interface and save some PCI
704 * settings in case the BIOS doesn't restore them properly on
705 * resume.
706 */
static int
hifn_suspend(device_t dev)
{
	struct hifn_softc *sc = device_get_softc(dev);
#ifdef notyet
	hifn_stop(sc);
#endif
	/*
	 * Record the suspended state.  NOTE(review): sc_suspended is
	 * presumably consulted elsewhere before touching hardware;
	 * the consumer is not visible in this chunk.
	 */
	sc->sc_suspended = 1;

	return (0);
}
718
719/*
720 * Device resume routine.  Restore some PCI settings in case the BIOS
721 * doesn't, re-enable busmastering, and restart the interface if
722 * appropriate.
723 */
static int
hifn_resume(device_t dev)
{
	struct hifn_softc *sc = device_get_softc(dev);
#ifdef notyet
	/* reenable busmastering */
	pci_enable_busmaster(dev);
	pci_enable_io(dev, HIFN_RES);

	/*
	 * NOTE(review): this compiled-out fragment references ifp and
	 * rl_init(), which belong to a NIC driver — it appears to be
	 * copied boilerplate and would need rewriting before enabling.
	 */
        /* reinitialize interface if necessary */
        if (ifp->if_flags & IFF_UP)
                rl_init(sc);
#endif
	sc->sc_suspended = 0;

	return (0);
}
741
/*
 * One-time initialization of the public key unit and RNG, invoked
 * from hifn_attach() when HIFN_HAS_PUBLIC and/or HIFN_HAS_RNG are
 * set.  Returns 0 on success, 1 if the public key unit would not
 * come out of reset.
 */
static int
hifn_init_pubrng(struct hifn_softc *sc)
{
	u_int32_t r;
	int i;

#ifdef HIFN_RNDTEST
	/* Route harvested entropy through rndtest when it attaches. */
	sc->sc_rndtest = rndtest_attach(sc->sc_dev);
	if (sc->sc_rndtest)
		sc->sc_harvest = rndtest_harvest;
	else
		sc->sc_harvest = default_harvest;
#else
	sc->sc_harvest = default_harvest;
#endif
	if ((sc->sc_flags & HIFN_IS_7811) == 0) {
		/* Reset 7951 public key/rng engine */
		WRITE_REG_1(sc, HIFN_1_PUB_RESET,
		    READ_REG_1(sc, HIFN_1_PUB_RESET) | HIFN_PUBRST_RESET);

		/* Wait up to ~100ms for the reset bit to self-clear. */
		for (i = 0; i < 100; i++) {
			DELAY(1000);
			if ((READ_REG_1(sc, HIFN_1_PUB_RESET) &
			    HIFN_PUBRST_RESET) == 0)
				break;
		}

		if (i == 100) {
			device_printf(sc->sc_dev, "public key init failed\n");
			return (1);
		}
	}

	/* Enable the rng, if available */
	if (sc->sc_flags & HIFN_HAS_RNG) {
		if (sc->sc_flags & HIFN_IS_7811) {
			/* 7811: drop the enable bit, reprogram the RNG
			 * configuration, then re-enable. */
			r = READ_REG_1(sc, HIFN_1_7811_RNGENA);
			if (r & HIFN_7811_RNGENA_ENA) {
				r &= ~HIFN_7811_RNGENA_ENA;
				WRITE_REG_1(sc, HIFN_1_7811_RNGENA, r);
			}
			WRITE_REG_1(sc, HIFN_1_7811_RNGCFG,
			    HIFN_7811_RNGCFG_DEFL);
			r |= HIFN_7811_RNGENA_ENA;
			WRITE_REG_1(sc, HIFN_1_7811_RNGENA, r);
		} else
			WRITE_REG_1(sc, HIFN_1_RNG_CONFIG,
			    READ_REG_1(sc, HIFN_1_RNG_CONFIG) |
			    HIFN_RNGCFG_ENA);

		/* Poll the RNG about 100 times a second (see hifn_rng). */
		sc->sc_rngfirst = 1;
		if (hz >= 100)
			sc->sc_rnghz = hz / 100;
		else
			sc->sc_rnghz = 1;
		callout_init(&sc->sc_rngto, CALLOUT_MPSAFE);
		callout_reset(&sc->sc_rngto, sc->sc_rnghz, hifn_rng, sc);
	}

	/* Enable public key engine, if available */
	if (sc->sc_flags & HIFN_HAS_PUBLIC) {
		WRITE_REG_1(sc, HIFN_1_PUB_IEN, HIFN_PUBIEN_DONE);
		sc->sc_dmaier |= HIFN_DMAIER_PUBDONE;
		WRITE_REG_1(sc, HIFN_1_DMA_IER, sc->sc_dmaier);
#ifdef HIFN_VULCANDEV
		/* Expose the PK unit to userland as /dev/vulcanpk. */
		sc->sc_pkdev = make_dev(&vulcanpk_cdevsw, 0,
					UID_ROOT, GID_WHEEL, 0666,
					"vulcanpk");
		sc->sc_pkdev->si_drv1 = sc;
#endif
	}

	return (0);
}
816
/*
 * RNG polling timer: pull random words out of the chip, hand them
 * to sc_harvest (rndtest when configured, otherwise the kernel
 * entropy pool via default_harvest), then reschedule at sc_rnghz.
 * NOTE(review): the RANDOM_BITS macro below appears unused in this
 * body.
 */
static void
hifn_rng(void *vsc)
{
#define	RANDOM_BITS(n)	(n)*sizeof (u_int32_t), (n)*sizeof (u_int32_t)*NBBY, 0
	struct hifn_softc *sc = vsc;
	u_int32_t sts, num[2];
	int i;

	if (sc->sc_flags & HIFN_IS_7811) {
		/* ONLY VALID ON 7811!!!! */
		for (i = 0; i < 5; i++) {
			sts = READ_REG_1(sc, HIFN_1_7811_RNGSTS);
			if (sts & HIFN_7811_RNGSTS_UFL) {
				/* NB: returns without rescheduling, so the
				 * RNG stays disabled from here on. */
				device_printf(sc->sc_dev,
					      "RNG underflow: disabling\n");
				return;
			}
			if ((sts & HIFN_7811_RNGSTS_RDY) == 0)
				break;

			/*
			 * There are at least two words in the RNG FIFO
			 * at this point.
			 */
			num[0] = READ_REG_1(sc, HIFN_1_7811_RNGDAT);
			num[1] = READ_REG_1(sc, HIFN_1_7811_RNGDAT);
			/* NB: discard first data read */
			if (sc->sc_rngfirst)
				sc->sc_rngfirst = 0;
			else
				(*sc->sc_harvest)(sc->sc_rndtest,
					num, sizeof (num));
		}
	} else {
		/* non-7811 parts: a single data register, one word/poll */
		num[0] = READ_REG_1(sc, HIFN_1_RNG_DATA);

		/* NB: discard first data read */
		if (sc->sc_rngfirst)
			sc->sc_rngfirst = 0;
		else
			(*sc->sc_harvest)(sc->sc_rndtest,
				num, sizeof (num[0]));
	}

	callout_reset(&sc->sc_rngto, sc->sc_rnghz, hifn_rng, sc);
#undef RANDOM_BITS
}
864
865static void
866hifn_puc_wait(struct hifn_softc *sc)
867{
868	int i;
869	int reg = HIFN_0_PUCTRL;
870
871	if (sc->sc_flags & HIFN_IS_7956) {
872		reg = HIFN_0_PUCTRL2;
873	}
874
875	for (i = 5000; i > 0; i--) {
876		DELAY(1);
877		if (!(READ_REG_0(sc, reg) & HIFN_PUCTRL_RESET))
878			break;
879	}
880	if (!i)
881		device_printf(sc->sc_dev, "proc unit did not reset\n");
882}
883
884/*
885 * Reset the processing unit.
886 */
887static void
888hifn_reset_puc(struct hifn_softc *sc)
889{
890	/* Reset processing unit */
891	int reg = HIFN_0_PUCTRL;
892
893	if (sc->sc_flags & HIFN_IS_7956) {
894		reg = HIFN_0_PUCTRL2;
895	}
896	WRITE_REG_0(sc, reg, HIFN_PUCTRL_DMAENA);
897
898	hifn_puc_wait(sc);
899}
900
901/*
902 * Set the Retry and TRDY registers; note that we set them to
903 * zero because the 7811 locks up when forced to retry (section
904 * 3.6 of "Specification Update SU-0014-04".  Not clear if we
905 * should do this for all Hifn parts, but it doesn't seem to hurt.
906 */
907static void
908hifn_set_retry(struct hifn_softc *sc)
909{
910	/* NB: RETRY only responds to 8-bit reads/writes */
911	pci_write_config(sc->sc_dev, HIFN_RETRY_TIMEOUT, 0, 1);
912	pci_write_config(sc->sc_dev, HIFN_TRDY_TIMEOUT, 0, 4);
913}
914
/*
 * Resets the board.  Values in the registers are left as is
 * from the reset (i.e. initial values are assigned elsewhere).
 */
static void
hifn_reset_board(struct hifn_softc *sc, int full)
{
	u_int32_t reg;

	/*
	 * Set polling in the DMA configuration register to zero.  0x7 avoids
	 * resetting the board and zeros out the other fields.
	 */
	WRITE_REG_1(sc, HIFN_1_DMA_CNFG, HIFN_DMACNFG_MSTRESET |
	    HIFN_DMACNFG_DMARESET | HIFN_DMACNFG_MODE);

	/*
	 * Now that polling has been disabled, we have to wait 1 ms
	 * before resetting the board.
	 */
	DELAY(1000);

	/* Reset the DMA unit */
	if (full) {
		WRITE_REG_1(sc, HIFN_1_DMA_CNFG, HIFN_DMACNFG_MODE);
		DELAY(1000);
	} else {
		WRITE_REG_1(sc, HIFN_1_DMA_CNFG,
		    HIFN_DMACNFG_MODE | HIFN_DMACNFG_MSTRESET);
		hifn_reset_puc(sc);
	}

	KASSERT(sc->sc_dma != NULL, ("hifn_reset_board: null DMA tag!"));
	/* Descriptor rings are meaningless after a reset; start clean. */
	bzero(sc->sc_dma, sizeof(*sc->sc_dma));

	/* Bring dma unit out of reset */
	WRITE_REG_1(sc, HIFN_1_DMA_CNFG, HIFN_DMACNFG_MSTRESET |
	    HIFN_DMACNFG_DMARESET | HIFN_DMACNFG_MODE);

	hifn_puc_wait(sc);
	hifn_set_retry(sc);

	if (sc->sc_flags & HIFN_IS_7811) {
		/* Wait up to ~1s for the 7811 context RAM init to finish. */
		for (reg = 0; reg < 1000; reg++) {
			if (READ_REG_1(sc, HIFN_1_7811_MIPSRST) &
			    HIFN_MIPSRST_CRAMINIT)
				break;
			DELAY(1000);
		}
		if (reg == 1000)
			printf(": cram init timeout\n");
	} else {
	  /* set up DMA configuration register #2 */
	  /* turn off all PK and BAR0 swaps */
	  WRITE_REG_1(sc, HIFN_1_DMA_CNFG2,
		      (3 << HIFN_DMACNFG2_INIT_WRITE_BURST_SHIFT)|
		      (3 << HIFN_DMACNFG2_INIT_READ_BURST_SHIFT)|
		      (2 << HIFN_DMACNFG2_TGT_WRITE_BURST_SHIFT)|
		      (2 << HIFN_DMACNFG2_TGT_READ_BURST_SHIFT));
	}

}
977
/*
 * Advance the crypto-enable handshake signature by cnt steps.
 * Each step is one shift of a 32-bit LFSR: the next input bit is
 * the parity of the current value masked with the tap polynomial
 * 0x80080125, XORed into the left-shifted state.
 */
static u_int32_t
hifn_next_signature(u_int32_t a, u_int cnt)
{
	u_int step;

	for (step = 0; step < cnt; step++) {
		u_int32_t taps = a & 0x80080125;
		u_int32_t parity = 0;

		/* parity = XOR of all bits selected by the tap mask */
		while (taps != 0) {
			parity ^= taps & 1;
			taps >>= 1;
		}

		a = parity ^ (a << 1);
	}

	return a;
}
999
/*
 * Table mapping PCI vendor/product pairs to the per-card offset
 * table ("card_id") consumed by hifn_enable_crypto() for the
 * crypto-enable handshake.  NB: every entry in this published
 * driver carries an all-zeros table.
 */
struct pci2id {
	u_short		pci_vendor;	/* PCI vendor ID */
	u_short		pci_prod;	/* PCI product ID */
	char		card_id[13];	/* handshake offset table */
};
static struct pci2id pci2id[] = {
	{
		PCI_VENDOR_HIFN,
		PCI_PRODUCT_HIFN_7951,
		{ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
		  0x00, 0x00, 0x00, 0x00, 0x00 }
	}, {
		PCI_VENDOR_HIFN,
		PCI_PRODUCT_HIFN_7955,
		{ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
		  0x00, 0x00, 0x00, 0x00, 0x00 }
	}, {
		PCI_VENDOR_HIFN,
		PCI_PRODUCT_HIFN_7956,
		{ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
		  0x00, 0x00, 0x00, 0x00, 0x00 }
	}, {
		PCI_VENDOR_NETSEC,
		PCI_PRODUCT_NETSEC_7751,
		{ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
		  0x00, 0x00, 0x00, 0x00, 0x00 }
	}, {
		PCI_VENDOR_INVERTEX,
		PCI_PRODUCT_INVERTEX_AEON,
		{ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
		  0x00, 0x00, 0x00, 0x00, 0x00 }
	}, {
		PCI_VENDOR_HIFN,
		PCI_PRODUCT_HIFN_7811,
		{ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
		  0x00, 0x00, 0x00, 0x00, 0x00 }
	}, {
		/*
		 * Other vendors share this PCI ID as well, such as
		 * http://www.powercrypt.com, and obviously they also
		 * use the same key.
		 */
		PCI_VENDOR_HIFN,
		PCI_PRODUCT_HIFN_7751,
		{ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
		  0x00, 0x00, 0x00, 0x00, 0x00 }
	},
};
1048
1049/*
1050 * Checks to see if crypto is already enabled.  If crypto isn't enable,
1051 * "hifn_enable_crypto" is called to enable it.  The check is important,
1052 * as enabling crypto twice will lock the board.
1053 */
static int
hifn_enable_crypto(struct hifn_softc *sc)
{
	u_int32_t dmacfg, ramcfg, encl, addr, i;
	char *offtbl = NULL;

	/* Look up the unlock key for this vendor/device pair. */
	for (i = 0; i < sizeof(pci2id)/sizeof(pci2id[0]); i++) {
		if (pci2id[i].pci_vendor == pci_get_vendor(sc->sc_dev) &&
		    pci2id[i].pci_prod == pci_get_device(sc->sc_dev)) {
			offtbl = pci2id[i].card_id;
			break;
		}
	}
	if (offtbl == NULL) {
		device_printf(sc->sc_dev, "Unknown card!\n");
		return (1);
	}

	/* Save both config registers; restored at "report:" below. */
	ramcfg = READ_REG_0(sc, HIFN_0_PUCNFG);
	dmacfg = READ_REG_1(sc, HIFN_1_DMA_CNFG);

	/*
	 * The RAM config register's encrypt level bit needs to be set before
	 * every read performed on the encryption level register.
	 */
	WRITE_REG_0(sc, HIFN_0_PUCNFG, ramcfg | HIFN_PUCNFG_CHIPID);

	encl = READ_REG_0(sc, HIFN_0_PUSTAT) & HIFN_PUSTAT_CHIPENA;

	/*
	 * Make sure we don't re-unlock.  Two unlocks kills chip until the
	 * next reboot.
	 */
	if (encl == HIFN_PUSTAT_ENA_1 || encl == HIFN_PUSTAT_ENA_2) {
#ifdef HIFN_DEBUG
		if (hifn_debug)
			device_printf(sc->sc_dev,
			    "Strong crypto already enabled!\n");
#endif
		goto report;
	}

	if (encl != 0 && encl != HIFN_PUSTAT_ENA_0) {
#ifdef HIFN_DEBUG
		if (hifn_debug)
			device_printf(sc->sc_dev,
			      "Unknown encryption level 0x%x\n", encl);
#endif
		return 1;
	}

	/*
	 * Unlock sequence: switch the DMA engine into unlock mode, read
	 * the chip's secret seed, prime SECRET2 with zero, then write the
	 * 13 signature values derived from the card id via
	 * hifn_next_signature().  The 1 ms DELAYs between register
	 * accesses are part of the sequence -- do not remove them.
	 */
	WRITE_REG_1(sc, HIFN_1_DMA_CNFG, HIFN_DMACNFG_UNLOCK |
	    HIFN_DMACNFG_MSTRESET | HIFN_DMACNFG_DMARESET | HIFN_DMACNFG_MODE);
	DELAY(1000);
	addr = READ_REG_1(sc, HIFN_UNLOCK_SECRET1);
	DELAY(1000);
	WRITE_REG_1(sc, HIFN_UNLOCK_SECRET2, 0);
	DELAY(1000);

	for (i = 0; i <= 12; i++) {
		addr = hifn_next_signature(addr, offtbl[i] + 0x101);
		WRITE_REG_1(sc, HIFN_UNLOCK_SECRET2, addr);

		DELAY(1000);
	}

	/* Re-read the encryption level to see whether the unlock took. */
	WRITE_REG_0(sc, HIFN_0_PUCNFG, ramcfg | HIFN_PUCNFG_CHIPID);
	encl = READ_REG_0(sc, HIFN_0_PUSTAT) & HIFN_PUSTAT_CHIPENA;

#ifdef HIFN_DEBUG
	if (hifn_debug) {
		if (encl != HIFN_PUSTAT_ENA_1 && encl != HIFN_PUSTAT_ENA_2)
			device_printf(sc->sc_dev, "Engine is permanently "
				"locked until next system reset!\n");
		else
			device_printf(sc->sc_dev, "Engine enabled "
				"successfully!\n");
	}
#endif

report:
	/* Restore the configuration registers saved on entry. */
	WRITE_REG_0(sc, HIFN_0_PUCNFG, ramcfg);
	WRITE_REG_1(sc, HIFN_1_DMA_CNFG, dmacfg);

	switch (encl) {
	case HIFN_PUSTAT_ENA_1:
	case HIFN_PUSTAT_ENA_2:
		break;
	case HIFN_PUSTAT_ENA_0:
	default:
		/*
		 * NOTE(review): deliberately no newline -- presumably the
		 * caller continues this attach-time line; confirm.
		 */
		device_printf(sc->sc_dev, "disabled");
		break;
	}

	return 0;
}
1150
1151/*
1152 * Give initial values to the registers listed in the "Register Space"
1153 * section of the HIFN Software Development reference manual.
1154 */
static void
hifn_init_pci_registers(struct hifn_softc *sc)
{
	/* write fixed values needed by the Initialization registers */
	WRITE_REG_0(sc, HIFN_0_PUCTRL, HIFN_PUCTRL_DMAENA);
	WRITE_REG_0(sc, HIFN_0_FIFOCNFG, HIFN_FIFOCNFG_THRESHOLD);
	WRITE_REG_0(sc, HIFN_0_PUIER, HIFN_PUIER_DSTOVER);

	/* write all 4 ring address registers */
	WRITE_REG_1(sc, HIFN_1_DMA_CRAR, sc->sc_dma_physaddr +
	    offsetof(struct hifn_dma, cmdr[0]));
	WRITE_REG_1(sc, HIFN_1_DMA_SRAR, sc->sc_dma_physaddr +
	    offsetof(struct hifn_dma, srcr[0]));
	WRITE_REG_1(sc, HIFN_1_DMA_DRAR, sc->sc_dma_physaddr +
	    offsetof(struct hifn_dma, dstr[0]));
	WRITE_REG_1(sc, HIFN_1_DMA_RRAR, sc->sc_dma_physaddr +
	    offsetof(struct hifn_dma, resr[0]));

	/* NOTE(review): 2 ms settle delay; reason not documented here. */
	DELAY(2000);

	/*
	 * write status register: disable all four DMA channels and write
	 * back every status condition (presumably write-1-to-clear --
	 * confirm with the chip documentation).  The 7811 additionally
	 * has illegal-read/illegal-write indications; the 795x parts a
	 * public-key-done bit.
	 */
	WRITE_REG_1(sc, HIFN_1_DMA_CSR,
	    HIFN_DMACSR_D_CTRL_DIS | HIFN_DMACSR_R_CTRL_DIS |
	    HIFN_DMACSR_S_CTRL_DIS | HIFN_DMACSR_C_CTRL_DIS |
	    HIFN_DMACSR_D_ABORT | HIFN_DMACSR_D_DONE | HIFN_DMACSR_D_LAST |
	    HIFN_DMACSR_D_WAIT | HIFN_DMACSR_D_OVER |
	    HIFN_DMACSR_R_ABORT | HIFN_DMACSR_R_DONE | HIFN_DMACSR_R_LAST |
	    HIFN_DMACSR_R_WAIT | HIFN_DMACSR_R_OVER |
	    HIFN_DMACSR_S_ABORT | HIFN_DMACSR_S_DONE | HIFN_DMACSR_S_LAST |
	    HIFN_DMACSR_S_WAIT |
	    HIFN_DMACSR_C_ABORT | HIFN_DMACSR_C_DONE | HIFN_DMACSR_C_LAST |
	    HIFN_DMACSR_C_WAIT |
	    HIFN_DMACSR_ENGINE |
	    ((sc->sc_flags & HIFN_HAS_PUBLIC) ?
		HIFN_DMACSR_PUBDONE : 0) |
	    ((sc->sc_flags & HIFN_IS_7811) ?
		HIFN_DMACSR_ILLW | HIFN_DMACSR_ILLR : 0));

	sc->sc_d_busy = sc->sc_r_busy = sc->sc_s_busy = sc->sc_c_busy = 0;
	/*
	 * Interrupt mask: result-done plus abort/overrun conditions.
	 * The command-wait interrupt is explicitly kept off here; it is
	 * enabled on demand when multiple commands are queued.
	 */
	sc->sc_dmaier |= HIFN_DMAIER_R_DONE | HIFN_DMAIER_C_ABORT |
	    HIFN_DMAIER_D_OVER | HIFN_DMAIER_R_OVER |
	    HIFN_DMAIER_S_ABORT | HIFN_DMAIER_D_ABORT | HIFN_DMAIER_R_ABORT |
	    ((sc->sc_flags & HIFN_IS_7811) ?
		HIFN_DMAIER_ILLW | HIFN_DMAIER_ILLR : 0);
	sc->sc_dmaier &= ~HIFN_DMAIER_C_WAIT;
	WRITE_REG_1(sc, HIFN_1_DMA_IER, sc->sc_dmaier);


	if (sc->sc_flags & HIFN_IS_7956) {
		u_int32_t pll;

		WRITE_REG_0(sc, HIFN_0_PUCNFG, HIFN_PUCNFG_COMPSING |
		    HIFN_PUCNFG_TCALLPHASES |
		    HIFN_PUCNFG_TCDRVTOTEM | HIFN_PUCNFG_BUS32);

		/* turn off the clocks and insure bypass is set */
		pll = READ_REG_1(sc, HIFN_1_PLL);
		pll = (pll &~ (HIFN_PLL_PK_CLK_SEL | HIFN_PLL_PE_CLK_SEL))
		  | HIFN_PLL_BP | HIFN_PLL_MBSET;
		WRITE_REG_1(sc, HIFN_1_PLL, pll);
		DELAY(10*1000);		/* 10ms */

		/* change configuration */
		pll = (pll &~ HIFN_PLL_CONFIG) | sc->sc_pllconfig;
		WRITE_REG_1(sc, HIFN_1_PLL, pll);
		DELAY(10*1000);		/* 10ms */

		/* disable bypass */
		pll &= ~HIFN_PLL_BP;
		WRITE_REG_1(sc, HIFN_1_PLL, pll);
		/* enable clocks with new configuration */
		pll |= HIFN_PLL_PK_CLK_SEL | HIFN_PLL_PE_CLK_SEL;
		WRITE_REG_1(sc, HIFN_1_PLL, pll);
	} else {
		/* non-795x: select RAM type discovered by hifn_ramtype() */
		WRITE_REG_0(sc, HIFN_0_PUCNFG, HIFN_PUCNFG_COMPSING |
		    HIFN_PUCNFG_DRFR_128 | HIFN_PUCNFG_TCALLPHASES |
		    HIFN_PUCNFG_TCDRVTOTEM | HIFN_PUCNFG_BUS32 |
		    (sc->sc_drammodel ? HIFN_PUCNFG_DRAM : HIFN_PUCNFG_SRAM));
	}

	WRITE_REG_0(sc, HIFN_0_PUISR, HIFN_PUISR_DSTOVER);
	WRITE_REG_1(sc, HIFN_1_DMA_CNFG, HIFN_DMACNFG_MSTRESET |
	    HIFN_DMACNFG_DMARESET | HIFN_DMACNFG_MODE | HIFN_DMACNFG_LAST |
	    ((HIFN_POLL_FREQUENCY << 16 ) & HIFN_DMACNFG_POLLFREQ) |
	    ((HIFN_POLL_SCALAR << 8) & HIFN_DMACNFG_POLLINVAL));
}
1241
1242/*
1243 * The maximum number of sessions supported by the card
1244 * is dependent on the amount of context ram, which
1245 * encryption algorithms are enabled, and how compression
1246 * is configured.  This should be configured before this
1247 * routine is called.
1248 */
1249static void
1250hifn_sessions(struct hifn_softc *sc)
1251{
1252	u_int32_t pucnfg;
1253	int ctxsize;
1254
1255	pucnfg = READ_REG_0(sc, HIFN_0_PUCNFG);
1256
1257	if (pucnfg & HIFN_PUCNFG_COMPSING) {
1258		if (pucnfg & HIFN_PUCNFG_ENCCNFG)
1259			ctxsize = 128;
1260		else
1261			ctxsize = 512;
1262		/*
1263		 * 7955/7956 has internal context memory of 32K
1264		 */
1265		if (sc->sc_flags & HIFN_IS_7956)
1266			sc->sc_maxses = 32768 / ctxsize;
1267		else
1268			sc->sc_maxses = 1 +
1269			    ((sc->sc_ramsize - 32768) / ctxsize);
1270	} else
1271		sc->sc_maxses = sc->sc_ramsize / 16384;
1272
1273	if (sc->sc_maxses > 2048)
1274		sc->sc_maxses = 2048;
1275}
1276
1277/*
1278 * Determine ram type (sram or dram).  Board should be just out of a reset
1279 * state when this is called.
1280 */
1281static int
1282hifn_ramtype(struct hifn_softc *sc)
1283{
1284	u_int8_t data[8], dataexpect[8];
1285	int i;
1286
1287	for (i = 0; i < sizeof(data); i++)
1288		data[i] = dataexpect[i] = 0x55;
1289	if (hifn_writeramaddr(sc, 0, data))
1290		return (-1);
1291	if (hifn_readramaddr(sc, 0, data))
1292		return (-1);
1293	if (bcmp(data, dataexpect, sizeof(data)) != 0) {
1294		sc->sc_drammodel = 1;
1295		return (0);
1296	}
1297
1298	for (i = 0; i < sizeof(data); i++)
1299		data[i] = dataexpect[i] = 0xaa;
1300	if (hifn_writeramaddr(sc, 0, data))
1301		return (-1);
1302	if (hifn_readramaddr(sc, 0, data))
1303		return (-1);
1304	if (bcmp(data, dataexpect, sizeof(data)) != 0) {
1305		sc->sc_drammodel = 1;
1306		return (0);
1307	}
1308
1309	return (0);
1310}
1311
#define	HIFN_SRAM_MAX		(32 << 20)
#define	HIFN_SRAM_STEP_SIZE	16384
#define	HIFN_SRAM_GRANULARITY	(HIFN_SRAM_MAX / HIFN_SRAM_STEP_SIZE)

/*
 * Size the SRAM by tagging the start of every 16KB step with its step
 * index (written highest address first), then verifying the tags from
 * low to high.  If the part is smaller than the probed range, writes
 * to higher addresses presumably alias over lower ones, so the first
 * step whose tag does not read back intact ends the scan; sc_ramsize
 * keeps the highest verified boundary.  Always returns 0.
 */
static int
hifn_sramsize(struct hifn_softc *sc)
{
	u_int32_t a;
	u_int8_t data[8];
	u_int8_t dataexpect[sizeof(data)];
	int32_t i;

	/* Bytes 4..7 carry a fixed pattern; bytes 0..3 get the step index
	 * (overwritten by the bcopy()s below). */
	for (i = 0; i < sizeof(data); i++)
		data[i] = dataexpect[i] = i ^ 0x5a;

	/* Write phase: top of the probe range first. */
	for (i = HIFN_SRAM_GRANULARITY - 1; i >= 0; i--) {
		a = i * HIFN_SRAM_STEP_SIZE;
		bcopy(&i, data, sizeof(i));
		hifn_writeramaddr(sc, a, data);
	}

	/* Verify phase: stop at the first read failure or mismatch. */
	for (i = 0; i < HIFN_SRAM_GRANULARITY; i++) {
		a = i * HIFN_SRAM_STEP_SIZE;
		bcopy(&i, dataexpect, sizeof(i));
		if (hifn_readramaddr(sc, a, data) < 0)
			return (0);
		if (bcmp(data, dataexpect, sizeof(data)) != 0)
			return (0);
		sc->sc_ramsize = a + HIFN_SRAM_STEP_SIZE;
	}

	return (0);
}
1345
1346/*
1347 * XXX For dram boards, one should really try all of the
1348 * HIFN_PUCNFG_DSZ_*'s.  This just assumes that PUCNFG
1349 * is already set up correctly.
1350 */
1351static int
1352hifn_dramsize(struct hifn_softc *sc)
1353{
1354	u_int32_t cnfg;
1355
1356	if (sc->sc_flags & HIFN_IS_7956) {
1357		/*
1358		 * 7955/7956 have a fixed internal ram of only 32K.
1359		 */
1360		sc->sc_ramsize = 32768;
1361	} else {
1362		cnfg = READ_REG_0(sc, HIFN_0_PUCNFG) &
1363		    HIFN_PUCNFG_DRAMMASK;
1364		sc->sc_ramsize = 1 << ((cnfg >> 13) + 18);
1365	}
1366	return (0);
1367}
1368
/*
 * Claim the next descriptor in each of the four rings (command, source,
 * destination, result), returning the claimed indices via
 * *cmdp/*srcp/*dstp/*resp.  When a producer index has reached the end
 * of its ring, the trailing jump descriptor is re-armed (VALID|JUMP,
 * done-irq masked) so the chip follows it back to index 0, and the
 * index wraps.  The *k indices are kept shadowing the producer indices.
 * NOTE(review): no locking here -- assumes the caller serializes ring
 * access; confirm against callers.
 */
static void
hifn_alloc_slot(struct hifn_softc *sc, int *cmdp, int *srcp, int *dstp, int *resp)
{
	struct hifn_dma *dma = sc->sc_dma;

	/* Command ring. */
	if (dma->cmdi == HIFN_D_CMD_RSIZE) {
		dma->cmdi = 0;
		dma->cmdr[HIFN_D_CMD_RSIZE].l = htole32(HIFN_D_VALID |
		    HIFN_D_JUMP | HIFN_D_MASKDONEIRQ);
		HIFN_CMDR_SYNC(sc, HIFN_D_CMD_RSIZE,
		    BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
	}
	*cmdp = dma->cmdi++;
	dma->cmdk = dma->cmdi;

	/* Source ring. */
	if (dma->srci == HIFN_D_SRC_RSIZE) {
		dma->srci = 0;
		dma->srcr[HIFN_D_SRC_RSIZE].l = htole32(HIFN_D_VALID |
		    HIFN_D_JUMP | HIFN_D_MASKDONEIRQ);
		HIFN_SRCR_SYNC(sc, HIFN_D_SRC_RSIZE,
		    BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
	}
	*srcp = dma->srci++;
	dma->srck = dma->srci;

	/* Destination ring. */
	if (dma->dsti == HIFN_D_DST_RSIZE) {
		dma->dsti = 0;
		dma->dstr[HIFN_D_DST_RSIZE].l = htole32(HIFN_D_VALID |
		    HIFN_D_JUMP | HIFN_D_MASKDONEIRQ);
		HIFN_DSTR_SYNC(sc, HIFN_D_DST_RSIZE,
		    BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
	}
	*dstp = dma->dsti++;
	dma->dstk = dma->dsti;

	/* Result ring. */
	if (dma->resi == HIFN_D_RES_RSIZE) {
		dma->resi = 0;
		dma->resr[HIFN_D_RES_RSIZE].l = htole32(HIFN_D_VALID |
		    HIFN_D_JUMP | HIFN_D_MASKDONEIRQ);
		HIFN_RESR_SYNC(sc, HIFN_D_RES_RSIZE,
		    BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
	}
	*resp = dma->resi++;
	dma->resk = dma->resi;
}
1414
1415static int
1416hifn_writeramaddr(struct hifn_softc *sc, int addr, u_int8_t *data)
1417{
1418	struct hifn_dma *dma = sc->sc_dma;
1419	hifn_base_command_t wc;
1420	const u_int32_t masks = HIFN_D_VALID | HIFN_D_LAST | HIFN_D_MASKDONEIRQ;
1421	int r, cmdi, resi, srci, dsti;
1422
1423	wc.masks = htole16(3 << 13);
1424	wc.session_num = htole16(addr >> 14);
1425	wc.total_source_count = htole16(8);
1426	wc.total_dest_count = htole16(addr & 0x3fff);
1427
1428	hifn_alloc_slot(sc, &cmdi, &srci, &dsti, &resi);
1429
1430	WRITE_REG_1(sc, HIFN_1_DMA_CSR,
1431	    HIFN_DMACSR_C_CTRL_ENA | HIFN_DMACSR_S_CTRL_ENA |
1432	    HIFN_DMACSR_D_CTRL_ENA | HIFN_DMACSR_R_CTRL_ENA);
1433
1434	/* build write command */
1435	bzero(dma->command_bufs[cmdi], HIFN_MAX_COMMAND);
1436	*(hifn_base_command_t *)dma->command_bufs[cmdi] = wc;
1437	bcopy(data, &dma->test_src, sizeof(dma->test_src));
1438
1439	dma->srcr[srci].p = htole32(sc->sc_dma_physaddr
1440	    + offsetof(struct hifn_dma, test_src));
1441	dma->dstr[dsti].p = htole32(sc->sc_dma_physaddr
1442	    + offsetof(struct hifn_dma, test_dst));
1443
1444	dma->cmdr[cmdi].l = htole32(16 | masks);
1445	dma->srcr[srci].l = htole32(8 | masks);
1446	dma->dstr[dsti].l = htole32(4 | masks);
1447	dma->resr[resi].l = htole32(4 | masks);
1448
1449	bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap,
1450	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1451
1452	for (r = 10000; r >= 0; r--) {
1453		DELAY(10);
1454		bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap,
1455		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1456		if ((dma->resr[resi].l & htole32(HIFN_D_VALID)) == 0)
1457			break;
1458		bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap,
1459		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1460	}
1461	if (r == 0) {
1462		device_printf(sc->sc_dev, "writeramaddr -- "
1463		    "result[%d](addr %d) still valid\n", resi, addr);
1464		r = -1;
1465		return (-1);
1466	} else
1467		r = 0;
1468
1469	WRITE_REG_1(sc, HIFN_1_DMA_CSR,
1470	    HIFN_DMACSR_C_CTRL_DIS | HIFN_DMACSR_S_CTRL_DIS |
1471	    HIFN_DMACSR_D_CTRL_DIS | HIFN_DMACSR_R_CTRL_DIS);
1472
1473	return (r);
1474}
1475
1476static int
1477hifn_readramaddr(struct hifn_softc *sc, int addr, u_int8_t *data)
1478{
1479	struct hifn_dma *dma = sc->sc_dma;
1480	hifn_base_command_t rc;
1481	const u_int32_t masks = HIFN_D_VALID | HIFN_D_LAST | HIFN_D_MASKDONEIRQ;
1482	int r, cmdi, srci, dsti, resi;
1483
1484	rc.masks = htole16(2 << 13);
1485	rc.session_num = htole16(addr >> 14);
1486	rc.total_source_count = htole16(addr & 0x3fff);
1487	rc.total_dest_count = htole16(8);
1488
1489	hifn_alloc_slot(sc, &cmdi, &srci, &dsti, &resi);
1490
1491	WRITE_REG_1(sc, HIFN_1_DMA_CSR,
1492	    HIFN_DMACSR_C_CTRL_ENA | HIFN_DMACSR_S_CTRL_ENA |
1493	    HIFN_DMACSR_D_CTRL_ENA | HIFN_DMACSR_R_CTRL_ENA);
1494
1495	bzero(dma->command_bufs[cmdi], HIFN_MAX_COMMAND);
1496	*(hifn_base_command_t *)dma->command_bufs[cmdi] = rc;
1497
1498	dma->srcr[srci].p = htole32(sc->sc_dma_physaddr +
1499	    offsetof(struct hifn_dma, test_src));
1500	dma->test_src = 0;
1501	dma->dstr[dsti].p =  htole32(sc->sc_dma_physaddr +
1502	    offsetof(struct hifn_dma, test_dst));
1503	dma->test_dst = 0;
1504	dma->cmdr[cmdi].l = htole32(8 | masks);
1505	dma->srcr[srci].l = htole32(8 | masks);
1506	dma->dstr[dsti].l = htole32(8 | masks);
1507	dma->resr[resi].l = htole32(HIFN_MAX_RESULT | masks);
1508
1509	bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap,
1510	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1511
1512	for (r = 10000; r >= 0; r--) {
1513		DELAY(10);
1514		bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap,
1515		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1516		if ((dma->resr[resi].l & htole32(HIFN_D_VALID)) == 0)
1517			break;
1518		bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap,
1519		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1520	}
1521	if (r == 0) {
1522		device_printf(sc->sc_dev, "readramaddr -- "
1523		    "result[%d](addr %d) still valid\n", resi, addr);
1524		r = -1;
1525	} else {
1526		r = 0;
1527		bcopy(&dma->test_dst, data, sizeof(dma->test_dst));
1528	}
1529
1530	WRITE_REG_1(sc, HIFN_1_DMA_CSR,
1531	    HIFN_DMACSR_C_CTRL_DIS | HIFN_DMACSR_S_CTRL_DIS |
1532	    HIFN_DMACSR_D_CTRL_DIS | HIFN_DMACSR_R_CTRL_DIS);
1533
1534	return (r);
1535}
1536
1537/*
1538 * Initialize the descriptor rings.
1539 */
1540static void
1541hifn_init_dma(struct hifn_softc *sc)
1542{
1543	struct hifn_dma *dma = sc->sc_dma;
1544	int i;
1545
1546	hifn_set_retry(sc);
1547
1548	/* initialize static pointer values */
1549	for (i = 0; i < HIFN_D_CMD_RSIZE; i++)
1550		dma->cmdr[i].p = htole32(sc->sc_dma_physaddr +
1551		    offsetof(struct hifn_dma, command_bufs[i][0]));
1552	for (i = 0; i < HIFN_D_RES_RSIZE; i++)
1553		dma->resr[i].p = htole32(sc->sc_dma_physaddr +
1554		    offsetof(struct hifn_dma, result_bufs[i][0]));
1555
1556	dma->cmdr[HIFN_D_CMD_RSIZE].p =
1557	    htole32(sc->sc_dma_physaddr + offsetof(struct hifn_dma, cmdr[0]));
1558	dma->srcr[HIFN_D_SRC_RSIZE].p =
1559	    htole32(sc->sc_dma_physaddr + offsetof(struct hifn_dma, srcr[0]));
1560	dma->dstr[HIFN_D_DST_RSIZE].p =
1561	    htole32(sc->sc_dma_physaddr + offsetof(struct hifn_dma, dstr[0]));
1562	dma->resr[HIFN_D_RES_RSIZE].p =
1563	    htole32(sc->sc_dma_physaddr + offsetof(struct hifn_dma, resr[0]));
1564
1565	dma->cmdu = dma->srcu = dma->dstu = dma->resu = 0;
1566	dma->cmdi = dma->srci = dma->dsti = dma->resi = 0;
1567	dma->cmdk = dma->srck = dma->dstk = dma->resk = 0;
1568}
1569
1570/*
1571 * Writes out the raw command buffer space.  Returns the
1572 * command buffer size.
1573 */
1574static u_int
1575hifn_write_command(struct hifn_command *cmd, u_int8_t *buf)
1576{
1577	u_int8_t *buf_pos;
1578	hifn_base_command_t *base_cmd;
1579	hifn_mac_command_t *mac_cmd;
1580	hifn_crypt_command_t *cry_cmd;
1581	int using_mac, using_crypt, len, ivlen;
1582	u_int32_t dlen, slen;
1583
1584	buf_pos = buf;
1585	using_mac = cmd->base_masks & HIFN_BASE_CMD_MAC;
1586	using_crypt = cmd->base_masks & HIFN_BASE_CMD_CRYPT;
1587
1588	base_cmd = (hifn_base_command_t *)buf_pos;
1589	base_cmd->masks = htole16(cmd->base_masks);
1590	slen = cmd->src_mapsize;
1591	if (cmd->sloplen)
1592		dlen = cmd->dst_mapsize - cmd->sloplen + sizeof(u_int32_t);
1593	else
1594		dlen = cmd->dst_mapsize;
1595	base_cmd->total_source_count = htole16(slen & HIFN_BASE_CMD_LENMASK_LO);
1596	base_cmd->total_dest_count = htole16(dlen & HIFN_BASE_CMD_LENMASK_LO);
1597	dlen >>= 16;
1598	slen >>= 16;
1599	base_cmd->session_num = htole16(
1600	    ((slen << HIFN_BASE_CMD_SRCLEN_S) & HIFN_BASE_CMD_SRCLEN_M) |
1601	    ((dlen << HIFN_BASE_CMD_DSTLEN_S) & HIFN_BASE_CMD_DSTLEN_M));
1602	buf_pos += sizeof(hifn_base_command_t);
1603
1604	if (using_mac) {
1605		mac_cmd = (hifn_mac_command_t *)buf_pos;
1606		dlen = cmd->maccrd->crd_len;
1607		mac_cmd->source_count = htole16(dlen & 0xffff);
1608		dlen >>= 16;
1609		mac_cmd->masks = htole16(cmd->mac_masks |
1610		    ((dlen << HIFN_MAC_CMD_SRCLEN_S) & HIFN_MAC_CMD_SRCLEN_M));
1611		mac_cmd->header_skip = htole16(cmd->maccrd->crd_skip);
1612		mac_cmd->reserved = 0;
1613		buf_pos += sizeof(hifn_mac_command_t);
1614	}
1615
1616	if (using_crypt) {
1617		cry_cmd = (hifn_crypt_command_t *)buf_pos;
1618		dlen = cmd->enccrd->crd_len;
1619		cry_cmd->source_count = htole16(dlen & 0xffff);
1620		dlen >>= 16;
1621		cry_cmd->masks = htole16(cmd->cry_masks |
1622		    ((dlen << HIFN_CRYPT_CMD_SRCLEN_S) & HIFN_CRYPT_CMD_SRCLEN_M));
1623		cry_cmd->header_skip = htole16(cmd->enccrd->crd_skip);
1624		cry_cmd->reserved = 0;
1625		buf_pos += sizeof(hifn_crypt_command_t);
1626	}
1627
1628	if (using_mac && cmd->mac_masks & HIFN_MAC_CMD_NEW_KEY) {
1629		bcopy(cmd->mac, buf_pos, HIFN_MAC_KEY_LENGTH);
1630		buf_pos += HIFN_MAC_KEY_LENGTH;
1631	}
1632
1633	if (using_crypt && cmd->cry_masks & HIFN_CRYPT_CMD_NEW_KEY) {
1634		switch (cmd->cry_masks & HIFN_CRYPT_CMD_ALG_MASK) {
1635		case HIFN_CRYPT_CMD_ALG_3DES:
1636			bcopy(cmd->ck, buf_pos, HIFN_3DES_KEY_LENGTH);
1637			buf_pos += HIFN_3DES_KEY_LENGTH;
1638			break;
1639		case HIFN_CRYPT_CMD_ALG_DES:
1640			bcopy(cmd->ck, buf_pos, HIFN_DES_KEY_LENGTH);
1641			buf_pos += HIFN_DES_KEY_LENGTH;
1642			break;
1643		case HIFN_CRYPT_CMD_ALG_RC4:
1644			len = 256;
1645			do {
1646				int clen;
1647
1648				clen = MIN(cmd->cklen, len);
1649				bcopy(cmd->ck, buf_pos, clen);
1650				len -= clen;
1651				buf_pos += clen;
1652			} while (len > 0);
1653			bzero(buf_pos, 4);
1654			buf_pos += 4;
1655			break;
1656		case HIFN_CRYPT_CMD_ALG_AES:
1657			/*
1658			 * AES keys are variable 128, 192 and
1659			 * 256 bits (16, 24 and 32 bytes).
1660			 */
1661			bcopy(cmd->ck, buf_pos, cmd->cklen);
1662			buf_pos += cmd->cklen;
1663			break;
1664		}
1665	}
1666
1667	if (using_crypt && cmd->cry_masks & HIFN_CRYPT_CMD_NEW_IV) {
1668		switch (cmd->cry_masks & HIFN_CRYPT_CMD_ALG_MASK) {
1669		case HIFN_CRYPT_CMD_ALG_AES:
1670			ivlen = HIFN_AES_IV_LENGTH;
1671			break;
1672		default:
1673			ivlen = HIFN_IV_LENGTH;
1674			break;
1675		}
1676		bcopy(cmd->iv, buf_pos, ivlen);
1677		buf_pos += ivlen;
1678	}
1679
1680	if ((cmd->base_masks & (HIFN_BASE_CMD_MAC|HIFN_BASE_CMD_CRYPT)) == 0) {
1681		bzero(buf_pos, 8);
1682		buf_pos += 8;
1683	}
1684
1685	return (buf_pos - buf);
1686}
1687
1688static int
1689hifn_dmamap_aligned(struct hifn_operand *op)
1690{
1691	int i;
1692
1693	for (i = 0; i < op->nsegs; i++) {
1694		if (op->segs[i].ds_addr & 3)
1695			return (0);
1696		if ((i != (op->nsegs - 1)) && (op->segs[i].ds_len & 3))
1697			return (0);
1698	}
1699	return (1);
1700}
1701
1702static __inline int
1703hifn_dmamap_dstwrap(struct hifn_softc *sc, int idx)
1704{
1705	struct hifn_dma *dma = sc->sc_dma;
1706
1707	if (++idx == HIFN_D_DST_RSIZE) {
1708		dma->dstr[idx].l = htole32(HIFN_D_VALID | HIFN_D_JUMP |
1709		    HIFN_D_MASKDONEIRQ);
1710		HIFN_DSTR_SYNC(sc, idx,
1711		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1712		idx = 0;
1713	}
1714	return (idx);
1715}
1716
/*
 * Enter the destination operand's segments into the destination ring.
 * All but the last word-aligned piece map the segments directly; when
 * the mapping's total length is not a multiple of 4 (cmd->sloplen !=
 * 0), the final partial word is redirected into the per-command slop
 * word in the shared DMA area so the device only writes whole 4-byte
 * units, with the aligned prefix of the last segment (if any) still
 * going to the real buffer.  Advances dma->dsti, adds the number of
 * descriptors consumed to dma->dstu, and returns the new producer
 * index.
 */
static int
hifn_dmamap_load_dst(struct hifn_softc *sc, struct hifn_command *cmd)
{
	struct hifn_dma *dma = sc->sc_dma;
	struct hifn_operand *dst = &cmd->dst;
	u_int32_t p, l;
	int idx, used = 0, i;

	idx = dma->dsti;
	/* All but the final segment: plain VALID descriptors, irq masked. */
	for (i = 0; i < dst->nsegs - 1; i++) {
		dma->dstr[idx].p = htole32(dst->segs[i].ds_addr);
		dma->dstr[idx].l = htole32(HIFN_D_VALID |
		    HIFN_D_MASKDONEIRQ | dst->segs[i].ds_len);
		HIFN_DSTR_SYNC(sc, idx,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
		used++;

		idx = hifn_dmamap_dstwrap(sc, idx);
	}

	if (cmd->sloplen == 0) {
		/* Aligned: the last segment is also the LAST descriptor. */
		p = dst->segs[i].ds_addr;
		l = HIFN_D_VALID | HIFN_D_MASKDONEIRQ | HIFN_D_LAST |
		    dst->segs[i].ds_len;
	} else {
		/* Unaligned: LAST descriptor targets the 4-byte slop word. */
		p = sc->sc_dma_physaddr +
		    offsetof(struct hifn_dma, slop[cmd->slopidx]);
		l = HIFN_D_VALID | HIFN_D_MASKDONEIRQ | HIFN_D_LAST |
		    sizeof(u_int32_t);

		/* Aligned prefix of the last segment, if non-empty. */
		if ((dst->segs[i].ds_len - cmd->sloplen) != 0) {
			dma->dstr[idx].p = htole32(dst->segs[i].ds_addr);
			dma->dstr[idx].l = htole32(HIFN_D_VALID |
			    HIFN_D_MASKDONEIRQ |
			    (dst->segs[i].ds_len - cmd->sloplen));
			HIFN_DSTR_SYNC(sc, idx,
			    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
			used++;

			idx = hifn_dmamap_dstwrap(sc, idx);
		}
	}
	/* Final (LAST) descriptor chosen above. */
	dma->dstr[idx].p = htole32(p);
	dma->dstr[idx].l = htole32(l);
	HIFN_DSTR_SYNC(sc, idx, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	used++;

	idx = hifn_dmamap_dstwrap(sc, idx);

	dma->dsti = idx;
	dma->dstu += used;
	return (idx);
}
1770
1771static __inline int
1772hifn_dmamap_srcwrap(struct hifn_softc *sc, int idx)
1773{
1774	struct hifn_dma *dma = sc->sc_dma;
1775
1776	if (++idx == HIFN_D_SRC_RSIZE) {
1777		dma->srcr[idx].l = htole32(HIFN_D_VALID |
1778		    HIFN_D_JUMP | HIFN_D_MASKDONEIRQ);
1779		HIFN_SRCR_SYNC(sc, HIFN_D_SRC_RSIZE,
1780		    BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
1781		idx = 0;
1782	}
1783	return (idx);
1784}
1785
1786static int
1787hifn_dmamap_load_src(struct hifn_softc *sc, struct hifn_command *cmd)
1788{
1789	struct hifn_dma *dma = sc->sc_dma;
1790	struct hifn_operand *src = &cmd->src;
1791	int idx, i;
1792	u_int32_t last = 0;
1793
1794	idx = dma->srci;
1795	for (i = 0; i < src->nsegs; i++) {
1796		if (i == src->nsegs - 1)
1797			last = HIFN_D_LAST;
1798
1799		dma->srcr[idx].p = htole32(src->segs[i].ds_addr);
1800		dma->srcr[idx].l = htole32(src->segs[i].ds_len |
1801		    HIFN_D_VALID | HIFN_D_MASKDONEIRQ | last);
1802		HIFN_SRCR_SYNC(sc, idx,
1803		    BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
1804
1805		idx = hifn_dmamap_srcwrap(sc, idx);
1806	}
1807	dma->srci = idx;
1808	dma->srcu += src->nsegs;
1809	return (idx);
1810}
1811
/*
 * bus_dmamap_load* callback: capture the DMA segment list for an
 * operand.  Records the total mapped size and segment count, then
 * copies the segment array into the operand for later use by
 * hifn_dmamap_load_src()/hifn_dmamap_load_dst().
 * NOTE(review): "error" is not examined here; the callers appear to
 * treat a nonzero bus_dmamap_load* return as the failure signal --
 * confirm.
 */
static void
hifn_op_cb(void* arg, bus_dma_segment_t *seg, int nsegs, bus_size_t mapsize, int error)
{
	struct hifn_operand *op = arg;

	/* The operand's segs[] array is sized MAX_SCATTER. */
	KASSERT(nsegs <= MAX_SCATTER,
		("hifn_op_cb: too many DMA segments (%u > %u) "
		 "returned when mapping operand", nsegs, MAX_SCATTER));
	op->mapsize = mapsize;
	op->nsegs = nsegs;
	bcopy(seg, op->segs, nsegs * sizeof (seg[0]));
}
1824
1825static int
1826hifn_crypto(
1827	struct hifn_softc *sc,
1828	struct hifn_command *cmd,
1829	struct cryptop *crp,
1830	int hint)
1831{
1832	struct	hifn_dma *dma = sc->sc_dma;
1833	u_int32_t cmdlen, csr;
1834	int cmdi, resi, err = 0;
1835
1836	/*
1837	 * need 1 cmd, and 1 res
1838	 *
1839	 * NB: check this first since it's easy.
1840	 */
1841	HIFN_LOCK(sc);
1842	if ((dma->cmdu + 1) > HIFN_D_CMD_RSIZE ||
1843	    (dma->resu + 1) > HIFN_D_RES_RSIZE) {
1844#ifdef HIFN_DEBUG
1845		if (hifn_debug) {
1846			device_printf(sc->sc_dev,
1847				"cmd/result exhaustion, cmdu %u resu %u\n",
1848				dma->cmdu, dma->resu);
1849		}
1850#endif
1851		hifnstats.hst_nomem_cr++;
1852		HIFN_UNLOCK(sc);
1853		return (ERESTART);
1854	}
1855
1856	if (bus_dmamap_create(sc->sc_dmat, BUS_DMA_NOWAIT, &cmd->src_map)) {
1857		hifnstats.hst_nomem_map++;
1858		HIFN_UNLOCK(sc);
1859		return (ENOMEM);
1860	}
1861
1862	if (crp->crp_flags & CRYPTO_F_IMBUF) {
1863		if (bus_dmamap_load_mbuf(sc->sc_dmat, cmd->src_map,
1864		    cmd->src_m, hifn_op_cb, &cmd->src, BUS_DMA_NOWAIT)) {
1865			hifnstats.hst_nomem_load++;
1866			err = ENOMEM;
1867			goto err_srcmap1;
1868		}
1869	} else if (crp->crp_flags & CRYPTO_F_IOV) {
1870		if (bus_dmamap_load_uio(sc->sc_dmat, cmd->src_map,
1871		    cmd->src_io, hifn_op_cb, &cmd->src, BUS_DMA_NOWAIT)) {
1872			hifnstats.hst_nomem_load++;
1873			err = ENOMEM;
1874			goto err_srcmap1;
1875		}
1876	} else {
1877		err = EINVAL;
1878		goto err_srcmap1;
1879	}
1880
1881	if (hifn_dmamap_aligned(&cmd->src)) {
1882		cmd->sloplen = cmd->src_mapsize & 3;
1883		cmd->dst = cmd->src;
1884	} else {
1885		if (crp->crp_flags & CRYPTO_F_IOV) {
1886			err = EINVAL;
1887			goto err_srcmap;
1888		} else if (crp->crp_flags & CRYPTO_F_IMBUF) {
1889			int totlen, len;
1890			struct mbuf *m, *m0, *mlast;
1891
1892			KASSERT(cmd->dst_m == cmd->src_m,
1893				("hifn_crypto: dst_m initialized improperly"));
1894			hifnstats.hst_unaligned++;
1895			/*
1896			 * Source is not aligned on a longword boundary.
1897			 * Copy the data to insure alignment.  If we fail
1898			 * to allocate mbufs or clusters while doing this
1899			 * we return ERESTART so the operation is requeued
1900			 * at the crypto later, but only if there are
1901			 * ops already posted to the hardware; otherwise we
1902			 * have no guarantee that we'll be re-entered.
1903			 */
1904			totlen = cmd->src_mapsize;
1905			if (cmd->src_m->m_flags & M_PKTHDR) {
1906				len = MHLEN;
1907				MGETHDR(m0, M_DONTWAIT, MT_DATA);
1908				if (m0 && !m_dup_pkthdr(m0, cmd->src_m, M_DONTWAIT)) {
1909					m_free(m0);
1910					m0 = NULL;
1911				}
1912			} else {
1913				len = MLEN;
1914				MGET(m0, M_DONTWAIT, MT_DATA);
1915			}
1916			if (m0 == NULL) {
1917				hifnstats.hst_nomem_mbuf++;
1918				err = dma->cmdu ? ERESTART : ENOMEM;
1919				goto err_srcmap;
1920			}
1921			if (totlen >= MINCLSIZE) {
1922				MCLGET(m0, M_DONTWAIT);
1923				if ((m0->m_flags & M_EXT) == 0) {
1924					hifnstats.hst_nomem_mcl++;
1925					err = dma->cmdu ? ERESTART : ENOMEM;
1926					m_freem(m0);
1927					goto err_srcmap;
1928				}
1929				len = MCLBYTES;
1930			}
1931			totlen -= len;
1932			m0->m_pkthdr.len = m0->m_len = len;
1933			mlast = m0;
1934
1935			while (totlen > 0) {
1936				MGET(m, M_DONTWAIT, MT_DATA);
1937				if (m == NULL) {
1938					hifnstats.hst_nomem_mbuf++;
1939					err = dma->cmdu ? ERESTART : ENOMEM;
1940					m_freem(m0);
1941					goto err_srcmap;
1942				}
1943				len = MLEN;
1944				if (totlen >= MINCLSIZE) {
1945					MCLGET(m, M_DONTWAIT);
1946					if ((m->m_flags & M_EXT) == 0) {
1947						hifnstats.hst_nomem_mcl++;
1948						err = dma->cmdu ? ERESTART : ENOMEM;
1949						mlast->m_next = m;
1950						m_freem(m0);
1951						goto err_srcmap;
1952					}
1953					len = MCLBYTES;
1954				}
1955
1956				m->m_len = len;
1957				m0->m_pkthdr.len += len;
1958				totlen -= len;
1959
1960				mlast->m_next = m;
1961				mlast = m;
1962			}
1963			cmd->dst_m = m0;
1964		}
1965	}
1966
1967	if (cmd->dst_map == NULL) {
1968		if (bus_dmamap_create(sc->sc_dmat, BUS_DMA_NOWAIT, &cmd->dst_map)) {
1969			hifnstats.hst_nomem_map++;
1970			err = ENOMEM;
1971			goto err_srcmap;
1972		}
1973		if (crp->crp_flags & CRYPTO_F_IMBUF) {
1974			if (bus_dmamap_load_mbuf(sc->sc_dmat, cmd->dst_map,
1975			    cmd->dst_m, hifn_op_cb, &cmd->dst, BUS_DMA_NOWAIT)) {
1976				hifnstats.hst_nomem_map++;
1977				err = ENOMEM;
1978				goto err_dstmap1;
1979			}
1980		} else if (crp->crp_flags & CRYPTO_F_IOV) {
1981			if (bus_dmamap_load_uio(sc->sc_dmat, cmd->dst_map,
1982			    cmd->dst_io, hifn_op_cb, &cmd->dst, BUS_DMA_NOWAIT)) {
1983				hifnstats.hst_nomem_load++;
1984				err = ENOMEM;
1985				goto err_dstmap1;
1986			}
1987		}
1988	}
1989
1990#ifdef HIFN_DEBUG
1991	if (hifn_debug) {
1992		device_printf(sc->sc_dev,
1993		    "Entering cmd: stat %8x ien %8x u %d/%d/%d/%d n %d/%d\n",
1994		    READ_REG_1(sc, HIFN_1_DMA_CSR),
1995		    READ_REG_1(sc, HIFN_1_DMA_IER),
1996		    dma->cmdu, dma->srcu, dma->dstu, dma->resu,
1997		    cmd->src_nsegs, cmd->dst_nsegs);
1998	}
1999#endif
2000
2001	if (cmd->src_map == cmd->dst_map) {
2002		bus_dmamap_sync(sc->sc_dmat, cmd->src_map,
2003		    BUS_DMASYNC_PREWRITE|BUS_DMASYNC_PREREAD);
2004	} else {
2005		bus_dmamap_sync(sc->sc_dmat, cmd->src_map,
2006		    BUS_DMASYNC_PREWRITE);
2007		bus_dmamap_sync(sc->sc_dmat, cmd->dst_map,
2008		    BUS_DMASYNC_PREREAD);
2009	}
2010
2011	/*
2012	 * need N src, and N dst
2013	 */
2014	if ((dma->srcu + cmd->src_nsegs) > HIFN_D_SRC_RSIZE ||
2015	    (dma->dstu + cmd->dst_nsegs + 1) > HIFN_D_DST_RSIZE) {
2016#ifdef HIFN_DEBUG
2017		if (hifn_debug) {
2018			device_printf(sc->sc_dev,
2019				"src/dst exhaustion, srcu %u+%u dstu %u+%u\n",
2020				dma->srcu, cmd->src_nsegs,
2021				dma->dstu, cmd->dst_nsegs);
2022		}
2023#endif
2024		hifnstats.hst_nomem_sd++;
2025		err = ERESTART;
2026		goto err_dstmap;
2027	}
2028
2029	if (dma->cmdi == HIFN_D_CMD_RSIZE) {
2030		dma->cmdi = 0;
2031		dma->cmdr[HIFN_D_CMD_RSIZE].l = htole32(HIFN_D_VALID |
2032		    HIFN_D_JUMP | HIFN_D_MASKDONEIRQ);
2033		HIFN_CMDR_SYNC(sc, HIFN_D_CMD_RSIZE,
2034		    BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
2035	}
2036	cmdi = dma->cmdi++;
2037	cmdlen = hifn_write_command(cmd, dma->command_bufs[cmdi]);
2038	HIFN_CMD_SYNC(sc, cmdi, BUS_DMASYNC_PREWRITE);
2039
2040	/* .p for command/result already set */
2041	dma->cmdr[cmdi].l = htole32(cmdlen | HIFN_D_VALID | HIFN_D_LAST |
2042	    HIFN_D_MASKDONEIRQ);
2043	HIFN_CMDR_SYNC(sc, cmdi,
2044	    BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
2045	dma->cmdu++;
2046
2047	/*
2048	 * We don't worry about missing an interrupt (which a "command wait"
2049	 * interrupt salvages us from), unless there is more than one command
2050	 * in the queue.
2051	 */
2052	if (dma->cmdu > 1) {
2053		sc->sc_dmaier |= HIFN_DMAIER_C_WAIT;
2054		WRITE_REG_1(sc, HIFN_1_DMA_IER, sc->sc_dmaier);
2055	}
2056
2057	hifnstats.hst_ipackets++;
2058	hifnstats.hst_ibytes += cmd->src_mapsize;
2059
2060	hifn_dmamap_load_src(sc, cmd);
2061
2062	/*
2063	 * Unlike other descriptors, we don't mask done interrupt from
2064	 * result descriptor.
2065	 */
2066#ifdef HIFN_DEBUG
2067	if (hifn_debug)
2068		printf("load res\n");
2069#endif
2070	if (dma->resi == HIFN_D_RES_RSIZE) {
2071		dma->resi = 0;
2072		dma->resr[HIFN_D_RES_RSIZE].l = htole32(HIFN_D_VALID |
2073		    HIFN_D_JUMP | HIFN_D_MASKDONEIRQ);
2074		HIFN_RESR_SYNC(sc, HIFN_D_RES_RSIZE,
2075		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2076	}
2077	resi = dma->resi++;
2078	KASSERT(dma->hifn_commands[resi] == NULL,
2079		("hifn_crypto: command slot %u busy", resi));
2080	dma->hifn_commands[resi] = cmd;
2081	HIFN_RES_SYNC(sc, resi, BUS_DMASYNC_PREREAD);
2082	if ((hint & CRYPTO_HINT_MORE) && sc->sc_curbatch < hifn_maxbatch) {
2083		dma->resr[resi].l = htole32(HIFN_MAX_RESULT |
2084		    HIFN_D_VALID | HIFN_D_LAST | HIFN_D_MASKDONEIRQ);
2085		sc->sc_curbatch++;
2086		if (sc->sc_curbatch > hifnstats.hst_maxbatch)
2087			hifnstats.hst_maxbatch = sc->sc_curbatch;
2088		hifnstats.hst_totbatch++;
2089	} else {
2090		dma->resr[resi].l = htole32(HIFN_MAX_RESULT |
2091		    HIFN_D_VALID | HIFN_D_LAST);
2092		sc->sc_curbatch = 0;
2093	}
2094	HIFN_RESR_SYNC(sc, resi,
2095	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2096	dma->resu++;
2097
2098	if (cmd->sloplen)
2099		cmd->slopidx = resi;
2100
2101	hifn_dmamap_load_dst(sc, cmd);
2102
2103	csr = 0;
2104	if (sc->sc_c_busy == 0) {
2105		csr |= HIFN_DMACSR_C_CTRL_ENA;
2106		sc->sc_c_busy = 1;
2107	}
2108	if (sc->sc_s_busy == 0) {
2109		csr |= HIFN_DMACSR_S_CTRL_ENA;
2110		sc->sc_s_busy = 1;
2111	}
2112	if (sc->sc_r_busy == 0) {
2113		csr |= HIFN_DMACSR_R_CTRL_ENA;
2114		sc->sc_r_busy = 1;
2115	}
2116	if (sc->sc_d_busy == 0) {
2117		csr |= HIFN_DMACSR_D_CTRL_ENA;
2118		sc->sc_d_busy = 1;
2119	}
2120	if (csr)
2121		WRITE_REG_1(sc, HIFN_1_DMA_CSR, csr);
2122
2123#ifdef HIFN_DEBUG
2124	if (hifn_debug) {
2125		device_printf(sc->sc_dev, "command: stat %8x ier %8x\n",
2126		    READ_REG_1(sc, HIFN_1_DMA_CSR),
2127		    READ_REG_1(sc, HIFN_1_DMA_IER));
2128	}
2129#endif
2130
2131	sc->sc_active = 5;
2132	HIFN_UNLOCK(sc);
2133	KASSERT(err == 0, ("hifn_crypto: success with error %u", err));
2134	return (err);		/* success */
2135
2136err_dstmap:
2137	if (cmd->src_map != cmd->dst_map)
2138		bus_dmamap_unload(sc->sc_dmat, cmd->dst_map);
2139err_dstmap1:
2140	if (cmd->src_map != cmd->dst_map)
2141		bus_dmamap_destroy(sc->sc_dmat, cmd->dst_map);
2142err_srcmap:
2143	if (crp->crp_flags & CRYPTO_F_IMBUF) {
2144		if (cmd->src_m != cmd->dst_m)
2145			m_freem(cmd->dst_m);
2146	}
2147	bus_dmamap_unload(sc->sc_dmat, cmd->src_map);
2148err_srcmap1:
2149	bus_dmamap_destroy(sc->sc_dmat, cmd->src_map);
2150	HIFN_UNLOCK(sc);
2151	return (err);
2152}
2153
2154static void
2155hifn_tick(void* vsc)
2156{
2157	struct hifn_softc *sc = vsc;
2158
2159	HIFN_LOCK(sc);
2160	if (sc->sc_active == 0) {
2161		struct hifn_dma *dma = sc->sc_dma;
2162		u_int32_t r = 0;
2163
2164		if (dma->cmdu == 0 && sc->sc_c_busy) {
2165			sc->sc_c_busy = 0;
2166			r |= HIFN_DMACSR_C_CTRL_DIS;
2167		}
2168		if (dma->srcu == 0 && sc->sc_s_busy) {
2169			sc->sc_s_busy = 0;
2170			r |= HIFN_DMACSR_S_CTRL_DIS;
2171		}
2172		if (dma->dstu == 0 && sc->sc_d_busy) {
2173			sc->sc_d_busy = 0;
2174			r |= HIFN_DMACSR_D_CTRL_DIS;
2175		}
2176		if (dma->resu == 0 && sc->sc_r_busy) {
2177			sc->sc_r_busy = 0;
2178			r |= HIFN_DMACSR_R_CTRL_DIS;
2179		}
2180		if (r)
2181			WRITE_REG_1(sc, HIFN_1_DMA_CSR, r);
2182	} else
2183		sc->sc_active--;
2184	HIFN_UNLOCK(sc);
2185	callout_reset(&sc->sc_tickto, hz, hifn_tick, sc);
2186}
2187
/*
 * Interrupt handler.  Acknowledges the DMA-unit status bits we care
 * about, resets the part on a DMA abort, and otherwise reaps completed
 * result/source/command descriptors, dispatching finished operations
 * through hifn_callback().
 */
static void
hifn_intr(void *arg)
{
	struct hifn_softc *sc = arg;
	struct hifn_dma *dma;
	u_int32_t dmacsr, restart;
	int i, u;

	dmacsr = READ_REG_1(sc, HIFN_1_DMA_CSR);

	/* Nothing in the DMA unit interrupted */
	if ((dmacsr & sc->sc_dmaier) == 0)
		return;

	HIFN_LOCK(sc);

	dma = sc->sc_dma;

#ifdef HIFN_DEBUG
	if (hifn_debug) {
		device_printf(sc->sc_dev,
		    "irq: stat %08x ien %08x damier %08x i %d/%d/%d/%d k %d/%d/%d/%d u %d/%d/%d/%d\n",
		    dmacsr, READ_REG_1(sc, HIFN_1_DMA_IER), sc->sc_dmaier,
		    dma->cmdi, dma->srci, dma->dsti, dma->resi,
		    dma->cmdk, dma->srck, dma->dstk, dma->resk,
		    dma->cmdu, dma->srcu, dma->dstu, dma->resu);
	}
#endif

	/* Acknowledge only the status bits we were waiting on. */
	WRITE_REG_1(sc, HIFN_1_DMA_CSR, dmacsr & sc->sc_dmaier);

	if ((sc->sc_flags & HIFN_HAS_PUBLIC) &&
	    (dmacsr & HIFN_DMACSR_PUBDONE))
		WRITE_REG_1(sc, HIFN_1_PUB_STATUS,
		    READ_REG_1(sc, HIFN_1_PUB_STATUS) | HIFN_PUBSTS_DONE);

	restart = dmacsr & (HIFN_DMACSR_D_OVER | HIFN_DMACSR_R_OVER);
	if (restart)
		device_printf(sc->sc_dev, "overrun %x\n", dmacsr);

	if (sc->sc_flags & HIFN_IS_7811) {
		if (dmacsr & HIFN_DMACSR_ILLR)
			device_printf(sc->sc_dev, "illegal read\n");
		if (dmacsr & HIFN_DMACSR_ILLW)
			device_printf(sc->sc_dev, "illegal write\n");
	}

	restart = dmacsr & (HIFN_DMACSR_C_ABORT | HIFN_DMACSR_S_ABORT |
	    HIFN_DMACSR_D_ABORT | HIFN_DMACSR_R_ABORT);
	if (restart) {
		/* A DMA abort is fatal to in-flight work; salvage and
		 * reset the board (hifn_abort), then bail out. */
		device_printf(sc->sc_dev, "abort, resetting.\n");
		hifnstats.hst_abort++;
		hifn_abort(sc);
		HIFN_UNLOCK(sc);
		return;
	}

	if ((dmacsr & HIFN_DMACSR_C_WAIT) && (dma->cmdu == 0)) {
		/*
		 * If no slots to process and we receive a "waiting on
		 * command" interrupt, we disable the "waiting on command"
		 * (by clearing it).
		 */
		sc->sc_dmaier &= ~HIFN_DMAIER_C_WAIT;
		WRITE_REG_1(sc, HIFN_1_DMA_IER, sc->sc_dmaier);
	}

	/* clear the rings */
	i = dma->resk; u = dma->resu;
	while (u != 0) {
		HIFN_RESR_SYNC(sc, i,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
		if (dma->resr[i].l & htole32(HIFN_D_VALID)) {
			/* Descriptor still owned by the device: stop. */
			HIFN_RESR_SYNC(sc, i,
			    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
			break;
		}

		/* Slot HIFN_D_RES_RSIZE holds the ring's jump descriptor
		 * (see where resr[HIFN_D_RES_RSIZE] is set up with
		 * HIFN_D_JUMP), not a result; skip completion for it. */
		if (i != HIFN_D_RES_RSIZE) {
			struct hifn_command *cmd;
			u_int8_t *macbuf = NULL;

			HIFN_RES_SYNC(sc, i, BUS_DMASYNC_POSTREAD);
			cmd = dma->hifn_commands[i];
			KASSERT(cmd != NULL,
				("hifn_intr: null command slot %u", i));
			dma->hifn_commands[i] = NULL;

			if (cmd->base_masks & HIFN_BASE_CMD_MAC) {
				/* MAC digest starts 12 bytes into the
				 * result buffer. */
				macbuf = dma->result_bufs[i];
				macbuf += 12;
			}

			hifn_callback(sc, cmd, macbuf);
			hifnstats.hst_opackets++;
			u--;
		}

		if (++i == (HIFN_D_RES_RSIZE + 1))
			i = 0;
	}
	dma->resk = i; dma->resu = u;

	/* Retire consumed source descriptors. */
	i = dma->srck; u = dma->srcu;
	while (u != 0) {
		if (i == HIFN_D_SRC_RSIZE)
			i = 0;
		HIFN_SRCR_SYNC(sc, i,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
		if (dma->srcr[i].l & htole32(HIFN_D_VALID)) {
			HIFN_SRCR_SYNC(sc, i,
			    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
			break;
		}
		i++, u--;
	}
	dma->srck = i; dma->srcu = u;

	/* Retire consumed command descriptors (slot HIFN_D_CMD_RSIZE is
	 * the jump descriptor, as in the result ring above). */
	i = dma->cmdk; u = dma->cmdu;
	while (u != 0) {
		HIFN_CMDR_SYNC(sc, i,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
		if (dma->cmdr[i].l & htole32(HIFN_D_VALID)) {
			HIFN_CMDR_SYNC(sc, i,
			    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
			break;
		}
		if (i != HIFN_D_CMD_RSIZE) {
			u--;
			HIFN_CMD_SYNC(sc, i, BUS_DMASYNC_POSTWRITE);
		}
		if (++i == (HIFN_D_CMD_RSIZE + 1))
			i = 0;
	}
	dma->cmdk = i; dma->cmdu = u;

	HIFN_UNLOCK(sc);

	if (sc->sc_needwakeup) {		/* XXX check high watermark */
		int wakeup = sc->sc_needwakeup & (CRYPTO_SYMQ|CRYPTO_ASYMQ);
#ifdef HIFN_DEBUG
		if (hifn_debug)
			device_printf(sc->sc_dev,
				"wakeup crypto (%x) u %d/%d/%d/%d\n",
				sc->sc_needwakeup,
				dma->cmdu, dma->srcu, dma->dstu, dma->resu);
#endif
		/* Tell the crypto framework it may resubmit requests we
		 * previously pushed back with ERESTART. */
		sc->sc_needwakeup &= ~wakeup;
		crypto_unblock(sc->sc_cid, wakeup);
	}
}
2339
2340/*
2341 * Allocate a new 'session' and return an encoded session id.  'sidp'
2342 * contains our registration id, and should contain an encoded session
2343 * id on successful allocation.
2344 */
2345static int
2346hifn_newsession(device_t dev, u_int32_t *sidp, struct cryptoini *cri)
2347{
2348	struct hifn_softc *sc = device_get_softc(dev);
2349	struct cryptoini *c;
2350	int mac = 0, cry = 0, sesn;
2351	struct hifn_session *ses = NULL;
2352
2353	KASSERT(sc != NULL, ("hifn_newsession: null softc"));
2354	if (sidp == NULL || cri == NULL || sc == NULL)
2355		return (EINVAL);
2356
2357	HIFN_LOCK(sc);
2358	if (sc->sc_sessions == NULL) {
2359		ses = sc->sc_sessions = (struct hifn_session *)malloc(
2360		    sizeof(*ses), M_DEVBUF, M_NOWAIT);
2361		if (ses == NULL) {
2362			HIFN_UNLOCK(sc);
2363			return (ENOMEM);
2364		}
2365		sesn = 0;
2366		sc->sc_nsessions = 1;
2367	} else {
2368		for (sesn = 0; sesn < sc->sc_nsessions; sesn++) {
2369			if (!sc->sc_sessions[sesn].hs_used) {
2370				ses = &sc->sc_sessions[sesn];
2371				break;
2372			}
2373		}
2374
2375		if (ses == NULL) {
2376			sesn = sc->sc_nsessions;
2377			ses = (struct hifn_session *)malloc((sesn + 1) *
2378			    sizeof(*ses), M_DEVBUF, M_NOWAIT);
2379			if (ses == NULL) {
2380				HIFN_UNLOCK(sc);
2381				return (ENOMEM);
2382			}
2383			bcopy(sc->sc_sessions, ses, sesn * sizeof(*ses));
2384			bzero(sc->sc_sessions, sesn * sizeof(*ses));
2385			free(sc->sc_sessions, M_DEVBUF);
2386			sc->sc_sessions = ses;
2387			ses = &sc->sc_sessions[sesn];
2388			sc->sc_nsessions++;
2389		}
2390	}
2391	HIFN_UNLOCK(sc);
2392
2393	bzero(ses, sizeof(*ses));
2394	ses->hs_used = 1;
2395
2396	for (c = cri; c != NULL; c = c->cri_next) {
2397		switch (c->cri_alg) {
2398		case CRYPTO_MD5:
2399		case CRYPTO_SHA1:
2400		case CRYPTO_MD5_HMAC:
2401		case CRYPTO_SHA1_HMAC:
2402			if (mac)
2403				return (EINVAL);
2404			mac = 1;
2405			ses->hs_mlen = c->cri_mlen;
2406			if (ses->hs_mlen == 0) {
2407				switch (c->cri_alg) {
2408				case CRYPTO_MD5:
2409				case CRYPTO_MD5_HMAC:
2410					ses->hs_mlen = 16;
2411					break;
2412				case CRYPTO_SHA1:
2413				case CRYPTO_SHA1_HMAC:
2414					ses->hs_mlen = 20;
2415					break;
2416				}
2417			}
2418			break;
2419		case CRYPTO_DES_CBC:
2420		case CRYPTO_3DES_CBC:
2421		case CRYPTO_AES_CBC:
2422			/* XXX this may read fewer, does it matter? */
2423			read_random(ses->hs_iv,
2424				c->cri_alg == CRYPTO_AES_CBC ?
2425					HIFN_AES_IV_LENGTH : HIFN_IV_LENGTH);
2426			/*FALLTHROUGH*/
2427		case CRYPTO_ARC4:
2428			if (cry)
2429				return (EINVAL);
2430			cry = 1;
2431			break;
2432		default:
2433			return (EINVAL);
2434		}
2435	}
2436	if (mac == 0 && cry == 0)
2437		return (EINVAL);
2438
2439	*sidp = HIFN_SID(device_get_unit(sc->sc_dev), sesn);
2440
2441	return (0);
2442}
2443
2444/*
2445 * Deallocate a session.
2446 * XXX this routine should run a zero'd mac/encrypt key into context ram.
2447 * XXX to blow away any keys already stored there.
2448 */
2449static int
2450hifn_freesession(device_t dev, u_int64_t tid)
2451{
2452	struct hifn_softc *sc = device_get_softc(dev);
2453	int session, error;
2454	u_int32_t sid = CRYPTO_SESID2LID(tid);
2455
2456	KASSERT(sc != NULL, ("hifn_freesession: null softc"));
2457	if (sc == NULL)
2458		return (EINVAL);
2459
2460	HIFN_LOCK(sc);
2461	session = HIFN_SESSION(sid);
2462	if (session < sc->sc_nsessions) {
2463		bzero(&sc->sc_sessions[session], sizeof(struct hifn_session));
2464		error = 0;
2465	} else
2466		error = EINVAL;
2467	HIFN_UNLOCK(sc);
2468
2469	return (error);
2470}
2471
2472static int
2473hifn_process(device_t dev, struct cryptop *crp, int hint)
2474{
2475	struct hifn_softc *sc = device_get_softc(dev);
2476	struct hifn_command *cmd = NULL;
2477	int session, err, ivlen;
2478	struct cryptodesc *crd1, *crd2, *maccrd, *enccrd;
2479
2480	if (crp == NULL || crp->crp_callback == NULL) {
2481		hifnstats.hst_invalid++;
2482		return (EINVAL);
2483	}
2484	session = HIFN_SESSION(crp->crp_sid);
2485
2486	if (sc == NULL || session >= sc->sc_nsessions) {
2487		err = EINVAL;
2488		goto errout;
2489	}
2490
2491	cmd = malloc(sizeof(struct hifn_command), M_DEVBUF, M_NOWAIT | M_ZERO);
2492	if (cmd == NULL) {
2493		hifnstats.hst_nomem++;
2494		err = ENOMEM;
2495		goto errout;
2496	}
2497
2498	if (crp->crp_flags & CRYPTO_F_IMBUF) {
2499		cmd->src_m = (struct mbuf *)crp->crp_buf;
2500		cmd->dst_m = (struct mbuf *)crp->crp_buf;
2501	} else if (crp->crp_flags & CRYPTO_F_IOV) {
2502		cmd->src_io = (struct uio *)crp->crp_buf;
2503		cmd->dst_io = (struct uio *)crp->crp_buf;
2504	} else {
2505		err = EINVAL;
2506		goto errout;	/* XXX we don't handle contiguous buffers! */
2507	}
2508
2509	crd1 = crp->crp_desc;
2510	if (crd1 == NULL) {
2511		err = EINVAL;
2512		goto errout;
2513	}
2514	crd2 = crd1->crd_next;
2515
2516	if (crd2 == NULL) {
2517		if (crd1->crd_alg == CRYPTO_MD5_HMAC ||
2518		    crd1->crd_alg == CRYPTO_SHA1_HMAC ||
2519		    crd1->crd_alg == CRYPTO_SHA1 ||
2520		    crd1->crd_alg == CRYPTO_MD5) {
2521			maccrd = crd1;
2522			enccrd = NULL;
2523		} else if (crd1->crd_alg == CRYPTO_DES_CBC ||
2524		    crd1->crd_alg == CRYPTO_3DES_CBC ||
2525		    crd1->crd_alg == CRYPTO_AES_CBC ||
2526		    crd1->crd_alg == CRYPTO_ARC4) {
2527			if ((crd1->crd_flags & CRD_F_ENCRYPT) == 0)
2528				cmd->base_masks |= HIFN_BASE_CMD_DECODE;
2529			maccrd = NULL;
2530			enccrd = crd1;
2531		} else {
2532			err = EINVAL;
2533			goto errout;
2534		}
2535	} else {
2536		if ((crd1->crd_alg == CRYPTO_MD5_HMAC ||
2537                     crd1->crd_alg == CRYPTO_SHA1_HMAC ||
2538                     crd1->crd_alg == CRYPTO_MD5 ||
2539                     crd1->crd_alg == CRYPTO_SHA1) &&
2540		    (crd2->crd_alg == CRYPTO_DES_CBC ||
2541		     crd2->crd_alg == CRYPTO_3DES_CBC ||
2542		     crd2->crd_alg == CRYPTO_AES_CBC ||
2543		     crd2->crd_alg == CRYPTO_ARC4) &&
2544		    ((crd2->crd_flags & CRD_F_ENCRYPT) == 0)) {
2545			cmd->base_masks = HIFN_BASE_CMD_DECODE;
2546			maccrd = crd1;
2547			enccrd = crd2;
2548		} else if ((crd1->crd_alg == CRYPTO_DES_CBC ||
2549		     crd1->crd_alg == CRYPTO_ARC4 ||
2550		     crd1->crd_alg == CRYPTO_3DES_CBC ||
2551		     crd1->crd_alg == CRYPTO_AES_CBC) &&
2552		    (crd2->crd_alg == CRYPTO_MD5_HMAC ||
2553                     crd2->crd_alg == CRYPTO_SHA1_HMAC ||
2554                     crd2->crd_alg == CRYPTO_MD5 ||
2555                     crd2->crd_alg == CRYPTO_SHA1) &&
2556		    (crd1->crd_flags & CRD_F_ENCRYPT)) {
2557			enccrd = crd1;
2558			maccrd = crd2;
2559		} else {
2560			/*
2561			 * We cannot order the 7751 as requested
2562			 */
2563			err = EINVAL;
2564			goto errout;
2565		}
2566	}
2567
2568	if (enccrd) {
2569		cmd->enccrd = enccrd;
2570		cmd->base_masks |= HIFN_BASE_CMD_CRYPT;
2571		switch (enccrd->crd_alg) {
2572		case CRYPTO_ARC4:
2573			cmd->cry_masks |= HIFN_CRYPT_CMD_ALG_RC4;
2574			break;
2575		case CRYPTO_DES_CBC:
2576			cmd->cry_masks |= HIFN_CRYPT_CMD_ALG_DES |
2577			    HIFN_CRYPT_CMD_MODE_CBC |
2578			    HIFN_CRYPT_CMD_NEW_IV;
2579			break;
2580		case CRYPTO_3DES_CBC:
2581			cmd->cry_masks |= HIFN_CRYPT_CMD_ALG_3DES |
2582			    HIFN_CRYPT_CMD_MODE_CBC |
2583			    HIFN_CRYPT_CMD_NEW_IV;
2584			break;
2585		case CRYPTO_AES_CBC:
2586			cmd->cry_masks |= HIFN_CRYPT_CMD_ALG_AES |
2587			    HIFN_CRYPT_CMD_MODE_CBC |
2588			    HIFN_CRYPT_CMD_NEW_IV;
2589			break;
2590		default:
2591			err = EINVAL;
2592			goto errout;
2593		}
2594		if (enccrd->crd_alg != CRYPTO_ARC4) {
2595			ivlen = ((enccrd->crd_alg == CRYPTO_AES_CBC) ?
2596				HIFN_AES_IV_LENGTH : HIFN_IV_LENGTH);
2597			if (enccrd->crd_flags & CRD_F_ENCRYPT) {
2598				if (enccrd->crd_flags & CRD_F_IV_EXPLICIT)
2599					bcopy(enccrd->crd_iv, cmd->iv, ivlen);
2600				else
2601					bcopy(sc->sc_sessions[session].hs_iv,
2602					    cmd->iv, ivlen);
2603
2604				if ((enccrd->crd_flags & CRD_F_IV_PRESENT)
2605				    == 0) {
2606					crypto_copyback(crp->crp_flags,
2607					    crp->crp_buf, enccrd->crd_inject,
2608					    ivlen, cmd->iv);
2609				}
2610			} else {
2611				if (enccrd->crd_flags & CRD_F_IV_EXPLICIT)
2612					bcopy(enccrd->crd_iv, cmd->iv, ivlen);
2613				else {
2614					crypto_copydata(crp->crp_flags,
2615					    crp->crp_buf, enccrd->crd_inject,
2616					    ivlen, cmd->iv);
2617				}
2618			}
2619		}
2620
2621		if (enccrd->crd_flags & CRD_F_KEY_EXPLICIT)
2622			cmd->cry_masks |= HIFN_CRYPT_CMD_NEW_KEY;
2623		cmd->ck = enccrd->crd_key;
2624		cmd->cklen = enccrd->crd_klen >> 3;
2625		cmd->cry_masks |= HIFN_CRYPT_CMD_NEW_KEY;
2626
2627		/*
2628		 * Need to specify the size for the AES key in the masks.
2629		 */
2630		if ((cmd->cry_masks & HIFN_CRYPT_CMD_ALG_MASK) ==
2631		    HIFN_CRYPT_CMD_ALG_AES) {
2632			switch (cmd->cklen) {
2633			case 16:
2634				cmd->cry_masks |= HIFN_CRYPT_CMD_KSZ_128;
2635				break;
2636			case 24:
2637				cmd->cry_masks |= HIFN_CRYPT_CMD_KSZ_192;
2638				break;
2639			case 32:
2640				cmd->cry_masks |= HIFN_CRYPT_CMD_KSZ_256;
2641				break;
2642			default:
2643				err = EINVAL;
2644				goto errout;
2645			}
2646		}
2647	}
2648
2649	if (maccrd) {
2650		cmd->maccrd = maccrd;
2651		cmd->base_masks |= HIFN_BASE_CMD_MAC;
2652
2653		switch (maccrd->crd_alg) {
2654		case CRYPTO_MD5:
2655			cmd->mac_masks |= HIFN_MAC_CMD_ALG_MD5 |
2656			    HIFN_MAC_CMD_RESULT | HIFN_MAC_CMD_MODE_HASH |
2657			    HIFN_MAC_CMD_POS_IPSEC;
2658                       break;
2659		case CRYPTO_MD5_HMAC:
2660			cmd->mac_masks |= HIFN_MAC_CMD_ALG_MD5 |
2661			    HIFN_MAC_CMD_RESULT | HIFN_MAC_CMD_MODE_HMAC |
2662			    HIFN_MAC_CMD_POS_IPSEC | HIFN_MAC_CMD_TRUNC;
2663			break;
2664		case CRYPTO_SHA1:
2665			cmd->mac_masks |= HIFN_MAC_CMD_ALG_SHA1 |
2666			    HIFN_MAC_CMD_RESULT | HIFN_MAC_CMD_MODE_HASH |
2667			    HIFN_MAC_CMD_POS_IPSEC;
2668			break;
2669		case CRYPTO_SHA1_HMAC:
2670			cmd->mac_masks |= HIFN_MAC_CMD_ALG_SHA1 |
2671			    HIFN_MAC_CMD_RESULT | HIFN_MAC_CMD_MODE_HMAC |
2672			    HIFN_MAC_CMD_POS_IPSEC | HIFN_MAC_CMD_TRUNC;
2673			break;
2674		}
2675
2676		if (maccrd->crd_alg == CRYPTO_SHA1_HMAC ||
2677		     maccrd->crd_alg == CRYPTO_MD5_HMAC) {
2678			cmd->mac_masks |= HIFN_MAC_CMD_NEW_KEY;
2679			bcopy(maccrd->crd_key, cmd->mac, maccrd->crd_klen >> 3);
2680			bzero(cmd->mac + (maccrd->crd_klen >> 3),
2681			    HIFN_MAC_KEY_LENGTH - (maccrd->crd_klen >> 3));
2682		}
2683	}
2684
2685	cmd->crp = crp;
2686	cmd->session_num = session;
2687	cmd->softc = sc;
2688
2689	err = hifn_crypto(sc, cmd, crp, hint);
2690	if (!err) {
2691		return 0;
2692	} else if (err == ERESTART) {
2693		/*
2694		 * There weren't enough resources to dispatch the request
2695		 * to the part.  Notify the caller so they'll requeue this
2696		 * request and resubmit it again soon.
2697		 */
2698#ifdef HIFN_DEBUG
2699		if (hifn_debug)
2700			device_printf(sc->sc_dev, "requeue request\n");
2701#endif
2702		free(cmd, M_DEVBUF);
2703		sc->sc_needwakeup |= CRYPTO_SYMQ;
2704		return (err);
2705	}
2706
2707errout:
2708	if (cmd != NULL)
2709		free(cmd, M_DEVBUF);
2710	if (err == EINVAL)
2711		hifnstats.hst_invalid++;
2712	else
2713		hifnstats.hst_nomem++;
2714	crp->crp_etype = err;
2715	crypto_done(crp);
2716	return (err);
2717}
2718
/*
 * Recover from a DMA abort: walk the outstanding result slots,
 * completing operations the device already finished and failing the
 * rest, then reset and reinitialize the board.  Called from
 * hifn_intr() with the softc lock held.
 */
static void
hifn_abort(struct hifn_softc *sc)
{
	struct hifn_dma *dma = sc->sc_dma;
	struct hifn_command *cmd;
	struct cryptop *crp;
	int i, u;

	i = dma->resk; u = dma->resu;
	while (u != 0) {
		cmd = dma->hifn_commands[i];
		KASSERT(cmd != NULL, ("hifn_abort: null command slot %u", i));
		dma->hifn_commands[i] = NULL;
		crp = cmd->crp;

		if ((dma->resr[i].l & htole32(HIFN_D_VALID)) == 0) {
			/* Salvage what we can. */
			u_int8_t *macbuf;

			if (cmd->base_masks & HIFN_BASE_CMD_MAC) {
				/* Digest starts 12 bytes into the result
				 * buffer. */
				macbuf = dma->result_bufs[i];
				macbuf += 12;
			} else
				macbuf = NULL;
			hifnstats.hst_opackets++;
			hifn_callback(sc, cmd, macbuf);
		} else {
			/* Operation never completed: tear down its DMA
			 * state and fail the request. */
			if (cmd->src_map == cmd->dst_map) {
				bus_dmamap_sync(sc->sc_dmat, cmd->src_map,
				    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
			} else {
				bus_dmamap_sync(sc->sc_dmat, cmd->src_map,
				    BUS_DMASYNC_POSTWRITE);
				bus_dmamap_sync(sc->sc_dmat, cmd->dst_map,
				    BUS_DMASYNC_POSTREAD);
			}

			if (cmd->src_m != cmd->dst_m) {
				m_freem(cmd->src_m);
				crp->crp_buf = (caddr_t)cmd->dst_m;
			}

			/* non-shared buffers cannot be restarted */
			if (cmd->src_map != cmd->dst_map) {
				/*
				 * XXX should be EAGAIN, delayed until
				 * after the reset.
				 */
				crp->crp_etype = ENOMEM;
				bus_dmamap_unload(sc->sc_dmat, cmd->dst_map);
				bus_dmamap_destroy(sc->sc_dmat, cmd->dst_map);
			} else
				crp->crp_etype = ENOMEM;

			bus_dmamap_unload(sc->sc_dmat, cmd->src_map);
			bus_dmamap_destroy(sc->sc_dmat, cmd->src_map);

			free(cmd, M_DEVBUF);
			if (crp->crp_etype != EAGAIN)
				crypto_done(crp);
		}

		if (++i == HIFN_D_RES_RSIZE)
			i = 0;
		u--;
	}
	dma->resk = i; dma->resu = u;

	/* Bring the part back to a clean state. */
	hifn_reset_board(sc, 1);
	hifn_init_dma(sc);
	hifn_init_pci_registers(sc);
}
2791
/*
 * Complete one finished operation: sync and unload its DMA maps, fix
 * up a separately-allocated destination mbuf chain, copy back trailing
 * "slop" bytes, save the next CBC IV for the session, copy the MAC
 * digest into the request, and call crypto_done().
 */
static void
hifn_callback(struct hifn_softc *sc, struct hifn_command *cmd, u_int8_t *macbuf)
{
	struct hifn_dma *dma = sc->sc_dma;
	struct cryptop *crp = cmd->crp;
	struct cryptodesc *crd;
	struct mbuf *m;
	int totlen, i, u, ivlen;

	if (cmd->src_map == cmd->dst_map) {
		bus_dmamap_sync(sc->sc_dmat, cmd->src_map,
		    BUS_DMASYNC_POSTWRITE | BUS_DMASYNC_POSTREAD);
	} else {
		bus_dmamap_sync(sc->sc_dmat, cmd->src_map,
		    BUS_DMASYNC_POSTWRITE);
		bus_dmamap_sync(sc->sc_dmat, cmd->dst_map,
		    BUS_DMASYNC_POSTREAD);
	}

	if (crp->crp_flags & CRYPTO_F_IMBUF) {
		if (cmd->src_m != cmd->dst_m) {
			/* Output landed in a fresh chain: trim it to the
			 * input length and hand it back in crp_buf. */
			crp->crp_buf = (caddr_t)cmd->dst_m;
			totlen = cmd->src_mapsize;
			for (m = cmd->dst_m; m != NULL; m = m->m_next) {
				if (totlen < m->m_len) {
					m->m_len = totlen;
					totlen = 0;
				} else
					totlen -= m->m_len;
			}
			cmd->dst_m->m_pkthdr.len = cmd->src_m->m_pkthdr.len;
			m_freem(cmd->src_m);
		}
	}

	if (cmd->sloplen != 0) {
		/* Trailing bytes were staged in the shared slop area;
		 * copy them back into the caller's buffer. */
		crypto_copyback(crp->crp_flags, crp->crp_buf,
		    cmd->src_mapsize - cmd->sloplen, cmd->sloplen,
		    (caddr_t)&dma->slop[cmd->slopidx]);
	}

	/* Retire consumed destination descriptors. */
	i = dma->dstk; u = dma->dstu;
	while (u != 0) {
		if (i == HIFN_D_DST_RSIZE)
			i = 0;
		bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
		if (dma->dstr[i].l & htole32(HIFN_D_VALID)) {
			bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap,
			    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
			break;
		}
		i++, u--;
	}
	dma->dstk = i; dma->dstu = u;

	hifnstats.hst_obytes += cmd->dst_mapsize;

	if ((cmd->base_masks & (HIFN_BASE_CMD_CRYPT | HIFN_BASE_CMD_DECODE)) ==
	    HIFN_BASE_CMD_CRYPT) {
		/* Encryption: save the final cipher block as this
		 * session's next IV (CBC chaining across requests;
		 * hifn_process() reads hs_iv for the next encrypt). */
		for (crd = crp->crp_desc; crd; crd = crd->crd_next) {
			if (crd->crd_alg != CRYPTO_DES_CBC &&
			    crd->crd_alg != CRYPTO_3DES_CBC &&
			    crd->crd_alg != CRYPTO_AES_CBC)
				continue;
			ivlen = ((crd->crd_alg == CRYPTO_AES_CBC) ?
				HIFN_AES_IV_LENGTH : HIFN_IV_LENGTH);
			crypto_copydata(crp->crp_flags, crp->crp_buf,
			    crd->crd_skip + crd->crd_len - ivlen, ivlen,
			    cmd->softc->sc_sessions[cmd->session_num].hs_iv);
			break;
		}
	}

	if (macbuf != NULL) {
		/* Copy the digest into the request at crd_inject. */
		for (crd = crp->crp_desc; crd; crd = crd->crd_next) {
                        int len;

			if (crd->crd_alg != CRYPTO_MD5 &&
			    crd->crd_alg != CRYPTO_SHA1 &&
			    crd->crd_alg != CRYPTO_MD5_HMAC &&
			    crd->crd_alg != CRYPTO_SHA1_HMAC) {
				continue;
			}
			len = cmd->softc->sc_sessions[cmd->session_num].hs_mlen;
			crypto_copyback(crp->crp_flags, crp->crp_buf,
			    crd->crd_inject, len, macbuf);
			break;
		}
	}

	if (cmd->src_map != cmd->dst_map) {
		bus_dmamap_unload(sc->sc_dmat, cmd->dst_map);
		bus_dmamap_destroy(sc->sc_dmat, cmd->dst_map);
	}
	bus_dmamap_unload(sc->sc_dmat, cmd->src_map);
	bus_dmamap_destroy(sc->sc_dmat, cmd->src_map);
	free(cmd, M_DEVBUF);
	crypto_done(crp);
}
2892
2893/*
2894 * 7811 PB3 rev/2 parts lock-up on burst writes to Group 0
2895 * and Group 1 registers; avoid conditions that could create
2896 * burst writes by doing a read in between the writes.
2897 *
2898 * NB: The read we interpose is always to the same register;
2899 *     we do this because reading from an arbitrary (e.g. last)
2900 *     register may not always work.
2901 */
2902static void
2903hifn_write_reg_0(struct hifn_softc *sc, bus_size_t reg, u_int32_t val)
2904{
2905	if (sc->sc_flags & HIFN_IS_7811) {
2906		if (sc->sc_bar0_lastreg == reg - 4)
2907			bus_space_read_4(sc->sc_st0, sc->sc_sh0, HIFN_0_PUCNFG);
2908		sc->sc_bar0_lastreg = reg;
2909	}
2910	bus_space_write_4(sc->sc_st0, sc->sc_sh0, reg, val);
2911}
2912
2913static void
2914hifn_write_reg_1(struct hifn_softc *sc, bus_size_t reg, u_int32_t val)
2915{
2916	if (sc->sc_flags & HIFN_IS_7811) {
2917		if (sc->sc_bar1_lastreg == reg - 4)
2918			bus_space_read_4(sc->sc_st1, sc->sc_sh1, HIFN_1_REVID);
2919		sc->sc_bar1_lastreg = reg;
2920	}
2921	bus_space_write_4(sc->sc_st1, sc->sc_sh1, reg, val);
2922}
2923
2924#ifdef HIFN_VULCANDEV
2925/*
2926 * this code provides support for mapping the PK engine's register
2927 * into a userspace program.
2928 *
2929 */
2930static int
2931vulcanpk_mmap(struct cdev *dev, vm_offset_t offset,
2932	      vm_paddr_t *paddr, int nprot)
2933{
2934	struct hifn_softc *sc;
2935	vm_paddr_t pd;
2936	void *b;
2937
2938	sc = dev->si_drv1;
2939
2940	pd = rman_get_start(sc->sc_bar1res);
2941	b = rman_get_virtual(sc->sc_bar1res);
2942
2943#if 0
2944	printf("vpk mmap: %p(%08x) offset=%d\n", b, pd, offset);
2945	hexdump(b, HIFN_1_PUB_MEMEND, "vpk", 0);
2946#endif
2947
2948	if (offset == 0) {
2949		*paddr = pd;
2950		return (0);
2951	}
2952	return (-1);
2953}
2954
/* Character-device switch exposing the PK engine to userland via
 * vulcanpk_mmap() above. */
static struct cdevsw vulcanpk_cdevsw = {
	.d_version =	D_VERSION,
	.d_mmap =	vulcanpk_mmap,
	.d_name =	"vulcanpk",
};
2960#endif /* HIFN_VULCANDEV */
2961