hifn7751.c revision 227309
1/*	$OpenBSD: hifn7751.c,v 1.120 2002/05/17 00:33:34 deraadt Exp $	*/
2
3/*-
4 * Invertex AEON / Hifn 7751 driver
5 * Copyright (c) 1999 Invertex Inc. All rights reserved.
6 * Copyright (c) 1999 Theo de Raadt
7 * Copyright (c) 2000-2001 Network Security Technologies, Inc.
8 *			http://www.netsec.net
9 * Copyright (c) 2003 Hifn Inc.
10 *
11 * This driver is based on a previous driver by Invertex, for which they
12 * requested:  Please send any comments, feedback, bug-fixes, or feature
13 * requests to software@invertex.com.
14 *
15 * Redistribution and use in source and binary forms, with or without
16 * modification, are permitted provided that the following conditions
17 * are met:
18 *
19 * 1. Redistributions of source code must retain the above copyright
20 *   notice, this list of conditions and the following disclaimer.
21 * 2. Redistributions in binary form must reproduce the above copyright
22 *   notice, this list of conditions and the following disclaimer in the
23 *   documentation and/or other materials provided with the distribution.
24 * 3. The name of the author may not be used to endorse or promote products
25 *   derived from this software without specific prior written permission.
26 *
27 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
28 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
29 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
30 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
31 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
32 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
33 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
34 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
35 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
36 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
37 *
38 * Effort sponsored in part by the Defense Advanced Research Projects
39 * Agency (DARPA) and Air Force Research Laboratory, Air Force
40 * Materiel Command, USAF, under agreement number F30602-01-2-0537.
41 */
42
43#include <sys/cdefs.h>
44__FBSDID("$FreeBSD: head/sys/dev/hifn/hifn7751.c 227309 2011-11-07 15:43:11Z ed $");
45
46/*
47 * Driver for various Hifn encryption processors.
48 */
49#include "opt_hifn.h"
50
51#include <sys/param.h>
52#include <sys/systm.h>
53#include <sys/proc.h>
54#include <sys/errno.h>
55#include <sys/malloc.h>
56#include <sys/kernel.h>
57#include <sys/module.h>
58#include <sys/mbuf.h>
59#include <sys/lock.h>
60#include <sys/mutex.h>
61#include <sys/sysctl.h>
62
63#include <vm/vm.h>
64#include <vm/pmap.h>
65
66#include <machine/bus.h>
67#include <machine/resource.h>
68#include <sys/bus.h>
69#include <sys/rman.h>
70
71#include <opencrypto/cryptodev.h>
72#include <sys/random.h>
73#include <sys/kobj.h>
74
75#include "cryptodev_if.h"
76
77#include <dev/pci/pcivar.h>
78#include <dev/pci/pcireg.h>
79
80#ifdef HIFN_RNDTEST
81#include <dev/rndtest/rndtest.h>
82#endif
83#include <dev/hifn/hifn7751reg.h>
84#include <dev/hifn/hifn7751var.h>
85
86#ifdef HIFN_VULCANDEV
87#include <sys/conf.h>
88#include <sys/uio.h>
89
90static struct cdevsw vulcanpk_cdevsw; /* forward declaration */
91#endif
92
93/*
94 * Prototypes and count for the pci_device structure
95 */
96static	int hifn_probe(device_t);
97static	int hifn_attach(device_t);
98static	int hifn_detach(device_t);
99static	int hifn_suspend(device_t);
100static	int hifn_resume(device_t);
101static	int hifn_shutdown(device_t);
102
103static	int hifn_newsession(device_t, u_int32_t *, struct cryptoini *);
104static	int hifn_freesession(device_t, u_int64_t);
105static	int hifn_process(device_t, struct cryptop *, int);
106
/*
 * Newbus glue: device method table, driver descriptor, and module
 * registration/dependency declarations.
 */
static device_method_t hifn_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		hifn_probe),
	DEVMETHOD(device_attach,	hifn_attach),
	DEVMETHOD(device_detach,	hifn_detach),
	DEVMETHOD(device_suspend,	hifn_suspend),
	DEVMETHOD(device_resume,	hifn_resume),
	DEVMETHOD(device_shutdown,	hifn_shutdown),

	/* bus interface */
	DEVMETHOD(bus_print_child,	bus_generic_print_child),
	DEVMETHOD(bus_driver_added,	bus_generic_driver_added),

	/* crypto device methods */
	DEVMETHOD(cryptodev_newsession,	hifn_newsession),
	DEVMETHOD(cryptodev_freesession,hifn_freesession),
	DEVMETHOD(cryptodev_process,	hifn_process),

	{ 0, 0 }			/* table terminator */
};
static driver_t hifn_driver = {
	"hifn",				/* device name prefix */
	hifn_methods,
	sizeof (struct hifn_softc)	/* size of per-instance softc */
};
static devclass_t hifn_devclass;

/* Hang the driver off the PCI bus; it requires the crypto framework. */
DRIVER_MODULE(hifn, pci, hifn_driver, hifn_devclass, 0, 0);
MODULE_DEPEND(hifn, crypto, 1, 1, 1);
#ifdef HIFN_RNDTEST
MODULE_DEPEND(hifn, rndtest, 1, 1, 1);
#endif
139
140static	void hifn_reset_board(struct hifn_softc *, int);
141static	void hifn_reset_puc(struct hifn_softc *);
142static	void hifn_puc_wait(struct hifn_softc *);
143static	int hifn_enable_crypto(struct hifn_softc *);
144static	void hifn_set_retry(struct hifn_softc *sc);
145static	void hifn_init_dma(struct hifn_softc *);
146static	void hifn_init_pci_registers(struct hifn_softc *);
147static	int hifn_sramsize(struct hifn_softc *);
148static	int hifn_dramsize(struct hifn_softc *);
149static	int hifn_ramtype(struct hifn_softc *);
150static	void hifn_sessions(struct hifn_softc *);
151static	void hifn_intr(void *);
152static	u_int hifn_write_command(struct hifn_command *, u_int8_t *);
153static	u_int32_t hifn_next_signature(u_int32_t a, u_int cnt);
154static	void hifn_callback(struct hifn_softc *, struct hifn_command *, u_int8_t *);
155static	int hifn_crypto(struct hifn_softc *, struct hifn_command *, struct cryptop *, int);
156static	int hifn_readramaddr(struct hifn_softc *, int, u_int8_t *);
157static	int hifn_writeramaddr(struct hifn_softc *, int, u_int8_t *);
158static	int hifn_dmamap_load_src(struct hifn_softc *, struct hifn_command *);
159static	int hifn_dmamap_load_dst(struct hifn_softc *, struct hifn_command *);
160static	int hifn_init_pubrng(struct hifn_softc *);
161static	void hifn_rng(void *);
162static	void hifn_tick(void *);
163static	void hifn_abort(struct hifn_softc *);
164static	void hifn_alloc_slot(struct hifn_softc *, int *, int *, int *, int *);
165
166static	void hifn_write_reg_0(struct hifn_softc *, bus_size_t, u_int32_t);
167static	void hifn_write_reg_1(struct hifn_softc *, bus_size_t, u_int32_t);
168
/*
 * Read a 32-bit word from BAR0 register space.
 * NOTE(review): clearing sc_bar0_lastreg presumably invalidates a
 * last-written-register cache maintained by hifn_write_reg_0() -- that
 * function is not visible here; confirm before changing.
 */
static __inline u_int32_t
READ_REG_0(struct hifn_softc *sc, bus_size_t reg)
{
    u_int32_t v = bus_space_read_4(sc->sc_st0, sc->sc_sh0, reg);
    sc->sc_bar0_lastreg = (bus_size_t) -1;	/* invalidate write cache */
    return (v);
}
#define	WRITE_REG_0(sc, reg, val)	hifn_write_reg_0(sc, reg, val)
176#define	WRITE_REG_0(sc, reg, val)	hifn_write_reg_0(sc, reg, val)
177
/*
 * Read a 32-bit word from BAR1 register space.
 * NOTE(review): clearing sc_bar1_lastreg presumably invalidates a
 * last-written-register cache maintained by hifn_write_reg_1() -- that
 * function is not visible here; confirm before changing.
 */
static __inline u_int32_t
READ_REG_1(struct hifn_softc *sc, bus_size_t reg)
{
    u_int32_t v = bus_space_read_4(sc->sc_st1, sc->sc_sh1, reg);
    sc->sc_bar1_lastreg = (bus_size_t) -1;	/* invalidate write cache */
    return (v);
}
#define	WRITE_REG_1(sc, reg, val)	hifn_write_reg_1(sc, reg, val)
185#define	WRITE_REG_1(sc, reg, val)	hifn_write_reg_1(sc, reg, val)
186
/* Sysctl tree: hw.hifn.* tunables and statistics. */
static SYSCTL_NODE(_hw, OID_AUTO, hifn, CTLFLAG_RD, 0,
	    "Hifn driver parameters");

#ifdef HIFN_DEBUG
static	int hifn_debug = 0;		/* hw.hifn.debug: enable debug output */
SYSCTL_INT(_hw_hifn, OID_AUTO, debug, CTLFLAG_RW, &hifn_debug,
	    0, "control debugging msgs");
#endif

static	struct hifn_stats hifnstats;	/* aggregate driver statistics */
SYSCTL_STRUCT(_hw_hifn, OID_AUTO, stats, CTLFLAG_RD, &hifnstats,
	    hifn_stats, "driver statistics");
static	int hifn_maxbatch = 1;		/* hw.hifn.maxbatch: ops batched per interrupt */
SYSCTL_INT(_hw_hifn, OID_AUTO, maxbatch, CTLFLAG_RW, &hifn_maxbatch,
	    0, "max ops to batch w/o interrupt");
202
203/*
204 * Probe for a supported device.  The PCI vendor and device
205 * IDs are used to detect devices we know how to handle.
206 */
207static int
208hifn_probe(device_t dev)
209{
210	if (pci_get_vendor(dev) == PCI_VENDOR_INVERTEX &&
211	    pci_get_device(dev) == PCI_PRODUCT_INVERTEX_AEON)
212		return (BUS_PROBE_DEFAULT);
213	if (pci_get_vendor(dev) == PCI_VENDOR_HIFN &&
214	    (pci_get_device(dev) == PCI_PRODUCT_HIFN_7751 ||
215	     pci_get_device(dev) == PCI_PRODUCT_HIFN_7951 ||
216	     pci_get_device(dev) == PCI_PRODUCT_HIFN_7955 ||
217	     pci_get_device(dev) == PCI_PRODUCT_HIFN_7956 ||
218	     pci_get_device(dev) == PCI_PRODUCT_HIFN_7811))
219		return (BUS_PROBE_DEFAULT);
220	if (pci_get_vendor(dev) == PCI_VENDOR_NETSEC &&
221	    pci_get_device(dev) == PCI_PRODUCT_NETSEC_7751)
222		return (BUS_PROBE_DEFAULT);
223	return (ENXIO);
224}
225
/*
 * bus_dmamap_load() callback: record the bus address of the first
 * (and only) DMA segment into the caller-supplied bus_addr_t.
 * NOTE(review): nseg and error are ignored here; the load sites use
 * BUS_DMA_NOWAIT and expect a single segment -- an explicit error
 * check would be more defensive; confirm against all callers.
 */
static void
hifn_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
	bus_addr_t *paddr = (bus_addr_t*) arg;
	*paddr = segs->ds_addr;
}
232
233static const char*
234hifn_partname(struct hifn_softc *sc)
235{
236	/* XXX sprintf numbers when not decoded */
237	switch (pci_get_vendor(sc->sc_dev)) {
238	case PCI_VENDOR_HIFN:
239		switch (pci_get_device(sc->sc_dev)) {
240		case PCI_PRODUCT_HIFN_6500:	return "Hifn 6500";
241		case PCI_PRODUCT_HIFN_7751:	return "Hifn 7751";
242		case PCI_PRODUCT_HIFN_7811:	return "Hifn 7811";
243		case PCI_PRODUCT_HIFN_7951:	return "Hifn 7951";
244		case PCI_PRODUCT_HIFN_7955:	return "Hifn 7955";
245		case PCI_PRODUCT_HIFN_7956:	return "Hifn 7956";
246		}
247		return "Hifn unknown-part";
248	case PCI_VENDOR_INVERTEX:
249		switch (pci_get_device(sc->sc_dev)) {
250		case PCI_PRODUCT_INVERTEX_AEON:	return "Invertex AEON";
251		}
252		return "Invertex unknown-part";
253	case PCI_VENDOR_NETSEC:
254		switch (pci_get_device(sc->sc_dev)) {
255		case PCI_PRODUCT_NETSEC_7751:	return "NetSec 7751";
256		}
257		return "NetSec unknown-part";
258	}
259	return "Unknown-vendor unknown-part";
260}
261
/*
 * Default entropy sink used when rndtest is not compiled in or failed
 * to attach: hand the RNG output straight to random_harvest(), claiming
 * count bytes / count*NBBY bits of pure entropy.
 */
static void
default_harvest(struct rndtest_state *rsp, void *buf, u_int count)
{
	random_harvest(buf, count, count*NBBY, 0, RANDOM_PURE);
}
267
268static u_int
269checkmaxmin(device_t dev, const char *what, u_int v, u_int min, u_int max)
270{
271	if (v > max) {
272		device_printf(dev, "Warning, %s %u out of range, "
273			"using max %u\n", what, v, max);
274		v = max;
275	} else if (v < min) {
276		device_printf(dev, "Warning, %s %u out of range, "
277			"using min %u\n", what, v, min);
278		v = min;
279	}
280	return v;
281}
282
283/*
284 * Select PLL configuration for 795x parts.  This is complicated in
285 * that we cannot determine the optimal parameters without user input.
286 * The reference clock is derived from an external clock through a
287 * multiplier.  The external clock is either the host bus (i.e. PCI)
288 * or an external clock generator.  When using the PCI bus we assume
289 * the clock is either 33 or 66 MHz; for an external source we cannot
290 * tell the speed.
291 *
292 * PLL configuration is done with a string: "pci" for PCI bus, or "ext"
293 * for an external source, followed by the frequency.  We calculate
294 * the appropriate multiplier and PLL register contents accordingly.
295 * When no configuration is given we default to "pci66" since that
296 * always will allow the card to work.  If a card is using the PCI
297 * bus clock and in a 33MHz slot then it will be operating at half
298 * speed until the correct information is provided.
299 *
300 * We use a default setting of "ext66" because according to Mike Ham
301 * of HiFn, almost every board in existence has an external crystal
302 * populated at 66Mhz. Using PCI can be a problem on modern motherboards,
303 * because PCI33 can have clocks from 0 to 33Mhz, and some have
304 * non-PCI-compliant spread-spectrum clocks, which can confuse the pll.
305 */
/*
 * Parse the "hw.hifn.N.pllconfig" hint (see the block comment above)
 * and compute the PLL configuration register value for 795x parts.
 *
 * dev: device, used for the hint lookup and warning messages
 * pll: out parameter receiving the computed register value
 */
static void
hifn_getpllconfig(device_t dev, u_int *pll)
{
	const char *pllspec;
	u_int freq, mul, fl, fh;
	u_int32_t pllconfig;
	char *nxt;

	/* Default to "ext66" when no hint is configured. */
	if (resource_string_value("hifn", device_get_unit(dev),
	    "pllconfig", &pllspec))
		pllspec = "ext66";
	fl = 33, fh = 66;		/* acceptable frequency range, MHz */
	pllconfig = 0;
	if (strncmp(pllspec, "ext", 3) == 0) {
		pllspec += 3;
		pllconfig |= HIFN_PLL_REF_SEL;
		switch (pci_get_device(dev)) {
		case PCI_PRODUCT_HIFN_7955:
		case PCI_PRODUCT_HIFN_7956:
			/* 7955/7956 accept a wider external clock range */
			fl = 20, fh = 100;
			break;
#ifdef notyet
		case PCI_PRODUCT_HIFN_7954:
			fl = 20, fh = 66;
			break;
#endif
		}
	} else if (strncmp(pllspec, "pci", 3) == 0)
		pllspec += 3;
	freq = strtoul(pllspec, &nxt, 10);
	if (nxt == pllspec)
		freq = 66;		/* no frequency given; assume 66 */
	else
		freq = checkmaxmin(dev, "frequency", freq, fl, fh);
	/*
	 * Calculate multiplier.  We target a Fck of 266 MHz,
	 * allowing only even values, possibly rounded down.
	 * Multipliers > 8 must set the charge pump current.
	 */
	mul = checkmaxmin(dev, "PLL divisor", (266 / freq) &~ 1, 2, 12);
	pllconfig |= (mul / 2 - 1) << HIFN_PLL_ND_SHIFT;
	if (mul > 8)
		pllconfig |= HIFN_PLL_IS;	/* boost charge pump */
	*pll = pllconfig;
}
351
352/*
353 * Attach an interface that successfully probed.
354 */
static int
hifn_attach(device_t dev)
{
	struct hifn_softc *sc = device_get_softc(dev);
	caddr_t kva;
	int rseg, rid;
	char rbase;		/* 'K' or 'M' suffix for the ram-size banner */
	u_int16_t ena, rev;

	sc->sc_dev = dev;

	mtx_init(&sc->sc_mtx, device_get_nameunit(dev), "hifn driver", MTX_DEF);

	/* XXX handle power management */

	/*
	 * The 7951 and 795x have a random number generator and
	 * public key support; note this.
	 */
	if (pci_get_vendor(dev) == PCI_VENDOR_HIFN &&
	    (pci_get_device(dev) == PCI_PRODUCT_HIFN_7951 ||
	     pci_get_device(dev) == PCI_PRODUCT_HIFN_7955 ||
	     pci_get_device(dev) == PCI_PRODUCT_HIFN_7956))
		sc->sc_flags = HIFN_HAS_RNG | HIFN_HAS_PUBLIC;
	/*
	 * The 7811 has a random number generator and
	 * we also note it's identity 'cuz of some quirks.
	 */
	if (pci_get_vendor(dev) == PCI_VENDOR_HIFN &&
	    pci_get_device(dev) == PCI_PRODUCT_HIFN_7811)
		sc->sc_flags |= HIFN_IS_7811 | HIFN_HAS_RNG;

	/*
	 * The 795x parts support AES.
	 */
	if (pci_get_vendor(dev) == PCI_VENDOR_HIFN &&
	    (pci_get_device(dev) == PCI_PRODUCT_HIFN_7955 ||
	     pci_get_device(dev) == PCI_PRODUCT_HIFN_7956)) {
		sc->sc_flags |= HIFN_IS_7956 | HIFN_HAS_AES;
		/*
		 * Select PLL configuration.  This depends on the
		 * bus and board design and must be manually configured
		 * if the default setting is unacceptable.
		 */
		hifn_getpllconfig(dev, &sc->sc_pllconfig);
	}

	/*
	 * Setup PCI resources. Note that we record the bus
	 * tag and handle for each register mapping, this is
	 * used by the READ_REG_0, WRITE_REG_0, READ_REG_1,
	 * and WRITE_REG_1 macros throughout the driver.
	 */
	pci_enable_busmaster(dev);

	rid = HIFN_BAR0;
	sc->sc_bar0res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
			 			RF_ACTIVE);
	if (sc->sc_bar0res == NULL) {
		device_printf(dev, "cannot map bar%d register space\n", 0);
		goto fail_pci;
	}
	sc->sc_st0 = rman_get_bustag(sc->sc_bar0res);
	sc->sc_sh0 = rman_get_bushandle(sc->sc_bar0res);
	sc->sc_bar0_lastreg = (bus_size_t) -1;

	rid = HIFN_BAR1;
	sc->sc_bar1res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
						RF_ACTIVE);
	if (sc->sc_bar1res == NULL) {
		device_printf(dev, "cannot map bar%d register space\n", 1);
		goto fail_io0;
	}
	sc->sc_st1 = rman_get_bustag(sc->sc_bar1res);
	sc->sc_sh1 = rman_get_bushandle(sc->sc_bar1res);
	sc->sc_bar1_lastreg = (bus_size_t) -1;

	hifn_set_retry(sc);

	/*
	 * Setup the area where the Hifn DMA's descriptors
	 * and associated data structures.
	 */
	if (bus_dma_tag_create(NULL,			/* parent */
			       1, 0,			/* alignment,boundary */
			       BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
			       BUS_SPACE_MAXADDR,	/* highaddr */
			       NULL, NULL,		/* filter, filterarg */
			       HIFN_MAX_DMALEN,		/* maxsize */
			       MAX_SCATTER,		/* nsegments */
			       HIFN_MAX_SEGLEN,		/* maxsegsize */
			       BUS_DMA_ALLOCNOW,	/* flags */
			       NULL,			/* lockfunc */
			       NULL,			/* lockarg */
			       &sc->sc_dmat)) {
		device_printf(dev, "cannot allocate DMA tag\n");
		goto fail_io1;
	}
	if (bus_dmamap_create(sc->sc_dmat, BUS_DMA_NOWAIT, &sc->sc_dmamap)) {
		device_printf(dev, "cannot create dma map\n");
		bus_dma_tag_destroy(sc->sc_dmat);
		goto fail_io1;
	}
	if (bus_dmamem_alloc(sc->sc_dmat, (void**) &kva, BUS_DMA_NOWAIT, &sc->sc_dmamap)) {
		device_printf(dev, "cannot alloc dma buffer\n");
		bus_dmamap_destroy(sc->sc_dmat, sc->sc_dmamap);
		bus_dma_tag_destroy(sc->sc_dmat);
		goto fail_io1;
	}
	if (bus_dmamap_load(sc->sc_dmat, sc->sc_dmamap, kva,
			     sizeof (*sc->sc_dma),
			     hifn_dmamap_cb, &sc->sc_dma_physaddr,
			     BUS_DMA_NOWAIT)) {
		device_printf(dev, "cannot load dma map\n");
		bus_dmamem_free(sc->sc_dmat, kva, sc->sc_dmamap);
		bus_dmamap_destroy(sc->sc_dmat, sc->sc_dmamap);
		bus_dma_tag_destroy(sc->sc_dmat);
		goto fail_io1;
	}
	sc->sc_dma = (struct hifn_dma *)kva;
	bzero(sc->sc_dma, sizeof(*sc->sc_dma));

	KASSERT(sc->sc_st0 != 0, ("hifn_attach: null bar0 tag!"));
	KASSERT(sc->sc_sh0 != 0, ("hifn_attach: null bar0 handle!"));
	KASSERT(sc->sc_st1 != 0, ("hifn_attach: null bar1 tag!"));
	KASSERT(sc->sc_sh1 != 0, ("hifn_attach: null bar1 handle!"));

	/*
	 * Reset the board and do the ``secret handshake''
	 * to enable the crypto support.  Then complete the
	 * initialization procedure by setting up the interrupt
	 * and hooking in to the system crypto support so we'll
	 * get used for system services like the crypto device,
	 * IPsec, RNG device, etc.
	 */
	hifn_reset_board(sc, 0);

	if (hifn_enable_crypto(sc) != 0) {
		device_printf(dev, "crypto enabling failed\n");
		goto fail_mem;
	}
	hifn_reset_puc(sc);

	hifn_init_dma(sc);
	hifn_init_pci_registers(sc);

	/* XXX can't dynamically determine ram type for 795x; force dram */
	if (sc->sc_flags & HIFN_IS_7956)
		sc->sc_drammodel = 1;
	else if (hifn_ramtype(sc))
		goto fail_mem;

	if (sc->sc_drammodel == 0)
		hifn_sramsize(sc);
	else
		hifn_dramsize(sc);

	/*
	 * Workaround for NetSec 7751 rev A: half ram size because two
	 * of the address lines were left floating
	 */
	if (pci_get_vendor(dev) == PCI_VENDOR_NETSEC &&
	    pci_get_device(dev) == PCI_PRODUCT_NETSEC_7751 &&
	    pci_get_revid(dev) == 0x61)	/*XXX???*/
		sc->sc_ramsize >>= 1;

	/*
	 * Arrange the interrupt line.
	 */
	rid = 0;
	sc->sc_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
					    RF_SHAREABLE|RF_ACTIVE);
	if (sc->sc_irq == NULL) {
		device_printf(dev, "could not map interrupt\n");
		goto fail_mem;
	}
	/*
	 * NB: Network code assumes we are blocked with splimp()
	 *     so make sure the IRQ is marked appropriately.
	 */
	if (bus_setup_intr(dev, sc->sc_irq, INTR_TYPE_NET | INTR_MPSAFE,
			   NULL, hifn_intr, sc, &sc->sc_intrhand)) {
		device_printf(dev, "could not setup interrupt\n");
		goto fail_intr2;
	}

	hifn_sessions(sc);

	/*
	 * NB: Keep only the low 16 bits; this masks the chip id
	 *     from the 7951.
	 */
	rev = READ_REG_1(sc, HIFN_1_REVID) & 0xffff;

	/* Scale the ram size down to a K or M figure for the banner. */
	rseg = sc->sc_ramsize / 1024;
	rbase = 'K';
	if (sc->sc_ramsize >= (1024 * 1024)) {
		rbase = 'M';
		rseg /= 1024;
	}
	device_printf(sc->sc_dev, "%s, rev %u, %d%cB %cram",
		hifn_partname(sc), rev,
		rseg, rbase, sc->sc_drammodel ? 'd' : 's');
	if (sc->sc_flags & HIFN_IS_7956)
		printf(", pll=0x%x<%s clk, %ux mult>",
			sc->sc_pllconfig,
			sc->sc_pllconfig & HIFN_PLL_REF_SEL ? "ext" : "pci",
			2 + 2*((sc->sc_pllconfig & HIFN_PLL_ND) >> 11));
	printf("\n");

	sc->sc_cid = crypto_get_driverid(dev, CRYPTOCAP_F_HARDWARE);
	if (sc->sc_cid < 0) {
		device_printf(dev, "could not get crypto driver id\n");
		goto fail_intr;
	}

	WRITE_REG_0(sc, HIFN_0_PUCNFG,
	    READ_REG_0(sc, HIFN_0_PUCNFG) | HIFN_PUCNFG_CHIPID);
	ena = READ_REG_0(sc, HIFN_0_PUSTAT) & HIFN_PUSTAT_CHIPENA;

	/* Register algorithms according to the enabled crypto level. */
	switch (ena) {
	case HIFN_PUSTAT_ENA_2:
		crypto_register(sc->sc_cid, CRYPTO_3DES_CBC, 0, 0);
		crypto_register(sc->sc_cid, CRYPTO_ARC4, 0, 0);
		if (sc->sc_flags & HIFN_HAS_AES)
			crypto_register(sc->sc_cid, CRYPTO_AES_CBC, 0, 0);
		/*FALLTHROUGH*/
	case HIFN_PUSTAT_ENA_1:
		crypto_register(sc->sc_cid, CRYPTO_MD5, 0, 0);
		crypto_register(sc->sc_cid, CRYPTO_SHA1, 0, 0);
		crypto_register(sc->sc_cid, CRYPTO_MD5_HMAC, 0, 0);
		crypto_register(sc->sc_cid, CRYPTO_SHA1_HMAC, 0, 0);
		crypto_register(sc->sc_cid, CRYPTO_DES_CBC, 0, 0);
		break;
	}

	bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	if (sc->sc_flags & (HIFN_HAS_PUBLIC | HIFN_HAS_RNG))
		hifn_init_pubrng(sc);

	callout_init(&sc->sc_tickto, CALLOUT_MPSAFE);
	callout_reset(&sc->sc_tickto, hz, hifn_tick, sc);

	return (0);

	/* Error unwind: release resources in reverse order of acquisition. */
fail_intr:
	bus_teardown_intr(dev, sc->sc_irq, sc->sc_intrhand);
fail_intr2:
	/* XXX don't store rid */
	bus_release_resource(dev, SYS_RES_IRQ, 0, sc->sc_irq);
fail_mem:
	bus_dmamap_unload(sc->sc_dmat, sc->sc_dmamap);
	bus_dmamem_free(sc->sc_dmat, sc->sc_dma, sc->sc_dmamap);
	bus_dmamap_destroy(sc->sc_dmat, sc->sc_dmamap);
	bus_dma_tag_destroy(sc->sc_dmat);

	/* Turn off DMA polling */
	WRITE_REG_1(sc, HIFN_1_DMA_CNFG, HIFN_DMACNFG_MSTRESET |
	    HIFN_DMACNFG_DMARESET | HIFN_DMACNFG_MODE);
fail_io1:
	bus_release_resource(dev, SYS_RES_MEMORY, HIFN_BAR1, sc->sc_bar1res);
fail_io0:
	bus_release_resource(dev, SYS_RES_MEMORY, HIFN_BAR0, sc->sc_bar0res);
fail_pci:
	mtx_destroy(&sc->sc_mtx);
	return (ENXIO);
}
624
625/*
626 * Detach an interface that successfully probed.
627 */
static int
hifn_detach(device_t dev)
{
	struct hifn_softc *sc = device_get_softc(dev);

	KASSERT(sc != NULL, ("hifn_detach: null software carrier!"));

	/* disable interrupts */
	WRITE_REG_1(sc, HIFN_1_DMA_IER, 0);

	/*XXX other resources */
	callout_stop(&sc->sc_tickto);
	/*
	 * NOTE(review): sc_rngto is only callout_init'd by
	 * hifn_init_pubrng() when the part has an RNG -- confirm that
	 * stopping it unconditionally here is safe for RNG-less parts.
	 */
	callout_stop(&sc->sc_rngto);
#ifdef HIFN_RNDTEST
	if (sc->sc_rndtest)
		rndtest_detach(sc->sc_rndtest);
#endif

	/* Turn off DMA polling */
	WRITE_REG_1(sc, HIFN_1_DMA_CNFG, HIFN_DMACNFG_MSTRESET |
	    HIFN_DMACNFG_DMARESET | HIFN_DMACNFG_MODE);

	crypto_unregister_all(sc->sc_cid);

	bus_generic_detach(dev);	/*XXX should be no children, right? */

	bus_teardown_intr(dev, sc->sc_irq, sc->sc_intrhand);
	/* XXX don't store rid */
	bus_release_resource(dev, SYS_RES_IRQ, 0, sc->sc_irq);

	/* Release the DMA descriptor area set up in hifn_attach(). */
	bus_dmamap_unload(sc->sc_dmat, sc->sc_dmamap);
	bus_dmamem_free(sc->sc_dmat, sc->sc_dma, sc->sc_dmamap);
	bus_dmamap_destroy(sc->sc_dmat, sc->sc_dmamap);
	bus_dma_tag_destroy(sc->sc_dmat);

	bus_release_resource(dev, SYS_RES_MEMORY, HIFN_BAR1, sc->sc_bar1res);
	bus_release_resource(dev, SYS_RES_MEMORY, HIFN_BAR0, sc->sc_bar0res);

	mtx_destroy(&sc->sc_mtx);

	return (0);
}
670
671/*
672 * Stop all chip I/O so that the kernel's probe routines don't
673 * get confused by errant DMAs when rebooting.
674 */
static int
hifn_shutdown(device_t dev)
{
#ifdef notyet
	/* Not yet implemented: would quiesce the chip via hifn_stop(). */
	hifn_stop(device_get_softc(dev));
#endif
	return (0);
}
683
684/*
685 * Device suspend routine.  Stop the interface and save some PCI
686 * settings in case the BIOS doesn't restore them properly on
687 * resume.
688 */
static int
hifn_suspend(device_t dev)
{
	struct hifn_softc *sc = device_get_softc(dev);
#ifdef notyet
	/* Not yet implemented: would quiesce the chip via hifn_stop(). */
	hifn_stop(sc);
#endif
	sc->sc_suspended = 1;	/* remember state for hifn_resume() */

	return (0);
}
700
701/*
702 * Device resume routine.  Restore some PCI settings in case the BIOS
703 * doesn't, re-enable busmastering, and restart the interface if
704 * appropriate.
705 */
static int
hifn_resume(device_t dev)
{
	struct hifn_softc *sc = device_get_softc(dev);
#ifdef notyet
        /* reinitialize interface if necessary */
        if (ifp->if_flags & IFF_UP)
                rl_init(sc);
#endif
	sc->sc_suspended = 0;	/* clear flag set by hifn_suspend() */

	return (0);
}
719
/*
 * Bring up the public-key unit and/or the RNG, whichever the part has.
 * Installs the entropy-harvest hook, takes the PK engine out of reset
 * (non-7811 parts), enables the RNG and starts its polling callout,
 * and enables PK-done interrupts.  Returns 0 on success, 1 if the
 * public key unit failed to leave reset within ~100ms.
 */
static int
hifn_init_pubrng(struct hifn_softc *sc)
{
	u_int32_t r;
	int i;

#ifdef HIFN_RNDTEST
	sc->sc_rndtest = rndtest_attach(sc->sc_dev);
	if (sc->sc_rndtest)
		sc->sc_harvest = rndtest_harvest;
	else
		sc->sc_harvest = default_harvest;
#else
	sc->sc_harvest = default_harvest;
#endif
	if ((sc->sc_flags & HIFN_IS_7811) == 0) {
		/* Reset 7951 public key/rng engine */
		WRITE_REG_1(sc, HIFN_1_PUB_RESET,
		    READ_REG_1(sc, HIFN_1_PUB_RESET) | HIFN_PUBRST_RESET);

		/* Poll up to 100 x 1ms for the reset bit to self-clear. */
		for (i = 0; i < 100; i++) {
			DELAY(1000);
			if ((READ_REG_1(sc, HIFN_1_PUB_RESET) &
			    HIFN_PUBRST_RESET) == 0)
				break;
		}

		if (i == 100) {
			device_printf(sc->sc_dev, "public key init failed\n");
			return (1);
		}
	}

	/* Enable the rng, if available */
	if (sc->sc_flags & HIFN_HAS_RNG) {
		if (sc->sc_flags & HIFN_IS_7811) {
			/* 7811: toggle the enable bit off then on. */
			r = READ_REG_1(sc, HIFN_1_7811_RNGENA);
			if (r & HIFN_7811_RNGENA_ENA) {
				r &= ~HIFN_7811_RNGENA_ENA;
				WRITE_REG_1(sc, HIFN_1_7811_RNGENA, r);
			}
			WRITE_REG_1(sc, HIFN_1_7811_RNGCFG,
			    HIFN_7811_RNGCFG_DEFL);
			r |= HIFN_7811_RNGENA_ENA;
			WRITE_REG_1(sc, HIFN_1_7811_RNGENA, r);
		} else
			WRITE_REG_1(sc, HIFN_1_RNG_CONFIG,
			    READ_REG_1(sc, HIFN_1_RNG_CONFIG) |
			    HIFN_RNGCFG_ENA);

		/* First read is discarded by hifn_rng(). */
		sc->sc_rngfirst = 1;
		/* Poll the RNG at ~100Hz (or every tick on slow clocks). */
		if (hz >= 100)
			sc->sc_rnghz = hz / 100;
		else
			sc->sc_rnghz = 1;
		callout_init(&sc->sc_rngto, CALLOUT_MPSAFE);
		callout_reset(&sc->sc_rngto, sc->sc_rnghz, hifn_rng, sc);
	}

	/* Enable public key engine, if available */
	if (sc->sc_flags & HIFN_HAS_PUBLIC) {
		WRITE_REG_1(sc, HIFN_1_PUB_IEN, HIFN_PUBIEN_DONE);
		sc->sc_dmaier |= HIFN_DMAIER_PUBDONE;
		WRITE_REG_1(sc, HIFN_1_DMA_IER, sc->sc_dmaier);
#ifdef HIFN_VULCANDEV
		sc->sc_pkdev = make_dev(&vulcanpk_cdevsw, 0,
					UID_ROOT, GID_WHEEL, 0666,
					"vulcanpk");
		sc->sc_pkdev->si_drv1 = sc;
#endif
	}

	return (0);
}
794
/*
 * RNG polling callout: read entropy words from the chip and pass them
 * to the harvest hook, then re-arm the callout to fire again in
 * sc_rnghz ticks.  The very first read after enabling is discarded on
 * both part types.  On the 7811 an underflow status permanently
 * disables polling (no re-arm).
 * NOTE(review): the RANDOM_BITS macro is defined but unused in this
 * function body.
 */
static void
hifn_rng(void *vsc)
{
#define	RANDOM_BITS(n)	(n)*sizeof (u_int32_t), (n)*sizeof (u_int32_t)*NBBY, 0
	struct hifn_softc *sc = vsc;
	u_int32_t sts, num[2];
	int i;

	if (sc->sc_flags & HIFN_IS_7811) {
		/* ONLY VALID ON 7811!!!! */
		for (i = 0; i < 5; i++) {
			sts = READ_REG_1(sc, HIFN_1_7811_RNGSTS);
			if (sts & HIFN_7811_RNGSTS_UFL) {
				device_printf(sc->sc_dev,
					      "RNG underflow: disabling\n");
				return;
			}
			if ((sts & HIFN_7811_RNGSTS_RDY) == 0)
				break;

			/*
			 * There are at least two words in the RNG FIFO
			 * at this point.
			 */
			num[0] = READ_REG_1(sc, HIFN_1_7811_RNGDAT);
			num[1] = READ_REG_1(sc, HIFN_1_7811_RNGDAT);
			/* NB: discard first data read */
			if (sc->sc_rngfirst)
				sc->sc_rngfirst = 0;
			else
				(*sc->sc_harvest)(sc->sc_rndtest,
					num, sizeof (num));
		}
	} else {
		/* Non-7811 parts expose a single RNG data register. */
		num[0] = READ_REG_1(sc, HIFN_1_RNG_DATA);

		/* NB: discard first data read */
		if (sc->sc_rngfirst)
			sc->sc_rngfirst = 0;
		else
			(*sc->sc_harvest)(sc->sc_rndtest,
				num, sizeof (num[0]));
	}

	callout_reset(&sc->sc_rngto, sc->sc_rnghz, hifn_rng, sc);
#undef RANDOM_BITS
}
842
843static void
844hifn_puc_wait(struct hifn_softc *sc)
845{
846	int i;
847	int reg = HIFN_0_PUCTRL;
848
849	if (sc->sc_flags & HIFN_IS_7956) {
850		reg = HIFN_0_PUCTRL2;
851	}
852
853	for (i = 5000; i > 0; i--) {
854		DELAY(1);
855		if (!(READ_REG_0(sc, reg) & HIFN_PUCTRL_RESET))
856			break;
857	}
858	if (!i)
859		device_printf(sc->sc_dev, "proc unit did not reset\n");
860}
861
862/*
863 * Reset the processing unit.
864 */
static void
hifn_reset_puc(struct hifn_softc *sc)
{
	/* Reset processing unit */
	int reg = HIFN_0_PUCTRL;

	/* 7956-class parts expose the control bits via PUCTRL2. */
	if (sc->sc_flags & HIFN_IS_7956) {
		reg = HIFN_0_PUCTRL2;
	}
	/* Writing DMAENA (without RESET) kicks off the unit reset. */
	WRITE_REG_0(sc, reg, HIFN_PUCTRL_DMAENA);

	/* Block until the RESET bit clears (or times out with a warning). */
	hifn_puc_wait(sc);
}
878
879/*
880 * Set the Retry and TRDY registers; note that we set them to
881 * zero because the 7811 locks up when forced to retry (section
882 * 3.6 of "Specification Update SU-0014-04".  Not clear if we
883 * should do this for all Hifn parts, but it doesn't seem to hurt.
884 */
static void
hifn_set_retry(struct hifn_softc *sc)
{
	/* NB: RETRY only responds to 8-bit reads/writes */
	/* Zero both timeouts; see the 7811 errata note above. */
	pci_write_config(sc->sc_dev, HIFN_RETRY_TIMEOUT, 0, 1);
	pci_write_config(sc->sc_dev, HIFN_TRDY_TIMEOUT, 0, 1);
}
892
893/*
894 * Resets the board.  Values in the regesters are left as is
895 * from the reset (i.e. initial values are assigned elsewhere).
896 */
static void
hifn_reset_board(struct hifn_softc *sc, int full)
{
	u_int32_t reg;

	/*
	 * Set polling in the DMA configuration register to zero.  0x7 avoids
	 * resetting the board and zeros out the other fields.
	 */
	WRITE_REG_1(sc, HIFN_1_DMA_CNFG, HIFN_DMACNFG_MSTRESET |
	    HIFN_DMACNFG_DMARESET | HIFN_DMACNFG_MODE);

	/*
	 * Now that polling has been disabled, we have to wait 1 ms
	 * before resetting the board.
	 */
	DELAY(1000);

	/* Reset the DMA unit */
	if (full) {
		WRITE_REG_1(sc, HIFN_1_DMA_CNFG, HIFN_DMACNFG_MODE);
		DELAY(1000);
	} else {
		/* Partial reset: master reset plus a processing-unit reset. */
		WRITE_REG_1(sc, HIFN_1_DMA_CNFG,
		    HIFN_DMACNFG_MODE | HIFN_DMACNFG_MSTRESET);
		hifn_reset_puc(sc);
	}

	KASSERT(sc->sc_dma != NULL, ("hifn_reset_board: null DMA tag!"));
	/* Descriptor rings are stale after reset; clear them. */
	bzero(sc->sc_dma, sizeof(*sc->sc_dma));

	/* Bring dma unit out of reset */
	WRITE_REG_1(sc, HIFN_1_DMA_CNFG, HIFN_DMACNFG_MSTRESET |
	    HIFN_DMACNFG_DMARESET | HIFN_DMACNFG_MODE);

	hifn_puc_wait(sc);
	hifn_set_retry(sc);

	if (sc->sc_flags & HIFN_IS_7811) {
		/* Wait up to ~1s for the 7811's CRAM init to complete. */
		for (reg = 0; reg < 1000; reg++) {
			if (READ_REG_1(sc, HIFN_1_7811_MIPSRST) &
			    HIFN_MIPSRST_CRAMINIT)
				break;
			DELAY(1000);
		}
		if (reg == 1000)
			printf(": cram init timeout\n");
	} else {
	  /* set up DMA configuration register #2 */
	  /* turn off all PK and BAR0 swaps */
	  WRITE_REG_1(sc, HIFN_1_DMA_CNFG2,
		      (3 << HIFN_DMACNFG2_INIT_WRITE_BURST_SHIFT)|
		      (3 << HIFN_DMACNFG2_INIT_READ_BURST_SHIFT)|
		      (2 << HIFN_DMACNFG2_TGT_WRITE_BURST_SHIFT)|
		      (2 << HIFN_DMACNFG2_TGT_READ_BURST_SHIFT));
	}

}
955
/*
 * Advance the crypto-unlock signature LFSR.
 *
 * The "secret handshake" that enables strong crypto requires writing a
 * sequence of values derived from an initial pattern.  Each step folds
 * the parity of the bits selected by the tap mask 0x80080125 into the
 * low bit of the shifted value:  a' = (a << 1) ^ parity(a & taps).
 *
 * a:   current signature value
 * cnt: number of LFSR steps to advance
 *
 * Returns the signature after cnt steps.  (Loop counter is unsigned to
 * match cnt and avoid a signed/unsigned comparison.)
 */
static u_int32_t
hifn_next_signature(u_int32_t a, u_int cnt)
{
	u_int i;
	u_int32_t v;

	for (i = 0; i < cnt; i++) {

		/* get the parity of the tapped bits */
		v = a & 0x80080125;
		v ^= v >> 16;
		v ^= v >> 8;
		v ^= v >> 4;
		v ^= v >> 2;
		v ^= v >> 1;

		/* shift left, folding the parity into the new low bit */
		a = (v & 1) ^ (a << 1);
	}

	return a;
}
977
/*
 * Map of PCI vendor/product ids to the 12-byte card id used in the
 * crypto-unlock handshake (card_id has a 13th byte for padding/NUL).
 * Every known card ships with the all-zeros key.
 */
struct pci2id {
	u_short		pci_vendor;	/* PCI vendor id */
	u_short		pci_prod;	/* PCI product id */
	char		card_id[13];	/* unlock key for this card */
};
static struct pci2id pci2id[] = {
	{
		PCI_VENDOR_HIFN,
		PCI_PRODUCT_HIFN_7951,
		{ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
		  0x00, 0x00, 0x00, 0x00, 0x00 }
	}, {
		PCI_VENDOR_HIFN,
		PCI_PRODUCT_HIFN_7955,
		{ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
		  0x00, 0x00, 0x00, 0x00, 0x00 }
	}, {
		PCI_VENDOR_HIFN,
		PCI_PRODUCT_HIFN_7956,
		{ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
		  0x00, 0x00, 0x00, 0x00, 0x00 }
	}, {
		PCI_VENDOR_NETSEC,
		PCI_PRODUCT_NETSEC_7751,
		{ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
		  0x00, 0x00, 0x00, 0x00, 0x00 }
	}, {
		PCI_VENDOR_INVERTEX,
		PCI_PRODUCT_INVERTEX_AEON,
		{ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
		  0x00, 0x00, 0x00, 0x00, 0x00 }
	}, {
		PCI_VENDOR_HIFN,
		PCI_PRODUCT_HIFN_7811,
		{ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
		  0x00, 0x00, 0x00, 0x00, 0x00 }
	}, {
		/*
		 * Other vendors share this PCI ID as well, such as
		 * http://www.powercrypt.com, and obviously they also
		 * use the same key.
		 */
		PCI_VENDOR_HIFN,
		PCI_PRODUCT_HIFN_7751,
		{ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
		  0x00, 0x00, 0x00, 0x00, 0x00 }
	},
};
1026
1027/*
1028 * Checks to see if crypto is already enabled.  If crypto isn't enable,
1029 * "hifn_enable_crypto" is called to enable it.  The check is important,
1030 * as enabling crypto twice will lock the board.
1031 */
static int
hifn_enable_crypto(struct hifn_softc *sc)
{
	u_int32_t dmacfg, ramcfg, encl, addr, i;
	char *offtbl = NULL;

	/* Look up this device's 13-byte unlock key by PCI vendor/product. */
	for (i = 0; i < sizeof(pci2id)/sizeof(pci2id[0]); i++) {
		if (pci2id[i].pci_vendor == pci_get_vendor(sc->sc_dev) &&
		    pci2id[i].pci_prod == pci_get_device(sc->sc_dev)) {
			offtbl = pci2id[i].card_id;
			break;
		}
	}
	if (offtbl == NULL) {
		device_printf(sc->sc_dev, "Unknown card!\n");
		return (1);
	}

	/* Save register state; restored at "report:" below. */
	ramcfg = READ_REG_0(sc, HIFN_0_PUCNFG);
	dmacfg = READ_REG_1(sc, HIFN_1_DMA_CNFG);

	/*
	 * The RAM config register's encrypt level bit needs to be set before
	 * every read performed on the encryption level register.
	 */
	WRITE_REG_0(sc, HIFN_0_PUCNFG, ramcfg | HIFN_PUCNFG_CHIPID);

	encl = READ_REG_0(sc, HIFN_0_PUSTAT) & HIFN_PUSTAT_CHIPENA;

	/*
	 * Make sure we don't re-unlock.  Two unlocks kills chip until the
	 * next reboot.
	 */
	if (encl == HIFN_PUSTAT_ENA_1 || encl == HIFN_PUSTAT_ENA_2) {
#ifdef HIFN_DEBUG
		if (hifn_debug)
			device_printf(sc->sc_dev,
			    "Strong crypto already enabled!\n");
#endif
		goto report;
	}

	/* Anything other than "not yet enabled" here is unexpected; bail. */
	if (encl != 0 && encl != HIFN_PUSTAT_ENA_0) {
#ifdef HIFN_DEBUG
		if (hifn_debug)
			device_printf(sc->sc_dev,
			      "Unknown encryption level 0x%x\n", encl);
#endif
		return 1;
	}

	/*
	 * Unlock sequence: enter unlock mode, read the secret seed, then
	 * clock the 13 key bytes through the signature register, advancing
	 * the signature by (byte + 0x101) steps each time.
	 */
	WRITE_REG_1(sc, HIFN_1_DMA_CNFG, HIFN_DMACNFG_UNLOCK |
	    HIFN_DMACNFG_MSTRESET | HIFN_DMACNFG_DMARESET | HIFN_DMACNFG_MODE);
	DELAY(1000);
	addr = READ_REG_1(sc, HIFN_UNLOCK_SECRET1);
	DELAY(1000);
	WRITE_REG_1(sc, HIFN_UNLOCK_SECRET2, 0);
	DELAY(1000);

	for (i = 0; i <= 12; i++) {
		addr = hifn_next_signature(addr, offtbl[i] + 0x101);
		WRITE_REG_1(sc, HIFN_UNLOCK_SECRET2, addr);

		DELAY(1000);
	}

	/* Re-read the encryption level to see whether the unlock took. */
	WRITE_REG_0(sc, HIFN_0_PUCNFG, ramcfg | HIFN_PUCNFG_CHIPID);
	encl = READ_REG_0(sc, HIFN_0_PUSTAT) & HIFN_PUSTAT_CHIPENA;

#ifdef HIFN_DEBUG
	if (hifn_debug) {
		if (encl != HIFN_PUSTAT_ENA_1 && encl != HIFN_PUSTAT_ENA_2)
			device_printf(sc->sc_dev, "Engine is permanently "
				"locked until next system reset!\n");
		else
			device_printf(sc->sc_dev, "Engine enabled "
				"successfully!\n");
	}
#endif

report:
	/* Restore the register state saved on entry. */
	WRITE_REG_0(sc, HIFN_0_PUCNFG, ramcfg);
	WRITE_REG_1(sc, HIFN_1_DMA_CNFG, dmacfg);

	switch (encl) {
	case HIFN_PUSTAT_ENA_1:
	case HIFN_PUSTAT_ENA_2:
		break;
	case HIFN_PUSTAT_ENA_0:
	default:
		/*
		 * NB: no newline — presumably the caller continues this
		 * attach-time line; TODO(review): confirm against caller.
		 */
		device_printf(sc->sc_dev, "disabled");
		break;
	}

	return 0;
}
1128
1129/*
1130 * Give initial values to the registers listed in the "Register Space"
1131 * section of the HIFN Software Development reference manual.
1132 */
static void
hifn_init_pci_registers(struct hifn_softc *sc)
{
	/* write fixed values needed by the Initialization registers */
	WRITE_REG_0(sc, HIFN_0_PUCTRL, HIFN_PUCTRL_DMAENA);
	WRITE_REG_0(sc, HIFN_0_FIFOCNFG, HIFN_FIFOCNFG_THRESHOLD);
	WRITE_REG_0(sc, HIFN_0_PUIER, HIFN_PUIER_DSTOVER);

	/* write all 4 ring address registers */
	WRITE_REG_1(sc, HIFN_1_DMA_CRAR, sc->sc_dma_physaddr +
	    offsetof(struct hifn_dma, cmdr[0]));
	WRITE_REG_1(sc, HIFN_1_DMA_SRAR, sc->sc_dma_physaddr +
	    offsetof(struct hifn_dma, srcr[0]));
	WRITE_REG_1(sc, HIFN_1_DMA_DRAR, sc->sc_dma_physaddr +
	    offsetof(struct hifn_dma, dstr[0]));
	WRITE_REG_1(sc, HIFN_1_DMA_RRAR, sc->sc_dma_physaddr +
	    offsetof(struct hifn_dma, resr[0]));

	DELAY(2000);

	/*
	 * write status register: disable all four DMA engines and
	 * acknowledge every pending status bit.
	 */
	WRITE_REG_1(sc, HIFN_1_DMA_CSR,
	    HIFN_DMACSR_D_CTRL_DIS | HIFN_DMACSR_R_CTRL_DIS |
	    HIFN_DMACSR_S_CTRL_DIS | HIFN_DMACSR_C_CTRL_DIS |
	    HIFN_DMACSR_D_ABORT | HIFN_DMACSR_D_DONE | HIFN_DMACSR_D_LAST |
	    HIFN_DMACSR_D_WAIT | HIFN_DMACSR_D_OVER |
	    HIFN_DMACSR_R_ABORT | HIFN_DMACSR_R_DONE | HIFN_DMACSR_R_LAST |
	    HIFN_DMACSR_R_WAIT | HIFN_DMACSR_R_OVER |
	    HIFN_DMACSR_S_ABORT | HIFN_DMACSR_S_DONE | HIFN_DMACSR_S_LAST |
	    HIFN_DMACSR_S_WAIT |
	    HIFN_DMACSR_C_ABORT | HIFN_DMACSR_C_DONE | HIFN_DMACSR_C_LAST |
	    HIFN_DMACSR_C_WAIT |
	    HIFN_DMACSR_ENGINE |
	    ((sc->sc_flags & HIFN_HAS_PUBLIC) ?
		HIFN_DMACSR_PUBDONE : 0) |
	    ((sc->sc_flags & HIFN_IS_7811) ?
		HIFN_DMACSR_ILLW | HIFN_DMACSR_ILLR : 0));

	/* enable the interrupts we service; C_WAIT only armed on demand */
	sc->sc_d_busy = sc->sc_r_busy = sc->sc_s_busy = sc->sc_c_busy = 0;
	sc->sc_dmaier |= HIFN_DMAIER_R_DONE | HIFN_DMAIER_C_ABORT |
	    HIFN_DMAIER_D_OVER | HIFN_DMAIER_R_OVER |
	    HIFN_DMAIER_S_ABORT | HIFN_DMAIER_D_ABORT | HIFN_DMAIER_R_ABORT |
	    ((sc->sc_flags & HIFN_IS_7811) ?
		HIFN_DMAIER_ILLW | HIFN_DMAIER_ILLR : 0);
	sc->sc_dmaier &= ~HIFN_DMAIER_C_WAIT;
	WRITE_REG_1(sc, HIFN_1_DMA_IER, sc->sc_dmaier);


	if (sc->sc_flags & HIFN_IS_7956) {
		u_int32_t pll;

		WRITE_REG_0(sc, HIFN_0_PUCNFG, HIFN_PUCNFG_COMPSING |
		    HIFN_PUCNFG_TCALLPHASES |
		    HIFN_PUCNFG_TCDRVTOTEM | HIFN_PUCNFG_BUS32);

		/* turn off the clocks and insure bypass is set */
		pll = READ_REG_1(sc, HIFN_1_PLL);
		pll = (pll &~ (HIFN_PLL_PK_CLK_SEL | HIFN_PLL_PE_CLK_SEL))
		  | HIFN_PLL_BP | HIFN_PLL_MBSET;
		WRITE_REG_1(sc, HIFN_1_PLL, pll);
		DELAY(10*1000);		/* 10ms */

		/* change configuration */
		pll = (pll &~ HIFN_PLL_CONFIG) | sc->sc_pllconfig;
		WRITE_REG_1(sc, HIFN_1_PLL, pll);
		DELAY(10*1000);		/* 10ms */

		/* disable bypass */
		pll &= ~HIFN_PLL_BP;
		WRITE_REG_1(sc, HIFN_1_PLL, pll);
		/* enable clocks with new configuration */
		pll |= HIFN_PLL_PK_CLK_SEL | HIFN_PLL_PE_CLK_SEL;
		WRITE_REG_1(sc, HIFN_1_PLL, pll);
	} else {
		WRITE_REG_0(sc, HIFN_0_PUCNFG, HIFN_PUCNFG_COMPSING |
		    HIFN_PUCNFG_DRFR_128 | HIFN_PUCNFG_TCALLPHASES |
		    HIFN_PUCNFG_TCDRVTOTEM | HIFN_PUCNFG_BUS32 |
		    (sc->sc_drammodel ? HIFN_PUCNFG_DRAM : HIFN_PUCNFG_SRAM));
	}

	/* ack any destination-overrun status, then set DMA mode/polling */
	WRITE_REG_0(sc, HIFN_0_PUISR, HIFN_PUISR_DSTOVER);
	WRITE_REG_1(sc, HIFN_1_DMA_CNFG, HIFN_DMACNFG_MSTRESET |
	    HIFN_DMACNFG_DMARESET | HIFN_DMACNFG_MODE | HIFN_DMACNFG_LAST |
	    ((HIFN_POLL_FREQUENCY << 16 ) & HIFN_DMACNFG_POLLFREQ) |
	    ((HIFN_POLL_SCALAR << 8) & HIFN_DMACNFG_POLLINVAL));
}
1219
1220/*
1221 * The maximum number of sessions supported by the card
1222 * is dependent on the amount of context ram, which
1223 * encryption algorithms are enabled, and how compression
1224 * is configured.  This should be configured before this
1225 * routine is called.
1226 */
1227static void
1228hifn_sessions(struct hifn_softc *sc)
1229{
1230	u_int32_t pucnfg;
1231	int ctxsize;
1232
1233	pucnfg = READ_REG_0(sc, HIFN_0_PUCNFG);
1234
1235	if (pucnfg & HIFN_PUCNFG_COMPSING) {
1236		if (pucnfg & HIFN_PUCNFG_ENCCNFG)
1237			ctxsize = 128;
1238		else
1239			ctxsize = 512;
1240		/*
1241		 * 7955/7956 has internal context memory of 32K
1242		 */
1243		if (sc->sc_flags & HIFN_IS_7956)
1244			sc->sc_maxses = 32768 / ctxsize;
1245		else
1246			sc->sc_maxses = 1 +
1247			    ((sc->sc_ramsize - 32768) / ctxsize);
1248	} else
1249		sc->sc_maxses = sc->sc_ramsize / 16384;
1250
1251	if (sc->sc_maxses > 2048)
1252		sc->sc_maxses = 2048;
1253}
1254
1255/*
1256 * Determine ram type (sram or dram).  Board should be just out of a reset
1257 * state when this is called.
1258 */
1259static int
1260hifn_ramtype(struct hifn_softc *sc)
1261{
1262	u_int8_t data[8], dataexpect[8];
1263	int i;
1264
1265	for (i = 0; i < sizeof(data); i++)
1266		data[i] = dataexpect[i] = 0x55;
1267	if (hifn_writeramaddr(sc, 0, data))
1268		return (-1);
1269	if (hifn_readramaddr(sc, 0, data))
1270		return (-1);
1271	if (bcmp(data, dataexpect, sizeof(data)) != 0) {
1272		sc->sc_drammodel = 1;
1273		return (0);
1274	}
1275
1276	for (i = 0; i < sizeof(data); i++)
1277		data[i] = dataexpect[i] = 0xaa;
1278	if (hifn_writeramaddr(sc, 0, data))
1279		return (-1);
1280	if (hifn_readramaddr(sc, 0, data))
1281		return (-1);
1282	if (bcmp(data, dataexpect, sizeof(data)) != 0) {
1283		sc->sc_drammodel = 1;
1284		return (0);
1285	}
1286
1287	return (0);
1288}
1289
1290#define	HIFN_SRAM_MAX		(32 << 20)
1291#define	HIFN_SRAM_STEP_SIZE	16384
1292#define	HIFN_SRAM_GRANULARITY	(HIFN_SRAM_MAX / HIFN_SRAM_STEP_SIZE)
1293
static int
hifn_sramsize(struct hifn_softc *sc)
{
	u_int32_t a;
	u_int8_t data[8];
	u_int8_t dataexpect[sizeof(data)];
	int32_t i;

	/* filler pattern; the first 4 bytes get overwritten by bcopy below */
	for (i = 0; i < sizeof(data); i++)
		data[i] = dataexpect[i] = i ^ 0x5a;

	/*
	 * Tag each 16KB step with its own index, writing from the top
	 * down so that any address aliasing leaves the lower tag in place.
	 */
	for (i = HIFN_SRAM_GRANULARITY - 1; i >= 0; i--) {
		a = i * HIFN_SRAM_STEP_SIZE;
		bcopy(&i, data, sizeof(i));
		hifn_writeramaddr(sc, a, data);
	}

	/*
	 * Read back from the bottom up, growing sc_ramsize while each step
	 * still holds its own tag.  Both sides of the compare were written
	 * with host-order bcopy, so endianness cancels out.
	 */
	for (i = 0; i < HIFN_SRAM_GRANULARITY; i++) {
		a = i * HIFN_SRAM_STEP_SIZE;
		bcopy(&i, dataexpect, sizeof(i));
		if (hifn_readramaddr(sc, a, data) < 0)
			return (0);
		if (bcmp(data, dataexpect, sizeof(data)) != 0)
			return (0);
		sc->sc_ramsize = a + HIFN_SRAM_STEP_SIZE;
	}

	return (0);
}
1323
1324/*
1325 * XXX For dram boards, one should really try all of the
1326 * HIFN_PUCNFG_DSZ_*'s.  This just assumes that PUCNFG
1327 * is already set up correctly.
1328 */
1329static int
1330hifn_dramsize(struct hifn_softc *sc)
1331{
1332	u_int32_t cnfg;
1333
1334	if (sc->sc_flags & HIFN_IS_7956) {
1335		/*
1336		 * 7955/7956 have a fixed internal ram of only 32K.
1337		 */
1338		sc->sc_ramsize = 32768;
1339	} else {
1340		cnfg = READ_REG_0(sc, HIFN_0_PUCNFG) &
1341		    HIFN_PUCNFG_DRAMMASK;
1342		sc->sc_ramsize = 1 << ((cnfg >> 13) + 18);
1343	}
1344	return (0);
1345}
1346
/*
 * Reserve the next slot in each of the four descriptor rings (command,
 * source, destination, result) and return the indices through the pointer
 * arguments.  When a ring index has reached the jump descriptor at the end
 * of the ring, that descriptor is marked valid so the chip wraps to slot 0.
 * The per-ring "k" cursor is advanced alongside the "i" cursor.
 */
static void
hifn_alloc_slot(struct hifn_softc *sc, int *cmdp, int *srcp, int *dstp, int *resp)
{
	struct hifn_dma *dma = sc->sc_dma;

	/* command ring */
	if (sc->sc_cmdi == HIFN_D_CMD_RSIZE) {
		sc->sc_cmdi = 0;
		dma->cmdr[HIFN_D_CMD_RSIZE].l = htole32(HIFN_D_VALID |
		    HIFN_D_JUMP | HIFN_D_MASKDONEIRQ);
		HIFN_CMDR_SYNC(sc, HIFN_D_CMD_RSIZE,
		    BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
	}
	*cmdp = sc->sc_cmdi++;
	sc->sc_cmdk = sc->sc_cmdi;

	/* source ring */
	if (sc->sc_srci == HIFN_D_SRC_RSIZE) {
		sc->sc_srci = 0;
		dma->srcr[HIFN_D_SRC_RSIZE].l = htole32(HIFN_D_VALID |
		    HIFN_D_JUMP | HIFN_D_MASKDONEIRQ);
		HIFN_SRCR_SYNC(sc, HIFN_D_SRC_RSIZE,
		    BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
	}
	*srcp = sc->sc_srci++;
	sc->sc_srck = sc->sc_srci;

	/* destination ring */
	if (sc->sc_dsti == HIFN_D_DST_RSIZE) {
		sc->sc_dsti = 0;
		dma->dstr[HIFN_D_DST_RSIZE].l = htole32(HIFN_D_VALID |
		    HIFN_D_JUMP | HIFN_D_MASKDONEIRQ);
		HIFN_DSTR_SYNC(sc, HIFN_D_DST_RSIZE,
		    BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
	}
	*dstp = sc->sc_dsti++;
	sc->sc_dstk = sc->sc_dsti;

	/* result ring */
	if (sc->sc_resi == HIFN_D_RES_RSIZE) {
		sc->sc_resi = 0;
		dma->resr[HIFN_D_RES_RSIZE].l = htole32(HIFN_D_VALID |
		    HIFN_D_JUMP | HIFN_D_MASKDONEIRQ);
		HIFN_RESR_SYNC(sc, HIFN_D_RES_RSIZE,
		    BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
	}
	*resp = sc->sc_resi++;
	sc->sc_resk = sc->sc_resi;
}
1392
1393static int
1394hifn_writeramaddr(struct hifn_softc *sc, int addr, u_int8_t *data)
1395{
1396	struct hifn_dma *dma = sc->sc_dma;
1397	hifn_base_command_t wc;
1398	const u_int32_t masks = HIFN_D_VALID | HIFN_D_LAST | HIFN_D_MASKDONEIRQ;
1399	int r, cmdi, resi, srci, dsti;
1400
1401	wc.masks = htole16(3 << 13);
1402	wc.session_num = htole16(addr >> 14);
1403	wc.total_source_count = htole16(8);
1404	wc.total_dest_count = htole16(addr & 0x3fff);
1405
1406	hifn_alloc_slot(sc, &cmdi, &srci, &dsti, &resi);
1407
1408	WRITE_REG_1(sc, HIFN_1_DMA_CSR,
1409	    HIFN_DMACSR_C_CTRL_ENA | HIFN_DMACSR_S_CTRL_ENA |
1410	    HIFN_DMACSR_D_CTRL_ENA | HIFN_DMACSR_R_CTRL_ENA);
1411
1412	/* build write command */
1413	bzero(dma->command_bufs[cmdi], HIFN_MAX_COMMAND);
1414	*(hifn_base_command_t *)dma->command_bufs[cmdi] = wc;
1415	bcopy(data, &dma->test_src, sizeof(dma->test_src));
1416
1417	dma->srcr[srci].p = htole32(sc->sc_dma_physaddr
1418	    + offsetof(struct hifn_dma, test_src));
1419	dma->dstr[dsti].p = htole32(sc->sc_dma_physaddr
1420	    + offsetof(struct hifn_dma, test_dst));
1421
1422	dma->cmdr[cmdi].l = htole32(16 | masks);
1423	dma->srcr[srci].l = htole32(8 | masks);
1424	dma->dstr[dsti].l = htole32(4 | masks);
1425	dma->resr[resi].l = htole32(4 | masks);
1426
1427	bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap,
1428	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1429
1430	for (r = 10000; r >= 0; r--) {
1431		DELAY(10);
1432		bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap,
1433		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1434		if ((dma->resr[resi].l & htole32(HIFN_D_VALID)) == 0)
1435			break;
1436		bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap,
1437		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1438	}
1439	if (r == 0) {
1440		device_printf(sc->sc_dev, "writeramaddr -- "
1441		    "result[%d](addr %d) still valid\n", resi, addr);
1442		r = -1;
1443		return (-1);
1444	} else
1445		r = 0;
1446
1447	WRITE_REG_1(sc, HIFN_1_DMA_CSR,
1448	    HIFN_DMACSR_C_CTRL_DIS | HIFN_DMACSR_S_CTRL_DIS |
1449	    HIFN_DMACSR_D_CTRL_DIS | HIFN_DMACSR_R_CTRL_DIS);
1450
1451	return (r);
1452}
1453
1454static int
1455hifn_readramaddr(struct hifn_softc *sc, int addr, u_int8_t *data)
1456{
1457	struct hifn_dma *dma = sc->sc_dma;
1458	hifn_base_command_t rc;
1459	const u_int32_t masks = HIFN_D_VALID | HIFN_D_LAST | HIFN_D_MASKDONEIRQ;
1460	int r, cmdi, srci, dsti, resi;
1461
1462	rc.masks = htole16(2 << 13);
1463	rc.session_num = htole16(addr >> 14);
1464	rc.total_source_count = htole16(addr & 0x3fff);
1465	rc.total_dest_count = htole16(8);
1466
1467	hifn_alloc_slot(sc, &cmdi, &srci, &dsti, &resi);
1468
1469	WRITE_REG_1(sc, HIFN_1_DMA_CSR,
1470	    HIFN_DMACSR_C_CTRL_ENA | HIFN_DMACSR_S_CTRL_ENA |
1471	    HIFN_DMACSR_D_CTRL_ENA | HIFN_DMACSR_R_CTRL_ENA);
1472
1473	bzero(dma->command_bufs[cmdi], HIFN_MAX_COMMAND);
1474	*(hifn_base_command_t *)dma->command_bufs[cmdi] = rc;
1475
1476	dma->srcr[srci].p = htole32(sc->sc_dma_physaddr +
1477	    offsetof(struct hifn_dma, test_src));
1478	dma->test_src = 0;
1479	dma->dstr[dsti].p =  htole32(sc->sc_dma_physaddr +
1480	    offsetof(struct hifn_dma, test_dst));
1481	dma->test_dst = 0;
1482	dma->cmdr[cmdi].l = htole32(8 | masks);
1483	dma->srcr[srci].l = htole32(8 | masks);
1484	dma->dstr[dsti].l = htole32(8 | masks);
1485	dma->resr[resi].l = htole32(HIFN_MAX_RESULT | masks);
1486
1487	bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap,
1488	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1489
1490	for (r = 10000; r >= 0; r--) {
1491		DELAY(10);
1492		bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap,
1493		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1494		if ((dma->resr[resi].l & htole32(HIFN_D_VALID)) == 0)
1495			break;
1496		bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap,
1497		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1498	}
1499	if (r == 0) {
1500		device_printf(sc->sc_dev, "readramaddr -- "
1501		    "result[%d](addr %d) still valid\n", resi, addr);
1502		r = -1;
1503	} else {
1504		r = 0;
1505		bcopy(&dma->test_dst, data, sizeof(dma->test_dst));
1506	}
1507
1508	WRITE_REG_1(sc, HIFN_1_DMA_CSR,
1509	    HIFN_DMACSR_C_CTRL_DIS | HIFN_DMACSR_S_CTRL_DIS |
1510	    HIFN_DMACSR_D_CTRL_DIS | HIFN_DMACSR_R_CTRL_DIS);
1511
1512	return (r);
1513}
1514
1515/*
1516 * Initialize the descriptor rings.
1517 */
1518static void
1519hifn_init_dma(struct hifn_softc *sc)
1520{
1521	struct hifn_dma *dma = sc->sc_dma;
1522	int i;
1523
1524	hifn_set_retry(sc);
1525
1526	/* initialize static pointer values */
1527	for (i = 0; i < HIFN_D_CMD_RSIZE; i++)
1528		dma->cmdr[i].p = htole32(sc->sc_dma_physaddr +
1529		    offsetof(struct hifn_dma, command_bufs[i][0]));
1530	for (i = 0; i < HIFN_D_RES_RSIZE; i++)
1531		dma->resr[i].p = htole32(sc->sc_dma_physaddr +
1532		    offsetof(struct hifn_dma, result_bufs[i][0]));
1533
1534	dma->cmdr[HIFN_D_CMD_RSIZE].p =
1535	    htole32(sc->sc_dma_physaddr + offsetof(struct hifn_dma, cmdr[0]));
1536	dma->srcr[HIFN_D_SRC_RSIZE].p =
1537	    htole32(sc->sc_dma_physaddr + offsetof(struct hifn_dma, srcr[0]));
1538	dma->dstr[HIFN_D_DST_RSIZE].p =
1539	    htole32(sc->sc_dma_physaddr + offsetof(struct hifn_dma, dstr[0]));
1540	dma->resr[HIFN_D_RES_RSIZE].p =
1541	    htole32(sc->sc_dma_physaddr + offsetof(struct hifn_dma, resr[0]));
1542
1543	sc->sc_cmdu = sc->sc_srcu = sc->sc_dstu = sc->sc_resu = 0;
1544	sc->sc_cmdi = sc->sc_srci = sc->sc_dsti = sc->sc_resi = 0;
1545	sc->sc_cmdk = sc->sc_srck = sc->sc_dstk = sc->sc_resk = 0;
1546}
1547
1548/*
1549 * Writes out the raw command buffer space.  Returns the
1550 * command buffer size.
1551 */
static u_int
hifn_write_command(struct hifn_command *cmd, u_int8_t *buf)
{
	u_int8_t *buf_pos;
	hifn_base_command_t *base_cmd;
	hifn_mac_command_t *mac_cmd;
	hifn_crypt_command_t *cry_cmd;
	int using_mac, using_crypt, len, ivlen;
	u_int32_t dlen, slen;

	buf_pos = buf;
	using_mac = cmd->base_masks & HIFN_BASE_CMD_MAC;
	using_crypt = cmd->base_masks & HIFN_BASE_CMD_CRYPT;

	/*
	 * Base command: 16-bit low parts of the source/destination lengths
	 * go in their own fields; the high bits are packed into session_num.
	 */
	base_cmd = (hifn_base_command_t *)buf_pos;
	base_cmd->masks = htole16(cmd->base_masks);
	slen = cmd->src_mapsize;
	if (cmd->sloplen)
		/* round destination down to slop boundary plus one longword */
		dlen = cmd->dst_mapsize - cmd->sloplen + sizeof(u_int32_t);
	else
		dlen = cmd->dst_mapsize;
	base_cmd->total_source_count = htole16(slen & HIFN_BASE_CMD_LENMASK_LO);
	base_cmd->total_dest_count = htole16(dlen & HIFN_BASE_CMD_LENMASK_LO);
	dlen >>= 16;
	slen >>= 16;
	base_cmd->session_num = htole16(
	    ((slen << HIFN_BASE_CMD_SRCLEN_S) & HIFN_BASE_CMD_SRCLEN_M) |
	    ((dlen << HIFN_BASE_CMD_DSTLEN_S) & HIFN_BASE_CMD_DSTLEN_M));
	buf_pos += sizeof(hifn_base_command_t);

	/* optional MAC sub-command */
	if (using_mac) {
		mac_cmd = (hifn_mac_command_t *)buf_pos;
		dlen = cmd->maccrd->crd_len;
		mac_cmd->source_count = htole16(dlen & 0xffff);
		dlen >>= 16;
		mac_cmd->masks = htole16(cmd->mac_masks |
		    ((dlen << HIFN_MAC_CMD_SRCLEN_S) & HIFN_MAC_CMD_SRCLEN_M));
		mac_cmd->header_skip = htole16(cmd->maccrd->crd_skip);
		mac_cmd->reserved = 0;
		buf_pos += sizeof(hifn_mac_command_t);
	}

	/* optional crypt sub-command */
	if (using_crypt) {
		cry_cmd = (hifn_crypt_command_t *)buf_pos;
		dlen = cmd->enccrd->crd_len;
		cry_cmd->source_count = htole16(dlen & 0xffff);
		dlen >>= 16;
		cry_cmd->masks = htole16(cmd->cry_masks |
		    ((dlen << HIFN_CRYPT_CMD_SRCLEN_S) & HIFN_CRYPT_CMD_SRCLEN_M));
		cry_cmd->header_skip = htole16(cmd->enccrd->crd_skip);
		cry_cmd->reserved = 0;
		buf_pos += sizeof(hifn_crypt_command_t);
	}

	/* new MAC key, if requested, follows the sub-commands */
	if (using_mac && cmd->mac_masks & HIFN_MAC_CMD_NEW_KEY) {
		bcopy(cmd->mac, buf_pos, HIFN_MAC_KEY_LENGTH);
		buf_pos += HIFN_MAC_KEY_LENGTH;
	}

	/* new cipher key: layout depends on the selected algorithm */
	if (using_crypt && cmd->cry_masks & HIFN_CRYPT_CMD_NEW_KEY) {
		switch (cmd->cry_masks & HIFN_CRYPT_CMD_ALG_MASK) {
		case HIFN_CRYPT_CMD_ALG_3DES:
			bcopy(cmd->ck, buf_pos, HIFN_3DES_KEY_LENGTH);
			buf_pos += HIFN_3DES_KEY_LENGTH;
			break;
		case HIFN_CRYPT_CMD_ALG_DES:
			bcopy(cmd->ck, buf_pos, HIFN_DES_KEY_LENGTH);
			buf_pos += HIFN_DES_KEY_LENGTH;
			break;
		case HIFN_CRYPT_CMD_ALG_RC4:
			/* repeat the key material to fill 256 bytes, then pad */
			len = 256;
			do {
				int clen;

				clen = MIN(cmd->cklen, len);
				bcopy(cmd->ck, buf_pos, clen);
				len -= clen;
				buf_pos += clen;
			} while (len > 0);
			bzero(buf_pos, 4);
			buf_pos += 4;
			break;
		case HIFN_CRYPT_CMD_ALG_AES:
			/*
			 * AES keys are variable 128, 192 and
			 * 256 bits (16, 24 and 32 bytes).
			 */
			bcopy(cmd->ck, buf_pos, cmd->cklen);
			buf_pos += cmd->cklen;
			break;
		}
	}

	/* new IV, if requested; AES uses a larger IV than DES/3DES */
	if (using_crypt && cmd->cry_masks & HIFN_CRYPT_CMD_NEW_IV) {
		switch (cmd->cry_masks & HIFN_CRYPT_CMD_ALG_MASK) {
		case HIFN_CRYPT_CMD_ALG_AES:
			ivlen = HIFN_AES_IV_LENGTH;
			break;
		default:
			ivlen = HIFN_IV_LENGTH;
			break;
		}
		bcopy(cmd->iv, buf_pos, ivlen);
		buf_pos += ivlen;
	}

	/* neither MAC nor crypt: emit 8 zero bytes after the base command */
	if ((cmd->base_masks & (HIFN_BASE_CMD_MAC|HIFN_BASE_CMD_CRYPT)) == 0) {
		bzero(buf_pos, 8);
		buf_pos += 8;
	}

	return (buf_pos - buf);
}
1665
1666static int
1667hifn_dmamap_aligned(struct hifn_operand *op)
1668{
1669	int i;
1670
1671	for (i = 0; i < op->nsegs; i++) {
1672		if (op->segs[i].ds_addr & 3)
1673			return (0);
1674		if ((i != (op->nsegs - 1)) && (op->segs[i].ds_len & 3))
1675			return (0);
1676	}
1677	return (1);
1678}
1679
1680static __inline int
1681hifn_dmamap_dstwrap(struct hifn_softc *sc, int idx)
1682{
1683	struct hifn_dma *dma = sc->sc_dma;
1684
1685	if (++idx == HIFN_D_DST_RSIZE) {
1686		dma->dstr[idx].l = htole32(HIFN_D_VALID | HIFN_D_JUMP |
1687		    HIFN_D_MASKDONEIRQ);
1688		HIFN_DSTR_SYNC(sc, idx,
1689		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1690		idx = 0;
1691	}
1692	return (idx);
1693}
1694
static int
hifn_dmamap_load_dst(struct hifn_softc *sc, struct hifn_command *cmd)
{
	struct hifn_dma *dma = sc->sc_dma;
	struct hifn_operand *dst = &cmd->dst;
	u_int32_t p, l;
	int idx, used = 0, i;

	/* all but the final segment can be loaded verbatim */
	idx = sc->sc_dsti;
	for (i = 0; i < dst->nsegs - 1; i++) {
		dma->dstr[idx].p = htole32(dst->segs[i].ds_addr);
		dma->dstr[idx].l = htole32(HIFN_D_VALID |
		    HIFN_D_MASKDONEIRQ | dst->segs[i].ds_len);
		HIFN_DSTR_SYNC(sc, idx,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
		used++;

		idx = hifn_dmamap_dstwrap(sc, idx);
	}

	if (cmd->sloplen == 0) {
		/* aligned: the final segment is the last descriptor */
		p = dst->segs[i].ds_addr;
		l = HIFN_D_VALID | HIFN_D_MASKDONEIRQ | HIFN_D_LAST |
		    dst->segs[i].ds_len;
	} else {
		/*
		 * Unaligned tail: the last descriptor redirects the final
		 * longword into the shared slop area indexed by slopidx.
		 */
		p = sc->sc_dma_physaddr +
		    offsetof(struct hifn_dma, slop[cmd->slopidx]);
		l = HIFN_D_VALID | HIFN_D_MASKDONEIRQ | HIFN_D_LAST |
		    sizeof(u_int32_t);

		/* the rest of the final segment, if any, precedes the slop */
		if ((dst->segs[i].ds_len - cmd->sloplen) != 0) {
			dma->dstr[idx].p = htole32(dst->segs[i].ds_addr);
			dma->dstr[idx].l = htole32(HIFN_D_VALID |
			    HIFN_D_MASKDONEIRQ |
			    (dst->segs[i].ds_len - cmd->sloplen));
			HIFN_DSTR_SYNC(sc, idx,
			    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
			used++;

			idx = hifn_dmamap_dstwrap(sc, idx);
		}
	}
	/* emit the HIFN_D_LAST descriptor chosen above */
	dma->dstr[idx].p = htole32(p);
	dma->dstr[idx].l = htole32(l);
	HIFN_DSTR_SYNC(sc, idx, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	used++;

	idx = hifn_dmamap_dstwrap(sc, idx);

	sc->sc_dsti = idx;
	sc->sc_dstu += used;
	return (idx);
}
1748
1749static __inline int
1750hifn_dmamap_srcwrap(struct hifn_softc *sc, int idx)
1751{
1752	struct hifn_dma *dma = sc->sc_dma;
1753
1754	if (++idx == HIFN_D_SRC_RSIZE) {
1755		dma->srcr[idx].l = htole32(HIFN_D_VALID |
1756		    HIFN_D_JUMP | HIFN_D_MASKDONEIRQ);
1757		HIFN_SRCR_SYNC(sc, HIFN_D_SRC_RSIZE,
1758		    BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
1759		idx = 0;
1760	}
1761	return (idx);
1762}
1763
1764static int
1765hifn_dmamap_load_src(struct hifn_softc *sc, struct hifn_command *cmd)
1766{
1767	struct hifn_dma *dma = sc->sc_dma;
1768	struct hifn_operand *src = &cmd->src;
1769	int idx, i;
1770	u_int32_t last = 0;
1771
1772	idx = sc->sc_srci;
1773	for (i = 0; i < src->nsegs; i++) {
1774		if (i == src->nsegs - 1)
1775			last = HIFN_D_LAST;
1776
1777		dma->srcr[idx].p = htole32(src->segs[i].ds_addr);
1778		dma->srcr[idx].l = htole32(src->segs[i].ds_len |
1779		    HIFN_D_VALID | HIFN_D_MASKDONEIRQ | last);
1780		HIFN_SRCR_SYNC(sc, idx,
1781		    BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
1782
1783		idx = hifn_dmamap_srcwrap(sc, idx);
1784	}
1785	sc->sc_srci = idx;
1786	sc->sc_srcu += src->nsegs;
1787	return (idx);
1788}
1789
1790static void
1791hifn_op_cb(void* arg, bus_dma_segment_t *seg, int nsegs, bus_size_t mapsize, int error)
1792{
1793	struct hifn_operand *op = arg;
1794
1795	KASSERT(nsegs <= MAX_SCATTER,
1796		("hifn_op_cb: too many DMA segments (%u > %u) "
1797		 "returned when mapping operand", nsegs, MAX_SCATTER));
1798	op->mapsize = mapsize;
1799	op->nsegs = nsegs;
1800	bcopy(seg, op->segs, nsegs * sizeof (seg[0]));
1801}
1802
1803static int
1804hifn_crypto(
1805	struct hifn_softc *sc,
1806	struct hifn_command *cmd,
1807	struct cryptop *crp,
1808	int hint)
1809{
1810	struct	hifn_dma *dma = sc->sc_dma;
1811	u_int32_t cmdlen, csr;
1812	int cmdi, resi, err = 0;
1813
1814	/*
1815	 * need 1 cmd, and 1 res
1816	 *
1817	 * NB: check this first since it's easy.
1818	 */
1819	HIFN_LOCK(sc);
1820	if ((sc->sc_cmdu + 1) > HIFN_D_CMD_RSIZE ||
1821	    (sc->sc_resu + 1) > HIFN_D_RES_RSIZE) {
1822#ifdef HIFN_DEBUG
1823		if (hifn_debug) {
1824			device_printf(sc->sc_dev,
1825				"cmd/result exhaustion, cmdu %u resu %u\n",
1826				sc->sc_cmdu, sc->sc_resu);
1827		}
1828#endif
1829		hifnstats.hst_nomem_cr++;
1830		HIFN_UNLOCK(sc);
1831		return (ERESTART);
1832	}
1833
1834	if (bus_dmamap_create(sc->sc_dmat, BUS_DMA_NOWAIT, &cmd->src_map)) {
1835		hifnstats.hst_nomem_map++;
1836		HIFN_UNLOCK(sc);
1837		return (ENOMEM);
1838	}
1839
1840	if (crp->crp_flags & CRYPTO_F_IMBUF) {
1841		if (bus_dmamap_load_mbuf(sc->sc_dmat, cmd->src_map,
1842		    cmd->src_m, hifn_op_cb, &cmd->src, BUS_DMA_NOWAIT)) {
1843			hifnstats.hst_nomem_load++;
1844			err = ENOMEM;
1845			goto err_srcmap1;
1846		}
1847	} else if (crp->crp_flags & CRYPTO_F_IOV) {
1848		if (bus_dmamap_load_uio(sc->sc_dmat, cmd->src_map,
1849		    cmd->src_io, hifn_op_cb, &cmd->src, BUS_DMA_NOWAIT)) {
1850			hifnstats.hst_nomem_load++;
1851			err = ENOMEM;
1852			goto err_srcmap1;
1853		}
1854	} else {
1855		err = EINVAL;
1856		goto err_srcmap1;
1857	}
1858
1859	if (hifn_dmamap_aligned(&cmd->src)) {
1860		cmd->sloplen = cmd->src_mapsize & 3;
1861		cmd->dst = cmd->src;
1862	} else {
1863		if (crp->crp_flags & CRYPTO_F_IOV) {
1864			err = EINVAL;
1865			goto err_srcmap;
1866		} else if (crp->crp_flags & CRYPTO_F_IMBUF) {
1867			int totlen, len;
1868			struct mbuf *m, *m0, *mlast;
1869
1870			KASSERT(cmd->dst_m == cmd->src_m,
1871				("hifn_crypto: dst_m initialized improperly"));
1872			hifnstats.hst_unaligned++;
1873			/*
1874			 * Source is not aligned on a longword boundary.
1875			 * Copy the data to insure alignment.  If we fail
1876			 * to allocate mbufs or clusters while doing this
1877			 * we return ERESTART so the operation is requeued
1878			 * at the crypto later, but only if there are
1879			 * ops already posted to the hardware; otherwise we
1880			 * have no guarantee that we'll be re-entered.
1881			 */
1882			totlen = cmd->src_mapsize;
1883			if (cmd->src_m->m_flags & M_PKTHDR) {
1884				len = MHLEN;
1885				MGETHDR(m0, M_DONTWAIT, MT_DATA);
1886				if (m0 && !m_dup_pkthdr(m0, cmd->src_m, M_DONTWAIT)) {
1887					m_free(m0);
1888					m0 = NULL;
1889				}
1890			} else {
1891				len = MLEN;
1892				MGET(m0, M_DONTWAIT, MT_DATA);
1893			}
1894			if (m0 == NULL) {
1895				hifnstats.hst_nomem_mbuf++;
1896				err = sc->sc_cmdu ? ERESTART : ENOMEM;
1897				goto err_srcmap;
1898			}
1899			if (totlen >= MINCLSIZE) {
1900				MCLGET(m0, M_DONTWAIT);
1901				if ((m0->m_flags & M_EXT) == 0) {
1902					hifnstats.hst_nomem_mcl++;
1903					err = sc->sc_cmdu ? ERESTART : ENOMEM;
1904					m_freem(m0);
1905					goto err_srcmap;
1906				}
1907				len = MCLBYTES;
1908			}
1909			totlen -= len;
1910			m0->m_pkthdr.len = m0->m_len = len;
1911			mlast = m0;
1912
1913			while (totlen > 0) {
1914				MGET(m, M_DONTWAIT, MT_DATA);
1915				if (m == NULL) {
1916					hifnstats.hst_nomem_mbuf++;
1917					err = sc->sc_cmdu ? ERESTART : ENOMEM;
1918					m_freem(m0);
1919					goto err_srcmap;
1920				}
1921				len = MLEN;
1922				if (totlen >= MINCLSIZE) {
1923					MCLGET(m, M_DONTWAIT);
1924					if ((m->m_flags & M_EXT) == 0) {
1925						hifnstats.hst_nomem_mcl++;
1926						err = sc->sc_cmdu ? ERESTART : ENOMEM;
1927						mlast->m_next = m;
1928						m_freem(m0);
1929						goto err_srcmap;
1930					}
1931					len = MCLBYTES;
1932				}
1933
1934				m->m_len = len;
1935				m0->m_pkthdr.len += len;
1936				totlen -= len;
1937
1938				mlast->m_next = m;
1939				mlast = m;
1940			}
1941			cmd->dst_m = m0;
1942		}
1943	}
1944
1945	if (cmd->dst_map == NULL) {
1946		if (bus_dmamap_create(sc->sc_dmat, BUS_DMA_NOWAIT, &cmd->dst_map)) {
1947			hifnstats.hst_nomem_map++;
1948			err = ENOMEM;
1949			goto err_srcmap;
1950		}
1951		if (crp->crp_flags & CRYPTO_F_IMBUF) {
1952			if (bus_dmamap_load_mbuf(sc->sc_dmat, cmd->dst_map,
1953			    cmd->dst_m, hifn_op_cb, &cmd->dst, BUS_DMA_NOWAIT)) {
1954				hifnstats.hst_nomem_map++;
1955				err = ENOMEM;
1956				goto err_dstmap1;
1957			}
1958		} else if (crp->crp_flags & CRYPTO_F_IOV) {
1959			if (bus_dmamap_load_uio(sc->sc_dmat, cmd->dst_map,
1960			    cmd->dst_io, hifn_op_cb, &cmd->dst, BUS_DMA_NOWAIT)) {
1961				hifnstats.hst_nomem_load++;
1962				err = ENOMEM;
1963				goto err_dstmap1;
1964			}
1965		}
1966	}
1967
1968#ifdef HIFN_DEBUG
1969	if (hifn_debug) {
1970		device_printf(sc->sc_dev,
1971		    "Entering cmd: stat %8x ien %8x u %d/%d/%d/%d n %d/%d\n",
1972		    READ_REG_1(sc, HIFN_1_DMA_CSR),
1973		    READ_REG_1(sc, HIFN_1_DMA_IER),
1974		    sc->sc_cmdu, sc->sc_srcu, sc->sc_dstu, sc->sc_resu,
1975		    cmd->src_nsegs, cmd->dst_nsegs);
1976	}
1977#endif
1978
1979	if (cmd->src_map == cmd->dst_map) {
1980		bus_dmamap_sync(sc->sc_dmat, cmd->src_map,
1981		    BUS_DMASYNC_PREWRITE|BUS_DMASYNC_PREREAD);
1982	} else {
1983		bus_dmamap_sync(sc->sc_dmat, cmd->src_map,
1984		    BUS_DMASYNC_PREWRITE);
1985		bus_dmamap_sync(sc->sc_dmat, cmd->dst_map,
1986		    BUS_DMASYNC_PREREAD);
1987	}
1988
1989	/*
1990	 * need N src, and N dst
1991	 */
1992	if ((sc->sc_srcu + cmd->src_nsegs) > HIFN_D_SRC_RSIZE ||
1993	    (sc->sc_dstu + cmd->dst_nsegs + 1) > HIFN_D_DST_RSIZE) {
1994#ifdef HIFN_DEBUG
1995		if (hifn_debug) {
1996			device_printf(sc->sc_dev,
1997				"src/dst exhaustion, srcu %u+%u dstu %u+%u\n",
1998				sc->sc_srcu, cmd->src_nsegs,
1999				sc->sc_dstu, cmd->dst_nsegs);
2000		}
2001#endif
2002		hifnstats.hst_nomem_sd++;
2003		err = ERESTART;
2004		goto err_dstmap;
2005	}
2006
2007	if (sc->sc_cmdi == HIFN_D_CMD_RSIZE) {
2008		sc->sc_cmdi = 0;
2009		dma->cmdr[HIFN_D_CMD_RSIZE].l = htole32(HIFN_D_VALID |
2010		    HIFN_D_JUMP | HIFN_D_MASKDONEIRQ);
2011		HIFN_CMDR_SYNC(sc, HIFN_D_CMD_RSIZE,
2012		    BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
2013	}
2014	cmdi = sc->sc_cmdi++;
2015	cmdlen = hifn_write_command(cmd, dma->command_bufs[cmdi]);
2016	HIFN_CMD_SYNC(sc, cmdi, BUS_DMASYNC_PREWRITE);
2017
2018	/* .p for command/result already set */
2019	dma->cmdr[cmdi].l = htole32(cmdlen | HIFN_D_VALID | HIFN_D_LAST |
2020	    HIFN_D_MASKDONEIRQ);
2021	HIFN_CMDR_SYNC(sc, cmdi,
2022	    BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
2023	sc->sc_cmdu++;
2024
2025	/*
2026	 * We don't worry about missing an interrupt (which a "command wait"
2027	 * interrupt salvages us from), unless there is more than one command
2028	 * in the queue.
2029	 */
2030	if (sc->sc_cmdu > 1) {
2031		sc->sc_dmaier |= HIFN_DMAIER_C_WAIT;
2032		WRITE_REG_1(sc, HIFN_1_DMA_IER, sc->sc_dmaier);
2033	}
2034
2035	hifnstats.hst_ipackets++;
2036	hifnstats.hst_ibytes += cmd->src_mapsize;
2037
2038	hifn_dmamap_load_src(sc, cmd);
2039
2040	/*
2041	 * Unlike other descriptors, we don't mask done interrupt from
2042	 * result descriptor.
2043	 */
2044#ifdef HIFN_DEBUG
2045	if (hifn_debug)
2046		printf("load res\n");
2047#endif
2048	if (sc->sc_resi == HIFN_D_RES_RSIZE) {
2049		sc->sc_resi = 0;
2050		dma->resr[HIFN_D_RES_RSIZE].l = htole32(HIFN_D_VALID |
2051		    HIFN_D_JUMP | HIFN_D_MASKDONEIRQ);
2052		HIFN_RESR_SYNC(sc, HIFN_D_RES_RSIZE,
2053		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2054	}
2055	resi = sc->sc_resi++;
2056	KASSERT(sc->sc_hifn_commands[resi] == NULL,
2057		("hifn_crypto: command slot %u busy", resi));
2058	sc->sc_hifn_commands[resi] = cmd;
2059	HIFN_RES_SYNC(sc, resi, BUS_DMASYNC_PREREAD);
2060	if ((hint & CRYPTO_HINT_MORE) && sc->sc_curbatch < hifn_maxbatch) {
2061		dma->resr[resi].l = htole32(HIFN_MAX_RESULT |
2062		    HIFN_D_VALID | HIFN_D_LAST | HIFN_D_MASKDONEIRQ);
2063		sc->sc_curbatch++;
2064		if (sc->sc_curbatch > hifnstats.hst_maxbatch)
2065			hifnstats.hst_maxbatch = sc->sc_curbatch;
2066		hifnstats.hst_totbatch++;
2067	} else {
2068		dma->resr[resi].l = htole32(HIFN_MAX_RESULT |
2069		    HIFN_D_VALID | HIFN_D_LAST);
2070		sc->sc_curbatch = 0;
2071	}
2072	HIFN_RESR_SYNC(sc, resi,
2073	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2074	sc->sc_resu++;
2075
2076	if (cmd->sloplen)
2077		cmd->slopidx = resi;
2078
2079	hifn_dmamap_load_dst(sc, cmd);
2080
2081	csr = 0;
2082	if (sc->sc_c_busy == 0) {
2083		csr |= HIFN_DMACSR_C_CTRL_ENA;
2084		sc->sc_c_busy = 1;
2085	}
2086	if (sc->sc_s_busy == 0) {
2087		csr |= HIFN_DMACSR_S_CTRL_ENA;
2088		sc->sc_s_busy = 1;
2089	}
2090	if (sc->sc_r_busy == 0) {
2091		csr |= HIFN_DMACSR_R_CTRL_ENA;
2092		sc->sc_r_busy = 1;
2093	}
2094	if (sc->sc_d_busy == 0) {
2095		csr |= HIFN_DMACSR_D_CTRL_ENA;
2096		sc->sc_d_busy = 1;
2097	}
2098	if (csr)
2099		WRITE_REG_1(sc, HIFN_1_DMA_CSR, csr);
2100
2101#ifdef HIFN_DEBUG
2102	if (hifn_debug) {
2103		device_printf(sc->sc_dev, "command: stat %8x ier %8x\n",
2104		    READ_REG_1(sc, HIFN_1_DMA_CSR),
2105		    READ_REG_1(sc, HIFN_1_DMA_IER));
2106	}
2107#endif
2108
2109	sc->sc_active = 5;
2110	HIFN_UNLOCK(sc);
2111	KASSERT(err == 0, ("hifn_crypto: success with error %u", err));
2112	return (err);		/* success */
2113
2114err_dstmap:
2115	if (cmd->src_map != cmd->dst_map)
2116		bus_dmamap_unload(sc->sc_dmat, cmd->dst_map);
2117err_dstmap1:
2118	if (cmd->src_map != cmd->dst_map)
2119		bus_dmamap_destroy(sc->sc_dmat, cmd->dst_map);
2120err_srcmap:
2121	if (crp->crp_flags & CRYPTO_F_IMBUF) {
2122		if (cmd->src_m != cmd->dst_m)
2123			m_freem(cmd->dst_m);
2124	}
2125	bus_dmamap_unload(sc->sc_dmat, cmd->src_map);
2126err_srcmap1:
2127	bus_dmamap_destroy(sc->sc_dmat, cmd->src_map);
2128	HIFN_UNLOCK(sc);
2129	return (err);
2130}
2131
2132static void
2133hifn_tick(void* vsc)
2134{
2135	struct hifn_softc *sc = vsc;
2136
2137	HIFN_LOCK(sc);
2138	if (sc->sc_active == 0) {
2139		u_int32_t r = 0;
2140
2141		if (sc->sc_cmdu == 0 && sc->sc_c_busy) {
2142			sc->sc_c_busy = 0;
2143			r |= HIFN_DMACSR_C_CTRL_DIS;
2144		}
2145		if (sc->sc_srcu == 0 && sc->sc_s_busy) {
2146			sc->sc_s_busy = 0;
2147			r |= HIFN_DMACSR_S_CTRL_DIS;
2148		}
2149		if (sc->sc_dstu == 0 && sc->sc_d_busy) {
2150			sc->sc_d_busy = 0;
2151			r |= HIFN_DMACSR_D_CTRL_DIS;
2152		}
2153		if (sc->sc_resu == 0 && sc->sc_r_busy) {
2154			sc->sc_r_busy = 0;
2155			r |= HIFN_DMACSR_R_CTRL_DIS;
2156		}
2157		if (r)
2158			WRITE_REG_1(sc, HIFN_1_DMA_CSR, r);
2159	} else
2160		sc->sc_active--;
2161	HIFN_UNLOCK(sc);
2162	callout_reset(&sc->sc_tickto, hz, hifn_tick, sc);
2163}
2164
/*
 * DMA interrupt handler.  Acknowledges the causes we are configured to
 * see, treats ring aborts as fatal (drain + reset via hifn_abort()),
 * and then reaps completed descriptors from the result, source and
 * command rings, dispatching finished requests through hifn_callback().
 */
static void
hifn_intr(void *arg)
{
	struct hifn_softc *sc = arg;
	struct hifn_dma *dma;
	u_int32_t dmacsr, restart;
	int i, u;

	dmacsr = READ_REG_1(sc, HIFN_1_DMA_CSR);

	/* Nothing in the DMA unit interrupted */
	if ((dmacsr & sc->sc_dmaier) == 0)
		return;

	HIFN_LOCK(sc);

	dma = sc->sc_dma;

#ifdef HIFN_DEBUG
	if (hifn_debug) {
		device_printf(sc->sc_dev,
		    "irq: stat %08x ien %08x damier %08x i %d/%d/%d/%d k %d/%d/%d/%d u %d/%d/%d/%d\n",
		    dmacsr, READ_REG_1(sc, HIFN_1_DMA_IER), sc->sc_dmaier,
		    sc->sc_cmdi, sc->sc_srci, sc->sc_dsti, sc->sc_resi,
		    sc->sc_cmdk, sc->sc_srck, sc->sc_dstk, sc->sc_resk,
		    sc->sc_cmdu, sc->sc_srcu, sc->sc_dstu, sc->sc_resu);
	}
#endif

	/* Acknowledge exactly the enabled causes we observed. */
	WRITE_REG_1(sc, HIFN_1_DMA_CSR, dmacsr & sc->sc_dmaier);

	if ((sc->sc_flags & HIFN_HAS_PUBLIC) &&
	    (dmacsr & HIFN_DMACSR_PUBDONE))
		WRITE_REG_1(sc, HIFN_1_PUB_STATUS,
		    READ_REG_1(sc, HIFN_1_PUB_STATUS) | HIFN_PUBSTS_DONE);

	/* Overruns are logged but not treated as fatal here. */
	restart = dmacsr & (HIFN_DMACSR_D_OVER | HIFN_DMACSR_R_OVER);
	if (restart)
		device_printf(sc->sc_dev, "overrun %x\n", dmacsr);

	if (sc->sc_flags & HIFN_IS_7811) {
		if (dmacsr & HIFN_DMACSR_ILLR)
			device_printf(sc->sc_dev, "illegal read\n");
		if (dmacsr & HIFN_DMACSR_ILLW)
			device_printf(sc->sc_dev, "illegal write\n");
	}

	/* An abort on any ring is fatal: salvage pending work and reset. */
	restart = dmacsr & (HIFN_DMACSR_C_ABORT | HIFN_DMACSR_S_ABORT |
	    HIFN_DMACSR_D_ABORT | HIFN_DMACSR_R_ABORT);
	if (restart) {
		device_printf(sc->sc_dev, "abort, resetting.\n");
		hifnstats.hst_abort++;
		hifn_abort(sc);
		HIFN_UNLOCK(sc);
		return;
	}

	if ((dmacsr & HIFN_DMACSR_C_WAIT) && (sc->sc_cmdu == 0)) {
		/*
		 * If no slots to process and we receive a "waiting on
		 * command" interrupt, we disable the "waiting on command"
		 * (by clearing it).
		 */
		sc->sc_dmaier &= ~HIFN_DMAIER_C_WAIT;
		WRITE_REG_1(sc, HIFN_1_DMA_IER, sc->sc_dmaier);
	}

	/* clear the rings */
	i = sc->sc_resk; u = sc->sc_resu;
	while (u != 0) {
		HIFN_RESR_SYNC(sc, i,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
		/* Stop at the first descriptor still owned by the chip. */
		if (dma->resr[i].l & htole32(HIFN_D_VALID)) {
			HIFN_RESR_SYNC(sc, i,
			    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
			break;
		}

		/* Slot HIFN_D_RES_RSIZE is the jump descriptor, not a result. */
		if (i != HIFN_D_RES_RSIZE) {
			struct hifn_command *cmd;
			u_int8_t *macbuf = NULL;

			HIFN_RES_SYNC(sc, i, BUS_DMASYNC_POSTREAD);
			cmd = sc->sc_hifn_commands[i];
			KASSERT(cmd != NULL,
				("hifn_intr: null command slot %u", i));
			sc->sc_hifn_commands[i] = NULL;

			if (cmd->base_masks & HIFN_BASE_CMD_MAC) {
				/* Digest sits at a fixed 12-byte offset in
				 * the result buffer (same as hifn_abort). */
				macbuf = dma->result_bufs[i];
				macbuf += 12;
			}

			hifn_callback(sc, cmd, macbuf);
			hifnstats.hst_opackets++;
			u--;
		}

		/* Ring has RSIZE+1 entries (incl. jump slot); wrap after it. */
		if (++i == (HIFN_D_RES_RSIZE + 1))
			i = 0;
	}
	sc->sc_resk = i; sc->sc_resu = u;

	i = sc->sc_srck; u = sc->sc_srcu;
	while (u != 0) {
		if (i == HIFN_D_SRC_RSIZE)
			i = 0;
		HIFN_SRCR_SYNC(sc, i,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
		if (dma->srcr[i].l & htole32(HIFN_D_VALID)) {
			HIFN_SRCR_SYNC(sc, i,
			    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
			break;
		}
		i++, u--;
	}
	sc->sc_srck = i; sc->sc_srcu = u;

	i = sc->sc_cmdk; u = sc->sc_cmdu;
	while (u != 0) {
		HIFN_CMDR_SYNC(sc, i,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
		if (dma->cmdr[i].l & htole32(HIFN_D_VALID)) {
			HIFN_CMDR_SYNC(sc, i,
			    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
			break;
		}
		/* Skip the jump slot at index HIFN_D_CMD_RSIZE. */
		if (i != HIFN_D_CMD_RSIZE) {
			u--;
			HIFN_CMD_SYNC(sc, i, BUS_DMASYNC_POSTWRITE);
		}
		if (++i == (HIFN_D_CMD_RSIZE + 1))
			i = 0;
	}
	sc->sc_cmdk = i; sc->sc_cmdu = u;

	HIFN_UNLOCK(sc);

	if (sc->sc_needwakeup) {		/* XXX check high watermark */
		int wakeup = sc->sc_needwakeup & (CRYPTO_SYMQ|CRYPTO_ASYMQ);
#ifdef HIFN_DEBUG
		if (hifn_debug)
			device_printf(sc->sc_dev,
				"wakeup crypto (%x) u %d/%d/%d/%d\n",
				sc->sc_needwakeup,
				sc->sc_cmdu, sc->sc_srcu, sc->sc_dstu, sc->sc_resu);
#endif
		/* Ring resources freed up: let the framework resubmit. */
		sc->sc_needwakeup &= ~wakeup;
		crypto_unblock(sc->sc_cid, wakeup);
	}
}
2316
2317/*
2318 * Allocate a new 'session' and return an encoded session id.  'sidp'
2319 * contains our registration id, and should contain an encoded session
2320 * id on successful allocation.
2321 */
2322static int
2323hifn_newsession(device_t dev, u_int32_t *sidp, struct cryptoini *cri)
2324{
2325	struct hifn_softc *sc = device_get_softc(dev);
2326	struct cryptoini *c;
2327	int mac = 0, cry = 0, sesn;
2328	struct hifn_session *ses = NULL;
2329
2330	KASSERT(sc != NULL, ("hifn_newsession: null softc"));
2331	if (sidp == NULL || cri == NULL || sc == NULL)
2332		return (EINVAL);
2333
2334	HIFN_LOCK(sc);
2335	if (sc->sc_sessions == NULL) {
2336		ses = sc->sc_sessions = (struct hifn_session *)malloc(
2337		    sizeof(*ses), M_DEVBUF, M_NOWAIT);
2338		if (ses == NULL) {
2339			HIFN_UNLOCK(sc);
2340			return (ENOMEM);
2341		}
2342		sesn = 0;
2343		sc->sc_nsessions = 1;
2344	} else {
2345		for (sesn = 0; sesn < sc->sc_nsessions; sesn++) {
2346			if (!sc->sc_sessions[sesn].hs_used) {
2347				ses = &sc->sc_sessions[sesn];
2348				break;
2349			}
2350		}
2351
2352		if (ses == NULL) {
2353			sesn = sc->sc_nsessions;
2354			ses = (struct hifn_session *)malloc((sesn + 1) *
2355			    sizeof(*ses), M_DEVBUF, M_NOWAIT);
2356			if (ses == NULL) {
2357				HIFN_UNLOCK(sc);
2358				return (ENOMEM);
2359			}
2360			bcopy(sc->sc_sessions, ses, sesn * sizeof(*ses));
2361			bzero(sc->sc_sessions, sesn * sizeof(*ses));
2362			free(sc->sc_sessions, M_DEVBUF);
2363			sc->sc_sessions = ses;
2364			ses = &sc->sc_sessions[sesn];
2365			sc->sc_nsessions++;
2366		}
2367	}
2368	HIFN_UNLOCK(sc);
2369
2370	bzero(ses, sizeof(*ses));
2371	ses->hs_used = 1;
2372
2373	for (c = cri; c != NULL; c = c->cri_next) {
2374		switch (c->cri_alg) {
2375		case CRYPTO_MD5:
2376		case CRYPTO_SHA1:
2377		case CRYPTO_MD5_HMAC:
2378		case CRYPTO_SHA1_HMAC:
2379			if (mac)
2380				return (EINVAL);
2381			mac = 1;
2382			ses->hs_mlen = c->cri_mlen;
2383			if (ses->hs_mlen == 0) {
2384				switch (c->cri_alg) {
2385				case CRYPTO_MD5:
2386				case CRYPTO_MD5_HMAC:
2387					ses->hs_mlen = 16;
2388					break;
2389				case CRYPTO_SHA1:
2390				case CRYPTO_SHA1_HMAC:
2391					ses->hs_mlen = 20;
2392					break;
2393				}
2394			}
2395			break;
2396		case CRYPTO_DES_CBC:
2397		case CRYPTO_3DES_CBC:
2398		case CRYPTO_AES_CBC:
2399			/* XXX this may read fewer, does it matter? */
2400			read_random(ses->hs_iv,
2401				c->cri_alg == CRYPTO_AES_CBC ?
2402					HIFN_AES_IV_LENGTH : HIFN_IV_LENGTH);
2403			/*FALLTHROUGH*/
2404		case CRYPTO_ARC4:
2405			if (cry)
2406				return (EINVAL);
2407			cry = 1;
2408			break;
2409		default:
2410			return (EINVAL);
2411		}
2412	}
2413	if (mac == 0 && cry == 0)
2414		return (EINVAL);
2415
2416	*sidp = HIFN_SID(device_get_unit(sc->sc_dev), sesn);
2417
2418	return (0);
2419}
2420
2421/*
2422 * Deallocate a session.
2423 * XXX this routine should run a zero'd mac/encrypt key into context ram.
2424 * XXX to blow away any keys already stored there.
2425 */
2426static int
2427hifn_freesession(device_t dev, u_int64_t tid)
2428{
2429	struct hifn_softc *sc = device_get_softc(dev);
2430	int session, error;
2431	u_int32_t sid = CRYPTO_SESID2LID(tid);
2432
2433	KASSERT(sc != NULL, ("hifn_freesession: null softc"));
2434	if (sc == NULL)
2435		return (EINVAL);
2436
2437	HIFN_LOCK(sc);
2438	session = HIFN_SESSION(sid);
2439	if (session < sc->sc_nsessions) {
2440		bzero(&sc->sc_sessions[session], sizeof(struct hifn_session));
2441		error = 0;
2442	} else
2443		error = EINVAL;
2444	HIFN_UNLOCK(sc);
2445
2446	return (error);
2447}
2448
2449static int
2450hifn_process(device_t dev, struct cryptop *crp, int hint)
2451{
2452	struct hifn_softc *sc = device_get_softc(dev);
2453	struct hifn_command *cmd = NULL;
2454	int session, err, ivlen;
2455	struct cryptodesc *crd1, *crd2, *maccrd, *enccrd;
2456
2457	if (crp == NULL || crp->crp_callback == NULL) {
2458		hifnstats.hst_invalid++;
2459		return (EINVAL);
2460	}
2461	session = HIFN_SESSION(crp->crp_sid);
2462
2463	if (sc == NULL || session >= sc->sc_nsessions) {
2464		err = EINVAL;
2465		goto errout;
2466	}
2467
2468	cmd = malloc(sizeof(struct hifn_command), M_DEVBUF, M_NOWAIT | M_ZERO);
2469	if (cmd == NULL) {
2470		hifnstats.hst_nomem++;
2471		err = ENOMEM;
2472		goto errout;
2473	}
2474
2475	if (crp->crp_flags & CRYPTO_F_IMBUF) {
2476		cmd->src_m = (struct mbuf *)crp->crp_buf;
2477		cmd->dst_m = (struct mbuf *)crp->crp_buf;
2478	} else if (crp->crp_flags & CRYPTO_F_IOV) {
2479		cmd->src_io = (struct uio *)crp->crp_buf;
2480		cmd->dst_io = (struct uio *)crp->crp_buf;
2481	} else {
2482		err = EINVAL;
2483		goto errout;	/* XXX we don't handle contiguous buffers! */
2484	}
2485
2486	crd1 = crp->crp_desc;
2487	if (crd1 == NULL) {
2488		err = EINVAL;
2489		goto errout;
2490	}
2491	crd2 = crd1->crd_next;
2492
2493	if (crd2 == NULL) {
2494		if (crd1->crd_alg == CRYPTO_MD5_HMAC ||
2495		    crd1->crd_alg == CRYPTO_SHA1_HMAC ||
2496		    crd1->crd_alg == CRYPTO_SHA1 ||
2497		    crd1->crd_alg == CRYPTO_MD5) {
2498			maccrd = crd1;
2499			enccrd = NULL;
2500		} else if (crd1->crd_alg == CRYPTO_DES_CBC ||
2501		    crd1->crd_alg == CRYPTO_3DES_CBC ||
2502		    crd1->crd_alg == CRYPTO_AES_CBC ||
2503		    crd1->crd_alg == CRYPTO_ARC4) {
2504			if ((crd1->crd_flags & CRD_F_ENCRYPT) == 0)
2505				cmd->base_masks |= HIFN_BASE_CMD_DECODE;
2506			maccrd = NULL;
2507			enccrd = crd1;
2508		} else {
2509			err = EINVAL;
2510			goto errout;
2511		}
2512	} else {
2513		if ((crd1->crd_alg == CRYPTO_MD5_HMAC ||
2514                     crd1->crd_alg == CRYPTO_SHA1_HMAC ||
2515                     crd1->crd_alg == CRYPTO_MD5 ||
2516                     crd1->crd_alg == CRYPTO_SHA1) &&
2517		    (crd2->crd_alg == CRYPTO_DES_CBC ||
2518		     crd2->crd_alg == CRYPTO_3DES_CBC ||
2519		     crd2->crd_alg == CRYPTO_AES_CBC ||
2520		     crd2->crd_alg == CRYPTO_ARC4) &&
2521		    ((crd2->crd_flags & CRD_F_ENCRYPT) == 0)) {
2522			cmd->base_masks = HIFN_BASE_CMD_DECODE;
2523			maccrd = crd1;
2524			enccrd = crd2;
2525		} else if ((crd1->crd_alg == CRYPTO_DES_CBC ||
2526		     crd1->crd_alg == CRYPTO_ARC4 ||
2527		     crd1->crd_alg == CRYPTO_3DES_CBC ||
2528		     crd1->crd_alg == CRYPTO_AES_CBC) &&
2529		    (crd2->crd_alg == CRYPTO_MD5_HMAC ||
2530                     crd2->crd_alg == CRYPTO_SHA1_HMAC ||
2531                     crd2->crd_alg == CRYPTO_MD5 ||
2532                     crd2->crd_alg == CRYPTO_SHA1) &&
2533		    (crd1->crd_flags & CRD_F_ENCRYPT)) {
2534			enccrd = crd1;
2535			maccrd = crd2;
2536		} else {
2537			/*
2538			 * We cannot order the 7751 as requested
2539			 */
2540			err = EINVAL;
2541			goto errout;
2542		}
2543	}
2544
2545	if (enccrd) {
2546		cmd->enccrd = enccrd;
2547		cmd->base_masks |= HIFN_BASE_CMD_CRYPT;
2548		switch (enccrd->crd_alg) {
2549		case CRYPTO_ARC4:
2550			cmd->cry_masks |= HIFN_CRYPT_CMD_ALG_RC4;
2551			break;
2552		case CRYPTO_DES_CBC:
2553			cmd->cry_masks |= HIFN_CRYPT_CMD_ALG_DES |
2554			    HIFN_CRYPT_CMD_MODE_CBC |
2555			    HIFN_CRYPT_CMD_NEW_IV;
2556			break;
2557		case CRYPTO_3DES_CBC:
2558			cmd->cry_masks |= HIFN_CRYPT_CMD_ALG_3DES |
2559			    HIFN_CRYPT_CMD_MODE_CBC |
2560			    HIFN_CRYPT_CMD_NEW_IV;
2561			break;
2562		case CRYPTO_AES_CBC:
2563			cmd->cry_masks |= HIFN_CRYPT_CMD_ALG_AES |
2564			    HIFN_CRYPT_CMD_MODE_CBC |
2565			    HIFN_CRYPT_CMD_NEW_IV;
2566			break;
2567		default:
2568			err = EINVAL;
2569			goto errout;
2570		}
2571		if (enccrd->crd_alg != CRYPTO_ARC4) {
2572			ivlen = ((enccrd->crd_alg == CRYPTO_AES_CBC) ?
2573				HIFN_AES_IV_LENGTH : HIFN_IV_LENGTH);
2574			if (enccrd->crd_flags & CRD_F_ENCRYPT) {
2575				if (enccrd->crd_flags & CRD_F_IV_EXPLICIT)
2576					bcopy(enccrd->crd_iv, cmd->iv, ivlen);
2577				else
2578					bcopy(sc->sc_sessions[session].hs_iv,
2579					    cmd->iv, ivlen);
2580
2581				if ((enccrd->crd_flags & CRD_F_IV_PRESENT)
2582				    == 0) {
2583					crypto_copyback(crp->crp_flags,
2584					    crp->crp_buf, enccrd->crd_inject,
2585					    ivlen, cmd->iv);
2586				}
2587			} else {
2588				if (enccrd->crd_flags & CRD_F_IV_EXPLICIT)
2589					bcopy(enccrd->crd_iv, cmd->iv, ivlen);
2590				else {
2591					crypto_copydata(crp->crp_flags,
2592					    crp->crp_buf, enccrd->crd_inject,
2593					    ivlen, cmd->iv);
2594				}
2595			}
2596		}
2597
2598		if (enccrd->crd_flags & CRD_F_KEY_EXPLICIT)
2599			cmd->cry_masks |= HIFN_CRYPT_CMD_NEW_KEY;
2600		cmd->ck = enccrd->crd_key;
2601		cmd->cklen = enccrd->crd_klen >> 3;
2602		cmd->cry_masks |= HIFN_CRYPT_CMD_NEW_KEY;
2603
2604		/*
2605		 * Need to specify the size for the AES key in the masks.
2606		 */
2607		if ((cmd->cry_masks & HIFN_CRYPT_CMD_ALG_MASK) ==
2608		    HIFN_CRYPT_CMD_ALG_AES) {
2609			switch (cmd->cklen) {
2610			case 16:
2611				cmd->cry_masks |= HIFN_CRYPT_CMD_KSZ_128;
2612				break;
2613			case 24:
2614				cmd->cry_masks |= HIFN_CRYPT_CMD_KSZ_192;
2615				break;
2616			case 32:
2617				cmd->cry_masks |= HIFN_CRYPT_CMD_KSZ_256;
2618				break;
2619			default:
2620				err = EINVAL;
2621				goto errout;
2622			}
2623		}
2624	}
2625
2626	if (maccrd) {
2627		cmd->maccrd = maccrd;
2628		cmd->base_masks |= HIFN_BASE_CMD_MAC;
2629
2630		switch (maccrd->crd_alg) {
2631		case CRYPTO_MD5:
2632			cmd->mac_masks |= HIFN_MAC_CMD_ALG_MD5 |
2633			    HIFN_MAC_CMD_RESULT | HIFN_MAC_CMD_MODE_HASH |
2634			    HIFN_MAC_CMD_POS_IPSEC;
2635                       break;
2636		case CRYPTO_MD5_HMAC:
2637			cmd->mac_masks |= HIFN_MAC_CMD_ALG_MD5 |
2638			    HIFN_MAC_CMD_RESULT | HIFN_MAC_CMD_MODE_HMAC |
2639			    HIFN_MAC_CMD_POS_IPSEC | HIFN_MAC_CMD_TRUNC;
2640			break;
2641		case CRYPTO_SHA1:
2642			cmd->mac_masks |= HIFN_MAC_CMD_ALG_SHA1 |
2643			    HIFN_MAC_CMD_RESULT | HIFN_MAC_CMD_MODE_HASH |
2644			    HIFN_MAC_CMD_POS_IPSEC;
2645			break;
2646		case CRYPTO_SHA1_HMAC:
2647			cmd->mac_masks |= HIFN_MAC_CMD_ALG_SHA1 |
2648			    HIFN_MAC_CMD_RESULT | HIFN_MAC_CMD_MODE_HMAC |
2649			    HIFN_MAC_CMD_POS_IPSEC | HIFN_MAC_CMD_TRUNC;
2650			break;
2651		}
2652
2653		if (maccrd->crd_alg == CRYPTO_SHA1_HMAC ||
2654		     maccrd->crd_alg == CRYPTO_MD5_HMAC) {
2655			cmd->mac_masks |= HIFN_MAC_CMD_NEW_KEY;
2656			bcopy(maccrd->crd_key, cmd->mac, maccrd->crd_klen >> 3);
2657			bzero(cmd->mac + (maccrd->crd_klen >> 3),
2658			    HIFN_MAC_KEY_LENGTH - (maccrd->crd_klen >> 3));
2659		}
2660	}
2661
2662	cmd->crp = crp;
2663	cmd->session_num = session;
2664	cmd->softc = sc;
2665
2666	err = hifn_crypto(sc, cmd, crp, hint);
2667	if (!err) {
2668		return 0;
2669	} else if (err == ERESTART) {
2670		/*
2671		 * There weren't enough resources to dispatch the request
2672		 * to the part.  Notify the caller so they'll requeue this
2673		 * request and resubmit it again soon.
2674		 */
2675#ifdef HIFN_DEBUG
2676		if (hifn_debug)
2677			device_printf(sc->sc_dev, "requeue request\n");
2678#endif
2679		free(cmd, M_DEVBUF);
2680		sc->sc_needwakeup |= CRYPTO_SYMQ;
2681		return (err);
2682	}
2683
2684errout:
2685	if (cmd != NULL)
2686		free(cmd, M_DEVBUF);
2687	if (err == EINVAL)
2688		hifnstats.hst_invalid++;
2689	else
2690		hifnstats.hst_nomem++;
2691	crp->crp_etype = err;
2692	crypto_done(crp);
2693	return (err);
2694}
2695
/*
 * Abort recovery: walk the outstanding result-ring entries, completing
 * any request the chip already finished ("salvage") and failing the
 * rest, then reset and reinitialize the part.  Called with the softc
 * lock held from hifn_intr() on a DMA abort.
 */
static void
hifn_abort(struct hifn_softc *sc)
{
	struct hifn_dma *dma = sc->sc_dma;
	struct hifn_command *cmd;
	struct cryptop *crp;
	int i, u;

	i = sc->sc_resk; u = sc->sc_resu;
	while (u != 0) {
		cmd = sc->sc_hifn_commands[i];
		KASSERT(cmd != NULL, ("hifn_abort: null command slot %u", i));
		sc->sc_hifn_commands[i] = NULL;
		crp = cmd->crp;

		/* VALID clear means the chip completed this descriptor. */
		if ((dma->resr[i].l & htole32(HIFN_D_VALID)) == 0) {
			/* Salvage what we can. */
			u_int8_t *macbuf;

			if (cmd->base_masks & HIFN_BASE_CMD_MAC) {
				/* Digest at fixed 12-byte offset, as in
				 * hifn_intr(). */
				macbuf = dma->result_bufs[i];
				macbuf += 12;
			} else
				macbuf = NULL;
			hifnstats.hst_opackets++;
			hifn_callback(sc, cmd, macbuf);
		} else {
			/* Request never completed: tear down its mappings
			 * and fail it back to the framework. */
			if (cmd->src_map == cmd->dst_map) {
				bus_dmamap_sync(sc->sc_dmat, cmd->src_map,
				    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
			} else {
				bus_dmamap_sync(sc->sc_dmat, cmd->src_map,
				    BUS_DMASYNC_POSTWRITE);
				bus_dmamap_sync(sc->sc_dmat, cmd->dst_map,
				    BUS_DMASYNC_POSTREAD);
			}

			if (cmd->src_m != cmd->dst_m) {
				m_freem(cmd->src_m);
				crp->crp_buf = (caddr_t)cmd->dst_m;
			}

			/* non-shared buffers cannot be restarted */
			if (cmd->src_map != cmd->dst_map) {
				/*
				 * XXX should be EAGAIN, delayed until
				 * after the reset.
				 */
				crp->crp_etype = ENOMEM;
				bus_dmamap_unload(sc->sc_dmat, cmd->dst_map);
				bus_dmamap_destroy(sc->sc_dmat, cmd->dst_map);
			} else
				crp->crp_etype = ENOMEM;

			bus_dmamap_unload(sc->sc_dmat, cmd->src_map);
			bus_dmamap_destroy(sc->sc_dmat, cmd->src_map);

			free(cmd, M_DEVBUF);
			if (crp->crp_etype != EAGAIN)
				crypto_done(crp);
		}

		if (++i == HIFN_D_RES_RSIZE)
			i = 0;
		u--;
	}
	sc->sc_resk = i; sc->sc_resu = u;

	/* Full reset and reinit; rings start over empty. */
	hifn_reset_board(sc, 1);
	hifn_init_dma(sc);
	hifn_init_pci_registers(sc);
}
2768
/*
 * Complete one finished request: sync and release DMA state, fix up
 * the output buffer, copy back slop bytes, save the next IV after an
 * encryption, inject the MAC digest, and call crypto_done().
 */
static void
hifn_callback(struct hifn_softc *sc, struct hifn_command *cmd, u_int8_t *macbuf)
{
	struct hifn_dma *dma = sc->sc_dma;
	struct cryptop *crp = cmd->crp;
	struct cryptodesc *crd;
	struct mbuf *m;
	int totlen, i, u, ivlen;

	if (cmd->src_map == cmd->dst_map) {
		bus_dmamap_sync(sc->sc_dmat, cmd->src_map,
		    BUS_DMASYNC_POSTWRITE | BUS_DMASYNC_POSTREAD);
	} else {
		bus_dmamap_sync(sc->sc_dmat, cmd->src_map,
		    BUS_DMASYNC_POSTWRITE);
		bus_dmamap_sync(sc->sc_dmat, cmd->dst_map,
		    BUS_DMASYNC_POSTREAD);
	}

	if (crp->crp_flags & CRYPTO_F_IMBUF) {
		if (cmd->src_m != cmd->dst_m) {
			/*
			 * Output landed in a separate mbuf chain: trim its
			 * lengths to the input size, hand it to the caller
			 * via crp_buf, and free the original chain.
			 */
			crp->crp_buf = (caddr_t)cmd->dst_m;
			totlen = cmd->src_mapsize;
			for (m = cmd->dst_m; m != NULL; m = m->m_next) {
				if (totlen < m->m_len) {
					m->m_len = totlen;
					totlen = 0;
				} else
					totlen -= m->m_len;
			}
			cmd->dst_m->m_pkthdr.len = cmd->src_m->m_pkthdr.len;
			m_freem(cmd->src_m);
		}
	}

	if (cmd->sloplen != 0) {
		/* Copy back the tail bytes staged in the slop area. */
		crypto_copyback(crp->crp_flags, crp->crp_buf,
		    cmd->src_mapsize - cmd->sloplen, cmd->sloplen,
		    (caddr_t)&dma->slop[cmd->slopidx]);
	}

	/* Reap consumed destination-ring descriptors. */
	i = sc->sc_dstk; u = sc->sc_dstu;
	while (u != 0) {
		if (i == HIFN_D_DST_RSIZE)
			i = 0;
		bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
		/* Stop at the first descriptor still owned by the chip. */
		if (dma->dstr[i].l & htole32(HIFN_D_VALID)) {
			bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap,
			    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
			break;
		}
		i++, u--;
	}
	sc->sc_dstk = i; sc->sc_dstu = u;

	hifnstats.hst_obytes += cmd->dst_mapsize;

	/*
	 * After an encryption, remember the last ciphertext block as the
	 * session's next IV (CBC chaining across requests).
	 */
	if ((cmd->base_masks & (HIFN_BASE_CMD_CRYPT | HIFN_BASE_CMD_DECODE)) ==
	    HIFN_BASE_CMD_CRYPT) {
		for (crd = crp->crp_desc; crd; crd = crd->crd_next) {
			if (crd->crd_alg != CRYPTO_DES_CBC &&
			    crd->crd_alg != CRYPTO_3DES_CBC &&
			    crd->crd_alg != CRYPTO_AES_CBC)
				continue;
			ivlen = ((crd->crd_alg == CRYPTO_AES_CBC) ?
				HIFN_AES_IV_LENGTH : HIFN_IV_LENGTH);
			crypto_copydata(crp->crp_flags, crp->crp_buf,
			    crd->crd_skip + crd->crd_len - ivlen, ivlen,
			    cmd->softc->sc_sessions[cmd->session_num].hs_iv);
			break;
		}
	}

	if (macbuf != NULL) {
		/* Inject the digest at the position the caller requested. */
		for (crd = crp->crp_desc; crd; crd = crd->crd_next) {
                        int len;

			if (crd->crd_alg != CRYPTO_MD5 &&
			    crd->crd_alg != CRYPTO_SHA1 &&
			    crd->crd_alg != CRYPTO_MD5_HMAC &&
			    crd->crd_alg != CRYPTO_SHA1_HMAC) {
				continue;
			}
			len = cmd->softc->sc_sessions[cmd->session_num].hs_mlen;
			crypto_copyback(crp->crp_flags, crp->crp_buf,
			    crd->crd_inject, len, macbuf);
			break;
		}
	}

	/* Release DMA resources and complete the request. */
	if (cmd->src_map != cmd->dst_map) {
		bus_dmamap_unload(sc->sc_dmat, cmd->dst_map);
		bus_dmamap_destroy(sc->sc_dmat, cmd->dst_map);
	}
	bus_dmamap_unload(sc->sc_dmat, cmd->src_map);
	bus_dmamap_destroy(sc->sc_dmat, cmd->src_map);
	free(cmd, M_DEVBUF);
	crypto_done(crp);
}
2869
2870/*
2871 * 7811 PB3 rev/2 parts lock-up on burst writes to Group 0
2872 * and Group 1 registers; avoid conditions that could create
2873 * burst writes by doing a read in between the writes.
2874 *
2875 * NB: The read we interpose is always to the same register;
2876 *     we do this because reading from an arbitrary (e.g. last)
2877 *     register may not always work.
2878 */
2879static void
2880hifn_write_reg_0(struct hifn_softc *sc, bus_size_t reg, u_int32_t val)
2881{
2882	if (sc->sc_flags & HIFN_IS_7811) {
2883		if (sc->sc_bar0_lastreg == reg - 4)
2884			bus_space_read_4(sc->sc_st0, sc->sc_sh0, HIFN_0_PUCNFG);
2885		sc->sc_bar0_lastreg = reg;
2886	}
2887	bus_space_write_4(sc->sc_st0, sc->sc_sh0, reg, val);
2888}
2889
2890static void
2891hifn_write_reg_1(struct hifn_softc *sc, bus_size_t reg, u_int32_t val)
2892{
2893	if (sc->sc_flags & HIFN_IS_7811) {
2894		if (sc->sc_bar1_lastreg == reg - 4)
2895			bus_space_read_4(sc->sc_st1, sc->sc_sh1, HIFN_1_REVID);
2896		sc->sc_bar1_lastreg = reg;
2897	}
2898	bus_space_write_4(sc->sc_st1, sc->sc_sh1, reg, val);
2899}
2900
2901#ifdef HIFN_VULCANDEV
2902/*
2903 * this code provides support for mapping the PK engine's register
2904 * into a userspace program.
2905 *
2906 */
2907static int
2908vulcanpk_mmap(struct cdev *dev, vm_ooffset_t offset,
2909	      vm_paddr_t *paddr, int nprot, vm_memattr_t *memattr)
2910{
2911	struct hifn_softc *sc;
2912	vm_paddr_t pd;
2913	void *b;
2914
2915	sc = dev->si_drv1;
2916
2917	pd = rman_get_start(sc->sc_bar1res);
2918	b = rman_get_virtual(sc->sc_bar1res);
2919
2920#if 0
2921	printf("vpk mmap: %p(%016llx) offset=%lld\n", b,
2922	    (unsigned long long)pd, offset);
2923	hexdump(b, HIFN_1_PUB_MEMEND, "vpk", 0);
2924#endif
2925
2926	if (offset == 0) {
2927		*paddr = pd;
2928		return (0);
2929	}
2930	return (-1);
2931}
2932
/*
 * Character-device switch for the userspace PK-engine mapping; only
 * mmap is implemented (see vulcanpk_mmap() above).
 */
static struct cdevsw vulcanpk_cdevsw = {
	.d_version =	D_VERSION,
	.d_mmap =	vulcanpk_mmap,
	.d_name =	"vulcanpk",
};
2938#endif /* HIFN_VULCANDEV */
2939