/* hifn7751.c revision 213091 */
1/*	$OpenBSD: hifn7751.c,v 1.120 2002/05/17 00:33:34 deraadt Exp $	*/
2
3/*-
4 * Invertex AEON / Hifn 7751 driver
5 * Copyright (c) 1999 Invertex Inc. All rights reserved.
6 * Copyright (c) 1999 Theo de Raadt
7 * Copyright (c) 2000-2001 Network Security Technologies, Inc.
8 *			http://www.netsec.net
9 * Copyright (c) 2003 Hifn Inc.
10 *
11 * This driver is based on a previous driver by Invertex, for which they
12 * requested:  Please send any comments, feedback, bug-fixes, or feature
13 * requests to software@invertex.com.
14 *
15 * Redistribution and use in source and binary forms, with or without
16 * modification, are permitted provided that the following conditions
17 * are met:
18 *
19 * 1. Redistributions of source code must retain the above copyright
20 *   notice, this list of conditions and the following disclaimer.
21 * 2. Redistributions in binary form must reproduce the above copyright
22 *   notice, this list of conditions and the following disclaimer in the
23 *   documentation and/or other materials provided with the distribution.
24 * 3. The name of the author may not be used to endorse or promote products
25 *   derived from this software without specific prior written permission.
26 *
27 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
28 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
29 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
30 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
31 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
32 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
33 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
34 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
35 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
36 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
37 *
38 * Effort sponsored in part by the Defense Advanced Research Projects
39 * Agency (DARPA) and Air Force Research Laboratory, Air Force
40 * Materiel Command, USAF, under agreement number F30602-01-2-0537.
41 */
42
43#include <sys/cdefs.h>
44__FBSDID("$FreeBSD: head/sys/dev/hifn/hifn7751.c 213091 2010-09-24 04:46:46Z gonzo $");
45
46/*
47 * Driver for various Hifn encryption processors.
48 */
49#include "opt_hifn.h"
50
51#include <sys/param.h>
52#include <sys/systm.h>
53#include <sys/proc.h>
54#include <sys/errno.h>
55#include <sys/malloc.h>
56#include <sys/kernel.h>
57#include <sys/module.h>
58#include <sys/mbuf.h>
59#include <sys/lock.h>
60#include <sys/mutex.h>
61#include <sys/sysctl.h>
62
63#include <vm/vm.h>
64#include <vm/pmap.h>
65
66#include <machine/bus.h>
67#include <machine/resource.h>
68#include <sys/bus.h>
69#include <sys/rman.h>
70
71#include <opencrypto/cryptodev.h>
72#include <sys/random.h>
73#include <sys/kobj.h>
74
75#include "cryptodev_if.h"
76
77#include <dev/pci/pcivar.h>
78#include <dev/pci/pcireg.h>
79
80#ifdef HIFN_RNDTEST
81#include <dev/rndtest/rndtest.h>
82#endif
83#include <dev/hifn/hifn7751reg.h>
84#include <dev/hifn/hifn7751var.h>
85
86#ifdef HIFN_VULCANDEV
87#include <sys/conf.h>
88#include <sys/uio.h>
89
90static struct cdevsw vulcanpk_cdevsw; /* forward declaration */
91#endif
92
93/*
94 * Prototypes and count for the pci_device structure
95 */
96static	int hifn_probe(device_t);
97static	int hifn_attach(device_t);
98static	int hifn_detach(device_t);
99static	int hifn_suspend(device_t);
100static	int hifn_resume(device_t);
101static	int hifn_shutdown(device_t);
102
103static	int hifn_newsession(device_t, u_int32_t *, struct cryptoini *);
104static	int hifn_freesession(device_t, u_int64_t);
105static	int hifn_process(device_t, struct cryptop *, int);
106
/* Method table wiring this driver into newbus and opencrypto. */
static device_method_t hifn_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		hifn_probe),
	DEVMETHOD(device_attach,	hifn_attach),
	DEVMETHOD(device_detach,	hifn_detach),
	DEVMETHOD(device_suspend,	hifn_suspend),
	DEVMETHOD(device_resume,	hifn_resume),
	DEVMETHOD(device_shutdown,	hifn_shutdown),

	/* bus interface */
	DEVMETHOD(bus_print_child,	bus_generic_print_child),
	DEVMETHOD(bus_driver_added,	bus_generic_driver_added),

	/* crypto device methods */
	DEVMETHOD(cryptodev_newsession,	hifn_newsession),
	DEVMETHOD(cryptodev_freesession,hifn_freesession),
	DEVMETHOD(cryptodev_process,	hifn_process),

	{ 0, 0 }
};
static driver_t hifn_driver = {
	"hifn",
	hifn_methods,
	sizeof (struct hifn_softc)
};
static devclass_t hifn_devclass;

/* Register with the PCI bus and declare module dependencies. */
DRIVER_MODULE(hifn, pci, hifn_driver, hifn_devclass, 0, 0);
MODULE_DEPEND(hifn, crypto, 1, 1, 1);
#ifdef HIFN_RNDTEST
MODULE_DEPEND(hifn, rndtest, 1, 1, 1);
#endif
139
/* Board/processing-unit reset and initialization helpers */
static	void hifn_reset_board(struct hifn_softc *, int);
static	void hifn_reset_puc(struct hifn_softc *);
static	void hifn_puc_wait(struct hifn_softc *);
static	int hifn_enable_crypto(struct hifn_softc *);
static	void hifn_set_retry(struct hifn_softc *sc);
static	void hifn_init_dma(struct hifn_softc *);
static	void hifn_init_pci_registers(struct hifn_softc *);
/* On-board RAM sizing/typing probes */
static	int hifn_sramsize(struct hifn_softc *);
static	int hifn_dramsize(struct hifn_softc *);
static	int hifn_ramtype(struct hifn_softc *);
static	void hifn_sessions(struct hifn_softc *);
/* Interrupt handler and command/DMA machinery */
static	void hifn_intr(void *);
static	u_int hifn_write_command(struct hifn_command *, u_int8_t *);
static	u_int32_t hifn_next_signature(u_int32_t a, u_int cnt);
static	void hifn_callback(struct hifn_softc *, struct hifn_command *, u_int8_t *);
static	int hifn_crypto(struct hifn_softc *, struct hifn_command *, struct cryptop *, int);
static	int hifn_readramaddr(struct hifn_softc *, int, u_int8_t *);
static	int hifn_writeramaddr(struct hifn_softc *, int, u_int8_t *);
static	int hifn_dmamap_load_src(struct hifn_softc *, struct hifn_command *);
static	int hifn_dmamap_load_dst(struct hifn_softc *, struct hifn_command *);
/* RNG / public-key unit support and housekeeping */
static	int hifn_init_pubrng(struct hifn_softc *);
static	void hifn_rng(void *);
static	void hifn_tick(void *);
static	void hifn_abort(struct hifn_softc *);
static	void hifn_alloc_slot(struct hifn_softc *, int *, int *, int *, int *);

/* Register writers that track the last-written offset (write-merge quirk) */
static	void hifn_write_reg_0(struct hifn_softc *, bus_size_t, u_int32_t);
static	void hifn_write_reg_1(struct hifn_softc *, bus_size_t, u_int32_t);
168
169static __inline u_int32_t
170READ_REG_0(struct hifn_softc *sc, bus_size_t reg)
171{
172    u_int32_t v = bus_space_read_4(sc->sc_st0, sc->sc_sh0, reg);
173    sc->sc_bar0_lastreg = (bus_size_t) -1;
174    return (v);
175}
176#define	WRITE_REG_0(sc, reg, val)	hifn_write_reg_0(sc, reg, val)
177
178static __inline u_int32_t
179READ_REG_1(struct hifn_softc *sc, bus_size_t reg)
180{
181    u_int32_t v = bus_space_read_4(sc->sc_st1, sc->sc_sh1, reg);
182    sc->sc_bar1_lastreg = (bus_size_t) -1;
183    return (v);
184}
185#define	WRITE_REG_1(sc, reg, val)	hifn_write_reg_1(sc, reg, val)
186
/* hw.hifn sysctl tree: debug knob, statistics, and batching tunable. */
SYSCTL_NODE(_hw, OID_AUTO, hifn, CTLFLAG_RD, 0, "Hifn driver parameters");

#ifdef HIFN_DEBUG
static	int hifn_debug = 0;
SYSCTL_INT(_hw_hifn, OID_AUTO, debug, CTLFLAG_RW, &hifn_debug,
	    0, "control debugging msgs");
#endif

/* Global (not per-device) operation counters exported read-only. */
static	struct hifn_stats hifnstats;
SYSCTL_STRUCT(_hw_hifn, OID_AUTO, stats, CTLFLAG_RD, &hifnstats,
	    hifn_stats, "driver statistics");
/* Max ops queued to the chip before forcing an interrupt. */
static	int hifn_maxbatch = 1;
SYSCTL_INT(_hw_hifn, OID_AUTO, maxbatch, CTLFLAG_RW, &hifn_maxbatch,
	    0, "max ops to batch w/o interrupt");
201
202/*
203 * Probe for a supported device.  The PCI vendor and device
204 * IDs are used to detect devices we know how to handle.
205 */
206static int
207hifn_probe(device_t dev)
208{
209	if (pci_get_vendor(dev) == PCI_VENDOR_INVERTEX &&
210	    pci_get_device(dev) == PCI_PRODUCT_INVERTEX_AEON)
211		return (BUS_PROBE_DEFAULT);
212	if (pci_get_vendor(dev) == PCI_VENDOR_HIFN &&
213	    (pci_get_device(dev) == PCI_PRODUCT_HIFN_7751 ||
214	     pci_get_device(dev) == PCI_PRODUCT_HIFN_7951 ||
215	     pci_get_device(dev) == PCI_PRODUCT_HIFN_7955 ||
216	     pci_get_device(dev) == PCI_PRODUCT_HIFN_7956 ||
217	     pci_get_device(dev) == PCI_PRODUCT_HIFN_7811))
218		return (BUS_PROBE_DEFAULT);
219	if (pci_get_vendor(dev) == PCI_VENDOR_NETSEC &&
220	    pci_get_device(dev) == PCI_PRODUCT_NETSEC_7751)
221		return (BUS_PROBE_DEFAULT);
222	return (ENXIO);
223}
224
225static void
226hifn_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
227{
228	bus_addr_t *paddr = (bus_addr_t*) arg;
229	*paddr = segs->ds_addr;
230}
231
232static const char*
233hifn_partname(struct hifn_softc *sc)
234{
235	/* XXX sprintf numbers when not decoded */
236	switch (pci_get_vendor(sc->sc_dev)) {
237	case PCI_VENDOR_HIFN:
238		switch (pci_get_device(sc->sc_dev)) {
239		case PCI_PRODUCT_HIFN_6500:	return "Hifn 6500";
240		case PCI_PRODUCT_HIFN_7751:	return "Hifn 7751";
241		case PCI_PRODUCT_HIFN_7811:	return "Hifn 7811";
242		case PCI_PRODUCT_HIFN_7951:	return "Hifn 7951";
243		case PCI_PRODUCT_HIFN_7955:	return "Hifn 7955";
244		case PCI_PRODUCT_HIFN_7956:	return "Hifn 7956";
245		}
246		return "Hifn unknown-part";
247	case PCI_VENDOR_INVERTEX:
248		switch (pci_get_device(sc->sc_dev)) {
249		case PCI_PRODUCT_INVERTEX_AEON:	return "Invertex AEON";
250		}
251		return "Invertex unknown-part";
252	case PCI_VENDOR_NETSEC:
253		switch (pci_get_device(sc->sc_dev)) {
254		case PCI_PRODUCT_NETSEC_7751:	return "NetSec 7751";
255		}
256		return "NetSec unknown-part";
257	}
258	return "Unknown-vendor unknown-part";
259}
260
/*
 * Default entropy sink when HIFN_RNDTEST is not compiled in (or the
 * rndtest device failed to attach): feed RNG output straight into the
 * kernel entropy pool, claiming count*NBBY bits of entropy.
 */
static void
default_harvest(struct rndtest_state *rsp, void *buf, u_int count)
{
	random_harvest(buf, count, count*NBBY, 0, RANDOM_PURE);
}
266
267static u_int
268checkmaxmin(device_t dev, const char *what, u_int v, u_int min, u_int max)
269{
270	if (v > max) {
271		device_printf(dev, "Warning, %s %u out of range, "
272			"using max %u\n", what, v, max);
273		v = max;
274	} else if (v < min) {
275		device_printf(dev, "Warning, %s %u out of range, "
276			"using min %u\n", what, v, min);
277		v = min;
278	}
279	return v;
280}
281
282/*
283 * Select PLL configuration for 795x parts.  This is complicated in
284 * that we cannot determine the optimal parameters without user input.
285 * The reference clock is derived from an external clock through a
286 * multiplier.  The external clock is either the host bus (i.e. PCI)
287 * or an external clock generator.  When using the PCI bus we assume
288 * the clock is either 33 or 66 MHz; for an external source we cannot
289 * tell the speed.
290 *
291 * PLL configuration is done with a string: "pci" for PCI bus, or "ext"
292 * for an external source, followed by the frequency.  We calculate
293 * the appropriate multiplier and PLL register contents accordingly.
294 * When no configuration is given we default to "pci66" since that
295 * always will allow the card to work.  If a card is using the PCI
296 * bus clock and in a 33MHz slot then it will be operating at half
297 * speed until the correct information is provided.
298 *
299 * We use a default setting of "ext66" because according to Mike Ham
300 * of HiFn, almost every board in existence has an external crystal
301 * populated at 66Mhz. Using PCI can be a problem on modern motherboards,
302 * because PCI33 can have clocks from 0 to 33Mhz, and some have
303 * non-PCI-compliant spread-spectrum clocks, which can confuse the pll.
304 */
305static void
306hifn_getpllconfig(device_t dev, u_int *pll)
307{
308	const char *pllspec;
309	u_int freq, mul, fl, fh;
310	u_int32_t pllconfig;
311	char *nxt;
312
313	if (resource_string_value("hifn", device_get_unit(dev),
314	    "pllconfig", &pllspec))
315		pllspec = "ext66";
316	fl = 33, fh = 66;
317	pllconfig = 0;
318	if (strncmp(pllspec, "ext", 3) == 0) {
319		pllspec += 3;
320		pllconfig |= HIFN_PLL_REF_SEL;
321		switch (pci_get_device(dev)) {
322		case PCI_PRODUCT_HIFN_7955:
323		case PCI_PRODUCT_HIFN_7956:
324			fl = 20, fh = 100;
325			break;
326#ifdef notyet
327		case PCI_PRODUCT_HIFN_7954:
328			fl = 20, fh = 66;
329			break;
330#endif
331		}
332	} else if (strncmp(pllspec, "pci", 3) == 0)
333		pllspec += 3;
334	freq = strtoul(pllspec, &nxt, 10);
335	if (nxt == pllspec)
336		freq = 66;
337	else
338		freq = checkmaxmin(dev, "frequency", freq, fl, fh);
339	/*
340	 * Calculate multiplier.  We target a Fck of 266 MHz,
341	 * allowing only even values, possibly rounded down.
342	 * Multipliers > 8 must set the charge pump current.
343	 */
344	mul = checkmaxmin(dev, "PLL divisor", (266 / freq) &~ 1, 2, 12);
345	pllconfig |= (mul / 2 - 1) << HIFN_PLL_ND_SHIFT;
346	if (mul > 8)
347		pllconfig |= HIFN_PLL_IS;
348	*pll = pllconfig;
349}
350
351/*
352 * Attach an interface that successfully probed.
353 */
354static int
355hifn_attach(device_t dev)
356{
357	struct hifn_softc *sc = device_get_softc(dev);
358	u_int32_t cmd;
359	caddr_t kva;
360	int rseg, rid;
361	char rbase;
362	u_int16_t ena, rev;
363
364	KASSERT(sc != NULL, ("hifn_attach: null software carrier!"));
365	bzero(sc, sizeof (*sc));
366	sc->sc_dev = dev;
367
368	mtx_init(&sc->sc_mtx, device_get_nameunit(dev), "hifn driver", MTX_DEF);
369
370	/* XXX handle power management */
371
372	/*
373	 * The 7951 and 795x have a random number generator and
374	 * public key support; note this.
375	 */
376	if (pci_get_vendor(dev) == PCI_VENDOR_HIFN &&
377	    (pci_get_device(dev) == PCI_PRODUCT_HIFN_7951 ||
378	     pci_get_device(dev) == PCI_PRODUCT_HIFN_7955 ||
379	     pci_get_device(dev) == PCI_PRODUCT_HIFN_7956))
380		sc->sc_flags = HIFN_HAS_RNG | HIFN_HAS_PUBLIC;
381	/*
382	 * The 7811 has a random number generator and
383	 * we also note it's identity 'cuz of some quirks.
384	 */
385	if (pci_get_vendor(dev) == PCI_VENDOR_HIFN &&
386	    pci_get_device(dev) == PCI_PRODUCT_HIFN_7811)
387		sc->sc_flags |= HIFN_IS_7811 | HIFN_HAS_RNG;
388
389	/*
390	 * The 795x parts support AES.
391	 */
392	if (pci_get_vendor(dev) == PCI_VENDOR_HIFN &&
393	    (pci_get_device(dev) == PCI_PRODUCT_HIFN_7955 ||
394	     pci_get_device(dev) == PCI_PRODUCT_HIFN_7956)) {
395		sc->sc_flags |= HIFN_IS_7956 | HIFN_HAS_AES;
396		/*
397		 * Select PLL configuration.  This depends on the
398		 * bus and board design and must be manually configured
399		 * if the default setting is unacceptable.
400		 */
401		hifn_getpllconfig(dev, &sc->sc_pllconfig);
402	}
403
404	/*
405	 * Configure support for memory-mapped access to
406	 * registers and for DMA operations.
407	 */
408#define	PCIM_ENA	(PCIM_CMD_MEMEN|PCIM_CMD_BUSMASTEREN)
409	cmd = pci_read_config(dev, PCIR_COMMAND, 4);
410	cmd |= PCIM_ENA;
411	pci_write_config(dev, PCIR_COMMAND, cmd, 4);
412	cmd = pci_read_config(dev, PCIR_COMMAND, 4);
413	if ((cmd & PCIM_ENA) != PCIM_ENA) {
414		device_printf(dev, "failed to enable %s\n",
415			(cmd & PCIM_ENA) == 0 ?
416				"memory mapping & bus mastering" :
417			(cmd & PCIM_CMD_MEMEN) == 0 ?
418				"memory mapping" : "bus mastering");
419		goto fail_pci;
420	}
421#undef PCIM_ENA
422
423	/*
424	 * Setup PCI resources. Note that we record the bus
425	 * tag and handle for each register mapping, this is
426	 * used by the READ_REG_0, WRITE_REG_0, READ_REG_1,
427	 * and WRITE_REG_1 macros throughout the driver.
428	 */
429	rid = HIFN_BAR0;
430	sc->sc_bar0res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
431			 			RF_ACTIVE);
432	if (sc->sc_bar0res == NULL) {
433		device_printf(dev, "cannot map bar%d register space\n", 0);
434		goto fail_pci;
435	}
436	sc->sc_st0 = rman_get_bustag(sc->sc_bar0res);
437	sc->sc_sh0 = rman_get_bushandle(sc->sc_bar0res);
438	sc->sc_bar0_lastreg = (bus_size_t) -1;
439
440	rid = HIFN_BAR1;
441	sc->sc_bar1res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
442						RF_ACTIVE);
443	if (sc->sc_bar1res == NULL) {
444		device_printf(dev, "cannot map bar%d register space\n", 1);
445		goto fail_io0;
446	}
447	sc->sc_st1 = rman_get_bustag(sc->sc_bar1res);
448	sc->sc_sh1 = rman_get_bushandle(sc->sc_bar1res);
449	sc->sc_bar1_lastreg = (bus_size_t) -1;
450
451	hifn_set_retry(sc);
452
453	/*
454	 * Setup the area where the Hifn DMA's descriptors
455	 * and associated data structures.
456	 */
457	if (bus_dma_tag_create(NULL,			/* parent */
458			       1, 0,			/* alignment,boundary */
459			       BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
460			       BUS_SPACE_MAXADDR,	/* highaddr */
461			       NULL, NULL,		/* filter, filterarg */
462			       HIFN_MAX_DMALEN,		/* maxsize */
463			       MAX_SCATTER,		/* nsegments */
464			       HIFN_MAX_SEGLEN,		/* maxsegsize */
465			       BUS_DMA_ALLOCNOW,	/* flags */
466			       NULL,			/* lockfunc */
467			       NULL,			/* lockarg */
468			       &sc->sc_dmat)) {
469		device_printf(dev, "cannot allocate DMA tag\n");
470		goto fail_io1;
471	}
472	if (bus_dmamap_create(sc->sc_dmat, BUS_DMA_NOWAIT, &sc->sc_dmamap)) {
473		device_printf(dev, "cannot create dma map\n");
474		bus_dma_tag_destroy(sc->sc_dmat);
475		goto fail_io1;
476	}
477	if (bus_dmamem_alloc(sc->sc_dmat, (void**) &kva, BUS_DMA_NOWAIT, &sc->sc_dmamap)) {
478		device_printf(dev, "cannot alloc dma buffer\n");
479		bus_dmamap_destroy(sc->sc_dmat, sc->sc_dmamap);
480		bus_dma_tag_destroy(sc->sc_dmat);
481		goto fail_io1;
482	}
483	if (bus_dmamap_load(sc->sc_dmat, sc->sc_dmamap, kva,
484			     sizeof (*sc->sc_dma),
485			     hifn_dmamap_cb, &sc->sc_dma_physaddr,
486			     BUS_DMA_NOWAIT)) {
487		device_printf(dev, "cannot load dma map\n");
488		bus_dmamem_free(sc->sc_dmat, kva, sc->sc_dmamap);
489		bus_dmamap_destroy(sc->sc_dmat, sc->sc_dmamap);
490		bus_dma_tag_destroy(sc->sc_dmat);
491		goto fail_io1;
492	}
493	sc->sc_dma = (struct hifn_dma *)kva;
494	bzero(sc->sc_dma, sizeof(*sc->sc_dma));
495
496	KASSERT(sc->sc_st0 != 0, ("hifn_attach: null bar0 tag!"));
497	KASSERT(sc->sc_sh0 != 0, ("hifn_attach: null bar0 handle!"));
498	KASSERT(sc->sc_st1 != 0, ("hifn_attach: null bar1 tag!"));
499	KASSERT(sc->sc_sh1 != 0, ("hifn_attach: null bar1 handle!"));
500
501	/*
502	 * Reset the board and do the ``secret handshake''
503	 * to enable the crypto support.  Then complete the
504	 * initialization procedure by setting up the interrupt
505	 * and hooking in to the system crypto support so we'll
506	 * get used for system services like the crypto device,
507	 * IPsec, RNG device, etc.
508	 */
509	hifn_reset_board(sc, 0);
510
511	if (hifn_enable_crypto(sc) != 0) {
512		device_printf(dev, "crypto enabling failed\n");
513		goto fail_mem;
514	}
515	hifn_reset_puc(sc);
516
517	hifn_init_dma(sc);
518	hifn_init_pci_registers(sc);
519
520	/* XXX can't dynamically determine ram type for 795x; force dram */
521	if (sc->sc_flags & HIFN_IS_7956)
522		sc->sc_drammodel = 1;
523	else if (hifn_ramtype(sc))
524		goto fail_mem;
525
526	if (sc->sc_drammodel == 0)
527		hifn_sramsize(sc);
528	else
529		hifn_dramsize(sc);
530
531	/*
532	 * Workaround for NetSec 7751 rev A: half ram size because two
533	 * of the address lines were left floating
534	 */
535	if (pci_get_vendor(dev) == PCI_VENDOR_NETSEC &&
536	    pci_get_device(dev) == PCI_PRODUCT_NETSEC_7751 &&
537	    pci_get_revid(dev) == 0x61)	/*XXX???*/
538		sc->sc_ramsize >>= 1;
539
540	/*
541	 * Arrange the interrupt line.
542	 */
543	rid = 0;
544	sc->sc_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
545					    RF_SHAREABLE|RF_ACTIVE);
546	if (sc->sc_irq == NULL) {
547		device_printf(dev, "could not map interrupt\n");
548		goto fail_mem;
549	}
550	/*
551	 * NB: Network code assumes we are blocked with splimp()
552	 *     so make sure the IRQ is marked appropriately.
553	 */
554	if (bus_setup_intr(dev, sc->sc_irq, INTR_TYPE_NET | INTR_MPSAFE,
555			   NULL, hifn_intr, sc, &sc->sc_intrhand)) {
556		device_printf(dev, "could not setup interrupt\n");
557		goto fail_intr2;
558	}
559
560	hifn_sessions(sc);
561
562	/*
563	 * NB: Keep only the low 16 bits; this masks the chip id
564	 *     from the 7951.
565	 */
566	rev = READ_REG_1(sc, HIFN_1_REVID) & 0xffff;
567
568	rseg = sc->sc_ramsize / 1024;
569	rbase = 'K';
570	if (sc->sc_ramsize >= (1024 * 1024)) {
571		rbase = 'M';
572		rseg /= 1024;
573	}
574	device_printf(sc->sc_dev, "%s, rev %u, %d%cB %cram",
575		hifn_partname(sc), rev,
576		rseg, rbase, sc->sc_drammodel ? 'd' : 's');
577	if (sc->sc_flags & HIFN_IS_7956)
578		printf(", pll=0x%x<%s clk, %ux mult>",
579			sc->sc_pllconfig,
580			sc->sc_pllconfig & HIFN_PLL_REF_SEL ? "ext" : "pci",
581			2 + 2*((sc->sc_pllconfig & HIFN_PLL_ND) >> 11));
582	printf("\n");
583
584	sc->sc_cid = crypto_get_driverid(dev, CRYPTOCAP_F_HARDWARE);
585	if (sc->sc_cid < 0) {
586		device_printf(dev, "could not get crypto driver id\n");
587		goto fail_intr;
588	}
589
590	WRITE_REG_0(sc, HIFN_0_PUCNFG,
591	    READ_REG_0(sc, HIFN_0_PUCNFG) | HIFN_PUCNFG_CHIPID);
592	ena = READ_REG_0(sc, HIFN_0_PUSTAT) & HIFN_PUSTAT_CHIPENA;
593
594	switch (ena) {
595	case HIFN_PUSTAT_ENA_2:
596		crypto_register(sc->sc_cid, CRYPTO_3DES_CBC, 0, 0);
597		crypto_register(sc->sc_cid, CRYPTO_ARC4, 0, 0);
598		if (sc->sc_flags & HIFN_HAS_AES)
599			crypto_register(sc->sc_cid, CRYPTO_AES_CBC, 0, 0);
600		/*FALLTHROUGH*/
601	case HIFN_PUSTAT_ENA_1:
602		crypto_register(sc->sc_cid, CRYPTO_MD5, 0, 0);
603		crypto_register(sc->sc_cid, CRYPTO_SHA1, 0, 0);
604		crypto_register(sc->sc_cid, CRYPTO_MD5_HMAC, 0, 0);
605		crypto_register(sc->sc_cid, CRYPTO_SHA1_HMAC, 0, 0);
606		crypto_register(sc->sc_cid, CRYPTO_DES_CBC, 0, 0);
607		break;
608	}
609
610	bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap,
611	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
612
613	if (sc->sc_flags & (HIFN_HAS_PUBLIC | HIFN_HAS_RNG))
614		hifn_init_pubrng(sc);
615
616	callout_init(&sc->sc_tickto, CALLOUT_MPSAFE);
617	callout_reset(&sc->sc_tickto, hz, hifn_tick, sc);
618
619	return (0);
620
621fail_intr:
622	bus_teardown_intr(dev, sc->sc_irq, sc->sc_intrhand);
623fail_intr2:
624	/* XXX don't store rid */
625	bus_release_resource(dev, SYS_RES_IRQ, 0, sc->sc_irq);
626fail_mem:
627	bus_dmamap_unload(sc->sc_dmat, sc->sc_dmamap);
628	bus_dmamem_free(sc->sc_dmat, sc->sc_dma, sc->sc_dmamap);
629	bus_dmamap_destroy(sc->sc_dmat, sc->sc_dmamap);
630	bus_dma_tag_destroy(sc->sc_dmat);
631
632	/* Turn off DMA polling */
633	WRITE_REG_1(sc, HIFN_1_DMA_CNFG, HIFN_DMACNFG_MSTRESET |
634	    HIFN_DMACNFG_DMARESET | HIFN_DMACNFG_MODE);
635fail_io1:
636	bus_release_resource(dev, SYS_RES_MEMORY, HIFN_BAR1, sc->sc_bar1res);
637fail_io0:
638	bus_release_resource(dev, SYS_RES_MEMORY, HIFN_BAR0, sc->sc_bar0res);
639fail_pci:
640	mtx_destroy(&sc->sc_mtx);
641	return (ENXIO);
642}
643
644/*
645 * Detach an interface that successfully probed.
646 */
647static int
648hifn_detach(device_t dev)
649{
650	struct hifn_softc *sc = device_get_softc(dev);
651
652	KASSERT(sc != NULL, ("hifn_detach: null software carrier!"));
653
654	/* disable interrupts */
655	WRITE_REG_1(sc, HIFN_1_DMA_IER, 0);
656
657	/*XXX other resources */
658	callout_stop(&sc->sc_tickto);
659	callout_stop(&sc->sc_rngto);
660#ifdef HIFN_RNDTEST
661	if (sc->sc_rndtest)
662		rndtest_detach(sc->sc_rndtest);
663#endif
664
665	/* Turn off DMA polling */
666	WRITE_REG_1(sc, HIFN_1_DMA_CNFG, HIFN_DMACNFG_MSTRESET |
667	    HIFN_DMACNFG_DMARESET | HIFN_DMACNFG_MODE);
668
669	crypto_unregister_all(sc->sc_cid);
670
671	bus_generic_detach(dev);	/*XXX should be no children, right? */
672
673	bus_teardown_intr(dev, sc->sc_irq, sc->sc_intrhand);
674	/* XXX don't store rid */
675	bus_release_resource(dev, SYS_RES_IRQ, 0, sc->sc_irq);
676
677	bus_dmamap_unload(sc->sc_dmat, sc->sc_dmamap);
678	bus_dmamem_free(sc->sc_dmat, sc->sc_dma, sc->sc_dmamap);
679	bus_dmamap_destroy(sc->sc_dmat, sc->sc_dmamap);
680	bus_dma_tag_destroy(sc->sc_dmat);
681
682	bus_release_resource(dev, SYS_RES_MEMORY, HIFN_BAR1, sc->sc_bar1res);
683	bus_release_resource(dev, SYS_RES_MEMORY, HIFN_BAR0, sc->sc_bar0res);
684
685	mtx_destroy(&sc->sc_mtx);
686
687	return (0);
688}
689
690/*
691 * Stop all chip I/O so that the kernel's probe routines don't
692 * get confused by errant DMAs when rebooting.
693 */
694static int
695hifn_shutdown(device_t dev)
696{
697#ifdef notyet
698	hifn_stop(device_get_softc(dev));
699#endif
700	return (0);
701}
702
703/*
704 * Device suspend routine.  Stop the interface and save some PCI
705 * settings in case the BIOS doesn't restore them properly on
706 * resume.
707 */
708static int
709hifn_suspend(device_t dev)
710{
711	struct hifn_softc *sc = device_get_softc(dev);
712#ifdef notyet
713	hifn_stop(sc);
714#endif
715	sc->sc_suspended = 1;
716
717	return (0);
718}
719
720/*
721 * Device resume routine.  Restore some PCI settings in case the BIOS
722 * doesn't, re-enable busmastering, and restart the interface if
723 * appropriate.
724 */
725static int
726hifn_resume(device_t dev)
727{
728	struct hifn_softc *sc = device_get_softc(dev);
729#ifdef notyet
730	/* reenable busmastering */
731	pci_enable_busmaster(dev);
732	pci_enable_io(dev, HIFN_RES);
733
734        /* reinitialize interface if necessary */
735        if (ifp->if_flags & IFF_UP)
736                rl_init(sc);
737#endif
738	sc->sc_suspended = 0;
739
740	return (0);
741}
742
/*
 * Initialize the public-key and RNG units (7811/7951/795x).
 *
 * Picks the entropy harvest sink, resets the public key engine on
 * non-7811 parts, enables the RNG and starts its polling callout,
 * and unmasks the public-key-done interrupt when the PK unit exists.
 * Returns 0 on success, 1 if the public key engine failed to come
 * out of reset.
 */
static int
hifn_init_pubrng(struct hifn_softc *sc)
{
	u_int32_t r;
	int i;

#ifdef HIFN_RNDTEST
	sc->sc_rndtest = rndtest_attach(sc->sc_dev);
	if (sc->sc_rndtest)
		sc->sc_harvest = rndtest_harvest;
	else
		sc->sc_harvest = default_harvest;
#else
	sc->sc_harvest = default_harvest;
#endif
	if ((sc->sc_flags & HIFN_IS_7811) == 0) {
		/* Reset 7951 public key/rng engine */
		WRITE_REG_1(sc, HIFN_1_PUB_RESET,
		    READ_REG_1(sc, HIFN_1_PUB_RESET) | HIFN_PUBRST_RESET);

		/* Poll up to ~100ms for the reset bit to self-clear. */
		for (i = 0; i < 100; i++) {
			DELAY(1000);
			if ((READ_REG_1(sc, HIFN_1_PUB_RESET) &
			    HIFN_PUBRST_RESET) == 0)
				break;
		}

		if (i == 100) {
			device_printf(sc->sc_dev, "public key init failed\n");
			return (1);
		}
	}

	/* Enable the rng, if available */
	if (sc->sc_flags & HIFN_HAS_RNG) {
		if (sc->sc_flags & HIFN_IS_7811) {
			/* 7811: disable, reprogram config, then re-enable. */
			r = READ_REG_1(sc, HIFN_1_7811_RNGENA);
			if (r & HIFN_7811_RNGENA_ENA) {
				r &= ~HIFN_7811_RNGENA_ENA;
				WRITE_REG_1(sc, HIFN_1_7811_RNGENA, r);
			}
			WRITE_REG_1(sc, HIFN_1_7811_RNGCFG,
			    HIFN_7811_RNGCFG_DEFL);
			r |= HIFN_7811_RNGENA_ENA;
			WRITE_REG_1(sc, HIFN_1_7811_RNGENA, r);
		} else
			WRITE_REG_1(sc, HIFN_1_RNG_CONFIG,
			    READ_REG_1(sc, HIFN_1_RNG_CONFIG) |
			    HIFN_RNGCFG_ENA);

		/* First read is discarded (see hifn_rng); poll ~100 times/s. */
		sc->sc_rngfirst = 1;
		if (hz >= 100)
			sc->sc_rnghz = hz / 100;
		else
			sc->sc_rnghz = 1;
		callout_init(&sc->sc_rngto, CALLOUT_MPSAFE);
		callout_reset(&sc->sc_rngto, sc->sc_rnghz, hifn_rng, sc);
	}

	/* Enable public key engine, if available */
	if (sc->sc_flags & HIFN_HAS_PUBLIC) {
		WRITE_REG_1(sc, HIFN_1_PUB_IEN, HIFN_PUBIEN_DONE);
		sc->sc_dmaier |= HIFN_DMAIER_PUBDONE;
		WRITE_REG_1(sc, HIFN_1_DMA_IER, sc->sc_dmaier);
#ifdef HIFN_VULCANDEV
		sc->sc_pkdev = make_dev(&vulcanpk_cdevsw, 0,
					UID_ROOT, GID_WHEEL, 0666,
					"vulcanpk");
		sc->sc_pkdev->si_drv1 = sc;
#endif
	}

	return (0);
}
817
/*
 * RNG polling callout: pull random words out of the chip and feed
 * them to the configured harvest routine, then reschedule.
 * The very first read after enabling is always discarded.
 */
static void
hifn_rng(void *vsc)
{
/* NOTE(review): RANDOM_BITS appears unused in this revision. */
#define	RANDOM_BITS(n)	(n)*sizeof (u_int32_t), (n)*sizeof (u_int32_t)*NBBY, 0
	struct hifn_softc *sc = vsc;
	u_int32_t sts, num[2];
	int i;

	if (sc->sc_flags & HIFN_IS_7811) {
		/* ONLY VALID ON 7811!!!! */
		/* Drain up to 5 pairs per tick; stop early if FIFO not ready. */
		for (i = 0; i < 5; i++) {
			sts = READ_REG_1(sc, HIFN_1_7811_RNGSTS);
			if (sts & HIFN_7811_RNGSTS_UFL) {
				/* Underflow: give up and do NOT reschedule. */
				device_printf(sc->sc_dev,
					      "RNG underflow: disabling\n");
				return;
			}
			if ((sts & HIFN_7811_RNGSTS_RDY) == 0)
				break;

			/*
			 * There are at least two words in the RNG FIFO
			 * at this point.
			 */
			num[0] = READ_REG_1(sc, HIFN_1_7811_RNGDAT);
			num[1] = READ_REG_1(sc, HIFN_1_7811_RNGDAT);
			/* NB: discard first data read */
			if (sc->sc_rngfirst)
				sc->sc_rngfirst = 0;
			else
				(*sc->sc_harvest)(sc->sc_rndtest,
					num, sizeof (num));
		}
	} else {
		/* Non-7811 parts expose a single RNG data register. */
		num[0] = READ_REG_1(sc, HIFN_1_RNG_DATA);

		/* NB: discard first data read */
		if (sc->sc_rngfirst)
			sc->sc_rngfirst = 0;
		else
			(*sc->sc_harvest)(sc->sc_rndtest,
				num, sizeof (num[0]));
	}

	callout_reset(&sc->sc_rngto, sc->sc_rnghz, hifn_rng, sc);
#undef RANDOM_BITS
}
865
866static void
867hifn_puc_wait(struct hifn_softc *sc)
868{
869	int i;
870	int reg = HIFN_0_PUCTRL;
871
872	if (sc->sc_flags & HIFN_IS_7956) {
873		reg = HIFN_0_PUCTRL2;
874	}
875
876	for (i = 5000; i > 0; i--) {
877		DELAY(1);
878		if (!(READ_REG_0(sc, reg) & HIFN_PUCTRL_RESET))
879			break;
880	}
881	if (!i)
882		device_printf(sc->sc_dev, "proc unit did not reset\n");
883}
884
885/*
886 * Reset the processing unit.
887 */
888static void
889hifn_reset_puc(struct hifn_softc *sc)
890{
891	/* Reset processing unit */
892	int reg = HIFN_0_PUCTRL;
893
894	if (sc->sc_flags & HIFN_IS_7956) {
895		reg = HIFN_0_PUCTRL2;
896	}
897	WRITE_REG_0(sc, reg, HIFN_PUCTRL_DMAENA);
898
899	hifn_puc_wait(sc);
900}
901
902/*
903 * Set the Retry and TRDY registers; note that we set them to
904 * zero because the 7811 locks up when forced to retry (section
905 * 3.6 of "Specification Update SU-0014-04".  Not clear if we
906 * should do this for all Hifn parts, but it doesn't seem to hurt.
907 */
908static void
909hifn_set_retry(struct hifn_softc *sc)
910{
911	/* NB: RETRY only responds to 8-bit reads/writes */
912	pci_write_config(sc->sc_dev, HIFN_RETRY_TIMEOUT, 0, 1);
913	pci_write_config(sc->sc_dev, HIFN_TRDY_TIMEOUT, 0, 4);
914}
915
916/*
917 * Resets the board.  Values in the regesters are left as is
918 * from the reset (i.e. initial values are assigned elsewhere).
919 */
920static void
921hifn_reset_board(struct hifn_softc *sc, int full)
922{
923	u_int32_t reg;
924
925	/*
926	 * Set polling in the DMA configuration register to zero.  0x7 avoids
927	 * resetting the board and zeros out the other fields.
928	 */
929	WRITE_REG_1(sc, HIFN_1_DMA_CNFG, HIFN_DMACNFG_MSTRESET |
930	    HIFN_DMACNFG_DMARESET | HIFN_DMACNFG_MODE);
931
932	/*
933	 * Now that polling has been disabled, we have to wait 1 ms
934	 * before resetting the board.
935	 */
936	DELAY(1000);
937
938	/* Reset the DMA unit */
939	if (full) {
940		WRITE_REG_1(sc, HIFN_1_DMA_CNFG, HIFN_DMACNFG_MODE);
941		DELAY(1000);
942	} else {
943		WRITE_REG_1(sc, HIFN_1_DMA_CNFG,
944		    HIFN_DMACNFG_MODE | HIFN_DMACNFG_MSTRESET);
945		hifn_reset_puc(sc);
946	}
947
948	KASSERT(sc->sc_dma != NULL, ("hifn_reset_board: null DMA tag!"));
949	bzero(sc->sc_dma, sizeof(*sc->sc_dma));
950
951	/* Bring dma unit out of reset */
952	WRITE_REG_1(sc, HIFN_1_DMA_CNFG, HIFN_DMACNFG_MSTRESET |
953	    HIFN_DMACNFG_DMARESET | HIFN_DMACNFG_MODE);
954
955	hifn_puc_wait(sc);
956	hifn_set_retry(sc);
957
958	if (sc->sc_flags & HIFN_IS_7811) {
959		for (reg = 0; reg < 1000; reg++) {
960			if (READ_REG_1(sc, HIFN_1_7811_MIPSRST) &
961			    HIFN_MIPSRST_CRAMINIT)
962				break;
963			DELAY(1000);
964		}
965		if (reg == 1000)
966			printf(": cram init timeout\n");
967	} else {
968	  /* set up DMA configuration register #2 */
969	  /* turn off all PK and BAR0 swaps */
970	  WRITE_REG_1(sc, HIFN_1_DMA_CNFG2,
971		      (3 << HIFN_DMACNFG2_INIT_WRITE_BURST_SHIFT)|
972		      (3 << HIFN_DMACNFG2_INIT_READ_BURST_SHIFT)|
973		      (2 << HIFN_DMACNFG2_TGT_WRITE_BURST_SHIFT)|
974		      (2 << HIFN_DMACNFG2_TGT_READ_BURST_SHIFT));
975	}
976
977}
978
/*
 * Advance the unlock-handshake signature by cnt steps.
 *
 * Each step computes the parity of (a & 0x80080125) by xor-folding
 * down to bit 0, then shifts that parity bit into a — i.e. one step
 * of a 32-bit linear feedback shift register with taps 0x80080125.
 * Returns the register state after cnt steps.
 */
static u_int32_t
hifn_next_signature(u_int32_t a, u_int cnt)
{
	u_int32_t v;
	u_int i;	/* unsigned to match cnt; avoids signed/unsigned compare */

	for (i = 0; i < cnt; i++) {

		/* get the parity of the tapped bits */
		v = a & 0x80080125;
		v ^= v >> 16;
		v ^= v >> 8;
		v ^= v >> 4;
		v ^= v >> 2;
		v ^= v >> 1;

		/* shift the parity bit in (LFSR step) */
		a = (v & 1) ^ (a << 1);
	}

	return a;
}
1000
/*
 * Table mapping PCI vendor/device pairs to the 13-byte unlock key
 * ("card id") used by hifn_enable_crypto()'s secret handshake.
 * All known boards ship with the all-zeros key.
 */
struct pci2id {
	u_short		pci_vendor;	/* PCI vendor id */
	u_short		pci_prod;	/* PCI device id */
	char		card_id[13];	/* handshake unlock key */
};
static struct pci2id pci2id[] = {
	{
		PCI_VENDOR_HIFN,
		PCI_PRODUCT_HIFN_7951,
		{ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
		  0x00, 0x00, 0x00, 0x00, 0x00 }
	}, {
		PCI_VENDOR_HIFN,
		PCI_PRODUCT_HIFN_7955,
		{ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
		  0x00, 0x00, 0x00, 0x00, 0x00 }
	}, {
		PCI_VENDOR_HIFN,
		PCI_PRODUCT_HIFN_7956,
		{ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
		  0x00, 0x00, 0x00, 0x00, 0x00 }
	}, {
		PCI_VENDOR_NETSEC,
		PCI_PRODUCT_NETSEC_7751,
		{ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
		  0x00, 0x00, 0x00, 0x00, 0x00 }
	}, {
		PCI_VENDOR_INVERTEX,
		PCI_PRODUCT_INVERTEX_AEON,
		{ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
		  0x00, 0x00, 0x00, 0x00, 0x00 }
	}, {
		PCI_VENDOR_HIFN,
		PCI_PRODUCT_HIFN_7811,
		{ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
		  0x00, 0x00, 0x00, 0x00, 0x00 }
	}, {
		/*
		 * Other vendors share this PCI ID as well, such as
		 * http://www.powercrypt.com, and obviously they also
		 * use the same key.
		 */
		PCI_VENDOR_HIFN,
		PCI_PRODUCT_HIFN_7751,
		{ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
		  0x00, 0x00, 0x00, 0x00, 0x00 }
	},
};
1049
1050/*
1051 * Checks to see if crypto is already enabled.  If crypto isn't enable,
1052 * "hifn_enable_crypto" is called to enable it.  The check is important,
1053 * as enabling crypto twice will lock the board.
1054 */
static int
hifn_enable_crypto(struct hifn_softc *sc)
{
	u_int32_t dmacfg, ramcfg, encl, addr, i;
	char *offtbl = NULL;

	/* Look up the unlock key table for this board's PCI IDs. */
	for (i = 0; i < sizeof(pci2id)/sizeof(pci2id[0]); i++) {
		if (pci2id[i].pci_vendor == pci_get_vendor(sc->sc_dev) &&
		    pci2id[i].pci_prod == pci_get_device(sc->sc_dev)) {
			offtbl = pci2id[i].card_id;
			break;
		}
	}
	if (offtbl == NULL) {
		device_printf(sc->sc_dev, "Unknown card!\n");
		return (1);
	}

	/* Save both config registers so they can be restored below. */
	ramcfg = READ_REG_0(sc, HIFN_0_PUCNFG);
	dmacfg = READ_REG_1(sc, HIFN_1_DMA_CNFG);

	/*
	 * The RAM config register's encrypt level bit needs to be set before
	 * every read performed on the encryption level register.
	 */
	WRITE_REG_0(sc, HIFN_0_PUCNFG, ramcfg | HIFN_PUCNFG_CHIPID);

	encl = READ_REG_0(sc, HIFN_0_PUSTAT) & HIFN_PUSTAT_CHIPENA;

	/*
	 * Make sure we don't re-unlock.  Two unlocks kills chip until the
	 * next reboot.
	 */
	if (encl == HIFN_PUSTAT_ENA_1 || encl == HIFN_PUSTAT_ENA_2) {
#ifdef HIFN_DEBUG
		if (hifn_debug)
			device_printf(sc->sc_dev,
			    "Strong crypto already enabled!\n");
#endif
		goto report;
	}

	if (encl != 0 && encl != HIFN_PUSTAT_ENA_0) {
#ifdef HIFN_DEBUG
		if (hifn_debug)
			device_printf(sc->sc_dev,
			      "Unknown encryption level 0x%x\n", encl);
#endif
		return 1;
	}

	/*
	 * Unlock sequence: put the DMA engine in unlock mode, read the
	 * per-board secret, then feed 13 derived signature words back
	 * through SECRET2, with a 1ms settle after each access.
	 */
	WRITE_REG_1(sc, HIFN_1_DMA_CNFG, HIFN_DMACNFG_UNLOCK |
	    HIFN_DMACNFG_MSTRESET | HIFN_DMACNFG_DMARESET | HIFN_DMACNFG_MODE);
	DELAY(1000);
	addr = READ_REG_1(sc, HIFN_UNLOCK_SECRET1);
	DELAY(1000);
	WRITE_REG_1(sc, HIFN_UNLOCK_SECRET2, 0);
	DELAY(1000);

	/* 13 rounds, one per byte of the card_id key table. */
	for (i = 0; i <= 12; i++) {
		addr = hifn_next_signature(addr, offtbl[i] + 0x101);
		WRITE_REG_1(sc, HIFN_UNLOCK_SECRET2, addr);

		DELAY(1000);
	}

	/* Re-read the encryption level to see whether the unlock took. */
	WRITE_REG_0(sc, HIFN_0_PUCNFG, ramcfg | HIFN_PUCNFG_CHIPID);
	encl = READ_REG_0(sc, HIFN_0_PUSTAT) & HIFN_PUSTAT_CHIPENA;

#ifdef HIFN_DEBUG
	if (hifn_debug) {
		if (encl != HIFN_PUSTAT_ENA_1 && encl != HIFN_PUSTAT_ENA_2)
			device_printf(sc->sc_dev, "Engine is permanently "
				"locked until next system reset!\n");
		else
			device_printf(sc->sc_dev, "Engine enabled "
				"successfully!\n");
	}
#endif

report:
	/* Restore the configuration registers saved on entry. */
	WRITE_REG_0(sc, HIFN_0_PUCNFG, ramcfg);
	WRITE_REG_1(sc, HIFN_1_DMA_CNFG, dmacfg);

	switch (encl) {
	case HIFN_PUSTAT_ENA_1:
	case HIFN_PUSTAT_ENA_2:
		break;
	case HIFN_PUSTAT_ENA_0:
	default:
		device_printf(sc->sc_dev, "disabled");
		break;
	}

	return 0;
}
1151
1152/*
1153 * Give initial values to the registers listed in the "Register Space"
1154 * section of the HIFN Software Development reference manual.
1155 */
static void
hifn_init_pci_registers(struct hifn_softc *sc)
{
	/* write fixed values needed by the Initialization registers */
	WRITE_REG_0(sc, HIFN_0_PUCTRL, HIFN_PUCTRL_DMAENA);
	WRITE_REG_0(sc, HIFN_0_FIFOCNFG, HIFN_FIFOCNFG_THRESHOLD);
	WRITE_REG_0(sc, HIFN_0_PUIER, HIFN_PUIER_DSTOVER);

	/* write all 4 ring address registers */
	WRITE_REG_1(sc, HIFN_1_DMA_CRAR, sc->sc_dma_physaddr +
	    offsetof(struct hifn_dma, cmdr[0]));
	WRITE_REG_1(sc, HIFN_1_DMA_SRAR, sc->sc_dma_physaddr +
	    offsetof(struct hifn_dma, srcr[0]));
	WRITE_REG_1(sc, HIFN_1_DMA_DRAR, sc->sc_dma_physaddr +
	    offsetof(struct hifn_dma, dstr[0]));
	WRITE_REG_1(sc, HIFN_1_DMA_RRAR, sc->sc_dma_physaddr +
	    offsetof(struct hifn_dma, resr[0]));

	DELAY(2000);

	/*
	 * Write status register: disable all four DMA channels and
	 * acknowledge (write-1-to-clear) every pending status bit,
	 * including the 7811-only illegal-read/write indications.
	 */
	WRITE_REG_1(sc, HIFN_1_DMA_CSR,
	    HIFN_DMACSR_D_CTRL_DIS | HIFN_DMACSR_R_CTRL_DIS |
	    HIFN_DMACSR_S_CTRL_DIS | HIFN_DMACSR_C_CTRL_DIS |
	    HIFN_DMACSR_D_ABORT | HIFN_DMACSR_D_DONE | HIFN_DMACSR_D_LAST |
	    HIFN_DMACSR_D_WAIT | HIFN_DMACSR_D_OVER |
	    HIFN_DMACSR_R_ABORT | HIFN_DMACSR_R_DONE | HIFN_DMACSR_R_LAST |
	    HIFN_DMACSR_R_WAIT | HIFN_DMACSR_R_OVER |
	    HIFN_DMACSR_S_ABORT | HIFN_DMACSR_S_DONE | HIFN_DMACSR_S_LAST |
	    HIFN_DMACSR_S_WAIT |
	    HIFN_DMACSR_C_ABORT | HIFN_DMACSR_C_DONE | HIFN_DMACSR_C_LAST |
	    HIFN_DMACSR_C_WAIT |
	    HIFN_DMACSR_ENGINE |
	    ((sc->sc_flags & HIFN_HAS_PUBLIC) ?
		HIFN_DMACSR_PUBDONE : 0) |
	    ((sc->sc_flags & HIFN_IS_7811) ?
		HIFN_DMACSR_ILLW | HIFN_DMACSR_ILLR : 0));

	/* Pick the interrupt sources we care about. */
	sc->sc_d_busy = sc->sc_r_busy = sc->sc_s_busy = sc->sc_c_busy = 0;
	sc->sc_dmaier |= HIFN_DMAIER_R_DONE | HIFN_DMAIER_C_ABORT |
	    HIFN_DMAIER_D_OVER | HIFN_DMAIER_R_OVER |
	    HIFN_DMAIER_S_ABORT | HIFN_DMAIER_D_ABORT | HIFN_DMAIER_R_ABORT |
	    ((sc->sc_flags & HIFN_IS_7811) ?
		HIFN_DMAIER_ILLW | HIFN_DMAIER_ILLR : 0);
	sc->sc_dmaier &= ~HIFN_DMAIER_C_WAIT;
	WRITE_REG_1(sc, HIFN_1_DMA_IER, sc->sc_dmaier);


	if (sc->sc_flags & HIFN_IS_7956) {
		u_int32_t pll;

		WRITE_REG_0(sc, HIFN_0_PUCNFG, HIFN_PUCNFG_COMPSING |
		    HIFN_PUCNFG_TCALLPHASES |
		    HIFN_PUCNFG_TCDRVTOTEM | HIFN_PUCNFG_BUS32);

		/* turn off the clocks and insure bypass is set */
		pll = READ_REG_1(sc, HIFN_1_PLL);
		pll = (pll &~ (HIFN_PLL_PK_CLK_SEL | HIFN_PLL_PE_CLK_SEL))
		  | HIFN_PLL_BP | HIFN_PLL_MBSET;
		WRITE_REG_1(sc, HIFN_1_PLL, pll);
		DELAY(10*1000);		/* 10ms */

		/* change configuration */
		pll = (pll &~ HIFN_PLL_CONFIG) | sc->sc_pllconfig;
		WRITE_REG_1(sc, HIFN_1_PLL, pll);
		DELAY(10*1000);		/* 10ms */

		/* disable bypass */
		pll &= ~HIFN_PLL_BP;
		WRITE_REG_1(sc, HIFN_1_PLL, pll);
		/* enable clocks with new configuration */
		pll |= HIFN_PLL_PK_CLK_SEL | HIFN_PLL_PE_CLK_SEL;
		WRITE_REG_1(sc, HIFN_1_PLL, pll);
	} else {
		/* Non-795x parts: select SRAM or DRAM timing as probed. */
		WRITE_REG_0(sc, HIFN_0_PUCNFG, HIFN_PUCNFG_COMPSING |
		    HIFN_PUCNFG_DRFR_128 | HIFN_PUCNFG_TCALLPHASES |
		    HIFN_PUCNFG_TCDRVTOTEM | HIFN_PUCNFG_BUS32 |
		    (sc->sc_drammodel ? HIFN_PUCNFG_DRAM : HIFN_PUCNFG_SRAM));
	}

	/* Clear any destination-overrun status, then enable DMA polling. */
	WRITE_REG_0(sc, HIFN_0_PUISR, HIFN_PUISR_DSTOVER);
	WRITE_REG_1(sc, HIFN_1_DMA_CNFG, HIFN_DMACNFG_MSTRESET |
	    HIFN_DMACNFG_DMARESET | HIFN_DMACNFG_MODE | HIFN_DMACNFG_LAST |
	    ((HIFN_POLL_FREQUENCY << 16 ) & HIFN_DMACNFG_POLLFREQ) |
	    ((HIFN_POLL_SCALAR << 8) & HIFN_DMACNFG_POLLINVAL));
}
1242
1243/*
1244 * The maximum number of sessions supported by the card
1245 * is dependent on the amount of context ram, which
1246 * encryption algorithms are enabled, and how compression
1247 * is configured.  This should be configured before this
1248 * routine is called.
1249 */
1250static void
1251hifn_sessions(struct hifn_softc *sc)
1252{
1253	u_int32_t pucnfg;
1254	int ctxsize;
1255
1256	pucnfg = READ_REG_0(sc, HIFN_0_PUCNFG);
1257
1258	if (pucnfg & HIFN_PUCNFG_COMPSING) {
1259		if (pucnfg & HIFN_PUCNFG_ENCCNFG)
1260			ctxsize = 128;
1261		else
1262			ctxsize = 512;
1263		/*
1264		 * 7955/7956 has internal context memory of 32K
1265		 */
1266		if (sc->sc_flags & HIFN_IS_7956)
1267			sc->sc_maxses = 32768 / ctxsize;
1268		else
1269			sc->sc_maxses = 1 +
1270			    ((sc->sc_ramsize - 32768) / ctxsize);
1271	} else
1272		sc->sc_maxses = sc->sc_ramsize / 16384;
1273
1274	if (sc->sc_maxses > 2048)
1275		sc->sc_maxses = 2048;
1276}
1277
1278/*
1279 * Determine ram type (sram or dram).  Board should be just out of a reset
1280 * state when this is called.
1281 */
1282static int
1283hifn_ramtype(struct hifn_softc *sc)
1284{
1285	u_int8_t data[8], dataexpect[8];
1286	int i;
1287
1288	for (i = 0; i < sizeof(data); i++)
1289		data[i] = dataexpect[i] = 0x55;
1290	if (hifn_writeramaddr(sc, 0, data))
1291		return (-1);
1292	if (hifn_readramaddr(sc, 0, data))
1293		return (-1);
1294	if (bcmp(data, dataexpect, sizeof(data)) != 0) {
1295		sc->sc_drammodel = 1;
1296		return (0);
1297	}
1298
1299	for (i = 0; i < sizeof(data); i++)
1300		data[i] = dataexpect[i] = 0xaa;
1301	if (hifn_writeramaddr(sc, 0, data))
1302		return (-1);
1303	if (hifn_readramaddr(sc, 0, data))
1304		return (-1);
1305	if (bcmp(data, dataexpect, sizeof(data)) != 0) {
1306		sc->sc_drammodel = 1;
1307		return (0);
1308	}
1309
1310	return (0);
1311}
1312
1313#define	HIFN_SRAM_MAX		(32 << 20)
1314#define	HIFN_SRAM_STEP_SIZE	16384
1315#define	HIFN_SRAM_GRANULARITY	(HIFN_SRAM_MAX / HIFN_SRAM_STEP_SIZE)
1316
1317static int
1318hifn_sramsize(struct hifn_softc *sc)
1319{
1320	u_int32_t a;
1321	u_int8_t data[8];
1322	u_int8_t dataexpect[sizeof(data)];
1323	int32_t i;
1324
1325	for (i = 0; i < sizeof(data); i++)
1326		data[i] = dataexpect[i] = i ^ 0x5a;
1327
1328	for (i = HIFN_SRAM_GRANULARITY - 1; i >= 0; i--) {
1329		a = i * HIFN_SRAM_STEP_SIZE;
1330		bcopy(&i, data, sizeof(i));
1331		hifn_writeramaddr(sc, a, data);
1332	}
1333
1334	for (i = 0; i < HIFN_SRAM_GRANULARITY; i++) {
1335		a = i * HIFN_SRAM_STEP_SIZE;
1336		bcopy(&i, dataexpect, sizeof(i));
1337		if (hifn_readramaddr(sc, a, data) < 0)
1338			return (0);
1339		if (bcmp(data, dataexpect, sizeof(data)) != 0)
1340			return (0);
1341		sc->sc_ramsize = a + HIFN_SRAM_STEP_SIZE;
1342	}
1343
1344	return (0);
1345}
1346
1347/*
1348 * XXX For dram boards, one should really try all of the
1349 * HIFN_PUCNFG_DSZ_*'s.  This just assumes that PUCNFG
1350 * is already set up correctly.
1351 */
1352static int
1353hifn_dramsize(struct hifn_softc *sc)
1354{
1355	u_int32_t cnfg;
1356
1357	if (sc->sc_flags & HIFN_IS_7956) {
1358		/*
1359		 * 7955/7956 have a fixed internal ram of only 32K.
1360		 */
1361		sc->sc_ramsize = 32768;
1362	} else {
1363		cnfg = READ_REG_0(sc, HIFN_0_PUCNFG) &
1364		    HIFN_PUCNFG_DRAMMASK;
1365		sc->sc_ramsize = 1 << ((cnfg >> 13) + 18);
1366	}
1367	return (0);
1368}
1369
/*
 * Reserve one slot in each of the four descriptor rings (command,
 * source, destination, result), returning the chosen indices through
 * the out parameters.  When a ring's write index has reached the end,
 * the trailing jump descriptor is armed and the index wraps to zero.
 *
 * NOTE(review): appears to assume the caller has already verified
 * ring space is available — confirm against callers.
 */
static void
hifn_alloc_slot(struct hifn_softc *sc, int *cmdp, int *srcp, int *dstp, int *resp)
{
	struct hifn_dma *dma = sc->sc_dma;

	/* command ring */
	if (sc->sc_cmdi == HIFN_D_CMD_RSIZE) {
		sc->sc_cmdi = 0;
		dma->cmdr[HIFN_D_CMD_RSIZE].l = htole32(HIFN_D_VALID |
		    HIFN_D_JUMP | HIFN_D_MASKDONEIRQ);
		HIFN_CMDR_SYNC(sc, HIFN_D_CMD_RSIZE,
		    BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
	}
	*cmdp = sc->sc_cmdi++;
	sc->sc_cmdk = sc->sc_cmdi;

	/* source ring */
	if (sc->sc_srci == HIFN_D_SRC_RSIZE) {
		sc->sc_srci = 0;
		dma->srcr[HIFN_D_SRC_RSIZE].l = htole32(HIFN_D_VALID |
		    HIFN_D_JUMP | HIFN_D_MASKDONEIRQ);
		HIFN_SRCR_SYNC(sc, HIFN_D_SRC_RSIZE,
		    BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
	}
	*srcp = sc->sc_srci++;
	sc->sc_srck = sc->sc_srci;

	/* destination ring */
	if (sc->sc_dsti == HIFN_D_DST_RSIZE) {
		sc->sc_dsti = 0;
		dma->dstr[HIFN_D_DST_RSIZE].l = htole32(HIFN_D_VALID |
		    HIFN_D_JUMP | HIFN_D_MASKDONEIRQ);
		HIFN_DSTR_SYNC(sc, HIFN_D_DST_RSIZE,
		    BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
	}
	*dstp = sc->sc_dsti++;
	sc->sc_dstk = sc->sc_dsti;

	/* result ring */
	if (sc->sc_resi == HIFN_D_RES_RSIZE) {
		sc->sc_resi = 0;
		dma->resr[HIFN_D_RES_RSIZE].l = htole32(HIFN_D_VALID |
		    HIFN_D_JUMP | HIFN_D_MASKDONEIRQ);
		HIFN_RESR_SYNC(sc, HIFN_D_RES_RSIZE,
		    BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
	}
	*resp = sc->sc_resi++;
	sc->sc_resk = sc->sc_resi;
}
1415
1416static int
1417hifn_writeramaddr(struct hifn_softc *sc, int addr, u_int8_t *data)
1418{
1419	struct hifn_dma *dma = sc->sc_dma;
1420	hifn_base_command_t wc;
1421	const u_int32_t masks = HIFN_D_VALID | HIFN_D_LAST | HIFN_D_MASKDONEIRQ;
1422	int r, cmdi, resi, srci, dsti;
1423
1424	wc.masks = htole16(3 << 13);
1425	wc.session_num = htole16(addr >> 14);
1426	wc.total_source_count = htole16(8);
1427	wc.total_dest_count = htole16(addr & 0x3fff);
1428
1429	hifn_alloc_slot(sc, &cmdi, &srci, &dsti, &resi);
1430
1431	WRITE_REG_1(sc, HIFN_1_DMA_CSR,
1432	    HIFN_DMACSR_C_CTRL_ENA | HIFN_DMACSR_S_CTRL_ENA |
1433	    HIFN_DMACSR_D_CTRL_ENA | HIFN_DMACSR_R_CTRL_ENA);
1434
1435	/* build write command */
1436	bzero(dma->command_bufs[cmdi], HIFN_MAX_COMMAND);
1437	*(hifn_base_command_t *)dma->command_bufs[cmdi] = wc;
1438	bcopy(data, &dma->test_src, sizeof(dma->test_src));
1439
1440	dma->srcr[srci].p = htole32(sc->sc_dma_physaddr
1441	    + offsetof(struct hifn_dma, test_src));
1442	dma->dstr[dsti].p = htole32(sc->sc_dma_physaddr
1443	    + offsetof(struct hifn_dma, test_dst));
1444
1445	dma->cmdr[cmdi].l = htole32(16 | masks);
1446	dma->srcr[srci].l = htole32(8 | masks);
1447	dma->dstr[dsti].l = htole32(4 | masks);
1448	dma->resr[resi].l = htole32(4 | masks);
1449
1450	bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap,
1451	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1452
1453	for (r = 10000; r >= 0; r--) {
1454		DELAY(10);
1455		bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap,
1456		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1457		if ((dma->resr[resi].l & htole32(HIFN_D_VALID)) == 0)
1458			break;
1459		bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap,
1460		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1461	}
1462	if (r == 0) {
1463		device_printf(sc->sc_dev, "writeramaddr -- "
1464		    "result[%d](addr %d) still valid\n", resi, addr);
1465		r = -1;
1466		return (-1);
1467	} else
1468		r = 0;
1469
1470	WRITE_REG_1(sc, HIFN_1_DMA_CSR,
1471	    HIFN_DMACSR_C_CTRL_DIS | HIFN_DMACSR_S_CTRL_DIS |
1472	    HIFN_DMACSR_D_CTRL_DIS | HIFN_DMACSR_R_CTRL_DIS);
1473
1474	return (r);
1475}
1476
1477static int
1478hifn_readramaddr(struct hifn_softc *sc, int addr, u_int8_t *data)
1479{
1480	struct hifn_dma *dma = sc->sc_dma;
1481	hifn_base_command_t rc;
1482	const u_int32_t masks = HIFN_D_VALID | HIFN_D_LAST | HIFN_D_MASKDONEIRQ;
1483	int r, cmdi, srci, dsti, resi;
1484
1485	rc.masks = htole16(2 << 13);
1486	rc.session_num = htole16(addr >> 14);
1487	rc.total_source_count = htole16(addr & 0x3fff);
1488	rc.total_dest_count = htole16(8);
1489
1490	hifn_alloc_slot(sc, &cmdi, &srci, &dsti, &resi);
1491
1492	WRITE_REG_1(sc, HIFN_1_DMA_CSR,
1493	    HIFN_DMACSR_C_CTRL_ENA | HIFN_DMACSR_S_CTRL_ENA |
1494	    HIFN_DMACSR_D_CTRL_ENA | HIFN_DMACSR_R_CTRL_ENA);
1495
1496	bzero(dma->command_bufs[cmdi], HIFN_MAX_COMMAND);
1497	*(hifn_base_command_t *)dma->command_bufs[cmdi] = rc;
1498
1499	dma->srcr[srci].p = htole32(sc->sc_dma_physaddr +
1500	    offsetof(struct hifn_dma, test_src));
1501	dma->test_src = 0;
1502	dma->dstr[dsti].p =  htole32(sc->sc_dma_physaddr +
1503	    offsetof(struct hifn_dma, test_dst));
1504	dma->test_dst = 0;
1505	dma->cmdr[cmdi].l = htole32(8 | masks);
1506	dma->srcr[srci].l = htole32(8 | masks);
1507	dma->dstr[dsti].l = htole32(8 | masks);
1508	dma->resr[resi].l = htole32(HIFN_MAX_RESULT | masks);
1509
1510	bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap,
1511	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1512
1513	for (r = 10000; r >= 0; r--) {
1514		DELAY(10);
1515		bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap,
1516		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1517		if ((dma->resr[resi].l & htole32(HIFN_D_VALID)) == 0)
1518			break;
1519		bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap,
1520		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1521	}
1522	if (r == 0) {
1523		device_printf(sc->sc_dev, "readramaddr -- "
1524		    "result[%d](addr %d) still valid\n", resi, addr);
1525		r = -1;
1526	} else {
1527		r = 0;
1528		bcopy(&dma->test_dst, data, sizeof(dma->test_dst));
1529	}
1530
1531	WRITE_REG_1(sc, HIFN_1_DMA_CSR,
1532	    HIFN_DMACSR_C_CTRL_DIS | HIFN_DMACSR_S_CTRL_DIS |
1533	    HIFN_DMACSR_D_CTRL_DIS | HIFN_DMACSR_R_CTRL_DIS);
1534
1535	return (r);
1536}
1537
1538/*
1539 * Initialize the descriptor rings.
1540 */
1541static void
1542hifn_init_dma(struct hifn_softc *sc)
1543{
1544	struct hifn_dma *dma = sc->sc_dma;
1545	int i;
1546
1547	hifn_set_retry(sc);
1548
1549	/* initialize static pointer values */
1550	for (i = 0; i < HIFN_D_CMD_RSIZE; i++)
1551		dma->cmdr[i].p = htole32(sc->sc_dma_physaddr +
1552		    offsetof(struct hifn_dma, command_bufs[i][0]));
1553	for (i = 0; i < HIFN_D_RES_RSIZE; i++)
1554		dma->resr[i].p = htole32(sc->sc_dma_physaddr +
1555		    offsetof(struct hifn_dma, result_bufs[i][0]));
1556
1557	dma->cmdr[HIFN_D_CMD_RSIZE].p =
1558	    htole32(sc->sc_dma_physaddr + offsetof(struct hifn_dma, cmdr[0]));
1559	dma->srcr[HIFN_D_SRC_RSIZE].p =
1560	    htole32(sc->sc_dma_physaddr + offsetof(struct hifn_dma, srcr[0]));
1561	dma->dstr[HIFN_D_DST_RSIZE].p =
1562	    htole32(sc->sc_dma_physaddr + offsetof(struct hifn_dma, dstr[0]));
1563	dma->resr[HIFN_D_RES_RSIZE].p =
1564	    htole32(sc->sc_dma_physaddr + offsetof(struct hifn_dma, resr[0]));
1565
1566	sc->sc_cmdu = sc->sc_srcu = sc->sc_dstu = sc->sc_resu = 0;
1567	sc->sc_cmdi = sc->sc_srci = sc->sc_dsti = sc->sc_resi = 0;
1568	sc->sc_cmdk = sc->sc_srck = sc->sc_dstk = sc->sc_resk = 0;
1569}
1570
1571/*
1572 * Writes out the raw command buffer space.  Returns the
1573 * command buffer size.
1574 */
static u_int
hifn_write_command(struct hifn_command *cmd, u_int8_t *buf)
{
	u_int8_t *buf_pos;		/* write cursor into buf */
	hifn_base_command_t *base_cmd;
	hifn_mac_command_t *mac_cmd;
	hifn_crypt_command_t *cry_cmd;
	int using_mac, using_crypt, len, ivlen;
	u_int32_t dlen, slen;

	buf_pos = buf;
	using_mac = cmd->base_masks & HIFN_BASE_CMD_MAC;
	using_crypt = cmd->base_masks & HIFN_BASE_CMD_CRYPT;

	/*
	 * Base command: total source/destination lengths are split into
	 * a 16-bit low part and a high nibble packed into session_num.
	 */
	base_cmd = (hifn_base_command_t *)buf_pos;
	base_cmd->masks = htole16(cmd->base_masks);
	slen = cmd->src_mapsize;
	if (cmd->sloplen)
		/* the slop tail is replaced by one 32-bit slop word */
		dlen = cmd->dst_mapsize - cmd->sloplen + sizeof(u_int32_t);
	else
		dlen = cmd->dst_mapsize;
	base_cmd->total_source_count = htole16(slen & HIFN_BASE_CMD_LENMASK_LO);
	base_cmd->total_dest_count = htole16(dlen & HIFN_BASE_CMD_LENMASK_LO);
	dlen >>= 16;
	slen >>= 16;
	base_cmd->session_num = htole16(
	    ((slen << HIFN_BASE_CMD_SRCLEN_S) & HIFN_BASE_CMD_SRCLEN_M) |
	    ((dlen << HIFN_BASE_CMD_DSTLEN_S) & HIFN_BASE_CMD_DSTLEN_M));
	buf_pos += sizeof(hifn_base_command_t);

	/* Optional MAC sub-command, with its own split length field. */
	if (using_mac) {
		mac_cmd = (hifn_mac_command_t *)buf_pos;
		dlen = cmd->maccrd->crd_len;
		mac_cmd->source_count = htole16(dlen & 0xffff);
		dlen >>= 16;
		mac_cmd->masks = htole16(cmd->mac_masks |
		    ((dlen << HIFN_MAC_CMD_SRCLEN_S) & HIFN_MAC_CMD_SRCLEN_M));
		mac_cmd->header_skip = htole16(cmd->maccrd->crd_skip);
		mac_cmd->reserved = 0;
		buf_pos += sizeof(hifn_mac_command_t);
	}

	/* Optional crypt sub-command, same layout as the MAC one. */
	if (using_crypt) {
		cry_cmd = (hifn_crypt_command_t *)buf_pos;
		dlen = cmd->enccrd->crd_len;
		cry_cmd->source_count = htole16(dlen & 0xffff);
		dlen >>= 16;
		cry_cmd->masks = htole16(cmd->cry_masks |
		    ((dlen << HIFN_CRYPT_CMD_SRCLEN_S) & HIFN_CRYPT_CMD_SRCLEN_M));
		cry_cmd->header_skip = htole16(cmd->enccrd->crd_skip);
		cry_cmd->reserved = 0;
		buf_pos += sizeof(hifn_crypt_command_t);
	}

	/* New MAC key material, if the session installs one. */
	if (using_mac && cmd->mac_masks & HIFN_MAC_CMD_NEW_KEY) {
		bcopy(cmd->mac, buf_pos, HIFN_MAC_KEY_LENGTH);
		buf_pos += HIFN_MAC_KEY_LENGTH;
	}

	/* New cipher key material; the wire length depends on the algorithm. */
	if (using_crypt && cmd->cry_masks & HIFN_CRYPT_CMD_NEW_KEY) {
		switch (cmd->cry_masks & HIFN_CRYPT_CMD_ALG_MASK) {
		case HIFN_CRYPT_CMD_ALG_3DES:
			bcopy(cmd->ck, buf_pos, HIFN_3DES_KEY_LENGTH);
			buf_pos += HIFN_3DES_KEY_LENGTH;
			break;
		case HIFN_CRYPT_CMD_ALG_DES:
			bcopy(cmd->ck, buf_pos, HIFN_DES_KEY_LENGTH);
			buf_pos += HIFN_DES_KEY_LENGTH;
			break;
		case HIFN_CRYPT_CMD_ALG_RC4:
			/* RC4 wants 256 key bytes: repeat the key to fill. */
			len = 256;
			do {
				int clen;

				clen = MIN(cmd->cklen, len);
				bcopy(cmd->ck, buf_pos, clen);
				len -= clen;
				buf_pos += clen;
			} while (len > 0);
			/* followed by a 4-byte zero pad */
			bzero(buf_pos, 4);
			buf_pos += 4;
			break;
		case HIFN_CRYPT_CMD_ALG_AES:
			/*
			 * AES keys are variable 128, 192 and
			 * 256 bits (16, 24 and 32 bytes).
			 */
			bcopy(cmd->ck, buf_pos, cmd->cklen);
			buf_pos += cmd->cklen;
			break;
		}
	}

	/* Explicit IV; AES uses a longer IV than the DES family. */
	if (using_crypt && cmd->cry_masks & HIFN_CRYPT_CMD_NEW_IV) {
		switch (cmd->cry_masks & HIFN_CRYPT_CMD_ALG_MASK) {
		case HIFN_CRYPT_CMD_ALG_AES:
			ivlen = HIFN_AES_IV_LENGTH;
			break;
		default:
			ivlen = HIFN_IV_LENGTH;
			break;
		}
		bcopy(cmd->iv, buf_pos, ivlen);
		buf_pos += ivlen;
	}

	/* Neither MAC nor crypt: pad the command with 8 zero bytes. */
	if ((cmd->base_masks & (HIFN_BASE_CMD_MAC|HIFN_BASE_CMD_CRYPT)) == 0) {
		bzero(buf_pos, 8);
		buf_pos += 8;
	}

	return (buf_pos - buf);
}
1688
1689static int
1690hifn_dmamap_aligned(struct hifn_operand *op)
1691{
1692	int i;
1693
1694	for (i = 0; i < op->nsegs; i++) {
1695		if (op->segs[i].ds_addr & 3)
1696			return (0);
1697		if ((i != (op->nsegs - 1)) && (op->segs[i].ds_len & 3))
1698			return (0);
1699	}
1700	return (1);
1701}
1702
1703static __inline int
1704hifn_dmamap_dstwrap(struct hifn_softc *sc, int idx)
1705{
1706	struct hifn_dma *dma = sc->sc_dma;
1707
1708	if (++idx == HIFN_D_DST_RSIZE) {
1709		dma->dstr[idx].l = htole32(HIFN_D_VALID | HIFN_D_JUMP |
1710		    HIFN_D_MASKDONEIRQ);
1711		HIFN_DSTR_SYNC(sc, idx,
1712		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1713		idx = 0;
1714	}
1715	return (idx);
1716}
1717
/*
 * Fill destination-ring descriptors for the command's dst operand,
 * starting at the current write index.  When the transfer length is
 * not a multiple of 4 (cmd->sloplen != 0), the final partial word is
 * redirected into the per-command slop area so the hardware always
 * writes whole 32-bit words.  Returns the updated ring index.
 */
static int
hifn_dmamap_load_dst(struct hifn_softc *sc, struct hifn_command *cmd)
{
	struct hifn_dma *dma = sc->sc_dma;
	struct hifn_operand *dst = &cmd->dst;
	u_int32_t p, l;
	int idx, used = 0, i;

	/* All segments except the last map straight through. */
	idx = sc->sc_dsti;
	for (i = 0; i < dst->nsegs - 1; i++) {
		dma->dstr[idx].p = htole32(dst->segs[i].ds_addr);
		dma->dstr[idx].l = htole32(HIFN_D_VALID |
		    HIFN_D_MASKDONEIRQ | dst->segs[i].ds_len);
		HIFN_DSTR_SYNC(sc, idx,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
		used++;

		idx = hifn_dmamap_dstwrap(sc, idx);
	}

	if (cmd->sloplen == 0) {
		/* aligned: the last segment is also the LAST descriptor */
		p = dst->segs[i].ds_addr;
		l = HIFN_D_VALID | HIFN_D_MASKDONEIRQ | HIFN_D_LAST |
		    dst->segs[i].ds_len;
	} else {
		/* unaligned: the LAST descriptor targets the slop word */
		p = sc->sc_dma_physaddr +
		    offsetof(struct hifn_dma, slop[cmd->slopidx]);
		l = HIFN_D_VALID | HIFN_D_MASKDONEIRQ | HIFN_D_LAST |
		    sizeof(u_int32_t);

		/* aligned prefix of the final segment, if any */
		if ((dst->segs[i].ds_len - cmd->sloplen) != 0) {
			dma->dstr[idx].p = htole32(dst->segs[i].ds_addr);
			dma->dstr[idx].l = htole32(HIFN_D_VALID |
			    HIFN_D_MASKDONEIRQ |
			    (dst->segs[i].ds_len - cmd->sloplen));
			HIFN_DSTR_SYNC(sc, idx,
			    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
			used++;

			idx = hifn_dmamap_dstwrap(sc, idx);
		}
	}
	/* Emit the terminating descriptor chosen above. */
	dma->dstr[idx].p = htole32(p);
	dma->dstr[idx].l = htole32(l);
	HIFN_DSTR_SYNC(sc, idx, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	used++;

	idx = hifn_dmamap_dstwrap(sc, idx);

	sc->sc_dsti = idx;
	sc->sc_dstu += used;
	return (idx);
}
1771
1772static __inline int
1773hifn_dmamap_srcwrap(struct hifn_softc *sc, int idx)
1774{
1775	struct hifn_dma *dma = sc->sc_dma;
1776
1777	if (++idx == HIFN_D_SRC_RSIZE) {
1778		dma->srcr[idx].l = htole32(HIFN_D_VALID |
1779		    HIFN_D_JUMP | HIFN_D_MASKDONEIRQ);
1780		HIFN_SRCR_SYNC(sc, HIFN_D_SRC_RSIZE,
1781		    BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
1782		idx = 0;
1783	}
1784	return (idx);
1785}
1786
1787static int
1788hifn_dmamap_load_src(struct hifn_softc *sc, struct hifn_command *cmd)
1789{
1790	struct hifn_dma *dma = sc->sc_dma;
1791	struct hifn_operand *src = &cmd->src;
1792	int idx, i;
1793	u_int32_t last = 0;
1794
1795	idx = sc->sc_srci;
1796	for (i = 0; i < src->nsegs; i++) {
1797		if (i == src->nsegs - 1)
1798			last = HIFN_D_LAST;
1799
1800		dma->srcr[idx].p = htole32(src->segs[i].ds_addr);
1801		dma->srcr[idx].l = htole32(src->segs[i].ds_len |
1802		    HIFN_D_VALID | HIFN_D_MASKDONEIRQ | last);
1803		HIFN_SRCR_SYNC(sc, idx,
1804		    BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
1805
1806		idx = hifn_dmamap_srcwrap(sc, idx);
1807	}
1808	sc->sc_srci = idx;
1809	sc->sc_srcu += src->nsegs;
1810	return (idx);
1811}
1812
1813static void
1814hifn_op_cb(void* arg, bus_dma_segment_t *seg, int nsegs, bus_size_t mapsize, int error)
1815{
1816	struct hifn_operand *op = arg;
1817
1818	KASSERT(nsegs <= MAX_SCATTER,
1819		("hifn_op_cb: too many DMA segments (%u > %u) "
1820		 "returned when mapping operand", nsegs, MAX_SCATTER));
1821	op->mapsize = mapsize;
1822	op->nsegs = nsegs;
1823	bcopy(seg, op->segs, nsegs * sizeof (seg[0]));
1824}
1825
1826static int
1827hifn_crypto(
1828	struct hifn_softc *sc,
1829	struct hifn_command *cmd,
1830	struct cryptop *crp,
1831	int hint)
1832{
1833	struct	hifn_dma *dma = sc->sc_dma;
1834	u_int32_t cmdlen, csr;
1835	int cmdi, resi, err = 0;
1836
1837	/*
1838	 * need 1 cmd, and 1 res
1839	 *
1840	 * NB: check this first since it's easy.
1841	 */
1842	HIFN_LOCK(sc);
1843	if ((sc->sc_cmdu + 1) > HIFN_D_CMD_RSIZE ||
1844	    (sc->sc_resu + 1) > HIFN_D_RES_RSIZE) {
1845#ifdef HIFN_DEBUG
1846		if (hifn_debug) {
1847			device_printf(sc->sc_dev,
1848				"cmd/result exhaustion, cmdu %u resu %u\n",
1849				sc->sc_cmdu, sc->sc_resu);
1850		}
1851#endif
1852		hifnstats.hst_nomem_cr++;
1853		HIFN_UNLOCK(sc);
1854		return (ERESTART);
1855	}
1856
1857	if (bus_dmamap_create(sc->sc_dmat, BUS_DMA_NOWAIT, &cmd->src_map)) {
1858		hifnstats.hst_nomem_map++;
1859		HIFN_UNLOCK(sc);
1860		return (ENOMEM);
1861	}
1862
1863	if (crp->crp_flags & CRYPTO_F_IMBUF) {
1864		if (bus_dmamap_load_mbuf(sc->sc_dmat, cmd->src_map,
1865		    cmd->src_m, hifn_op_cb, &cmd->src, BUS_DMA_NOWAIT)) {
1866			hifnstats.hst_nomem_load++;
1867			err = ENOMEM;
1868			goto err_srcmap1;
1869		}
1870	} else if (crp->crp_flags & CRYPTO_F_IOV) {
1871		if (bus_dmamap_load_uio(sc->sc_dmat, cmd->src_map,
1872		    cmd->src_io, hifn_op_cb, &cmd->src, BUS_DMA_NOWAIT)) {
1873			hifnstats.hst_nomem_load++;
1874			err = ENOMEM;
1875			goto err_srcmap1;
1876		}
1877	} else {
1878		err = EINVAL;
1879		goto err_srcmap1;
1880	}
1881
1882	if (hifn_dmamap_aligned(&cmd->src)) {
1883		cmd->sloplen = cmd->src_mapsize & 3;
1884		cmd->dst = cmd->src;
1885	} else {
1886		if (crp->crp_flags & CRYPTO_F_IOV) {
1887			err = EINVAL;
1888			goto err_srcmap;
1889		} else if (crp->crp_flags & CRYPTO_F_IMBUF) {
1890			int totlen, len;
1891			struct mbuf *m, *m0, *mlast;
1892
1893			KASSERT(cmd->dst_m == cmd->src_m,
1894				("hifn_crypto: dst_m initialized improperly"));
1895			hifnstats.hst_unaligned++;
1896			/*
1897			 * Source is not aligned on a longword boundary.
1898			 * Copy the data to insure alignment.  If we fail
1899			 * to allocate mbufs or clusters while doing this
1900			 * we return ERESTART so the operation is requeued
1901			 * at the crypto later, but only if there are
1902			 * ops already posted to the hardware; otherwise we
1903			 * have no guarantee that we'll be re-entered.
1904			 */
1905			totlen = cmd->src_mapsize;
1906			if (cmd->src_m->m_flags & M_PKTHDR) {
1907				len = MHLEN;
1908				MGETHDR(m0, M_DONTWAIT, MT_DATA);
1909				if (m0 && !m_dup_pkthdr(m0, cmd->src_m, M_DONTWAIT)) {
1910					m_free(m0);
1911					m0 = NULL;
1912				}
1913			} else {
1914				len = MLEN;
1915				MGET(m0, M_DONTWAIT, MT_DATA);
1916			}
1917			if (m0 == NULL) {
1918				hifnstats.hst_nomem_mbuf++;
1919				err = sc->sc_cmdu ? ERESTART : ENOMEM;
1920				goto err_srcmap;
1921			}
1922			if (totlen >= MINCLSIZE) {
1923				MCLGET(m0, M_DONTWAIT);
1924				if ((m0->m_flags & M_EXT) == 0) {
1925					hifnstats.hst_nomem_mcl++;
1926					err = sc->sc_cmdu ? ERESTART : ENOMEM;
1927					m_freem(m0);
1928					goto err_srcmap;
1929				}
1930				len = MCLBYTES;
1931			}
1932			totlen -= len;
1933			m0->m_pkthdr.len = m0->m_len = len;
1934			mlast = m0;
1935
1936			while (totlen > 0) {
1937				MGET(m, M_DONTWAIT, MT_DATA);
1938				if (m == NULL) {
1939					hifnstats.hst_nomem_mbuf++;
1940					err = sc->sc_cmdu ? ERESTART : ENOMEM;
1941					m_freem(m0);
1942					goto err_srcmap;
1943				}
1944				len = MLEN;
1945				if (totlen >= MINCLSIZE) {
1946					MCLGET(m, M_DONTWAIT);
1947					if ((m->m_flags & M_EXT) == 0) {
1948						hifnstats.hst_nomem_mcl++;
1949						err = sc->sc_cmdu ? ERESTART : ENOMEM;
1950						mlast->m_next = m;
1951						m_freem(m0);
1952						goto err_srcmap;
1953					}
1954					len = MCLBYTES;
1955				}
1956
1957				m->m_len = len;
1958				m0->m_pkthdr.len += len;
1959				totlen -= len;
1960
1961				mlast->m_next = m;
1962				mlast = m;
1963			}
1964			cmd->dst_m = m0;
1965		}
1966	}
1967
1968	if (cmd->dst_map == NULL) {
1969		if (bus_dmamap_create(sc->sc_dmat, BUS_DMA_NOWAIT, &cmd->dst_map)) {
1970			hifnstats.hst_nomem_map++;
1971			err = ENOMEM;
1972			goto err_srcmap;
1973		}
1974		if (crp->crp_flags & CRYPTO_F_IMBUF) {
1975			if (bus_dmamap_load_mbuf(sc->sc_dmat, cmd->dst_map,
1976			    cmd->dst_m, hifn_op_cb, &cmd->dst, BUS_DMA_NOWAIT)) {
1977				hifnstats.hst_nomem_map++;
1978				err = ENOMEM;
1979				goto err_dstmap1;
1980			}
1981		} else if (crp->crp_flags & CRYPTO_F_IOV) {
1982			if (bus_dmamap_load_uio(sc->sc_dmat, cmd->dst_map,
1983			    cmd->dst_io, hifn_op_cb, &cmd->dst, BUS_DMA_NOWAIT)) {
1984				hifnstats.hst_nomem_load++;
1985				err = ENOMEM;
1986				goto err_dstmap1;
1987			}
1988		}
1989	}
1990
1991#ifdef HIFN_DEBUG
1992	if (hifn_debug) {
1993		device_printf(sc->sc_dev,
1994		    "Entering cmd: stat %8x ien %8x u %d/%d/%d/%d n %d/%d\n",
1995		    READ_REG_1(sc, HIFN_1_DMA_CSR),
1996		    READ_REG_1(sc, HIFN_1_DMA_IER),
1997		    sc->sc_cmdu, sc->sc_srcu, sc->sc_dstu, sc->sc_resu,
1998		    cmd->src_nsegs, cmd->dst_nsegs);
1999	}
2000#endif
2001
2002	if (cmd->src_map == cmd->dst_map) {
2003		bus_dmamap_sync(sc->sc_dmat, cmd->src_map,
2004		    BUS_DMASYNC_PREWRITE|BUS_DMASYNC_PREREAD);
2005	} else {
2006		bus_dmamap_sync(sc->sc_dmat, cmd->src_map,
2007		    BUS_DMASYNC_PREWRITE);
2008		bus_dmamap_sync(sc->sc_dmat, cmd->dst_map,
2009		    BUS_DMASYNC_PREREAD);
2010	}
2011
2012	/*
2013	 * need N src, and N dst
2014	 */
2015	if ((sc->sc_srcu + cmd->src_nsegs) > HIFN_D_SRC_RSIZE ||
2016	    (sc->sc_dstu + cmd->dst_nsegs + 1) > HIFN_D_DST_RSIZE) {
2017#ifdef HIFN_DEBUG
2018		if (hifn_debug) {
2019			device_printf(sc->sc_dev,
2020				"src/dst exhaustion, srcu %u+%u dstu %u+%u\n",
2021				sc->sc_srcu, cmd->src_nsegs,
2022				sc->sc_dstu, cmd->dst_nsegs);
2023		}
2024#endif
2025		hifnstats.hst_nomem_sd++;
2026		err = ERESTART;
2027		goto err_dstmap;
2028	}
2029
2030	if (sc->sc_cmdi == HIFN_D_CMD_RSIZE) {
2031		sc->sc_cmdi = 0;
2032		dma->cmdr[HIFN_D_CMD_RSIZE].l = htole32(HIFN_D_VALID |
2033		    HIFN_D_JUMP | HIFN_D_MASKDONEIRQ);
2034		HIFN_CMDR_SYNC(sc, HIFN_D_CMD_RSIZE,
2035		    BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
2036	}
2037	cmdi = sc->sc_cmdi++;
2038	cmdlen = hifn_write_command(cmd, dma->command_bufs[cmdi]);
2039	HIFN_CMD_SYNC(sc, cmdi, BUS_DMASYNC_PREWRITE);
2040
2041	/* .p for command/result already set */
2042	dma->cmdr[cmdi].l = htole32(cmdlen | HIFN_D_VALID | HIFN_D_LAST |
2043	    HIFN_D_MASKDONEIRQ);
2044	HIFN_CMDR_SYNC(sc, cmdi,
2045	    BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
2046	sc->sc_cmdu++;
2047
2048	/*
2049	 * We don't worry about missing an interrupt (which a "command wait"
2050	 * interrupt salvages us from), unless there is more than one command
2051	 * in the queue.
2052	 */
2053	if (sc->sc_cmdu > 1) {
2054		sc->sc_dmaier |= HIFN_DMAIER_C_WAIT;
2055		WRITE_REG_1(sc, HIFN_1_DMA_IER, sc->sc_dmaier);
2056	}
2057
2058	hifnstats.hst_ipackets++;
2059	hifnstats.hst_ibytes += cmd->src_mapsize;
2060
2061	hifn_dmamap_load_src(sc, cmd);
2062
2063	/*
2064	 * Unlike other descriptors, we don't mask done interrupt from
2065	 * result descriptor.
2066	 */
2067#ifdef HIFN_DEBUG
2068	if (hifn_debug)
2069		printf("load res\n");
2070#endif
2071	if (sc->sc_resi == HIFN_D_RES_RSIZE) {
2072		sc->sc_resi = 0;
2073		dma->resr[HIFN_D_RES_RSIZE].l = htole32(HIFN_D_VALID |
2074		    HIFN_D_JUMP | HIFN_D_MASKDONEIRQ);
2075		HIFN_RESR_SYNC(sc, HIFN_D_RES_RSIZE,
2076		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2077	}
2078	resi = sc->sc_resi++;
2079	KASSERT(sc->sc_hifn_commands[resi] == NULL,
2080		("hifn_crypto: command slot %u busy", resi));
2081	sc->sc_hifn_commands[resi] = cmd;
2082	HIFN_RES_SYNC(sc, resi, BUS_DMASYNC_PREREAD);
2083	if ((hint & CRYPTO_HINT_MORE) && sc->sc_curbatch < hifn_maxbatch) {
2084		dma->resr[resi].l = htole32(HIFN_MAX_RESULT |
2085		    HIFN_D_VALID | HIFN_D_LAST | HIFN_D_MASKDONEIRQ);
2086		sc->sc_curbatch++;
2087		if (sc->sc_curbatch > hifnstats.hst_maxbatch)
2088			hifnstats.hst_maxbatch = sc->sc_curbatch;
2089		hifnstats.hst_totbatch++;
2090	} else {
2091		dma->resr[resi].l = htole32(HIFN_MAX_RESULT |
2092		    HIFN_D_VALID | HIFN_D_LAST);
2093		sc->sc_curbatch = 0;
2094	}
2095	HIFN_RESR_SYNC(sc, resi,
2096	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2097	sc->sc_resu++;
2098
2099	if (cmd->sloplen)
2100		cmd->slopidx = resi;
2101
2102	hifn_dmamap_load_dst(sc, cmd);
2103
2104	csr = 0;
2105	if (sc->sc_c_busy == 0) {
2106		csr |= HIFN_DMACSR_C_CTRL_ENA;
2107		sc->sc_c_busy = 1;
2108	}
2109	if (sc->sc_s_busy == 0) {
2110		csr |= HIFN_DMACSR_S_CTRL_ENA;
2111		sc->sc_s_busy = 1;
2112	}
2113	if (sc->sc_r_busy == 0) {
2114		csr |= HIFN_DMACSR_R_CTRL_ENA;
2115		sc->sc_r_busy = 1;
2116	}
2117	if (sc->sc_d_busy == 0) {
2118		csr |= HIFN_DMACSR_D_CTRL_ENA;
2119		sc->sc_d_busy = 1;
2120	}
2121	if (csr)
2122		WRITE_REG_1(sc, HIFN_1_DMA_CSR, csr);
2123
2124#ifdef HIFN_DEBUG
2125	if (hifn_debug) {
2126		device_printf(sc->sc_dev, "command: stat %8x ier %8x\n",
2127		    READ_REG_1(sc, HIFN_1_DMA_CSR),
2128		    READ_REG_1(sc, HIFN_1_DMA_IER));
2129	}
2130#endif
2131
2132	sc->sc_active = 5;
2133	HIFN_UNLOCK(sc);
2134	KASSERT(err == 0, ("hifn_crypto: success with error %u", err));
2135	return (err);		/* success */
2136
2137err_dstmap:
2138	if (cmd->src_map != cmd->dst_map)
2139		bus_dmamap_unload(sc->sc_dmat, cmd->dst_map);
2140err_dstmap1:
2141	if (cmd->src_map != cmd->dst_map)
2142		bus_dmamap_destroy(sc->sc_dmat, cmd->dst_map);
2143err_srcmap:
2144	if (crp->crp_flags & CRYPTO_F_IMBUF) {
2145		if (cmd->src_m != cmd->dst_m)
2146			m_freem(cmd->dst_m);
2147	}
2148	bus_dmamap_unload(sc->sc_dmat, cmd->src_map);
2149err_srcmap1:
2150	bus_dmamap_destroy(sc->sc_dmat, cmd->src_map);
2151	HIFN_UNLOCK(sc);
2152	return (err);
2153}
2154
2155static void
2156hifn_tick(void* vsc)
2157{
2158	struct hifn_softc *sc = vsc;
2159
2160	HIFN_LOCK(sc);
2161	if (sc->sc_active == 0) {
2162		u_int32_t r = 0;
2163
2164		if (sc->sc_cmdu == 0 && sc->sc_c_busy) {
2165			sc->sc_c_busy = 0;
2166			r |= HIFN_DMACSR_C_CTRL_DIS;
2167		}
2168		if (sc->sc_srcu == 0 && sc->sc_s_busy) {
2169			sc->sc_s_busy = 0;
2170			r |= HIFN_DMACSR_S_CTRL_DIS;
2171		}
2172		if (sc->sc_dstu == 0 && sc->sc_d_busy) {
2173			sc->sc_d_busy = 0;
2174			r |= HIFN_DMACSR_D_CTRL_DIS;
2175		}
2176		if (sc->sc_resu == 0 && sc->sc_r_busy) {
2177			sc->sc_r_busy = 0;
2178			r |= HIFN_DMACSR_R_CTRL_DIS;
2179		}
2180		if (r)
2181			WRITE_REG_1(sc, HIFN_1_DMA_CSR, r);
2182	} else
2183		sc->sc_active--;
2184	HIFN_UNLOCK(sc);
2185	callout_reset(&sc->sc_tickto, hz, hifn_tick, sc);
2186}
2187
/*
 * Interrupt service routine.  Acknowledges the DMA status bits we are
 * listening for, handles error conditions (overrun, illegal access,
 * engine abort), then reclaims completed descriptors from the result,
 * source, and command rings, invoking hifn_callback() for each
 * finished operation.
 */
static void
hifn_intr(void *arg)
{
	struct hifn_softc *sc = arg;
	struct hifn_dma *dma;
	u_int32_t dmacsr, restart;
	int i, u;

	dmacsr = READ_REG_1(sc, HIFN_1_DMA_CSR);

	/* Nothing in the DMA unit interrupted */
	if ((dmacsr & sc->sc_dmaier) == 0)
		return;

	HIFN_LOCK(sc);

	dma = sc->sc_dma;

#ifdef HIFN_DEBUG
	if (hifn_debug) {
		device_printf(sc->sc_dev,
		    "irq: stat %08x ien %08x damier %08x i %d/%d/%d/%d k %d/%d/%d/%d u %d/%d/%d/%d\n",
		    dmacsr, READ_REG_1(sc, HIFN_1_DMA_IER), sc->sc_dmaier,
		    sc->sc_cmdi, sc->sc_srci, sc->sc_dsti, sc->sc_resi,
		    sc->sc_cmdk, sc->sc_srck, sc->sc_dstk, sc->sc_resk,
		    sc->sc_cmdu, sc->sc_srcu, sc->sc_dstu, sc->sc_resu);
	}
#endif

	/* Acknowledge only the interrupt sources we are watching. */
	WRITE_REG_1(sc, HIFN_1_DMA_CSR, dmacsr & sc->sc_dmaier);

	/* Public-key unit completion is also reported through the DMA CSR. */
	if ((sc->sc_flags & HIFN_HAS_PUBLIC) &&
	    (dmacsr & HIFN_DMACSR_PUBDONE))
		WRITE_REG_1(sc, HIFN_1_PUB_STATUS,
		    READ_REG_1(sc, HIFN_1_PUB_STATUS) | HIFN_PUBSTS_DONE);

	restart = dmacsr & (HIFN_DMACSR_D_OVER | HIFN_DMACSR_R_OVER);
	if (restart)
		device_printf(sc->sc_dev, "overrun %x\n", dmacsr);

	if (sc->sc_flags & HIFN_IS_7811) {
		if (dmacsr & HIFN_DMACSR_ILLR)
			device_printf(sc->sc_dev, "illegal read\n");
		if (dmacsr & HIFN_DMACSR_ILLW)
			device_printf(sc->sc_dev, "illegal write\n");
	}

	/* A DMA-engine abort is fatal to in-flight work: reset the part. */
	restart = dmacsr & (HIFN_DMACSR_C_ABORT | HIFN_DMACSR_S_ABORT |
	    HIFN_DMACSR_D_ABORT | HIFN_DMACSR_R_ABORT);
	if (restart) {
		device_printf(sc->sc_dev, "abort, resetting.\n");
		hifnstats.hst_abort++;
		hifn_abort(sc);
		HIFN_UNLOCK(sc);
		return;
	}

	if ((dmacsr & HIFN_DMACSR_C_WAIT) && (sc->sc_cmdu == 0)) {
		/*
		 * If no slots to process and we receive a "waiting on
		 * command" interrupt, we disable the "waiting on command"
		 * (by clearing it).
		 */
		sc->sc_dmaier &= ~HIFN_DMAIER_C_WAIT;
		WRITE_REG_1(sc, HIFN_1_DMA_IER, sc->sc_dmaier);
	}

	/* clear the rings */
	i = sc->sc_resk; u = sc->sc_resu;
	while (u != 0) {
		HIFN_RESR_SYNC(sc, i,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
		/* Stop at the first descriptor the device still owns. */
		if (dma->resr[i].l & htole32(HIFN_D_VALID)) {
			HIFN_RESR_SYNC(sc, i,
			    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
			break;
		}

		/* Slot HIFN_D_RES_RSIZE is the jump descriptor, not a result. */
		if (i != HIFN_D_RES_RSIZE) {
			struct hifn_command *cmd;
			u_int8_t *macbuf = NULL;

			HIFN_RES_SYNC(sc, i, BUS_DMASYNC_POSTREAD);
			cmd = sc->sc_hifn_commands[i];
			KASSERT(cmd != NULL,
				("hifn_intr: null command slot %u", i));
			sc->sc_hifn_commands[i] = NULL;

			/* The digest lands 12 bytes into the result buffer. */
			if (cmd->base_masks & HIFN_BASE_CMD_MAC) {
				macbuf = dma->result_bufs[i];
				macbuf += 12;
			}

			hifn_callback(sc, cmd, macbuf);
			hifnstats.hst_opackets++;
			u--;
		}

		/* Ring is HIFN_D_RES_RSIZE entries plus the jump slot. */
		if (++i == (HIFN_D_RES_RSIZE + 1))
			i = 0;
	}
	sc->sc_resk = i; sc->sc_resu = u;

	i = sc->sc_srck; u = sc->sc_srcu;
	while (u != 0) {
		if (i == HIFN_D_SRC_RSIZE)
			i = 0;
		HIFN_SRCR_SYNC(sc, i,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
		if (dma->srcr[i].l & htole32(HIFN_D_VALID)) {
			HIFN_SRCR_SYNC(sc, i,
			    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
			break;
		}
		i++, u--;
	}
	sc->sc_srck = i; sc->sc_srcu = u;

	i = sc->sc_cmdk; u = sc->sc_cmdu;
	while (u != 0) {
		HIFN_CMDR_SYNC(sc, i,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
		if (dma->cmdr[i].l & htole32(HIFN_D_VALID)) {
			HIFN_CMDR_SYNC(sc, i,
			    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
			break;
		}
		/* Slot HIFN_D_CMD_RSIZE is the jump descriptor. */
		if (i != HIFN_D_CMD_RSIZE) {
			u--;
			HIFN_CMD_SYNC(sc, i, BUS_DMASYNC_POSTWRITE);
		}
		if (++i == (HIFN_D_CMD_RSIZE + 1))
			i = 0;
	}
	sc->sc_cmdk = i; sc->sc_cmdu = u;

	HIFN_UNLOCK(sc);

	if (sc->sc_needwakeup) {		/* XXX check high watermark */
		int wakeup = sc->sc_needwakeup & (CRYPTO_SYMQ|CRYPTO_ASYMQ);
#ifdef HIFN_DEBUG
		if (hifn_debug)
			device_printf(sc->sc_dev,
				"wakeup crypto (%x) u %d/%d/%d/%d\n",
				sc->sc_needwakeup,
				sc->sc_cmdu, sc->sc_srcu, sc->sc_dstu, sc->sc_resu);
#endif
		/* Let the framework resubmit requests it had queued. */
		sc->sc_needwakeup &= ~wakeup;
		crypto_unblock(sc->sc_cid, wakeup);
	}
}
2339
2340/*
2341 * Allocate a new 'session' and return an encoded session id.  'sidp'
2342 * contains our registration id, and should contain an encoded session
2343 * id on successful allocation.
2344 */
2345static int
2346hifn_newsession(device_t dev, u_int32_t *sidp, struct cryptoini *cri)
2347{
2348	struct hifn_softc *sc = device_get_softc(dev);
2349	struct cryptoini *c;
2350	int mac = 0, cry = 0, sesn;
2351	struct hifn_session *ses = NULL;
2352
2353	KASSERT(sc != NULL, ("hifn_newsession: null softc"));
2354	if (sidp == NULL || cri == NULL || sc == NULL)
2355		return (EINVAL);
2356
2357	HIFN_LOCK(sc);
2358	if (sc->sc_sessions == NULL) {
2359		ses = sc->sc_sessions = (struct hifn_session *)malloc(
2360		    sizeof(*ses), M_DEVBUF, M_NOWAIT);
2361		if (ses == NULL) {
2362			HIFN_UNLOCK(sc);
2363			return (ENOMEM);
2364		}
2365		sesn = 0;
2366		sc->sc_nsessions = 1;
2367	} else {
2368		for (sesn = 0; sesn < sc->sc_nsessions; sesn++) {
2369			if (!sc->sc_sessions[sesn].hs_used) {
2370				ses = &sc->sc_sessions[sesn];
2371				break;
2372			}
2373		}
2374
2375		if (ses == NULL) {
2376			sesn = sc->sc_nsessions;
2377			ses = (struct hifn_session *)malloc((sesn + 1) *
2378			    sizeof(*ses), M_DEVBUF, M_NOWAIT);
2379			if (ses == NULL) {
2380				HIFN_UNLOCK(sc);
2381				return (ENOMEM);
2382			}
2383			bcopy(sc->sc_sessions, ses, sesn * sizeof(*ses));
2384			bzero(sc->sc_sessions, sesn * sizeof(*ses));
2385			free(sc->sc_sessions, M_DEVBUF);
2386			sc->sc_sessions = ses;
2387			ses = &sc->sc_sessions[sesn];
2388			sc->sc_nsessions++;
2389		}
2390	}
2391	HIFN_UNLOCK(sc);
2392
2393	bzero(ses, sizeof(*ses));
2394	ses->hs_used = 1;
2395
2396	for (c = cri; c != NULL; c = c->cri_next) {
2397		switch (c->cri_alg) {
2398		case CRYPTO_MD5:
2399		case CRYPTO_SHA1:
2400		case CRYPTO_MD5_HMAC:
2401		case CRYPTO_SHA1_HMAC:
2402			if (mac)
2403				return (EINVAL);
2404			mac = 1;
2405			ses->hs_mlen = c->cri_mlen;
2406			if (ses->hs_mlen == 0) {
2407				switch (c->cri_alg) {
2408				case CRYPTO_MD5:
2409				case CRYPTO_MD5_HMAC:
2410					ses->hs_mlen = 16;
2411					break;
2412				case CRYPTO_SHA1:
2413				case CRYPTO_SHA1_HMAC:
2414					ses->hs_mlen = 20;
2415					break;
2416				}
2417			}
2418			break;
2419		case CRYPTO_DES_CBC:
2420		case CRYPTO_3DES_CBC:
2421		case CRYPTO_AES_CBC:
2422			/* XXX this may read fewer, does it matter? */
2423			read_random(ses->hs_iv,
2424				c->cri_alg == CRYPTO_AES_CBC ?
2425					HIFN_AES_IV_LENGTH : HIFN_IV_LENGTH);
2426			/*FALLTHROUGH*/
2427		case CRYPTO_ARC4:
2428			if (cry)
2429				return (EINVAL);
2430			cry = 1;
2431			break;
2432		default:
2433			return (EINVAL);
2434		}
2435	}
2436	if (mac == 0 && cry == 0)
2437		return (EINVAL);
2438
2439	*sidp = HIFN_SID(device_get_unit(sc->sc_dev), sesn);
2440
2441	return (0);
2442}
2443
2444/*
2445 * Deallocate a session.
2446 * XXX this routine should run a zero'd mac/encrypt key into context ram.
2447 * XXX to blow away any keys already stored there.
2448 */
2449static int
2450hifn_freesession(device_t dev, u_int64_t tid)
2451{
2452	struct hifn_softc *sc = device_get_softc(dev);
2453	int session, error;
2454	u_int32_t sid = CRYPTO_SESID2LID(tid);
2455
2456	KASSERT(sc != NULL, ("hifn_freesession: null softc"));
2457	if (sc == NULL)
2458		return (EINVAL);
2459
2460	HIFN_LOCK(sc);
2461	session = HIFN_SESSION(sid);
2462	if (session < sc->sc_nsessions) {
2463		bzero(&sc->sc_sessions[session], sizeof(struct hifn_session));
2464		error = 0;
2465	} else
2466		error = EINVAL;
2467	HIFN_UNLOCK(sc);
2468
2469	return (error);
2470}
2471
2472static int
2473hifn_process(device_t dev, struct cryptop *crp, int hint)
2474{
2475	struct hifn_softc *sc = device_get_softc(dev);
2476	struct hifn_command *cmd = NULL;
2477	int session, err, ivlen;
2478	struct cryptodesc *crd1, *crd2, *maccrd, *enccrd;
2479
2480	if (crp == NULL || crp->crp_callback == NULL) {
2481		hifnstats.hst_invalid++;
2482		return (EINVAL);
2483	}
2484	session = HIFN_SESSION(crp->crp_sid);
2485
2486	if (sc == NULL || session >= sc->sc_nsessions) {
2487		err = EINVAL;
2488		goto errout;
2489	}
2490
2491	cmd = malloc(sizeof(struct hifn_command), M_DEVBUF, M_NOWAIT | M_ZERO);
2492	if (cmd == NULL) {
2493		hifnstats.hst_nomem++;
2494		err = ENOMEM;
2495		goto errout;
2496	}
2497
2498	if (crp->crp_flags & CRYPTO_F_IMBUF) {
2499		cmd->src_m = (struct mbuf *)crp->crp_buf;
2500		cmd->dst_m = (struct mbuf *)crp->crp_buf;
2501	} else if (crp->crp_flags & CRYPTO_F_IOV) {
2502		cmd->src_io = (struct uio *)crp->crp_buf;
2503		cmd->dst_io = (struct uio *)crp->crp_buf;
2504	} else {
2505		err = EINVAL;
2506		goto errout;	/* XXX we don't handle contiguous buffers! */
2507	}
2508
2509	crd1 = crp->crp_desc;
2510	if (crd1 == NULL) {
2511		err = EINVAL;
2512		goto errout;
2513	}
2514	crd2 = crd1->crd_next;
2515
2516	if (crd2 == NULL) {
2517		if (crd1->crd_alg == CRYPTO_MD5_HMAC ||
2518		    crd1->crd_alg == CRYPTO_SHA1_HMAC ||
2519		    crd1->crd_alg == CRYPTO_SHA1 ||
2520		    crd1->crd_alg == CRYPTO_MD5) {
2521			maccrd = crd1;
2522			enccrd = NULL;
2523		} else if (crd1->crd_alg == CRYPTO_DES_CBC ||
2524		    crd1->crd_alg == CRYPTO_3DES_CBC ||
2525		    crd1->crd_alg == CRYPTO_AES_CBC ||
2526		    crd1->crd_alg == CRYPTO_ARC4) {
2527			if ((crd1->crd_flags & CRD_F_ENCRYPT) == 0)
2528				cmd->base_masks |= HIFN_BASE_CMD_DECODE;
2529			maccrd = NULL;
2530			enccrd = crd1;
2531		} else {
2532			err = EINVAL;
2533			goto errout;
2534		}
2535	} else {
2536		if ((crd1->crd_alg == CRYPTO_MD5_HMAC ||
2537                     crd1->crd_alg == CRYPTO_SHA1_HMAC ||
2538                     crd1->crd_alg == CRYPTO_MD5 ||
2539                     crd1->crd_alg == CRYPTO_SHA1) &&
2540		    (crd2->crd_alg == CRYPTO_DES_CBC ||
2541		     crd2->crd_alg == CRYPTO_3DES_CBC ||
2542		     crd2->crd_alg == CRYPTO_AES_CBC ||
2543		     crd2->crd_alg == CRYPTO_ARC4) &&
2544		    ((crd2->crd_flags & CRD_F_ENCRYPT) == 0)) {
2545			cmd->base_masks = HIFN_BASE_CMD_DECODE;
2546			maccrd = crd1;
2547			enccrd = crd2;
2548		} else if ((crd1->crd_alg == CRYPTO_DES_CBC ||
2549		     crd1->crd_alg == CRYPTO_ARC4 ||
2550		     crd1->crd_alg == CRYPTO_3DES_CBC ||
2551		     crd1->crd_alg == CRYPTO_AES_CBC) &&
2552		    (crd2->crd_alg == CRYPTO_MD5_HMAC ||
2553                     crd2->crd_alg == CRYPTO_SHA1_HMAC ||
2554                     crd2->crd_alg == CRYPTO_MD5 ||
2555                     crd2->crd_alg == CRYPTO_SHA1) &&
2556		    (crd1->crd_flags & CRD_F_ENCRYPT)) {
2557			enccrd = crd1;
2558			maccrd = crd2;
2559		} else {
2560			/*
2561			 * We cannot order the 7751 as requested
2562			 */
2563			err = EINVAL;
2564			goto errout;
2565		}
2566	}
2567
2568	if (enccrd) {
2569		cmd->enccrd = enccrd;
2570		cmd->base_masks |= HIFN_BASE_CMD_CRYPT;
2571		switch (enccrd->crd_alg) {
2572		case CRYPTO_ARC4:
2573			cmd->cry_masks |= HIFN_CRYPT_CMD_ALG_RC4;
2574			break;
2575		case CRYPTO_DES_CBC:
2576			cmd->cry_masks |= HIFN_CRYPT_CMD_ALG_DES |
2577			    HIFN_CRYPT_CMD_MODE_CBC |
2578			    HIFN_CRYPT_CMD_NEW_IV;
2579			break;
2580		case CRYPTO_3DES_CBC:
2581			cmd->cry_masks |= HIFN_CRYPT_CMD_ALG_3DES |
2582			    HIFN_CRYPT_CMD_MODE_CBC |
2583			    HIFN_CRYPT_CMD_NEW_IV;
2584			break;
2585		case CRYPTO_AES_CBC:
2586			cmd->cry_masks |= HIFN_CRYPT_CMD_ALG_AES |
2587			    HIFN_CRYPT_CMD_MODE_CBC |
2588			    HIFN_CRYPT_CMD_NEW_IV;
2589			break;
2590		default:
2591			err = EINVAL;
2592			goto errout;
2593		}
2594		if (enccrd->crd_alg != CRYPTO_ARC4) {
2595			ivlen = ((enccrd->crd_alg == CRYPTO_AES_CBC) ?
2596				HIFN_AES_IV_LENGTH : HIFN_IV_LENGTH);
2597			if (enccrd->crd_flags & CRD_F_ENCRYPT) {
2598				if (enccrd->crd_flags & CRD_F_IV_EXPLICIT)
2599					bcopy(enccrd->crd_iv, cmd->iv, ivlen);
2600				else
2601					bcopy(sc->sc_sessions[session].hs_iv,
2602					    cmd->iv, ivlen);
2603
2604				if ((enccrd->crd_flags & CRD_F_IV_PRESENT)
2605				    == 0) {
2606					crypto_copyback(crp->crp_flags,
2607					    crp->crp_buf, enccrd->crd_inject,
2608					    ivlen, cmd->iv);
2609				}
2610			} else {
2611				if (enccrd->crd_flags & CRD_F_IV_EXPLICIT)
2612					bcopy(enccrd->crd_iv, cmd->iv, ivlen);
2613				else {
2614					crypto_copydata(crp->crp_flags,
2615					    crp->crp_buf, enccrd->crd_inject,
2616					    ivlen, cmd->iv);
2617				}
2618			}
2619		}
2620
2621		if (enccrd->crd_flags & CRD_F_KEY_EXPLICIT)
2622			cmd->cry_masks |= HIFN_CRYPT_CMD_NEW_KEY;
2623		cmd->ck = enccrd->crd_key;
2624		cmd->cklen = enccrd->crd_klen >> 3;
2625		cmd->cry_masks |= HIFN_CRYPT_CMD_NEW_KEY;
2626
2627		/*
2628		 * Need to specify the size for the AES key in the masks.
2629		 */
2630		if ((cmd->cry_masks & HIFN_CRYPT_CMD_ALG_MASK) ==
2631		    HIFN_CRYPT_CMD_ALG_AES) {
2632			switch (cmd->cklen) {
2633			case 16:
2634				cmd->cry_masks |= HIFN_CRYPT_CMD_KSZ_128;
2635				break;
2636			case 24:
2637				cmd->cry_masks |= HIFN_CRYPT_CMD_KSZ_192;
2638				break;
2639			case 32:
2640				cmd->cry_masks |= HIFN_CRYPT_CMD_KSZ_256;
2641				break;
2642			default:
2643				err = EINVAL;
2644				goto errout;
2645			}
2646		}
2647	}
2648
2649	if (maccrd) {
2650		cmd->maccrd = maccrd;
2651		cmd->base_masks |= HIFN_BASE_CMD_MAC;
2652
2653		switch (maccrd->crd_alg) {
2654		case CRYPTO_MD5:
2655			cmd->mac_masks |= HIFN_MAC_CMD_ALG_MD5 |
2656			    HIFN_MAC_CMD_RESULT | HIFN_MAC_CMD_MODE_HASH |
2657			    HIFN_MAC_CMD_POS_IPSEC;
2658                       break;
2659		case CRYPTO_MD5_HMAC:
2660			cmd->mac_masks |= HIFN_MAC_CMD_ALG_MD5 |
2661			    HIFN_MAC_CMD_RESULT | HIFN_MAC_CMD_MODE_HMAC |
2662			    HIFN_MAC_CMD_POS_IPSEC | HIFN_MAC_CMD_TRUNC;
2663			break;
2664		case CRYPTO_SHA1:
2665			cmd->mac_masks |= HIFN_MAC_CMD_ALG_SHA1 |
2666			    HIFN_MAC_CMD_RESULT | HIFN_MAC_CMD_MODE_HASH |
2667			    HIFN_MAC_CMD_POS_IPSEC;
2668			break;
2669		case CRYPTO_SHA1_HMAC:
2670			cmd->mac_masks |= HIFN_MAC_CMD_ALG_SHA1 |
2671			    HIFN_MAC_CMD_RESULT | HIFN_MAC_CMD_MODE_HMAC |
2672			    HIFN_MAC_CMD_POS_IPSEC | HIFN_MAC_CMD_TRUNC;
2673			break;
2674		}
2675
2676		if (maccrd->crd_alg == CRYPTO_SHA1_HMAC ||
2677		     maccrd->crd_alg == CRYPTO_MD5_HMAC) {
2678			cmd->mac_masks |= HIFN_MAC_CMD_NEW_KEY;
2679			bcopy(maccrd->crd_key, cmd->mac, maccrd->crd_klen >> 3);
2680			bzero(cmd->mac + (maccrd->crd_klen >> 3),
2681			    HIFN_MAC_KEY_LENGTH - (maccrd->crd_klen >> 3));
2682		}
2683	}
2684
2685	cmd->crp = crp;
2686	cmd->session_num = session;
2687	cmd->softc = sc;
2688
2689	err = hifn_crypto(sc, cmd, crp, hint);
2690	if (!err) {
2691		return 0;
2692	} else if (err == ERESTART) {
2693		/*
2694		 * There weren't enough resources to dispatch the request
2695		 * to the part.  Notify the caller so they'll requeue this
2696		 * request and resubmit it again soon.
2697		 */
2698#ifdef HIFN_DEBUG
2699		if (hifn_debug)
2700			device_printf(sc->sc_dev, "requeue request\n");
2701#endif
2702		free(cmd, M_DEVBUF);
2703		sc->sc_needwakeup |= CRYPTO_SYMQ;
2704		return (err);
2705	}
2706
2707errout:
2708	if (cmd != NULL)
2709		free(cmd, M_DEVBUF);
2710	if (err == EINVAL)
2711		hifnstats.hst_invalid++;
2712	else
2713		hifnstats.hst_nomem++;
2714	crp->crp_etype = err;
2715	crypto_done(crp);
2716	return (err);
2717}
2718
/*
 * Recover from a DMA-engine abort: walk the outstanding result ring,
 * complete the operations the device already finished, fail the rest,
 * then reset and reinitialize the board.  Called from hifn_intr() with
 * the softc lock held.
 */
static void
hifn_abort(struct hifn_softc *sc)
{
	struct hifn_dma *dma = sc->sc_dma;
	struct hifn_command *cmd;
	struct cryptop *crp;
	int i, u;

	i = sc->sc_resk; u = sc->sc_resu;
	while (u != 0) {
		cmd = sc->sc_hifn_commands[i];
		KASSERT(cmd != NULL, ("hifn_abort: null command slot %u", i));
		sc->sc_hifn_commands[i] = NULL;
		crp = cmd->crp;

		/* VALID clear means the device completed this descriptor. */
		if ((dma->resr[i].l & htole32(HIFN_D_VALID)) == 0) {
			/* Salvage what we can. */
			u_int8_t *macbuf;

			/* The digest lands 12 bytes into the result buffer. */
			if (cmd->base_masks & HIFN_BASE_CMD_MAC) {
				macbuf = dma->result_bufs[i];
				macbuf += 12;
			} else
				macbuf = NULL;
			hifnstats.hst_opackets++;
			hifn_callback(sc, cmd, macbuf);
		} else {
			/* Still owned by the device: tear the request down. */
			if (cmd->src_map == cmd->dst_map) {
				bus_dmamap_sync(sc->sc_dmat, cmd->src_map,
				    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
			} else {
				bus_dmamap_sync(sc->sc_dmat, cmd->src_map,
				    BUS_DMASYNC_POSTWRITE);
				bus_dmamap_sync(sc->sc_dmat, cmd->dst_map,
				    BUS_DMASYNC_POSTREAD);
			}

			/* Hand the separate dst chain back to the caller. */
			if (cmd->src_m != cmd->dst_m) {
				m_freem(cmd->src_m);
				crp->crp_buf = (caddr_t)cmd->dst_m;
			}

			/* non-shared buffers cannot be restarted */
			if (cmd->src_map != cmd->dst_map) {
				/*
				 * XXX should be EAGAIN, delayed until
				 * after the reset.
				 */
				crp->crp_etype = ENOMEM;
				bus_dmamap_unload(sc->sc_dmat, cmd->dst_map);
				bus_dmamap_destroy(sc->sc_dmat, cmd->dst_map);
			} else
				crp->crp_etype = ENOMEM;

			bus_dmamap_unload(sc->sc_dmat, cmd->src_map);
			bus_dmamap_destroy(sc->sc_dmat, cmd->src_map);

			free(cmd, M_DEVBUF);
			if (crp->crp_etype != EAGAIN)
				crypto_done(crp);
		}

		/*
		 * NOTE(review): this wraps at HIFN_D_RES_RSIZE, while
		 * hifn_intr() wraps the same ring at HIFN_D_RES_RSIZE + 1
		 * (to skip the jump descriptor) — confirm which is intended.
		 */
		if (++i == HIFN_D_RES_RSIZE)
			i = 0;
		u--;
	}
	sc->sc_resk = i; sc->sc_resu = u;

	/* Bring the device back to a clean state. */
	hifn_reset_board(sc, 1);
	hifn_init_dma(sc);
	hifn_init_pci_registers(sc);
}
2791
/*
 * Completion handling for one finished operation: synchronize the DMA
 * maps, fix up the destination mbuf chain, copy back slop bytes,
 * reclaim destination descriptors, save the next session IV, copy out
 * the MAC digest, release DMA resources, and complete the request.
 * Called with the softc lock held.
 */
static void
hifn_callback(struct hifn_softc *sc, struct hifn_command *cmd, u_int8_t *macbuf)
{
	struct hifn_dma *dma = sc->sc_dma;
	struct cryptop *crp = cmd->crp;
	struct cryptodesc *crd;
	struct mbuf *m;
	int totlen, i, u, ivlen;

	if (cmd->src_map == cmd->dst_map) {
		bus_dmamap_sync(sc->sc_dmat, cmd->src_map,
		    BUS_DMASYNC_POSTWRITE | BUS_DMASYNC_POSTREAD);
	} else {
		bus_dmamap_sync(sc->sc_dmat, cmd->src_map,
		    BUS_DMASYNC_POSTWRITE);
		bus_dmamap_sync(sc->sc_dmat, cmd->dst_map,
		    BUS_DMASYNC_POSTREAD);
	}

	if (crp->crp_flags & CRYPTO_F_IMBUF) {
		if (cmd->src_m != cmd->dst_m) {
			/*
			 * A separate output chain was allocated: trim it
			 * to the payload length and hand it to the caller
			 * in place of the (freed) input chain.
			 */
			crp->crp_buf = (caddr_t)cmd->dst_m;
			totlen = cmd->src_mapsize;
			for (m = cmd->dst_m; m != NULL; m = m->m_next) {
				if (totlen < m->m_len) {
					m->m_len = totlen;
					totlen = 0;
				} else
					totlen -= m->m_len;
			}
			cmd->dst_m->m_pkthdr.len = cmd->src_m->m_pkthdr.len;
			m_freem(cmd->src_m);
		}
	}

	/* Copy back any tail bytes that were staged in the slop area. */
	if (cmd->sloplen != 0) {
		crypto_copyback(crp->crp_flags, crp->crp_buf,
		    cmd->src_mapsize - cmd->sloplen, cmd->sloplen,
		    (caddr_t)&dma->slop[cmd->slopidx]);
	}

	/* Reclaim destination descriptors the device has released. */
	i = sc->sc_dstk; u = sc->sc_dstu;
	while (u != 0) {
		if (i == HIFN_D_DST_RSIZE)
			i = 0;
		bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
		if (dma->dstr[i].l & htole32(HIFN_D_VALID)) {
			bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap,
			    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
			break;
		}
		i++, u--;
	}
	sc->sc_dstk = i; sc->sc_dstu = u;

	hifnstats.hst_obytes += cmd->dst_mapsize;

	/*
	 * After an encryption, save the last cipher block as the IV for
	 * the next request on this session (CBC chaining across requests).
	 */
	if ((cmd->base_masks & (HIFN_BASE_CMD_CRYPT | HIFN_BASE_CMD_DECODE)) ==
	    HIFN_BASE_CMD_CRYPT) {
		for (crd = crp->crp_desc; crd; crd = crd->crd_next) {
			if (crd->crd_alg != CRYPTO_DES_CBC &&
			    crd->crd_alg != CRYPTO_3DES_CBC &&
			    crd->crd_alg != CRYPTO_AES_CBC)
				continue;
			ivlen = ((crd->crd_alg == CRYPTO_AES_CBC) ?
				HIFN_AES_IV_LENGTH : HIFN_IV_LENGTH);
			crypto_copydata(crp->crp_flags, crp->crp_buf,
			    crd->crd_skip + crd->crd_len - ivlen, ivlen,
			    cmd->softc->sc_sessions[cmd->session_num].hs_iv);
			break;
		}
	}

	/* Copy the computed digest into the caller's buffer. */
	if (macbuf != NULL) {
		for (crd = crp->crp_desc; crd; crd = crd->crd_next) {
                        int len;

			if (crd->crd_alg != CRYPTO_MD5 &&
			    crd->crd_alg != CRYPTO_SHA1 &&
			    crd->crd_alg != CRYPTO_MD5_HMAC &&
			    crd->crd_alg != CRYPTO_SHA1_HMAC) {
				continue;
			}
			len = cmd->softc->sc_sessions[cmd->session_num].hs_mlen;
			crypto_copyback(crp->crp_flags, crp->crp_buf,
			    crd->crd_inject, len, macbuf);
			break;
		}
	}

	/* Release the DMA maps and complete the request. */
	if (cmd->src_map != cmd->dst_map) {
		bus_dmamap_unload(sc->sc_dmat, cmd->dst_map);
		bus_dmamap_destroy(sc->sc_dmat, cmd->dst_map);
	}
	bus_dmamap_unload(sc->sc_dmat, cmd->src_map);
	bus_dmamap_destroy(sc->sc_dmat, cmd->src_map);
	free(cmd, M_DEVBUF);
	crypto_done(crp);
}
2892
2893/*
2894 * 7811 PB3 rev/2 parts lock-up on burst writes to Group 0
2895 * and Group 1 registers; avoid conditions that could create
2896 * burst writes by doing a read in between the writes.
2897 *
2898 * NB: The read we interpose is always to the same register;
2899 *     we do this because reading from an arbitrary (e.g. last)
2900 *     register may not always work.
2901 */
2902static void
2903hifn_write_reg_0(struct hifn_softc *sc, bus_size_t reg, u_int32_t val)
2904{
2905	if (sc->sc_flags & HIFN_IS_7811) {
2906		if (sc->sc_bar0_lastreg == reg - 4)
2907			bus_space_read_4(sc->sc_st0, sc->sc_sh0, HIFN_0_PUCNFG);
2908		sc->sc_bar0_lastreg = reg;
2909	}
2910	bus_space_write_4(sc->sc_st0, sc->sc_sh0, reg, val);
2911}
2912
2913static void
2914hifn_write_reg_1(struct hifn_softc *sc, bus_size_t reg, u_int32_t val)
2915{
2916	if (sc->sc_flags & HIFN_IS_7811) {
2917		if (sc->sc_bar1_lastreg == reg - 4)
2918			bus_space_read_4(sc->sc_st1, sc->sc_sh1, HIFN_1_REVID);
2919		sc->sc_bar1_lastreg = reg;
2920	}
2921	bus_space_write_4(sc->sc_st1, sc->sc_sh1, reg, val);
2922}
2923
2924#ifdef HIFN_VULCANDEV
2925/*
2926 * this code provides support for mapping the PK engine's register
2927 * into a userspace program.
2928 *
2929 */
2930static int
2931vulcanpk_mmap(struct cdev *dev, vm_ooffset_t offset,
2932	      vm_paddr_t *paddr, int nprot, vm_memattr_t *memattr)
2933{
2934	struct hifn_softc *sc;
2935	vm_paddr_t pd;
2936	void *b;
2937
2938	sc = dev->si_drv1;
2939
2940	pd = rman_get_start(sc->sc_bar1res);
2941	b = rman_get_virtual(sc->sc_bar1res);
2942
2943#if 0
2944	printf("vpk mmap: %p(%016llx) offset=%lld\n", b,
2945	    (unsigned long long)pd, offset);
2946	hexdump(b, HIFN_1_PUB_MEMEND, "vpk", 0);
2947#endif
2948
2949	if (offset == 0) {
2950		*paddr = pd;
2951		return (0);
2952	}
2953	return (-1);
2954}
2955
/*
 * Character-device switch for the vulcanpk node; mmap (above) is the
 * only operation provided.
 */
static struct cdevsw vulcanpk_cdevsw = {
	.d_version =	D_VERSION,
	.d_mmap =	vulcanpk_mmap,
	.d_name =	"vulcanpk",
};
2961#endif /* HIFN_VULCANDEV */
2962