/* hifn7751.c revision 104477 */
1/* $FreeBSD: head/sys/dev/hifn/hifn7751.c 104477 2002-10-04 20:32:37Z sam $ */
2/*	$OpenBSD: hifn7751.c,v 1.120 2002/05/17 00:33:34 deraadt Exp $	*/
3
4/*
5 * Invertex AEON / Hifn 7751 driver
6 * Copyright (c) 1999 Invertex Inc. All rights reserved.
7 * Copyright (c) 1999 Theo de Raadt
8 * Copyright (c) 2000-2001 Network Security Technologies, Inc.
9 *			http://www.netsec.net
10 *
11 * This driver is based on a previous driver by Invertex, for which they
12 * requested:  Please send any comments, feedback, bug-fixes, or feature
13 * requests to software@invertex.com.
14 *
15 * Redistribution and use in source and binary forms, with or without
16 * modification, are permitted provided that the following conditions
17 * are met:
18 *
19 * 1. Redistributions of source code must retain the above copyright
20 *   notice, this list of conditions and the following disclaimer.
21 * 2. Redistributions in binary form must reproduce the above copyright
22 *   notice, this list of conditions and the following disclaimer in the
23 *   documentation and/or other materials provided with the distribution.
24 * 3. The name of the author may not be used to endorse or promote products
25 *   derived from this software without specific prior written permission.
26 *
27 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
28 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
29 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
30 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
31 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
32 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
33 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
34 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
35 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
36 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
37 *
38 * Effort sponsored in part by the Defense Advanced Research Projects
39 * Agency (DARPA) and Air Force Research Laboratory, Air Force
40 * Materiel Command, USAF, under agreement number F30602-01-2-0537.
41 *
42 */
43
44#define HIFN_DEBUG
45
46/*
47 * Driver for the Hifn 7751 encryption processor.
48 */
49
50#include <sys/param.h>
51#include <sys/systm.h>
52#include <sys/proc.h>
53#include <sys/errno.h>
54#include <sys/malloc.h>
55#include <sys/kernel.h>
56#include <sys/mbuf.h>
57#include <sys/lock.h>
58#include <sys/mutex.h>
59#include <sys/sysctl.h>
60
61#include <vm/vm.h>
62#include <vm/pmap.h>
63
64#include <machine/clock.h>
65#include <machine/bus.h>
66#include <machine/resource.h>
67#include <sys/bus.h>
68#include <sys/rman.h>
69
70#include <opencrypto/cryptodev.h>
71#include <sys/random.h>
72
73#include <pci/pcivar.h>
74#include <pci/pcireg.h>
75#include <dev/hifn/hifn7751reg.h>
76#include <dev/hifn/hifn7751var.h>
77
78/*
79 * Prototypes and count for the pci_device structure
80 */
81static	int hifn_probe(device_t);
82static	int hifn_attach(device_t);
83static	int hifn_detach(device_t);
84static	int hifn_suspend(device_t);
85static	int hifn_resume(device_t);
86static	void hifn_shutdown(device_t);
87
88static device_method_t hifn_methods[] = {
89	/* Device interface */
90	DEVMETHOD(device_probe,		hifn_probe),
91	DEVMETHOD(device_attach,	hifn_attach),
92	DEVMETHOD(device_detach,	hifn_detach),
93	DEVMETHOD(device_suspend,	hifn_suspend),
94	DEVMETHOD(device_resume,	hifn_resume),
95	DEVMETHOD(device_shutdown,	hifn_shutdown),
96
97	/* bus interface */
98	DEVMETHOD(bus_print_child,	bus_generic_print_child),
99	DEVMETHOD(bus_driver_added,	bus_generic_driver_added),
100
101	{ 0, 0 }
102};
103static driver_t hifn_driver = {
104	"hifn",
105	hifn_methods,
106	sizeof (struct hifn_softc)
107};
108static devclass_t hifn_devclass;
109
110DRIVER_MODULE(hifn, pci, hifn_driver, hifn_devclass, 0, 0);
111
112static	void hifn_reset_board(struct hifn_softc *, int);
113static	void hifn_reset_puc(struct hifn_softc *);
114static	void hifn_puc_wait(struct hifn_softc *);
115static	int hifn_enable_crypto(struct hifn_softc *);
116static	void hifn_set_retry(struct hifn_softc *sc);
117static	void hifn_init_dma(struct hifn_softc *);
118static	void hifn_init_pci_registers(struct hifn_softc *);
119static	int hifn_sramsize(struct hifn_softc *);
120static	int hifn_dramsize(struct hifn_softc *);
121static	int hifn_ramtype(struct hifn_softc *);
122static	void hifn_sessions(struct hifn_softc *);
123static	void hifn_intr(void *);
124static	u_int hifn_write_command(struct hifn_command *, u_int8_t *);
125static	u_int32_t hifn_next_signature(u_int32_t a, u_int cnt);
126static	int hifn_newsession(void *, u_int32_t *, struct cryptoini *);
127static	int hifn_freesession(void *, u_int64_t);
128static	int hifn_process(void *, struct cryptop *, int);
129static	void hifn_callback(struct hifn_softc *, struct hifn_command *, u_int8_t *);
130static	int hifn_crypto(struct hifn_softc *, struct hifn_command *, struct cryptop *, int);
131static	int hifn_readramaddr(struct hifn_softc *, int, u_int8_t *);
132static	int hifn_writeramaddr(struct hifn_softc *, int, u_int8_t *);
133static	int hifn_dmamap_load_src(struct hifn_softc *, struct hifn_command *);
134static	int hifn_dmamap_load_dst(struct hifn_softc *, struct hifn_command *);
135static	int hifn_init_pubrng(struct hifn_softc *);
136static	void hifn_rng(void *);
137static	void hifn_tick(void *);
138static	void hifn_abort(struct hifn_softc *);
139static	void hifn_alloc_slot(struct hifn_softc *, int *, int *, int *, int *);
140
141static	void hifn_write_reg_0(struct hifn_softc *, bus_size_t, u_int32_t);
142static	void hifn_write_reg_1(struct hifn_softc *, bus_size_t, u_int32_t);
143
144static __inline__ u_int32_t
145READ_REG_0(struct hifn_softc *sc, bus_size_t reg)
146{
147    u_int32_t v = bus_space_read_4(sc->sc_st0, sc->sc_sh0, reg);
148    sc->sc_bar0_lastreg = (bus_size_t) -1;
149    return (v);
150}
151#define	WRITE_REG_0(sc, reg, val)	hifn_write_reg_0(sc, reg, val)
152
153static __inline__ u_int32_t
154READ_REG_1(struct hifn_softc *sc, bus_size_t reg)
155{
156    u_int32_t v = bus_space_read_4(sc->sc_st1, sc->sc_sh1, reg);
157    sc->sc_bar1_lastreg = (bus_size_t) -1;
158    return (v);
159}
160#define	WRITE_REG_1(sc, reg, val)	hifn_write_reg_1(sc, reg, val)
161
162#ifdef HIFN_DEBUG
163static	int hifn_debug = 0;
164SYSCTL_INT(_debug, OID_AUTO, hifn, CTLFLAG_RW, &hifn_debug,
165	    0, "Hifn driver debugging printfs");
166#endif
167
168static	struct hifn_stats hifnstats;
169SYSCTL_STRUCT(_kern, OID_AUTO, hifn_stats, CTLFLAG_RD, &hifnstats,
170	    hifn_stats, "Hifn driver statistics");
171static	int hifn_maxbatch = 2;		/* XXX tune based on part+sys speed */
172SYSCTL_INT(_kern, OID_AUTO, hifn_maxbatch, CTLFLAG_RW, &hifn_maxbatch,
173	    0, "Hifn driver: max ops to batch w/o interrupt");
174
175/*
176 * Probe for a supported device.  The PCI vendor and device
177 * IDs are used to detect devices we know how to handle.
178 */
179static int
180hifn_probe(device_t dev)
181{
182	if (pci_get_vendor(dev) == PCI_VENDOR_INVERTEX &&
183	    pci_get_device(dev) == PCI_PRODUCT_INVERTEX_AEON)
184		return (0);
185	if (pci_get_vendor(dev) == PCI_VENDOR_HIFN &&
186	    (pci_get_device(dev) == PCI_PRODUCT_HIFN_7751 ||
187	     pci_get_device(dev) == PCI_PRODUCT_HIFN_7951 ||
188	     pci_get_device(dev) == PCI_PRODUCT_HIFN_7811))
189		return (0);
190	if (pci_get_vendor(dev) == PCI_VENDOR_NETSEC &&
191	    pci_get_device(dev) == PCI_PRODUCT_NETSEC_7751)
192		return (0);
193	return (ENXIO);
194}
195
196static void
197hifn_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
198{
199	bus_addr_t *paddr = (bus_addr_t*) arg;
200	*paddr = segs->ds_addr;
201}
202
203static const char*
204hifn_partname(struct hifn_softc *sc)
205{
206	/* XXX sprintf numbers when not decoded */
207	switch (pci_get_vendor(sc->sc_dev)) {
208	case PCI_VENDOR_HIFN:
209		switch (pci_get_device(sc->sc_dev)) {
210		case PCI_PRODUCT_HIFN_6500:	return "Hifn 6500";
211		case PCI_PRODUCT_HIFN_7751:	return "Hifn 7751";
212		case PCI_PRODUCT_HIFN_7811:	return "Hifn 7811";
213		case PCI_PRODUCT_HIFN_7951:	return "Hifn 7951";
214		}
215		return "Hifn unknown-part";
216	case PCI_VENDOR_INVERTEX:
217		switch (pci_get_device(sc->sc_dev)) {
218		case PCI_PRODUCT_INVERTEX_AEON:	return "Invertex AEON";
219		}
220		return "Invertex unknown-part";
221	case PCI_VENDOR_NETSEC:
222		switch (pci_get_device(sc->sc_dev)) {
223		case PCI_PRODUCT_NETSEC_7751:	return "NetSec 7751";
224		}
225		return "NetSec unknown-part";
226	}
227	return "Unknown-vendor unknown-part";
228}
229
/*
 * Attach an interface that successfully probed.
 *
 * Bring-up order: enable PCI memory/busmaster access, map BAR0/BAR1,
 * allocate the shared DMA descriptor area, reset and unlock the chip,
 * size its RAM, hook the interrupt, and finally register with the
 * opencrypto framework.  On any failure the goto labels unwind the
 * resources acquired so far, in reverse order.  Returns 0 on success,
 * ENXIO on failure.
 */
static int
hifn_attach(device_t dev)
{
	struct hifn_softc *sc = device_get_softc(dev);
	u_int32_t cmd;
	caddr_t kva;
	int rseg, rid;
	char rbase;
	u_int16_t ena, rev;

	KASSERT(sc != NULL, ("hifn_attach: null software carrier!"));
	bzero(sc, sizeof (*sc));
	sc->sc_dev = dev;

	mtx_init(&sc->sc_mtx, device_get_nameunit(dev), "crypto driver", MTX_DEF);

	/* XXX handle power management */

	/*
	 * The 7951 has a random number generator and
	 * public key support; note this.
	 */
	if (pci_get_vendor(dev) == PCI_VENDOR_HIFN &&
	    pci_get_device(dev) == PCI_PRODUCT_HIFN_7951)
		sc->sc_flags = HIFN_HAS_RNG | HIFN_HAS_PUBLIC;
	/*
	 * The 7811 has a random number generator and
	 * we also note it's identity 'cuz of some quirks.
	 */
	if (pci_get_vendor(dev) == PCI_VENDOR_HIFN &&
	    pci_get_device(dev) == PCI_PRODUCT_HIFN_7811)
		sc->sc_flags |= HIFN_IS_7811 | HIFN_HAS_RNG;

	/*
	 * Configure support for memory-mapped access to
	 * registers and for DMA operations.
	 */
#define	PCIM_ENA	(PCIM_CMD_MEMEN|PCIM_CMD_BUSMASTEREN)
	cmd = pci_read_config(dev, PCIR_COMMAND, 4);
	cmd |= PCIM_ENA;
	pci_write_config(dev, PCIR_COMMAND, cmd, 4);
	/* Re-read to verify the bits actually stuck. */
	cmd = pci_read_config(dev, PCIR_COMMAND, 4);
	if ((cmd & PCIM_ENA) != PCIM_ENA) {
		device_printf(dev, "failed to enable %s\n",
			(cmd & PCIM_ENA) == 0 ?
				"memory mapping & bus mastering" :
			(cmd & PCIM_CMD_MEMEN) == 0 ?
				"memory mapping" : "bus mastering");
		goto fail_pci;
	}
#undef PCIM_ENA

	/*
	 * Setup PCI resources. Note that we record the bus
	 * tag and handle for each register mapping, this is
	 * used by the READ_REG_0, WRITE_REG_0, READ_REG_1,
	 * and WRITE_REG_1 macros throughout the driver.
	 */
	rid = HIFN_BAR0;
	sc->sc_bar0res = bus_alloc_resource(dev, SYS_RES_MEMORY, &rid,
			 		    0, ~0, 1, RF_ACTIVE);
	if (sc->sc_bar0res == NULL) {
		device_printf(dev, "cannot map bar%d register space\n", 0);
		goto fail_pci;
	}
	sc->sc_st0 = rman_get_bustag(sc->sc_bar0res);
	sc->sc_sh0 = rman_get_bushandle(sc->sc_bar0res);
	sc->sc_bar0_lastreg = (bus_size_t) -1;

	rid = HIFN_BAR1;
	sc->sc_bar1res = bus_alloc_resource(dev, SYS_RES_MEMORY, &rid,
					    0, ~0, 1, RF_ACTIVE);
	if (sc->sc_bar1res == NULL) {
		device_printf(dev, "cannot map bar%d register space\n", 1);
		goto fail_io0;
	}
	sc->sc_st1 = rman_get_bustag(sc->sc_bar1res);
	sc->sc_sh1 = rman_get_bushandle(sc->sc_bar1res);
	sc->sc_bar1_lastreg = (bus_size_t) -1;

	/* Zero the PCI retry/TRDY timeouts (7811 lockup workaround). */
	hifn_set_retry(sc);

	/*
	 * Setup the area where the Hifn DMA's descriptors
	 * and associated data structures.
	 */
	if (bus_dma_tag_create(NULL,			/* parent */
			       1, 0,			/* alignment,boundary */
			       BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
			       BUS_SPACE_MAXADDR,	/* highaddr */
			       NULL, NULL,		/* filter, filterarg */
			       HIFN_MAX_DMALEN,		/* maxsize */
			       MAX_SCATTER,		/* nsegments */
			       HIFN_MAX_SEGLEN,		/* maxsegsize */
			       BUS_DMA_ALLOCNOW,	/* flags */
			       &sc->sc_dmat)) {
		device_printf(dev, "cannot allocate DMA tag\n");
		goto fail_io1;
	}
	if (bus_dmamap_create(sc->sc_dmat, BUS_DMA_NOWAIT, &sc->sc_dmamap)) {
		device_printf(dev, "cannot create dma map\n");
		bus_dma_tag_destroy(sc->sc_dmat);
		goto fail_io1;
	}
	if (bus_dmamem_alloc(sc->sc_dmat, (void**) &kva, BUS_DMA_NOWAIT, &sc->sc_dmamap)) {
		device_printf(dev, "cannot alloc dma buffer\n");
		bus_dmamap_destroy(sc->sc_dmat, sc->sc_dmamap);
		bus_dma_tag_destroy(sc->sc_dmat);
		goto fail_io1;
	}
	if (bus_dmamap_load(sc->sc_dmat, sc->sc_dmamap, kva,
			     sizeof (*sc->sc_dma),
			     hifn_dmamap_cb, &sc->sc_dma_physaddr,
			     BUS_DMA_NOWAIT)) {
		device_printf(dev, "cannot load dma map\n");
		bus_dmamem_free(sc->sc_dmat, kva, sc->sc_dmamap);
		bus_dmamap_destroy(sc->sc_dmat, sc->sc_dmamap);
		bus_dma_tag_destroy(sc->sc_dmat);
		goto fail_io1;
	}
	sc->sc_dma = (struct hifn_dma *)kva;
	bzero(sc->sc_dma, sizeof(*sc->sc_dma));

	KASSERT(sc->sc_st0 != NULL, ("hifn_attach: null bar0 tag!"));
	KASSERT(sc->sc_sh0 != NULL, ("hifn_attach: null bar0 handle!"));
	KASSERT(sc->sc_st1 != NULL, ("hifn_attach: null bar1 tag!"));
	KASSERT(sc->sc_sh1 != NULL, ("hifn_attach: null bar1 handle!"));

	/*
	 * Reset the board and do the ``secret handshake''
	 * to enable the crypto support.  Then complete the
	 * initialization procedure by setting up the interrupt
	 * and hooking in to the system crypto support so we'll
	 * get used for system services like the crypto device,
	 * IPsec, RNG device, etc.
	 */
	hifn_reset_board(sc, 0);

	if (hifn_enable_crypto(sc) != 0) {
		device_printf(dev, "crypto enabling failed\n");
		goto fail_mem;
	}
	hifn_reset_puc(sc);

	hifn_init_dma(sc);
	hifn_init_pci_registers(sc);

	/* Detect SRAM vs DRAM; sets sc_drammodel. */
	if (hifn_ramtype(sc))
		goto fail_mem;

	if (sc->sc_drammodel == 0)
		hifn_sramsize(sc);
	else
		hifn_dramsize(sc);

	/*
	 * Workaround for NetSec 7751 rev A: half ram size because two
	 * of the address lines were left floating
	 */
	if (pci_get_vendor(dev) == PCI_VENDOR_NETSEC &&
	    pci_get_device(dev) == PCI_PRODUCT_NETSEC_7751 &&
	    pci_get_revid(dev) == 0x61)	/*XXX???*/
		sc->sc_ramsize >>= 1;

	/*
	 * Arrange the interrupt line.
	 */
	rid = 0;
	sc->sc_irq = bus_alloc_resource(dev, SYS_RES_IRQ, &rid,
					0, ~0, 1, RF_SHAREABLE|RF_ACTIVE);
	if (sc->sc_irq == NULL) {
		device_printf(dev, "could not map interrupt\n");
		goto fail_mem;
	}
	/*
	 * NB: Network code assumes we are blocked with splimp()
	 *     so make sure the IRQ is marked appropriately.
	 */
	if (bus_setup_intr(dev, sc->sc_irq, INTR_TYPE_NET,
			   hifn_intr, sc, &sc->sc_intrhand)) {
		device_printf(dev, "could not setup interrupt\n");
		goto fail_intr2;
	}

	/* Compute sc_maxses from context RAM size/configuration. */
	hifn_sessions(sc);

	/*
	 * NB: Keep only the low 16 bits; this masks the chip id
	 *     from the 7951.
	 */
	rev = READ_REG_1(sc, HIFN_1_REVID) & 0xffff;

	/* Express the RAM size in KB or MB for the banner line. */
	rseg = sc->sc_ramsize / 1024;
	rbase = 'K';
	if (sc->sc_ramsize >= (1024 * 1024)) {
		rbase = 'M';
		rseg /= 1024;
	}
	device_printf(sc->sc_dev, "%s, rev %u, %d%cB %cram, %u sessions\n",
		hifn_partname(sc), rev,
		rseg, rbase, sc->sc_drammodel ? 'd' : 's',
		sc->sc_maxses);

	sc->sc_cid = crypto_get_driverid(0);
	if (sc->sc_cid < 0) {
		device_printf(dev, "could not get crypto driver id\n");
		goto fail_intr;
	}

	/* Re-read the enable level to decide which algorithms to offer. */
	WRITE_REG_0(sc, HIFN_0_PUCNFG,
	    READ_REG_0(sc, HIFN_0_PUCNFG) | HIFN_PUCNFG_CHIPID);
	ena = READ_REG_0(sc, HIFN_0_PUSTAT) & HIFN_PUSTAT_CHIPENA;

	switch (ena) {
	case HIFN_PUSTAT_ENA_2:
		/* Strong crypto: ciphers beyond single DES. */
		crypto_register(sc->sc_cid, CRYPTO_3DES_CBC, 0, 0,
		    hifn_newsession, hifn_freesession, hifn_process, sc);
		crypto_register(sc->sc_cid, CRYPTO_ARC4, 0, 0,
		    hifn_newsession, hifn_freesession, hifn_process, sc);
		/*FALLTHROUGH*/
	case HIFN_PUSTAT_ENA_1:
		crypto_register(sc->sc_cid, CRYPTO_MD5, 0, 0,
		    hifn_newsession, hifn_freesession, hifn_process, sc);
		crypto_register(sc->sc_cid, CRYPTO_SHA1, 0, 0,
		    hifn_newsession, hifn_freesession, hifn_process, sc);
		crypto_register(sc->sc_cid, CRYPTO_MD5_HMAC, 0, 0,
		    hifn_newsession, hifn_freesession, hifn_process, sc);
		crypto_register(sc->sc_cid, CRYPTO_SHA1_HMAC, 0, 0,
		    hifn_newsession, hifn_freesession, hifn_process, sc);
		crypto_register(sc->sc_cid, CRYPTO_DES_CBC, 0, 0,
		    hifn_newsession, hifn_freesession, hifn_process, sc);
		break;
	}

	bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	if (sc->sc_flags & (HIFN_HAS_PUBLIC | HIFN_HAS_RNG))
		hifn_init_pubrng(sc);

	/* Start the once-a-second watchdog/housekeeping timer. */
	callout_init(&sc->sc_tickto, 0);
	callout_reset(&sc->sc_tickto, hz, hifn_tick, sc);

	return (0);

fail_intr:
	bus_teardown_intr(dev, sc->sc_irq, sc->sc_intrhand);
fail_intr2:
	/* XXX don't store rid */
	bus_release_resource(dev, SYS_RES_IRQ, 0, sc->sc_irq);
fail_mem:
	bus_dmamap_unload(sc->sc_dmat, sc->sc_dmamap);
	bus_dmamem_free(sc->sc_dmat, sc->sc_dma, sc->sc_dmamap);
	bus_dmamap_destroy(sc->sc_dmat, sc->sc_dmamap);
	bus_dma_tag_destroy(sc->sc_dmat);

	/* Turn off DMA polling */
	WRITE_REG_1(sc, HIFN_1_DMA_CNFG, HIFN_DMACNFG_MSTRESET |
	    HIFN_DMACNFG_DMARESET | HIFN_DMACNFG_MODE);
fail_io1:
	bus_release_resource(dev, SYS_RES_MEMORY, HIFN_BAR1, sc->sc_bar1res);
fail_io0:
	bus_release_resource(dev, SYS_RES_MEMORY, HIFN_BAR0, sc->sc_bar0res);
fail_pci:
	mtx_destroy(&sc->sc_mtx);
	return (ENXIO);
}
500
/*
 * Detach an interface that successfully probed.
 *
 * Tears down in roughly the reverse order of hifn_attach: stop the
 * callouts, quiesce chip DMA, unregister from opencrypto, then release
 * the interrupt, DMA memory, and register mappings.  Always returns 0.
 */
static int
hifn_detach(device_t dev)
{
	struct hifn_softc *sc = device_get_softc(dev);

	KASSERT(sc != NULL, ("hifn_detach: null software carrier!"));

	HIFN_LOCK(sc);

	/*XXX other resources */
	callout_stop(&sc->sc_tickto);
	/* NOTE(review): sc_rngto is only initialized when the part has an
	 * RNG (see hifn_init_pubrng) — stopping it unconditionally here
	 * relies on the softc having been bzero'd at attach. */
	callout_stop(&sc->sc_rngto);

	/* Turn off DMA polling */
	WRITE_REG_1(sc, HIFN_1_DMA_CNFG, HIFN_DMACNFG_MSTRESET |
	    HIFN_DMACNFG_DMARESET | HIFN_DMACNFG_MODE);

	crypto_unregister_all(sc->sc_cid);

	bus_generic_detach(dev);	/*XXX should be no children, right? */

	bus_teardown_intr(dev, sc->sc_irq, sc->sc_intrhand);
	/* XXX don't store rid */
	bus_release_resource(dev, SYS_RES_IRQ, 0, sc->sc_irq);

	bus_dmamap_unload(sc->sc_dmat, sc->sc_dmamap);
	bus_dmamem_free(sc->sc_dmat, sc->sc_dma, sc->sc_dmamap);
	bus_dmamap_destroy(sc->sc_dmat, sc->sc_dmamap);
	bus_dma_tag_destroy(sc->sc_dmat);

	bus_release_resource(dev, SYS_RES_MEMORY, HIFN_BAR1, sc->sc_bar1res);
	bus_release_resource(dev, SYS_RES_MEMORY, HIFN_BAR0, sc->sc_bar0res);

	HIFN_UNLOCK(sc);

	mtx_destroy(&sc->sc_mtx);

	return (0);
}
543
/*
 * Stop all chip I/O so that the kernel's probe routines don't
 * get confused by errant DMAs when rebooting.
 */
static void
hifn_shutdown(device_t dev)
{
#ifdef notyet
	/* Not implemented yet: would quiesce the chip here. */
	hifn_stop(device_get_softc(dev));
#endif
}
555
/*
 * Device suspend routine.  Stop the interface and save some PCI
 * settings in case the BIOS doesn't restore them properly on
 * resume.
 *
 * Currently only flags the softc as suspended; the register
 * save/restore logic is stubbed out under `notyet'.
 */
static int
hifn_suspend(device_t dev)
{
	struct hifn_softc *sc = device_get_softc(dev);
#ifdef notyet
	int i;

	hifn_stop(sc);
	/* Save BARs, BIOS address, and interrupt/latency config bytes. */
	for (i = 0; i < 5; i++)
		sc->saved_maps[i] = pci_read_config(dev, PCIR_MAPS + i * 4, 4);
	sc->saved_biosaddr = pci_read_config(dev, PCIR_BIOS, 4);
	sc->saved_intline = pci_read_config(dev, PCIR_INTLINE, 1);
	sc->saved_cachelnsz = pci_read_config(dev, PCIR_CACHELNSZ, 1);
	sc->saved_lattimer = pci_read_config(dev, PCIR_LATTIMER, 1);
#endif
	sc->sc_suspended = 1;

	return (0);
}
580
/*
 * Device resume routine.  Restore some PCI settings in case the BIOS
 * doesn't, re-enable busmastering, and restart the interface if
 * appropriate.
 *
 * Currently only clears the suspended flag; the restore logic is
 * stubbed out under `notyet' (and still references ifp/rl_init —
 * apparently leftover template code from another driver; verify
 * before ever enabling it).
 */
static int
hifn_resume(device_t dev)
{
	struct hifn_softc *sc = device_get_softc(dev);
#ifdef notyet
	int i;

	/* better way to do this? */
	for (i = 0; i < 5; i++)
		pci_write_config(dev, PCIR_MAPS + i * 4, sc->saved_maps[i], 4);
	pci_write_config(dev, PCIR_BIOS, sc->saved_biosaddr, 4);
	pci_write_config(dev, PCIR_INTLINE, sc->saved_intline, 1);
	pci_write_config(dev, PCIR_CACHELNSZ, sc->saved_cachelnsz, 1);
	pci_write_config(dev, PCIR_LATTIMER, sc->saved_lattimer, 1);

	/* reenable busmastering */
	pci_enable_busmaster(dev);
	pci_enable_io(dev, HIFN_RES);

        /* reinitialize interface if necessary */
        if (ifp->if_flags & IFF_UP)
                rl_init(sc);
#endif
	sc->sc_suspended = 0;

	return (0);
}
613
/*
 * Initialize the public-key engine and/or RNG on parts that have
 * them (7951: both; 7811: RNG only).  Starts the periodic hifn_rng
 * callout when an RNG is present.  Returns 0 on success, 1 if the
 * public key engine failed to come out of reset.
 */
static int
hifn_init_pubrng(struct hifn_softc *sc)
{
	u_int32_t r;
	int i;

	if ((sc->sc_flags & HIFN_IS_7811) == 0) {
		/* Reset 7951 public key/rng engine */
		WRITE_REG_1(sc, HIFN_1_PUB_RESET,
		    READ_REG_1(sc, HIFN_1_PUB_RESET) | HIFN_PUBRST_RESET);

		/* Poll up to ~100ms for the self-clearing reset bit. */
		for (i = 0; i < 100; i++) {
			DELAY(1000);
			if ((READ_REG_1(sc, HIFN_1_PUB_RESET) &
			    HIFN_PUBRST_RESET) == 0)
				break;
		}

		if (i == 100) {
			device_printf(sc->sc_dev, "public key init failed\n");
			return (1);
		}
	}

	/* Enable the rng, if available */
	if (sc->sc_flags & HIFN_HAS_RNG) {
		if (sc->sc_flags & HIFN_IS_7811) {
			/* 7811: disable, reconfigure, then re-enable. */
			r = READ_REG_1(sc, HIFN_1_7811_RNGENA);
			if (r & HIFN_7811_RNGENA_ENA) {
				r &= ~HIFN_7811_RNGENA_ENA;
				WRITE_REG_1(sc, HIFN_1_7811_RNGENA, r);
			}
			WRITE_REG_1(sc, HIFN_1_7811_RNGCFG,
			    HIFN_7811_RNGCFG_DEFL);
			r |= HIFN_7811_RNGENA_ENA;
			WRITE_REG_1(sc, HIFN_1_7811_RNGENA, r);
		} else
			WRITE_REG_1(sc, HIFN_1_RNG_CONFIG,
			    READ_REG_1(sc, HIFN_1_RNG_CONFIG) |
			    HIFN_RNGCFG_ENA);

		/* First read of RNG data is discarded (see hifn_rng). */
		sc->sc_rngfirst = 1;
		/* Harvest ~100 times per second, but at least every tick. */
		if (hz >= 100)
			sc->sc_rnghz = hz / 100;
		else
			sc->sc_rnghz = 1;
		callout_init(&sc->sc_rngto, 0);
		callout_reset(&sc->sc_rngto, sc->sc_rnghz, hifn_rng, sc);
	}

	/* Enable public key engine, if available */
	if (sc->sc_flags & HIFN_HAS_PUBLIC) {
		WRITE_REG_1(sc, HIFN_1_PUB_IEN, HIFN_PUBIEN_DONE);
		sc->sc_dmaier |= HIFN_DMAIER_PUBDONE;
		WRITE_REG_1(sc, HIFN_1_DMA_IER, sc->sc_dmaier);
	}

	return (0);
}
673
/*
 * Periodic callout: harvest entropy from the hardware RNG and feed
 * it to random_harvest().  Reschedules itself every sc_rnghz ticks;
 * note that returning early on a 7811 FIFO underflow permanently
 * stops the harvest (the callout is not rearmed).
 */
static void
hifn_rng(void *vsc)
{
/* Expands to the (count, bits, frac) arguments random_harvest expects. */
#define	RANDOM_BITS(n)	(n)*sizeof (u_int32_t), (n)*sizeof (u_int32_t)*NBBY, 0
	struct hifn_softc *sc = vsc;
	u_int32_t sts, num[2];
	int i;

	if (sc->sc_flags & HIFN_IS_7811) {
		/* Drain up to 5 word-pairs from the 7811's RNG FIFO. */
		for (i = 0; i < 5; i++) {
			sts = READ_REG_1(sc, HIFN_1_7811_RNGSTS);
			if (sts & HIFN_7811_RNGSTS_UFL) {
				device_printf(sc->sc_dev,
					      "RNG underflow: disabling\n");
				return;
			}
			if ((sts & HIFN_7811_RNGSTS_RDY) == 0)
				break;

			/*
			 * There are at least two words in the RNG FIFO
			 * at this point.
			 */
			num[0] = READ_REG_1(sc, HIFN_1_7811_RNGDAT);
			num[1] = READ_REG_1(sc, HIFN_1_7811_RNGDAT);
			/* NB: discard first data read */
			if (sc->sc_rngfirst)
				sc->sc_rngfirst = 0;
			else
				random_harvest(num, RANDOM_BITS(2), RANDOM_PURE);
		}
	} else {
		/* 7951-style parts: single data register, one word per pass. */
		num[0] = READ_REG_1(sc, HIFN_1_RNG_DATA);

		/* NB: discard first data read */
		if (sc->sc_rngfirst)
			sc->sc_rngfirst = 0;
		else
			random_harvest(num, RANDOM_BITS(1), RANDOM_PURE);
	}

	callout_reset(&sc->sc_rngto, sc->sc_rnghz, hifn_rng, sc);
#undef RANDOM_BITS
}
718
719static void
720hifn_puc_wait(struct hifn_softc *sc)
721{
722	int i;
723
724	for (i = 5000; i > 0; i--) {
725		DELAY(1);
726		if (!(READ_REG_0(sc, HIFN_0_PUCTRL) & HIFN_PUCTRL_RESET))
727			break;
728	}
729	if (!i)
730		device_printf(sc->sc_dev, "proc unit did not reset\n");
731}
732
/*
 * Reset the processing unit.
 *
 * Writing PUCTRL with only DMAENA (i.e. without the RESET bit)
 * kicks off the unit's reset sequence; hifn_puc_wait then polls
 * until the hardware reports the reset complete.
 */
static void
hifn_reset_puc(struct hifn_softc *sc)
{
	/* Reset processing unit */
	WRITE_REG_0(sc, HIFN_0_PUCTRL, HIFN_PUCTRL_DMAENA);
	hifn_puc_wait(sc);
}
743
744/*
745 * Set the Retry and TRDY registers; note that we set them to
746 * zero because the 7811 locks up when forced to retry (section
747 * 3.6 of "Specification Update SU-0014-04".  Not clear if we
748 * should do this for all Hifn parts, but it doesn't seem to hurt.
749 */
750static void
751hifn_set_retry(struct hifn_softc *sc)
752{
753	/* NB: RETRY only responds to 8-bit reads/writes */
754	pci_write_config(sc->sc_dev, HIFN_RETRY_TIMEOUT, 0, 1);
755	pci_write_config(sc->sc_dev, HIFN_TRDY_TIMEOUT, 0, 4);
756}
757
/*
 * Resets the board.  Values in the regesters are left as is
 * from the reset (i.e. initial values are assigned elsewhere).
 *
 * `full' selects a complete DMA-unit reset; otherwise only the
 * master reset line is pulsed and the processing unit is reset
 * separately via hifn_reset_puc().
 */
static void
hifn_reset_board(struct hifn_softc *sc, int full)
{
	u_int32_t reg;

	/*
	 * Set polling in the DMA configuration register to zero.  0x7 avoids
	 * resetting the board and zeros out the other fields.
	 */
	WRITE_REG_1(sc, HIFN_1_DMA_CNFG, HIFN_DMACNFG_MSTRESET |
	    HIFN_DMACNFG_DMARESET | HIFN_DMACNFG_MODE);

	/*
	 * Now that polling has been disabled, we have to wait 1 ms
	 * before resetting the board.
	 */
	DELAY(1000);

	/* Reset the DMA unit */
	if (full) {
		WRITE_REG_1(sc, HIFN_1_DMA_CNFG, HIFN_DMACNFG_MODE);
		DELAY(1000);
	} else {
		WRITE_REG_1(sc, HIFN_1_DMA_CNFG,
		    HIFN_DMACNFG_MODE | HIFN_DMACNFG_MSTRESET);
		hifn_reset_puc(sc);
	}

	KASSERT(sc->sc_dma != NULL, ("hifn_reset_board: null DMA tag!"));
	/* The descriptor rings are invalid after reset; clear them. */
	bzero(sc->sc_dma, sizeof(*sc->sc_dma));

	/* Bring dma unit out of reset */
	WRITE_REG_1(sc, HIFN_1_DMA_CNFG, HIFN_DMACNFG_MSTRESET |
	    HIFN_DMACNFG_DMARESET | HIFN_DMACNFG_MODE);

	hifn_puc_wait(sc);
	/* Reset clears the PCI retry/TRDY workaround; reapply it. */
	hifn_set_retry(sc);

	if (sc->sc_flags & HIFN_IS_7811) {
		/* Wait up to ~1s for the 7811's context RAM init to finish. */
		for (reg = 0; reg < 1000; reg++) {
			if (READ_REG_1(sc, HIFN_1_7811_MIPSRST) &
			    HIFN_MIPSRST_CRAMINIT)
				break;
			DELAY(1000);
		}
		if (reg == 1000)
			printf(": cram init timeout\n");
	}
}
811
812static u_int32_t
813hifn_next_signature(u_int32_t a, u_int cnt)
814{
815	int i;
816	u_int32_t v;
817
818	for (i = 0; i < cnt; i++) {
819
820		/* get the parity */
821		v = a & 0x80080125;
822		v ^= v >> 16;
823		v ^= v >> 8;
824		v ^= v >> 4;
825		v ^= v >> 2;
826		v ^= v >> 1;
827
828		a = (v & 1) ^ (a << 1);
829	}
830
831	return a;
832}
833
/*
 * Table mapping PCI vendor/product IDs to the 13-byte card id used
 * as key material for the crypto unlock handshake in
 * hifn_enable_crypto().  All currently known cards use the
 * all-zeros id.
 */
struct pci2id {
	u_short		pci_vendor;	/* PCI vendor ID */
	u_short		pci_prod;	/* PCI product (device) ID */
	char		card_id[13];	/* unlock key material for this card */
};
static struct pci2id pci2id[] = {
	{
		PCI_VENDOR_HIFN,
		PCI_PRODUCT_HIFN_7951,
		{ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
		  0x00, 0x00, 0x00, 0x00, 0x00 }
	}, {
		PCI_VENDOR_NETSEC,
		PCI_PRODUCT_NETSEC_7751,
		{ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
		  0x00, 0x00, 0x00, 0x00, 0x00 }
	}, {
		PCI_VENDOR_INVERTEX,
		PCI_PRODUCT_INVERTEX_AEON,
		{ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
		  0x00, 0x00, 0x00, 0x00, 0x00 }
	}, {
		PCI_VENDOR_HIFN,
		PCI_PRODUCT_HIFN_7811,
		{ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
		  0x00, 0x00, 0x00, 0x00, 0x00 }
	}, {
		/*
		 * Other vendors share this PCI ID as well, such as
		 * http://www.powercrypt.com, and obviously they also
		 * use the same key.
		 */
		PCI_VENDOR_HIFN,
		PCI_PRODUCT_HIFN_7751,
		{ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
		  0x00, 0x00, 0x00, 0x00, 0x00 }
	},
};
872
/*
 * Checks to see if crypto is already enabled.  If crypto isn't enable,
 * "hifn_enable_crypto" is called to enable it.  The check is important,
 * as enabling crypto twice will lock the board.
 *
 * Returns 0 on success (or when already enabled), 1 on failure.
 * The unlock handshake writes a card-specific signature sequence
 * (derived via hifn_next_signature from the pci2id card_id) to the
 * HIFN_UNLOCK_SECRET registers.  Performing this sequence on an
 * already-unlocked chip locks it until the next reboot, hence the
 * up-front checks.
 */
static int
hifn_enable_crypto(struct hifn_softc *sc)
{
	u_int32_t dmacfg, ramcfg, encl, addr, i;
	char *offtbl = NULL;

	/* Look up the unlock key material for this vendor/device. */
	for (i = 0; i < sizeof(pci2id)/sizeof(pci2id[0]); i++) {
		if (pci2id[i].pci_vendor == pci_get_vendor(sc->sc_dev) &&
		    pci2id[i].pci_prod == pci_get_device(sc->sc_dev)) {
			offtbl = pci2id[i].card_id;
			break;
		}
	}
	if (offtbl == NULL) {
		device_printf(sc->sc_dev, "Unknown card!\n");
		return (1);
	}

	/* Save registers we modify so they can be restored below. */
	ramcfg = READ_REG_0(sc, HIFN_0_PUCNFG);
	dmacfg = READ_REG_1(sc, HIFN_1_DMA_CNFG);

	/*
	 * The RAM config register's encrypt level bit needs to be set before
	 * every read performed on the encryption level register.
	 */
	WRITE_REG_0(sc, HIFN_0_PUCNFG, ramcfg | HIFN_PUCNFG_CHIPID);

	encl = READ_REG_0(sc, HIFN_0_PUSTAT) & HIFN_PUSTAT_CHIPENA;

	/*
	 * Make sure we don't re-unlock.  Two unlocks kills chip until the
	 * next reboot.
	 */
	if (encl == HIFN_PUSTAT_ENA_1 || encl == HIFN_PUSTAT_ENA_2) {
#ifdef HIFN_DEBUG
		if (hifn_debug)
			device_printf(sc->sc_dev,
			    "Strong crypto already enabled!\n");
#endif
		goto report;
	}

	if (encl != 0 && encl != HIFN_PUSTAT_ENA_0) {
#ifdef HIFN_DEBUG
		if (hifn_debug)
			device_printf(sc->sc_dev,
			      "Unknown encryption level 0x%x\n", encl);
#endif
		return 1;
	}

	/* Put the chip into unlock mode and prime the secret registers. */
	WRITE_REG_1(sc, HIFN_1_DMA_CNFG, HIFN_DMACNFG_UNLOCK |
	    HIFN_DMACNFG_MSTRESET | HIFN_DMACNFG_DMARESET | HIFN_DMACNFG_MODE);
	DELAY(1000);
	addr = READ_REG_1(sc, HIFN_UNLOCK_SECRET1);
	DELAY(1000);
	WRITE_REG_1(sc, HIFN_UNLOCK_SECRET2, 0);
	DELAY(1000);

	/* Feed the 13-word signature sequence derived from the card id. */
	for (i = 0; i <= 12; i++) {
		addr = hifn_next_signature(addr, offtbl[i] + 0x101);
		WRITE_REG_1(sc, HIFN_UNLOCK_SECRET2, addr);

		DELAY(1000);
	}

	/* Re-read the enable level to see whether the unlock took. */
	WRITE_REG_0(sc, HIFN_0_PUCNFG, ramcfg | HIFN_PUCNFG_CHIPID);
	encl = READ_REG_0(sc, HIFN_0_PUSTAT) & HIFN_PUSTAT_CHIPENA;

#ifdef HIFN_DEBUG
	if (hifn_debug) {
		if (encl != HIFN_PUSTAT_ENA_1 && encl != HIFN_PUSTAT_ENA_2)
			device_printf(sc->sc_dev, "Engine is permanently "
				"locked until next system reset!\n");
		else
			device_printf(sc->sc_dev, "Engine enabled "
				"successfully!\n");
	}
#endif

report:
	/* Restore the registers saved above. */
	WRITE_REG_0(sc, HIFN_0_PUCNFG, ramcfg);
	WRITE_REG_1(sc, HIFN_1_DMA_CNFG, dmacfg);

	switch (encl) {
	case HIFN_PUSTAT_ENA_1:
	case HIFN_PUSTAT_ENA_2:
		break;
	case HIFN_PUSTAT_ENA_0:
	default:
		device_printf(sc->sc_dev, "disabled");
		break;
	}

	return 0;
}
974
/*
 * Give initial values to the registers listed in the "Register Space"
 * section of the HIFN Software Development reference manual.
 *
 * Programs the four descriptor-ring base addresses from the DMA area
 * allocated in hifn_attach, acknowledges/clears all DMA status bits,
 * sets up the interrupt-enable mask in sc_dmaier, and configures the
 * processing unit and DMA polling parameters.
 */
static void
hifn_init_pci_registers(struct hifn_softc *sc)
{
	/* write fixed values needed by the Initialization registers */
	WRITE_REG_0(sc, HIFN_0_PUCTRL, HIFN_PUCTRL_DMAENA);
	WRITE_REG_0(sc, HIFN_0_FIFOCNFG, HIFN_FIFOCNFG_THRESHOLD);
	WRITE_REG_0(sc, HIFN_0_PUIER, HIFN_PUIER_DSTOVER);

	/* write all 4 ring address registers */
	WRITE_REG_1(sc, HIFN_1_DMA_CRAR, sc->sc_dma_physaddr +
	    offsetof(struct hifn_dma, cmdr[0]));
	WRITE_REG_1(sc, HIFN_1_DMA_SRAR, sc->sc_dma_physaddr +
	    offsetof(struct hifn_dma, srcr[0]));
	WRITE_REG_1(sc, HIFN_1_DMA_DRAR, sc->sc_dma_physaddr +
	    offsetof(struct hifn_dma, dstr[0]));
	WRITE_REG_1(sc, HIFN_1_DMA_RRAR, sc->sc_dma_physaddr +
	    offsetof(struct hifn_dma, resr[0]));

	DELAY(2000);

	/* write status register: disable ring control, clear all events */
	WRITE_REG_1(sc, HIFN_1_DMA_CSR,
	    HIFN_DMACSR_D_CTRL_DIS | HIFN_DMACSR_R_CTRL_DIS |
	    HIFN_DMACSR_S_CTRL_DIS | HIFN_DMACSR_C_CTRL_DIS |
	    HIFN_DMACSR_D_ABORT | HIFN_DMACSR_D_DONE | HIFN_DMACSR_D_LAST |
	    HIFN_DMACSR_D_WAIT | HIFN_DMACSR_D_OVER |
	    HIFN_DMACSR_R_ABORT | HIFN_DMACSR_R_DONE | HIFN_DMACSR_R_LAST |
	    HIFN_DMACSR_R_WAIT | HIFN_DMACSR_R_OVER |
	    HIFN_DMACSR_S_ABORT | HIFN_DMACSR_S_DONE | HIFN_DMACSR_S_LAST |
	    HIFN_DMACSR_S_WAIT |
	    HIFN_DMACSR_C_ABORT | HIFN_DMACSR_C_DONE | HIFN_DMACSR_C_LAST |
	    HIFN_DMACSR_C_WAIT |
	    HIFN_DMACSR_ENGINE |
	    ((sc->sc_flags & HIFN_HAS_PUBLIC) ?
		HIFN_DMACSR_PUBDONE : 0) |
	    ((sc->sc_flags & HIFN_IS_7811) ?
		HIFN_DMACSR_ILLW | HIFN_DMACSR_ILLR : 0));

	/* No ring is active yet; build the interrupt-enable mask. */
	sc->sc_d_busy = sc->sc_r_busy = sc->sc_s_busy = sc->sc_c_busy = 0;
	sc->sc_dmaier |= HIFN_DMAIER_R_DONE | HIFN_DMAIER_C_ABORT |
	    HIFN_DMAIER_D_OVER | HIFN_DMAIER_R_OVER |
	    HIFN_DMAIER_S_ABORT | HIFN_DMAIER_D_ABORT | HIFN_DMAIER_R_ABORT |
	    ((sc->sc_flags & HIFN_IS_7811) ?
		HIFN_DMAIER_ILLW | HIFN_DMAIER_ILLR : 0);
	sc->sc_dmaier &= ~HIFN_DMAIER_C_WAIT;
	WRITE_REG_1(sc, HIFN_1_DMA_IER, sc->sc_dmaier);

	WRITE_REG_0(sc, HIFN_0_PUCNFG, HIFN_PUCNFG_COMPSING |
	    HIFN_PUCNFG_DRFR_128 | HIFN_PUCNFG_TCALLPHASES |
	    HIFN_PUCNFG_TCDRVTOTEM | HIFN_PUCNFG_BUS32 |
	    (sc->sc_drammodel ? HIFN_PUCNFG_DRAM : HIFN_PUCNFG_SRAM));

	WRITE_REG_0(sc, HIFN_0_PUISR, HIFN_PUISR_DSTOVER);
	WRITE_REG_1(sc, HIFN_1_DMA_CNFG, HIFN_DMACNFG_MSTRESET |
	    HIFN_DMACNFG_DMARESET | HIFN_DMACNFG_MODE | HIFN_DMACNFG_LAST |
	    ((HIFN_POLL_FREQUENCY << 16 ) & HIFN_DMACNFG_POLLFREQ) |
	    ((HIFN_POLL_SCALAR << 8) & HIFN_DMACNFG_POLLINVAL));
}
1037
1038/*
1039 * The maximum number of sessions supported by the card
1040 * is dependent on the amount of context ram, which
1041 * encryption algorithms are enabled, and how compression
1042 * is configured.  This should be configured before this
1043 * routine is called.
1044 */
1045static void
1046hifn_sessions(struct hifn_softc *sc)
1047{
1048	u_int32_t pucnfg;
1049	int ctxsize;
1050
1051	pucnfg = READ_REG_0(sc, HIFN_0_PUCNFG);
1052
1053	if (pucnfg & HIFN_PUCNFG_COMPSING) {
1054		if (pucnfg & HIFN_PUCNFG_ENCCNFG)
1055			ctxsize = 128;
1056		else
1057			ctxsize = 512;
1058		sc->sc_maxses = 1 +
1059		    ((sc->sc_ramsize - 32768) / ctxsize);
1060	} else
1061		sc->sc_maxses = sc->sc_ramsize / 16384;
1062
1063	if (sc->sc_maxses > 2048)
1064		sc->sc_maxses = 2048;
1065}
1066
1067/*
1068 * Determine ram type (sram or dram).  Board should be just out of a reset
1069 * state when this is called.
1070 */
1071static int
1072hifn_ramtype(struct hifn_softc *sc)
1073{
1074	u_int8_t data[8], dataexpect[8];
1075	int i;
1076
1077	for (i = 0; i < sizeof(data); i++)
1078		data[i] = dataexpect[i] = 0x55;
1079	if (hifn_writeramaddr(sc, 0, data))
1080		return (-1);
1081	if (hifn_readramaddr(sc, 0, data))
1082		return (-1);
1083	if (bcmp(data, dataexpect, sizeof(data)) != 0) {
1084		sc->sc_drammodel = 1;
1085		return (0);
1086	}
1087
1088	for (i = 0; i < sizeof(data); i++)
1089		data[i] = dataexpect[i] = 0xaa;
1090	if (hifn_writeramaddr(sc, 0, data))
1091		return (-1);
1092	if (hifn_readramaddr(sc, 0, data))
1093		return (-1);
1094	if (bcmp(data, dataexpect, sizeof(data)) != 0) {
1095		sc->sc_drammodel = 1;
1096		return (0);
1097	}
1098
1099	return (0);
1100}
1101
1102#define	HIFN_SRAM_MAX		(32 << 20)
1103#define	HIFN_SRAM_STEP_SIZE	16384
1104#define	HIFN_SRAM_GRANULARITY	(HIFN_SRAM_MAX / HIFN_SRAM_STEP_SIZE)
1105
1106static int
1107hifn_sramsize(struct hifn_softc *sc)
1108{
1109	u_int32_t a;
1110	u_int8_t data[8];
1111	u_int8_t dataexpect[sizeof(data)];
1112	int32_t i;
1113
1114	for (i = 0; i < sizeof(data); i++)
1115		data[i] = dataexpect[i] = i ^ 0x5a;
1116
1117	for (i = HIFN_SRAM_GRANULARITY - 1; i >= 0; i--) {
1118		a = i * HIFN_SRAM_STEP_SIZE;
1119		bcopy(&i, data, sizeof(i));
1120		hifn_writeramaddr(sc, a, data);
1121	}
1122
1123	for (i = 0; i < HIFN_SRAM_GRANULARITY; i++) {
1124		a = i * HIFN_SRAM_STEP_SIZE;
1125		bcopy(&i, dataexpect, sizeof(i));
1126		if (hifn_readramaddr(sc, a, data) < 0)
1127			return (0);
1128		if (bcmp(data, dataexpect, sizeof(data)) != 0)
1129			return (0);
1130		sc->sc_ramsize = a + HIFN_SRAM_STEP_SIZE;
1131	}
1132
1133	return (0);
1134}
1135
1136/*
1137 * XXX For dram boards, one should really try all of the
1138 * HIFN_PUCNFG_DSZ_*'s.  This just assumes that PUCNFG
1139 * is already set up correctly.
1140 */
1141static int
1142hifn_dramsize(struct hifn_softc *sc)
1143{
1144	u_int32_t cnfg;
1145
1146	cnfg = READ_REG_0(sc, HIFN_0_PUCNFG) &
1147	    HIFN_PUCNFG_DRAMMASK;
1148	sc->sc_ramsize = 1 << ((cnfg >> 13) + 18);
1149	return (0);
1150}
1151
1152static void
1153hifn_alloc_slot(struct hifn_softc *sc, int *cmdp, int *srcp, int *dstp, int *resp)
1154{
1155	struct hifn_dma *dma = sc->sc_dma;
1156
1157	if (dma->cmdi == HIFN_D_CMD_RSIZE) {
1158		dma->cmdi = 0;
1159		dma->cmdr[HIFN_D_CMD_RSIZE].l = htole32(HIFN_D_VALID |
1160		    HIFN_D_JUMP | HIFN_D_MASKDONEIRQ);
1161		HIFN_CMDR_SYNC(sc, HIFN_D_CMD_RSIZE,
1162		    BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
1163	}
1164	*cmdp = dma->cmdi++;
1165	dma->cmdk = dma->cmdi;
1166
1167	if (dma->srci == HIFN_D_SRC_RSIZE) {
1168		dma->srci = 0;
1169		dma->srcr[HIFN_D_SRC_RSIZE].l = htole32(HIFN_D_VALID |
1170		    HIFN_D_JUMP | HIFN_D_MASKDONEIRQ);
1171		HIFN_SRCR_SYNC(sc, HIFN_D_SRC_RSIZE,
1172		    BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
1173	}
1174	*srcp = dma->srci++;
1175	dma->srck = dma->srci;
1176
1177	if (dma->dsti == HIFN_D_DST_RSIZE) {
1178		dma->dsti = 0;
1179		dma->dstr[HIFN_D_DST_RSIZE].l = htole32(HIFN_D_VALID |
1180		    HIFN_D_JUMP | HIFN_D_MASKDONEIRQ);
1181		HIFN_DSTR_SYNC(sc, HIFN_D_DST_RSIZE,
1182		    BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
1183	}
1184	*dstp = dma->dsti++;
1185	dma->dstk = dma->dsti;
1186
1187	if (dma->resi == HIFN_D_RES_RSIZE) {
1188		dma->resi = 0;
1189		dma->resr[HIFN_D_RES_RSIZE].l = htole32(HIFN_D_VALID |
1190		    HIFN_D_JUMP | HIFN_D_MASKDONEIRQ);
1191		HIFN_RESR_SYNC(sc, HIFN_D_RES_RSIZE,
1192		    BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
1193	}
1194	*resp = dma->resi++;
1195	dma->resk = dma->resi;
1196}
1197
1198static int
1199hifn_writeramaddr(struct hifn_softc *sc, int addr, u_int8_t *data)
1200{
1201	struct hifn_dma *dma = sc->sc_dma;
1202	hifn_base_command_t wc;
1203	const u_int32_t masks = HIFN_D_VALID | HIFN_D_LAST | HIFN_D_MASKDONEIRQ;
1204	int r, cmdi, resi, srci, dsti;
1205
1206	wc.masks = htole16(3 << 13);
1207	wc.session_num = htole16(addr >> 14);
1208	wc.total_source_count = htole16(8);
1209	wc.total_dest_count = htole16(addr & 0x3fff);
1210
1211	hifn_alloc_slot(sc, &cmdi, &srci, &dsti, &resi);
1212
1213	WRITE_REG_1(sc, HIFN_1_DMA_CSR,
1214	    HIFN_DMACSR_C_CTRL_ENA | HIFN_DMACSR_S_CTRL_ENA |
1215	    HIFN_DMACSR_D_CTRL_ENA | HIFN_DMACSR_R_CTRL_ENA);
1216
1217	/* build write command */
1218	bzero(dma->command_bufs[cmdi], HIFN_MAX_COMMAND);
1219	*(hifn_base_command_t *)dma->command_bufs[cmdi] = wc;
1220	bcopy(data, &dma->test_src, sizeof(dma->test_src));
1221
1222	dma->srcr[srci].p = htole32(sc->sc_dma_physaddr
1223	    + offsetof(struct hifn_dma, test_src));
1224	dma->dstr[dsti].p = htole32(sc->sc_dma_physaddr
1225	    + offsetof(struct hifn_dma, test_dst));
1226
1227	dma->cmdr[cmdi].l = htole32(16 | masks);
1228	dma->srcr[srci].l = htole32(8 | masks);
1229	dma->dstr[dsti].l = htole32(4 | masks);
1230	dma->resr[resi].l = htole32(4 | masks);
1231
1232	bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap,
1233	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1234
1235	for (r = 10000; r >= 0; r--) {
1236		DELAY(10);
1237		bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap,
1238		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1239		if ((dma->resr[resi].l & htole32(HIFN_D_VALID)) == 0)
1240			break;
1241		bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap,
1242		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1243	}
1244	if (r == 0) {
1245		device_printf(sc->sc_dev, "writeramaddr -- "
1246		    "result[%d](addr %d) still valid\n", resi, addr);
1247		r = -1;
1248		return (-1);
1249	} else
1250		r = 0;
1251
1252	WRITE_REG_1(sc, HIFN_1_DMA_CSR,
1253	    HIFN_DMACSR_C_CTRL_DIS | HIFN_DMACSR_S_CTRL_DIS |
1254	    HIFN_DMACSR_D_CTRL_DIS | HIFN_DMACSR_R_CTRL_DIS);
1255
1256	return (r);
1257}
1258
1259static int
1260hifn_readramaddr(struct hifn_softc *sc, int addr, u_int8_t *data)
1261{
1262	struct hifn_dma *dma = sc->sc_dma;
1263	hifn_base_command_t rc;
1264	const u_int32_t masks = HIFN_D_VALID | HIFN_D_LAST | HIFN_D_MASKDONEIRQ;
1265	int r, cmdi, srci, dsti, resi;
1266
1267	rc.masks = htole16(2 << 13);
1268	rc.session_num = htole16(addr >> 14);
1269	rc.total_source_count = htole16(addr & 0x3fff);
1270	rc.total_dest_count = htole16(8);
1271
1272	hifn_alloc_slot(sc, &cmdi, &srci, &dsti, &resi);
1273
1274	WRITE_REG_1(sc, HIFN_1_DMA_CSR,
1275	    HIFN_DMACSR_C_CTRL_ENA | HIFN_DMACSR_S_CTRL_ENA |
1276	    HIFN_DMACSR_D_CTRL_ENA | HIFN_DMACSR_R_CTRL_ENA);
1277
1278	bzero(dma->command_bufs[cmdi], HIFN_MAX_COMMAND);
1279	*(hifn_base_command_t *)dma->command_bufs[cmdi] = rc;
1280
1281	dma->srcr[srci].p = htole32(sc->sc_dma_physaddr +
1282	    offsetof(struct hifn_dma, test_src));
1283	dma->test_src = 0;
1284	dma->dstr[dsti].p =  htole32(sc->sc_dma_physaddr +
1285	    offsetof(struct hifn_dma, test_dst));
1286	dma->test_dst = 0;
1287	dma->cmdr[cmdi].l = htole32(8 | masks);
1288	dma->srcr[srci].l = htole32(8 | masks);
1289	dma->dstr[dsti].l = htole32(8 | masks);
1290	dma->resr[resi].l = htole32(HIFN_MAX_RESULT | masks);
1291
1292	bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap,
1293	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1294
1295	for (r = 10000; r >= 0; r--) {
1296		DELAY(10);
1297		bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap,
1298		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1299		if ((dma->resr[resi].l & htole32(HIFN_D_VALID)) == 0)
1300			break;
1301		bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap,
1302		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1303	}
1304	if (r == 0) {
1305		device_printf(sc->sc_dev, "readramaddr -- "
1306		    "result[%d](addr %d) still valid\n", resi, addr);
1307		r = -1;
1308	} else {
1309		r = 0;
1310		bcopy(&dma->test_dst, data, sizeof(dma->test_dst));
1311	}
1312
1313	WRITE_REG_1(sc, HIFN_1_DMA_CSR,
1314	    HIFN_DMACSR_C_CTRL_DIS | HIFN_DMACSR_S_CTRL_DIS |
1315	    HIFN_DMACSR_D_CTRL_DIS | HIFN_DMACSR_R_CTRL_DIS);
1316
1317	return (r);
1318}
1319
1320/*
1321 * Initialize the descriptor rings.
1322 */
1323static void
1324hifn_init_dma(struct hifn_softc *sc)
1325{
1326	struct hifn_dma *dma = sc->sc_dma;
1327	int i;
1328
1329	hifn_set_retry(sc);
1330
1331	/* initialize static pointer values */
1332	for (i = 0; i < HIFN_D_CMD_RSIZE; i++)
1333		dma->cmdr[i].p = htole32(sc->sc_dma_physaddr +
1334		    offsetof(struct hifn_dma, command_bufs[i][0]));
1335	for (i = 0; i < HIFN_D_RES_RSIZE; i++)
1336		dma->resr[i].p = htole32(sc->sc_dma_physaddr +
1337		    offsetof(struct hifn_dma, result_bufs[i][0]));
1338
1339	dma->cmdr[HIFN_D_CMD_RSIZE].p =
1340	    htole32(sc->sc_dma_physaddr + offsetof(struct hifn_dma, cmdr[0]));
1341	dma->srcr[HIFN_D_SRC_RSIZE].p =
1342	    htole32(sc->sc_dma_physaddr + offsetof(struct hifn_dma, srcr[0]));
1343	dma->dstr[HIFN_D_DST_RSIZE].p =
1344	    htole32(sc->sc_dma_physaddr + offsetof(struct hifn_dma, dstr[0]));
1345	dma->resr[HIFN_D_RES_RSIZE].p =
1346	    htole32(sc->sc_dma_physaddr + offsetof(struct hifn_dma, resr[0]));
1347
1348	dma->cmdu = dma->srcu = dma->dstu = dma->resu = 0;
1349	dma->cmdi = dma->srci = dma->dsti = dma->resi = 0;
1350	dma->cmdk = dma->srck = dma->dstk = dma->resk = 0;
1351}
1352
1353/*
1354 * Writes out the raw command buffer space.  Returns the
1355 * command buffer size.
1356 */
1357static u_int
1358hifn_write_command(struct hifn_command *cmd, u_int8_t *buf)
1359{
1360#define	MIN(a,b)	((a)<(b)?(a):(b))
1361	u_int8_t *buf_pos;
1362	hifn_base_command_t *base_cmd;
1363	hifn_mac_command_t *mac_cmd;
1364	hifn_crypt_command_t *cry_cmd;
1365	int using_mac, using_crypt, len;
1366	u_int32_t dlen, slen;
1367
1368	buf_pos = buf;
1369	using_mac = cmd->base_masks & HIFN_BASE_CMD_MAC;
1370	using_crypt = cmd->base_masks & HIFN_BASE_CMD_CRYPT;
1371
1372	base_cmd = (hifn_base_command_t *)buf_pos;
1373	base_cmd->masks = htole16(cmd->base_masks);
1374	slen = cmd->src_mapsize;
1375	if (cmd->sloplen)
1376		dlen = cmd->dst_mapsize - cmd->sloplen + sizeof(u_int32_t);
1377	else
1378		dlen = cmd->dst_mapsize;
1379	base_cmd->total_source_count = htole16(slen & HIFN_BASE_CMD_LENMASK_LO);
1380	base_cmd->total_dest_count = htole16(dlen & HIFN_BASE_CMD_LENMASK_LO);
1381	dlen >>= 16;
1382	slen >>= 16;
1383	base_cmd->session_num = htole16(cmd->session_num |
1384	    ((slen << HIFN_BASE_CMD_SRCLEN_S) & HIFN_BASE_CMD_SRCLEN_M) |
1385	    ((dlen << HIFN_BASE_CMD_DSTLEN_S) & HIFN_BASE_CMD_DSTLEN_M));
1386	buf_pos += sizeof(hifn_base_command_t);
1387
1388	if (using_mac) {
1389		mac_cmd = (hifn_mac_command_t *)buf_pos;
1390		dlen = cmd->maccrd->crd_len;
1391		mac_cmd->source_count = htole16(dlen & 0xffff);
1392		dlen >>= 16;
1393		mac_cmd->masks = htole16(cmd->mac_masks |
1394		    ((dlen << HIFN_MAC_CMD_SRCLEN_S) & HIFN_MAC_CMD_SRCLEN_M));
1395		mac_cmd->header_skip = htole16(cmd->maccrd->crd_skip);
1396		mac_cmd->reserved = 0;
1397		buf_pos += sizeof(hifn_mac_command_t);
1398	}
1399
1400	if (using_crypt) {
1401		cry_cmd = (hifn_crypt_command_t *)buf_pos;
1402		dlen = cmd->enccrd->crd_len;
1403		cry_cmd->source_count = htole16(dlen & 0xffff);
1404		dlen >>= 16;
1405		cry_cmd->masks = htole16(cmd->cry_masks |
1406		    ((dlen << HIFN_CRYPT_CMD_SRCLEN_S) & HIFN_CRYPT_CMD_SRCLEN_M));
1407		cry_cmd->header_skip = htole16(cmd->enccrd->crd_skip);
1408		cry_cmd->reserved = 0;
1409		buf_pos += sizeof(hifn_crypt_command_t);
1410	}
1411
1412	if (using_mac && cmd->mac_masks & HIFN_MAC_CMD_NEW_KEY) {
1413		bcopy(cmd->mac, buf_pos, HIFN_MAC_KEY_LENGTH);
1414		buf_pos += HIFN_MAC_KEY_LENGTH;
1415	}
1416
1417	if (using_crypt && cmd->cry_masks & HIFN_CRYPT_CMD_NEW_KEY) {
1418		switch (cmd->cry_masks & HIFN_CRYPT_CMD_ALG_MASK) {
1419		case HIFN_CRYPT_CMD_ALG_3DES:
1420			bcopy(cmd->ck, buf_pos, HIFN_3DES_KEY_LENGTH);
1421			buf_pos += HIFN_3DES_KEY_LENGTH;
1422			break;
1423		case HIFN_CRYPT_CMD_ALG_DES:
1424			bcopy(cmd->ck, buf_pos, HIFN_DES_KEY_LENGTH);
1425			buf_pos += cmd->cklen;
1426			break;
1427		case HIFN_CRYPT_CMD_ALG_RC4:
1428			len = 256;
1429			do {
1430				int clen;
1431
1432				clen = MIN(cmd->cklen, len);
1433				bcopy(cmd->ck, buf_pos, clen);
1434				len -= clen;
1435				buf_pos += clen;
1436			} while (len > 0);
1437			bzero(buf_pos, 4);
1438			buf_pos += 4;
1439			break;
1440		}
1441	}
1442
1443	if (using_crypt && cmd->cry_masks & HIFN_CRYPT_CMD_NEW_IV) {
1444		bcopy(cmd->iv, buf_pos, HIFN_IV_LENGTH);
1445		buf_pos += HIFN_IV_LENGTH;
1446	}
1447
1448	if ((cmd->base_masks & (HIFN_BASE_CMD_MAC|HIFN_BASE_CMD_CRYPT)) == 0) {
1449		bzero(buf_pos, 8);
1450		buf_pos += 8;
1451	}
1452
1453	return (buf_pos - buf);
1454#undef	MIN
1455}
1456
1457static int
1458hifn_dmamap_aligned(struct hifn_operand *op)
1459{
1460	int i;
1461
1462	for (i = 0; i < op->nsegs; i++) {
1463		if (op->segs[i].ds_addr & 3)
1464			return (0);
1465		if ((i != (op->nsegs - 1)) && (op->segs[i].ds_len & 3))
1466			return (0);
1467	}
1468	return (1);
1469}
1470
1471static int
1472hifn_dmamap_load_dst(struct hifn_softc *sc, struct hifn_command *cmd)
1473{
1474	struct hifn_dma *dma = sc->sc_dma;
1475	struct hifn_operand *dst = &cmd->dst;
1476	u_int32_t p, l;
1477	int idx, used = 0, i;
1478
1479	idx = dma->dsti;
1480	for (i = 0; i < dst->nsegs - 1; i++) {
1481		dma->dstr[idx].p = htole32(dst->segs[i].ds_addr);
1482		dma->dstr[idx].l = htole32(HIFN_D_VALID |
1483		    HIFN_D_MASKDONEIRQ | dst->segs[i].ds_len);
1484		HIFN_DSTR_SYNC(sc, idx,
1485		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1486		used++;
1487
1488		if (++idx == HIFN_D_DST_RSIZE) {
1489			dma->dstr[idx].l = htole32(HIFN_D_VALID |
1490			    HIFN_D_JUMP | HIFN_D_MASKDONEIRQ);
1491			HIFN_DSTR_SYNC(sc, idx,
1492			    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1493			idx = 0;
1494		}
1495	}
1496
1497	if (cmd->sloplen == 0) {
1498		p = dst->segs[i].ds_addr;
1499		l = HIFN_D_VALID | HIFN_D_MASKDONEIRQ | HIFN_D_LAST |
1500		    dst->segs[i].ds_len;
1501	} else {
1502		p = sc->sc_dma_physaddr +
1503		    offsetof(struct hifn_dma, slop[cmd->slopidx]);
1504		l = HIFN_D_VALID | HIFN_D_MASKDONEIRQ | HIFN_D_LAST |
1505		    sizeof(u_int32_t);
1506
1507		if ((dst->segs[i].ds_len - cmd->sloplen) != 0) {
1508			dma->dstr[idx].p = htole32(dst->segs[i].ds_addr);
1509			dma->dstr[idx].l = htole32(HIFN_D_VALID |
1510			    HIFN_D_MASKDONEIRQ |
1511			    (dst->segs[i].ds_len - cmd->sloplen));
1512			HIFN_DSTR_SYNC(sc, idx,
1513			    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1514			used++;
1515
1516			if (++idx == HIFN_D_DST_RSIZE) {
1517				dma->dstr[idx].l = htole32(HIFN_D_VALID |
1518				    HIFN_D_JUMP | HIFN_D_MASKDONEIRQ);
1519				HIFN_DSTR_SYNC(sc, idx,
1520				    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1521				idx = 0;
1522			}
1523		}
1524	}
1525	dma->dstr[idx].p = htole32(p);
1526	dma->dstr[idx].l = htole32(l);
1527	HIFN_DSTR_SYNC(sc, idx, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1528	used++;
1529
1530	if (++idx == HIFN_D_DST_RSIZE) {
1531		dma->dstr[idx].l = htole32(HIFN_D_VALID | HIFN_D_JUMP |
1532		    HIFN_D_MASKDONEIRQ);
1533		HIFN_DSTR_SYNC(sc, idx,
1534		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1535		idx = 0;
1536	}
1537
1538	dma->dsti = idx;
1539	dma->dstu += used;
1540	return (idx);
1541}
1542
1543static int
1544hifn_dmamap_load_src(struct hifn_softc *sc, struct hifn_command *cmd)
1545{
1546	struct hifn_dma *dma = sc->sc_dma;
1547	struct hifn_operand *src = &cmd->src;
1548	int idx, i;
1549	u_int32_t last = 0;
1550
1551	idx = dma->srci;
1552	for (i = 0; i < src->nsegs; i++) {
1553		if (i == src->nsegs - 1)
1554			last = HIFN_D_LAST;
1555
1556		dma->srcr[idx].p = htole32(src->segs[i].ds_addr);
1557		dma->srcr[idx].l = htole32(src->segs[i].ds_len |
1558		    HIFN_D_VALID | HIFN_D_MASKDONEIRQ | last);
1559		HIFN_SRCR_SYNC(sc, idx,
1560		    BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
1561
1562		if (++idx == HIFN_D_SRC_RSIZE) {
1563			dma->srcr[idx].l = htole32(HIFN_D_VALID |
1564			    HIFN_D_JUMP | HIFN_D_MASKDONEIRQ);
1565			HIFN_SRCR_SYNC(sc, HIFN_D_SRC_RSIZE,
1566			    BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
1567			idx = 0;
1568		}
1569	}
1570	dma->srci = idx;
1571	dma->srcu += src->nsegs;
1572	return (idx);
1573}
1574
1575static void
1576hifn_op_cb(void* arg, bus_dma_segment_t *seg, int nsegs, bus_size_t mapsize, int error)
1577{
1578	struct hifn_operand *op = arg;
1579
1580	KASSERT(nsegs <= MAX_SCATTER,
1581		("hifn_op_cb: too many DMA segments (%u > %u) "
1582		 "returned when mapping operand", nsegs, MAX_SCATTER));
1583	op->mapsize = mapsize;
1584	op->nsegs = nsegs;
1585	bcopy(seg, op->segs, nsegs * sizeof (seg[0]));
1586}
1587
1588static int
1589hifn_crypto(
1590	struct hifn_softc *sc,
1591	struct hifn_command *cmd,
1592	struct cryptop *crp,
1593	int hint)
1594{
1595	struct	hifn_dma *dma = sc->sc_dma;
1596	u_int32_t cmdlen;
1597	int cmdi, resi, err = 0;
1598
1599	/*
1600	 * need 1 cmd, and 1 res
1601	 *
1602	 * NB: check this first since it's easy.
1603	 */
1604	if ((dma->cmdu + 1) > HIFN_D_CMD_RSIZE ||
1605	    (dma->resu + 1) > HIFN_D_RES_RSIZE) {
1606#ifdef HIFN_DEBUG
1607		if (hifn_debug) {
1608			device_printf(sc->sc_dev,
1609				"cmd/result exhaustion, cmdu %u resu %u\n",
1610				dma->cmdu, dma->resu);
1611		}
1612#endif
1613		hifnstats.hst_nomem_cr++;
1614		return (ERESTART);
1615	}
1616
1617	if (bus_dmamap_create(sc->sc_dmat, BUS_DMA_NOWAIT, &cmd->src_map)) {
1618		hifnstats.hst_nomem_map++;
1619		return (ENOMEM);
1620	}
1621
1622	if (crp->crp_flags & CRYPTO_F_IMBUF) {
1623		if (bus_dmamap_load_mbuf(sc->sc_dmat, cmd->src_map,
1624		    cmd->src_m, hifn_op_cb, &cmd->src, BUS_DMA_NOWAIT)) {
1625			hifnstats.hst_nomem_load++;
1626			err = ENOMEM;
1627			goto err_srcmap1;
1628		}
1629	} else if (crp->crp_flags & CRYPTO_F_IOV) {
1630		if (bus_dmamap_load_uio(sc->sc_dmat, cmd->src_map,
1631		    cmd->src_io, hifn_op_cb, &cmd->src, BUS_DMA_NOWAIT)) {
1632			hifnstats.hst_nomem_load++;
1633			err = ENOMEM;
1634			goto err_srcmap1;
1635		}
1636	} else {
1637		err = EINVAL;
1638		goto err_srcmap1;
1639	}
1640
1641	if (hifn_dmamap_aligned(&cmd->src)) {
1642		cmd->sloplen = cmd->src_mapsize & 3;
1643		cmd->dst = cmd->src;
1644	} else {
1645		if (crp->crp_flags & CRYPTO_F_IOV) {
1646			err = EINVAL;
1647			goto err_srcmap;
1648		} else if (crp->crp_flags & CRYPTO_F_IMBUF) {
1649			int totlen, len;
1650			struct mbuf *m, *m0, *mlast;
1651
1652			KASSERT(cmd->dst_m == cmd->src_m,
1653				("hifn_crypto: dst_m initialized improperly"));
1654			hifnstats.hst_unaligned++;
1655			/*
1656			 * Source is not aligned on a longword boundary.
1657			 * Copy the data to insure alignment.  If we fail
1658			 * to allocate mbufs or clusters while doing this
1659			 * we return ERESTART so the operation is requeued
1660			 * at the crypto later, but only if there are
1661			 * ops already posted to the hardware; otherwise we
1662			 * have no guarantee that we'll be re-entered.
1663			 */
1664			totlen = cmd->src_mapsize;
1665			if (cmd->src_m->m_flags & M_PKTHDR) {
1666				len = MHLEN;
1667				MGETHDR(m0, M_DONTWAIT, MT_DATA);
1668			} else {
1669				len = MLEN;
1670				MGET(m0, M_DONTWAIT, MT_DATA);
1671			}
1672			if (m0 == NULL) {
1673				hifnstats.hst_nomem_mbuf++;
1674				err = dma->cmdu ? ERESTART : ENOMEM;
1675				goto err_srcmap;
1676			}
1677			if (len == MHLEN) {
1678				M_COPY_PKTHDR(m0, cmd->src_m);
1679			}
1680			if (totlen >= MINCLSIZE) {
1681				MCLGET(m0, M_DONTWAIT);
1682				if ((m0->m_flags & M_EXT) == 0) {
1683					hifnstats.hst_nomem_mcl++;
1684					err = dma->cmdu ? ERESTART : ENOMEM;
1685					m_freem(m0);
1686					goto err_srcmap;
1687				}
1688				len = MCLBYTES;
1689			}
1690			totlen -= len;
1691			m0->m_pkthdr.len = m0->m_len = len;
1692			mlast = m0;
1693
1694			while (totlen > 0) {
1695				MGET(m, M_DONTWAIT, MT_DATA);
1696				if (m == NULL) {
1697					hifnstats.hst_nomem_mbuf++;
1698					err = dma->cmdu ? ERESTART : ENOMEM;
1699					m_freem(m0);
1700					goto err_srcmap;
1701				}
1702				len = MLEN;
1703				if (totlen >= MINCLSIZE) {
1704					MCLGET(m, M_DONTWAIT);
1705					if ((m->m_flags & M_EXT) == 0) {
1706						hifnstats.hst_nomem_mcl++;
1707						err = dma->cmdu ? ERESTART : ENOMEM;
1708						mlast->m_next = m;
1709						m_freem(m0);
1710						goto err_srcmap;
1711					}
1712					len = MCLBYTES;
1713				}
1714
1715				m->m_len = len;
1716				m0->m_pkthdr.len += len;
1717				totlen -= len;
1718
1719				mlast->m_next = m;
1720				mlast = m;
1721			}
1722			cmd->dst_m = m0;
1723		}
1724	}
1725
1726	if (cmd->dst_map == NULL) {
1727		if (bus_dmamap_create(sc->sc_dmat, BUS_DMA_NOWAIT, &cmd->dst_map)) {
1728			hifnstats.hst_nomem_map++;
1729			err = ENOMEM;
1730			goto err_srcmap;
1731		}
1732		if (crp->crp_flags & CRYPTO_F_IMBUF) {
1733			if (bus_dmamap_load_mbuf(sc->sc_dmat, cmd->dst_map,
1734			    cmd->dst_m, hifn_op_cb, &cmd->dst, BUS_DMA_NOWAIT)) {
1735				hifnstats.hst_nomem_map++;
1736				err = ENOMEM;
1737				goto err_dstmap1;
1738			}
1739		} else if (crp->crp_flags & CRYPTO_F_IOV) {
1740			if (bus_dmamap_load_uio(sc->sc_dmat, cmd->dst_map,
1741			    cmd->dst_io, hifn_op_cb, &cmd->dst, BUS_DMA_NOWAIT)) {
1742				hifnstats.hst_nomem_load++;
1743				err = ENOMEM;
1744				goto err_dstmap1;
1745			}
1746		}
1747	}
1748
1749#ifdef HIFN_DEBUG
1750	if (hifn_debug) {
1751		device_printf(sc->sc_dev,
1752		    "Entering cmd: stat %8x ien %8x u %d/%d/%d/%d n %d/%d\n",
1753		    READ_REG_1(sc, HIFN_1_DMA_CSR),
1754		    READ_REG_1(sc, HIFN_1_DMA_IER),
1755		    dma->cmdu, dma->srcu, dma->dstu, dma->resu,
1756		    cmd->src_nsegs, cmd->dst_nsegs);
1757	}
1758#endif
1759
1760	if (cmd->src_map == cmd->dst_map) {
1761		bus_dmamap_sync(sc->sc_dmat, cmd->src_map,
1762		    BUS_DMASYNC_PREWRITE|BUS_DMASYNC_PREREAD);
1763	} else {
1764		bus_dmamap_sync(sc->sc_dmat, cmd->src_map,
1765		    BUS_DMASYNC_PREWRITE);
1766		bus_dmamap_sync(sc->sc_dmat, cmd->dst_map,
1767		    BUS_DMASYNC_PREREAD);
1768	}
1769
1770	/*
1771	 * need N src, and N dst
1772	 */
1773	if ((dma->srcu + cmd->src_nsegs) > HIFN_D_SRC_RSIZE ||
1774	    (dma->dstu + cmd->dst_nsegs + 1) > HIFN_D_DST_RSIZE) {
1775#ifdef HIFN_DEBUG
1776		if (hifn_debug) {
1777			device_printf(sc->sc_dev,
1778				"src/dst exhaustion, srcu %u+%u dstu %u+%u\n",
1779				dma->srcu, cmd->src_nsegs,
1780				dma->dstu, cmd->dst_nsegs);
1781		}
1782#endif
1783		hifnstats.hst_nomem_sd++;
1784		err = ERESTART;
1785		goto err_dstmap;
1786	}
1787
1788	if (dma->cmdi == HIFN_D_CMD_RSIZE) {
1789		dma->cmdi = 0;
1790		dma->cmdr[HIFN_D_CMD_RSIZE].l = htole32(HIFN_D_VALID |
1791		    HIFN_D_JUMP | HIFN_D_MASKDONEIRQ);
1792		HIFN_CMDR_SYNC(sc, HIFN_D_CMD_RSIZE,
1793		    BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
1794	}
1795	cmdi = dma->cmdi++;
1796	cmdlen = hifn_write_command(cmd, dma->command_bufs[cmdi]);
1797	HIFN_CMD_SYNC(sc, cmdi, BUS_DMASYNC_PREWRITE);
1798
1799	/* .p for command/result already set */
1800	dma->cmdr[cmdi].l = htole32(cmdlen | HIFN_D_VALID | HIFN_D_LAST |
1801	    HIFN_D_MASKDONEIRQ);
1802	HIFN_CMDR_SYNC(sc, cmdi,
1803	    BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
1804	dma->cmdu++;
1805	if (sc->sc_c_busy == 0) {
1806		WRITE_REG_1(sc, HIFN_1_DMA_CSR, HIFN_DMACSR_C_CTRL_ENA);
1807		sc->sc_c_busy = 1;
1808	}
1809
1810	/*
1811	 * We don't worry about missing an interrupt (which a "command wait"
1812	 * interrupt salvages us from), unless there is more than one command
1813	 * in the queue.
1814	 */
1815	if (dma->cmdu > 1) {
1816		sc->sc_dmaier |= HIFN_DMAIER_C_WAIT;
1817		WRITE_REG_1(sc, HIFN_1_DMA_IER, sc->sc_dmaier);
1818	}
1819
1820	hifnstats.hst_ipackets++;
1821	hifnstats.hst_ibytes += cmd->src_mapsize;
1822
1823	hifn_dmamap_load_src(sc, cmd);
1824	if (sc->sc_s_busy == 0) {
1825		WRITE_REG_1(sc, HIFN_1_DMA_CSR, HIFN_DMACSR_S_CTRL_ENA);
1826		sc->sc_s_busy = 1;
1827	}
1828
1829	/*
1830	 * Unlike other descriptors, we don't mask done interrupt from
1831	 * result descriptor.
1832	 */
1833#ifdef HIFN_DEBUG
1834	if (hifn_debug)
1835		printf("load res\n");
1836#endif
1837	if (dma->resi == HIFN_D_RES_RSIZE) {
1838		dma->resi = 0;
1839		dma->resr[HIFN_D_RES_RSIZE].l = htole32(HIFN_D_VALID |
1840		    HIFN_D_JUMP | HIFN_D_MASKDONEIRQ);
1841		HIFN_RESR_SYNC(sc, HIFN_D_RES_RSIZE,
1842		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1843	}
1844	resi = dma->resi++;
1845	KASSERT(dma->hifn_commands[resi] == NULL,
1846		("hifn_crypto: command slot %u busy", resi));
1847	dma->hifn_commands[resi] = cmd;
1848	HIFN_RES_SYNC(sc, resi, BUS_DMASYNC_PREREAD);
1849	if ((hint & CRYPTO_HINT_MORE) && sc->sc_curbatch < hifn_maxbatch) {
1850		dma->resr[resi].l = htole32(HIFN_MAX_RESULT |
1851		    HIFN_D_VALID | HIFN_D_LAST | HIFN_D_MASKDONEIRQ);
1852		sc->sc_curbatch++;
1853		if (sc->sc_curbatch > hifnstats.hst_maxbatch)
1854			hifnstats.hst_maxbatch = sc->sc_curbatch;
1855		hifnstats.hst_totbatch++;
1856	} else {
1857		dma->resr[resi].l = htole32(HIFN_MAX_RESULT |
1858		    HIFN_D_VALID | HIFN_D_LAST);
1859		sc->sc_curbatch = 0;
1860	}
1861	HIFN_RESR_SYNC(sc, resi,
1862	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1863	dma->resu++;
1864	if (sc->sc_r_busy == 0) {
1865		WRITE_REG_1(sc, HIFN_1_DMA_CSR, HIFN_DMACSR_R_CTRL_ENA);
1866		sc->sc_r_busy = 1;
1867	}
1868
1869	if (cmd->sloplen)
1870		cmd->slopidx = resi;
1871
1872	hifn_dmamap_load_dst(sc, cmd);
1873
1874	if (sc->sc_d_busy == 0) {
1875		WRITE_REG_1(sc, HIFN_1_DMA_CSR, HIFN_DMACSR_D_CTRL_ENA);
1876		sc->sc_d_busy = 1;
1877	}
1878
1879#ifdef HIFN_DEBUG
1880	if (hifn_debug) {
1881		device_printf(sc->sc_dev, "command: stat %8x ier %8x\n",
1882		    READ_REG_1(sc, HIFN_1_DMA_CSR),
1883		    READ_REG_1(sc, HIFN_1_DMA_IER));
1884	}
1885#endif
1886
1887	sc->sc_active = 5;
1888	KASSERT(err == 0, ("hifn_crypto: success with error %u", err));
1889	return (err);		/* success */
1890
1891err_dstmap:
1892	if (cmd->src_map != cmd->dst_map)
1893		bus_dmamap_unload(sc->sc_dmat, cmd->dst_map);
1894err_dstmap1:
1895	if (cmd->src_map != cmd->dst_map)
1896		bus_dmamap_destroy(sc->sc_dmat, cmd->dst_map);
1897err_srcmap:
1898	if (crp->crp_flags & CRYPTO_F_IMBUF) {
1899		if (cmd->src_m != cmd->dst_m)
1900			m_freem(cmd->dst_m);
1901	}
1902	bus_dmamap_unload(sc->sc_dmat, cmd->src_map);
1903err_srcmap1:
1904	bus_dmamap_destroy(sc->sc_dmat, cmd->src_map);
1905	return (err);
1906}
1907
1908static void
1909hifn_tick(void* vsc)
1910{
1911	struct hifn_softc *sc = vsc;
1912
1913	HIFN_LOCK(sc);
1914	if (sc->sc_active == 0) {
1915		struct hifn_dma *dma = sc->sc_dma;
1916		u_int32_t r = 0;
1917
1918		if (dma->cmdu == 0 && sc->sc_c_busy) {
1919			sc->sc_c_busy = 0;
1920			r |= HIFN_DMACSR_C_CTRL_DIS;
1921		}
1922		if (dma->srcu == 0 && sc->sc_s_busy) {
1923			sc->sc_s_busy = 0;
1924			r |= HIFN_DMACSR_S_CTRL_DIS;
1925		}
1926		if (dma->dstu == 0 && sc->sc_d_busy) {
1927			sc->sc_d_busy = 0;
1928			r |= HIFN_DMACSR_D_CTRL_DIS;
1929		}
1930		if (dma->resu == 0 && sc->sc_r_busy) {
1931			sc->sc_r_busy = 0;
1932			r |= HIFN_DMACSR_R_CTRL_DIS;
1933		}
1934		if (r)
1935			WRITE_REG_1(sc, HIFN_1_DMA_CSR, r);
1936	} else
1937		sc->sc_active--;
1938	HIFN_UNLOCK(sc);
1939	callout_reset(&sc->sc_tickto, hz, hifn_tick, sc);
1940}
1941
1942static void
1943hifn_intr(void *arg)
1944{
1945	struct hifn_softc *sc = arg;
1946	struct hifn_dma *dma;
1947	u_int32_t dmacsr, restart;
1948	int i, u;
1949
1950	HIFN_LOCK(sc);
1951	dma = sc->sc_dma;
1952
1953	dmacsr = READ_REG_1(sc, HIFN_1_DMA_CSR);
1954
1955#ifdef HIFN_DEBUG
1956	if (hifn_debug) {
1957		device_printf(sc->sc_dev,
1958		    "irq: stat %08x ien %08x damier %08x i %d/%d/%d/%d k %d/%d/%d/%d u %d/%d/%d/%d\n",
1959		    dmacsr, READ_REG_1(sc, HIFN_1_DMA_IER), sc->sc_dmaier,
1960		    dma->cmdi, dma->srci, dma->dsti, dma->resi,
1961		    dma->cmdk, dma->srck, dma->dstk, dma->resk,
1962		    dma->cmdu, dma->srcu, dma->dstu, dma->resu);
1963	}
1964#endif
1965
1966	/* Nothing in the DMA unit interrupted */
1967	if ((dmacsr & sc->sc_dmaier) == 0) {
1968		hifnstats.hst_noirq++;
1969		HIFN_UNLOCK(sc);
1970		return;
1971	}
1972
1973	WRITE_REG_1(sc, HIFN_1_DMA_CSR, dmacsr & sc->sc_dmaier);
1974
1975	if ((sc->sc_flags & HIFN_HAS_PUBLIC) &&
1976	    (dmacsr & HIFN_DMACSR_PUBDONE))
1977		WRITE_REG_1(sc, HIFN_1_PUB_STATUS,
1978		    READ_REG_1(sc, HIFN_1_PUB_STATUS) | HIFN_PUBSTS_DONE);
1979
1980	restart = dmacsr & (HIFN_DMACSR_D_OVER | HIFN_DMACSR_R_OVER);
1981	if (restart)
1982		device_printf(sc->sc_dev, "overrun %x\n", dmacsr);
1983
1984	if (sc->sc_flags & HIFN_IS_7811) {
1985		if (dmacsr & HIFN_DMACSR_ILLR)
1986			device_printf(sc->sc_dev, "illegal read\n");
1987		if (dmacsr & HIFN_DMACSR_ILLW)
1988			device_printf(sc->sc_dev, "illegal write\n");
1989	}
1990
1991	restart = dmacsr & (HIFN_DMACSR_C_ABORT | HIFN_DMACSR_S_ABORT |
1992	    HIFN_DMACSR_D_ABORT | HIFN_DMACSR_R_ABORT);
1993	if (restart) {
1994		device_printf(sc->sc_dev, "abort, resetting.\n");
1995		hifnstats.hst_abort++;
1996		hifn_abort(sc);
1997		HIFN_UNLOCK(sc);
1998		return;
1999	}
2000
2001	if ((dmacsr & HIFN_DMACSR_C_WAIT) && (dma->cmdu == 0)) {
2002		/*
2003		 * If no slots to process and we receive a "waiting on
2004		 * command" interrupt, we disable the "waiting on command"
2005		 * (by clearing it).
2006		 */
2007		sc->sc_dmaier &= ~HIFN_DMAIER_C_WAIT;
2008		WRITE_REG_1(sc, HIFN_1_DMA_IER, sc->sc_dmaier);
2009	}
2010
2011	/* clear the rings */
2012	i = dma->resk; u = dma->resu;
2013	while (u != 0) {
2014		HIFN_RESR_SYNC(sc, i,
2015		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
2016		if (dma->resr[i].l & htole32(HIFN_D_VALID)) {
2017			HIFN_RESR_SYNC(sc, i,
2018			    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2019			break;
2020		}
2021
2022		if (i != HIFN_D_RES_RSIZE) {
2023			struct hifn_command *cmd;
2024			u_int8_t *macbuf = NULL;
2025
2026			HIFN_RES_SYNC(sc, i, BUS_DMASYNC_POSTREAD);
2027			cmd = dma->hifn_commands[i];
2028			KASSERT(cmd != NULL,
2029				("hifn_intr: null command slot %u", i));
2030			dma->hifn_commands[i] = NULL;
2031
2032			if (cmd->base_masks & HIFN_BASE_CMD_MAC) {
2033				macbuf = dma->result_bufs[i];
2034				macbuf += 12;
2035			}
2036
2037			hifn_callback(sc, cmd, macbuf);
2038			hifnstats.hst_opackets++;
2039			u--;
2040		}
2041
2042		if (++i == (HIFN_D_RES_RSIZE + 1))
2043			i = 0;
2044	}
2045	dma->resk = i; dma->resu = u;
2046
2047	i = dma->srck; u = dma->srcu;
2048	while (u != 0) {
2049		if (i == HIFN_D_SRC_RSIZE)
2050			i = 0;
2051		HIFN_SRCR_SYNC(sc, i,
2052		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
2053		if (dma->srcr[i].l & htole32(HIFN_D_VALID)) {
2054			HIFN_SRCR_SYNC(sc, i,
2055			    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2056			break;
2057		}
2058		i++, u--;
2059	}
2060	dma->srck = i; dma->srcu = u;
2061
2062	i = dma->cmdk; u = dma->cmdu;
2063	while (u != 0) {
2064		HIFN_CMDR_SYNC(sc, i,
2065		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
2066		if (dma->cmdr[i].l & htole32(HIFN_D_VALID)) {
2067			HIFN_CMDR_SYNC(sc, i,
2068			    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2069			break;
2070		}
2071		if (i != HIFN_D_CMD_RSIZE) {
2072			u--;
2073			HIFN_CMD_SYNC(sc, i, BUS_DMASYNC_POSTWRITE);
2074		}
2075		if (++i == (HIFN_D_CMD_RSIZE + 1))
2076			i = 0;
2077	}
2078	dma->cmdk = i; dma->cmdu = u;
2079
2080	if (sc->sc_needwakeup) {		/* XXX check high watermark */
2081		int wakeup = sc->sc_needwakeup & (CRYPTO_SYMQ|CRYPTO_ASYMQ);
2082#ifdef HIFN_DEBUG
2083		if (hifn_debug)
2084			device_printf(sc->sc_dev,
2085				"wakeup crypto (%x) u %d/%d/%d/%d\n",
2086				sc->sc_needwakeup,
2087				dma->cmdu, dma->srcu, dma->dstu, dma->resu);
2088#endif
2089		sc->sc_needwakeup &= ~wakeup;
2090		crypto_unblock(sc->sc_cid, wakeup);
2091	}
2092	HIFN_UNLOCK(sc);
2093}
2094
2095/*
2096 * Allocate a new 'session' and return an encoded session id.  'sidp'
2097 * contains our registration id, and should contain an encoded session
2098 * id on successful allocation.
2099 */
2100static int
2101hifn_newsession(void *arg, u_int32_t *sidp, struct cryptoini *cri)
2102{
2103	struct cryptoini *c;
2104	struct hifn_softc *sc = arg;
2105	int i, mac = 0, cry = 0;
2106
2107	KASSERT(sc != NULL, ("hifn_newsession: null softc"));
2108	if (sidp == NULL || cri == NULL || sc == NULL)
2109		return (EINVAL);
2110
2111	for (i = 0; i < sc->sc_maxses; i++)
2112		if (sc->sc_sessions[i].hs_state == HS_STATE_FREE)
2113			break;
2114	if (i == sc->sc_maxses)
2115		return (ENOMEM);
2116
2117	for (c = cri; c != NULL; c = c->cri_next) {
2118		switch (c->cri_alg) {
2119		case CRYPTO_MD5:
2120		case CRYPTO_SHA1:
2121		case CRYPTO_MD5_HMAC:
2122		case CRYPTO_SHA1_HMAC:
2123			if (mac)
2124				return (EINVAL);
2125			mac = 1;
2126			break;
2127		case CRYPTO_DES_CBC:
2128		case CRYPTO_3DES_CBC:
2129			/* XXX this may read fewer, does it matter? */
2130			read_random(sc->sc_sessions[i].hs_iv, HIFN_IV_LENGTH);
2131			/*FALLTHROUGH*/
2132		case CRYPTO_ARC4:
2133			if (cry)
2134				return (EINVAL);
2135			cry = 1;
2136			break;
2137		default:
2138			return (EINVAL);
2139		}
2140	}
2141	if (mac == 0 && cry == 0)
2142		return (EINVAL);
2143
2144	*sidp = HIFN_SID(device_get_unit(sc->sc_dev), i);
2145	sc->sc_sessions[i].hs_state = HS_STATE_USED;
2146
2147	return (0);
2148}
2149
2150/*
2151 * Deallocate a session.
2152 * XXX this routine should run a zero'd mac/encrypt key into context ram.
2153 * XXX to blow away any keys already stored there.
2154 */
2155static int
2156hifn_freesession(void *arg, u_int64_t tid)
2157{
2158	struct hifn_softc *sc = arg;
2159	int session;
2160	u_int32_t sid = ((u_int32_t) tid) & 0xffffffff;
2161
2162	KASSERT(sc != NULL, ("hifn_freesession: null softc"));
2163	if (sc == NULL)
2164		return (EINVAL);
2165
2166	session = HIFN_SESSION(sid);
2167	if (session >= sc->sc_maxses)
2168		return (EINVAL);
2169
2170	bzero(&sc->sc_sessions[session], sizeof(sc->sc_sessions[session]));
2171	return (0);
2172}
2173
2174static int
2175hifn_process(void *arg, struct cryptop *crp, int hint)
2176{
2177	struct hifn_softc *sc = arg;
2178	struct hifn_command *cmd = NULL;
2179	int session, err;
2180	struct cryptodesc *crd1, *crd2, *maccrd, *enccrd;
2181
2182	if (crp == NULL || crp->crp_callback == NULL) {
2183		hifnstats.hst_invalid++;
2184		return (EINVAL);
2185	}
2186	session = HIFN_SESSION(crp->crp_sid);
2187
2188	if (sc == NULL || session >= sc->sc_maxses) {
2189		err = EINVAL;
2190		goto errout;
2191	}
2192
2193	cmd = malloc(sizeof(struct hifn_command), M_DEVBUF, M_NOWAIT | M_ZERO);
2194	if (cmd == NULL) {
2195		hifnstats.hst_nomem++;
2196		err = ENOMEM;
2197		goto errout;
2198	}
2199
2200	if (crp->crp_flags & CRYPTO_F_IMBUF) {
2201		cmd->src_m = (struct mbuf *)crp->crp_buf;
2202		cmd->dst_m = (struct mbuf *)crp->crp_buf;
2203	} else if (crp->crp_flags & CRYPTO_F_IOV) {
2204		cmd->src_io = (struct uio *)crp->crp_buf;
2205		cmd->dst_io = (struct uio *)crp->crp_buf;
2206	} else {
2207		err = EINVAL;
2208		goto errout;	/* XXX we don't handle contiguous buffers! */
2209	}
2210
2211	crd1 = crp->crp_desc;
2212	if (crd1 == NULL) {
2213		err = EINVAL;
2214		goto errout;
2215	}
2216	crd2 = crd1->crd_next;
2217
2218	if (crd2 == NULL) {
2219		if (crd1->crd_alg == CRYPTO_MD5_HMAC ||
2220		    crd1->crd_alg == CRYPTO_SHA1_HMAC ||
2221		    crd1->crd_alg == CRYPTO_SHA1 ||
2222		    crd1->crd_alg == CRYPTO_MD5) {
2223			maccrd = crd1;
2224			enccrd = NULL;
2225		} else if (crd1->crd_alg == CRYPTO_DES_CBC ||
2226		    crd1->crd_alg == CRYPTO_3DES_CBC ||
2227		    crd1->crd_alg == CRYPTO_ARC4) {
2228			if ((crd1->crd_flags & CRD_F_ENCRYPT) == 0)
2229				cmd->base_masks |= HIFN_BASE_CMD_DECODE;
2230			maccrd = NULL;
2231			enccrd = crd1;
2232		} else {
2233			err = EINVAL;
2234			goto errout;
2235		}
2236	} else {
2237		if ((crd1->crd_alg == CRYPTO_MD5_HMAC ||
2238                     crd1->crd_alg == CRYPTO_SHA1_HMAC ||
2239                     crd1->crd_alg == CRYPTO_MD5 ||
2240                     crd1->crd_alg == CRYPTO_SHA1) &&
2241		    (crd2->crd_alg == CRYPTO_DES_CBC ||
2242		     crd2->crd_alg == CRYPTO_3DES_CBC ||
2243		     crd2->crd_alg == CRYPTO_ARC4) &&
2244		    ((crd2->crd_flags & CRD_F_ENCRYPT) == 0)) {
2245			cmd->base_masks = HIFN_BASE_CMD_DECODE;
2246			maccrd = crd1;
2247			enccrd = crd2;
2248		} else if ((crd1->crd_alg == CRYPTO_DES_CBC ||
2249		     crd1->crd_alg == CRYPTO_ARC4 ||
2250		     crd1->crd_alg == CRYPTO_3DES_CBC) &&
2251		    (crd2->crd_alg == CRYPTO_MD5_HMAC ||
2252                     crd2->crd_alg == CRYPTO_SHA1_HMAC ||
2253                     crd2->crd_alg == CRYPTO_MD5 ||
2254                     crd2->crd_alg == CRYPTO_SHA1) &&
2255		    (crd1->crd_flags & CRD_F_ENCRYPT)) {
2256			enccrd = crd1;
2257			maccrd = crd2;
2258		} else {
2259			/*
2260			 * We cannot order the 7751 as requested
2261			 */
2262			err = EINVAL;
2263			goto errout;
2264		}
2265	}
2266
2267	if (enccrd) {
2268		cmd->enccrd = enccrd;
2269		cmd->base_masks |= HIFN_BASE_CMD_CRYPT;
2270		switch (enccrd->crd_alg) {
2271		case CRYPTO_ARC4:
2272			cmd->cry_masks |= HIFN_CRYPT_CMD_ALG_RC4;
2273			if ((enccrd->crd_flags & CRD_F_ENCRYPT)
2274			    != sc->sc_sessions[session].hs_prev_op)
2275				sc->sc_sessions[session].hs_state =
2276				    HS_STATE_USED;
2277			break;
2278		case CRYPTO_DES_CBC:
2279			cmd->cry_masks |= HIFN_CRYPT_CMD_ALG_DES |
2280			    HIFN_CRYPT_CMD_MODE_CBC |
2281			    HIFN_CRYPT_CMD_NEW_IV;
2282			break;
2283		case CRYPTO_3DES_CBC:
2284			cmd->cry_masks |= HIFN_CRYPT_CMD_ALG_3DES |
2285			    HIFN_CRYPT_CMD_MODE_CBC |
2286			    HIFN_CRYPT_CMD_NEW_IV;
2287			break;
2288		default:
2289			err = EINVAL;
2290			goto errout;
2291		}
2292		if (enccrd->crd_alg != CRYPTO_ARC4) {
2293			if (enccrd->crd_flags & CRD_F_ENCRYPT) {
2294				if (enccrd->crd_flags & CRD_F_IV_EXPLICIT)
2295					bcopy(enccrd->crd_iv, cmd->iv,
2296					    HIFN_IV_LENGTH);
2297				else
2298					bcopy(sc->sc_sessions[session].hs_iv,
2299					    cmd->iv, HIFN_IV_LENGTH);
2300
2301				if ((enccrd->crd_flags & CRD_F_IV_PRESENT)
2302				    == 0) {
2303					if (crp->crp_flags & CRYPTO_F_IMBUF)
2304						m_copyback(cmd->src_m,
2305						    enccrd->crd_inject,
2306						    HIFN_IV_LENGTH, cmd->iv);
2307					else if (crp->crp_flags & CRYPTO_F_IOV)
2308						cuio_copyback(cmd->src_io,
2309						    enccrd->crd_inject,
2310						    HIFN_IV_LENGTH, cmd->iv);
2311				}
2312			} else {
2313				if (enccrd->crd_flags & CRD_F_IV_EXPLICIT)
2314					bcopy(enccrd->crd_iv, cmd->iv,
2315					    HIFN_IV_LENGTH);
2316				else if (crp->crp_flags & CRYPTO_F_IMBUF)
2317					m_copydata(cmd->src_m,
2318					    enccrd->crd_inject,
2319					    HIFN_IV_LENGTH, cmd->iv);
2320				else if (crp->crp_flags & CRYPTO_F_IOV)
2321					cuio_copydata(cmd->src_io,
2322					    enccrd->crd_inject,
2323					    HIFN_IV_LENGTH, cmd->iv);
2324			}
2325		}
2326
2327		cmd->ck = enccrd->crd_key;
2328		cmd->cklen = enccrd->crd_klen >> 3;
2329
2330		if (sc->sc_sessions[session].hs_state == HS_STATE_USED)
2331			cmd->cry_masks |= HIFN_CRYPT_CMD_NEW_KEY;
2332	}
2333
2334	if (maccrd) {
2335		cmd->maccrd = maccrd;
2336		cmd->base_masks |= HIFN_BASE_CMD_MAC;
2337
2338		switch (maccrd->crd_alg) {
2339		case CRYPTO_MD5:
2340			cmd->mac_masks |= HIFN_MAC_CMD_ALG_MD5 |
2341			    HIFN_MAC_CMD_RESULT | HIFN_MAC_CMD_MODE_HASH |
2342			    HIFN_MAC_CMD_POS_IPSEC;
2343                       break;
2344		case CRYPTO_MD5_HMAC:
2345			cmd->mac_masks |= HIFN_MAC_CMD_ALG_MD5 |
2346			    HIFN_MAC_CMD_RESULT | HIFN_MAC_CMD_MODE_HMAC |
2347			    HIFN_MAC_CMD_POS_IPSEC | HIFN_MAC_CMD_TRUNC;
2348			break;
2349		case CRYPTO_SHA1:
2350			cmd->mac_masks |= HIFN_MAC_CMD_ALG_SHA1 |
2351			    HIFN_MAC_CMD_RESULT | HIFN_MAC_CMD_MODE_HASH |
2352			    HIFN_MAC_CMD_POS_IPSEC;
2353			break;
2354		case CRYPTO_SHA1_HMAC:
2355			cmd->mac_masks |= HIFN_MAC_CMD_ALG_SHA1 |
2356			    HIFN_MAC_CMD_RESULT | HIFN_MAC_CMD_MODE_HMAC |
2357			    HIFN_MAC_CMD_POS_IPSEC | HIFN_MAC_CMD_TRUNC;
2358			break;
2359		}
2360
2361		if ((maccrd->crd_alg == CRYPTO_SHA1_HMAC ||
2362		     maccrd->crd_alg == CRYPTO_MD5_HMAC) &&
2363		    sc->sc_sessions[session].hs_state == HS_STATE_USED) {
2364			cmd->mac_masks |= HIFN_MAC_CMD_NEW_KEY;
2365			bcopy(maccrd->crd_key, cmd->mac, maccrd->crd_klen >> 3);
2366			bzero(cmd->mac + (maccrd->crd_klen >> 3),
2367			    HIFN_MAC_KEY_LENGTH - (maccrd->crd_klen >> 3));
2368		}
2369	}
2370
2371	cmd->crp = crp;
2372	cmd->session_num = session;
2373	cmd->softc = sc;
2374
2375	err = hifn_crypto(sc, cmd, crp, hint);
2376	if (!err) {
2377		if (enccrd)
2378			sc->sc_sessions[session].hs_prev_op =
2379				enccrd->crd_flags & CRD_F_ENCRYPT;
2380		if (sc->sc_sessions[session].hs_state == HS_STATE_USED)
2381			sc->sc_sessions[session].hs_state = HS_STATE_KEY;
2382		return 0;
2383	} else if (err == ERESTART) {
2384		/*
2385		 * There weren't enough resources to dispatch the request
2386		 * to the part.  Notify the caller so they'll requeue this
2387		 * request and resubmit it again soon.
2388		 */
2389#ifdef HIFN_DEBUG
2390		if (hifn_debug)
2391			device_printf(sc->sc_dev, "requeue request\n");
2392#endif
2393		free(cmd, M_DEVBUF);
2394		sc->sc_needwakeup |= CRYPTO_SYMQ;
2395		return (err);
2396	}
2397
2398errout:
2399	if (cmd != NULL)
2400		free(cmd, M_DEVBUF);
2401	if (err == EINVAL)
2402		hifnstats.hst_invalid++;
2403	else
2404		hifnstats.hst_nomem++;
2405	crp->crp_etype = err;
2406	crypto_done(crp);
2407	return (err);
2408}
2409
/*
 * Clean up after a DMA abort: walk the result ring and dispose of every
 * outstanding command.  Slots the chip already completed (HIFN_D_VALID
 * clear) are salvaged and delivered through hifn_callback(); the rest
 * have their DMA maps torn down and are failed back to the crypto layer
 * with ENOMEM.  Afterwards, sessions whose keys had been uploaded are
 * demoted so the keys are re-sent, and the board is reset and
 * reinitialized.  Called from hifn_intr() with the softc lock held.
 */
static void
hifn_abort(struct hifn_softc *sc)
{
	struct hifn_dma *dma = sc->sc_dma;
	struct hifn_command *cmd;
	struct cryptop *crp;
	int i, u;		/* result ring index / outstanding count */

	i = dma->resk; u = dma->resu;
	while (u != 0) {
		cmd = dma->hifn_commands[i];
		KASSERT(cmd != NULL, ("hifn_abort: null command slot %u", i));
		dma->hifn_commands[i] = NULL;
		crp = cmd->crp;

		if ((dma->resr[i].l & htole32(HIFN_D_VALID)) == 0) {
			/* Salvage what we can. */
			u_int8_t *macbuf;

			if (cmd->base_masks & HIFN_BASE_CMD_MAC) {
				macbuf = dma->result_bufs[i];
				/* skip 12-byte result header (same offset
				 * as the completion path in hifn_intr) */
				macbuf += 12;
			} else
				macbuf = NULL;
			hifnstats.hst_opackets++;
			hifn_callback(sc, cmd, macbuf);
		} else {
			/* Still owned by the chip: tear down DMA state. */
			if (cmd->src_map == cmd->dst_map) {
				bus_dmamap_sync(sc->sc_dmat, cmd->src_map,
				    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
			} else {
				bus_dmamap_sync(sc->sc_dmat, cmd->src_map,
				    BUS_DMASYNC_POSTWRITE);
				bus_dmamap_sync(sc->sc_dmat, cmd->dst_map,
				    BUS_DMASYNC_POSTREAD);
			}

			if (cmd->src_m != cmd->dst_m) {
				/* Separate dst chain: free src, hand dst
				 * back to the caller. */
				m_freem(cmd->src_m);
				crp->crp_buf = (caddr_t)cmd->dst_m;
			}

			/* non-shared buffers cannot be restarted */
			if (cmd->src_map != cmd->dst_map) {
				/*
				 * XXX should be EAGAIN, delayed until
				 * after the reset.
				 */
				crp->crp_etype = ENOMEM;
				bus_dmamap_unload(sc->sc_dmat, cmd->dst_map);
				bus_dmamap_destroy(sc->sc_dmat, cmd->dst_map);
			} else
				crp->crp_etype = ENOMEM;

			bus_dmamap_unload(sc->sc_dmat, cmd->src_map);
			bus_dmamap_destroy(sc->sc_dmat, cmd->src_map);

			free(cmd, M_DEVBUF);
			if (crp->crp_etype != EAGAIN)
				crypto_done(crp);
		}

		if (++i == HIFN_D_RES_RSIZE)
			i = 0;
		u--;
	}
	dma->resk = i; dma->resu = u;

	/* Force upload of key next time */
	for (i = 0; i < sc->sc_maxses; i++)
		if (sc->sc_sessions[i].hs_state == HS_STATE_KEY)
			sc->sc_sessions[i].hs_state = HS_STATE_USED;

	hifn_reset_board(sc, 1);
	hifn_init_dma(sc);
	hifn_init_pci_registers(sc);
}
2487
/*
 * Completion handler for a single command: sync and tear down the DMA
 * maps, fix up the destination mbuf chain, copy back any "slop" bytes
 * staged in the shared DMA area, reclaim destination ring slots, save
 * the trailing cipher block as the session's next IV on CBC encryption,
 * copy the computed MAC back to the caller, then free the command and
 * complete the request via crypto_done().
 */
static void
hifn_callback(struct hifn_softc *sc, struct hifn_command *cmd, u_int8_t *macbuf)
{
	struct hifn_dma *dma = sc->sc_dma;
	struct cryptop *crp = cmd->crp;
	struct cryptodesc *crd;
	struct mbuf *m;
	int totlen, i, u;

	/* Finish DMA before the CPU touches the data. */
	if (cmd->src_map == cmd->dst_map) {
		bus_dmamap_sync(sc->sc_dmat, cmd->src_map,
		    BUS_DMASYNC_POSTWRITE | BUS_DMASYNC_POSTREAD);
	} else {
		bus_dmamap_sync(sc->sc_dmat, cmd->src_map,
		    BUS_DMASYNC_POSTWRITE);
		bus_dmamap_sync(sc->sc_dmat, cmd->dst_map,
		    BUS_DMASYNC_POSTREAD);
	}

	if (crp->crp_flags & CRYPTO_F_IMBUF) {
		if (cmd->src_m != cmd->dst_m) {
			/*
			 * A separate destination chain was used: trim it
			 * down to the source length, hand it to the
			 * caller, and release the original chain.
			 */
			crp->crp_buf = (caddr_t)cmd->dst_m;
			totlen = cmd->src_mapsize;
			for (m = cmd->dst_m; m != NULL; m = m->m_next) {
				if (totlen < m->m_len) {
					m->m_len = totlen;
					totlen = 0;
				} else
					totlen -= m->m_len;
			}
			cmd->dst_m->m_pkthdr.len = cmd->src_m->m_pkthdr.len;
			m_freem(cmd->src_m);
		}
	}

	if (cmd->sloplen != 0) {
		/* Copy the staged tail bytes back into the caller's buffer. */
		if (crp->crp_flags & CRYPTO_F_IMBUF)
			m_copyback((struct mbuf *)crp->crp_buf,
			    cmd->src_mapsize - cmd->sloplen,
			    cmd->sloplen, (caddr_t)&dma->slop[cmd->slopidx]);
		else if (crp->crp_flags & CRYPTO_F_IOV)
			cuio_copyback((struct uio *)crp->crp_buf,
			    cmd->src_mapsize - cmd->sloplen,
			    cmd->sloplen, (caddr_t)&dma->slop[cmd->slopidx]);
	}

	/* Reclaim completed destination ring descriptors. */
	i = dma->dstk; u = dma->dstu;
	while (u != 0) {
		if (i == HIFN_D_DST_RSIZE)
			i = 0;
		bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
		if (dma->dstr[i].l & htole32(HIFN_D_VALID)) {
			/* Still owned by the chip; stop here. */
			bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap,
			    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
			break;
		}
		i++, u--;
	}
	dma->dstk = i; dma->dstu = u;

	hifnstats.hst_obytes += cmd->dst_mapsize;

	/*
	 * On CBC encryption (CRYPT set, DECODE clear), save the last
	 * cipher block of the DES/3DES descriptor as the session's
	 * next IV.
	 */
	if ((cmd->base_masks & (HIFN_BASE_CMD_CRYPT | HIFN_BASE_CMD_DECODE)) ==
	    HIFN_BASE_CMD_CRYPT) {
		for (crd = crp->crp_desc; crd; crd = crd->crd_next) {
			if (crd->crd_alg != CRYPTO_DES_CBC &&
			    crd->crd_alg != CRYPTO_3DES_CBC)
				continue;
			if (crp->crp_flags & CRYPTO_F_IMBUF)
				m_copydata((struct mbuf *)crp->crp_buf,
				    crd->crd_skip + crd->crd_len - HIFN_IV_LENGTH,
				    HIFN_IV_LENGTH,
				    cmd->softc->sc_sessions[cmd->session_num].hs_iv);
			else if (crp->crp_flags & CRYPTO_F_IOV) {
				cuio_copydata((struct uio *)crp->crp_buf,
				    crd->crd_skip + crd->crd_len - HIFN_IV_LENGTH,
				    HIFN_IV_LENGTH,
				    cmd->softc->sc_sessions[cmd->session_num].hs_iv);
			}
			break;
		}
	}

	if (macbuf != NULL) {
		/* Deliver the MAC result to the first hash descriptor. */
		for (crd = crp->crp_desc; crd; crd = crd->crd_next) {
                       int len;

                       if (crd->crd_alg == CRYPTO_MD5)
                               len = 16;
                       else if (crd->crd_alg == CRYPTO_SHA1)
                               len = 20;
                       else if (crd->crd_alg == CRYPTO_MD5_HMAC ||
                           crd->crd_alg == CRYPTO_SHA1_HMAC)
                               len = 12;	/* HMACs are truncated */
                       else
				continue;

			if (crp->crp_flags & CRYPTO_F_IMBUF)
				m_copyback((struct mbuf *)crp->crp_buf,
                                   crd->crd_inject, len, macbuf);
			else if ((crp->crp_flags & CRYPTO_F_IOV) && crp->crp_mac)
				bcopy((caddr_t)macbuf, crp->crp_mac, len);
			break;
		}
	}

	/* Done with DMA resources; release them and the command. */
	if (cmd->src_map != cmd->dst_map) {
		bus_dmamap_unload(sc->sc_dmat, cmd->dst_map);
		bus_dmamap_destroy(sc->sc_dmat, cmd->dst_map);
	}
	bus_dmamap_unload(sc->sc_dmat, cmd->src_map);
	bus_dmamap_destroy(sc->sc_dmat, cmd->src_map);
	free(cmd, M_DEVBUF);
	crypto_done(crp);
}
2604
2605/*
2606 * 7811 PB3 rev/2 parts lock-up on burst writes to Group 0
2607 * and Group 1 registers; avoid conditions that could create
2608 * burst writes by doing a read in between the writes.
2609 *
2610 * NB: The read we interpose is always to the same register;
2611 *     we do this because reading from an arbitrary (e.g. last)
2612 *     register may not always work.
2613 */
2614static void
2615hifn_write_reg_0(struct hifn_softc *sc, bus_size_t reg, u_int32_t val)
2616{
2617	if (sc->sc_flags & HIFN_IS_7811) {
2618		if (sc->sc_bar0_lastreg == reg - 4)
2619			bus_space_read_4(sc->sc_st0, sc->sc_sh0, HIFN_0_PUCNFG);
2620		sc->sc_bar0_lastreg = reg;
2621	}
2622	bus_space_write_4(sc->sc_st0, sc->sc_sh0, reg, val);
2623}
2624
2625static void
2626hifn_write_reg_1(struct hifn_softc *sc, bus_size_t reg, u_int32_t val)
2627{
2628	if (sc->sc_flags & HIFN_IS_7811) {
2629		if (sc->sc_bar1_lastreg == reg - 4)
2630			bus_space_read_4(sc->sc_st1, sc->sc_sh1, HIFN_1_REVID);
2631		sc->sc_bar1_lastreg = reg;
2632	}
2633	bus_space_write_4(sc->sc_st1, sc->sc_sh1, reg, val);
2634}
2635