1/*-
2 * Copyright (C) 2008-2009 Semihalf, Piotr Ziecik
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 *    notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 *    notice, this list of conditions and the following disclaimer in the
12 *    documentation and/or other materials provided with the distribution.
13 *
14 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
15 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
16 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN
17 * NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
18 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
19 * TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
20 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
21 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
22 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
23 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
24 */
25
26/*
27 * Freescale integrated Security Engine (SEC) driver. Currently SEC 2.0 and
28 * 3.0 are supported.
29 */
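
/*
 * Rough overview (derived from the code below): the driver registers with
 * opencrypto(9) through the cryptodev interface and exposes the AESU (AES-CBC),
 * DEU (DES/3DES-CBC) and MDEU (MD5/SHA digests and HMACs) execution units.
 * Requests are turned into hardware descriptors kept in DMA memory; request
 * data is referenced either directly or through scatter/gather link tables.
 * Ready descriptors are fed into one of the crypto channels, and completion
 * (or error) is reported through the primary interrupt handler, which hands
 * finished requests back to opencrypto via crypto_done().
 */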
30
31#include <sys/cdefs.h>
32__FBSDID("$FreeBSD: head/sys/dev/sec/sec.c 194101 2009-06-13 08:57:04Z raj $");
33
34#include <sys/param.h>
35#include <sys/systm.h>
36#include <sys/bus.h>
37#include <sys/endian.h>
38#include <sys/kernel.h>
39#include <sys/lock.h>
40#include <sys/malloc.h>
41#include <sys/mbuf.h>
42#include <sys/module.h>
43#include <sys/mutex.h>
44#include <sys/random.h>
45#include <sys/rman.h>
46
47#include <machine/bus.h>
48#include <machine/ocpbus.h>
49#include <machine/resource.h>
50
51#include <opencrypto/cryptodev.h>
52#include "cryptodev_if.h"
53
54#include <dev/sec/sec.h>
55
56static int	sec_probe(device_t dev);
57static int	sec_attach(device_t dev);
58static int	sec_detach(device_t dev);
59static int	sec_suspend(device_t dev);
60static int	sec_resume(device_t dev);
61static int	sec_shutdown(device_t dev);
62static void	sec_primary_intr(void *arg);
63static void	sec_secondary_intr(void *arg);
64static int	sec_setup_intr(struct sec_softc *sc, struct resource **ires,
65    void **ihand, int *irid, driver_intr_t handler, const char *iname);
66static void	sec_release_intr(struct sec_softc *sc, struct resource *ires,
67    void *ihand, int irid, const char *iname);
68static int	sec_controller_reset(struct sec_softc *sc);
69static int	sec_channel_reset(struct sec_softc *sc, int channel, int full);
70static int	sec_init(struct sec_softc *sc);
71static int	sec_alloc_dma_mem(struct sec_softc *sc,
72    struct sec_dma_mem *dma_mem, bus_size_t size);
73static int	sec_desc_map_dma(struct sec_softc *sc,
74    struct sec_dma_mem *dma_mem, void *mem, bus_size_t size, int type,
75    struct sec_desc_map_info *sdmi);
76static void	sec_free_dma_mem(struct sec_dma_mem *dma_mem);
77static void	sec_enqueue(struct sec_softc *sc);
78static int	sec_enqueue_desc(struct sec_softc *sc, struct sec_desc *desc,
79    int channel);
80static int	sec_eu_channel(struct sec_softc *sc, int eu);
81static int	sec_make_pointer(struct sec_softc *sc, struct sec_desc *desc,
82    u_int n, void *data, bus_size_t doffset, bus_size_t dsize, int dtype);
83static int	sec_make_pointer_direct(struct sec_softc *sc,
84    struct sec_desc *desc, u_int n, bus_addr_t data, bus_size_t dsize);
85static int	sec_alloc_session(struct sec_softc *sc);
86static int	sec_newsession(device_t dev, u_int32_t *sidp,
87    struct cryptoini *cri);
88static int	sec_freesession(device_t dev, uint64_t tid);
89static int	sec_process(device_t dev, struct cryptop *crp, int hint);
90static int	sec_split_cri(struct cryptoini *cri, struct cryptoini **enc,
91    struct cryptoini **mac);
92static int	sec_split_crp(struct cryptop *crp, struct cryptodesc **enc,
93    struct cryptodesc **mac);
94static int	sec_build_common_ns_desc(struct sec_softc *sc,
95    struct sec_desc *desc, struct sec_session *ses, struct cryptop *crp,
96    struct cryptodesc *enc, int buftype);
97static int	sec_build_common_s_desc(struct sec_softc *sc,
98    struct sec_desc *desc, struct sec_session *ses, struct cryptop *crp,
99    struct cryptodesc *enc, struct cryptodesc *mac, int buftype);
100
101static struct sec_session *sec_get_session(struct sec_softc *sc, u_int sid);
102static struct sec_desc *sec_find_desc(struct sec_softc *sc, bus_addr_t paddr);
103
104/* AESU */
105static int	sec_aesu_newsession(struct sec_softc *sc,
106    struct sec_session *ses, struct cryptoini *enc, struct cryptoini *mac);
107static int	sec_aesu_make_desc(struct sec_softc *sc,
108    struct sec_session *ses, struct sec_desc *desc, struct cryptop *crp,
109    int buftype);
110
111/* DEU */
112static int	sec_deu_newsession(struct sec_softc *sc,
113    struct sec_session *ses, struct cryptoini *enc, struct cryptoini *mac);
114static int	sec_deu_make_desc(struct sec_softc *sc,
115    struct sec_session *ses, struct sec_desc *desc, struct cryptop *crp,
116    int buftype);
117
118/* MDEU */
119static int	sec_mdeu_can_handle(u_int alg);
120static int	sec_mdeu_config(struct cryptodesc *crd,
121    u_int *eu, u_int *mode, u_int *hashlen);
122static int	sec_mdeu_newsession(struct sec_softc *sc,
123    struct sec_session *ses, struct cryptoini *enc, struct cryptoini *mac);
124static int	sec_mdeu_make_desc(struct sec_softc *sc,
125    struct sec_session *ses, struct sec_desc *desc, struct cryptop *crp,
126    int buftype);
127
128static device_method_t sec_methods[] = {
129	/* Device interface */
130	DEVMETHOD(device_probe,		sec_probe),
131	DEVMETHOD(device_attach,	sec_attach),
132	DEVMETHOD(device_detach,	sec_detach),
133
134	DEVMETHOD(device_suspend,	sec_suspend),
135	DEVMETHOD(device_resume,	sec_resume),
136	DEVMETHOD(device_shutdown,	sec_shutdown),
137
138	/* Bus interface */
139	DEVMETHOD(bus_print_child,	bus_generic_print_child),
140	DEVMETHOD(bus_driver_added,	bus_generic_driver_added),
141
142	/* Crypto methods */
143	DEVMETHOD(cryptodev_newsession,	sec_newsession),
144	DEVMETHOD(cryptodev_freesession,sec_freesession),
145	DEVMETHOD(cryptodev_process,	sec_process),
146
147	{ 0, 0 }
148};
149static driver_t sec_driver = {
150	"sec",
151	sec_methods,
152	sizeof(struct sec_softc),
153};
154
155static devclass_t sec_devclass;
156DRIVER_MODULE(sec, ocpbus, sec_driver, sec_devclass, 0, 0);
157MODULE_DEPEND(sec, crypto, 1, 1, 1);
158
159static struct sec_eu_methods sec_eus[] = {
160	{
161		sec_aesu_newsession,
162		sec_aesu_make_desc,
163	},
164	{
165		sec_deu_newsession,
166		sec_deu_make_desc,
167	},
168	{
169		sec_mdeu_newsession,
170		sec_mdeu_make_desc,
171	},
172	{ NULL, NULL }
173};
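
/*
 * Note: sec_newsession() walks this table in order and settles on the first
 * execution unit whose sem_newsession callback accepts the request (returns a
 * non-negative value); that entry's sem_make_desc callback is then used to
 * build descriptors for the session.
 */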
174
175static inline void
176sec_sync_dma_mem(struct sec_dma_mem *dma_mem, bus_dmasync_op_t op)
177{
178
179	/* Sync only if dma memory is valid */
180	if (dma_mem->dma_vaddr != NULL)
181		bus_dmamap_sync(dma_mem->dma_tag, dma_mem->dma_map, op);
182}
183
184static inline void
185sec_free_session(struct sec_softc *sc, struct sec_session *ses)
186{
187
188	SEC_LOCK(sc, sessions);
189	ses->ss_used = 0;
190	SEC_UNLOCK(sc, sessions);
191}
192
193static inline void *
194sec_get_pointer_data(struct sec_desc *desc, u_int n)
195{
196
197	return (desc->sd_ptr_dmem[n].dma_vaddr);
198}
199
200static int
201sec_probe(device_t dev)
202{
203	struct sec_softc *sc;
204	device_t parent;
205	uintptr_t devtype;
206	uint64_t id;
207	int error;
208
209	parent = device_get_parent(dev);
210	error = BUS_READ_IVAR(parent, dev, OCPBUS_IVAR_DEVTYPE, &devtype);
211	if (error)
212		return (error);
213
214	if (devtype != OCPBUS_DEVTYPE_SEC)
215		return (ENXIO);
216
217	sc = device_get_softc(dev);
218
219	sc->sc_rrid = 0;
220	sc->sc_rres = bus_alloc_resource(dev, SYS_RES_MEMORY, &sc->sc_rrid,
221	    0ul, ~0ul, SEC_IO_SIZE, RF_ACTIVE);
222
223	if (sc->sc_rres == NULL)
224		return (ENXIO);
225
226	sc->sc_bas.bsh = rman_get_bushandle(sc->sc_rres);
227	sc->sc_bas.bst = rman_get_bustag(sc->sc_rres);
228
229	id = SEC_READ(sc, SEC_ID);
230
231	bus_release_resource(dev, SYS_RES_MEMORY, sc->sc_rrid, sc->sc_rres);
232
233	switch (id) {
234	case SEC_20_ID:
235		device_set_desc(dev, "Freescale Security Engine 2.0");
236		sc->sc_version = 2;
237		break;
238	case SEC_30_ID:
239		device_set_desc(dev, "Freescale Security Engine 3.0");
240		sc->sc_version = 3;
241		break;
242	default:
243		device_printf(dev, "unknown SEC ID 0x%016llx!\n", id);
244		return (ENXIO);
245	}
246
247	return (0);
248}
249
250static int
251sec_attach(device_t dev)
252{
253	struct sec_softc *sc;
254	struct sec_hw_lt *lt;
255	int error = 0;
256	int i;
257
258	sc = device_get_softc(dev);
259	sc->sc_dev = dev;
260	sc->sc_blocked = 0;
261	sc->sc_shutdown = 0;
262
263	sc->sc_cid = crypto_get_driverid(dev, CRYPTOCAP_F_HARDWARE);
264	if (sc->sc_cid < 0) {
265		device_printf(dev, "could not get crypto driver ID!\n");
266		return (ENXIO);
267	}
268
269	/* Init locks */
270	mtx_init(&sc->sc_controller_lock, device_get_nameunit(dev),
271	    "SEC Controller lock", MTX_DEF);
272	mtx_init(&sc->sc_descriptors_lock, device_get_nameunit(dev),
273	    "SEC Descriptors lock", MTX_DEF);
274	mtx_init(&sc->sc_sessions_lock, device_get_nameunit(dev),
275	    "SEC Sessions lock", MTX_DEF);
276
277	/* Allocate I/O memory for SEC registers */
278	sc->sc_rrid = 0;
279	sc->sc_rres = bus_alloc_resource(dev, SYS_RES_MEMORY, &sc->sc_rrid,
280	    0ul, ~0ul, SEC_IO_SIZE, RF_ACTIVE);
281
282	if (sc->sc_rres == NULL) {
283		device_printf(dev, "could not allocate I/O memory!\n");
284		goto fail1;
285	}
286
287	sc->sc_bas.bsh = rman_get_bushandle(sc->sc_rres);
288	sc->sc_bas.bst = rman_get_bustag(sc->sc_rres);
289
290	/* Setup interrupts */
291	sc->sc_pri_irid = 0;
292	error = sec_setup_intr(sc, &sc->sc_pri_ires, &sc->sc_pri_ihand,
293	    &sc->sc_pri_irid, sec_primary_intr, "primary");
294
295	if (error)
296		goto fail2;
297
298	sc->sc_sec_irid = 1;
299	error = sec_setup_intr(sc, &sc->sc_sec_ires, &sc->sc_sec_ihand,
300	    &sc->sc_sec_irid, sec_secondary_intr, "secondary");
301
302	if (error)
303		goto fail3;
304
305	/* Alloc DMA memory for descriptors and link tables */
306	error = sec_alloc_dma_mem(sc, &(sc->sc_desc_dmem),
307	    SEC_DESCRIPTORS * sizeof(struct sec_hw_desc));
308
309	if (error)
310		goto fail4;
311
312	error = sec_alloc_dma_mem(sc, &(sc->sc_lt_dmem),
313	    (SEC_LT_ENTRIES + 1) * sizeof(struct sec_hw_lt));
314
315	if (error)
316		goto fail5;
317
318	/* Fill in descriptors and link tables */
319	for (i = 0; i < SEC_DESCRIPTORS; i++) {
320		sc->sc_desc[i].sd_desc =
321		    (struct sec_hw_desc*)(sc->sc_desc_dmem.dma_vaddr) + i;
322		sc->sc_desc[i].sd_desc_paddr = sc->sc_desc_dmem.dma_paddr +
323		    (i * sizeof(struct sec_hw_desc));
324	}
325
326	for (i = 0; i < SEC_LT_ENTRIES + 1; i++) {
327		sc->sc_lt[i].sl_lt =
328		    (struct sec_hw_lt*)(sc->sc_lt_dmem.dma_vaddr) + i;
329		sc->sc_lt[i].sl_lt_paddr = sc->sc_lt_dmem.dma_paddr +
330		    (i * sizeof(struct sec_hw_lt));
331	}
332
333	/* Last entry in link table is used to create a circle */
334	lt = sc->sc_lt[SEC_LT_ENTRIES].sl_lt;
335	lt->shl_length = 0;
336	lt->shl_r = 0;
337	lt->shl_n = 1;
338	lt->shl_ptr = sc->sc_lt[0].sl_lt_paddr;
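
	/*
	 * Link table layout after the setup above (illustrative):
	 *
	 *   sl_lt[0] ... sl_lt[SEC_LT_ENTRIES - 1]  - entries handed out by
	 *                                             SEC_ALLOC_LT_ENTRY()
	 *   sl_lt[SEC_LT_ENTRIES] (shl_n = 1)       - points back to sl_lt[0],
	 *                                             closing the ring
	 *
	 * The extra entry presumably lets scatter/gather chains wrap around
	 * the end of the table without special-casing the boundary.
	 */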
339
340	/* Init descriptor and link table queue pointers */
341	SEC_CNT_INIT(sc, sc_free_desc_get_cnt, SEC_DESCRIPTORS);
342	SEC_CNT_INIT(sc, sc_free_desc_put_cnt, SEC_DESCRIPTORS);
343	SEC_CNT_INIT(sc, sc_ready_desc_get_cnt, SEC_DESCRIPTORS);
344	SEC_CNT_INIT(sc, sc_ready_desc_put_cnt, SEC_DESCRIPTORS);
345	SEC_CNT_INIT(sc, sc_queued_desc_get_cnt, SEC_DESCRIPTORS);
346	SEC_CNT_INIT(sc, sc_queued_desc_put_cnt, SEC_DESCRIPTORS);
347	SEC_CNT_INIT(sc, sc_lt_alloc_cnt, SEC_LT_ENTRIES);
348	SEC_CNT_INIT(sc, sc_lt_free_cnt, SEC_LT_ENTRIES);
349
350	/* Create masks for fast checks */
351	sc->sc_int_error_mask = 0;
352	for (i = 0; i < SEC_CHANNELS; i++)
353		sc->sc_int_error_mask |= (~0ULL & SEC_INT_CH_ERR(i));
354
355	switch (sc->sc_version) {
356	case 2:
357		sc->sc_channel_idle_mask =
358		    (SEC_CHAN_CSR2_FFLVL_M << SEC_CHAN_CSR2_FFLVL_S) |
359		    (SEC_CHAN_CSR2_MSTATE_M << SEC_CHAN_CSR2_MSTATE_S) |
360		    (SEC_CHAN_CSR2_PSTATE_M << SEC_CHAN_CSR2_PSTATE_S) |
361		    (SEC_CHAN_CSR2_GSTATE_M << SEC_CHAN_CSR2_GSTATE_S);
362		break;
363	case 3:
364		sc->sc_channel_idle_mask =
365		    (SEC_CHAN_CSR3_FFLVL_M << SEC_CHAN_CSR3_FFLVL_S) |
366		    (SEC_CHAN_CSR3_MSTATE_M << SEC_CHAN_CSR3_MSTATE_S) |
367		    (SEC_CHAN_CSR3_PSTATE_M << SEC_CHAN_CSR3_PSTATE_S) |
368		    (SEC_CHAN_CSR3_GSTATE_M << SEC_CHAN_CSR3_GSTATE_S);
369		break;
370	}
371
372	/* Init hardware */
373	error = sec_init(sc);
374
375	if (error)
376		goto fail6;
377
378	/* Register in OCF (AESU) */
379	crypto_register(sc->sc_cid, CRYPTO_AES_CBC, 0, 0);
380
381	/* Register in OCF (DEU) */
382	crypto_register(sc->sc_cid, CRYPTO_DES_CBC, 0, 0);
383	crypto_register(sc->sc_cid, CRYPTO_3DES_CBC, 0, 0);
384
385	/* Register in OCF (MDEU) */
386	crypto_register(sc->sc_cid, CRYPTO_MD5, 0, 0);
387	crypto_register(sc->sc_cid, CRYPTO_MD5_HMAC, 0, 0);
388	crypto_register(sc->sc_cid, CRYPTO_SHA1, 0, 0);
389	crypto_register(sc->sc_cid, CRYPTO_SHA1_HMAC, 0, 0);
390	crypto_register(sc->sc_cid, CRYPTO_SHA2_256_HMAC, 0, 0);
391	if (sc->sc_version >= 3) {
392		crypto_register(sc->sc_cid, CRYPTO_SHA2_384_HMAC, 0, 0);
393		crypto_register(sc->sc_cid, CRYPTO_SHA2_512_HMAC, 0, 0);
394	}
395
396	return (0);
397
398fail6:
399	sec_free_dma_mem(&(sc->sc_lt_dmem));
400fail5:
401	sec_free_dma_mem(&(sc->sc_desc_dmem));
402fail4:
403	sec_release_intr(sc, sc->sc_sec_ires, sc->sc_sec_ihand,
404	    sc->sc_sec_irid, "secondary");
405fail3:
406	sec_release_intr(sc, sc->sc_pri_ires, sc->sc_pri_ihand,
407	    sc->sc_pri_irid, "primary");
408fail2:
409	bus_release_resource(dev, SYS_RES_MEMORY, sc->sc_rrid, sc->sc_rres);
410fail1:
411	mtx_destroy(&sc->sc_controller_lock);
412	mtx_destroy(&sc->sc_descriptors_lock);
413	mtx_destroy(&sc->sc_sessions_lock);
414
415	return (ENXIO);
416}
417
418static int
419sec_detach(device_t dev)
420{
421	struct sec_softc *sc = device_get_softc(dev);
422	int i, error, timeout = SEC_TIMEOUT;
423
424	/* Prepare driver to shut down */
425	SEC_LOCK(sc, descriptors);
426	sc->sc_shutdown = 1;
427	SEC_UNLOCK(sc, descriptors);
428
429	/* Wait until all queued processing finishes */
430	while (1) {
431		SEC_LOCK(sc, descriptors);
432		i = SEC_READY_DESC_CNT(sc) + SEC_QUEUED_DESC_CNT(sc);
433		SEC_UNLOCK(sc, descriptors);
434
435		if (i == 0)
436			break;
437
438		if (timeout < 0) {
439			device_printf(dev, "queue flush timeout!\n");
440
441			/* DMA may still be active - stop it */
442			for (i = 0; i < SEC_CHANNELS; i++)
443				sec_channel_reset(sc, i, 1);
444
445			break;
446		}
447
448		timeout -= 1000;
449		DELAY(1000);
450	}
451
452	/* Disable interrupts */
453	SEC_WRITE(sc, SEC_IER, 0);
454
455	/* Unregister from OCF */
456	crypto_unregister_all(sc->sc_cid);
457
458	/* Free DMA memory */
459	for (i = 0; i < SEC_DESCRIPTORS; i++)
460		SEC_DESC_FREE_POINTERS(&(sc->sc_desc[i]));
461
462	sec_free_dma_mem(&(sc->sc_lt_dmem));
463	sec_free_dma_mem(&(sc->sc_desc_dmem));
464
465	/* Release interrupts */
466	sec_release_intr(sc, sc->sc_pri_ires, sc->sc_pri_ihand,
467	    sc->sc_pri_irid, "primary");
468	sec_release_intr(sc, sc->sc_sec_ires, sc->sc_sec_ihand,
469	    sc->sc_sec_irid, "secondary");
470
471	/* Release memory */
472	if (sc->sc_rres) {
473		error = bus_release_resource(dev, SYS_RES_MEMORY, sc->sc_rrid,
474		    sc->sc_rres);
475		if (error)
476			device_printf(dev, "bus_release_resource() failed for"
477			    " I/O memory, error %d\n", error);
478
479		sc->sc_rres = NULL;
480	}
481
482	mtx_destroy(&sc->sc_controller_lock);
483	mtx_destroy(&sc->sc_descriptors_lock);
484	mtx_destroy(&sc->sc_sessions_lock);
485
486	return (0);
487}
488
489static int
490sec_suspend(device_t dev)
491{
492
493	return (0);
494}
495
496static int
497sec_resume(device_t dev)
498{
499
500	return (0);
501}
502
503static int
504sec_shutdown(device_t dev)
505{
506
507	return (0);
508}
509
510static int
511sec_setup_intr(struct sec_softc *sc, struct resource **ires, void **ihand,
512    int *irid, driver_intr_t handler, const char *iname)
513{
514	int error;
515
516	(*ires) = bus_alloc_resource_any(sc->sc_dev, SYS_RES_IRQ, irid,
517	    RF_ACTIVE);
518
519	if ((*ires) == NULL) {
520		device_printf(sc->sc_dev, "could not allocate %s IRQ\n", iname);
521		return (ENXIO);
522	}
523
524	error = bus_setup_intr(sc->sc_dev, *ires, INTR_MPSAFE | INTR_TYPE_NET,
525	    NULL, handler, sc, ihand);
526
527	if (error) {
528		device_printf(sc->sc_dev, "failed to set up %s IRQ\n", iname);
529		if (bus_release_resource(sc->sc_dev, SYS_RES_IRQ, *irid, *ires))
530			device_printf(sc->sc_dev, "could not release %s IRQ\n",
531			    iname);
532
533		(*ires) = NULL;
534		return (error);
535	}
536
537	return (0);
538}
539
540static void
541sec_release_intr(struct sec_softc *sc, struct resource *ires, void *ihand,
542    int irid, const char *iname)
543{
544	int error;
545
546	if (ires == NULL)
547		return;
548
549	error = bus_teardown_intr(sc->sc_dev, ires, ihand);
550	if (error)
551		device_printf(sc->sc_dev, "bus_teardown_intr() failed for %s"
552		    " IRQ, error %d\n", iname, error);
553
554	error = bus_release_resource(sc->sc_dev, SYS_RES_IRQ, irid, ires);
555	if (error)
556		device_printf(sc->sc_dev, "bus_release_resource() failed for %s"
557		    " IRQ, error %d\n", iname, error);
558}
559
560static void
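/*
 * Primary interrupt handler: mark descriptors on erroring channels with EIO
 * (looked up through the channel's current descriptor pointer) and do a
 * partial channel reset, then walk the queued descriptors and complete every
 * one whose shd_done marker has been written (0xFF) or which carries an error,
 * handing it back to opencrypto with crypto_done().  Finally, unblock the
 * symmetric queue if we had pushed back and try to enqueue more ready
 * descriptors.
 */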
561sec_primary_intr(void *arg)
562{
563	struct sec_softc *sc = arg;
564	struct sec_desc *desc;
565	uint64_t isr;
566	int i, wakeup = 0;
567
568	SEC_LOCK(sc, controller);
569
570	/* Check for errors */
571	isr = SEC_READ(sc, SEC_ISR);
572	if (isr & sc->sc_int_error_mask) {
573		/* Check each channel for error */
574		for (i = 0; i < SEC_CHANNELS; i++) {
575			if ((isr & SEC_INT_CH_ERR(i)) == 0)
576				continue;
577
578			device_printf(sc->sc_dev,
579			    "I/O error on channel %i!\n", i);
580
581			/* Find and mark problematic descriptor */
582			desc = sec_find_desc(sc, SEC_READ(sc,
583			    SEC_CHAN_CDPR(i)));
584
585			if (desc != NULL)
586				desc->sd_error = EIO;
587
588			/* Do partial channel reset */
589			sec_channel_reset(sc, i, 0);
590		}
591	}
592
593	/* ACK interrupt */
594	SEC_WRITE(sc, SEC_ICR, 0xFFFFFFFFFFFFFFFFULL);
595
596	SEC_UNLOCK(sc, controller);
597	SEC_LOCK(sc, descriptors);
598
599	/* Handle processed descriptors */
600	SEC_DESC_SYNC(sc, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
601
602	while (SEC_QUEUED_DESC_CNT(sc) > 0) {
603		desc = SEC_GET_QUEUED_DESC(sc);
604
605		if (desc->sd_desc->shd_done != 0xFF && desc->sd_error == 0) {
606			SEC_PUT_BACK_QUEUED_DESC(sc);
607			break;
608		}
609
610		SEC_DESC_SYNC_POINTERS(desc, BUS_DMASYNC_PREREAD |
611		    BUS_DMASYNC_PREWRITE);
612
613		desc->sd_crp->crp_etype = desc->sd_error;
614		crypto_done(desc->sd_crp);
615
616		SEC_DESC_FREE_POINTERS(desc);
617		SEC_DESC_FREE_LT(sc, desc);
618		SEC_DESC_QUEUED2FREE(sc);
619	}
620
621	SEC_DESC_SYNC(sc, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
622
623	if (!sc->sc_shutdown) {
624		wakeup = sc->sc_blocked;
625		sc->sc_blocked = 0;
626	}
627
628	SEC_UNLOCK(sc, descriptors);
629
630	/* Enqueue ready descriptors in hardware */
631	sec_enqueue(sc);
632
633	if (wakeup)
634		crypto_unblock(sc->sc_cid, wakeup);
635}
636
637static void
638sec_secondary_intr(void *arg)
639{
640	struct sec_softc *sc = arg;
641
642	device_printf(sc->sc_dev, "spurious secondary interrupt!\n");
643	sec_primary_intr(arg);
644}
645
646static int
647sec_controller_reset(struct sec_softc *sc)
648{
649	int timeout = SEC_TIMEOUT;
650
651	/* Reset Controller */
652	SEC_WRITE(sc, SEC_MCR, SEC_MCR_SWR);
653
654	while (SEC_READ(sc, SEC_MCR) & SEC_MCR_SWR) {
655		DELAY(1000);
656		timeout -= 1000;
657
658		if (timeout < 0) {
659			device_printf(sc->sc_dev, "timeout while waiting for "
660			    "device reset!\n");
661			return (ETIMEDOUT);
662		}
663	}
664
665	return (0);
666}
667
668static int
669sec_channel_reset(struct sec_softc *sc, int channel, int full)
670{
671	int timeout = SEC_TIMEOUT;
672	uint64_t bit = (full) ? SEC_CHAN_CCR_R : SEC_CHAN_CCR_CON;
673	uint64_t reg;
674
675	/* Reset Channel */
676	reg = SEC_READ(sc, SEC_CHAN_CCR(channel));
677	SEC_WRITE(sc, SEC_CHAN_CCR(channel), reg | bit);
678
679	while (SEC_READ(sc, SEC_CHAN_CCR(channel)) & bit) {
680		DELAY(1000);
681		timeout -= 1000;
682
683		if (timeout < 0) {
684			device_printf(sc->sc_dev, "timeout while waiting for "
685			    "channel reset!\n");
686			return (ETIMEDOUT);
687		}
688	}
689
690	if (full) {
691		reg = SEC_CHAN_CCR_CDIE | SEC_CHAN_CCR_NT | SEC_CHAN_CCR_BS;
692
693		switch(sc->sc_version) {
694		case 2:
695			reg |= SEC_CHAN_CCR_CDWE;
696			break;
697		case 3:
698			reg |= SEC_CHAN_CCR_AWSE | SEC_CHAN_CCR_WGN;
699			break;
700		}
701
702		SEC_WRITE(sc, SEC_CHAN_CCR(channel), reg);
703	}
704
705	return (0);
706}
707
708static int
709sec_init(struct sec_softc *sc)
710{
711	uint64_t reg;
712	int error, i;
713
714	/* Reset controller twice to clear all pending interrupts */
715	error = sec_controller_reset(sc);
716	if (error)
717		return (error);
718
719	error = sec_controller_reset(sc);
720	if (error)
721		return (error);
722
723	/* Reset channels */
724	for (i = 0; i < SEC_CHANNELS; i++) {
725		error = sec_channel_reset(sc, i, 1);
726		if (error)
727			return (error);
728	}
729
730	/* Enable Interrupts */
731	reg = SEC_INT_ITO;
732	for (i = 0; i < SEC_CHANNELS; i++)
733		reg |= SEC_INT_CH_DN(i) | SEC_INT_CH_ERR(i);
734
735	SEC_WRITE(sc, SEC_IER, reg);
736
737	return (error);
738}
739
740static void
741sec_alloc_dma_mem_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
742{
743	struct sec_dma_mem *dma_mem = arg;
744
745	if (error)
746		return;
747
748	KASSERT(nseg == 1, ("Wrong number of segments, should be 1"));
749	dma_mem->dma_paddr = segs->ds_addr;
750}
751
752static void
753sec_dma_map_desc_cb(void *arg, bus_dma_segment_t *segs, int nseg,
754    int error)
755{
756	struct sec_desc_map_info *sdmi = arg;
757	struct sec_softc *sc = sdmi->sdmi_sc;
758	struct sec_lt *lt = NULL;
759	bus_addr_t addr;
760	bus_size_t size;
761	int i;
762
763	SEC_LOCK_ASSERT(sc, descriptors);
764
765	if (error)
766		return;
767
768	for (i = 0; i < nseg; i++) {
769		addr = segs[i].ds_addr;
770		size = segs[i].ds_len;
771
772		/* Skip requested offset */
773		if (sdmi->sdmi_offset >= size) {
774			sdmi->sdmi_offset -= size;
775			continue;
776		}
777
778		addr += sdmi->sdmi_offset;
779		size -= sdmi->sdmi_offset;
780		sdmi->sdmi_offset = 0;
781
782		/* Do not link more than requested */
783		if (sdmi->sdmi_size < size)
784			size = sdmi->sdmi_size;
785
786		lt = SEC_ALLOC_LT_ENTRY(sc);
787		lt->sl_lt->shl_length = size;
788		lt->sl_lt->shl_r = 0;
789		lt->sl_lt->shl_n = 0;
790		lt->sl_lt->shl_ptr = addr;
791
792		if (sdmi->sdmi_lt_first == NULL)
793			sdmi->sdmi_lt_first = lt;
794
795		sdmi->sdmi_lt_used += 1;
796
797		if ((sdmi->sdmi_size -= size) == 0)
798			break;
799	}
800
801	sdmi->sdmi_lt_last = lt;
802}
803
804static void
805sec_dma_map_desc_cb2(void *arg, bus_dma_segment_t *segs, int nseg,
806    bus_size_t size, int error)
807{
808
809	sec_dma_map_desc_cb(arg, segs, nseg, error);
810}
811
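/*
 * DMA helpers: sec_alloc_dma_mem() allocates driver-owned, contiguous DMA
 * memory (descriptors, link tables) and records dma_is_map = 0, while
 * sec_desc_map_dma() below maps caller-provided data (flat buffer, uio or
 * mbuf chain) into link table entries and records dma_is_map = 1.  The flag
 * tells sec_free_dma_mem() whether to free the backing memory or just destroy
 * the map.
 */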
812static int
813sec_alloc_dma_mem(struct sec_softc *sc, struct sec_dma_mem *dma_mem,
814    bus_size_t size)
815{
816	int error;
817
818	if (dma_mem->dma_vaddr != NULL)
819		return (EBUSY);
820
821	error = bus_dma_tag_create(NULL,	/* parent */
822		SEC_DMA_ALIGNMENT, 0,		/* alignment, boundary */
823		BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
824		BUS_SPACE_MAXADDR,		/* highaddr */
825		NULL, NULL,			/* filtfunc, filtfuncarg */
826		size, 1,			/* maxsize, nsegments */
827		size, 0,			/* maxsegsz, flags */
828		NULL, NULL,			/* lockfunc, lockfuncarg */
829		&(dma_mem->dma_tag));		/* dmat */
830
831	if (error) {
832		device_printf(sc->sc_dev, "failed to allocate busdma tag, error"
833		    " %i!\n", error);
834		goto err1;
835	}
836
837	error = bus_dmamem_alloc(dma_mem->dma_tag, &(dma_mem->dma_vaddr),
838	    BUS_DMA_NOWAIT | BUS_DMA_ZERO, &(dma_mem->dma_map));
839
840	if (error) {
841		device_printf(sc->sc_dev, "failed to allocate DMA safe"
842		    " memory, error %i!\n", error);
843		goto err2;
844	}
845
846	error = bus_dmamap_load(dma_mem->dma_tag, dma_mem->dma_map,
847		    dma_mem->dma_vaddr, size, sec_alloc_dma_mem_cb, dma_mem,
848		    BUS_DMA_NOWAIT);
849
850	if (error) {
851		device_printf(sc->sc_dev, "cannot get address of the DMA"
852		    " memory, error %i\n", error);
853		goto err3;
854	}
855
856	dma_mem->dma_is_map = 0;
857	return (0);
858
859err3:
860	bus_dmamem_free(dma_mem->dma_tag, dma_mem->dma_vaddr, dma_mem->dma_map);
861err2:
862	bus_dma_tag_destroy(dma_mem->dma_tag);
863err1:
864	dma_mem->dma_vaddr = NULL;
865	return(error);
866}
867
868static int
869sec_desc_map_dma(struct sec_softc *sc, struct sec_dma_mem *dma_mem, void *mem,
870    bus_size_t size, int type, struct sec_desc_map_info *sdmi)
871{
872	int error;
873
874	if (dma_mem->dma_vaddr != NULL)
875		return (EBUSY);
876
877	switch (type) {
878	case SEC_MEMORY:
879		break;
880	case SEC_UIO:
881		size = SEC_FREE_LT_CNT(sc) * SEC_MAX_DMA_BLOCK_SIZE;
882		break;
883	case SEC_MBUF:
884		size = m_length((struct mbuf*)mem, NULL);
885		break;
886	default:
887		return (EINVAL);
888	}
889
890	error = bus_dma_tag_create(NULL,	/* parent */
891		SEC_DMA_ALIGNMENT, 0,		/* alignment, boundary */
892		BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
893		BUS_SPACE_MAXADDR,		/* highaddr */
894		NULL, NULL,			/* filtfunc, filtfuncarg */
895		size,				/* maxsize */
896		SEC_FREE_LT_CNT(sc),		/* nsegments */
897		SEC_MAX_DMA_BLOCK_SIZE, 0,	/* maxsegsz, flags */
898		NULL, NULL,			/* lockfunc, lockfuncarg */
899		&(dma_mem->dma_tag));		/* dmat */
900
901	if (error) {
902		device_printf(sc->sc_dev, "failed to allocate busdma tag, error"
903		    " %i!\n", error);
904		dma_mem->dma_vaddr = NULL;
905		return (error);
906	}
907
908	error = bus_dmamap_create(dma_mem->dma_tag, 0, &(dma_mem->dma_map));
909
910	if (error) {
911		device_printf(sc->sc_dev, "failed to create DMA map, error %i!"
912		    "\n", error);
913		bus_dma_tag_destroy(dma_mem->dma_tag);
914		return (error);
915	}
916
917	switch (type) {
918	case SEC_MEMORY:
919		error = bus_dmamap_load(dma_mem->dma_tag, dma_mem->dma_map,
920		    mem, size, sec_dma_map_desc_cb, sdmi, BUS_DMA_NOWAIT);
921		break;
922	case SEC_UIO:
923		error = bus_dmamap_load_uio(dma_mem->dma_tag, dma_mem->dma_map,
924		    mem, sec_dma_map_desc_cb2, sdmi, BUS_DMA_NOWAIT);
925		break;
926	case SEC_MBUF:
927		error = bus_dmamap_load_mbuf(dma_mem->dma_tag, dma_mem->dma_map,
928		    mem, sec_dma_map_desc_cb2, sdmi, BUS_DMA_NOWAIT);
929		break;
930	}
931
932	if (error) {
933		device_printf(sc->sc_dev, "cannot get address of the DMA"
934		    " memory, error %i!\n", error);
935		bus_dmamap_destroy(dma_mem->dma_tag, dma_mem->dma_map);
936		bus_dma_tag_destroy(dma_mem->dma_tag);
937		return (error);
938	}
939
940	dma_mem->dma_is_map = 1;
941	dma_mem->dma_vaddr = mem;
942
943	return (0);
944}
945
946static void
947sec_free_dma_mem(struct sec_dma_mem *dma_mem)
948{
949
950	/* Check for double free */
951	if (dma_mem->dma_vaddr == NULL)
952		return;
953
954	bus_dmamap_unload(dma_mem->dma_tag, dma_mem->dma_map);
955
956	if (dma_mem->dma_is_map)
957		bus_dmamap_destroy(dma_mem->dma_tag, dma_mem->dma_map);
958	else
959		bus_dmamem_free(dma_mem->dma_tag, dma_mem->dma_vaddr,
960		    dma_mem->dma_map);
961
962	bus_dma_tag_destroy(dma_mem->dma_tag);
963	dma_mem->dma_vaddr = NULL;
964}
965
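/*
 * Return the zero-based channel currently using the given execution unit, or
 * a negative value if the EU is not assigned to any channel.  The EU
 * assignment status register appears to report channels 1-based, with zero
 * meaning idle, hence the (channel - 1) below.
 */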
966static int
967sec_eu_channel(struct sec_softc *sc, int eu)
968{
969	uint64_t reg;
970	int channel = 0;
971
972	SEC_LOCK_ASSERT(sc, controller);
973
974	reg = SEC_READ(sc, SEC_EUASR);
975
976	switch (eu) {
977	case SEC_EU_AFEU:
978		channel = SEC_EUASR_AFEU(reg);
979		break;
980	case SEC_EU_DEU:
981		channel = SEC_EUASR_DEU(reg);
982		break;
983	case SEC_EU_MDEU_A:
984	case SEC_EU_MDEU_B:
985		channel = SEC_EUASR_MDEU(reg);
986		break;
987	case SEC_EU_RNGU:
988		channel = SEC_EUASR_RNGU(reg);
989		break;
990	case SEC_EU_PKEU:
991		channel = SEC_EUASR_PKEU(reg);
992		break;
993	case SEC_EU_AESU:
994		channel = SEC_EUASR_AESU(reg);
995		break;
996	case SEC_EU_KEU:
997		channel = SEC_EUASR_KEU(reg);
998		break;
999	case SEC_EU_CRCU:
1000		channel = SEC_EUASR_CRCU(reg);
1001		break;
1002	}
1003
1004	return (channel - 1);
1005}
1006
1007static int
1008sec_enqueue_desc(struct sec_softc *sc, struct sec_desc *desc, int channel)
1009{
1010	u_int fflvl = SEC_MAX_FIFO_LEVEL;
1011	uint64_t reg;
1012	int i;
1013
1014	SEC_LOCK_ASSERT(sc, controller);
1015
1016	/* Find a free channel if we have not been given one */
1017	if (channel < 0) {
1018		for (i = 0; i < SEC_CHANNELS; i++) {
1019			reg = SEC_READ(sc, SEC_CHAN_CSR(i));
1020
1021			if ((reg & sc->sc_channel_idle_mask) == 0) {
1022				channel = i;
1023				break;
1024			}
1025		}
1026	}
1027
1028	/* There is no free channel */
1029	if (channel < 0)
1030		return (-1);
1031
1032	/* Check FIFO level on selected channel */
1033	reg = SEC_READ(sc, SEC_CHAN_CSR(channel));
1034
1035	switch(sc->sc_version) {
1036	case 2:
1037		fflvl = (reg >> SEC_CHAN_CSR2_FFLVL_S) & SEC_CHAN_CSR2_FFLVL_M;
1038		break;
1039	case 3:
1040		fflvl = (reg >> SEC_CHAN_CSR3_FFLVL_S) & SEC_CHAN_CSR3_FFLVL_M;
1041		break;
1042	}
1043
1044	if (fflvl >= SEC_MAX_FIFO_LEVEL)
1045		return (-1);
1046
1047	/* Enqueue descriptor in channel */
1048	SEC_WRITE(sc, SEC_CHAN_FF(channel), desc->sd_desc_paddr);
1049
1050	return (channel);
1051}
1052
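/*
 * Push ready descriptors into the hardware FIFOs.  A descriptor is steered to
 * the channel that is already using the execution unit(s) it needs, so it
 * simply queues behind the work occupying that EU; if all required EUs are
 * idle, any free channel will do.  Descriptors that cannot be placed are put
 * back and retried on the next call (e.g. from the interrupt handler).
 */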
1053static void
1054sec_enqueue(struct sec_softc *sc)
1055{
1056	struct sec_desc *desc;
1057	int ch0, ch1;
1058
1059	SEC_LOCK(sc, descriptors);
1060	SEC_LOCK(sc, controller);
1061
1062	while (SEC_READY_DESC_CNT(sc) > 0) {
1063		desc = SEC_GET_READY_DESC(sc);
1064
1065		ch0 = sec_eu_channel(sc, desc->sd_desc->shd_eu_sel0);
1066		ch1 = sec_eu_channel(sc, desc->sd_desc->shd_eu_sel1);
1067
1068		/*
1069		 * Both EUs are used by the same channel.
1070		 * Enqueue descriptor in channel used by busy EUs.
1071		 */
1072		if (ch0 >= 0 && ch0 == ch1) {
1073			if (sec_enqueue_desc(sc, desc, ch0) >= 0) {
1074				SEC_DESC_READY2QUEUED(sc);
1075				continue;
1076			}
1077		}
1078
1079		/*
1080		 * Only one EU is free.
1081		 * Enqueue descriptor in channel used by busy EU.
1082		 */
1083		if ((ch0 >= 0 && ch1 < 0) || (ch1 >= 0 && ch0 < 0)) {
1084			if (sec_enqueue_desc(sc, desc, (ch0 >= 0) ? ch0 : ch1)
1085			    >= 0) {
1086				SEC_DESC_READY2QUEUED(sc);
1087				continue;
1088			}
1089		}
1090
1091		/*
1092		 * Both EUs are free.
1093		 * Enqueue descriptor in first free channel.
1094		 */
1095		if (ch0 < 0 && ch1 < 0) {
1096			if (sec_enqueue_desc(sc, desc, -1) >= 0) {
1097				SEC_DESC_READY2QUEUED(sc);
1098				continue;
1099			}
1100		}
1101
1102		/* Current descriptor cannot be queued at the moment */
1103		SEC_PUT_BACK_READY_DESC(sc);
1104		break;
1105	}
1106
1107	SEC_UNLOCK(sc, controller);
1108	SEC_UNLOCK(sc, descriptors);
1109}
1110
1111static struct sec_desc *
1112sec_find_desc(struct sec_softc *sc, bus_addr_t paddr)
1113{
1114	struct sec_desc *desc = NULL;
1115	int i;
1116
1117	SEC_LOCK_ASSERT(sc, descriptors);
1118
1119	for (i = 0; i < SEC_DESCRIPTORS; i++) {
1120		if (sc->sc_desc[i].sd_desc_paddr == paddr) {
1121			desc = &(sc->sc_desc[i]);
1122			break;
1123		}
1124	}
1125
1126	return (desc);
1127}
1128
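/*
 * Descriptor pointers come in two flavours: sec_make_pointer_direct() stores
 * the physical address of a contiguous region (shdp_j = 0), typically a field
 * inside the descriptor itself such as the IV or key, while sec_make_pointer()
 * maps arbitrary request data for DMA and points the descriptor at a chain of
 * link table entries instead (shdp_j = 1).
 */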
1129static int
1130sec_make_pointer_direct(struct sec_softc *sc, struct sec_desc *desc, u_int n,
1131    bus_addr_t data, bus_size_t dsize)
1132{
1133	struct sec_hw_desc_ptr *ptr;
1134
1135	SEC_LOCK_ASSERT(sc, descriptors);
1136
1137	ptr = &(desc->sd_desc->shd_pointer[n]);
1138	ptr->shdp_length = dsize;
1139	ptr->shdp_extent = 0;
1140	ptr->shdp_j = 0;
1141	ptr->shdp_ptr = data;
1142
1143	return (0);
1144}
1145
1146static int
1147sec_make_pointer(struct sec_softc *sc, struct sec_desc *desc,
1148    u_int n, void *data, bus_size_t doffset, bus_size_t dsize, int dtype)
1149{
1150	struct sec_desc_map_info sdmi = { sc, dsize, doffset, NULL, NULL, 0 };
1151	struct sec_hw_desc_ptr *ptr;
1152	int error;
1153
1154	SEC_LOCK_ASSERT(sc, descriptors);
1155
1156	/* For flat memory, map only the requested region */
1157	if (dtype == SEC_MEMORY) {
1158		 data = (uint8_t*)(data) + doffset;
1159		 sdmi.sdmi_offset = 0;
1160	}
1161
1162	error = sec_desc_map_dma(sc, &(desc->sd_ptr_dmem[n]), data, dsize,
1163	    dtype, &sdmi);
1164
1165	if (error)
1166		return (error);
1167
1168	sdmi.sdmi_lt_last->sl_lt->shl_r = 1;
1169	desc->sd_lt_used += sdmi.sdmi_lt_used;
1170
1171	ptr = &(desc->sd_desc->shd_pointer[n]);
1172	ptr->shdp_length = dsize;
1173	ptr->shdp_extent = 0;
1174	ptr->shdp_j = 1;
1175	ptr->shdp_ptr = sdmi.sdmi_lt_first->sl_lt_paddr;
1176
1177	return (0);
1178}
1179
1180static int
1181sec_split_cri(struct cryptoini *cri, struct cryptoini **enc,
1182    struct cryptoini **mac)
1183{
1184	struct cryptoini *e, *m;
1185
1186	e = cri;
1187	m = cri->cri_next;
1188
1189	/* We can handle only two operations */
1190	if (m && m->cri_next)
1191		return (EINVAL);
1192
1193	if (sec_mdeu_can_handle(e->cri_alg)) {
1194		cri = m;
1195		m = e;
1196		e = cri;
1197	}
1198
1199	if (m && !sec_mdeu_can_handle(m->cri_alg))
1200		return (EINVAL);
1201
1202	*enc = e;
1203	*mac = m;
1204
1205	return (0);
1206}
1207
1208static int
1209sec_split_crp(struct cryptop *crp, struct cryptodesc **enc,
1210    struct cryptodesc **mac)
1211{
1212	struct cryptodesc *e, *m, *t;
1213
1214	e = crp->crp_desc;
1215	m = e->crd_next;
1216
1217	/* We can handle only two operations */
1218	if (m && m->crd_next)
1219		return (EINVAL);
1220
1221	if (sec_mdeu_can_handle(e->crd_alg)) {
1222		t = m;
1223		m = e;
1224		e = t;
1225	}
1226
1227	if (m && !sec_mdeu_can_handle(m->crd_alg))
1228		return (EINVAL);
1229
1230	*enc = e;
1231	*mac = m;
1232
1233	return (0);
1234}
1235
1236static int
1237sec_alloc_session(struct sec_softc *sc)
1238{
1239	struct sec_session *ses = NULL;
1240	int sid = -1;
1241	u_int i;
1242
1243	SEC_LOCK(sc, sessions);
1244
1245	for (i = 0; i < SEC_MAX_SESSIONS; i++) {
1246		if (sc->sc_sessions[i].ss_used == 0) {
1247			ses = &(sc->sc_sessions[i]);
1248			ses->ss_used = 1;
1249			ses->ss_ivlen = 0;
1250			ses->ss_klen = 0;
1251			ses->ss_mklen = 0;
1252			sid = i;
1253			break;
1254		}
1255	}
1256
1257	SEC_UNLOCK(sc, sessions);
1258
1259	return (sid);
1260}
1261
1262static struct sec_session *
1263sec_get_session(struct sec_softc *sc, u_int sid)
1264{
1265	struct sec_session *ses;
1266
1267	if (sid >= SEC_MAX_SESSIONS)
1268		return (NULL);
1269
1270	SEC_LOCK(sc, sessions);
1271
1272	ses = &(sc->sc_sessions[sid]);
1273
1274	if (ses->ss_used == 0)
1275		ses = NULL;
1276
1277	SEC_UNLOCK(sc, sessions);
1278
1279	return (ses);
1280}
1281
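/*
 * Illustrative example (not part of the driver): an opencrypto consumer could
 * set up an AES-CBC + SHA1-HMAC session handled by this driver with a
 * two-element cryptoini chain, roughly:
 *
 *	struct cryptoini enc, mac;
 *	u_int64_t sid;
 *
 *	bzero(&enc, sizeof(enc));
 *	bzero(&mac, sizeof(mac));
 *	enc.cri_alg  = CRYPTO_AES_CBC;
 *	enc.cri_klen = 128;			// key length in bits
 *	enc.cri_key  = (caddr_t)aes_key;	// hypothetical key buffers
 *	enc.cri_next = &mac;
 *	mac.cri_alg  = CRYPTO_SHA1_HMAC;
 *	mac.cri_klen = 160;
 *	mac.cri_key  = (caddr_t)hmac_key;
 *	error = crypto_newsession(&sid, &enc, CRYPTOCAP_F_HARDWARE);
 *
 * sec_split_cri() below separates such a chain into its cipher and MAC parts.
 */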
1282static int
1283sec_newsession(device_t dev, u_int32_t *sidp, struct cryptoini *cri)
1284{
1285	struct sec_softc *sc = device_get_softc(dev);
1286	struct sec_eu_methods *eu = sec_eus;
1287	struct cryptoini *enc = NULL;
1288	struct cryptoini *mac = NULL;
1289	struct sec_session *ses;
1290	int error = -1;
1291	int sid;
1292
1293	error = sec_split_cri(cri, &enc, &mac);
1294	if (error)
1295		return (error);
1296
1297	/* Check key lengths */
1298	if (enc && enc->cri_key && (enc->cri_klen / 8) > SEC_MAX_KEY_LEN)
1299		return (E2BIG);
1300
1301	if (mac && mac->cri_key && (mac->cri_klen / 8) > SEC_MAX_KEY_LEN)
1302		return (E2BIG);
1303
1304	/* Only SEC 3.0 supports digests larger than 256 bits */
1305	if (sc->sc_version < 3 && mac && mac->cri_klen > 256)
1306		return (E2BIG);
1307
1308	sid = sec_alloc_session(sc);
1309	if (sid < 0)
1310		return (ENOMEM);
1311
1312	ses = sec_get_session(sc, sid);
1313
1314	/* Find EU for this session */
1315	while (eu->sem_make_desc != NULL) {
1316		error = eu->sem_newsession(sc, ses, enc, mac);
1317		if (error >= 0)
1318			break;
1319
1320		eu++;
1321	}
1322
1323	/* If not found, return EINVAL */
1324	if (error < 0) {
1325		sec_free_session(sc, ses);
1326		return (EINVAL);
1327	}
1328
1329	/* Save cipher key */
1330	if (enc && enc->cri_key) {
1331		ses->ss_klen = enc->cri_klen / 8;
1332		memcpy(ses->ss_key, enc->cri_key, ses->ss_klen);
1333	}
1334
1335	/* Save digest key */
1336	if (mac && mac->cri_key) {
1337		ses->ss_mklen = mac->cri_klen / 8;
1338		memcpy(ses->ss_mkey, mac->cri_key, ses->ss_mklen);
1339	}
1340
1341	ses->ss_eu = eu;
1342	*sidp = sid;
1343
1344	return (0);
1345}
1346
1347static int
1348sec_freesession(device_t dev, uint64_t tid)
1349{
1350	struct sec_softc *sc = device_get_softc(dev);
1351	struct sec_session *ses;
1352	int error = 0;
1353
1354	ses = sec_get_session(sc, CRYPTO_SESID2LID(tid));
1355	if (ses == NULL)
1356		return (EINVAL);
1357
1358	sec_free_session(sc, ses);
1359
1360	return (error);
1361}
1362
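/*
 * Request path: sec_process() takes a free descriptor, copies the IV and keys
 * into it, lets the session's execution unit build the pointer table
 * (sem_make_desc) and moves the descriptor to the ready queue; sec_enqueue()
 * then pushes ready descriptors into a channel FIFO and sec_primary_intr()
 * completes them, so a descriptor cycles free -> ready -> queued -> free.
 */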
1363static int
1364sec_process(device_t dev, struct cryptop *crp, int hint)
1365{
1366	struct sec_softc *sc = device_get_softc(dev);
1367	struct sec_desc *desc = NULL;
1368	struct cryptodesc *mac, *enc;
1369	struct sec_session *ses;
1370	int buftype, error = 0;
1371
1372	/* Check Session ID */
1373	ses = sec_get_session(sc, CRYPTO_SESID2LID(crp->crp_sid));
1374	if (ses == NULL) {
1375		crp->crp_etype = EINVAL;
1376		crypto_done(crp);
1377		return (0);
1378	}
1379
1380	/* Check for input length */
1381	if (crp->crp_ilen > SEC_MAX_DMA_BLOCK_SIZE) {
1382		crp->crp_etype = E2BIG;
1383		crypto_done(crp);
1384		return (0);
1385	}
1386
1387	/* Get descriptors */
1388	if (sec_split_crp(crp, &enc, &mac)) {
1389		crp->crp_etype = EINVAL;
1390		crypto_done(crp);
1391		return (0);
1392	}
1393
1394	SEC_LOCK(sc, descriptors);
1395	SEC_DESC_SYNC(sc, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1396
1397	/* Block the driver if there are no free descriptors or we are going down */
1398	if (SEC_FREE_DESC_CNT(sc) == 0 || sc->sc_shutdown) {
1399		sc->sc_blocked |= CRYPTO_SYMQ;
1400		SEC_UNLOCK(sc, descriptors);
1401		return (ERESTART);
1402	}
1403
1404	/* Prepare descriptor */
1405	desc = SEC_GET_FREE_DESC(sc);
1406	desc->sd_lt_used = 0;
1407	desc->sd_error = 0;
1408	desc->sd_crp = crp;
1409
1410	if (crp->crp_flags & CRYPTO_F_IOV)
1411		buftype = SEC_UIO;
1412	else if (crp->crp_flags & CRYPTO_F_IMBUF)
1413		buftype = SEC_MBUF;
1414	else
1415		buftype = SEC_MEMORY;
1416
1417	if (enc && enc->crd_flags & CRD_F_ENCRYPT) {
1418		if (enc->crd_flags & CRD_F_IV_EXPLICIT)
1419			memcpy(desc->sd_desc->shd_iv, enc->crd_iv,
1420			    ses->ss_ivlen);
1421		else
1422			arc4rand(desc->sd_desc->shd_iv, ses->ss_ivlen, 0);
1423
1424		if ((enc->crd_flags & CRD_F_IV_PRESENT) == 0)
1425			crypto_copyback(crp->crp_flags, crp->crp_buf,
1426			    enc->crd_inject, ses->ss_ivlen,
1427			    desc->sd_desc->shd_iv);
1428	} else if (enc) {
1429		if (enc->crd_flags & CRD_F_IV_EXPLICIT)
1430			memcpy(desc->sd_desc->shd_iv, enc->crd_iv,
1431			    ses->ss_ivlen);
1432		else
1433			crypto_copydata(crp->crp_flags, crp->crp_buf,
1434			    enc->crd_inject, ses->ss_ivlen,
1435			    desc->sd_desc->shd_iv);
1436	}
1437
1438	if (enc && enc->crd_flags & CRD_F_KEY_EXPLICIT) {
1439		if ((enc->crd_klen / 8) <= SEC_MAX_KEY_LEN) {
1440			ses->ss_klen = enc->crd_klen / 8;
1441			memcpy(ses->ss_key, enc->crd_key, ses->ss_klen);
1442		} else
1443			error = E2BIG;
1444	}
1445
1446	if (!error && mac && mac->crd_flags & CRD_F_KEY_EXPLICIT) {
1447		if ((mac->crd_klen / 8) <= SEC_MAX_KEY_LEN) {
1448			ses->ss_mklen = mac->crd_klen / 8;
1449			memcpy(ses->ss_mkey, mac->crd_key, ses->ss_mklen);
1450		} else
1451			error = E2BIG;
1452	}
1453
1454	if (!error) {
1455		memcpy(desc->sd_desc->shd_key, ses->ss_key, ses->ss_klen);
1456		memcpy(desc->sd_desc->shd_mkey, ses->ss_mkey, ses->ss_mklen);
1457
1458		error = ses->ss_eu->sem_make_desc(sc, ses, desc, crp, buftype);
1459	}
1460
1461	if (error) {
1462		SEC_DESC_FREE_POINTERS(desc);
1463		SEC_DESC_PUT_BACK_LT(sc, desc);
1464		SEC_PUT_BACK_FREE_DESC(sc);
1465		SEC_UNLOCK(sc, descriptors);
1466		crp->crp_etype = error;
1467		crypto_done(crp);
1468		return (0);
1469	}
1470
1471	/*
1472	 * Skip DONE interrupt if this is not last request in burst, but only
1473	 * if we are running on SEC 3.X. On SEC 2.X we have to enable DONE
1474	 * signaling on each descriptor.
1475	 */
1476	if ((hint & CRYPTO_HINT_MORE) && sc->sc_version == 3)
1477		desc->sd_desc->shd_dn = 0;
1478	else
1479		desc->sd_desc->shd_dn = 1;
1480
1481	SEC_DESC_SYNC(sc, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1482	SEC_DESC_SYNC_POINTERS(desc, BUS_DMASYNC_POSTREAD |
1483	    BUS_DMASYNC_POSTWRITE);
1484	SEC_DESC_FREE2READY(sc);
1485	SEC_UNLOCK(sc, descriptors);
1486
1487	/* Enqueue ready descriptors in hardware */
1488	sec_enqueue(sc);
1489
1490	return (0);
1491}
1492
1493static int
1494sec_build_common_ns_desc(struct sec_softc *sc, struct sec_desc *desc,
1495    struct sec_session *ses, struct cryptop *crp, struct cryptodesc *enc,
1496    int buftype)
1497{
1498	struct sec_hw_desc *hd = desc->sd_desc;
1499	int error;
1500
1501	hd->shd_desc_type = SEC_DT_COMMON_NONSNOOP;
1502	hd->shd_eu_sel1 = SEC_EU_NONE;
1503	hd->shd_mode1 = 0;
1504
1505	/* Pointer 0: NULL */
1506	error = sec_make_pointer_direct(sc, desc, 0, 0, 0);
1507	if (error)
1508		return (error);
1509
1510	/* Pointer 1: IV IN */
1511	error = sec_make_pointer_direct(sc, desc, 1, desc->sd_desc_paddr +
1512	    offsetof(struct sec_hw_desc, shd_iv), ses->ss_ivlen);
1513	if (error)
1514		return (error);
1515
1516	/* Pointer 2: Cipher Key */
1517	error = sec_make_pointer_direct(sc, desc, 2, desc->sd_desc_paddr +
1518	    offsetof(struct sec_hw_desc, shd_key), ses->ss_klen);
1519 	if (error)
1520		return (error);
1521
1522	/* Pointer 3: Data IN */
1523	error = sec_make_pointer(sc, desc, 3, crp->crp_buf, enc->crd_skip,
1524	    enc->crd_len, buftype);
1525	if (error)
1526		return (error);
1527
1528	/* Pointer 4: Data OUT */
1529	error = sec_make_pointer(sc, desc, 4, crp->crp_buf, enc->crd_skip,
1530	    enc->crd_len, buftype);
1531	if (error)
1532		return (error);
1533
1534	/* Pointer 5: IV OUT (Not used: NULL) */
1535	error = sec_make_pointer_direct(sc, desc, 5, 0, 0);
1536	if (error)
1537		return (error);
1538
1539	/* Pointer 6: NULL */
1540	error = sec_make_pointer_direct(sc, desc, 6, 0, 0);
1541
1542	return (error);
1543}
1544
1545static int
1546sec_build_common_s_desc(struct sec_softc *sc, struct sec_desc *desc,
1547    struct sec_session *ses, struct cryptop *crp, struct cryptodesc *enc,
1548    struct cryptodesc *mac, int buftype)
1549{
1550	struct sec_hw_desc *hd = desc->sd_desc;
1551	u_int eu, mode, hashlen;
1552	int error;
1553
1554	if (mac->crd_len < enc->crd_len)
1555		return (EINVAL);
1556
1557	if (mac->crd_skip + mac->crd_len != enc->crd_skip + enc->crd_len)
1558		return (EINVAL);
1559
1560	error = sec_mdeu_config(mac, &eu, &mode, &hashlen);
1561	if (error)
1562		return (error);
1563
1564	hd->shd_desc_type = SEC_DT_HMAC_SNOOP;
1565	hd->shd_eu_sel1 = eu;
1566	hd->shd_mode1 = mode;
1567
1568	/* Pointer 0: HMAC Key */
1569	error = sec_make_pointer_direct(sc, desc, 0, desc->sd_desc_paddr +
1570	    offsetof(struct sec_hw_desc, shd_mkey), ses->ss_mklen);
1571	if (error)
1572		return (error);
1573
1574	/* Pointer 1: HMAC-Only Data IN */
1575	error = sec_make_pointer(sc, desc, 1, crp->crp_buf, mac->crd_skip,
1576	    mac->crd_len - enc->crd_len, buftype);
1577	if (error)
1578		return (error);
1579
1580	/* Pointer 2: Cipher Key */
1581	error = sec_make_pointer_direct(sc, desc, 2, desc->sd_desc_paddr +
1582	    offsetof(struct sec_hw_desc, shd_key), ses->ss_klen);
1583 	if (error)
1584		return (error);
1585
1586	/* Pointer 3: IV IN */
1587	error = sec_make_pointer_direct(sc, desc, 3, desc->sd_desc_paddr +
1588	    offsetof(struct sec_hw_desc, shd_iv), ses->ss_ivlen);
1589	if (error)
1590		return (error);
1591
1592	/* Pointer 4: Data IN */
1593	error = sec_make_pointer(sc, desc, 4, crp->crp_buf, enc->crd_skip,
1594	    enc->crd_len, buftype);
1595	if (error)
1596		return (error);
1597
1598	/* Pointer 5: Data OUT */
1599	error = sec_make_pointer(sc, desc, 5, crp->crp_buf, enc->crd_skip,
1600	    enc->crd_len, buftype);
1601	if (error)
1602		return (error);
1603
1604	/* Pointer 6: HMAC OUT */
1605	error = sec_make_pointer(sc, desc, 6, crp->crp_buf, mac->crd_inject,
1606	    hashlen, buftype);
1607
1608	return (error);
1609}
1610
1611/* AESU */
1612
1613static int
1614sec_aesu_newsession(struct sec_softc *sc, struct sec_session *ses,
1615    struct cryptoini *enc, struct cryptoini *mac)
1616{
1617
1618	if (enc == NULL)
1619		return (-1);
1620
1621	if (enc->cri_alg != CRYPTO_AES_CBC)
1622		return (-1);
1623
1624	ses->ss_ivlen = AES_BLOCK_LEN;
1625
1626	return (0);
1627}
1628
1629static int
1630sec_aesu_make_desc(struct sec_softc *sc, struct sec_session *ses,
1631    struct sec_desc *desc, struct cryptop *crp, int buftype)
1632{
1633	struct sec_hw_desc *hd = desc->sd_desc;
1634	struct cryptodesc *enc, *mac;
1635	int error;
1636
1637	error = sec_split_crp(crp, &enc, &mac);
1638	if (error)
1639		return (error);
1640
1641	if (!enc)
1642		return (EINVAL);
1643
1644	hd->shd_eu_sel0 = SEC_EU_AESU;
1645	hd->shd_mode0 = SEC_AESU_MODE_CBC;
1646
1647	if (enc->crd_alg != CRYPTO_AES_CBC)
1648		return (EINVAL);
1649
1650	if (enc->crd_flags & CRD_F_ENCRYPT) {
1651		hd->shd_mode0 |= SEC_AESU_MODE_ED;
1652		hd->shd_dir = 0;
1653	} else
1654		hd->shd_dir = 1;
1655
1656	if (mac)
1657		error = sec_build_common_s_desc(sc, desc, ses, crp, enc, mac,
1658		    buftype);
1659	else
1660		error = sec_build_common_ns_desc(sc, desc, ses, crp, enc,
1661		    buftype);
1662
1663	return (error);
1664}
1665
1666/* DEU */
1667
1668static int
1669sec_deu_newsession(struct sec_softc *sc, struct sec_session *ses,
1670    struct cryptoini *enc, struct cryptoini *mac)
1671{
1672
1673	if (enc == NULL)
1674		return (-1);
1675
1676	switch (enc->cri_alg) {
1677	case CRYPTO_DES_CBC:
1678	case CRYPTO_3DES_CBC:
1679		break;
1680	default:
1681		return (-1);
1682	}
1683
1684	ses->ss_ivlen = DES_BLOCK_LEN;
1685
1686	return (0);
1687}
1688
1689static int
1690sec_deu_make_desc(struct sec_softc *sc, struct sec_session *ses,
1691    struct sec_desc *desc, struct cryptop *crp, int buftype)
1692{
1693	struct sec_hw_desc *hd = desc->sd_desc;
1694	struct cryptodesc *enc, *mac;
1695	int error;
1696
1697	error = sec_split_crp(crp, &enc, &mac);
1698	if (error)
1699		return (error);
1700
1701	if (!enc)
1702		return (EINVAL);
1703
1704	hd->shd_eu_sel0 = SEC_EU_DEU;
1705	hd->shd_mode0 = SEC_DEU_MODE_CBC;
1706
1707	switch (enc->crd_alg) {
1708	case CRYPTO_3DES_CBC:
1709		hd->shd_mode0 |= SEC_DEU_MODE_TS;
1710		break;
1711	case CRYPTO_DES_CBC:
1712		break;
1713	default:
1714		return (EINVAL);
1715	}
1716
1717	if (enc->crd_flags & CRD_F_ENCRYPT) {
1718		hd->shd_mode0 |= SEC_DEU_MODE_ED;
1719		hd->shd_dir = 0;
1720	} else
1721		hd->shd_dir = 1;
1722
1723	if (mac)
1724		error = sec_build_common_s_desc(sc, desc, ses, crp, enc, mac,
1725		    buftype);
1726	else
1727		error = sec_build_common_ns_desc(sc, desc, ses, crp, enc,
1728		    buftype);
1729
1730	return (error);
1731}
1732
1733/* MDEU */
1734
1735static int
1736sec_mdeu_can_handle(u_int alg)
1737{
1738	switch (alg) {
1739	case CRYPTO_MD5:
1740	case CRYPTO_SHA1:
1741	case CRYPTO_MD5_HMAC:
1742	case CRYPTO_SHA1_HMAC:
1743	case CRYPTO_SHA2_256_HMAC:
1744	case CRYPTO_SHA2_384_HMAC:
1745	case CRYPTO_SHA2_512_HMAC:
1746		return (1);
1747	default:
1748		return (0);
1749	}
1750}
1751
1752static int
1753sec_mdeu_config(struct cryptodesc *crd, u_int *eu, u_int *mode, u_int *hashlen)
1754{
1755
1756	*mode = SEC_MDEU_MODE_PD | SEC_MDEU_MODE_INIT;
1757	*eu = SEC_EU_NONE;
1758
1759	switch (crd->crd_alg) {
1760	case CRYPTO_MD5_HMAC:
1761		*mode |= SEC_MDEU_MODE_HMAC;
1762		/* FALLTHROUGH */
1763	case CRYPTO_MD5:
1764		*eu = SEC_EU_MDEU_A;
1765		*mode |= SEC_MDEU_MODE_MD5;
1766		*hashlen = MD5_HASH_LEN;
1767		break;
1768	case CRYPTO_SHA1_HMAC:
1769		*mode |= SEC_MDEU_MODE_HMAC;
1770		/* FALLTHROUGH */
1771	case CRYPTO_SHA1:
1772		*eu = SEC_EU_MDEU_A;
1773		*mode |= SEC_MDEU_MODE_SHA1;
1774		*hashlen = SHA1_HASH_LEN;
1775		break;
1776	case CRYPTO_SHA2_256_HMAC:
1777		*mode |= SEC_MDEU_MODE_HMAC | SEC_MDEU_MODE_SHA256;
1778		*eu = SEC_EU_MDEU_A;
1779		break;
1780	case CRYPTO_SHA2_384_HMAC:
1781		*mode |= SEC_MDEU_MODE_HMAC | SEC_MDEU_MODE_SHA384;
1782		*eu = SEC_EU_MDEU_B;
1783		break;
1784	case CRYPTO_SHA2_512_HMAC:
1785		*mode |= SEC_MDEU_MODE_HMAC | SEC_MDEU_MODE_SHA512;
1786		*eu = SEC_EU_MDEU_B;
1787		break;
1788	default:
1789		return (EINVAL);
1790	}
1791
1792	if (*mode & SEC_MDEU_MODE_HMAC)
1793		*hashlen = SEC_HMAC_HASH_LEN;
1794
1795	return (0);
1796}
1797
1798static int
1799sec_mdeu_newsession(struct sec_softc *sc, struct sec_session *ses,
1800    struct cryptoini *enc, struct cryptoini *mac)
1801{
1802
1803	if (mac && sec_mdeu_can_handle(mac->cri_alg))
1804		return (0);
1805
1806	return (-1);
1807}
1808
1809static int
1810sec_mdeu_make_desc(struct sec_softc *sc, struct sec_session *ses,
1811    struct sec_desc *desc, struct cryptop *crp, int buftype)
1812{
1813	struct cryptodesc *enc, *mac;
1814	struct sec_hw_desc *hd = desc->sd_desc;
1815	u_int eu, mode, hashlen;
1816	int error;
1817
1818	error = sec_split_crp(crp, &enc, &mac);
1819	if (error)
1820		return (error);
1821
1822	if (enc)
1823		return (EINVAL);
1824
1825	error = sec_mdeu_config(mac, &eu, &mode, &hashlen);
1826	if (error)
1827		return (error);
1828
1829	hd->shd_desc_type = SEC_DT_COMMON_NONSNOOP;
1830	hd->shd_eu_sel0 = eu;
1831	hd->shd_mode0 = mode;
1832	hd->shd_eu_sel1 = SEC_EU_NONE;
1833	hd->shd_mode1 = 0;
1834
1835	/* Pointer 0: NULL */
1836	error = sec_make_pointer_direct(sc, desc, 0, 0, 0);
1837	if (error)
1838		return (error);
1839
1840	/* Pointer 1: Context In (Not used: NULL) */
1841	error = sec_make_pointer_direct(sc, desc, 1, 0, 0);
1842	if (error)
1843		return (error);
1844
1845	/* Pointer 2: HMAC Key (or NULL, depending on digest type) */
1846	if (hd->shd_mode0 & SEC_MDEU_MODE_HMAC)
1847		error = sec_make_pointer_direct(sc, desc, 2,
1848		    desc->sd_desc_paddr + offsetof(struct sec_hw_desc,
1849		    shd_mkey), ses->ss_mklen);
1850	else
1851		error = sec_make_pointer_direct(sc, desc, 2, 0, 0);
1852
1853	if (error)
1854		return (error);
1855
1856	/* Pointer 3: Input Data */
1857	error = sec_make_pointer(sc, desc, 3, crp->crp_buf, mac->crd_skip,
1858	    mac->crd_len, buftype);
1859	if (error)
1860		return (error);
1861
1862	/* Pointer 4: NULL */
1863	error = sec_make_pointer_direct(sc, desc, 4, 0, 0);
1864	if (error)
1865		return (error);
1866
1867	/* Pointer 5: Hash out */
1868	error = sec_make_pointer(sc, desc, 5, crp->crp_buf,
1869	    mac->crd_inject, hashlen, buftype);
1870	if (error)
1871		return (error);
1872
1873	/* Pointer 6: NULL */
1874	error = sec_make_pointer_direct(sc, desc, 6, 0, 0);
1875
1876	return (error);
1877}
1878