1/*-
2 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
3 *
4 * Copyright (C) 2008-2009 Semihalf, Piotr Ziecik
5 * All rights reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 * 1. Redistributions of source code must retain the above copyright
11 *    notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 *    notice, this list of conditions and the following disclaimer in the
14 *    documentation and/or other materials provided with the distribution.
15 *
16 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
17 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
18 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN
19 * NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
20 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
21 * TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
22 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
23 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
24 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
25 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
26 */
27
28/*
 * Freescale integrated Security Engine (SEC) driver. Currently SEC 2.0, 3.0
 * and 3.1 are supported.
31 */
32
33#include <sys/cdefs.h>
34__FBSDID("$FreeBSD$");
35
36#include <sys/param.h>
37#include <sys/systm.h>
38#include <sys/bus.h>
39#include <sys/endian.h>
40#include <sys/kernel.h>
41#include <sys/lock.h>
42#include <sys/malloc.h>
43#include <sys/mbuf.h>
44#include <sys/module.h>
45#include <sys/mutex.h>
46#include <sys/random.h>
47#include <sys/rman.h>
48
49#include <machine/_inttypes.h>
50#include <machine/bus.h>
51#include <machine/resource.h>
52
53#include <opencrypto/cryptodev.h>
54#include "cryptodev_if.h"
55
56#include <dev/ofw/ofw_bus_subr.h>
57#include <dev/sec/sec.h>
58
59static int	sec_probe(device_t dev);
60static int	sec_attach(device_t dev);
61static int	sec_detach(device_t dev);
62static int	sec_suspend(device_t dev);
63static int	sec_resume(device_t dev);
64static int	sec_shutdown(device_t dev);
65static void	sec_primary_intr(void *arg);
66static void	sec_secondary_intr(void *arg);
67static int	sec_setup_intr(struct sec_softc *sc, struct resource **ires,
68    void **ihand, int *irid, driver_intr_t handler, const char *iname);
69static void	sec_release_intr(struct sec_softc *sc, struct resource *ires,
70    void *ihand, int irid, const char *iname);
71static int	sec_controller_reset(struct sec_softc *sc);
72static int	sec_channel_reset(struct sec_softc *sc, int channel, int full);
73static int	sec_init(struct sec_softc *sc);
74static int	sec_alloc_dma_mem(struct sec_softc *sc,
75    struct sec_dma_mem *dma_mem, bus_size_t size);
76static int	sec_desc_map_dma(struct sec_softc *sc,
77    struct sec_dma_mem *dma_mem, void *mem, bus_size_t size, int type,
78    struct sec_desc_map_info *sdmi);
79static void	sec_free_dma_mem(struct sec_dma_mem *dma_mem);
80static void	sec_enqueue(struct sec_softc *sc);
81static int	sec_enqueue_desc(struct sec_softc *sc, struct sec_desc *desc,
82    int channel);
83static int	sec_eu_channel(struct sec_softc *sc, int eu);
84static int	sec_make_pointer(struct sec_softc *sc, struct sec_desc *desc,
85    u_int n, void *data, bus_size_t doffset, bus_size_t dsize, int dtype);
86static int	sec_make_pointer_direct(struct sec_softc *sc,
87    struct sec_desc *desc, u_int n, bus_addr_t data, bus_size_t dsize);
88static int	sec_newsession(device_t dev, crypto_session_t cses,
89    struct cryptoini *cri);
90static int	sec_process(device_t dev, struct cryptop *crp, int hint);
91static int	sec_split_cri(struct cryptoini *cri, struct cryptoini **enc,
92    struct cryptoini **mac);
93static int	sec_split_crp(struct cryptop *crp, struct cryptodesc **enc,
94    struct cryptodesc **mac);
95static int	sec_build_common_ns_desc(struct sec_softc *sc,
96    struct sec_desc *desc, struct sec_session *ses, struct cryptop *crp,
97    struct cryptodesc *enc, int buftype);
98static int	sec_build_common_s_desc(struct sec_softc *sc,
99    struct sec_desc *desc, struct sec_session *ses, struct cryptop *crp,
100    struct cryptodesc *enc, struct cryptodesc *mac, int buftype);
101
102static struct sec_desc *sec_find_desc(struct sec_softc *sc, bus_addr_t paddr);
103
104/* AESU */
105static int	sec_aesu_newsession(struct sec_softc *sc,
106    struct sec_session *ses, struct cryptoini *enc, struct cryptoini *mac);
107static int	sec_aesu_make_desc(struct sec_softc *sc,
108    struct sec_session *ses, struct sec_desc *desc, struct cryptop *crp,
109    int buftype);
110
111/* DEU */
112static int	sec_deu_newsession(struct sec_softc *sc,
113    struct sec_session *ses, struct cryptoini *enc, struct cryptoini *mac);
114static int	sec_deu_make_desc(struct sec_softc *sc,
115    struct sec_session *ses, struct sec_desc *desc, struct cryptop *crp,
116    int buftype);
117
118/* MDEU */
119static int	sec_mdeu_can_handle(u_int alg);
120static int	sec_mdeu_config(struct cryptodesc *crd,
121    u_int *eu, u_int *mode, u_int *hashlen);
122static int	sec_mdeu_newsession(struct sec_softc *sc,
123    struct sec_session *ses, struct cryptoini *enc, struct cryptoini *mac);
124static int	sec_mdeu_make_desc(struct sec_softc *sc,
125    struct sec_session *ses, struct sec_desc *desc, struct cryptop *crp,
126    int buftype);
127
128static device_method_t sec_methods[] = {
129	/* Device interface */
130	DEVMETHOD(device_probe,		sec_probe),
131	DEVMETHOD(device_attach,	sec_attach),
132	DEVMETHOD(device_detach,	sec_detach),
133
134	DEVMETHOD(device_suspend,	sec_suspend),
135	DEVMETHOD(device_resume,	sec_resume),
136	DEVMETHOD(device_shutdown,	sec_shutdown),
137
138	/* Crypto methods */
139	DEVMETHOD(cryptodev_newsession,	sec_newsession),
140	DEVMETHOD(cryptodev_process,	sec_process),
141
142	DEVMETHOD_END
143};
144static driver_t sec_driver = {
145	"sec",
146	sec_methods,
147	sizeof(struct sec_softc),
148};
149
150static devclass_t sec_devclass;
151DRIVER_MODULE(sec, simplebus, sec_driver, sec_devclass, 0, 0);
152MODULE_DEPEND(sec, crypto, 1, 1, 1);
153
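/*
 * Execution unit (EU) glue: sec_newsession() walks this table and binds the
 * session to the first EU whose newsession callback accepts the requested
 * algorithm combination.
 */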
154static struct sec_eu_methods sec_eus[] = {
155	{
156		sec_aesu_newsession,
157		sec_aesu_make_desc,
158	},
159	{
160		sec_deu_newsession,
161		sec_deu_make_desc,
162	},
163	{
164		sec_mdeu_newsession,
165		sec_mdeu_make_desc,
166	},
167	{ NULL, NULL }
168};
169
170static inline void
171sec_sync_dma_mem(struct sec_dma_mem *dma_mem, bus_dmasync_op_t op)
172{
173
174	/* Sync only if dma memory is valid */
175	if (dma_mem->dma_vaddr != NULL)
176		bus_dmamap_sync(dma_mem->dma_tag, dma_mem->dma_map, op);
177}
178
179static inline void *
180sec_get_pointer_data(struct sec_desc *desc, u_int n)
181{
182
183	return (desc->sd_ptr_dmem[n].dma_vaddr);
184}
185
186static int
187sec_probe(device_t dev)
188{
189	struct sec_softc *sc;
190	uint64_t id;
191
192	if (!ofw_bus_status_okay(dev))
193		return (ENXIO);
194
195	if (!ofw_bus_is_compatible(dev, "fsl,sec2.0"))
196		return (ENXIO);
197
198	sc = device_get_softc(dev);
199
200	sc->sc_rrid = 0;
201	sc->sc_rres = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &sc->sc_rrid,
202	    RF_ACTIVE);
203
204	if (sc->sc_rres == NULL)
205		return (ENXIO);
206
207	sc->sc_bas.bsh = rman_get_bushandle(sc->sc_rres);
208	sc->sc_bas.bst = rman_get_bustag(sc->sc_rres);
209
210	id = SEC_READ(sc, SEC_ID);
211
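	/* Mapping was needed only to read the ID; attach() remaps registers */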
212	bus_release_resource(dev, SYS_RES_MEMORY, sc->sc_rrid, sc->sc_rres);
213
214	switch (id) {
215	case SEC_20_ID:
216		device_set_desc(dev, "Freescale Security Engine 2.0");
217		sc->sc_version = 2;
218		break;
219	case SEC_30_ID:
220		device_set_desc(dev, "Freescale Security Engine 3.0");
221		sc->sc_version = 3;
222		break;
223	case SEC_31_ID:
224		device_set_desc(dev, "Freescale Security Engine 3.1");
225		sc->sc_version = 3;
226		break;
227	default:
228		device_printf(dev, "unknown SEC ID 0x%016"PRIx64"!\n", id);
229		return (ENXIO);
230	}
231
232	return (0);
233}
234
235static int
236sec_attach(device_t dev)
237{
238	struct sec_softc *sc;
239	struct sec_hw_lt *lt;
240	int error = 0;
241	int i;
242
243	sc = device_get_softc(dev);
244	sc->sc_dev = dev;
245	sc->sc_blocked = 0;
246	sc->sc_shutdown = 0;
247
248	sc->sc_cid = crypto_get_driverid(dev, sizeof(struct sec_session),
249	    CRYPTOCAP_F_HARDWARE);
250	if (sc->sc_cid < 0) {
251		device_printf(dev, "could not get crypto driver ID!\n");
252		return (ENXIO);
253	}
254
255	/* Init locks */
256	mtx_init(&sc->sc_controller_lock, device_get_nameunit(dev),
257	    "SEC Controller lock", MTX_DEF);
258	mtx_init(&sc->sc_descriptors_lock, device_get_nameunit(dev),
259	    "SEC Descriptors lock", MTX_DEF);
260
261	/* Allocate I/O memory for SEC registers */
262	sc->sc_rrid = 0;
263	sc->sc_rres = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &sc->sc_rrid,
264	    RF_ACTIVE);
265
266	if (sc->sc_rres == NULL) {
267		device_printf(dev, "could not allocate I/O memory!\n");
268		goto fail1;
269	}
270
271	sc->sc_bas.bsh = rman_get_bushandle(sc->sc_rres);
272	sc->sc_bas.bst = rman_get_bustag(sc->sc_rres);
273
274	/* Setup interrupts */
275	sc->sc_pri_irid = 0;
276	error = sec_setup_intr(sc, &sc->sc_pri_ires, &sc->sc_pri_ihand,
277	    &sc->sc_pri_irid, sec_primary_intr, "primary");
278
279	if (error)
280		goto fail2;
281
283	if (sc->sc_version == 3) {
284		sc->sc_sec_irid = 1;
285		error = sec_setup_intr(sc, &sc->sc_sec_ires, &sc->sc_sec_ihand,
286		    &sc->sc_sec_irid, sec_secondary_intr, "secondary");
287
288		if (error)
289			goto fail3;
290	}
291
292	/* Alloc DMA memory for descriptors and link tables */
293	error = sec_alloc_dma_mem(sc, &(sc->sc_desc_dmem),
294	    SEC_DESCRIPTORS * sizeof(struct sec_hw_desc));
295
296	if (error)
297		goto fail4;
298
299	error = sec_alloc_dma_mem(sc, &(sc->sc_lt_dmem),
300	    (SEC_LT_ENTRIES + 1) * sizeof(struct sec_hw_lt));
301
302	if (error)
303		goto fail5;
304
305	/* Fill in descriptors and link tables */
306	for (i = 0; i < SEC_DESCRIPTORS; i++) {
307		sc->sc_desc[i].sd_desc =
308		    (struct sec_hw_desc*)(sc->sc_desc_dmem.dma_vaddr) + i;
309		sc->sc_desc[i].sd_desc_paddr = sc->sc_desc_dmem.dma_paddr +
310		    (i * sizeof(struct sec_hw_desc));
311	}
312
313	for (i = 0; i < SEC_LT_ENTRIES + 1; i++) {
314		sc->sc_lt[i].sl_lt =
315		    (struct sec_hw_lt*)(sc->sc_lt_dmem.dma_vaddr) + i;
316		sc->sc_lt[i].sl_lt_paddr = sc->sc_lt_dmem.dma_paddr +
317		    (i * sizeof(struct sec_hw_lt));
318	}
319
320	/* Last entry in link table is used to create a circle */
321	lt = sc->sc_lt[SEC_LT_ENTRIES].sl_lt;
322	lt->shl_length = 0;
323	lt->shl_r = 0;
324	lt->shl_n = 1;
325	lt->shl_ptr = sc->sc_lt[0].sl_lt_paddr;
326
	/* Init descriptor and link table queue pointers */
328	SEC_CNT_INIT(sc, sc_free_desc_get_cnt, SEC_DESCRIPTORS);
329	SEC_CNT_INIT(sc, sc_free_desc_put_cnt, SEC_DESCRIPTORS);
330	SEC_CNT_INIT(sc, sc_ready_desc_get_cnt, SEC_DESCRIPTORS);
331	SEC_CNT_INIT(sc, sc_ready_desc_put_cnt, SEC_DESCRIPTORS);
332	SEC_CNT_INIT(sc, sc_queued_desc_get_cnt, SEC_DESCRIPTORS);
333	SEC_CNT_INIT(sc, sc_queued_desc_put_cnt, SEC_DESCRIPTORS);
334	SEC_CNT_INIT(sc, sc_lt_alloc_cnt, SEC_LT_ENTRIES);
335	SEC_CNT_INIT(sc, sc_lt_free_cnt, SEC_LT_ENTRIES);
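	/*
	 * The get/put counter pairs above act as ring indices over the
	 * descriptor and link table arrays: descriptors move from free to
	 * ready (sec_process), from ready to queued (sec_enqueue) and back
	 * to free once completed (sec_primary_intr).
	 */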
336
337	/* Create masks for fast checks */
338	sc->sc_int_error_mask = 0;
339	for (i = 0; i < SEC_CHANNELS; i++)
340		sc->sc_int_error_mask |= (~0ULL & SEC_INT_CH_ERR(i));
341
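	/*
	 * A channel is treated as idle when the FIFO level and all of the
	 * CSR state machine fields covered by this mask read back as zero
	 * (see sec_enqueue_desc()).
	 */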
342	switch (sc->sc_version) {
343	case 2:
344		sc->sc_channel_idle_mask =
345		    (SEC_CHAN_CSR2_FFLVL_M << SEC_CHAN_CSR2_FFLVL_S) |
346		    (SEC_CHAN_CSR2_MSTATE_M << SEC_CHAN_CSR2_MSTATE_S) |
347		    (SEC_CHAN_CSR2_PSTATE_M << SEC_CHAN_CSR2_PSTATE_S) |
348		    (SEC_CHAN_CSR2_GSTATE_M << SEC_CHAN_CSR2_GSTATE_S);
349		break;
350	case 3:
351		sc->sc_channel_idle_mask =
352		    (SEC_CHAN_CSR3_FFLVL_M << SEC_CHAN_CSR3_FFLVL_S) |
353		    (SEC_CHAN_CSR3_MSTATE_M << SEC_CHAN_CSR3_MSTATE_S) |
354		    (SEC_CHAN_CSR3_PSTATE_M << SEC_CHAN_CSR3_PSTATE_S) |
355		    (SEC_CHAN_CSR3_GSTATE_M << SEC_CHAN_CSR3_GSTATE_S);
356		break;
357	}
358
359	/* Init hardware */
360	error = sec_init(sc);
361
362	if (error)
363		goto fail6;
364
365	/* Register in OCF (AESU) */
366	crypto_register(sc->sc_cid, CRYPTO_AES_CBC, 0, 0);
367
368	/* Register in OCF (DEU) */
369	crypto_register(sc->sc_cid, CRYPTO_DES_CBC, 0, 0);
370	crypto_register(sc->sc_cid, CRYPTO_3DES_CBC, 0, 0);
371
372	/* Register in OCF (MDEU) */
373	crypto_register(sc->sc_cid, CRYPTO_MD5, 0, 0);
374	crypto_register(sc->sc_cid, CRYPTO_MD5_HMAC, 0, 0);
375	crypto_register(sc->sc_cid, CRYPTO_SHA1, 0, 0);
376	crypto_register(sc->sc_cid, CRYPTO_SHA1_HMAC, 0, 0);
377	crypto_register(sc->sc_cid, CRYPTO_SHA2_256_HMAC, 0, 0);
378	if (sc->sc_version >= 3) {
379		crypto_register(sc->sc_cid, CRYPTO_SHA2_384_HMAC, 0, 0);
380		crypto_register(sc->sc_cid, CRYPTO_SHA2_512_HMAC, 0, 0);
381	}
382
383	return (0);
384
385fail6:
386	sec_free_dma_mem(&(sc->sc_lt_dmem));
387fail5:
388	sec_free_dma_mem(&(sc->sc_desc_dmem));
389fail4:
390	sec_release_intr(sc, sc->sc_sec_ires, sc->sc_sec_ihand,
391	    sc->sc_sec_irid, "secondary");
392fail3:
393	sec_release_intr(sc, sc->sc_pri_ires, sc->sc_pri_ihand,
394	    sc->sc_pri_irid, "primary");
395fail2:
396	bus_release_resource(dev, SYS_RES_MEMORY, sc->sc_rrid, sc->sc_rres);
397fail1:
398	mtx_destroy(&sc->sc_controller_lock);
399	mtx_destroy(&sc->sc_descriptors_lock);
400
401	return (ENXIO);
402}
403
404static int
405sec_detach(device_t dev)
406{
407	struct sec_softc *sc = device_get_softc(dev);
408	int i, error, timeout = SEC_TIMEOUT;
409
410	/* Prepare driver to shutdown */
411	SEC_LOCK(sc, descriptors);
412	sc->sc_shutdown = 1;
413	SEC_UNLOCK(sc, descriptors);
414
415	/* Wait until all queued processing finishes */
416	while (1) {
417		SEC_LOCK(sc, descriptors);
418		i = SEC_READY_DESC_CNT(sc) + SEC_QUEUED_DESC_CNT(sc);
419		SEC_UNLOCK(sc, descriptors);
420
421		if (i == 0)
422			break;
423
424		if (timeout < 0) {
425			device_printf(dev, "queue flush timeout!\n");
426
			/* DMA can still be active - stop it */
428			for (i = 0; i < SEC_CHANNELS; i++)
429				sec_channel_reset(sc, i, 1);
430
431			break;
432		}
433
434		timeout -= 1000;
435		DELAY(1000);
436	}
437
438	/* Disable interrupts */
439	SEC_WRITE(sc, SEC_IER, 0);
440
441	/* Unregister from OCF */
442	crypto_unregister_all(sc->sc_cid);
443
444	/* Free DMA memory */
445	for (i = 0; i < SEC_DESCRIPTORS; i++)
446		SEC_DESC_FREE_POINTERS(&(sc->sc_desc[i]));
447
448	sec_free_dma_mem(&(sc->sc_lt_dmem));
449	sec_free_dma_mem(&(sc->sc_desc_dmem));
450
451	/* Release interrupts */
452	sec_release_intr(sc, sc->sc_pri_ires, sc->sc_pri_ihand,
453	    sc->sc_pri_irid, "primary");
454	sec_release_intr(sc, sc->sc_sec_ires, sc->sc_sec_ihand,
455	    sc->sc_sec_irid, "secondary");
456
457	/* Release memory */
458	if (sc->sc_rres) {
459		error = bus_release_resource(dev, SYS_RES_MEMORY, sc->sc_rrid,
460		    sc->sc_rres);
461		if (error)
462			device_printf(dev, "bus_release_resource() failed for"
463			    " I/O memory, error %d\n", error);
464
465		sc->sc_rres = NULL;
466	}
467
468	mtx_destroy(&sc->sc_controller_lock);
469	mtx_destroy(&sc->sc_descriptors_lock);
470
471	return (0);
472}
473
474static int
475sec_suspend(device_t dev)
476{
477
478	return (0);
479}
480
481static int
482sec_resume(device_t dev)
483{
484
485	return (0);
486}
487
488static int
489sec_shutdown(device_t dev)
490{
491
492	return (0);
493}
494
495static int
496sec_setup_intr(struct sec_softc *sc, struct resource **ires, void **ihand,
497    int *irid, driver_intr_t handler, const char *iname)
498{
499	int error;
500
501	(*ires) = bus_alloc_resource_any(sc->sc_dev, SYS_RES_IRQ, irid,
502	    RF_ACTIVE);
503
504	if ((*ires) == NULL) {
505		device_printf(sc->sc_dev, "could not allocate %s IRQ\n", iname);
506		return (ENXIO);
507	}
508
509	error = bus_setup_intr(sc->sc_dev, *ires, INTR_MPSAFE | INTR_TYPE_NET,
510	    NULL, handler, sc, ihand);
511
512	if (error) {
513		device_printf(sc->sc_dev, "failed to set up %s IRQ\n", iname);
514		if (bus_release_resource(sc->sc_dev, SYS_RES_IRQ, *irid, *ires))
515			device_printf(sc->sc_dev, "could not release %s IRQ\n",
516			    iname);
517
518		(*ires) = NULL;
519		return (error);
520	}
521
522	return (0);
523}
524
525static void
526sec_release_intr(struct sec_softc *sc, struct resource *ires, void *ihand,
527    int irid, const char *iname)
528{
529	int error;
530
531	if (ires == NULL)
532		return;
533
534	error = bus_teardown_intr(sc->sc_dev, ires, ihand);
535	if (error)
536		device_printf(sc->sc_dev, "bus_teardown_intr() failed for %s"
537		    " IRQ, error %d\n", iname, error);
538
539	error = bus_release_resource(sc->sc_dev, SYS_RES_IRQ, irid, ires);
540	if (error)
541		device_printf(sc->sc_dev, "bus_release_resource() failed for %s"
542		    " IRQ, error %d\n", iname, error);
543}
544
545static void
546sec_primary_intr(void *arg)
547{
548	struct sec_softc *sc = arg;
549	struct sec_desc *desc;
550	uint64_t isr;
551	int i, wakeup = 0;
552
553	SEC_LOCK(sc, controller);
554
555	/* Check for errors */
556	isr = SEC_READ(sc, SEC_ISR);
557	if (isr & sc->sc_int_error_mask) {
558		/* Check each channel for error */
559		for (i = 0; i < SEC_CHANNELS; i++) {
560			if ((isr & SEC_INT_CH_ERR(i)) == 0)
561				continue;
562
563			device_printf(sc->sc_dev,
564			    "I/O error on channel %i!\n", i);
565
566			/* Find and mark problematic descriptor */
567			desc = sec_find_desc(sc, SEC_READ(sc,
568			    SEC_CHAN_CDPR(i)));
569
570			if (desc != NULL)
571				desc->sd_error = EIO;
572
573			/* Do partial channel reset */
574			sec_channel_reset(sc, i, 0);
575		}
576	}
577
578	/* ACK interrupt */
579	SEC_WRITE(sc, SEC_ICR, 0xFFFFFFFFFFFFFFFFULL);
580
581	SEC_UNLOCK(sc, controller);
582	SEC_LOCK(sc, descriptors);
583
584	/* Handle processed descriptors */
585	SEC_DESC_SYNC(sc, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
586
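	/*
	 * Reap completed descriptors in queue order; stop at the first one
	 * that the hardware has not marked done (shd_done != 0xFF) and that
	 * was not flagged with an error above.
	 */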
587	while (SEC_QUEUED_DESC_CNT(sc) > 0) {
588		desc = SEC_GET_QUEUED_DESC(sc);
589
590		if (desc->sd_desc->shd_done != 0xFF && desc->sd_error == 0) {
591			SEC_PUT_BACK_QUEUED_DESC(sc);
592			break;
593		}
594
595		SEC_DESC_SYNC_POINTERS(desc, BUS_DMASYNC_PREREAD |
596		    BUS_DMASYNC_PREWRITE);
597
598		desc->sd_crp->crp_etype = desc->sd_error;
599		crypto_done(desc->sd_crp);
600
601		SEC_DESC_FREE_POINTERS(desc);
602		SEC_DESC_FREE_LT(sc, desc);
603		SEC_DESC_QUEUED2FREE(sc);
604	}
605
606	SEC_DESC_SYNC(sc, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
607
608	if (!sc->sc_shutdown) {
609		wakeup = sc->sc_blocked;
610		sc->sc_blocked = 0;
611	}
612
613	SEC_UNLOCK(sc, descriptors);
614
615	/* Enqueue ready descriptors in hardware */
616	sec_enqueue(sc);
617
618	if (wakeup)
619		crypto_unblock(sc->sc_cid, wakeup);
620}
621
622static void
623sec_secondary_intr(void *arg)
624{
625	struct sec_softc *sc = arg;
626
627	device_printf(sc->sc_dev, "spurious secondary interrupt!\n");
628	sec_primary_intr(arg);
629}
630
631static int
632sec_controller_reset(struct sec_softc *sc)
633{
634	int timeout = SEC_TIMEOUT;
635
636	/* Reset Controller */
637	SEC_WRITE(sc, SEC_MCR, SEC_MCR_SWR);
638
639	while (SEC_READ(sc, SEC_MCR) & SEC_MCR_SWR) {
640		DELAY(1000);
641		timeout -= 1000;
642
643		if (timeout < 0) {
644			device_printf(sc->sc_dev, "timeout while waiting for "
645			    "device reset!\n");
646			return (ETIMEDOUT);
647		}
648	}
649
650	return (0);
651}
652
653static int
654sec_channel_reset(struct sec_softc *sc, int channel, int full)
655{
656	int timeout = SEC_TIMEOUT;
657	uint64_t bit = (full) ? SEC_CHAN_CCR_R : SEC_CHAN_CCR_CON;
658	uint64_t reg;
659
660	/* Reset Channel */
661	reg = SEC_READ(sc, SEC_CHAN_CCR(channel));
662	SEC_WRITE(sc, SEC_CHAN_CCR(channel), reg | bit);
663
664	while (SEC_READ(sc, SEC_CHAN_CCR(channel)) & bit) {
665		DELAY(1000);
666		timeout -= 1000;
667
668		if (timeout < 0) {
669			device_printf(sc->sc_dev, "timeout while waiting for "
670			    "channel reset!\n");
671			return (ETIMEDOUT);
672		}
673	}
674
675	if (full) {
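		/*
		 * After a full reset the channel configuration is
		 * reprogrammed for the detected controller version.
		 */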
676		reg = SEC_CHAN_CCR_CDIE | SEC_CHAN_CCR_NT | SEC_CHAN_CCR_BS;
677
678		switch(sc->sc_version) {
679		case 2:
680			reg |= SEC_CHAN_CCR_CDWE;
681			break;
682		case 3:
683			reg |= SEC_CHAN_CCR_AWSE | SEC_CHAN_CCR_WGN;
684			break;
685		}
686
687		SEC_WRITE(sc, SEC_CHAN_CCR(channel), reg);
688	}
689
690	return (0);
691}
692
693static int
694sec_init(struct sec_softc *sc)
695{
696	uint64_t reg;
697	int error, i;
698
699	/* Reset controller twice to clear all pending interrupts */
700	error = sec_controller_reset(sc);
701	if (error)
702		return (error);
703
704	error = sec_controller_reset(sc);
705	if (error)
706		return (error);
707
708	/* Reset channels */
709	for (i = 0; i < SEC_CHANNELS; i++) {
710		error = sec_channel_reset(sc, i, 1);
711		if (error)
712			return (error);
713	}
714
715	/* Enable Interrupts */
716	reg = SEC_INT_ITO;
717	for (i = 0; i < SEC_CHANNELS; i++)
718		reg |= SEC_INT_CH_DN(i) | SEC_INT_CH_ERR(i);
719
720	SEC_WRITE(sc, SEC_IER, reg);
721
722	return (error);
723}
724
725static void
726sec_alloc_dma_mem_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
727{
728	struct sec_dma_mem *dma_mem = arg;
729
730	if (error)
731		return;
732
733	KASSERT(nseg == 1, ("Wrong number of segments, should be 1"));
734	dma_mem->dma_paddr = segs->ds_addr;
735}
736
737static void
738sec_dma_map_desc_cb(void *arg, bus_dma_segment_t *segs, int nseg,
739    int error)
740{
741	struct sec_desc_map_info *sdmi = arg;
742	struct sec_softc *sc = sdmi->sdmi_sc;
743	struct sec_lt *lt = NULL;
744	bus_addr_t addr;
745	bus_size_t size;
746	int i;
747
748	SEC_LOCK_ASSERT(sc, descriptors);
749
750	if (error)
751		return;
752
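	/*
	 * Convert each DMA segment into a link table entry, honoring the
	 * requested starting offset and total transfer size.
	 */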
753	for (i = 0; i < nseg; i++) {
754		addr = segs[i].ds_addr;
755		size = segs[i].ds_len;
756
757		/* Skip requested offset */
758		if (sdmi->sdmi_offset >= size) {
759			sdmi->sdmi_offset -= size;
760			continue;
761		}
762
763		addr += sdmi->sdmi_offset;
764		size -= sdmi->sdmi_offset;
765		sdmi->sdmi_offset = 0;
766
767		/* Do not link more than requested */
768		if (sdmi->sdmi_size < size)
769			size = sdmi->sdmi_size;
770
771		lt = SEC_ALLOC_LT_ENTRY(sc);
772		lt->sl_lt->shl_length = size;
773		lt->sl_lt->shl_r = 0;
774		lt->sl_lt->shl_n = 0;
775		lt->sl_lt->shl_ptr = addr;
776
777		if (sdmi->sdmi_lt_first == NULL)
778			sdmi->sdmi_lt_first = lt;
779
780		sdmi->sdmi_lt_used += 1;
781
782		if ((sdmi->sdmi_size -= size) == 0)
783			break;
784	}
785
786	sdmi->sdmi_lt_last = lt;
787}
788
789static void
790sec_dma_map_desc_cb2(void *arg, bus_dma_segment_t *segs, int nseg,
791    bus_size_t size, int error)
792{
793
794	sec_dma_map_desc_cb(arg, segs, nseg, error);
795}
796
797static int
798sec_alloc_dma_mem(struct sec_softc *sc, struct sec_dma_mem *dma_mem,
799    bus_size_t size)
800{
801	int error;
802
803	if (dma_mem->dma_vaddr != NULL)
804		return (EBUSY);
805
806	error = bus_dma_tag_create(NULL,	/* parent */
807		SEC_DMA_ALIGNMENT, 0,		/* alignment, boundary */
808		BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
809		BUS_SPACE_MAXADDR,		/* highaddr */
810		NULL, NULL,			/* filtfunc, filtfuncarg */
811		size, 1,			/* maxsize, nsegments */
812		size, 0,			/* maxsegsz, flags */
813		NULL, NULL,			/* lockfunc, lockfuncarg */
814		&(dma_mem->dma_tag));		/* dmat */
815
816	if (error) {
817		device_printf(sc->sc_dev, "failed to allocate busdma tag, error"
818		    " %i!\n", error);
819		goto err1;
820	}
821
822	error = bus_dmamem_alloc(dma_mem->dma_tag, &(dma_mem->dma_vaddr),
823	    BUS_DMA_NOWAIT | BUS_DMA_ZERO, &(dma_mem->dma_map));
824
825	if (error) {
826		device_printf(sc->sc_dev, "failed to allocate DMA safe"
827		    " memory, error %i!\n", error);
828		goto err2;
829	}
830
831	error = bus_dmamap_load(dma_mem->dma_tag, dma_mem->dma_map,
832		    dma_mem->dma_vaddr, size, sec_alloc_dma_mem_cb, dma_mem,
833		    BUS_DMA_NOWAIT);
834
835	if (error) {
836		device_printf(sc->sc_dev, "cannot get address of the DMA"
837		    " memory, error %i\n", error);
838		goto err3;
839	}
840
841	dma_mem->dma_is_map = 0;
842	return (0);
843
844err3:
845	bus_dmamem_free(dma_mem->dma_tag, dma_mem->dma_vaddr, dma_mem->dma_map);
846err2:
847	bus_dma_tag_destroy(dma_mem->dma_tag);
848err1:
849	dma_mem->dma_vaddr = NULL;
850	return(error);
851}
852
853static int
854sec_desc_map_dma(struct sec_softc *sc, struct sec_dma_mem *dma_mem, void *mem,
855    bus_size_t size, int type, struct sec_desc_map_info *sdmi)
856{
857	int error;
858
859	if (dma_mem->dma_vaddr != NULL)
860		return (EBUSY);
861
862	switch (type) {
863	case SEC_MEMORY:
864		break;
865	case SEC_UIO:
866		size = SEC_FREE_LT_CNT(sc) * SEC_MAX_DMA_BLOCK_SIZE;
867		break;
868	case SEC_MBUF:
869		size = m_length((struct mbuf*)mem, NULL);
870		break;
871	default:
872		return (EINVAL);
873	}
874
875	error = bus_dma_tag_create(NULL,	/* parent */
876		SEC_DMA_ALIGNMENT, 0,		/* alignment, boundary */
877		BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
878		BUS_SPACE_MAXADDR,		/* highaddr */
879		NULL, NULL,			/* filtfunc, filtfuncarg */
880		size,				/* maxsize */
881		SEC_FREE_LT_CNT(sc),		/* nsegments */
882		SEC_MAX_DMA_BLOCK_SIZE, 0,	/* maxsegsz, flags */
883		NULL, NULL,			/* lockfunc, lockfuncarg */
884		&(dma_mem->dma_tag));		/* dmat */
885
886	if (error) {
887		device_printf(sc->sc_dev, "failed to allocate busdma tag, error"
888		    " %i!\n", error);
889		dma_mem->dma_vaddr = NULL;
890		return (error);
891	}
892
893	error = bus_dmamap_create(dma_mem->dma_tag, 0, &(dma_mem->dma_map));
894
895	if (error) {
896		device_printf(sc->sc_dev, "failed to create DMA map, error %i!"
897		    "\n", error);
898		bus_dma_tag_destroy(dma_mem->dma_tag);
899		return (error);
900	}
901
902	switch (type) {
903	case SEC_MEMORY:
904		error = bus_dmamap_load(dma_mem->dma_tag, dma_mem->dma_map,
905		    mem, size, sec_dma_map_desc_cb, sdmi, BUS_DMA_NOWAIT);
906		break;
907	case SEC_UIO:
908		error = bus_dmamap_load_uio(dma_mem->dma_tag, dma_mem->dma_map,
909		    mem, sec_dma_map_desc_cb2, sdmi, BUS_DMA_NOWAIT);
910		break;
911	case SEC_MBUF:
912		error = bus_dmamap_load_mbuf(dma_mem->dma_tag, dma_mem->dma_map,
913		    mem, sec_dma_map_desc_cb2, sdmi, BUS_DMA_NOWAIT);
914		break;
915	}
916
917	if (error) {
918		device_printf(sc->sc_dev, "cannot get address of the DMA"
919		    " memory, error %i!\n", error);
920		bus_dmamap_destroy(dma_mem->dma_tag, dma_mem->dma_map);
921		bus_dma_tag_destroy(dma_mem->dma_tag);
922		return (error);
923	}
924
925	dma_mem->dma_is_map = 1;
926	dma_mem->dma_vaddr = mem;
927
928	return (0);
929}
930
931static void
932sec_free_dma_mem(struct sec_dma_mem *dma_mem)
933{
934
935	/* Check for double free */
936	if (dma_mem->dma_vaddr == NULL)
937		return;
938
939	bus_dmamap_unload(dma_mem->dma_tag, dma_mem->dma_map);
940
941	if (dma_mem->dma_is_map)
942		bus_dmamap_destroy(dma_mem->dma_tag, dma_mem->dma_map);
943	else
944		bus_dmamem_free(dma_mem->dma_tag, dma_mem->dma_vaddr,
945		    dma_mem->dma_map);
946
947	bus_dma_tag_destroy(dma_mem->dma_tag);
948	dma_mem->dma_vaddr = NULL;
949}
950
951static int
952sec_eu_channel(struct sec_softc *sc, int eu)
953{
954	uint64_t reg;
955	int channel = 0;
956
957	SEC_LOCK_ASSERT(sc, controller);
958
959	reg = SEC_READ(sc, SEC_EUASR);
960
961	switch (eu) {
962	case SEC_EU_AFEU:
963		channel = SEC_EUASR_AFEU(reg);
964		break;
965	case SEC_EU_DEU:
966		channel = SEC_EUASR_DEU(reg);
967		break;
968	case SEC_EU_MDEU_A:
969	case SEC_EU_MDEU_B:
970		channel = SEC_EUASR_MDEU(reg);
971		break;
972	case SEC_EU_RNGU:
973		channel = SEC_EUASR_RNGU(reg);
974		break;
975	case SEC_EU_PKEU:
976		channel = SEC_EUASR_PKEU(reg);
977		break;
978	case SEC_EU_AESU:
979		channel = SEC_EUASR_AESU(reg);
980		break;
981	case SEC_EU_KEU:
982		channel = SEC_EUASR_KEU(reg);
983		break;
984	case SEC_EU_CRCU:
985		channel = SEC_EUASR_CRCU(reg);
986		break;
987	}
988
989	return (channel - 1);
990}
991
992static int
993sec_enqueue_desc(struct sec_softc *sc, struct sec_desc *desc, int channel)
994{
995	u_int fflvl = SEC_MAX_FIFO_LEVEL;
996	uint64_t reg;
997	int i;
998
999	SEC_LOCK_ASSERT(sc, controller);
1000
	/* Find a free channel if the caller did not specify one */
	if (channel < 0) {
		for (i = 0; i < SEC_CHANNELS; i++) {
			reg = SEC_READ(sc, SEC_CHAN_CSR(i));
1005
1006			if ((reg & sc->sc_channel_idle_mask) == 0) {
1007				channel = i;
1008				break;
1009			}
1010		}
1011	}
1012
1013	/* There is no free channel */
1014	if (channel < 0)
1015		return (-1);
1016
1017	/* Check FIFO level on selected channel */
1018	reg = SEC_READ(sc, SEC_CHAN_CSR(channel));
1019
1020	switch(sc->sc_version) {
1021	case 2:
1022		fflvl = (reg >> SEC_CHAN_CSR2_FFLVL_S) & SEC_CHAN_CSR2_FFLVL_M;
1023		break;
1024	case 3:
1025		fflvl = (reg >> SEC_CHAN_CSR3_FFLVL_S) & SEC_CHAN_CSR3_FFLVL_M;
1026		break;
1027	}
1028
1029	if (fflvl >= SEC_MAX_FIFO_LEVEL)
1030		return (-1);
1031
1032	/* Enqueue descriptor in channel */
1033	SEC_WRITE(sc, SEC_CHAN_FF(channel), desc->sd_desc_paddr);
1034
1035	return (channel);
1036}
1037
1038static void
1039sec_enqueue(struct sec_softc *sc)
1040{
1041	struct sec_desc *desc;
1042	int ch0, ch1;
1043
1044	SEC_LOCK(sc, descriptors);
1045	SEC_LOCK(sc, controller);
1046
1047	while (SEC_READY_DESC_CNT(sc) > 0) {
1048		desc = SEC_GET_READY_DESC(sc);
1049
1050		ch0 = sec_eu_channel(sc, desc->sd_desc->shd_eu_sel0);
1051		ch1 = sec_eu_channel(sc, desc->sd_desc->shd_eu_sel1);
1052
1053		/*
		 * Both EUs are used by the same channel.
		 * Enqueue the descriptor in the channel used by the busy EUs.
1056		 */
1057		if (ch0 >= 0 && ch0 == ch1) {
1058			if (sec_enqueue_desc(sc, desc, ch0) >= 0) {
1059				SEC_DESC_READY2QUEUED(sc);
1060				continue;
1061			}
1062		}
1063
1064		/*
		 * Only one EU is free.
		 * Enqueue the descriptor in the channel used by the busy EU.
1067		 */
1068		if ((ch0 >= 0 && ch1 < 0) || (ch1 >= 0 && ch0 < 0)) {
1069			if (sec_enqueue_desc(sc, desc, (ch0 >= 0) ? ch0 : ch1)
1070			    >= 0) {
1071				SEC_DESC_READY2QUEUED(sc);
1072				continue;
1073			}
1074		}
1075
1076		/*
		 * Both EUs are free.
		 * Enqueue the descriptor in the first free channel.
1079		 */
1080		if (ch0 < 0 && ch1 < 0) {
1081			if (sec_enqueue_desc(sc, desc, -1) >= 0) {
1082				SEC_DESC_READY2QUEUED(sc);
1083				continue;
1084			}
1085		}
1086
		/* Current descriptor cannot be queued at the moment */
1088		SEC_PUT_BACK_READY_DESC(sc);
1089		break;
1090	}
1091
1092	SEC_UNLOCK(sc, controller);
1093	SEC_UNLOCK(sc, descriptors);
1094}
1095
1096static struct sec_desc *
1097sec_find_desc(struct sec_softc *sc, bus_addr_t paddr)
1098{
1099	struct sec_desc *desc = NULL;
1100	int i;
1101
1102	SEC_LOCK_ASSERT(sc, descriptors);
1103
	for (i = 0; i < SEC_DESCRIPTORS; i++) {
1105		if (sc->sc_desc[i].sd_desc_paddr == paddr) {
1106			desc = &(sc->sc_desc[i]);
1107			break;
1108		}
1109	}
1110
1111	return (desc);
1112}
1113
1114static int
1115sec_make_pointer_direct(struct sec_softc *sc, struct sec_desc *desc, u_int n,
1116    bus_addr_t data, bus_size_t dsize)
1117{
1118	struct sec_hw_desc_ptr *ptr;
1119
1120	SEC_LOCK_ASSERT(sc, descriptors);
1121
1122	ptr = &(desc->sd_desc->shd_pointer[n]);
1123	ptr->shdp_length = dsize;
1124	ptr->shdp_extent = 0;
1125	ptr->shdp_j = 0;
1126	ptr->shdp_ptr = data;
1127
1128	return (0);
1129}
1130
1131static int
1132sec_make_pointer(struct sec_softc *sc, struct sec_desc *desc,
1133    u_int n, void *data, bus_size_t doffset, bus_size_t dsize, int dtype)
1134{
1135	struct sec_desc_map_info sdmi = { sc, dsize, doffset, NULL, NULL, 0 };
1136	struct sec_hw_desc_ptr *ptr;
1137	int error;
1138
1139	SEC_LOCK_ASSERT(sc, descriptors);
1140
	/* For flat memory, map only the requested region */
1142	if (dtype == SEC_MEMORY) {
1143		 data = (uint8_t*)(data) + doffset;
1144		 sdmi.sdmi_offset = 0;
1145	}
1146
1147	error = sec_desc_map_dma(sc, &(desc->sd_ptr_dmem[n]), data, dsize,
1148	    dtype, &sdmi);
1149
1150	if (error)
1151		return (error);
1152
1153	sdmi.sdmi_lt_last->sl_lt->shl_r = 1;
1154	desc->sd_lt_used += sdmi.sdmi_lt_used;
1155
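	/*
	 * Unlike sec_make_pointer_direct(), set shdp_j to mark the pointer
	 * as referencing a link table (scatter/gather) instead of a flat
	 * data buffer.
	 */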
1156	ptr = &(desc->sd_desc->shd_pointer[n]);
1157	ptr->shdp_length = dsize;
1158	ptr->shdp_extent = 0;
1159	ptr->shdp_j = 1;
1160	ptr->shdp_ptr = sdmi.sdmi_lt_first->sl_lt_paddr;
1161
1162	return (0);
1163}
1164
1165static int
1166sec_split_cri(struct cryptoini *cri, struct cryptoini **enc,
1167    struct cryptoini **mac)
1168{
1169	struct cryptoini *e, *m;
1170
1171	e = cri;
1172	m = cri->cri_next;
1173
	/* We can handle only two operations */
1175	if (m && m->cri_next)
1176		return (EINVAL);
1177
1178	if (sec_mdeu_can_handle(e->cri_alg)) {
1179		cri = m;
1180		m = e;
1181		e = cri;
1182	}
1183
1184	if (m && !sec_mdeu_can_handle(m->cri_alg))
1185		return (EINVAL);
1186
1187	*enc = e;
1188	*mac = m;
1189
1190	return (0);
1191}
1192
1193static int
1194sec_split_crp(struct cryptop *crp, struct cryptodesc **enc,
1195    struct cryptodesc **mac)
1196{
1197	struct cryptodesc *e, *m, *t;
1198
1199	e = crp->crp_desc;
1200	m = e->crd_next;
1201
	/* We can handle only two operations */
1203	if (m && m->crd_next)
1204		return (EINVAL);
1205
1206	if (sec_mdeu_can_handle(e->crd_alg)) {
1207		t = m;
1208		m = e;
1209		e = t;
1210	}
1211
1212	if (m && !sec_mdeu_can_handle(m->crd_alg))
1213		return (EINVAL);
1214
1215	*enc = e;
1216	*mac = m;
1217
1218	return (0);
1219}
1220
1221static int
1222sec_newsession(device_t dev, crypto_session_t cses, struct cryptoini *cri)
1223{
1224	struct sec_softc *sc = device_get_softc(dev);
1225	struct sec_eu_methods *eu = sec_eus;
1226	struct cryptoini *enc = NULL;
1227	struct cryptoini *mac = NULL;
1228	struct sec_session *ses;
1229	int error = -1;
1230
1231	error = sec_split_cri(cri, &enc, &mac);
1232	if (error)
1233		return (error);
1234
1235	/* Check key lengths */
1236	if (enc && enc->cri_key && (enc->cri_klen / 8) > SEC_MAX_KEY_LEN)
1237		return (E2BIG);
1238
1239	if (mac && mac->cri_key && (mac->cri_klen / 8) > SEC_MAX_KEY_LEN)
1240		return (E2BIG);
1241
1242	/* Only SEC 3.0 supports digests larger than 256 bits */
1243	if (sc->sc_version < 3 && mac && mac->cri_klen > 256)
1244		return (E2BIG);
1245
1246	ses = crypto_get_driver_session(cses);
1247
1248	/* Find EU for this session */
1249	while (eu->sem_make_desc != NULL) {
1250		error = eu->sem_newsession(sc, ses, enc, mac);
1251		if (error >= 0)
1252			break;
1253
1254		eu++;
1255	}
1256
1257	/* If not found, return EINVAL */
1258	if (error < 0)
1259		return (EINVAL);
1260
1261	/* Save cipher key */
1262	if (enc && enc->cri_key) {
1263		ses->ss_klen = enc->cri_klen / 8;
1264		memcpy(ses->ss_key, enc->cri_key, ses->ss_klen);
1265	}
1266
1267	/* Save digest key */
1268	if (mac && mac->cri_key) {
1269		ses->ss_mklen = mac->cri_klen / 8;
1270		memcpy(ses->ss_mkey, mac->cri_key, ses->ss_mklen);
1271	}
1272
1273	ses->ss_eu = eu;
1274	return (0);
1275}
1276
1277static int
1278sec_process(device_t dev, struct cryptop *crp, int hint)
1279{
1280	struct sec_softc *sc = device_get_softc(dev);
1281	struct sec_desc *desc = NULL;
1282	struct cryptodesc *mac, *enc;
1283	struct sec_session *ses;
1284	int buftype, error = 0;
1285
1286	ses = crypto_get_driver_session(crp->crp_session);
1287
1288	/* Check for input length */
1289	if (crp->crp_ilen > SEC_MAX_DMA_BLOCK_SIZE) {
1290		crp->crp_etype = E2BIG;
1291		crypto_done(crp);
1292		return (0);
1293	}
1294
1295	/* Get descriptors */
1296	if (sec_split_crp(crp, &enc, &mac)) {
1297		crp->crp_etype = EINVAL;
1298		crypto_done(crp);
1299		return (0);
1300	}
1301
1302	SEC_LOCK(sc, descriptors);
1303	SEC_DESC_SYNC(sc, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1304
	/* Block driver if there are no free descriptors or we are going down */
1306	if (SEC_FREE_DESC_CNT(sc) == 0 || sc->sc_shutdown) {
1307		sc->sc_blocked |= CRYPTO_SYMQ;
1308		SEC_UNLOCK(sc, descriptors);
1309		return (ERESTART);
1310	}
1311
1312	/* Prepare descriptor */
1313	desc = SEC_GET_FREE_DESC(sc);
1314	desc->sd_lt_used = 0;
1315	desc->sd_error = 0;
1316	desc->sd_crp = crp;
1317
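	/* Determine how the request's buffer has to be mapped for DMA */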
1318	if (crp->crp_flags & CRYPTO_F_IOV)
1319		buftype = SEC_UIO;
1320	else if (crp->crp_flags & CRYPTO_F_IMBUF)
1321		buftype = SEC_MBUF;
1322	else
1323		buftype = SEC_MEMORY;
1324
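	/*
	 * For encryption, take the IV from the request if it is explicit or
	 * generate a fresh one, and copy it back into the buffer unless the
	 * caller indicates it is already present. For decryption, read the
	 * IV from the request or from the buffer.
	 */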
1325	if (enc && enc->crd_flags & CRD_F_ENCRYPT) {
1326		if (enc->crd_flags & CRD_F_IV_EXPLICIT)
1327			memcpy(desc->sd_desc->shd_iv, enc->crd_iv,
1328			    ses->ss_ivlen);
1329		else
1330			arc4rand(desc->sd_desc->shd_iv, ses->ss_ivlen, 0);
1331
1332		if ((enc->crd_flags & CRD_F_IV_PRESENT) == 0)
1333			crypto_copyback(crp->crp_flags, crp->crp_buf,
1334			    enc->crd_inject, ses->ss_ivlen,
1335			    desc->sd_desc->shd_iv);
1336	} else if (enc) {
1337		if (enc->crd_flags & CRD_F_IV_EXPLICIT)
1338			memcpy(desc->sd_desc->shd_iv, enc->crd_iv,
1339			    ses->ss_ivlen);
1340		else
1341			crypto_copydata(crp->crp_flags, crp->crp_buf,
1342			    enc->crd_inject, ses->ss_ivlen,
1343			    desc->sd_desc->shd_iv);
1344	}
1345
1346	if (enc && enc->crd_flags & CRD_F_KEY_EXPLICIT) {
1347		if ((enc->crd_klen / 8) <= SEC_MAX_KEY_LEN) {
1348			ses->ss_klen = enc->crd_klen / 8;
1349			memcpy(ses->ss_key, enc->crd_key, ses->ss_klen);
1350		} else
1351			error = E2BIG;
1352	}
1353
1354	if (!error && mac && mac->crd_flags & CRD_F_KEY_EXPLICIT) {
1355		if ((mac->crd_klen / 8) <= SEC_MAX_KEY_LEN) {
1356			ses->ss_mklen = mac->crd_klen / 8;
1357			memcpy(ses->ss_mkey, mac->crd_key, ses->ss_mklen);
1358		} else
1359			error = E2BIG;
1360	}
1361
1362	if (!error) {
1363		memcpy(desc->sd_desc->shd_key, ses->ss_key, ses->ss_klen);
1364		memcpy(desc->sd_desc->shd_mkey, ses->ss_mkey, ses->ss_mklen);
1365
1366		error = ses->ss_eu->sem_make_desc(sc, ses, desc, crp, buftype);
1367	}
1368
1369	if (error) {
1370		SEC_DESC_FREE_POINTERS(desc);
1371		SEC_DESC_PUT_BACK_LT(sc, desc);
1372		SEC_PUT_BACK_FREE_DESC(sc);
1373		SEC_UNLOCK(sc, descriptors);
1374		crp->crp_etype = error;
1375		crypto_done(crp);
1376		return (0);
1377	}
1378
1379	/*
	 * Skip the DONE interrupt if this is not the last request in a burst,
	 * but only if we are running on SEC 3.X. On SEC 2.X we have to enable
	 * DONE signaling on each descriptor.
1383	 */
1384	if ((hint & CRYPTO_HINT_MORE) && sc->sc_version == 3)
1385		desc->sd_desc->shd_dn = 0;
1386	else
1387		desc->sd_desc->shd_dn = 1;
1388
1389	SEC_DESC_SYNC(sc, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1390	SEC_DESC_SYNC_POINTERS(desc, BUS_DMASYNC_POSTREAD |
1391	    BUS_DMASYNC_POSTWRITE);
1392	SEC_DESC_FREE2READY(sc);
1393	SEC_UNLOCK(sc, descriptors);
1394
1395	/* Enqueue ready descriptors in hardware */
1396	sec_enqueue(sc);
1397
1398	return (0);
1399}
1400
1401static int
1402sec_build_common_ns_desc(struct sec_softc *sc, struct sec_desc *desc,
1403    struct sec_session *ses, struct cryptop *crp, struct cryptodesc *enc,
1404    int buftype)
1405{
1406	struct sec_hw_desc *hd = desc->sd_desc;
1407	int error;
1408
1409	hd->shd_desc_type = SEC_DT_COMMON_NONSNOOP;
1410	hd->shd_eu_sel1 = SEC_EU_NONE;
1411	hd->shd_mode1 = 0;
1412
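	/*
	 * Fill in the seven-entry pointer table of the non-snooping
	 * descriptor; unused slots get NULL pointers of zero length.
	 */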
1413	/* Pointer 0: NULL */
1414	error = sec_make_pointer_direct(sc, desc, 0, 0, 0);
1415	if (error)
1416		return (error);
1417
1418	/* Pointer 1: IV IN */
1419	error = sec_make_pointer_direct(sc, desc, 1, desc->sd_desc_paddr +
1420	    offsetof(struct sec_hw_desc, shd_iv), ses->ss_ivlen);
1421	if (error)
1422		return (error);
1423
1424	/* Pointer 2: Cipher Key */
1425	error = sec_make_pointer_direct(sc, desc, 2, desc->sd_desc_paddr +
1426	    offsetof(struct sec_hw_desc, shd_key), ses->ss_klen);
	if (error)
1428		return (error);
1429
1430	/* Pointer 3: Data IN */
1431	error = sec_make_pointer(sc, desc, 3, crp->crp_buf, enc->crd_skip,
1432	    enc->crd_len, buftype);
1433	if (error)
1434		return (error);
1435
1436	/* Pointer 4: Data OUT */
1437	error = sec_make_pointer(sc, desc, 4, crp->crp_buf, enc->crd_skip,
1438	    enc->crd_len, buftype);
1439	if (error)
1440		return (error);
1441
1442	/* Pointer 5: IV OUT (Not used: NULL) */
1443	error = sec_make_pointer_direct(sc, desc, 5, 0, 0);
1444	if (error)
1445		return (error);
1446
1447	/* Pointer 6: NULL */
1448	error = sec_make_pointer_direct(sc, desc, 6, 0, 0);
1449
1450	return (error);
1451}
1452
1453static int
1454sec_build_common_s_desc(struct sec_softc *sc, struct sec_desc *desc,
1455    struct sec_session *ses, struct cryptop *crp, struct cryptodesc *enc,
1456    struct cryptodesc *mac, int buftype)
1457{
1458	struct sec_hw_desc *hd = desc->sd_desc;
1459	u_int eu, mode, hashlen;
1460	int error;
1461
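	/*
	 * The snooping descriptor requires the MAC region to cover the
	 * cipher region and both regions to end at the same offset.
	 */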
1462	if (mac->crd_len < enc->crd_len)
1463		return (EINVAL);
1464
1465	if (mac->crd_skip + mac->crd_len != enc->crd_skip + enc->crd_len)
1466		return (EINVAL);
1467
1468	error = sec_mdeu_config(mac, &eu, &mode, &hashlen);
1469	if (error)
1470		return (error);
1471
1472	hd->shd_desc_type = SEC_DT_HMAC_SNOOP;
1473	hd->shd_eu_sel1 = eu;
1474	hd->shd_mode1 = mode;
1475
1476	/* Pointer 0: HMAC Key */
1477	error = sec_make_pointer_direct(sc, desc, 0, desc->sd_desc_paddr +
1478	    offsetof(struct sec_hw_desc, shd_mkey), ses->ss_mklen);
1479	if (error)
1480		return (error);
1481
1482	/* Pointer 1: HMAC-Only Data IN */
1483	error = sec_make_pointer(sc, desc, 1, crp->crp_buf, mac->crd_skip,
1484	    mac->crd_len - enc->crd_len, buftype);
1485	if (error)
1486		return (error);
1487
1488	/* Pointer 2: Cipher Key */
1489	error = sec_make_pointer_direct(sc, desc, 2, desc->sd_desc_paddr +
1490	    offsetof(struct sec_hw_desc, shd_key), ses->ss_klen);
	if (error)
1492		return (error);
1493
1494	/* Pointer 3: IV IN */
1495	error = sec_make_pointer_direct(sc, desc, 3, desc->sd_desc_paddr +
1496	    offsetof(struct sec_hw_desc, shd_iv), ses->ss_ivlen);
1497	if (error)
1498		return (error);
1499
1500	/* Pointer 4: Data IN */
1501	error = sec_make_pointer(sc, desc, 4, crp->crp_buf, enc->crd_skip,
1502	    enc->crd_len, buftype);
1503	if (error)
1504		return (error);
1505
1506	/* Pointer 5: Data OUT */
1507	error = sec_make_pointer(sc, desc, 5, crp->crp_buf, enc->crd_skip,
1508	    enc->crd_len, buftype);
1509	if (error)
1510		return (error);
1511
1512	/* Pointer 6: HMAC OUT */
1513	error = sec_make_pointer(sc, desc, 6, crp->crp_buf, mac->crd_inject,
1514	    hashlen, buftype);
1515
1516	return (error);
1517}
1518
1519/* AESU */
1520
1521static int
1522sec_aesu_newsession(struct sec_softc *sc, struct sec_session *ses,
1523    struct cryptoini *enc, struct cryptoini *mac)
1524{
1525
1526	if (enc == NULL)
1527		return (-1);
1528
1529	if (enc->cri_alg != CRYPTO_AES_CBC)
1530		return (-1);
1531
1532	ses->ss_ivlen = AES_BLOCK_LEN;
1533
1534	return (0);
1535}
1536
1537static int
1538sec_aesu_make_desc(struct sec_softc *sc, struct sec_session *ses,
1539    struct sec_desc *desc, struct cryptop *crp, int buftype)
1540{
1541	struct sec_hw_desc *hd = desc->sd_desc;
1542	struct cryptodesc *enc, *mac;
1543	int error;
1544
1545	error = sec_split_crp(crp, &enc, &mac);
1546	if (error)
1547		return (error);
1548
1549	if (!enc)
1550		return (EINVAL);
1551
1552	hd->shd_eu_sel0 = SEC_EU_AESU;
1553	hd->shd_mode0 = SEC_AESU_MODE_CBC;
1554
1555	if (enc->crd_alg != CRYPTO_AES_CBC)
1556		return (EINVAL);
1557
1558	if (enc->crd_flags & CRD_F_ENCRYPT) {
1559		hd->shd_mode0 |= SEC_AESU_MODE_ED;
1560		hd->shd_dir = 0;
1561	} else
1562		hd->shd_dir = 1;
1563
1564	if (mac)
1565		error = sec_build_common_s_desc(sc, desc, ses, crp, enc, mac,
1566		    buftype);
1567	else
1568		error = sec_build_common_ns_desc(sc, desc, ses, crp, enc,
1569		    buftype);
1570
1571	return (error);
1572}
1573
1574/* DEU */
1575
1576static int
1577sec_deu_newsession(struct sec_softc *sc, struct sec_session *ses,
1578    struct cryptoini *enc, struct cryptoini *mac)
1579{
1580
1581	if (enc == NULL)
1582		return (-1);
1583
1584	switch (enc->cri_alg) {
1585	case CRYPTO_DES_CBC:
1586	case CRYPTO_3DES_CBC:
1587		break;
1588	default:
1589		return (-1);
1590	}
1591
1592	ses->ss_ivlen = DES_BLOCK_LEN;
1593
1594	return (0);
1595}
1596
1597static int
1598sec_deu_make_desc(struct sec_softc *sc, struct sec_session *ses,
1599    struct sec_desc *desc, struct cryptop *crp, int buftype)
1600{
1601	struct sec_hw_desc *hd = desc->sd_desc;
1602	struct cryptodesc *enc, *mac;
1603	int error;
1604
1605	error = sec_split_crp(crp, &enc, &mac);
1606	if (error)
1607		return (error);
1608
1609	if (!enc)
1610		return (EINVAL);
1611
1612	hd->shd_eu_sel0 = SEC_EU_DEU;
1613	hd->shd_mode0 = SEC_DEU_MODE_CBC;
1614
1615	switch (enc->crd_alg) {
1616	case CRYPTO_3DES_CBC:
1617		hd->shd_mode0 |= SEC_DEU_MODE_TS;
1618		break;
1619	case CRYPTO_DES_CBC:
1620		break;
1621	default:
1622		return (EINVAL);
1623	}
1624
1625	if (enc->crd_flags & CRD_F_ENCRYPT) {
1626		hd->shd_mode0 |= SEC_DEU_MODE_ED;
1627		hd->shd_dir = 0;
1628	} else
1629		hd->shd_dir = 1;
1630
1631	if (mac)
1632		error = sec_build_common_s_desc(sc, desc, ses, crp, enc, mac,
1633		    buftype);
1634	else
1635		error = sec_build_common_ns_desc(sc, desc, ses, crp, enc,
1636		    buftype);
1637
1638	return (error);
1639}
1640
1641/* MDEU */
1642
1643static int
1644sec_mdeu_can_handle(u_int alg)
1645{
1646	switch (alg) {
1647	case CRYPTO_MD5:
1648	case CRYPTO_SHA1:
1649	case CRYPTO_MD5_HMAC:
1650	case CRYPTO_SHA1_HMAC:
1651	case CRYPTO_SHA2_256_HMAC:
1652	case CRYPTO_SHA2_384_HMAC:
1653	case CRYPTO_SHA2_512_HMAC:
1654		return (1);
1655	default:
1656		return (0);
1657	}
1658}
1659
1660static int
1661sec_mdeu_config(struct cryptodesc *crd, u_int *eu, u_int *mode, u_int *hashlen)
1662{
1663
1664	*mode = SEC_MDEU_MODE_PD | SEC_MDEU_MODE_INIT;
1665	*eu = SEC_EU_NONE;
1666
1667	switch (crd->crd_alg) {
1668	case CRYPTO_MD5_HMAC:
1669		*mode |= SEC_MDEU_MODE_HMAC;
1670		/* FALLTHROUGH */
1671	case CRYPTO_MD5:
1672		*eu = SEC_EU_MDEU_A;
1673		*mode |= SEC_MDEU_MODE_MD5;
1674		*hashlen = MD5_HASH_LEN;
1675		break;
1676	case CRYPTO_SHA1_HMAC:
1677		*mode |= SEC_MDEU_MODE_HMAC;
1678		/* FALLTHROUGH */
1679	case CRYPTO_SHA1:
1680		*eu = SEC_EU_MDEU_A;
1681		*mode |= SEC_MDEU_MODE_SHA1;
1682		*hashlen = SHA1_HASH_LEN;
1683		break;
1684	case CRYPTO_SHA2_256_HMAC:
1685		*mode |= SEC_MDEU_MODE_HMAC | SEC_MDEU_MODE_SHA256;
1686		*eu = SEC_EU_MDEU_A;
1687		break;
1688	case CRYPTO_SHA2_384_HMAC:
1689		*mode |= SEC_MDEU_MODE_HMAC | SEC_MDEU_MODE_SHA384;
1690		*eu = SEC_EU_MDEU_B;
1691		break;
1692	case CRYPTO_SHA2_512_HMAC:
1693		*mode |= SEC_MDEU_MODE_HMAC | SEC_MDEU_MODE_SHA512;
1694		*eu = SEC_EU_MDEU_B;
1695		break;
1696	default:
1697		return (EINVAL);
1698	}
1699
1700	if (*mode & SEC_MDEU_MODE_HMAC)
1701		*hashlen = SEC_HMAC_HASH_LEN;
1702
1703	return (0);
1704}
1705
1706static int
1707sec_mdeu_newsession(struct sec_softc *sc, struct sec_session *ses,
1708    struct cryptoini *enc, struct cryptoini *mac)
1709{
1710
1711	if (mac && sec_mdeu_can_handle(mac->cri_alg))
1712		return (0);
1713
1714	return (-1);
1715}
1716
1717static int
1718sec_mdeu_make_desc(struct sec_softc *sc, struct sec_session *ses,
1719    struct sec_desc *desc, struct cryptop *crp, int buftype)
1720{
1721	struct cryptodesc *enc, *mac;
1722	struct sec_hw_desc *hd = desc->sd_desc;
1723	u_int eu, mode, hashlen;
1724	int error;
1725
1726	error = sec_split_crp(crp, &enc, &mac);
1727	if (error)
1728		return (error);
1729
1730	if (enc)
1731		return (EINVAL);
1732
1733	error = sec_mdeu_config(mac, &eu, &mode, &hashlen);
1734	if (error)
1735		return (error);
1736
1737	hd->shd_desc_type = SEC_DT_COMMON_NONSNOOP;
1738	hd->shd_eu_sel0 = eu;
1739	hd->shd_mode0 = mode;
1740	hd->shd_eu_sel1 = SEC_EU_NONE;
1741	hd->shd_mode1 = 0;
1742
1743	/* Pointer 0: NULL */
1744	error = sec_make_pointer_direct(sc, desc, 0, 0, 0);
1745	if (error)
1746		return (error);
1747
1748	/* Pointer 1: Context In (Not used: NULL) */
1749	error = sec_make_pointer_direct(sc, desc, 1, 0, 0);
1750	if (error)
1751		return (error);
1752
1753	/* Pointer 2: HMAC Key (or NULL, depending on digest type) */
1754	if (hd->shd_mode0 & SEC_MDEU_MODE_HMAC)
1755		error = sec_make_pointer_direct(sc, desc, 2,
1756		    desc->sd_desc_paddr + offsetof(struct sec_hw_desc,
1757		    shd_mkey), ses->ss_mklen);
1758	else
1759		error = sec_make_pointer_direct(sc, desc, 2, 0, 0);
1760
1761	if (error)
1762		return (error);
1763
1764	/* Pointer 3: Input Data */
1765	error = sec_make_pointer(sc, desc, 3, crp->crp_buf, mac->crd_skip,
1766	    mac->crd_len, buftype);
1767	if (error)
1768		return (error);
1769
1770	/* Pointer 4: NULL */
1771	error = sec_make_pointer_direct(sc, desc, 4, 0, 0);
1772	if (error)
1773		return (error);
1774
1775	/* Pointer 5: Hash out */
1776	error = sec_make_pointer(sc, desc, 5, crp->crp_buf,
1777	    mac->crd_inject, hashlen, buftype);
1778	if (error)
1779		return (error);
1780
1781	/* Pointer 6: NULL */
1782	error = sec_make_pointer_direct(sc, desc, 6, 0, 0);
1783
	return (error);
1785}
1786