1/*-
2 * Copyright (C) 2008-2009 Semihalf, Piotr Ziecik
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 *    notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 *    notice, this list of conditions and the following disclaimer in the
12 *    documentation and/or other materials provided with the distribution.
13 *
14 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
15 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
16 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN
17 * NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
18 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
19 * TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
20 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
21 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
22 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
23 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
24 */
25
26/*
 * Freescale integrated Security Engine (SEC) driver. Currently SEC 2.0, 3.0
 * and 3.1 are supported.
29 */
30
31#include <sys/cdefs.h>
32__FBSDID("$FreeBSD$");
33
34#include <sys/param.h>
35#include <sys/systm.h>
36#include <sys/bus.h>
37#include <sys/endian.h>
38#include <sys/kernel.h>
39#include <sys/lock.h>
40#include <sys/malloc.h>
41#include <sys/mbuf.h>
42#include <sys/module.h>
43#include <sys/mutex.h>
44#include <sys/random.h>
45#include <sys/rman.h>
46
47#include <machine/_inttypes.h>
48#include <machine/bus.h>
49#include <machine/resource.h>
50
51#include <opencrypto/cryptodev.h>
52#include "cryptodev_if.h"
53
54#include <dev/ofw/ofw_bus_subr.h>
55#include <dev/sec/sec.h>
56
57static int	sec_probe(device_t dev);
58static int	sec_attach(device_t dev);
59static int	sec_detach(device_t dev);
60static int	sec_suspend(device_t dev);
61static int	sec_resume(device_t dev);
62static int	sec_shutdown(device_t dev);
63static void	sec_primary_intr(void *arg);
64static void	sec_secondary_intr(void *arg);
65static int	sec_setup_intr(struct sec_softc *sc, struct resource **ires,
66    void **ihand, int *irid, driver_intr_t handler, const char *iname);
67static void	sec_release_intr(struct sec_softc *sc, struct resource *ires,
68    void *ihand, int irid, const char *iname);
69static int	sec_controller_reset(struct sec_softc *sc);
70static int	sec_channel_reset(struct sec_softc *sc, int channel, int full);
71static int	sec_init(struct sec_softc *sc);
72static int	sec_alloc_dma_mem(struct sec_softc *sc,
73    struct sec_dma_mem *dma_mem, bus_size_t size);
74static int	sec_desc_map_dma(struct sec_softc *sc,
75    struct sec_dma_mem *dma_mem, void *mem, bus_size_t size, int type,
76    struct sec_desc_map_info *sdmi);
77static void	sec_free_dma_mem(struct sec_dma_mem *dma_mem);
78static void	sec_enqueue(struct sec_softc *sc);
79static int	sec_enqueue_desc(struct sec_softc *sc, struct sec_desc *desc,
80    int channel);
81static int	sec_eu_channel(struct sec_softc *sc, int eu);
82static int	sec_make_pointer(struct sec_softc *sc, struct sec_desc *desc,
83    u_int n, void *data, bus_size_t doffset, bus_size_t dsize, int dtype);
84static int	sec_make_pointer_direct(struct sec_softc *sc,
85    struct sec_desc *desc, u_int n, bus_addr_t data, bus_size_t dsize);
86static int	sec_alloc_session(struct sec_softc *sc);
87static int	sec_newsession(device_t dev, u_int32_t *sidp,
88    struct cryptoini *cri);
89static int	sec_freesession(device_t dev, uint64_t tid);
90static int	sec_process(device_t dev, struct cryptop *crp, int hint);
91static int	sec_split_cri(struct cryptoini *cri, struct cryptoini **enc,
92    struct cryptoini **mac);
93static int	sec_split_crp(struct cryptop *crp, struct cryptodesc **enc,
94    struct cryptodesc **mac);
95static int	sec_build_common_ns_desc(struct sec_softc *sc,
96    struct sec_desc *desc, struct sec_session *ses, struct cryptop *crp,
97    struct cryptodesc *enc, int buftype);
98static int	sec_build_common_s_desc(struct sec_softc *sc,
99    struct sec_desc *desc, struct sec_session *ses, struct cryptop *crp,
100    struct cryptodesc *enc, struct cryptodesc *mac, int buftype);
101
102static struct sec_session *sec_get_session(struct sec_softc *sc, u_int sid);
103static struct sec_desc *sec_find_desc(struct sec_softc *sc, bus_addr_t paddr);
104
105/* AESU */
106static int	sec_aesu_newsession(struct sec_softc *sc,
107    struct sec_session *ses, struct cryptoini *enc, struct cryptoini *mac);
108static int	sec_aesu_make_desc(struct sec_softc *sc,
109    struct sec_session *ses, struct sec_desc *desc, struct cryptop *crp,
110    int buftype);
111
112/* DEU */
113static int	sec_deu_newsession(struct sec_softc *sc,
114    struct sec_session *ses, struct cryptoini *enc, struct cryptoini *mac);
115static int	sec_deu_make_desc(struct sec_softc *sc,
116    struct sec_session *ses, struct sec_desc *desc, struct cryptop *crp,
117    int buftype);
118
119/* MDEU */
120static int	sec_mdeu_can_handle(u_int alg);
121static int	sec_mdeu_config(struct cryptodesc *crd,
122    u_int *eu, u_int *mode, u_int *hashlen);
123static int	sec_mdeu_newsession(struct sec_softc *sc,
124    struct sec_session *ses, struct cryptoini *enc, struct cryptoini *mac);
125static int	sec_mdeu_make_desc(struct sec_softc *sc,
126    struct sec_session *ses, struct sec_desc *desc, struct cryptop *crp,
127    int buftype);
128
129static device_method_t sec_methods[] = {
130	/* Device interface */
131	DEVMETHOD(device_probe,		sec_probe),
132	DEVMETHOD(device_attach,	sec_attach),
133	DEVMETHOD(device_detach,	sec_detach),
134
135	DEVMETHOD(device_suspend,	sec_suspend),
136	DEVMETHOD(device_resume,	sec_resume),
137	DEVMETHOD(device_shutdown,	sec_shutdown),
138
139	/* Crypto methods */
140	DEVMETHOD(cryptodev_newsession,	sec_newsession),
141	DEVMETHOD(cryptodev_freesession,sec_freesession),
142	DEVMETHOD(cryptodev_process,	sec_process),
143
144	DEVMETHOD_END
145};
146static driver_t sec_driver = {
147	"sec",
148	sec_methods,
149	sizeof(struct sec_softc),
150};
151
152static devclass_t sec_devclass;
153DRIVER_MODULE(sec, simplebus, sec_driver, sec_devclass, 0, 0);
154MODULE_DEPEND(sec, crypto, 1, 1, 1);
155
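/*
 * Execution unit (EU) backends. sec_newsession() walks this table in order
 * until one of the sem_newsession methods accepts the requested algorithms.
 */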
156static struct sec_eu_methods sec_eus[] = {
157	{
158		sec_aesu_newsession,
159		sec_aesu_make_desc,
160	},
161	{
162		sec_deu_newsession,
163		sec_deu_make_desc,
164	},
165	{
166		sec_mdeu_newsession,
167		sec_mdeu_make_desc,
168	},
169	{ NULL, NULL }
170};
171
172static inline void
173sec_sync_dma_mem(struct sec_dma_mem *dma_mem, bus_dmasync_op_t op)
174{
175
176	/* Sync only if dma memory is valid */
177	if (dma_mem->dma_vaddr != NULL)
178		bus_dmamap_sync(dma_mem->dma_tag, dma_mem->dma_map, op);
179}
180
181static inline void
182sec_free_session(struct sec_softc *sc, struct sec_session *ses)
183{
184
185	SEC_LOCK(sc, sessions);
186	ses->ss_used = 0;
187	SEC_UNLOCK(sc, sessions);
188}
189
190static inline void *
191sec_get_pointer_data(struct sec_desc *desc, u_int n)
192{
193
194	return (desc->sd_ptr_dmem[n].dma_vaddr);
195}
196
197static int
198sec_probe(device_t dev)
199{
200	struct sec_softc *sc;
201	uint64_t id;
202
203	if (!ofw_bus_status_okay(dev))
204		return (ENXIO);
205
206	if (!ofw_bus_is_compatible(dev, "fsl,sec2.0"))
207		return (ENXIO);
208
209	sc = device_get_softc(dev);
210
211	sc->sc_rrid = 0;
212	sc->sc_rres = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &sc->sc_rrid,
213	    RF_ACTIVE);
214
215	if (sc->sc_rres == NULL)
216		return (ENXIO);
217
218	sc->sc_bas.bsh = rman_get_bushandle(sc->sc_rres);
219	sc->sc_bas.bst = rman_get_bustag(sc->sc_rres);
220
221	id = SEC_READ(sc, SEC_ID);
222
223	bus_release_resource(dev, SYS_RES_MEMORY, sc->sc_rrid, sc->sc_rres);
224
225	switch (id) {
226	case SEC_20_ID:
227		device_set_desc(dev, "Freescale Security Engine 2.0");
228		sc->sc_version = 2;
229		break;
230	case SEC_30_ID:
231		device_set_desc(dev, "Freescale Security Engine 3.0");
232		sc->sc_version = 3;
233		break;
234	case SEC_31_ID:
235		device_set_desc(dev, "Freescale Security Engine 3.1");
236		sc->sc_version = 3;
237		break;
238	default:
239		device_printf(dev, "unknown SEC ID 0x%016"PRIx64"!\n", id);
240		return (ENXIO);
241	}
242
243	return (0);
244}
245
246static int
247sec_attach(device_t dev)
248{
249	struct sec_softc *sc;
250	struct sec_hw_lt *lt;
251	int error = 0;
252	int i;
253
254	sc = device_get_softc(dev);
255	sc->sc_dev = dev;
256	sc->sc_blocked = 0;
257	sc->sc_shutdown = 0;
258
259	sc->sc_cid = crypto_get_driverid(dev, CRYPTOCAP_F_HARDWARE);
260	if (sc->sc_cid < 0) {
261		device_printf(dev, "could not get crypto driver ID!\n");
262		return (ENXIO);
263	}
264
265	/* Init locks */
266	mtx_init(&sc->sc_controller_lock, device_get_nameunit(dev),
267	    "SEC Controller lock", MTX_DEF);
268	mtx_init(&sc->sc_descriptors_lock, device_get_nameunit(dev),
269	    "SEC Descriptors lock", MTX_DEF);
270	mtx_init(&sc->sc_sessions_lock, device_get_nameunit(dev),
271	    "SEC Sessions lock", MTX_DEF);
272
273	/* Allocate I/O memory for SEC registers */
274	sc->sc_rrid = 0;
275	sc->sc_rres = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &sc->sc_rrid,
276	    RF_ACTIVE);
277
278	if (sc->sc_rres == NULL) {
279		device_printf(dev, "could not allocate I/O memory!\n");
280		goto fail1;
281	}
282
283	sc->sc_bas.bsh = rman_get_bushandle(sc->sc_rres);
284	sc->sc_bas.bst = rman_get_bustag(sc->sc_rres);
285
286	/* Setup interrupts */
287	sc->sc_pri_irid = 0;
288	error = sec_setup_intr(sc, &sc->sc_pri_ires, &sc->sc_pri_ihand,
289	    &sc->sc_pri_irid, sec_primary_intr, "primary");
290
291	if (error)
292		goto fail2;
293
294
295	if (sc->sc_version == 3) {
296		sc->sc_sec_irid = 1;
297		error = sec_setup_intr(sc, &sc->sc_sec_ires, &sc->sc_sec_ihand,
298		    &sc->sc_sec_irid, sec_secondary_intr, "secondary");
299
300		if (error)
301			goto fail3;
302	}
303
304	/* Alloc DMA memory for descriptors and link tables */
305	error = sec_alloc_dma_mem(sc, &(sc->sc_desc_dmem),
306	    SEC_DESCRIPTORS * sizeof(struct sec_hw_desc));
307
308	if (error)
309		goto fail4;
310
311	error = sec_alloc_dma_mem(sc, &(sc->sc_lt_dmem),
312	    (SEC_LT_ENTRIES + 1) * sizeof(struct sec_hw_lt));
313
314	if (error)
315		goto fail5;
316
317	/* Fill in descriptors and link tables */
318	for (i = 0; i < SEC_DESCRIPTORS; i++) {
319		sc->sc_desc[i].sd_desc =
320		    (struct sec_hw_desc*)(sc->sc_desc_dmem.dma_vaddr) + i;
321		sc->sc_desc[i].sd_desc_paddr = sc->sc_desc_dmem.dma_paddr +
322		    (i * sizeof(struct sec_hw_desc));
323	}
324
325	for (i = 0; i < SEC_LT_ENTRIES + 1; i++) {
326		sc->sc_lt[i].sl_lt =
327		    (struct sec_hw_lt*)(sc->sc_lt_dmem.dma_vaddr) + i;
328		sc->sc_lt[i].sl_lt_paddr = sc->sc_lt_dmem.dma_paddr +
329		    (i * sizeof(struct sec_hw_lt));
330	}
331
	/* The last link table entry points back to the first, closing the ring */
333	lt = sc->sc_lt[SEC_LT_ENTRIES].sl_lt;
334	lt->shl_length = 0;
335	lt->shl_r = 0;
336	lt->shl_n = 1;
337	lt->shl_ptr = sc->sc_lt[0].sl_lt_paddr;
338
339	/* Init descriptor and link table queues pointers */
340	SEC_CNT_INIT(sc, sc_free_desc_get_cnt, SEC_DESCRIPTORS);
341	SEC_CNT_INIT(sc, sc_free_desc_put_cnt, SEC_DESCRIPTORS);
342	SEC_CNT_INIT(sc, sc_ready_desc_get_cnt, SEC_DESCRIPTORS);
343	SEC_CNT_INIT(sc, sc_ready_desc_put_cnt, SEC_DESCRIPTORS);
344	SEC_CNT_INIT(sc, sc_queued_desc_get_cnt, SEC_DESCRIPTORS);
345	SEC_CNT_INIT(sc, sc_queued_desc_put_cnt, SEC_DESCRIPTORS);
346	SEC_CNT_INIT(sc, sc_lt_alloc_cnt, SEC_LT_ENTRIES);
347	SEC_CNT_INIT(sc, sc_lt_free_cnt, SEC_LT_ENTRIES);
348
349	/* Create masks for fast checks */
350	sc->sc_int_error_mask = 0;
351	for (i = 0; i < SEC_CHANNELS; i++)
352		sc->sc_int_error_mask |= (~0ULL & SEC_INT_CH_ERR(i));
353
354	switch (sc->sc_version) {
355	case 2:
356		sc->sc_channel_idle_mask =
357		    (SEC_CHAN_CSR2_FFLVL_M << SEC_CHAN_CSR2_FFLVL_S) |
358		    (SEC_CHAN_CSR2_MSTATE_M << SEC_CHAN_CSR2_MSTATE_S) |
359		    (SEC_CHAN_CSR2_PSTATE_M << SEC_CHAN_CSR2_PSTATE_S) |
360		    (SEC_CHAN_CSR2_GSTATE_M << SEC_CHAN_CSR2_GSTATE_S);
361		break;
362	case 3:
363		sc->sc_channel_idle_mask =
364		    (SEC_CHAN_CSR3_FFLVL_M << SEC_CHAN_CSR3_FFLVL_S) |
365		    (SEC_CHAN_CSR3_MSTATE_M << SEC_CHAN_CSR3_MSTATE_S) |
366		    (SEC_CHAN_CSR3_PSTATE_M << SEC_CHAN_CSR3_PSTATE_S) |
367		    (SEC_CHAN_CSR3_GSTATE_M << SEC_CHAN_CSR3_GSTATE_S);
368		break;
369	}
370
371	/* Init hardware */
372	error = sec_init(sc);
373
374	if (error)
375		goto fail6;
376
377	/* Register in OCF (AESU) */
378	crypto_register(sc->sc_cid, CRYPTO_AES_CBC, 0, 0);
379
380	/* Register in OCF (DEU) */
381	crypto_register(sc->sc_cid, CRYPTO_DES_CBC, 0, 0);
382	crypto_register(sc->sc_cid, CRYPTO_3DES_CBC, 0, 0);
383
384	/* Register in OCF (MDEU) */
385	crypto_register(sc->sc_cid, CRYPTO_MD5, 0, 0);
386	crypto_register(sc->sc_cid, CRYPTO_MD5_HMAC, 0, 0);
387	crypto_register(sc->sc_cid, CRYPTO_SHA1, 0, 0);
388	crypto_register(sc->sc_cid, CRYPTO_SHA1_HMAC, 0, 0);
389	crypto_register(sc->sc_cid, CRYPTO_SHA2_256_HMAC, 0, 0);
390	if (sc->sc_version >= 3) {
391		crypto_register(sc->sc_cid, CRYPTO_SHA2_384_HMAC, 0, 0);
392		crypto_register(sc->sc_cid, CRYPTO_SHA2_512_HMAC, 0, 0);
393	}
394
395	return (0);
396
397fail6:
398	sec_free_dma_mem(&(sc->sc_lt_dmem));
399fail5:
400	sec_free_dma_mem(&(sc->sc_desc_dmem));
401fail4:
402	sec_release_intr(sc, sc->sc_sec_ires, sc->sc_sec_ihand,
403	    sc->sc_sec_irid, "secondary");
404fail3:
405	sec_release_intr(sc, sc->sc_pri_ires, sc->sc_pri_ihand,
406	    sc->sc_pri_irid, "primary");
407fail2:
408	bus_release_resource(dev, SYS_RES_MEMORY, sc->sc_rrid, sc->sc_rres);
409fail1:
410	mtx_destroy(&sc->sc_controller_lock);
411	mtx_destroy(&sc->sc_descriptors_lock);
412	mtx_destroy(&sc->sc_sessions_lock);
413
414	return (ENXIO);
415}
416
417static int
418sec_detach(device_t dev)
419{
420	struct sec_softc *sc = device_get_softc(dev);
421	int i, error, timeout = SEC_TIMEOUT;
422
423	/* Prepare driver to shutdown */
424	SEC_LOCK(sc, descriptors);
425	sc->sc_shutdown = 1;
426	SEC_UNLOCK(sc, descriptors);
427
428	/* Wait until all queued processing finishes */
429	while (1) {
430		SEC_LOCK(sc, descriptors);
431		i = SEC_READY_DESC_CNT(sc) + SEC_QUEUED_DESC_CNT(sc);
432		SEC_UNLOCK(sc, descriptors);
433
434		if (i == 0)
435			break;
436
437		if (timeout < 0) {
438			device_printf(dev, "queue flush timeout!\n");
439
			/* DMA may still be active - stop it */
441			for (i = 0; i < SEC_CHANNELS; i++)
442				sec_channel_reset(sc, i, 1);
443
444			break;
445		}
446
447		timeout -= 1000;
448		DELAY(1000);
449	}
450
451	/* Disable interrupts */
452	SEC_WRITE(sc, SEC_IER, 0);
453
454	/* Unregister from OCF */
455	crypto_unregister_all(sc->sc_cid);
456
457	/* Free DMA memory */
458	for (i = 0; i < SEC_DESCRIPTORS; i++)
459		SEC_DESC_FREE_POINTERS(&(sc->sc_desc[i]));
460
461	sec_free_dma_mem(&(sc->sc_lt_dmem));
462	sec_free_dma_mem(&(sc->sc_desc_dmem));
463
464	/* Release interrupts */
465	sec_release_intr(sc, sc->sc_pri_ires, sc->sc_pri_ihand,
466	    sc->sc_pri_irid, "primary");
467	sec_release_intr(sc, sc->sc_sec_ires, sc->sc_sec_ihand,
468	    sc->sc_sec_irid, "secondary");
469
470	/* Release memory */
471	if (sc->sc_rres) {
472		error = bus_release_resource(dev, SYS_RES_MEMORY, sc->sc_rrid,
473		    sc->sc_rres);
474		if (error)
475			device_printf(dev, "bus_release_resource() failed for"
476			    " I/O memory, error %d\n", error);
477
478		sc->sc_rres = NULL;
479	}
480
481	mtx_destroy(&sc->sc_controller_lock);
482	mtx_destroy(&sc->sc_descriptors_lock);
483	mtx_destroy(&sc->sc_sessions_lock);
484
485	return (0);
486}
487
488static int
489sec_suspend(device_t dev)
490{
491
492	return (0);
493}
494
495static int
496sec_resume(device_t dev)
497{
498
499	return (0);
500}
501
502static int
503sec_shutdown(device_t dev)
504{
505
506	return (0);
507}
508
509static int
510sec_setup_intr(struct sec_softc *sc, struct resource **ires, void **ihand,
511    int *irid, driver_intr_t handler, const char *iname)
512{
513	int error;
514
515	(*ires) = bus_alloc_resource_any(sc->sc_dev, SYS_RES_IRQ, irid,
516	    RF_ACTIVE);
517
518	if ((*ires) == NULL) {
519		device_printf(sc->sc_dev, "could not allocate %s IRQ\n", iname);
520		return (ENXIO);
521	}
522
523	error = bus_setup_intr(sc->sc_dev, *ires, INTR_MPSAFE | INTR_TYPE_NET,
524	    NULL, handler, sc, ihand);
525
526	if (error) {
527		device_printf(sc->sc_dev, "failed to set up %s IRQ\n", iname);
528		if (bus_release_resource(sc->sc_dev, SYS_RES_IRQ, *irid, *ires))
529			device_printf(sc->sc_dev, "could not release %s IRQ\n",
530			    iname);
531
532		(*ires) = NULL;
533		return (error);
534	}
535
536	return (0);
537}
538
539static void
540sec_release_intr(struct sec_softc *sc, struct resource *ires, void *ihand,
541    int irid, const char *iname)
542{
543	int error;
544
545	if (ires == NULL)
546		return;
547
548	error = bus_teardown_intr(sc->sc_dev, ires, ihand);
549	if (error)
550		device_printf(sc->sc_dev, "bus_teardown_intr() failed for %s"
551		    " IRQ, error %d\n", iname, error);
552
553	error = bus_release_resource(sc->sc_dev, SYS_RES_IRQ, irid, ires);
554	if (error)
555		device_printf(sc->sc_dev, "bus_release_resource() failed for %s"
556		    " IRQ, error %d\n", iname, error);
557}
558
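/*
 * Primary interrupt handler: mark descriptors that caused channel errors,
 * return completed descriptors to OCF and enqueue ready descriptors again.
 */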
559static void
560sec_primary_intr(void *arg)
561{
562	struct sec_softc *sc = arg;
563	struct sec_desc *desc;
564	uint64_t isr;
565	int i, wakeup = 0;
566
567	SEC_LOCK(sc, controller);
568
569	/* Check for errors */
570	isr = SEC_READ(sc, SEC_ISR);
571	if (isr & sc->sc_int_error_mask) {
572		/* Check each channel for error */
573		for (i = 0; i < SEC_CHANNELS; i++) {
574			if ((isr & SEC_INT_CH_ERR(i)) == 0)
575				continue;
576
577			device_printf(sc->sc_dev,
578			    "I/O error on channel %i!\n", i);
579
580			/* Find and mark problematic descriptor */
581			desc = sec_find_desc(sc, SEC_READ(sc,
582			    SEC_CHAN_CDPR(i)));
583
584			if (desc != NULL)
585				desc->sd_error = EIO;
586
587			/* Do partial channel reset */
588			sec_channel_reset(sc, i, 0);
589		}
590	}
591
592	/* ACK interrupt */
593	SEC_WRITE(sc, SEC_ICR, 0xFFFFFFFFFFFFFFFFULL);
594
595	SEC_UNLOCK(sc, controller);
596	SEC_LOCK(sc, descriptors);
597
598	/* Handle processed descriptors */
599	SEC_DESC_SYNC(sc, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
600
601	while (SEC_QUEUED_DESC_CNT(sc) > 0) {
602		desc = SEC_GET_QUEUED_DESC(sc);
603
604		if (desc->sd_desc->shd_done != 0xFF && desc->sd_error == 0) {
605			SEC_PUT_BACK_QUEUED_DESC(sc);
606			break;
607		}
608
609		SEC_DESC_SYNC_POINTERS(desc, BUS_DMASYNC_PREREAD |
610		    BUS_DMASYNC_PREWRITE);
611
612		desc->sd_crp->crp_etype = desc->sd_error;
613		crypto_done(desc->sd_crp);
614
615		SEC_DESC_FREE_POINTERS(desc);
616		SEC_DESC_FREE_LT(sc, desc);
617		SEC_DESC_QUEUED2FREE(sc);
618	}
619
620	SEC_DESC_SYNC(sc, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
621
622	if (!sc->sc_shutdown) {
623		wakeup = sc->sc_blocked;
624		sc->sc_blocked = 0;
625	}
626
627	SEC_UNLOCK(sc, descriptors);
628
629	/* Enqueue ready descriptors in hardware */
630	sec_enqueue(sc);
631
632	if (wakeup)
633		crypto_unblock(sc->sc_cid, wakeup);
634}
635
636static void
637sec_secondary_intr(void *arg)
638{
639	struct sec_softc *sc = arg;
640
641	device_printf(sc->sc_dev, "spurious secondary interrupt!\n");
642	sec_primary_intr(arg);
643}
644
645static int
646sec_controller_reset(struct sec_softc *sc)
647{
648	int timeout = SEC_TIMEOUT;
649
650	/* Reset Controller */
651	SEC_WRITE(sc, SEC_MCR, SEC_MCR_SWR);
652
653	while (SEC_READ(sc, SEC_MCR) & SEC_MCR_SWR) {
654		DELAY(1000);
655		timeout -= 1000;
656
657		if (timeout < 0) {
658			device_printf(sc->sc_dev, "timeout while waiting for "
659			    "device reset!\n");
660			return (ETIMEDOUT);
661		}
662	}
663
664	return (0);
665}
666
667static int
668sec_channel_reset(struct sec_softc *sc, int channel, int full)
669{
670	int timeout = SEC_TIMEOUT;
671	uint64_t bit = (full) ? SEC_CHAN_CCR_R : SEC_CHAN_CCR_CON;
672	uint64_t reg;
673
674	/* Reset Channel */
675	reg = SEC_READ(sc, SEC_CHAN_CCR(channel));
676	SEC_WRITE(sc, SEC_CHAN_CCR(channel), reg | bit);
677
678	while (SEC_READ(sc, SEC_CHAN_CCR(channel)) & bit) {
679		DELAY(1000);
680		timeout -= 1000;
681
682		if (timeout < 0) {
683			device_printf(sc->sc_dev, "timeout while waiting for "
684			    "channel reset!\n");
685			return (ETIMEDOUT);
686		}
687	}
688
689	if (full) {
690		reg = SEC_CHAN_CCR_CDIE | SEC_CHAN_CCR_NT | SEC_CHAN_CCR_BS;
691
692		switch(sc->sc_version) {
693		case 2:
694			reg |= SEC_CHAN_CCR_CDWE;
695			break;
696		case 3:
697			reg |= SEC_CHAN_CCR_AWSE | SEC_CHAN_CCR_WGN;
698			break;
699		}
700
701		SEC_WRITE(sc, SEC_CHAN_CCR(channel), reg);
702	}
703
704	return (0);
705}
706
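/*
 * Bring the controller to a known state: reset it, reset all channels and
 * enable per-channel done/error interrupts.
 */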
707static int
708sec_init(struct sec_softc *sc)
709{
710	uint64_t reg;
711	int error, i;
712
713	/* Reset controller twice to clear all pending interrupts */
714	error = sec_controller_reset(sc);
715	if (error)
716		return (error);
717
718	error = sec_controller_reset(sc);
719	if (error)
720		return (error);
721
722	/* Reset channels */
723	for (i = 0; i < SEC_CHANNELS; i++) {
724		error = sec_channel_reset(sc, i, 1);
725		if (error)
726			return (error);
727	}
728
729	/* Enable Interrupts */
730	reg = SEC_INT_ITO;
731	for (i = 0; i < SEC_CHANNELS; i++)
732		reg |= SEC_INT_CH_DN(i) | SEC_INT_CH_ERR(i);
733
734	SEC_WRITE(sc, SEC_IER, reg);
735
736	return (error);
737}
738
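/* busdma callback: store the physical address of a single-segment mapping. */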
739static void
740sec_alloc_dma_mem_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
741{
742	struct sec_dma_mem *dma_mem = arg;
743
744	if (error)
745		return;
746
747	KASSERT(nseg == 1, ("Wrong number of segments, should be 1"));
748	dma_mem->dma_paddr = segs->ds_addr;
749}
750
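/*
 * busdma callback: convert the DMA segments of a crypto buffer into SEC link
 * table entries, honoring the requested offset and size.
 */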
751static void
752sec_dma_map_desc_cb(void *arg, bus_dma_segment_t *segs, int nseg,
753    int error)
754{
755	struct sec_desc_map_info *sdmi = arg;
756	struct sec_softc *sc = sdmi->sdmi_sc;
757	struct sec_lt *lt = NULL;
758	bus_addr_t addr;
759	bus_size_t size;
760	int i;
761
762	SEC_LOCK_ASSERT(sc, descriptors);
763
764	if (error)
765		return;
766
767	for (i = 0; i < nseg; i++) {
768		addr = segs[i].ds_addr;
769		size = segs[i].ds_len;
770
771		/* Skip requested offset */
772		if (sdmi->sdmi_offset >= size) {
773			sdmi->sdmi_offset -= size;
774			continue;
775		}
776
777		addr += sdmi->sdmi_offset;
778		size -= sdmi->sdmi_offset;
779		sdmi->sdmi_offset = 0;
780
781		/* Do not link more than requested */
782		if (sdmi->sdmi_size < size)
783			size = sdmi->sdmi_size;
784
785		lt = SEC_ALLOC_LT_ENTRY(sc);
786		lt->sl_lt->shl_length = size;
787		lt->sl_lt->shl_r = 0;
788		lt->sl_lt->shl_n = 0;
789		lt->sl_lt->shl_ptr = addr;
790
791		if (sdmi->sdmi_lt_first == NULL)
792			sdmi->sdmi_lt_first = lt;
793
794		sdmi->sdmi_lt_used += 1;
795
796		if ((sdmi->sdmi_size -= size) == 0)
797			break;
798	}
799
800	sdmi->sdmi_lt_last = lt;
801}
802
803static void
804sec_dma_map_desc_cb2(void *arg, bus_dma_segment_t *segs, int nseg,
805    bus_size_t size, int error)
806{
807
808	sec_dma_map_desc_cb(arg, segs, nseg, error);
809}
810
811static int
812sec_alloc_dma_mem(struct sec_softc *sc, struct sec_dma_mem *dma_mem,
813    bus_size_t size)
814{
815	int error;
816
817	if (dma_mem->dma_vaddr != NULL)
818		return (EBUSY);
819
820	error = bus_dma_tag_create(NULL,	/* parent */
821		SEC_DMA_ALIGNMENT, 0,		/* alignment, boundary */
822		BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
823		BUS_SPACE_MAXADDR,		/* highaddr */
824		NULL, NULL,			/* filtfunc, filtfuncarg */
825		size, 1,			/* maxsize, nsegments */
826		size, 0,			/* maxsegsz, flags */
827		NULL, NULL,			/* lockfunc, lockfuncarg */
828		&(dma_mem->dma_tag));		/* dmat */
829
830	if (error) {
831		device_printf(sc->sc_dev, "failed to allocate busdma tag, error"
832		    " %i!\n", error);
833		goto err1;
834	}
835
836	error = bus_dmamem_alloc(dma_mem->dma_tag, &(dma_mem->dma_vaddr),
837	    BUS_DMA_NOWAIT | BUS_DMA_ZERO, &(dma_mem->dma_map));
838
839	if (error) {
840		device_printf(sc->sc_dev, "failed to allocate DMA safe"
841		    " memory, error %i!\n", error);
842		goto err2;
843	}
844
845	error = bus_dmamap_load(dma_mem->dma_tag, dma_mem->dma_map,
846		    dma_mem->dma_vaddr, size, sec_alloc_dma_mem_cb, dma_mem,
847		    BUS_DMA_NOWAIT);
848
849	if (error) {
850		device_printf(sc->sc_dev, "cannot get address of the DMA"
851		    " memory, error %i\n", error);
852		goto err3;
853	}
854
855	dma_mem->dma_is_map = 0;
856	return (0);
857
858err3:
859	bus_dmamem_free(dma_mem->dma_tag, dma_mem->dma_vaddr, dma_mem->dma_map);
860err2:
861	bus_dma_tag_destroy(dma_mem->dma_tag);
862err1:
863	dma_mem->dma_vaddr = NULL;
864	return(error);
865}
866
867static int
868sec_desc_map_dma(struct sec_softc *sc, struct sec_dma_mem *dma_mem, void *mem,
869    bus_size_t size, int type, struct sec_desc_map_info *sdmi)
870{
871	int error;
872
873	if (dma_mem->dma_vaddr != NULL)
874		return (EBUSY);
875
876	switch (type) {
877	case SEC_MEMORY:
878		break;
879	case SEC_UIO:
880		size = SEC_FREE_LT_CNT(sc) * SEC_MAX_DMA_BLOCK_SIZE;
881		break;
882	case SEC_MBUF:
883		size = m_length((struct mbuf*)mem, NULL);
884		break;
885	default:
886		return (EINVAL);
887	}
888
889	error = bus_dma_tag_create(NULL,	/* parent */
890		SEC_DMA_ALIGNMENT, 0,		/* alignment, boundary */
891		BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
892		BUS_SPACE_MAXADDR,		/* highaddr */
893		NULL, NULL,			/* filtfunc, filtfuncarg */
894		size,				/* maxsize */
895		SEC_FREE_LT_CNT(sc),		/* nsegments */
896		SEC_MAX_DMA_BLOCK_SIZE, 0,	/* maxsegsz, flags */
897		NULL, NULL,			/* lockfunc, lockfuncarg */
898		&(dma_mem->dma_tag));		/* dmat */
899
900	if (error) {
901		device_printf(sc->sc_dev, "failed to allocate busdma tag, error"
902		    " %i!\n", error);
903		dma_mem->dma_vaddr = NULL;
904		return (error);
905	}
906
907	error = bus_dmamap_create(dma_mem->dma_tag, 0, &(dma_mem->dma_map));
908
909	if (error) {
910		device_printf(sc->sc_dev, "failed to create DMA map, error %i!"
911		    "\n", error);
912		bus_dma_tag_destroy(dma_mem->dma_tag);
913		return (error);
914	}
915
916	switch (type) {
917	case SEC_MEMORY:
918		error = bus_dmamap_load(dma_mem->dma_tag, dma_mem->dma_map,
919		    mem, size, sec_dma_map_desc_cb, sdmi, BUS_DMA_NOWAIT);
920		break;
921	case SEC_UIO:
922		error = bus_dmamap_load_uio(dma_mem->dma_tag, dma_mem->dma_map,
923		    mem, sec_dma_map_desc_cb2, sdmi, BUS_DMA_NOWAIT);
924		break;
925	case SEC_MBUF:
926		error = bus_dmamap_load_mbuf(dma_mem->dma_tag, dma_mem->dma_map,
927		    mem, sec_dma_map_desc_cb2, sdmi, BUS_DMA_NOWAIT);
928		break;
929	}
930
931	if (error) {
932		device_printf(sc->sc_dev, "cannot get address of the DMA"
933		    " memory, error %i!\n", error);
934		bus_dmamap_destroy(dma_mem->dma_tag, dma_mem->dma_map);
935		bus_dma_tag_destroy(dma_mem->dma_tag);
936		return (error);
937	}
938
939	dma_mem->dma_is_map = 1;
940	dma_mem->dma_vaddr = mem;
941
942	return (0);
943}
944
945static void
946sec_free_dma_mem(struct sec_dma_mem *dma_mem)
947{
948
949	/* Check for double free */
950	if (dma_mem->dma_vaddr == NULL)
951		return;
952
953	bus_dmamap_unload(dma_mem->dma_tag, dma_mem->dma_map);
954
955	if (dma_mem->dma_is_map)
956		bus_dmamap_destroy(dma_mem->dma_tag, dma_mem->dma_map);
957	else
958		bus_dmamem_free(dma_mem->dma_tag, dma_mem->dma_vaddr,
959		    dma_mem->dma_map);
960
961	bus_dma_tag_destroy(dma_mem->dma_tag);
962	dma_mem->dma_vaddr = NULL;
963}
964
965static int
966sec_eu_channel(struct sec_softc *sc, int eu)
967{
968	uint64_t reg;
969	int channel = 0;
970
971	SEC_LOCK_ASSERT(sc, controller);
972
973	reg = SEC_READ(sc, SEC_EUASR);
974
975	switch (eu) {
976	case SEC_EU_AFEU:
977		channel = SEC_EUASR_AFEU(reg);
978		break;
979	case SEC_EU_DEU:
980		channel = SEC_EUASR_DEU(reg);
981		break;
982	case SEC_EU_MDEU_A:
983	case SEC_EU_MDEU_B:
984		channel = SEC_EUASR_MDEU(reg);
985		break;
986	case SEC_EU_RNGU:
987		channel = SEC_EUASR_RNGU(reg);
988		break;
989	case SEC_EU_PKEU:
990		channel = SEC_EUASR_PKEU(reg);
991		break;
992	case SEC_EU_AESU:
993		channel = SEC_EUASR_AESU(reg);
994		break;
995	case SEC_EU_KEU:
996		channel = SEC_EUASR_KEU(reg);
997		break;
998	case SEC_EU_CRCU:
999		channel = SEC_EUASR_CRCU(reg);
1000		break;
1001	}
1002
1003	return (channel - 1);
1004}
1005
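/*
 * Put a descriptor into the fetch FIFO of the given channel. If channel is
 * negative, the first idle channel is used. Returns the channel number on
 * success or -1 if no suitable channel (or FIFO slot) is available.
 */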
1006static int
1007sec_enqueue_desc(struct sec_softc *sc, struct sec_desc *desc, int channel)
1008{
1009	u_int fflvl = SEC_MAX_FIFO_LEVEL;
1010	uint64_t reg;
1011	int i;
1012
1013	SEC_LOCK_ASSERT(sc, controller);
1014
	/* Find a free channel if we have not been given one */
	if (channel < 0) {
		for (i = 0; i < SEC_CHANNELS; i++) {
			reg = SEC_READ(sc, SEC_CHAN_CSR(i));
1019
1020			if ((reg & sc->sc_channel_idle_mask) == 0) {
1021				channel = i;
1022				break;
1023			}
1024		}
1025	}
1026
1027	/* There is no free channel */
1028	if (channel < 0)
1029		return (-1);
1030
1031	/* Check FIFO level on selected channel */
1032	reg = SEC_READ(sc, SEC_CHAN_CSR(channel));
1033
1034	switch(sc->sc_version) {
1035	case 2:
1036		fflvl = (reg >> SEC_CHAN_CSR2_FFLVL_S) & SEC_CHAN_CSR2_FFLVL_M;
1037		break;
1038	case 3:
1039		fflvl = (reg >> SEC_CHAN_CSR3_FFLVL_S) & SEC_CHAN_CSR3_FFLVL_M;
1040		break;
1041	}
1042
1043	if (fflvl >= SEC_MAX_FIFO_LEVEL)
1044		return (-1);
1045
1046	/* Enqueue descriptor in channel */
1047	SEC_WRITE(sc, SEC_CHAN_FF(channel), desc->sd_desc_paddr);
1048
1049	return (channel);
1050}
1051
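/*
 * Feed ready descriptors to the hardware, preferring the channel that is
 * already using the execution units selected by a descriptor.
 */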
1052static void
1053sec_enqueue(struct sec_softc *sc)
1054{
1055	struct sec_desc *desc;
1056	int ch0, ch1;
1057
1058	SEC_LOCK(sc, descriptors);
1059	SEC_LOCK(sc, controller);
1060
1061	while (SEC_READY_DESC_CNT(sc) > 0) {
1062		desc = SEC_GET_READY_DESC(sc);
1063
1064		ch0 = sec_eu_channel(sc, desc->sd_desc->shd_eu_sel0);
1065		ch1 = sec_eu_channel(sc, desc->sd_desc->shd_eu_sel1);
1066
		/*
		 * Both EUs are used by the same channel.
		 * Enqueue the descriptor in the channel used by the busy EUs.
		 */
1071		if (ch0 >= 0 && ch0 == ch1) {
1072			if (sec_enqueue_desc(sc, desc, ch0) >= 0) {
1073				SEC_DESC_READY2QUEUED(sc);
1074				continue;
1075			}
1076		}
1077
		/*
		 * Only one of the EUs is free.
		 * Enqueue the descriptor in the channel used by the busy EU.
		 */
1082		if ((ch0 >= 0 && ch1 < 0) || (ch1 >= 0 && ch0 < 0)) {
1083			if (sec_enqueue_desc(sc, desc, (ch0 >= 0) ? ch0 : ch1)
1084			    >= 0) {
1085				SEC_DESC_READY2QUEUED(sc);
1086				continue;
1087			}
1088		}
1089
		/*
		 * Both EUs are free.
		 * Enqueue the descriptor in the first free channel.
		 */
1094		if (ch0 < 0 && ch1 < 0) {
1095			if (sec_enqueue_desc(sc, desc, -1) >= 0) {
1096				SEC_DESC_READY2QUEUED(sc);
1097				continue;
1098			}
1099		}
1100
		/* Current descriptor cannot be queued at the moment */
1102		SEC_PUT_BACK_READY_DESC(sc);
1103		break;
1104	}
1105
1106	SEC_UNLOCK(sc, controller);
1107	SEC_UNLOCK(sc, descriptors);
1108}
1109
1110static struct sec_desc *
1111sec_find_desc(struct sec_softc *sc, bus_addr_t paddr)
1112{
1113	struct sec_desc *desc = NULL;
1114	int i;
1115
1116	SEC_LOCK_ASSERT(sc, descriptors);
1117
	for (i = 0; i < SEC_DESCRIPTORS; i++) {
1119		if (sc->sc_desc[i].sd_desc_paddr == paddr) {
1120			desc = &(sc->sc_desc[i]);
1121			break;
1122		}
1123	}
1124
1125	return (desc);
1126}
1127
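/* Fill descriptor pointer n with a single physically contiguous buffer. */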
1128static int
1129sec_make_pointer_direct(struct sec_softc *sc, struct sec_desc *desc, u_int n,
1130    bus_addr_t data, bus_size_t dsize)
1131{
1132	struct sec_hw_desc_ptr *ptr;
1133
1134	SEC_LOCK_ASSERT(sc, descriptors);
1135
1136	ptr = &(desc->sd_desc->shd_pointer[n]);
1137	ptr->shdp_length = dsize;
1138	ptr->shdp_extent = 0;
1139	ptr->shdp_j = 0;
1140	ptr->shdp_ptr = data;
1141
1142	return (0);
1143}
1144
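/*
 * Fill descriptor pointer n with a link table built by DMA-mapping the
 * supplied buffer (flat memory, uio or mbuf chain).
 */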
1145static int
1146sec_make_pointer(struct sec_softc *sc, struct sec_desc *desc,
1147    u_int n, void *data, bus_size_t doffset, bus_size_t dsize, int dtype)
1148{
1149	struct sec_desc_map_info sdmi = { sc, dsize, doffset, NULL, NULL, 0 };
1150	struct sec_hw_desc_ptr *ptr;
1151	int error;
1152
1153	SEC_LOCK_ASSERT(sc, descriptors);
1154
	/* For flat memory, map only the requested region */
1156	if (dtype == SEC_MEMORY) {
1157		 data = (uint8_t*)(data) + doffset;
1158		 sdmi.sdmi_offset = 0;
1159	}
1160
1161	error = sec_desc_map_dma(sc, &(desc->sd_ptr_dmem[n]), data, dsize,
1162	    dtype, &sdmi);
1163
1164	if (error)
1165		return (error);
1166
1167	sdmi.sdmi_lt_last->sl_lt->shl_r = 1;
1168	desc->sd_lt_used += sdmi.sdmi_lt_used;
1169
1170	ptr = &(desc->sd_desc->shd_pointer[n]);
1171	ptr->shdp_length = dsize;
1172	ptr->shdp_extent = 0;
1173	ptr->shdp_j = 1;
1174	ptr->shdp_ptr = sdmi.sdmi_lt_first->sl_lt_paddr;
1175
1176	return (0);
1177}
1178
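/*
 * Split a session request into at most one cipher and one digest operation;
 * the digest (if any) is always returned through *mac.
 */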
1179static int
1180sec_split_cri(struct cryptoini *cri, struct cryptoini **enc,
1181    struct cryptoini **mac)
1182{
1183	struct cryptoini *e, *m;
1184
1185	e = cri;
1186	m = cri->cri_next;
1187
	/* We can handle only two operations */
1189	if (m && m->cri_next)
1190		return (EINVAL);
1191
1192	if (sec_mdeu_can_handle(e->cri_alg)) {
1193		cri = m;
1194		m = e;
1195		e = cri;
1196	}
1197
1198	if (m && !sec_mdeu_can_handle(m->cri_alg))
1199		return (EINVAL);
1200
1201	*enc = e;
1202	*mac = m;
1203
1204	return (0);
1205}
1206
1207static int
1208sec_split_crp(struct cryptop *crp, struct cryptodesc **enc,
1209    struct cryptodesc **mac)
1210{
1211	struct cryptodesc *e, *m, *t;
1212
1213	e = crp->crp_desc;
1214	m = e->crd_next;
1215
	/* We can handle only two operations */
1217	if (m && m->crd_next)
1218		return (EINVAL);
1219
1220	if (sec_mdeu_can_handle(e->crd_alg)) {
1221		t = m;
1222		m = e;
1223		e = t;
1224	}
1225
1226	if (m && !sec_mdeu_can_handle(m->crd_alg))
1227		return (EINVAL);
1228
1229	*enc = e;
1230	*mac = m;
1231
1232	return (0);
1233}
1234
1235static int
1236sec_alloc_session(struct sec_softc *sc)
1237{
1238	struct sec_session *ses = NULL;
1239	int sid = -1;
1240	u_int i;
1241
1242	SEC_LOCK(sc, sessions);
1243
1244	for (i = 0; i < SEC_MAX_SESSIONS; i++) {
1245		if (sc->sc_sessions[i].ss_used == 0) {
1246			ses = &(sc->sc_sessions[i]);
1247			ses->ss_used = 1;
1248			ses->ss_ivlen = 0;
1249			ses->ss_klen = 0;
1250			ses->ss_mklen = 0;
1251			sid = i;
1252			break;
1253		}
1254	}
1255
1256	SEC_UNLOCK(sc, sessions);
1257
1258	return (sid);
1259}
1260
1261static struct sec_session *
1262sec_get_session(struct sec_softc *sc, u_int sid)
1263{
1264	struct sec_session *ses;
1265
1266	if (sid >= SEC_MAX_SESSIONS)
1267		return (NULL);
1268
1269	SEC_LOCK(sc, sessions);
1270
1271	ses = &(sc->sc_sessions[sid]);
1272
1273	if (ses->ss_used == 0)
1274		ses = NULL;
1275
1276	SEC_UNLOCK(sc, sessions);
1277
1278	return (ses);
1279}
1280
1281static int
1282sec_newsession(device_t dev, u_int32_t *sidp, struct cryptoini *cri)
1283{
1284	struct sec_softc *sc = device_get_softc(dev);
1285	struct sec_eu_methods *eu = sec_eus;
1286	struct cryptoini *enc = NULL;
1287	struct cryptoini *mac = NULL;
1288	struct sec_session *ses;
1289	int error = -1;
1290	int sid;
1291
1292	error = sec_split_cri(cri, &enc, &mac);
1293	if (error)
1294		return (error);
1295
1296	/* Check key lengths */
1297	if (enc && enc->cri_key && (enc->cri_klen / 8) > SEC_MAX_KEY_LEN)
1298		return (E2BIG);
1299
1300	if (mac && mac->cri_key && (mac->cri_klen / 8) > SEC_MAX_KEY_LEN)
1301		return (E2BIG);
1302
	/* Only SEC 3.0 and newer support digests larger than 256 bits */
1304	if (sc->sc_version < 3 && mac && mac->cri_klen > 256)
1305		return (E2BIG);
1306
1307	sid = sec_alloc_session(sc);
1308	if (sid < 0)
1309		return (ENOMEM);
1310
1311	ses = sec_get_session(sc, sid);
1312
1313	/* Find EU for this session */
1314	while (eu->sem_make_desc != NULL) {
1315		error = eu->sem_newsession(sc, ses, enc, mac);
1316		if (error >= 0)
1317			break;
1318
1319		eu++;
1320	}
1321
1322	/* If not found, return EINVAL */
1323	if (error < 0) {
1324		sec_free_session(sc, ses);
1325		return (EINVAL);
1326	}
1327
1328	/* Save cipher key */
1329	if (enc && enc->cri_key) {
1330		ses->ss_klen = enc->cri_klen / 8;
1331		memcpy(ses->ss_key, enc->cri_key, ses->ss_klen);
1332	}
1333
1334	/* Save digest key */
1335	if (mac && mac->cri_key) {
1336		ses->ss_mklen = mac->cri_klen / 8;
1337		memcpy(ses->ss_mkey, mac->cri_key, ses->ss_mklen);
1338	}
1339
1340	ses->ss_eu = eu;
1341	*sidp = sid;
1342
1343	return (0);
1344}
1345
1346static int
1347sec_freesession(device_t dev, uint64_t tid)
1348{
1349	struct sec_softc *sc = device_get_softc(dev);
1350	struct sec_session *ses;
1351	int error = 0;
1352
1353	ses = sec_get_session(sc, CRYPTO_SESID2LID(tid));
1354	if (ses == NULL)
1355		return (EINVAL);
1356
1357	sec_free_session(sc, ses);
1358
1359	return (error);
1360}
1361
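/*
 * Main OCF entry point: validate the request, take a free descriptor, load
 * IV and keys, let the session's EU build the descriptor and enqueue it.
 */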
1362static int
1363sec_process(device_t dev, struct cryptop *crp, int hint)
1364{
1365	struct sec_softc *sc = device_get_softc(dev);
1366	struct sec_desc *desc = NULL;
1367	struct cryptodesc *mac, *enc;
1368	struct sec_session *ses;
1369	int buftype, error = 0;
1370
1371	/* Check Session ID */
1372	ses = sec_get_session(sc, CRYPTO_SESID2LID(crp->crp_sid));
1373	if (ses == NULL) {
1374		crp->crp_etype = EINVAL;
1375		crypto_done(crp);
1376		return (0);
1377	}
1378
1379	/* Check for input length */
1380	if (crp->crp_ilen > SEC_MAX_DMA_BLOCK_SIZE) {
1381		crp->crp_etype = E2BIG;
1382		crypto_done(crp);
1383		return (0);
1384	}
1385
1386	/* Get descriptors */
1387	if (sec_split_crp(crp, &enc, &mac)) {
1388		crp->crp_etype = EINVAL;
1389		crypto_done(crp);
1390		return (0);
1391	}
1392
1393	SEC_LOCK(sc, descriptors);
1394	SEC_DESC_SYNC(sc, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1395
	/* Block the driver if there are no free descriptors or we are going down */
1397	if (SEC_FREE_DESC_CNT(sc) == 0 || sc->sc_shutdown) {
1398		sc->sc_blocked |= CRYPTO_SYMQ;
1399		SEC_UNLOCK(sc, descriptors);
1400		return (ERESTART);
1401	}
1402
1403	/* Prepare descriptor */
1404	desc = SEC_GET_FREE_DESC(sc);
1405	desc->sd_lt_used = 0;
1406	desc->sd_error = 0;
1407	desc->sd_crp = crp;
1408
1409	if (crp->crp_flags & CRYPTO_F_IOV)
1410		buftype = SEC_UIO;
1411	else if (crp->crp_flags & CRYPTO_F_IMBUF)
1412		buftype = SEC_MBUF;
1413	else
1414		buftype = SEC_MEMORY;
1415
1416	if (enc && enc->crd_flags & CRD_F_ENCRYPT) {
1417		if (enc->crd_flags & CRD_F_IV_EXPLICIT)
1418			memcpy(desc->sd_desc->shd_iv, enc->crd_iv,
1419			    ses->ss_ivlen);
1420		else
1421			arc4rand(desc->sd_desc->shd_iv, ses->ss_ivlen, 0);
1422
1423		if ((enc->crd_flags & CRD_F_IV_PRESENT) == 0)
1424			crypto_copyback(crp->crp_flags, crp->crp_buf,
1425			    enc->crd_inject, ses->ss_ivlen,
1426			    desc->sd_desc->shd_iv);
1427	} else if (enc) {
1428		if (enc->crd_flags & CRD_F_IV_EXPLICIT)
1429			memcpy(desc->sd_desc->shd_iv, enc->crd_iv,
1430			    ses->ss_ivlen);
1431		else
1432			crypto_copydata(crp->crp_flags, crp->crp_buf,
1433			    enc->crd_inject, ses->ss_ivlen,
1434			    desc->sd_desc->shd_iv);
1435	}
1436
1437	if (enc && enc->crd_flags & CRD_F_KEY_EXPLICIT) {
1438		if ((enc->crd_klen / 8) <= SEC_MAX_KEY_LEN) {
1439			ses->ss_klen = enc->crd_klen / 8;
1440			memcpy(ses->ss_key, enc->crd_key, ses->ss_klen);
1441		} else
1442			error = E2BIG;
1443	}
1444
1445	if (!error && mac && mac->crd_flags & CRD_F_KEY_EXPLICIT) {
1446		if ((mac->crd_klen / 8) <= SEC_MAX_KEY_LEN) {
1447			ses->ss_mklen = mac->crd_klen / 8;
1448			memcpy(ses->ss_mkey, mac->crd_key, ses->ss_mklen);
1449		} else
1450			error = E2BIG;
1451	}
1452
1453	if (!error) {
1454		memcpy(desc->sd_desc->shd_key, ses->ss_key, ses->ss_klen);
1455		memcpy(desc->sd_desc->shd_mkey, ses->ss_mkey, ses->ss_mklen);
1456
1457		error = ses->ss_eu->sem_make_desc(sc, ses, desc, crp, buftype);
1458	}
1459
1460	if (error) {
1461		SEC_DESC_FREE_POINTERS(desc);
1462		SEC_DESC_PUT_BACK_LT(sc, desc);
1463		SEC_PUT_BACK_FREE_DESC(sc);
1464		SEC_UNLOCK(sc, descriptors);
1465		crp->crp_etype = error;
1466		crypto_done(crp);
1467		return (0);
1468	}
1469
	/*
	 * Skip the DONE interrupt if this is not the last request in a burst,
	 * but only if we are running on SEC 3.X. On SEC 2.X we have to enable
	 * DONE signaling on each descriptor.
	 */
1475	if ((hint & CRYPTO_HINT_MORE) && sc->sc_version == 3)
1476		desc->sd_desc->shd_dn = 0;
1477	else
1478		desc->sd_desc->shd_dn = 1;
1479
1480	SEC_DESC_SYNC(sc, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1481	SEC_DESC_SYNC_POINTERS(desc, BUS_DMASYNC_POSTREAD |
1482	    BUS_DMASYNC_POSTWRITE);
1483	SEC_DESC_FREE2READY(sc);
1484	SEC_UNLOCK(sc, descriptors);
1485
1486	/* Enqueue ready descriptors in hardware */
1487	sec_enqueue(sc);
1488
1489	return (0);
1490}
1491
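/*
 * Build a common non-snooping descriptor: cipher-only operation with IV and
 * key taken from the hardware descriptor and data processed in place.
 */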
1492static int
1493sec_build_common_ns_desc(struct sec_softc *sc, struct sec_desc *desc,
1494    struct sec_session *ses, struct cryptop *crp, struct cryptodesc *enc,
1495    int buftype)
1496{
1497	struct sec_hw_desc *hd = desc->sd_desc;
1498	int error;
1499
1500	hd->shd_desc_type = SEC_DT_COMMON_NONSNOOP;
1501	hd->shd_eu_sel1 = SEC_EU_NONE;
1502	hd->shd_mode1 = 0;
1503
1504	/* Pointer 0: NULL */
1505	error = sec_make_pointer_direct(sc, desc, 0, 0, 0);
1506	if (error)
1507		return (error);
1508
1509	/* Pointer 1: IV IN */
1510	error = sec_make_pointer_direct(sc, desc, 1, desc->sd_desc_paddr +
1511	    offsetof(struct sec_hw_desc, shd_iv), ses->ss_ivlen);
1512	if (error)
1513		return (error);
1514
1515	/* Pointer 2: Cipher Key */
1516	error = sec_make_pointer_direct(sc, desc, 2, desc->sd_desc_paddr +
1517	    offsetof(struct sec_hw_desc, shd_key), ses->ss_klen);
1518 	if (error)
1519		return (error);
1520
1521	/* Pointer 3: Data IN */
1522	error = sec_make_pointer(sc, desc, 3, crp->crp_buf, enc->crd_skip,
1523	    enc->crd_len, buftype);
1524	if (error)
1525		return (error);
1526
1527	/* Pointer 4: Data OUT */
1528	error = sec_make_pointer(sc, desc, 4, crp->crp_buf, enc->crd_skip,
1529	    enc->crd_len, buftype);
1530	if (error)
1531		return (error);
1532
1533	/* Pointer 5: IV OUT (Not used: NULL) */
1534	error = sec_make_pointer_direct(sc, desc, 5, 0, 0);
1535	if (error)
1536		return (error);
1537
1538	/* Pointer 6: NULL */
1539	error = sec_make_pointer_direct(sc, desc, 6, 0, 0);
1540
1541	return (error);
1542}
1543
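/*
 * Build an HMAC-snooping descriptor: the digest region must end together
 * with the cipher region, and the computed hash is stored at crd_inject.
 */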
1544static int
1545sec_build_common_s_desc(struct sec_softc *sc, struct sec_desc *desc,
1546    struct sec_session *ses, struct cryptop *crp, struct cryptodesc *enc,
1547    struct cryptodesc *mac, int buftype)
1548{
1549	struct sec_hw_desc *hd = desc->sd_desc;
1550	u_int eu, mode, hashlen;
1551	int error;
1552
1553	if (mac->crd_len < enc->crd_len)
1554		return (EINVAL);
1555
1556	if (mac->crd_skip + mac->crd_len != enc->crd_skip + enc->crd_len)
1557		return (EINVAL);
1558
1559	error = sec_mdeu_config(mac, &eu, &mode, &hashlen);
1560	if (error)
1561		return (error);
1562
1563	hd->shd_desc_type = SEC_DT_HMAC_SNOOP;
1564	hd->shd_eu_sel1 = eu;
1565	hd->shd_mode1 = mode;
1566
1567	/* Pointer 0: HMAC Key */
1568	error = sec_make_pointer_direct(sc, desc, 0, desc->sd_desc_paddr +
1569	    offsetof(struct sec_hw_desc, shd_mkey), ses->ss_mklen);
1570	if (error)
1571		return (error);
1572
1573	/* Pointer 1: HMAC-Only Data IN */
1574	error = sec_make_pointer(sc, desc, 1, crp->crp_buf, mac->crd_skip,
1575	    mac->crd_len - enc->crd_len, buftype);
1576	if (error)
1577		return (error);
1578
1579	/* Pointer 2: Cipher Key */
1580	error = sec_make_pointer_direct(sc, desc, 2, desc->sd_desc_paddr +
1581	    offsetof(struct sec_hw_desc, shd_key), ses->ss_klen);
1582 	if (error)
1583		return (error);
1584
1585	/* Pointer 3: IV IN */
1586	error = sec_make_pointer_direct(sc, desc, 3, desc->sd_desc_paddr +
1587	    offsetof(struct sec_hw_desc, shd_iv), ses->ss_ivlen);
1588	if (error)
1589		return (error);
1590
1591	/* Pointer 4: Data IN */
1592	error = sec_make_pointer(sc, desc, 4, crp->crp_buf, enc->crd_skip,
1593	    enc->crd_len, buftype);
1594	if (error)
1595		return (error);
1596
1597	/* Pointer 5: Data OUT */
1598	error = sec_make_pointer(sc, desc, 5, crp->crp_buf, enc->crd_skip,
1599	    enc->crd_len, buftype);
1600	if (error)
1601		return (error);
1602
1603	/* Pointer 6: HMAC OUT */
1604	error = sec_make_pointer(sc, desc, 6, crp->crp_buf, mac->crd_inject,
1605	    hashlen, buftype);
1606
1607	return (error);
1608}
1609
1610/* AESU */
1611
1612static int
1613sec_aesu_newsession(struct sec_softc *sc, struct sec_session *ses,
1614    struct cryptoini *enc, struct cryptoini *mac)
1615{
1616
1617	if (enc == NULL)
1618		return (-1);
1619
1620	if (enc->cri_alg != CRYPTO_AES_CBC)
1621		return (-1);
1622
1623	ses->ss_ivlen = AES_BLOCK_LEN;
1624
1625	return (0);
1626}
1627
1628static int
1629sec_aesu_make_desc(struct sec_softc *sc, struct sec_session *ses,
1630    struct sec_desc *desc, struct cryptop *crp, int buftype)
1631{
1632	struct sec_hw_desc *hd = desc->sd_desc;
1633	struct cryptodesc *enc, *mac;
1634	int error;
1635
1636	error = sec_split_crp(crp, &enc, &mac);
1637	if (error)
1638		return (error);
1639
1640	if (!enc)
1641		return (EINVAL);
1642
1643	hd->shd_eu_sel0 = SEC_EU_AESU;
1644	hd->shd_mode0 = SEC_AESU_MODE_CBC;
1645
1646	if (enc->crd_alg != CRYPTO_AES_CBC)
1647		return (EINVAL);
1648
1649	if (enc->crd_flags & CRD_F_ENCRYPT) {
1650		hd->shd_mode0 |= SEC_AESU_MODE_ED;
1651		hd->shd_dir = 0;
1652	} else
1653		hd->shd_dir = 1;
1654
1655	if (mac)
1656		error = sec_build_common_s_desc(sc, desc, ses, crp, enc, mac,
1657		    buftype);
1658	else
1659		error = sec_build_common_ns_desc(sc, desc, ses, crp, enc,
1660		    buftype);
1661
1662	return (error);
1663}
1664
1665/* DEU */
1666
1667static int
1668sec_deu_newsession(struct sec_softc *sc, struct sec_session *ses,
1669    struct cryptoini *enc, struct cryptoini *mac)
1670{
1671
1672	if (enc == NULL)
1673		return (-1);
1674
1675	switch (enc->cri_alg) {
1676	case CRYPTO_DES_CBC:
1677	case CRYPTO_3DES_CBC:
1678		break;
1679	default:
1680		return (-1);
1681	}
1682
1683	ses->ss_ivlen = DES_BLOCK_LEN;
1684
1685	return (0);
1686}
1687
1688static int
1689sec_deu_make_desc(struct sec_softc *sc, struct sec_session *ses,
1690    struct sec_desc *desc, struct cryptop *crp, int buftype)
1691{
1692	struct sec_hw_desc *hd = desc->sd_desc;
1693	struct cryptodesc *enc, *mac;
1694	int error;
1695
1696	error = sec_split_crp(crp, &enc, &mac);
1697	if (error)
1698		return (error);
1699
1700	if (!enc)
1701		return (EINVAL);
1702
1703	hd->shd_eu_sel0 = SEC_EU_DEU;
1704	hd->shd_mode0 = SEC_DEU_MODE_CBC;
1705
1706	switch (enc->crd_alg) {
1707	case CRYPTO_3DES_CBC:
1708		hd->shd_mode0 |= SEC_DEU_MODE_TS;
1709		break;
1710	case CRYPTO_DES_CBC:
1711		break;
1712	default:
1713		return (EINVAL);
1714	}
1715
1716	if (enc->crd_flags & CRD_F_ENCRYPT) {
1717		hd->shd_mode0 |= SEC_DEU_MODE_ED;
1718		hd->shd_dir = 0;
1719	} else
1720		hd->shd_dir = 1;
1721
1722	if (mac)
1723		error = sec_build_common_s_desc(sc, desc, ses, crp, enc, mac,
1724		    buftype);
1725	else
1726		error = sec_build_common_ns_desc(sc, desc, ses, crp, enc,
1727		    buftype);
1728
1729	return (error);
1730}
1731
1732/* MDEU */
1733
1734static int
1735sec_mdeu_can_handle(u_int alg)
1736{
1737	switch (alg) {
1738	case CRYPTO_MD5:
1739	case CRYPTO_SHA1:
1740	case CRYPTO_MD5_HMAC:
1741	case CRYPTO_SHA1_HMAC:
1742	case CRYPTO_SHA2_256_HMAC:
1743	case CRYPTO_SHA2_384_HMAC:
1744	case CRYPTO_SHA2_512_HMAC:
1745		return (1);
1746	default:
1747		return (0);
1748	}
1749}
1750
1751static int
1752sec_mdeu_config(struct cryptodesc *crd, u_int *eu, u_int *mode, u_int *hashlen)
1753{
1754
1755	*mode = SEC_MDEU_MODE_PD | SEC_MDEU_MODE_INIT;
1756	*eu = SEC_EU_NONE;
1757
1758	switch (crd->crd_alg) {
1759	case CRYPTO_MD5_HMAC:
1760		*mode |= SEC_MDEU_MODE_HMAC;
1761		/* FALLTHROUGH */
1762	case CRYPTO_MD5:
1763		*eu = SEC_EU_MDEU_A;
1764		*mode |= SEC_MDEU_MODE_MD5;
1765		*hashlen = MD5_HASH_LEN;
1766		break;
1767	case CRYPTO_SHA1_HMAC:
1768		*mode |= SEC_MDEU_MODE_HMAC;
1769		/* FALLTHROUGH */
1770	case CRYPTO_SHA1:
1771		*eu = SEC_EU_MDEU_A;
1772		*mode |= SEC_MDEU_MODE_SHA1;
1773		*hashlen = SHA1_HASH_LEN;
1774		break;
1775	case CRYPTO_SHA2_256_HMAC:
1776		*mode |= SEC_MDEU_MODE_HMAC | SEC_MDEU_MODE_SHA256;
1777		*eu = SEC_EU_MDEU_A;
1778		break;
1779	case CRYPTO_SHA2_384_HMAC:
1780		*mode |= SEC_MDEU_MODE_HMAC | SEC_MDEU_MODE_SHA384;
1781		*eu = SEC_EU_MDEU_B;
1782		break;
1783	case CRYPTO_SHA2_512_HMAC:
1784		*mode |= SEC_MDEU_MODE_HMAC | SEC_MDEU_MODE_SHA512;
1785		*eu = SEC_EU_MDEU_B;
1786		break;
1787	default:
1788		return (EINVAL);
1789	}
1790
1791	if (*mode & SEC_MDEU_MODE_HMAC)
1792		*hashlen = SEC_HMAC_HASH_LEN;
1793
1794	return (0);
1795}
1796
1797static int
1798sec_mdeu_newsession(struct sec_softc *sc, struct sec_session *ses,
1799    struct cryptoini *enc, struct cryptoini *mac)
1800{
1801
1802	if (mac && sec_mdeu_can_handle(mac->cri_alg))
1803		return (0);
1804
1805	return (-1);
1806}
1807
1808static int
1809sec_mdeu_make_desc(struct sec_softc *sc, struct sec_session *ses,
1810    struct sec_desc *desc, struct cryptop *crp, int buftype)
1811{
1812	struct cryptodesc *enc, *mac;
1813	struct sec_hw_desc *hd = desc->sd_desc;
1814	u_int eu, mode, hashlen;
1815	int error;
1816
1817	error = sec_split_crp(crp, &enc, &mac);
1818	if (error)
1819		return (error);
1820
1821	if (enc)
1822		return (EINVAL);
1823
1824	error = sec_mdeu_config(mac, &eu, &mode, &hashlen);
1825	if (error)
1826		return (error);
1827
1828	hd->shd_desc_type = SEC_DT_COMMON_NONSNOOP;
1829	hd->shd_eu_sel0 = eu;
1830	hd->shd_mode0 = mode;
1831	hd->shd_eu_sel1 = SEC_EU_NONE;
1832	hd->shd_mode1 = 0;
1833
1834	/* Pointer 0: NULL */
1835	error = sec_make_pointer_direct(sc, desc, 0, 0, 0);
1836	if (error)
1837		return (error);
1838
1839	/* Pointer 1: Context In (Not used: NULL) */
1840	error = sec_make_pointer_direct(sc, desc, 1, 0, 0);
1841	if (error)
1842		return (error);
1843
1844	/* Pointer 2: HMAC Key (or NULL, depending on digest type) */
1845	if (hd->shd_mode0 & SEC_MDEU_MODE_HMAC)
1846		error = sec_make_pointer_direct(sc, desc, 2,
1847		    desc->sd_desc_paddr + offsetof(struct sec_hw_desc,
1848		    shd_mkey), ses->ss_mklen);
1849	else
1850		error = sec_make_pointer_direct(sc, desc, 2, 0, 0);
1851
1852	if (error)
1853		return (error);
1854
1855	/* Pointer 3: Input Data */
1856	error = sec_make_pointer(sc, desc, 3, crp->crp_buf, mac->crd_skip,
1857	    mac->crd_len, buftype);
1858	if (error)
1859		return (error);
1860
1861	/* Pointer 4: NULL */
1862	error = sec_make_pointer_direct(sc, desc, 4, 0, 0);
1863	if (error)
1864		return (error);
1865
1866	/* Pointer 5: Hash out */
1867	error = sec_make_pointer(sc, desc, 5, crp->crp_buf,
1868	    mac->crd_inject, hashlen, buftype);
1869	if (error)
1870		return (error);
1871
1872	/* Pointer 6: NULL */
1873	error = sec_make_pointer_direct(sc, desc, 6, 0, 0);
1874
	return (error);
1876}
1877