cesa.c revision 238873
1/*-
2 * Copyright (C) 2009-2011 Semihalf.
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 *    notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 *    notice, this list of conditions and the following disclaimer in the
12 *    documentation and/or other materials provided with the distribution.
13 *
14 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
15 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
18 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
20 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
21 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
22 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
24 * SUCH DAMAGE.
25 */
26
27/*
28 * CESA SRAM Memory Map:
29 *
30 * +------------------------+ <= sc->sc_sram_base + CESA_SRAM_SIZE
31 * |                        |
32 * |          DATA          |
33 * |                        |
34 * +------------------------+ <= sc->sc_sram_base + CESA_DATA(0)
35 * |  struct cesa_sa_data   |
36 * +------------------------+
37 * |  struct cesa_sa_hdesc  |
38 * +------------------------+ <= sc->sc_sram_base
39 */
40
41#include <sys/cdefs.h>
42__FBSDID("$FreeBSD: head/sys/dev/cesa/cesa.c 238873 2012-07-28 21:56:24Z hrs $");
43
44#include <sys/param.h>
45#include <sys/systm.h>
46#include <sys/bus.h>
47#include <sys/endian.h>
48#include <sys/kernel.h>
49#include <sys/lock.h>
50#include <sys/mbuf.h>
51#include <sys/module.h>
52#include <sys/mutex.h>
53#include <sys/rman.h>
54
55#include <machine/bus.h>
56#include <machine/intr.h>
57#include <machine/resource.h>
58
59#include <dev/fdt/fdt_common.h>
60#include <dev/ofw/ofw_bus.h>
61#include <dev/ofw/ofw_bus_subr.h>
62
63#include <sys/md5.h>
64#include <crypto/sha1.h>
65#include <crypto/rijndael/rijndael.h>
66#include <opencrypto/cryptodev.h>
67#include "cryptodev_if.h"
68
69#include <arm/mv/mvreg.h>
70#include <arm/mv/mvwin.h>
71#include <arm/mv/mvvar.h>
72#include "cesa.h"
73
74#undef DEBUG
75
76static int	cesa_probe(device_t);
77static int	cesa_attach(device_t);
78static int	cesa_detach(device_t);
79static void	cesa_intr(void *);
80static int	cesa_newsession(device_t, u_int32_t *, struct cryptoini *);
81static int	cesa_freesession(device_t, u_int64_t);
82static int	cesa_process(device_t, struct cryptop *, int);
83
84static struct resource_spec cesa_res_spec[] = {
85	{ SYS_RES_MEMORY, 0, RF_ACTIVE },
86	{ SYS_RES_IRQ, 0, RF_ACTIVE | RF_SHAREABLE },
87	{ -1, 0 }
88};
89
90static device_method_t cesa_methods[] = {
91	/* Device interface */
92	DEVMETHOD(device_probe,		cesa_probe),
93	DEVMETHOD(device_attach,	cesa_attach),
94	DEVMETHOD(device_detach,	cesa_detach),
95
96	/* Crypto device methods */
97	DEVMETHOD(cryptodev_newsession,	cesa_newsession),
98	DEVMETHOD(cryptodev_freesession,cesa_freesession),
99	DEVMETHOD(cryptodev_process,	cesa_process),
100
101	DEVMETHOD_END
102};
103
104static driver_t cesa_driver = {
105	"cesa",
106	cesa_methods,
107	sizeof (struct cesa_softc)
108};
109static devclass_t cesa_devclass;
110
111DRIVER_MODULE(cesa, simplebus, cesa_driver, cesa_devclass, 0, 0);
112MODULE_DEPEND(cesa, crypto, 1, 1, 1);
113
114static void
115cesa_dump_cshd(struct cesa_softc *sc, struct cesa_sa_hdesc *cshd)
116{
117#ifdef DEBUG
118	device_t dev;
119
120	dev = sc->sc_dev;
121	device_printf(dev, "CESA SA Hardware Descriptor:\n");
122	device_printf(dev, "\t\tconfig: 0x%08X\n", cshd->cshd_config);
123	device_printf(dev, "\t\te_src:  0x%08X\n", cshd->cshd_enc_src);
124	device_printf(dev, "\t\te_dst:  0x%08X\n", cshd->cshd_enc_dst);
125	device_printf(dev, "\t\te_dlen: 0x%08X\n", cshd->cshd_enc_dlen);
126	device_printf(dev, "\t\te_key:  0x%08X\n", cshd->cshd_enc_key);
127	device_printf(dev, "\t\te_iv_1: 0x%08X\n", cshd->cshd_enc_iv);
128	device_printf(dev, "\t\te_iv_2: 0x%08X\n", cshd->cshd_enc_iv_buf);
129	device_printf(dev, "\t\tm_src:  0x%08X\n", cshd->cshd_mac_src);
130	device_printf(dev, "\t\tm_dst:  0x%08X\n", cshd->cshd_mac_dst);
131	device_printf(dev, "\t\tm_dlen: 0x%08X\n", cshd->cshd_mac_dlen);
132	device_printf(dev, "\t\tm_tlen: 0x%08X\n", cshd->cshd_mac_total_dlen);
133	device_printf(dev, "\t\tm_iv_i: 0x%08X\n", cshd->cshd_mac_iv_in);
134	device_printf(dev, "\t\tm_iv_o: 0x%08X\n", cshd->cshd_mac_iv_out);
135#endif
136}
137
138static void
139cesa_alloc_dma_mem_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
140{
141	struct cesa_dma_mem *cdm;
142
143	if (error)
144		return;
145
146	KASSERT(nseg == 1, ("Got wrong number of DMA segments, should be 1."));
147	cdm = arg;
148	cdm->cdm_paddr = segs->ds_addr;
149}
150
151static int
152cesa_alloc_dma_mem(struct cesa_softc *sc, struct cesa_dma_mem *cdm,
153    bus_size_t size)
154{
155	int error;
156
157	KASSERT(cdm->cdm_vaddr == NULL,
158	    ("%s(): DMA memory descriptor in use.", __func__));
159
160	error = bus_dma_tag_create(bus_get_dma_tag(sc->sc_dev),	/* parent */
161	    PAGE_SIZE, 0,			/* alignment, boundary */
162	    BUS_SPACE_MAXADDR_32BIT,		/* lowaddr */
163	    BUS_SPACE_MAXADDR,			/* highaddr */
164	    NULL, NULL,				/* filtfunc, filtfuncarg */
165	    size, 1,				/* maxsize, nsegments */
166	    size, 0,				/* maxsegsz, flags */
167	    NULL, NULL,				/* lockfunc, lockfuncarg */
168	    &cdm->cdm_tag);			/* dmat */
169	if (error) {
170		device_printf(sc->sc_dev, "failed to allocate busdma tag, error"
171		    " %i!\n", error);
172
173		goto err1;
174	}
175
176	error = bus_dmamem_alloc(cdm->cdm_tag, &cdm->cdm_vaddr,
177	    BUS_DMA_NOWAIT | BUS_DMA_ZERO, &cdm->cdm_map);
178	if (error) {
179		device_printf(sc->sc_dev, "failed to allocate DMA safe"
180		    " memory, error %i!\n", error);
181
182		goto err2;
183	}
184
185	error = bus_dmamap_load(cdm->cdm_tag, cdm->cdm_map, cdm->cdm_vaddr,
186	    size, cesa_alloc_dma_mem_cb, cdm, BUS_DMA_NOWAIT);
187	if (error) {
188		device_printf(sc->sc_dev, "cannot get address of the DMA"
189		    " memory, error %i\n", error);
190
191		goto err3;
192	}
193
194	return (0);
195err3:
196	bus_dmamem_free(cdm->cdm_tag, cdm->cdm_vaddr, cdm->cdm_map);
197err2:
198	bus_dma_tag_destroy(cdm->cdm_tag);
199err1:
200	cdm->cdm_vaddr = NULL;
201	return (error);
202}
203
204static void
205cesa_free_dma_mem(struct cesa_dma_mem *cdm)
206{
207
208	bus_dmamap_unload(cdm->cdm_tag, cdm->cdm_map);
209	bus_dmamem_free(cdm->cdm_tag, cdm->cdm_vaddr, cdm->cdm_map);
210	bus_dma_tag_destroy(cdm->cdm_tag);
211	cdm->cdm_vaddr = NULL;
212}
213
214static void
215cesa_sync_dma_mem(struct cesa_dma_mem *cdm, bus_dmasync_op_t op)
216{
217
218	/* Sync only if dma memory is valid */
219        if (cdm->cdm_vaddr != NULL)
220		bus_dmamap_sync(cdm->cdm_tag, cdm->cdm_map, op);
221}
222
223static void
224cesa_sync_desc(struct cesa_softc *sc, bus_dmasync_op_t op)
225{
226
227	cesa_sync_dma_mem(&sc->sc_tdesc_cdm, op);
228	cesa_sync_dma_mem(&sc->sc_sdesc_cdm, op);
229	cesa_sync_dma_mem(&sc->sc_requests_cdm, op);
230}
231
232static struct cesa_session *
233cesa_alloc_session(struct cesa_softc *sc)
234{
235	struct cesa_session *cs;
236
237	CESA_GENERIC_ALLOC_LOCKED(sc, cs, sessions);
238
239	return (cs);
240}
241
242static struct cesa_session *
243cesa_get_session(struct cesa_softc *sc, uint32_t sid)
244{
245
246	if (sid >= CESA_SESSIONS)
247		return (NULL);
248
249	return (&sc->sc_sessions[sid]);
250}
251
252static void
253cesa_free_session(struct cesa_softc *sc, struct cesa_session *cs)
254{
255
256	CESA_GENERIC_FREE_LOCKED(sc, cs, sessions);
257}
258
259static struct cesa_request *
260cesa_alloc_request(struct cesa_softc *sc)
261{
262	struct cesa_request *cr;
263
264	CESA_GENERIC_ALLOC_LOCKED(sc, cr, requests);
265	if (!cr)
266		return (NULL);
267
268	STAILQ_INIT(&cr->cr_tdesc);
269	STAILQ_INIT(&cr->cr_sdesc);
270
271	return (cr);
272}
273
274static void
275cesa_free_request(struct cesa_softc *sc, struct cesa_request *cr)
276{
277
278	/* Free TDMA descriptors assigned to this request */
279	CESA_LOCK(sc, tdesc);
280	STAILQ_CONCAT(&sc->sc_free_tdesc, &cr->cr_tdesc);
281	CESA_UNLOCK(sc, tdesc);
282
283	/* Free SA descriptors assigned to this request */
284	CESA_LOCK(sc, sdesc);
285	STAILQ_CONCAT(&sc->sc_free_sdesc, &cr->cr_sdesc);
286	CESA_UNLOCK(sc, sdesc);
287
288	/* Unload DMA memory asociated with request */
289	if (cr->cr_dmap_loaded) {
290		bus_dmamap_unload(sc->sc_data_dtag, cr->cr_dmap);
291		cr->cr_dmap_loaded = 0;
292	}
293
294	CESA_GENERIC_FREE_LOCKED(sc, cr, requests);
295}
296
297static void
298cesa_enqueue_request(struct cesa_softc *sc, struct cesa_request *cr)
299{
300
301	CESA_LOCK(sc, requests);
302	STAILQ_INSERT_TAIL(&sc->sc_ready_requests, cr, cr_stq);
303	CESA_UNLOCK(sc, requests);
304}
305
306static struct cesa_tdma_desc *
307cesa_alloc_tdesc(struct cesa_softc *sc)
308{
309	struct cesa_tdma_desc *ctd;
310
311	CESA_GENERIC_ALLOC_LOCKED(sc, ctd, tdesc);
312
313	if (!ctd)
314		device_printf(sc->sc_dev, "TDMA descriptors pool exhaused. "
315		    "Consider increasing CESA_TDMA_DESCRIPTORS.\n");
316
317	return (ctd);
318}
319
320static struct cesa_sa_desc *
321cesa_alloc_sdesc(struct cesa_softc *sc, struct cesa_request *cr)
322{
323	struct cesa_sa_desc *csd;
324
325	CESA_GENERIC_ALLOC_LOCKED(sc, csd, sdesc);
326	if (!csd) {
327		device_printf(sc->sc_dev, "SA descriptors pool exhaused. "
328		    "Consider increasing CESA_SA_DESCRIPTORS.\n");
329		return (NULL);
330	}
331
332	STAILQ_INSERT_TAIL(&cr->cr_sdesc, csd, csd_stq);
333
334	/* Fill-in SA descriptor with default values */
335	csd->csd_cshd->cshd_enc_key = CESA_SA_DATA(csd_key);
336	csd->csd_cshd->cshd_enc_iv = CESA_SA_DATA(csd_iv);
337	csd->csd_cshd->cshd_enc_iv_buf = CESA_SA_DATA(csd_iv);
338	csd->csd_cshd->cshd_enc_src = 0;
339	csd->csd_cshd->cshd_enc_dst = 0;
340	csd->csd_cshd->cshd_enc_dlen = 0;
341	csd->csd_cshd->cshd_mac_dst = CESA_SA_DATA(csd_hash);
342	csd->csd_cshd->cshd_mac_iv_in = CESA_SA_DATA(csd_hiv_in);
343	csd->csd_cshd->cshd_mac_iv_out = CESA_SA_DATA(csd_hiv_out);
344	csd->csd_cshd->cshd_mac_src = 0;
345	csd->csd_cshd->cshd_mac_dlen = 0;
346
347	return (csd);
348}
349
350static struct cesa_tdma_desc *
351cesa_tdma_copy(struct cesa_softc *sc, bus_addr_t dst, bus_addr_t src,
352    bus_size_t size)
353{
354	struct cesa_tdma_desc *ctd;
355
356	ctd = cesa_alloc_tdesc(sc);
357	if (!ctd)
358		return (NULL);
359
360	ctd->ctd_cthd->cthd_dst = dst;
361	ctd->ctd_cthd->cthd_src = src;
362	ctd->ctd_cthd->cthd_byte_count = size;
363
364	/* Handle special control packet */
365	if (size != 0)
366		ctd->ctd_cthd->cthd_flags = CESA_CTHD_OWNED;
367	else
368		ctd->ctd_cthd->cthd_flags = 0;
369
370	return (ctd);
371}
372
373static struct cesa_tdma_desc *
374cesa_tdma_copyin_sa_data(struct cesa_softc *sc, struct cesa_request *cr)
375{
376
377	return (cesa_tdma_copy(sc, sc->sc_sram_base +
378	    sizeof(struct cesa_sa_hdesc), cr->cr_csd_paddr,
379	    sizeof(struct cesa_sa_data)));
380}
381
382static struct cesa_tdma_desc *
383cesa_tdma_copyout_sa_data(struct cesa_softc *sc, struct cesa_request *cr)
384{
385
386	return (cesa_tdma_copy(sc, cr->cr_csd_paddr, sc->sc_sram_base +
387	    sizeof(struct cesa_sa_hdesc), sizeof(struct cesa_sa_data)));
388}
389
390static struct cesa_tdma_desc *
391cesa_tdma_copy_sdesc(struct cesa_softc *sc, struct cesa_sa_desc *csd)
392{
393
394	return (cesa_tdma_copy(sc, sc->sc_sram_base, csd->csd_cshd_paddr,
395	    sizeof(struct cesa_sa_hdesc)));
396}
397
398static void
399cesa_append_tdesc(struct cesa_request *cr, struct cesa_tdma_desc *ctd)
400{
401	struct cesa_tdma_desc *ctd_prev;
402
403	if (!STAILQ_EMPTY(&cr->cr_tdesc)) {
404		ctd_prev = STAILQ_LAST(&cr->cr_tdesc, cesa_tdma_desc, ctd_stq);
405		ctd_prev->ctd_cthd->cthd_next = ctd->ctd_cthd_paddr;
406	}
407
408	ctd->ctd_cthd->cthd_next = 0;
409	STAILQ_INSERT_TAIL(&cr->cr_tdesc, ctd, ctd_stq);
410}
411
412static int
413cesa_append_packet(struct cesa_softc *sc, struct cesa_request *cr,
414    struct cesa_packet *cp, struct cesa_sa_desc *csd)
415{
416	struct cesa_tdma_desc *ctd, *tmp;
417
418	/* Copy SA descriptor for this packet */
419	ctd = cesa_tdma_copy_sdesc(sc, csd);
420	if (!ctd)
421		return (ENOMEM);
422
423	cesa_append_tdesc(cr, ctd);
424
425	/* Copy data to be processed */
426	STAILQ_FOREACH_SAFE(ctd, &cp->cp_copyin, ctd_stq, tmp)
427		cesa_append_tdesc(cr, ctd);
428	STAILQ_INIT(&cp->cp_copyin);
429
430	/* Insert control descriptor */
431	ctd = cesa_tdma_copy(sc, 0, 0, 0);
432	if (!ctd)
433		return (ENOMEM);
434
435	cesa_append_tdesc(cr, ctd);
436
437	/* Copy back results */
438	STAILQ_FOREACH_SAFE(ctd, &cp->cp_copyout, ctd_stq, tmp)
439		cesa_append_tdesc(cr, ctd);
440	STAILQ_INIT(&cp->cp_copyout);
441
442	return (0);
443}
444
445static int
446cesa_set_mkey(struct cesa_session *cs, int alg, const uint8_t *mkey, int mklen)
447{
448	uint8_t ipad[CESA_MAX_HMAC_BLOCK_LEN];
449	uint8_t opad[CESA_MAX_HMAC_BLOCK_LEN];
450	SHA1_CTX sha1ctx;
451	MD5_CTX md5ctx;
452	uint32_t *hout;
453	uint32_t *hin;
454	int i;
455
456	memset(ipad, HMAC_IPAD_VAL, CESA_MAX_HMAC_BLOCK_LEN);
457	memset(opad, HMAC_OPAD_VAL, CESA_MAX_HMAC_BLOCK_LEN);
458	for (i = 0; i < mklen; i++) {
459		ipad[i] ^= mkey[i];
460		opad[i] ^= mkey[i];
461	}
462
463	hin = (uint32_t *)cs->cs_hiv_in;
464	hout = (uint32_t *)cs->cs_hiv_out;
465
466	switch (alg) {
467	case CRYPTO_MD5_HMAC:
468		MD5Init(&md5ctx);
469		MD5Update(&md5ctx, ipad, MD5_HMAC_BLOCK_LEN);
470		memcpy(hin, md5ctx.state, sizeof(md5ctx.state));
471		MD5Init(&md5ctx);
472		MD5Update(&md5ctx, opad, MD5_HMAC_BLOCK_LEN);
473		memcpy(hout, md5ctx.state, sizeof(md5ctx.state));
474		break;
475	case CRYPTO_SHA1_HMAC:
476		SHA1Init(&sha1ctx);
477		SHA1Update(&sha1ctx, ipad, SHA1_HMAC_BLOCK_LEN);
478		memcpy(hin, sha1ctx.h.b32, sizeof(sha1ctx.h.b32));
479		SHA1Init(&sha1ctx);
480		SHA1Update(&sha1ctx, opad, SHA1_HMAC_BLOCK_LEN);
481		memcpy(hout, sha1ctx.h.b32, sizeof(sha1ctx.h.b32));
482		break;
483	default:
484		return (EINVAL);
485	}
486
487	for (i = 0; i < CESA_MAX_HASH_LEN / sizeof(uint32_t); i++) {
488		hin[i] = htobe32(hin[i]);
489		hout[i] = htobe32(hout[i]);
490	}
491
492	return (0);
493}
494
495static int
496cesa_prep_aes_key(struct cesa_session *cs)
497{
498	uint32_t ek[4 * (RIJNDAEL_MAXNR + 1)];
499	uint32_t *dkey;
500	int i;
501
502	rijndaelKeySetupEnc(ek, cs->cs_key, cs->cs_klen * 8);
503
504	cs->cs_config &= ~CESA_CSH_AES_KLEN_MASK;
505	dkey = (uint32_t *)cs->cs_aes_dkey;
506
507	switch (cs->cs_klen) {
508	case 16:
509		cs->cs_config |= CESA_CSH_AES_KLEN_128;
510		for (i = 0; i < 4; i++)
511			*dkey++ = htobe32(ek[4 * 10 + i]);
512		break;
513	case 24:
514		cs->cs_config |= CESA_CSH_AES_KLEN_192;
515		for (i = 0; i < 4; i++)
516			*dkey++ = htobe32(ek[4 * 12 + i]);
517		for (i = 0; i < 2; i++)
518			*dkey++ = htobe32(ek[4 * 11 + 2 + i]);
519		break;
520	case 32:
521		cs->cs_config |= CESA_CSH_AES_KLEN_256;
522		for (i = 0; i < 4; i++)
523			*dkey++ = htobe32(ek[4 * 14 + i]);
524		for (i = 0; i < 4; i++)
525			*dkey++ = htobe32(ek[4 * 13 + i]);
526		break;
527	default:
528		return (EINVAL);
529	}
530
531	return (0);
532}
533
534static int
535cesa_is_hash(int alg)
536{
537
538	switch (alg) {
539	case CRYPTO_MD5:
540	case CRYPTO_MD5_HMAC:
541	case CRYPTO_SHA1:
542	case CRYPTO_SHA1_HMAC:
543		return (1);
544	default:
545		return (0);
546	}
547}
548
549static void
550cesa_start_packet(struct cesa_packet *cp, unsigned int size)
551{
552
553	cp->cp_size = size;
554	cp->cp_offset = 0;
555	STAILQ_INIT(&cp->cp_copyin);
556	STAILQ_INIT(&cp->cp_copyout);
557}
558
559static int
560cesa_fill_packet(struct cesa_softc *sc, struct cesa_packet *cp,
561    bus_dma_segment_t *seg)
562{
563	struct cesa_tdma_desc *ctd;
564	unsigned int bsize;
565
566	/* Calculate size of block copy */
567	bsize = MIN(seg->ds_len, cp->cp_size - cp->cp_offset);
568
569	if (bsize > 0) {
570		ctd = cesa_tdma_copy(sc, sc->sc_sram_base +
571		    CESA_DATA(cp->cp_offset), seg->ds_addr, bsize);
572		if (!ctd)
573			return (-ENOMEM);
574
575		STAILQ_INSERT_TAIL(&cp->cp_copyin, ctd, ctd_stq);
576
577		ctd = cesa_tdma_copy(sc, seg->ds_addr, sc->sc_sram_base +
578		    CESA_DATA(cp->cp_offset), bsize);
579		if (!ctd)
580			return (-ENOMEM);
581
582		STAILQ_INSERT_TAIL(&cp->cp_copyout, ctd, ctd_stq);
583
584		seg->ds_len -= bsize;
585		seg->ds_addr += bsize;
586		cp->cp_offset += bsize;
587	}
588
589	return (bsize);
590}
591
592static void
593cesa_create_chain_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
594{
595	unsigned int mpsize, fragmented;
596	unsigned int mlen, mskip, tmlen;
597	struct cesa_chain_info *cci;
598	unsigned int elen, eskip;
599	unsigned int skip, len;
600	struct cesa_sa_desc *csd;
601	struct cesa_request *cr;
602	struct cesa_softc *sc;
603	struct cesa_packet cp;
604	bus_dma_segment_t seg;
605	uint32_t config;
606	int size;
607
608	cci = arg;
609	sc = cci->cci_sc;
610	cr = cci->cci_cr;
611
612	if (error) {
613		cci->cci_error = error;
614		return;
615	}
616
617	elen = cci->cci_enc ? cci->cci_enc->crd_len : 0;
618	eskip = cci->cci_enc ? cci->cci_enc->crd_skip : 0;
619	mlen = cci->cci_mac ? cci->cci_mac->crd_len : 0;
620	mskip = cci->cci_mac ? cci->cci_mac->crd_skip : 0;
621
622	if (elen && mlen &&
623	    ((eskip > mskip && ((eskip - mskip) & (cr->cr_cs->cs_ivlen - 1))) ||
624	    (mskip > eskip && ((mskip - eskip) & (cr->cr_cs->cs_mblen - 1))) ||
625	    (eskip > (mskip + mlen)) || (mskip > (eskip + elen)))) {
626		/*
627		 * Data alignment in the request does not meet CESA requiremnts
628		 * for combined encryption/decryption and hashing. We have to
629		 * split the request to separate operations and process them
630		 * one by one.
631		 */
632		config = cci->cci_config;
633		if ((config & CESA_CSHD_OP_MASK) == CESA_CSHD_MAC_AND_ENC) {
634			config &= ~CESA_CSHD_OP_MASK;
635
636			cci->cci_config = config | CESA_CSHD_MAC;
637			cci->cci_enc = NULL;
638			cci->cci_mac = cr->cr_mac;
639			cesa_create_chain_cb(cci, segs, nseg, cci->cci_error);
640
641			cci->cci_config = config | CESA_CSHD_ENC;
642			cci->cci_enc = cr->cr_enc;
643			cci->cci_mac = NULL;
644			cesa_create_chain_cb(cci, segs, nseg, cci->cci_error);
645		} else {
646			config &= ~CESA_CSHD_OP_MASK;
647
648			cci->cci_config = config | CESA_CSHD_ENC;
649			cci->cci_enc = cr->cr_enc;
650			cci->cci_mac = NULL;
651			cesa_create_chain_cb(cci, segs, nseg, cci->cci_error);
652
653			cci->cci_config = config | CESA_CSHD_MAC;
654			cci->cci_enc = NULL;
655			cci->cci_mac = cr->cr_mac;
656			cesa_create_chain_cb(cci, segs, nseg, cci->cci_error);
657		}
658
659		return;
660	}
661
662	tmlen = mlen;
663	fragmented = 0;
664	mpsize = CESA_MAX_PACKET_SIZE;
665	mpsize &= ~((cr->cr_cs->cs_ivlen - 1) | (cr->cr_cs->cs_mblen - 1));
666
667	if (elen && mlen) {
668		skip = MIN(eskip, mskip);
669		len = MAX(elen + eskip, mlen + mskip) - skip;
670	} else if (elen) {
671		skip = eskip;
672		len = elen;
673	} else {
674		skip = mskip;
675		len = mlen;
676	}
677
678	/* Start first packet in chain */
679	cesa_start_packet(&cp, MIN(mpsize, len));
680
681	while (nseg-- && len > 0) {
682		seg = *(segs++);
683
684		/*
685		 * Skip data in buffer on which neither ENC nor MAC operation
686		 * is requested.
687		 */
688		if (skip > 0) {
689			size = MIN(skip, seg.ds_len);
690			skip -= size;
691
692			seg.ds_addr += size;
693			seg.ds_len -= size;
694
695			if (eskip > 0)
696				eskip -= size;
697
698			if (mskip > 0)
699				mskip -= size;
700
701			if (seg.ds_len == 0)
702				continue;
703		}
704
705		while (1) {
706			/*
707			 * Fill in current packet with data. Break if there is
708			 * no more data in current DMA segment or an error
709			 * occured.
710			 */
711			size = cesa_fill_packet(sc, &cp, &seg);
712			if (size <= 0) {
713				error = -size;
714				break;
715			}
716
717			len -= size;
718
719			/* If packet is full, append it to the chain */
720			if (cp.cp_size == cp.cp_offset) {
721				csd = cesa_alloc_sdesc(sc, cr);
722				if (!csd) {
723					error = ENOMEM;
724					break;
725				}
726
727				/* Create SA descriptor for this packet */
728				csd->csd_cshd->cshd_config = cci->cci_config;
729				csd->csd_cshd->cshd_mac_total_dlen = tmlen;
730
731				/*
732				 * Enable fragmentation if request will not fit
733				 * into one packet.
734				 */
735				if (len > 0) {
736					if (!fragmented) {
737						fragmented = 1;
738						csd->csd_cshd->cshd_config |=
739						    CESA_CSHD_FRAG_FIRST;
740					} else
741						csd->csd_cshd->cshd_config |=
742						    CESA_CSHD_FRAG_MIDDLE;
743				} else if (fragmented)
744					csd->csd_cshd->cshd_config |=
745					    CESA_CSHD_FRAG_LAST;
746
747				if (eskip < cp.cp_size && elen > 0) {
748					csd->csd_cshd->cshd_enc_src =
749					    CESA_DATA(eskip);
750					csd->csd_cshd->cshd_enc_dst =
751					    CESA_DATA(eskip);
752					csd->csd_cshd->cshd_enc_dlen =
753					    MIN(elen, cp.cp_size - eskip);
754				}
755
756				if (mskip < cp.cp_size && mlen > 0) {
757					csd->csd_cshd->cshd_mac_src =
758					    CESA_DATA(mskip);
759					csd->csd_cshd->cshd_mac_dlen =
760					    MIN(mlen, cp.cp_size - mskip);
761				}
762
763				elen -= csd->csd_cshd->cshd_enc_dlen;
764				eskip -= MIN(eskip, cp.cp_size);
765				mlen -= csd->csd_cshd->cshd_mac_dlen;
766				mskip -= MIN(mskip, cp.cp_size);
767
768				cesa_dump_cshd(sc, csd->csd_cshd);
769
770				/* Append packet to the request */
771				error = cesa_append_packet(sc, cr, &cp, csd);
772				if (error)
773					break;
774
775				/* Start a new packet, as current is full */
776				cesa_start_packet(&cp, MIN(mpsize, len));
777			}
778		}
779
780		if (error)
781			break;
782	}
783
784	if (error) {
785		/*
786		 * Move all allocated resources to the request. They will be
787		 * freed later.
788		 */
789		STAILQ_CONCAT(&cr->cr_tdesc, &cp.cp_copyin);
790		STAILQ_CONCAT(&cr->cr_tdesc, &cp.cp_copyout);
791		cci->cci_error = error;
792	}
793}
794
795static void
796cesa_create_chain_cb2(void *arg, bus_dma_segment_t *segs, int nseg,
797    bus_size_t size, int error)
798{
799
800	cesa_create_chain_cb(arg, segs, nseg, error);
801}
802
803static int
804cesa_create_chain(struct cesa_softc *sc, struct cesa_request *cr)
805{
806	struct cesa_chain_info cci;
807	struct cesa_tdma_desc *ctd;
808	uint32_t config;
809	int error;
810
811	error = 0;
812	CESA_LOCK_ASSERT(sc, sessions);
813
814	/* Create request metadata */
815	if (cr->cr_enc) {
816		if (cr->cr_enc->crd_alg == CRYPTO_AES_CBC &&
817		    (cr->cr_enc->crd_flags & CRD_F_ENCRYPT) == 0)
818			memcpy(cr->cr_csd->csd_key, cr->cr_cs->cs_aes_dkey,
819			    cr->cr_cs->cs_klen);
820		else
821			memcpy(cr->cr_csd->csd_key, cr->cr_cs->cs_key,
822			    cr->cr_cs->cs_klen);
823	}
824
825	if (cr->cr_mac) {
826		memcpy(cr->cr_csd->csd_hiv_in, cr->cr_cs->cs_hiv_in,
827		    CESA_MAX_HASH_LEN);
828		memcpy(cr->cr_csd->csd_hiv_out, cr->cr_cs->cs_hiv_out,
829		    CESA_MAX_HASH_LEN);
830	}
831
832	ctd = cesa_tdma_copyin_sa_data(sc, cr);
833	if (!ctd)
834		return (ENOMEM);
835
836	cesa_append_tdesc(cr, ctd);
837
838	/* Prepare SA configuration */
839	config = cr->cr_cs->cs_config;
840
841	if (cr->cr_enc && (cr->cr_enc->crd_flags & CRD_F_ENCRYPT) == 0)
842		config |= CESA_CSHD_DECRYPT;
843	if (cr->cr_enc && !cr->cr_mac)
844		config |= CESA_CSHD_ENC;
845	if (!cr->cr_enc && cr->cr_mac)
846		config |= CESA_CSHD_MAC;
847	if (cr->cr_enc && cr->cr_mac)
848		config |= (config & CESA_CSHD_DECRYPT) ? CESA_CSHD_MAC_AND_ENC :
849		    CESA_CSHD_ENC_AND_MAC;
850
851	/* Create data packets */
852	cci.cci_sc = sc;
853	cci.cci_cr = cr;
854	cci.cci_enc = cr->cr_enc;
855	cci.cci_mac = cr->cr_mac;
856	cci.cci_config = config;
857	cci.cci_error = 0;
858
859	if (cr->cr_crp->crp_flags & CRYPTO_F_IOV)
860		error = bus_dmamap_load_uio(sc->sc_data_dtag,
861		    cr->cr_dmap, (struct uio *)cr->cr_crp->crp_buf,
862		    cesa_create_chain_cb2, &cci, BUS_DMA_NOWAIT);
863	else if (cr->cr_crp->crp_flags & CRYPTO_F_IMBUF)
864		error = bus_dmamap_load_mbuf(sc->sc_data_dtag,
865		    cr->cr_dmap, (struct mbuf *)cr->cr_crp->crp_buf,
866		    cesa_create_chain_cb2, &cci, BUS_DMA_NOWAIT);
867	else
868		error = bus_dmamap_load(sc->sc_data_dtag,
869		    cr->cr_dmap, cr->cr_crp->crp_buf,
870		    cr->cr_crp->crp_ilen, cesa_create_chain_cb, &cci,
871		    BUS_DMA_NOWAIT);
872
873	if (!error)
874		cr->cr_dmap_loaded = 1;
875
876	if (cci.cci_error)
877		error = cci.cci_error;
878
879	if (error)
880		return (error);
881
882	/* Read back request metadata */
883	ctd = cesa_tdma_copyout_sa_data(sc, cr);
884	if (!ctd)
885		return (ENOMEM);
886
887	cesa_append_tdesc(cr, ctd);
888
889	return (0);
890}
891
892static void
893cesa_execute(struct cesa_softc *sc)
894{
895	struct cesa_tdma_desc *prev_ctd, *ctd;
896	struct cesa_request *prev_cr, *cr;
897
898	CESA_LOCK(sc, requests);
899
900	/*
901	 * If ready list is empty, there is nothing to execute. If queued list
902	 * is not empty, the hardware is busy and we cannot start another
903	 * execution.
904	 */
905	if (STAILQ_EMPTY(&sc->sc_ready_requests) ||
906	    !STAILQ_EMPTY(&sc->sc_queued_requests)) {
907		CESA_UNLOCK(sc, requests);
908		return;
909	}
910
911	/* Move all ready requests to queued list */
912	STAILQ_CONCAT(&sc->sc_queued_requests, &sc->sc_ready_requests);
913	STAILQ_INIT(&sc->sc_ready_requests);
914
915	/* Create one execution chain from all requests on the list */
916	if (STAILQ_FIRST(&sc->sc_queued_requests) !=
917	    STAILQ_LAST(&sc->sc_queued_requests, cesa_request, cr_stq)) {
918		prev_cr = NULL;
919		cesa_sync_dma_mem(&sc->sc_tdesc_cdm, BUS_DMASYNC_POSTREAD |
920		    BUS_DMASYNC_POSTWRITE);
921
922		STAILQ_FOREACH(cr, &sc->sc_queued_requests, cr_stq) {
923			if (prev_cr) {
924				ctd = STAILQ_FIRST(&cr->cr_tdesc);
925				prev_ctd = STAILQ_LAST(&prev_cr->cr_tdesc,
926				    cesa_tdma_desc, ctd_stq);
927
928				prev_ctd->ctd_cthd->cthd_next =
929				    ctd->ctd_cthd_paddr;
930			}
931
932			prev_cr = cr;
933		}
934
935		cesa_sync_dma_mem(&sc->sc_tdesc_cdm, BUS_DMASYNC_PREREAD |
936		    BUS_DMASYNC_PREWRITE);
937	}
938
939	/* Start chain execution in hardware */
940	cr = STAILQ_FIRST(&sc->sc_queued_requests);
941	ctd = STAILQ_FIRST(&cr->cr_tdesc);
942
943	CESA_WRITE(sc, CESA_TDMA_ND, ctd->ctd_cthd_paddr);
944	CESA_WRITE(sc, CESA_SA_CMD, CESA_SA_CMD_ACTVATE);
945
946	CESA_UNLOCK(sc, requests);
947}
948
949static int
950cesa_setup_sram(struct cesa_softc *sc)
951{
952	phandle_t sram_node;
953	ihandle_t sram_ihandle;
954	pcell_t sram_handle, sram_reg;
955
956	if (OF_getprop(ofw_bus_get_node(sc->sc_dev), "sram-handle",
957	    (void *)&sram_handle, sizeof(sram_handle)) <= 0)
958		return (ENXIO);
959
960	sram_ihandle = (ihandle_t)sram_handle;
961	sram_ihandle = fdt32_to_cpu(sram_ihandle);
962	sram_node = OF_instance_to_package(sram_ihandle);
963
964	if (OF_getprop(sram_node, "reg", (void *)&sram_reg,
965	    sizeof(sram_reg)) <= 0)
966		return (ENXIO);
967
968	sc->sc_sram_base = fdt32_to_cpu(sram_reg);
969
970	return (0);
971}
972
973static int
974cesa_probe(device_t dev)
975{
976	if (!ofw_bus_is_compatible(dev, "mrvl,cesa"))
977		return (ENXIO);
978
979	device_set_desc(dev, "Marvell Cryptographic Engine and Security "
980	    "Accelerator");
981
982	return (BUS_PROBE_DEFAULT);
983}
984
985static int
986cesa_attach(device_t dev)
987{
988	struct cesa_softc *sc;
989	uint32_t d, r;
990	int error;
991	int i;
992
993	sc = device_get_softc(dev);
994	sc->sc_blocked = 0;
995	sc->sc_error = 0;
996	sc->sc_dev = dev;
997
998	error = cesa_setup_sram(sc);
999	if (error) {
1000		device_printf(dev, "could not setup SRAM\n");
1001		return (error);
1002	}
1003
1004	soc_id(&d, &r);
1005
1006	switch (d) {
1007	case MV_DEV_88F6281:
1008	case MV_DEV_88F6282:
1009		sc->sc_tperr = 0;
1010		break;
1011	case MV_DEV_MV78100:
1012	case MV_DEV_MV78100_Z0:
1013		sc->sc_tperr = CESA_ICR_TPERR;
1014		break;
1015	default:
1016		return (ENXIO);
1017	}
1018
1019	/* Initialize mutexes */
1020	mtx_init(&sc->sc_sc_lock, device_get_nameunit(dev),
1021	    "CESA Shared Data", MTX_DEF);
1022	mtx_init(&sc->sc_tdesc_lock, device_get_nameunit(dev),
1023	    "CESA TDMA Descriptors Pool", MTX_DEF);
1024	mtx_init(&sc->sc_sdesc_lock, device_get_nameunit(dev),
1025	    "CESA SA Descriptors Pool", MTX_DEF);
1026	mtx_init(&sc->sc_requests_lock, device_get_nameunit(dev),
1027	    "CESA Requests Pool", MTX_DEF);
1028	mtx_init(&sc->sc_sessions_lock, device_get_nameunit(dev),
1029	    "CESA Sessions Pool", MTX_DEF);
1030
1031	/* Allocate I/O and IRQ resources */
1032	error = bus_alloc_resources(dev, cesa_res_spec, sc->sc_res);
1033	if (error) {
1034		device_printf(dev, "could not allocate resources\n");
1035		goto err0;
1036	}
1037
1038	sc->sc_bsh = rman_get_bushandle(*(sc->sc_res));
1039	sc->sc_bst = rman_get_bustag(*(sc->sc_res));
1040
1041	/* Setup interrupt handler */
1042	error = bus_setup_intr(dev, sc->sc_res[1], INTR_TYPE_NET | INTR_MPSAFE,
1043	    NULL, cesa_intr, sc, &(sc->sc_icookie));
1044	if (error) {
1045		device_printf(dev, "could not setup engine completion irq\n");
1046		goto err1;
1047	}
1048
1049	/* Create DMA tag for processed data */
1050	error = bus_dma_tag_create(bus_get_dma_tag(dev),	/* parent */
1051	    1, 0,				/* alignment, boundary */
1052	    BUS_SPACE_MAXADDR_32BIT,		/* lowaddr */
1053	    BUS_SPACE_MAXADDR,			/* highaddr */
1054	    NULL, NULL,				/* filtfunc, filtfuncarg */
1055	    CESA_MAX_REQUEST_SIZE,		/* maxsize */
1056	    CESA_MAX_FRAGMENTS,			/* nsegments */
1057	    CESA_MAX_REQUEST_SIZE, 0,		/* maxsegsz, flags */
1058	    NULL, NULL,				/* lockfunc, lockfuncarg */
1059	    &sc->sc_data_dtag);			/* dmat */
1060	if (error)
1061		goto err2;
1062
1063	/* Initialize data structures: TDMA Descriptors Pool */
1064	error = cesa_alloc_dma_mem(sc, &sc->sc_tdesc_cdm,
1065	    CESA_TDMA_DESCRIPTORS * sizeof(struct cesa_tdma_hdesc));
1066	if (error)
1067		goto err3;
1068
1069	STAILQ_INIT(&sc->sc_free_tdesc);
1070	for (i = 0; i < CESA_TDMA_DESCRIPTORS; i++) {
1071		sc->sc_tdesc[i].ctd_cthd =
1072		    (struct cesa_tdma_hdesc *)(sc->sc_tdesc_cdm.cdm_vaddr) + i;
1073		sc->sc_tdesc[i].ctd_cthd_paddr = sc->sc_tdesc_cdm.cdm_paddr +
1074		    (i * sizeof(struct cesa_tdma_hdesc));
1075		STAILQ_INSERT_TAIL(&sc->sc_free_tdesc, &sc->sc_tdesc[i],
1076		    ctd_stq);
1077	}
1078
1079	/* Initialize data structures: SA Descriptors Pool */
1080	error = cesa_alloc_dma_mem(sc, &sc->sc_sdesc_cdm,
1081	    CESA_SA_DESCRIPTORS * sizeof(struct cesa_sa_hdesc));
1082	if (error)
1083		goto err4;
1084
1085	STAILQ_INIT(&sc->sc_free_sdesc);
1086	for (i = 0; i < CESA_SA_DESCRIPTORS; i++) {
1087		sc->sc_sdesc[i].csd_cshd =
1088		    (struct cesa_sa_hdesc *)(sc->sc_sdesc_cdm.cdm_vaddr) + i;
1089		sc->sc_sdesc[i].csd_cshd_paddr = sc->sc_sdesc_cdm.cdm_paddr +
1090		    (i * sizeof(struct cesa_sa_hdesc));
1091		STAILQ_INSERT_TAIL(&sc->sc_free_sdesc, &sc->sc_sdesc[i],
1092		    csd_stq);
1093	}
1094
1095	/* Initialize data structures: Requests Pool */
1096	error = cesa_alloc_dma_mem(sc, &sc->sc_requests_cdm,
1097	    CESA_REQUESTS * sizeof(struct cesa_sa_data));
1098	if (error)
1099		goto err5;
1100
1101	STAILQ_INIT(&sc->sc_free_requests);
1102	STAILQ_INIT(&sc->sc_ready_requests);
1103	STAILQ_INIT(&sc->sc_queued_requests);
1104	for (i = 0; i < CESA_REQUESTS; i++) {
1105		sc->sc_requests[i].cr_csd =
1106		    (struct cesa_sa_data *)(sc->sc_requests_cdm.cdm_vaddr) + i;
1107		sc->sc_requests[i].cr_csd_paddr =
1108		    sc->sc_requests_cdm.cdm_paddr +
1109		    (i * sizeof(struct cesa_sa_data));
1110
1111		/* Preallocate DMA maps */
1112		error = bus_dmamap_create(sc->sc_data_dtag, 0,
1113		    &sc->sc_requests[i].cr_dmap);
1114		if (error && i > 0) {
1115			i--;
1116			do {
1117				bus_dmamap_destroy(sc->sc_data_dtag,
1118				    sc->sc_requests[i].cr_dmap);
1119			} while (i--);
1120
1121			goto err6;
1122		}
1123
1124		STAILQ_INSERT_TAIL(&sc->sc_free_requests, &sc->sc_requests[i],
1125		    cr_stq);
1126	}
1127
1128	/* Initialize data structures: Sessions Pool */
1129	STAILQ_INIT(&sc->sc_free_sessions);
1130	for (i = 0; i < CESA_SESSIONS; i++) {
1131		sc->sc_sessions[i].cs_sid = i;
1132		STAILQ_INSERT_TAIL(&sc->sc_free_sessions, &sc->sc_sessions[i],
1133		    cs_stq);
1134	}
1135
1136	/*
1137	 * Initialize TDMA:
1138	 * - Burst limit: 128 bytes,
1139	 * - Outstanding reads enabled,
1140	 * - No byte-swap.
1141	 */
1142	CESA_WRITE(sc, CESA_TDMA_CR, CESA_TDMA_CR_DBL128 | CESA_TDMA_CR_SBL128 |
1143	    CESA_TDMA_CR_ORDEN | CESA_TDMA_CR_NBS | CESA_TDMA_CR_ENABLE);
1144
1145	/*
1146	 * Initialize SA:
1147	 * - SA descriptor is present at beginning of CESA SRAM,
1148	 * - Multi-packet chain mode,
1149	 * - Cooperation with TDMA enabled.
1150	 */
1151	CESA_WRITE(sc, CESA_SA_DPR, 0);
1152	CESA_WRITE(sc, CESA_SA_CR, CESA_SA_CR_ACTIVATE_TDMA |
1153	    CESA_SA_CR_WAIT_FOR_TDMA | CESA_SA_CR_MULTI_MODE);
1154
1155	/* Unmask interrupts */
1156	CESA_WRITE(sc, CESA_ICR, 0);
1157	CESA_WRITE(sc, CESA_ICM, CESA_ICM_ACCTDMA | sc->sc_tperr);
1158	CESA_WRITE(sc, CESA_TDMA_ECR, 0);
1159	CESA_WRITE(sc, CESA_TDMA_EMR, CESA_TDMA_EMR_MISS |
1160	    CESA_TDMA_EMR_DOUBLE_HIT | CESA_TDMA_EMR_BOTH_HIT |
1161	    CESA_TDMA_EMR_DATA_ERROR);
1162
1163	/* Register in OCF */
1164	sc->sc_cid = crypto_get_driverid(dev, CRYPTOCAP_F_HARDWARE);
1165	if (sc->sc_cid) {
1166		device_printf(dev, "could not get crypto driver id\n");
1167		goto err7;
1168	}
1169
1170	crypto_register(sc->sc_cid, CRYPTO_AES_CBC, 0, 0);
1171	crypto_register(sc->sc_cid, CRYPTO_DES_CBC, 0, 0);
1172	crypto_register(sc->sc_cid, CRYPTO_3DES_CBC, 0, 0);
1173	crypto_register(sc->sc_cid, CRYPTO_MD5, 0, 0);
1174	crypto_register(sc->sc_cid, CRYPTO_MD5_HMAC, 0, 0);
1175	crypto_register(sc->sc_cid, CRYPTO_SHA1, 0, 0);
1176	crypto_register(sc->sc_cid, CRYPTO_SHA1_HMAC, 0, 0);
1177
1178	return (0);
1179err7:
1180	for (i = 0; i < CESA_REQUESTS; i++)
1181		bus_dmamap_destroy(sc->sc_data_dtag,
1182		    sc->sc_requests[i].cr_dmap);
1183err6:
1184	cesa_free_dma_mem(&sc->sc_requests_cdm);
1185err5:
1186	cesa_free_dma_mem(&sc->sc_sdesc_cdm);
1187err4:
1188	cesa_free_dma_mem(&sc->sc_tdesc_cdm);
1189err3:
1190	bus_dma_tag_destroy(sc->sc_data_dtag);
1191err2:
1192	bus_teardown_intr(dev, sc->sc_res[1], sc->sc_icookie);
1193err1:
1194	bus_release_resources(dev, cesa_res_spec, sc->sc_res);
1195err0:
1196	mtx_destroy(&sc->sc_sessions_lock);
1197	mtx_destroy(&sc->sc_requests_lock);
1198	mtx_destroy(&sc->sc_sdesc_lock);
1199	mtx_destroy(&sc->sc_tdesc_lock);
1200	mtx_destroy(&sc->sc_sc_lock);
1201	return (ENXIO);
1202}
1203
1204static int
1205cesa_detach(device_t dev)
1206{
1207	struct cesa_softc *sc;
1208	int i;
1209
1210	sc = device_get_softc(dev);
1211
1212	/* TODO: Wait for queued requests completion before shutdown. */
1213
1214	/* Mask interrupts */
1215	CESA_WRITE(sc, CESA_ICM, 0);
1216	CESA_WRITE(sc, CESA_TDMA_EMR, 0);
1217
1218	/* Unregister from OCF */
1219	crypto_unregister_all(sc->sc_cid);
1220
1221	/* Free DMA Maps */
1222	for (i = 0; i < CESA_REQUESTS; i++)
1223		bus_dmamap_destroy(sc->sc_data_dtag,
1224		    sc->sc_requests[i].cr_dmap);
1225
1226	/* Free DMA Memory */
1227	cesa_free_dma_mem(&sc->sc_requests_cdm);
1228	cesa_free_dma_mem(&sc->sc_sdesc_cdm);
1229	cesa_free_dma_mem(&sc->sc_tdesc_cdm);
1230
1231	/* Free DMA Tag */
1232	bus_dma_tag_destroy(sc->sc_data_dtag);
1233
1234	/* Stop interrupt */
1235	bus_teardown_intr(dev, sc->sc_res[1], sc->sc_icookie);
1236
1237	/* Relase I/O and IRQ resources */
1238	bus_release_resources(dev, cesa_res_spec, sc->sc_res);
1239
1240	/* Destory mutexes */
1241	mtx_destroy(&sc->sc_sessions_lock);
1242	mtx_destroy(&sc->sc_requests_lock);
1243	mtx_destroy(&sc->sc_sdesc_lock);
1244	mtx_destroy(&sc->sc_tdesc_lock);
1245	mtx_destroy(&sc->sc_sc_lock);
1246
1247	return (0);
1248}
1249
1250static void
1251cesa_intr(void *arg)
1252{
1253	STAILQ_HEAD(, cesa_request) requests;
1254	struct cesa_request *cr, *tmp;
1255	struct cesa_softc *sc;
1256	uint32_t ecr, icr;
1257	int blocked;
1258
1259	sc = arg;
1260
1261	/* Ack interrupt */
1262	ecr = CESA_READ(sc, CESA_TDMA_ECR);
1263	CESA_WRITE(sc, CESA_TDMA_ECR, 0);
1264	icr = CESA_READ(sc, CESA_ICR);
1265	CESA_WRITE(sc, CESA_ICR, 0);
1266
1267	/* Check for TDMA errors */
1268	if (ecr & CESA_TDMA_ECR_MISS) {
1269		device_printf(sc->sc_dev, "TDMA Miss error detected!\n");
1270		sc->sc_error = EIO;
1271	}
1272
1273	if (ecr & CESA_TDMA_ECR_DOUBLE_HIT) {
1274		device_printf(sc->sc_dev, "TDMA Double Hit error detected!\n");
1275		sc->sc_error = EIO;
1276	}
1277
1278	if (ecr & CESA_TDMA_ECR_BOTH_HIT) {
1279		device_printf(sc->sc_dev, "TDMA Both Hit error detected!\n");
1280		sc->sc_error = EIO;
1281	}
1282
1283	if (ecr & CESA_TDMA_ECR_DATA_ERROR) {
1284		device_printf(sc->sc_dev, "TDMA Data error detected!\n");
1285		sc->sc_error = EIO;
1286	}
1287
1288	/* Check for CESA errors */
1289	if (icr & sc->sc_tperr) {
1290		device_printf(sc->sc_dev, "CESA SRAM Parity error detected!\n");
1291		sc->sc_error = EIO;
1292	}
1293
1294	/* If there is nothing more to do, return */
1295	if ((icr & CESA_ICR_ACCTDMA) == 0)
1296		return;
1297
1298	/* Get all finished requests */
1299	CESA_LOCK(sc, requests);
1300	STAILQ_INIT(&requests);
1301	STAILQ_CONCAT(&requests, &sc->sc_queued_requests);
1302	STAILQ_INIT(&sc->sc_queued_requests);
1303	CESA_UNLOCK(sc, requests);
1304
1305	/* Execute all ready requests */
1306	cesa_execute(sc);
1307
1308	/* Process completed requests */
1309	cesa_sync_dma_mem(&sc->sc_requests_cdm, BUS_DMASYNC_POSTREAD |
1310	    BUS_DMASYNC_POSTWRITE);
1311
1312	STAILQ_FOREACH_SAFE(cr, &requests, cr_stq, tmp) {
1313		bus_dmamap_sync(sc->sc_data_dtag, cr->cr_dmap,
1314		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1315
1316		cr->cr_crp->crp_etype = sc->sc_error;
1317		if (cr->cr_mac)
1318			crypto_copyback(cr->cr_crp->crp_flags,
1319			    cr->cr_crp->crp_buf, cr->cr_mac->crd_inject,
1320			    cr->cr_cs->cs_hlen, cr->cr_csd->csd_hash);
1321
1322		crypto_done(cr->cr_crp);
1323		cesa_free_request(sc, cr);
1324	}
1325
1326	cesa_sync_dma_mem(&sc->sc_requests_cdm, BUS_DMASYNC_PREREAD |
1327	    BUS_DMASYNC_PREWRITE);
1328
1329	sc->sc_error = 0;
1330
1331	/* Unblock driver if it ran out of resources */
1332	CESA_LOCK(sc, sc);
1333	blocked = sc->sc_blocked;
1334	sc->sc_blocked = 0;
1335	CESA_UNLOCK(sc, sc);
1336
1337	if (blocked)
1338		crypto_unblock(sc->sc_cid, blocked);
1339}
1340
1341static int
1342cesa_newsession(device_t dev, uint32_t *sidp, struct cryptoini *cri)
1343{
1344	struct cesa_session *cs;
1345	struct cesa_softc *sc;
1346	struct cryptoini *enc;
1347	struct cryptoini *mac;
1348	int error;
1349
1350	sc = device_get_softc(dev);
1351	enc = NULL;
1352	mac = NULL;
1353	error = 0;
1354
1355	/* Check and parse input */
1356	if (cesa_is_hash(cri->cri_alg))
1357		mac = cri;
1358	else
1359		enc = cri;
1360
1361	cri = cri->cri_next;
1362
1363	if (cri) {
1364		if (!enc && !cesa_is_hash(cri->cri_alg))
1365			enc = cri;
1366
1367		if (!mac && cesa_is_hash(cri->cri_alg))
1368			mac = cri;
1369
1370		if (cri->cri_next || !(enc && mac))
1371			return (EINVAL);
1372	}
1373
1374	if ((enc && (enc->cri_klen / 8) > CESA_MAX_KEY_LEN) ||
1375	    (mac && (mac->cri_klen / 8) > CESA_MAX_MKEY_LEN))
1376		return (E2BIG);
1377
1378	/* Allocate session */
1379	cs = cesa_alloc_session(sc);
1380	if (!cs)
1381		return (ENOMEM);
1382
1383	/* Prepare CESA configuration */
1384	cs->cs_config = 0;
1385	cs->cs_ivlen = 1;
1386	cs->cs_mblen = 1;
1387
1388	if (enc) {
1389		switch (enc->cri_alg) {
1390		case CRYPTO_AES_CBC:
1391			cs->cs_config |= CESA_CSHD_AES | CESA_CSHD_CBC;
1392			cs->cs_ivlen = AES_BLOCK_LEN;
1393			break;
1394		case CRYPTO_DES_CBC:
1395			cs->cs_config |= CESA_CSHD_DES | CESA_CSHD_CBC;
1396			cs->cs_ivlen = DES_BLOCK_LEN;
1397			break;
1398		case CRYPTO_3DES_CBC:
1399			cs->cs_config |= CESA_CSHD_3DES | CESA_CSHD_3DES_EDE |
1400			    CESA_CSHD_CBC;
1401			cs->cs_ivlen = DES3_BLOCK_LEN;
1402			break;
1403		default:
1404			error = EINVAL;
1405			break;
1406		}
1407	}
1408
1409	if (!error && mac) {
1410		switch (mac->cri_alg) {
1411		case CRYPTO_MD5:
1412			cs->cs_config |= CESA_CSHD_MD5;
1413			cs->cs_mblen = 1;
1414			cs->cs_hlen = MD5_HASH_LEN;
1415			break;
1416		case CRYPTO_MD5_HMAC:
1417			cs->cs_config |= CESA_CSHD_MD5_HMAC;
1418			cs->cs_mblen = MD5_HMAC_BLOCK_LEN;
1419			cs->cs_hlen = CESA_HMAC_HASH_LENGTH;
1420			break;
1421		case CRYPTO_SHA1:
1422			cs->cs_config |= CESA_CSHD_SHA1;
1423			cs->cs_mblen = 1;
1424			cs->cs_hlen = SHA1_HASH_LEN;
1425			break;
1426		case CRYPTO_SHA1_HMAC:
1427			cs->cs_config |= CESA_CSHD_SHA1_HMAC;
1428			cs->cs_mblen = SHA1_HMAC_BLOCK_LEN;
1429			cs->cs_hlen = CESA_HMAC_HASH_LENGTH;
1430			break;
1431		default:
1432			error = EINVAL;
1433			break;
1434		}
1435	}
1436
1437	/* Save cipher key */
1438	if (!error && enc && enc->cri_key) {
1439		cs->cs_klen = enc->cri_klen / 8;
1440		memcpy(cs->cs_key, enc->cri_key, cs->cs_klen);
1441		if (enc->cri_alg == CRYPTO_AES_CBC)
1442			error = cesa_prep_aes_key(cs);
1443	}
1444
1445	/* Save digest key */
1446	if (!error && mac && mac->cri_key)
1447		error = cesa_set_mkey(cs, mac->cri_alg, mac->cri_key,
1448		    mac->cri_klen / 8);
1449
1450	if (error) {
1451		cesa_free_session(sc, cs);
1452		return (EINVAL);
1453	}
1454
1455	*sidp = cs->cs_sid;
1456
1457	return (0);
1458}
1459
1460static int
1461cesa_freesession(device_t dev, uint64_t tid)
1462{
1463	struct cesa_session *cs;
1464	struct cesa_softc *sc;
1465
1466	sc = device_get_softc(dev);
1467	cs = cesa_get_session(sc, CRYPTO_SESID2LID(tid));
1468	if (!cs)
1469		return (EINVAL);
1470
1471	/* Free session */
1472	cesa_free_session(sc, cs);
1473
1474	return (0);
1475}
1476
1477static int
1478cesa_process(device_t dev, struct cryptop *crp, int hint)
1479{
1480	struct cesa_request *cr;
1481	struct cesa_session *cs;
1482	struct cryptodesc *crd;
1483	struct cryptodesc *enc;
1484	struct cryptodesc *mac;
1485	struct cesa_softc *sc;
1486	int error;
1487
1488	sc = device_get_softc(dev);
1489	crd = crp->crp_desc;
1490	enc = NULL;
1491	mac = NULL;
1492	error = 0;
1493
1494	/* Check session ID */
1495	cs = cesa_get_session(sc, CRYPTO_SESID2LID(crp->crp_sid));
1496	if (!cs) {
1497		crp->crp_etype = EINVAL;
1498		crypto_done(crp);
1499		return (0);
1500	}
1501
1502	/* Check and parse input */
1503	if (crp->crp_ilen > CESA_MAX_REQUEST_SIZE) {
1504		crp->crp_etype = E2BIG;
1505		crypto_done(crp);
1506		return (0);
1507	}
1508
1509	if (cesa_is_hash(crd->crd_alg))
1510		mac = crd;
1511	else
1512		enc = crd;
1513
1514	crd = crd->crd_next;
1515
1516	if (crd) {
1517		if (!enc && !cesa_is_hash(crd->crd_alg))
1518			enc = crd;
1519
1520		if (!mac && cesa_is_hash(crd->crd_alg))
1521			mac = crd;
1522
1523		if (crd->crd_next || !(enc && mac)) {
1524			crp->crp_etype = EINVAL;
1525			crypto_done(crp);
1526			return (0);
1527		}
1528	}
1529
1530	/*
1531	 * Get request descriptor. Block driver if there is no free
1532	 * descriptors in pool.
1533	 */
1534	cr = cesa_alloc_request(sc);
1535	if (!cr) {
1536		CESA_LOCK(sc, sc);
1537		sc->sc_blocked = CRYPTO_SYMQ;
1538		CESA_UNLOCK(sc, sc);
1539		return (ERESTART);
1540	}
1541
1542	/* Prepare request */
1543	cr->cr_crp = crp;
1544	cr->cr_enc = enc;
1545	cr->cr_mac = mac;
1546	cr->cr_cs = cs;
1547
1548	CESA_LOCK(sc, sessions);
1549	cesa_sync_desc(sc, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1550
1551	if (enc && enc->crd_flags & CRD_F_ENCRYPT) {
1552		if (enc->crd_flags & CRD_F_IV_EXPLICIT)
1553			memcpy(cr->cr_csd->csd_iv, enc->crd_iv, cs->cs_ivlen);
1554		else
1555			arc4rand(cr->cr_csd->csd_iv, cs->cs_ivlen, 0);
1556
1557		if ((enc->crd_flags & CRD_F_IV_PRESENT) == 0)
1558			crypto_copyback(crp->crp_flags, crp->crp_buf,
1559			    enc->crd_inject, cs->cs_ivlen, cr->cr_csd->csd_iv);
1560	} else if (enc) {
1561		if (enc->crd_flags & CRD_F_IV_EXPLICIT)
1562			memcpy(cr->cr_csd->csd_iv, enc->crd_iv, cs->cs_ivlen);
1563		else
1564			crypto_copydata(crp->crp_flags, crp->crp_buf,
1565			    enc->crd_inject, cs->cs_ivlen, cr->cr_csd->csd_iv);
1566	}
1567
1568	if (enc && enc->crd_flags & CRD_F_KEY_EXPLICIT) {
1569		if ((enc->crd_klen / 8) <= CESA_MAX_KEY_LEN) {
1570			cs->cs_klen = enc->crd_klen / 8;
1571			memcpy(cs->cs_key, enc->crd_key, cs->cs_klen);
1572			if (enc->crd_alg == CRYPTO_AES_CBC)
1573				error = cesa_prep_aes_key(cs);
1574		} else
1575			error = E2BIG;
1576	}
1577
1578	if (!error && mac && mac->crd_flags & CRD_F_KEY_EXPLICIT) {
1579		if ((mac->crd_klen / 8) <= CESA_MAX_MKEY_LEN)
1580			error = cesa_set_mkey(cs, mac->crd_alg, mac->crd_key,
1581			    mac->crd_klen / 8);
1582		else
1583			error = E2BIG;
1584	}
1585
1586	/* Convert request to chain of TDMA and SA descriptors */
1587	if (!error)
1588		error = cesa_create_chain(sc, cr);
1589
1590	cesa_sync_desc(sc, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1591	CESA_UNLOCK(sc, sessions);
1592
1593	if (error) {
1594		cesa_free_request(sc, cr);
1595		crp->crp_etype = error;
1596		crypto_done(crp);
1597		return (0);
1598	}
1599
1600	bus_dmamap_sync(sc->sc_data_dtag, cr->cr_dmap, BUS_DMASYNC_PREREAD |
1601	    BUS_DMASYNC_PREWRITE);
1602
1603	/* Enqueue request to execution */
1604	cesa_enqueue_request(sc, cr);
1605
1606	/* Start execution, if we have no more requests in queue */
1607	if ((hint & CRYPTO_HINT_MORE) == 0)
1608		cesa_execute(sc);
1609
1610	return (0);
1611}
1612